repo_name (string, 5-100 chars) | path (string, 4-375 chars) | copies (class, 991 values) | size (string, 4-7 chars) | content (string, 666-1M chars) | license (class, 15 values)
---|---|---|---|---|---
django-wodnas/django-tinymce | tinymce/widgets.py | 6 | 4860 | # Copyright (c) 2008 Joost Cassee
# Licensed under the terms of the MIT License (see LICENSE.txt)
"""
This TinyMCE widget was copied and extended from this code by John D'Agostino:
http://code.djangoproject.com/wiki/CustomWidgetsTinyMCE
"""
from django import forms
from django.conf import settings
from django.contrib.admin import widgets as admin_widgets
from django.core.urlresolvers import reverse
from django.forms.widgets import flatatt
try:
from django.utils.encoding import smart_unicode
except ImportError:
from django.forms.util import smart_unicode
from django.utils.html import escape
from django.utils import simplejson
from django.utils.datastructures import SortedDict
from django.utils.safestring import mark_safe
from django.utils.translation import get_language, ugettext as _
import tinymce.settings
class TinyMCE(forms.Textarea):
"""
TinyMCE widget. Set settings.TINYMCE_JS_URL to set the location of the
javascript file. Default is "MEDIA_URL + 'js/tiny_mce/tiny_mce.js'".
You can customize the configuration with the mce_attrs argument to the
constructor.
In addition to the standard configuration you can set the
'content_language' parameter. It takes the value of the 'language'
parameter by default.
In addition to the default settings from settings.TINYMCE_DEFAULT_CONFIG,
this widget sets the 'language', 'directionality' and
'spellchecker_languages' parameters by default. The first is derived from
the current Django language, the others from the 'content_language'
parameter.
"""
def __init__(self, content_language=None, attrs=None, mce_attrs=None):
super(TinyMCE, self).__init__(attrs)
if mce_attrs is None:
mce_attrs = {}
self.mce_attrs = mce_attrs
if content_language is None:
content_language = mce_attrs.get('language', None)
self.content_language = content_language
def render(self, name, value, attrs=None):
if value is None: value = ''
value = smart_unicode(value)
final_attrs = self.build_attrs(attrs)
final_attrs['name'] = name
assert 'id' in final_attrs, "TinyMCE widget attributes must contain 'id'"
mce_config = tinymce.settings.DEFAULT_CONFIG.copy()
mce_config.update(get_language_config(self.content_language))
if tinymce.settings.USE_FILEBROWSER:
mce_config['file_browser_callback'] = "djangoFileBrowser"
mce_config.update(self.mce_attrs)
mce_config['mode'] = 'exact'
mce_config['elements'] = final_attrs['id']
mce_config['strict_loading_mode'] = 1
mce_json = simplejson.dumps(mce_config)
html = [u'<textarea%s>%s</textarea>' % (flatatt(final_attrs), escape(value))]
if tinymce.settings.USE_COMPRESSOR:
compressor_config = {
'plugins': mce_config.get('plugins', ''),
'themes': mce_config.get('theme', 'advanced'),
'languages': mce_config.get('language', ''),
'diskcache': True,
'debug': False,
}
compressor_json = simplejson.dumps(compressor_config)
html.append(u'<script type="text/javascript">tinyMCE_GZ.init(%s)</script>' % compressor_json)
html.append(u'<script type="text/javascript">tinyMCE.init(%s)</script>' % mce_json)
return mark_safe(u'\n'.join(html))
def _media(self):
if tinymce.settings.USE_COMPRESSOR:
js = [reverse('tinymce-compressor')]
else:
js = [tinymce.settings.JS_URL]
if tinymce.settings.USE_FILEBROWSER:
js.append(reverse('tinymce-filebrowser'))
return forms.Media(js=js)
media = property(_media)
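# A minimal usage sketch, assuming a hypothetical ArticleForm; the mce_attrs
# keys shown are standard TinyMCE configuration options, not defaults of this
# module.
#
#     from django import forms
#     from tinymce.widgets import TinyMCE
#
#     class ArticleForm(forms.Form):
#         body = forms.CharField(widget=TinyMCE(
#             attrs={'cols': 80, 'rows': 30},
#             mce_attrs={'theme': 'advanced'},
#             content_language='en',
#         ))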
class AdminTinyMCE(admin_widgets.AdminTextareaWidget, TinyMCE):
pass
def get_language_config(content_language=None):
language = get_language()[:2]
if content_language:
content_language = content_language[:2]
else:
content_language = language
config = {}
config['language'] = language
lang_names = SortedDict()
for lang, name in settings.LANGUAGES:
if lang[:2] not in lang_names: lang_names[lang[:2]] = []
lang_names[lang[:2]].append(_(name))
sp_langs = []
for lang, names in lang_names.items():
if lang == content_language:
default = '+'
else:
default = ''
sp_langs.append(u'%s%s=%s' % (default, ' / '.join(names), lang))
config['spellchecker_languages'] = ','.join(sp_langs)
if content_language in settings.LANGUAGES_BIDI:
config['directionality'] = 'rtl'
else:
config['directionality'] = 'ltr'
if tinymce.settings.USE_SPELLCHECKER:
config['spellchecker_rpc_url'] = reverse('tinymce.views.spell_check')
return config
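# A sketch of the dict shape returned above, assuming
# LANGUAGES = (('en', 'English'), ('he', 'Hebrew')), an active 'en' locale,
# and a hypothetical spellchecker URL:
#
#     {'language': 'en',
#      'spellchecker_languages': '+English=en,Hebrew=he',
#      'directionality': 'ltr',
#      'spellchecker_rpc_url': '/tinymce/spellcheck/'}  # only if USE_SPELLCHECKER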
| mit |
hoh/Billabong | billabong/check.py | 1 | 2931 | # Copyright (c) 2015 "Hugo Herter http://hugoherter.com"
#
# This file is part of Billabong.
#
# Billabong is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Check the integrity of the data."""
import logging
from base64 import b64decode
from .encryption import hashing, decrypt_blob
from .utils import read_in_chunks
from .exceptions import CheckError
from .settings import inventory, stores
def compute_hash(file_object, chunk_size=1024):
"""Return the hash of a file object.
Compute the hash of the content of a file object using
the given hashing function, by reading it chunk by chunk.
"""
file_hash = hashing()
for chunk in read_in_chunks(file_object, chunk_size):
file_hash.update(chunk)
return file_hash
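# A minimal usage sketch; the file name is hypothetical and `hashing` is the
# hashlib-style constructor imported above:
#
#     with open('payload.blob', 'rb') as fobj:
#         digest = compute_hash(fobj).hexdigest()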
def check_data(id_=None, record=None, raises=False):
"""Check the integrity of the data for a record id or record."""
if id_ and not record:
record = inventory.get_record(id_)
elif record and not id_:
id_ = record['id']
else:
raise ValueError("Missing value for 'id' or 'meta'.")
blob_id = record['blob']
check_enc_data(blob_id, raises)
key = b64decode(record['key'])
hash_ = record['hash']
    check_clear_data(blob_id, key, hash_, raises)
def check_enc_data(blob_id, raises=False):
"""Check the validity of an encrypted blob."""
enc_path = stores[0]._blob_path(blob_id)
with open(enc_path, 'rb') as enc_file:
enc_hash = compute_hash(enc_file)
if blob_id == enc_hash.hexdigest():
return True
else:
reason = ("Encrypted data does not match the hash for id '{}'"
.format(blob_id))
if raises:
raise CheckError(reason)
else:
logging.error(reason)
return False
def check_clear_data(id_, key, hash_, raises=False):
"""Check the validity of the clear data inside a blob."""
clear_data = decrypt_blob(stores[0], id_, key)
clear_hash = hashing()
for chunk in clear_data:
clear_hash.update(chunk)
if hash_ == "sha256-" + clear_hash.hexdigest():
return True
else:
reason = ("Clear data does not match the hash for id '{}'"
.format(id_))
if raises:
raise CheckError(reason)
else:
logging.error(reason)
return False
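# A hedged usage sketch; the record id is illustrative, not real inventory
# data:
#
#     check_data(id_='0123abcd', raises=True)  # raises CheckError on mismatch
#     check_data(id_='0123abcd')               # logs errors instead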
| agpl-3.0 |
valkjsaaa/sl4a | python/xmpppy/doc/examples/commandsbot.py | 87 | 7937 | #!/usr/bin/python
""" The example of using xmpppy's Ad-Hoc Commands (JEP-0050) implementation.
"""
import xmpp
from xmpp.protocol import *
options = {
'JID': '[email protected]',
'Password': '********',
}
class TestCommand(xmpp.commands.Command_Handler_Prototype):
""" Example class. You should read source if you wish to understate how it works. This one
actually does some calculations."""
name = 'testcommand'
description = 'Circle calculations'
def __init__(self, jid=''):
""" Initialize some internals. Set the first request handler to self.calcTypeForm.
"""
xmpp.commands.Command_Handler_Prototype.__init__(self,jid)
self.initial = {
'execute': self.initialForm
}
def initialForm(self, conn, request):
""" Assign a session id and send the first form. """
sessionid = self.getSessionID()
self.sessions[sessionid] = {
'jid':request.getFrom(),
'data':{'type':None}
}
# simulate that the client sent sessionid, so calcTypeForm will be able
# to continue
request.getTag(name="command").setAttr('sessionid', sessionid)
return self.calcTypeForm(conn, request)
def calcTypeForm(self, conn, request):
""" Send first form to the requesting user. """
# get the session data
sessionid = request.getTagAttr('command','sessionid')
session = self.sessions[sessionid]
# What to do when a user sends us a response? Note, that we should always
# include 'execute', as it is a default action when requester does not send
# exact action to do (should be set to the same as 'next' or 'complete' fields)
session['actions'] = {
'cancel': self.cancel,
'next': self.calcTypeFormAccept,
'execute': self.calcTypeFormAccept,
}
# The form to send
calctypefield = xmpp.DataField(
name='calctype',
desc='Calculation Type',
value=session['data']['type'],
options=[
['Calculate the diameter of a circle','circlediameter'],
['Calculate the area of a circle','circlearea']
],
typ='list-single',
required=1)
# We set label attribute... seems that the xmpppy.DataField cannot do that
calctypefield.setAttr('label', 'Calculation Type')
form = xmpp.DataForm(
title='Select type of operation',
data=[
                'Use the combobox to select the type of calculation you '\
                'would like to do, then click Next.',
calctypefield])
# Build a reply with the form
reply = request.buildReply('result')
replypayload = [
xmpp.Node('actions',
attrs={'execute':'next'},
payload=[xmpp.Node('next')]),
form]
reply.addChild(
name='command',
namespace=NS_COMMANDS,
attrs={
'node':request.getTagAttr('command','node'),
'sessionid':sessionid,
'status':'executing'},
payload=replypayload)
self._owner.send(reply) # Question: self._owner or conn?
raise xmpp.NodeProcessed
def calcTypeFormAccept(self, conn, request):
""" Load the calcType form filled in by requester, then reply with
the second form. """
# get the session data
sessionid = request.getTagAttr('command','sessionid')
session = self.sessions[sessionid]
# load the form
node = request.getTag(name='command').getTag(name='x',namespace=NS_DATA)
form = xmpp.DataForm(node=node)
# retrieve the data
session['data']['type'] = form.getField('calctype').getValue()
# send second form
return self.calcDataForm(conn, request)
def calcDataForm(self, conn, request, notavalue=None):
""" Send a form asking for diameter. """
# get the session data
sessionid = request.getTagAttr('command','sessionid')
session = self.sessions[sessionid]
# set the actions taken on requester's response
session['actions'] = {
'cancel': self.cancel,
'prev': self.calcTypeForm,
'next': self.calcDataFormAccept,
'execute': self.calcDataFormAccept
}
# create a form
radiusfield = xmpp.DataField(desc='Radius',name='radius',typ='text-single')
radiusfield.setAttr('label', 'Radius')
form = xmpp.DataForm(
title = 'Enter the radius',
data=[
'Enter the radius of the circle (numbers only)',
radiusfield])
# build a reply stanza
reply = request.buildReply('result')
replypayload = [
xmpp.Node('actions',
attrs={'execute':'complete'},
payload=[xmpp.Node('complete'),xmpp.Node('prev')]),
form]
if notavalue:
replypayload.append(xmpp.Node('note',
attrs={'type': 'warn'},
                payload=['You have to enter a valid number.']))
reply.addChild(
name='command',
namespace=NS_COMMANDS,
attrs={
'node':request.getTagAttr('command','node'),
'sessionid':request.getTagAttr('command','sessionid'),
'status':'executing'},
payload=replypayload)
self._owner.send(reply)
raise xmpp.NodeProcessed
def calcDataFormAccept(self, conn, request):
""" Load the calcType form filled in by requester, then reply with the result. """
# get the session data
sessionid = request.getTagAttr('command','sessionid')
session = self.sessions[sessionid]
# load the form
node = request.getTag(name='command').getTag(name='x',namespace=NS_DATA)
form = xmpp.DataForm(node=node)
# retrieve the data; if the entered value is not a number, return to second stage
        try:
            value = float(form.getField('radius').getValue())
        except (TypeError, ValueError):
            # not a number: re-ask for the radius. calcDataForm raises
            # xmpp.NodeProcessed, so execution never falls through with
            # 'value' undefined.
            self.calcDataForm(conn, request, notavalue=True)
# calculate the answer
from math import pi
if session['data']['type'] == 'circlearea':
result = (value**2) * pi
else:
result = 2 * value * pi
# build the result form
form = xmpp.DataForm(
typ='result',
data=[xmpp.DataField(desc='result', name='result', value=result)])
# build the reply stanza
reply = request.buildReply('result')
reply.addChild(
name='command',
namespace=NS_COMMANDS,
attrs={
'node':request.getTagAttr('command','node'),
'sessionid':sessionid,
'status':'completed'},
payload=[form])
self._owner.send(reply)
# erase the data about session
del self.sessions[sessionid]
raise xmpp.NodeProcessed
def cancel(self, conn, request):
""" Requester canceled the session, send a short reply. """
# get the session id
sessionid = request.getTagAttr('command','sessionid')
# send the reply
reply = request.buildReply('result')
reply.addChild(
name='command',
namespace=NS_COMMANDS,
attrs={
'node':request.getTagAttr('command','node'),
'sessionid':sessionid,
'status':'cancelled'})
self._owner.send(reply)
# erase the data about session
del self.sessions[sessionid]
raise xmpp.NodeProcessed
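# A minimal one-stage command sketch following the pattern of TestCommand
# above; the name, note text and the class itself are hypothetical and it is
# not wired into the Bot below.
class EchoCommand(xmpp.commands.Command_Handler_Prototype):
    """ Reply to 'execute' immediately with a completed note. """
    name = 'echo'
    description = 'Reply immediately with a completed note'
    def __init__(self, jid=''):
        xmpp.commands.Command_Handler_Prototype.__init__(self, jid)
        self.initial = {
            'execute': self.echoReply
        }
    def echoReply(self, conn, request):
        """ Send a completed reply carrying a single informational note. """
        sessionid = self.getSessionID()
        reply = request.buildReply('result')
        reply.addChild(
            name='command',
            namespace=NS_COMMANDS,
            attrs={
                'node': request.getTagAttr('command', 'node'),
                'sessionid': sessionid,
                'status': 'completed'},
            payload=[xmpp.Node('note',
                               attrs={'type': 'info'},
                               payload=['echo'])])
        self._owner.send(reply)
        raise xmpp.NodeProcessed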
class ConnectionError: pass
class AuthorizationError: pass
class NotImplemented: pass
class Bot:
""" The main bot class. """
def __init__(self, JID, Password):
""" Create a new bot. Connect to the server and log in. """
# connect...
jid = xmpp.JID(JID)
self.connection = xmpp.Client(jid.getDomain(), debug=['always', 'browser', 'testcommand'])
result = self.connection.connect()
if result is None:
raise ConnectionError
# authorize
result = self.connection.auth(jid.getNode(), Password)
if result is None:
raise AuthorizationError
# plugins
# disco - needed by commands
        # warning: the case of the "plugin" method names is important!
# to attach a command to Commands class, use .plugin()
# to attach anything to Client class, use .PlugIn()
self.disco = xmpp.browser.Browser()
self.disco.PlugIn(self.connection)
self.disco.setDiscoHandler({
'info': {
'ids': [{
'category': 'client',
'type': 'pc',
'name': 'Bot'
}],
'features': [NS_DISCO_INFO],
}
})
self.commands = xmpp.commands.Commands(self.disco)
self.commands.PlugIn(self.connection)
self.command_test = TestCommand()
self.command_test.plugin(self.commands)
# presence
self.connection.sendInitPresence(requestRoster=0)
def loop(self):
""" Do nothing except handling new xmpp stanzas. """
try:
while self.connection.Process(1):
pass
except KeyboardInterrupt:
pass
bot = Bot(**options)
bot.loop()
| apache-2.0 |
mat12/mytest | lib/python/Components/ChoiceList.py | 7 | 2176 | from MenuList import MenuList
from Tools.Directories import SCOPE_ACTIVE_SKIN, resolveFilename
from enigma import RT_HALIGN_LEFT, eListboxPythonMultiContent, gFont, getDesktop
from Tools.LoadPixmap import LoadPixmap
from Tools.Directories import fileExists
import skin
def ChoiceEntryComponent(key="", text=None):
screenwidth = getDesktop(0).size().width()
if not text: text = ["--"]
res = [ text ]
if text[0] == "--":
if screenwidth and screenwidth == 1920:
res.append((eListboxPythonMultiContent.TYPE_TEXT, 0, 00, 900, 45, 0, RT_HALIGN_LEFT, "-"*200))
else:
res.append((eListboxPythonMultiContent.TYPE_TEXT, 0, 00, 800, 25, 0, RT_HALIGN_LEFT, "-"*200))
else:
if screenwidth and screenwidth == 1920:
res.append((eListboxPythonMultiContent.TYPE_TEXT, 100, 7, 900, 45, 0, RT_HALIGN_LEFT, text[0]))
else:
res.append((eListboxPythonMultiContent.TYPE_TEXT, 45, 00, 800, 25, 0, RT_HALIGN_LEFT, text[0]))
if key:
if key == "expandable":
pngfile = resolveFilename(SCOPE_ACTIVE_SKIN, "icons/expandable.png")
elif key == "expanded":
pngfile = resolveFilename(SCOPE_ACTIVE_SKIN, "icons/expanded.png")
elif key == "verticalline":
pngfile = resolveFilename(SCOPE_ACTIVE_SKIN, "icons/verticalline.png")
else:
pngfile = resolveFilename(SCOPE_ACTIVE_SKIN, "buttons/key_%s.png" % key)
if fileExists(pngfile):
png = LoadPixmap(pngfile)
if screenwidth and screenwidth == 1920:
res.append((eListboxPythonMultiContent.TYPE_PIXMAP_ALPHABLEND, 10, 5, 63, 48, png))
else:
res.append((eListboxPythonMultiContent.TYPE_PIXMAP_ALPHABLEND, 5, 0, 35, 25, png))
return res
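# A minimal usage sketch (labels hypothetical): each entry pairs a display
# text tuple with an optional key icon, and the resulting rows feed the
# ChoiceList below.
#
#     entries = [ChoiceEntryComponent("red", ("Delete", "delete")),
#                ChoiceEntryComponent(text=("Rename", "rename"))]
#     choicebox = ChoiceList(entries, selection=0)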
class ChoiceList(MenuList):
def __init__(self, list, selection = 0, enableWrapAround=False):
MenuList.__init__(self, list, enableWrapAround, eListboxPythonMultiContent)
font = skin.fonts["ChoiceList"]
self.l.setFont(0, gFont(font[0], font[1]))
self.l.setItemHeight(font[2])
self.ItemHeight = font[2]
self.selection = selection
def postWidgetCreate(self, instance):
MenuList.postWidgetCreate(self, instance)
self.moveToIndex(self.selection)
self.instance.setWrapAround(True)
def getItemHeight(self):
return self.ItemHeight
| gpl-2.0 |
Gheehnest/three.js | utils/exporters/blender/addons/io_three/exporter/api/material.py | 55 | 8333 | from bpy import data, types
from .. import constants, logger
from .constants import MULTIPLY, WIRE, IMAGE
def _material(func):
"""
:param func:
"""
def inner(name, *args, **kwargs):
"""
:param name:
:param *args:
:param **kwargs:
"""
if isinstance(name, types.Material):
material = name
else:
material = data.materials[name]
return func(material, *args, **kwargs)
return inner
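# A short sketch of what the decorator buys: any decorated accessor accepts
# either a material name or a bpy material instance ('Steel' is hypothetical).
#
#     opacity('Steel')                  # looked up in bpy.data.materials
#     opacity(data.materials['Steel'])  # passed through unchanged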
@_material
def ambient_color(material):
"""
:param material:
:return: rgb value
:rtype: tuple
"""
logger.debug("material.ambient_color(%s)", material)
diffuse = diffuse_color(material)
return (material.ambient * diffuse[0],
material.ambient * diffuse[1],
material.ambient * diffuse[2])
@_material
def blending(material):
"""
:param material:
:return: THREE_blending_type value
"""
logger.debug("material.blending(%s)", material)
try:
blend = material.THREE_blending_type
except AttributeError:
logger.debug("No THREE_blending_type attribute found")
blend = constants.NORMAL_BLENDING
return blend
@_material
def bump_map(material):
"""
:param material:
:return: texture node for bump
"""
logger.debug("material.bump_map(%s)", material)
for texture in _valid_textures(material):
if texture.use_map_normal and not \
texture.texture.use_normal_map:
return texture.texture
@_material
def bump_scale(material):
"""
:param material:
:rtype: float
"""
return normal_scale(material)
@_material
def depth_test(material):
"""
:param material:
:return: THREE_depth_test value
:rtype: bool
"""
logger.debug("material.depth_test(%s)", material)
try:
test = material.THREE_depth_test
except AttributeError:
logger.debug("No THREE_depth_test attribute found")
test = True
return test
@_material
def depth_write(material):
"""
:param material:
:return: THREE_depth_write value
:rtype: bool
"""
logger.debug("material.depth_write(%s)", material)
try:
write = material.THREE_depth_write
except AttributeError:
logger.debug("No THREE_depth_write attribute found")
write = True
return write
@_material
def double_sided(material):
"""
:param material:
:return: THREE_double_sided value
:rtype: bool
"""
logger.debug("material.double_sided(%s)", material)
try:
write = material.THREE_double_sided
except AttributeError:
logger.debug("No THREE_double_sided attribute found")
write = False
return write
@_material
def diffuse_color(material):
"""
:param material:
:return: rgb value
:rtype: tuple
"""
logger.debug("material.diffuse_color(%s)", material)
return (material.diffuse_intensity * material.diffuse_color[0],
material.diffuse_intensity * material.diffuse_color[1],
material.diffuse_intensity * material.diffuse_color[2])
@_material
def diffuse_map(material):
"""
:param material:
:return: texture node for map
"""
logger.debug("material.diffuse_map(%s)", material)
for texture in _valid_textures(material):
if texture.use_map_color_diffuse and not \
texture.blend_type == MULTIPLY:
return texture.texture
@_material
def emissive_color(material):
"""
:param material:
:return: rgb value
:rtype: tuple
"""
logger.debug("material.emissive_color(%s)", material)
diffuse = diffuse_color(material)
return (material.emit * diffuse[0],
material.emit * diffuse[1],
material.emit * diffuse[2])
@_material
def light_map(material):
"""
:param material:
:return: texture node for light maps
"""
logger.debug("material.light_map(%s)", material)
for texture in _valid_textures(material, strict_use=False):
if texture.use_map_color_diffuse and \
texture.blend_type == MULTIPLY:
return texture.texture
@_material
def normal_scale(material):
"""
:param material:
:rtype: float
"""
logger.debug("material.normal_scale(%s)", material)
for texture in _valid_textures(material):
if texture.use_map_normal:
return texture.normal_factor
@_material
def normal_map(material):
"""
:param material:
:return: texture node for normals
"""
logger.debug("material.normal_map(%s)", material)
for texture in _valid_textures(material):
if texture.use_map_normal and \
texture.texture.use_normal_map:
return texture.texture
@_material
def opacity(material):
"""
:param material:
:rtype: float
"""
logger.debug("material.opacity(%s)", material)
return round(material.alpha, 2)
@_material
def shading(material):
"""
:param material:
:return: shading type (phong or lambert)
"""
logger.debug("material.shading(%s)", material)
dispatch = {
True: constants.PHONG,
False: constants.LAMBERT
}
return dispatch[material.specular_intensity > 0.0]
@_material
def specular_coef(material):
"""
:param material:
:rtype: float
"""
logger.debug("material.specular_coef(%s)", material)
return material.specular_hardness
@_material
def specular_color(material):
"""
:param material:
:return: rgb value
:rtype: tuple
"""
logger.debug("material.specular_color(%s)", material)
return (material.specular_intensity * material.specular_color[0],
material.specular_intensity * material.specular_color[1],
material.specular_intensity * material.specular_color[2])
@_material
def specular_map(material):
"""
:param material:
:return: texture node for specular
"""
logger.debug("material.specular_map(%s)", material)
for texture in _valid_textures(material):
if texture.use_map_specular:
return texture.texture
@_material
def transparent(material):
"""
:param material:
:rtype: bool
"""
logger.debug("material.transparent(%s)", material)
return material.use_transparency
@_material
def type(material):
"""
:param material:
:return: THREE compatible shader type
"""
logger.debug("material.type(%s)", material)
if material.diffuse_shader != 'LAMBERT':
material_type = constants.BASIC
elif material.specular_intensity > 0:
material_type = constants.PHONG
else:
material_type = constants.LAMBERT
return material_type
@_material
def use_vertex_colors(material):
"""
:param material:
:rtype: bool
"""
logger.debug("material.use_vertex_colors(%s)", material)
return material.use_vertex_color_paint
def used_materials():
"""
:return: list of materials that are in use
:rtype: generator
"""
logger.debug("material.used_materials()")
for material in data.materials:
if material.users > 0:
yield material.name
@_material
def visible(material):
"""
:param material:
:return: THREE_visible value
:rtype: bool
"""
logger.debug("material.visible(%s)", material)
try:
vis = material.THREE_visible
except AttributeError:
logger.debug("No THREE_visible attribute found")
vis = True
return vis
@_material
def wireframe(material):
"""
:param material:
:rtype: bool
"""
logger.debug("material.wireframe(%s)", material)
return material.type == WIRE
def _valid_textures(material, strict_use=True):
"""
:param material:
:rtype: generator
"""
for texture in material.texture_slots:
if not texture:
continue
if strict_use:
in_use = texture.use
else:
in_use = True
if not in_use:
continue
if not texture.texture or texture.texture.type != IMAGE:
logger.warning("Unable to export non-image texture %s", texture)
continue
logger.debug("Valid texture found %s", texture)
yield texture
| mit |
ferrants/ansible | test/units/parsing/test_splitter.py | 204 | 4425 | # coding: utf-8
# (c) 2015, Toshio Kuratomi <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from nose import tools
from ansible.compat.tests import unittest
from ansible.parsing.splitter import split_args, parse_kv
# Tests using nose's test generators cannot use unittest base class.
# http://nose.readthedocs.org/en/latest/writing_tests.html#test-generators
class TestSplitter_Gen:
SPLIT_DATA = (
(u'a',
[u'a'],
{u'_raw_params': u'a'}),
(u'a=b',
[u'a=b'],
{u'a': u'b'}),
(u'a="foo bar"',
[u'a="foo bar"'],
{u'a': u'foo bar'}),
(u'"foo bar baz"',
[u'"foo bar baz"'],
{u'_raw_params': '"foo bar baz"'}),
(u'foo bar baz',
[u'foo', u'bar', u'baz'],
{u'_raw_params': u'foo bar baz'}),
(u'a=b c="foo bar"',
[u'a=b', u'c="foo bar"'],
{u'a': u'b', u'c': u'foo bar'}),
(u'a="echo \\"hello world\\"" b=bar',
[u'a="echo \\"hello world\\""', u'b=bar'],
{u'a': u'echo "hello world"', u'b': u'bar'}),
(u'a="multi\nline"',
[u'a="multi\nline"'],
{u'a': u'multi\nline'}),
(u'a="blank\n\nline"',
[u'a="blank\n\nline"'],
{u'a': u'blank\n\nline'}),
(u'a="blank\n\n\nlines"',
[u'a="blank\n\n\nlines"'],
{u'a': u'blank\n\n\nlines'}),
(u'a="a long\nmessage\\\nabout a thing\n"',
[u'a="a long\nmessage\\\nabout a thing\n"'],
{u'a': u'a long\nmessage\\\nabout a thing\n'}),
(u'a="multiline\nmessage1\\\n" b="multiline\nmessage2\\\n"',
[u'a="multiline\nmessage1\\\n"', u'b="multiline\nmessage2\\\n"'],
{u'a': 'multiline\nmessage1\\\n', u'b': u'multiline\nmessage2\\\n'}),
(u'a={{jinja}}',
[u'a={{jinja}}'],
{u'a': u'{{jinja}}'}),
(u'a={{ jinja }}',
[u'a={{ jinja }}'],
{u'a': u'{{ jinja }}'}),
(u'a="{{jinja}}"',
[u'a="{{jinja}}"'],
{u'a': u'{{jinja}}'}),
(u'a={{ jinja }}{{jinja2}}',
[u'a={{ jinja }}{{jinja2}}'],
{u'a': u'{{ jinja }}{{jinja2}}'}),
(u'a="{{ jinja }}{{jinja2}}"',
[u'a="{{ jinja }}{{jinja2}}"'],
{u'a': u'{{ jinja }}{{jinja2}}'}),
(u'a={{jinja}} b={{jinja2}}',
[u'a={{jinja}}', u'b={{jinja2}}'],
{u'a': u'{{jinja}}', u'b': u'{{jinja2}}'}),
(u'a="{{jinja}}\n" b="{{jinja2}}\n"',
[u'a="{{jinja}}\n"', u'b="{{jinja2}}\n"'],
{u'a': u'{{jinja}}\n', u'b': u'{{jinja2}}\n'}),
(u'a="café eñyei"',
[u'a="café eñyei"'],
{u'a': u'café eñyei'}),
(u'a=café b=eñyei',
[u'a=café', u'b=eñyei'],
{u'a': u'café', u'b': u'eñyei'}),
)
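    # Each SPLIT_DATA entry is a triple: the raw input string, the expected
    # split_args() result, and the expected parse_kv() result; the same table
    # drives both test generators below.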
def check_split_args(self, args, expected):
tools.eq_(split_args(args), expected)
def test_split_args(self):
for datapoint in self.SPLIT_DATA:
yield self.check_split_args, datapoint[0], datapoint[1]
def check_parse_kv(self, args, expected):
tools.eq_(parse_kv(args), expected)
def test_parse_kv(self):
for datapoint in self.SPLIT_DATA:
            yield self.check_parse_kv, datapoint[0], datapoint[2]
| gpl-3.0 |
admire93/youtube-dl | youtube_dl/extractor/sharesix.py | 128 | 2692 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse,
compat_urllib_request,
)
from ..utils import (
parse_duration,
)
class ShareSixIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?sharesix\.com/(?:f/)?(?P<id>[0-9a-zA-Z]+)'
_TESTS = [
{
'url': 'http://sharesix.com/f/OXjQ7Y6',
'md5': '9e8e95d8823942815a7d7c773110cc93',
'info_dict': {
'id': 'OXjQ7Y6',
'ext': 'mp4',
'title': 'big_buck_bunny_480p_surround-fix.avi',
'duration': 596,
'width': 854,
'height': 480,
},
},
{
'url': 'http://sharesix.com/lfrwoxp35zdd',
'md5': 'dd19f1435b7cec2d7912c64beeee8185',
'info_dict': {
'id': 'lfrwoxp35zdd',
'ext': 'flv',
'title': 'WhiteBoard___a_Mac_vs_PC_Parody_Cartoon.mp4.flv',
'duration': 65,
'width': 1280,
'height': 720,
},
}
]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
fields = {
'method_free': 'Free'
}
post = compat_urllib_parse.urlencode(fields)
req = compat_urllib_request.Request(url, post)
req.add_header('Content-type', 'application/x-www-form-urlencoded')
webpage = self._download_webpage(req, video_id,
'Downloading video page')
video_url = self._search_regex(
r"var\slnk1\s=\s'([^']+)'", webpage, 'video URL')
title = self._html_search_regex(
r'(?s)<dt>Filename:</dt>.+?<dd>(.+?)</dd>', webpage, 'title')
duration = parse_duration(
self._search_regex(
r'(?s)<dt>Length:</dt>.+?<dd>(.+?)</dd>',
webpage,
'duration',
fatal=False
)
)
m = re.search(
r'''(?xs)<dt>Width\sx\sHeight</dt>.+?
<dd>(?P<width>\d+)\sx\s(?P<height>\d+)</dd>''',
webpage
)
width = height = None
if m:
width, height = int(m.group('width')), int(m.group('height'))
formats = [{
'format_id': 'sd',
'url': video_url,
'width': width,
'height': height,
}]
return {
'id': video_id,
'title': title,
'duration': duration,
'formats': formats,
}
| unlicense |
elsonrodriguez/madhatter | cobbler/modules/sync_post_restart_services.py | 6 | 2421 | import distutils.sysconfig
import sys
import os
import traceback
import cexceptions
import os
import sys
import xmlrpclib
import cobbler.module_loader as module_loader
import cobbler.utils as utils
plib = distutils.sysconfig.get_python_lib()
mod_path="%s/cobbler" % plib
sys.path.insert(0, mod_path)
def register():
# this pure python trigger acts as if it were a legacy shell-trigger, but is much faster.
# the return of this method indicates the trigger type
return "/var/lib/cobbler/triggers/sync/post/*"
def run(api,args,logger):
settings = api.settings()
manage_dhcp = str(settings.manage_dhcp).lower()
manage_dns = str(settings.manage_dns).lower()
restart_dhcp = str(settings.restart_dhcp).lower()
restart_dns = str(settings.restart_dns).lower()
which_dhcp_module = module_loader.get_module_from_file("dhcp","module",just_name=True).strip()
which_dns_module = module_loader.get_module_from_file("dns","module",just_name=True).strip()
# special handling as we don't want to restart it twice
has_restarted_dnsmasq = False
rc = 0
if manage_dhcp != "0":
if which_dhcp_module == "manage_isc":
if restart_dhcp != "0":
rc = utils.subprocess_call(logger, "dhcpd -t -q", shell=True)
if rc != 0:
logger.error("dhcpd -t failed")
return 1
rc = utils.subprocess_call(logger,"/etc/rc.d/init.d/dhcpd restart", shell=True)
elif which_dhcp_module == "manage_dnsmasq":
if restart_dhcp != "0":
rc = utils.subprocess_call(logger, "/etc/rc.d/init.d/dnsmasq restart")
has_restarted_dnsmasq = True
else:
logger.error("unknown DHCP engine: %s" % which_dhcp_module)
rc = 411
if manage_dns != "0" and restart_dns != "0":
if which_dns_module == "manage_bind":
rc = utils.subprocess_call(logger, "/etc/rc.d/init.d/named restart", shell=True)
elif which_dns_module == "manage_dnsmasq" and not has_restarted_dnsmasq:
rc = utils.subprocess_call(logger, "/etc/rc.d/init.d/dnsmasq restart", shell=True)
elif which_dns_module == "manage_dnsmasq" and has_restarted_dnsmasq:
rc = 0
else:
logger.error("unknown DNS engine: %s" % which_dns_module)
rc = 412
return rc
| gpl-2.0 |
caphrim007/ansible | lib/ansible/module_utils/network/f5/bigiq.py | 15 | 6723 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import os
import time
try:
from f5.bigiq import ManagementRoot
from icontrol.exceptions import iControlUnexpectedHTTPError
HAS_F5SDK = True
except ImportError:
HAS_F5SDK = False
try:
from library.module_utils.network.f5.common import F5BaseClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import is_ansible_debug
from library.module_utils.network.f5.icontrol import iControlRestSession
except ImportError:
from ansible.module_utils.network.f5.common import F5BaseClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import is_ansible_debug
from ansible.module_utils.network.f5.icontrol import iControlRestSession
class F5Client(F5BaseClient):
def __init__(self, *args, **kwargs):
super(F5Client, self).__init__(*args, **kwargs)
self.provider = self.merge_provider_params()
@property
def api(self):
exc = None
if self._client:
return self._client
for x in range(0, 10):
try:
result = ManagementRoot(
self.provider['server'],
self.provider['user'],
self.provider['password'],
port=self.provider['server_port'],
verify=self.provider['validate_certs']
)
self._client = result
return self._client
except Exception as ex:
exc = ex
time.sleep(1)
error = 'Unable to connect to {0} on port {1}.'.format(
self.provider['server'], self.provider['server_port']
)
if exc is not None:
error += ' The reported error was "{0}".'.format(str(exc))
raise F5ModuleError(error)
class F5RestClient(F5BaseClient):
def __init__(self, *args, **kwargs):
super(F5RestClient, self).__init__(*args, **kwargs)
self.provider = self.merge_provider_params()
@property
def api(self):
exc = None
if self._client:
return self._client
for x in range(0, 10):
try:
provider = self.provider['auth_provider'] or 'local'
url = "https://{0}:{1}/mgmt/shared/authn/login".format(
self.provider['server'], self.provider['server_port']
)
payload = {
'username': self.provider['user'],
'password': self.provider['password'],
}
# - local is a special provider that is baked into the system and
# has no loginReference
if provider != 'local':
login_ref = self.get_login_ref(provider)
payload.update(login_ref)
session = iControlRestSession()
session.verify = self.provider['validate_certs']
response = session.post(url, json=payload)
if response.status not in [200]:
raise F5ModuleError('Status code: {0}. Unexpected Error: {1} for uri: {2}\nText: {3}'.format(
response.status, response.reason, response.url, response.content
))
session.headers['X-F5-Auth-Token'] = response.json()['token']['token']
self._client = session
return self._client
except Exception as ex:
exc = ex
time.sleep(1)
error = 'Unable to connect to {0} on port {1}.'.format(
self.provider['server'], self.provider['server_port']
)
if exc is not None:
error += ' The reported error was "{0}".'.format(str(exc))
raise F5ModuleError(error)
def get_login_ref(self, provider):
info = self.read_provider_info_from_device()
uuids = [os.path.basename(os.path.dirname(x['link'])) for x in info['providers'] if '-' in x['link']]
if provider in uuids:
name = self.get_name_of_provider_id(info, provider)
if not name:
raise F5ModuleError(
"No name found for the provider '{0}'".format(provider)
)
return dict(
loginReference=dict(
link="https://localhost/mgmt/cm/system/authn/providers/{0}/{1}/login".format(name, provider)
)
)
names = [os.path.basename(os.path.dirname(x['link'])) for x in info['providers'] if '-' in x['link']]
if names.count(provider) > 1:
raise F5ModuleError(
"Ambiguous auth_provider provided. Please specify a specific provider ID."
)
uuid = self.get_id_of_provider_name(info, provider)
if not uuid:
raise F5ModuleError(
"No name found for the provider '{0}'".format(provider)
)
return dict(
loginReference=dict(
link="https://localhost/mgmt/cm/system/authn/providers/{0}/{1}/login".format(provider, uuid)
)
)
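    # A sketch of the payload shape returned above for a non-local provider;
    # the provider name and UUID are hypothetical:
    #
    #     {'loginReference': {'link': 'https://localhost/mgmt/cm/system/authn'
    #                                 '/providers/tmos/1234-abcd/login'}}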
def get_name_of_provider_id(self, info, provider):
# Add slashes to the provider name so that it specifically finds the provider
# as part of the URL and not a part of another substring
provider = '/' + provider + '/'
for x in info['providers']:
if x['link'].find(provider) > -1:
return x['name']
return None
def get_id_of_provider_name(self, info, provider):
for x in info['providers']:
if x['name'] == provider:
return os.path.basename(os.path.dirname(x['link']))
return None
def read_provider_info_from_device(self):
uri = "https://{0}:{1}/info/system".format(
self.provider['server'], self.provider['server_port']
)
session = iControlRestSession()
session.verify = self.provider['validate_certs']
resp = session.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return response
| gpl-3.0 |
alex/pip | pip/index.py | 3 | 38101 | """Routines related to PyPI, indexes"""
from __future__ import absolute_import
import logging
import cgi
from collections import namedtuple
import itertools
import sys
import os
import re
import mimetypes
import posixpath
import warnings
from pip._vendor.six.moves.urllib import parse as urllib_parse
from pip._vendor.six.moves.urllib import request as urllib_request
from pip.compat import ipaddress
from pip.utils import (
cached_property, splitext, normalize_path,
ARCHIVE_EXTENSIONS, SUPPORTED_EXTENSIONS, canonicalize_name)
from pip.utils.deprecation import RemovedInPip9Warning
from pip.utils.logging import indent_log
from pip.exceptions import (
DistributionNotFound, BestVersionAlreadyInstalled, InvalidWheelFilename,
UnsupportedWheel,
)
from pip.download import HAS_TLS, url_to_path, path_to_url
from pip.models import PyPI
from pip.wheel import Wheel, wheel_ext
from pip.pep425tags import supported_tags, supported_tags_noarch, get_platform
from pip._vendor import html5lib, requests, six
from pip._vendor.packaging.version import parse as parse_version
from pip._vendor.requests.exceptions import SSLError
__all__ = ['FormatControl', 'fmt_ctl_handle_mutual_exclude', 'PackageFinder']
SECURE_ORIGINS = [
# protocol, hostname, port
# Taken from Chrome's list of secure origins (See: http://bit.ly/1qrySKC)
("https", "*", "*"),
("*", "localhost", "*"),
("*", "127.0.0.0/8", "*"),
("*", "::1/128", "*"),
("file", "*", None),
# ssh is always secure.
("ssh", "*", "*"),
]
logger = logging.getLogger(__name__)
class InstallationCandidate(object):
def __init__(self, project, version, location):
self.project = project
self.version = parse_version(version)
self.location = location
self._key = (self.project, self.version, self.location)
def __repr__(self):
return "<InstallationCandidate({0!r}, {1!r}, {2!r})>".format(
self.project, self.version, self.location,
)
def __hash__(self):
return hash(self._key)
def __lt__(self, other):
return self._compare(other, lambda s, o: s < o)
def __le__(self, other):
return self._compare(other, lambda s, o: s <= o)
def __eq__(self, other):
return self._compare(other, lambda s, o: s == o)
def __ge__(self, other):
return self._compare(other, lambda s, o: s >= o)
def __gt__(self, other):
return self._compare(other, lambda s, o: s > o)
def __ne__(self, other):
return self._compare(other, lambda s, o: s != o)
def _compare(self, other, method):
if not isinstance(other, InstallationCandidate):
return NotImplemented
return method(self._key, other._key)
class PackageFinder(object):
"""This finds packages.
This is meant to match easy_install's technique for looking for
packages, by reading pages and looking for appropriate links.
"""
def __init__(self, find_links, index_urls, allow_all_prereleases=False,
trusted_hosts=None, process_dependency_links=False,
session=None, format_control=None):
"""Create a PackageFinder.
:param format_control: A FormatControl object or None. Used to control
the selection of source packages / binary packages when consulting
the index and links.
"""
if session is None:
raise TypeError(
"PackageFinder() missing 1 required keyword argument: "
"'session'"
)
# Build find_links. If an argument starts with ~, it may be
# a local file relative to a home directory. So try normalizing
# it and if it exists, use the normalized version.
# This is deliberately conservative - it might be fine just to
# blindly normalize anything starting with a ~...
self.find_links = []
for link in find_links:
if link.startswith('~'):
new_link = normalize_path(link)
if os.path.exists(new_link):
link = new_link
self.find_links.append(link)
self.index_urls = index_urls
self.dependency_links = []
# These are boring links that have already been logged somehow:
self.logged_links = set()
self.format_control = format_control or FormatControl(set(), set())
# Domains that we won't emit warnings for when not using HTTPS
self.secure_origins = [
("*", host, "*")
for host in (trusted_hosts if trusted_hosts else [])
]
# Do we want to allow _all_ pre-releases?
self.allow_all_prereleases = allow_all_prereleases
# Do we process dependency links?
self.process_dependency_links = process_dependency_links
# The Session we'll use to make requests
self.session = session
# If we don't have TLS enabled, then WARN if anyplace we're looking
# relies on TLS.
if not HAS_TLS:
for link in itertools.chain(self.index_urls, self.find_links):
parsed = urllib_parse.urlparse(link)
if parsed.scheme == "https":
logger.warning(
"pip is configured with locations that require "
"TLS/SSL, however the ssl module in Python is not "
"available."
)
break
def add_dependency_links(self, links):
# # FIXME: this shouldn't be global list this, it should only
# # apply to requirements of the package that specifies the
# # dependency_links value
# # FIXME: also, we should track comes_from (i.e., use Link)
if self.process_dependency_links:
warnings.warn(
"Dependency Links processing has been deprecated and will be "
"removed in a future release.",
RemovedInPip9Warning,
)
self.dependency_links.extend(links)
@staticmethod
def _sort_locations(locations, expand_dir=False):
"""
Sort locations into "files" (archives) and "urls", and return
a pair of lists (files,urls)
"""
files = []
urls = []
# puts the url for the given file path into the appropriate list
def sort_path(path):
url = path_to_url(path)
if mimetypes.guess_type(url, strict=False)[0] == 'text/html':
urls.append(url)
else:
files.append(url)
for url in locations:
is_local_path = os.path.exists(url)
is_file_url = url.startswith('file:')
if is_local_path or is_file_url:
if is_local_path:
path = url
else:
path = url_to_path(url)
if os.path.isdir(path):
if expand_dir:
path = os.path.realpath(path)
for item in os.listdir(path):
sort_path(os.path.join(path, item))
elif is_file_url:
urls.append(url)
elif os.path.isfile(path):
sort_path(path)
else:
urls.append(url)
return files, urls
def _candidate_sort_key(self, candidate):
"""
        Function used to generate the sort key for installation candidates.
The greater the return value, the more preferred it is.
If not finding wheels, then sorted by version only.
If finding wheels, then the sort order is by version, then:
1. existing installs
2. wheels ordered via Wheel.support_index_min()
3. source archives
Note: it was considered to embed this logic into the Link
comparison operators, but then different sdist links
        with the same version would have to be considered equal
"""
support_num = len(supported_tags)
if candidate.location.is_wheel:
# can raise InvalidWheelFilename
wheel = Wheel(candidate.location.filename)
if not wheel.supported():
raise UnsupportedWheel(
"%s is not a supported wheel for this platform. It "
"can't be sorted." % wheel.filename
)
pri = -(wheel.support_index_min())
else: # sdist
pri = -(support_num)
return (candidate.version, pri)
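    # Illustrative key values (version hypothetical): an sdist yields
    # (Version('1.0'), -len(supported_tags)) while a wheel yields
    # (Version('1.0'), -wheel.support_index_min()), so within one version a
    # better-matching wheel outranks a worse one, and any supported wheel
    # outranks an sdist.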
def _sort_versions(self, applicable_versions):
"""
Bring the latest version (and wheels) to the front, but maintain the
        existing ordering as secondary. See the docstring for
        `_candidate_sort_key` for details. This function is isolated for
        easier unit testing.
"""
return sorted(
applicable_versions,
key=self._candidate_sort_key,
reverse=True
)
def _validate_secure_origin(self, logger, location):
# Determine if this url used a secure transport mechanism
parsed = urllib_parse.urlparse(str(location))
origin = (parsed.scheme, parsed.hostname, parsed.port)
# The protocol to use to see if the protocol matches.
# Don't count the repository type as part of the protocol: in
# cases such as "git+ssh", only use "ssh". (I.e., Only verify against
# the last scheme.)
protocol = origin[0].rsplit('+', 1)[-1]
# Determine if our origin is a secure origin by looking through our
# hardcoded list of secure origins, as well as any additional ones
# configured on this PackageFinder instance.
for secure_origin in (SECURE_ORIGINS + self.secure_origins):
if protocol != secure_origin[0] and secure_origin[0] != "*":
continue
try:
# We need to do this decode dance to ensure that we have a
# unicode object, even on Python 2.x.
addr = ipaddress.ip_address(
origin[1]
if (
isinstance(origin[1], six.text_type) or
origin[1] is None
)
else origin[1].decode("utf8")
)
network = ipaddress.ip_network(
secure_origin[1]
if isinstance(secure_origin[1], six.text_type)
else secure_origin[1].decode("utf8")
)
except ValueError:
# We don't have both a valid address or a valid network, so
# we'll check this origin against hostnames.
if origin[1] != secure_origin[1] and secure_origin[1] != "*":
continue
else:
# We have a valid address and network, so see if the address
# is contained within the network.
if addr not in network:
continue
            # Check to see if the port matches
if (origin[2] != secure_origin[2] and
secure_origin[2] != "*" and
secure_origin[2] is not None):
continue
# If we've gotten here, then this origin matches the current
# secure origin and we should return True
return True
# If we've gotten to this point, then the origin isn't secure and we
# will not accept it as a valid location to search. We will however
# log a warning that we are ignoring it.
logger.warning(
"The repository located at %s is not a trusted or secure host and "
"is being ignored. If this repository is available via HTTPS it "
"is recommended to use HTTPS instead, otherwise you may silence "
"this warning and allow it anyways with '--trusted-host %s'.",
parsed.hostname,
parsed.hostname,
)
return False
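    # Illustrative outcomes under SECURE_ORIGINS above (hosts hypothetical):
    #   https://pypi.example/simple/ -> accepted (https matches ("https", "*", "*"))
    #   http://localhost:8080/       -> accepted (localhost is whitelisted)
    #   http://mirror.example/       -> warned and rejected unless allowed
    #                                   via '--trusted-host mirror.example'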
def _get_index_urls_locations(self, project_name):
"""Returns the locations found via self.index_urls
Checks the url_name on the main (first in the list) index and
use this url_name to produce all locations
"""
def mkurl_pypi_url(url):
loc = posixpath.join(url, urllib_parse.quote(project_name.lower()))
# For maximum compatibility with easy_install, ensure the path
# ends in a trailing slash. Although this isn't in the spec
# (and PyPI can handle it without the slash) some other index
# implementations might break if they relied on easy_install's
# behavior.
if not loc.endswith('/'):
loc = loc + '/'
return loc
return [mkurl_pypi_url(url) for url in self.index_urls]
def _find_all_versions(self, project_name):
"""Find all available versions for project_name
This checks index_urls, find_links and dependency_links.
All versions found are returned as an InstallationCandidate list.
See _link_package_versions for details on which files are accepted
"""
index_locations = self._get_index_urls_locations(project_name)
index_file_loc, index_url_loc = self._sort_locations(index_locations)
fl_file_loc, fl_url_loc = self._sort_locations(
self.find_links, expand_dir=True)
dep_file_loc, dep_url_loc = self._sort_locations(self.dependency_links)
file_locations = (
Link(url) for url in itertools.chain(
index_file_loc, fl_file_loc, dep_file_loc)
)
# We trust every url that the user has given us whether it was given
# via --index-url or --find-links
# We explicitly do not trust links that came from dependency_links
# We want to filter out any thing which does not have a secure origin.
url_locations = [
link for link in itertools.chain(
(Link(url) for url in index_url_loc),
(Link(url) for url in fl_url_loc),
(Link(url) for url in dep_url_loc),
)
if self._validate_secure_origin(logger, link)
]
logger.debug('%d location(s) to search for versions of %s:',
len(url_locations), project_name)
for location in url_locations:
logger.debug('* %s', location)
canonical_name = canonicalize_name(project_name)
formats = fmt_ctl_formats(self.format_control, canonical_name)
search = Search(project_name, canonical_name, formats)
find_links_versions = self._package_versions(
# We trust every directly linked archive in find_links
(Link(url, '-f') for url in self.find_links),
search
)
page_versions = []
for page in self._get_pages(url_locations, project_name):
logger.debug('Analyzing links from page %s', page.url)
with indent_log():
page_versions.extend(
self._package_versions(page.links, search)
)
dependency_versions = self._package_versions(
(Link(url) for url in self.dependency_links), search
)
if dependency_versions:
logger.debug(
'dependency_links found: %s',
', '.join([
version.location.url for version in dependency_versions
])
)
file_versions = self._package_versions(file_locations, search)
if file_versions:
file_versions.sort(reverse=True)
logger.debug(
'Local files found: %s',
', '.join([
url_to_path(candidate.location.url)
for candidate in file_versions
])
)
# This is an intentional priority ordering
return (
file_versions + find_links_versions + page_versions +
dependency_versions
)
def find_requirement(self, req, upgrade):
"""Try to find a Link matching req
Expects req, an InstallRequirement and upgrade, a boolean
Returns a Link if found,
Raises DistributionNotFound or BestVersionAlreadyInstalled otherwise
"""
all_candidates = self._find_all_versions(req.name)
# Filter out anything which doesn't match our specifier
_versions = set(
req.specifier.filter(
# We turn the version object into a str here because otherwise
# when we're debundled but setuptools isn't, Python will see
# packaging.version.Version and
# pkg_resources._vendor.packaging.version.Version as different
# types. This way we'll use a str as a common data interchange
# format. If we stop using the pkg_resources provided specifier
# and start using our own, we can drop the cast to str().
[str(c.version) for c in all_candidates],
prereleases=(
self.allow_all_prereleases
if self.allow_all_prereleases else None
),
)
)
applicable_candidates = [
# Again, converting to str to deal with debundling.
c for c in all_candidates if str(c.version) in _versions
]
applicable_candidates = self._sort_versions(applicable_candidates)
if req.satisfied_by is not None:
installed_version = parse_version(req.satisfied_by.version)
else:
installed_version = None
if installed_version is None and not applicable_candidates:
logger.critical(
'Could not find a version that satisfies the requirement %s '
'(from versions: %s)',
req,
', '.join(
sorted(
set(str(c.version) for c in all_candidates),
key=parse_version,
)
)
)
raise DistributionNotFound(
'No matching distribution found for %s' % req
)
best_installed = False
if installed_version and (
not applicable_candidates or
applicable_candidates[0].version <= installed_version):
best_installed = True
if not upgrade and installed_version is not None:
if best_installed:
logger.debug(
'Existing installed version (%s) is most up-to-date and '
'satisfies requirement',
installed_version,
)
else:
logger.debug(
'Existing installed version (%s) satisfies requirement '
'(most up-to-date version is %s)',
installed_version,
applicable_candidates[0].version,
)
return None
if best_installed:
# We have an existing version, and its the best version
logger.debug(
'Installed version (%s) is most up-to-date (past versions: '
'%s)',
installed_version,
', '.join(str(c.version) for c in applicable_candidates) or
"none",
)
raise BestVersionAlreadyInstalled
selected_candidate = applicable_candidates[0]
logger.debug(
'Using version %s (newest of versions: %s)',
selected_candidate.version,
', '.join(str(c.version) for c in applicable_candidates)
)
return selected_candidate.location
def _get_pages(self, locations, project_name):
"""
Yields (page, page_url) from the given locations, skipping
locations that have errors.
"""
seen = set()
for location in locations:
if location in seen:
continue
seen.add(location)
page = self._get_page(location)
if page is None:
continue
yield page
_py_version_re = re.compile(r'-py([123]\.?[0-9]?)$')
def _sort_links(self, links):
"""
Returns elements of links in order, non-egg links first, egg links
second, while eliminating duplicates
"""
eggs, no_eggs = [], []
seen = set()
for link in links:
if link not in seen:
seen.add(link)
if link.egg_fragment:
eggs.append(link)
else:
no_eggs.append(link)
return no_eggs + eggs
def _package_versions(self, links, search):
result = []
for link in self._sort_links(links):
v = self._link_package_versions(link, search)
if v is not None:
result.append(v)
return result
def _log_skipped_link(self, link, reason):
if link not in self.logged_links:
logger.debug('Skipping link %s; %s', link, reason)
self.logged_links.add(link)
def _link_package_versions(self, link, search):
"""Return an InstallationCandidate or None"""
platform = get_platform()
version = None
if link.egg_fragment:
egg_info = link.egg_fragment
ext = link.ext
else:
egg_info, ext = link.splitext()
if not ext:
self._log_skipped_link(link, 'not a file')
return
if ext not in SUPPORTED_EXTENSIONS:
self._log_skipped_link(
link, 'unsupported archive format: %s' % ext)
return
if "binary" not in search.formats and ext == wheel_ext:
self._log_skipped_link(
link, 'No binaries permitted for %s' % search.supplied)
return
if "macosx10" in link.path and ext == '.zip':
self._log_skipped_link(link, 'macosx10 one')
return
if ext == wheel_ext:
try:
wheel = Wheel(link.filename)
except InvalidWheelFilename:
self._log_skipped_link(link, 'invalid wheel filename')
return
if canonicalize_name(wheel.name) != search.canonical:
self._log_skipped_link(
link, 'wrong project name (not %s)' % search.supplied)
return
if not wheel.supported():
self._log_skipped_link(
link, 'it is not compatible with this Python')
return
# This is a dirty hack to prevent installing Binary Wheels from
# PyPI unless it is a Windows or Mac Binary Wheel. This is
# paired with a change to PyPI disabling uploads for the
# same. Once we have a mechanism for enabling support for
# binary wheels on linux that deals with the inherent problems
# of binary distribution this can be removed.
comes_from = getattr(link, "comes_from", None)
if (
(
not platform.startswith('win') and not
platform.startswith('macosx') and not
platform == 'cli'
) and
comes_from is not None and
urllib_parse.urlparse(
comes_from.url
).netloc.endswith(PyPI.netloc)):
if not wheel.supported(tags=supported_tags_noarch):
self._log_skipped_link(
link,
"it is a pypi-hosted binary "
"Wheel on an unsupported platform",
)
return
version = wheel.version
# This should be up by the search.ok_binary check, but see issue 2700.
if "source" not in search.formats and ext != wheel_ext:
self._log_skipped_link(
link, 'No sources permitted for %s' % search.supplied)
return
if not version:
version = egg_info_matches(egg_info, search.supplied, link)
if version is None:
self._log_skipped_link(
link, 'wrong project name (not %s)' % search.supplied)
return
match = self._py_version_re.search(version)
if match:
version = version[:match.start()]
py_version = match.group(1)
if py_version != sys.version[:3]:
self._log_skipped_link(
link, 'Python version is incorrect')
return
logger.debug('Found link %s, version: %s', link, version)
return InstallationCandidate(search.supplied, version, link)
def _get_page(self, link):
return HTMLPage.get_page(link, session=self.session)
def egg_info_matches(
egg_info, search_name, link,
_egg_info_re=re.compile(r'([a-z0-9_.]+)-([a-z0-9_.!+-]+)', re.I)):
"""Pull the version part out of a string.
:param egg_info: The string to parse. E.g. foo-2.1
:param search_name: The name of the package this belongs to. None to
infer the name. Note that this cannot unambiguously parse strings
like foo-2-2 which might be foo, 2-2 or foo-2, 2.
:param link: The link the string came from, for logging on failure.
"""
match = _egg_info_re.search(egg_info)
if not match:
logger.debug('Could not parse version from link: %s', link)
return None
if search_name is None:
full_match = match.group(0)
return full_match[full_match.index('-'):]
name = match.group(0).lower()
# To match the "safe" name that pkg_resources creates:
name = name.replace('_', '-')
# project name and version must be separated by a dash
look_for = search_name.lower() + "-"
if name.startswith(look_for):
return match.group(0)[len(look_for):]
else:
return None
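# Illustrative results (the link argument only affects logging):
#
#     egg_info_matches('foo-2.1', 'foo', link)         -> '2.1'
#     egg_info_matches('foo_bar-2.1', 'foo-bar', link) -> '2.1'
#     egg_info_matches('foo-2.1', None, link)          -> '-2.1' (name inferred)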
class HTMLPage(object):
"""Represents one page, along with its URL"""
def __init__(self, content, url, headers=None):
# Determine if we have any encoding information in our headers
encoding = None
if headers and "Content-Type" in headers:
content_type, params = cgi.parse_header(headers["Content-Type"])
if "charset" in params:
encoding = params['charset']
self.content = content
self.parsed = html5lib.parse(
self.content,
encoding=encoding,
namespaceHTMLElements=False,
)
self.url = url
self.headers = headers
def __str__(self):
return self.url
@classmethod
def get_page(cls, link, skip_archives=True, session=None):
if session is None:
raise TypeError(
"get_page() missing 1 required keyword argument: 'session'"
)
url = link.url
url = url.split('#', 1)[0]
# Check for VCS schemes that do not support lookup as web pages.
from pip.vcs import VcsSupport
for scheme in VcsSupport.schemes:
if url.lower().startswith(scheme) and url[len(scheme)] in '+:':
logger.debug('Cannot look at %s URL %s', scheme, link)
return None
try:
if skip_archives:
filename = link.filename
for bad_ext in ARCHIVE_EXTENSIONS:
if filename.endswith(bad_ext):
content_type = cls._get_content_type(
url, session=session,
)
if content_type.lower().startswith('text/html'):
break
else:
logger.debug(
'Skipping page %s because of Content-Type: %s',
link,
content_type,
)
return
logger.debug('Getting page %s', url)
# Tack index.html onto file:// URLs that point to directories
(scheme, netloc, path, params, query, fragment) = \
urllib_parse.urlparse(url)
if (scheme == 'file' and
os.path.isdir(urllib_request.url2pathname(path))):
# add trailing slash if not present so urljoin doesn't trim
# final segment
if not url.endswith('/'):
url += '/'
url = urllib_parse.urljoin(url, 'index.html')
logger.debug(' file: URL is directory, getting %s', url)
resp = session.get(
url,
headers={
"Accept": "text/html",
"Cache-Control": "max-age=600",
},
)
resp.raise_for_status()
# The check for archives above only works if the URL ends with
# something that looks like an archive. However, that is not a
# requirement of a URL. Unless we issue a HEAD request on every
# URL, we cannot know ahead of time for sure if something is HTML
# or not. However, we can check after we've downloaded it.
content_type = resp.headers.get('Content-Type', 'unknown')
if not content_type.lower().startswith("text/html"):
logger.debug(
'Skipping page %s because of Content-Type: %s',
link,
content_type,
)
return
inst = cls(resp.content, resp.url, resp.headers)
except requests.HTTPError as exc:
cls._handle_fail(link, exc, url)
except SSLError as exc:
reason = ("There was a problem confirming the ssl certificate: "
"%s" % exc)
cls._handle_fail(link, reason, url, meth=logger.info)
except requests.ConnectionError as exc:
cls._handle_fail(link, "connection error: %s" % exc, url)
except requests.Timeout:
cls._handle_fail(link, "timed out", url)
else:
return inst
@staticmethod
def _handle_fail(link, reason, url, meth=None):
if meth is None:
meth = logger.debug
meth("Could not fetch URL %s: %s - skipping", link, reason)
@staticmethod
def _get_content_type(url, session):
"""Get the Content-Type of the given url, using a HEAD request"""
scheme, netloc, path, query, fragment = urllib_parse.urlsplit(url)
if scheme not in ('http', 'https'):
# FIXME: some warning or something?
# assertion error?
return ''
resp = session.head(url, allow_redirects=True)
resp.raise_for_status()
return resp.headers.get("Content-Type", "")
@cached_property
def base_url(self):
bases = [
x for x in self.parsed.findall(".//base")
if x.get("href") is not None
]
if bases and bases[0].get("href"):
return bases[0].get("href")
else:
return self.url
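# (Annotation: if a page declares e.g. <base href="https://mirror.example/">,
# the links property below joins every anchor against that href instead of
# the page's own URL.)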
@property
def links(self):
"""Yields all links in the page"""
for anchor in self.parsed.findall(".//a"):
if anchor.get("href"):
href = anchor.get("href")
url = self.clean_link(
urllib_parse.urljoin(self.base_url, href)
)
yield Link(url, self)
_clean_re = re.compile(r'[^a-z0-9$&+,/:;=?@.#%_\\|-]', re.I)
def clean_link(self, url):
"""Makes sure a link is fully encoded. That is, if a ' ' shows up in
the link, it will be rewritten to %20 (while not over-quoting
% or other characters)."""
return self._clean_re.sub(
lambda match: '%%%2x' % ord(match.group(0)), url)
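# e.g. (annotation, hypothetical input): clean_link('http://host/a page.html')
# returns 'http://host/a%20page.html'; '%' is in the allowed character class
# above, so already-encoded sequences are not quoted a second time.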
class Link(object):
def __init__(self, url, comes_from=None):
# url can be a UNC windows share
if url.startswith('\\\\'):
url = path_to_url(url)
self.url = url
self.comes_from = comes_from
def __str__(self):
if self.comes_from:
return '%s (from %s)' % (self.url, self.comes_from)
else:
return str(self.url)
def __repr__(self):
return '<Link %s>' % self
def __eq__(self, other):
if not isinstance(other, Link):
return NotImplemented
return self.url == other.url
def __ne__(self, other):
if not isinstance(other, Link):
return NotImplemented
return self.url != other.url
def __lt__(self, other):
if not isinstance(other, Link):
return NotImplemented
return self.url < other.url
def __le__(self, other):
if not isinstance(other, Link):
return NotImplemented
return self.url <= other.url
def __gt__(self, other):
if not isinstance(other, Link):
return NotImplemented
return self.url > other.url
def __ge__(self, other):
if not isinstance(other, Link):
return NotImplemented
return self.url >= other.url
def __hash__(self):
return hash(self.url)
@property
def filename(self):
_, netloc, path, _, _ = urllib_parse.urlsplit(self.url)
name = posixpath.basename(path.rstrip('/')) or netloc
name = urllib_parse.unquote(name)
assert name, ('URL %r produced no filename' % self.url)
return name
@property
def scheme(self):
return urllib_parse.urlsplit(self.url)[0]
@property
def netloc(self):
return urllib_parse.urlsplit(self.url)[1]
@property
def path(self):
return urllib_parse.unquote(urllib_parse.urlsplit(self.url)[2])
def splitext(self):
return splitext(posixpath.basename(self.path.rstrip('/')))
@property
def ext(self):
return self.splitext()[1]
@property
def url_without_fragment(self):
scheme, netloc, path, query, fragment = urllib_parse.urlsplit(self.url)
return urllib_parse.urlunsplit((scheme, netloc, path, query, None))
_egg_fragment_re = re.compile(r'#egg=([^&]*)')
@property
def egg_fragment(self):
match = self._egg_fragment_re.search(self.url)
if not match:
return None
return match.group(1)
_hash_re = re.compile(
r'(sha1|sha224|sha384|sha256|sha512|md5)=([a-f0-9]+)'
)
@property
def hash(self):
match = self._hash_re.search(self.url)
if match:
return match.group(2)
return None
@property
def hash_name(self):
match = self._hash_re.search(self.url)
if match:
return match.group(1)
return None
@property
def show_url(self):
return posixpath.basename(self.url.split('#', 1)[0].split('?', 1)[0])
@property
def is_wheel(self):
return self.ext == wheel_ext
@property
def is_artifact(self):
"""
Determines if this points to an actual artifact (e.g. a tarball) or if
it points to an "abstract" thing like a path or a VCS location.
"""
from pip.vcs import vcs
if self.scheme in vcs.all_schemes:
return False
return True
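# Illustrative decomposition (annotation; assumes pip's splitext helper,
# which treats '.tar.gz' as a single extension):
#   link = Link('https://pypi.python.org/packages/foo-1.0.tar.gz#md5=abc123')
#   link.filename   -> 'foo-1.0.tar.gz'
#   link.ext        -> '.tar.gz'
#   link.hash_name  -> 'md5'
#   link.hash       -> 'abc123'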
FormatControl = namedtuple('FormatControl', 'no_binary only_binary')
"""This object has two fields, no_binary and only_binary.
If a field is falsy, it isn't set. If it is {':all:'}, it should match all
packages except those listed in the other field. Only one field can be set
to {':all:'} at a time. The rest of the time exact package name matches
are listed, with any given package only showing up in one field at a time.
"""
def fmt_ctl_handle_mutual_exclude(value, target, other):
new = value.split(',')
while ':all:' in new:
other.clear()
target.clear()
target.add(':all:')
del new[:new.index(':all:') + 1]
if ':none:' not in new:
# Without a none, we want to discard everything as :all: covers it
return
for name in new:
if name == ':none:':
target.clear()
continue
name = canonicalize_name(name)
other.discard(name)
target.add(name)
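# Sketch of the parsing behaviour (annotation; values are made up):
#   no_binary, only_binary = set(), set()
#   fmt_ctl_handle_mutual_exclude(':all:,:none:,numpy', no_binary, only_binary)
#   # no_binary == {'numpy'}: ':all:' set the wildcard, ':none:' reset it,
#   # and the trailing package name was added back individually.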
def fmt_ctl_formats(fmt_ctl, canonical_name):
result = set(["binary", "source"])
if canonical_name in fmt_ctl.only_binary:
result.discard('source')
elif canonical_name in fmt_ctl.no_binary:
result.discard('binary')
elif ':all:' in fmt_ctl.only_binary:
result.discard('source')
elif ':all:' in fmt_ctl.no_binary:
result.discard('binary')
return frozenset(result)
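# Expected results (annotation; package names are hypothetical):
#   fmt_ctl_formats(FormatControl(set(), {':all:'}), 'numpy')
#       -> frozenset({'binary'})   # global only-binary wildcard
#   fmt_ctl_formats(FormatControl({'numpy'}, {':all:'}), 'numpy')
#       -> frozenset({'source'})   # per-package no-binary beats the wildcard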
def fmt_ctl_no_binary(fmt_ctl):
fmt_ctl_handle_mutual_exclude(
':all:', fmt_ctl.no_binary, fmt_ctl.only_binary)
def fmt_ctl_no_use_wheel(fmt_ctl):
fmt_ctl_no_binary(fmt_ctl)
warnings.warn(
'--no-use-wheel is deprecated and will be removed in the future. '
' Please use --no-binary :all: instead.', DeprecationWarning,
stacklevel=2)
Search = namedtuple('Search', 'supplied canonical formats')
"""Capture key aspects of a search.
:attribute supplied: The user supplied package.
:attribute canonical: The canonical package name.
:attribute formats: The formats allowed for this package. Should be a set
with 'binary' or 'source' or both in it.
"""
| mit |
faroit/mir_eval | tests/test_sonify.py | 1 | 3340 | """ Unit tests for sonification methods """
import mir_eval
import numpy as np
import scipy
def test_clicks():
# Test output length for a variety of parameter settings
for times in [np.array([1.]), np.arange(10)*1.]:
for fs in [8000, 44100]:
click_signal = mir_eval.sonify.clicks(times, fs)
assert len(click_signal) == times.max()*fs + int(fs*.1) + 1
click_signal = mir_eval.sonify.clicks(times, fs, length=1000)
assert len(click_signal) == 1000
click_signal = mir_eval.sonify.clicks(
times, fs, click=np.zeros(1000))
assert len(click_signal) == times.max()*fs + 1000 + 1
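# (Annotation: the expected lengths above encode mir_eval's default click --
# int(fs*.1) + 1 samples, i.e. roughly 0.1 s -- placed at the final onset.)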
def test_time_frequency():
# Test length for different inputs
for fs in [8000, 44100]:
signal = mir_eval.sonify.time_frequency(
np.random.standard_normal((100, 1000)), np.arange(1, 101),
np.linspace(0, 10, 1000), fs)
assert len(signal) == 10*fs
signal = mir_eval.sonify.time_frequency(
np.random.standard_normal((100, 1000)), np.arange(1, 101),
np.linspace(0, 10, 1000), fs, length=fs*11)
assert len(signal) == 11*fs
def test_chroma():
for fs in [8000, 44100]:
signal = mir_eval.sonify.chroma(
np.random.standard_normal((12, 1000)),
np.linspace(0, 10, 1000), fs)
assert len(signal) == 10*fs
signal = mir_eval.sonify.chroma(
np.random.standard_normal((12, 1000)),
np.linspace(0, 10, 1000), fs, length=fs*11)
assert len(signal) == 11*fs
def test_chords():
for fs in [8000, 44100]:
intervals = np.array([np.arange(10), np.arange(1, 11)]).T
signal = mir_eval.sonify.chords(
['C', 'C:maj', 'D:min7', 'E:min', 'C#', 'C', 'C', 'C', 'C', 'C'],
intervals, fs)
assert len(signal) == 10*fs
signal = mir_eval.sonify.chords(
['C', 'C:maj', 'D:min7', 'E:min', 'C#', 'C', 'C', 'C', 'C', 'C'],
intervals, fs, length=fs*11)
assert len(signal) == 11*fs
def test_pitch_contour():
# Generate some random pitch
fs = 8000
times = np.linspace(0, 5, num=5 * fs, endpoint=True)
noise = scipy.ndimage.gaussian_filter1d(np.random.randn(len(times)),
sigma=256)
freqs = 440.0 * 2.0**(16 * noise)
# negate a bunch of sequences
idx = np.unique(np.random.randint(0, high=len(times), size=32))
for start, end in zip(idx[::2], idx[1::2]):
freqs[start:end] *= -1
# Test with inferring duration
x = mir_eval.sonify.pitch_contour(times, freqs, fs)
assert len(x) == fs * 5
# Test with an explicit duration
# This forces the interpolator to go off the end of the sampling grid,
# which should result in a constant sequence in the output
x = mir_eval.sonify.pitch_contour(times, freqs, fs, length=fs * 7)
assert len(x) == fs * 7
assert np.allclose(x[-fs * 2:], x[-fs * 2])
# Test with an explicit duration and a fixed offset
# This forces the interpolator to go off the beginning of
# the sampling grid, which should result in a constant output
x = mir_eval.sonify.pitch_contour(times + 5.0, freqs, fs, length=fs * 7)
assert len(x) == fs * 7
assert np.allclose(x[:fs * 5], x[0])
| mit |
crakensio/django_training | lib/python2.7/site-packages/django/contrib/auth/tests/test_auth_backends.py | 97 | 19207 | from __future__ import unicode_literals
from datetime import date
from django.conf import settings
from django.contrib.auth.backends import ModelBackend
from django.contrib.auth.models import User, Group, Permission, AnonymousUser
from django.contrib.auth.tests.utils import skipIfCustomUser
from django.contrib.auth.tests.custom_user import ExtensionUser, CustomPermissionsUser, CustomUser
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ImproperlyConfigured, PermissionDenied
from django.contrib.auth import authenticate, get_user
from django.http import HttpRequest
from django.test import TestCase
from django.test.utils import override_settings
from django.contrib.auth.hashers import MD5PasswordHasher
class CountingMD5PasswordHasher(MD5PasswordHasher):
"""Hasher that counts how many times it computes a hash."""
calls = 0
def encode(self, *args, **kwargs):
type(self).calls += 1
return super(CountingMD5PasswordHasher, self).encode(*args, **kwargs)
class BaseModelBackendTest(object):
"""
A base class for tests that need to validate the ModelBackend
with different User models. Subclasses should define a class
level UserModel attribute, and a create_users() method to
construct two users for test purposes.
"""
backend = 'django.contrib.auth.backends.ModelBackend'
def setUp(self):
self.curr_auth = settings.AUTHENTICATION_BACKENDS
settings.AUTHENTICATION_BACKENDS = (self.backend,)
self.create_users()
def tearDown(self):
settings.AUTHENTICATION_BACKENDS = self.curr_auth
# The custom_perms test messes with ContentTypes, which will
# be cached; flush the cache to ensure there are no side effects
# Refs #14975, #14925
ContentType.objects.clear_cache()
def test_has_perm(self):
user = self.UserModel._default_manager.get(pk=self.user.pk)
self.assertEqual(user.has_perm('auth.test'), False)
user.is_staff = True
user.save()
self.assertEqual(user.has_perm('auth.test'), False)
user.is_superuser = True
user.save()
self.assertEqual(user.has_perm('auth.test'), True)
user.is_staff = False
user.is_superuser = False
user.save()
self.assertEqual(user.has_perm('auth.test'), False)
user.is_staff = True
user.is_superuser = True
user.is_active = False
user.save()
self.assertEqual(user.has_perm('auth.test'), False)
def test_custom_perms(self):
user = self.UserModel._default_manager.get(pk=self.user.pk)
content_type = ContentType.objects.get_for_model(Group)
perm = Permission.objects.create(name='test', content_type=content_type, codename='test')
user.user_permissions.add(perm)
user.save()
# reloading user to purge the _perm_cache
user = self.UserModel._default_manager.get(pk=self.user.pk)
self.assertEqual(user.get_all_permissions() == set(['auth.test']), True)
self.assertEqual(user.get_group_permissions(), set([]))
self.assertEqual(user.has_module_perms('Group'), False)
self.assertEqual(user.has_module_perms('auth'), True)
perm = Permission.objects.create(name='test2', content_type=content_type, codename='test2')
user.user_permissions.add(perm)
user.save()
perm = Permission.objects.create(name='test3', content_type=content_type, codename='test3')
user.user_permissions.add(perm)
user.save()
user = self.UserModel._default_manager.get(pk=self.user.pk)
self.assertEqual(user.get_all_permissions(), set(['auth.test2', 'auth.test', 'auth.test3']))
self.assertEqual(user.has_perm('test'), False)
self.assertEqual(user.has_perm('auth.test'), True)
self.assertEqual(user.has_perms(['auth.test2', 'auth.test3']), True)
perm = Permission.objects.create(name='test_group', content_type=content_type, codename='test_group')
group = Group.objects.create(name='test_group')
group.permissions.add(perm)
group.save()
user.groups.add(group)
user = self.UserModel._default_manager.get(pk=self.user.pk)
exp = set(['auth.test2', 'auth.test', 'auth.test3', 'auth.test_group'])
self.assertEqual(user.get_all_permissions(), exp)
self.assertEqual(user.get_group_permissions(), set(['auth.test_group']))
self.assertEqual(user.has_perms(['auth.test3', 'auth.test_group']), True)
user = AnonymousUser()
self.assertEqual(user.has_perm('test'), False)
self.assertEqual(user.has_perms(['auth.test2', 'auth.test3']), False)
def test_has_no_object_perm(self):
"""Regressiontest for #12462"""
user = self.UserModel._default_manager.get(pk=self.user.pk)
content_type = ContentType.objects.get_for_model(Group)
perm = Permission.objects.create(name='test', content_type=content_type, codename='test')
user.user_permissions.add(perm)
user.save()
self.assertEqual(user.has_perm('auth.test', 'object'), False)
self.assertEqual(user.get_all_permissions('object'), set([]))
self.assertEqual(user.has_perm('auth.test'), True)
self.assertEqual(user.get_all_permissions(), set(['auth.test']))
def test_get_all_superuser_permissions(self):
"""A superuser has all permissions. Refs #14795."""
user = self.UserModel._default_manager.get(pk=self.superuser.pk)
self.assertEqual(len(user.get_all_permissions()), len(Permission.objects.all()))
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.tests.test_auth_backends.CountingMD5PasswordHasher',))
def test_authentication_timing(self):
"""Hasher is run once regardless of whether the user exists. Refs #20760."""
# Re-set the password, because this test overrides PASSWORD_HASHERS
self.user.set_password('test')
self.user.save()
CountingMD5PasswordHasher.calls = 0
username = getattr(self.user, self.UserModel.USERNAME_FIELD)
authenticate(username=username, password='test')
self.assertEqual(CountingMD5PasswordHasher.calls, 1)
CountingMD5PasswordHasher.calls = 0
authenticate(username='no_such_user', password='test')
self.assertEqual(CountingMD5PasswordHasher.calls, 1)
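# (Annotation: hashing exactly once whether or not the user exists keeps
# authenticate() timing uniform, so valid usernames cannot be enumerated
# by measuring response times.)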
@skipIfCustomUser
class ModelBackendTest(BaseModelBackendTest, TestCase):
"""
Tests for the ModelBackend using the default User model.
"""
UserModel = User
def create_users(self):
self.user = User.objects.create_user(
username='test',
email='[email protected]',
password='test',
)
self.superuser = User.objects.create_superuser(
username='test2',
email='[email protected]',
password='test',
)
@override_settings(AUTH_USER_MODEL='auth.ExtensionUser')
class ExtensionUserModelBackendTest(BaseModelBackendTest, TestCase):
"""
Tests for the ModelBackend using the custom ExtensionUser model.
This isn't a perfect test, because both the User and ExtensionUser are
synchronized to the database, which wouldn't ordinarily happen in
production. As a result, it doesn't catch errors caused by the non-
existence of the User table.
The specific problem is queries on .filter(groups__user) et al, which
makes an implicit assumption that the user model is called 'User'. In
production, the auth.User table won't exist, so the requested join
won't exist either; in testing, the auth.User *does* exist, and
so does the join. However, the join table won't contain any useful
data; for testing, we check that the data we expect actually does exist.
"""
UserModel = ExtensionUser
def create_users(self):
self.user = ExtensionUser._default_manager.create_user(
username='test',
email='[email protected]',
password='test',
date_of_birth=date(2006, 4, 25)
)
self.superuser = ExtensionUser._default_manager.create_superuser(
username='test2',
email='[email protected]',
password='test',
date_of_birth=date(1976, 11, 8)
)
@override_settings(AUTH_USER_MODEL='auth.CustomPermissionsUser')
class CustomPermissionsUserModelBackendTest(BaseModelBackendTest, TestCase):
"""
Tests for the ModelBackend using the CustomPermissionsUser model.
As with the ExtensionUser test, this isn't a perfect test, because both
the User and CustomPermissionsUser are synchronized to the database,
which wouldn't ordinarily happen in production.
"""
UserModel = CustomPermissionsUser
def create_users(self):
self.user = CustomPermissionsUser._default_manager.create_user(
email='[email protected]',
password='test',
date_of_birth=date(2006, 4, 25)
)
self.superuser = CustomPermissionsUser._default_manager.create_superuser(
email='[email protected]',
password='test',
date_of_birth=date(1976, 11, 8)
)
@override_settings(AUTH_USER_MODEL='auth.CustomUser')
class CustomUserModelBackendAuthenticateTest(TestCase):
"""
Tests that the model backend can accept a credentials kwarg labeled with
the custom user model's USERNAME_FIELD.
"""
def test_authenticate(self):
test_user = CustomUser._default_manager.create_user(
email='[email protected]',
password='test',
date_of_birth=date(2006, 4, 25)
)
authenticated_user = authenticate(email='[email protected]', password='test')
self.assertEqual(test_user, authenticated_user)
class TestObj(object):
pass
class SimpleRowlevelBackend(object):
def has_perm(self, user, perm, obj=None):
if not obj:
return # We only support row level perms
if isinstance(obj, TestObj):
if user.username == 'test2':
return True
elif user.is_anonymous() and perm == 'anon':
return True
elif not user.is_active and perm == 'inactive':
return True
return False
def has_module_perms(self, user, app_label):
if not user.is_anonymous() and not user.is_active:
return False
return app_label == "app1"
def get_all_permissions(self, user, obj=None):
if not obj:
return [] # We only support row level perms
if not isinstance(obj, TestObj):
return ['none']
if user.is_anonymous():
return ['anon']
if user.username == 'test2':
return ['simple', 'advanced']
else:
return ['simple']
def get_group_permissions(self, user, obj=None):
if not obj:
return # We only support row level perms
if not isinstance(obj, TestObj):
return ['none']
if 'test_group' in [group.name for group in user.groups.all()]:
return ['group_perm']
else:
return ['none']
@skipIfCustomUser
class RowlevelBackendTest(TestCase):
"""
Tests for an auth backend that supports object-level permissions
"""
backend = 'django.contrib.auth.tests.test_auth_backends.SimpleRowlevelBackend'
def setUp(self):
self.curr_auth = settings.AUTHENTICATION_BACKENDS
settings.AUTHENTICATION_BACKENDS = tuple(self.curr_auth) + (self.backend,)
self.user1 = User.objects.create_user('test', '[email protected]', 'test')
self.user2 = User.objects.create_user('test2', '[email protected]', 'test')
self.user3 = User.objects.create_user('test3', '[email protected]', 'test')
def tearDown(self):
settings.AUTHENTICATION_BACKENDS = self.curr_auth
# The get_group_permissions test messes with ContentTypes, which will
# be cached; flush the cache to ensure there are no side effects
# Refs #14975, #14925
ContentType.objects.clear_cache()
def test_has_perm(self):
self.assertEqual(self.user1.has_perm('perm', TestObj()), False)
self.assertEqual(self.user2.has_perm('perm', TestObj()), True)
self.assertEqual(self.user2.has_perm('perm'), False)
self.assertEqual(self.user2.has_perms(['simple', 'advanced'], TestObj()), True)
self.assertEqual(self.user3.has_perm('perm', TestObj()), False)
self.assertEqual(self.user3.has_perm('anon', TestObj()), False)
self.assertEqual(self.user3.has_perms(['simple', 'advanced'], TestObj()), False)
def test_get_all_permissions(self):
self.assertEqual(self.user1.get_all_permissions(TestObj()), set(['simple']))
self.assertEqual(self.user2.get_all_permissions(TestObj()), set(['simple', 'advanced']))
self.assertEqual(self.user2.get_all_permissions(), set([]))
def test_get_group_permissions(self):
group = Group.objects.create(name='test_group')
self.user3.groups.add(group)
self.assertEqual(self.user3.get_group_permissions(TestObj()), set(['group_perm']))
class AnonymousUserBackendTest(TestCase):
"""
Tests for AnonymousUser delegating to backend.
"""
backend = 'django.contrib.auth.tests.test_auth_backends.SimpleRowlevelBackend'
def setUp(self):
self.curr_auth = settings.AUTHENTICATION_BACKENDS
settings.AUTHENTICATION_BACKENDS = (self.backend,)
self.user1 = AnonymousUser()
def tearDown(self):
settings.AUTHENTICATION_BACKENDS = self.curr_auth
def test_has_perm(self):
self.assertEqual(self.user1.has_perm('perm', TestObj()), False)
self.assertEqual(self.user1.has_perm('anon', TestObj()), True)
def test_has_perms(self):
self.assertEqual(self.user1.has_perms(['anon'], TestObj()), True)
self.assertEqual(self.user1.has_perms(['anon', 'perm'], TestObj()), False)
def test_has_module_perms(self):
self.assertEqual(self.user1.has_module_perms("app1"), True)
self.assertEqual(self.user1.has_module_perms("app2"), False)
def test_get_all_permissions(self):
self.assertEqual(self.user1.get_all_permissions(TestObj()), set(['anon']))
@skipIfCustomUser
@override_settings(AUTHENTICATION_BACKENDS=[])
class NoBackendsTest(TestCase):
"""
Tests that an appropriate error is raised if no auth backends are provided.
"""
def setUp(self):
self.user = User.objects.create_user('test', '[email protected]', 'test')
def test_raises_exception(self):
self.assertRaises(ImproperlyConfigured, self.user.has_perm, ('perm', TestObj(),))
@skipIfCustomUser
class InActiveUserBackendTest(TestCase):
"""
Tests for an inactive user
"""
backend = 'django.contrib.auth.tests.test_auth_backends.SimpleRowlevelBackend'
def setUp(self):
self.curr_auth = settings.AUTHENTICATION_BACKENDS
settings.AUTHENTICATION_BACKENDS = (self.backend,)
self.user1 = User.objects.create_user('test', '[email protected]', 'test')
self.user1.is_active = False
self.user1.save()
def tearDown(self):
settings.AUTHENTICATION_BACKENDS = self.curr_auth
def test_has_perm(self):
self.assertEqual(self.user1.has_perm('perm', TestObj()), False)
self.assertEqual(self.user1.has_perm('inactive', TestObj()), True)
def test_has_module_perms(self):
self.assertEqual(self.user1.has_module_perms("app1"), False)
self.assertEqual(self.user1.has_module_perms("app2"), False)
class PermissionDeniedBackend(object):
"""
Always raises PermissionDenied.
"""
supports_object_permissions = True
supports_anonymous_user = True
supports_inactive_user = True
def authenticate(self, username=None, password=None):
raise PermissionDenied
@skipIfCustomUser
class PermissionDeniedBackendTest(TestCase):
"""
Tests that other backends are not checked once a backend raises PermissionDenied
"""
backend = 'django.contrib.auth.tests.test_auth_backends.PermissionDeniedBackend'
def setUp(self):
self.user1 = User.objects.create_user('test', '[email protected]', 'test')
self.user1.save()
@override_settings(AUTHENTICATION_BACKENDS=(backend, ) +
tuple(settings.AUTHENTICATION_BACKENDS))
def test_permission_denied(self):
"user is not authenticated after a backend raises permission denied #2550"
self.assertEqual(authenticate(username='test', password='test'), None)
@override_settings(AUTHENTICATION_BACKENDS=tuple(
settings.AUTHENTICATION_BACKENDS) + (backend, ))
def test_authenticates(self):
self.assertEqual(authenticate(username='test', password='test'), self.user1)
class NewModelBackend(ModelBackend):
pass
@skipIfCustomUser
class ChangedBackendSettingsTest(TestCase):
"""
Tests for changes in the settings.AUTHENTICATION_BACKENDS
"""
backend = 'django.contrib.auth.tests.test_auth_backends.NewModelBackend'
TEST_USERNAME = 'test_user'
TEST_PASSWORD = 'test_password'
TEST_EMAIL = '[email protected]'
def setUp(self):
User.objects.create_user(self.TEST_USERNAME,
self.TEST_EMAIL,
self.TEST_PASSWORD)
@override_settings(AUTHENTICATION_BACKENDS=(backend, ))
def test_changed_backend_settings(self):
"""
Tests that removing a backend configured in AUTHENTICATION_BACKENDS
makes already logged-in users disconnect.
"""
# Get a session for the test user
self.assertTrue(self.client.login(
username=self.TEST_USERNAME,
password=self.TEST_PASSWORD)
)
# Prepare a request object
request = HttpRequest()
request.session = self.client.session
# Remove NewModelBackend
with self.settings(AUTHENTICATION_BACKENDS=(
'django.contrib.auth.backends.ModelBackend',)):
# Get the user from the request
user = get_user(request)
# Assert that the user retrieval is successful and the user is
# anonymous, as the backend is no longer available.
self.assertIsNotNone(user)
self.assertTrue(user.is_anonymous())
@skipIfCustomUser
class ImproperlyConfiguredUserModelTest(TestCase):
"""
Tests that an exception from within get_user_model is propagated and doesn't
raise an UnboundLocalError.
Regression test for ticket #21439
"""
def setUp(self):
self.user1 = User.objects.create_user('test', '[email protected]', 'test')
self.client.login(
username='test',
password='test'
)
@override_settings(AUTH_USER_MODEL='thismodel.doesntexist')
def test_does_not_shadow_exception(self):
# Prepare a request object
request = HttpRequest()
request.session = self.client.session
self.assertRaises(ImproperlyConfigured, get_user, request)
| cc0-1.0 |
kunaltyagi/nsiqcppstyle | nsiqcppstyle.py | 1 | 2624 | #!/usr/bin/env python
#
# Copyright (c) 2009 NHN Inc. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of NHN Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import sys
import os
import csv # @UnusedImport
import urllib # @UnusedImport
try:
import hashlib # @UnusedImport
except ImportError:
import md5 # @UnusedImport
import unittest # @UnusedImport
import platform # @UnusedImport
import sre_compile # @UnusedImport
import shutil # @UnusedImport
def WeAreFrozen():
return hasattr(sys, "frozen")
def ModulePath():
if WeAreFrozen():
return os.path.dirname(
unicode(sys.executable, sys.getfilesystemencoding()))
return os.path.dirname(unicode(__file__, sys.getfilesystemencoding()))
def GetRuntimePath():
"Return the path of this tool"
if (sys.platform == "win32"):
runtimePath = ModulePath()
else:
modename = globals()['__name__']
module = sys.modules[modename]
runtimePath = os.path.dirname(module.__file__)
return runtimePath
if __name__ == "__main__":
sys.path.append(GetRuntimePath())
module = __import__("nsiqcppstyle_exe")
sys.exit(module.main())
| gpl-2.0 |
dmsurti/mayavi | mayavi/sources/chaco_reader.py | 3 | 2647 | """A Chaco file reader.
"""
# Author: Suyog Dutt Jain <[email protected]>
# Copyright (c) 2009, Enthought, Inc.
# License: BSD Style.
# Enthought library imports.
from traits.api import Instance, Str
from traitsui.api import View, Group, Item
from tvtk.api import tvtk
# Local imports.
from mayavi.core.source import Source
from mayavi.core.pipeline_info import PipelineInfo
########################################################################
# `ChacoReader` class
########################################################################
class ChacoReader(Source):
"""A Chaco reader.
"""
# The version of this class. Used for persistence.
__version__ = 0
base_name = Str('', desc='basename of the Chaco files')
# The VTK data file reader.
reader = Instance(tvtk.ChacoReader, args=(), allow_none=False,
record=True)
# Information about what this object can produce.
output_info = PipelineInfo(datasets=['unstructured_grid'])
########################################
# View related code.
# Our view.
view = View(Group(Item(name='reader', style='custom',
resizable=True),
show_labels=False),
resizable=True)
######################################################################
# `FileDataSource` interface
######################################################################
def __init__(self, base_name='', configure=True, **traits):
super(ChacoReader, self).__init__(**traits)
if configure:
self.reader.edit_traits(kind='livemodal')
self.base_name = self.reader.base_name
def update(self):
if len(self.base_name) == 0:
return
self.reader.update()
self.render()
def has_output_port(self):
""" Return True as the reader has output port."""
return True
def get_output_object(self):
""" Return the reader output port."""
return self.reader.output_port
######################################################################
# Non-public interface
######################################################################
def _base_name_changed(self, value):
if len(value) == 0:
return
else:
self.reader.base_name = value
self._update_reader_output()
def _update_reader_output(self):
self.reader.update()
self.reader.update_information()
self.reader.on_trait_change(self.render)
self.outputs = [self.reader.output]
self.data_changed = True
| bsd-3-clause |
ZaraSeries/repo | script.module.urlresolver/lib/urlresolver/plugins/streaminto.py | 3 | 2808 | """
urlresolver XBMC Addon
Copyright (C) 2011 t0mm0
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import json
from lib import helpers
from urlresolver import common
from urlresolver.common import i18n
from urlresolver.resolver import UrlResolver, ResolverError
class StreamintoResolver(UrlResolver):
name = "streaminto"
domains = ["streamin.to"]
pattern = '(?://|\.)(streamin\.to)/(?:embed-|)?([0-9A-Za-z]+)'
def __init__(self):
self.net = common.Net()
self.headers = {'User-Agent': common.SMU_USER_AGENT}
def get_media_url(self, host, media_id):
web_url = self.get_url(host, media_id)
headers = {'Referer': web_url}
headers.update(self.headers)
html = self.net.http_GET(web_url, headers=headers).content
sources = helpers.scrape_sources(html, patterns=["""file:\s*["'](?P<url>[^"']+)"""])
if sources:
auth = self.__check_auth(media_id)
if not auth:
auth = self.__auth_ip(media_id)
if auth:
return helpers.pick_source(sources) + helpers.append_headers(headers)
else:
raise ResolverError(i18n('no_ip_authorization'))
else:
raise ResolverError('Unable to locate links')
def __auth_ip(self, media_id):
header = i18n('stream_auth_header')
line1 = i18n('auth_required')
line2 = i18n('visit_link')
line3 = i18n('click_pair') % ('http://api.streamin.to/pair')
with common.kodi.CountdownDialog(header, line1, line2, line3) as cd:
return cd.start(self.__check_auth, [media_id])
def __check_auth(self, media_id):
common.logger.log('Checking Auth: %s' % (media_id))
url = 'http://api.streamin.to/pair/check.php'
try: js_result = json.loads(self.net.http_GET(url, headers=self.headers).content)
except ValueError: raise ResolverError('Unusable Authorization Response')
common.logger.log('Auth Result: %s' % (js_result))
return js_result.get('status') == 200
def get_url(self, host, media_id):
return self._default_get_url(host, media_id)
| gpl-2.0 |
arunlodhi/pymc3 | pymc3/interactive_sampling.py | 10 | 4699 | try:
__IPYTHON__
import IPython
import ipywidgets as widgets
from IPython.core import display
from traitlets import Unicode, Integer, Float
import json
from numpy.random import seed
import time
from .backends.base import MultiTrace
from .sampling import _iter_sample
except (NameError, ImportError):
IPython = False
_no_notebook_error_message = "nbsample can only be run inside IPython Notebook."
if IPython:
__all__ = ['nbsample']
_javascript = """<script type="text/javascript">
require(["widgets/js/widget"], function(WidgetManager){
var ISampleWidget = IPython.WidgetView.extend({
render: function(){
var html = $("<table style='width:100%;'><tr><td style='width:60px'><button>Stop</button></td>"+
"<td class='pymc3-clock' style='width:60px'></td>"+
"<td class='pymc3-progress'>"+
"<div class='bar' style='width:0px; height: 20px; "+
"background-image: linear-gradient(to bottom, #dddddd 0%,#111111 100%)"+
"'> </div></td>"+
"<td class='pymc3-current-samples' style='width:60px;'>0</td>"+
"<td style='width:10px;'>/</td>"+
"<td style='width:60px;' class='pymc3-max-samples'></td>"+
"</tr>"+
"</table>");
this.setElement(html);
this.$el.find("button").click($.proxy(function(){
this.send("stop","stop");
this.$el.find("button").attr("disabled", "disabled");
}, this));
this.model.on('change:max_samples', function(model, value){
this.$el.find(".pymc3-max-samples").text(value);
}, this);
this.model.on('change:clock', function(model, value){
this.$el.find(".pymc3-clock").text(value);
}, this);
this.model.on('change:current_samples', function(model, value){
this.$el.find(".pymc3-current-samples").text(value);
var total_width = this.$el.find(".pymc3-progress").width()-5;
var total_samples = this.model.get("max_samples");
var width = value * total_width / total_samples;
this.$el.find(".pymc3-progress .bar").width(width)
}, this);
}
});
WidgetManager.register_widget_view('ISampleWidget', ISampleWidget)
});
</script>
"""
class ISampleWidget(widgets.DOMWidget):
_view_name = Unicode('ISampleWidget', sync=True)
current_samples = Integer(sync=True)
max_samples = Integer(sync=True)
clock = Unicode(sync=True)
def __init__(self, *args, **kwargs):
widgets.DOMWidget.__init__(self,*args, **kwargs)
self.iteration = 0
self.on_msg(self._handle_custom_msg)
self.send_state()
self.stopped = False
def _handle_custom_msg(self, message):
if message == "stop":
self.stopped = True
def nbsample(draws, step, start=None, trace=None, chain=0, tune=None, model=None, random_seed=None):
try:
assert(hasattr(IPython.get_ipython(), 'comm_manager'))
except (AssertionError, NameError, KeyError) as e:
raise NotImplementedError(_no_notebook_error_message)
display.display_html(_javascript, raw=True)
w = ISampleWidget()
display.display(w)
t_start = time.time()
t_last = time.time()
w.max_samples = draws
w.current_samples = 0
sampling = _iter_sample(draws, step, start=start, trace=trace,
chain=chain, tune=tune, model=model,
random_seed=random_seed)
for i, trace in enumerate(sampling, 1):
elapsed = time.time() - t_start
elapsed_last = time.time() - t_last
if elapsed_last > 0.1:
t_last = time.time()
w.current_samples = i
w.clock = "%02i:%02i:%02i" % (elapsed / 60 / 60, elapsed / 60 % 60, elapsed % 60)
get_ipython().kernel.do_one_iteration()
if w.stopped:
trace.close()
break
w.current_samples = i
return MultiTrace([trace])
else:
def nbsample(*args, **kwargs):
raise NotImplementedError(_no_notebook_error_message)
| apache-2.0 |
dnozay/lettuce | tests/integration/lib/Django-1.3/django/core/servers/fastcgi.py | 289 | 6402 | """
FastCGI (or SCGI, or AJP1.3 ...) server that implements the WSGI protocol.
Uses the flup python package: http://www.saddi.com/software/flup/
This is a adaptation of the flup package to add FastCGI server support
to run Django apps from Web servers that support the FastCGI protocol.
This module can be run standalone or from the django-admin / manage.py
scripts using the "runfcgi" directive.
Run with the extra option "help" for a list of additional options you can
pass to this server.
"""
from django.utils import importlib
import sys, os
__version__ = "0.1"
__all__ = ["runfastcgi"]
FASTCGI_HELP = r"""
Run this project as a fastcgi (or some other protocol supported
by flup) application. To do this, the flup package from
http://www.saddi.com/software/flup/ is required.
runfcgi [options] [fcgi settings]
Optional Fcgi settings: (setting=value)
protocol=PROTOCOL fcgi, scgi, ajp, ... (default fcgi)
host=HOSTNAME hostname to listen on.
port=PORTNUM port to listen on.
socket=FILE UNIX socket to listen on.
method=IMPL prefork or threaded (default prefork).
maxrequests=NUMBER number of requests a child handles before it is
killed and a new child is forked (0 = no limit).
maxspare=NUMBER max number of spare processes / threads.
minspare=NUMBER min number of spare processes / threads.
maxchildren=NUMBER hard limit number of processes / threads.
daemonize=BOOL whether to detach from terminal.
pidfile=FILE write the spawned process-id to this file.
workdir=DIRECTORY change to this directory when daemonizing.
debug=BOOL set to true to enable flup tracebacks.
outlog=FILE write stdout to this file.
errlog=FILE write stderr to this file.
umask=UMASK umask to use when daemonizing, in octal notation (default 022).
Examples:
Run a "standard" fastcgi process on a file-descriptor
(for Web servers which spawn your processes for you)
$ manage.py runfcgi method=threaded
Run a scgi server on a TCP host/port
$ manage.py runfcgi protocol=scgi method=prefork host=127.0.0.1 port=8025
Run a fastcgi server on a UNIX domain socket (posix platforms only)
$ manage.py runfcgi method=prefork socket=/tmp/fcgi.sock
Run a fastCGI as a daemon and write the spawned PID in a file
$ manage.py runfcgi socket=/tmp/fcgi.sock method=prefork \
daemonize=true pidfile=/var/run/django-fcgi.pid
"""
FASTCGI_OPTIONS = {
'protocol': 'fcgi',
'host': None,
'port': None,
'socket': None,
'method': 'fork',
'daemonize': None,
'workdir': '/',
'pidfile': None,
'maxspare': 5,
'minspare': 2,
'maxchildren': 50,
'maxrequests': 0,
'debug': None,
'outlog': None,
'errlog': None,
'umask': None,
}
def fastcgi_help(message=None):
print FASTCGI_HELP
if message:
print message
return False
def runfastcgi(argset=[], **kwargs):
options = FASTCGI_OPTIONS.copy()
options.update(kwargs)
for x in argset:
if "=" in x:
k, v = x.split('=', 1)
else:
k, v = x, True
options[k.lower()] = v
if "help" in options:
return fastcgi_help()
try:
import flup
except ImportError, e:
print >> sys.stderr, "ERROR: %s" % e
print >> sys.stderr, " Unable to load the flup package. In order to run django"
print >> sys.stderr, " as a FastCGI application, you will need to get flup from"
print >> sys.stderr, " http://www.saddi.com/software/flup/ If you've already"
print >> sys.stderr, " installed flup, then make sure you have it in your PYTHONPATH."
return False
flup_module = 'server.' + options['protocol']
if options['method'] in ('prefork', 'fork'):
wsgi_opts = {
'maxSpare': int(options["maxspare"]),
'minSpare': int(options["minspare"]),
'maxChildren': int(options["maxchildren"]),
'maxRequests': int(options["maxrequests"]),
}
flup_module += '_fork'
elif options['method'] in ('thread', 'threaded'):
wsgi_opts = {
'maxSpare': int(options["maxspare"]),
'minSpare': int(options["minspare"]),
'maxThreads': int(options["maxchildren"]),
}
else:
return fastcgi_help("ERROR: Implementation must be one of prefork or thread.")
wsgi_opts['debug'] = options['debug'] is not None
try:
module = importlib.import_module('.%s' % flup_module, 'flup')
WSGIServer = module.WSGIServer
except:
print "Can't import flup." + flup_module
return False
# Prep up and go
from django.core.handlers.wsgi import WSGIHandler
if options["host"] and options["port"] and not options["socket"]:
wsgi_opts['bindAddress'] = (options["host"], int(options["port"]))
elif options["socket"] and not options["host"] and not options["port"]:
wsgi_opts['bindAddress'] = options["socket"]
elif not options["socket"] and not options["host"] and not options["port"]:
wsgi_opts['bindAddress'] = None
else:
return fastcgi_help("Invalid combination of host, port, socket.")
if options["daemonize"] is None:
# Default to daemonizing if we're running on a socket/named pipe.
daemonize = (wsgi_opts['bindAddress'] is not None)
else:
if options["daemonize"].lower() in ('true', 'yes', 't'):
daemonize = True
elif options["daemonize"].lower() in ('false', 'no', 'f'):
daemonize = False
else:
return fastcgi_help("ERROR: Invalid option for daemonize parameter.")
daemon_kwargs = {}
if options['outlog']:
daemon_kwargs['out_log'] = options['outlog']
if options['errlog']:
daemon_kwargs['err_log'] = options['errlog']
if options['umask']:
daemon_kwargs['umask'] = int(options['umask'], 8)
if daemonize:
from django.utils.daemonize import become_daemon
become_daemon(our_home_dir=options["workdir"], **daemon_kwargs)
if options["pidfile"]:
fp = open(options["pidfile"], "w")
fp.write("%d\n" % os.getpid())
fp.close()
WSGIServer(WSGIHandler(), **wsgi_opts).run()
if __name__ == '__main__':
runfastcgi(sys.argv[1:])
| gpl-3.0 |
gdgellatly/OCB1 | addons/delivery/partner.py | 57 | 1474 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class res_partner(osv.osv):
_inherit = 'res.partner'
_columns = {
'property_delivery_carrier': fields.property(
'delivery.carrier',
type='many2one',
relation='delivery.carrier',
string="Delivery Method",
view_load=True,
help="This delivery method will be used when invoicing from picking."),
}
res_partner()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
swilcox/pinax-blog | pinax/blog/admin.py | 3 | 2677 | from django.contrib import admin
from django.utils import timezone
from django.utils.functional import curry
from django.utils.translation import ugettext_lazy as _
from .forms import AdminPostForm
from .models import Post, Image, ReviewComment, Section
from .utils import can_tweet
class ImageInline(admin.TabularInline):
model = Image
fields = ["image_path"]
class ReviewInline(admin.TabularInline):
model = ReviewComment
def make_published(modeladmin, request, queryset):
queryset = queryset.exclude(state=Post.STATE_CHOICES[-1][0], published__isnull=False)
queryset.update(state=Post.STATE_CHOICES[-1][0])
queryset.filter(published__isnull=True).update(published=timezone.now())
make_published.short_description = _("Publish selected posts")
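# (Annotation: Post.STATE_CHOICES[-1][0] is taken here to be the "published"
# state; posts that were already published keep their original timestamp.)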
class PostAdmin(admin.ModelAdmin):
list_display = ["title", "state", "section", "published", "show_secret_share_url"]
list_filter = ["section", "state"]
form = AdminPostForm
actions = [make_published]
fields = [
"section",
"title",
"slug",
"author",
"markup",
"teaser",
"content",
"description",
"primary_image",
"sharable_url",
"state"
]
readonly_fields = ["sharable_url"]
if can_tweet():
fields.append("tweet")
prepopulated_fields = {"slug": ("title",)}
inlines = [
ImageInline,
ReviewInline,
]
def show_secret_share_url(self, obj):
return '<a href="%s">%s</a>' % (obj.sharable_url, obj.sharable_url)
show_secret_share_url.short_description = _("Share this url")
show_secret_share_url.allow_tags = True
def formfield_for_dbfield(self, db_field, **kwargs):
request = kwargs.get("request")
if db_field.name == "author":
ff = super(PostAdmin, self).formfield_for_dbfield(db_field, **kwargs)
ff.initial = request.user.id
return ff
return super(PostAdmin, self).formfield_for_dbfield(db_field, **kwargs)
def get_form(self, request, obj=None, **kwargs):
kwargs.update({
"formfield_callback": curry(self.formfield_for_dbfield, request=request),
})
return super(PostAdmin, self).get_form(request, obj, **kwargs)
def save_form(self, request, form, change):
# this is done to make explicit that we want form.save() to commit;
# form.save() doesn't take a commit kwarg for this reason
return form.save()
class SectionAdmin(admin.ModelAdmin):
prepopulated_fields = {"slug": ("name",)}
admin.site.register(Post, PostAdmin)
admin.site.register(Image)
admin.site.register(Section, SectionAdmin)
| mit |
gengjiawen/AndroidHelper | aar_util/aar_util.py | 1 | 1841 | import os
import shutil
import tempfile
import zipfile
from utils.file_util import get_files_by_re, gen_new_file_extension
def get_aar_files(proj_dir, des_dir):
rel_aar_dir = r"build\outputs\aar"
aar_dirs = [os.path.join(proj_dir, i) for i in os.listdir(proj_dir) if os.path.isdir(os.path.join(proj_dir, i))]
aar_dirs = [os.path.join(i, rel_aar_dir) for i in aar_dirs if os.path.exists(os.path.join(i, rel_aar_dir))]
for i in aar_dirs:
file = os.listdir(i)[0]
debug_aar = os.path.join(i, file)
print(debug_aar)
os.makedirs(des_dir, exist_ok=True)
shutil.copyfile(debug_aar, os.path.join(des_dir, file))
def using_local_aar(aar_dir):
# http://stackoverflow.com/a/24894387/1713757
# or you can just do it in the Android Studio UI
s = 'configurations.maybeCreate("default")'
for i in os.listdir(aar_dir):
if i.endswith("aar"):
print("aar:", i)
t = "artifacts.add(\"default\", file('{}'))\n".format(i)
s += t
print(s)
build_script = os.path.join(aar_dir, "build.gradle")
open(build_script, mode='w', encoding='utf-8').write(s)
aar_module_name = os.path.basename(aar_dir)
print("add this to setting.gradle: ")
print("include ':{}'".format(aar_module_name))
print("\nadd this to mudule using aars: ")
print("compile project(':{}')".format(aar_module_name))
def extract_aar2jar(aar_dir):
aar_files = get_files_by_re(aar_dir, ".*aar")
for i in aar_files:
jar_name = gen_new_file_extension(i, "jar")
with zipfile.ZipFile(i, "r") as z:
temp_dir = tempfile.mkdtemp()
z.extract("classes.jar", temp_dir)
if os.path.exists(jar_name):
os.remove(jar_name)
shutil.move(os.path.join(temp_dir, "classes.jar"), jar_name)
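# Example usage (annotation; paths are hypothetical):
#   get_aar_files(r'C:\proj', r'C:\proj\aars')  # collect each module's .aar
#   using_local_aar(r'C:\proj\aars')            # generate build.gradle glue
#   extract_aar2jar(r'C:\proj\aars')            # unpack each classes.jar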
| mit |
SysTheron/adhocracy | src/adhocracy/websetup.py | 2 | 1955 | """Setup the adhocracy application"""
import logging
import os
import os.path
import pylons
import pylons.test
from pylons import config
import migrate.versioning.api as migrateapi
try:
from migrate.versioning.exceptions import DatabaseAlreadyControlledError
from migrate.versioning.exceptions import DatabaseNotControlledError
except ImportError:
# location changed in 0.6.1
from migrate.exceptions import DatabaseAlreadyControlledError
from migrate.exceptions import DatabaseNotControlledError
from adhocracy.config.environment import load_environment
from adhocracy.lib import install
from adhocracy.model import meta
log = logging.getLogger(__name__)
def setup_app(command, conf, vars):
"""Place any commands to setup adhocracy here"""
if not pylons.test.pylonsapp:
conf = load_environment(conf.global_conf, conf.local_conf, with_db=False)
_setup(conf)
def _setup(config):
# disable delayed execution
# config['adhocracy.amqp.host'] = None
# FIXME: still do this with rq instead of rabbitmq
# NOTE: this is called from tests so it may have side effects
# Create the tables if they don't already exist
url = config.get('sqlalchemy.url')
migrate_repo = os.path.join(os.path.dirname(__file__), 'migration')
repo_version = migrateapi.version(migrate_repo)
if config.get('adhocracy.setup.drop', "OH_NOES") == "KILL_EM_ALL":
meta.data.drop_all(bind=meta.engine)
meta.engine.execute("DROP TABLE IF EXISTS migrate_version")
try:
db_version = migrateapi.db_version(url, migrate_repo)
if db_version < repo_version:
migrateapi.upgrade(url, migrate_repo)
initial_setup = False
except DatabaseNotControlledError:
meta.data.create_all(bind=meta.engine)
migrateapi.version_control(url, migrate_repo, version=repo_version)
initial_setup = True
install.setup_entities(config, initial_setup)
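# Rough flow (annotation, not part of the original source): on a fresh
# database db_version() raises DatabaseNotControlledError, so the tables are
# created and stamped with the repository's latest schema version; on an
# existing database any pending migrations are applied first. Either way,
# install.setup_entities() then seeds the application data.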
| agpl-3.0 |
JFriel/honours_project | venv/lib/python2.7/site-packages/numpy/fft/tests/test_helper.py | 117 | 2556 | #!/usr/bin/env python
"""Test functions for fftpack.helper module
Copied from fftpack.helper by Pearu Peterson, October 2005
"""
from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.testing import TestCase, run_module_suite, assert_array_almost_equal
from numpy import fft
from numpy import pi
class TestFFTShift(TestCase):
def test_definition(self):
x = [0, 1, 2, 3, 4, -4, -3, -2, -1]
y = [-4, -3, -2, -1, 0, 1, 2, 3, 4]
assert_array_almost_equal(fft.fftshift(x), y)
assert_array_almost_equal(fft.ifftshift(y), x)
x = [0, 1, 2, 3, 4, -5, -4, -3, -2, -1]
y = [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4]
assert_array_almost_equal(fft.fftshift(x), y)
assert_array_almost_equal(fft.ifftshift(y), x)
def test_inverse(self):
for n in [1, 4, 9, 100, 211]:
x = np.random.random((n,))
assert_array_almost_equal(fft.ifftshift(fft.fftshift(x)), x)
def test_axes_keyword(self):
freqs = [[0, 1, 2], [3, 4, -4], [-3, -2, -1]]
shifted = [[-1, -3, -2], [2, 0, 1], [-4, 3, 4]]
assert_array_almost_equal(fft.fftshift(freqs, axes=(0, 1)), shifted)
assert_array_almost_equal(fft.fftshift(freqs, axes=0),
fft.fftshift(freqs, axes=(0,)))
assert_array_almost_equal(fft.ifftshift(shifted, axes=(0, 1)), freqs)
assert_array_almost_equal(fft.ifftshift(shifted, axes=0),
fft.ifftshift(shifted, axes=(0,)))
class TestFFTFreq(TestCase):
def test_definition(self):
x = [0, 1, 2, 3, 4, -4, -3, -2, -1]
assert_array_almost_equal(9*fft.fftfreq(9), x)
assert_array_almost_equal(9*pi*fft.fftfreq(9, pi), x)
x = [0, 1, 2, 3, 4, -5, -4, -3, -2, -1]
assert_array_almost_equal(10*fft.fftfreq(10), x)
assert_array_almost_equal(10*pi*fft.fftfreq(10, pi), x)
class TestRFFTFreq(TestCase):
def test_definition(self):
x = [0, 1, 2, 3, 4]
assert_array_almost_equal(9*fft.rfftfreq(9), x)
assert_array_almost_equal(9*pi*fft.rfftfreq(9, pi), x)
x = [0, 1, 2, 3, 4, 5]
assert_array_almost_equal(10*fft.rfftfreq(10), x)
assert_array_almost_equal(10*pi*fft.rfftfreq(10, pi), x)
class TestIRFFTN(TestCase):
def test_not_last_axis_success(self):
ar, ai = np.random.random((2, 16, 8, 32))
a = ar + 1j*ai
axes = (-2,)
# Should not raise error
fft.irfftn(a, axes=axes)
if __name__ == "__main__":
run_module_suite()
| gpl-3.0 |
slundberg/Mocha.jl | docs/conf.py | 23 | 8570 | # -*- coding: utf-8 -*-
#
# Mocha documentation build configuration file, created by
# sphinx-quickstart on Thu Nov 13 00:43:40 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath("sphinx"))
import julia
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.mathjax',
'julia'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Mocha'
copyright = u'2014, pluskid'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0.9'
# The full version, including alpha/beta/rc tags.
release = '0.0.9'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
primary_domain = 'jl'
highlight_language = 'julia'
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin theme
html_theme = 'default'
import os
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
try:
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
except:
pass
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Mochadoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'Mocha.tex', u'Mocha Documentation',
u'pluskid', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'mocha', u'Mocha Documentation',
[u'pluskid'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Mocha', u'Mocha Documentation',
u'pluskid', 'Mocha', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| mit |
waymarkedtrails/waymarked-trails-site | db/tables/route_nodes.py | 2 | 3914 | # This file is part of the Waymarked Trails Map Project
# Copyright (C) 2015 Sarah Hoffmann
#
# This is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
""" Various tables for nodes in a route network.
"""
from re import compile as re_compile
import sqlalchemy as sa
from geoalchemy2 import Geometry
from osgende.generic import TransformedTable
from osgende.common.tags import TagStore
from db.configs import GuidePostConfig, NetworkNodeConfig
from db import conf
GUIDEPOST_CONF = conf.get('GUIDEPOSTS', GuidePostConfig)
class GuidePosts(TransformedTable):
""" Information about guide posts. """
elepattern = re_compile('[\\d.]+')
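    # For illustration (assumed OSM tag format): the pattern grabs the first
    # numeric run out of a free-form "ele" value, e.g.
    #   elepattern.search('2962.06 m').group(0)  ->  '2962.06'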
def __init__(self, meta, source, updates):
self.srid = meta.info.get('srid', source.c.geom.type.srid)
super().__init__(meta, GUIDEPOST_CONF.table_name, source)
self.updates = updates
def add_columns(self, table, src):
table.append_column(sa.Column('name', sa.String))
table.append_column(sa.Column('ele', sa.String))
table.append_column(sa.Column('geom', Geometry('POINT', srid=self.srid)))
def before_update(self, engine):
# save all added guideposts
sql = sa.except_(sa.select([self.src.c.geom.ST_Transform(self.srid)])
.where(self.src.c.id.in_(self.src.select_add_modify())),
sa.select([self.c.geom])
.where(self.c.id.in_(self.src.select_add_modify())))
self.updates.add_from_select(engine, sql)
def transform(self, obj):
tags = TagStore(obj['tags'])
# filter by subtype
if GUIDEPOST_CONF.subtype is not None:
booltags = tags.get_booleans()
if len(booltags) > 0:
if not booltags.get(GUIDEPOST_CONF.subtype, False):
return None
else:
if GUIDEPOST_CONF.require_subtype:
return None
outtags = { 'name' : tags.get('name'), 'ele' : None }
        if 'ele' in tags:
m = self.elepattern.search(tags['ele'])
if m:
outtags['ele'] = m.group(0)
# XXX check for ft
if self.srid == self.src.c.geom.type.srid:
outtags['geom'] = obj['geom']
else:
outtags['geom'] = obj['geom'].ST_Transform(self.srid)
return outtags
NETWORKNODE_CONF = conf.get('NETWORKNODES', NetworkNodeConfig)
class NetworkNodes(TransformedTable):
""" Information about referenced nodes in a route network.
"""
def __init__(self, meta, source):
self.srid = meta.info.get('srid', source.c.geom.type.srid)
super().__init__(meta, NETWORKNODE_CONF.table_name, source)
def add_columns(self, table, src):
table.append_column(sa.Column('name', sa.String))
table.append_column(sa.Column('geom', Geometry('POINT', srid=self.srid)))
def transform(self, obj):
tags = TagStore(obj['tags'])
if NETWORKNODE_CONF.node_tag not in tags:
return None
outtags = { 'name' : tags[NETWORKNODE_CONF.node_tag] }
if self.srid == self.src.c.geom.type.srid:
outtags['geom'] = obj['geom']
else:
outtags['geom'] = obj['geom'].ST_Transform(self.srid)
return outtags
| gpl-3.0 |
Yawning/or-applet | orapplet/status_icon.py | 1 | 12289 | #!/usr/bin/env python2
# This file is part of or-applet.
#
# or-applet is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# or-applet is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with or-applet. If not, see <http://www.gnu.org/licenses/>.
from subprocess import Popen
from gi.repository import Gtk, Gdk
from stem import CircBuildFlag, CircClosureReason, CircPurpose, CircStatus, HiddenServiceState
from orapplet.utils import get_leek_icon
def _pos(menu, x, y, icon):
print(x)
print(y)
print(icon)
return (Gtk.StatusIcon.position_menu(menu, x, y, icon))
def _format_circuit(circuit):
s = 'Circuit: ' + circuit.id + '\n'
s += 'Created: ' + str(circuit.created) + '\n'
s += 'Status: ' + _format_status(circuit.status) + '\n'
s += 'Purpose: ' + _format_purpose(circuit.purpose) + '\n'
s += 'Flags: \n' + _format_build_flags(circuit.build_flags)
if circuit.hs_state is not None:
s += 'HS State: ' + _format_hs_state(circuit.hs_state) + '\n'
if circuit.path is not None and circuit.path:
s += 'Path:\n'
s += _format_path(circuit.path)
if circuit.reason is not None:
s += 'Local Close Reason: ' + _format_close_reason(circuit.reason)
if circuit.remote_reason is not None:
s += 'Remote Close Reason: ' + _format_close_reason(circuit.remote_reason)
return s
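# For illustration (made-up values; the real ones come from stem's circuit
# events), a BUILT general-purpose circuit renders roughly as:
#   Circuit: 42
#   Created: 2015-01-01 00:00:00
#   Status: BUILT (all hops finished, can now accept streams)
#   Purpose: GENERAL (circuit for AP and/or directory request streams)
#   Flags:
#    NEED_CAPACITY (this circuit must use only high-capacity nodes)
#   Path:
#    [0]: <fingerprint>~<nickname>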
_FORMAT_STATUSES = {
CircStatus.LAUNCHED: 'LAUNCHED (circuit ID assigned to new circuit)',
CircStatus.BUILT: 'BUILT (all hops finished, can now accept streams)',
CircStatus.EXTENDED: 'EXTENDED (one more hop has been completed)',
CircStatus.FAILED: 'FAILED (circuit closed (was not built))',
CircStatus.CLOSED: 'CLOSED (circuit closed (was built))'
}
def _format_status(status):
return _FORMAT_STATUSES.get(status, str(status))
_FORMAT_PURPOSES = {
CircPurpose.GENERAL: 'GENERAL (circuit for AP and/or directory request streams)',
CircPurpose.HS_CLIENT_INTRO: 'HS_CLIENT_INTRO (HS client-side introduction-point circuit)',
CircPurpose.HS_CLIENT_REND: 'HS_CLIENT_REND (HS client-side rendezvous circuit; carries AP streams)',
CircPurpose.HS_SERVICE_INTRO: 'HS_SERVICE_INTRO (HS service-side introduction-point circuit)',
CircPurpose.HS_SERVICE_REND: 'HS_SERVICE_REND (HS service-side rendezvous circuit)',
CircPurpose.TESTING: 'TESTING (reachability-testing circuit; carries no traffic)',
CircPurpose.CONTROLLER: 'CONTROLLER (circuit built by a controller)',
CircPurpose.MEASURE_TIMEOUT: 'MEASURE_TIMEOUT (circuit being kept around to see how long it takes)'
}
_HS_PURPOSES = [
CircPurpose.HS_CLIENT_INTRO,
CircPurpose.HS_CLIENT_REND,
CircPurpose.HS_SERVICE_INTRO,
CircPurpose.HS_SERVICE_REND
]
def _filter_circuit(circuit):
if CircBuildFlag.IS_INTERNAL in circuit.build_flags:
return circuit.purpose in _HS_PURPOSES
return True
def _format_purpose(purpose):
return _FORMAT_PURPOSES.get(purpose, str(purpose))
_FORMAT_FLAGS = {
CircBuildFlag.ONEHOP_TUNNEL: 'ONEHOP_TUNNEL (one-hop circuit, used for tunneled directory conns)',
CircBuildFlag.IS_INTERNAL: 'IS_INTERNAL (internal circuit, not to be used for exiting streams)',
CircBuildFlag.NEED_CAPACITY: 'NEED_CAPACITY (this circuit must use only high-capacity nodes)',
CircBuildFlag.NEED_UPTIME: 'NEED_UPTIME (this circuit must use only high-uptime nodes)'
}
def _format_build_flags(flags):
s_list = []
for flag in flags:
s_list.append(' %s\n' % _FORMAT_FLAGS.get(flag, str(flag)))
return ''.join(s_list)
def _format_path(path):
s_list = []
idx = 0
for hop in path:
s_list.append(' [%d]: %s~%s\n' % (idx, hop[0], hop[1]))
idx += 1
return ''.join(s_list)
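# For illustration (hypothetical fingerprints/nicknames):
#   _format_path([('AAAA', 'relay1'), ('BBBB', 'relay2')])
# returns ' [0]: AAAA~relay1\n [1]: BBBB~relay2\n'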
_FORMAT_HS_STATE = {
HiddenServiceState.HSCI_CONNECTING: 'HSCI_CONNECTING (connecting to intro point)',
HiddenServiceState.HSCI_INTRO_SENT: 'HSCI_INTRO_SENT (sent INTRODUCE1; waiting for reply from IP)',
HiddenServiceState.HSCI_DONE: 'HSCI_DONE (received reply from IP relay; closing)',
HiddenServiceState.HSCR_CONNECTING: 'HSCR_CONNECTING (connecting to or waiting for reply from RP)',
HiddenServiceState.HSCR_ESTABLISHED_IDLE: 'HSCR_ESTABLISHED_IDLE (established RP; waiting for introduction)',
HiddenServiceState.HSCR_ESTABLISHED_WAITING: 'HSCR_ESTABLISHED_WAITING (introduction sent to HS; waiting for rend)',
HiddenServiceState.HSCR_JOINED: 'HSCR_JOINED (connected to HS)',
HiddenServiceState.HSSI_CONNECTING: 'HSSI_CONNECTING (connecting to intro point)',
HiddenServiceState.HSSI_ESTABLISHED: 'HSSI_ESTABLISHED (established intro point)',
HiddenServiceState.HSSR_CONNECTING: 'HSSR_CONNECTING (connecting to client\'s rend point)',
HiddenServiceState.HSSR_JOINED: 'HSSR_JOINED (connected to client\'s RP circuit)',
}
def _format_hs_state(hs_state):
return _FORMAT_HS_STATE.get(hs_state, str(hs_state))
def _format_close_reason(reason):
# Fuck it, these shouldn't show up in normal use anyway.
return str(reason)
def _format_streams(streams):
s_list = []
for stream in streams:
s_list.append(' %s\n' % stream)
return ''.join(s_list)
def _labeled_separator(label):
box = Gtk.Box()
label = Gtk.Label(label) # set_markup?
box.pack_start(Gtk.HSeparator(), True, True, 0)
box.pack_start(label, False, False, 2)
box.pack_start(Gtk.HSeparator(), True, True, 0)
item = Gtk.ImageMenuItem()
item.set_property('child', box)
item.set_sensitive(False)
return item
class PopupMenu(object):
_ctl = None
_menu = None
_status_icon = None
def __init__(self, icon):
self._ctl = icon._ctl
self._status_icon = icon
self._menu = Gtk.Menu()
item = Gtk.MenuItem('Stem Prompt')
item.connect('activate', self._on_prompt)
self._menu.append(item)
item = Gtk.MenuItem('Reload Tor Config')
item.connect('activate', self._on_reload)
self._menu.append(item)
self._menu.append(Gtk.SeparatorMenuItem())
item = Gtk.MenuItem('About')
item.connect('activate', self._on_about)
self._menu.append(item)
self._menu.show_all()
def popup(self, widget, button, time):
self._menu.popup(None, None, _pos, self._status_icon._icon, button, time)
def _on_prompt(self, widget, data=None):
Popen('/usr/bin/urxvt -e python2 -c "from stem.interpreter import main; main()"', shell=True)
def _on_reload(self, widget, data=None):
self._ctl.reload()
def _on_about(self, widget, data=None):
about_dialog = Gtk.AboutDialog()
about_dialog.set_destroy_with_parent(True)
about_dialog.set_program_name('or-applet')
about_dialog.set_copyright('Copyright 2014 Yawning Angel')
about_dialog.set_comments('A Gtk+ Tor System Tray applet.')
about_dialog.set_version('0.0.1')
about_dialog.set_authors(['Yawning Angel'])
about_dialog.set_artists(['Robin Weatherall http://www.robinweatherall.eu'])
about_dialog.run()
about_dialog.destroy()
class ActivateMenu(object):
_ctl = None
_clipboard = None
_menu = None
_status_icon = None
def __init__(self, icon):
self._ctl = icon._ctl
self._clipboard = Gtk.Clipboard.get(Gdk.SELECTION_CLIPBOARD)
self._status_icon = icon
self._menu = Gtk.Menu()
item = Gtk.MenuItem('NEWNYM')
item.set_sensitive(self._ctl.is_newnym_available())
item.connect('activate', self._on_newnym)
self._menu.append(item)
self._menu.append(Gtk.SeparatorMenuItem())
self._build_dynamic_menu()
self._menu.show_all()
def popup(self, widget, button, time):
self._menu.popup(None, None, _pos, self._status_icon._icon, button, time)
def _build_dynamic_menu(self):
circuits = self._ctl.get_circuits()
if circuits is None:
item = Gtk.MenuItem('No circuits established')
item.set_sensitive(False)
self._menu.append(item)
return
streams = self._ctl.get_streams()
for circuit in circuits:
self._build_circuit_menu(circuit, streams)
def _build_circuit_menu(self, circuit, streams):
# Skip displaying internal circuits, unless they are actually hidden
# service circuits in disguise.
if not _filter_circuit(circuit):
return
circ_info = _format_circuit(circuit)
our_auth = ""
if circuit.socks_username:
our_auth = circuit.socks_username
if circuit.socks_password:
our_auth += ':' + circuit.socks_password
our_streams = []
if CircPurpose.HS_CLIENT_INTRO in circuit.purpose or CircPurpose.HS_CLIENT_REND in circuit.purpose:
our_streams.append('[HS Client]: %s.onion' % circuit.rend_query)
elif CircPurpose.HS_SERVICE_INTRO in circuit.purpose:
our_streams.append('[HS Server Intro]: %s.onion' % circuit.rend_query)
elif CircPurpose.HS_SERVICE_REND in circuit.purpose:
our_streams.append('[HS Server Rend]: %s.onion' % circuit.rend_query)
else:
for stream in streams:
if stream.circ_id == circuit.id:
our_streams.append('[%s]: %s' % (stream.id, stream.target))
if not our_streams:
our_streams.append('No streams established')
stream_info = 'Streams:\n%s' % _format_streams(our_streams)
menu = Gtk.Menu()
stream_sep = ''
if len(our_auth) > 0:
stream_sep = 'Streams (%s)' % our_auth
else:
stream_sep = 'Streams'
menu.append(_labeled_separator(stream_sep))
for s in our_streams:
item = Gtk.MenuItem(s)
menu.append(item)
menu.append(_labeled_separator('Path'))
idx = 0
for hop in circuit.path:
item_text = '[%d]: %s~%s' % (idx, hop[0], hop[1])
item = Gtk.MenuItem(item_text)
menu.append(item)
idx += 1
menu.append(Gtk.SeparatorMenuItem())
item = Gtk.MenuItem('Copy to clipboard')
item.connect('activate', self._on_copy_circuit, circ_info + stream_info)
menu.append(item)
item = Gtk.MenuItem('Close circuit')
if CircPurpose.HS_SERVICE_INTRO not in circuit.purpose and CircPurpose.HS_SERVICE_REND not in circuit.purpose:
item.connect('activate', self._on_close_circuit, circuit.id)
else:
item.set_sensitive(False)
menu.append(item)
item = Gtk.MenuItem('Circuit: ' + circuit.id)
item.set_submenu(menu)
self._menu.append(item)
def _on_newnym(self, widget, data=None):
self._ctl.newnym()
def _on_copy_circuit(self, widget, data=None):
self._clipboard.set_text(data, -1)
def _on_close_circuit(self, widget, data=None):
self._ctl.close_circuit(data)
class OrStatusIcon(object):
_ctl = None
_icon = None
_menu_popup = None
_activate_menu = None
def __init__(self, ctl):
self._ctl = ctl
self._menu_popup = PopupMenu(self)
self._icon = Gtk.StatusIcon()
self._icon.set_from_file(get_leek_icon())
self._icon.connect('activate', self._on_activate)
self._icon.connect('popup-menu', self._menu_popup.popup)
self._icon.set_visible(True)
def set_tooltip_text(self, text):
self._icon.set_tooltip_text(text)
def pos(self, menu, icon):
return (Gtk.StatusIcon.position_menu(menu, icon))
def _on_activate(self, widget, data=None):
# Fucking python GCs the menu unless I stash it in a local.
self._activate_menu = ActivateMenu(self)
self._activate_menu.popup(self._activate_menu, 1, Gtk.get_current_event_time())
| gpl-3.0 |
deadblue/baidupan_shell | pyasn1/type/univ.py | 185 | 39731 | # ASN.1 "universal" data types
import operator, sys
from pyasn1.type import base, tag, constraint, namedtype, namedval, tagmap
from pyasn1.codec.ber import eoo
from pyasn1.compat import octets
from pyasn1 import error
# "Simple" ASN.1 types (yet incomplete)
class Integer(base.AbstractSimpleAsn1Item):
tagSet = baseTagSet = tag.initTagSet(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x02)
)
namedValues = namedval.NamedValues()
def __init__(self, value=None, tagSet=None, subtypeSpec=None,
namedValues=None):
if namedValues is None:
self.__namedValues = self.namedValues
else:
self.__namedValues = namedValues
base.AbstractSimpleAsn1Item.__init__(
self, value, tagSet, subtypeSpec
)
def __and__(self, value): return self.clone(self._value & value)
def __rand__(self, value): return self.clone(value & self._value)
def __or__(self, value): return self.clone(self._value | value)
def __ror__(self, value): return self.clone(value | self._value)
def __xor__(self, value): return self.clone(self._value ^ value)
def __rxor__(self, value): return self.clone(value ^ self._value)
def __lshift__(self, value): return self.clone(self._value << value)
def __rshift__(self, value): return self.clone(self._value >> value)
def __add__(self, value): return self.clone(self._value + value)
def __radd__(self, value): return self.clone(value + self._value)
def __sub__(self, value): return self.clone(self._value - value)
def __rsub__(self, value): return self.clone(value - self._value)
def __mul__(self, value): return self.clone(self._value * value)
def __rmul__(self, value): return self.clone(value * self._value)
def __mod__(self, value): return self.clone(self._value % value)
def __rmod__(self, value): return self.clone(value % self._value)
def __pow__(self, value, modulo=None): return self.clone(pow(self._value, value, modulo))
def __rpow__(self, value): return self.clone(pow(value, self._value))
if sys.version_info[0] <= 2:
def __div__(self, value): return self.clone(self._value // value)
def __rdiv__(self, value): return self.clone(value // self._value)
else:
def __truediv__(self, value): return self.clone(self._value / value)
def __rtruediv__(self, value): return self.clone(value / self._value)
def __divmod__(self, value): return self.clone(self._value // value)
def __rdivmod__(self, value): return self.clone(value // self._value)
__hash__ = base.AbstractSimpleAsn1Item.__hash__
def __int__(self): return int(self._value)
if sys.version_info[0] <= 2:
def __long__(self): return long(self._value)
def __float__(self): return float(self._value)
def __abs__(self): return abs(self._value)
def __index__(self): return int(self._value)
def __lt__(self, value): return self._value < value
def __le__(self, value): return self._value <= value
def __eq__(self, value): return self._value == value
def __ne__(self, value): return self._value != value
def __gt__(self, value): return self._value > value
def __ge__(self, value): return self._value >= value
def prettyIn(self, value):
if not isinstance(value, str):
try:
return int(value)
except:
raise error.PyAsn1Error(
'Can\'t coerce %s into integer: %s' % (value, sys.exc_info()[1])
)
r = self.__namedValues.getValue(value)
if r is not None:
return r
try:
return int(value)
except:
raise error.PyAsn1Error(
'Can\'t coerce %s into integer: %s' % (value, sys.exc_info()[1])
)
def prettyOut(self, value):
r = self.__namedValues.getName(value)
return r is None and str(value) or repr(r)
def getNamedValues(self): return self.__namedValues
def clone(self, value=None, tagSet=None, subtypeSpec=None,
namedValues=None):
if value is None and tagSet is None and subtypeSpec is None \
and namedValues is None:
return self
if value is None:
value = self._value
if tagSet is None:
tagSet = self._tagSet
if subtypeSpec is None:
subtypeSpec = self._subtypeSpec
if namedValues is None:
namedValues = self.__namedValues
return self.__class__(value, tagSet, subtypeSpec, namedValues)
def subtype(self, value=None, implicitTag=None, explicitTag=None,
subtypeSpec=None, namedValues=None):
if value is None:
value = self._value
if implicitTag is not None:
tagSet = self._tagSet.tagImplicitly(implicitTag)
elif explicitTag is not None:
tagSet = self._tagSet.tagExplicitly(explicitTag)
else:
tagSet = self._tagSet
if subtypeSpec is None:
subtypeSpec = self._subtypeSpec
else:
subtypeSpec = subtypeSpec + self._subtypeSpec
if namedValues is None:
namedValues = self.__namedValues
else:
namedValues = namedValues + self.__namedValues
return self.__class__(value, tagSet, subtypeSpec, namedValues)
class Boolean(Integer):
tagSet = baseTagSet = tag.initTagSet(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x01),
)
subtypeSpec = Integer.subtypeSpec+constraint.SingleValueConstraint(0,1)
namedValues = Integer.namedValues.clone(('False', 0), ('True', 1))
class BitString(base.AbstractSimpleAsn1Item):
tagSet = baseTagSet = tag.initTagSet(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x03)
)
namedValues = namedval.NamedValues()
def __init__(self, value=None, tagSet=None, subtypeSpec=None,
namedValues=None):
if namedValues is None:
self.__namedValues = self.namedValues
else:
self.__namedValues = namedValues
base.AbstractSimpleAsn1Item.__init__(
self, value, tagSet, subtypeSpec
)
def clone(self, value=None, tagSet=None, subtypeSpec=None,
namedValues=None):
if value is None and tagSet is None and subtypeSpec is None \
and namedValues is None:
return self
if value is None:
value = self._value
if tagSet is None:
tagSet = self._tagSet
if subtypeSpec is None:
subtypeSpec = self._subtypeSpec
if namedValues is None:
namedValues = self.__namedValues
return self.__class__(value, tagSet, subtypeSpec, namedValues)
def subtype(self, value=None, implicitTag=None, explicitTag=None,
subtypeSpec=None, namedValues=None):
if value is None:
value = self._value
if implicitTag is not None:
tagSet = self._tagSet.tagImplicitly(implicitTag)
elif explicitTag is not None:
tagSet = self._tagSet.tagExplicitly(explicitTag)
else:
tagSet = self._tagSet
if subtypeSpec is None:
subtypeSpec = self._subtypeSpec
else:
subtypeSpec = subtypeSpec + self._subtypeSpec
if namedValues is None:
namedValues = self.__namedValues
else:
namedValues = namedValues + self.__namedValues
return self.__class__(value, tagSet, subtypeSpec, namedValues)
def __str__(self): return str(tuple(self))
# Immutable sequence object protocol
def __len__(self):
if self._len is None:
self._len = len(self._value)
return self._len
def __getitem__(self, i):
if isinstance(i, slice):
return self.clone(operator.getitem(self._value, i))
else:
return self._value[i]
def __add__(self, value): return self.clone(self._value + value)
def __radd__(self, value): return self.clone(value + self._value)
def __mul__(self, value): return self.clone(self._value * value)
def __rmul__(self, value): return self * value
def prettyIn(self, value):
r = []
if not value:
return ()
elif isinstance(value, str):
if value[0] == '\'':
if value[-2:] == '\'B':
for v in value[1:-2]:
if v == '0':
r.append(0)
elif v == '1':
r.append(1)
else:
raise error.PyAsn1Error(
'Non-binary BIT STRING initializer %s' % (v,)
)
return tuple(r)
elif value[-2:] == '\'H':
for v in value[1:-2]:
i = 4
v = int(v, 16)
while i:
i = i - 1
r.append((v>>i)&0x01)
return tuple(r)
else:
raise error.PyAsn1Error(
'Bad BIT STRING value notation %s' % (value,)
)
else:
for i in value.split(','):
j = self.__namedValues.getValue(i)
if j is None:
raise error.PyAsn1Error(
'Unknown bit identifier \'%s\'' % (i,)
)
if j >= len(r):
r.extend([0]*(j-len(r)+1))
r[j] = 1
return tuple(r)
elif isinstance(value, (tuple, list)):
r = tuple(value)
for b in r:
if b and b != 1:
raise error.PyAsn1Error(
'Non-binary BitString initializer \'%s\'' % (r,)
)
return r
elif isinstance(value, BitString):
return tuple(value)
else:
raise error.PyAsn1Error(
'Bad BitString initializer type \'%s\'' % (value,)
)
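    # For illustration, the initializer notations accepted above:
    #   BitString("'0101'B")  -> (0, 1, 0, 1)    binary string form
    #   BitString("'A'H")     -> (1, 0, 1, 0)    hex string form
    #   BitString((1, 0, 1))  -> (1, 0, 1)       plain bit tuple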
def prettyOut(self, value):
return '\"\'%s\'B\"' % ''.join([str(x) for x in value])
class OctetString(base.AbstractSimpleAsn1Item):
tagSet = baseTagSet = tag.initTagSet(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x04)
)
defaultBinValue = defaultHexValue = base.noValue
encoding = 'us-ascii'
def __init__(self, value=None, tagSet=None, subtypeSpec=None,
encoding=None, binValue=None, hexValue=None):
if encoding is None:
self._encoding = self.encoding
else:
self._encoding = encoding
if binValue is not None:
value = self.fromBinaryString(binValue)
if hexValue is not None:
value = self.fromHexString(hexValue)
if value is None or value is base.noValue:
value = self.defaultHexValue
if value is None or value is base.noValue:
value = self.defaultBinValue
self.__intValue = None
base.AbstractSimpleAsn1Item.__init__(self, value, tagSet, subtypeSpec)
def clone(self, value=None, tagSet=None, subtypeSpec=None,
encoding=None, binValue=None, hexValue=None):
if value is None and tagSet is None and subtypeSpec is None and \
encoding is None and binValue is None and hexValue is None:
return self
if value is None and binValue is None and hexValue is None:
value = self._value
if tagSet is None:
tagSet = self._tagSet
if subtypeSpec is None:
subtypeSpec = self._subtypeSpec
if encoding is None:
encoding = self._encoding
return self.__class__(
value, tagSet, subtypeSpec, encoding, binValue, hexValue
)
if sys.version_info[0] <= 2:
def prettyIn(self, value):
if isinstance(value, str):
return value
elif isinstance(value, (tuple, list)):
try:
return ''.join([ chr(x) for x in value ])
except ValueError:
raise error.PyAsn1Error(
'Bad OctetString initializer \'%s\'' % (value,)
)
else:
return str(value)
else:
def prettyIn(self, value):
if isinstance(value, bytes):
return value
elif isinstance(value, OctetString):
return value.asOctets()
elif isinstance(value, (tuple, list, map)):
try:
return bytes(value)
except ValueError:
raise error.PyAsn1Error(
'Bad OctetString initializer \'%s\'' % (value,)
)
else:
try:
return str(value).encode(self._encoding)
except UnicodeEncodeError:
raise error.PyAsn1Error(
'Can\'t encode string \'%s\' with \'%s\' codec' % (value, self._encoding)
)
def fromBinaryString(self, value):
bitNo = 8; byte = 0; r = ()
for v in value:
if bitNo:
bitNo = bitNo - 1
else:
bitNo = 7
r = r + (byte,)
byte = 0
if v == '0':
v = 0
elif v == '1':
v = 1
else:
raise error.PyAsn1Error(
'Non-binary OCTET STRING initializer %s' % (v,)
)
byte = byte | (v << bitNo)
return octets.ints2octs(r + (byte,))
def fromHexString(self, value):
r = p = ()
for v in value:
if p:
r = r + (int(p+v, 16),)
p = ()
else:
p = v
if p:
r = r + (int(p+'0', 16),)
return octets.ints2octs(r)
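    # For illustration:
    #   fromBinaryString('00000001') -> b'\x01'
    #   fromHexString('fa01')        -> b'\xfa\x01'
    # An odd trailing hex nibble is zero-padded: fromHexString('f') -> b'\xf0'
    # (shown as Python 3 bytes; on Python 2 these are plain str octets).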
def prettyOut(self, value):
if sys.version_info[0] <= 2:
numbers = tuple([ ord(x) for x in value ])
else:
numbers = tuple(value)
if [ x for x in numbers if x < 32 or x > 126 ]:
return '0x' + ''.join([ '%.2x' % x for x in numbers ])
else:
return str(value)
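    # For illustration: payloads made only of printable bytes (32..126) render
    # via str(), while anything else falls back to a hex dump, e.g.
    #   prettyOut(b'\x00\x01') -> '0x0001'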
def __repr__(self):
if self._value is base.noValue:
return self.__class__.__name__ + '()'
if [ x for x in self.asNumbers() if x < 32 or x > 126 ]:
return self.__class__.__name__ + '(hexValue=\'' + ''.join([ '%.2x' % x for x in self.asNumbers() ])+'\')'
else:
return self.__class__.__name__ + '(\'' + self.prettyOut(self._value) + '\')'
if sys.version_info[0] <= 2:
def __str__(self): return str(self._value)
def __unicode__(self):
return self._value.decode(self._encoding, 'ignore')
def asOctets(self): return self._value
def asNumbers(self):
if self.__intValue is None:
self.__intValue = tuple([ ord(x) for x in self._value ])
return self.__intValue
else:
def __str__(self): return self._value.decode(self._encoding, 'ignore')
def __bytes__(self): return self._value
def asOctets(self): return self._value
def asNumbers(self):
if self.__intValue is None:
self.__intValue = tuple(self._value)
return self.__intValue
# Immutable sequence object protocol
def __len__(self):
if self._len is None:
self._len = len(self._value)
return self._len
def __getitem__(self, i):
if isinstance(i, slice):
return self.clone(operator.getitem(self._value, i))
else:
return self._value[i]
def __add__(self, value): return self.clone(self._value + self.prettyIn(value))
def __radd__(self, value): return self.clone(self.prettyIn(value) + self._value)
def __mul__(self, value): return self.clone(self._value * value)
def __rmul__(self, value): return self * value
class Null(OctetString):
defaultValue = ''.encode() # This is tightly constrained
tagSet = baseTagSet = tag.initTagSet(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x05)
)
subtypeSpec = OctetString.subtypeSpec+constraint.SingleValueConstraint(''.encode())
if sys.version_info[0] <= 2:
intTypes = (int, long)
else:
intTypes = int
class ObjectIdentifier(base.AbstractSimpleAsn1Item):
tagSet = baseTagSet = tag.initTagSet(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x06)
)
def __add__(self, other): return self.clone(self._value + other)
def __radd__(self, other): return self.clone(other + self._value)
def asTuple(self): return self._value
# Sequence object protocol
def __len__(self):
if self._len is None:
self._len = len(self._value)
return self._len
def __getitem__(self, i):
if isinstance(i, slice):
return self.clone(
operator.getitem(self._value, i)
)
else:
return self._value[i]
def __str__(self): return self.prettyPrint()
def index(self, suboid): return self._value.index(suboid)
def isPrefixOf(self, value):
"""Returns true if argument OID resides deeper in the OID tree"""
l = len(self)
if l <= len(value):
if self._value[:l] == value[:l]:
return 1
return 0
def prettyIn(self, value):
"""Dotted -> tuple of numerics OID converter"""
if isinstance(value, tuple):
pass
elif isinstance(value, ObjectIdentifier):
return tuple(value)
elif isinstance(value, str):
r = []
for element in [ x for x in value.split('.') if x != '' ]:
try:
r.append(int(element, 0))
except ValueError:
raise error.PyAsn1Error(
'Malformed Object ID %s at %s: %s' %
(str(value), self.__class__.__name__, sys.exc_info()[1])
)
value = tuple(r)
else:
try:
value = tuple(value)
except TypeError:
raise error.PyAsn1Error(
'Malformed Object ID %s at %s: %s' %
(str(value), self.__class__.__name__,sys.exc_info()[1])
)
for x in value:
if not isinstance(x, intTypes) or x < 0:
raise error.PyAsn1Error(
'Invalid sub-ID in %s at %s' % (value, self.__class__.__name__)
)
return value
def prettyOut(self, value): return '.'.join([ str(x) for x in value ])
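    # For illustration:
    #   ObjectIdentifier('1.3.6.1.2.1').asTuple() -> (1, 3, 6, 1, 2, 1)
    #   prettyOut((1, 3, 6, 1)) -> '1.3.6.1'
    # Sub-IDs must be non-negative integers; anything else raises PyAsn1Error.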
class Real(base.AbstractSimpleAsn1Item):
try:
_plusInf = float('inf')
_minusInf = float('-inf')
_inf = (_plusInf, _minusInf)
except ValueError:
# Infinity support is platform and Python dependent
_plusInf = _minusInf = None
_inf = ()
tagSet = baseTagSet = tag.initTagSet(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x09)
)
def __normalizeBase10(self, value):
m, b, e = value
while m and m % 10 == 0:
            m = m // 10  # floor division keeps the mantissa an int on Python 3
e = e + 1
return m, b, e
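    # For illustration: __normalizeBase10((120, 10, 0)) -> (12, 10, 1), i.e.
    # trailing decimal zeros migrate from the mantissa into the exponent.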
def prettyIn(self, value):
if isinstance(value, tuple) and len(value) == 3:
for d in value:
if not isinstance(d, intTypes):
raise error.PyAsn1Error(
'Lame Real value syntax: %s' % (value,)
)
if value[1] not in (2, 10):
raise error.PyAsn1Error(
'Prohibited base for Real value: %s' % (value[1],)
)
if value[1] == 10:
value = self.__normalizeBase10(value)
return value
elif isinstance(value, intTypes):
return self.__normalizeBase10((value, 10, 0))
elif isinstance(value, float):
if self._inf and value in self._inf:
return value
else:
e = 0
while int(value) != value:
value = value * 10
e = e - 1
return self.__normalizeBase10((int(value), 10, e))
elif isinstance(value, Real):
return tuple(value)
elif isinstance(value, str): # handle infinite literal
try:
return float(value)
except ValueError:
pass
raise error.PyAsn1Error(
'Bad real value syntax: %s' % (value,)
)
def prettyOut(self, value):
if value in self._inf:
return '\'%s\'' % value
else:
return str(value)
def isPlusInfinity(self): return self._value == self._plusInf
def isMinusInfinity(self): return self._value == self._minusInf
def isInfinity(self): return self._value in self._inf
def __str__(self): return str(float(self))
def __add__(self, value): return self.clone(float(self) + value)
def __radd__(self, value): return self + value
def __mul__(self, value): return self.clone(float(self) * value)
def __rmul__(self, value): return self * value
def __sub__(self, value): return self.clone(float(self) - value)
def __rsub__(self, value): return self.clone(value - float(self))
def __mod__(self, value): return self.clone(float(self) % value)
def __rmod__(self, value): return self.clone(value % float(self))
def __pow__(self, value, modulo=None): return self.clone(pow(float(self), value, modulo))
def __rpow__(self, value): return self.clone(pow(value, float(self)))
if sys.version_info[0] <= 2:
def __div__(self, value): return self.clone(float(self) / value)
def __rdiv__(self, value): return self.clone(value / float(self))
else:
def __truediv__(self, value): return self.clone(float(self) / value)
def __rtruediv__(self, value): return self.clone(value / float(self))
def __divmod__(self, value): return self.clone(float(self) // value)
def __rdivmod__(self, value): return self.clone(value // float(self))
def __int__(self): return int(float(self))
if sys.version_info[0] <= 2:
def __long__(self): return long(float(self))
def __float__(self):
if self._value in self._inf:
return self._value
else:
return float(
self._value[0] * pow(self._value[1], self._value[2])
)
def __abs__(self): return abs(float(self))
def __lt__(self, value): return float(self) < value
def __le__(self, value): return float(self) <= value
def __eq__(self, value): return float(self) == value
def __ne__(self, value): return float(self) != value
def __gt__(self, value): return float(self) > value
def __ge__(self, value): return float(self) >= value
if sys.version_info[0] <= 2:
def __nonzero__(self): return bool(float(self))
else:
def __bool__(self): return bool(float(self))
__hash__ = base.AbstractSimpleAsn1Item.__hash__
def __getitem__(self, idx):
if self._value in self._inf:
raise error.PyAsn1Error('Invalid infinite value operation')
else:
return self._value[idx]
class Enumerated(Integer):
tagSet = baseTagSet = tag.initTagSet(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x0A)
)
# "Structured" ASN.1 types
class SetOf(base.AbstractConstructedAsn1Item):
componentType = None
tagSet = baseTagSet = tag.initTagSet(
tag.Tag(tag.tagClassUniversal, tag.tagFormatConstructed, 0x11)
)
typeId = 1
def _cloneComponentValues(self, myClone, cloneValueFlag):
idx = 0; l = len(self._componentValues)
while idx < l:
c = self._componentValues[idx]
if c is not None:
if isinstance(c, base.AbstractConstructedAsn1Item):
myClone.setComponentByPosition(
idx, c.clone(cloneValueFlag=cloneValueFlag)
)
else:
myClone.setComponentByPosition(idx, c.clone())
idx = idx + 1
def _verifyComponent(self, idx, value):
if self._componentType is not None and \
not self._componentType.isSuperTypeOf(value):
raise error.PyAsn1Error('Component type error %s' % (value,))
def getComponentByPosition(self, idx): return self._componentValues[idx]
def setComponentByPosition(self, idx, value=None, verifyConstraints=True):
l = len(self._componentValues)
if idx >= l:
self._componentValues = self._componentValues + (idx-l+1)*[None]
if value is None:
if self._componentValues[idx] is None:
if self._componentType is None:
raise error.PyAsn1Error('Component type not defined')
self._componentValues[idx] = self._componentType.clone()
self._componentValuesSet = self._componentValuesSet + 1
return self
elif not isinstance(value, base.Asn1Item):
if self._componentType is None:
raise error.PyAsn1Error('Component type not defined')
if isinstance(self._componentType, base.AbstractSimpleAsn1Item):
value = self._componentType.clone(value=value)
else:
raise error.PyAsn1Error('Instance value required')
if verifyConstraints:
if self._componentType is not None:
self._verifyComponent(idx, value)
self._verifySubtypeSpec(value, idx)
if self._componentValues[idx] is None:
self._componentValuesSet = self._componentValuesSet + 1
self._componentValues[idx] = value
return self
def getComponentTagMap(self):
if self._componentType is not None:
return self._componentType.getTagMap()
def prettyPrint(self, scope=0):
scope = scope + 1
r = self.__class__.__name__ + ':\n'
for idx in range(len(self._componentValues)):
r = r + ' '*scope
if self._componentValues[idx] is None:
r = r + '<empty>'
else:
r = r + self._componentValues[idx].prettyPrint(scope)
return r
class SequenceOf(SetOf):
tagSet = baseTagSet = tag.initTagSet(
tag.Tag(tag.tagClassUniversal, tag.tagFormatConstructed, 0x10)
)
typeId = 2
class SequenceAndSetBase(base.AbstractConstructedAsn1Item):
componentType = namedtype.NamedTypes()
def __init__(self, componentType=None, tagSet=None,
subtypeSpec=None, sizeSpec=None):
base.AbstractConstructedAsn1Item.__init__(
self, componentType, tagSet, subtypeSpec, sizeSpec
)
if self._componentType is None:
self._componentTypeLen = 0
else:
self._componentTypeLen = len(self._componentType)
def __getitem__(self, idx):
if isinstance(idx, str):
return self.getComponentByName(idx)
else:
return base.AbstractConstructedAsn1Item.__getitem__(self, idx)
def __setitem__(self, idx, value):
if isinstance(idx, str):
self.setComponentByName(idx, value)
else:
base.AbstractConstructedAsn1Item.__setitem__(self, idx, value)
def _cloneComponentValues(self, myClone, cloneValueFlag):
idx = 0; l = len(self._componentValues)
while idx < l:
c = self._componentValues[idx]
if c is not None:
if isinstance(c, base.AbstractConstructedAsn1Item):
myClone.setComponentByPosition(
idx, c.clone(cloneValueFlag=cloneValueFlag)
)
else:
myClone.setComponentByPosition(idx, c.clone())
idx = idx + 1
def _verifyComponent(self, idx, value):
if idx >= self._componentTypeLen:
raise error.PyAsn1Error(
'Component type error out of range'
)
t = self._componentType[idx].getType()
if not t.isSuperTypeOf(value):
raise error.PyAsn1Error('Component type error %r vs %r' % (t, value))
def getComponentByName(self, name):
return self.getComponentByPosition(
self._componentType.getPositionByName(name)
)
def setComponentByName(self, name, value=None, verifyConstraints=True):
return self.setComponentByPosition(
self._componentType.getPositionByName(name), value,
verifyConstraints
)
def getComponentByPosition(self, idx):
try:
return self._componentValues[idx]
except IndexError:
if idx < self._componentTypeLen:
return
raise
def setComponentByPosition(self, idx, value=None, verifyConstraints=True):
l = len(self._componentValues)
if idx >= l:
self._componentValues = self._componentValues + (idx-l+1)*[None]
if value is None:
if self._componentValues[idx] is None:
self._componentValues[idx] = self._componentType.getTypeByPosition(idx).clone()
self._componentValuesSet = self._componentValuesSet + 1
return self
elif not isinstance(value, base.Asn1Item):
t = self._componentType.getTypeByPosition(idx)
if isinstance(t, base.AbstractSimpleAsn1Item):
value = t.clone(value=value)
else:
raise error.PyAsn1Error('Instance value required')
if verifyConstraints:
if self._componentTypeLen:
self._verifyComponent(idx, value)
self._verifySubtypeSpec(value, idx)
if self._componentValues[idx] is None:
self._componentValuesSet = self._componentValuesSet + 1
self._componentValues[idx] = value
return self
def getNameByPosition(self, idx):
if self._componentTypeLen:
return self._componentType.getNameByPosition(idx)
def getDefaultComponentByPosition(self, idx):
if self._componentTypeLen and self._componentType[idx].isDefaulted:
return self._componentType[idx].getType()
def getComponentType(self):
if self._componentTypeLen:
return self._componentType
def setDefaultComponents(self):
if self._componentTypeLen == self._componentValuesSet:
return
idx = self._componentTypeLen
while idx:
idx = idx - 1
if self._componentType[idx].isDefaulted:
if self.getComponentByPosition(idx) is None:
self.setComponentByPosition(idx)
elif not self._componentType[idx].isOptional:
if self.getComponentByPosition(idx) is None:
raise error.PyAsn1Error(
'Uninitialized component #%s at %r' % (idx, self)
)
def prettyPrint(self, scope=0):
scope = scope + 1
r = self.__class__.__name__ + ':\n'
for idx in range(len(self._componentValues)):
if self._componentValues[idx] is not None:
r = r + ' '*scope
componentType = self.getComponentType()
if componentType is None:
r = r + '<no-name>'
else:
r = r + componentType.getNameByPosition(idx)
r = '%s=%s\n' % (
r, self._componentValues[idx].prettyPrint(scope)
)
return r
class Sequence(SequenceAndSetBase):
tagSet = baseTagSet = tag.initTagSet(
tag.Tag(tag.tagClassUniversal, tag.tagFormatConstructed, 0x10)
)
typeId = 3
def getComponentTagMapNearPosition(self, idx):
if self._componentType:
return self._componentType.getTagMapNearPosition(idx)
def getComponentPositionNearType(self, tagSet, idx):
if self._componentType:
return self._componentType.getPositionNearType(tagSet, idx)
else:
return idx
class Set(SequenceAndSetBase):
tagSet = baseTagSet = tag.initTagSet(
tag.Tag(tag.tagClassUniversal, tag.tagFormatConstructed, 0x11)
)
typeId = 4
def getComponent(self, innerFlag=0): return self
def getComponentByType(self, tagSet, innerFlag=0):
c = self.getComponentByPosition(
self._componentType.getPositionByType(tagSet)
)
if innerFlag and isinstance(c, Set):
# get inner component by inner tagSet
return c.getComponent(1)
else:
# get outer component by inner tagSet
return c
def setComponentByType(self, tagSet, value=None, innerFlag=0,
verifyConstraints=True):
idx = self._componentType.getPositionByType(tagSet)
t = self._componentType.getTypeByPosition(idx)
if innerFlag: # set inner component by inner tagSet
if t.getTagSet():
return self.setComponentByPosition(
idx, value, verifyConstraints
)
else:
t = self.setComponentByPosition(idx).getComponentByPosition(idx)
return t.setComponentByType(
tagSet, value, innerFlag, verifyConstraints
)
else: # set outer component by inner tagSet
return self.setComponentByPosition(
idx, value, verifyConstraints
)
def getComponentTagMap(self):
if self._componentType:
return self._componentType.getTagMap(True)
def getComponentPositionByType(self, tagSet):
if self._componentType:
return self._componentType.getPositionByType(tagSet)
class Choice(Set):
tagSet = baseTagSet = tag.TagSet() # untagged
sizeSpec = constraint.ConstraintsIntersection(
constraint.ValueSizeConstraint(1, 1)
)
typeId = 5
_currentIdx = None
def __eq__(self, other):
if self._componentValues:
return self._componentValues[self._currentIdx] == other
return NotImplemented
def __ne__(self, other):
if self._componentValues:
return self._componentValues[self._currentIdx] != other
return NotImplemented
def __lt__(self, other):
if self._componentValues:
return self._componentValues[self._currentIdx] < other
return NotImplemented
def __le__(self, other):
if self._componentValues:
return self._componentValues[self._currentIdx] <= other
return NotImplemented
def __gt__(self, other):
if self._componentValues:
return self._componentValues[self._currentIdx] > other
return NotImplemented
def __ge__(self, other):
if self._componentValues:
return self._componentValues[self._currentIdx] >= other
return NotImplemented
if sys.version_info[0] <= 2:
def __nonzero__(self): return bool(self._componentValues)
else:
def __bool__(self): return bool(self._componentValues)
def __len__(self): return self._currentIdx is not None and 1 or 0
def verifySizeSpec(self):
if self._currentIdx is None:
raise error.PyAsn1Error('Component not chosen')
else:
self._sizeSpec(' ')
def _cloneComponentValues(self, myClone, cloneValueFlag):
try:
c = self.getComponent()
except error.PyAsn1Error:
pass
else:
if isinstance(c, Choice):
tagSet = c.getEffectiveTagSet()
else:
tagSet = c.getTagSet()
if isinstance(c, base.AbstractConstructedAsn1Item):
myClone.setComponentByType(
tagSet, c.clone(cloneValueFlag=cloneValueFlag)
)
else:
myClone.setComponentByType(tagSet, c.clone())
def setComponentByPosition(self, idx, value=None, verifyConstraints=True):
l = len(self._componentValues)
if idx >= l:
self._componentValues = self._componentValues + (idx-l+1)*[None]
if self._currentIdx is not None:
self._componentValues[self._currentIdx] = None
if value is None:
if self._componentValues[idx] is None:
self._componentValues[idx] = self._componentType.getTypeByPosition(idx).clone()
self._componentValuesSet = 1
self._currentIdx = idx
return self
elif not isinstance(value, base.Asn1Item):
value = self._componentType.getTypeByPosition(idx).clone(
value=value
)
if verifyConstraints:
if self._componentTypeLen:
self._verifyComponent(idx, value)
self._verifySubtypeSpec(value, idx)
self._componentValues[idx] = value
self._currentIdx = idx
self._componentValuesSet = 1
return self
def getMinTagSet(self):
if self._tagSet:
return self._tagSet
else:
return self._componentType.genMinTagSet()
def getEffectiveTagSet(self):
if self._tagSet:
return self._tagSet
else:
c = self.getComponent()
if isinstance(c, Choice):
return c.getEffectiveTagSet()
else:
return c.getTagSet()
def getTagMap(self):
if self._tagSet:
return Set.getTagMap(self)
else:
return Set.getComponentTagMap(self)
def getComponent(self, innerFlag=0):
if self._currentIdx is None:
raise error.PyAsn1Error('Component not chosen')
else:
c = self._componentValues[self._currentIdx]
if innerFlag and isinstance(c, Choice):
return c.getComponent(innerFlag)
else:
return c
def getName(self, innerFlag=0):
if self._currentIdx is None:
raise error.PyAsn1Error('Component not chosen')
else:
if innerFlag:
c = self._componentValues[self._currentIdx]
if isinstance(c, Choice):
return c.getName(innerFlag)
return self._componentType.getNameByPosition(self._currentIdx)
def setDefaultComponents(self): pass
class Any(OctetString):
tagSet = baseTagSet = tag.TagSet() # untagged
typeId = 6
def getTagMap(self):
return tagmap.TagMap(
{ self.getTagSet(): self },
{ eoo.endOfOctets.getTagSet(): eoo.endOfOctets },
self
)
# XXX
# coercion rules?
| gpl-2.0 |
liyu1990/sklearn | examples/manifold/plot_swissroll.py | 330 | 1446 | """
===================================
Swiss Roll reduction with LLE
===================================
An illustration of Swiss Roll reduction
with locally linear embedding
"""
# Author: Fabian Pedregosa -- <[email protected]>
# License: BSD 3 clause (C) INRIA 2011
print(__doc__)
import matplotlib.pyplot as plt
# This import is needed to modify the way figure behaves
from mpl_toolkits.mplot3d import Axes3D
Axes3D
#----------------------------------------------------------------------
# Locally linear embedding of the swiss roll
from sklearn import manifold, datasets
X, color = datasets.samples_generator.make_swiss_roll(n_samples=1500)
print("Computing LLE embedding")
X_r, err = manifold.locally_linear_embedding(X, n_neighbors=12,
n_components=2)
print("Done. Reconstruction error: %g" % err)
#----------------------------------------------------------------------
# Plot result
fig = plt.figure()
try:
# compatibility matplotlib < 1.0
ax = fig.add_subplot(211, projection='3d')
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=plt.cm.Spectral)
except:
ax = fig.add_subplot(211)
ax.scatter(X[:, 0], X[:, 2], c=color, cmap=plt.cm.Spectral)
ax.set_title("Original data")
ax = fig.add_subplot(212)
ax.scatter(X_r[:, 0], X_r[:, 1], c=color, cmap=plt.cm.Spectral)
plt.axis('tight')
plt.xticks([]), plt.yticks([])
plt.title('Projected data')
plt.show()
| bsd-3-clause |
barthisrael/OmniDB | OmniDB/OmniDB_app/include/paramiko/kex_gex.py | 7 | 10302 | # Copyright (C) 2003-2007 Robey Pointer <[email protected]>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
"""
Variant on `KexGroup1 <paramiko.kex_group1.KexGroup1>` where the prime "p" and
generator "g" are provided by the server. A bit more work is required on the
client side, and a **lot** more on the server side.
"""
import os
from hashlib import sha1, sha256
from paramiko import util
from paramiko.common import DEBUG
from paramiko.message import Message
from paramiko.py3compat import byte_chr, byte_ord, byte_mask
from paramiko.ssh_exception import SSHException
(
_MSG_KEXDH_GEX_REQUEST_OLD,
_MSG_KEXDH_GEX_GROUP,
_MSG_KEXDH_GEX_INIT,
_MSG_KEXDH_GEX_REPLY,
_MSG_KEXDH_GEX_REQUEST,
) = range(30, 35)
(
c_MSG_KEXDH_GEX_REQUEST_OLD,
c_MSG_KEXDH_GEX_GROUP,
c_MSG_KEXDH_GEX_INIT,
c_MSG_KEXDH_GEX_REPLY,
c_MSG_KEXDH_GEX_REQUEST,
) = [byte_chr(c) for c in range(30, 35)]
class KexGex(object):
name = "diffie-hellman-group-exchange-sha1"
min_bits = 1024
max_bits = 8192
preferred_bits = 2048
hash_algo = sha1
def __init__(self, transport):
self.transport = transport
self.p = None
self.q = None
self.g = None
self.x = None
self.e = None
self.f = None
self.old_style = False
def start_kex(self, _test_old_style=False):
if self.transport.server_mode:
self.transport._expect_packet(
_MSG_KEXDH_GEX_REQUEST, _MSG_KEXDH_GEX_REQUEST_OLD
)
return
# request a bit range: we accept (min_bits) to (max_bits), but prefer
# (preferred_bits). according to the spec, we shouldn't pull the
# minimum up above 1024.
m = Message()
if _test_old_style:
# only used for unit tests: we shouldn't ever send this
m.add_byte(c_MSG_KEXDH_GEX_REQUEST_OLD)
m.add_int(self.preferred_bits)
self.old_style = True
else:
m.add_byte(c_MSG_KEXDH_GEX_REQUEST)
m.add_int(self.min_bits)
m.add_int(self.preferred_bits)
m.add_int(self.max_bits)
self.transport._send_message(m)
self.transport._expect_packet(_MSG_KEXDH_GEX_GROUP)
def parse_next(self, ptype, m):
if ptype == _MSG_KEXDH_GEX_REQUEST:
return self._parse_kexdh_gex_request(m)
elif ptype == _MSG_KEXDH_GEX_GROUP:
return self._parse_kexdh_gex_group(m)
elif ptype == _MSG_KEXDH_GEX_INIT:
return self._parse_kexdh_gex_init(m)
elif ptype == _MSG_KEXDH_GEX_REPLY:
return self._parse_kexdh_gex_reply(m)
elif ptype == _MSG_KEXDH_GEX_REQUEST_OLD:
return self._parse_kexdh_gex_request_old(m)
msg = "KexGex {} asked to handle packet type {:d}"
raise SSHException(msg.format(self.name, ptype))
# ...internals...
def _generate_x(self):
# generate an "x" (1 < x < (p-1)/2).
q = (self.p - 1) // 2
qnorm = util.deflate_long(q, 0)
qhbyte = byte_ord(qnorm[0])
byte_count = len(qnorm)
qmask = 0xff
while not (qhbyte & 0x80):
qhbyte <<= 1
qmask >>= 1
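        # The loop above shrinks qmask until it lines up with the top bit of
        # q's leading byte; masking the first random byte below then caps the
        # candidate near q's magnitude, so the 1 < x < q rejection loop
        # rarely has to retry.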
while True:
x_bytes = os.urandom(byte_count)
x_bytes = byte_mask(x_bytes[0], qmask) + x_bytes[1:]
x = util.inflate_long(x_bytes, 1)
if (x > 1) and (x < q):
break
self.x = x
def _parse_kexdh_gex_request(self, m):
minbits = m.get_int()
preferredbits = m.get_int()
maxbits = m.get_int()
# smoosh the user's preferred size into our own limits
if preferredbits > self.max_bits:
preferredbits = self.max_bits
if preferredbits < self.min_bits:
preferredbits = self.min_bits
# fix min/max if they're inconsistent. technically, we could just pout
# and hang up, but there's no harm in giving them the benefit of the
# doubt and just picking a bitsize for them.
if minbits > preferredbits:
minbits = preferredbits
if maxbits < preferredbits:
maxbits = preferredbits
# now save a copy
self.min_bits = minbits
self.preferred_bits = preferredbits
self.max_bits = maxbits
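        # For illustration (made-up request): a client asking for
        # (min=4096, preferred=1024, max=2048) first has its preferred size
        # clamped into our own 1024..8192 window (unchanged here), then
        # min/max are pulled toward it, leaving (min=1024, preferred=1024,
        # max=2048).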
# generate prime
pack = self.transport._get_modulus_pack()
if pack is None:
raise SSHException("Can't do server-side gex with no modulus pack")
self.transport._log(
DEBUG,
"Picking p ({} <= {} <= {} bits)".format(
minbits, preferredbits, maxbits
),
)
self.g, self.p = pack.get_modulus(minbits, preferredbits, maxbits)
m = Message()
m.add_byte(c_MSG_KEXDH_GEX_GROUP)
m.add_mpint(self.p)
m.add_mpint(self.g)
self.transport._send_message(m)
self.transport._expect_packet(_MSG_KEXDH_GEX_INIT)
def _parse_kexdh_gex_request_old(self, m):
# same as above, but without min_bits or max_bits (used by older
# clients like putty)
self.preferred_bits = m.get_int()
# smoosh the user's preferred size into our own limits
if self.preferred_bits > self.max_bits:
self.preferred_bits = self.max_bits
if self.preferred_bits < self.min_bits:
self.preferred_bits = self.min_bits
# generate prime
pack = self.transport._get_modulus_pack()
if pack is None:
raise SSHException("Can't do server-side gex with no modulus pack")
self.transport._log(
DEBUG, "Picking p (~ {} bits)".format(self.preferred_bits)
)
self.g, self.p = pack.get_modulus(
self.min_bits, self.preferred_bits, self.max_bits
)
m = Message()
m.add_byte(c_MSG_KEXDH_GEX_GROUP)
m.add_mpint(self.p)
m.add_mpint(self.g)
self.transport._send_message(m)
self.transport._expect_packet(_MSG_KEXDH_GEX_INIT)
self.old_style = True
def _parse_kexdh_gex_group(self, m):
self.p = m.get_mpint()
self.g = m.get_mpint()
# reject if p's bit length < 1024 or > 8192
bitlen = util.bit_length(self.p)
if (bitlen < 1024) or (bitlen > 8192):
raise SSHException(
"Server-generated gex p (don't ask) is out of range "
"({} bits)".format(bitlen)
)
self.transport._log(DEBUG, "Got server p ({} bits)".format(bitlen))
self._generate_x()
# now compute e = g^x mod p
self.e = pow(self.g, self.x, self.p)
m = Message()
m.add_byte(c_MSG_KEXDH_GEX_INIT)
m.add_mpint(self.e)
self.transport._send_message(m)
self.transport._expect_packet(_MSG_KEXDH_GEX_REPLY)
def _parse_kexdh_gex_init(self, m):
self.e = m.get_mpint()
if (self.e < 1) or (self.e > self.p - 1):
raise SSHException('Client kex "e" is out of range')
self._generate_x()
self.f = pow(self.g, self.x, self.p)
K = pow(self.e, self.x, self.p)
key = self.transport.get_server_key().asbytes()
# okay, build up the hash H of
# (V_C || V_S || I_C || I_S || K_S || min || n || max || p || g || e || f || K) # noqa
hm = Message()
hm.add(
self.transport.remote_version,
self.transport.local_version,
self.transport.remote_kex_init,
self.transport.local_kex_init,
key,
)
if not self.old_style:
hm.add_int(self.min_bits)
hm.add_int(self.preferred_bits)
if not self.old_style:
hm.add_int(self.max_bits)
hm.add_mpint(self.p)
hm.add_mpint(self.g)
hm.add_mpint(self.e)
hm.add_mpint(self.f)
hm.add_mpint(K)
H = self.hash_algo(hm.asbytes()).digest()
self.transport._set_K_H(K, H)
# sign it
sig = self.transport.get_server_key().sign_ssh_data(H)
# send reply
m = Message()
m.add_byte(c_MSG_KEXDH_GEX_REPLY)
m.add_string(key)
m.add_mpint(self.f)
m.add_string(sig)
self.transport._send_message(m)
self.transport._activate_outbound()
def _parse_kexdh_gex_reply(self, m):
host_key = m.get_string()
self.f = m.get_mpint()
sig = m.get_string()
if (self.f < 1) or (self.f > self.p - 1):
raise SSHException('Server kex "f" is out of range')
K = pow(self.f, self.x, self.p)
# okay, build up the hash H of
# (V_C || V_S || I_C || I_S || K_S || min || n || max || p || g || e || f || K) # noqa
hm = Message()
hm.add(
self.transport.local_version,
self.transport.remote_version,
self.transport.local_kex_init,
self.transport.remote_kex_init,
host_key,
)
if not self.old_style:
hm.add_int(self.min_bits)
hm.add_int(self.preferred_bits)
if not self.old_style:
hm.add_int(self.max_bits)
hm.add_mpint(self.p)
hm.add_mpint(self.g)
hm.add_mpint(self.e)
hm.add_mpint(self.f)
hm.add_mpint(K)
self.transport._set_K_H(K, self.hash_algo(hm.asbytes()).digest())
self.transport._verify_key(host_key, sig)
self.transport._activate_outbound()
class KexGexSHA256(KexGex):
name = "diffie-hellman-group-exchange-sha256"
hash_algo = sha256
| mit |
numerigraphe/odoo | addons/project/report/project_report.py | 279 | 5789 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp import tools
class report_project_task_user(osv.osv):
_name = "report.project.task.user"
_description = "Tasks by user and project"
_auto = False
_columns = {
'name': fields.char('Task Summary', readonly=True),
'user_id': fields.many2one('res.users', 'Assigned To', readonly=True),
'reviewer_id': fields.many2one('res.users', 'Reviewer', readonly=True),
'date_start': fields.datetime('Assignation Date', readonly=True),
'no_of_days': fields.integer('# of Days', size=128, readonly=True),
'date_end': fields.datetime('Ending Date', readonly=True),
'date_deadline': fields.date('Deadline', readonly=True),
'date_last_stage_update': fields.datetime('Last Stage Update', readonly=True),
'project_id': fields.many2one('project.project', 'Project', readonly=True),
'hours_planned': fields.float('Planned Hours', readonly=True),
'hours_effective': fields.float('Effective Hours', readonly=True),
'hours_delay': fields.float('Avg. Plan.-Eff.', readonly=True),
'remaining_hours': fields.float('Remaining Hours', readonly=True),
'progress': fields.float('Progress', readonly=True, group_operator='avg'),
'total_hours': fields.float('Total Hours', readonly=True),
'closing_days': fields.float('Days to Close', digits=(16,2), readonly=True, group_operator="avg",
help="Number of Days to close the task"),
'opening_days': fields.float('Days to Assign', digits=(16,2), readonly=True, group_operator="avg",
help="Number of Days to Open the task"),
'delay_endings_days': fields.float('Overpassed Deadline', digits=(16,2), readonly=True),
'nbr': fields.integer('# of Tasks', readonly=True), # TDE FIXME master: rename into nbr_tasks
'priority': fields.selection([('0','Low'), ('1','Normal'), ('2','High')],
string='Priority', size=1, readonly=True),
'state': fields.selection([('normal', 'In Progress'),('blocked', 'Blocked'),('done', 'Ready for next stage')],'Status', readonly=True),
'company_id': fields.many2one('res.company', 'Company', readonly=True),
'partner_id': fields.many2one('res.partner', 'Contact', readonly=True),
'stage_id': fields.many2one('project.task.type', 'Stage'),
}
_order = 'name desc, project_id'
def init(self, cr):
tools.sql.drop_view_if_exists(cr, 'report_project_task_user')
cr.execute("""
CREATE view report_project_task_user as
SELECT
(select 1 ) AS nbr,
t.id as id,
t.date_start as date_start,
t.date_end as date_end,
t.date_last_stage_update as date_last_stage_update,
t.date_deadline as date_deadline,
abs((extract('epoch' from (t.write_date-t.date_start)))/(3600*24)) as no_of_days,
t.user_id,
t.reviewer_id,
progress as progress,
t.project_id,
t.effective_hours as hours_effective,
t.priority,
t.name as name,
t.company_id,
t.partner_id,
t.stage_id as stage_id,
t.kanban_state as state,
remaining_hours as remaining_hours,
total_hours as total_hours,
t.delay_hours as hours_delay,
planned_hours as hours_planned,
(extract('epoch' from (t.write_date-t.create_date)))/(3600*24) as closing_days,
(extract('epoch' from (t.date_start-t.create_date)))/(3600*24) as opening_days,
(extract('epoch' from (t.date_deadline-(now() at time zone 'UTC'))))/(3600*24) as delay_endings_days
FROM project_task t
WHERE t.active = 'true'
GROUP BY
t.id,
remaining_hours,
t.effective_hours,
progress,
total_hours,
planned_hours,
hours_delay,
create_date,
write_date,
date_start,
date_end,
date_deadline,
date_last_stage_update,
t.user_id,
t.reviewer_id,
t.project_id,
t.priority,
name,
t.company_id,
t.partner_id,
stage_id
""")
| agpl-3.0 |
liavkoren/djangoDev | django/db/backends/schema.py | 2 | 38689 | import hashlib
import operator
from django.db.backends.creation import BaseDatabaseCreation
from django.db.backends.utils import truncate_name
from django.db.models.fields.related import ManyToManyField
from django.db.transaction import atomic
from django.utils.encoding import force_bytes
from django.utils.log import getLogger
from django.utils.six.moves import reduce
from django.utils.six import callable
logger = getLogger('django.db.backends.schema')
class BaseDatabaseSchemaEditor(object):
"""
This class (and its subclasses) are responsible for emitting schema-changing
statements to the databases - model creation/removal/alteration, field
renaming, index fiddling, and so on.
It is intended to eventually completely replace DatabaseCreation.
This class should be used by creating an instance for each set of schema
changes (e.g. a syncdb run, a migration file), and by first calling start(),
then the relevant actions, and then commit(). This is necessary to allow
things like circular foreign key references - FKs will only be created once
commit() is called.
"""
# Overrideable SQL templates
sql_create_table = "CREATE TABLE %(table)s (%(definition)s)"
sql_create_table_unique = "UNIQUE (%(columns)s)"
sql_rename_table = "ALTER TABLE %(old_table)s RENAME TO %(new_table)s"
sql_retablespace_table = "ALTER TABLE %(table)s SET TABLESPACE %(new_tablespace)s"
sql_delete_table = "DROP TABLE %(table)s CASCADE"
sql_create_column = "ALTER TABLE %(table)s ADD COLUMN %(column)s %(definition)s"
sql_alter_column = "ALTER TABLE %(table)s %(changes)s"
sql_alter_column_type = "ALTER COLUMN %(column)s TYPE %(type)s"
sql_alter_column_null = "ALTER COLUMN %(column)s DROP NOT NULL"
sql_alter_column_not_null = "ALTER COLUMN %(column)s SET NOT NULL"
sql_alter_column_default = "ALTER COLUMN %(column)s SET DEFAULT %(default)s"
sql_alter_column_no_default = "ALTER COLUMN %(column)s DROP DEFAULT"
sql_delete_column = "ALTER TABLE %(table)s DROP COLUMN %(column)s CASCADE"
sql_rename_column = "ALTER TABLE %(table)s RENAME COLUMN %(old_column)s TO %(new_column)s"
sql_create_check = "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s CHECK (%(check)s)"
sql_delete_check = "ALTER TABLE %(table)s DROP CONSTRAINT %(name)s"
sql_create_unique = "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s UNIQUE (%(columns)s)"
sql_delete_unique = "ALTER TABLE %(table)s DROP CONSTRAINT %(name)s"
sql_create_fk = "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s FOREIGN KEY (%(column)s) REFERENCES %(to_table)s (%(to_column)s) DEFERRABLE INITIALLY DEFERRED"
sql_create_inline_fk = None
sql_delete_fk = "ALTER TABLE %(table)s DROP CONSTRAINT %(name)s"
sql_create_index = "CREATE INDEX %(name)s ON %(table)s (%(columns)s)%(extra)s"
sql_delete_index = "DROP INDEX %(name)s"
sql_create_pk = "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s PRIMARY KEY (%(columns)s)"
sql_delete_pk = "ALTER TABLE %(table)s DROP CONSTRAINT %(name)s"
def __init__(self, connection, collect_sql=False):
self.connection = connection
self.collect_sql = collect_sql
if self.collect_sql:
self.collected_sql = []
# State-managing methods
def __enter__(self):
self.deferred_sql = []
atomic(self.connection.alias, self.connection.features.can_rollback_ddl).__enter__()
return self
def __exit__(self, exc_type, exc_value, traceback):
if exc_type is None:
for sql in self.deferred_sql:
self.execute(sql)
atomic(self.connection.alias, self.connection.features.can_rollback_ddl).__exit__(exc_type, exc_value, traceback)
# Core utility functions
def execute(self, sql, params=[]):
"""
Executes the given SQL statement, with optional parameters.
"""
# Log the command we're running, then run it
logger.debug("%s; (params %r)" % (sql, params))
if self.collect_sql:
self.collected_sql.append((sql % tuple(map(self.quote_value, params))) + ";")
else:
with self.connection.cursor() as cursor:
cursor.execute(sql, params)
def quote_name(self, name):
return self.connection.ops.quote_name(name)
# Field <-> database mapping functions
def column_sql(self, model, field, include_default=False):
"""
Takes a field and returns its column definition.
The field must already have had set_attributes_from_name called.
"""
# Get the column's type and use that as the basis of the SQL
db_params = field.db_parameters(connection=self.connection)
sql = db_params['type']
params = []
# Check for fields that aren't actually columns (e.g. M2M)
if sql is None:
return None, None
# Work out nullability
null = field.null
# If we were told to include a default value, do so
default_value = self.effective_default(field)
if include_default and default_value is not None:
if self.connection.features.requires_literal_defaults:
# Some databases can't take defaults as a parameter (oracle)
# If this is the case, the individual schema backend should
# implement prepare_default
sql += " DEFAULT %s" % self.prepare_default(default_value)
else:
sql += " DEFAULT %s"
params += [default_value]
# Oracle treats the empty string ('') as null, so coerce the null
# option whenever '' is a possible value.
if (field.empty_strings_allowed and not field.primary_key and
self.connection.features.interprets_empty_strings_as_nulls):
null = True
if null:
sql += " NULL"
else:
sql += " NOT NULL"
# Primary key/unique outputs
if field.primary_key:
sql += " PRIMARY KEY"
elif field.unique:
sql += " UNIQUE"
# Optionally add the tablespace if it's an implicitly indexed column
tablespace = field.db_tablespace or model._meta.db_tablespace
if tablespace and self.connection.features.supports_tablespaces and field.unique:
sql += " %s" % self.connection.ops.tablespace_sql(tablespace, inline=True)
# Return the sql
return sql, params
def prepare_default(self, value):
"""
Only used for backends which have requires_literal_defaults feature
"""
raise NotImplementedError('subclasses of BaseDatabaseSchemaEditor for backends which have requires_literal_defaults must provide a prepare_default() method')
def effective_default(self, field):
"""
Returns a field's effective database default value
"""
if field.has_default():
default = field.get_default()
elif not field.null and field.blank and field.empty_strings_allowed:
default = ""
else:
default = None
# If it's a callable, call it
if callable(default):
default = default()
return default
def quote_value(self, value):
"""
Returns a quoted version of the value so it's safe to use in an SQL
string. This is not safe against injection from user code; it is
intended only for use in making SQL scripts or preparing default values
for particularly tricky backends (defaults are not user-defined, though,
so this is safe).
"""
raise NotImplementedError()
# Actions
def create_model(self, model):
"""
Takes a model and creates a table for it in the database.
Will also create any accompanying indexes or unique constraints.
"""
# Create column SQL, add FK deferreds if needed
column_sqls = []
params = []
for field in model._meta.local_fields:
# SQL
definition, extra_params = self.column_sql(model, field)
if definition is None:
continue
# Check constraints can go on the column SQL here
db_params = field.db_parameters(connection=self.connection)
if db_params['check']:
definition += " CHECK (%s)" % db_params['check']
# Autoincrement SQL (for backends with inline variant)
col_type_suffix = field.db_type_suffix(connection=self.connection)
if col_type_suffix:
definition += " %s" % col_type_suffix
params.extend(extra_params)
# Indexes
if field.db_index and not field.unique:
self.deferred_sql.append(
self.sql_create_index % {
"name": self._create_index_name(model, [field.column], suffix=""),
"table": self.quote_name(model._meta.db_table),
"columns": self.quote_name(field.column),
"extra": "",
}
)
# FK
if field.rel:
to_table = field.rel.to._meta.db_table
to_column = field.rel.to._meta.get_field(field.rel.field_name).column
if self.connection.features.supports_foreign_keys:
self.deferred_sql.append(
self.sql_create_fk % {
"name": self._create_index_name(model, [field.column], suffix="_fk_%s_%s" % (to_table, to_column)),
"table": self.quote_name(model._meta.db_table),
"column": self.quote_name(field.column),
"to_table": self.quote_name(to_table),
"to_column": self.quote_name(to_column),
}
)
elif self.sql_create_inline_fk:
definition += " " + self.sql_create_inline_fk % {
"to_table": self.quote_name(to_table),
"to_column": self.quote_name(to_column),
}
# Add the SQL to our big list
column_sqls.append("%s %s" % (
self.quote_name(field.column),
definition,
))
# Autoincrement SQL (for backends with post table definition variant)
if field.get_internal_type() == "AutoField":
autoinc_sql = self.connection.ops.autoinc_sql(model._meta.db_table, field.column)
if autoinc_sql:
self.deferred_sql.extend(autoinc_sql)
# Add any unique_togethers
for fields in model._meta.unique_together:
columns = [model._meta.get_field_by_name(field)[0].column for field in fields]
column_sqls.append(self.sql_create_table_unique % {
"columns": ", ".join(self.quote_name(column) for column in columns),
})
# Make the table
sql = self.sql_create_table % {
"table": self.quote_name(model._meta.db_table),
"definition": ", ".join(column_sqls)
}
self.execute(sql, params)
# Add any index_togethers
for fields in model._meta.index_together:
columns = [model._meta.get_field_by_name(field)[0].column for field in fields]
self.execute(self.sql_create_index % {
"table": self.quote_name(model._meta.db_table),
"name": self._create_index_name(model, columns, suffix="_idx"),
"columns": ", ".join(self.quote_name(column) for column in columns),
"extra": "",
})
# Make M2M tables
for field in model._meta.local_many_to_many:
if field.rel.through._meta.auto_created:
self.create_model(field.rel.through)
def delete_model(self, model):
"""
Deletes a model from the database.
"""
# Handle auto-created intermediary models
for field in model._meta.local_many_to_many:
if field.rel.through._meta.auto_created:
self.delete_model(field.rel.through)
# Delete the table
self.execute(self.sql_delete_table % {
"table": self.quote_name(model._meta.db_table),
})
def alter_unique_together(self, model, old_unique_together, new_unique_together):
"""
Deals with a model changing its unique_together.
Note: The input unique_togethers must be doubly-nested, not the single-
nested ["foo", "bar"] format.
"""
olds = set(tuple(fields) for fields in old_unique_together)
news = set(tuple(fields) for fields in new_unique_together)
# Deleted uniques
for fields in olds.difference(news):
columns = [model._meta.get_field_by_name(field)[0].column for field in fields]
constraint_names = self._constraint_names(model, columns, unique=True)
if len(constraint_names) != 1:
raise ValueError("Found wrong number (%s) of constraints for %s(%s)" % (
len(constraint_names),
model._meta.db_table,
", ".join(columns),
))
self.execute(
self.sql_delete_unique % {
"table": self.quote_name(model._meta.db_table),
"name": constraint_names[0],
},
)
# Created uniques
for fields in news.difference(olds):
columns = [model._meta.get_field_by_name(field)[0].column for field in fields]
self.execute(self.sql_create_unique % {
"table": self.quote_name(model._meta.db_table),
"name": self._create_index_name(model, columns, suffix="_uniq"),
"columns": ", ".join(self.quote_name(column) for column in columns),
})
def alter_index_together(self, model, old_index_together, new_index_together):
"""
Deals with a model changing its index_together.
Note: The input index_togethers must be doubly-nested, not the single-
nested ["foo", "bar"] format.
"""
olds = set(tuple(fields) for fields in old_index_together)
news = set(tuple(fields) for fields in new_index_together)
# Deleted indexes
for fields in olds.difference(news):
columns = [model._meta.get_field_by_name(field)[0].column for field in fields]
constraint_names = self._constraint_names(model, list(columns), index=True)
if len(constraint_names) != 1:
raise ValueError("Found wrong number (%s) of constraints for %s(%s)" % (
len(constraint_names),
model._meta.db_table,
", ".join(columns),
))
self.execute(
self.sql_delete_index % {
"table": self.quote_name(model._meta.db_table),
"name": constraint_names[0],
},
)
# Created indexes
for fields in news.difference(olds):
columns = [model._meta.get_field_by_name(field)[0].column for field in fields]
self.execute(self.sql_create_index % {
"table": self.quote_name(model._meta.db_table),
"name": self._create_index_name(model, columns, suffix="_idx"),
"columns": ", ".join(self.quote_name(column) for column in columns),
"extra": "",
})
def alter_db_table(self, model, old_db_table, new_db_table):
"""
Renames the table a model points to.
"""
self.execute(self.sql_rename_table % {
"old_table": self.quote_name(old_db_table),
"new_table": self.quote_name(new_db_table),
})
def alter_db_tablespace(self, model, old_db_tablespace, new_db_tablespace):
"""
Moves a model's table between tablespaces
"""
self.execute(self.sql_retablespace_table % {
"table": self.quote_name(model._meta.db_table),
"old_tablespace": self.quote_name(old_db_tablespace),
"new_tablespace": self.quote_name(new_db_tablespace),
})
def add_field(self, model, field):
"""
Creates a field on a model.
Usually involves adding a column, but may involve adding a
table instead (for M2M fields)
"""
# Special-case implicit M2M tables
if isinstance(field, ManyToManyField) and field.rel.through._meta.auto_created:
return self.create_model(field.rel.through)
# Get the column's definition
definition, params = self.column_sql(model, field, include_default=True)
# It might not actually have a column behind it
if definition is None:
return
# Check constraints can go on the column SQL here
db_params = field.db_parameters(connection=self.connection)
if db_params['check']:
definition += " CHECK (%s)" % db_params['check']
# Build the SQL and run it
sql = self.sql_create_column % {
"table": self.quote_name(model._meta.db_table),
"column": self.quote_name(field.column),
"definition": definition,
}
self.execute(sql, params)
# Drop the default if we need to
# (Django usually does not use in-database defaults)
if field.default is not None:
sql = self.sql_alter_column % {
"table": self.quote_name(model._meta.db_table),
"changes": self.sql_alter_column_no_default % {
"column": self.quote_name(field.column),
}
}
self.execute(sql)
# Add an index, if required
if field.db_index and not field.unique:
self.deferred_sql.append(
self.sql_create_index % {
"name": self._create_index_name(model, [field.column], suffix=""),
"table": self.quote_name(model._meta.db_table),
"columns": self.quote_name(field.column),
"extra": "",
}
)
# Add any FK constraints later
if field.rel and self.connection.features.supports_foreign_keys:
to_table = field.rel.to._meta.db_table
to_column = field.rel.to._meta.get_field(field.rel.field_name).column
self.deferred_sql.append(
self.sql_create_fk % {
"name": '%s_refs_%s_%x' % (
field.column,
to_column,
abs(hash((model._meta.db_table, to_table)))
),
"table": self.quote_name(model._meta.db_table),
"column": self.quote_name(field.column),
"to_table": self.quote_name(to_table),
"to_column": self.quote_name(to_column),
}
)
# Reset connection if required
if self.connection.features.connection_persists_old_columns:
self.connection.close()
def remove_field(self, model, field):
"""
Removes a field from a model. Usually involves deleting a column,
but for M2Ms may involve deleting a table.
"""
# Special-case implicit M2M tables
if isinstance(field, ManyToManyField) and field.rel.through._meta.auto_created:
return self.delete_model(field.rel.through)
# It might not actually have a column behind it
if field.db_parameters(connection=self.connection)['type'] is None:
return
# Get the column's definition
definition, params = self.column_sql(model, field)
# Delete the column
sql = self.sql_delete_column % {
"table": self.quote_name(model._meta.db_table),
"column": self.quote_name(field.column),
}
self.execute(sql)
# Reset connection if required
if self.connection.features.connection_persists_old_columns:
self.connection.close()
def alter_field(self, model, old_field, new_field, strict=False):
"""
Allows a field's type, uniqueness, nullability, default, column,
constraints etc. to be modified.
Requires a copy of the old field as well so we can only perform
changes that are required.
If strict is true, raises errors if the old column does not match old_field precisely.
"""
# Ensure this field is even column-based
old_db_params = old_field.db_parameters(connection=self.connection)
old_type = old_db_params['type']
new_db_params = new_field.db_parameters(connection=self.connection)
new_type = new_db_params['type']
if old_type is None and new_type is None and (old_field.rel.through and new_field.rel.through and old_field.rel.through._meta.auto_created and new_field.rel.through._meta.auto_created):
return self._alter_many_to_many(model, old_field, new_field, strict)
elif old_type is None or new_type is None:
raise ValueError("Cannot alter field %s into %s - they are not compatible types (probably means only one is an M2M with implicit through model)" % (
old_field,
new_field,
))
# Has unique been removed?
if old_field.unique and (not new_field.unique or (not old_field.primary_key and new_field.primary_key)):
# Find the unique constraint for this field
constraint_names = self._constraint_names(model, [old_field.column], unique=True)
if strict and len(constraint_names) != 1:
raise ValueError("Found wrong number (%s) of unique constraints for %s.%s" % (
len(constraint_names),
model._meta.db_table,
old_field.column,
))
for constraint_name in constraint_names:
self.execute(
self.sql_delete_unique % {
"table": self.quote_name(model._meta.db_table),
"name": constraint_name,
},
)
# Removed an index?
if old_field.db_index and not new_field.db_index and not old_field.unique and not (not new_field.unique and old_field.unique):
# Find the index for this field
index_names = self._constraint_names(model, [old_field.column], index=True)
if strict and len(index_names) != 1:
raise ValueError("Found wrong number (%s) of indexes for %s.%s" % (
len(index_names),
model._meta.db_table,
old_field.column,
))
for index_name in index_names:
self.execute(
self.sql_delete_index % {
"table": self.quote_name(model._meta.db_table),
"name": index_name,
}
)
# Drop any FK constraints, we'll remake them later
if old_field.rel:
fk_names = self._constraint_names(model, [old_field.column], foreign_key=True)
if strict and len(fk_names) != 1:
raise ValueError("Found wrong number (%s) of foreign key constraints for %s.%s" % (
len(fk_names),
model._meta.db_table,
old_field.column,
))
for fk_name in fk_names:
self.execute(
self.sql_delete_fk % {
"table": self.quote_name(model._meta.db_table),
"name": fk_name,
}
)
# Drop incoming FK constraints if we're a primary key and things are going
# to change.
if old_field.primary_key and new_field.primary_key and old_type != new_type:
for rel in new_field.model._meta.get_all_related_objects():
rel_fk_names = self._constraint_names(rel.model, [rel.field.column], foreign_key=True)
for fk_name in rel_fk_names:
self.execute(
self.sql_delete_fk % {
"table": self.quote_name(rel.model._meta.db_table),
"name": fk_name,
}
)
# Change check constraints?
if old_db_params['check'] != new_db_params['check'] and old_db_params['check']:
constraint_names = self._constraint_names(model, [old_field.column], check=True)
if strict and len(constraint_names) != 1:
raise ValueError("Found wrong number (%s) of check constraints for %s.%s" % (
len(constraint_names),
model._meta.db_table,
old_field.column,
))
for constraint_name in constraint_names:
self.execute(
self.sql_delete_check % {
"table": self.quote_name(model._meta.db_table),
"name": constraint_name,
}
)
# Have they renamed the column?
if old_field.column != new_field.column:
self.execute(self.sql_rename_column % {
"table": self.quote_name(model._meta.db_table),
"old_column": self.quote_name(old_field.column),
"new_column": self.quote_name(new_field.column),
"type": new_type,
})
# Next, start accumulating actions to do
actions = []
post_actions = []
# Type change?
if old_type != new_type:
fragment, other_actions = self._alter_column_type_sql(model._meta.db_table, new_field.column, new_type)
actions.append(fragment)
post_actions.extend(other_actions)
# Default change?
old_default = self.effective_default(old_field)
new_default = self.effective_default(new_field)
if old_default != new_default:
if new_default is None:
actions.append((
self.sql_alter_column_no_default % {
"column": self.quote_name(new_field.column),
},
[],
))
else:
if self.connection.features.requires_literal_defaults:
# Some databases can't take defaults as a parameter (oracle)
# If this is the case, the individual schema backend should
# implement prepare_default
actions.append((
self.sql_alter_column_default % {
"column": self.quote_name(new_field.column),
"default": self.prepare_default(new_default),
},
[],
))
else:
actions.append((
self.sql_alter_column_default % {
"column": self.quote_name(new_field.column),
"default": "%s",
},
[new_default],
))
# Nullability change?
if old_field.null != new_field.null:
if new_field.null:
actions.append((
self.sql_alter_column_null % {
"column": self.quote_name(new_field.column),
"type": new_type,
},
[],
))
else:
actions.append((
self.sql_alter_column_not_null % {
"column": self.quote_name(new_field.column),
"type": new_type,
},
[],
))
if actions:
# Combine actions together if we can (e.g. postgres)
if self.connection.features.supports_combined_alters:
sql, params = tuple(zip(*actions))
actions = [(", ".join(sql), reduce(operator.add, params))]
# Apply those actions
for sql, params in actions:
self.execute(
self.sql_alter_column % {
"table": self.quote_name(model._meta.db_table),
"changes": sql,
},
params,
)
if post_actions:
for sql, params in post_actions:
self.execute(sql, params)
# Added a unique?
if not old_field.unique and new_field.unique:
self.execute(
self.sql_create_unique % {
"table": self.quote_name(model._meta.db_table),
"name": self._create_index_name(model, [new_field.column], suffix="_uniq"),
"columns": self.quote_name(new_field.column),
}
)
# Added an index?
if not old_field.db_index and new_field.db_index and not new_field.unique and not (not old_field.unique and new_field.unique):
self.execute(
self.sql_create_index % {
"table": self.quote_name(model._meta.db_table),
"name": self._create_index_name(model, [new_field.column], suffix="_uniq"),
"columns": self.quote_name(new_field.column),
"extra": "",
}
)
# Type alteration on primary key? Then we need to alter the column
# referring to us.
rels_to_update = []
if old_field.primary_key and new_field.primary_key and old_type != new_type:
rels_to_update.extend(new_field.model._meta.get_all_related_objects())
# Changed to become primary key?
# Note that we don't detect unsetting of a PK, as we assume another field
# will always come along and replace it.
if not old_field.primary_key and new_field.primary_key:
# First, drop the old PK
constraint_names = self._constraint_names(model, primary_key=True)
if strict and len(constraint_names) != 1:
raise ValueError("Found wrong number (%s) of PK constraints for %s" % (
len(constraint_names),
model._meta.db_table,
))
for constraint_name in constraint_names:
self.execute(
self.sql_delete_pk % {
"table": self.quote_name(model._meta.db_table),
"name": constraint_name,
},
)
# Make the new one
self.execute(
self.sql_create_pk % {
"table": self.quote_name(model._meta.db_table),
"name": self._create_index_name(model, [new_field.column], suffix="_pk"),
"columns": self.quote_name(new_field.column),
}
)
# Update all referencing columns
rels_to_update.extend(new_field.model._meta.get_all_related_objects())
# Handle our type alters on the other end of rels from the PK stuff above
for rel in rels_to_update:
rel_db_params = rel.field.db_parameters(connection=self.connection)
rel_type = rel_db_params['type']
self.execute(
self.sql_alter_column % {
"table": self.quote_name(rel.model._meta.db_table),
"changes": self.sql_alter_column_type % {
"column": self.quote_name(rel.field.column),
"type": rel_type,
}
}
)
# Does it have a foreign key?
if new_field.rel:
self.execute(
self.sql_create_fk % {
"table": self.quote_name(model._meta.db_table),
"name": self._create_index_name(model, [new_field.column], suffix="_fk"),
"column": self.quote_name(new_field.column),
"to_table": self.quote_name(new_field.rel.to._meta.db_table),
"to_column": self.quote_name(new_field.rel.get_related_field().column),
}
)
# Rebuild FKs that pointed to us if we previously had to drop them
if old_field.primary_key and new_field.primary_key and old_type != new_type:
for rel in new_field.model._meta.get_all_related_objects():
self.execute(
self.sql_create_fk % {
"table": self.quote_name(rel.model._meta.db_table),
"name": self._create_index_name(rel.model, [rel.field.column], suffix="_fk"),
"column": self.quote_name(rel.field.column),
"to_table": self.quote_name(model._meta.db_table),
"to_column": self.quote_name(new_field.column),
}
)
# Does it have check constraints we need to add?
if old_db_params['check'] != new_db_params['check'] and new_db_params['check']:
self.execute(
self.sql_create_check % {
"table": self.quote_name(model._meta.db_table),
"name": self._create_index_name(model, [new_field.column], suffix="_check"),
"column": self.quote_name(new_field.column),
"check": new_db_params['check'],
}
)
# Reset connection if required
if self.connection.features.connection_persists_old_columns:
self.connection.close()
def _alter_column_type_sql(self, table, column, type):
"""
Hook to specialize column type alteration for different backends,
for cases when a creation type is different to an alteration type
(e.g. SERIAL in PostgreSQL, PostGIS fields).
Should return two things; an SQL fragment of (sql, params) to insert
into an ALTER TABLE statement, and a list of extra (sql, params) tuples
to run once the field is altered.
"""
return (
(
self.sql_alter_column_type % {
"column": self.quote_name(column),
"type": type,
},
[],
),
[],
)
def _alter_many_to_many(self, model, old_field, new_field, strict):
"""
Alters M2Ms to repoint their to= endpoints.
"""
# Rename the through table
if old_field.rel.through._meta.db_table != new_field.rel.through._meta.db_table:
self.alter_db_table(old_field.rel.through, old_field.rel.through._meta.db_table, new_field.rel.through._meta.db_table)
# Repoint the FK to the other side
self.alter_field(
new_field.rel.through,
# We need the field that points to the target model, so we can tell alter_field to change it -
# this is m2m_reverse_field_name() (as opposed to m2m_field_name, which points to our model)
old_field.rel.through._meta.get_field_by_name(old_field.m2m_reverse_field_name())[0],
new_field.rel.through._meta.get_field_by_name(new_field.m2m_reverse_field_name())[0],
)
def _create_index_name(self, model, column_names, suffix=""):
"""
Generates a unique name for an index/unique constraint.
"""
# If there is just one column in the index, use a default algorithm from Django
if len(column_names) == 1 and not suffix:
return truncate_name(
'%s_%s' % (model._meta.db_table, BaseDatabaseCreation._digest(column_names[0])),
self.connection.ops.max_name_length()
)
# Else generate the name for the index using a different algorithm
table_name = model._meta.db_table.replace('"', '').replace('.', '_')
index_unique_name = '_%x' % abs(hash((table_name, ','.join(column_names))))
max_length = self.connection.ops.max_name_length() or 200
# If the index name is too long, truncate it
index_name = ('%s_%s%s%s' % (table_name, column_names[0], index_unique_name, suffix)).replace('"', '').replace('.', '_')
if len(index_name) > max_length:
part = ('_%s%s%s' % (column_names[0], index_unique_name, suffix))
index_name = '%s%s' % (table_name[:(max_length - len(part))], part)
# It shouldn't start with an underscore (Oracle hates this)
if index_name[0] == "_":
index_name = index_name[1:]
# If it's STILL too long, just hash it down
if len(index_name) > max_length:
index_name = hashlib.md5(force_bytes(index_name)).hexdigest()[:max_length]
# It can't start with a number on Oracle, so prepend D if we need to
if index_name[0].isdigit():
index_name = "D%s" % index_name[:-1]
return index_name
def _constraint_names(self, model, column_names=None, unique=None, primary_key=None, index=None, foreign_key=None, check=None):
"""
Returns all constraint names matching the columns and conditions
"""
column_names = list(column_names) if column_names else None
with self.connection.cursor() as cursor:
constraints = self.connection.introspection.get_constraints(cursor, model._meta.db_table)
result = []
for name, infodict in constraints.items():
if column_names is None or column_names == infodict['columns']:
if unique is not None and infodict['unique'] != unique:
continue
if primary_key is not None and infodict['primary_key'] != primary_key:
continue
if index is not None and infodict['index'] != index:
continue
if check is not None and infodict['check'] != check:
continue
if foreign_key is not None and not infodict['foreign_key']:
continue
result.append(name)
return result
| bsd-3-clause |
rmfranciacastillo/freecodecamp_projects | weather/node_modules/node-gyp/gyp/pylib/gyp/MSVSProject.py | 2736 | 6387 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Visual Studio project reader/writer."""
import gyp.common
import gyp.easy_xml as easy_xml
#------------------------------------------------------------------------------
class Tool(object):
"""Visual Studio tool."""
def __init__(self, name, attrs=None):
"""Initializes the tool.
Args:
name: Tool name.
attrs: Dict of tool attributes; may be None.
"""
self._attrs = attrs or {}
self._attrs['Name'] = name
def _GetSpecification(self):
"""Creates an element for the tool.
Returns:
A new xml.dom.Element for the tool.
"""
return ['Tool', self._attrs]
class Filter(object):
"""Visual Studio filter - that is, a virtual folder."""
def __init__(self, name, contents=None):
"""Initializes the folder.
Args:
name: Filter (folder) name.
contents: List of filenames and/or Filter objects contained.
"""
self.name = name
self.contents = list(contents or [])
#------------------------------------------------------------------------------
class Writer(object):
"""Visual Studio XML project writer."""
def __init__(self, project_path, version, name, guid=None, platforms=None):
"""Initializes the project.
Args:
project_path: Path to the project file.
version: Format version to emit.
name: Name of the project.
guid: GUID to use for project, if not None.
platforms: Array of string, the supported platforms. If null, ['Win32']
"""
self.project_path = project_path
self.version = version
self.name = name
self.guid = guid
# Default to Win32 for platforms.
if not platforms:
platforms = ['Win32']
# Initialize the specifications of the various sections.
self.platform_section = ['Platforms']
for platform in platforms:
self.platform_section.append(['Platform', {'Name': platform}])
self.tool_files_section = ['ToolFiles']
self.configurations_section = ['Configurations']
self.files_section = ['Files']
# Keep a dict keyed on filename to speed up access.
self.files_dict = dict()
def AddToolFile(self, path):
"""Adds a tool file to the project.
Args:
path: Relative path from project to tool file.
"""
self.tool_files_section.append(['ToolFile', {'RelativePath': path}])
def _GetSpecForConfiguration(self, config_type, config_name, attrs, tools):
"""Returns the specification for a configuration.
Args:
config_type: Type of configuration node.
config_name: Configuration name.
attrs: Dict of configuration attributes; may be None.
tools: List of tools (strings or Tool objects); may be None.
Returns:
"""
# Handle defaults
if not attrs:
attrs = {}
if not tools:
tools = []
# Add configuration node and its attributes
node_attrs = attrs.copy()
node_attrs['Name'] = config_name
specification = [config_type, node_attrs]
# Add tool nodes and their attributes
if tools:
for t in tools:
if isinstance(t, Tool):
specification.append(t._GetSpecification())
else:
specification.append(Tool(t)._GetSpecification())
return specification
def AddConfig(self, name, attrs=None, tools=None):
"""Adds a configuration to the project.
Args:
name: Configuration name.
attrs: Dict of configuration attributes; may be None.
tools: List of tools (strings or Tool objects); may be None.
"""
spec = self._GetSpecForConfiguration('Configuration', name, attrs, tools)
self.configurations_section.append(spec)
def _AddFilesToNode(self, parent, files):
"""Adds files and/or filters to the parent node.
Args:
parent: Destination node
files: A list of Filter objects and/or relative paths to files.
Will call itself recursively, if the files list contains Filter objects.
"""
for f in files:
if isinstance(f, Filter):
node = ['Filter', {'Name': f.name}]
self._AddFilesToNode(node, f.contents)
else:
node = ['File', {'RelativePath': f}]
self.files_dict[f] = node
parent.append(node)
def AddFiles(self, files):
"""Adds files to the project.
Args:
files: A list of Filter objects and/or relative paths to files.
This makes a copy of the file/filter tree at the time of this call. If you
later add files to a Filter object which was passed into a previous call
to AddFiles(), it will not be reflected in this project.
"""
self._AddFilesToNode(self.files_section, files)
# TODO(rspangler) This also doesn't handle adding files to an existing
# filter. That is, it doesn't merge the trees.
def AddFileConfig(self, path, config, attrs=None, tools=None):
"""Adds a configuration to a file.
Args:
path: Relative path to the file.
config: Name of configuration to add.
attrs: Dict of configuration attributes; may be None.
tools: List of tools (strings or Tool objects); may be None.
Raises:
ValueError: Relative path does not match any file added via AddFiles().
"""
# Find the file node with the right relative path
parent = self.files_dict.get(path)
if not parent:
raise ValueError('AddFileConfig: file "%s" not in project.' % path)
# Add the config to the file node
spec = self._GetSpecForConfiguration('FileConfiguration', config, attrs,
tools)
parent.append(spec)
def WriteIfChanged(self):
"""Writes the project file."""
# First create XML content definition
content = [
'VisualStudioProject',
{'ProjectType': 'Visual C++',
'Version': self.version.ProjectVersion(),
'Name': self.name,
'ProjectGUID': self.guid,
'RootNamespace': self.name,
'Keyword': 'Win32Proj'
},
self.platform_section,
self.tool_files_section,
self.configurations_section,
['References'], # empty section
self.files_section,
['Globals'] # empty section
]
easy_xml.WriteXmlIfChanged(content, self.project_path,
encoding="Windows-1252")
| apache-2.0 |
tvenkat/askbot-devel | askbot/management/commands/add_admin.py | 13 | 1434 | from django.core.management.base import NoArgsCommand
from django.contrib.auth.models import User
from django.db.models.signals import pre_save, post_save
import sys
class Command(NoArgsCommand):
def get_user(self, uid_str):
try:
uid = int(uid_str)
return User.objects.get(id=uid)
except User.DoesNotExist:
            print 'sorry, there is no user with id=%d' % uid
sys.exit(1)
except ValueError:
            print 'user id must be an integer, got %s' % uid_str
sys.exit(1)
def parse_arguments(self, arguments):
if len(arguments) != 1:
            print 'argument for this command is <user_id>'
sys.exit(1)
self.user = self.get_user(arguments[0])
def confirm_action(self):
u = self.user
print ''
prompt = 'Do you really wish to make user (id=%d, name=%s) a site administrator? yes/no: ' \
% (u.id, u.username)
        answer = raw_input(prompt)
        if answer != 'yes':
print 'action canceled'
sys.exit(1)
def remove_signals(self):
pre_save.receivers = []
post_save.receivers = []
def handle(self, *arguments, **options):
#destroy pre_save and post_save signals
self.parse_arguments(arguments)
self.confirm_action()
self.remove_signals()
self.user.set_admin_status()
self.user.save()
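
# Typical invocation (the user id is illustrative):
#
#   python manage.py add_admin 42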
| gpl-3.0 |
cmusatyalab/django-s3 | django_s3/forms.py | 1 | 1062 | #
# Copyright (C) 2012 Carnegie Mellon University
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# version 2 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
#
from django import forms
class UploadStartForm(forms.Form):
blob = forms.IntegerField()
class UploadForm(forms.Form):
blob = forms.IntegerField()
token = forms.CharField()
file = forms.FileField()
resumableChunkNumber = forms.IntegerField()
class UploadFinishForm(forms.Form):
blob = forms.IntegerField()
token = forms.CharField()
| gpl-2.0 |
rigmar/idapython | examples/ex_patch.py | 18 | 1139 | # -------------------------------------------------------------------------
# This is an example illustrating how to visit all patched bytes in Python
# (c) Hex-Rays
import idaapi
# -------------------------------------------------------------------------
class patched_bytes_visitor(object):
def __init__(self):
self.skip = 0
self.patch = 0
def __call__(self, ea, fpos, o, v, cnt=()):
if fpos == -1:
self.skip += 1
print(" ea: %x o: %x v: %x...skipped" % (ea, fpos, o, v))
else:
self.patch += 1
print(" ea: %x fpos: %x o: %x v: %x" % (ea, fpos, o, v))
return 0
# -------------------------------------------------------------------------
def main():
print("Visiting all patched bytes:")
v = patched_bytes_visitor()
r = idaapi.visit_patched_bytes(0, idaapi.BADADDR, v)
if r != 0:
print("visit_patched_bytes() returned %d" % r)
else:
print("Patched: %d Skipped: %d" % (v.patch, v.skip))
# -------------------------------------------------------------------------
if __name__ == '__main__':
main() | bsd-3-clause |
cirrax/openstack-nagios-plugins | openstacknagios/nova/Services.py | 1 | 4298 | #
# Copyright (C) 2014 Cirrax GmbH http://www.cirrax.com
# Benedikt Trefzer <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Nagios/Icinga plugin to check running nova services.
This corresponds to the output of 'nova service-list'.
"""
import openstacknagios.openstacknagios as osnag
from novaclient.client import Client
class NovaServices(osnag.Resource):
"""
Determines the status of the nova services.
"""
def __init__(self, binary=None, host=None, args=None):
self.binary = binary
self.host = host
self.openstack = self.get_openstack_vars(args=args)
osnag.Resource.__init__(self)
def probe(self):
try:
nova=Client('2',
session = self.get_session(),
cacert = self.openstack['cacert'],
insecure = self.openstack['insecure'])
except Exception as e:
self.exit_error(str(e))
try:
result=nova.services.list(host=self.host,binary=self.binary)
except Exception as e:
self.exit_error(str(e))
stati=dict(up=0, disabled=0, down=0, total=0)
for agent in result:
stati['total'] += 1
if agent.status == 'enabled' and agent.state =='up':
stati['up'] += 1
elif agent.status == 'disabled':
stati['disabled'] += 1
else:
stati['down'] += 1
for r in stati.keys():
yield osnag.Metric(r, stati[r], min=0)
@osnag.guarded
def main():
argp = osnag.ArgumentParser(description=__doc__)
argp.add_argument('-w', '--warn', metavar='RANGE', default='0:',
help='return warning if number of up agents is outside RANGE (default: 0:, never warn)')
    argp.add_argument('-c', '--critical', metavar='RANGE', default='0:',
                      help='return critical if number of up agents is outside RANGE (default: 0:, never critical)')
    argp.add_argument('--warn_disabled', metavar='RANGE', default='@1:',
                      help='return warning if number of disabled agents is outside RANGE (default: @1:, warn if any disabled agents)')
    argp.add_argument('--critical_disabled', metavar='RANGE', default='0:',
                      help='return critical if number of disabled agents is outside RANGE (default: 0:, never critical)')
    argp.add_argument('--warn_down', metavar='RANGE', default='0:',
                      help='return warning if number of down agents is outside RANGE (default: 0:, never warn)')
    argp.add_argument('--critical_down', metavar='RANGE', default='0',
                      help='return critical if number of down agents is outside RANGE (default: 0, always critical if any down)')
argp.add_argument('--binary',
dest='binary',
default=None,
help='filter agent binary')
argp.add_argument('--host',
dest='host',
default=None,
help='filter hostname')
args = argp.parse_args()
check = osnag.Check(
NovaServices(args=args, host=args.host, binary=args.binary),
osnag.ScalarContext('up', args.warn, args.critical),
osnag.ScalarContext('disabled', args.warn_disabled, args.critical_disabled),
osnag.ScalarContext('down', args.warn_down, args.critical_down),
osnag.ScalarContext('total', '0:', '@0'),
osnag.Summary(show=['up','disabled','down','total'])
)
check.main(verbose=args.verbose, timeout=args.timeout)
if __name__ == '__main__':
main()
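
# A minimal invocation sketch (script name and thresholds are illustrative;
# OpenStack credentials are taken from the usual plugin/auth arguments):
#
#   check_nova_services --host compute01 --warn 2: --critical 1: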
| gpl-3.0 |
studio666/gnuradio | grc/base/Connection.py | 17 | 5254 | """
Copyright 2008-2011 Free Software Foundation, Inc.
This file is part of GNU Radio
GNU Radio Companion is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
GNU Radio Companion is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
"""
from Element import Element
from . import odict
class Connection(Element):
def __init__(self, flow_graph, porta, portb):
"""
Make a new connection given the parent and 2 ports.
Args:
flow_graph: the parent of this element
porta: a port (any direction)
portb: a port (any direction)
@throws Error cannot make connection
Returns:
a new connection
"""
Element.__init__(self, flow_graph)
source = sink = None
#separate the source and sink
for port in (porta, portb):
if port.is_source(): source = port
if port.is_sink(): sink = port
if not source: raise ValueError('Connection could not isolate source')
if not sink: raise ValueError('Connection could not isolate sink')
        busses = len(filter(lambda a: a.get_type() == 'bus', [source, sink])) % 2
        if busses != 0: raise ValueError('bus ports may only be connected to other bus ports')
if not len(source.get_associated_ports()) == len(sink.get_associated_ports()):
raise ValueError('port connections must have same cardinality');
#ensure that this connection (source -> sink) is unique
for connection in self.get_parent().get_connections():
if connection.get_source() is source and connection.get_sink() is sink:
raise LookupError('This connection between source and sink is not unique.')
self._source = source
self._sink = sink
if source.get_type() == 'bus':
sources = source.get_associated_ports();
sinks = sink.get_associated_ports();
for i in range(len(sources)):
try:
flow_graph.connect(sources[i], sinks[i]);
except:
pass
def __str__(self):
return 'Connection (\n\t%s\n\t\t%s\n\t%s\n\t\t%s\n)'%(
self.get_source().get_parent(),
self.get_source(),
self.get_sink().get_parent(),
self.get_sink(),
)
def is_connection(self): return True
def validate(self):
"""
Validate the connections.
The ports must match in type.
"""
Element.validate(self)
platform = self.get_parent().get_parent()
source_domain = self.get_source().get_domain()
sink_domain = self.get_sink().get_domain()
if (source_domain, sink_domain) not in platform.get_connection_templates():
self.add_error_message('No connection known for domains "%s", "%s"'
% (source_domain, sink_domain))
too_many_other_sinks = (
source_domain in platform.get_domains() and
not platform.get_domain(key=source_domain)['multiple_sinks'] and
len(self.get_source().get_enabled_connections()) > 1
)
too_many_other_sources = (
sink_domain in platform.get_domains() and
not platform.get_domain(key=sink_domain)['multiple_sources'] and
len(self.get_sink().get_enabled_connections()) > 1
)
if too_many_other_sinks:
self.add_error_message(
'Domain "%s" can have only one downstream block' % source_domain)
if too_many_other_sources:
self.add_error_message(
'Domain "%s" can have only one upstream block' % sink_domain)
def get_enabled(self):
"""
Get the enabled state of this connection.
Returns:
true if source and sink blocks are enabled
"""
return self.get_source().get_parent().get_enabled() and \
self.get_sink().get_parent().get_enabled()
#############################
# Access Ports
#############################
def get_sink(self): return self._sink
def get_source(self): return self._source
##############################################
## Import/Export Methods
##############################################
def export_data(self):
"""
Export this connection's info.
Returns:
a nested data odict
"""
n = odict()
n['source_block_id'] = self.get_source().get_parent().get_id()
n['sink_block_id'] = self.get_sink().get_parent().get_id()
n['source_key'] = self.get_source().get_key()
n['sink_key'] = self.get_sink().get_key()
return n
| gpl-3.0 |
Ichag/odoo | openerp/addons/base/__init__.py | 379 | 1134 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import ir
import workflow
import module
import res
import report
import tests
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
IndigoTiger/ezzybot | ezzybot/util/bucket.py | 5 | 1252 | from time import time
class TokenBucket(object):
"""An implementation of the token bucket algorithm.
>>> bucket = TokenBucket(80, 0.5)
>>> print bucket.consume(10)
True
>>> print bucket.consume(90)
False
"""
def __init__(self, tokens, fill_rate):
"""tokens is the total tokens in the bucket. fill_rate is the
rate in tokens/second that the bucket will be refilled."""
self.capacity = float(tokens)
self._tokens = float(tokens)
self.fill_rate = float(fill_rate)
self.timestamp = time()
def consume(self, tokens):
"""Consume tokens from the bucket. Returns True if there were
sufficient tokens otherwise False."""
if tokens <= self.tokens:
self._tokens -= tokens
else:
return False
return True
def refill(self):
"""Refills the token bucket"""
self._tokens = self.capacity
def get_tokens(self):
now = time()
if self._tokens < self.capacity:
delta = self.fill_rate * (now - self.timestamp)
self._tokens = min(self.capacity, self._tokens + delta)
self.timestamp = now
return self._tokens
tokens = property(get_tokens)
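
# A minimal usage sketch (rates are illustrative; the bucket refills lazily
# whenever `tokens` is read through the property):
#
#   bucket = TokenBucket(10, 1)     # capacity 10, refills 1 token/second
#   if bucket.consume(3):
#       pass                        # allowed: 3 tokens spent
#   else:
#       pass                        # rate-limited: try again later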
| gpl-3.0 |
sauloal/linuxscripts | apache/var/www/html/saulo/torrent/html/bin/clients/mainline/BTL/yielddefer25.py | 11 | 6035 | # yielddefer is an async programming mechanism with a blocking look-alike syntax
#
# The contents of this file are subject to the Python Software Foundation
# License Version 2.3 (the License). You may not copy or use this file, in
# either source code or executable form, except in compliance with the License.
# You may obtain a copy of the License at http://www.python.org/license.
#
# Software distributed under the License is distributed on an AS IS basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# launch_coroutine maintains the illusion that the passed function
# (a generator) runs from beginning to end yielding when necessary
# for some job to complete and then continuing where it left off.
#
# def f():
# ...
# yield some_thing_that_takes_time()
# ...
# result = yield something_else()
# ...
#
# from inside a generator launched with launch_coroutine:
# wait on a deferred to be called back by yielding it
# return None by simply returning
# return an exception by throwing one
# return a value by yielding a non-Deferred
#
# by Greg Hazel
from __future__ import generators
import sys
import types
import traceback
from BTL.defer import Deferred, Failure
from BTL.stackthreading import _print_traceback
from twisted.python import failure
debug = False
name_debug = False
class GenWithDeferred(object):
if debug:
        __slots__ = ['gen', 'current_deferred', 'deferred', 'queue_task', 'stack']
else:
__slots__ = ['gen', 'current_deferred', 'deferred', 'queue_task']
def __init__(self, gen, deferred, queue_task):
self.gen = gen
self.deferred = deferred
self.queue_task = queue_task
self.current_deferred = None
if debug:
try:
raise ZeroDivisionError
except ZeroDivisionError:
f = sys.exc_info()[2].tb_frame.f_back
self.stack = traceback.extract_stack(f)
# cut out GenWithDeferred() and launch_coroutine
self.stack = self.stack[:-2]
def cleanup(self):
del self.gen
del self.deferred
del self.queue_task
del self.current_deferred
if debug:
del self.stack
if name_debug:
def __getattr__(self, attr):
if '_recall' not in attr:
raise AttributeError(attr)
return self._recall
def _queue_task_chain(self, v):
recall = getattr(self, "_recall_%s" % self.gen.gi_frame.f_code.co_name)
self.queue_task(recall)
return v
else:
def _queue_task_chain(self, v):
self.queue_task(self._recall)
return v
def next(self):
if not self.current_deferred:
return self.gen.next()
if isinstance(self.current_deferred.result, failure.Failure):
r = self.current_deferred.result
self.current_deferred.addErrback(lambda fuckoff: None)
return self.gen.throw(*r.exc_info())
return self.gen.send(self.current_deferred.result)
def _recall(self):
try:
df = self.next()
except StopIteration:
self.deferred.callback(None)
self.cleanup()
except Exception, e:
exc_type, value, tb = sys.exc_info()
## Magic Traceback Hacking
if debug:
# interpreter shutdown
if not sys:
return
# HERE. This should really be logged or else bittorrent-
# curses will never be able to properly output. --Dave
_print_traceback(sys.stderr, self.stack,
"generator %s" % self.gen.gi_frame.f_code.co_name, 0,
exc_type, value, tb)
else:
#if (tb.tb_lineno != self.gen.gi_frame.f_lineno or
# tb.f_code.co_filename != self.gen.gi_frame.f_code.co_filename):
# tb = FakeTb(self.gen.gi_frame, tb)
pass
## Magic Traceback Hacking
self.deferred.errback(Failure(value, exc_type, tb))
del tb
self.cleanup()
else:
if not isinstance(df, Deferred):
self.deferred.callback(df)
self.cleanup()
return
self.current_deferred = df
df.addCallback(self._queue_task_chain)
df.addErrback(self._queue_task_chain)
del df
class FakeTb(object):
__slots__ = ['tb_frame', 'tb_lineno', 'tb_orig', 'tb_next']
def __init__(self, frame, tb):
self.tb_frame = frame
self.tb_lineno = frame.f_lineno
self.tb_orig = tb
self.tb_next = tb.tb_next
def _launch_generator(queue_task, g, main_df):
gwd = GenWithDeferred(g, main_df, queue_task)
## the first one is fired for you
##gwd._recall()
# the first one is not fired for you, because if it errors the sys.exc_info
# causes an unresolvable circular reference that makes the gwd.deferred never
# be deleted.
gwd._queue_task_chain(None)
def launch_coroutine(queue_task, f, *args, **kwargs):
main_df = Deferred()
try:
g = f(*args, **kwargs)
except Exception, e:
if debug:
traceback.print_exc()
main_df.errback(Failure())
else:
if isinstance(g, types.GeneratorType):
_launch_generator(queue_task, g, main_df)
else:
# we got a non-generator, just callback with the return value
main_df.callback(g)
return main_df
# decorator
def coroutine(func, queue_task):
def replacement(*a, **kw):
return launch_coroutine(queue_task, func, *a, **kw)
return replacement
def wrap_task(add_task):
return lambda _f, *args, **kwargs : add_task(0, _f, *args, **kwargs)
_wrap_task = wrap_task
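# A minimal wiring sketch (illustrative only; assumes a Twisted reactor is
# driving the event loop -- reactor.callLater has the (delay, f, *a, **kw)
# signature that wrap_task expects):
#
# from twisted.internet import reactor
# queue_task = wrap_task(reactor.callLater)
#
# def double_result(df):
#     value = yield df        # wait on a Deferred
#     yield value * 2         # yield a non-Deferred to "return" it
#
# main_df = launch_coroutine(queue_task, double_result, some_deferred)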
| mit |
wsdream/CARP | CARP/src/evaluator.py | 1 | 5610 | ########################################################
# evaluator.py
# Author: Jamie Zhu <jimzhu@GitHub>
# Created: 2014/2/6
# Last updated: 2014/11/03
########################################################
import numpy as np
from numpy import linalg as LA
import time, sys
import random
import core
from utilities import *
########################################################
# Function to run the prediction approach at each density
#
def execute(tensor, density, para):
startTime = time.clock()
[numUser, numService, numTime] = tensor.shape
rounds = para['rounds']
logger.info('Data size: %d users * %d services * %d timeslices'\
%(numUser, numService, numTime))
logger.info('Run the algorithm for %d rounds: density = %.2f.'%(rounds, density))
evalResults = np.zeros((rounds, len(para['metrics'])))
timeResults = np.zeros((rounds, 1))
for k in range(rounds):
logger.info('----------------------------------------------')
logger.info('%d-round starts.'%(k + 1))
logger.info('----------------------------------------------')
# remove the entries of data to generate trainTensor and testTensor
(trainTensor, testTensor) = removeTensor(tensor, density, k, para)
logger.info('Removing data entries done.')
# invocation to the prediction function
iterStartTime = time.clock() # to record the running time for one round
predictedTensor = core.predict(trainTensor, para)
timeResults[k] = time.clock() - iterStartTime
# calculate the prediction error
result = np.zeros((numTime, len(para['metrics'])))
for i in range(numTime):
testMatrix = testTensor[:, :, i]
predictedMatrix = predictedTensor[:, :, i]
(testVecX, testVecY) = np.where(testMatrix)
testVec = testMatrix[testVecX, testVecY]
predVec = predictedMatrix[testVecX, testVecY]
result[i, :] = errMetric(testVec, predVec, para['metrics'])
evalResults[k, :] = np.average(result, axis=0)
logger.info('%d-round done. Running time: %.2f sec'%(k + 1, timeResults[k]))
logger.info('----------------------------------------------')
outFile = '%savg_%sResult_%.2f.txt'%(para['outPath'], para['dataType'], density)
saveResult(outFile, evalResults, timeResults, para)
logger.info('Density = %.2f done. Running time: %.2f sec'
%(density, time.clock() - startTime))
logger.info('==============================================')
########################################################
########################################################
# Function to remove the entries of data tensor
# Return the trainTensor and the corresponding testTensor
#
def removeTensor(tensor, density, round, para):
numTime = tensor.shape[2]
trainTensor = np.zeros(tensor.shape)
testTensor = np.zeros(tensor.shape)
for i in range(numTime):
seedID = round + i * 100
(trainMatrix, testMatrix) = removeEntries(tensor[:, :, i], density, seedID)
trainTensor[:, :, i] = trainMatrix
testTensor[:, :, i] = testMatrix
return trainTensor, testTensor
########################################################
########################################################
# Function to remove the entries of data matrix
# Use Gaussian random sampling
# Return trainMatrix and testMatrix
#
def removeEntries(matrix, density, seedID):
numAll = matrix.size
numTrain = int(numAll * density)
(vecX, vecY) = np.where(matrix > -1000)
np.random.seed(seedID % 100)
randPermut = np.random.permutation(numAll)
np.random.seed(seedID)
randSequence = np.random.normal(0, numAll / 6.0, numAll * 50)
trainSet = []
flags = np.zeros(numAll)
for i in xrange(randSequence.shape[0]):
sample = int(abs(randSequence[i]))
if sample < numAll:
idx = randPermut[sample]
if flags[idx] == 0 and matrix[vecX[idx], vecY[idx]] > 0:
trainSet.append(idx)
flags[idx] = 1
if len(trainSet) == numTrain:
break
if len(trainSet) < numTrain:
logger.critical('Exit unexpectedly: not enough data for density = %.2f.', density)
sys.exit()
trainMatrix = np.zeros(matrix.shape)
trainMatrix[vecX[trainSet], vecY[trainSet]] = matrix[vecX[trainSet], vecY[trainSet]]
testMatrix = np.zeros(matrix.shape)
testMatrix[matrix > 0] = matrix[matrix > 0]
testMatrix[vecX[trainSet], vecY[trainSet]] = 0
# ignore invalid testing users or services
idxX = (np.sum(trainMatrix, axis=1) == 0)
testMatrix[idxX, :] = 0
idxY = (np.sum(trainMatrix, axis=0) == 0)
testMatrix[:, idxY] = 0
return trainMatrix, testMatrix
########################################################
########################################################
# Function to compute the evaluation metrics
#
def errMetric(realVec, predVec, metrics):
result = []
absError = np.abs(predVec - realVec)
mae = np.sum(absError) / absError.shape[0]
for metric in metrics:
if 'MAE' == metric:
result = np.append(result, mae)
if 'NMAE' == metric:
nmae = mae / (np.sum(realVec) / absError.shape[0])
result = np.append(result, nmae)
if 'RMSE' == metric:
rmse = LA.norm(absError) / np.sqrt(absError.shape[0])
result = np.append(result, rmse)
if 'MRE' == metric or 'NPRE' == metric:
relativeError = absError / realVec
relativeError = np.sort(relativeError)
if 'MRE' == metric:
mre = np.median(relativeError)
result = np.append(result, mre)
if 'NPRE' == metric:
npre = relativeError[int(np.floor(0.9 * relativeError.shape[0]))]
result = np.append(result, npre)
return result
########################################################
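# e.g. errMetric(np.array([1.0, 2.0]), np.array([1.5, 2.5]), ['MAE', 'RMSE'])
# returns array([0.5, 0.5]) -- one entry per requested metric, in the order
# the metrics are listed (illustrative values, not taken from the paper).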
| mit |
bhargav2408/python-for-android | python-modules/twisted/twisted/conch/ssh/service.py | 61 | 1418 | # Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
The parent class for all the SSH services. Currently implemented services
are ssh-userauth and ssh-connection.
Maintainer: Paul Swartz
"""
from twisted.python import log
class SSHService(log.Logger):
name = None # this is the ssh name for the service
protocolMessages = {} # these map #'s -> protocol names
transport = None # gets set later
def serviceStarted(self):
"""
called when the service is active on the transport.
"""
def serviceStopped(self):
"""
called when the service is stopped, either by the connection ending
or by another service being started
"""
def logPrefix(self):
return "SSHService %s on %s" % (self.name,
self.transport.transport.logPrefix())
def packetReceived(self, messageNum, packet):
"""
called when we receive a packet on the transport
"""
#print self.protocolMessages
if messageNum in self.protocolMessages:
messageType = self.protocolMessages[messageNum]
f = getattr(self,'ssh_%s' % messageType[4:],
None)
if f is not None:
return f(packet)
log.msg("couldn't handle %r" % messageNum)
log.msg(repr(packet))
self.transport.sendUnimplemented()
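# A minimal subclass sketch (names are illustrative, not a real Conch
# service): packetReceived() strips the leading 'MSG_' from the mapped name,
# so message number 100 below is dispatched to ssh_ECHO().
#
# class SSHEcho(SSHService):
#     name = 'ssh-echo'
#     protocolMessages = {100: 'MSG_ECHO'}
#
#     def ssh_ECHO(self, packet):
#         self.transport.sendPacket(100, packet)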
| apache-2.0 |
manojgudi/sandhi | modules/gr36/gnuradio-core/src/python/gnuradio/gr/qa_kludged_imports.py | 18 | 1325 | #!/usr/bin/env python
#
# Copyright 2005,2008,2010 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest
class test_kludged_imports (gr_unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_blks_import(self):
# make sure that this somewhat magic import works
from gnuradio import blks2
def test_gru_import(self):
# make sure that this somewhat magic import works
from gnuradio import gru
if __name__ == '__main__':
gr_unittest.run(test_kludged_imports, "test_kludged_imports.xml")
| gpl-3.0 |
jarathomas/openVA-Pipeline | pipeline.py | 1 | 49777 | #-------------------------------------------------------------------------------------------------------------------------------------------#
# openVA Pipeline: pipeline.py -- Software for processing Verbal Autopsy data with automated cause of death assignment. #
# Copyright (C) 2018 Jason Thomas, Samuel Clark, Martin Bratschi in collaboration with the Bloomberg Data for Health Initiative #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
#-------------------------------------------------------------------------------------------------------------------------------------------#
#-------------------------------------------------------------------------------------------------------------------------------------------#
# User Settings
sqlitePW = "enilepiP"
dbName = "Pipeline.db"
#-------------------------------------------------------------------------------------------------------------------------------------------#
from pysqlcipher3 import dbapi2 as sqlcipher
from pandas import read_csv, groupby
import pandas as pd
import sys
import csv
import datetime
import os
import subprocess
import shutil
import requests
import json
import sqlite3
import time
import re
import pickle
#-------------------------------------------------------------------------------------------------------------------------------------------#
# Define functions and objects needed for functioning of pipeline; then set up log files and configuration of pipeline
#-------------------------------------------------------------------------------------------------------------------------------------------#
class Dhis(object):
"""Access DHIS2 API."""
def __init__(self, dhisURL, dhisUser, dhisPass):
if '/api' in dhisURL:
print('Please do not specify /api/ in the server argument: e.g. --server=play.dhis2.org/demo')
sys.exit()
if dhisURL.startswith('localhost') or dhisURL.startswith('127.0.0.1'):
dhisURL = 'http://{}'.format(dhisURL)
elif dhisURL.startswith('http://'):
dhisURL = dhisURL
elif not dhisURL.startswith('https://'):
dhisURL = 'https://{}'.format(dhisURL)
self.auth = (dhisUser, dhisPass)
self.url = '{}/api/25'.format(dhisURL)
def get(self, endpoint, params=None):
"""
GET method for DHIS2 API.
:rtype: dict
"""
url = '{}/{}.json'.format(self.url, endpoint)
if not params:
params = {}
params['paging'] = False
try:
r = requests.get(url=url, params=params, auth=self.auth)
if r.status_code != 200:
print("HTTP Code: {}".format(r.status_code)) ## HERE
print(r.text)
else:
return r.json()
except requests.RequestException:
raise requests.RequestException
def post(self, endpoint, data):
"""
POST method for DHIS2 API.
:rtype: dict
"""
url = '{}/{}.json'.format(self.url, endpoint)
try:
r = requests.post(url=url, json=data, auth=self.auth)
if r.status_code not in range(200, 206):
print("HTTP Code: {}".format(r.status_code)) ## HERE
print(r.text)
else:
return r.json()
except requests.RequestException:
raise requests.RequestException
def post_blob(self, f):
""" Post file to DHIS2 and return created UID for that file
:rtype: str
"""
url = '{}/fileResources'.format(self.url)
files = {'file': (f, open(f, 'rb'), 'application/x-sqlite3', {'Expires': '0'})}
try:
r = requests.post(url, files=files, auth=self.auth)
if r.status_code not in (200, 202):
print("HTTP Code: {}".format(r.status_code)) ## HERE
print(r.text)
else:
response = r.json()
file_id = response['response']['fileResource']['id']
return file_id
except requests.RequestException:
raise requests.RequestException
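# A minimal usage sketch of the wrapper above (hypothetical credentials;
# the endpoints mirror the calls made later in this script):
#
# api = Dhis("play.dhis2.org/demo", "admin", "district")
# programs = api.get("programs", params={"filter": "name:like:Verbal Autopsy"})
# fileID = api.post_blob("/path/to/blob.db")
# log = api.post("events", data={"events": [...]})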
class VerbalAutopsyEvent(object):
""" DHIS2 event + a BLOB file resource"""
def __init__(self, va_id, program, dhis_orgunit, event_date, sex, dob, age, cod_code, algorithm_metadata, file_id):
self.va_id = va_id
self.program = program
self.dhis_orgunit = dhis_orgunit
self.event_date = event_date
self.sex = sex
self.dob = dob
self.age = age
self.cod_code = cod_code
self.algorithm_metadata = algorithm_metadata
self.datavalues = [
{"dataElement": "htm6PixLJNy", "value": self.va_id},
{"dataElement": "hi7qRC4SMMk", "value": self.sex},
{"dataElement": "mwSaVq64k7j", "value": self.dob},
{"dataElement": "F4XGdOBvWww", "value": self.cod_code},
{"dataElement": "wiJviUqN1io", "value": self.algorithm_metadata},
{"dataElement": "oPAg4MA0880", "value": self.age},
{"dataElement": "XLHIBoLtjGt", "value": file_id}
]
def format_to_dhis2(self, dhisUser):
"""
Format object to DHIS2 compatible event for DHIS2 API
:rtype: dict
"""
event = {
"program": self.program,
"orgUnit": self.dhis_orgunit,
"eventDate": datetime.datetime.strftime(self.event_date, '%Y-%m-%d'),
"status": "COMPLETED",
"storedBy": dhisUser,
"dataValues": self.datavalues
}
return event
def __str__(self):
return json.dumps(self, default=lambda o: o.__dict__)
def create_db(fName, evaList):
"""
Create a SQLite database with VA data + COD
:rtype: None
"""
conn = sqlite3.connect(fName)
with conn:
cur = conn.cursor()
cur.execute("CREATE TABLE vaRecord(ID INT, Attrtibute TEXT, Value TEXT)")
cur.executemany("INSERT INTO vaRecord VALUES (?,?,?)", evaList)
def getCODCode(myDict, searchFor):
for i in range(len(myDict.keys())):
match = re.search(searchFor, list(myDict.keys())[i])
if match:
return list(myDict.values())[i]
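# e.g. getCODCode(codesWHO, "Malaria") returns the DHIS2 code stored under
# the first key in codesWHO that the regular expression "Malaria" matches
# (the key/code pairs come from the COD_Codes_DHIS table).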
# set the ODK_Conf table item odkLastRunResult as 0, log error message, and exit script
def cleanup(errorMsg=None):
# handle single case when DB file not found
if connectionError == "1":
with open(connectionErrorFile, "w", newline="") as f:
writer = csv.writer(f)
writer.writerow([timeFMT, "Unable to Connect to SQLite Database, see {} for details".format(errorFile)])
sys.exit(1)
else:
# update ODK_Conf table with LastRunResult = 0
try:
sql = "UPDATE ODK_Conf SET odkLastRunResult = ?"
par = ("0",)
cursor.execute(sql, par)
db.commit()
if os.path.isfile(connectionErrorFile) == True:
try:
os.remove(connectionErrorFile)
except OSError:
print("Could not remove {}".format(connectionErrorFile))
# write errorMsg to errorFile if DB is inaccessible
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError):
db.rollback()
errorMsg[2] += "; unable to set odkLastRunResult to 0 (in ODK_Conf table)"
try:
with open(errorFile, "a", newline="") as f:
writer = csv.writer(f)
writer.writerow(errorMsg)
except OSError:
print(errorMsg)
# close DB resources and exit script
finally:
cursor.close()
db.close()
sys.exit(1)
def findKeyValue(key, d):
if key in d:
yield d[key]
for k in d:
if isinstance(d[k], list):
for i in d[k]:
for j in findKeyValue(key, i):
yield j
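# e.g. list(findKeyValue("reference", d=log["response"])) recursively walks
# the nested DHIS2 import summary and yields every value stored under a
# "reference" key, as used after posting events below.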
# error log files
errorFile = "./dbErrorLog.csv"
timeFMT = datetime.datetime.now().strftime("%Y-%m-%d_%H:%M:%S")
connectionError = "0"
connectionErrorFile = "./sqlConnect.csv"
## create error file if it does not exist
if os.path.isfile(errorFile) == False:
try:
with open(errorFile, "w", newline="") as f:
writer = csv.writer(f)
writer.writerow(["Date"] + ["Description"] + ["Additional Information"])
except (OSError) as e:
print(str(e))
sys.exit(1)
# connect to the database and configure the pipeline's settings for ODK Aggregate, openVA, and DHIS2.
if os.path.isfile(dbName) == False:
connectionError = "1"
with open(errorFile, "a", newline="") as f:
writer = csv.writer(f)
writer.writerow([timeFMT, "Database {}.db not found".format(dbName), ])
cleanup()
db = sqlcipher.connect(dbName)
db.execute("PRAGMA key = " + sqlitePW)
sqlODK = "SELECT odkID, odkURL, odkUser, odkPass, odkFormID, odkLastRun, odkLastRunResult FROM ODK_Conf"
sqlPipeline = "SELECT workingDirectory, openVA_Algorithm, algorithmMetadataCode, codSource FROM Pipeline_Conf"
sqlInterVA4 = "SELECT HIV, Malaria FROM InterVA4_Conf"
sqlAdvancedInterVA4 = "SELECT directory, filename, output, append, groupcode, replicate, replicate_bug1, replicate_bug2, write FROM Advanced_InterVA4_Conf"
sqlInSilicoVA = "SELECT Nsim FROM InSilicoVA_Conf"
sqlAdvancedInSilicoVA = "SELECT isNumeric, updateCondProb, keepProbbase_level, CondProb, CondProbNum, datacheck, datacheck_missing," \
+ "warning_write, external_sep, thin, burnin, auto_length, conv_csmf, jump_scale," \
+ "levels_prior, levels_strength, trunc_min, trunc_max, subpop, java_option, seed," \
+ "phy_code, phy_cat, phy_unknown, phy_external, phy_debias, exclude_impossible_cause, indiv_CI " \
+ "FROM Advanced_InSilicoVA_Conf"
sqlDHIS = "SELECT dhisURL, dhisUser, dhisPass, dhisOrgUnit FROM DHIS_Conf"
sqlCODCodes_WHO = "SELECT codName, codCode FROM COD_Codes_DHIS WHERE codSource = 'WHO'"
sqlCODCodes_Tariff = "SELECT codName, codCode FROM COD_Codes_DHIS WHERE codSource = 'Tariff'"
## grab configuration settings from SQLite DB
try:
# ODK configuration
cursor = db.cursor()
cursor.execute(sqlODK)
odkQuery = cursor.fetchall()
for row in odkQuery:
odkID = row[0]
odkURL = row[1]
odkUser = row[2]
odkPass = row[3]
odkFormID = row[4]
odkLastRun = row[5]
odkLastRunDate = datetime.datetime.strptime(odkLastRun, "%Y-%m-%d_%H:%M:%S").strftime("%Y/%m/%d")
odkLastRunDatePrev = (datetime.datetime.strptime(odkLastRunDate, "%Y/%m/%d") - datetime.timedelta(days=1)).strftime("%Y/%m/%d")
odkLastRunResult = row[6]
# Pipeline configuration
cursor.execute(sqlPipeline)
pipelineQuery = cursor.fetchall()
for row in pipelineQuery:
processDir = row[0]
pipelineAlgorithm = row[1]
algorithmMetadataCode = row[2]
codSource = row[3]
# InterVA4 configuration
cursor.execute(sqlInterVA4)
interVA4Query = cursor.fetchall()
for row in interVA4Query:
interVA_HIV = row[0]
interVA_Malaria = row[1]
# InterVA4 advanced configuration
cursor.execute(sqlAdvancedInterVA4)
advancedInterVA4Query = cursor.fetchall()
for row in advancedInterVA4Query:
interVA_directory = row[0]
interVA_filename = row[1]
interVA_output = row[2]
interVA_append = row[3]
interVA_groupcode = row[4]
interVA_replicate = row[5]
interVA_replicate_bug1 = row[6]
interVA_replicate_bug2 = row[7]
interVA_write = row[8]
# InSilicoVA configuration
cursor.execute(sqlInSilicoVA)
insilicoVAQuery = cursor.fetchall()
for row in insilicoVAQuery:
insilico_Nsim = row[0]
# InSilicoVA advanced configuration
cursor.execute(sqlAdvancedInSilicoVA)
advancedInsilicoVAQuery = cursor.fetchall()
for row in advancedInsilicoVAQuery:
insilico_isNumeric = row[ 0]
insilico_updateCondProb = row[ 1]
insilico_keepProbbase_level = row[ 2]
insilico_CondProb = row[ 3]
insilico_CondProbNum = row[ 4]
insilico_datacheck = row[ 5]
insilico_datacheck_missing = row[ 6]
insilico_warning_write = row[ 7]
insilico_external_sep = row[ 8]
insilico_thin = row[ 9]
insilico_burnin = row[10]
insilico_auto_length = row[11]
insilico_conv_csmf = row[12]
insilico_jump_scale = row[13]
insilico_levels_prior = row[14]
insilico_levels_strength = row[15]
insilico_trunc_min = row[16]
insilico_trunc_max = row[17]
insilico_subpop = row[18]
insilico_java_option = row[19]
insilico_seed = row[20]
insilico_phy_code = row[21]
insilico_phy_cat = row[22]
insilico_phy_unknown = row[23]
insilico_phy_external = row[24]
insilico_phy_debias = row[25]
insilico_exclude_impossible_cause = row[26]
insilico_indiv_CI = row[27]
# DHIS2 configuration
cursor.execute(sqlDHIS)
dhisQuery = cursor.fetchall()
for row in dhisQuery:
dhisURL = row[0]
dhisUser = row[1]
dhisPass = row[2]
dhisOrgUnit = row[3]
# CoD Codes for DHIS2
cursor.execute(sqlCODCodes_WHO)
resultsWHO = cursor.fetchall()
codesWHO = dict(resultsWHO)
cursor.execute(sqlCODCodes_Tariff)
resultsTariff = cursor.fetchall()
codesTariff = dict(resultsTariff)
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
try:
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
par = ("Problem selecting config information from ODK_Conf ", str(e), timeFMT)
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, str(e), "Problem selecting config information from ODK_Conf"]
cleanup(errorMsg)
#-------------------------------------------------------------------------------------------------------------------------------------------#
# create folders & files to store (ODK & openVA) input and output; also create call to ODK Briefcase
#-------------------------------------------------------------------------------------------------------------------------------------------#
odkBCExportDir = processDir + "/ODKExport"
odkBCExportFilename = "ODKExportNew.csv"
odkBCExportPrevious = odkBCExportDir + "/ODKExportPrevious.csv"
odkBCExportNewFile = odkBCExportDir + "/" + odkBCExportFilename
odkBCArgumentList = "java -jar ODK-Briefcase-v1.10.1.jar -oc -em -id '" + odkFormID + "' -sd '" + odkBCExportDir + "' -ed '" \
+ odkBCExportDir + "' -f '" + odkBCExportFilename + "' -url '" + odkURL + "' -u '" + odkUser \
+ "' -p '" + odkPass + "' -start '" + odkLastRunDatePrev + "'"
openVAFilesDir = processDir + "/OpenVAFiles"
openVAReadyFile = odkBCExportDir + "/OpenVAReadyFile.csv"
rScriptIn = openVAFilesDir + "/" + timeFMT + "/RScript_" + timeFMT + ".R"
rScriptOut = openVAFilesDir + "/" + timeFMT + "/RScript_" + timeFMT + ".Rout"
dhisDir = processDir + "/DHIS2"
if codSource=="WHO":
dhisCODCodes = codesWHO
else:
dhisCODCodes = codesTariff
# check if processing directory exists and create if necessary
if not os.path.exists(processDir):
try:
os.makedirs(processDir)
except OSError as e:
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
par = ("Could not create processing directory: " + processDir, str(e), timeFMT)
try:
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, str(e), "Could not create processing directory: " + processDir]
cleanup(errorMsg)
# create openVAFilesDir (if does not exist)
if not os.path.exists(openVAFilesDir + "/" + timeFMT):
try:
os.makedirs(openVAFilesDir + "/" + timeFMT)
except OSError as e:
try:
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
par = ("Could not create openVA Directory: " + openVAFilesDir + "/" + timeFMT, str(e), timeFMT)
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, str(e), "Could not create openVA directory: " + openVAFilesDir + "/" + timeFMT]
cleanup(errorMsg)
# make a copy of current ODK Briefcase Export file, to compare with new file once exported (if there is an existing export file)
if os.path.isfile(odkBCExportNewFile) == True and odkLastRunResult == 1 and not os.path.isfile(connectionErrorFile):
try:
shutil.copy(odkBCExportNewFile, odkBCExportPrevious)
except (OSError, shutil.Error) as e:
try:
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
par = ("Error: Trying to copy export files from ODK Briefcase", str(e), timeFMT)
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, str(e), "Error: Trying to copy export files from ODK Briefcase"]
cleanup(errorMsg)
try:
os.remove(openVAReadyFile)
except (OSError) as e:
try:
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime)"
par = ("Could not remove " + openVAReadyFile, str(e), timeFMT)
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, str(e), "Could not remove " + openVAReadyFile]
cleanup(errorMsg)
# launch ODK Briefcase to collect ODK Aggregate data and export to file for further processing
try:
process = subprocess.Popen(odkBCArgumentList, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
stdout, stderr = process.communicate()
rc = process.returncode
except:
try:
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
par = ("Could not launch ODK Briefcase Java Application", "Error", timeFMT)
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, "Error: Could not launch ODK Briefcase Java Application",""]
cleanup(errorMsg)
# catch application errors from ODK Briefcase and log into EventLog table
if rc != 0:
try:
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
par = (str(stderr), "Error", timeFMT)
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, str(stderr),""]
cleanup(errorMsg)
if "SEVERE" in str(stderr):
try:
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
par = (stderr,"Error", timeFMT)
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, str(stderr),""]
cleanup(errorMsg)
else:
try:
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
par = ("Briefcase Export Completed Successfully", "Information", timeFMT)
cursor.execute(sql, par)
db.commit()
sql = "UPDATE ODK_Conf SET odkLastRun=?, odkLastRunResult=?"
par = (timeFMT,"1")
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, str(e), "ODK Briefcase ran successfully but problems writing to DB (check odkLastRunResult in ODK_Conf)"]
cleanup(errorMsg)
# check if previous file exists from above operations and create delta file of new entries
if os.path.isfile(odkBCExportPrevious) == True:
try:
## WARNING: odkBCExportPrevious & odkBCExportNewFil (CSV files)
## contain sensitive VA information (leaving them in folder)
with open(odkBCExportPrevious, "r", newline="") as t1, open(odkBCExportNewFile, "r", newline="") as t2:
fileone = t1.readlines()
filetwo = t2.readlines()
header = filetwo[0]
with open(openVAReadyFile, "w", newline="") as outFile:
outFile.write(header)
for line in filetwo:
if line not in fileone:
outFile.write(line)
except OSError as e:
try:
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES"
par = ("Could not create: " + openVAReadyFile, "Error", timeFMT)
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, str(e), "Error: Could not create: " + openVAReadyFile]
cleanup(errorMsg)
else:
# if there is no pre-existing ODK Briefcase Export file, then copy and rename to OpenVAReadyFile.csv
try:
shutil.copy(odkBCExportNewFile, openVAReadyFile)
except (OSError, shutil.Error) as e:
try:
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
par = (e, "Error", timeFMT)
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, str(e), "Error: Could not copy: " + odkBCExportNewFile + " to: " + openVAReadyFile]
cleanup(errorMsg)
# if no records retrieved, then close up shop; otherwise, create R script for running openVA
## WARNING: openVAReadyFile (CSV file) contains sensitive VA information (leaving it in folder)
with open(openVAReadyFile, "r", newline="") as outFile:
nRecords = len(list(outFile)) - 1 ## take away 1 for the column header
if nRecords == 0:
try:
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
par = ("No Records From ODK Briefcase (nothing more to do)", "Information", timeFMT)
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, str(e), "No records from ODK Briefcase, but error writing to DB"]
cleanup(errorMsg)
try:
sql = "UPDATE ODK_Conf SET odkLastRun=?, odkLastRunResult=?"
par = (timeFMT,"1")
cursor.execute(sql, par)
db.commit()
cursor.close()
db.close()
sys.exit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, str(e),
"No records from ODK Briefcase, but error writing to DB (trying to set odkLastRun & odkLastRunResult)."]
cleanup(errorMsg)
try:
with open(rScriptIn, "w", newline="") as f:
f.write("date() \n")
f.write("library(openVA); library(CrossVA) \n")
f.write("getwd() \n")
f.write("records <- read.csv('" + openVAReadyFile + "') \n")
# InSilicoVA
if pipelineAlgorithm == "InSilicoVA":
f.write("names(data) <- tolower(data) \n")
f.write("data <- map_records_insilicova(records) \n")
## assign ID from survey (odkID) if specified, otherwise use uuid from ODK Aggregate
if odkID == None:
f.write("data$ID <- records$meta.instanceID \n")
else:
f.write("data$ID <- records$" + odkID + "\n")
f.write("results <- insilico(data=data, " + ", \n")
f.write("\t isNumeric=" + insilico_isNumeric + ", \n")
f.write("\t updateCondProb=" + insilico_updateCondProb + ", \n")
f.write("\t keepProbbase.level=" + insilico_keepProbbase_level + ", \n")
f.write("\t CondProb=" + insilico_CondProb + ", \n")
f.write("\t CondProbNum=" + insilico_CondProbNum + ", \n")
f.write("\t datacheck=" + insilico_datacheck + ", \n")
f.write("\t datacheck.missing=" + insilico_datacheck_missing + ", \n")
f.write("\t warning.write=" + insilico_warning_write + ", \n")
f.write("\t external.sep=" + insilico_external_sep + ", \n")
f.write("\t Nsim=" + insilico_Nsim + ", \n")
f.write("\t thin=" + insilico_thin + ", \n")
f.write("\t burnin=" + insilico_burnin + ", \n")
f.write("\t auto.length=" + insilico_auto_length + ", \n")
f.write("\t conv.csmf=" + insilico_conv_csmf + ", \n")
f.write("\t jump.scale=" + insilico_jump_scale + ", \n")
f.write("\t levels.prior=" + insilico_levels_prior + ", \n")
f.write("\t levels.strength=" + insilico_levels_strength + ", \n")
f.write("\t trunc.min=" + insilico_trunc_min + ", \n")
f.write("\t trunc.max=" + insilico_trunc_max + ", \n")
f.write("\t subpop=" + insilico_subpop + ", \n")
f.write("\t java.option=" + insilico_java_option + ", \n")
f.write("\t seed=" + insilico_seed + ", \n")
f.write("\t phy.code=" + insilico_phy_code + ", \n")
f.write("\t phy.cat=" + insilico_phy_cat + ", \n")
f.write("\t phy.unknown=" + insilico_phy_unknown + ", \n")
f.write("\t phy.external=" + insilico_phy_external + ", \n")
f.write("\t phy.debias=" + insilico_phy_debias + ", \n")
f.write("\t exclude.impossible.cause=" + insilico_exclude_impossible_cause + ", \n")
f.write("\t indiv.CI=" + insilico_indiv_CI + ") \n")
f.write("sex <- ifelse(tolower(data$male)=='y', 'Male', 'Female') \n")
# InterVA
if pipelineAlgorithm == "InterVA":
f.write("data <- map_records_interva4(records) \n")
## assign ID from survey (odkID) if specified, otherwise use uuid from ODK Aggregate
if odkID == None:
f.write("data$ID <- records$meta.instanceID \n")
else:
f.write("data$ID <- records$" + odkID + "\n")
f.write("results <- InterVA(Input=data, \n")
f.write("\t HIV= '" + interVA_HIV + "', \n")
f.write("\t Malaria = '" + interVA_Malaria + "', \n")
f.write("\t output='" + interVA_output + "', \n")
f.write("\t groupcode=" + interVA_groupcode + ", \n")
f.write("\t replicate=" + interVA_replicate + ", \n")
f.write("\t replicate.bug1=" + interVA_replicate_bug1 + ", \n")
f.write("\t replicate.bug2=" + interVA_replicate_bug2 + ", \n")
f.write("\t write=FALSE) \n")
f.write("sex <- ifelse(tolower(data$MALE)=='y', 'Male', 'Female') \n")
# write results
f.write("cod <- getTopCOD(results) \n")
f.write("hasCOD <- as.character(data$ID) %in% as.character(levels(cod$ID)) \n")
f.write("dob <- as.Date(as.character(records$consented.deceased_CRVS.info_on_deceased.Id10021), '%b %d, %Y') \n") ## HERE -- not sure if date format will vary!
f.write("dod <- as.Date(as.character(records$consented.deceased_CRVS.info_on_deceased.Id10023), '%b %d, %Y') \n") ## HERE -- not sure if date format will vary!
f.write("age <- floor(records$consented.deceased_CRVS.info_on_deceased.ageInDays/365.25) \n")
f.write("## create matrices for DHIS2 blob (data2) and transfer database (data3) \n")
f.write("## first column must be ID \n")
f.write("metadataCode <- '" + algorithmMetadataCode + "'\n")
f.write("cod2 <- rep('MISSING', nrow(data)); cod2[hasCOD] <- as.character(cod[,2]) \n")
f.write("data2 <- cbind(data[,-1], cod2, metadataCode) \n")
f.write("names(data2) <- c(names(data[,-1]), 'Cause of Death', 'Metadata') \n")
f.write("evaBlob <- cbind(rep(as.character(data[,1]), each=ncol(data2)), rep(names(data2)), c(apply(data2, 1, c))) \n")
f.write("colnames(evaBlob) <- c('ID', 'Attribute', 'Value') \n")
f.write("write.csv(evaBlob, file='" + openVAFilesDir + "/entityAttributeValue.csv', row.names=FALSE, na='') \n\n")
f.write("data3 <- cbind(as.character(data[,1]), sex, dob, dod, age, cod2, metadataCode, data[,-1]) \n")
f.write("names(data3) <- c('id', 'sex', 'dob', 'dod', 'age', 'cod', 'metadataCode', names(data[,-1])) \n")
f.write("write.csv(data3, file='" + openVAFilesDir + "/recordStorage.csv', row.names=FALSE, na='') \n")
except OSError as e:
try:
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
par = ("Could not create R Script File","Error", timeFMT)
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, str(e), "Error: Could not create R Script File"]
cleanup(errorMsg)
# run R script
rBatch = "R CMD BATCH --vanilla " + rScriptIn + " " + rScriptOut
rprocess = subprocess.Popen(rBatch, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
stdout, stderr = rprocess.communicate()
rrc = rprocess.returncode
if rrc != 0:
try:
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
par = ("Could not run R Script", str(stderr), timeFMT)
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, "Error: Could not run R Script", str(stderr)]
cleanup(errorMsg)
else:
try:
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
par = ("OpenVA Analysis Completed Successfully", "Information", timeFMT)
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, str(e), "OpenVA Analysis Completed Successfully (error committing message to database)."]
cleanup(errorMsg)
# push results to DHIS2
try:
api = Dhis(dhisURL, dhisUser, dhisPass)
except (requests.RequestException) as e:
try:
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
par = ("Unable to connect to DHIS2", str(e), timeFMT)
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, str(e), "Error: Unable to connect to DHIS2"]
cleanup(errorMsg)
# verify VA program and orgUnit
try:
vaPrograms = api.get("programs", params={"filter": "name:like:Verbal Autopsy"}).get("programs")
orgUnitValid = len(api.get("organisationUnits", params={"filter": "id:eq:{}".format(dhisOrgUnit)})["organisationUnits"])==1
if not orgUnitValid:
try:
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
par = ("Organisation Unit UID could not be found.", "Error", timeFMT)
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, "Error: Organisation Unit UID could not be found.", "Error committing message to database"]
cleanup(errorMsg)
if not vaPrograms:
try:
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
par = ("'Verbal Autopsy' program not found", "Error", timeFMT)
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, "Error: 'Verbal Autopsy' program not found.", "Error committing message to database"]
cleanup(errorMsg)
elif len(vaPrograms) > 1:
try:
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
par = ("More than one 'Verbal Autopsy' found.", "Error", timeFMT)
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, "Error: More than one 'Verbal Autopsy' found.", "Error committing message to database"]
cleanup(errorMsg)
except:
try:
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
par = ("Error using Dhis.get, unable to either get UID for VA Program or verify Org Unit ID", "Error", timeFMT)
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, "Error: Error using Dhis.get, unable to either get UID for VA Program or verify Org Unit ID",
"Error committing message to database"]
cleanup(errorMsg)
vaProgramUID = vaPrograms[0]["id"]
blobPath = os.path.join(dhisDir, "blobs")
try:
if not os.path.isdir(blobPath):
os.makedirs(blobPath)
except OSError as e:
try:
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
par = ("Unable to create folder for DHIS blobs.", "Error", timeFMT)
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, str(e), "Error: Unable to create folder for DHIS blobs."]
cleanup(errorMsg)
events = []
export = {}
## read in VA data (with COD and algorithm metadata) from csv's (and create groups by ID for Entity-Attribute-Value file)
try:
## WARNING: The following CSV file contains sensitive VA information (leaving it in folder)!
dfDHIS2 = pd.read_csv(openVAFilesDir + "/entityAttributeValue.csv")
except:
try:
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
par = ("Unable to access file: " + openVAFilesDir + "entityAttributeVAlue.csv", "Error", timeFMT)
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, "Unable to access file: " + openVAFilesDir + "entityAttributeVAlue.csv",
"Error committing message to database"]
cleanup(errorMsg)
grouped = dfDHIS2.groupby(["ID"])
## prepare events for DHIS2 export
try:
with open(openVAFilesDir + "/recordStorage.csv", "r", newline="") as csvIn:
with open(openVAFilesDir + "/newStorage.csv", "w", newline="") as csvOut:
reader = csv.reader(csvIn)
writer = csv.writer(csvOut, lineterminator="\n")
header = next(reader)
header.extend(["dhisVerbalAutopsyID", "pipelineOutcome"])
writer.writerow(header)
for row in reader:
if row[5]!="MISSING":
vaID = str(row[0])
blobFile = "{}.db".format(os.path.join(dhisDir, "blobs", vaID))
blobRecord = grouped.get_group(str(row[0]))
blobEVA = blobRecord.values.tolist()
## create DHIS2 blob
try:
create_db(blobFile, blobEVA)
except:
try:
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
par = ("Unable to create DHIS2 BLOB", "Error", timeFMT)
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, "Unable to create DHIS2 BLOB", "Error committing message to database"]
cleanup(errorMsg)
## post DHIS2 blob
try:
fileID = api.post_blob(blobFile)
except requests.RequestException as e:
try:
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
par = ("Unable to post BLOB to DHIS2", str(e), timeFMT)
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, str(e), "Unable to post BLOB to DHIS2"]
cleanup(errorMsg)
sex = row[1].lower()
dob = row[2]
if row[3] =="":
eventDate = datetime.date(9999,9,9)
else:
dod = datetime.datetime.strptime(row[3], "%Y-%m-%d")
eventDate = datetime.date(dod.year, dod.month, dod.day)
age = row[4]
if row[5] == "Undetermined":
codCode = "99"
else:
codCode = getCODCode(dhisCODCodes, row[5])
e = VerbalAutopsyEvent(vaID, vaProgramUID, dhisOrgUnit,
eventDate, sex, dob, age, codCode, algorithmMetadataCode, fileID)
events.append(e.format_to_dhis2(dhisUser))
row.extend([vaID, "Pushing to DHIS2"])
writer.writerow(row)
else:
row.extend(["", "No CoD Assigned"])
writer.writerow(row)
except:
try:
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
par = ("Unable to access one of record/newStorage.csv files in folder: " + openVAFilesDir, "Error", timeFMT)
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, "Unable to access one of record/newStorage.csv files in folder: " + openVAFilesDir,
"Error committing message to database"]
cleanup(errorMsg)
export["events"] = events
try:
log = api.post("events", data=export)
except requests.RequestException as e:
try:
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
par = ("Unable to post events to DHIS2 VA Program.", str(e), timeFMT)
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, str(e), "Unable to post events to DHIS2 VA Program."]
cleanup(errorMsg)
if 'importSummaries' not in log['response'].keys():
try:
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
par = ("Failed to retrieve summary from post to DHIS2 VA Program.", "Error", timeFMT)
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, "Error", "Failed to retrieve summary from post to DHIS2 VA Program."]
cleanup(errorMsg)
if log["httpStatusCode"] == 200:
nPosted = len(log['response']['importSummaries'])
try:
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
par = ("Successfully posted {} events to DHIS2 VA Program.".format(nPosted), "Information", timeFMT)
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, str(e), "Successfully posted {} events to DHIS2 VA Program, but error writing to DB".format(nPosted)]
cleanup(errorMsg)
vaReferences = list(findKeyValue("reference", d=log["response"]))
dfNewStorage = pd.read_csv(openVAFilesDir + "/newStorage.csv")
try:
for vaReference in vaReferences:
postedDataValues = api.get("events/{}".format(vaReference)).get("dataValues")
postedVAIDIndex = next((index for (index, d) in enumerate(postedDataValues) if d["dataElement"]=="htm6PixLJNy"), None)
postedVAID = postedDataValues[postedVAIDIndex]["value"]
rowVAID = dfNewStorage["dhisVerbalAutopsyID"] == postedVAID
dfNewStorage.loc[rowVAID,"pipelineOutcome"] = "Pushed to DHIS2"
except:
try:
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
par = ("Error trying to verify events posted to DHIS2", "Error", timeFMT)
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, "Error trying to verify events posted to DHIS2", ""]
cleanup(errorMsg)
# store results in database
try:
for row in dfNewStorage.itertuples():
xferDBID = row[1]
xferDBOutcome = row[254]
vaData = row[1],row[8:253]
vaDataFlat = tuple([y for x in vaData for y in (x if isinstance(x, tuple) else (x,))])
xferDBRecord = pickle.dumps(vaDataFlat)
sqlXferDB = "INSERT INTO VA_Storage (id, outcome, record, dateEntered) VALUES (?,?,?,?)"
par = [xferDBID, xferDBOutcome, sqlite3.Binary(xferDBRecord), timeFMT]
cursor.execute(sqlXferDB, par)
db.commit()
## note: to read back in: (1) cursor.execute(SQL SELECT STATEMENT) (2) results = pickle.loads(sqlResult[0])
## An alternative version of storing VA records to SQLite DB(not relying on pickle)
# for row in dfNewStorage.itertuples():
# xferDBID = row[1]
# xferDBOutcome = row[254]
# with open("xferDBRecord.txt", "w", newline="") as f:
# vaData = row[1],row[8:253]
# vaDataFlat = tuple([y for x in vaData for y in (x if isinstance(x, tuple) else (x,))])
# writer = csv.writer(f, lineterminator="\n")
# writer.writerow(vaDataFlat)
# with open("xferDBRecord.txt", "rb") as f:
# xferDBRecord = f.read()
# sqlXferDB = "INSERT INTO VA_Storage (id, outcome, record, dateEntered) VALUES (?,?,?,?)"
# par = [xferDBID, xferDBOutcome, sqlite3.Binary(xferDBRecord), timeFMT]
# cursor.execute(sqlXferDB, par)
# db.commit()
except:
try:
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
par = ("Error storing Blobs to {}.db".format(dbName), "Error", timeFMT)
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, "Error storing Blobs to {}.db".format(dbName), ""]
cleanup(errorMsg)
try:
nNewStorage = dfNewStorage.shape[0]
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
par = ("Stored {} records to {}.db".format(nNewStorage, dbName), "Information", timeFMT)
cursor.execute(sql, par)
db.commit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, str(e),
"Stored {} records to {}.db, but error trying to log message to EventLog".format(nNewStorage, dbName)]
cleanup(errorMsg)
# all done!
try:
sql = "INSERT INTO EventLog (eventDesc, eventType, eventTime) VALUES (?, ?, ?)"
par = ("Successful completion of Pipeline", "Information", str(datetime.datetime.now()))
cursor.execute(sql, par)
db.commit()
cursor.close()
db.close()
sys.exit()
except (sqlcipher.Error, sqlcipher.Warning, sqlcipher.DatabaseError) as e:
db.rollback()
errorMsg = [timeFMT, str(e), "Finished executing Pipeline steps, but error trying to log last message."]
cleanup(errorMsg)
| gpl-3.0 |
gfyoung/pandas | pandas/tests/frame/indexing/test_getitem.py | 2 | 5364 | import numpy as np
import pytest
from pandas import (
Categorical,
CategoricalDtype,
CategoricalIndex,
DataFrame,
MultiIndex,
Series,
Timestamp,
get_dummies,
period_range,
)
import pandas._testing as tm
from pandas.core.arrays import SparseArray
class TestGetitem:
def test_getitem_unused_level_raises(self):
# GH#20410
mi = MultiIndex(
levels=[["a_lot", "onlyone", "notevenone"], [1970, ""]],
codes=[[1, 0], [1, 0]],
)
df = DataFrame(-1, index=range(3), columns=mi)
with pytest.raises(KeyError, match="notevenone"):
df["notevenone"]
def test_getitem_periodindex(self):
rng = period_range("1/1/2000", periods=5)
df = DataFrame(np.random.randn(10, 5), columns=rng)
ts = df[rng[0]]
tm.assert_series_equal(ts, df.iloc[:, 0])
# GH#1211; smoketest unrelated to the rest of this test
repr(df)
ts = df["1/1/2000"]
tm.assert_series_equal(ts, df.iloc[:, 0])
def test_getitem_list_of_labels_categoricalindex_cols(self):
# GH#16115
cats = Categorical([Timestamp("12-31-1999"), Timestamp("12-31-2000")])
expected = DataFrame(
[[1, 0], [0, 1]], dtype="uint8", index=[0, 1], columns=cats
)
dummies = get_dummies(cats)
result = dummies[list(dummies.columns)]
tm.assert_frame_equal(result, expected)
def test_getitem_sparse_column_return_type_and_dtype(self):
# https://github.com/pandas-dev/pandas/issues/23559
data = SparseArray([0, 1])
df = DataFrame({"A": data})
expected = Series(data, name="A")
result = df["A"]
tm.assert_series_equal(result, expected)
# Also check iloc and loc while we're here
result = df.iloc[:, 0]
tm.assert_series_equal(result, expected)
result = df.loc[:, "A"]
tm.assert_series_equal(result, expected)
class TestGetitemListLike:
def test_getitem_list_missing_key(self):
# GH#13822, incorrect error string with non-unique columns when missing
# column is accessed
df = DataFrame({"x": [1.0], "y": [2.0], "z": [3.0]})
df.columns = ["x", "x", "z"]
# Check that we get the correct value in the KeyError
with pytest.raises(KeyError, match=r"\['y'\] not in index"):
df[["x", "y", "z"]]
class TestGetitemCallable:
def test_getitem_callable(self, float_frame):
# GH#12533
result = float_frame[lambda x: "A"]
expected = float_frame.loc[:, "A"]
tm.assert_series_equal(result, expected)
result = float_frame[lambda x: ["A", "B"]]
expected = float_frame.loc[:, ["A", "B"]]
tm.assert_frame_equal(result, expected)
df = float_frame[:3]
result = df[lambda x: [True, False, True]]
expected = float_frame.iloc[[0, 2], :]
tm.assert_frame_equal(result, expected)
def test_loc_multiindex_columns_one_level(self):
# GH#29749
df = DataFrame([[1, 2]], columns=[["a", "b"]])
expected = DataFrame([1], columns=[["a"]])
result = df["a"]
tm.assert_frame_equal(result, expected)
result = df.loc[:, "a"]
tm.assert_frame_equal(result, expected)
class TestGetitemBooleanMask:
def test_getitem_bool_mask_categorical_index(self):
df3 = DataFrame(
{
"A": np.arange(6, dtype="int64"),
},
index=CategoricalIndex(
[1, 1, 2, 1, 3, 2],
dtype=CategoricalDtype([3, 2, 1], ordered=True),
name="B",
),
)
df4 = DataFrame(
{
"A": np.arange(6, dtype="int64"),
},
index=CategoricalIndex(
[1, 1, 2, 1, 3, 2],
dtype=CategoricalDtype([3, 2, 1], ordered=False),
name="B",
),
)
result = df3[df3.index == "a"]
expected = df3.iloc[[]]
tm.assert_frame_equal(result, expected)
result = df4[df4.index == "a"]
expected = df4.iloc[[]]
tm.assert_frame_equal(result, expected)
result = df3[df3.index == 1]
expected = df3.iloc[[0, 1, 3]]
tm.assert_frame_equal(result, expected)
result = df4[df4.index == 1]
expected = df4.iloc[[0, 1, 3]]
tm.assert_frame_equal(result, expected)
# since we have an ordered categorical
# CategoricalIndex([1, 1, 2, 1, 3, 2],
# categories=[3, 2, 1],
# ordered=True,
# name='B')
result = df3[df3.index < 2]
expected = df3.iloc[[4]]
tm.assert_frame_equal(result, expected)
result = df3[df3.index > 1]
expected = df3.iloc[[]]
tm.assert_frame_equal(result, expected)
# unordered
# cannot be compared
# CategoricalIndex([1, 1, 2, 1, 3, 2],
# categories=[3, 2, 1],
# ordered=False,
# name='B')
msg = "Unordered Categoricals can only compare equality or not"
with pytest.raises(TypeError, match=msg):
df4[df4.index < 2]
with pytest.raises(TypeError, match=msg):
df4[df4.index > 1]
| bsd-3-clause |
koobonil/Boss2D | Boss2D/addon/tensorflow-1.2.1_for_boss/tensorflow/python/training/server_lib_same_variables_no_clear_test.py | 125 | 2159 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.GrpcServer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import server_lib
class SameVariablesNoClearTest(test.TestCase):
# Verifies behavior of multiple variables with multiple sessions connecting to
# the same server.
# TODO(b/34465411): Starting multiple servers with different configurations
# in the same test is flaky. Move this test case back into
# "server_lib_test.py" when this is no longer the case.
def testSameVariablesNoClear(self):
server = server_lib.Server.create_local_server()
with session.Session(server.target) as sess_1:
v0 = variables.Variable([[2, 1]], name="v0")
v1 = variables.Variable([[1], [2]], name="v1")
v2 = math_ops.matmul(v0, v1)
sess_1.run([v0.initializer, v1.initializer])
self.assertAllEqual([[4]], sess_1.run(v2))
with session.Session(server.target) as sess_2:
new_v0 = ops.get_default_graph().get_tensor_by_name("v0:0")
new_v1 = ops.get_default_graph().get_tensor_by_name("v1:0")
new_v2 = math_ops.matmul(new_v0, new_v1)
self.assertAllEqual([[4]], sess_2.run(new_v2))
if __name__ == "__main__":
test.main()
| mit |
nilsgrabbert/spark | examples/src/main/python/streaming/hdfs_wordcount.py | 85 | 1865 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Counts words in new text files created in the given directory
Usage: hdfs_wordcount.py <directory>
<directory> is the directory that Spark Streaming will use to find and read new text files.
To run this on your local machine on directory `localdir`, run this example
$ bin/spark-submit examples/src/main/python/streaming/hdfs_wordcount.py localdir
Then create a text file in `localdir` and the words in the file will get counted.
"""
from __future__ import print_function
import sys
from pyspark import SparkContext
from pyspark.streaming import StreamingContext
if __name__ == "__main__":
if len(sys.argv) != 2:
print("Usage: hdfs_wordcount.py <directory>", file=sys.stderr)
exit(-1)
sc = SparkContext(appName="PythonStreamingHDFSWordCount")
ssc = StreamingContext(sc, 1)
lines = ssc.textFileStream(sys.argv[1])
counts = lines.flatMap(lambda line: line.split(" "))\
.map(lambda x: (x, 1))\
.reduceByKey(lambda a, b: a+b)
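    # Illustrative: a line "a b a" becomes [('a', 1), ('b', 1), ('a', 1)]
    # after flatMap + map, and [('a', 2), ('b', 1)] after reduceByKey.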
counts.pprint()
ssc.start()
ssc.awaitTermination()
| apache-2.0 |
ddrmanxbxfr/servo | tests/wpt/css-tests/tools/py/testing/code/test_code.py | 216 | 4212 | import py
import sys
def test_ne():
code1 = py.code.Code(compile('foo = "bar"', '', 'exec'))
assert code1 == code1
code2 = py.code.Code(compile('foo = "baz"', '', 'exec'))
assert code2 != code1
def test_code_gives_back_name_for_not_existing_file():
name = 'abc-123'
co_code = compile("pass\n", name, 'exec')
assert co_code.co_filename == name
code = py.code.Code(co_code)
assert str(code.path) == name
assert code.fullsource is None
def test_code_with_class():
class A:
pass
py.test.raises(TypeError, "py.code.Code(A)")
if True:
def x():
pass
def test_code_fullsource():
code = py.code.Code(x)
full = code.fullsource
assert 'test_code_fullsource()' in str(full)
def test_code_source():
code = py.code.Code(x)
src = code.source()
expected = """def x():
pass"""
assert str(src) == expected
def test_frame_getsourcelineno_myself():
def func():
return sys._getframe(0)
f = func()
f = py.code.Frame(f)
source, lineno = f.code.fullsource, f.lineno
assert source[lineno].startswith(" return sys._getframe(0)")
def test_getstatement_empty_fullsource():
def func():
return sys._getframe(0)
f = func()
f = py.code.Frame(f)
prop = f.code.__class__.fullsource
try:
f.code.__class__.fullsource = None
assert f.statement == py.code.Source("")
finally:
f.code.__class__.fullsource = prop
def test_code_from_func():
co = py.code.Code(test_frame_getsourcelineno_myself)
assert co.firstlineno
assert co.path
def test_builtin_patch_unpatch(monkeypatch):
cpy_builtin = py.builtin.builtins
comp = cpy_builtin.compile
def mycompile(*args, **kwargs):
return comp(*args, **kwargs)
class Sub(AssertionError):
pass
monkeypatch.setattr(cpy_builtin, 'AssertionError', Sub)
monkeypatch.setattr(cpy_builtin, 'compile', mycompile)
py.code.patch_builtins()
assert cpy_builtin.AssertionError != Sub
assert cpy_builtin.compile != mycompile
py.code.unpatch_builtins()
assert cpy_builtin.AssertionError is Sub
assert cpy_builtin.compile == mycompile
def test_unicode_handling():
value = py.builtin._totext('\xc4\x85\xc4\x87\n', 'utf-8').encode('utf8')
def f():
raise Exception(value)
excinfo = py.test.raises(Exception, f)
s = str(excinfo)
if sys.version_info[0] < 3:
u = unicode(excinfo)
def test_code_getargs():
def f1(x):
pass
c1 = py.code.Code(f1)
assert c1.getargs(var=True) == ('x',)
def f2(x, *y):
pass
c2 = py.code.Code(f2)
assert c2.getargs(var=True) == ('x', 'y')
def f3(x, **z):
pass
c3 = py.code.Code(f3)
assert c3.getargs(var=True) == ('x', 'z')
def f4(x, *y, **z):
pass
c4 = py.code.Code(f4)
assert c4.getargs(var=True) == ('x', 'y', 'z')
def test_frame_getargs():
def f1(x):
return sys._getframe(0)
fr1 = py.code.Frame(f1('a'))
assert fr1.getargs(var=True) == [('x', 'a')]
def f2(x, *y):
return sys._getframe(0)
fr2 = py.code.Frame(f2('a', 'b', 'c'))
assert fr2.getargs(var=True) == [('x', 'a'), ('y', ('b', 'c'))]
def f3(x, **z):
return sys._getframe(0)
fr3 = py.code.Frame(f3('a', b='c'))
assert fr3.getargs(var=True) == [('x', 'a'), ('z', {'b': 'c'})]
def f4(x, *y, **z):
return sys._getframe(0)
fr4 = py.code.Frame(f4('a', 'b', c='d'))
assert fr4.getargs(var=True) == [('x', 'a'), ('y', ('b',)),
('z', {'c': 'd'})]
class TestExceptionInfo:
def test_bad_getsource(self):
try:
if False: pass
else: assert False
except AssertionError:
exci = py.code.ExceptionInfo()
assert exci.getrepr()
class TestTracebackEntry:
def test_getsource(self):
try:
if False: pass
else: assert False
except AssertionError:
exci = py.code.ExceptionInfo()
entry = exci.traceback[0]
source = entry.getsource()
assert len(source) == 4
assert 'else: assert False' in source[3]
| mpl-2.0 |
dralves/nixysa | third_party/ply-3.1/test/lex_hedit.py | 174 | 1141 | # -----------------------------------------------------------------------------
# hedit.py
#
# Parsing of Fortran H Edit descriptions (Contributed by Pearu Peterson)
#
# These tokens can't be easily tokenized because they are of the following
# form:
#
# nHc1...cn
#
# where n is a positive integer and c1 ... cn are characters.
#
# This example shows how to modify the state of the lexer to parse
# such tokens
# -----------------------------------------------------------------------------
import sys
if ".." not in sys.path: sys.path.insert(0,"..")
import ply.lex as lex
tokens = (
'H_EDIT_DESCRIPTOR',
)
# Tokens
t_ignore = " \t\n"
def t_H_EDIT_DESCRIPTOR(t):
r"\d+H.*" # This grabs all of the remaining text
i = t.value.index('H')
n = eval(t.value[:i])
# Adjust the tokenizing position
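    # The rule above consumed everything to end-of-line; only the first
    # i + 1 + n characters (the digits, the 'H', and the n payload chars)
    # belong to this token, so rewind lexpos by the surplus.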
t.lexer.lexpos -= len(t.value) - (i+1+n)
t.value = t.value[i+1:i+1+n]
return t
def t_error(t):
print("Illegal character '%s'" % t.value[0])
t.lexer.skip(1)
# Build the lexer
lex.lex()
lex.runmain(data="3Habc 10Habcdefghij 2Hxy")
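# Illustrative only: for the sample input above, the lexer should produce
# three H_EDIT_DESCRIPTOR tokens whose values are 'abc', 'abcdefghij' and 'xy'.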
| apache-2.0 |
franck-talbart/codelet_tuning_infrastructure | cts/plugins/plugin_config.py | 1 | 12594 | #!/usr/bin/python
#************************************************************************
# Codelet Tuning Infrastructure
# Copyright (C) 2010-2015 Intel Corporation, CEA, GENCI, and UVSQ
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#************************************************************************
# Authors: Franck Talbart, Mathieu Bordet, Nicolas Petit
config =\
{
"Application":
{
"actions":
{
"application_create_compile_entry":
{
"name": "Compile",
"url": "?page=repositories_summary&main=action_cts_input_form&mode=add"
}
},
},
"Binary":
{
"actions":
{
"binary_profile_maqao_perf":
{
"name": "Profile with MAQAO perf",
"url": "?page=repositories_summary&main=action_cts_input_form&mode=add"
},
"binary_profile_icc":
{
"name": "Profile with ICC",
"url": "?page=repositories_summary&main=action_cts_input_form&mode=add"
},
"binary_profile_vtune":
{
"name": "Profile with Vtune",
"url": "?page=repositories_summary&main=action_cts_input_form&mode=add"
},
"binary_create_decan_entry":
{
"name": "DECAN",
"url": "?page=repositories_summary&main=action_cts_input_form&mode=add"
}
}
},
"Compile":
{
"daemon_commands": ["run"],
"daemon_produced_by":
{
"run": "entry",
"init": "application"
},
"actions":
{
"compile_run":
{
"name": "Run Compilation",
"url": "?page=repositories_summary&main=action_cts_input_form&mode=add"
},
}
},
"Daemon":
{
"daemon_produced_by":
{
"init": "produced_by"
},
"actions":
{
"kill":
{
"name": "Kill",
"url": "?page=repositories_summary&main=action_run"
},
"run":
{
"name": "Run",
"url": "?page=repositories_summary&main=action_run"
},
}
},
"Decan":
{
"daemon_commands": ["run"],
"daemon_produced_by":
{
"run": "entry",
"init": "original_binary"
},
"actions":
{
"run":
{
"name": "Run DECAN",
"url": "?page=repositories_summary&main=action_run"
},
}
},
"IccProfiler":
{
"daemon_commands": ["run"],
"daemon_produced_by":
{
"init": "binary",
"run": "entry"
},
"actions":
{
"icc_profiler_run":
{
"name": "Run the Icc profiler",
"url": "?page=repositories_summary&main=action_cts_input_form&mode=add"
},
}
},
"LoopGroup":
{
"actions":
{
"loop_group_create_maqao_cqa_entry":
{
"name": "MAQAO CQA",
"url": "?page=repositories_summary&main=action_cts_input_form&mode=add"
}
}
},
"MaqaoCqa":
{
"daemon_commands": ["run"],
"daemon_produced_by":
{
"run": "entry",
"init": "loop_group"
},
"actions":
{
"maqao_cqa_run":
{
"name": "Run MAQAO CQA",
"url": "?page=repositories_summary&main=action_cts_input_form&mode=add"
},
}
},
"MaqaoPerf":
{
"daemon_commands": ["run"],
"daemon_produced_by":
{
"run": "entry",
"init": "binary"
},
"actions":
{
"maqao_perf_run":
{
"name": "Run MAQAO Perf",
"url": "?page=repositories_summary&main=action_cts_input_form&mode=add"
},
}
},
"Process":
{
"daemon_commands": [
"run_command",
"run"
]
},
"Rm":
{
"daemon_commands": ["repository"]
},
"Vtune":
{
"daemon_commands": ["run"],
"daemon_produced_by":
{
"run": "entry",
"init": "binary",
},
"actions":
{
"vtune_run":
{
"name": "Run Vtune",
"url": "?page=repositories_summary&main=action_cts_input_form&mode=add"
},
}
}
}
| gpl-3.0 |
abhikeshav/ydk-py | cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_ip_pfilter_oper.py | 1 | 21114 | """ Cisco_IOS_XR_ip_pfilter_oper
This module contains a collection of YANG definitions
for Cisco IOS\-XR ip\-pfilter package operational data.
This module contains definitions
for the following management objects\:
pfilter\-ma\: Root class of PfilterMa Oper schema
Copyright (c) 2013\-2015 by Cisco Systems, Inc.
All rights reserved.
"""
import re
import collections
from enum import Enum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk.errors import YPYError, YPYModelError
class PfilterMa(object):
"""
Root class of PfilterMa Oper schema
.. attribute:: nodes
Node\-specific operational data
**type**\: :py:class:`Nodes <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_pfilter_oper.PfilterMa.Nodes>`
"""
_prefix = 'ip-pfilter-oper'
_revision = '2015-11-09'
def __init__(self):
self.nodes = PfilterMa.Nodes()
self.nodes.parent = self
class Nodes(object):
"""
Node\-specific operational data
.. attribute:: node
PfilterMa operational data for a particular node
**type**\: list of :py:class:`Node <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_pfilter_oper.PfilterMa.Nodes.Node>`
"""
_prefix = 'ip-pfilter-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.node = YList()
self.node.parent = self
self.node.name = 'node'
class Node(object):
"""
PfilterMa operational data for a particular
node
.. attribute:: node_name <key>
The node
**type**\: str
**pattern:** ([a\-zA\-Z0\-9\_]\*\\d+/){1,2}([a\-zA\-Z0\-9\_]\*\\d+)
.. attribute:: process
Operational data for pfilter
**type**\: :py:class:`Process <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_pfilter_oper.PfilterMa.Nodes.Node.Process>`
"""
_prefix = 'ip-pfilter-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.node_name = None
self.process = PfilterMa.Nodes.Node.Process()
self.process.parent = self
class Process(object):
"""
Operational data for pfilter
.. attribute:: ipv4
Operational data for pfilter
**type**\: :py:class:`Ipv4 <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_pfilter_oper.PfilterMa.Nodes.Node.Process.Ipv4>`
.. attribute:: ipv6
Operational data for pfilter
**type**\: :py:class:`Ipv6 <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_pfilter_oper.PfilterMa.Nodes.Node.Process.Ipv6>`
"""
_prefix = 'ip-pfilter-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.ipv4 = PfilterMa.Nodes.Node.Process.Ipv4()
self.ipv4.parent = self
self.ipv6 = PfilterMa.Nodes.Node.Process.Ipv6()
self.ipv6.parent = self
class Ipv6(object):
"""
Operational data for pfilter
.. attribute:: interfaces
Operational data for pfilter
**type**\: :py:class:`Interfaces <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_pfilter_oper.PfilterMa.Nodes.Node.Process.Ipv6.Interfaces>`
"""
_prefix = 'ip-pfilter-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.interfaces = PfilterMa.Nodes.Node.Process.Ipv6.Interfaces()
self.interfaces.parent = self
class Interfaces(object):
"""
Operational data for pfilter
.. attribute:: interface
Operational data for pfilter
**type**\: list of :py:class:`Interface <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_pfilter_oper.PfilterMa.Nodes.Node.Process.Ipv6.Interfaces.Interface>`
"""
_prefix = 'ip-pfilter-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.interface = YList()
self.interface.parent = self
self.interface.name = 'interface'
class Interface(object):
"""
Operational data for pfilter
.. attribute:: interface_name <key>
Name of the interface
**type**\: str
**pattern:** (([a\-zA\-Z0\-9\_]\*\\d+/){3}\\d+)\|(([a\-zA\-Z0\-9\_]\*\\d+/){4}\\d+)\|(([a\-zA\-Z0\-9\_]\*\\d+/){3}\\d+\\.\\d+)\|(([a\-zA\-Z0\-9\_]\*\\d+/){2}([a\-zA\-Z0\-9\_]\*\\d+))\|(([a\-zA\-Z0\-9\_]\*\\d+/){2}([a\-zA\-Z0\-9\_]+))\|([a\-zA\-Z0\-9\_\-]\*\\d+)\|([a\-zA\-Z0\-9\_\-]\*\\d+\\.\\d+)\|(mpls)\|(dwdm)
.. attribute:: acl_information
Interface ACL Details
**type**\: str
**mandatory**\: True
"""
_prefix = 'ip-pfilter-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.interface_name = None
self.acl_information = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
if self.interface_name is None:
raise YPYModelError('Key property interface_name is None')
return self.parent._common_path +'/Cisco-IOS-XR-ip-pfilter-oper:interface[Cisco-IOS-XR-ip-pfilter-oper:interface-name = ' + str(self.interface_name) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.interface_name is not None:
return True
if self.acl_information is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ip_pfilter_oper as meta
return meta._meta_table['PfilterMa.Nodes.Node.Process.Ipv6.Interfaces.Interface']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-ip-pfilter-oper:interfaces'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.interface is not None:
for child_ref in self.interface:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ip_pfilter_oper as meta
return meta._meta_table['PfilterMa.Nodes.Node.Process.Ipv6.Interfaces']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-ip-pfilter-oper:ipv6'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.interfaces is not None and self.interfaces._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ip_pfilter_oper as meta
return meta._meta_table['PfilterMa.Nodes.Node.Process.Ipv6']['meta_info']
class Ipv4(object):
"""
Operational data for pfilter
.. attribute:: interfaces
Operational data for pfilter
**type**\: :py:class:`Interfaces <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_pfilter_oper.PfilterMa.Nodes.Node.Process.Ipv4.Interfaces>`
"""
_prefix = 'ip-pfilter-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.interfaces = PfilterMa.Nodes.Node.Process.Ipv4.Interfaces()
self.interfaces.parent = self
class Interfaces(object):
"""
Operational data for pfilter
.. attribute:: interface
Operational data for pfilter
**type**\: list of :py:class:`Interface <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ip_pfilter_oper.PfilterMa.Nodes.Node.Process.Ipv4.Interfaces.Interface>`
"""
_prefix = 'ip-pfilter-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.interface = YList()
self.interface.parent = self
self.interface.name = 'interface'
class Interface(object):
"""
Operational data for pfilter
.. attribute:: interface_name <key>
Name of the interface
**type**\: str
**pattern:** (([a\-zA\-Z0\-9\_]\*\\d+/){3}\\d+)\|(([a\-zA\-Z0\-9\_]\*\\d+/){4}\\d+)\|(([a\-zA\-Z0\-9\_]\*\\d+/){3}\\d+\\.\\d+)\|(([a\-zA\-Z0\-9\_]\*\\d+/){2}([a\-zA\-Z0\-9\_]\*\\d+))\|(([a\-zA\-Z0\-9\_]\*\\d+/){2}([a\-zA\-Z0\-9\_]+))\|([a\-zA\-Z0\-9\_\-]\*\\d+)\|([a\-zA\-Z0\-9\_\-]\*\\d+\\.\\d+)\|(mpls)\|(dwdm)
.. attribute:: acl_information
Interface ACL Details
**type**\: str
**mandatory**\: True
"""
_prefix = 'ip-pfilter-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.interface_name = None
self.acl_information = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
if self.interface_name is None:
raise YPYModelError('Key property interface_name is None')
return self.parent._common_path +'/Cisco-IOS-XR-ip-pfilter-oper:interface[Cisco-IOS-XR-ip-pfilter-oper:interface-name = ' + str(self.interface_name) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.interface_name is not None:
return True
if self.acl_information is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ip_pfilter_oper as meta
return meta._meta_table['PfilterMa.Nodes.Node.Process.Ipv4.Interfaces.Interface']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-ip-pfilter-oper:interfaces'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.interface is not None:
for child_ref in self.interface:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ip_pfilter_oper as meta
return meta._meta_table['PfilterMa.Nodes.Node.Process.Ipv4.Interfaces']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-ip-pfilter-oper:ipv4'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.interfaces is not None and self.interfaces._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ip_pfilter_oper as meta
return meta._meta_table['PfilterMa.Nodes.Node.Process.Ipv4']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-ip-pfilter-oper:process'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.ipv4 is not None and self.ipv4._has_data():
return True
if self.ipv6 is not None and self.ipv6._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ip_pfilter_oper as meta
return meta._meta_table['PfilterMa.Nodes.Node.Process']['meta_info']
@property
def _common_path(self):
if self.node_name is None:
raise YPYModelError('Key property node_name is None')
return '/Cisco-IOS-XR-ip-pfilter-oper:pfilter-ma/Cisco-IOS-XR-ip-pfilter-oper:nodes/Cisco-IOS-XR-ip-pfilter-oper:node[Cisco-IOS-XR-ip-pfilter-oper:node-name = ' + str(self.node_name) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.node_name is not None:
return True
if self.process is not None and self.process._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ip_pfilter_oper as meta
return meta._meta_table['PfilterMa.Nodes.Node']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-ip-pfilter-oper:pfilter-ma/Cisco-IOS-XR-ip-pfilter-oper:nodes'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.node is not None:
for child_ref in self.node:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ip_pfilter_oper as meta
return meta._meta_table['PfilterMa.Nodes']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-ip-pfilter-oper:pfilter-ma'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.nodes is not None and self.nodes._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ip_pfilter_oper as meta
return meta._meta_table['PfilterMa']['meta_info']
| apache-2.0 |
abligh/xen-4.2-live-migrate | tools/python/xen/xend/xenstore/tests/stress_xs.py | 49 | 2700 | # This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Copyright (c) 2005 XenSource Ltd
import random
import sys
import threading
import time
import xen.lowlevel.xs
from xen.xend.xenstore import xsutil
from xen.xend.xenstore.xstransact import xstransact
from xen.xend.xenstore.xswatch import xswatch
PATH = '/tool/stress_xs'
def stress():
xstransact.Remove(PATH)
xstransact.Mkdir(PATH)
xswatch(PATH, watch_callback)
def do(f):
t = threading.Thread(target=f)
t.setDaemon(True)
t.start()
do(stress_write)
do(stress_get_domain_path)
do(stress_get_domain_path_xsutil)
do(stress_open_close)
while True:
# Wait for Ctrl-C.
time.sleep(100000000)
def stress_write():
xstransact.Write(PATH, 'key', '1')
while True:
val = xstransact.Gather(PATH, ('key', int))
xstransact.Store(PATH, ('key', val + 1))
random_sleep()
def stress_get_domain_path():
xs_handle = xen.lowlevel.xs.xs()
domid = 0
while True:
xs_handle.get_domain_path(domid)
domid += 1
random_sleep()
def stress_get_domain_path_xsutil():
domid = 0
while True:
xsutil.GetDomainPath(domid)
domid += 1
random_sleep()
def stress_open_close():
while True:
xs_handle = xen.lowlevel.xs.xs()
try:
try:
trans = xs_handle.transaction_start()
val = int(xs_handle.read(trans, PATH + '/key'))
xs_handle.write(trans, PATH + '/key', str(val + 1))
xs_handle.transaction_end(trans, False)
except:
xs_handle.transaction_end(trans, True)
random_sleep()
finally:
del xs_handle
def watch_callback(path):
random_sleep()
return True
def random_sleep():
d = random.randint(-50000, 500)
if d > 0:
time.sleep(d / 1000.0)
def main(argv = None):
if argv is None:
argv = sys.argv
stress()
return 0
if __name__ == "__main__":
sys.exit(main())
| gpl-2.0 |
MLnick/spark | examples/src/main/python/mllib/logistic_regression_with_lbfgs_example.py | 106 | 2082 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Logistic Regression With LBFGS Example.
"""
from __future__ import print_function
from pyspark import SparkContext
# $example on$
from pyspark.mllib.classification import LogisticRegressionWithLBFGS, LogisticRegressionModel
from pyspark.mllib.regression import LabeledPoint
# $example off$
if __name__ == "__main__":
sc = SparkContext(appName="PythonLogisticRegressionWithLBFGSExample")
# $example on$
# Load and parse the data
def parsePoint(line):
values = [float(x) for x in line.split(' ')]
return LabeledPoint(values[0], values[1:])
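        # Illustrative: the line "1 0.0 2.5" parses to LabeledPoint(1.0, [0.0, 2.5]).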
data = sc.textFile("data/mllib/sample_svm_data.txt")
parsedData = data.map(parsePoint)
# Build the model
model = LogisticRegressionWithLBFGS.train(parsedData)
# Evaluating the model on training data
labelsAndPreds = parsedData.map(lambda p: (p.label, model.predict(p.features)))
trainErr = labelsAndPreds.filter(lambda lp: lp[0] != lp[1]).count() / float(parsedData.count())
print("Training Error = " + str(trainErr))
# Save and load model
model.save(sc, "target/tmp/pythonLogisticRegressionWithLBFGSModel")
sameModel = LogisticRegressionModel.load(sc,
"target/tmp/pythonLogisticRegressionWithLBFGSModel")
# $example off$
| apache-2.0 |
yaukwankiu/armor | __init__pre_20140313.py | 1 | 2017 | """
I am completely rewriting this framework which was formerly known as weatherPattern. Yau Kwan Kiu, 802 CERB, NTU, 23-1-2013.
== Requirements ==
* python 2.7 or similar (python 2.5 will probably be okay, no python 3 please)
* numpy and scipy
* no sympy needed yet
* no opencv yet
== What's this? ==
ARMOR = Adjustment of Rainfall from MOdels using Radar, from WEather DEcision Technologies Inc, USA, based on the papers of [DuFran et al 2009], which builds on MAPLE (McGill Algorithm for Prediction by Lagrangian Extrapolation) based on [Germann, Zawadzki and Turner, 2001-2005] - see our 2012 Annual report to the Central Weather Bureau Taiwan for reference and details
This is our integrated ARMOR testing platform written in python. We shall develop and test our algorithms together here.
== Philosophy ==
* python as a glue, C or CUDA as the sword if need be
* keep it simple in design and use
* cross platform - at least running on pc and linux
== Design ==
* data structure:
** new style class (for future flexibility) armor.pattern.DBZ, (and other patterns for the future!), wrapping numpy.ma.MaskedArray [ http://docs.scipy.org/doc/numpy/reference/maskedarray.baseclass.html#the-maskedarray-class ], with identifying parameters (name, time, etc), parameters for I/O (path for input/output, params for screen display, etc), as well as simple I/O and standard methods for data processing and analysis
** module package operations acting on armor.Pattern: armor.advection, armor.shiiba, armor.wavelet, armor.hht, armor.kmeans, armor.hmm, armor.morphology
** try to be as python3 friendly as possible
== Roadmap ==
* speeding up with CUDA
* integration with QGIS
* incorporating tools from opencv and sympy, such as SIFT/SURF, symbolic manipulations, etc
You can see the above with
import armor
help(armor)
...............Cheers, YKK 23-1-2013..............
"""
__all__ = ['pattern', 'advection', 'basicio', 'fft', 'hht', 'hmm', 'kmeans', 'morphology', 'shiiba', 'wavelet']
test_attr = 'haha!'
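# A minimal sketch (illustrative only; the real class lives in armor.pattern
# with many more parameters) of the DBZ design described above -- a thin
# wrapper around a numpy masked array plus identifying metadata:
#
#   import numpy as np
#   class DBZ(object):
#       def __init__(self, matrix, name='', time=''):
#           self.matrix = np.ma.masked_invalid(matrix)   # the wrapped MaskedArray
#           self.name = name                             # identifying parameters
#           self.time = time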
| cc0-1.0 |
boltnev/iktomi | tests/db/sqla/__init__.py | 4 | 2080 | import unittest
from sqlalchemy.exc import UnboundExecutionError
from sqlalchemy.orm import sessionmaker
from sqlalchemy import func
from iktomi.db.sqla import multidb_binds
from . import multidb_models
from .multidb_models import db1, db2
class MultidbTest(unittest.TestCase):
def setUp(self):
binds = multidb_binds({'db1': 'sqlite://', 'db2': 'sqlite://'},
package=multidb_models)
self.db = sessionmaker(binds=binds)()
db1.metadata.create_all(bind=self.db.get_bind(db1.SameName))
db1.metadata.create_all(bind=self.db.get_bind(db2.SameName))
def test_get_bind(self):
with self.assertRaises(UnboundExecutionError):
# Insure it's not bound to single engine
self.db.get_bind()
engine_common1 = self.db.get_bind(db1.SameName)
engine_common2 = self.db.get_bind(db2.SameName)
self.assertIsNot(engine_common1, engine_common2)
engine_different1 = self.db.get_bind(db1.DifferentName1)
self.assertIs(engine_common1, engine_different1)
engine_different2 = self.db.get_bind(db2.DifferentName2)
self.assertIs(engine_common2, engine_different2)
def test_missing_metadata(self):
with self.assertRaises(ImportError):
multidb_binds({'db1': 'sqlite://',
'db2': 'sqlite://',
'db3': 'sqlite://'},
package=multidb_models)
def test_query_class(self):
try:
self.db.query(db1.SameName).all()
except UnboundExecutionError as exc:
self.fail('Unexpected exception: {}'.format(exc))
def test_query_attr(self):
try:
self.db.query(db1.SameName.id).all()
except UnboundExecutionError as exc:
self.fail('Unexpected exception: {}'.format(exc))
def test_query_func(self):
try:
self.db.query(func.max(db1.SameName.id)).all()
except UnboundExecutionError as exc:
self.fail('Unexpected exception: {}'.format(exc))
| mit |
Xowap/ansible | lib/ansible/plugins/callback/oneline.py | 144 | 3487 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.plugins.callback import CallbackBase
from ansible import constants as C
class CallbackModule(CallbackBase):
'''
This is the default callback interface, which simply prints messages
to stdout when new callback events are received.
'''
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'stdout'
CALLBACK_NAME = 'oneline'
def _command_generic_msg(self, hostname, result, caption):
stdout = result.get('stdout','').replace('\n', '\\n')
if 'stderr' in result and result['stderr']:
stderr = result.get('stderr','').replace('\n', '\\n')
return "%s | %s | rc=%s | (stdout) %s (stderr) %s" % (hostname, caption, result.get('rc',0), stdout, stderr)
else:
return "%s | %s | rc=%s | (stdout) %s" % (hostname, caption, result.get('rc',0), stdout)
def v2_runner_on_failed(self, result, ignore_errors=False):
if 'exception' in result._result:
if self._display.verbosity < 3:
# extract just the actual error message from the exception text
error = result._result['exception'].strip().split('\n')[-1]
msg = "An exception occurred during task execution. To see the full traceback, use -vvv. The error was: %s" % error
else:
msg = "An exception occurred during task execution. The full traceback is:\n" + result._result['exception'].replace('\n','')
if result._task.action in C.MODULE_NO_JSON:
self._display.display(self._command_generic_msg(result._host.get_name(), result._result,'FAILED'), color='red')
else:
self._display.display(msg, color='red')
# finally, remove the exception from the result so it's not shown every time
del result._result['exception']
self._display.display("%s | FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result, indent=0).replace('\n','')), color='red')
def v2_runner_on_ok(self, result):
if result._task.action in C.MODULE_NO_JSON:
self._display.display(self._command_generic_msg(result._host.get_name(), result._result,'SUCCESS'), color='green')
else:
self._display.display("%s | SUCCESS => %s" % (result._host.get_name(), self._dump_results(result._result, indent=0).replace('\n','')), color='green')
def v2_runner_on_unreachable(self, result):
self._display.display("%s | UNREACHABLE!" % result._host.get_name(), color='yellow')
def v2_runner_on_skipped(self, result):
self._display.display("%s | SKIPPED" % (result._host.get_name()), color='cyan')
| gpl-3.0 |
Voluntarynet/BitmessageKit | BitmessageKit/Vendor/static-python/Lib/traceback.py | 67 | 11255 | """Extract, format and print information about Python stack traces."""
import linecache
import sys
import types
__all__ = ['extract_stack', 'extract_tb', 'format_exception',
'format_exception_only', 'format_list', 'format_stack',
'format_tb', 'print_exc', 'format_exc', 'print_exception',
'print_last', 'print_stack', 'print_tb', 'tb_lineno']
def _print(file, str='', terminator='\n'):
file.write(str+terminator)
def print_list(extracted_list, file=None):
"""Print the list of tuples as returned by extract_tb() or
extract_stack() as a formatted stack trace to the given file."""
if file is None:
file = sys.stderr
for filename, lineno, name, line in extracted_list:
_print(file,
' File "%s", line %d, in %s' % (filename,lineno,name))
if line:
_print(file, ' %s' % line.strip())
def format_list(extracted_list):
"""Format a list of traceback entry tuples for printing.
Given a list of tuples as returned by extract_tb() or
extract_stack(), return a list of strings ready for printing.
Each string in the resulting list corresponds to the item with the
same index in the argument list. Each string ends in a newline;
the strings may contain internal newlines as well, for those items
whose source text line is not None.
"""
list = []
for filename, lineno, name, line in extracted_list:
item = ' File "%s", line %d, in %s\n' % (filename,lineno,name)
if line:
item = item + ' %s\n' % line.strip()
list.append(item)
return list
def print_tb(tb, limit=None, file=None):
"""Print up to 'limit' stack trace entries from the traceback 'tb'.
If 'limit' is omitted or None, all entries are printed. If 'file'
is omitted or None, the output goes to sys.stderr; otherwise
'file' should be an open file or file-like object with a write()
method.
"""
if file is None:
file = sys.stderr
if limit is None:
if hasattr(sys, 'tracebacklimit'):
limit = sys.tracebacklimit
n = 0
while tb is not None and (limit is None or n < limit):
f = tb.tb_frame
lineno = tb.tb_lineno
co = f.f_code
filename = co.co_filename
name = co.co_name
_print(file,
' File "%s", line %d, in %s' % (filename, lineno, name))
linecache.checkcache(filename)
line = linecache.getline(filename, lineno, f.f_globals)
if line: _print(file, ' ' + line.strip())
tb = tb.tb_next
n = n+1
def format_tb(tb, limit = None):
"""A shorthand for 'format_list(extract_stack(f, limit))."""
return format_list(extract_tb(tb, limit))
def extract_tb(tb, limit = None):
"""Return list of up to limit pre-processed entries from traceback.
This is useful for alternate formatting of stack traces. If
'limit' is omitted or None, all entries are extracted. A
pre-processed stack trace entry is a quadruple (filename, line
number, function name, text) representing the information that is
usually printed for a stack trace. The text is a string with
leading and trailing whitespace stripped; if the source is not
available it is None.
"""
if limit is None:
if hasattr(sys, 'tracebacklimit'):
limit = sys.tracebacklimit
list = []
n = 0
while tb is not None and (limit is None or n < limit):
f = tb.tb_frame
lineno = tb.tb_lineno
co = f.f_code
filename = co.co_filename
name = co.co_name
linecache.checkcache(filename)
line = linecache.getline(filename, lineno, f.f_globals)
if line: line = line.strip()
else: line = None
list.append((filename, lineno, name, line))
tb = tb.tb_next
n = n+1
return list
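# Example (illustrative; the filename and line number depend on the caller):
#   try:
#       1 / 0
#   except ZeroDivisionError:
#       tb = sys.exc_info()[2]
#       extract_tb(tb)   # -> [('example.py', 2, '<module>', '1 / 0')]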
def print_exception(etype, value, tb, limit=None, file=None):
"""Print exception up to 'limit' stack trace entries from 'tb' to 'file'.
This differs from print_tb() in the following ways: (1) if
traceback is not None, it prints a header "Traceback (most recent
call last):"; (2) it prints the exception type and value after the
stack trace; (3) if type is SyntaxError and value has the
appropriate format, it prints the line where the syntax error
occurred with a caret on the next line indicating the approximate
position of the error.
"""
if file is None:
file = sys.stderr
if tb:
_print(file, 'Traceback (most recent call last):')
print_tb(tb, limit, file)
lines = format_exception_only(etype, value)
for line in lines:
_print(file, line, '')
def format_exception(etype, value, tb, limit = None):
"""Format a stack trace and the exception information.
The arguments have the same meaning as the corresponding arguments
to print_exception(). The return value is a list of strings, each
ending in a newline and some containing internal newlines. When
these lines are concatenated and printed, exactly the same text is
printed as does print_exception().
"""
if tb:
list = ['Traceback (most recent call last):\n']
list = list + format_tb(tb, limit)
else:
list = []
list = list + format_exception_only(etype, value)
return list
def format_exception_only(etype, value):
"""Format the exception part of a traceback.
The arguments are the exception type and value such as given by
sys.last_type and sys.last_value. The return value is a list of
strings, each ending in a newline.
Normally, the list contains a single string; however, for
SyntaxError exceptions, it contains several lines that (when
printed) display detailed information about where the syntax
error occurred.
The message indicating which exception occurred is always the last
string in the list.
"""
# An instance should not have a meaningful value parameter, but
# sometimes does, particularly for string exceptions, such as
# >>> raise string1, string2 # deprecated
#
# Clear these out first because issubtype(string1, SyntaxError)
# would raise another exception and mask the original problem.
if (isinstance(etype, BaseException) or
isinstance(etype, types.InstanceType) or
etype is None or type(etype) is str):
return [_format_final_exc_line(etype, value)]
stype = etype.__name__
if not issubclass(etype, SyntaxError):
return [_format_final_exc_line(stype, value)]
# It was a syntax error; show exactly where the problem was found.
lines = []
try:
msg, (filename, lineno, offset, badline) = value.args
except Exception:
pass
else:
filename = filename or "<string>"
lines.append(' File "%s", line %d\n' % (filename, lineno))
if badline is not None:
lines.append(' %s\n' % badline.strip())
if offset is not None:
caretspace = badline.rstrip('\n')[:offset].lstrip()
# non-space whitespace (likes tabs) must be kept for alignment
caretspace = ((c.isspace() and c or ' ') for c in caretspace)
# only three spaces to account for offset1 == pos 0
lines.append(' %s^\n' % ''.join(caretspace))
value = msg
lines.append(_format_final_exc_line(stype, value))
return lines
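# Example (illustrative; the exact message text is interpreter-specific):
#   try:
#       1 / 0
#   except ZeroDivisionError:
#       etype, value = sys.exc_info()[:2]
#       format_exception_only(etype, value)
#       # -> ['ZeroDivisionError: integer division or modulo by zero\n']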
def _format_final_exc_line(etype, value):
"""Return a list of a single line -- normal case for format_exception_only"""
valuestr = _some_str(value)
if value is None or not valuestr:
line = "%s\n" % etype
else:
line = "%s: %s\n" % (etype, valuestr)
return line
def _some_str(value):
try:
return str(value)
except Exception:
pass
try:
value = unicode(value)
return value.encode("ascii", "backslashreplace")
except Exception:
pass
return '<unprintable %s object>' % type(value).__name__
def print_exc(limit=None, file=None):
"""Shorthand for 'print_exception(sys.exc_type, sys.exc_value, sys.exc_traceback, limit, file)'.
(In fact, it uses sys.exc_info() to retrieve the same information
in a thread-safe way.)"""
if file is None:
file = sys.stderr
try:
etype, value, tb = sys.exc_info()
print_exception(etype, value, tb, limit, file)
finally:
etype = value = tb = None
def format_exc(limit=None):
"""Like print_exc() but return a string."""
try:
etype, value, tb = sys.exc_info()
return ''.join(format_exception(etype, value, tb, limit))
finally:
etype = value = tb = None
def print_last(limit=None, file=None):
"""This is a shorthand for 'print_exception(sys.last_type,
sys.last_value, sys.last_traceback, limit, file)'."""
if not hasattr(sys, "last_type"):
raise ValueError("no last exception")
if file is None:
file = sys.stderr
print_exception(sys.last_type, sys.last_value, sys.last_traceback,
limit, file)
def print_stack(f=None, limit=None, file=None):
"""Print a stack trace from its invocation point.
The optional 'f' argument can be used to specify an alternate
stack frame at which to start. The optional 'limit' and 'file'
arguments have the same meaning as for print_exception().
"""
if f is None:
try:
raise ZeroDivisionError
except ZeroDivisionError:
f = sys.exc_info()[2].tb_frame.f_back
print_list(extract_stack(f, limit), file)
def format_stack(f=None, limit=None):
"""Shorthand for 'format_list(extract_stack(f, limit))'."""
if f is None:
try:
raise ZeroDivisionError
except ZeroDivisionError:
f = sys.exc_info()[2].tb_frame.f_back
return format_list(extract_stack(f, limit))
def extract_stack(f=None, limit = None):
"""Extract the raw traceback from the current stack frame.
The return value has the same format as for extract_tb(). The
optional 'f' and 'limit' arguments have the same meaning as for
print_stack(). Each item in the list is a quadruple (filename,
line number, function name, text), and the entries are in order
from oldest to newest stack frame.
"""
if f is None:
try:
raise ZeroDivisionError
except ZeroDivisionError:
f = sys.exc_info()[2].tb_frame.f_back
if limit is None:
if hasattr(sys, 'tracebacklimit'):
limit = sys.tracebacklimit
list = []
n = 0
while f is not None and (limit is None or n < limit):
lineno = f.f_lineno
co = f.f_code
filename = co.co_filename
name = co.co_name
linecache.checkcache(filename)
line = linecache.getline(filename, lineno, f.f_globals)
if line: line = line.strip()
else: line = None
list.append((filename, lineno, name, line))
f = f.f_back
n = n+1
list.reverse()
return list
def tb_lineno(tb):
"""Calculate correct line number of traceback given in tb.
Obsolete in 2.3.
"""
return tb.tb_lineno
| mit |
deepsrijit1105/edx-platform | openedx/core/djangoapps/credit/admin.py | 27 | 2460 | """
Django admin page for credit eligibility
"""
from ratelimitbackend import admin
from openedx.core.djangoapps.credit.models import (
CreditConfig, CreditCourse, CreditProvider, CreditEligibility, CreditRequest, CreditRequirement,
CreditRequirementStatus
)
class CreditCourseAdmin(admin.ModelAdmin):
"""Admin for credit courses. """
list_display = ('course_key', 'enabled',)
list_filter = ('enabled',)
search_fields = ('course_key',)
class Meta(object):
model = CreditCourse
class CreditProviderAdmin(admin.ModelAdmin):
"""Admin for credit providers. """
list_display = ('provider_id', 'display_name', 'active',)
list_filter = ('active',)
search_fields = ('provider_id', 'display_name')
class Meta(object):
model = CreditProvider
class CreditEligibilityAdmin(admin.ModelAdmin):
"""Admin for credit eligibility. """
list_display = ('course', 'username', 'deadline')
search_fields = ('username', 'course__course_key')
class Meta(object):
model = CreditEligibility
class CreditRequestAdmin(admin.ModelAdmin):
"""Admin for credit requests. """
list_display = ('provider', 'course', 'status', 'username')
list_filter = ('provider', 'status',)
readonly_fields = ('uuid',)
search_fields = ('uuid', 'username', 'course__course_key', 'provider__provider_id')
class Meta(object):
model = CreditRequest
class CreditRequirementAdmin(admin.ModelAdmin):
""" Admin for CreditRequirement. """
list_display = ('course', 'namespace', 'name', 'display_name', 'active',)
list_filter = ('active', 'namespace',)
search_fields = ('course__course_key', 'namespace', 'name',)
class Meta(object):
model = CreditRequirement
class CreditRequirementStatusAdmin(admin.ModelAdmin):
""" Admin for CreditRequirementStatus. """
list_display = ('username', 'requirement', 'status',)
search_fields = ('username', 'requirement__course__course_key',)
class Meta(object):
model = CreditRequirementStatus
admin.site.register(CreditCourse, CreditCourseAdmin)
admin.site.register(CreditProvider, CreditProviderAdmin)
admin.site.register(CreditEligibility, CreditEligibilityAdmin)
admin.site.register(CreditRequest, CreditRequestAdmin)
admin.site.register(CreditConfig)
admin.site.register(CreditRequirement, CreditRequirementAdmin)
admin.site.register(CreditRequirementStatus, CreditRequirementStatusAdmin)
| agpl-3.0 |
statgen/encore | encore/tests/api_tests.py | 1 | 1036 | import pytest
import flask_login
from encore import create_app
from encore.user import User
@pytest.fixture(scope="module")
def app(request):
app = create_app()
ctx = app.app_context()
ctx.push()
request.addfinalizer(ctx.pop)
return app
@pytest.fixture(scope="module")
def test_client(request, app):
client = app.test_client()
client.__enter__()
request.addfinalizer(lambda: client.__exit__(None, None, None))
return client
@pytest.fixture(scope="module")
def test_client_user(request, app):
client = app.test_client()
client.__enter__()
with client.session_transaction() as sess:
sess["user_id"] = "[email protected]"
sess["_fresh"] = True
request.addfinalizer(lambda: client.__exit__(None, None, None))
return client
def test_home_anon(test_client):
rv = test_client.get("/")
assert b'please sign in' in rv.data
assert rv.status_code == 200
def test_home_user(test_client_user):
rv = test_client_user.get("/")
assert b'Welcome' in rv.data
| agpl-3.0 |
williamfeng323/py-web | flask/lib/python3.6/site-packages/psycopg2/psycopg1.py | 8 | 3339 | """psycopg 1.1.x compatibility module
This module uses the new style connection and cursor types to build a psycopg
1.1.x compatibility layer. It should be considered a temporary hack to run
old code while porting to psycopg 2. Import it as follows::
from psycopg2 import psycopg1 as psycopg
"""
# psycopg/psycopg1.py - psycopg 1.1.x compatibility module
#
# Copyright (C) 2003-2010 Federico Di Gregorio <[email protected]>
#
# psycopg2 is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# In addition, as a special exception, the copyright holders give
# permission to link this program with the OpenSSL library (or with
# modified versions of OpenSSL that use the same license as OpenSSL),
# and distribute linked combinations including the two.
#
# You must obey the GNU Lesser General Public License in all respects for
# all of the code used other than OpenSSL.
#
# psycopg2 is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
from psycopg2 import _psycopg as _2psycopg # noqa
from psycopg2.extensions import cursor as _2cursor
from psycopg2.extensions import connection as _2connection
from psycopg2 import * # noqa
from psycopg2 import extensions as _ext
_2connect = connect
def connect(*args, **kwargs):
"""connect(dsn, ...) -> new psycopg 1.1.x compatible connection object"""
kwargs['connection_factory'] = connection
conn = _2connect(*args, **kwargs)
conn.set_isolation_level(_ext.ISOLATION_LEVEL_READ_COMMITTED)
return conn
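# Typical psycopg 1.x style usage of this layer (illustrative; assumes a
# reachable database named 'test'):
#
#   conn = connect('dbname=test')
#   curs = conn.cursor()
#   curs.execute('SELECT 1 AS x')
#   curs.dictfetchone()          # -> {'x': 1}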
class connection(_2connection):
"""psycopg 1.1.x connection."""
def cursor(self):
"""cursor() -> new psycopg 1.1.x compatible cursor object"""
return _2connection.cursor(self, cursor_factory=cursor)
def autocommit(self, on_off=1):
"""autocommit(on_off=1) -> switch autocommit on (1) or off (0)"""
if on_off > 0:
self.set_isolation_level(_ext.ISOLATION_LEVEL_AUTOCOMMIT)
else:
self.set_isolation_level(_ext.ISOLATION_LEVEL_READ_COMMITTED)
class cursor(_2cursor):
"""psycopg 1.1.x cursor.
Note that this cursor implements the exact procedure used by psycopg 1 to
build dictionaries out of result rows. The DictCursor in the
psycopg.extras modules implements a much better and faster algorithm.
"""
def __build_dict(self, row):
res = {}
for i in range(len(self.description)):
res[self.description[i][0]] = row[i]
return res
def dictfetchone(self):
row = _2cursor.fetchone(self)
if row:
return self.__build_dict(row)
else:
return row
def dictfetchmany(self, size):
res = []
rows = _2cursor.fetchmany(self, size)
for row in rows:
res.append(self.__build_dict(row))
return res
def dictfetchall(self):
res = []
rows = _2cursor.fetchall(self)
for row in rows:
res.append(self.__build_dict(row))
return res
| mit |
aeron15/ruffus | ruffus/test/test_transform_with_no_re_matches.py | 5 | 3227 | #!/usr/bin/env python
from __future__ import print_function
"""
test_transform_with_no_re_matches.py
test messages with no regular expression matches
"""
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# options
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
import os
tempdir = os.path.relpath(os.path.abspath(os.path.splitext(__file__)[0])) + "/"
import sys
# add grandparent to search path for testing
grandparent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", ".."))
sys.path.insert(0, grandparent_dir)
# module name = script name without extension
module_name = os.path.splitext(os.path.basename(__file__))[0]
from ruffus import transform, regex, pipeline_run, Pipeline, originate, mkdir
import ruffus
print(" Ruffus Version = ", ruffus.__version__)
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# imports
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# Tasks
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
@mkdir(tempdir)
@originate(tempdir + "a")
def task_1 (o):
open(o, 'w').close()
@transform(task_1, regex("b"), "task_2.output")
def task_2 (i, o):
for f in o:
with open(f, 'w') as oo:
pass
import unittest
class t_save_to_str_logger:
"""
Everything to stderr
"""
def __init__ (self):
self.info_str = ""
self.warning_str = ""
self.debug_str = ""
def info (self, message):
self.info_str += message
def warning (self, message):
self.warning_str += message
def debug (self, message):
self.debug_str += message
class Test_task_mkdir(unittest.TestCase):
def setUp (self):
"""
"""
pass
def tearDown (self):
"""
"""
for d in ['a']:
fullpath = os.path.join(os.path.dirname(__file__), tempdir + d)
os.unlink(fullpath)
os.rmdir(tempdir)
def test_no_re_match (self):
save_to_str_logger = t_save_to_str_logger()
pipeline_run(multiprocess = 10, logger = save_to_str_logger, verbose = 1, pipeline= "main")
print(save_to_str_logger.warning_str)
self.assertTrue("no file names matched" in save_to_str_logger.warning_str)
print("\n Warning printed out correctly", file=sys.stderr)
def test_newstyle_no_re_match (self):
test_pipeline = Pipeline("test")
test_pipeline.originate(task_1, tempdir + "a").mkdir(tempdir)
test_pipeline.transform(task_2, task_1, regex("b"), "task_2.output")
save_to_str_logger = t_save_to_str_logger()
test_pipeline.run(multiprocess = 10, logger = save_to_str_logger, verbose = 1)
print(save_to_str_logger.warning_str)
self.assertTrue("no file names matched" in save_to_str_logger.warning_str)
print("\n Warning printed out correctly", file=sys.stderr)
if __name__ == '__main__':
unittest.main()
| mit |
jaingaurav/Diamond | src/collectors/cephstats/cephstats.py | 26 | 1581 | # coding=utf-8
"""
Get ceph status from one node
"""
import subprocess
import re
import os
import sys
sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(__file__)),
'ceph'))
from ceph import CephCollector
patternchk = re.compile(r'\bclient io .*')
numberchk = re.compile(r'\d+')
# This is external to the CephCollector so it can be tested
# separately.
def process_ceph_status(output):
res = patternchk.search(output)
if not res:
return {}
ceph_stats = res.group()
if not ceph_stats:
return {}
ret = {}
rd = wr = iops = None
rd = numberchk.search(ceph_stats)
if rd is not None:
ret['rd'] = rd.group()
wr = numberchk.search(ceph_stats, rd.end())
if wr is not None:
ret['wr'] = wr.group()
iops = numberchk.search(ceph_stats, wr.end())
if iops is not None:
ret['iops'] = iops.group()
return ret
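# Example (illustrative; the line format follows the 'client io' summary
# printed by 'ceph -s'):
#
#   process_ceph_status('client io 1024 kB/s rd, 512 kB/s wr, 32 op/s')
#   # -> {'rd': '1024', 'wr': '512', 'iops': '32'}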
class CephStatsCollector(CephCollector):
def _get_stats(self):
"""
Get ceph stats
"""
try:
output = subprocess.check_output(['ceph', '-s'])
except subprocess.CalledProcessError, err:
self.log.info(
'Could not get stats: %s' % err)
self.log.exception('Could not get stats')
return {}
return process_ceph_status(output)
def collect(self):
"""
Collect ceph stats
"""
stats = self._get_stats()
self._publish_stats('cephstats', stats)
return
| mit |
tersmitten/ansible | lib/ansible/plugins/shell/powershell.py | 30 | 12111 | # Copyright (c) 2014, Chris Church <[email protected]>
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
name: powershell
plugin_type: shell
version_added: historical
short_description: Windows PowerShell
description:
- The only option when using 'winrm' or 'psrp' as a connection plugin.
- Can also be used when using 'ssh' as a connection plugin and the C(DefaultShell) has been configured to PowerShell.
extends_documentation_fragment:
- shell_windows
'''
import base64
import os
import re
import shlex
import pkgutil
import xml.etree.ElementTree as ET
from ansible.errors import AnsibleError
from ansible.module_utils._text import to_bytes, to_text
from ansible.plugins.shell import ShellBase
_common_args = ['PowerShell', '-NoProfile', '-NonInteractive', '-ExecutionPolicy', 'Unrestricted']
# Primarily for testing, allow explicitly specifying PowerShell version via
# an environment variable.
_powershell_version = os.environ.get('POWERSHELL_VERSION', None)
if _powershell_version:
_common_args = ['PowerShell', '-Version', _powershell_version] + _common_args[1:]
def _parse_clixml(data, stream="Error"):
"""
Takes a byte string like '#< CLIXML\r\n<Objs...' and extracts the stream
message encoded in the XML data. CLIXML is used by PowerShell to encode
multiple objects in stderr.
"""
clixml = ET.fromstring(data.split(b"\r\n", 1)[-1])
namespace_match = re.match(r'{(.*)}', clixml.tag)
namespace = "{%s}" % namespace_match.group(1) if namespace_match else ""
strings = clixml.findall("./%sS" % namespace)
lines = [e.text.replace('_x000D__x000A_', '') for e in strings if e.attrib.get('S') == stream]
return to_bytes('\r\n'.join(lines))
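# Example (illustrative; a hand-built minimal CLIXML payload):
#
#   data = (b'#< CLIXML\r\n'
#           b'<Objs Version="1.1.0.1" '
#           b'xmlns="http://schemas.microsoft.com/powershell/2004/04">'
#           b'<S S="Error">Oops_x000D__x000A_</S></Objs>')
#   _parse_clixml(data)   # -> b'Oops'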
class ShellModule(ShellBase):
# Common shell filenames that this plugin handles
# Powershell is handled differently. It's selected when winrm is the
# connection
COMPATIBLE_SHELLS = frozenset()
# Family of shells this has. Must match the filename without extension
SHELL_FAMILY = 'powershell'
_SHELL_REDIRECT_ALLNULL = '> $null'
_SHELL_AND = ';'
# Used by various parts of Ansible to do Windows specific changes
_IS_WINDOWS = True
env = dict()
# We're being overly cautious about which keys to accept (more so than
# the Windows environment is capable of doing), since the powershell
# env provider's limitations don't appear to be documented.
safe_envkey = re.compile(r'^[\d\w_]{1,255}$')
# TODO: add binary module support
def assert_safe_env_key(self, key):
if not self.safe_envkey.match(key):
raise AnsibleError("Invalid PowerShell environment key: %s" % key)
return key
def safe_env_value(self, key, value):
if len(value) > 32767:
raise AnsibleError("PowerShell environment value for key '%s' exceeds 32767 characters in length" % key)
# powershell single quoted literals need single-quote doubling as their only escaping
value = value.replace("'", "''")
return to_text(value, errors='surrogate_or_strict')
def env_prefix(self, **kwargs):
# powershell/winrm env handling is handled in the exec wrapper
return ""
def join_path(self, *args):
parts = []
for arg in args:
arg = self._unquote(arg).replace('/', '\\')
parts.extend([a for a in arg.split('\\') if a])
path = '\\'.join(parts)
if path.startswith('~'):
return path
return path
def get_remote_filename(self, pathname):
# powershell requires that script files end with .ps1
base_name = os.path.basename(pathname.strip())
name, ext = os.path.splitext(base_name.strip())
if ext.lower() not in ['.ps1', '.exe']:
return name + '.ps1'
return base_name.strip()
def path_has_trailing_slash(self, path):
# Allow Windows paths to be specified using either slash.
path = self._unquote(path)
return path.endswith('/') or path.endswith('\\')
def chmod(self, paths, mode):
raise NotImplementedError('chmod is not implemented for Powershell')
def chown(self, paths, user):
raise NotImplementedError('chown is not implemented for Powershell')
def set_user_facl(self, paths, user, mode):
raise NotImplementedError('set_user_facl is not implemented for Powershell')
def remove(self, path, recurse=False):
path = self._escape(self._unquote(path))
if recurse:
return self._encode_script('''Remove-Item "%s" -Force -Recurse;''' % path)
else:
return self._encode_script('''Remove-Item "%s" -Force;''' % path)
def mkdtemp(self, basefile=None, system=False, mode=None, tmpdir=None):
# Windows does not have an equivalent for the system temp files, so
# the param is ignored
basefile = self._escape(self._unquote(basefile))
basetmpdir = tmpdir if tmpdir else self.get_option('remote_tmp')
script = '''
$tmp_path = [System.Environment]::ExpandEnvironmentVariables('%s')
$tmp = New-Item -Type Directory -Path $tmp_path -Name '%s'
Write-Output -InputObject $tmp.FullName
''' % (basetmpdir, basefile)
return self._encode_script(script.strip())
def expand_user(self, user_home_path, username=''):
# PowerShell only supports "~" (not "~username"). Resolve-Path ~ does
# not seem to work remotely, though by default we are always starting
# in the user's home directory.
user_home_path = self._unquote(user_home_path)
if user_home_path == '~':
script = 'Write-Output (Get-Location).Path'
elif user_home_path.startswith('~\\'):
script = 'Write-Output ((Get-Location).Path + "%s")' % self._escape(user_home_path[1:])
else:
script = 'Write-Output "%s"' % self._escape(user_home_path)
return self._encode_script(script)
def exists(self, path):
path = self._escape(self._unquote(path))
script = '''
If (Test-Path "%s")
{
$res = 0;
}
Else
{
$res = 1;
}
Write-Output "$res";
Exit $res;
''' % path
return self._encode_script(script)
def checksum(self, path, *args, **kwargs):
path = self._escape(self._unquote(path))
script = '''
If (Test-Path -PathType Leaf "%(path)s")
{
$sp = new-object -TypeName System.Security.Cryptography.SHA1CryptoServiceProvider;
$fp = [System.IO.File]::Open("%(path)s", [System.IO.Filemode]::Open, [System.IO.FileAccess]::Read);
[System.BitConverter]::ToString($sp.ComputeHash($fp)).Replace("-", "").ToLower();
$fp.Dispose();
}
ElseIf (Test-Path -PathType Container "%(path)s")
{
Write-Output "3";
}
Else
{
Write-Output "1";
}
''' % dict(path=path)
return self._encode_script(script)
def build_module_command(self, env_string, shebang, cmd, arg_path=None):
bootstrap_wrapper = pkgutil.get_data("ansible.executor.powershell", "bootstrap_wrapper.ps1")
# pipelining bypass
if cmd == '':
return self._encode_script(script=bootstrap_wrapper, strict_mode=False, preserve_rc=False)
# non-pipelining
cmd_parts = shlex.split(cmd, posix=False)
cmd_parts = list(map(to_text, cmd_parts))
if shebang and shebang.lower() == '#!powershell':
if not self._unquote(cmd_parts[0]).lower().endswith('.ps1'):
# we're running a module via the bootstrap wrapper
cmd_parts[0] = '"%s.ps1"' % self._unquote(cmd_parts[0])
wrapper_cmd = "type " + cmd_parts[0] + " | " + self._encode_script(script=bootstrap_wrapper, strict_mode=False, preserve_rc=False)
return wrapper_cmd
elif shebang and shebang.startswith('#!'):
cmd_parts.insert(0, shebang[2:])
elif not shebang:
# The module is assumed to be a binary
cmd_parts[0] = self._unquote(cmd_parts[0])
cmd_parts.append(arg_path)
script = '''
Try
{
%s
%s
}
Catch
{
$_obj = @{ failed = $true }
If ($_.Exception.GetType)
{
$_obj.Add('msg', $_.Exception.Message)
}
Else
{
$_obj.Add('msg', $_.ToString())
}
If ($_.InvocationInfo.PositionMessage)
{
$_obj.Add('exception', $_.InvocationInfo.PositionMessage)
}
ElseIf ($_.ScriptStackTrace)
{
$_obj.Add('exception', $_.ScriptStackTrace)
}
Try
{
$_obj.Add('error_record', ($_ | ConvertTo-Json | ConvertFrom-Json))
}
Catch
{
}
Echo $_obj | ConvertTo-Json -Compress -Depth 99
Exit 1
}
''' % (env_string, ' '.join(cmd_parts))
return self._encode_script(script, preserve_rc=False)
def wrap_for_exec(self, cmd):
return '& %s; exit $LASTEXITCODE' % cmd
def _unquote(self, value):
'''Remove any matching quotes that wrap the given value.'''
value = to_text(value or '')
m = re.match(r'^\s*?\'(.*?)\'\s*?$', value)
if m:
return m.group(1)
m = re.match(r'^\s*?"(.*?)"\s*?$', value)
if m:
return m.group(1)
return value
def _escape(self, value, include_vars=False):
        '''Return the value escaped for use in a PowerShell command.'''
# http://www.techotopia.com/index.php/Windows_PowerShell_1.0_String_Quoting_and_Escape_Sequences
# http://stackoverflow.com/questions/764360/a-list-of-string-replacements-in-python
subs = [('\n', '`n'), ('\r', '`r'), ('\t', '`t'), ('\a', '`a'),
('\b', '`b'), ('\f', '`f'), ('\v', '`v'), ('"', '`"'),
('\'', '`\''), ('`', '``'), ('\x00', '`0')]
if include_vars:
subs.append(('$', '`$'))
pattern = '|'.join('(%s)' % re.escape(p) for p, s in subs)
substs = [s for p, s in subs]
def replace(m):
return substs[m.lastindex - 1]
return re.sub(pattern, replace, value)
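    # e.g. _escape('say "hi"\n') -> 'say `"hi`"`n'; with include_vars=True,
    # '$env:PATH' becomes '`$env:PATH' so PowerShell treats it literally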
def _encode_script(self, script, as_list=False, strict_mode=True, preserve_rc=True):
'''Convert a PowerShell script to a single base64-encoded command.'''
script = to_text(script)
if script == u'-':
cmd_parts = _common_args + ['-Command', '-']
else:
if strict_mode:
script = u'Set-StrictMode -Version Latest\r\n%s' % script
# try to propagate exit code if present- won't work with begin/process/end-style scripts (ala put_file)
# NB: the exit code returned may be incorrect in the case of a successful command followed by an invalid command
if preserve_rc:
script = u'%s\r\nIf (-not $?) { If (Get-Variable LASTEXITCODE -ErrorAction SilentlyContinue) { exit $LASTEXITCODE } Else { exit 1 } }\r\n'\
% script
script = '\n'.join([x.strip() for x in script.splitlines() if x.strip()])
encoded_script = to_text(base64.b64encode(script.encode('utf-16-le')), 'utf-8')
cmd_parts = _common_args + ['-EncodedCommand', encoded_script]
if as_list:
return cmd_parts
return ' '.join(cmd_parts)
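    # A minimal sketch of what the encoding amounts to (assuming
    # strict_mode=False and preserve_rc=False, so no wrapper text is added):
    #
    #   import base64
    #   base64.b64encode(u'Get-ChildItem'.encode('utf-16-le'))
    #   # -> 'RwBlAHQALQBD...', run as:
    #   # PowerShell -NoProfile ... -EncodedCommand RwBlAHQALQBD...
    #
    # UTF-16-LE plus base64 sidesteps every command-line quoting issue, at the
    # cost of a longer command line.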
| gpl-3.0 |
Kovath/league-announcer | log_analyzer.py | 1 | 1527 | #!/usr/bin/env python
# LOG ANALYZER
# by Kevin Yang
#
# Assumes VERY MUCH THIS FORMAT:
# [<time>] <event> -> <data>
import sys, re
import numpy as np
def find_outliers(array, mean=None, std=None, m=6):
    if mean is None:
        mean = np.mean(array)
    if std is None:
        std = np.std(array)
    return array[abs(array - mean) >= m * std]
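# Illustrative usage (values chosen by hand, not from a real log):
#
#   >>> find_outliers(np.array([1, 2, 1, 2, 100]), m=1)
#   array([100])
#
# With the default m=6 the same input yields an empty array, since 100 lies
# less than six standard deviations from the mean here.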
if __name__ == "__main__":
log_file = open(sys.argv[1])
regex = re.compile(r"\[(?P<time>\d+)\] (?P<event>\w*) -> (?P<data>.*)")
events = []
for line in log_file:
match = re.search(regex, line)
        if match is None:
print("error parsing line: " + line)
continue
event = {
"time" : match.group("time"),
"event" : match.group("event"),
"data" : eval(match.group("data")),
}
events.append(event)
if len(events) <= 0:
exit()
data_query = dict([(key, []) for key in events[0]["data"].keys()])
for event in events:
for key in data_query.keys():
data_query[key].append(event["data"][key])
for query, data in data_query.items():
data_query[query] = { "data" : np.array(data) }
# calculations
for query, stats in data_query.items():
data = stats["data"]
stats["median"] = np.median(data)
stats["mean"] = np.mean(data)
stats["min"] = np.min(data)
stats["max"] = np.max(data)
stats["std"] = np.std(data)
stats["outliers"] = find_outliers(data, stats["mean"], stats["std"])
# output
for query, stats in data_query.items():
print(query + ":")
for stat, value in stats.items():
print("\t%s: %s" % (stat, str(value)))
print("") | mit |
40123148/40123148 | static/Brython3.1.1-20150328-091302/Lib/unittest/test/test_case.py | 738 | 51689 | import difflib
import pprint
import pickle
import re
import sys
import warnings
import weakref
import inspect
from copy import deepcopy
from test import support
import unittest
from .support import (
TestEquality, TestHashing, LoggingResult,
ResultWithNoStartTestRunStopTestRun
)
class Test(object):
"Keep these TestCase classes out of the main namespace"
class Foo(unittest.TestCase):
def runTest(self): pass
def test1(self): pass
class Bar(Foo):
def test2(self): pass
class LoggingTestCase(unittest.TestCase):
"""A test case which logs its calls."""
def __init__(self, events):
super(Test.LoggingTestCase, self).__init__('test')
self.events = events
def setUp(self):
self.events.append('setUp')
def test(self):
self.events.append('test')
def tearDown(self):
self.events.append('tearDown')
class Test_TestCase(unittest.TestCase, TestEquality, TestHashing):
### Set up attributes used by inherited tests
################################################################
# Used by TestHashing.test_hash and TestEquality.test_eq
eq_pairs = [(Test.Foo('test1'), Test.Foo('test1'))]
# Used by TestEquality.test_ne
ne_pairs = [(Test.Foo('test1'), Test.Foo('runTest')),
(Test.Foo('test1'), Test.Bar('test1')),
(Test.Foo('test1'), Test.Bar('test2'))]
################################################################
### /Set up attributes used by inherited tests
# "class TestCase([methodName])"
# ...
# "Each instance of TestCase will run a single test method: the
# method named methodName."
# ...
# "methodName defaults to "runTest"."
#
# Make sure it really is optional, and that it defaults to the proper
# thing.
def test_init__no_test_name(self):
class Test(unittest.TestCase):
def runTest(self): raise MyException()
def test(self): pass
self.assertEqual(Test().id()[-13:], '.Test.runTest')
# test that TestCase can be instantiated with no args
# primarily for use at the interactive interpreter
test = unittest.TestCase()
test.assertEqual(3, 3)
with test.assertRaises(test.failureException):
test.assertEqual(3, 2)
with self.assertRaises(AttributeError):
test.run()
# "class TestCase([methodName])"
# ...
# "Each instance of TestCase will run a single test method: the
# method named methodName."
def test_init__test_name__valid(self):
class Test(unittest.TestCase):
def runTest(self): raise MyException()
def test(self): pass
self.assertEqual(Test('test').id()[-10:], '.Test.test')
# "class TestCase([methodName])"
# ...
# "Each instance of TestCase will run a single test method: the
# method named methodName."
def test_init__test_name__invalid(self):
class Test(unittest.TestCase):
def runTest(self): raise MyException()
def test(self): pass
try:
Test('testfoo')
except ValueError:
pass
else:
self.fail("Failed to raise ValueError")
# "Return the number of tests represented by the this test object. For
# TestCase instances, this will always be 1"
def test_countTestCases(self):
class Foo(unittest.TestCase):
def test(self): pass
self.assertEqual(Foo('test').countTestCases(), 1)
# "Return the default type of test result object to be used to run this
# test. For TestCase instances, this will always be
# unittest.TestResult; subclasses of TestCase should
# override this as necessary."
def test_defaultTestResult(self):
class Foo(unittest.TestCase):
def runTest(self):
pass
result = Foo().defaultTestResult()
self.assertEqual(type(result), unittest.TestResult)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if setUp() raises
# an exception.
def test_run_call_order__error_in_setUp(self):
events = []
result = LoggingResult(events)
class Foo(Test.LoggingTestCase):
def setUp(self):
super(Foo, self).setUp()
raise RuntimeError('raised by Foo.setUp')
Foo(events).run(result)
expected = ['startTest', 'setUp', 'addError', 'stopTest']
self.assertEqual(events, expected)
# "With a temporary result stopTestRun is called when setUp errors.
def test_run_call_order__error_in_setUp_default_result(self):
events = []
class Foo(Test.LoggingTestCase):
def defaultTestResult(self):
return LoggingResult(self.events)
def setUp(self):
super(Foo, self).setUp()
raise RuntimeError('raised by Foo.setUp')
Foo(events).run()
expected = ['startTestRun', 'startTest', 'setUp', 'addError',
'stopTest', 'stopTestRun']
self.assertEqual(events, expected)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if the test raises
# an error (as opposed to a failure).
def test_run_call_order__error_in_test(self):
events = []
result = LoggingResult(events)
class Foo(Test.LoggingTestCase):
def test(self):
super(Foo, self).test()
raise RuntimeError('raised by Foo.test')
expected = ['startTest', 'setUp', 'test', 'tearDown',
'addError', 'stopTest']
Foo(events).run(result)
self.assertEqual(events, expected)
# "With a default result, an error in the test still results in stopTestRun
# being called."
def test_run_call_order__error_in_test_default_result(self):
events = []
class Foo(Test.LoggingTestCase):
def defaultTestResult(self):
return LoggingResult(self.events)
def test(self):
super(Foo, self).test()
raise RuntimeError('raised by Foo.test')
expected = ['startTestRun', 'startTest', 'setUp', 'test',
'tearDown', 'addError', 'stopTest', 'stopTestRun']
Foo(events).run()
self.assertEqual(events, expected)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if the test signals
# a failure (as opposed to an error).
def test_run_call_order__failure_in_test(self):
events = []
result = LoggingResult(events)
class Foo(Test.LoggingTestCase):
def test(self):
super(Foo, self).test()
self.fail('raised by Foo.test')
expected = ['startTest', 'setUp', 'test', 'tearDown',
'addFailure', 'stopTest']
Foo(events).run(result)
self.assertEqual(events, expected)
# "When a test fails with a default result stopTestRun is still called."
def test_run_call_order__failure_in_test_default_result(self):
class Foo(Test.LoggingTestCase):
def defaultTestResult(self):
return LoggingResult(self.events)
def test(self):
super(Foo, self).test()
self.fail('raised by Foo.test')
expected = ['startTestRun', 'startTest', 'setUp', 'test',
'tearDown', 'addFailure', 'stopTest', 'stopTestRun']
events = []
Foo(events).run()
self.assertEqual(events, expected)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if tearDown() raises
# an exception.
def test_run_call_order__error_in_tearDown(self):
events = []
result = LoggingResult(events)
class Foo(Test.LoggingTestCase):
def tearDown(self):
super(Foo, self).tearDown()
raise RuntimeError('raised by Foo.tearDown')
Foo(events).run(result)
expected = ['startTest', 'setUp', 'test', 'tearDown', 'addError',
'stopTest']
self.assertEqual(events, expected)
# "When tearDown errors with a default result stopTestRun is still called."
def test_run_call_order__error_in_tearDown_default_result(self):
class Foo(Test.LoggingTestCase):
def defaultTestResult(self):
return LoggingResult(self.events)
def tearDown(self):
super(Foo, self).tearDown()
raise RuntimeError('raised by Foo.tearDown')
events = []
Foo(events).run()
expected = ['startTestRun', 'startTest', 'setUp', 'test', 'tearDown',
'addError', 'stopTest', 'stopTestRun']
self.assertEqual(events, expected)
# "TestCase.run() still works when the defaultTestResult is a TestResult
# that does not support startTestRun and stopTestRun.
def test_run_call_order_default_result(self):
class Foo(unittest.TestCase):
def defaultTestResult(self):
return ResultWithNoStartTestRunStopTestRun()
def test(self):
pass
Foo('test').run()
# "This class attribute gives the exception raised by the test() method.
# If a test framework needs to use a specialized exception, possibly to
# carry additional information, it must subclass this exception in
# order to ``play fair'' with the framework. The initial value of this
# attribute is AssertionError"
def test_failureException__default(self):
class Foo(unittest.TestCase):
def test(self):
pass
self.assertTrue(Foo('test').failureException is AssertionError)
# "This class attribute gives the exception raised by the test() method.
# If a test framework needs to use a specialized exception, possibly to
# carry additional information, it must subclass this exception in
# order to ``play fair'' with the framework."
#
# Make sure TestCase.run() respects the designated failureException
def test_failureException__subclassing__explicit_raise(self):
events = []
result = LoggingResult(events)
class Foo(unittest.TestCase):
def test(self):
raise RuntimeError()
failureException = RuntimeError
self.assertTrue(Foo('test').failureException is RuntimeError)
Foo('test').run(result)
expected = ['startTest', 'addFailure', 'stopTest']
self.assertEqual(events, expected)
# "This class attribute gives the exception raised by the test() method.
# If a test framework needs to use a specialized exception, possibly to
# carry additional information, it must subclass this exception in
# order to ``play fair'' with the framework."
#
# Make sure TestCase.run() respects the designated failureException
def test_failureException__subclassing__implicit_raise(self):
events = []
result = LoggingResult(events)
class Foo(unittest.TestCase):
def test(self):
self.fail("foo")
failureException = RuntimeError
self.assertTrue(Foo('test').failureException is RuntimeError)
Foo('test').run(result)
expected = ['startTest', 'addFailure', 'stopTest']
self.assertEqual(events, expected)
# "The default implementation does nothing."
def test_setUp(self):
class Foo(unittest.TestCase):
def runTest(self):
pass
# ... and nothing should happen
Foo().setUp()
# "The default implementation does nothing."
def test_tearDown(self):
class Foo(unittest.TestCase):
def runTest(self):
pass
# ... and nothing should happen
Foo().tearDown()
# "Return a string identifying the specific test case."
#
# Because of the vague nature of the docs, I'm not going to lock this
# test down too much. Really all that can be asserted is that the id()
# will be a string (either 8-byte or unicode -- again, because the docs
# just say "string")
def test_id(self):
class Foo(unittest.TestCase):
def runTest(self):
pass
self.assertIsInstance(Foo().id(), str)
# "If result is omitted or None, a temporary result object is created,
# used, and is made available to the caller. As TestCase owns the
# temporary result startTestRun and stopTestRun are called.
def test_run__uses_defaultTestResult(self):
events = []
defaultResult = LoggingResult(events)
class Foo(unittest.TestCase):
def test(self):
events.append('test')
def defaultTestResult(self):
return defaultResult
# Make run() find a result object on its own
result = Foo('test').run()
self.assertIs(result, defaultResult)
expected = ['startTestRun', 'startTest', 'test', 'addSuccess',
'stopTest', 'stopTestRun']
self.assertEqual(events, expected)
# "The result object is returned to run's caller"
def test_run__returns_given_result(self):
class Foo(unittest.TestCase):
def test(self):
pass
result = unittest.TestResult()
retval = Foo('test').run(result)
self.assertIs(retval, result)
# "The same effect [as method run] may be had by simply calling the
# TestCase instance."
def test_call__invoking_an_instance_delegates_to_run(self):
resultIn = unittest.TestResult()
resultOut = unittest.TestResult()
class Foo(unittest.TestCase):
def test(self):
pass
def run(self, result):
self.assertIs(result, resultIn)
return resultOut
retval = Foo('test')(resultIn)
self.assertIs(retval, resultOut)
def testShortDescriptionWithoutDocstring(self):
self.assertIsNone(self.shortDescription())
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def testShortDescriptionWithOneLineDocstring(self):
"""Tests shortDescription() for a method with a docstring."""
self.assertEqual(
self.shortDescription(),
'Tests shortDescription() for a method with a docstring.')
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def testShortDescriptionWithMultiLineDocstring(self):
"""Tests shortDescription() for a method with a longer docstring.
        This method ensures that only the first line of a docstring is
        returned and used in the short description, no matter how long the
whole thing is.
"""
self.assertEqual(
self.shortDescription(),
'Tests shortDescription() for a method with a longer '
'docstring.')
def testAddTypeEqualityFunc(self):
class SadSnake(object):
"""Dummy class for test_addTypeEqualityFunc."""
s1, s2 = SadSnake(), SadSnake()
self.assertFalse(s1 == s2)
def AllSnakesCreatedEqual(a, b, msg=None):
return type(a) == type(b) == SadSnake
self.addTypeEqualityFunc(SadSnake, AllSnakesCreatedEqual)
self.assertEqual(s1, s2)
# No this doesn't clean up and remove the SadSnake equality func
# from this TestCase instance but since its a local nothing else
# will ever notice that.
def testAssertIs(self):
thing = object()
self.assertIs(thing, thing)
self.assertRaises(self.failureException, self.assertIs, thing, object())
def testAssertIsNot(self):
thing = object()
self.assertIsNot(thing, object())
self.assertRaises(self.failureException, self.assertIsNot, thing, thing)
def testAssertIsInstance(self):
thing = []
self.assertIsInstance(thing, list)
self.assertRaises(self.failureException, self.assertIsInstance,
thing, dict)
def testAssertNotIsInstance(self):
thing = []
self.assertNotIsInstance(thing, dict)
self.assertRaises(self.failureException, self.assertNotIsInstance,
thing, list)
def testAssertIn(self):
animals = {'monkey': 'banana', 'cow': 'grass', 'seal': 'fish'}
self.assertIn('a', 'abc')
self.assertIn(2, [1, 2, 3])
self.assertIn('monkey', animals)
self.assertNotIn('d', 'abc')
self.assertNotIn(0, [1, 2, 3])
self.assertNotIn('otter', animals)
self.assertRaises(self.failureException, self.assertIn, 'x', 'abc')
self.assertRaises(self.failureException, self.assertIn, 4, [1, 2, 3])
self.assertRaises(self.failureException, self.assertIn, 'elephant',
animals)
self.assertRaises(self.failureException, self.assertNotIn, 'c', 'abc')
self.assertRaises(self.failureException, self.assertNotIn, 1, [1, 2, 3])
self.assertRaises(self.failureException, self.assertNotIn, 'cow',
animals)
def testAssertDictContainsSubset(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
self.assertDictContainsSubset({}, {})
self.assertDictContainsSubset({}, {'a': 1})
self.assertDictContainsSubset({'a': 1}, {'a': 1})
self.assertDictContainsSubset({'a': 1}, {'a': 1, 'b': 2})
self.assertDictContainsSubset({'a': 1, 'b': 2}, {'a': 1, 'b': 2})
with self.assertRaises(self.failureException):
self.assertDictContainsSubset({1: "one"}, {})
with self.assertRaises(self.failureException):
self.assertDictContainsSubset({'a': 2}, {'a': 1})
with self.assertRaises(self.failureException):
self.assertDictContainsSubset({'c': 1}, {'a': 1})
with self.assertRaises(self.failureException):
self.assertDictContainsSubset({'a': 1, 'c': 1}, {'a': 1})
with self.assertRaises(self.failureException):
self.assertDictContainsSubset({'a': 1, 'c': 1}, {'a': 1})
one = ''.join(chr(i) for i in range(255))
# this used to cause a UnicodeDecodeError constructing the failure msg
with self.assertRaises(self.failureException):
self.assertDictContainsSubset({'foo': one}, {'foo': '\uFFFD'})
def testAssertEqual(self):
equal_pairs = [
((), ()),
({}, {}),
([], []),
(set(), set()),
(frozenset(), frozenset())]
for a, b in equal_pairs:
# This mess of try excepts is to test the assertEqual behavior
# itself.
try:
self.assertEqual(a, b)
except self.failureException:
self.fail('assertEqual(%r, %r) failed' % (a, b))
try:
self.assertEqual(a, b, msg='foo')
except self.failureException:
self.fail('assertEqual(%r, %r) with msg= failed' % (a, b))
try:
self.assertEqual(a, b, 'foo')
except self.failureException:
self.fail('assertEqual(%r, %r) with third parameter failed' %
(a, b))
unequal_pairs = [
((), []),
({}, set()),
(set([4,1]), frozenset([4,2])),
(frozenset([4,5]), set([2,3])),
(set([3,4]), set([5,4]))]
for a, b in unequal_pairs:
self.assertRaises(self.failureException, self.assertEqual, a, b)
self.assertRaises(self.failureException, self.assertEqual, a, b,
'foo')
self.assertRaises(self.failureException, self.assertEqual, a, b,
msg='foo')
def testEquality(self):
self.assertListEqual([], [])
self.assertTupleEqual((), ())
self.assertSequenceEqual([], ())
a = [0, 'a', []]
b = []
self.assertRaises(unittest.TestCase.failureException,
self.assertListEqual, a, b)
self.assertRaises(unittest.TestCase.failureException,
self.assertListEqual, tuple(a), tuple(b))
self.assertRaises(unittest.TestCase.failureException,
self.assertSequenceEqual, a, tuple(b))
b.extend(a)
self.assertListEqual(a, b)
self.assertTupleEqual(tuple(a), tuple(b))
self.assertSequenceEqual(a, tuple(b))
self.assertSequenceEqual(tuple(a), b)
self.assertRaises(self.failureException, self.assertListEqual,
a, tuple(b))
self.assertRaises(self.failureException, self.assertTupleEqual,
tuple(a), b)
self.assertRaises(self.failureException, self.assertListEqual, None, b)
self.assertRaises(self.failureException, self.assertTupleEqual, None,
tuple(b))
self.assertRaises(self.failureException, self.assertSequenceEqual,
None, tuple(b))
self.assertRaises(self.failureException, self.assertListEqual, 1, 1)
self.assertRaises(self.failureException, self.assertTupleEqual, 1, 1)
self.assertRaises(self.failureException, self.assertSequenceEqual,
1, 1)
self.assertDictEqual({}, {})
c = { 'x': 1 }
d = {}
self.assertRaises(unittest.TestCase.failureException,
self.assertDictEqual, c, d)
d.update(c)
self.assertDictEqual(c, d)
d['x'] = 0
self.assertRaises(unittest.TestCase.failureException,
self.assertDictEqual, c, d, 'These are unequal')
self.assertRaises(self.failureException, self.assertDictEqual, None, d)
self.assertRaises(self.failureException, self.assertDictEqual, [], d)
self.assertRaises(self.failureException, self.assertDictEqual, 1, 1)
def testAssertSequenceEqualMaxDiff(self):
self.assertEqual(self.maxDiff, 80*8)
seq1 = 'a' + 'x' * 80**2
seq2 = 'b' + 'x' * 80**2
diff = '\n'.join(difflib.ndiff(pprint.pformat(seq1).splitlines(),
pprint.pformat(seq2).splitlines()))
# the +1 is the leading \n added by assertSequenceEqual
omitted = unittest.case.DIFF_OMITTED % (len(diff) + 1,)
self.maxDiff = len(diff)//2
try:
self.assertSequenceEqual(seq1, seq2)
except self.failureException as e:
msg = e.args[0]
else:
self.fail('assertSequenceEqual did not fail.')
self.assertTrue(len(msg) < len(diff))
self.assertIn(omitted, msg)
self.maxDiff = len(diff) * 2
try:
self.assertSequenceEqual(seq1, seq2)
except self.failureException as e:
msg = e.args[0]
else:
self.fail('assertSequenceEqual did not fail.')
self.assertTrue(len(msg) > len(diff))
self.assertNotIn(omitted, msg)
self.maxDiff = None
try:
self.assertSequenceEqual(seq1, seq2)
except self.failureException as e:
msg = e.args[0]
else:
self.fail('assertSequenceEqual did not fail.')
self.assertTrue(len(msg) > len(diff))
self.assertNotIn(omitted, msg)
def testTruncateMessage(self):
self.maxDiff = 1
message = self._truncateMessage('foo', 'bar')
omitted = unittest.case.DIFF_OMITTED % len('bar')
self.assertEqual(message, 'foo' + omitted)
self.maxDiff = None
message = self._truncateMessage('foo', 'bar')
self.assertEqual(message, 'foobar')
self.maxDiff = 4
message = self._truncateMessage('foo', 'bar')
self.assertEqual(message, 'foobar')
def testAssertDictEqualTruncates(self):
test = unittest.TestCase('assertEqual')
def truncate(msg, diff):
return 'foo'
test._truncateMessage = truncate
try:
test.assertDictEqual({}, {1: 0})
except self.failureException as e:
self.assertEqual(str(e), 'foo')
else:
self.fail('assertDictEqual did not fail')
def testAssertMultiLineEqualTruncates(self):
test = unittest.TestCase('assertEqual')
def truncate(msg, diff):
return 'foo'
test._truncateMessage = truncate
try:
test.assertMultiLineEqual('foo', 'bar')
except self.failureException as e:
self.assertEqual(str(e), 'foo')
else:
self.fail('assertMultiLineEqual did not fail')
def testAssertEqual_diffThreshold(self):
# check threshold value
self.assertEqual(self._diffThreshold, 2**16)
        # disable maxDiff to get diff markers
self.maxDiff = None
# set a lower threshold value and add a cleanup to restore it
old_threshold = self._diffThreshold
self._diffThreshold = 2**8
self.addCleanup(lambda: setattr(self, '_diffThreshold', old_threshold))
# under the threshold: diff marker (^) in error message
s = 'x' * (2**7)
with self.assertRaises(self.failureException) as cm:
self.assertEqual(s + 'a', s + 'b')
self.assertIn('^', str(cm.exception))
self.assertEqual(s + 'a', s + 'a')
# over the threshold: diff not used and marker (^) not in error message
s = 'x' * (2**9)
# if the path that uses difflib is taken, _truncateMessage will be
# called -- replace it with explodingTruncation to verify that this
# doesn't happen
def explodingTruncation(message, diff):
raise SystemError('this should not be raised')
old_truncate = self._truncateMessage
self._truncateMessage = explodingTruncation
self.addCleanup(lambda: setattr(self, '_truncateMessage', old_truncate))
s1, s2 = s + 'a', s + 'b'
with self.assertRaises(self.failureException) as cm:
self.assertEqual(s1, s2)
self.assertNotIn('^', str(cm.exception))
self.assertEqual(str(cm.exception), '%r != %r' % (s1, s2))
self.assertEqual(s + 'a', s + 'a')
def testAssertCountEqual(self):
a = object()
self.assertCountEqual([1, 2, 3], [3, 2, 1])
self.assertCountEqual(['foo', 'bar', 'baz'], ['bar', 'baz', 'foo'])
self.assertCountEqual([a, a, 2, 2, 3], (a, 2, 3, a, 2))
self.assertCountEqual([1, "2", "a", "a"], ["a", "2", True, "a"])
self.assertRaises(self.failureException, self.assertCountEqual,
[1, 2] + [3] * 100, [1] * 100 + [2, 3])
self.assertRaises(self.failureException, self.assertCountEqual,
[1, "2", "a", "a"], ["a", "2", True, 1])
self.assertRaises(self.failureException, self.assertCountEqual,
[10], [10, 11])
self.assertRaises(self.failureException, self.assertCountEqual,
[10, 11], [10])
self.assertRaises(self.failureException, self.assertCountEqual,
[10, 11, 10], [10, 11])
# Test that sequences of unhashable objects can be tested for sameness:
self.assertCountEqual([[1, 2], [3, 4], 0], [False, [3, 4], [1, 2]])
# Test that iterator of unhashable objects can be tested for sameness:
self.assertCountEqual(iter([1, 2, [], 3, 4]),
iter([1, 2, [], 3, 4]))
# hashable types, but not orderable
self.assertRaises(self.failureException, self.assertCountEqual,
[], [divmod, 'x', 1, 5j, 2j, frozenset()])
# comparing dicts
self.assertCountEqual([{'a': 1}, {'b': 2}], [{'b': 2}, {'a': 1}])
        # comparing heterogeneous non-hashable sequences
self.assertCountEqual([1, 'x', divmod, []], [divmod, [], 'x', 1])
self.assertRaises(self.failureException, self.assertCountEqual,
[], [divmod, [], 'x', 1, 5j, 2j, set()])
self.assertRaises(self.failureException, self.assertCountEqual,
[[1]], [[2]])
# Same elements, but not same sequence length
self.assertRaises(self.failureException, self.assertCountEqual,
[1, 1, 2], [2, 1])
self.assertRaises(self.failureException, self.assertCountEqual,
[1, 1, "2", "a", "a"], ["2", "2", True, "a"])
self.assertRaises(self.failureException, self.assertCountEqual,
[1, {'b': 2}, None, True], [{'b': 2}, True, None])
# Same elements which don't reliably compare, in
# different order, see issue 10242
a = [{2,4}, {1,2}]
b = a[::-1]
self.assertCountEqual(a, b)
# test utility functions supporting assertCountEqual()
diffs = set(unittest.util._count_diff_all_purpose('aaabccd', 'abbbcce'))
expected = {(3,1,'a'), (1,3,'b'), (1,0,'d'), (0,1,'e')}
self.assertEqual(diffs, expected)
diffs = unittest.util._count_diff_all_purpose([[]], [])
self.assertEqual(diffs, [(1, 0, [])])
diffs = set(unittest.util._count_diff_hashable('aaabccd', 'abbbcce'))
expected = {(3,1,'a'), (1,3,'b'), (1,0,'d'), (0,1,'e')}
self.assertEqual(diffs, expected)
def testAssertSetEqual(self):
set1 = set()
set2 = set()
self.assertSetEqual(set1, set2)
self.assertRaises(self.failureException, self.assertSetEqual, None, set2)
self.assertRaises(self.failureException, self.assertSetEqual, [], set2)
self.assertRaises(self.failureException, self.assertSetEqual, set1, None)
self.assertRaises(self.failureException, self.assertSetEqual, set1, [])
set1 = set(['a'])
set2 = set()
self.assertRaises(self.failureException, self.assertSetEqual, set1, set2)
set1 = set(['a'])
set2 = set(['a'])
self.assertSetEqual(set1, set2)
set1 = set(['a'])
set2 = set(['a', 'b'])
self.assertRaises(self.failureException, self.assertSetEqual, set1, set2)
set1 = set(['a'])
set2 = frozenset(['a', 'b'])
self.assertRaises(self.failureException, self.assertSetEqual, set1, set2)
set1 = set(['a', 'b'])
set2 = frozenset(['a', 'b'])
self.assertSetEqual(set1, set2)
set1 = set()
set2 = "foo"
self.assertRaises(self.failureException, self.assertSetEqual, set1, set2)
self.assertRaises(self.failureException, self.assertSetEqual, set2, set1)
# make sure any string formatting is tuple-safe
set1 = set([(0, 1), (2, 3)])
set2 = set([(4, 5)])
self.assertRaises(self.failureException, self.assertSetEqual, set1, set2)
def testInequality(self):
# Try ints
self.assertGreater(2, 1)
self.assertGreaterEqual(2, 1)
self.assertGreaterEqual(1, 1)
self.assertLess(1, 2)
self.assertLessEqual(1, 2)
self.assertLessEqual(1, 1)
self.assertRaises(self.failureException, self.assertGreater, 1, 2)
self.assertRaises(self.failureException, self.assertGreater, 1, 1)
self.assertRaises(self.failureException, self.assertGreaterEqual, 1, 2)
self.assertRaises(self.failureException, self.assertLess, 2, 1)
self.assertRaises(self.failureException, self.assertLess, 1, 1)
self.assertRaises(self.failureException, self.assertLessEqual, 2, 1)
# Try Floats
self.assertGreater(1.1, 1.0)
self.assertGreaterEqual(1.1, 1.0)
self.assertGreaterEqual(1.0, 1.0)
self.assertLess(1.0, 1.1)
self.assertLessEqual(1.0, 1.1)
self.assertLessEqual(1.0, 1.0)
self.assertRaises(self.failureException, self.assertGreater, 1.0, 1.1)
self.assertRaises(self.failureException, self.assertGreater, 1.0, 1.0)
self.assertRaises(self.failureException, self.assertGreaterEqual, 1.0, 1.1)
self.assertRaises(self.failureException, self.assertLess, 1.1, 1.0)
self.assertRaises(self.failureException, self.assertLess, 1.0, 1.0)
self.assertRaises(self.failureException, self.assertLessEqual, 1.1, 1.0)
# Try Strings
self.assertGreater('bug', 'ant')
self.assertGreaterEqual('bug', 'ant')
self.assertGreaterEqual('ant', 'ant')
self.assertLess('ant', 'bug')
self.assertLessEqual('ant', 'bug')
self.assertLessEqual('ant', 'ant')
self.assertRaises(self.failureException, self.assertGreater, 'ant', 'bug')
self.assertRaises(self.failureException, self.assertGreater, 'ant', 'ant')
self.assertRaises(self.failureException, self.assertGreaterEqual, 'ant', 'bug')
self.assertRaises(self.failureException, self.assertLess, 'bug', 'ant')
self.assertRaises(self.failureException, self.assertLess, 'ant', 'ant')
self.assertRaises(self.failureException, self.assertLessEqual, 'bug', 'ant')
# Try bytes
self.assertGreater(b'bug', b'ant')
self.assertGreaterEqual(b'bug', b'ant')
self.assertGreaterEqual(b'ant', b'ant')
self.assertLess(b'ant', b'bug')
self.assertLessEqual(b'ant', b'bug')
self.assertLessEqual(b'ant', b'ant')
self.assertRaises(self.failureException, self.assertGreater, b'ant', b'bug')
self.assertRaises(self.failureException, self.assertGreater, b'ant', b'ant')
self.assertRaises(self.failureException, self.assertGreaterEqual, b'ant',
b'bug')
self.assertRaises(self.failureException, self.assertLess, b'bug', b'ant')
self.assertRaises(self.failureException, self.assertLess, b'ant', b'ant')
self.assertRaises(self.failureException, self.assertLessEqual, b'bug', b'ant')
def testAssertMultiLineEqual(self):
sample_text = """\
http://www.python.org/doc/2.3/lib/module-unittest.html
test case
A test case is the smallest unit of testing. [...]
"""
revised_sample_text = """\
http://www.python.org/doc/2.4.1/lib/module-unittest.html
test case
A test case is the smallest unit of testing. [...] You may provide your
own implementation that does not subclass from TestCase, of course.
"""
sample_text_error = """\
- http://www.python.org/doc/2.3/lib/module-unittest.html
? ^
+ http://www.python.org/doc/2.4.1/lib/module-unittest.html
? ^^^
test case
- A test case is the smallest unit of testing. [...]
+ A test case is the smallest unit of testing. [...] You may provide your
? +++++++++++++++++++++
+ own implementation that does not subclass from TestCase, of course.
"""
self.maxDiff = None
try:
self.assertMultiLineEqual(sample_text, revised_sample_text)
except self.failureException as e:
# need to remove the first line of the error message
error = str(e).split('\n', 1)[1]
# no fair testing ourself with ourself, and assertEqual is used for strings
# so can't use assertEqual either. Just use assertTrue.
self.assertTrue(sample_text_error == error)
    def testAssertEqualSingleLine(self):
sample_text = "laden swallows fly slowly"
revised_sample_text = "unladen swallows fly quickly"
sample_text_error = """\
- laden swallows fly slowly
? ^^^^
+ unladen swallows fly quickly
? ++ ^^^^^
"""
try:
self.assertEqual(sample_text, revised_sample_text)
except self.failureException as e:
error = str(e).split('\n', 1)[1]
self.assertTrue(sample_text_error == error)
def testAssertIsNone(self):
self.assertIsNone(None)
self.assertRaises(self.failureException, self.assertIsNone, False)
self.assertIsNotNone('DjZoPloGears on Rails')
self.assertRaises(self.failureException, self.assertIsNotNone, None)
def testAssertRegex(self):
self.assertRegex('asdfabasdf', r'ab+')
self.assertRaises(self.failureException, self.assertRegex,
'saaas', r'aaaa')
def testAssertRaisesRegex(self):
class ExceptionMock(Exception):
pass
def Stub():
raise ExceptionMock('We expect')
self.assertRaisesRegex(ExceptionMock, re.compile('expect$'), Stub)
self.assertRaisesRegex(ExceptionMock, 'expect$', Stub)
def testAssertNotRaisesRegex(self):
self.assertRaisesRegex(
self.failureException, '^Exception not raised by <lambda>$',
self.assertRaisesRegex, Exception, re.compile('x'),
lambda: None)
self.assertRaisesRegex(
self.failureException, '^Exception not raised by <lambda>$',
self.assertRaisesRegex, Exception, 'x',
lambda: None)
def testAssertRaisesRegexMismatch(self):
def Stub():
raise Exception('Unexpected')
self.assertRaisesRegex(
self.failureException,
r'"\^Expected\$" does not match "Unexpected"',
self.assertRaisesRegex, Exception, '^Expected$',
Stub)
self.assertRaisesRegex(
self.failureException,
r'"\^Expected\$" does not match "Unexpected"',
self.assertRaisesRegex, Exception,
re.compile('^Expected$'), Stub)
def testAssertRaisesExcValue(self):
class ExceptionMock(Exception):
pass
def Stub(foo):
raise ExceptionMock(foo)
v = "particular value"
ctx = self.assertRaises(ExceptionMock)
with ctx:
Stub(v)
e = ctx.exception
self.assertIsInstance(e, ExceptionMock)
self.assertEqual(e.args[0], v)
def testAssertWarnsCallable(self):
def _runtime_warn():
warnings.warn("foo", RuntimeWarning)
# Success when the right warning is triggered, even several times
self.assertWarns(RuntimeWarning, _runtime_warn)
self.assertWarns(RuntimeWarning, _runtime_warn)
# A tuple of warning classes is accepted
self.assertWarns((DeprecationWarning, RuntimeWarning), _runtime_warn)
# *args and **kwargs also work
self.assertWarns(RuntimeWarning,
warnings.warn, "foo", category=RuntimeWarning)
# Failure when no warning is triggered
with self.assertRaises(self.failureException):
self.assertWarns(RuntimeWarning, lambda: 0)
# Failure when another warning is triggered
with warnings.catch_warnings():
# Force default filter (in case tests are run with -We)
warnings.simplefilter("default", RuntimeWarning)
with self.assertRaises(self.failureException):
self.assertWarns(DeprecationWarning, _runtime_warn)
# Filters for other warnings are not modified
with warnings.catch_warnings():
warnings.simplefilter("error", RuntimeWarning)
with self.assertRaises(RuntimeWarning):
self.assertWarns(DeprecationWarning, _runtime_warn)
def testAssertWarnsContext(self):
        # Believe it or not, it is preferable to duplicate all tests above,
# to make sure the __warningregistry__ $@ is circumvented correctly.
def _runtime_warn():
warnings.warn("foo", RuntimeWarning)
_runtime_warn_lineno = inspect.getsourcelines(_runtime_warn)[1]
with self.assertWarns(RuntimeWarning) as cm:
_runtime_warn()
# A tuple of warning classes is accepted
with self.assertWarns((DeprecationWarning, RuntimeWarning)) as cm:
_runtime_warn()
# The context manager exposes various useful attributes
self.assertIsInstance(cm.warning, RuntimeWarning)
self.assertEqual(cm.warning.args[0], "foo")
self.assertIn("test_case.py", cm.filename)
self.assertEqual(cm.lineno, _runtime_warn_lineno + 1)
# Same with several warnings
with self.assertWarns(RuntimeWarning):
_runtime_warn()
_runtime_warn()
with self.assertWarns(RuntimeWarning):
warnings.warn("foo", category=RuntimeWarning)
# Failure when no warning is triggered
with self.assertRaises(self.failureException):
with self.assertWarns(RuntimeWarning):
pass
# Failure when another warning is triggered
with warnings.catch_warnings():
# Force default filter (in case tests are run with -We)
warnings.simplefilter("default", RuntimeWarning)
with self.assertRaises(self.failureException):
with self.assertWarns(DeprecationWarning):
_runtime_warn()
# Filters for other warnings are not modified
with warnings.catch_warnings():
warnings.simplefilter("error", RuntimeWarning)
with self.assertRaises(RuntimeWarning):
with self.assertWarns(DeprecationWarning):
_runtime_warn()
def testAssertWarnsRegexCallable(self):
def _runtime_warn(msg):
warnings.warn(msg, RuntimeWarning)
self.assertWarnsRegex(RuntimeWarning, "o+",
_runtime_warn, "foox")
# Failure when no warning is triggered
with self.assertRaises(self.failureException):
self.assertWarnsRegex(RuntimeWarning, "o+",
lambda: 0)
# Failure when another warning is triggered
with warnings.catch_warnings():
# Force default filter (in case tests are run with -We)
warnings.simplefilter("default", RuntimeWarning)
with self.assertRaises(self.failureException):
self.assertWarnsRegex(DeprecationWarning, "o+",
_runtime_warn, "foox")
# Failure when message doesn't match
with self.assertRaises(self.failureException):
self.assertWarnsRegex(RuntimeWarning, "o+",
_runtime_warn, "barz")
# A little trickier: we ask RuntimeWarnings to be raised, and then
# check for some of them. It is implementation-defined whether
# non-matching RuntimeWarnings are simply re-raised, or produce a
# failureException.
with warnings.catch_warnings():
warnings.simplefilter("error", RuntimeWarning)
with self.assertRaises((RuntimeWarning, self.failureException)):
self.assertWarnsRegex(RuntimeWarning, "o+",
_runtime_warn, "barz")
def testAssertWarnsRegexContext(self):
# Same as above, but with assertWarnsRegex as a context manager
def _runtime_warn(msg):
warnings.warn(msg, RuntimeWarning)
_runtime_warn_lineno = inspect.getsourcelines(_runtime_warn)[1]
with self.assertWarnsRegex(RuntimeWarning, "o+") as cm:
_runtime_warn("foox")
self.assertIsInstance(cm.warning, RuntimeWarning)
self.assertEqual(cm.warning.args[0], "foox")
self.assertIn("test_case.py", cm.filename)
self.assertEqual(cm.lineno, _runtime_warn_lineno + 1)
# Failure when no warning is triggered
with self.assertRaises(self.failureException):
with self.assertWarnsRegex(RuntimeWarning, "o+"):
pass
# Failure when another warning is triggered
with warnings.catch_warnings():
# Force default filter (in case tests are run with -We)
warnings.simplefilter("default", RuntimeWarning)
with self.assertRaises(self.failureException):
with self.assertWarnsRegex(DeprecationWarning, "o+"):
_runtime_warn("foox")
# Failure when message doesn't match
with self.assertRaises(self.failureException):
with self.assertWarnsRegex(RuntimeWarning, "o+"):
_runtime_warn("barz")
# A little trickier: we ask RuntimeWarnings to be raised, and then
# check for some of them. It is implementation-defined whether
# non-matching RuntimeWarnings are simply re-raised, or produce a
# failureException.
with warnings.catch_warnings():
warnings.simplefilter("error", RuntimeWarning)
with self.assertRaises((RuntimeWarning, self.failureException)):
with self.assertWarnsRegex(RuntimeWarning, "o+"):
_runtime_warn("barz")
def testDeprecatedMethodNames(self):
"""
Test that the deprecated methods raise a DeprecationWarning. See #9424.
"""
old = (
(self.failIfEqual, (3, 5)),
(self.assertNotEquals, (3, 5)),
(self.failUnlessEqual, (3, 3)),
(self.assertEquals, (3, 3)),
(self.failUnlessAlmostEqual, (2.0, 2.0)),
(self.assertAlmostEquals, (2.0, 2.0)),
(self.failIfAlmostEqual, (3.0, 5.0)),
(self.assertNotAlmostEquals, (3.0, 5.0)),
(self.failUnless, (True,)),
(self.assert_, (True,)),
(self.failUnlessRaises, (TypeError, lambda _: 3.14 + 'spam')),
(self.failIf, (False,)),
(self.assertDictContainsSubset, (dict(a=1, b=2), dict(a=1, b=2, c=3))),
(self.assertRaisesRegexp, (KeyError, 'foo', lambda: {}['foo'])),
(self.assertRegexpMatches, ('bar', 'bar')),
)
for meth, args in old:
with self.assertWarns(DeprecationWarning):
meth(*args)
# disable this test for now. When the version where the fail* methods will
# be removed is decided, re-enable it and update the version
def _testDeprecatedFailMethods(self):
"""Test that the deprecated fail* methods get removed in 3.x"""
if sys.version_info[:2] < (3, 3):
return
deprecated_names = [
'failIfEqual', 'failUnlessEqual', 'failUnlessAlmostEqual',
'failIfAlmostEqual', 'failUnless', 'failUnlessRaises', 'failIf',
'assertDictContainsSubset',
]
for deprecated_name in deprecated_names:
with self.assertRaises(AttributeError):
getattr(self, deprecated_name) # remove these in 3.x
def testDeepcopy(self):
# Issue: 5660
class TestableTest(unittest.TestCase):
def testNothing(self):
pass
test = TestableTest('testNothing')
# This shouldn't blow up
deepcopy(test)
def testPickle(self):
# Issue 10326
# Can't use TestCase classes defined in Test class as
# pickle does not work with inner classes
test = unittest.TestCase('run')
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
# blew up prior to fix
pickled_test = pickle.dumps(test, protocol=protocol)
unpickled_test = pickle.loads(pickled_test)
self.assertEqual(test, unpickled_test)
# exercise the TestCase instance in a way that will invoke
# the type equality lookup mechanism
unpickled_test.assertEqual(set(), set())
def testKeyboardInterrupt(self):
def _raise(self=None):
raise KeyboardInterrupt
def nothing(self):
pass
class Test1(unittest.TestCase):
test_something = _raise
class Test2(unittest.TestCase):
setUp = _raise
test_something = nothing
class Test3(unittest.TestCase):
test_something = nothing
tearDown = _raise
class Test4(unittest.TestCase):
def test_something(self):
self.addCleanup(_raise)
for klass in (Test1, Test2, Test3, Test4):
with self.assertRaises(KeyboardInterrupt):
klass('test_something').run()
def testSkippingEverywhere(self):
def _skip(self=None):
raise unittest.SkipTest('some reason')
def nothing(self):
pass
class Test1(unittest.TestCase):
test_something = _skip
class Test2(unittest.TestCase):
setUp = _skip
test_something = nothing
class Test3(unittest.TestCase):
test_something = nothing
tearDown = _skip
class Test4(unittest.TestCase):
def test_something(self):
self.addCleanup(_skip)
for klass in (Test1, Test2, Test3, Test4):
result = unittest.TestResult()
klass('test_something').run(result)
self.assertEqual(len(result.skipped), 1)
self.assertEqual(result.testsRun, 1)
def testSystemExit(self):
def _raise(self=None):
raise SystemExit
def nothing(self):
pass
class Test1(unittest.TestCase):
test_something = _raise
class Test2(unittest.TestCase):
setUp = _raise
test_something = nothing
class Test3(unittest.TestCase):
test_something = nothing
tearDown = _raise
class Test4(unittest.TestCase):
def test_something(self):
self.addCleanup(_raise)
for klass in (Test1, Test2, Test3, Test4):
result = unittest.TestResult()
klass('test_something').run(result)
self.assertEqual(len(result.errors), 1)
self.assertEqual(result.testsRun, 1)
@support.cpython_only
def testNoCycles(self):
case = unittest.TestCase()
wr = weakref.ref(case)
with support.disable_gc():
del case
self.assertFalse(wr())
| lgpl-3.0 |
gangadharkadam/v5_frappe | frappe/utils/data.py | 4 | 17064 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
# IMPORTANT: only import safe functions as this module will be included in jinja environment
import frappe
import operator
import re, urllib, datetime, math
import babel.dates
# datetime functions
def getdate(string_date):
"""
	Converts a string date (yyyy-mm-dd) to a datetime.date object
"""
if isinstance(string_date, datetime.date):
return string_date
elif isinstance(string_date, datetime.datetime):
return string_date.date()
if " " in string_date:
string_date = string_date.split(" ")[0]
return datetime.datetime.strptime(string_date, "%Y-%m-%d").date()
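# e.g. getdate("2015-06-03") -> datetime.date(2015, 6, 3); a datetime object
# or a "yyyy-mm-dd hh:mm:ss" string is truncated to its date part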
def add_to_date(date, years=0, months=0, days=0):
"""Adds `days` to the given date"""
format = isinstance(date, basestring)
if date:
date = getdate(date)
else:
raise Exception, "Start date required"
from dateutil.relativedelta import relativedelta
date += relativedelta(years=years, months=months, days=days)
if format:
return date.strftime("%Y-%m-%d")
else:
return date
def add_days(date, days):
return add_to_date(date, days=days)
def add_months(date, months):
return add_to_date(date, months=months)
def add_years(date, years):
return add_to_date(date, years=years)
def date_diff(string_ed_date, string_st_date):
return (getdate(string_ed_date) - getdate(string_st_date)).days
def time_diff(string_ed_date, string_st_date):
return get_datetime(string_ed_date) - get_datetime(string_st_date)
def time_diff_in_seconds(string_ed_date, string_st_date):
return time_diff(string_ed_date, string_st_date).total_seconds()
def time_diff_in_hours(string_ed_date, string_st_date):
return round(float(time_diff(string_ed_date, string_st_date).total_seconds()) / 3600, 6)
def now_datetime():
return convert_utc_to_user_timezone(datetime.datetime.utcnow())
def get_user_time_zone():
if getattr(frappe.local, "user_time_zone", None) is None:
frappe.local.user_time_zone = frappe.cache().get_value("time_zone")
if not frappe.local.user_time_zone:
frappe.local.user_time_zone = frappe.db.get_default('time_zone') or 'Asia/Calcutta'
frappe.cache().set_value("time_zone", frappe.local.user_time_zone)
return frappe.local.user_time_zone
def convert_utc_to_user_timezone(utc_timestamp):
from pytz import timezone, UnknownTimeZoneError
utcnow = timezone('UTC').localize(utc_timestamp)
try:
return utcnow.astimezone(timezone(get_user_time_zone()))
except UnknownTimeZoneError:
return utcnow
def now():
"""return current datetime as yyyy-mm-dd hh:mm:ss"""
if getattr(frappe.local, "current_date", None):
return getdate(frappe.local.current_date).strftime("%Y-%m-%d") + " " + \
now_datetime().strftime('%H:%M:%S.%f')
else:
return now_datetime().strftime('%Y-%m-%d %H:%M:%S.%f')
def nowdate():
"""return current date as yyyy-mm-dd"""
return now_datetime().strftime('%Y-%m-%d')
def today():
return nowdate()
def nowtime():
"""return current time in hh:mm"""
return now_datetime().strftime('%H:%M:%S.%f')
def get_first_day(dt, d_years=0, d_months=0):
"""
Returns the first day of the month for the date specified by date object
Also adds `d_years` and `d_months` if specified
"""
dt = getdate(dt)
# d_years, d_months are "deltas" to apply to dt
overflow_years, month = divmod(dt.month + d_months - 1, 12)
year = dt.year + d_years + overflow_years
return datetime.date(year, month + 1, 1)
def get_last_day(dt):
"""
Returns last day of the month using:
`get_first_day(dt, 0, 1) + datetime.timedelta(-1)`
"""
return get_first_day(dt, 0, 1) + datetime.timedelta(-1)
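# e.g. get_last_day("2016-02-10") -> datetime.date(2016, 2, 29): the first day
# of the next month minus one day is always the last day of this month, which
# handles leap years for free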
def get_datetime(datetime_str):
try:
return datetime.datetime.strptime(datetime_str, '%Y-%m-%d %H:%M:%S.%f')
except TypeError:
if isinstance(datetime_str, datetime.datetime):
return datetime_str.replace(tzinfo=None)
else:
raise
except ValueError:
if datetime_str=='0000-00-00 00:00:00.000000':
return None
return datetime.datetime.strptime(datetime_str, '%Y-%m-%d %H:%M:%S')
def get_datetime_str(datetime_obj):
if isinstance(datetime_obj, basestring):
datetime_obj = get_datetime(datetime_obj)
return datetime_obj.strftime('%Y-%m-%d %H:%M:%S.%f')
def formatdate(string_date=None, format_string=None):
"""
	Converts the given string date to :data:`user_format`
User format specified in defaults
Examples:
* dd-mm-yyyy
* mm-dd-yyyy
* dd/mm/yyyy
"""
date = getdate(string_date) if string_date else now_datetime().date()
if format_string:
return babel.dates.format_date(date, format_string or "medium", locale=(frappe.local.lang or "").replace("-", "_"))
else:
if getattr(frappe.local, "user_format", None) is None:
frappe.local.user_format = frappe.db.get_default("date_format")
out = frappe.local.user_format or "yyyy-mm-dd"
try:
return out.replace("dd", date.strftime("%d"))\
.replace("mm", date.strftime("%m"))\
.replace("yyyy", date.strftime("%Y"))
except ValueError, e:
raise frappe.ValidationError, str(e)
def global_date_format(date):
"""returns date as 1 January 2012"""
formatted_date = getdate(date).strftime("%d %B %Y")
return formatted_date.startswith("0") and formatted_date[1:] or formatted_date
def has_common(l1, l2):
"""Returns truthy value if there are common elements in lists l1 and l2"""
return set(l1) & set(l2)
def flt(s, precision=None):
"""Convert to float (ignore commas)"""
if isinstance(s, basestring):
s = s.replace(',','')
try:
num = float(s)
if precision is not None:
num = rounded(num, precision)
except Exception:
num = 0
return num
def cint(s):
"""Convert to integer"""
try: num = int(float(s))
except: num = 0
return num
def cstr(s):
if isinstance(s, unicode):
return s
	elif s is None:
return ''
elif isinstance(s, basestring):
return unicode(s, 'utf-8')
else:
return unicode(s)
def rounded(num, precision=0):
"""round method for round halfs to nearest even algorithm"""
precision = cint(precision)
multiplier = 10 ** precision
# avoid rounding errors
num = round(num * multiplier if precision else num, 8)
floor = math.floor(num)
decimal_part = num - floor
if decimal_part == 0.5:
num = floor if (floor % 2 == 0) else floor + 1
else:
num = round(num)
return (num / multiplier) if precision else num
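# Halves round to the nearest even digit ("banker's rounding"):
#   rounded(0.5) -> 0.0, rounded(1.5) -> 2.0, rounded(2.5) -> 2.0
#   rounded(0.125, 2) -> 0.12  (12.5 hundredths rounds to the even 12)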
def encode(obj, encoding="utf-8"):
if isinstance(obj, list):
out = []
for o in obj:
if isinstance(o, unicode):
out.append(o.encode(encoding))
else:
out.append(o)
return out
elif isinstance(obj, unicode):
return obj.encode(encoding)
else:
return obj
def parse_val(v):
"""Converts to simple datatypes from SQL query results"""
if isinstance(v, (datetime.date, datetime.datetime)):
v = unicode(v)
elif isinstance(v, datetime.timedelta):
v = ":".join(unicode(v).split(":")[:2])
elif isinstance(v, long):
v = int(v)
return v
def fmt_money(amount, precision=None, currency=None):
"""
Convert to string with commas for thousands, millions etc
"""
number_format = None
if currency:
number_format = frappe.db.get_value("Currency", currency, "number_format")
if not number_format:
number_format = frappe.db.get_default("number_format") or "#,###.##"
decimal_str, comma_str, number_format_precision = get_number_format_info(number_format)
if precision is None:
precision = number_format_precision
amount = '%.*f' % (precision, flt(amount))
if amount.find('.') == -1:
decimals = ''
else:
decimals = amount.split('.')[1]
parts = []
minus = ''
if flt(amount) < 0:
minus = '-'
amount = cstr(abs(flt(amount))).split('.')[0]
if len(amount) > 3:
parts.append(amount[-3:])
amount = amount[:-3]
	val = 2 if number_format == "#,##,###.##" else 3
while len(amount) > val:
parts.append(amount[-val:])
amount = amount[:-val]
parts.append(amount)
parts.reverse()
amount = comma_str.join(parts) + ((precision and decimal_str) and (decimal_str + decimals) or "")
amount = minus + amount
if currency and frappe.defaults.get_global_default("hide_currency_symbol") != "Yes":
symbol = frappe.db.get_value("Currency", currency, "symbol") or currency
amount = symbol + " " + amount
return amount
number_format_info = {
"#,###.##": (".", ",", 2),
"#.###,##": (",", ".", 2),
"# ###.##": (".", " ", 2),
"# ###,##": (",", " ", 2),
"#'###.##": (".", "'", 2),
"#, ###.##": (".", ", ", 2),
"#,##,###.##": (".", ",", 2),
"#,###.###": (".", ",", 3),
"#.###": ("", ".", 0),
"#,###": ("", ",", 0)
}
def get_number_format_info(format):
return number_format_info.get(format) or (".", ",", 2)
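# e.g. get_number_format_info("#,##,###.##") -> (".", ",", 2); with that
# (Indian-style) format resolved, fmt_money(1234567) yields "12,34,567.00",
# grouping the last three digits and then pairs. (fmt_money looks the format
# up on the Currency record or the system default, so a live site is assumed.)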
#
# convert currency to words
#
def money_in_words(number, main_currency = None, fraction_currency=None):
"""
Returns string in words with currency and fraction currency.
"""
from frappe.utils import get_defaults
_ = frappe._
if not number or flt(number) < 0:
return ""
d = get_defaults()
if not main_currency:
main_currency = d.get('currency', 'INR')
if not fraction_currency:
fraction_currency = frappe.db.get_value("Currency", main_currency, "fraction") or _("Cent")
n = "%.2f" % flt(number)
main, fraction = n.split('.')
if len(fraction)==1: fraction += '0'
number_format = frappe.db.get_value("Currency", main_currency, "number_format") or \
frappe.db.get_default("number_format") or "#,###.##"
in_million = True
if number_format == "#,##,###.##": in_million = False
out = main_currency + ' ' + in_words(main, in_million).title()
if cint(fraction):
out = out + ' ' + _('and') + ' ' + in_words(fraction, in_million).title() + ' ' + fraction_currency
return out + ' ' + _('only.')
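# Illustrative expectation (needs a live frappe database; assumes USD's
# fraction is "Cent" and the default "#,###.##" number format):
#   money_in_words(1250.50, "USD")
#   -> 'USD One Thousand, Two Hundred Fifty and Fifty Cent only.'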
#
# convert number to words
#
def in_words(integer, in_million=True):
"""
Returns string in words for the given integer.
"""
_ = frappe._
n=int(integer)
known = {0: 'zero', 1: 'one', 2: 'two', 3: 'three', 4: 'four', 5: 'five', 6: 'six', 7: 'seven', 8: 'eight', 9: 'nine', 10: 'ten',
11: 'eleven', 12: 'twelve', 13: 'thirteen', 14: 'fourteen', 15: 'fifteen', 16: 'sixteen', 17: 'seventeen', 18: 'eighteen',
19: 'nineteen', 20: 'twenty', 30: 'thirty', 40: 'forty', 50: 'fifty', 60: 'sixty', 70: 'seventy', 80: 'eighty', 90: 'ninety'}
def psn(n, known, xpsn):
import sys
if n in known: return known[n]
bestguess, remainder = str(n), 0
if n<=20:
frappe.errprint(sys.stderr)
frappe.errprint(n)
frappe.errprint("How did this happen?")
assert 0
elif n < 100:
bestguess= xpsn((n//10)*10, known, xpsn) + '-' + xpsn(n%10, known, xpsn)
return bestguess
elif n < 1000:
bestguess= xpsn(n//100, known, xpsn) + ' ' + _('hundred')
remainder = n%100
else:
if in_million:
if n < 1000000:
bestguess= xpsn(n//1000, known, xpsn) + ' ' + _('thousand')
remainder = n%1000
elif n < 1000000000:
bestguess= xpsn(n//1000000, known, xpsn) + ' ' + _('million')
remainder = n%1000000
else:
bestguess= xpsn(n//1000000000, known, xpsn) + ' ' + _('billion')
remainder = n%1000000000
else:
if n < 100000:
bestguess= xpsn(n//1000, known, xpsn) + ' ' + _('thousand')
remainder = n%1000
elif n < 10000000:
bestguess= xpsn(n//100000, known, xpsn) + ' ' + _('lakh')
remainder = n%100000
else:
bestguess= xpsn(n//10000000, known, xpsn) + ' ' + _('crore')
remainder = n%10000000
if remainder:
if remainder >= 100:
comma = ','
else:
comma = ''
return bestguess + comma + ' ' + xpsn(remainder, known, xpsn)
else:
return bestguess
return psn(n, known, psn)
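# Illustrative examples (assume frappe._ passes the English strings through):
#   in_words(100)                       -> 'one hundred'
#   in_words(1250000)                   -> 'one million, two hundred fifty thousand'
#   in_words(1250000, in_million=False) -> 'twelve lakh, fifty thousand'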
def is_html(text):
out = False
for key in ["<br>", "<p", "<img", "<div"]:
if key in text:
out = True
break
return out
# from Jinja2 code
_striptags_re = re.compile(r'(<!--.*?-->|<[^>]*>)')
def strip_html(text):
"""removes anything enclosed in and including <>"""
return _striptags_re.sub("", text)
def escape_html(text):
html_escape_table = {
"&": "&amp;",
'"': "&quot;",
"'": "&apos;",
">": "&gt;",
"<": "&lt;",
}
return "".join(html_escape_table.get(c,c) for c in text)
def pretty_date(iso_datetime):
"""
Takes an ISO time and returns a string representing how
long ago the date represents.
Ported from PrettyDate by John Resig
"""
if not iso_datetime: return ''
import math
if isinstance(iso_datetime, basestring):
iso_datetime = datetime.datetime.strptime(iso_datetime, '%Y-%m-%d %H:%M:%S.%f')
now_dt = datetime.datetime.strptime(now(), '%Y-%m-%d %H:%M:%S.%f')
dt_diff = now_dt - iso_datetime
# available only in python 2.7+
# dt_diff_seconds = dt_diff.total_seconds()
dt_diff_seconds = dt_diff.days * 86400.0 + dt_diff.seconds
dt_diff_days = math.floor(dt_diff_seconds / 86400.0)
# different cases
if dt_diff_seconds < 60.0:
return 'just now'
elif dt_diff_seconds < 120.0:
return '1 minute ago'
elif dt_diff_seconds < 3600.0:
return '%s minutes ago' % cint(math.floor(dt_diff_seconds / 60.0))
elif dt_diff_seconds < 7200.0:
return '1 hour ago'
elif dt_diff_seconds < 86400.0:
return '%s hours ago' % cint(math.floor(dt_diff_seconds / 3600.0))
elif dt_diff_days == 1.0:
return 'Yesterday'
elif dt_diff_days < 7.0:
return '%s days ago' % cint(dt_diff_days)
elif dt_diff_days < 31.0:
return '%s week(s) ago' % cint(math.ceil(dt_diff_days / 7.0))
elif dt_diff_days < 365.0:
return '%s months ago' % cint(math.ceil(dt_diff_days / 30.0))
else:
return 'more than %s year(s) ago' % cint(math.floor(dt_diff_days / 365.0))
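# Illustrative output for timestamps relative to now() (input must match the
# '%Y-%m-%d %H:%M:%S.%f' format):
#   90 seconds ago -> '1 minute ago'
#   5 hours ago    -> '5 hours ago'
#   10 days ago    -> '2 week(s) ago'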
def comma_or(some_list):
return comma_sep(some_list, " or ")
def comma_and(some_list):
return comma_sep(some_list, " and ")
def comma_sep(some_list, sep):
if isinstance(some_list, (list, tuple)):
# list(some_list) is done to preserve the existing list
some_list = [unicode(s) for s in list(some_list)]
if not some_list:
return ""
elif len(some_list) == 1:
return some_list[0]
else:
some_list = ["'%s'" % s for s in some_list]
return ", ".join(some_list[:-1]) + sep + some_list[-1]
else:
return some_list
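# Illustrative examples:
#   comma_and(["a", "b", "c"]) -> "'a', 'b' and 'c'"
#   comma_or(["only"])         -> 'only'        (single items are not quoted)
#   comma_and("not a list")    -> 'not a list'  (non-sequences pass through)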
def filter_strip_join(some_list, sep):
"""given a list, filter None values, strip spaces and join"""
return (cstr(sep)).join((cstr(a).strip() for a in filter(None, some_list)))
def get_url(uri=None, full_address=False):
"""get app url from request"""
host_name = frappe.local.conf.host_name
if not host_name:
if hasattr(frappe.local, "request") and frappe.local.request and frappe.local.request.host:
protocol = 'https://' if frappe.get_request_header('X-Forwarded-Proto', "") == 'https' else 'http://'
host_name = protocol + frappe.local.request.host
elif frappe.local.site:
host_name = "http://{}".format(frappe.local.site)
else:
host_name = frappe.db.get_value("Website Settings", "Website Settings",
"subdomain")
if host_name and "http" not in host_name:
host_name = "http://" + host_name
if not host_name:
host_name = "http://localhost"
if not uri and full_address:
uri = frappe.get_request_header("REQUEST_URI", "")
url = urllib.basejoin(host_name, uri) if uri else host_name
return url
def get_host_name():
return get_url().rsplit("//", 1)[-1]
def get_url_to_form(doctype, name, label=None):
if not label: label = name
return """<a href="/desk#!Form/%(doctype)s/%(name)s">%(label)s</a>""" % locals()
operator_map = {
# startswith
"^": lambda (a, b): (a or "").startswith(b),
# in or not in a list
"in": lambda (a, b): operator.contains(b, a),
"not in": lambda (a, b): not operator.contains(b, a),
# comparison operators
"=": lambda (a, b): operator.eq(a, b),
"!=": lambda (a, b): operator.ne(a, b),
">": lambda (a, b): operator.gt(a, b),
"<": lambda (a, b): operator.lt(a, b),
">=": lambda (a, b): operator.ge(a, b),
"<=": lambda (a, b): operator.le(a, b),
"not None": lambda (a, b): a and True or False,
"None": lambda (a, b): (not a) and True or False
}
def compare(val1, condition, val2):
ret = False
if condition in operator_map:
ret = operator_map[condition]((val1, val2))
return ret
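# Illustrative examples:
#   compare(5, ">", 3)            -> True
#   compare("frappe", "^", "fra") -> True  (startswith)
#   compare(2, "in", [1, 2, 3])   -> True
#   compare(None, "None", None)   -> True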
def scrub_urls(html):
html = expand_relative_urls(html)
html = quote_urls(html)
return html
def expand_relative_urls(html):
# expand relative urls
url = get_url()
if url.endswith("/"): url = url[:-1]
def _expand_relative_urls(match):
to_expand = list(match.groups())
if not to_expand[2].startswith("/"):
to_expand[2] = "/" + to_expand[2]
to_expand.insert(2, url)
return "".join(to_expand)
return re.sub(r'(href|src)([\s]*=[\s]*[\'"]?)((?!http)[^\'" >]+)([\'"]?)', _expand_relative_urls, html)
def quote_urls(html):
def _quote_url(match):
groups = list(match.groups())
groups[2] = urllib.quote(groups[2].encode("utf-8"), safe=b"~@#$&()*!+=:;,.?/'").decode("utf-8")
return "".join(groups)
return re.sub(r'(href|src)([\s]*=[\s]*[\'"]?)((?:http)[^\'">]+)([\'"]?)',
_quote_url, html)
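# Illustrative example (only absolute http/https URLs are touched):
#   quote_urls('<a href="http://example.com/a b">x</a>')
#   -> '<a href="http://example.com/a%20b">x</a>'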
def unique(seq):
"""use this instead of list(set()) to preserve order of the original list.
Thanks to Stackoverflow: http://stackoverflow.com/questions/480214/how-do-you-remove-duplicates-from-a-list-in-python-whilst-preserving-order"""
seen = set()
seen_add = seen.add
return [ x for x in seq if not (x in seen or seen_add(x)) ]
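# Illustrative example:
#   unique(["a", "b", "a", "c", "b"]) -> ['a', 'b', 'c']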
| mit |
cloudify-cosmo/cloudify-diamond-plugin | diamond_agent/tests/test_single_node.py | 1 | 7362 | import os
import time
import tempfile
import pickle as cPickle
import mock
from testtools import TestCase
from cloudify.workflows import local
from cloudify.decorators import operation
from diamond_agent import tasks
from diamond_agent.tests import IGNORED_LOCAL_WORKFLOW_MODULES
class TestSingleNode(TestCase):
def setUp(self):
super(TestSingleNode, self).setUp()
os.environ['MANAGEMENT_IP'] = '127.0.0.1'
self.is_uninstallable = True
self.env = None
self._original_get_agent_name = tasks._get_agent_name
tasks._get_agent_name = mock.MagicMock(return_value='agent_name')
self.addCleanup(self._unmock_agent_name)
def tearDown(self):
super(TestSingleNode, self).tearDown()
if self.env and self.is_uninstallable:
self.env.execute('uninstall', task_retries=0)
# custom handler + custom collector
def test_custom_collectors(self):
log_path = tempfile.mktemp()
inputs = {
'diamond_config': {
'prefix': tempfile.mkdtemp(prefix='cloudify-'),
'interval': 1,
'handlers': {
'test_handler.TestHandler': {
'path': 'handlers/test_handler.py',
'config': {
'log_path': log_path,
}
}
}
},
'collectors_config': {
'TestCollector': {
'path': 'collectors/test.py',
'config': {
'name': 'metric',
'value': 42,
},
},
},
}
self.env = self._create_env(inputs)
self.env.execute('install', task_retries=0)
def test_cloudify_handler_format(self):
log_path = tempfile.mktemp()
inputs = {
'diamond_config': {
'prefix': tempfile.mkdtemp(prefix='cloudify-'),
'interval': 1,
'handlers': {
'test_handler.TestHandler': {
'path': 'handlers/test_handler.py',
'config': {
'log_path': log_path,
'output_cloudify_format': True,
}
}
}
},
'collectors_config': {
'TestCollector': {
'path': 'collectors/test.py',
'config': {
'name': 'metric',
'value': 42,
},
},
},
}
self.env = self._create_env(inputs)
self.env.execute('install', task_retries=0)
# custom handler + no collector
# diamond should run without outputting anything
def test_no_collectors(self):
log_path = tempfile.mktemp()
inputs = {
'diamond_config': {
'prefix': tempfile.mkdtemp(prefix='cloudify-'),
'interval': 1,
'handlers': {
'test_handler.TestHandler': {
'path': 'handlers/test_handler.py',
'config': {
'log_path': log_path,
},
}
}
},
'collectors_config': {}
}
self.env = self._create_env(inputs)
self.env.execute('install', task_retries=0)
def test_uninstall_workflow(self):
inputs = {
'diamond_config': {
'prefix': tempfile.mkdtemp(prefix='cloudify-'),
'interval': 1,
'handlers': {
'diamond.handler.archive.ArchiveHandler': {
'config': {
'log_file': tempfile.mktemp(),
}
}
}
},
'collectors_config': {},
}
self.is_uninstallable = False
self.env = self._create_env(inputs)
self.env.execute('install', task_retries=0)
def test_no_handlers(self):
inputs = {
'diamond_config': {
'handlers': {},
},
'collectors_config': {},
}
self.is_uninstallable = False
self.env = self._create_env(inputs)
self.env.execute('install', task_retries=0)
def test_restart_plugin_script(self):
"""A script that restarts diamond doesn't interfere with the plugin.
If the add_collectors tasks run in parallel with a script that
also happens to restart diamond, there's a race condition between them
looking up the process by the PID, making one of them to break.
"""
blueprint_yaml = self._get_resource_path('blueprint',
'restart_diamond_script.yaml')
self.is_uninstallable = False
local_env = local.init_env(
blueprint_yaml, ignored_modules=IGNORED_LOCAL_WORKFLOW_MODULES)
self.addCleanup(local_env.execute, 'uninstall')
# this needs a threadpool size >1 so that the add_collectors task
# can run in parallel with the custom restart task
local_env.execute('install', task_thread_pool_size=5)
def _mock_get_paths(self, prefix):
return [
os.path.join(prefix, 'etc', tasks.CONFIG_NAME),
os.path.join(prefix, 'etc', 'collectors'),
os.path.join(prefix, 'collectors'),
os.path.join(prefix, 'etc', 'handlers'),
os.path.join(prefix, 'handlers')
]
def _create_env(self, inputs):
return local.init_env(self._blueprint_path(),
inputs=inputs,
ignored_modules=IGNORED_LOCAL_WORKFLOW_MODULES)
def _blueprint_path(self):
return self._get_resource_path('blueprint', 'single_node.yaml')
def _get_resource_path(self, *args):
return os.path.join(os.path.dirname(__file__), 'resources', *args)
def _unmock_agent_name(self):
tasks._get_agent_name = self._original_get_agent_name
def collector_in_log(path, collector):
with open(path, 'r') as fh:
try:
while True:
metric = cPickle.load(fh)
if metric.path.split('.')[3] == collector:
return True
except EOFError:
return False
def is_created(path, timeout=5):
for _ in range(timeout):
if os.path.isfile(path):
return True
time.sleep(1)
return False
def get_ids(instances, name):
for instance in instances:
if instance['name'] == name:
return instance['host_id'], instance['node_id'], instance['id']
def get_pid(config):
pid_file = os.path.join(config['diamond_config']['prefix'],
'var', 'run', 'diamond.pid')
with open(pid_file, 'r') as pf:
pid = int(pf.read())
return pid
@operation
def sleep_and_restart_diamond(ctx):
"""Restart diamond 5 times, with 3 second pauses between restarts.
This is a task used in the TestSingleNode.test_restart_plugin_script test.
"""
ctx.logger.info('Foo')
| apache-2.0 |
varunagrawal/azure-services | varunagrawal/site-packages/django/contrib/admindocs/views.py | 77 | 15064 | import inspect
import os
import re
from django import template
from django.template import RequestContext
from django.conf import settings
from django.contrib.admin.views.decorators import staff_member_required
from django.db import models
from django.shortcuts import render_to_response
from django.core.exceptions import ImproperlyConfigured, ViewDoesNotExist
from django.http import Http404
from django.core import urlresolvers
from django.contrib.admindocs import utils
from django.contrib.sites.models import Site
from django.utils.importlib import import_module
from django.utils.translation import ugettext as _
from django.utils.safestring import mark_safe
# Exclude methods starting with these strings from documentation
MODEL_METHODS_EXCLUDE = ('_', 'add_', 'delete', 'save', 'set_')
class GenericSite(object):
domain = 'example.com'
name = 'my site'
@staff_member_required
def doc_index(request):
if not utils.docutils_is_available:
return missing_docutils_page(request)
return render_to_response('admin_doc/index.html', {
'root_path': urlresolvers.reverse('admin:index'),
}, context_instance=RequestContext(request))
@staff_member_required
def bookmarklets(request):
admin_root = urlresolvers.reverse('admin:index')
return render_to_response('admin_doc/bookmarklets.html', {
'root_path': admin_root,
'admin_url': mark_safe("%s://%s%s" % (request.is_secure() and 'https' or 'http', request.get_host(), admin_root)),
}, context_instance=RequestContext(request))
@staff_member_required
def template_tag_index(request):
if not utils.docutils_is_available:
return missing_docutils_page(request)
load_all_installed_template_libraries()
tags = []
app_libs = template.libraries.items()
builtin_libs = [(None, lib) for lib in template.builtins]
for module_name, library in builtin_libs + app_libs:
for tag_name, tag_func in library.tags.items():
title, body, metadata = utils.parse_docstring(tag_func.__doc__)
if title:
title = utils.parse_rst(title, 'tag', _('tag:') + tag_name)
if body:
body = utils.parse_rst(body, 'tag', _('tag:') + tag_name)
for key in metadata:
metadata[key] = utils.parse_rst(metadata[key], 'tag', _('tag:') + tag_name)
if library in template.builtins:
tag_library = None
else:
tag_library = module_name.split('.')[-1]
tags.append({
'name': tag_name,
'title': title,
'body': body,
'meta': metadata,
'library': tag_library,
})
return render_to_response('admin_doc/template_tag_index.html', {
'root_path': urlresolvers.reverse('admin:index'),
'tags': tags
}, context_instance=RequestContext(request))
@staff_member_required
def template_filter_index(request):
if not utils.docutils_is_available:
return missing_docutils_page(request)
load_all_installed_template_libraries()
filters = []
app_libs = template.libraries.items()
builtin_libs = [(None, lib) for lib in template.builtins]
for module_name, library in builtin_libs + app_libs:
for filter_name, filter_func in library.filters.items():
title, body, metadata = utils.parse_docstring(filter_func.__doc__)
if title:
title = utils.parse_rst(title, 'filter', _('filter:') + filter_name)
if body:
body = utils.parse_rst(body, 'filter', _('filter:') + filter_name)
for key in metadata:
metadata[key] = utils.parse_rst(metadata[key], 'filter', _('filter:') + filter_name)
if library in template.builtins:
tag_library = None
else:
tag_library = module_name.split('.')[-1]
filters.append({
'name': filter_name,
'title': title,
'body': body,
'meta': metadata,
'library': tag_library,
})
return render_to_response('admin_doc/template_filter_index.html', {
'root_path': urlresolvers.reverse('admin:index'),
'filters': filters
}, context_instance=RequestContext(request))
@staff_member_required
def view_index(request):
if not utils.docutils_is_available:
return missing_docutils_page(request)
if settings.ADMIN_FOR:
settings_modules = [import_module(m) for m in settings.ADMIN_FOR]
else:
settings_modules = [settings]
views = []
for settings_mod in settings_modules:
urlconf = import_module(settings_mod.ROOT_URLCONF)
view_functions = extract_views_from_urlpatterns(urlconf.urlpatterns)
if Site._meta.installed:
site_obj = Site.objects.get(pk=settings_mod.SITE_ID)
else:
site_obj = GenericSite()
for (func, regex) in view_functions:
views.append({
'full_name': '%s.%s' % (func.__module__, getattr(func, '__name__', func.__class__.__name__)),
'site_id': settings_mod.SITE_ID,
'site': site_obj,
'url': simplify_regex(regex),
})
return render_to_response('admin_doc/view_index.html', {
'root_path': urlresolvers.reverse('admin:index'),
'views': views
}, context_instance=RequestContext(request))
@staff_member_required
def view_detail(request, view):
if not utils.docutils_is_available:
return missing_docutils_page(request)
mod, func = urlresolvers.get_mod_func(view)
try:
view_func = getattr(import_module(mod), func)
except (ImportError, AttributeError):
raise Http404
title, body, metadata = utils.parse_docstring(view_func.__doc__)
if title:
title = utils.parse_rst(title, 'view', _('view:') + view)
if body:
body = utils.parse_rst(body, 'view', _('view:') + view)
for key in metadata:
metadata[key] = utils.parse_rst(metadata[key], 'view', _('view:') + view)
return render_to_response('admin_doc/view_detail.html', {
'root_path': urlresolvers.reverse('admin:index'),
'name': view,
'summary': title,
'body': body,
'meta': metadata,
}, context_instance=RequestContext(request))
@staff_member_required
def model_index(request):
if not utils.docutils_is_available:
return missing_docutils_page(request)
m_list = [m._meta for m in models.get_models()]
return render_to_response('admin_doc/model_index.html', {
'root_path': urlresolvers.reverse('admin:index'),
'models': m_list
}, context_instance=RequestContext(request))
@staff_member_required
def model_detail(request, app_label, model_name):
if not utils.docutils_is_available:
return missing_docutils_page(request)
# Get the model class.
try:
app_mod = models.get_app(app_label)
except ImproperlyConfigured:
raise Http404(_("App %r not found") % app_label)
model = None
for m in models.get_models(app_mod):
if m._meta.object_name.lower() == model_name:
model = m
break
if model is None:
raise Http404(_("Model %(model_name)r not found in app %(app_label)r") % {'model_name': model_name, 'app_label': app_label})
opts = model._meta
# Gather fields/field descriptions.
fields = []
for field in opts.fields:
# ForeignKey is a special case since the field will actually be a
# descriptor that returns the other object
if isinstance(field, models.ForeignKey):
data_type = field.rel.to.__name__
app_label = field.rel.to._meta.app_label
verbose = utils.parse_rst((_("the related `%(app_label)s.%(data_type)s` object") % {'app_label': app_label, 'data_type': data_type}), 'model', _('model:') + data_type)
else:
data_type = get_readable_field_data_type(field)
verbose = field.verbose_name
fields.append({
'name': field.name,
'data_type': data_type,
'verbose': verbose,
'help_text': field.help_text,
})
# Gather many-to-many fields.
for field in opts.many_to_many:
data_type = field.rel.to.__name__
app_label = field.rel.to._meta.app_label
verbose = _("related `%(app_label)s.%(object_name)s` objects") % {'app_label': app_label, 'object_name': data_type}
fields.append({
'name': "%s.all" % field.name,
"data_type": 'List',
'verbose': utils.parse_rst(_("all %s") % verbose , 'model', _('model:') + opts.module_name),
})
fields.append({
'name' : "%s.count" % field.name,
'data_type' : 'Integer',
'verbose' : utils.parse_rst(_("number of %s") % verbose , 'model', _('model:') + opts.module_name),
})
# Gather model methods.
for func_name, func in model.__dict__.items():
if (inspect.isfunction(func) and len(inspect.getargspec(func)[0]) == 1):
try:
for exclude in MODEL_METHODS_EXCLUDE:
if func_name.startswith(exclude):
raise StopIteration
except StopIteration:
continue
verbose = func.__doc__
if verbose:
verbose = utils.parse_rst(utils.trim_docstring(verbose), 'model', _('model:') + opts.module_name)
fields.append({
'name': func_name,
'data_type': get_return_data_type(func_name),
'verbose': verbose,
})
# Gather related objects
for rel in opts.get_all_related_objects() + opts.get_all_related_many_to_many_objects():
verbose = _("related `%(app_label)s.%(object_name)s` objects") % {'app_label': rel.opts.app_label, 'object_name': rel.opts.object_name}
accessor = rel.get_accessor_name()
fields.append({
'name' : "%s.all" % accessor,
'data_type' : 'List',
'verbose' : utils.parse_rst(_("all %s") % verbose , 'model', _('model:') + opts.module_name),
})
fields.append({
'name' : "%s.count" % accessor,
'data_type' : 'Integer',
'verbose' : utils.parse_rst(_("number of %s") % verbose , 'model', _('model:') + opts.module_name),
})
return render_to_response('admin_doc/model_detail.html', {
'root_path': urlresolvers.reverse('admin:index'),
'name': '%s.%s' % (opts.app_label, opts.object_name),
'summary': _("Fields on %s objects") % opts.object_name,
'description': model.__doc__,
'fields': fields,
}, context_instance=RequestContext(request))
@staff_member_required
def template_detail(request, template):
templates = []
for site_settings_module in settings.ADMIN_FOR:
settings_mod = import_module(site_settings_module)
if Site._meta.installed:
site_obj = Site.objects.get(pk=settings_mod.SITE_ID)
else:
site_obj = GenericSite()
for dir in settings_mod.TEMPLATE_DIRS:
template_file = os.path.join(dir, template)
templates.append({
'file': template_file,
'exists': os.path.exists(template_file),
'contents': lambda: os.path.exists(template_file) and open(template_file).read() or '',
'site_id': settings_mod.SITE_ID,
'site': site_obj,
'order': list(settings_mod.TEMPLATE_DIRS).index(dir),
})
return render_to_response('admin_doc/template_detail.html', {
'root_path': urlresolvers.reverse('admin:index'),
'name': template,
'templates': templates,
}, context_instance=RequestContext(request))
####################
# Helper functions #
####################
def missing_docutils_page(request):
"""Display an error message for people without docutils"""
return render_to_response('admin_doc/missing_docutils.html')
def load_all_installed_template_libraries():
# Load/register all template tag libraries from installed apps.
for module_name in template.get_templatetags_modules():
mod = import_module(module_name)
try:
libraries = [
os.path.splitext(p)[0]
for p in os.listdir(os.path.dirname(mod.__file__))
if p.endswith('.py') and p[0].isalpha()
]
except OSError:
libraries = []
for library_name in libraries:
try:
lib = template.get_library(library_name)
except template.InvalidTemplateLibrary:
pass
def get_return_data_type(func_name):
"""Return a somewhat-helpful data type given a function name"""
if func_name.startswith('get_'):
if func_name.endswith('_list'):
return 'List'
elif func_name.endswith('_count'):
return 'Integer'
return ''
def get_readable_field_data_type(field):
"""Returns the description for a given field type, if it exists,
Fields' descriptions can contain format strings, which will be interpolated
against the values of field.__dict__ before being output."""
return field.description % field.__dict__
def extract_views_from_urlpatterns(urlpatterns, base=''):
"""
Return a list of views from a list of urlpatterns.
Each object in the returned list is a two-tuple: (view_func, regex)
"""
views = []
for p in urlpatterns:
if hasattr(p, 'url_patterns'):
try:
patterns = p.url_patterns
except ImportError:
continue
views.extend(extract_views_from_urlpatterns(patterns, base + p.regex.pattern))
elif hasattr(p, 'callback'):
try:
views.append((p.callback, base + p.regex.pattern))
except ViewDoesNotExist:
continue
else:
raise TypeError(_("%s does not appear to be a urlpattern object") % p)
return views
named_group_matcher = re.compile(r'\(\?P(<\w+>).+?\)')
non_named_group_matcher = re.compile(r'\(.*?\)')
def simplify_regex(pattern):
"""
Clean up urlpattern regexes into something somewhat readable by Mere Humans:
turns something like "^(?P<sport_slug>\w+)/athletes/(?P<athlete_slug>\w+)/$"
into "<sport_slug>/athletes/<athlete_slug>/"
"""
# handle named groups first
pattern = named_group_matcher.sub(lambda m: m.group(1), pattern)
# handle non-named groups
pattern = non_named_group_matcher.sub("<var>", pattern)
# clean up any outstanding regex-y characters.
pattern = pattern.replace('^', '').replace('$', '').replace('?', '').replace('//', '/').replace('\\', '')
if not pattern.startswith('/'):
pattern = '/' + pattern
return pattern
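# Illustrative example:
#   simplify_regex(r'^(?P<sport_slug>\w+)/athletes/(?P<athlete_slug>\w+)/$')
#   -> '/<sport_slug>/athletes/<athlete_slug>/'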
| gpl-2.0 |
lasote/conan | conans/test/integration/profile_test.py | 1 | 16563 | import unittest
from conans.client import tools
from conans.test.utils.tools import TestClient
from conans.test.utils.cpp_test_files import cpp_hello_conan_files
from conans.util.files import save, load
import os
from conans.paths import CONANFILE
from collections import OrderedDict
from conans.test.utils.test_files import temp_folder
from conans.test.utils.profiles import create_profile as _create_profile
from nose_parameterized import parameterized
conanfile_scope_env = """
import platform
from conans import ConanFile
class AConan(ConanFile):
name = "Hello0"
version = "0.1"
settings = "os", "compiler", "arch"
def build(self):
self.output.warn("Scope myscope: %s" % self.scope.myscope)
self.output.warn("Scope otherscope: %s" % self.scope.otherscope)
self.output.warn("Scope undefined: %s" % self.scope.undefined)
# Print environment vars
if self.settings.os == "Windows":
self.run("SET")
else:
self.run("env")
"""
def create_profile(folder, name, settings=None, scopes=None, package_settings=None, env=None,
package_env=None, options=None):
_create_profile(folder, name, settings, scopes, package_settings, env, package_env, options)
content = load(os.path.join(folder, name))
content = "include(default)\n \n" + content
save(os.path.join(folder, name), content)
class ProfileTest(unittest.TestCase):
def setUp(self):
self.client = TestClient()
def bad_syntax_test(self):
self.client.save({CONANFILE: conanfile_scope_env})
self.client.run("export lasote/stable")
profile = '''
[settings
'''
clang_profile_path = os.path.join(self.client.client_cache.profiles_path, "clang")
save(clang_profile_path, profile)
self.client.run("install Hello0/0.1@lasote/stable --build missing -pr clang",
ignore_error=True)
self.assertIn("Error reading 'clang' profile", self.client.user_io.out)
self.assertIn("Bad syntax", self.client.user_io.out)
profile = '''
[settings]
[invented]
'''
save(clang_profile_path, profile)
self.client.run("install Hello0/0.1@lasote/stable --build missing -pr clang",
ignore_error=True)
self.assertIn("Unrecognized field 'invented'", self.client.user_io.out)
self.assertIn("Error reading 'clang' profile", self.client.user_io.out)
profile = '''
[settings]
as
'''
save(clang_profile_path, profile)
self.client.run("install Hello0/0.1@lasote/stable --build missing -pr clang",
ignore_error=True)
self.assertIn("Error reading 'clang' profile: Invalid setting line 'as'",
self.client.user_io.out)
profile = '''
[env]
as
'''
save(clang_profile_path, profile)
self.client.run("install Hello0/0.1@lasote/stable --build missing -pr clang",
ignore_error=True)
self.assertIn("Error reading 'clang' profile: Invalid env line 'as'",
self.client.user_io.out)
profile = '''
[scopes]
as
'''
save(clang_profile_path, profile)
self.client.run("install Hello0/0.1@lasote/stable --build missing -pr clang",
ignore_error=True)
self.assertIn("Error reading 'clang' profile: Bad scope as", self.client.user_io.out)
profile = '''
[settings]
os = a value
'''
save(clang_profile_path, profile)
self.client.run("install Hello0/0.1@lasote/stable --build missing -pr clang",
ignore_error=True)
# stripped "a value"
self.assertIn("'a value' is not a valid 'settings.os'", self.client.user_io.out)
profile = '''
include(default)
[env]
ENV_VAR = a value
'''
save(clang_profile_path, profile)
self.client.run("install Hello0/0.1@lasote/stable --build missing -pr clang")
self._assert_env_variable_printed("ENV_VAR", "a value")
profile = '''
include(default)
# Line with comments is not a problem
[env]
# Not even here
ENV_VAR = a value
'''
save(clang_profile_path, profile)
self.client.run("install Hello0/0.1@lasote/stable --build -pr clang")
self._assert_env_variable_printed("ENV_VAR", "a value")
@parameterized.expand([("", ), ("./local_profiles/", ), (temp_folder() + "/", )])
def install_with_missing_profile_test(self, path):
self.client.save({CONANFILE: conanfile_scope_env})
error = self.client.run('install -pr "%sscopes_env"' % path, ignore_error=True)
self.assertTrue(error)
self.assertIn("ERROR: Profile not found:", self.client.out)
self.assertIn("scopes_env", self.client.out)
def install_profile_env_test(self):
files = cpp_hello_conan_files("Hello0", "0.1", build=False)
files["conanfile.py"] = conanfile_scope_env
create_profile(self.client.client_cache.profiles_path, "envs", settings={},
env=[("A_VAR", "A_VALUE")], package_env={"Hello0": [("OTHER_VAR", "2")]})
self.client.save(files)
self.client.run("export lasote/stable")
self.client.run("install Hello0/0.1@lasote/stable --build missing -pr envs")
self._assert_env_variable_printed("A_VAR", "A_VALUE")
self._assert_env_variable_printed("OTHER_VAR", "2")
# Override with package var
self.client.run("install Hello0/0.1@lasote/stable --build "
"-pr envs -e Hello0:A_VAR=OTHER_VALUE")
self._assert_env_variable_printed("A_VAR", "OTHER_VALUE")
self._assert_env_variable_printed("OTHER_VAR", "2")
# Override package var with package var
self.client.run("install Hello0/0.1@lasote/stable --build -pr envs "
"-e Hello0:A_VAR=OTHER_VALUE -e Hello0:OTHER_VAR=3")
self._assert_env_variable_printed("A_VAR", "OTHER_VALUE")
self._assert_env_variable_printed("OTHER_VAR", "3")
# Pass a variable with "=" symbol
self.client.run("install Hello0/0.1@lasote/stable --build -pr envs "
"-e Hello0:A_VAR=Valuewith=equal -e Hello0:OTHER_VAR=3")
self._assert_env_variable_printed("A_VAR", "Valuewith=equal")
self._assert_env_variable_printed("OTHER_VAR", "3")
def install_profile_settings_test(self):
files = cpp_hello_conan_files("Hello0", "0.1", build=False)
# Create a profile and use it
profile_settings = OrderedDict([("compiler", "Visual Studio"),
("compiler.version", "12"),
("compiler.runtime", "MD"),
("arch", "x86")])
create_profile(self.client.client_cache.profiles_path, "vs_12_86",
settings=profile_settings, package_settings={})
tools.replace_in_file(self.client.client_cache.default_profile_path,
"compiler.libcxx", "#compiler.libcxx", strict=False)
self.client.save(files)
self.client.run("export lasote/stable")
self.client.run("install --build missing -pr vs_12_86")
info = load(os.path.join(self.client.current_folder, "conaninfo.txt"))
for setting, value in profile_settings.items():
self.assertIn("%s=%s" % (setting, value), info)
# Try to override some settings in install command
self.client.run("install --build missing -pr vs_12_86 -s compiler.version=14")
info = load(os.path.join(self.client.current_folder, "conaninfo.txt"))
for setting, value in profile_settings.items():
if setting != "compiler.version":
self.assertIn("%s=%s" % (setting, value), info)
else:
self.assertIn("compiler.version=14", info)
# Use package settings in profile
tmp_settings = OrderedDict()
tmp_settings["compiler"] = "gcc"
tmp_settings["compiler.libcxx"] = "libstdc++11"
tmp_settings["compiler.version"] = "4.8"
package_settings = {"Hello0": tmp_settings}
create_profile(self.client.client_cache.profiles_path,
"vs_12_86_Hello0_gcc", settings=profile_settings,
package_settings=package_settings)
# Try to override some settings in install command
self.client.run("install --build missing -pr vs_12_86_Hello0_gcc -s compiler.version=14")
info = load(os.path.join(self.client.current_folder, "conaninfo.txt"))
self.assertIn("compiler=gcc", info)
self.assertIn("compiler.libcxx=libstdc++11", info)
# If other package is specified compiler is not modified
package_settings = {"NoExistsRecipe": tmp_settings}
create_profile(self.client.client_cache.profiles_path,
"vs_12_86_Hello0_gcc", settings=profile_settings,
package_settings=package_settings)
# Try to override some settings in install command
self.client.run("install --build missing -pr vs_12_86_Hello0_gcc -s compiler.version=14")
info = load(os.path.join(self.client.current_folder, "conaninfo.txt"))
self.assertIn("compiler=Visual Studio", info)
self.assertNotIn("compiler.libcxx", info)
# Mix command line package settings with profile
package_settings = {"Hello0": tmp_settings}
create_profile(self.client.client_cache.profiles_path, "vs_12_86_Hello0_gcc",
settings=profile_settings, package_settings=package_settings)
# Try to override some settings in install command
self.client.run("install --build missing -pr vs_12_86_Hello0_gcc"
" -s compiler.version=14 -s Hello0:compiler.libcxx=libstdc++")
info = load(os.path.join(self.client.current_folder, "conaninfo.txt"))
self.assertIn("compiler=gcc", info)
self.assertNotIn("compiler.libcxx=libstdc++11", info)
self.assertIn("compiler.libcxx=libstdc++", info)
def install_profile_options_test(self):
files = cpp_hello_conan_files("Hello0", "0.1", build=False)
create_profile(self.client.client_cache.profiles_path, "vs_12_86",
options=[("Hello0:language", 1),
("Hello0:static", False)])
self.client.save(files)
self.client.run("install --build missing -pr vs_12_86")
info = load(os.path.join(self.client.current_folder, "conaninfo.txt"))
self.assertIn("language=1", info)
self.assertIn("static=False", info)
def scopes_env_test(self):
# Create a profile and use it
create_profile(self.client.client_cache.profiles_path, "scopes_env", settings={},
scopes={"Hello0:myscope": "1",
"ALL:otherscope": "2",
"undefined": "3"}, # undefined scope do not apply to my packages
env=[("CXX", "/path/tomy/g++"), ("CC", "/path/tomy/gcc")])
self.client.save({CONANFILE: conanfile_scope_env})
self.client.run("export lasote/stable")
self.client.run("install Hello0/0.1@lasote/stable --build missing -pr scopes_env")
self.assertIn("Scope myscope: 1", self.client.user_io.out)
self.assertIn("Scope otherscope: 2", self.client.user_io.out)
self.assertIn("Scope undefined: None", self.client.user_io.out)
self._assert_env_variable_printed("CC", "/path/tomy/gcc")
self._assert_env_variable_printed("CXX", "/path/tomy/g++")
# The env variable shouldn't persist after install command
self.assertFalse(os.environ.get("CC", None) == "/path/tomy/gcc")
self.assertFalse(os.environ.get("CXX", None) == "/path/tomy/g++")
def test_package_test(self):
test_conanfile = '''from conans.model.conan_file import ConanFile
from conans import CMake
import os
class DefaultNameConan(ConanFile):
name = "DefaultName"
version = "0.1"
settings = "os", "compiler", "arch", "build_type"
requires = "Hello0/0.1@lasote/stable"
def build(self):
# Print environment vars
# self.run('cmake %s %s' % (self.conanfile_directory, cmake.command_line))
if self.settings.os == "Windows":
self.run('echo "My var is %ONE_VAR%"')
else:
self.run('echo "My var is $ONE_VAR"')
def test(self):
pass
'''
files = {"conanfile.py": conanfile_scope_env,
"test_package/conanfile.py": test_conanfile}
# Create a profile and use it
create_profile(self.client.client_cache.profiles_path, "scopes_env", settings={},
scopes={}, env=[("ONE_VAR", "ONE_VALUE")])
self.client.save(files)
self.client.run("test_package --profile scopes_env")
self._assert_env_variable_printed("ONE_VAR", "ONE_VALUE")
self.assertIn("My var is ONE_VALUE", str(self.client.user_io.out))
# Try now with package environment vars
create_profile(self.client.client_cache.profiles_path, "scopes_env2", settings={},
scopes={}, package_env={"DefaultName": [("ONE_VAR", "IN_TEST_PACKAGE")],
"Hello0": [("ONE_VAR", "PACKAGE VALUE")]})
self.client.run("test_package --profile scopes_env2")
self._assert_env_variable_printed("ONE_VAR", "PACKAGE VALUE")
self.assertIn("My var is IN_TEST_PACKAGE", str(self.client.user_io.out))
# Try now overriding some variables with command line
self.client.run("test_package --profile scopes_env2 "
"-e DefaultName:ONE_VAR=InTestPackageOverride "
"-e Hello0:ONE_VAR=PackageValueOverride ")
self._assert_env_variable_printed("ONE_VAR", "PackageValueOverride")
self.assertIn("My var is InTestPackageOverride", str(self.client.user_io.out))
# A global setting in command line won't override a scoped package variable
self.client.run("test_package --profile scopes_env2 -e ONE_VAR=AnotherValue")
self._assert_env_variable_printed("ONE_VAR", "PACKAGE VALUE")
def _assert_env_variable_printed(self, name, value):
self.assertIn("%s=%s" % (name, value), self.client.user_io.out)
def info_with_profiles_test(self):
self.client.run("remove '*' -f")
# Create a simple recipe to require
winreq_conanfile = '''
from conans.model.conan_file import ConanFile
class WinRequireDefaultNameConan(ConanFile):
name = "WinRequire"
version = "0.1"
settings = "os", "compiler", "arch", "build_type"
'''
files = {"conanfile.py": winreq_conanfile}
self.client.save(files)
self.client.run("export lasote/stable")
# Now require the first recipe depending on OS=windows
conanfile = '''from conans.model.conan_file import ConanFile
import os
class DefaultNameConan(ConanFile):
name = "Hello"
version = "0.1"
settings = "os", "compiler", "arch", "build_type"
def config(self):
if self.settings.os == "Windows":
self.requires.add("WinRequire/0.1@lasote/stable")
'''
files = {"conanfile.py": conanfile}
self.client.save(files)
self.client.run("export lasote/stable")
# Create a profile that doesn't activate the require
create_profile(self.client.client_cache.profiles_path, "scopes_env",
settings={"os": "Linux"},
scopes={})
# Install with the previous profile
self.client.run("info Hello/0.1@lasote/stable --profile scopes_env")
self.assertNotIn('''Requires:
WinRequire/0.1@lasote/stable''', self.client.user_io.out)
# Create a profile that activate the require
create_profile(self.client.client_cache.profiles_path, "scopes_env",
settings={"os": "Windows"},
scopes={})
# Install with the previous profile
self.client.run("info Hello/0.1@lasote/stable --profile scopes_env")
self.assertIn('''Requires:
WinRequire/0.1@lasote/stable''', self.client.user_io.out)
| mit |
luofei98/qgis | python/plugins/processing/algs/otb/maintenance/OTBTester.py | 4 | 16871 | # -*- coding: utf-8 -*-
"""
***************************************************************************
OTBTester.py
---------------------
Copyright : (C) 2013 by CS Systemes d'information (CS SI)
Email : otb at c-s dot fr (CS SI)
Contributors : Julien Malik (CS SI)
Oscar Picas (CS SI)
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Julien Malik, Oscar Picas'
__copyright__ = '(C) 2013, CS Systemes d\'information (CS SI)'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import unittest
import ConfigParser
import io
from parsing import (
File, Command, Comment, BlankLine, Arg, parse, prettify)
from string import Template
import os
import traceback
import logging
import copy
from ConfigParser import SafeConfigParser
from processing.otb.OTBHelper import get_OTB_log
class LowerTemplate(Template):
def safe_substitute(self, param):
ret = super(LowerTemplate, self).safe_substitute(param).lower()
return ret
class MakefileParser(object):
def __init__(self):
self.maxDiff = None
self.parser = SafeConfigParser()
if not os.path.exists('otbcfg.ini'):
raise Exception("OTB_SOURCE_DIR and OTB_BINARY_DIR must be specified in the file otbcfg.ini")
self.parser.read('otbcfg.ini')
self.root_dir = self.parser.get('otb','checkout_dir')
if not os.path.exists(self.root_dir):
raise Exception("Check otbcfg.ini : OTB_SOURCE_DIR and OTB_BINARY_DIR must be specified there")
self.build_dir = self.parser.get('otb', 'build_dir')
if not os.path.exists(self.build_dir):
raise Exception("Check otbcfg.ini : OTB_SOURCE_DIR and OTB_BINARY_DIR must be specified there")
self.logger = get_OTB_log()
def test_CMakelists(self):
provided = {}
provided["OTB_SOURCE_DIR"] = self.root_dir
provided["OTB_BINARY_DIR"] = self.build_dir
provided["OTB_DATA_LARGEINPUT_ROOT"] = os.path.normpath(os.path.join(self.root_dir, "../OTB-Data/Input"))
try:
with open(os.path.join(self.root_dir, "CMakeLists.txt")) as file_input:
content = file_input.read()
output = parse(content)
defined_paths = [each for each in output if 'Command' in str(type(each)) and "FIND_PATH" in each.name]
the_paths = {key.body[0].contents: [thing.contents for thing in key.body[1:]] for key in defined_paths}
the_sets = [each for each in output if 'Command' in str(type(each)) and "SET" in each.name.upper()]
the_sets = {key.body[0].contents: [thing.contents for thing in key.body[1:]] for key in the_sets}
the_sets = {key : " ".join(the_sets[key]) for key in the_sets}
the_strings = set([each.body[-1].contents for each in output if 'Command' in str(type(each)) and "STRING" in each.name.upper()] )
def mini_clean(item):
if item.startswith('"') and item.endswith('"') and " " not in item:
return item[1:-1]
return item
the_sets = {key : mini_clean(the_sets[key]) for key in the_sets}
def templatize(item):
if "$" in item:
return Template(item)
return item
for key in the_sets:
if key in the_strings:
the_sets[key] = the_sets[key].lower()
the_sets = {key : templatize(the_sets[key]) for key in the_sets}
for path in the_paths:
target_file = the_paths[path][1]
suggested_paths = []
if len(the_paths[path]) > 2:
suggested_paths = the_paths[path][2:]
try:
provided[path] = find_file(target_file)
except Exception as e:
for each in suggested_paths:
st = Template(each)
pac = os.path.abspath(st.safe_substitute(provided))
if os.path.exists(pac):
provided[path] = pac
break
resolve_dict(provided, the_sets)
provided.update(the_sets)
return provided
except Exception as e:
traceback.print_exc()
self.fail(e.message)
def add_make(self, previous_context, new_file):
input = open(new_file).read()
output = parse(input)
apps = [each for each in output if 'Command' in str(type(each))]
setcommands = [each for each in apps if 'SET' in each.name.upper()]
stringcommands = [each for each in apps if 'STRING' in each.name.upper()]
environment = previous_context
def mini_clean(item):
if item.startswith('"') and item.endswith('"') and " " not in item:
return item[1:-1]
return item
new_env = {}
for command in setcommands:
key = command.body[0].contents
ct = " ".join([item.contents for item in command.body[1:]])
ct = mini_clean(ct)
if "$" in ct:
values = Template(ct)
else:
values = ct
new_env[key] = values
for stringcommand in stringcommands:
key = stringcommand.body[-1].contents
ct = stringcommand.body[-2].contents
ct = mini_clean(ct.lower())
if "$" in ct:
values = LowerTemplate(ct)
else:
values = ct
new_env[key] = values
resolve_dict(environment, new_env)
environment.update(new_env)
return environment
def get_apps(self, the_makefile, the_dict):
input = open(the_makefile).read()
output = parse(input)
apps = [each for each in output if 'Command' in str(type(each))]
otb_apps = [each for each in apps if 'OTB_TEST_APPLICATION' in each.name.upper()]
return otb_apps
def get_tests(self, the_makefile, the_dict):
input = open(the_makefile).read()
output = parse(input)
apps = [each for each in output if 'Command' in str(type(each))]
otb_tests = [each for each in apps if 'ADD_TEST' in each.name.upper()]
return otb_tests
def get_apps_with_context(self, the_makefile, the_dict):
input = open(the_makefile).read()
output = parse(input)
def is_a_command(item):
return 'Command' in str(type(item))
appz = []
context = []
for each in output:
if is_a_command(each):
if 'FOREACH' in each.name and 'ENDFOREACH' not in each.name:
args = [item.contents for item in each.body]
context.append(args)
elif 'ENDFOREACH' in each.name:
context.pop()
elif 'OTB_TEST_APPLICATION' in each.name.upper():
appz.append((each, context[:]))
return appz
def get_name_line(self, the_list, the_dict):
items = ('NAME', 'APP', 'OPTIONS', 'TESTENVOPTIONS', 'VALID')
itemz = [[], [], [], [], []]
last_index = 0
for each in the_list:
if each.contents in items:
last_index = items.index(each.contents)
else:
itemz[last_index].append(each.contents)
result = itemz[0][0]
the_string = Template(result).safe_substitute(the_dict)
if '$' in the_string:
neo_dict = the_dict
the_string = Template(the_string).safe_substitute(neo_dict)
while '$' in the_string:
try:
the_string = Template(the_string).substitute(neo_dict)
except KeyError as e:
self.logger.warning("Key %s is not found in makefiles" % e.message)
neo_dict[e.message] = ""
if 'string.Template' in the_string:
raise Exception("Unexpected toString call in %s" % the_string)
return the_string
def get_command_line(self, the_list, the_dict):
items = ('NAME', 'APP', 'OPTIONS', 'TESTENVOPTIONS', 'VALID')
itemz = [[], [], [], [], []]
last_index = 0
for each in the_list:
if each.contents in items:
last_index = items.index(each.contents)
else:
itemz[last_index].append(each.contents)
result = []
result.extend(["otbcli_%s" % each for each in itemz[1]])
if len(result[0]) == 7:
raise Exception("App name is empty !")
result.extend(itemz[2])
result.append("-testenv")
result.extend(itemz[3])
the_string = Template(" ".join(result)).safe_substitute(the_dict)
if '$' in the_string:
neo_dict = the_dict
the_string = Template(" ".join(result)).safe_substitute(neo_dict)
while '$' in the_string:
try:
the_string = Template(the_string).substitute(neo_dict)
except KeyError as e:
self.logger.warning("Key %s is not found in makefiles" % e.message)
neo_dict[e.message] = ""
if 'string.Template' in the_string:
raise Exception("Unexpected toString call in %s" % the_string)
return the_string
def get_test(self, the_list, the_dict):
items = ('NAME', 'APP', 'OPTIONS', 'TESTENVOPTIONS', 'VALID')
itemz = [[], [], [], [], []]
last_index = 0
for each in the_list:
if each.contents in items:
last_index = items.index(each.contents)
else:
itemz[last_index].append(each.contents)
result = ["otbTestDriver"]
result.extend(itemz[4])
if len(result) == 1:
return ""
the_string = Template(" ".join(result)).safe_substitute(the_dict)
if '$' in the_string:
neo_dict = the_dict
the_string = Template(" ".join(result)).safe_substitute(neo_dict)
while '$' in the_string:
try:
the_string = Template(the_string).substitute(neo_dict)
except KeyError as e:
self.logger.warning("Key %s is not found in makefiles" % e.message)
neo_dict[e.message] = ""
if 'string.Template' in the_string:
raise Exception("Unexpected toString call in %s" % the_string)
return the_string
def test_algos(self):
tests = {}
algos_dir = os.path.join(self.root_dir, "Testing/Applications")
makefiles = find_files("CMakeLists.txt", algos_dir)
to_be_excluded = os.path.join(self.root_dir, "Testing/Applications/CMakeLists.txt")
if to_be_excluded in makefiles:
makefiles.remove(to_be_excluded)
resolve_algos = {}
for makefile in makefiles:
intermediate_makefiles = []
path = makefile.split(os.sep)[len(self.root_dir.split(os.sep)):-1]
for ind in range(len(path)):
tmp_path = path[:ind+1]
tmp_path.append("CMakeLists.txt")
tmp_path = os.sep.join(tmp_path)
candidate_makefile = os.path.join(self.root_dir, tmp_path)
if os.path.exists(candidate_makefile):
intermediate_makefiles.append(candidate_makefile)
resolve_algos[makefile] = intermediate_makefiles
dict_for_algo = {}
for makefile in makefiles:
basic = self.test_CMakelists()
last_context = self.add_make(basic, os.path.join(self.root_dir, "Testing/Utilities/CMakeLists.txt"))
for intermediate_makefile in resolve_algos[makefile]:
last_context = self.add_make(last_context, intermediate_makefile)
dict_for_algo[makefile] = last_context
for makefile in makefiles:
appz = self.get_apps_with_context(makefile, dict_for_algo[makefile])
for app, context in appz:
if len(context) == 0:
import copy
ddi = copy.deepcopy(dict_for_algo[makefile])
tk_dict = autoresolve(ddi)
tk_dict = autoresolve(tk_dict)
name_line = self.get_name_line(app.body, tk_dict)
command_line = self.get_command_line(app.body, tk_dict)
test_line = self.get_test(app.body, tk_dict)
if '$' in test_line or '$' in command_line:
if '$' in command_line:
self.logger.error(command_line)
if '$' in test_line:
self.logger.warning(test_line)
else:
tests[name_line] = (command_line, test_line)
else:
contexts = {}
for iteration in context:
key = iteration[0]
values = [each[1:-1].lower() for each in iteration[1:]]
contexts[key] = values
keyorder = contexts.keys()
import itertools
pool = [each for each in itertools.product(*contexts.values())]
import copy
for poolinstance in pool:
neo_dict = copy.deepcopy(dict_for_algo[makefile])
zipped = zip(keyorder, poolinstance)
for each in zipped:
neo_dict[each[0]] = each[1]
ak_dict = autoresolve(neo_dict)
ak_dict = autoresolve(ak_dict)
ak_dict = autoresolve(ak_dict)
ddi = ak_dict
name_line = self.get_name_line(app.body, ddi)
command_line = self.get_command_line(app.body, ddi)
test_line = self.get_test(app.body, ddi)
if '$' in command_line or '$' in test_line:
if '$' in command_line:
self.logger.error(command_line)
if '$' in test_line:
self.logger.warning(test_line)
else:
tests[name_line] = (command_line, test_line)
return tests
def autoresolve(a_dict):
def as_template(item, b_dict):
if hasattr(item, 'safe_substitute'):
return item.safe_substitute(b_dict)
ate = Template(item)
return ate.safe_substitute(b_dict)
templatized = {key: as_template(a_dict[key], a_dict) for key in a_dict.keys() }
return templatized
def find_file(file_name, base_dir = os.curdir):
import os
for root, dirs, files in os.walk(base_dir, topdown=False):
for name in files:
if name == file_name:
return os.path.join(root, name)
raise Exception("File not found %s" % file_name)
def find_files(file_name, base_dir = os.curdir):
import os
result = []
for root, dirs, files in os.walk(base_dir, topdown=False):
for name in files:
if name == file_name:
result.append(os.path.join(root, name))
return result
def resolve_dict(adia, adib):
init = len(adia)
fin = len(adia) + 1
def _resolve_dict(dia, dib):
for key in dib:
cand_value = dib[key]
if hasattr(cand_value, 'safe_substitute'):
value = cand_value.safe_substitute(dia)
if type(value) == type(".") and "$" not in value:
dia[key] = value
else:
dia[key] = cand_value
for key in dia:
if key in dib:
del dib[key]
while init != fin:
init = len(adia)
_resolve_dict(adia, adib)
fin = len(adia)
| gpl-2.0 |
aewallin/opencamlib | examples/python/old/kdtree_tst.py | 1 | 5359 | import ocl as cam
import camvtk
import time
import vtk
import datetime
if __name__ == "__main__":
myscreen = camvtk.VTKScreen()
myscreen.setAmbient(1,1,1)
#stl = camvtk.STLSurf(filename="demo.stl")
stl = camvtk.STLSurf(filename="demo2.stl")
print("STL surface read")
myscreen.addActor(stl)
stl.SetWireframe()
stl.SetColor((0.5,0.5,0.8))
#stl.SetFlat()
polydata = stl.src.GetOutput()
s= cam.STLSurf()
camvtk.vtkPolyData2OCLSTL(polydata, s)
print("STLSurf with ", s.size(), " triangles")
cutterDiameter=20
cutter = cam.CylCutter(cutterDiameter, 5)
#print cutter.str()
#print cc.type
minx=20
dx=15
maxx=130
miny=-70
dy=1
maxy=50
z=-10
bucketSize = 1
#pftp = cam.ParallelFinish()
#pftp.initCLPoints(minx,dx,maxx,miny,dy,maxy,z)
#pftp.initSTLSurf(s, bucketSize)
#pftp.dropCutterSTL1(cutter)
#print " made ", pftp.dcCalls, " drop-cutter calls"
#exit
pf2 = cam.ParallelFinish()
pf2.initCLPoints(minx,dx,maxx,miny,dy,maxy,z)
pf2.initSTLSurf(s, bucketSize)
pf2.dropCutterSTL2(cutter)
print(" made ", pf2.dcCalls, " drop-cutter calls")
#clpoints = pftp.getCLPoints()
#ccpoints = pftp.getCCPoints()
clpoints = pf2.getCLPoints()
ccpoints = pf2.getCCPoints()
#CLPointGrid(minx,dx,maxx,miny,dy,maxy,z)
nv=0
nn=0
ne=0
nf=0
myscreen.camera.SetPosition(3, 300, 200)
myscreen.camera.SetFocalPoint(75, 0, 0)
t = camvtk.Text()
t.SetPos( (myscreen.width-200, myscreen.height-30) )
myscreen.addActor( t)
t2 = camvtk.Text()
t2.SetPos( (myscreen.width-200, 30) )
myscreen.addActor( t2)
t3 = camvtk.Text()
t3.SetPos( (30, 30))
myscreen.addActor( t3)
t4 = camvtk.Text()
t4.SetPos( (30, myscreen.height-60))
myscreen.addActor( t4)
n=0
precl = cam.Point()
w2if = vtk.vtkWindowToImageFilter()
w2if.SetInput(myscreen.renWin)
lwr = vtk.vtkPNGWriter()
lwr.SetInput( w2if.GetOutput() )
w2if.Modified()
lwr.SetFileName("tux1.png")
for cl,cc in zip(clpoints,ccpoints):
camEye = myscreen.camera.GetFocalPoint()
camPos = myscreen.camera.GetPosition()
postext = "(%3.3f, %3.3f, %3.3f)" % (camPos[0], camPos[1], camPos[2])
eyetext = "(%3.3f, %3.3f, %3.3f)" % (camEye[0], camEye[1], camEye[2])
camtext = "Camera LookAt: "+eyetext+"\nCamera Pos: "+ postext
t4.SetText(camtext)
t.SetText(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
xtext = "%3.3f" % cl.x
ytext = "%3.3f" % cl.y
ztext = "%3.3f" % cl.z
t2.SetText( "X: " + xtext + "\nY: " + ytext + "\nZ: " + ztext )
if cc.type==cam.CCType.FACET:
nf+=1
col = (0,1,1)
elif cc.type == cam.CCType.VERTEX:
nv+=1
col = (0,1,0)
elif cc.type == cam.CCType.EDGE:
ne+=1
col = (1,0,0)
elif cc.type == cam.CCType.NONE:
#print "type=NONE!"
nn+=1
col = (1,1,1)
#if cl.isInside(t):
# col = (0, 1, 0)
#else:
# col = (1, 0, 0)
trilist = pf2.getTrianglesUnderCutter(cl, cutter)
#print "at cl=", cl.str() , " where len(trilist)=", len(trilist)
t3.SetText("Total Triangles: "+ str(s.size()) +"\nUnder Cutter (red): "+str(len(trilist)))
stl2 = camvtk.STLSurf(filename=None, triangleList=trilist, color=(1,0,0)) # a new surface with only triangles under cutter
stl2.SetWireframe()
#stl2.SetFlat()
myscreen.addActor(stl2)
trilist=[]
cutactor = camvtk.Cylinder(center=(cl.x,cl.y,cl.z), radius=cutterDiameter/2, height=20, color=(0.7,0,1))
myscreen.addActor( cutactor )
#myscreen.addActor( camvtk.Point(center=(cl.x,cl.y,cl.z) , color=col) )
if n==0:
precl = cl
else:
d = cl-precl
if (d.norm() < 90):
myscreen.addActor( camvtk.Line( p1=(precl.x, precl.y, precl.z), p2=(cl.x, cl.y, cl.z), color=(0,1,1) ) )
precl = cl
n=n+1
#myscreen.addActor( camvtk.Point(center=(cl2.x,cl2.y,cl2.z+0.2) , color=(0.6,0.2,0.9)) )
#myscreen.addActor( camvtk.Point(center=(cc.x,cc.y,cc.z), color=col) )
#print cc.type
myscreen.camera.Azimuth( 1 )
#time.sleep(0.01)
myscreen.render()
w2if.Modified()
lwr.SetFileName("frames/kdbetter"+ ('%05d' % n)+".png")
#lwr.Write()
#raw_input("Press Enter to continue")
myscreen.removeActor(stl2)
myscreen.removeActor( cutactor )
print("none=",nn," vertex=",nv, " edge=",ne, " facet=",nf, " sum=", nn+nv+ne+nf)
print(len(clpoints), " cl points evaluated")
#lwr.Write()
for n in range(1,36):
t.SetText(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
myscreen.camera.Azimuth( 1 )
time.sleep(0.01)
myscreen.render()
lwr.SetFileName("kd_frame"+ ('%03d' % n)+".png")
w2if.Modified()
#lwr.Write()
myscreen.iren.Start()
raw_input("Press Enter to terminate")
| lgpl-2.1 |
axelkennedal/dissen | dissenEnv/lib/python3.5/site-packages/django/forms/fields.py | 35 | 47161 | """
Field classes.
"""
from __future__ import unicode_literals
import copy
import datetime
import os
import re
import sys
import uuid
import warnings
from decimal import Decimal, DecimalException
from io import BytesIO
from django.core import validators
from django.core.exceptions import ValidationError
# Provide this import for backwards compatibility.
from django.core.validators import EMPTY_VALUES # NOQA
from django.forms.boundfield import BoundField
from django.forms.utils import from_current_timezone, to_current_timezone
from django.forms.widgets import (
FILE_INPUT_CONTRADICTION, CheckboxInput, ClearableFileInput, DateInput,
DateTimeInput, EmailInput, HiddenInput, MultipleHiddenInput,
NullBooleanSelect, NumberInput, Select, SelectMultiple,
SplitDateTimeWidget, SplitHiddenDateTimeWidget, TextInput, TimeInput,
URLInput,
)
from django.utils import formats, six
from django.utils.dateparse import parse_duration
from django.utils.deprecation import (
RemovedInDjango110Warning, RenameMethodsBase,
)
from django.utils.duration import duration_string
from django.utils.encoding import force_str, force_text, smart_text
from django.utils.ipv6 import clean_ipv6_address
from django.utils.six.moves.urllib.parse import urlsplit, urlunsplit
from django.utils.translation import ugettext_lazy as _, ungettext_lazy
__all__ = (
'Field', 'CharField', 'IntegerField',
'DateField', 'TimeField', 'DateTimeField', 'DurationField',
'RegexField', 'EmailField', 'FileField', 'ImageField', 'URLField',
'BooleanField', 'NullBooleanField', 'ChoiceField', 'MultipleChoiceField',
'ComboField', 'MultiValueField', 'FloatField', 'DecimalField',
'SplitDateTimeField', 'GenericIPAddressField', 'FilePathField',
'SlugField', 'TypedChoiceField', 'TypedMultipleChoiceField', 'UUIDField',
)
class RenameFieldMethods(RenameMethodsBase):
renamed_methods = (
('_has_changed', 'has_changed', RemovedInDjango110Warning),
)
class Field(six.with_metaclass(RenameFieldMethods, object)):
widget = TextInput # Default widget to use when rendering this type of Field.
hidden_widget = HiddenInput # Default widget to use when rendering this as "hidden".
default_validators = [] # Default set of validators
# Add an 'invalid' entry to default_error_message if you want a specific
# field error message not raised by the field validators.
default_error_messages = {
'required': _('This field is required.'),
}
empty_values = list(validators.EMPTY_VALUES)
# Tracks each time a Field instance is created. Used to retain order.
creation_counter = 0
def __init__(self, required=True, widget=None, label=None, initial=None,
help_text='', error_messages=None, show_hidden_initial=False,
validators=[], localize=False, disabled=False, label_suffix=None):
# required -- Boolean that specifies whether the field is required.
# True by default.
# widget -- A Widget class, or instance of a Widget class, that should
# be used for this Field when displaying it. Each Field has a
# default Widget that it'll use if you don't specify this. In
# most cases, the default widget is TextInput.
# label -- A verbose name for this field, for use in displaying this
# field in a form. By default, Django will use a "pretty"
# version of the form field name, if the Field is part of a
# Form.
# initial -- A value to use in this Field's initial display. This value
# is *not* used as a fallback if data isn't given.
# help_text -- An optional string to use as "help text" for this Field.
# error_messages -- An optional dictionary to override the default
# messages that the field will raise.
# show_hidden_initial -- Boolean that specifies if it is needed to render a
# hidden widget with initial value after widget.
# validators -- List of additional validators to use
# localize -- Boolean that specifies if the field should be localized.
# disabled -- Boolean that specifies whether the field is disabled, that
# is its widget is shown in the form but not editable.
# label_suffix -- Suffix to be added to the label. Overrides
# form's label_suffix.
self.required, self.label, self.initial = required, label, initial
self.show_hidden_initial = show_hidden_initial
self.help_text = help_text
self.disabled = disabled
self.label_suffix = label_suffix
widget = widget or self.widget
if isinstance(widget, type):
widget = widget()
# Trigger the localization machinery if needed.
self.localize = localize
if self.localize:
widget.is_localized = True
# Let the widget know whether it should display as required.
widget.is_required = self.required
# Hook into self.widget_attrs() for any Field-specific HTML attributes.
extra_attrs = self.widget_attrs(widget)
if extra_attrs:
widget.attrs.update(extra_attrs)
self.widget = widget
# Increase the creation counter, and save our local copy.
self.creation_counter = Field.creation_counter
Field.creation_counter += 1
messages = {}
for c in reversed(self.__class__.__mro__):
messages.update(getattr(c, 'default_error_messages', {}))
messages.update(error_messages or {})
self.error_messages = messages
self.validators = self.default_validators + validators
super(Field, self).__init__()
def prepare_value(self, value):
return value
def to_python(self, value):
return value
def validate(self, value):
if value in self.empty_values and self.required:
raise ValidationError(self.error_messages['required'], code='required')
def run_validators(self, value):
if value in self.empty_values:
return
errors = []
for v in self.validators:
try:
v(value)
except ValidationError as e:
if hasattr(e, 'code') and e.code in self.error_messages:
e.message = self.error_messages[e.code]
errors.extend(e.error_list)
if errors:
raise ValidationError(errors)
def clean(self, value):
"""
Validates the given value and returns its "cleaned" value as an
appropriate Python object.
Raises ValidationError for any errors.
"""
value = self.to_python(value)
self.validate(value)
self.run_validators(value)
return value
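    # Illustrative (not part of Django): clean() chains the three hooks above,
    # e.g. for a required IntegerField:
    #   IntegerField().clean('5')  # -> 5
    #   IntegerField().clean('')   # -> ValidationError('This field is required.')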
def bound_data(self, data, initial):
"""
Return the value that should be shown for this field on render of a
bound form, given the submitted POST data for the field and the initial
data, if any.
For most fields, this will simply be data; FileFields need to handle it
a bit differently.
"""
if self.disabled:
return initial
return data
def widget_attrs(self, widget):
"""
Given a Widget instance (*not* a Widget class), returns a dictionary of
any HTML attributes that should be added to the Widget, based on this
Field.
"""
return {}
def has_changed(self, initial, data):
"""
Return True if data differs from initial.
"""
try:
data = self.to_python(data)
if hasattr(self, '_coerce'):
return self._coerce(data) != self._coerce(initial)
except ValidationError:
return True
# For purposes of seeing whether something has changed, None is
# the same as an empty string, if the data or initial value we get
# is None, replace it with ''.
initial_value = initial if initial is not None else ''
data_value = data if data is not None else ''
return initial_value != data_value
def get_bound_field(self, form, field_name):
"""
Return a BoundField instance that will be used when accessing the form
field in a template.
"""
return BoundField(form, self, field_name)
def __deepcopy__(self, memo):
result = copy.copy(self)
memo[id(self)] = result
result.widget = copy.deepcopy(self.widget, memo)
result.validators = self.validators[:]
return result
class CharField(Field):
def __init__(self, max_length=None, min_length=None, strip=True, *args, **kwargs):
self.max_length = max_length
self.min_length = min_length
self.strip = strip
super(CharField, self).__init__(*args, **kwargs)
if min_length is not None:
self.validators.append(validators.MinLengthValidator(int(min_length)))
if max_length is not None:
self.validators.append(validators.MaxLengthValidator(int(max_length)))
def to_python(self, value):
"Returns a Unicode object."
if value in self.empty_values:
return ''
value = force_text(value)
if self.strip:
value = value.strip()
return value
def widget_attrs(self, widget):
attrs = super(CharField, self).widget_attrs(widget)
if self.max_length is not None:
# The HTML attribute is maxlength, not max_length.
attrs.update({'maxlength': str(self.max_length)})
return attrs
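# Illustrative usage (example values, not part of this module):
#   f = CharField(min_length=2, max_length=10)
#   f.clean('  bob  ')  # -> 'bob' (strip=True by default)
#   f.clean('a')        # raises ValidationError via MinLengthValidator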
class IntegerField(Field):
widget = NumberInput
default_error_messages = {
'invalid': _('Enter a whole number.'),
}
re_decimal = re.compile(r'\.0*\s*$')
def __init__(self, max_value=None, min_value=None, *args, **kwargs):
self.max_value, self.min_value = max_value, min_value
if kwargs.get('localize') and self.widget == NumberInput:
# Localized number input is not well supported on most browsers
kwargs.setdefault('widget', super(IntegerField, self).widget)
super(IntegerField, self).__init__(*args, **kwargs)
if max_value is not None:
self.validators.append(validators.MaxValueValidator(max_value))
if min_value is not None:
self.validators.append(validators.MinValueValidator(min_value))
def to_python(self, value):
"""
Validates that int() can be called on the input. Returns the result
of int(). Returns None for empty values.
"""
value = super(IntegerField, self).to_python(value)
if value in self.empty_values:
return None
if self.localize:
value = formats.sanitize_separators(value)
# Strip trailing decimal and zeros.
try:
value = int(self.re_decimal.sub('', str(value)))
except (ValueError, TypeError):
raise ValidationError(self.error_messages['invalid'], code='invalid')
return value
def widget_attrs(self, widget):
attrs = super(IntegerField, self).widget_attrs(widget)
if isinstance(widget, NumberInput):
if self.min_value is not None:
attrs['min'] = self.min_value
if self.max_value is not None:
attrs['max'] = self.max_value
return attrs
class FloatField(IntegerField):
default_error_messages = {
'invalid': _('Enter a number.'),
}
def to_python(self, value):
"""
Validates that float() can be called on the input. Returns the result
of float(). Returns None for empty values.
"""
value = super(IntegerField, self).to_python(value)
if value in self.empty_values:
return None
if self.localize:
value = formats.sanitize_separators(value)
try:
value = float(value)
except (ValueError, TypeError):
raise ValidationError(self.error_messages['invalid'], code='invalid')
return value
def validate(self, value):
super(FloatField, self).validate(value)
# Check for NaN (which is the only thing not equal to itself) and +/- infinity
if value != value or value in (Decimal('Inf'), Decimal('-Inf')):
raise ValidationError(self.error_messages['invalid'], code='invalid')
return value
def widget_attrs(self, widget):
attrs = super(FloatField, self).widget_attrs(widget)
if isinstance(widget, NumberInput) and 'step' not in widget.attrs:
attrs.setdefault('step', 'any')
return attrs
class DecimalField(IntegerField):
default_error_messages = {
'invalid': _('Enter a number.'),
}
def __init__(self, max_value=None, min_value=None, max_digits=None, decimal_places=None, *args, **kwargs):
self.max_digits, self.decimal_places = max_digits, decimal_places
super(DecimalField, self).__init__(max_value, min_value, *args, **kwargs)
self.validators.append(validators.DecimalValidator(max_digits, decimal_places))
def to_python(self, value):
"""
Validates that the input is a decimal number. Returns a Decimal
instance. Returns None for empty values. Ensures that there are no more
than max_digits in the number, and no more than decimal_places digits
after the decimal point.
"""
if value in self.empty_values:
return None
if self.localize:
value = formats.sanitize_separators(value)
value = smart_text(value).strip()
try:
value = Decimal(value)
except DecimalException:
raise ValidationError(self.error_messages['invalid'], code='invalid')
return value
def validate(self, value):
super(DecimalField, self).validate(value)
if value in self.empty_values:
return
        # Check for NaN, Inf and -Inf values. We can't compare directly for
        # NaN, since it is never equal to itself; however, NaN is the only
        # value that isn't equal to itself, so that property identifies it.
if value != value or value == Decimal("Inf") or value == Decimal("-Inf"):
raise ValidationError(self.error_messages['invalid'], code='invalid')
def widget_attrs(self, widget):
attrs = super(DecimalField, self).widget_attrs(widget)
if isinstance(widget, NumberInput) and 'step' not in widget.attrs:
if self.decimal_places is not None:
# Use exponential notation for small values since they might
# be parsed as 0 otherwise. ref #20765
step = str(Decimal('1') / 10 ** self.decimal_places).lower()
else:
step = 'any'
attrs.setdefault('step', step)
return attrs
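# Illustrative: with decimal_places=2 the NumberInput widget gets step="0.01"
# (str(Decimal('1') / 10**2)); with decimal_places=None it falls back to "any":
#   DecimalField(max_digits=5, decimal_places=2).widget_attrs(NumberInput())
#   # -> {'step': '0.01'}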
class BaseTemporalField(Field):
def __init__(self, input_formats=None, *args, **kwargs):
super(BaseTemporalField, self).__init__(*args, **kwargs)
if input_formats is not None:
self.input_formats = input_formats
def to_python(self, value):
# Try to coerce the value to unicode.
unicode_value = force_text(value, strings_only=True)
if isinstance(unicode_value, six.text_type):
value = unicode_value.strip()
# If unicode, try to strptime against each input format.
if isinstance(value, six.text_type):
for format in self.input_formats:
try:
return self.strptime(value, format)
except (ValueError, TypeError):
continue
raise ValidationError(self.error_messages['invalid'], code='invalid')
def strptime(self, value, format):
raise NotImplementedError('Subclasses must define this method.')
class DateField(BaseTemporalField):
widget = DateInput
input_formats = formats.get_format_lazy('DATE_INPUT_FORMATS')
default_error_messages = {
'invalid': _('Enter a valid date.'),
}
def to_python(self, value):
"""
Validates that the input can be converted to a date. Returns a Python
datetime.date object.
"""
if value in self.empty_values:
return None
if isinstance(value, datetime.datetime):
return value.date()
if isinstance(value, datetime.date):
return value
return super(DateField, self).to_python(value)
def strptime(self, value, format):
return datetime.datetime.strptime(force_str(value), format).date()
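# Illustrative: parsing tries each input format in order, e.g.
#   DateField(input_formats=['%d/%m/%Y']).clean('25/12/2015')
#   # -> datetime.date(2015, 12, 25)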
class TimeField(BaseTemporalField):
widget = TimeInput
input_formats = formats.get_format_lazy('TIME_INPUT_FORMATS')
default_error_messages = {
'invalid': _('Enter a valid time.')
}
def to_python(self, value):
"""
Validates that the input can be converted to a time. Returns a Python
datetime.time object.
"""
if value in self.empty_values:
return None
if isinstance(value, datetime.time):
return value
return super(TimeField, self).to_python(value)
def strptime(self, value, format):
return datetime.datetime.strptime(force_str(value), format).time()
class DateTimeField(BaseTemporalField):
widget = DateTimeInput
input_formats = formats.get_format_lazy('DATETIME_INPUT_FORMATS')
default_error_messages = {
'invalid': _('Enter a valid date/time.'),
}
def prepare_value(self, value):
if isinstance(value, datetime.datetime):
value = to_current_timezone(value)
return value
def to_python(self, value):
"""
Validates that the input can be converted to a datetime. Returns a
Python datetime.datetime object.
"""
if value in self.empty_values:
return None
if isinstance(value, datetime.datetime):
return from_current_timezone(value)
if isinstance(value, datetime.date):
result = datetime.datetime(value.year, value.month, value.day)
return from_current_timezone(result)
result = super(DateTimeField, self).to_python(value)
return from_current_timezone(result)
def strptime(self, value, format):
return datetime.datetime.strptime(force_str(value), format)
class DurationField(Field):
default_error_messages = {
'invalid': _('Enter a valid duration.'),
}
def prepare_value(self, value):
if isinstance(value, datetime.timedelta):
return duration_string(value)
return value
def to_python(self, value):
if value in self.empty_values:
return None
if isinstance(value, datetime.timedelta):
return value
value = parse_duration(value)
if value is None:
raise ValidationError(self.error_messages['invalid'], code='invalid')
return value
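# Illustrative: values go through django.utils.dateparse.parse_duration, so
#   DurationField().clean('00:10:00')  # -> datetime.timedelta(0, 600)
# while timedelta inputs pass through unchanged.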
class RegexField(CharField):
def __init__(self, regex, max_length=None, min_length=None, error_message=None, *args, **kwargs):
"""
regex can be either a string or a compiled regular expression object.
error_message is an optional error message to use, if
'Enter a valid value' is too generic for you.
"""
kwargs.setdefault('strip', False)
# error_message is just kept for backwards compatibility:
if error_message is not None:
warnings.warn(
"The 'error_message' argument is deprecated. Use "
"Field.error_messages['invalid'] instead.",
RemovedInDjango110Warning, stacklevel=2
)
error_messages = kwargs.get('error_messages') or {}
error_messages['invalid'] = error_message
kwargs['error_messages'] = error_messages
super(RegexField, self).__init__(max_length, min_length, *args, **kwargs)
self._set_regex(regex)
def _get_regex(self):
return self._regex
def _set_regex(self, regex):
if isinstance(regex, six.string_types):
regex = re.compile(regex, re.UNICODE)
self._regex = regex
if hasattr(self, '_regex_validator') and self._regex_validator in self.validators:
self.validators.remove(self._regex_validator)
self._regex_validator = validators.RegexValidator(regex=regex)
self.validators.append(self._regex_validator)
regex = property(_get_regex, _set_regex)
class EmailField(CharField):
widget = EmailInput
default_validators = [validators.validate_email]
def clean(self, value):
value = self.to_python(value).strip()
return super(EmailField, self).clean(value)
class FileField(Field):
widget = ClearableFileInput
default_error_messages = {
'invalid': _("No file was submitted. Check the encoding type on the form."),
'missing': _("No file was submitted."),
'empty': _("The submitted file is empty."),
'max_length': ungettext_lazy(
'Ensure this filename has at most %(max)d character (it has %(length)d).',
'Ensure this filename has at most %(max)d characters (it has %(length)d).',
'max'),
'contradiction': _('Please either submit a file or check the clear checkbox, not both.')
}
def __init__(self, *args, **kwargs):
self.max_length = kwargs.pop('max_length', None)
self.allow_empty_file = kwargs.pop('allow_empty_file', False)
super(FileField, self).__init__(*args, **kwargs)
def to_python(self, data):
if data in self.empty_values:
return None
# UploadedFile objects should have name and size attributes.
try:
file_name = data.name
file_size = data.size
except AttributeError:
raise ValidationError(self.error_messages['invalid'], code='invalid')
if self.max_length is not None and len(file_name) > self.max_length:
params = {'max': self.max_length, 'length': len(file_name)}
raise ValidationError(self.error_messages['max_length'], code='max_length', params=params)
if not file_name:
raise ValidationError(self.error_messages['invalid'], code='invalid')
if not self.allow_empty_file and not file_size:
raise ValidationError(self.error_messages['empty'], code='empty')
return data
def clean(self, data, initial=None):
# If the widget got contradictory inputs, we raise a validation error
if data is FILE_INPUT_CONTRADICTION:
raise ValidationError(self.error_messages['contradiction'], code='contradiction')
# False means the field value should be cleared; further validation is
# not needed.
if data is False:
if not self.required:
return False
# If the field is required, clearing is not possible (the widget
# shouldn't return False data in that case anyway). False is not
# in self.empty_value; if a False value makes it this far
# it should be validated from here on out as None (so it will be
# caught by the required check).
data = None
if not data and initial:
return initial
return super(FileField, self).clean(data)
def bound_data(self, data, initial):
if data in (None, FILE_INPUT_CONTRADICTION):
return initial
return data
def has_changed(self, initial, data):
if data is None:
return False
return True
class ImageField(FileField):
default_error_messages = {
'invalid_image': _(
"Upload a valid image. The file you uploaded was either not an "
"image or a corrupted image."
),
}
def to_python(self, data):
"""
Checks that the file-upload field data contains a valid image (GIF, JPG,
PNG, possibly others -- whatever the Python Imaging Library supports).
"""
f = super(ImageField, self).to_python(data)
if f is None:
return None
from PIL import Image
# We need to get a file object for Pillow. We might have a path or we might
# have to read the data into memory.
if hasattr(data, 'temporary_file_path'):
file = data.temporary_file_path()
else:
if hasattr(data, 'read'):
file = BytesIO(data.read())
else:
file = BytesIO(data['content'])
try:
# load() could spot a truncated JPEG, but it loads the entire
# image in memory, which is a DoS vector. See #3848 and #18520.
image = Image.open(file)
# verify() must be called immediately after the constructor.
image.verify()
# Annotating so subclasses can reuse it for their own validation
f.image = image
# Pillow doesn't detect the MIME type of all formats. In those
# cases, content_type will be None.
f.content_type = Image.MIME.get(image.format)
except Exception:
# Pillow doesn't recognize it as an image.
six.reraise(ValidationError, ValidationError(
self.error_messages['invalid_image'],
code='invalid_image',
), sys.exc_info()[2])
if hasattr(f, 'seek') and callable(f.seek):
f.seek(0)
return f
class URLField(CharField):
widget = URLInput
default_error_messages = {
'invalid': _('Enter a valid URL.'),
}
default_validators = [validators.URLValidator()]
def to_python(self, value):
def split_url(url):
"""
Returns a list of url parts via ``urlparse.urlsplit`` (or raises a
            ``ValidationError`` exception for certain malformed URLs).
"""
try:
return list(urlsplit(url))
except ValueError:
# urlparse.urlsplit can raise a ValueError with some
# misformatted URLs.
raise ValidationError(self.error_messages['invalid'], code='invalid')
value = super(URLField, self).to_python(value)
if value:
url_fields = split_url(value)
if not url_fields[0]:
# If no URL scheme given, assume http://
url_fields[0] = 'http'
if not url_fields[1]:
# Assume that if no domain is provided, that the path segment
# contains the domain.
url_fields[1] = url_fields[2]
url_fields[2] = ''
# Rebuild the url_fields list, since the domain segment may now
# contain the path too.
url_fields = split_url(urlunsplit(url_fields))
value = urlunsplit(url_fields)
return value
def clean(self, value):
value = self.to_python(value).strip()
return super(URLField, self).clean(value)
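# Illustrative normalization performed by to_python():
#   URLField().clean('example.com/path')
#   # -> 'http://example.com/path' (scheme assumed, domain recovered from path)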
class BooleanField(Field):
widget = CheckboxInput
def to_python(self, value):
"""Returns a Python boolean object."""
# Explicitly check for the string 'False', which is what a hidden field
# will submit for False. Also check for '0', since this is what
# RadioSelect will provide. Because bool("True") == bool('1') == True,
# we don't need to handle that explicitly.
if isinstance(value, six.string_types) and value.lower() in ('false', '0'):
value = False
else:
value = bool(value)
return super(BooleanField, self).to_python(value)
def validate(self, value):
if not value and self.required:
raise ValidationError(self.error_messages['required'], code='required')
def has_changed(self, initial, data):
# Sometimes data or initial could be None or '' which should be the
# same thing as False.
if initial == 'False':
# show_hidden_initial may have transformed False to 'False'
initial = False
return bool(initial) != bool(data)
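# Illustrative: 'False' and '0' clean to False, other truthy strings to True:
#   BooleanField(required=False).clean('False')  # -> False
#   BooleanField(required=False).clean('on')     # -> True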
class NullBooleanField(BooleanField):
"""
A field whose valid values are None, True and False. Invalid values are
cleaned to None.
"""
widget = NullBooleanSelect
def to_python(self, value):
"""
Explicitly checks for the string 'True' and 'False', which is what a
hidden field will submit for True and False, for 'true' and 'false',
which are likely to be returned by JavaScript serializations of forms,
and for '1' and '0', which is what a RadioField will submit. Unlike
        BooleanField, we need to explicitly check for True because we are
        not using the bool() function.
"""
if value in (True, 'True', 'true', '1'):
return True
elif value in (False, 'False', 'false', '0'):
return False
else:
return None
def validate(self, value):
pass
def has_changed(self, initial, data):
# None (unknown) and False (No) are not the same
if initial is not None:
initial = bool(initial)
if data is not None:
data = bool(data)
return initial != data
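# Illustrative tri-state cleaning:
#   NullBooleanField().clean('true')   # -> True
#   NullBooleanField().clean('0')      # -> False
#   NullBooleanField().clean('maybe')  # -> None (invalid values clean to None)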
class CallableChoiceIterator(object):
def __init__(self, choices_func):
self.choices_func = choices_func
def __iter__(self):
for e in self.choices_func():
yield e
class ChoiceField(Field):
widget = Select
default_error_messages = {
'invalid_choice': _('Select a valid choice. %(value)s is not one of the available choices.'),
}
def __init__(self, choices=(), required=True, widget=None, label=None,
initial=None, help_text='', *args, **kwargs):
super(ChoiceField, self).__init__(required=required, widget=widget, label=label,
initial=initial, help_text=help_text, *args, **kwargs)
self.choices = choices
def __deepcopy__(self, memo):
result = super(ChoiceField, self).__deepcopy__(memo)
result._choices = copy.deepcopy(self._choices, memo)
return result
def _get_choices(self):
return self._choices
def _set_choices(self, value):
# Setting choices also sets the choices on the widget.
# choices can be any iterable, but we call list() on it because
# it will be consumed more than once.
if callable(value):
value = CallableChoiceIterator(value)
else:
value = list(value)
self._choices = self.widget.choices = value
choices = property(_get_choices, _set_choices)
def to_python(self, value):
"Returns a Unicode object."
if value in self.empty_values:
return ''
return smart_text(value)
def validate(self, value):
"""
Validates that the input is in self.choices.
"""
super(ChoiceField, self).validate(value)
if value and not self.valid_value(value):
raise ValidationError(
self.error_messages['invalid_choice'],
code='invalid_choice',
params={'value': value},
)
def valid_value(self, value):
"Check to see if the provided value is a valid choice"
text_value = force_text(value)
for k, v in self.choices:
if isinstance(v, (list, tuple)):
# This is an optgroup, so look inside the group for options
for k2, v2 in v:
if value == k2 or text_value == force_text(k2):
return True
else:
if value == k or text_value == force_text(k):
return True
return False
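# Illustrative: valid_value() also searches optgroup choices, e.g.
#   f = ChoiceField(choices=[('audio', (('mp3', 'MP3'), ('wav', 'WAV')))])
#   f.valid_value('wav')  # -> True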
class TypedChoiceField(ChoiceField):
def __init__(self, *args, **kwargs):
self.coerce = kwargs.pop('coerce', lambda val: val)
self.empty_value = kwargs.pop('empty_value', '')
super(TypedChoiceField, self).__init__(*args, **kwargs)
def _coerce(self, value):
"""
Validate that the value can be coerced to the right type (if not empty).
"""
if value == self.empty_value or value in self.empty_values:
return self.empty_value
try:
value = self.coerce(value)
except (ValueError, TypeError, ValidationError):
raise ValidationError(
self.error_messages['invalid_choice'],
code='invalid_choice',
params={'value': value},
)
return value
def clean(self, value):
value = super(TypedChoiceField, self).clean(value)
return self._coerce(value)
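# Illustrative coercion:
#   f = TypedChoiceField(choices=[(1, '+1'), (-1, '-1')], coerce=int)
#   f.clean('1')  # -> 1 (validated against choices, then coerced)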
class MultipleChoiceField(ChoiceField):
hidden_widget = MultipleHiddenInput
widget = SelectMultiple
default_error_messages = {
'invalid_choice': _('Select a valid choice. %(value)s is not one of the available choices.'),
'invalid_list': _('Enter a list of values.'),
}
def to_python(self, value):
if not value:
return []
elif not isinstance(value, (list, tuple)):
raise ValidationError(self.error_messages['invalid_list'], code='invalid_list')
return [smart_text(val) for val in value]
def validate(self, value):
"""
Validates that the input is a list or tuple.
"""
if self.required and not value:
raise ValidationError(self.error_messages['required'], code='required')
# Validate that each value in the value list is in self.choices.
for val in value:
if not self.valid_value(val):
raise ValidationError(
self.error_messages['invalid_choice'],
code='invalid_choice',
params={'value': val},
)
def has_changed(self, initial, data):
if initial is None:
initial = []
if data is None:
data = []
if len(initial) != len(data):
return True
initial_set = set(force_text(value) for value in initial)
data_set = set(force_text(value) for value in data)
return data_set != initial_set
class TypedMultipleChoiceField(MultipleChoiceField):
def __init__(self, *args, **kwargs):
self.coerce = kwargs.pop('coerce', lambda val: val)
self.empty_value = kwargs.pop('empty_value', [])
super(TypedMultipleChoiceField, self).__init__(*args, **kwargs)
def _coerce(self, value):
"""
Validates that the values are in self.choices and can be coerced to the
right type.
"""
if value == self.empty_value or value in self.empty_values:
return self.empty_value
new_value = []
for choice in value:
try:
new_value.append(self.coerce(choice))
except (ValueError, TypeError, ValidationError):
raise ValidationError(
self.error_messages['invalid_choice'],
code='invalid_choice',
params={'value': choice},
)
return new_value
def clean(self, value):
value = super(TypedMultipleChoiceField, self).clean(value)
return self._coerce(value)
def validate(self, value):
if value != self.empty_value:
super(TypedMultipleChoiceField, self).validate(value)
elif self.required:
raise ValidationError(self.error_messages['required'], code='required')
class ComboField(Field):
"""
A Field whose clean() method calls multiple Field clean() methods.
"""
def __init__(self, fields=(), *args, **kwargs):
super(ComboField, self).__init__(*args, **kwargs)
# Set 'required' to False on the individual fields, because the
# required validation will be handled by ComboField, not by those
# individual fields.
for f in fields:
f.required = False
self.fields = fields
def clean(self, value):
"""
Validates the given value against all of self.fields, which is a
list of Field instances.
"""
super(ComboField, self).clean(value)
for field in self.fields:
value = field.clean(value)
return value
class MultiValueField(Field):
"""
A Field that aggregates the logic of multiple Fields.
Its clean() method takes a "decompressed" list of values, which are then
cleaned into a single value according to self.fields. Each value in
this list is cleaned by the corresponding field -- the first value is
cleaned by the first field, the second value is cleaned by the second
field, etc. Once all fields are cleaned, the list of clean values is
"compressed" into a single value.
Subclasses should not have to implement clean(). Instead, they must
implement compress(), which takes a list of valid values and returns a
"compressed" version of those values -- a single value.
You'll probably want to use this with MultiWidget.
"""
default_error_messages = {
'invalid': _('Enter a list of values.'),
'incomplete': _('Enter a complete value.'),
}
def __init__(self, fields=(), *args, **kwargs):
self.require_all_fields = kwargs.pop('require_all_fields', True)
super(MultiValueField, self).__init__(*args, **kwargs)
for f in fields:
f.error_messages.setdefault('incomplete',
self.error_messages['incomplete'])
if self.require_all_fields:
# Set 'required' to False on the individual fields, because the
# required validation will be handled by MultiValueField, not
# by those individual fields.
f.required = False
self.fields = fields
def __deepcopy__(self, memo):
result = super(MultiValueField, self).__deepcopy__(memo)
result.fields = tuple(x.__deepcopy__(memo) for x in self.fields)
return result
def validate(self, value):
pass
def clean(self, value):
"""
Validates every value in the given list. A value is validated against
the corresponding Field in self.fields.
For example, if this MultiValueField was instantiated with
fields=(DateField(), TimeField()), clean() would call
DateField.clean(value[0]) and TimeField.clean(value[1]).
"""
clean_data = []
errors = []
if not value or isinstance(value, (list, tuple)):
if not value or not [v for v in value if v not in self.empty_values]:
if self.required:
raise ValidationError(self.error_messages['required'], code='required')
else:
return self.compress([])
else:
raise ValidationError(self.error_messages['invalid'], code='invalid')
for i, field in enumerate(self.fields):
try:
field_value = value[i]
except IndexError:
field_value = None
if field_value in self.empty_values:
if self.require_all_fields:
# Raise a 'required' error if the MultiValueField is
# required and any field is empty.
if self.required:
raise ValidationError(self.error_messages['required'], code='required')
elif field.required:
# Otherwise, add an 'incomplete' error to the list of
# collected errors and skip field cleaning, if a required
# field is empty.
if field.error_messages['incomplete'] not in errors:
errors.append(field.error_messages['incomplete'])
continue
try:
clean_data.append(field.clean(field_value))
except ValidationError as e:
# Collect all validation errors in a single list, which we'll
# raise at the end of clean(), rather than raising a single
# exception for the first error we encounter. Skip duplicates.
errors.extend(m for m in e.error_list if m not in errors)
if errors:
raise ValidationError(errors)
out = self.compress(clean_data)
self.validate(out)
self.run_validators(out)
return out
def compress(self, data_list):
"""
Returns a single value for the given list of values. The values can be
assumed to be valid.
For example, if this MultiValueField was instantiated with
fields=(DateField(), TimeField()), this might return a datetime
object created by combining the date and time in data_list.
"""
raise NotImplementedError('Subclasses must implement this method.')
def has_changed(self, initial, data):
if initial is None:
initial = ['' for x in range(0, len(data))]
else:
if not isinstance(initial, list):
initial = self.widget.decompress(initial)
for field, initial, data in zip(self.fields, initial, data):
try:
initial = field.to_python(initial)
except ValidationError:
return True
if field.has_changed(initial, data):
return True
return False
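# Illustrative sketch of a compress() implementation (SplitDateTimeField below
# is the in-tree example); the field name here is hypothetical:
#   class LatLonField(MultiValueField):
#       def __init__(self, *args, **kwargs):
#           fields = (FloatField(), FloatField())
#           super(LatLonField, self).__init__(fields, *args, **kwargs)
#       def compress(self, data_list):
#           return tuple(data_list) if data_list else None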
class FilePathField(ChoiceField):
def __init__(self, path, match=None, recursive=False, allow_files=True,
allow_folders=False, required=True, widget=None, label=None,
initial=None, help_text='', *args, **kwargs):
self.path, self.match, self.recursive = path, match, recursive
self.allow_files, self.allow_folders = allow_files, allow_folders
super(FilePathField, self).__init__(choices=(), required=required,
widget=widget, label=label, initial=initial, help_text=help_text,
*args, **kwargs)
if self.required:
self.choices = []
else:
self.choices = [("", "---------")]
if self.match is not None:
self.match_re = re.compile(self.match)
if recursive:
for root, dirs, files in sorted(os.walk(self.path)):
if self.allow_files:
for f in files:
if self.match is None or self.match_re.search(f):
f = os.path.join(root, f)
self.choices.append((f, f.replace(path, "", 1)))
if self.allow_folders:
for f in dirs:
if f == '__pycache__':
continue
if self.match is None or self.match_re.search(f):
f = os.path.join(root, f)
self.choices.append((f, f.replace(path, "", 1)))
else:
try:
for f in sorted(os.listdir(self.path)):
if f == '__pycache__':
continue
full_file = os.path.join(self.path, f)
if (((self.allow_files and os.path.isfile(full_file)) or
(self.allow_folders and os.path.isdir(full_file))) and
(self.match is None or self.match_re.search(f))):
self.choices.append((full_file, f))
except OSError:
pass
self.widget.choices = self.choices
class SplitDateTimeField(MultiValueField):
widget = SplitDateTimeWidget
hidden_widget = SplitHiddenDateTimeWidget
default_error_messages = {
'invalid_date': _('Enter a valid date.'),
'invalid_time': _('Enter a valid time.'),
}
def __init__(self, input_date_formats=None, input_time_formats=None, *args, **kwargs):
errors = self.default_error_messages.copy()
if 'error_messages' in kwargs:
errors.update(kwargs['error_messages'])
localize = kwargs.get('localize', False)
fields = (
DateField(input_formats=input_date_formats,
error_messages={'invalid': errors['invalid_date']},
localize=localize),
TimeField(input_formats=input_time_formats,
error_messages={'invalid': errors['invalid_time']},
localize=localize),
)
super(SplitDateTimeField, self).__init__(fields, *args, **kwargs)
def compress(self, data_list):
if data_list:
# Raise a validation error if time or date is empty
# (possible if SplitDateTimeField has required=False).
if data_list[0] in self.empty_values:
raise ValidationError(self.error_messages['invalid_date'], code='invalid_date')
if data_list[1] in self.empty_values:
raise ValidationError(self.error_messages['invalid_time'], code='invalid_time')
result = datetime.datetime.combine(*data_list)
return from_current_timezone(result)
return None
class GenericIPAddressField(CharField):
def __init__(self, protocol='both', unpack_ipv4=False, *args, **kwargs):
self.unpack_ipv4 = unpack_ipv4
self.default_validators = validators.ip_address_validators(protocol, unpack_ipv4)[0]
super(GenericIPAddressField, self).__init__(*args, **kwargs)
def to_python(self, value):
if value in self.empty_values:
return ''
value = value.strip()
if value and ':' in value:
return clean_ipv6_address(value, self.unpack_ipv4)
return value
class SlugField(CharField):
default_validators = [validators.validate_slug]
def __init__(self, *args, **kwargs):
self.allow_unicode = kwargs.pop('allow_unicode', False)
if self.allow_unicode:
self.default_validators = [validators.validate_unicode_slug]
super(SlugField, self).__init__(*args, **kwargs)
class UUIDField(CharField):
default_error_messages = {
'invalid': _('Enter a valid UUID.'),
}
def prepare_value(self, value):
if isinstance(value, uuid.UUID):
return value.hex
return value
def to_python(self, value):
value = super(UUIDField, self).to_python(value)
if value in self.empty_values:
return None
if not isinstance(value, uuid.UUID):
try:
value = uuid.UUID(value)
except ValueError:
raise ValidationError(self.error_messages['invalid'], code='invalid')
return value
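# Illustrative: string input is normalized to a uuid.UUID instance, and
# prepare_value() renders it back in the 32-character hex form:
#   UUIDField().clean('550e8400-e29b-41d4-a716-446655440000')
#   # -> UUID('550e8400-e29b-41d4-a716-446655440000')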
| mit |
hmendozap/master-arbeit-projects | autosk_dev_test/component/LinReg.py | 1 | 8756 | import numpy as np
import scipy.sparse as sp
from HPOlibConfigSpace.configuration_space import ConfigurationSpace
from HPOlibConfigSpace.conditions import EqualsCondition, InCondition
from HPOlibConfigSpace.hyperparameters import UniformFloatHyperparameter, \
UniformIntegerHyperparameter, CategoricalHyperparameter, Constant
from autosklearn.pipeline.components.base import AutoSklearnRegressionAlgorithm
from autosklearn.pipeline.constants import *
class LinReg(AutoSklearnRegressionAlgorithm):
def __init__(self, number_updates, batch_size, dropout_output,
learning_rate, solver, lambda2,
momentum=0.99, beta1=0.9, beta2=0.9, rho=0.95,
lr_policy='fixed', gamma=0.01, power=1.0, epoch_step=2,
random_state=None):
self.number_updates = number_updates
self.batch_size = batch_size
self.dropout_output = dropout_output
self.learning_rate = learning_rate
self.lr_policy = lr_policy
self.lambda2 = lambda2
self.momentum = momentum
self.beta1 = 1-beta1 if beta1 is not None else 0.9
self.beta2 = 1-beta2 if beta2 is not None else 0.99
self.rho = rho
self.solver = solver
self.gamma = gamma
self.power = power
self.epoch_step = epoch_step
# Empty features and shape
self.n_features = None
self.input_shape = None
self.m_issparse = False
self.m_isregression = True
self.m_isbinary = False
self.m_ismultilabel = False
self.estimator = None
def _prefit(self, X, y):
self.batch_size = int(self.batch_size)
self.n_features = X.shape[1]
self.input_shape = (self.batch_size, self.n_features)
self.num_output_units = 1 # Regression
# Normalize the output
self.mean_y = np.mean(y)
self.std_y = np.std(y)
y = (y - self.mean_y) / self.std_y
if len(y.shape) == 1:
y = y[:, np.newaxis]
self.m_issparse = sp.issparse(X)
return X, y
def fit(self, X, y):
Xf, yf = self._prefit(X, y)
epoch = (self.number_updates * self.batch_size)//X.shape[0]
number_epochs = min(max(2, epoch), 110) # Cap the max number of possible epochs
from implementation import LogisticRegression
self.estimator = LogisticRegression.LogisticRegression(batch_size=self.batch_size,
input_shape=self.input_shape,
num_output_units=self.num_output_units,
dropout_output=self.dropout_output,
learning_rate=self.learning_rate,
lr_policy=self.lr_policy,
lambda2=self.lambda2,
momentum=self.momentum,
beta1=self.beta1,
beta2=self.beta2,
rho=self.rho,
solver=self.solver,
num_epochs=number_epochs,
gamma=self.gamma,
power=self.power,
epoch_step=self.epoch_step,
is_sparse=self.m_issparse,
is_binary=self.m_isbinary,
is_multilabel=self.m_ismultilabel,
is_regression=self.m_isregression)
self.estimator.fit(Xf, yf)
return self
def predict(self, X):
if self.estimator is None:
raise NotImplementedError
preds = self.estimator.predict(X, self.m_issparse)
return preds * self.std_y + self.mean_y
def predict_proba(self, X):
if self.estimator is None:
raise NotImplementedError()
return self.estimator.predict_proba(X, self.m_issparse)
@staticmethod
def get_properties(dataset_properties=None):
return {'shortname': 'lin_reg',
'name': 'Linear Regression',
'handles_regression': True,
'handles_classification': False,
'handles_multiclass': False,
'handles_multilabel': False,
'is_deterministic': True,
'input': (DENSE, SPARSE, UNSIGNED_DATA),
'output': (PREDICTIONS,)}
@staticmethod
def get_hyperparameter_search_space(dataset_properties=None):
policy_choices = ['fixed', 'inv', 'exp', 'step']
batch_size = UniformIntegerHyperparameter("batch_size",
100, 3000,
log=True,
default=150)
number_updates = UniformIntegerHyperparameter("number_updates",
500, 10500,
log=True,
default=500)
dropout_output = UniformFloatHyperparameter("dropout_output", 0.0, 0.99,
default=0.5)
lr = UniformFloatHyperparameter("learning_rate", 1e-6, 0.1,
log=True,
default=0.01)
l2 = UniformFloatHyperparameter("lambda2", 1e-6, 1e-2,
log=True,
default=1e-3)
solver = CategoricalHyperparameter(name="solver",
choices=["sgd", "adam"],
default="sgd")
beta1 = UniformFloatHyperparameter("beta1", 1e-4, 0.1,
log=True,
default=0.1)
beta2 = UniformFloatHyperparameter("beta2", 1e-4, 0.1,
log=True,
default=0.01)
lr_policy = CategoricalHyperparameter(name="lr_policy",
choices=policy_choices,
default='fixed')
gamma = UniformFloatHyperparameter(name="gamma",
lower=1e-3, upper=1e-1,
default=1e-2)
power = UniformFloatHyperparameter("power",
0.0, 1.0,
default=0.5)
epoch_step = UniformIntegerHyperparameter("epoch_step",
2, 20,
default=5)
cs = ConfigurationSpace()
cs.add_hyperparameter(number_updates)
cs.add_hyperparameter(batch_size)
cs.add_hyperparameter(dropout_output)
cs.add_hyperparameter(lr)
cs.add_hyperparameter(l2)
cs.add_hyperparameter(solver)
cs.add_hyperparameter(beta1)
cs.add_hyperparameter(beta2)
cs.add_hyperparameter(lr_policy)
cs.add_hyperparameter(gamma)
cs.add_hyperparameter(power)
cs.add_hyperparameter(epoch_step)
beta1_depends_on_solver = EqualsCondition(beta1, solver, "adam")
beta2_depends_on_solver = EqualsCondition(beta2, solver, "adam")
gamma_depends_on_policy = InCondition(child=gamma, parent=lr_policy,
values=['inv', 'exp', 'step'])
power_depends_on_policy = EqualsCondition(power, lr_policy, 'inv')
epoch_step_depends_on_policy = EqualsCondition(epoch_step,
lr_policy, 'step')
cs.add_condition(beta1_depends_on_solver)
cs.add_condition(beta2_depends_on_solver)
cs.add_condition(gamma_depends_on_policy)
cs.add_condition(power_depends_on_policy)
cs.add_condition(epoch_step_depends_on_policy)
return cs
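# Illustrative usage (assumption: HPOlibConfigSpace supports sampling):
#   cs = LinReg.get_hyperparameter_search_space()
#   config = cs.sample_configuration()
#   # beta1/beta2 are only active when solver == 'adam', per the conditions above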
| mit |
idnael/ctxsearch | ctxsearch/_termi/_termi_encoding.py | 1 | 4460 | #!/usr/bin/python
# TerminatorEncoding - charset encoding classes
# Copyright (C) 2006-2008 [email protected]
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 2 only.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""TerminatorEncoding by Emmanuel Bretelle <[email protected]>
TerminatorEncoding supplies a list of possible encoding
values.
This list is taken from gnome-terminal's src/encoding.h
and src/encoding.c
"""
from terminatorlib import translation
class TerminatorEncoding:
"""Class to store encoding details"""
encodings = [
[True, None, _("Current Locale")],
[False, "ISO-8859-1", _("Western")],
[False, "ISO-8859-2", _("Central European")],
[False, "ISO-8859-3", _("South European") ],
[False, "ISO-8859-4", _("Baltic") ],
[False,"ISO-8859-5", _("Cyrillic") ],
[False, "ISO-8859-6", _("Arabic") ],
[False, "ISO-8859-7", _("Greek") ],
[False, "ISO-8859-8", _("Hebrew Visual") ],
[False, "ISO-8859-8-I", _("Hebrew") ],
[False, "ISO-8859-9", _("Turkish") ],
[False, "ISO-8859-10", _("Nordic") ],
[False, "ISO-8859-13", _("Baltic") ],
[False, "ISO-8859-14", _("Celtic") ],
[False, "ISO-8859-15", _("Western") ],
[False, "ISO-8859-16", _("Romanian") ],
[False, "UTF-7", _("Unicode") ],
[False, "UTF-8", _("Unicode") ],
[False, "UTF-16", _("Unicode") ],
[False, "UCS-2", _("Unicode") ],
[False, "UCS-4", _("Unicode") ],
[False, "ARMSCII-8", _("Armenian") ],
[False, "BIG5", _("Chinese Traditional") ],
[False, "BIG5-HKSCS", _("Chinese Traditional") ],
[False, "CP866", _("Cyrillic/Russian") ],
[False, "EUC-JP", _("Japanese") ],
[False, "EUC-KR", _("Korean") ],
[False, "EUC-TW", _("Chinese Traditional") ],
[False, "GB18030", _("Chinese Simplified") ],
[False, "GB2312", _("Chinese Simplified") ],
[False, "GBK", _("Chinese Simplified") ],
[False, "GEORGIAN-PS", _("Georgian") ],
[False, "HZ", _("Chinese Simplified") ],
[False, "IBM850", _("Western") ],
[False, "IBM852", _("Central European") ],
[False, "IBM855", _("Cyrillic") ],
[False, "IBM857", _("Turkish") ],
[False, "IBM862", _("Hebrew") ],
[False, "IBM864", _("Arabic") ],
[False, "ISO2022JP", _("Japanese") ],
[False, "ISO2022KR", _("Korean") ],
[False, "ISO-IR-111", _("Cyrillic") ],
[False, "JOHAB", _("Korean") ],
[False, "KOI8-R", _("Cyrillic") ],
[False, "KOI8-U", _("Cyrillic/Ukrainian") ],
[False, "MAC_ARABIC", _("Arabic") ],
[False, "MAC_CE", _("Central European") ],
[False, "MAC_CROATIAN", _("Croatian") ],
[False, "MAC-CYRILLIC", _("Cyrillic") ],
[False, "MAC_DEVANAGARI", _("Hindi") ],
[False, "MAC_FARSI", _("Persian") ],
[False, "MAC_GREEK", _("Greek") ],
[False, "MAC_GUJARATI", _("Gujarati") ],
[False, "MAC_GURMUKHI", _("Gurmukhi") ],
[False, "MAC_HEBREW", _("Hebrew") ],
[False, "MAC_ICELANDIC", _("Icelandic") ],
[False, "MAC_ROMAN", _("Western") ],
[False, "MAC_ROMANIAN", _("Romanian") ],
[False, "MAC_TURKISH", _("Turkish") ],
[False, "MAC_UKRAINIAN", _("Cyrillic/Ukrainian") ],
[False, "SHIFT-JIS", _("Japanese") ],
[False, "TCVN", _("Vietnamese") ],
[False, "TIS-620", _("Thai") ],
[False, "UHC", _("Korean") ],
[False, "VISCII", _("Vietnamese") ],
[False, "WINDOWS-1250", _("Central European") ],
[False, "WINDOWS-1251", _("Cyrillic") ],
[False, "WINDOWS-1252", _("Western") ],
[False, "WINDOWS-1253", _("Greek") ],
[False, "WINDOWS-1254", _("Turkish") ],
[False, "WINDOWS-1255", _("Hebrew") ],
[False, "WINDOWS-1256", _("Arabic") ],
[False, "WINDOWS-1257", _("Baltic") ],
[False, "WINDOWS-1258", _("Vietnamese") ]
]
def __init__(self):
pass
def get_list():
"""Return a list of supported encodings"""
return TerminatorEncoding.encodings
get_list = staticmethod(get_list)
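# Illustrative: each row is [active, charset, translated group name], e.g.
#   TerminatorEncoding.get_list()[1]  # -> [False, 'ISO-8859-1', _('Western')]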
| gpl-2.0 |
kbrose/project_euler | p90-99/p91.py | 1 | 1360 | grid_size = 50 # assumes a square grid
counter = 3 * (grid_size * grid_size) # triangles with an axis-aligned right angle (at the origin, on the x-axis, or on the y-axis); the loop below counts the rest
def gcd(a,b):
while (b != 0) and (a != b) and (a != 0):
if b < a:
a = a - b
else:
b = b - a
if a > 0:
return a
return b
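# Illustrative: subtraction-based Euclidean algorithm, e.g. gcd(12, 18) -> 6
# and gcd(0, 5) -> 5.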
end = grid_size+1
def my_append(arr, item):
if item in arr:
return 0
arr.append(item)
return 1
triangles = []
for x in xrange(1,end):
for y in xrange(1,end):
GCD = gcd(x,y)
if GCD > 0:
slope = [x / GCD, y / GCD]
inv_slope = [y / GCD, -(x / GCD)]
else:
slope = [x,y]
inv_slope = [y,-x]
out_of_bounds_left = 0
out_of_bounds_right = 0
for mult in xrange(1,51):
x_inc = mult*inv_slope[0]
y_inc = mult*inv_slope[1]
if ((x - x_inc < 0) or (y - y_inc > grid_size)):
out_of_bounds_left = 1
else:
counter += my_append(triangles, [[0,0],[x,y],[x-x_inc,y-y_inc]])
if ((x + x_inc > grid_size) or (y + y_inc < 0)):
out_of_bounds_right = 1
else:
counter += my_append(triangles, [[0,0],[x,y],[x+x_inc,y+y_inc]])
if (out_of_bounds_left and out_of_bounds_right):
break
print counter
| unlicense |
LIMXTEC/BitSend | qa/rpc-tests/mempool_reorg.py | 3 | 4514 | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitsend Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test re-org scenarios with a mempool that contains transactions
# that spend (directly or indirectly) coinbase transactions.
#
from test_framework.test_framework import BitsendTestFramework
from test_framework.util import *
# Create one-input, one-output, no-fee transaction:
class MempoolCoinbaseTest(BitsendTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 2
self.setup_clean_chain = False
alert_filename = None # Set by setup_network
def setup_network(self):
args = ["-checkmempool", "-debug=mempool"]
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, args))
self.nodes.append(start_node(1, self.options.tmpdir, args))
connect_nodes(self.nodes[1], 0)
self.is_network_split = False
self.sync_all()
def run_test(self):
start_count = self.nodes[0].getblockcount()
        # Mine four blocks. After this, nodes[0]'s blocks
        # 101, 102, 103, and 104 are spend-able.
new_blocks = self.nodes[1].generate(4)
self.sync_all()
node0_address = self.nodes[0].getnewaddress()
node1_address = self.nodes[1].getnewaddress()
# Three scenarios for re-orging coinbase spends in the memory pool:
# 1. Direct coinbase spend : spend_101
# 2. Indirect (coinbase spend in chain, child in mempool) : spend_102 and spend_102_1
# 3. Indirect (coinbase and child both in chain) : spend_103 and spend_103_1
# Use invalidatblock to make all of the above coinbase spends invalid (immature coinbase),
# and make sure the mempool code behaves correctly.
b = [ self.nodes[0].getblockhash(n) for n in range(101, 105) ]
coinbase_txids = [ self.nodes[0].getblock(h)['tx'][0] for h in b ]
spend_101_raw = create_tx(self.nodes[0], coinbase_txids[1], node1_address, 49.99)
spend_102_raw = create_tx(self.nodes[0], coinbase_txids[2], node0_address, 49.99)
spend_103_raw = create_tx(self.nodes[0], coinbase_txids[3], node0_address, 49.99)
# Create a block-height-locked transaction which will be invalid after reorg
timelock_tx = self.nodes[0].createrawtransaction([{"txid": coinbase_txids[0], "vout": 0}], {node0_address: 49.99})
        # Set the time lock: make the input's nSequence non-final so nLockTime
        # is enforced, then overwrite the last four bytes (nLockTime,
        # little-endian) with current height + 2.
timelock_tx = timelock_tx.replace("ffffffff", "11111191", 1)
timelock_tx = timelock_tx[:-8] + hex(self.nodes[0].getblockcount() + 2)[2:] + "000000"
timelock_tx = self.nodes[0].signrawtransaction(timelock_tx)["hex"]
assert_raises(JSONRPCException, self.nodes[0].sendrawtransaction, timelock_tx)
# Broadcast and mine spend_102 and 103:
spend_102_id = self.nodes[0].sendrawtransaction(spend_102_raw)
spend_103_id = self.nodes[0].sendrawtransaction(spend_103_raw)
self.nodes[0].generate(1)
assert_raises(JSONRPCException, self.nodes[0].sendrawtransaction, timelock_tx)
# Create 102_1 and 103_1:
spend_102_1_raw = create_tx(self.nodes[0], spend_102_id, node1_address, 49.98)
spend_103_1_raw = create_tx(self.nodes[0], spend_103_id, node1_address, 49.98)
# Broadcast and mine 103_1:
spend_103_1_id = self.nodes[0].sendrawtransaction(spend_103_1_raw)
last_block = self.nodes[0].generate(1)
timelock_tx_id = self.nodes[0].sendrawtransaction(timelock_tx)
# ... now put spend_101 and spend_102_1 in memory pools:
spend_101_id = self.nodes[0].sendrawtransaction(spend_101_raw)
spend_102_1_id = self.nodes[0].sendrawtransaction(spend_102_1_raw)
self.sync_all()
assert_equal(set(self.nodes[0].getrawmempool()), {spend_101_id, spend_102_1_id, timelock_tx_id})
for node in self.nodes:
node.invalidateblock(last_block[0])
assert_equal(set(self.nodes[0].getrawmempool()), {spend_101_id, spend_102_1_id, spend_103_1_id})
# Use invalidateblock to re-org back and make all those coinbase spends
# immature/invalid:
for node in self.nodes:
node.invalidateblock(new_blocks[0])
self.sync_all()
# mempool should be empty.
assert_equal(set(self.nodes[0].getrawmempool()), set())
if __name__ == '__main__':
MempoolCoinbaseTest().main()
| mit |
piru/letsencrypt | letsencrypt/tests/display/enhancements_test.py | 53 | 1703 | """Module for enhancement UI."""
import logging
import unittest
import mock
from letsencrypt import errors
from letsencrypt.display import util as display_util
class AskTest(unittest.TestCase):
"""Test the ask method."""
def setUp(self):
logging.disable(logging.CRITICAL)
def tearDown(self):
logging.disable(logging.NOTSET)
@classmethod
def _call(cls, enhancement):
from letsencrypt.display.enhancements import ask
return ask(enhancement)
@mock.patch("letsencrypt.display.enhancements.util")
def test_redirect(self, mock_util):
mock_util().menu.return_value = (display_util.OK, 1)
self.assertTrue(self._call("redirect"))
def test_key_error(self):
self.assertRaises(errors.Error, self._call, "unknown_enhancement")
class RedirectTest(unittest.TestCase):
"""Test the redirect_by_default method."""
@classmethod
def _call(cls):
from letsencrypt.display.enhancements import redirect_by_default
return redirect_by_default()
@mock.patch("letsencrypt.display.enhancements.util")
def test_secure(self, mock_util):
mock_util().menu.return_value = (display_util.OK, 1)
self.assertTrue(self._call())
@mock.patch("letsencrypt.display.enhancements.util")
def test_cancel(self, mock_util):
mock_util().menu.return_value = (display_util.CANCEL, 1)
self.assertFalse(self._call())
@mock.patch("letsencrypt.display.enhancements.util")
def test_easy(self, mock_util):
mock_util().menu.return_value = (display_util.OK, 0)
self.assertFalse(self._call())
if __name__ == "__main__":
unittest.main() # pragma: no cover
| apache-2.0 |
canwe/NewsBlur | apps/rss_feeds/migrations/0046_remove_feedhistory.py | 18 | 5828 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting model 'FeedUpdateHistory'
db.delete_table('rss_feeds_feedupdatehistory')
def backwards(self, orm):
# Adding model 'FeedUpdateHistory'
db.create_table('rss_feeds_feedupdatehistory', (
('number_of_feeds', self.gf('django.db.models.fields.IntegerField')()),
('average_per_feed', self.gf('django.db.models.fields.DecimalField')(max_digits=4, decimal_places=1)),
('fetch_date', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('seconds_taken', self.gf('django.db.models.fields.IntegerField')()),
))
db.send_create_signal('rss_feeds', ['FeedUpdateHistory'])
models = {
'rss_feeds.duplicatefeed': {
'Meta': {'object_name': 'DuplicateFeed'},
'duplicate_address': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'duplicate_feed_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'feed': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'duplicate_addresses'", 'to': "orm['rss_feeds.Feed']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'rss_feeds.feed': {
'Meta': {'ordering': "['feed_title']", 'object_name': 'Feed', 'db_table': "'feeds'"},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'active_subscribers': ('django.db.models.fields.IntegerField', [], {'default': '-1', 'db_index': 'True'}),
'average_stories_per_month': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'creation': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'days_to_trim': ('django.db.models.fields.IntegerField', [], {'default': '90'}),
'etag': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'exception_code': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'favicon_color': ('django.db.models.fields.CharField', [], {'max_length': '6', 'null': 'True', 'blank': 'True'}),
'favicon_not_found': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'feed_address': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '255'}),
'feed_link': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'feed_title': ('django.db.models.fields.CharField', [], {'default': "'[Untitled]'", 'max_length': '255', 'null': 'True', 'blank': 'True'}),
'fetched_once': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'has_feed_exception': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'has_page_exception': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_load_time': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_update': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'min_to_decay': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'next_scheduled_update': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'num_subscribers': ('django.db.models.fields.IntegerField', [], {'default': '-1'}),
'premium_subscribers': ('django.db.models.fields.IntegerField', [], {'default': '-1'}),
'queued_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'stories_last_month': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'rss_feeds.feeddata': {
'Meta': {'object_name': 'FeedData'},
'feed': ('utils.fields.AutoOneToOneField', [], {'related_name': "'data'", 'unique': 'True', 'to': "orm['rss_feeds.Feed']"}),
'feed_classifier_counts': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'feed_tagline': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'popular_authors': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'}),
'popular_tags': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'story_count_history': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'rss_feeds.feedloadtime': {
'Meta': {'object_name': 'FeedLoadtime'},
'date_accessed': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'feed': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rss_feeds.Feed']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'loadtime': ('django.db.models.fields.FloatField', [], {})
}
}
complete_apps = ['rss_feeds']
| mit |
sumeetsk/NEXT-1 | next/assistant/pijemont/verifier.py | 3 | 7768 | import yaml, json
import random
import traceback
import sys
import os
import next.utils as utils
DICT = {'dict','dictionary','map'}
LIST = {'list'}
TUPLE = {'tuple'}
ONEOF = {'oneof'}
NUM = {'num','number','float'}
STRING = {'str','string','multiline'}
ANY = {'any','stuff'}
FILE = {'file'}
BOOL = {'boolean','bool'}
def load_doc(filename,base_path):
errs = []
with open(filename) as f:
ref = yaml.load(f.read())
ds = []
for ext in ref.pop('extends',[]):
r,e = load_doc(base_path+ext,base_path)
ds += [r]
errs += e
for d in ds:
ref = merge_dict(ref, d)
    errs += check_format(ref,'args' in ref[list(ref.keys())[0]])
return ref,errs
def merge_dict(d1,d2,prefer=1):
for k in d2:
if k in d1:
if type(d1[k]) == dict:
d1[k] = merge_dict(d1[k],d2[k])
if prefer == 2:
d1[k] = d2[k]
else:
d1[k] = d2[k]
return d1
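# A minimal usage sketch of merge_dict (the _demo_* helper is illustrative,
# not part of the original module): keys present in both dicts keep d1's
# value under the default prefer=1, nested dicts are merged recursively,
# and d1 is mutated in place.
def _demo_merge_dict():
    base = {'a': 1, 'nested': {'x': 1}}
    override = {'a': 2, 'nested': {'y': 2}}
    merged = merge_dict(base, override)
    assert merged == {'a': 1, 'nested': {'x': 1, 'y': 2}}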
def check_format(doc,rets=True):
errs = []
if rets:
for x in doc:
if 'args' in doc[x]:
errs += check_format_helper({'type':'dict','values':doc[x]['args']},'args/'+x)
if 'rets' in doc[x]:
errs += check_format_helper({'type':'dict','values':doc[x]['rets']},'rets/'+x)
else:
for x in doc:
errs += check_format_helper(doc[x],x)
return errs
def check_format_helper(doc,name):
errs = []
if not 'type' in doc:
errs += ['{}: "type" key missing'.format(name)]
diff = set(doc.keys()) - {'type','description','values','optional','default'}
if len(diff) > 0:
errs += ["{}: extra keys in spec: {}".format(name,", ".join(list(diff)))]
if not doc['type'] in DICT | LIST | TUPLE | ONEOF | NUM | STRING | BOOL | ANY | FILE:
        errs += ['{}: invalid type: {}'.format(name, doc['type'])]
if doc['type'] in DICT | LIST | TUPLE | ONEOF and not 'values' in doc:
errs += ['{}: requires "values" key'.format(name)]
if len(errs) > 0:
return errs
if doc['type'] in DICT:
for x in doc['values']:
errs += check_format_helper(doc['values'][x],'{}/{}'.format(name,x))
elif doc['type'] in LIST:
errs += check_format_helper(doc['values'],'{}/values'.format(name))
elif doc['type'] in TUPLE:
for x in doc['values']:
errs += check_format_helper(doc['values'][x],'{}/{}'.format(name,str(x)))
elif doc['type'] in ONEOF:
for x in doc['values']:
errs += check_format_helper(doc['values'][x],'{}/{}'.format(name,str(x)))
return errs
def verify(input_dict, reference_dict):
"""
Returns: modified_input, success, list_of_errors
where:
- modified_input is the input populated with default values where applicable
- success is a boolean true if there were no problems and false otherwise
- list_of_errors is as in verify_helper
"""
input_dict, messages = verify_helper("", input_dict, {'type':'dict','values':reference_dict})
try:
if len(messages)>0:
raise Exception("Failed to verify: {}".format(messages))
else:
return input_dict
except Exception:
exc_type, exc_value, exc_traceback = sys.exc_info()
print("Exception: {} {}".format(error, traceback.format_exc()))
traceback.print_tb(exc_traceback)
raise Exception(error)
def verify_helper(name, input_element, reference_dict):
"""
Returns: modified_input,list_of_errors
where:
- modified_input is the input populated with default values
- list_of_errors is: [{name: name, message: ...}, ...]
"""
ans = []
if reference_dict['type'] in DICT:
if not isinstance(input_element, (dict)):
ans += [{"name":name, "message":"invalid dict"}]
else:
l1,l2 = compare_dict_keys(input_element, reference_dict['values'])
if len(l1) > 0:
ans += [{"name":name, "message":"extra keys in input: " + ",".join(l1)}]
else:
ok = True
for k in l2:
if 'default' in reference_dict['values'][k]:
input_element[k] = reference_dict['values'][k]['default']
if reference_dict['values'][k]['type'] in NUM:
input_element[k] = float(input_element[k])
elif (not 'optional' in reference_dict['values'][k]) or reference_dict['values'][k]['optional'] == False:
ans += [{"name":name+'/'+k, "message":"required key is absent"}]
ok = False
if(ok):
for k in input_element:
input_element[k], temp_ans = verify_helper(name + '/' + k, input_element[k], reference_dict['values'][str(k)])
ans += temp_ans
elif reference_dict['type'] in LIST:
if not isinstance(input_element, (list)):
ans += [{"name":name, "message":"invalid list"}]
else:
for i in range(len(input_element)):
input_element[i],temp_ans = verify_helper(name+'/'+str(i), input_element[i], reference_dict['values'])
ans += temp_ans
elif reference_dict['type'] in TUPLE:
if not isinstance(input_element, (list,tuple)):
ans += [{"name":name, "message":"invalid tuple"}]
else:
new_tuple = list(input_element)
for i in range(len(input_element)):
new_tuple[i], temp_ans = verify_helper(name+'/'+str(i), input_element[i], reference_dict['values'][i])
ans += temp_ans
new_tuple = tuple(new_tuple)
elif reference_dict['type'] in BOOL:
if not isinstance(input_element, (bool)):
ans += [{"name":name, "message":"invalid boolean"}]
elif reference_dict['type'] in NUM:
if not isinstance(input_element, (int, long, float)):
ans += [{"name":name, "message":"invalid number"}]
elif reference_dict['type'] in STRING:
if not isinstance(input_element, (str, unicode)):
ans += [{"name":name, "message":"expected a string, got {}".format(type(input_element))}]
elif 'values' in reference_dict and not input_element in reference_dict['values']:
ans += [{"name":name, "message":"argument must be one of the specified strings: "+", ".join(reference_dict['values'])}]
elif reference_dict['type'] in ONEOF:
count = 0
for k in reference_dict['values']:
if k in input_element:
count += 1
if count > 1:
ans += [{"name":name+"/"+k,"message":"More than one argument specified for 'oneof arg: " + name}]
if count == 0:
if 'default' in reference_dict:
input_element = reference_dict['default']
else:
ans += [{"name":name, "message":"no argument provided for 'oneof' arg"}]
elif reference_dict['type'] in ANY | FILE:
pass
else:
ans += [{"name":name, "message":"invalid type: {}".format(reference_dict['type'])}]
return input_element,ans
def compare_dict_keys(d1, d2):
"""
Returns [things in d1 not in d2, things in d2 not in d1]
"""
return [k for k in d1 if not k in d2], [k for k in d2 if not k in d1]
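# Illustrative sketch of verify() with a hand-written spec (the spec and the
# _demo_* helper below are assumptions for demonstration, not part of the
# original module): one required number plus one optional string that carries
# a default value.
def _demo_verify():
    spec = {
        'n': {'type': 'num'},
        'label': {'type': 'str', 'optional': True, 'default': 'none'},
    }
    out = verify({'n': 3}, spec)
    # the missing optional key was filled in from its default
    assert out == {'n': 3, 'label': 'none'}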
if __name__ == '__main__':
    if len(sys.argv) > 1:
        # extensions are resolved relative to the spec file's directory
        base_path = os.path.dirname(sys.argv[1]) + '/'
        r, e = load_doc(sys.argv[1], base_path)
        print('doc', r)
        print('errs', e)
        if len(sys.argv) > 2:
            # the second argument is assumed to be the input as a JSON string
            i = verify(json.loads(sys.argv[2]), r)
            print("Verified input", i)
| apache-2.0 |
nachtmaar/androlyze | androlyze/util/Util.py | 1 | 8887 |
# encoding: utf-8
__author__ = "Nils Tobias Schmidt"
__email__ = "schmidt89 at informatik.uni-marburg.de"
'''
Utility module
'''
from Queue import Empty
import itertools
from os.path import splitext
import re
import sys
import time
import traceback
from androlyze.log.Log import log
def sha256(data):
'''
Calculate the sha256 hash
Parameters
----------
data: object
Returns
-------
str
sha256 as hexstring
'''
import hashlib
hasher = hashlib.sha256()
hasher.update(data)
return hasher.hexdigest()
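# Quick usage sketch (illustrative helper, not part of the original module):
# hashing a byte string yields a 64-character hex digest.
def _demo_sha256():
    digest = sha256('androlyze')
    assert len(digest) == 64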
def cs_classnames(class_list, sort = True):
''' Returns a comma separated str build from the name attribute '''
class_names = [c.__name__ for c in class_list]
if sort:
class_names = sorted(class_names)
return ', '.join(class_names)
def filter_not_none(sequence):
return filter(lambda x: x is not None, sequence)
def get_fst_not_none(sequence):
''' Get the first object that is not None.
Returns None if nothing found '''
res = filter_not_none(sequence)
if res:
return res[0]
return None
def format_exception(exc_info_obj, as_string = True):
'''
Format the exception infos to a string
Parameters
----------
exc_info_obj : (type, value, traceback)
An object like sys.exc_info() returns
as_string : bool
If true, return the formatted exception as string, not list<str>
Returns
-------
str
'''
res = traceback.format_exception(*exc_info_obj)
if as_string:
res = ''.join(res)
return res
############################################################
#---Datetime conversions
############################################################
def utc2local(utc_datetime):
''' Convert `datetime` object in utc to local time zone.
Parameters
----------
utc_datetime : datetime
Datetime object with utc timezone
Examples
--------
>>> from datetime import datetime
... print utc2local(datetime.utcnow())
Returns
-------
datetime
Datetime object with local timezone
'''
import calendar
from datetime import datetime
# get integer timestamp to avoid precision lost
# but we lose microseconds due to the timetuple()
timestamp = calendar.timegm(utc_datetime.timetuple())
local_datetime = datetime.fromtimestamp(timestamp)
    # get microseconds back (datetime.replace returns a new object)
    local_datetime = local_datetime.replace(microsecond=utc_datetime.microsecond)
return local_datetime
############################################################
#---Conversion ISO-8601 <-> Datetime
############################################################
def iso8601_to_datetime(iso_dt):
''' Convert a date represented as ISO-8601 string to a `datetime ` object (utc).
Examples
--------
>>> from datetime import datetime
... iso8601_to_datetime(datetime.utcnow().isoformat())
'''
from dateutil import parser
return parser.parse(iso_dt)
def datetime_to_iso8601(dt):
''' Convert a `datetime` object to a ISO-8601 string.
Examples
--------
>>> from datetime import datetime
... datetime_to_iso8601(datetime.utcnow())
'''
return dt.isoformat()
############################################################
#---Other
############################################################
def transform_key(key, from_mapping, to_mapping):
'''
Find the `key` in `from_mapping` and return the value of `to_mapping` at this index.
Parameters
----------
key : str
from_mapping : list<str>
to_mapping : list<str>
Returns
-------
appropriate value in the `to_mapping`
'''
try:
idx = from_mapping.index(key)
return to_mapping[idx]
except ValueError:
return None
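# Usage sketch (illustrative helper): look up the key in the source mapping
# and return the value at the same index in the target mapping; keys missing
# from the source mapping yield None.
def _demo_transform_key():
    assert transform_key('b', ['a', 'b'], ['x', 'y']) == 'y'
    assert transform_key('z', ['a', 'b'], ['x', 'y']) is None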
def timeit(func, *args, **kwargs):
''' Returns the execution time in seconds of the func.
Returns
-------
int
Execution time if no result value
tuple<int, object>
If the `func` has a return value, a tuple will be returned.
1. arg time, 2. arg result value'''
start = time.time()
res = func(*args, **kwargs)
end = time.time()
duration = end - start
if res is None:
return duration
return (duration, res)
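# Usage sketch (illustrative helper): a function with a return value yields a
# (duration, result) tuple, while a function returning None yields only the
# duration in seconds.
def _demo_timeit():
    duration, result = timeit(sum, range(1000))
    assert result == 499500 and duration >= 0
    assert isinstance(timeit(time.sleep, 0), float)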
def set_androguard_path(settings):
''' Set the path to androguard from read from `settings` if not already in python path!
Parameters
----------
settings : Settings
'''
# check if path already set
try:
import androguard
return
except ImportError:
pass
from androlyze.settings import SECTION_ANDROGUARD, KEY_ANDROGUARD_PATH
ANDROGUARD_PATH = settings[(SECTION_ANDROGUARD, KEY_ANDROGUARD_PATH)]
# set androguard location before importing any androguard stuff
sys.path.append(ANDROGUARD_PATH)
log.info('appending "%s" to sys.path', ANDROGUARD_PATH)
############################################################
#---Logging
############################################################
def print_dyn_progress(progress_str):
''' Print progress on stdout.
Parameters
----------
progress_str : str
'''
sys.stdout.write("\r%s" % progress_str)
sys.stdout.flush()
def log_will_retry(secs, exc = None, what = ''):
'''
Parameters
----------
secs : int
Retry in `secs` seconds.
exc: Exception, optional (default is None)
Exception to log
what : str, optional (default is '')
What to try again.
'''
if exc is not None:
log.exception(exc)
log.warn("Trying %s again in %ss", what, secs)
############################################################
#---Lists
############################################################
def flatten(l):
''' Flatten the iterable `l` '''
return list(itertools.chain(*l))
############################################################
#---Progress
############################################################
def format_progress(cur_cnt, total_cnt):
''' Format progress and return it as str.
Parameters
----------
cur_cnt : int
total_cnt : int
'''
progress_percentage = calc_progress(cur_cnt, total_cnt)
progress_percentage_str = "%.2f" % progress_percentage
return "%s/%s (%s %%)" % (cur_cnt, total_cnt, progress_percentage_str)
def calc_progress(cur_cnt, total_cnt):
''' Calculate progress and return it as float.
Parameters
----------
cur_cnt : int
total_cnt : int
'''
progress_percentage = 0
if total_cnt != 0:
progress_percentage = cur_cnt * 100.0 / total_cnt
return progress_percentage
############################################################
#---Import
############################################################
def path_2_package_name(path):
''' Convert path to package name '''
return re.sub("/+", ".", splitext(path)[0])
def package_name_2_path(package_name):
''' Convert package name to path'''
return re.sub("\.+", "/", package_name) + ".py"
def module_names_from_class(classes):
''' Get module names from instantiated `classes` '''
return map(lambda s: s.__module__, classes)
############################################################
#---Itertools
############################################################
def count_iterable_n_clone(iterable):
'''
Parameters
----------
iterable: iterable structure
    Returns
--------
iterable, int
The first component is the iterable structure.
List if `iterable` was list, otherwise `itertools.tee`
Second is the count of elements.
'''
is_list = isinstance(iterable, list)
orig, copy = itertools.tee(iterable)
if is_list:
orig = list(orig)
return orig, len(list(copy))
def queue_to_list(queue):
''' Get all elements from the `queue` and return them as list.
Parameters
----------
    queue: multiprocessing.Queue
'''
res = []
while True:
try:
r = queue.get_nowait()
res.append(r)
except Empty:
break
return res
def split_n_uniform_distri(l, n = 10):
''' Split the list `l` into `n` sublists and try to fill each sublist with nearly the same number of elements.
So achieve an uniform distribution in all sublists.
Examples
--------
>>> split_n_uniform_distri(range(11))
[[0, 10], [1], [2], [3], [4], [5], [6], [7], [8], [9]]
'''
return [l[i::n] for i in range(0, n)]
def clear_queue(queue):
''' Clear the queue by removing all elements (without blocking)
Parameters
----------
queue : multiprocessing.Queue or Queue
Warning
-------
If the queue is a `JoinableQueue` it's not joinable any more!
'''
try:
while True:
queue.get_nowait()
except Empty:
pass | mit |
wdurhamh/statsmodels | statsmodels/genmod/families/links.py | 35 | 20123 | '''
Defines the link functions to be used with GLM and GEE families.
'''
import numpy as np
import scipy.stats
FLOAT_EPS = np.finfo(float).eps
class Link(object):
"""
A generic link function for one-parameter exponential family.
`Link` does nothing, but lays out the methods expected of any subclass.
"""
def __call__(self, p):
"""
Return the value of the link function. This is just a placeholder.
Parameters
----------
p : array-like
Probabilities
Returns
-------
g(p) : array-like
The value of the link function g(p) = z
"""
        raise NotImplementedError
def inverse(self, z):
"""
Inverse of the link function. Just a placeholder.
Parameters
----------
z : array-like
`z` is usually the linear predictor of the transformed variable
in the IRLS algorithm for GLM.
Returns
-------
g^(-1)(z) : array
The value of the inverse of the link function g^(-1)(z) = p
"""
        raise NotImplementedError
def deriv(self, p):
"""
Derivative of the link function g'(p). Just a placeholder.
Parameters
----------
p : array-like
Returns
-------
g'(p) : array
The value of the derivative of the link function g'(p)
"""
        raise NotImplementedError
def deriv2(self, p):
"""Second derivative of the link function g''(p)
implemented through numerical differentiation
"""
from statsmodels.tools.numdiff import approx_fprime_cs
        # TODO: work around problem with numdiff for 1d
return np.diag(approx_fprime_cs(p, self.deriv))
def inverse_deriv(self, z):
"""
Derivative of the inverse link function g^(-1)(z).
Notes
-----
This reference implementation gives the correct result but is
        inefficient, so it can be overridden in subclasses.
Parameters
----------
z : array-like
`z` is usually the linear predictor for a GLM or GEE model.
Returns
-------
g'^(-1)(z) : array
The value of the derivative of the inverse of the link function
"""
return 1 / self.deriv(self.inverse(z))
class Logit(Link):
"""
The logit transform
Notes
-----
    call and derivative use a private method _clean to trim p by
    machine epsilon so that p is in (0,1)
Alias of Logit:
logit = Logit()
"""
def _clean(self, p):
"""
Clip logistic values to range (eps, 1-eps)
Parameters
-----------
p : array-like
Probabilities
Returns
--------
pclip : array
Clipped probabilities
"""
return np.clip(p, FLOAT_EPS, 1. - FLOAT_EPS)
def __call__(self, p):
"""
The logit transform
Parameters
----------
p : array-like
Probabilities
Returns
-------
z : array
Logit transform of `p`
Notes
-----
g(p) = log(p / (1 - p))
"""
p = self._clean(p)
return np.log(p / (1. - p))
def inverse(self, z):
"""
Inverse of the logit transform
Parameters
----------
z : array-like
The value of the logit transform at `p`
Returns
-------
p : array
Probabilities
Notes
-----
g^(-1)(z) = exp(z)/(1+exp(z))
"""
z = np.asarray(z)
t = np.exp(-z)
return 1. / (1. + t)
def deriv(self, p):
"""
Derivative of the logit transform
Parameters
----------
p: array-like
Probabilities
Returns
-------
g'(p) : array
Value of the derivative of logit transform at `p`
Notes
-----
g'(p) = 1 / (p * (1 - p))
Alias for `Logit`:
logit = Logit()
"""
p = self._clean(p)
return 1. / (p * (1 - p))
def inverse_deriv(self, z):
"""
Derivative of the inverse of the logit transform
Parameters
----------
z : array-like
`z` is usually the linear predictor for a GLM or GEE model.
Returns
-------
g'^(-1)(z) : array
The value of the derivative of the inverse of the logit function
"""
t = np.exp(z)
return t/(1 + t)**2
def deriv2(self, p):
"""
Second derivative of the logit function.
Parameters
----------
p : array-like
probabilities
Returns
-------
g''(z) : array
The value of the second derivative of the logit function
"""
v = p * (1 - p)
return (2*p - 1) / v**2
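# A small numerical sanity check (illustrative, not part of the original
# module): the logit link and its inverse round-trip, and the derivative of
# the inverse equals the reciprocal of the derivative at the same point.
def _demo_logit():
    link = Logit()
    p = np.array([0.1, 0.5, 0.9])
    z = link(p)
    assert np.allclose(link.inverse(z), p)
    assert np.allclose(link.inverse_deriv(z), 1. / link.deriv(p))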
class logit(Logit):
pass
class Power(Link):
"""
The power transform
Parameters
----------
power : float
The exponent of the power transform
Notes
-----
Aliases of Power:
inverse = Power(power=-1)
sqrt = Power(power=.5)
inverse_squared = Power(power=-2.)
identity = Power(power=1.)
"""
def __init__(self, power=1.):
self.power = power
def __call__(self, p):
"""
Power transform link function
Parameters
----------
p : array-like
Mean parameters
Returns
-------
z : array-like
Power transform of x
Notes
-----
g(p) = x**self.power
"""
z = np.power(p, self.power)
return z
def inverse(self, z):
"""
Inverse of the power transform link function
Parameters
----------
`z` : array-like
Value of the transformed mean parameters at `p`
Returns
-------
`p` : array
Mean parameters
Notes
-----
        g^(-1)(`z`) = `z`**(1/`power`)
"""
p = np.power(z, 1. / self.power)
return p
def deriv(self, p):
"""
Derivative of the power transform
Parameters
----------
p : array-like
Mean parameters
Returns
--------
g'(p) : array
Derivative of power transform of `p`
Notes
-----
g'(`p`) = `power` * `p`**(`power` - 1)
"""
return self.power * np.power(p, self.power - 1)
def deriv2(self, p):
"""
Second derivative of the power transform
Parameters
----------
p : array-like
Mean parameters
Returns
--------
g''(p) : array
Second derivative of the power transform of `p`
Notes
-----
g''(`p`) = `power` * (`power` - 1) * `p`**(`power` - 2)
"""
return self.power * (self.power - 1) * np.power(p, self.power - 2)
def inverse_deriv(self, z):
"""
Derivative of the inverse of the power transform
Parameters
----------
z : array-like
`z` is usually the linear predictor for a GLM or GEE model.
Returns
-------
g^(-1)'(z) : array
The value of the derivative of the inverse of the power transform
function
"""
return np.power(z, (1 - self.power)/self.power) / self.power
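# Illustrative sketch (not part of the original module): Power(power=2.)
# squares its argument, its inverse takes the square root, and deriv matches
# the analytic derivative 2*p.
def _demo_power():
    square = Power(power=2.)
    p = np.array([1., 2., 3.])
    assert np.allclose(square(p), p**2)
    assert np.allclose(square.inverse(p**2), p)
    assert np.allclose(square.deriv(p), 2*p)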
class inverse_power(Power):
"""
The inverse transform
Notes
-----
g(p) = 1/p
Alias of statsmodels.family.links.Power(power=-1.)
"""
def __init__(self):
super(inverse_power, self).__init__(power=-1.)
class sqrt(Power):
"""
The square-root transform
Notes
-----
g(`p`) = sqrt(`p`)
Alias of statsmodels.family.links.Power(power=.5)
"""
def __init__(self):
super(sqrt, self).__init__(power=.5)
class inverse_squared(Power):
"""
The inverse squared transform
Notes
-----
g(`p`) = 1/(`p`\ \*\*2)
    Alias of statsmodels.family.links.Power(power=-2.)
"""
def __init__(self):
super(inverse_squared, self).__init__(power=-2.)
class identity(Power):
"""
The identity transform
Notes
-----
g(`p`) = `p`
Alias of statsmodels.family.links.Power(power=1.)
"""
def __init__(self):
super(identity, self).__init__(power=1.)
class Log(Link):
"""
The log transform
Notes
-----
call and derivative call a private method _clean to trim the data by
machine epsilon so that p is in (0,1). log is an alias of Log.
"""
def _clean(self, x):
return np.clip(x, FLOAT_EPS, np.inf)
def __call__(self, p, **extra):
"""
Log transform link function
Parameters
----------
x : array-like
Mean parameters
Returns
-------
z : array
log(x)
Notes
-----
g(p) = log(p)
"""
x = self._clean(p)
return np.log(x)
def inverse(self, z):
"""
Inverse of log transform link function
Parameters
----------
z : array
The inverse of the link function at `p`
Returns
-------
p : array
The mean probabilities given the value of the inverse `z`
Notes
-----
g^{-1}(z) = exp(z)
"""
return np.exp(z)
def deriv(self, p):
"""
Derivative of log transform link function
Parameters
----------
p : array-like
Mean parameters
Returns
-------
g'(p) : array
derivative of log transform of x
Notes
-----
g'(x) = 1/x
"""
p = self._clean(p)
return 1. / p
def deriv2(self, p):
"""
Second derivative of the log transform link function
Parameters
----------
p : array-like
Mean parameters
Returns
-------
g''(p) : array
Second derivative of log transform of x
Notes
-----
g''(x) = -1/x^2
"""
p = self._clean(p)
return -1. / p**2
def inverse_deriv(self, z):
"""
Derivative of the inverse of the log transform link function
Parameters
----------
z : array
The inverse of the link function at `p`
Returns
-------
g^(-1)'(z) : array
The value of the derivative of the inverse of the log function,
the exponential function
"""
return np.exp(z)
class log(Log):
"""
The log transform
Notes
-----
log is a an alias of Log.
"""
pass
# TODO: the CDFLink is untested
class CDFLink(Logit):
"""
The use the CDF of a scipy.stats distribution
CDFLink is a subclass of logit in order to use its _clean method
for the link and its derivative.
Parameters
----------
dbn : scipy.stats distribution
Default is dbn=scipy.stats.norm
Notes
-----
The CDF link is untested.
"""
def __init__(self, dbn=scipy.stats.norm):
self.dbn = dbn
def __call__(self, p):
"""
CDF link function
Parameters
----------
p : array-like
Mean parameters
Returns
-------
z : array
(ppf) inverse of CDF transform of p
Notes
-----
g(`p`) = `dbn`.ppf(`p`)
"""
p = self._clean(p)
return self.dbn.ppf(p)
def inverse(self, z):
"""
The inverse of the CDF link
Parameters
----------
z : array-like
The value of the inverse of the link function at `p`
Returns
-------
p : array
Mean probabilities. The value of the inverse of CDF link of `z`
Notes
-----
g^(-1)(`z`) = `dbn`.cdf(`z`)
"""
return self.dbn.cdf(z)
def deriv(self, p):
"""
Derivative of CDF link
Parameters
----------
p : array-like
mean parameters
Returns
-------
g'(p) : array
The derivative of CDF transform at `p`
Notes
-----
g'(`p`) = 1./ `dbn`.pdf(`dbn`.ppf(`p`))
"""
p = self._clean(p)
return 1. / self.dbn.pdf(self.dbn.ppf(p))
def deriv2(self, p):
"""
Second derivative of the link function g''(p)
implemented through numerical differentiation
"""
from statsmodels.tools.numdiff import approx_fprime
p = np.atleast_1d(p)
# Note: special function for norm.ppf does not support complex
return np.diag(approx_fprime(p, self.deriv, centered=True))
def inverse_deriv(self, z):
"""
Derivative of the inverse of the CDF transformation link function
Parameters
----------
z : array
The inverse of the link function at `p`
Returns
-------
g^(-1)'(z) : array
The value of the derivative of the inverse of the logit function
"""
return 1/self.deriv(self.inverse(z))
class probit(CDFLink):
"""
The probit (standard normal CDF) transform
Notes
--------
g(p) = scipy.stats.norm.ppf(p)
probit is an alias of CDFLink.
"""
pass
class cauchy(CDFLink):
"""
The Cauchy (standard Cauchy CDF) transform
Notes
-----
g(p) = scipy.stats.cauchy.ppf(p)
cauchy is an alias of CDFLink with dbn=scipy.stats.cauchy
"""
def __init__(self):
super(cauchy, self).__init__(dbn=scipy.stats.cauchy)
def deriv2(self, p):
"""
Second derivative of the Cauchy link function.
Parameters
----------
p: array-like
Probabilities
Returns
-------
g''(p) : array
Value of the second derivative of Cauchy link function at `p`
"""
a = np.pi * (p - 0.5)
d2 = 2 * np.pi**2 * np.sin(a) / np.cos(a)**3
return d2
class CLogLog(Logit):
"""
The complementary log-log transform
CLogLog inherits from Logit in order to have access to its _clean method
for the link and its derivative.
Notes
-----
CLogLog is untested.
"""
def __call__(self, p):
"""
C-Log-Log transform link function
Parameters
----------
p : array
Mean parameters
Returns
-------
z : array
The CLogLog transform of `p`
Notes
-----
g(p) = log(-log(1-p))
"""
p = self._clean(p)
return np.log(-np.log(1 - p))
def inverse(self, z):
"""
Inverse of C-Log-Log transform link function
Parameters
----------
z : array-like
The value of the inverse of the CLogLog link function at `p`
Returns
-------
p : array
Mean parameters
Notes
-----
g^(-1)(`z`) = 1-exp(-exp(`z`))
"""
return 1 - np.exp(-np.exp(z))
def deriv(self, p):
"""
Derivative of C-Log-Log transform link function
Parameters
----------
p : array-like
Mean parameters
Returns
-------
g'(p) : array
The derivative of the CLogLog transform link function
Notes
-----
g'(p) = - 1 / ((p-1)*log(1-p))
"""
p = self._clean(p)
return 1. / ((p - 1) * (np.log(1 - p)))
def deriv2(self, p):
"""
        Second derivative of the C-Log-Log link function
Parameters
----------
p : array-like
Mean parameters
Returns
-------
g''(p) : array
The second derivative of the CLogLog link function
"""
p = self._clean(p)
fl = np.log(1 - p)
d2 = -1 / ((1 - p)**2 * fl)
d2 *= 1 + 1 / fl
return d2
def inverse_deriv(self, z):
"""
Derivative of the inverse of the C-Log-Log transform link function
Parameters
----------
z : array-like
The value of the inverse of the CLogLog link function at `p`
Returns
-------
g^(-1)'(z) : array
The derivative of the inverse of the CLogLog link function
"""
return np.exp(z - np.exp(z))
class cloglog(CLogLog):
"""
The CLogLog transform link function.
Notes
-----
g(`p`) = log(-log(1-`p`))
cloglog is an alias for CLogLog
cloglog = CLogLog()
"""
pass
class NegativeBinomial(object):
'''
The negative binomial link function
Parameters
----------
alpha : float, optional
Alpha is the ancillary parameter of the Negative Binomial link
function. It is assumed to be nonstochastic. The default value is 1.
Permissible values are usually assumed to be in (.01, 2).
'''
def __init__(self, alpha=1.):
self.alpha = alpha
def _clean(self, x):
return np.clip(x, FLOAT_EPS, np.inf)
def __call__(self, p):
'''
Negative Binomial transform link function
Parameters
----------
p : array-like
Mean parameters
Returns
-------
z : array
The negative binomial transform of `p`
Notes
-----
g(p) = log(p/(p + 1/alpha))
'''
p = self._clean(p)
return np.log(p/(p + 1/self.alpha))
def inverse(self, z):
'''
Inverse of the negative binomial transform
Parameters
-----------
z : array-like
The value of the inverse of the negative binomial link at `p`.
Returns
-------
p : array
Mean parameters
Notes
-----
g^(-1)(z) = exp(z)/(alpha*(1-exp(z)))
'''
return -1/(self.alpha * (1 - np.exp(-z)))
def deriv(self, p):
'''
Derivative of the negative binomial transform
Parameters
----------
p : array-like
Mean parameters
Returns
-------
g'(p) : array
The derivative of the negative binomial transform link function
Notes
-----
g'(x) = 1/(x+alpha*x^2)
'''
return 1/(p + self.alpha * p**2)
def deriv2(self,p):
'''
Second derivative of the negative binomial link function.
Parameters
----------
p : array-like
Mean parameters
Returns
-------
g''(p) : array
The second derivative of the negative binomial transform link
function
Notes
-----
g''(x) = -(1+2*alpha*x)/(x+alpha*x^2)^2
'''
numer = -(1 + 2 * self.alpha * p)
denom = (p + self.alpha * p**2)**2
return numer / denom
def inverse_deriv(self, z):
'''
Derivative of the inverse of the negative binomial transform
Parameters
-----------
z : array-like
Usually the linear predictor for a GLM or GEE model
Returns
-------
g^(-1)'(z) : array
The value of the derivative of the inverse of the negative
binomial link
'''
t = np.exp(z)
return t / (self.alpha * (1-t)**2)
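# Numerical round-trip sketch (illustrative; alpha=1.5 is an arbitrary
# choice): applying the link and then its inverse recovers the mean.
def _demo_negative_binomial():
    link = NegativeBinomial(alpha=1.5)
    p = np.array([0.5, 2., 10.])
    assert np.allclose(link.inverse(link(p)), p)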
class nbinom(NegativeBinomial):
"""
The negative binomial link function.
Notes
-----
g(p) = log(p/(p + 1/alpha))
nbinom is an alias of NegativeBinomial.
nbinom = NegativeBinomial(alpha=1.)
"""
pass
| bsd-3-clause |
mottosso/mindbender-setup | bin/windows/python36/Lib/os.py | 22 | 37442 | r"""OS routines for NT or Posix depending on what system we're on.
This exports:
- all functions from posix or nt, e.g. unlink, stat, etc.
- os.path is either posixpath or ntpath
- os.name is either 'posix' or 'nt'
- os.curdir is a string representing the current directory (always '.')
- os.pardir is a string representing the parent directory (always '..')
- os.sep is the (or a most common) pathname separator ('/' or '\\')
- os.extsep is the extension separator (always '.')
- os.altsep is the alternate pathname separator (None or '/')
- os.pathsep is the component separator used in $PATH etc
- os.linesep is the line separator in text files ('\r' or '\n' or '\r\n')
- os.defpath is the default search path for executables
- os.devnull is the file path of the null device ('/dev/null', etc.)
Programs that import and use 'os' stand a better chance of being
portable between different platforms. Of course, they must then
only use functions that are defined by all platforms (e.g., unlink
and opendir), and leave all pathname manipulation to os.path
(e.g., split and join).
"""
#'
import abc
import sys, errno
import stat as st
_names = sys.builtin_module_names
# Note: more names are added to __all__ later.
__all__ = ["altsep", "curdir", "pardir", "sep", "pathsep", "linesep",
"defpath", "name", "path", "devnull", "SEEK_SET", "SEEK_CUR",
"SEEK_END", "fsencode", "fsdecode", "get_exec_path", "fdopen",
"popen", "extsep"]
def _exists(name):
return name in globals()
def _get_exports_list(module):
try:
return list(module.__all__)
except AttributeError:
return [n for n in dir(module) if n[0] != '_']
# Any new dependencies of the os module and/or changes in path separator
# requires updating importlib as well.
if 'posix' in _names:
name = 'posix'
linesep = '\n'
from posix import *
try:
from posix import _exit
__all__.append('_exit')
except ImportError:
pass
import posixpath as path
try:
from posix import _have_functions
except ImportError:
pass
import posix
__all__.extend(_get_exports_list(posix))
del posix
elif 'nt' in _names:
name = 'nt'
linesep = '\r\n'
from nt import *
try:
from nt import _exit
__all__.append('_exit')
except ImportError:
pass
import ntpath as path
import nt
__all__.extend(_get_exports_list(nt))
del nt
try:
from nt import _have_functions
except ImportError:
pass
else:
raise ImportError('no os specific module found')
sys.modules['os.path'] = path
from os.path import (curdir, pardir, sep, pathsep, defpath, extsep, altsep,
devnull)
del _names
if _exists("_have_functions"):
_globals = globals()
def _add(str, fn):
if (fn in _globals) and (str in _have_functions):
_set.add(_globals[fn])
_set = set()
_add("HAVE_FACCESSAT", "access")
_add("HAVE_FCHMODAT", "chmod")
_add("HAVE_FCHOWNAT", "chown")
_add("HAVE_FSTATAT", "stat")
_add("HAVE_FUTIMESAT", "utime")
_add("HAVE_LINKAT", "link")
_add("HAVE_MKDIRAT", "mkdir")
_add("HAVE_MKFIFOAT", "mkfifo")
_add("HAVE_MKNODAT", "mknod")
_add("HAVE_OPENAT", "open")
_add("HAVE_READLINKAT", "readlink")
_add("HAVE_RENAMEAT", "rename")
_add("HAVE_SYMLINKAT", "symlink")
_add("HAVE_UNLINKAT", "unlink")
_add("HAVE_UNLINKAT", "rmdir")
_add("HAVE_UTIMENSAT", "utime")
supports_dir_fd = _set
_set = set()
_add("HAVE_FACCESSAT", "access")
supports_effective_ids = _set
_set = set()
_add("HAVE_FCHDIR", "chdir")
_add("HAVE_FCHMOD", "chmod")
_add("HAVE_FCHOWN", "chown")
_add("HAVE_FDOPENDIR", "listdir")
_add("HAVE_FEXECVE", "execve")
_set.add(stat) # fstat always works
_add("HAVE_FTRUNCATE", "truncate")
_add("HAVE_FUTIMENS", "utime")
_add("HAVE_FUTIMES", "utime")
_add("HAVE_FPATHCONF", "pathconf")
if _exists("statvfs") and _exists("fstatvfs"): # mac os x10.3
_add("HAVE_FSTATVFS", "statvfs")
supports_fd = _set
_set = set()
_add("HAVE_FACCESSAT", "access")
# Some platforms don't support lchmod(). Often the function exists
# anyway, as a stub that always returns ENOSUP or perhaps EOPNOTSUPP.
# (No, I don't know why that's a good design.) ./configure will detect
# this and reject it--so HAVE_LCHMOD still won't be defined on such
# platforms. This is Very Helpful.
#
# However, sometimes platforms without a working lchmod() *do* have
# fchmodat(). (Examples: Linux kernel 3.2 with glibc 2.15,
# OpenIndiana 3.x.) And fchmodat() has a flag that theoretically makes
# it behave like lchmod(). So in theory it would be a suitable
# replacement for lchmod(). But when lchmod() doesn't work, fchmodat()'s
# flag doesn't work *either*. Sadly ./configure isn't sophisticated
# enough to detect this condition--it only determines whether or not
# fchmodat() minimally works.
#
# Therefore we simply ignore fchmodat() when deciding whether or not
# os.chmod supports follow_symlinks. Just checking lchmod() is
# sufficient. After all--if you have a working fchmodat(), your
# lchmod() almost certainly works too.
#
# _add("HAVE_FCHMODAT", "chmod")
_add("HAVE_FCHOWNAT", "chown")
_add("HAVE_FSTATAT", "stat")
_add("HAVE_LCHFLAGS", "chflags")
_add("HAVE_LCHMOD", "chmod")
if _exists("lchown"): # mac os x10.3
_add("HAVE_LCHOWN", "chown")
_add("HAVE_LINKAT", "link")
_add("HAVE_LUTIMES", "utime")
_add("HAVE_LSTAT", "stat")
_add("HAVE_FSTATAT", "stat")
_add("HAVE_UTIMENSAT", "utime")
_add("MS_WINDOWS", "stat")
supports_follow_symlinks = _set
del _set
del _have_functions
del _globals
del _add
# Python uses fixed values for the SEEK_ constants; they are mapped
# to native constants if necessary in posixmodule.c
# Other possible SEEK values are directly imported from posixmodule.c
SEEK_SET = 0
SEEK_CUR = 1
SEEK_END = 2
# Super directory utilities.
# (Inspired by Eric Raymond; the doc strings are mostly his)
def makedirs(name, mode=0o777, exist_ok=False):
"""makedirs(name [, mode=0o777][, exist_ok=False])
Super-mkdir; create a leaf directory and all intermediate ones. Works like
mkdir, except that any intermediate path segment (not just the rightmost)
will be created if it does not exist. If the target directory already
exists, raise an OSError if exist_ok is False. Otherwise no exception is
raised. This is recursive.
"""
head, tail = path.split(name)
if not tail:
head, tail = path.split(head)
if head and tail and not path.exists(head):
try:
makedirs(head, mode, exist_ok)
except FileExistsError:
# Defeats race condition when another thread created the path
pass
cdir = curdir
if isinstance(tail, bytes):
cdir = bytes(curdir, 'ASCII')
if tail == cdir: # xxx/newdir/. exists if xxx/newdir exists
return
try:
mkdir(name, mode)
except OSError:
# Cannot rely on checking for EEXIST, since the operating system
# could give priority to other errors like EACCES or EROFS
if not exist_ok or not path.isdir(name):
raise
def removedirs(name):
"""removedirs(name)
Super-rmdir; remove a leaf directory and all empty intermediate
ones. Works like rmdir except that, if the leaf directory is
successfully removed, directories corresponding to rightmost path
segments will be pruned away until either the whole path is
consumed or an error occurs. Errors during this latter phase are
ignored -- they generally mean that a directory was not empty.
"""
rmdir(name)
head, tail = path.split(name)
if not tail:
head, tail = path.split(head)
while head and tail:
try:
rmdir(head)
except OSError:
break
head, tail = path.split(head)
def renames(old, new):
"""renames(old, new)
Super-rename; create directories as necessary and delete any left
empty. Works like rename, except creation of any intermediate
directories needed to make the new pathname good is attempted
first. After the rename, directories corresponding to rightmost
path segments of the old name will be pruned until either the
whole path is consumed or a nonempty directory is found.
Note: this function can fail with the new directory structure made
if you lack permissions needed to unlink the leaf directory or
file.
"""
head, tail = path.split(new)
if head and tail and not path.exists(head):
makedirs(head)
rename(old, new)
head, tail = path.split(old)
if head and tail:
try:
removedirs(head)
except OSError:
pass
__all__.extend(["makedirs", "removedirs", "renames"])
def walk(top, topdown=True, onerror=None, followlinks=False):
"""Directory tree generator.
For each directory in the directory tree rooted at top (including top
itself, but excluding '.' and '..'), yields a 3-tuple
dirpath, dirnames, filenames
dirpath is a string, the path to the directory. dirnames is a list of
the names of the subdirectories in dirpath (excluding '.' and '..').
filenames is a list of the names of the non-directory files in dirpath.
Note that the names in the lists are just names, with no path components.
To get a full path (which begins with top) to a file or directory in
dirpath, do os.path.join(dirpath, name).
If optional arg 'topdown' is true or not specified, the triple for a
directory is generated before the triples for any of its subdirectories
(directories are generated top down). If topdown is false, the triple
for a directory is generated after the triples for all of its
subdirectories (directories are generated bottom up).
When topdown is true, the caller can modify the dirnames list in-place
(e.g., via del or slice assignment), and walk will only recurse into the
subdirectories whose names remain in dirnames; this can be used to prune the
search, or to impose a specific order of visiting. Modifying dirnames when
topdown is false is ineffective, since the directories in dirnames have
already been generated by the time dirnames itself is generated. No matter
the value of topdown, the list of subdirectories is retrieved before the
tuples for the directory and its subdirectories are generated.
By default errors from the os.scandir() call are ignored. If
optional arg 'onerror' is specified, it should be a function; it
will be called with one argument, an OSError instance. It can
report the error to continue with the walk, or raise the exception
to abort the walk. Note that the filename is available as the
filename attribute of the exception object.
By default, os.walk does not follow symbolic links to subdirectories on
systems that support them. In order to get this functionality, set the
optional argument 'followlinks' to true.
Caution: if you pass a relative pathname for top, don't change the
current working directory between resumptions of walk. walk never
changes the current directory, and assumes that the client doesn't
either.
Example:
import os
from os.path import join, getsize
for root, dirs, files in os.walk('python/Lib/email'):
print(root, "consumes", end="")
print(sum([getsize(join(root, name)) for name in files]), end="")
print("bytes in", len(files), "non-directory files")
if 'CVS' in dirs:
dirs.remove('CVS') # don't visit CVS directories
"""
top = fspath(top)
dirs = []
nondirs = []
walk_dirs = []
# We may not have read permission for top, in which case we can't
# get a list of the files the directory contains. os.walk
# always suppressed the exception then, rather than blow up for a
# minor reason when (say) a thousand readable directories are still
# left to visit. That logic is copied here.
try:
# Note that scandir is global in this module due
# to earlier import-*.
scandir_it = scandir(top)
except OSError as error:
if onerror is not None:
onerror(error)
return
with scandir_it:
while True:
try:
try:
entry = next(scandir_it)
except StopIteration:
break
except OSError as error:
if onerror is not None:
onerror(error)
return
try:
is_dir = entry.is_dir()
except OSError:
# If is_dir() raises an OSError, consider that the entry is not
# a directory, same behaviour than os.path.isdir().
is_dir = False
if is_dir:
dirs.append(entry.name)
else:
nondirs.append(entry.name)
if not topdown and is_dir:
# Bottom-up: recurse into sub-directory, but exclude symlinks to
# directories if followlinks is False
if followlinks:
walk_into = True
else:
try:
is_symlink = entry.is_symlink()
except OSError:
# If is_symlink() raises an OSError, consider that the
# entry is not a symbolic link, same behaviour than
# os.path.islink().
is_symlink = False
walk_into = not is_symlink
if walk_into:
walk_dirs.append(entry.path)
# Yield before recursion if going top down
if topdown:
yield top, dirs, nondirs
# Recurse into sub-directories
islink, join = path.islink, path.join
for dirname in dirs:
new_path = join(top, dirname)
# Issue #23605: os.path.islink() is used instead of caching
# entry.is_symlink() result during the loop on os.scandir() because
# the caller can replace the directory entry during the "yield"
# above.
if followlinks or not islink(new_path):
yield from walk(new_path, topdown, onerror, followlinks)
else:
# Recurse into sub-directories
for new_path in walk_dirs:
yield from walk(new_path, topdown, onerror, followlinks)
# Yield after recursion if going bottom up
yield top, dirs, nondirs
__all__.append("walk")
if {open, stat} <= supports_dir_fd and {listdir, stat} <= supports_fd:
def fwalk(top=".", topdown=True, onerror=None, *, follow_symlinks=False, dir_fd=None):
"""Directory tree generator.
This behaves exactly like walk(), except that it yields a 4-tuple
dirpath, dirnames, filenames, dirfd
`dirpath`, `dirnames` and `filenames` are identical to walk() output,
and `dirfd` is a file descriptor referring to the directory `dirpath`.
The advantage of fwalk() over walk() is that it's safe against symlink
races (when follow_symlinks is False).
If dir_fd is not None, it should be a file descriptor open to a directory,
and top should be relative; top will then be relative to that directory.
(dir_fd is always supported for fwalk.)
Caution:
Since fwalk() yields file descriptors, those are only valid until the
next iteration step, so you should dup() them if you want to keep them
for a longer period.
Example:
import os
for root, dirs, files, rootfd in os.fwalk('python/Lib/email'):
print(root, "consumes", end="")
print(sum([os.stat(name, dir_fd=rootfd).st_size for name in files]),
end="")
print("bytes in", len(files), "non-directory files")
if 'CVS' in dirs:
dirs.remove('CVS') # don't visit CVS directories
"""
if not isinstance(top, int) or not hasattr(top, '__index__'):
top = fspath(top)
# Note: To guard against symlink races, we use the standard
# lstat()/open()/fstat() trick.
orig_st = stat(top, follow_symlinks=False, dir_fd=dir_fd)
topfd = open(top, O_RDONLY, dir_fd=dir_fd)
try:
if (follow_symlinks or (st.S_ISDIR(orig_st.st_mode) and
path.samestat(orig_st, stat(topfd)))):
yield from _fwalk(topfd, top, topdown, onerror, follow_symlinks)
finally:
close(topfd)
def _fwalk(topfd, toppath, topdown, onerror, follow_symlinks):
# Note: This uses O(depth of the directory tree) file descriptors: if
# necessary, it can be adapted to only require O(1) FDs, see issue
# #13734.
names = listdir(topfd)
dirs, nondirs = [], []
for name in names:
try:
# Here, we don't use AT_SYMLINK_NOFOLLOW to be consistent with
# walk() which reports symlinks to directories as directories.
# We do however check for symlinks before recursing into
# a subdirectory.
if st.S_ISDIR(stat(name, dir_fd=topfd).st_mode):
dirs.append(name)
else:
nondirs.append(name)
except OSError:
try:
# Add dangling symlinks, ignore disappeared files
if st.S_ISLNK(stat(name, dir_fd=topfd, follow_symlinks=False)
.st_mode):
nondirs.append(name)
except OSError:
continue
if topdown:
yield toppath, dirs, nondirs, topfd
for name in dirs:
try:
orig_st = stat(name, dir_fd=topfd, follow_symlinks=follow_symlinks)
dirfd = open(name, O_RDONLY, dir_fd=topfd)
except OSError as err:
if onerror is not None:
onerror(err)
continue
try:
if follow_symlinks or path.samestat(orig_st, stat(dirfd)):
dirpath = path.join(toppath, name)
yield from _fwalk(dirfd, dirpath, topdown, onerror, follow_symlinks)
finally:
close(dirfd)
if not topdown:
yield toppath, dirs, nondirs, topfd
__all__.append("fwalk")
# Make sure os.environ exists, at least
try:
environ
except NameError:
environ = {}
def execl(file, *args):
"""execl(file, *args)
Execute the executable file with argument list args, replacing the
current process. """
execv(file, args)
def execle(file, *args):
"""execle(file, *args, env)
Execute the executable file with argument list args and
environment env, replacing the current process. """
env = args[-1]
execve(file, args[:-1], env)
def execlp(file, *args):
"""execlp(file, *args)
Execute the executable file (which is searched for along $PATH)
with argument list args, replacing the current process. """
execvp(file, args)
def execlpe(file, *args):
"""execlpe(file, *args, env)
Execute the executable file (which is searched for along $PATH)
with argument list args and environment env, replacing the current
process. """
env = args[-1]
execvpe(file, args[:-1], env)
def execvp(file, args):
"""execvp(file, args)
Execute the executable file (which is searched for along $PATH)
with argument list args, replacing the current process.
args may be a list or tuple of strings. """
_execvpe(file, args)
def execvpe(file, args, env):
"""execvpe(file, args, env)
Execute the executable file (which is searched for along $PATH)
with argument list args and environment env , replacing the
current process.
args may be a list or tuple of strings. """
_execvpe(file, args, env)
__all__.extend(["execl","execle","execlp","execlpe","execvp","execvpe"])
def _execvpe(file, args, env=None):
if env is not None:
exec_func = execve
argrest = (args, env)
else:
exec_func = execv
argrest = (args,)
env = environ
head, tail = path.split(file)
if head:
exec_func(file, *argrest)
return
last_exc = saved_exc = None
saved_tb = None
path_list = get_exec_path(env)
if name != 'nt':
file = fsencode(file)
path_list = map(fsencode, path_list)
for dir in path_list:
fullname = path.join(dir, file)
try:
exec_func(fullname, *argrest)
except OSError as e:
last_exc = e
tb = sys.exc_info()[2]
if (e.errno != errno.ENOENT and e.errno != errno.ENOTDIR
and saved_exc is None):
saved_exc = e
saved_tb = tb
if saved_exc:
raise saved_exc.with_traceback(saved_tb)
raise last_exc.with_traceback(tb)
def get_exec_path(env=None):
"""Returns the sequence of directories that will be searched for the
named executable (similar to a shell) when launching a process.
*env* must be an environment variable dict or None. If *env* is None,
os.environ will be used.
"""
# Use a local import instead of a global import to limit the number of
# modules loaded at startup: the os module is always loaded at startup by
# Python. It may also avoid a bootstrap issue.
import warnings
if env is None:
env = environ
# {b'PATH': ...}.get('PATH') and {'PATH': ...}.get(b'PATH') emit a
# BytesWarning when using python -b or python -bb: ignore the warning
with warnings.catch_warnings():
warnings.simplefilter("ignore", BytesWarning)
try:
path_list = env.get('PATH')
except TypeError:
path_list = None
if supports_bytes_environ:
try:
path_listb = env[b'PATH']
except (KeyError, TypeError):
pass
else:
if path_list is not None:
raise ValueError(
"env cannot contain 'PATH' and b'PATH' keys")
path_list = path_listb
if path_list is not None and isinstance(path_list, bytes):
path_list = fsdecode(path_list)
if path_list is None:
path_list = defpath
return path_list.split(pathsep)
# Change environ to automatically call putenv(), unsetenv if they exist.
from _collections_abc import MutableMapping
class _Environ(MutableMapping):
def __init__(self, data, encodekey, decodekey, encodevalue, decodevalue, putenv, unsetenv):
self.encodekey = encodekey
self.decodekey = decodekey
self.encodevalue = encodevalue
self.decodevalue = decodevalue
self.putenv = putenv
self.unsetenv = unsetenv
self._data = data
def __getitem__(self, key):
try:
value = self._data[self.encodekey(key)]
except KeyError:
# raise KeyError with the original key value
raise KeyError(key) from None
return self.decodevalue(value)
def __setitem__(self, key, value):
key = self.encodekey(key)
value = self.encodevalue(value)
self.putenv(key, value)
self._data[key] = value
def __delitem__(self, key):
encodedkey = self.encodekey(key)
self.unsetenv(encodedkey)
try:
del self._data[encodedkey]
except KeyError:
# raise KeyError with the original key value
raise KeyError(key) from None
def __iter__(self):
for key in self._data:
yield self.decodekey(key)
def __len__(self):
return len(self._data)
def __repr__(self):
return 'environ({{{}}})'.format(', '.join(
('{!r}: {!r}'.format(self.decodekey(key), self.decodevalue(value))
for key, value in self._data.items())))
def copy(self):
return dict(self)
def setdefault(self, key, value):
if key not in self:
self[key] = value
return self[key]
try:
_putenv = putenv
except NameError:
_putenv = lambda key, value: None
else:
if "putenv" not in __all__:
__all__.append("putenv")
try:
_unsetenv = unsetenv
except NameError:
_unsetenv = lambda key: _putenv(key, "")
else:
if "unsetenv" not in __all__:
__all__.append("unsetenv")
def _createenviron():
if name == 'nt':
# Where Env Var Names Must Be UPPERCASE
def check_str(value):
if not isinstance(value, str):
raise TypeError("str expected, not %s" % type(value).__name__)
return value
encode = check_str
decode = str
def encodekey(key):
return encode(key).upper()
data = {}
for key, value in environ.items():
data[encodekey(key)] = value
else:
# Where Env Var Names Can Be Mixed Case
encoding = sys.getfilesystemencoding()
def encode(value):
if not isinstance(value, str):
raise TypeError("str expected, not %s" % type(value).__name__)
return value.encode(encoding, 'surrogateescape')
def decode(value):
return value.decode(encoding, 'surrogateescape')
encodekey = encode
data = environ
return _Environ(data,
encodekey, decode,
encode, decode,
_putenv, _unsetenv)
# unicode environ
environ = _createenviron()
del _createenviron
def getenv(key, default=None):
"""Get an environment variable, return None if it doesn't exist.
The optional second argument can specify an alternate default.
key, default and the result are str."""
return environ.get(key, default)
supports_bytes_environ = (name != 'nt')
__all__.extend(("getenv", "supports_bytes_environ"))
if supports_bytes_environ:
def _check_bytes(value):
if not isinstance(value, bytes):
raise TypeError("bytes expected, not %s" % type(value).__name__)
return value
# bytes environ
environb = _Environ(environ._data,
_check_bytes, bytes,
_check_bytes, bytes,
_putenv, _unsetenv)
del _check_bytes
def getenvb(key, default=None):
"""Get an environment variable, return None if it doesn't exist.
The optional second argument can specify an alternate default.
key, default and the result are bytes."""
return environb.get(key, default)
__all__.extend(("environb", "getenvb"))
def _fscodec():
encoding = sys.getfilesystemencoding()
errors = sys.getfilesystemencodeerrors()
def fsencode(filename):
"""Encode filename (an os.PathLike, bytes, or str) to the filesystem
encoding with 'surrogateescape' error handler, return bytes unchanged.
On Windows, use 'strict' error handler if the file system encoding is
'mbcs' (which is the default encoding).
"""
filename = fspath(filename) # Does type-checking of `filename`.
if isinstance(filename, str):
return filename.encode(encoding, errors)
else:
return filename
def fsdecode(filename):
"""Decode filename (an os.PathLike, bytes, or str) from the filesystem
encoding with 'surrogateescape' error handler, return str unchanged. On
Windows, use 'strict' error handler if the file system encoding is
'mbcs' (which is the default encoding).
"""
filename = fspath(filename) # Does type-checking of `filename`.
if isinstance(filename, bytes):
return filename.decode(encoding, errors)
else:
return filename
return fsencode, fsdecode
fsencode, fsdecode = _fscodec()
del _fscodec
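# Round-trip sketch (illustrative helper): fsencode/fsdecode convert between
# str and bytes using the filesystem encoding, so encoding then decoding
# returns the original name.
def _demo_fscodec():
    name = "example.txt"
    raw = fsencode(name)          # str -> bytes
    assert isinstance(raw, bytes)
    assert fsdecode(raw) == name  # bytes -> str round-trips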
# Supply spawn*() (probably only for Unix)
if _exists("fork") and not _exists("spawnv") and _exists("execv"):
P_WAIT = 0
P_NOWAIT = P_NOWAITO = 1
__all__.extend(["P_WAIT", "P_NOWAIT", "P_NOWAITO"])
# XXX Should we support P_DETACH? I suppose it could fork()**2
# and close the std I/O streams. Also, P_OVERLAY is the same
# as execv*()?
def _spawnvef(mode, file, args, env, func):
# Internal helper; func is the exec*() function to use
if not isinstance(args, (tuple, list)):
raise TypeError('argv must be a tuple or a list')
if not args or not args[0]:
raise ValueError('argv first element cannot be empty')
pid = fork()
if not pid:
# Child
try:
if env is None:
func(file, args)
else:
func(file, args, env)
except:
_exit(127)
else:
# Parent
if mode == P_NOWAIT:
return pid # Caller is responsible for waiting!
while 1:
wpid, sts = waitpid(pid, 0)
if WIFSTOPPED(sts):
continue
elif WIFSIGNALED(sts):
return -WTERMSIG(sts)
elif WIFEXITED(sts):
return WEXITSTATUS(sts)
else:
raise OSError("Not stopped, signaled or exited???")
def spawnv(mode, file, args):
"""spawnv(mode, file, args) -> integer
Execute file with arguments from args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, None, execv)
def spawnve(mode, file, args, env):
"""spawnve(mode, file, args, env) -> integer
Execute file with arguments from args in a subprocess with the
specified environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, env, execve)
    # Note: spawnvp[e] isn't currently supported on Windows
def spawnvp(mode, file, args):
"""spawnvp(mode, file, args) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, None, execvp)
def spawnvpe(mode, file, args, env):
"""spawnvpe(mode, file, args, env) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess with the supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, env, execvpe)
__all__.extend(["spawnv", "spawnve", "spawnvp", "spawnvpe"])
if _exists("spawnv"):
# These aren't supplied by the basic Windows code
# but can be easily implemented in Python
def spawnl(mode, file, *args):
"""spawnl(mode, file, *args) -> integer
Execute file with arguments from args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return spawnv(mode, file, args)
def spawnle(mode, file, *args):
"""spawnle(mode, file, *args, env) -> integer
Execute file with arguments from args in a subprocess with the
supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
env = args[-1]
return spawnve(mode, file, args[:-1], env)
__all__.extend(["spawnl", "spawnle"])
if _exists("spawnvp"):
# At the moment, Windows doesn't implement spawnvp[e],
# so it won't have spawnlp[e] either.
def spawnlp(mode, file, *args):
"""spawnlp(mode, file, *args) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return spawnvp(mode, file, args)
def spawnlpe(mode, file, *args):
"""spawnlpe(mode, file, *args, env) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess with the supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
env = args[-1]
return spawnvpe(mode, file, args[:-1], env)
__all__.extend(["spawnlp", "spawnlpe"])
# Supply os.popen()
def popen(cmd, mode="r", buffering=-1):
if not isinstance(cmd, str):
raise TypeError("invalid cmd type (%s, expected string)" % type(cmd))
if mode not in ("r", "w"):
raise ValueError("invalid mode %r" % mode)
if buffering == 0 or buffering is None:
raise ValueError("popen() does not support unbuffered streams")
import subprocess, io
if mode == "r":
proc = subprocess.Popen(cmd,
shell=True,
stdout=subprocess.PIPE,
bufsize=buffering)
return _wrap_close(io.TextIOWrapper(proc.stdout), proc)
else:
proc = subprocess.Popen(cmd,
shell=True,
stdin=subprocess.PIPE,
bufsize=buffering)
return _wrap_close(io.TextIOWrapper(proc.stdin), proc)
# Helper for popen() -- a proxy for a file whose close waits for the process
class _wrap_close:
def __init__(self, stream, proc):
self._stream = stream
self._proc = proc
def close(self):
self._stream.close()
returncode = self._proc.wait()
if returncode == 0:
return None
if name == 'nt':
return returncode
else:
return returncode << 8 # Shift left to match old behavior
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def __getattr__(self, name):
return getattr(self._stream, name)
def __iter__(self):
return iter(self._stream)
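# Sketch of how popen() and _wrap_close cooperate (assumes a POSIX
# shell with an 'echo' builtin). Closing the wrapper waits for the
# child; on Unix a nonzero status comes back shifted left 8 bits to
# match the historical os.system() encoding:
#
#   with popen('echo hello') as f:       # _wrap_close supports 'with'
#       data = f.read()                  # 'hello\n'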
# Supply os.fdopen()
def fdopen(fd, *args, **kwargs):
if not isinstance(fd, int):
raise TypeError("invalid fd type (%s, expected integer)" % type(fd))
import io
return io.open(fd, *args, **kwargs)
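# Minimal fdopen() illustration (a sketch; assumes pipe() is exposed
# by the underlying posix/nt module on this platform):
#
#   r, w = pipe()
#   with fdopen(w, 'w') as fout:
#       fout.write('ping')
#   with fdopen(r) as fin:
#       data = fin.read()                # 'ping'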
# For testing purposes, make sure the function is available when the C
# implementation exists.
def _fspath(path):
"""Return the path representation of a path-like object.
If str or bytes is passed in, it is returned unchanged. Otherwise the
os.PathLike interface is used to get the path representation. If the
path representation is not str or bytes, TypeError is raised. If the
provided path is not str, bytes, or os.PathLike, TypeError is raised.
"""
if isinstance(path, (str, bytes)):
return path
# Work from the object's type to match method resolution of other magic
# methods.
path_type = type(path)
try:
path_repr = path_type.__fspath__(path)
except AttributeError:
if hasattr(path_type, '__fspath__'):
raise
else:
raise TypeError("expected str, bytes or os.PathLike object, "
"not " + path_type.__name__)
if isinstance(path_repr, (str, bytes)):
return path_repr
else:
raise TypeError("expected {}.__fspath__() to return str or bytes, "
"not {}".format(path_type.__name__,
type(path_repr).__name__))
# If there is no C implementation, make the pure Python version the
# implementation as transparently as possible.
if not _exists('fspath'):
fspath = _fspath
fspath.__name__ = "fspath"
class PathLike(abc.ABC):
"""Abstract base class for implementing the file system path protocol."""
@abc.abstractmethod
def __fspath__(self):
"""Return the file system path representation of the object."""
raise NotImplementedError
@classmethod
def __subclasshook__(cls, subclass):
return hasattr(subclass, '__fspath__')
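# Hedged sketch of implementing the protocol above. Any class with a
# __fspath__ method is treated as a PathLike subclass via the
# __subclasshook__, even without explicit registration ('Tmp' here is
# a hypothetical example class, not part of this module):
#
#   class Tmp:
#       def __fspath__(self):
#           return '/tmp/example'
#   # issubclass(Tmp, PathLike) -> True
#   # fspath(Tmp())             -> '/tmp/example'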
| mit |
chrischabot/opensocial-python-client | tests/opensocial_tests/oauth_test.py | 4 | 1242 | #!/usr/bin/python
#
# Copyright (C) 2007, 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = '[email protected] (David Byttow)'
import logging
import unittest
import urllib2
import opensocial
from opensocial import oauth
class TestOAuth(unittest.TestCase):
def setUp(self):
self.config = opensocial.ContainerConfig(
oauth_consumer_key='oauth.org:12345689',
oauth_consumer_secret='not_a_secret',
server_rpc_base='http://oauthbox.appspot.com/rpc')
self.container = opensocial.ContainerContext(self.config)
self.user_id = '101'
def test_fetch(self):
data = self.container.fetch_person(self.user_id)
self.assertEquals(data.get_field('verified'), 'True')
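# Conventional unittest entry point so the module can be run directly
# (an addition; the original file relies on an external test runner):
if __name__ == '__main__':
    unittest.main()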
| apache-2.0 |
spreg-git/pysal | pysal/spreg/error_sp_hom_regimes.py | 4 | 70548 | '''
Hom family of models with regimes.
'''
__author__ = "Luc Anselin [email protected], Pedro V. Amaral [email protected], Daniel Arribas-Bel [email protected]"
from scipy import sparse as SP
import numpy as np
import multiprocessing as mp
from numpy import linalg as la
from pysal import lag_spatial
from utils import power_expansion, set_endog, iter_msg, sp_att
from utils import get_A1_hom, get_A2_hom, get_A1_het, optim_moments
from utils import get_spFilter, get_lags, _moments2eqs
from utils import spdot, RegressionPropsY, set_warn
from ols import BaseOLS
from twosls import BaseTSLS
from error_sp_hom import BaseGM_Error_Hom, BaseGM_Endog_Error_Hom, moments_hom, get_vc_hom, get_omega_hom, get_omega_hom_ols
import regimes as REGI
import user_output as USER
import summary_output as SUMMARY
from platform import system
class GM_Error_Hom_Regimes(RegressionPropsY, REGI.Regimes_Frame):
'''
GMM method for a spatial error model with homoskedasticity, with regimes,
results and diagnostics; based on Drukker et al. (2010) [1]_, following
Anselin (2011) [2]_.
Parameters
----------
y : array
nx1 array for dependent variable
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, excluding the constant
regimes : list
List of n values with the mapping of each
observation to a regime. Assumed to be aligned with 'x'.
w : pysal W object
Spatial weights object
constant_regi: ['one', 'many']
Switcher controlling the constant term setup. It may take
the following values:
* 'one': a vector of ones is appended to x and held
constant across regimes
* 'many': a vector of ones is appended to x and considered
different per regime (default)
cols2regi : list, 'all'
Argument indicating whether each
column of x should be considered as different per regime
or held constant across regimes (False).
If a list, k booleans indicating for each variable the
option (True if one per regime, False to be held constant).
If 'all' (default), all the variables vary by regime.
regime_err_sep : boolean
If True, a separate regression is run for each regime.
regime_lag_sep : boolean
Always False, kept for consistency, ignored.
max_iter : int
Maximum number of iterations of steps 2a and 2b from Arraiz
et al. Note: epsilon provides an additional stop condition.
epsilon : float
Minimum change in lambda required to stop iterations of
steps 2a and 2b from Arraiz et al. Note: max_iter provides
an additional stop condition.
A1 : string
If A1='het', then the matrix A1 is defined as in Arraiz et
al. If A1='hom', then as in Anselin (2011). If
A1='hom_sc', then as in Drukker, Egger and Prucha (2010)
and Drukker, Prucha and Raciborski (2010).
vm : boolean
If True, include variance-covariance matrix in summary
results
cores : boolean
Specifies if multiprocessing is to be used
Default: no multiprocessing, cores = False
Note: Multiprocessing may not work on all platforms.
name_y : string
Name of dependent variable for use in output
name_x : list of strings
Names of independent variables for use in output
name_w : string
Name of weights matrix for use in output
name_ds : string
Name of dataset for use in output
name_regimes : string
Name of regime variable for use in the output
Attributes
----------
summary : string
Summary of regression results and diagnostics (note: use in
conjunction with the print command)
betas : array
kx1 array of estimated coefficients
u : array
nx1 array of residuals
e_filtered : array
nx1 array of spatially filtered residuals
predy : array
nx1 array of predicted y values
n : integer
Number of observations
k : integer
Number of variables for which coefficients are estimated
(including the constant)
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
y : array
nx1 array for dependent variable
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, including the constant
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
iter_stop : string
Stop criterion reached during iteration of steps 2a and 2b
from Arraiz et al.
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
iteration : integer
Number of iterations of steps 2a and 2b from Arraiz et al.
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
mean_y : float
Mean of dependent variable
std_y : float
Standard deviation of dependent variable
pr2 : float
Pseudo R squared (squared correlation between y and ypred)
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
vm : array
Variance covariance matrix (kxk)
sig2 : float
Sigma squared used in computations
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
std_err : array
1xk array of standard errors of the betas
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
z_stat : list of tuples
z statistic; each tuple contains the pair (statistic,
p-value), where each is a float
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
xtx : float
X'X
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
name_y : string
Name of dependent variable for use in output
name_x : list of strings
Names of independent variables for use in output
name_w : string
Name of weights matrix for use in output
name_ds : string
Name of dataset for use in output
name_regimes : string
Name of regime variable for use in the output
title : string
Name of the regression method used
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
regimes : list
List of n values with the mapping of each
observation to a regime. Assumed to be aligned with 'x'.
constant_regi: ['one', 'many']
Ignored if regimes=False. Constant option for regimes.
Switcher controlling the constant term setup. It may take
the following values:
* 'one': a vector of ones is appended to x and held
constant across regimes
* 'many': a vector of ones is appended to x and considered
different per regime
cols2regi : list, 'all'
Ignored if regimes=False. Argument indicating whether each
column of x should be considered as different per regime
or held constant across regimes (False).
If a list, k booleans indicating for each variable the
option (True if one per regime, False to be held constant).
If 'all', all the variables vary by regime.
regime_err_sep : boolean
If True, a separate regression is run for each regime.
kr : int
Number of variables/columns to be "regimized" or subject
to change by regime. These will result in one parameter
estimate by regime for each variable (i.e. nr parameters per
variable)
kf : int
Number of variables/columns to be considered fixed or
global across regimes and hence only obtain one parameter
estimate
nr : int
Number of different regimes in the 'regimes' list
multi : dictionary
Only available when multiple regressions are estimated,
i.e. when regime_err_sep=True and no variable is fixed
across regimes.
Contains all attributes of each individual regression
References
----------
.. [1] Drukker, D. M., Egger, P., Prucha, I. R. (2010)
"On Two-step Estimation of a Spatial Autoregressive Model with Autoregressive
Disturbances and Endogenous Regressors". Working paper.
.. [2] Anselin, L. (2011) "GMM Estimation of Spatial Error Autocorrelation
with and without Heteroskedasticity".
Examples
--------
We first need to import the needed modules, namely numpy to convert the
data we read into arrays that ``spreg`` understands and ``pysal`` to
perform all the analysis.
>>> import numpy as np
>>> import pysal
Open data on NCOVR US County Homicides (3085 areas) using pysal.open().
This is the DBF associated with the NAT shapefile. Note that
pysal.open() also reads data in CSV format; since the actual class
requires data to be passed in as numpy arrays, the user can read their
data in using any method.
>>> db = pysal.open(pysal.examples.get_path("NAT.dbf"),'r')
Extract the HR90 column (homicide rates in 1990) from the DBF file and make it the
dependent variable for the regression. Note that PySAL requires this to be
a numpy array of shape (n, 1) as opposed to the also common shape of (n, )
that other packages accept.
>>> y_var = 'HR90'
>>> y = np.array([db.by_col(y_var)]).reshape(3085,1)
Extract UE90 (unemployment rate) and PS90 (population structure) vectors from
the DBF to be used as independent variables in the regression. Other variables
can be inserted by adding their names to x_var, such as x_var = ['Var1','Var2',...]
Note that PySAL requires this to be an nxj numpy array, where j is the
number of independent variables (not including a constant). By default
this model adds a vector of ones to the independent variables passed in.
>>> x_var = ['PS90','UE90']
>>> x = np.array([db.by_col(name) for name in x_var]).T
The different regimes in this data are given according to the North and
South dummy (SOUTH).
>>> r_var = 'SOUTH'
>>> regimes = db.by_col(r_var)
Since we want to run a spatial error model, we need to specify
the spatial weights matrix that includes the spatial configuration of the
observations. To do that, we can open an already existing gal file or
create a new one. In this case, we will create one from ``NAT.shp``.
>>> w = pysal.rook_from_shapefile(pysal.examples.get_path("NAT.shp"))
Unless there is a good reason not to do it, the weights have to be
row-standardized so every row of the matrix sums to one. Among other
things, this allows the spatial lag of a variable to be interpreted as the
average value of the neighboring observations. In PySAL, this can be
easily performed in the following way:
>>> w.transform = 'r'
We are all set with the preliminaries, so we are ready to run the model. In this
case, we will need the variables and the weights matrix. If we want to
have the names of the variables printed in the output summary, we will
have to pass them in as well, although this is optional.
>>> reg = GM_Error_Hom_Regimes(y, x, regimes, w=w, name_y=y_var, name_x=x_var, name_ds='NAT')
Once we have run the model, we can explore a little bit the output. The
regression object we have created has many attributes so take your time to
discover them. This class offers an error model that assumes
homoskedasticity but that unlike the models from
``pysal.spreg.error_sp``, it allows for inference on the spatial
parameter. This is why you obtain as many coefficient estimates as
standard errors, which you calculate taking the square root of the
diagonal of the variance-covariance matrix of the parameters. Alternatively,
we can have a summary of the output by typing: model.summary
>>> print reg.name_x
['0_CONSTANT', '0_PS90', '0_UE90', '1_CONSTANT', '1_PS90', '1_UE90', 'lambda']
>>> print np.around(reg.betas,4)
[[ 0.069 ]
[ 0.7885]
[ 0.5398]
[ 5.0948]
[ 1.1965]
[ 0.6018]
[ 0.4104]]
>>> print np.sqrt(reg.vm.diagonal())
[ 0.39105854 0.15664624 0.05254328 0.48379958 0.20018799 0.05834139
0.01882401]
'''
def __init__(self, y, x, regimes, w,
max_iter=1, epsilon=0.00001, A1='het', cores=False,
constant_regi='many', cols2regi='all', regime_err_sep=False,
regime_lag_sep=False,
vm=False, name_y=None, name_x=None,
name_w=None, name_ds=None, name_regimes=None):
n = USER.check_arrays(y, x)
USER.check_y(y, n)
USER.check_weights(w, y, w_required=True)
self.constant_regi = constant_regi
self.cols2regi = cols2regi
self.regime_err_sep = regime_err_sep
self.name_ds = USER.set_name_ds(name_ds)
self.name_y = USER.set_name_y(name_y)
self.name_w = USER.set_name_w(name_w, w)
self.name_regimes = USER.set_name_ds(name_regimes)
self.n = n
self.y = y
x_constant = USER.check_constant(x)
name_x = USER.set_name_x(name_x, x)
self.name_x_r = name_x
cols2regi = REGI.check_cols2regi(constant_regi, cols2regi, x)
self.regimes_set = REGI._get_regimes_set(regimes)
self.regimes = regimes
USER.check_regimes(self.regimes_set, self.n, x.shape[1])
self.regime_err_sep = regime_err_sep
if regime_err_sep == True:
if set(cols2regi) == set([True]):
self._error_regimes_multi(y, x, regimes, w, cores,
max_iter, epsilon, A1, cols2regi, vm, name_x)
else:
raise Exception, "All coefficients must vary accross regimes if regime_err_sep = True."
else:
if A1 == 'hom':
wA1 = get_A1_hom(w.sparse)
elif A1 == 'hom_sc':
wA1 = get_A1_hom(w.sparse, scalarKP=True)
elif A1 == 'het':
wA1 = get_A1_het(w.sparse)
wA2 = get_A2_hom(w.sparse)
# 1a. OLS --> \tilde{\delta}
self.x, self.name_x = REGI.Regimes_Frame.__init__(self, x_constant,
regimes, constant_regi=None, cols2regi=cols2regi, names=name_x)
ols = BaseOLS(y=y, x=self.x)
self.k = ols.x.shape[1]
# 1b. GM --> \tilde{\rho}
moments = moments_hom(w.sparse, wA1, wA2, ols.u)
lambda1 = optim_moments(moments)
lambda_old = lambda1
self.iteration, eps = 0, 1
while self.iteration < max_iter and eps > epsilon:
# 2a. SWLS --> \hat{\delta}
xs = get_spFilter(w, lambda1, x_constant)
ys = get_spFilter(w, lambda1, y)
xs = REGI.Regimes_Frame.__init__(self, xs,
regimes, constant_regi=None, cols2regi=cols2regi)[0]
ols_s = BaseOLS(y=ys, x=xs)
self.predy = spdot(self.x, ols_s.betas)
self.u = self.y - self.predy
# 2b. GM 2nd iteration --> \hat{\rho}
moments = moments_hom(w.sparse, wA1, wA2, self.u)
psi = get_vc_hom(w.sparse, wA1, wA2, self, lambda_old)[0]
lambda2 = optim_moments(moments, psi)
eps = abs(lambda2 - lambda_old)
lambda_old = lambda2
self.iteration += 1
self.iter_stop = iter_msg(self.iteration, max_iter)
# Output
self.betas = np.vstack((ols_s.betas, lambda2))
self.vm, self.sig2 = get_omega_hom_ols(
w.sparse, wA1, wA2, self, lambda2, moments[0])
self.e_filtered = self.u - lambda2 * lag_spatial(w, self.u)
self.title = "SPATIALLY WEIGHTED LEAST SQUARES (HOM) - REGIMES"
self.name_x.append('lambda')
self.kf += 1
self.chow = REGI.Chow(self)
self._cache = {}
SUMMARY.GM_Error_Hom(reg=self, w=w, vm=vm, regimes=True)
def _error_regimes_multi(self, y, x, regimes, w, cores,
max_iter, epsilon, A1, cols2regi, vm, name_x):
regi_ids = dict(
(r, list(np.where(np.array(regimes) == r)[0])) for r in self.regimes_set)
results_p = {}
"""
for r in self.regimes_set:
if system() == 'Windows':
is_win = True
results_p[r] = _work_error(*(y,x,regi_ids,r,w,max_iter,epsilon,A1,self.name_ds,self.name_y,name_x+['lambda'],self.name_w,self.name_regimes))
else:
pool = mp.Pool(cores)
results_p[r] = pool.apply_async(_work_error,args=(y,x,regi_ids,r,w,max_iter,epsilon,A1,self.name_ds,self.name_y,name_x+['lambda'],self.name_w,self.name_regimes, ))
is_win = False
"""
for r in self.regimes_set:
if cores:
pool = mp.Pool(None)
results_p[r] = pool.apply_async(_work_error, args=(
y, x, regi_ids, r, w, max_iter, epsilon, A1, self.name_ds, self.name_y, name_x + ['lambda'], self.name_w, self.name_regimes, ))
else:
results_p[r] = _work_error(
*(y, x, regi_ids, r, w, max_iter, epsilon, A1, self.name_ds, self.name_y, name_x + ['lambda'], self.name_w, self.name_regimes))
self.kryd = 0
self.kr = len(cols2regi) + 1
self.kf = 0
self.nr = len(self.regimes_set)
self.vm = np.zeros((self.nr * self.kr, self.nr * self.kr), float)
self.betas = np.zeros((self.nr * self.kr, 1), float)
self.u = np.zeros((self.n, 1), float)
self.predy = np.zeros((self.n, 1), float)
self.e_filtered = np.zeros((self.n, 1), float)
self.name_y, self.name_x = [], []
"""
if not is_win:
pool.close()
pool.join()
"""
if cores:
pool.close()
pool.join()
results = {}
counter = 0
for r in self.regimes_set:
"""
if is_win:
results[r] = results_p[r]
else:
results[r] = results_p[r].get()
"""
if not cores:
results[r] = results_p[r]
else:
results[r] = results_p[r].get()
self.vm[(counter * self.kr):((counter + 1) * self.kr),
(counter * self.kr):((counter + 1) * self.kr)] = results[r].vm
self.betas[
(counter * self.kr):((counter + 1) * self.kr), ] = results[r].betas
self.u[regi_ids[r], ] = results[r].u
self.predy[regi_ids[r], ] = results[r].predy
self.e_filtered[regi_ids[r], ] = results[r].e_filtered
self.name_y += results[r].name_y
self.name_x += results[r].name_x
counter += 1
self.chow = REGI.Chow(self)
self.multi = results
SUMMARY.GM_Error_Hom_multi(
reg=self, multireg=self.multi, vm=vm, regimes=True)
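# Hedged sketch of inspecting a fitted GM_Error_Hom_Regimes object,
# using only attributes documented in the class docstring (continues
# the doctest example above, where `reg` has already been estimated):
#
#   print reg.summary                    # full results and diagnostics
#   print zip(reg.name_x, reg.betas)     # coefficient name/value pairs
#   print np.sqrt(reg.vm.diagonal())     # standard errors of the betas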
class GM_Endog_Error_Hom_Regimes(RegressionPropsY, REGI.Regimes_Frame):
'''
GMM method for a spatial error model with homoskedasticity, regimes and
endogenous variables.
Based on Drukker et al. (2010) [1]_, following Anselin (2011) [2]_.
Parameters
----------
y : array
nx1 array for dependent variable
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, excluding the constant
yend : array
Two dimensional array with n rows and one column for each
endogenous variable
q : array
Two dimensional array with n rows and one column for each
external exogenous variable to use as instruments (note:
this should not contain any variables from x)
regimes : list
List of n values with the mapping of each
observation to a regime. Assumed to be aligned with 'x'.
w : pysal W object
Spatial weights object
constant_regi: ['one', 'many']
Switcher controlling the constant term setup. It may take
the following values:
* 'one': a vector of ones is appended to x and held
constant across regimes
* 'many': a vector of ones is appended to x and considered
different per regime (default)
cols2regi : list, 'all'
Argument indicating whether each
column of x should be considered as different per regime
or held constant across regimes (False).
If a list, k booleans indicating for each variable the
option (True if one per regime, False to be held constant).
If 'all' (default), all the variables vary by regime.
regime_err_sep : boolean
If True, a separate regression is run for each regime.
regime_lag_sep : boolean
Always False, kept for consistency, ignored.
max_iter : int
Maximum number of iterations of steps 2a and 2b from Arraiz
et al. Note: epsilon provides an additional stop condition.
epsilon : float
Minimum change in lambda required to stop iterations of
steps 2a and 2b from Arraiz et al. Note: max_iter provides
an additional stop condition.
A1 : string
If A1='het', then the matrix A1 is defined as in Arraiz et
al. If A1='hom', then as in Anselin (2011). If
A1='hom_sc', then as in Drukker, Egger and Prucha (2010)
and Drukker, Prucha and Raciborski (2010).
cores : boolean
Specifies if multiprocessing is to be used
Default: no multiprocessing, cores = False
Note: Multiprocessing may not work on all platforms.
name_y : string
Name of dependent variable for use in output
name_x : list of strings
Names of independent variables for use in output
name_yend : list of strings
Names of endogenous variables for use in output
name_q : list of strings
Names of instruments for use in output
name_w : string
Name of weights matrix for use in output
name_ds : string
Name of dataset for use in output
name_regimes : string
Name of regime variable for use in the output
Attributes
----------
summary : string
Summary of regression results and diagnostics (note: use in
conjunction with the print command)
betas : array
kx1 array of estimated coefficients
u : array
nx1 array of residuals
e_filtered : array
nx1 array of spatially filtered residuals
predy : array
nx1 array of predicted y values
n : integer
Number of observations
k : integer
Number of variables for which coefficients are estimated
(including the constant)
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
y : array
nx1 array for dependent variable
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, including the constant
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
yend : array
Two dimensional array with n rows and one column for each
endogenous variable
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
q : array
Two dimensional array with n rows and one column for each
external exogenous variable used as instruments
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
z : array
nxk array of variables (combination of x and yend)
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
h : array
nxl array of instruments (combination of x and q)
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
iter_stop : string
Stop criterion reached during iteration of steps 2a and 2b
from Arraiz et al.
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
iteration : integer
Number of iterations of steps 2a and 2b from Arraiz et al.
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
mean_y : float
Mean of dependent variable
std_y : float
Standard deviation of dependent variable
vm : array
Variance covariance matrix (kxk)
pr2 : float
Pseudo R squared (squared correlation between y and ypred)
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
sig2 : float
Sigma squared used in computations
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
std_err : array
1xk array of standard errors of the betas
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
z_stat : list of tuples
z statistic; each tuple contains the pair (statistic,
p-value), where each is a float
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
hth : float
H'H
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
name_y : string
Name of dependent variable for use in output
name_x : list of strings
Names of independent variables for use in output
name_yend : list of strings
Names of endogenous variables for use in output
name_z : list of strings
Names of exogenous and endogenous variables for use in
output
name_q : list of strings
Names of external instruments
name_h : list of strings
Names of all instruments used in output
name_w : string
Name of weights matrix for use in output
name_ds : string
Name of dataset for use in output
name_regimes : string
Name of regimes variable for use in output
title : string
Name of the regression method used
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
regimes : list
List of n values with the mapping of each
observation to a regime. Assumed to be aligned with 'x'.
constant_regi : ['one', 'many']
Ignored if regimes=False. Constant option for regimes.
Switcher controlling the constant term setup. It may take
the following values:
* 'one': a vector of ones is appended to x and held
constant across regimes
* 'many': a vector of ones is appended to x and considered
different per regime
cols2regi : list, 'all'
Ignored if regimes=False. Argument indicating whether each
column of x should be considered as different per regime
or held constant across regimes (False).
If a list, k booleans indicating for each variable the
option (True if one per regime, False to be held constant).
If 'all', all the variables vary by regime.
regime_err_sep : boolean
If True, a separate regression is run for each regime.
kr : int
Number of variables/columns to be "regimized" or subject
to change by regime. These will result in one parameter
estimate by regime for each variable (i.e. nr parameters per
variable)
kf : int
Number of variables/columns to be considered fixed or
global across regimes and hence only obtain one parameter
estimate
nr : int
Number of different regimes in the 'regimes' list
multi : dictionary
Only available when multiple regressions are estimated,
i.e. when regime_err_sep=True and no variable is fixed
across regimes.
Contains all attributes of each individual regression
References
----------
.. [1] Drukker, D. M., Egger, P., Prucha, I. R. (2010)
"On Two-step Estimation of a Spatial Autoregressive Model with Autoregressive
Disturbances and Endogenous Regressors". Working paper.
.. [2] Anselin, L. (2011) "GMM Estimation of Spatial Error Autocorrelation
with and without Heteroskedasticity".
Examples
--------
We first need to import the needed modules, namely numpy to convert the
data we read into arrays that ``spreg`` understands and ``pysal`` to
perform all the analysis.
>>> import numpy as np
>>> import pysal
Open data on NCOVR US County Homicides (3085 areas) using pysal.open().
This is the DBF associated with the NAT shapefile. Note that
pysal.open() also reads data in CSV format; since the actual class
requires data to be passed in as numpy arrays, the user can read their
data in using any method.
>>> db = pysal.open(pysal.examples.get_path("NAT.dbf"),'r')
Extract the HR90 column (homicide rates in 1990) from the DBF file and make it the
dependent variable for the regression. Note that PySAL requires this to be
a numpy array of shape (n, 1) as opposed to the also common shape of (n, )
that other packages accept.
>>> y_var = 'HR90'
>>> y = np.array([db.by_col(y_var)]).reshape(3085,1)
Extract UE90 (unemployment rate) and PS90 (population structure) vectors from
the DBF to be used as independent variables in the regression. Other variables
can be inserted by adding their names to x_var, such as x_var = ['Var1','Var2',...]
Note that PySAL requires this to be an nxj numpy array, where j is the
number of independent variables (not including a constant). By default
this model adds a vector of ones to the independent variables passed in.
>>> x_var = ['PS90','UE90']
>>> x = np.array([db.by_col(name) for name in x_var]).T
For the endogenous models, we add the endogenous variable RD90 (resource deprivation)
and we decide to instrument for it with FP89 (families below poverty):
>>> yd_var = ['RD90']
>>> yend = np.array([db.by_col(name) for name in yd_var]).T
>>> q_var = ['FP89']
>>> q = np.array([db.by_col(name) for name in q_var]).T
The different regimes in this data are given according to the North and
South dummy (SOUTH).
>>> r_var = 'SOUTH'
>>> regimes = db.by_col(r_var)
Since we want to run a spatial error model, we need to specify the spatial
weights matrix that includes the spatial configuration of the observations
into the error component of the model. To do that, we can open an already
existing gal file or create a new one. In this case, we will create one
from ``NAT.shp``.
>>> w = pysal.rook_from_shapefile(pysal.examples.get_path("NAT.shp"))
Unless there is a good reason not to do it, the weights have to be
row-standardized so every row of the matrix sums to one. Among other
things, this allows the spatial lag of a variable to be interpreted as the
average value of the neighboring observations. In PySAL, this can be
easily performed in the following way:
>>> w.transform = 'r'
We are all set with the preliminaries, so we are ready to run the model. In this
case, we will need the variables (exogenous and endogenous), the
instruments and the weights matrix. If we want to
have the names of the variables printed in the output summary, we will
have to pass them in as well, although this is optional.
>>> reg = GM_Endog_Error_Hom_Regimes(y, x, yend, q, regimes, w=w, A1='hom_sc', name_y=y_var, name_x=x_var, name_yend=yd_var, name_q=q_var, name_regimes=r_var, name_ds='NAT.dbf')
Once we have run the model, we can explore a little bit the output. The
regression object we have created has many attributes so take your time to
discover them. This class offers an error model that assumes
homoskedasticity but that, unlike the models from
``pysal.spreg.error_sp``, allows for inference on the spatial
parameter. Hence, we find the same number of betas as standard errors,
which we calculate by taking the square root of the diagonal of the
variance-covariance matrix. Alternatively, we can have a summary of the
output by typing: model.summary
>>> print reg.name_z
['0_CONSTANT', '0_PS90', '0_UE90', '1_CONSTANT', '1_PS90', '1_UE90', '0_RD90', '1_RD90', 'lambda']
>>> print np.around(reg.betas,4)
[[ 3.5973]
[ 1.0652]
[ 0.1582]
[ 9.198 ]
[ 1.8809]
[-0.2489]
[ 2.4616]
[ 3.5796]
[ 0.2541]]
>>> print np.around(np.sqrt(reg.vm.diagonal()),4)
[ 0.5204 0.1371 0.0629 0.4721 0.1824 0.0725 0.2992 0.2395 0.024 ]
'''
def __init__(self, y, x, yend, q, regimes, w,
constant_regi='many', cols2regi='all', regime_err_sep=False,
regime_lag_sep=False,
max_iter=1, epsilon=0.00001, A1='het', cores=False,
vm=False, name_y=None, name_x=None,
name_yend=None, name_q=None, name_w=None,
name_ds=None, name_regimes=None, summ=True, add_lag=False):
n = USER.check_arrays(y, x, yend, q)
USER.check_y(y, n)
USER.check_weights(w, y, w_required=True)
self.constant_regi = constant_regi
self.cols2regi = cols2regi
self.name_ds = USER.set_name_ds(name_ds)
self.name_regimes = USER.set_name_ds(name_regimes)
self.name_w = USER.set_name_w(name_w, w)
self.n = n
self.y = y
name_x = USER.set_name_x(name_x, x)
if summ:
name_yend = USER.set_name_yend(name_yend, yend)
self.name_y = USER.set_name_y(name_y)
name_q = USER.set_name_q(name_q, q)
self.name_x_r = name_x + name_yend
cols2regi = REGI.check_cols2regi(
constant_regi, cols2regi, x, yend=yend)
self.regimes_set = REGI._get_regimes_set(regimes)
self.regimes = regimes
USER.check_regimes(self.regimes_set, self.n, x.shape[1])
self.regime_err_sep = regime_err_sep
if regime_err_sep == True:
if set(cols2regi) == set([True]):
self._endog_error_regimes_multi(y, x, regimes, w, yend, q, cores,
max_iter, epsilon, A1, cols2regi, vm,
name_x, name_yend, name_q, add_lag)
else:
raise Exception, "All coefficients must vary accross regimes if regime_err_sep = True."
else:
x_constant = USER.check_constant(x)
q, name_q = REGI.Regimes_Frame.__init__(self, q,
regimes, constant_regi=None, cols2regi='all', names=name_q)
x, name_x = REGI.Regimes_Frame.__init__(self, x_constant,
regimes, constant_regi=None, cols2regi=cols2regi,
names=name_x)
yend2, name_yend = REGI.Regimes_Frame.__init__(self, yend,
regimes, constant_regi=None,
cols2regi=cols2regi, yend=True, names=name_yend)
if A1 == 'hom':
wA1 = get_A1_hom(w.sparse)
elif A1 == 'hom_sc':
wA1 = get_A1_hom(w.sparse, scalarKP=True)
elif A1 == 'het':
wA1 = get_A1_het(w.sparse)
wA2 = get_A2_hom(w.sparse)
# 1a. S2SLS --> \tilde{\delta}
tsls = BaseTSLS(y=y, x=x, yend=yend2, q=q)
self.k = tsls.z.shape[1]
self.x = tsls.x
self.yend, self.z, self.h = tsls.yend, tsls.z, tsls.h
# 1b. GM --> \tilde{\rho}
moments = moments_hom(w.sparse, wA1, wA2, tsls.u)
lambda1 = optim_moments(moments)
lambda_old = lambda1
self.iteration, eps = 0, 1
while self.iteration < max_iter and eps > epsilon:
# 2a. GS2SLS --> \hat{\delta}
xs = get_spFilter(w, lambda1, x_constant)
xs = REGI.Regimes_Frame.__init__(self, xs,
regimes, constant_regi=None, cols2regi=cols2regi)[0]
ys = get_spFilter(w, lambda1, y)
yend_s = get_spFilter(w, lambda1, yend)
yend_s = REGI.Regimes_Frame.__init__(self, yend_s,
regimes, constant_regi=None, cols2regi=cols2regi,
yend=True)[0]
tsls_s = BaseTSLS(ys, xs, yend_s, h=tsls.h)
self.predy = spdot(self.z, tsls_s.betas)
self.u = self.y - self.predy
# 2b. GM 2nd iteration --> \hat{\rho}
moments = moments_hom(w.sparse, wA1, wA2, self.u)
psi = get_vc_hom(
w.sparse, wA1, wA2, self, lambda_old, tsls_s.z)[0]
lambda2 = optim_moments(moments, psi)
eps = abs(lambda2 - lambda_old)
lambda_old = lambda2
self.iteration += 1
self.iter_stop = iter_msg(self.iteration, max_iter)
# Output
self.betas = np.vstack((tsls_s.betas, lambda2))
self.vm, self.sig2 = get_omega_hom(
w.sparse, wA1, wA2, self, lambda2, moments[0])
self.e_filtered = self.u - lambda2 * lag_spatial(w, self.u)
self.name_x = USER.set_name_x(name_x, x, constant=True)
self.name_yend = USER.set_name_yend(name_yend, yend)
self.name_z = self.name_x + self.name_yend
self.name_z.append('lambda')
self.name_q = USER.set_name_q(name_q, q)
self.name_h = USER.set_name_h(self.name_x, self.name_q)
self.kf += 1
self.chow = REGI.Chow(self)
self._cache = {}
if summ:
self.title = "SPATIALLY WEIGHTED TWO STAGE LEAST SQUARES (HOM) - REGIMES"
SUMMARY.GM_Endog_Error_Hom(reg=self, w=w, vm=vm, regimes=True)
def _endog_error_regimes_multi(self, y, x, regimes, w, yend, q, cores,
max_iter, epsilon, A1, cols2regi, vm,
name_x, name_yend, name_q, add_lag):
regi_ids = dict(
(r, list(np.where(np.array(regimes) == r)[0])) for r in self.regimes_set)
if add_lag != False:
self.cols2regi += [True]
cols2regi += [True]
self.predy_e = np.zeros((self.n, 1), float)
self.e_pred = np.zeros((self.n, 1), float)
results_p = {}
"""
for r in self.regimes_set:
if system() == 'Windows':
is_win = True
results_p[r] = _work_endog_error(*(y,x,yend,q,regi_ids,r,w,max_iter,epsilon,A1,self.name_ds,self.name_y,name_x,name_yend,name_q,self.name_w,self.name_regimes,add_lag))
else:
pool = mp.Pool(cores)
results_p[r] = pool.apply_async(_work_endog_error,args=(y,x,yend,q,regi_ids,r,w,max_iter,epsilon,A1,self.name_ds,self.name_y,name_x,name_yend,name_q,self.name_w,self.name_regimes,add_lag, ))
is_win = False
"""
for r in self.regimes_set:
if cores:
pool = mp.Pool(None)
results_p[r] = pool.apply_async(_work_endog_error, args=(
y, x, yend, q, regi_ids, r, w, max_iter, epsilon, A1, self.name_ds, self.name_y, name_x, name_yend, name_q, self.name_w, self.name_regimes, add_lag, ))
else:
results_p[r] = _work_endog_error(*(y, x, yend, q, regi_ids, r, w, max_iter, epsilon, A1,
self.name_ds, self.name_y, name_x, name_yend, name_q, self.name_w, self.name_regimes, add_lag))
self.kryd, self.kf = 0, 0
self.kr = len(cols2regi) + 1
self.nr = len(self.regimes_set)
self.vm = np.zeros((self.nr * self.kr, self.nr * self.kr), float)
self.betas = np.zeros((self.nr * self.kr, 1), float)
self.u = np.zeros((self.n, 1), float)
self.predy = np.zeros((self.n, 1), float)
self.e_filtered = np.zeros((self.n, 1), float)
"""
if not is_win:
pool.close()
pool.join()
"""
if cores:
pool.close()
pool.join()
results = {}
self.name_y, self.name_x, self.name_yend, self.name_q, self.name_z, self.name_h = [
], [], [], [], [], []
counter = 0
for r in self.regimes_set:
"""
if is_win:
results[r] = results_p[r]
else:
results[r] = results_p[r].get()
"""
if not cores:
results[r] = results_p[r]
else:
results[r] = results_p[r].get()
self.vm[(counter * self.kr):((counter + 1) * self.kr),
(counter * self.kr):((counter + 1) * self.kr)] = results[r].vm
self.betas[
(counter * self.kr):((counter + 1) * self.kr), ] = results[r].betas
self.u[regi_ids[r], ] = results[r].u
self.predy[regi_ids[r], ] = results[r].predy
self.e_filtered[regi_ids[r], ] = results[r].e_filtered
self.name_y += results[r].name_y
self.name_x += results[r].name_x
self.name_yend += results[r].name_yend
self.name_q += results[r].name_q
self.name_z += results[r].name_z
self.name_h += results[r].name_h
if add_lag != False:
self.predy_e[regi_ids[r], ] = results[r].predy_e
self.e_pred[regi_ids[r], ] = results[r].e_pred
counter += 1
self.chow = REGI.Chow(self)
self.multi = results
if add_lag != False:
SUMMARY.GM_Combo_Hom_multi(
reg=self, multireg=self.multi, vm=vm, regimes=True)
else:
SUMMARY.GM_Endog_Error_Hom_multi(
reg=self, multireg=self.multi, vm=vm, regimes=True)
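# Illustrative sketch of the regime_err_sep=True path (assumes y, x,
# yend, q, regimes and w built as in the docstring example above).
# When every coefficient varies by regime, one regression is run per
# regime and the individual fits are exposed through `multi`:
#
#   reg = GM_Endog_Error_Hom_Regimes(y, x, yend, q, regimes, w=w,
#                                    regime_err_sep=True)
#   for r in reg.regimes_set:            # one sub-regression per regime
#       print r, reg.multi[r].betas.flatten()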
class GM_Combo_Hom_Regimes(GM_Endog_Error_Hom_Regimes):
'''
GMM method for a spatial lag and error model with homoskedasticity,
regimes and endogenous variables, with results and diagnostics;
based on Drukker et al. (2010) [1]_, following Anselin (2011) [2]_.
Parameters
----------
y : array
nx1 array for dependent variable
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, excluding the constant
yend : array
Two dimensional array with n rows and one column for each
endogenous variable
q : array
Two dimensional array with n rows and one column for each
external exogenous variable to use as instruments (note:
this should not contain any variables from x)
regimes : list
List of n values with the mapping of each
observation to a regime. Assumed to be aligned with 'x'.
w : pysal W object
Spatial weights object (always needed)
constant_regi: ['one', 'many']
Switcher controlling the constant term setup. It may take
the following values:
* 'one': a vector of ones is appended to x and held
constant across regimes
* 'many': a vector of ones is appended to x and considered
different per regime (default)
cols2regi : list, 'all'
Argument indicating whether each
column of x should be considered as different per regime
or held constant across regimes (False).
If a list, k booleans indicating for each variable the
option (True if one per regime, False to be held constant).
If 'all' (default), all the variables vary by regime.
regime_err_sep : boolean
If True, a separate regression is run for each regime.
regime_lag_sep : boolean
If True, the spatial parameter for spatial lag is also
computed according to different regimes. If False (default),
the spatial parameter is fixed across regimes.
w_lags : integer
Orders of W to include as instruments for the spatially
lagged dependent variable. For example, w_lags=1, then
instruments are WX; if w_lags=2, then WX, WWX; and so on.
lag_q : boolean
If True, then include spatial lags of the additional
instruments (q).
max_iter : int
Maximum number of iterations of steps 2a and 2b from Arraiz
et al. Note: epsilon provides an additional stop condition.
epsilon : float
Minimum change in lambda required to stop iterations of
steps 2a and 2b from Arraiz et al. Note: max_iter provides
an additional stop condition.
A1 : string
If A1='het', then the matrix A1 is defined as in Arraiz et
al. If A1='hom', then as in Anselin (2011). If
A1='hom_sc', then as in Drukker, Egger and Prucha (2010)
and Drukker, Prucha and Raciborski (2010).
vm : boolean
If True, include variance-covariance matrix in summary
results
cores : boolean
Specifies if multiprocessing is to be used
Default: no multiprocessing, cores = False
Note: Multiprocessing may not work on all platforms.
name_y : string
Name of dependent variable for use in output
name_x : list of strings
Names of independent variables for use in output
name_yend : list of strings
Names of endogenous variables for use in output
name_q : list of strings
Names of instruments for use in output
name_w : string
Name of weights matrix for use in output
name_ds : string
Name of dataset for use in output
name_regimes : string
Name of regime variable for use in the output
Attributes
----------
summary : string
Summary of regression results and diagnostics (note: use in
conjunction with the print command)
betas : array
kx1 array of estimated coefficients
u : array
nx1 array of residuals
e_filtered : array
nx1 array of spatially filtered residuals
e_pred : array
nx1 array of residuals (using reduced form)
predy : array
nx1 array of predicted y values
predy_e : array
nx1 array of predicted y values (using reduced form)
n : integer
Number of observations
k : integer
Number of variables for which coefficients are estimated
(including the constant)
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
y : array
nx1 array for dependent variable
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, including the constant
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
yend : array
Two dimensional array with n rows and one column for each
endogenous variable
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
q : array
Two dimensional array with n rows and one column for each
external exogenous variable used as instruments
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
z : array
nxk array of variables (combination of x and yend)
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
h : array
nxl array of instruments (combination of x and q)
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
iter_stop : string
Stop criterion reached during iteration of steps 2a and 2b
from Arraiz et al.
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
iteration : integer
Number of iterations of steps 2a and 2b from Arraiz et al.
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
mean_y : float
Mean of dependent variable
std_y : float
Standard deviation of dependent variable
vm : array
Variance covariance matrix (kxk)
pr2 : float
Pseudo R squared (squared correlation between y and ypred)
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
pr2_e : float
Pseudo R squared (squared correlation between y and ypred_e
(using reduced form))
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
sig2 : float
Sigma squared used in computations (based on filtered
residuals)
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
std_err : array
1xk array of standard errors of the betas
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
z_stat : list of tuples
z statistic; each tuple contains the pair (statistic,
p-value), where each is a float
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
name_y : string
Name of dependent variable for use in output
name_x : list of strings
Names of independent variables for use in output
name_yend : list of strings
Names of endogenous variables for use in output
name_z : list of strings
Names of exogenous and endogenous variables for use in
output
name_q : list of strings
Names of external instruments
name_h : list of strings
Names of all instruments used in output
name_w : string
Name of weights matrix for use in output
name_ds : string
Name of dataset for use in output
name_regimes : string
Name of regimes variable for use in output
title : string
Name of the regression method used
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
regimes : list
List of n values with the mapping of each
observation to a regime. Assumed to be aligned with 'x'.
constant_regi : ['one', 'many']
Ignored if regimes=False. Constant option for regimes.
Switcher controlling the constant term setup. It may take
the following values:
* 'one': a vector of ones is appended to x and held
constant across regimes
* 'many': a vector of ones is appended to x and considered
different per regime
cols2regi : list, 'all'
Ignored if regimes=False. Argument indicating whether each
column of x should be considered as different per regime
or held constant across regimes (False).
If a list, k booleans indicating for each variable the
option (True if one per regime, False to be held constant).
If 'all', all the variables vary by regime.
regime_err_sep : boolean
If True, a separate regression is run for each regime.
regime_lag_sep : boolean
If True, the spatial parameter for spatial lag is also
computed according to different regimes. If False (default),
the spatial parameter is fixed across regimes.
kr : int
Number of variables/columns to be "regimized" or subject
to change by regime. These will result in one parameter
estimate by regime for each variable (i.e. nr parameters per
variable)
kf : int
Number of variables/columns to be considered fixed or
global across regimes and hence only obtain one parameter
estimate
nr : int
Number of different regimes in the 'regimes' list
multi : dictionary
Only available when multiple regressions are estimated,
i.e. when regime_err_sep=True and no variable is fixed
across regimes.
Contains all attributes of each individual regression
References
----------
.. [1] Drukker, D. M., Egger, P., Prucha, I. R. (2010)
"On Two-step Estimation of a Spatial Autoregressive Model with Autoregressive
Disturbances and Endogenous Regressors". Working paper.
.. [2] Anselin, L. (2011) "GMM Estimation of Spatial Error Autocorrelation
with and without Heteroskedasticity".
Examples
--------
We first need to import the needed modules, namely numpy to convert the
data we read into arrays that ``spreg`` understands and ``pysal`` to
perform all the analysis.
>>> import numpy as np
>>> import pysal
Open data on NCOVR US County Homicides (3085 areas) using pysal.open().
This is the DBF associated with the NAT shapefile. Note that
pysal.open() also reads data in CSV format; since the actual class
requires data to be passed in as numpy arrays, the user can read their
data in using any method.
>>> db = pysal.open(pysal.examples.get_path("NAT.dbf"),'r')
Extract the HR90 column (homicide rates in 1990) from the DBF file and make it the
dependent variable for the regression. Note that PySAL requires this to be
a numpy array of shape (n, 1) as opposed to the also common shape of (n, )
that other packages accept.
>>> y_var = 'HR90'
>>> y = np.array([db.by_col(y_var)]).reshape(3085,1)
Extract UE90 (unemployment rate) and PS90 (population structure) vectors from
the DBF to be used as independent variables in the regression. Other variables
can be inserted by adding their names to x_var, such as x_var = ['Var1','Var2',...]
Note that PySAL requires this to be an nxj numpy array, where j is the
number of independent variables (not including a constant). By default
this model adds a vector of ones to the independent variables passed in.
>>> x_var = ['PS90','UE90']
>>> x = np.array([db.by_col(name) for name in x_var]).T
The different regimes in this data are given according to the North and
South dummy (SOUTH).
>>> r_var = 'SOUTH'
>>> regimes = db.by_col(r_var)
Since we want to run a spatial combo model, we need to specify
the spatial weights matrix that includes the spatial configuration of the
observations. To do that, we can open an already existing gal file or
create a new one. In this case, we will create one from ``NAT.shp``.
>>> w = pysal.rook_from_shapefile(pysal.examples.get_path("NAT.shp"))
Unless there is a good reason not to do it, the weights have to be
row-standardized so every row of the matrix sums to one. Among other
things, this allows the spatial lag of a variable to be interpreted as the
average value of the neighboring observations. In PySAL, this can be
easily performed in the following way:
>>> w.transform = 'r'
We are all set with the preliminaries, so we are ready to run the model. In this
case, we will need the variables and the weights matrix. If we want to
have the names of the variables printed in the output summary, we will
have to pass them in as well, although this is optional.
Example only with spatial lag
The Combo class runs a SARAR model, that is, a spatial lag+error model.
In this case we will run a simple version of that, where we have the
spatial effects as well as exogenous variables. Since it is a spatial
model, we have to pass in the weights matrix. If we want to
have the names of the variables printed in the output summary, we will
have to pass them in as well, although this is optional. We can have a
summary of the output by typing: model.summary
Alternatively, we can check the betas:
>>> reg = GM_Combo_Hom_Regimes(y, x, regimes, w=w, A1='hom_sc', name_y=y_var, name_x=x_var, name_regimes=r_var, name_ds='NAT')
>>> print reg.name_z
['0_CONSTANT', '0_PS90', '0_UE90', '1_CONSTANT', '1_PS90', '1_UE90', '_Global_W_HR90', 'lambda']
>>> print np.around(reg.betas,4)
[[ 1.4607]
[ 0.9579]
[ 0.5658]
[ 9.1129]
[ 1.1339]
[ 0.6517]
[-0.4583]
[ 0.6634]]
This class also allows the user to run a spatial lag+error model with the
extra feature of including non-spatial endogenous regressors. This means
that, in addition to the spatial lag and error, we consider some of the
variables on the right-hand side of the equation as endogenous and we
instrument for this. In this case we consider RD90 (resource deprivation)
as an endogenous regressor. We use FP89 (families below poverty)
for this and hence put it in the instruments parameter, 'q'.
>>> yd_var = ['RD90']
>>> yd = np.array([db.by_col(name) for name in yd_var]).T
>>> q_var = ['FP89']
>>> q = np.array([db.by_col(name) for name in q_var]).T
And then we can run and explore the model analogously to the previous combo:
>>> reg = GM_Combo_Hom_Regimes(y, x, regimes, yd, q, w=w, A1='hom_sc', name_y=y_var, name_x=x_var, name_yend=yd_var, name_q=q_var, name_regimes=r_var, name_ds='NAT')
>>> print reg.name_z
['0_CONSTANT', '0_PS90', '0_UE90', '1_CONSTANT', '1_PS90', '1_UE90', '0_RD90', '1_RD90', '_Global_W_HR90', 'lambda']
>>> print reg.betas
[[ 3.4196478 ]
[ 1.04065595]
[ 0.16630304]
[ 8.86570777]
[ 1.85134286]
[-0.24921597]
[ 2.43007651]
[ 3.61656899]
[ 0.03315061]
[ 0.22636055]]
>>> print np.sqrt(reg.vm.diagonal())
[ 0.53989913 0.13506086 0.06143434 0.77049956 0.18089997 0.07246848
0.29218837 0.25378655 0.06184801 0.06323236]
>>> print 'lambda: ', np.around(reg.betas[-1], 4)
lambda: [ 0.2264]
'''
def __init__(self, y, x, regimes, yend=None, q=None,
w=None, w_lags=1, lag_q=True, cores=False,
max_iter=1, epsilon=0.00001, A1='het',
constant_regi='many', cols2regi='all',
regime_err_sep=False, regime_lag_sep=False,
vm=False, name_y=None, name_x=None,
name_yend=None, name_q=None,
name_w=None, name_ds=None, name_regimes=None):
n = USER.check_arrays(y, x)
USER.check_y(y, n)
USER.check_weights(w, y, w_required=True)
name_x = USER.set_name_x(name_x, x, constant=True)
self.name_y = USER.set_name_y(name_y)
name_yend = USER.set_name_yend(name_yend, yend)
name_q = USER.set_name_q(name_q, q)
name_q.extend(
USER.set_name_q_sp(name_x, w_lags, name_q, lag_q, force_all=True))
cols2regi = REGI.check_cols2regi(
constant_regi, cols2regi, x, yend=yend, add_cons=False)
self.regimes_set = REGI._get_regimes_set(regimes)
self.regimes = regimes
USER.check_regimes(self.regimes_set, n, x.shape[1])
self.regime_err_sep = regime_err_sep
self.regime_lag_sep = regime_lag_sep
if regime_lag_sep == True:
if regime_err_sep == False:
raise Exception, "For spatial combo models, if spatial lag is set by regimes (regime_lag_sep=True), spatial error must also be set by regimes (regime_err_sep=True)."
add_lag = [w_lags, lag_q]
else:
cols2regi += [False]
add_lag = False
if regime_err_sep == True:
raise Exception, "For spatial combo models, if spatial error is set by regimes (regime_err_sep=True), all coefficients including lambda (regime_lag_sep=True) must be set by regimes."
yend, q = set_endog(y, x, w, yend, q, w_lags, lag_q)
name_yend.append(USER.set_name_yend_sp(self.name_y))
GM_Endog_Error_Hom_Regimes.__init__(self, y=y, x=x, yend=yend,
q=q, regimes=regimes, w=w, vm=vm, constant_regi=constant_regi,
cols2regi=cols2regi, regime_err_sep=regime_err_sep,
max_iter=max_iter, epsilon=epsilon, A1=A1, cores=cores,
name_y=self.name_y, name_x=name_x, name_yend=name_yend,
name_q=name_q, name_w=name_w, name_ds=name_ds,
name_regimes=name_regimes, summ=False, add_lag=add_lag)
if regime_err_sep != True:
self.rho = self.betas[-2]
self.predy_e, self.e_pred, warn = sp_att(w, self.y,
self.predy, yend[:, -1].reshape(self.n, 1), self.rho)
set_warn(self, warn)
self.regime_lag_sep = regime_lag_sep
self.title = "SPATIALLY WEIGHTED TWO STAGE LEAST SQUARES (HOM) - REGIMES"
SUMMARY.GM_Combo_Hom(reg=self, w=w, vm=vm, regimes=True)
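# Note on reading GM_Combo_Hom_Regimes output (a sketch based on the
# docstring example above): with a global spatial lag, `betas` stacks
# the regression coefficients, then rho (the lag parameter, betas[-2]),
# then lambda (the error parameter, betas[-1]):
#
#   print 'rho:    ', reg.betas[-2]
#   print 'lambda: ', reg.betas[-1]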
def _work_error(y, x, regi_ids, r, w, max_iter, epsilon, A1, name_ds, name_y, name_x, name_w, name_regimes):
w_r, warn = REGI.w_regime(w, regi_ids[r], r, transform=True)
y_r = y[regi_ids[r]]
x_r = x[regi_ids[r]]
x_constant = USER.check_constant(x_r)
model = BaseGM_Error_Hom(
y_r, x_constant, w_r.sparse, max_iter=max_iter, epsilon=epsilon, A1=A1)
set_warn(model, warn)
model.w = w_r
model.title = "SPATIALLY WEIGHTED LEAST SQUARES ESTIMATION (HOM) - REGIME %s" % r
model.name_ds = name_ds
model.name_y = '%s_%s' % (str(r), name_y)
model.name_x = ['%s_%s' % (str(r), i) for i in name_x]
model.name_w = name_w
model.name_regimes = name_regimes
return model
def _work_endog_error(y, x, yend, q, regi_ids, r, w, max_iter, epsilon, A1, name_ds, name_y, name_x, name_yend, name_q, name_w, name_regimes, add_lag):
w_r, warn = REGI.w_regime(w, regi_ids[r], r, transform=True)
y_r = y[regi_ids[r]]
x_r = x[regi_ids[r]]
    if yend is not None:
yend_r = yend[regi_ids[r]]
q_r = q[regi_ids[r]]
else:
yend_r, q_r = None, None
if add_lag != False:
yend_r, q_r = set_endog(
y_r, x_r, w_r, yend_r, q_r, add_lag[0], add_lag[1])
x_constant = USER.check_constant(x_r)
model = BaseGM_Endog_Error_Hom(
y_r, x_constant, yend_r, q_r, w_r.sparse, max_iter=max_iter, epsilon=epsilon, A1=A1)
set_warn(model, warn)
if add_lag != False:
model.rho = model.betas[-2]
model.predy_e, model.e_pred, warn = sp_att(w_r, model.y,
model.predy, model.yend[:, -1].reshape(model.n, 1), model.rho)
set_warn(model, warn)
model.title = "SPATIALLY WEIGHTED TWO STAGE LEAST SQUARES (HOM) - REGIME %s" % r
model.name_ds = name_ds
model.name_y = '%s_%s' % (str(r), name_y)
model.name_x = ['%s_%s' % (str(r), i) for i in name_x]
model.name_yend = ['%s_%s' % (str(r), i) for i in name_yend]
model.name_z = model.name_x + model.name_yend + ['lambda']
model.name_q = ['%s_%s' % (str(r), i) for i in name_q]
model.name_h = model.name_x + model.name_q
model.name_w = name_w
model.name_regimes = name_regimes
return model
def _test():
import doctest
start_suppress = np.get_printoptions()['suppress']
np.set_printoptions(suppress=True)
doctest.testmod()
np.set_printoptions(suppress=start_suppress)
if __name__ == '__main__':
_test()
| bsd-3-clause |
fxia22/ASM_xf | PythonD/lib/python2.4/distutils/log.py | 7 | 1424 | """A simple log mechanism styled after PEP 282."""
# This module should be kept compatible with Python 2.1.
# The class here is styled after PEP 282 so that it could later be
# replaced with a standard Python logging implementation.
DEBUG = 1
INFO = 2
WARN = 3
ERROR = 4
FATAL = 5
import sys
class Log:
def __init__(self, threshold=WARN):
self.threshold = threshold
def _log(self, level, msg, args):
if level >= self.threshold:
print msg % args
sys.stdout.flush()
def log(self, level, msg, *args):
self._log(level, msg, args)
def debug(self, msg, *args):
self._log(DEBUG, msg, args)
def info(self, msg, *args):
self._log(INFO, msg, args)
def warn(self, msg, *args):
self._log(WARN, msg, args)
def error(self, msg, *args):
self._log(ERROR, msg, args)
def fatal(self, msg, *args):
self._log(FATAL, msg, args)
_global_log = Log()
log = _global_log.log
debug = _global_log.debug
info = _global_log.info
warn = _global_log.warn
error = _global_log.error
fatal = _global_log.fatal
def set_threshold(level):
# return the old threshold for use from tests
old = _global_log.threshold
_global_log.threshold = level
return old
def set_verbosity(v):
if v <= 0:
set_threshold(WARN)
elif v == 1:
set_threshold(INFO)
elif v >= 2:
set_threshold(DEBUG)
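# Illustrative usage (sketch, not part of the original module): raise the
# verbosity so info() messages pass the default WARN threshold.
#   set_verbosity(1)
#   info("copying %s -> %s", "a.txt", "b.txt")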
| gpl-2.0 |
manikTharaka/al-go-rithms | cryptography/ElGamal/Python/ElGamal.py | 2 | 1960 | from algo import mod_inv
from random import randrange, randint
from ecurves import *
def elgamal_encrypt(M,k,a,p):
    '''
    Input:
        M - message
        k - recipient private key (a**k mod p is the public key)
        a - generator
        p - prime
    Output (c1,c2) [Encrypted]
    '''
    s = randrange(1,p-1)
    c1 = pow(a,s,p)
    c2 = (M*pow(pow(a,k,p),s,p))%p
    return c1,c2
def elgamal_decrypt(c1,c2,k,p):
'''
Input:
c1 - a^s
c2 - m*(a^k)^s
k - private key
p - prime
Output: c2*(c1^k)^-1 [Decrypted]
'''
s = pow(c1, k, p)
return (c2*(mod_inv(s,p)))%p
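# Illustrative round trip (sketch added for clarity, not part of the
# original module). Toy parameters only: 23 is prime and 5 is a primitive
# root mod 23; real deployments need large primes. Assumes mod_inv(x, p)
# from `algo` returns the modular inverse of x mod p.
#   c1, c2 = elgamal_encrypt(9, 6, 5, 23)
#   elgamal_decrypt(c1, c2, 6, 23)  # -> 9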
def generateKey(curve):
'''
Input:
curve - Valid Curve
Output: (Int, Point)
'''
privKey = randint(1, curve.p - 1)
pubKey = privKey*curve.gen
return privKey, pubKey
def ECEG_encrypt(curve, m, pubKey):
'''
Input:
curve - Valid Curve
m - message
pubKey - Public Key
Output: (Point, Point)
'''
r = randint(1, curve.p - 1)
c1 = r*curve.gen
c2 = (r*pubKey) + m
return (c1, c2)
def ECEG_decrypt(curve, c1, c2, privKey):
'''
Input:
curve - Valid Curve
(c1, c2) - Ciphered Text
privKey - Private Key
Output: Point
'''
m = c2 - (privKey*c1)
return m
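# Why ECEG_decrypt recovers m (sketch of the algebra, using
# pubKey = privKey*gen):
#   c2 - privKey*c1 = (r*pubKey + m) - privKey*(r*gen)
#                   = r*privKey*gen + m - r*privKey*gen = m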
def ECEG_test():
c = EC(317689081251325503476317476413827693272746955927, \
79052896607878758718120572025718535432100651934, \
785963102379428822376694789446897396207498568951)
gen = Point(c, 771507216262649826170648268565579889907769254176, \
390157510246556628525279459266514995562533196655)
c.gen = gen
KprivBob, KpubBob = generateKey(c)
# What alice does
m = 10301*gen
print("Plain Text = " + str(m))
ciphered = ECEG_encrypt(c, m, KpubBob)
# print("Cipher Text = " + "(" + ','.join(str(x) for x in ciphered) + ")")
# Bob decrypts the message
m_prime = ECEG_decrypt(c, ciphered[0], ciphered[1], KprivBob)
print("Decrypted = " + str(m_prime))
| mit |
dongguangming/youtube-dl | youtube_dl/extractor/kuwo.py | 22 | 11149 | # coding: utf-8
from __future__ import unicode_literals
import re
import itertools
from .common import InfoExtractor
from ..utils import (
get_element_by_id,
clean_html,
ExtractorError,
remove_start,
)
class KuwoBaseIE(InfoExtractor):
_FORMATS = [
{'format': 'ape', 'ext': 'ape', 'preference': 100},
{'format': 'mp3-320', 'ext': 'mp3', 'br': '320kmp3', 'abr': 320, 'preference': 80},
{'format': 'mp3-192', 'ext': 'mp3', 'br': '192kmp3', 'abr': 192, 'preference': 70},
{'format': 'mp3-128', 'ext': 'mp3', 'br': '128kmp3', 'abr': 128, 'preference': 60},
{'format': 'wma', 'ext': 'wma', 'preference': 20},
{'format': 'aac', 'ext': 'aac', 'abr': 48, 'preference': 10}
]
def _get_formats(self, song_id):
formats = []
for file_format in self._FORMATS:
song_url = self._download_webpage(
'http://antiserver.kuwo.cn/anti.s?format=%s&br=%s&rid=MUSIC_%s&type=convert_url&response=url' %
(file_format['ext'], file_format.get('br', ''), song_id),
song_id, note='Download %s url info' % file_format['format'],
)
if song_url.startswith('http://') or song_url.startswith('https://'):
formats.append({
'url': song_url,
'format_id': file_format['format'],
'format': file_format['format'],
'preference': file_format['preference'],
'abr': file_format.get('abr'),
})
self._sort_formats(formats)
return formats
class KuwoIE(KuwoBaseIE):
IE_NAME = 'kuwo:song'
IE_DESC = '酷我音乐'
_VALID_URL = r'http://www\.kuwo\.cn/yinyue/(?P<id>\d+?)/'
_TESTS = [{
'url': 'http://www.kuwo.cn/yinyue/635632/',
'info_dict': {
'id': '635632',
'ext': 'ape',
'title': '爱我别走',
'creator': '张震岳',
'upload_date': '20080122',
'description': 'md5:ed13f58e3c3bf3f7fd9fbc4e5a7aa75c'
},
}, {
'url': 'http://www.kuwo.cn/yinyue/6446136/',
'info_dict': {
'id': '6446136',
'ext': 'mp3',
'title': '心',
'creator': 'IU',
'upload_date': '20150518',
},
'params': {
'format': 'mp3-320'
},
}]
def _real_extract(self, url):
song_id = self._match_id(url)
webpage = self._download_webpage(
url, song_id, note='Download song detail info',
errnote='Unable to get song detail info')
song_name = self._html_search_regex(
r'<h1[^>]+title="([^"]+)">', webpage, 'song name')
singer_name = self._html_search_regex(
r'<div[^>]+class="s_img">\s*<a[^>]+title="([^>]+)"',
webpage, 'singer name', fatal=False)
lrc_content = clean_html(get_element_by_id('lrcContent', webpage))
if lrc_content == '暂无': # indicates no lyrics
lrc_content = None
formats = self._get_formats(song_id)
album_id = self._html_search_regex(
r'<p[^>]+class="album"[^<]+<a[^>]+href="http://www\.kuwo\.cn/album/(\d+)/"',
webpage, 'album id', fatal=False)
publish_time = None
if album_id is not None:
album_info_page = self._download_webpage(
'http://www.kuwo.cn/album/%s/' % album_id, song_id,
note='Download album detail info',
errnote='Unable to get album detail info')
publish_time = self._html_search_regex(
r'发行时间:(\d{4}-\d{2}-\d{2})', album_info_page,
'publish time', fatal=False)
if publish_time:
publish_time = publish_time.replace('-', '')
return {
'id': song_id,
'title': song_name,
'creator': singer_name,
'upload_date': publish_time,
'description': lrc_content,
'formats': formats,
}
class KuwoAlbumIE(InfoExtractor):
IE_NAME = 'kuwo:album'
IE_DESC = '酷我音乐 - 专辑'
_VALID_URL = r'http://www\.kuwo\.cn/album/(?P<id>\d+?)/'
_TEST = {
'url': 'http://www.kuwo.cn/album/502294/',
'info_dict': {
'id': '502294',
'title': 'M',
'description': 'md5:6a7235a84cc6400ec3b38a7bdaf1d60c',
},
'playlist_count': 2,
}
def _real_extract(self, url):
album_id = self._match_id(url)
webpage = self._download_webpage(
url, album_id, note='Download album info',
errnote='Unable to get album info')
album_name = self._html_search_regex(
r'<div[^>]+class="comm"[^<]+<h1[^>]+title="([^"]+)"', webpage,
'album name')
album_intro = remove_start(
clean_html(get_element_by_id('intro', webpage)),
'%s简介:' % album_name)
entries = [
self.url_result(song_url, 'Kuwo') for song_url in re.findall(
r'<p[^>]+class="listen"><a[^>]+href="(http://www\.kuwo\.cn/yinyue/\d+/)"',
webpage)
]
return self.playlist_result(entries, album_id, album_name, album_intro)
class KuwoChartIE(InfoExtractor):
IE_NAME = 'kuwo:chart'
IE_DESC = '酷我音乐 - 排行榜'
_VALID_URL = r'http://yinyue\.kuwo\.cn/billboard_(?P<id>[^.]+).htm'
_TEST = {
'url': 'http://yinyue.kuwo.cn/billboard_香港中文龙虎榜.htm',
'info_dict': {
'id': '香港中文龙虎榜',
'title': '香港中文龙虎榜',
'description': 're:\d{4}第\d{2}期',
},
'playlist_mincount': 10,
}
def _real_extract(self, url):
chart_id = self._match_id(url)
webpage = self._download_webpage(
url, chart_id, note='Download chart info',
errnote='Unable to get chart info')
chart_name = self._html_search_regex(
r'<h1[^>]+class="unDis">([^<]+)</h1>', webpage, 'chart name')
chart_desc = self._html_search_regex(
r'<p[^>]+class="tabDef">(\d{4}第\d{2}期)</p>', webpage, 'chart desc')
entries = [
self.url_result(song_url, 'Kuwo') for song_url in re.findall(
r'<a[^>]+href="(http://www\.kuwo\.cn/yinyue/\d+)/"', webpage)
]
return self.playlist_result(entries, chart_id, chart_name, chart_desc)
class KuwoSingerIE(InfoExtractor):
IE_NAME = 'kuwo:singer'
IE_DESC = '酷我音乐 - 歌手'
_VALID_URL = r'http://www\.kuwo\.cn/mingxing/(?P<id>[^/]+)'
_TESTS = [{
'url': 'http://www.kuwo.cn/mingxing/bruno+mars/',
'info_dict': {
'id': 'bruno+mars',
'title': 'Bruno Mars',
},
'playlist_count': 10,
}, {
'url': 'http://www.kuwo.cn/mingxing/Ali/music.htm',
'info_dict': {
'id': 'Ali',
'title': 'Ali',
},
'playlist_mincount': 95,
'skip': 'Regularly stalls travis build', # See https://travis-ci.org/rg3/youtube-dl/jobs/78878540
}]
def _real_extract(self, url):
singer_id = self._match_id(url)
webpage = self._download_webpage(
url, singer_id, note='Download singer info',
errnote='Unable to get singer info')
singer_name = self._html_search_regex(
r'<div class="title clearfix">\s*<h1>([^<]+)<span', webpage, 'singer name'
)
entries = []
first_page_only = False if re.search(r'/music(?:_\d+)?\.htm', url) else True
for page_num in itertools.count(1):
webpage = self._download_webpage(
'http://www.kuwo.cn/mingxing/%s/music_%d.htm' % (singer_id, page_num),
singer_id, note='Download song list page #%d' % page_num,
errnote='Unable to get song list page #%d' % page_num)
entries.extend([
self.url_result(song_url, 'Kuwo') for song_url in re.findall(
r'<p[^>]+class="m_name"><a[^>]+href="(http://www\.kuwo\.cn/yinyue/\d+)/',
webpage)
][:10 if first_page_only else None])
if first_page_only or not re.search(r'<a[^>]+href="[^"]+">下一页</a>', webpage):
break
return self.playlist_result(entries, singer_id, singer_name)
class KuwoCategoryIE(InfoExtractor):
IE_NAME = 'kuwo:category'
IE_DESC = '酷我音乐 - 分类'
_VALID_URL = r'http://yinyue\.kuwo\.cn/yy/cinfo_(?P<id>\d+?).htm'
_TEST = {
'url': 'http://yinyue.kuwo.cn/yy/cinfo_86375.htm',
'info_dict': {
'id': '86375',
'title': '八十年代精选',
'description': '这些都是属于八十年代的回忆!',
},
'playlist_count': 30,
}
def _real_extract(self, url):
category_id = self._match_id(url)
webpage = self._download_webpage(
url, category_id, note='Download category info',
errnote='Unable to get category info')
category_name = self._html_search_regex(
r'<h1[^>]+title="([^<>]+?)">[^<>]+?</h1>', webpage, 'category name')
category_desc = remove_start(
get_element_by_id('intro', webpage).strip(),
'%s简介:' % category_name)
jsonm = self._parse_json(self._html_search_regex(
r'var\s+jsonm\s*=\s*([^;]+);', webpage, 'category songs'), category_id)
entries = [
self.url_result('http://www.kuwo.cn/yinyue/%s/' % song['musicrid'], 'Kuwo')
for song in jsonm['musiclist']
]
return self.playlist_result(entries, category_id, category_name, category_desc)
class KuwoMvIE(KuwoBaseIE):
IE_NAME = 'kuwo:mv'
IE_DESC = '酷我音乐 - MV'
_VALID_URL = r'http://www\.kuwo\.cn/mv/(?P<id>\d+?)/'
_TEST = {
'url': 'http://www.kuwo.cn/mv/6480076/',
'info_dict': {
'id': '6480076',
'ext': 'mkv',
'title': '我们家MV',
'creator': '2PM',
},
}
_FORMATS = KuwoBaseIE._FORMATS + [
{'format': 'mkv', 'ext': 'mkv', 'preference': 250},
{'format': 'mp4', 'ext': 'mp4', 'preference': 200},
]
def _real_extract(self, url):
song_id = self._match_id(url)
webpage = self._download_webpage(
url, song_id, note='Download mv detail info: %s' % song_id,
errnote='Unable to get mv detail info: %s' % song_id)
mobj = re.search(
r'<h1[^>]+title="(?P<song>[^"]+)">[^<]+<span[^>]+title="(?P<singer>[^"]+)"',
webpage)
if mobj:
song_name = mobj.group('song')
singer_name = mobj.group('singer')
else:
raise ExtractorError('Unable to find song or singer names')
formats = self._get_formats(song_id)
return {
'id': song_id,
'title': song_name,
'creator': singer_name,
'formats': formats,
}
| unlicense |
ericzundel/pants | tests/python/pants_test/base/test_worker_pool.py | 33 | 1085 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import threading
import unittest
from pants.base.worker_pool import Work, WorkerPool
from pants.base.workunit import WorkUnit
from pants.util.contextutil import temporary_dir
class FakeRunTracker(object):
def register_thread(self, one):
pass
def keyboard_interrupt_raiser():
raise KeyboardInterrupt()
class WorkerPoolTest(unittest.TestCase):
def test_keyboard_interrupts_propagated(self):
condition = threading.Condition()
condition.acquire()
with self.assertRaises(KeyboardInterrupt):
with temporary_dir() as rundir:
pool = WorkerPool(WorkUnit(rundir, None, "work"), FakeRunTracker(), 1)
try:
pool.submit_async_work(Work(keyboard_interrupt_raiser, [()]))
condition.wait(2)
finally:
pool.abort()
| apache-2.0 |
jjas0nn/solvem | tensorflow/lib/python2.7/site-packages/tensorflow/contrib/distributions/python/ops/laplace.py | 11 | 6785 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Laplace distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from tensorflow.contrib.distributions.python.ops import distribution
from tensorflow.contrib.framework.python.framework import tensor_util as contrib_tensor_util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
class Laplace(distribution.Distribution):
"""The Laplace distribution with location and scale > 0 parameters.
#### Mathematical details
The PDF of this distribution is:
```f(x | mu, b, b > 0) = 0.5 / b exp(-|x - mu| / b)```
  Note that the Laplace distribution can be thought of as two exponential
distributions spliced together "back-to-back."
"""
def __init__(self,
loc,
scale,
validate_args=False,
allow_nan_stats=True,
name="Laplace"):
"""Construct Laplace distribution with parameters `loc` and `scale`.
The parameters `loc` and `scale` must be shaped in a way that supports
broadcasting (e.g., `loc / scale` is a valid operation).
Args:
loc: Floating point tensor which characterizes the location (center)
of the distribution.
scale: Positive floating point tensor which characterizes the spread of
the distribution.
validate_args: `Boolean`, default `False`. Whether to validate input
with asserts. If `validate_args` is `False`, and the inputs are
invalid, correct behavior is not guaranteed.
allow_nan_stats: `Boolean`, default `True`. If `False`, raise an
exception if a statistic (e.g. mean/mode/etc...) is undefined for any
batch member. If `True`, batch members with valid parameters leading to
undefined statistics will return NaN for this statistic.
name: The name to give Ops created by the initializer.
Raises:
TypeError: if `loc` and `scale` are of different dtype.
"""
parameters = locals()
parameters.pop("self")
with ops.name_scope(name, values=[loc, scale]) as ns:
with ops.control_dependencies([check_ops.assert_positive(scale)] if
validate_args else []):
self._loc = array_ops.identity(loc, name="loc")
self._scale = array_ops.identity(scale, name="scale")
contrib_tensor_util.assert_same_float_dtype((self._loc, self._scale))
super(Laplace, self).__init__(
dtype=self._loc.dtype,
is_continuous=True,
is_reparameterized=True,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=[self._loc, self._scale],
name=ns)
@staticmethod
def _param_shapes(sample_shape):
return dict(
zip(("loc", "scale"), ([ops.convert_to_tensor(
sample_shape, dtype=dtypes.int32)] * 2)))
@property
def loc(self):
"""Distribution parameter for the location."""
return self._loc
@property
def scale(self):
"""Distribution parameter for scale."""
return self._scale
def _batch_shape(self):
return array_ops.broadcast_dynamic_shape(
array_ops.shape(self.loc), array_ops.shape(self.scale))
def _get_batch_shape(self):
return array_ops.broadcast_static_shape(
self.loc.get_shape(), self.scale.get_shape())
def _event_shape(self):
return constant_op.constant([], dtype=dtypes.int32)
def _get_event_shape(self):
return tensor_shape.scalar()
def _sample_n(self, n, seed=None):
shape = array_ops.concat(([n], self.batch_shape()), 0)
# Sample uniformly-at-random from the open-interval (-1, 1).
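    # Inverse-CDF sampling: sign(U) supplies a random sign, |U| is
    # Uniform(0, 1), and -log(1 - |U|) is Exponential(1), so the
    # expression below is distributed Laplace(loc, scale).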
uniform_samples = random_ops.random_uniform(
shape=shape,
minval=np.nextafter(self.dtype.as_numpy_dtype(-1.),
self.dtype.as_numpy_dtype(0.)),
maxval=1.,
dtype=self.dtype,
seed=seed)
return (self.loc - self.scale * math_ops.sign(uniform_samples) *
math_ops.log(1. - math_ops.abs(uniform_samples)))
def _log_prob(self, x):
return (-math.log(2.) - math_ops.log(self.scale) -
math_ops.abs(x - self.loc) / self.scale)
def _prob(self, x):
return 0.5 / self.scale * math_ops.exp(
-math_ops.abs(x - self.loc) / self.scale)
def _log_cdf(self, x):
return math_ops.log(self.cdf(x))
def _cdf(self, x):
y = x - self.loc
return (0.5 + 0.5 * math_ops.sign(y) *
(1. - math_ops.exp(-math_ops.abs(y) / self.scale)))
def _entropy(self):
# Use broadcasting rules to calculate the full broadcast scale.
scale = self.scale + array_ops.zeros_like(self.loc)
return math.log(2.) + 1. + math_ops.log(scale)
def _mean(self):
return self.loc + array_ops.zeros_like(self.scale)
def _variance(self):
return math_ops.square(self._std())
def _std(self):
return math.sqrt(2.) * self.scale + array_ops.zeros_like(self.loc)
def _median(self):
return self._mean()
def _mode(self):
return self._mean()
class LaplaceWithSoftplusScale(Laplace):
"""Laplace with softplus applied to `scale`."""
def __init__(self,
loc,
scale,
validate_args=False,
allow_nan_stats=True,
name="LaplaceWithSoftplusScale"):
parameters = locals()
parameters.pop("self")
with ops.name_scope(name, values=[loc, scale]) as ns:
super(LaplaceWithSoftplusScale, self).__init__(
loc=loc,
scale=nn.softplus(scale, name="softplus_scale"),
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=ns)
self._parameters = parameters
| mit |
pravsripad/mne-python | mne/__init__.py | 4 | 5932 | """MNE software for MEG and EEG data analysis."""
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
#
# Dev branch marker is: 'X.Y.devN' where N is an integer.
#
from ._version import __version__
# have to import verbose first since it's needed by many things
from .utils import (set_log_level, set_log_file, verbose, set_config,
get_config, get_config_path, set_cache_dir,
set_memmap_min_size, grand_average, sys_info, open_docs)
from .io.pick import (pick_types, pick_channels,
pick_channels_regexp, pick_channels_forward,
pick_types_forward, pick_channels_cov,
pick_channels_evoked, pick_info,
channel_type, channel_indices_by_type)
from .io.base import concatenate_raws
from .io.meas_info import create_info, Info
from .io.proj import Projection
from .io.kit import read_epochs_kit
from .io.eeglab import read_epochs_eeglab
from .io.reference import (set_eeg_reference, set_bipolar_reference,
add_reference_channels)
from .io.what import what
from .bem import (make_sphere_model, make_bem_model, make_bem_solution,
read_bem_surfaces, write_bem_surfaces, write_head_bem,
read_bem_solution, write_bem_solution)
from .cov import (read_cov, write_cov, Covariance, compute_raw_covariance,
compute_covariance, whiten_evoked, make_ad_hoc_cov)
from .event import (read_events, write_events, find_events, merge_events,
pick_events, make_fixed_length_events, concatenate_events,
find_stim_steps, AcqParserFIF)
from .forward import (read_forward_solution, apply_forward, apply_forward_raw,
average_forward_solutions, Forward,
write_forward_solution, make_forward_solution,
convert_forward_solution, make_field_map,
make_forward_dipole, use_coil_def)
from .source_estimate import (read_source_estimate,
SourceEstimate, VectorSourceEstimate,
VolSourceEstimate, VolVectorSourceEstimate,
MixedSourceEstimate, MixedVectorSourceEstimate,
grade_to_tris,
spatial_src_adjacency,
spatial_tris_adjacency,
spatial_dist_adjacency,
spatial_inter_hemi_adjacency,
spatio_temporal_src_adjacency,
spatio_temporal_tris_adjacency,
spatio_temporal_dist_adjacency,
extract_label_time_course, stc_near_sensors)
from .surface import (read_surface, write_surface, decimate_surface, read_tri,
read_morph_map, get_head_surf, get_meg_helmet_surf,
dig_mri_distances)
from .morph import (SourceMorph, read_source_morph, grade_to_vertices,
compute_source_morph)
from .source_space import (read_source_spaces, vertex_to_mni,
head_to_mni, head_to_mri, read_talxfm,
write_source_spaces, setup_source_space,
setup_volume_source_space, SourceSpaces,
add_source_space_distances, morph_source_spaces,
get_volume_labels_from_aseg,
get_volume_labels_from_src, read_freesurfer_lut)
from .annotations import (Annotations, read_annotations, annotations_from_events,
events_from_annotations)
from .epochs import (BaseEpochs, Epochs, EpochsArray, read_epochs,
concatenate_epochs, make_fixed_length_epochs)
from .evoked import Evoked, EvokedArray, read_evokeds, write_evokeds, combine_evoked
from .label import (read_label, label_sign_flip,
write_label, stc_to_label, grow_labels, Label, split_label,
BiHemiLabel, read_labels_from_annot, write_labels_to_annot,
random_parcellation, morph_labels, labels_to_stc)
from .misc import parse_config, read_reject_parameters
from .coreg import (create_default_subject, scale_bem, scale_mri, scale_labels,
scale_source_space)
from .transforms import (read_trans, write_trans,
transform_surface_to, Transform)
from .proj import (read_proj, write_proj, compute_proj_epochs,
compute_proj_evoked, compute_proj_raw, sensitivity_map)
from .dipole import read_dipole, Dipole, DipoleFixed, fit_dipole
from .channels import (equalize_channels, rename_channels, find_layout,
read_vectorview_selection)
from .report import Report, open_report
from .io import read_epochs_fieldtrip, read_evoked_fieldtrip, read_evokeds_mff
from .rank import compute_rank
from . import beamformer
from . import channels
from . import chpi
from . import commands
from . import connectivity
from . import coreg
from . import cuda
from . import datasets
from . import dipole
from . import epochs
from . import event
from . import externals
from . import io
from . import filter
from . import gui
from . import inverse_sparse
from . import minimum_norm
from . import preprocessing
from . import simulation
from . import stats
from . import surface
from . import time_frequency
from . import viz
from . import decoding
# deprecations
from .utils import deprecated_alias
deprecated_alias('read_selection', read_vectorview_selection)
# initialize logging
set_log_level(None, False)
set_log_file()
| bsd-3-clause |
Radium-Devices/Radium_shamu | scripts/build-all.py | 236 | 11419 | #! /usr/bin/env python
# Copyright (c) 2009-2014, The Linux Foundation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Linux Foundation nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Build the kernel for all targets using the Android build environment.
import errno
import glob
from optparse import OptionParser
import os
import re
import shutil
import subprocess
import sys
version = 'build-all.py, version 1.99'
build_dir = '../all-kernels'
make_command = ["vmlinux", "modules", "dtbs"]
all_options = {}
compile64 = os.environ.get('CROSS_COMPILE64')
def error(msg):
sys.stderr.write("error: %s\n" % msg)
def fail(msg):
"""Fail with a user-printed message"""
error(msg)
sys.exit(1)
if not os.environ.get('CROSS_COMPILE'):
fail("CROSS_COMPILE must be set in the environment")
def check_kernel():
"""Ensure that PWD is a kernel directory"""
if (not os.path.isfile('MAINTAINERS') or
not os.path.isfile('arch/arm/mach-msm/Kconfig')):
fail("This doesn't seem to be an MSM kernel dir")
def check_build():
"""Ensure that the build directory is present."""
if not os.path.isdir(build_dir):
try:
os.makedirs(build_dir)
except OSError as exc:
if exc.errno == errno.EEXIST:
pass
else:
raise
failed_targets = []
class LogRunner:
def __init__(self, logname, make_env):
self.logname = logname
self.fd = open(logname, 'w')
self.make_env = make_env
def run(self, args):
devnull = open('/dev/null', 'r')
proc = subprocess.Popen(args, stdin=devnull,
env=self.make_env,
bufsize=0,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
count = 0
# for line in proc.stdout:
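        # Read raw chunks from the pipe rather than iterating line-by-line,
        # so output streams promptly even when the child writes partial
        # lines without flushing.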
rawfd = proc.stdout.fileno()
while True:
line = os.read(rawfd, 1024)
if not line:
break
self.fd.write(line)
self.fd.flush()
if all_options.verbose:
sys.stdout.write(line)
sys.stdout.flush()
else:
for i in range(line.count('\n')):
count += 1
if count == 64:
count = 0
print
sys.stdout.write('.')
sys.stdout.flush()
print
result = proc.wait()
self.fd.flush()
return result
class Builder():
def __init__(self, name, defconfig):
self.name = name
self.defconfig = defconfig
self.confname = self.defconfig.split('/')[-1]
# Determine if this is a 64-bit target based on the location
# of the defconfig.
self.make_env = os.environ.copy()
if "/arm64/" in defconfig:
if compile64:
self.make_env['CROSS_COMPILE'] = compile64
else:
fail("Attempting to build 64-bit, without setting CROSS_COMPILE64")
self.make_env['ARCH'] = 'arm64'
else:
self.make_env['ARCH'] = 'arm'
self.make_env['KCONFIG_NOTIMESTAMP'] = 'true'
def build(self):
dest_dir = os.path.join(build_dir, self.name)
log_name = "%s/log-%s.log" % (build_dir, self.name)
print 'Building %s in %s log %s' % (self.name, dest_dir, log_name)
if not os.path.isdir(dest_dir):
os.mkdir(dest_dir)
defconfig = self.defconfig
dotconfig = '%s/.config' % dest_dir
savedefconfig = '%s/defconfig' % dest_dir
# shutil.copyfile(defconfig, dotconfig) # Not really right.
staging_dir = 'install_staging'
modi_dir = '%s' % staging_dir
hdri_dir = '%s/usr' % staging_dir
shutil.rmtree(os.path.join(dest_dir, staging_dir), ignore_errors=True)
with open('/dev/null', 'r') as devnull:
subprocess.check_call(['make', 'O=%s' % dest_dir,
self.confname], env=self.make_env,
stdin=devnull)
if not all_options.updateconfigs:
# Build targets can be dependent upon the completion of
# previous build targets, so build them one at a time.
cmd_line = ['make',
'INSTALL_HDR_PATH=%s' % hdri_dir,
'INSTALL_MOD_PATH=%s' % modi_dir,
'O=%s' % dest_dir]
build_targets = []
for c in make_command:
if re.match(r'^-{1,2}\w', c):
cmd_line.append(c)
else:
build_targets.append(c)
build = LogRunner(log_name, self.make_env)
for t in build_targets:
result = build.run(cmd_line + [t])
if result != 0:
if all_options.keep_going:
                        failed_targets.append(self)
fail_or_error = error
else:
fail_or_error = fail
fail_or_error("Failed to build %s, see %s" %
(t, build.logname))
# Copy the defconfig back.
if all_options.configs or all_options.updateconfigs:
with open('/dev/null', 'r') as devnull:
subprocess.check_call(['make', 'O=%s' % dest_dir,
'savedefconfig'], env=self.make_env, stdin=devnull)
shutil.copyfile(savedefconfig, defconfig)
def update_config(file, str):
print 'Updating %s with \'%s\'\n' % (file, str)
with open(file, 'a') as defconfig:
defconfig.write(str + '\n')
def scan_configs():
"""Get the full list of defconfigs appropriate for this tree."""
names = []
arch_pats = (
r'[fm]sm[0-9]*_defconfig',
r'apq*_defconfig',
r'qsd*_defconfig',
r'mdm*_defconfig',
r'mpq*_defconfig',
)
arch64_pats = (
r'msm_defconfig',
)
for p in arch_pats:
for n in glob.glob('arch/arm/configs/' + p):
name = os.path.basename(n)[:-10]
names.append(Builder(name, n))
if 'CROSS_COMPILE64' in os.environ:
for p in arch64_pats:
for n in glob.glob('arch/arm64/configs/' + p):
name = os.path.basename(n)[:-10] + "-64"
names.append(Builder(name, n))
return names
def build_many(targets):
print "Building %d target(s)" % len(targets)
for target in targets:
if all_options.updateconfigs:
update_config(target.defconfig, all_options.updateconfigs)
target.build()
if failed_targets:
fail("\n ".join(["Failed targets:"] +
[target.name for target in failed_targets]))
def main():
global make_command
check_kernel()
check_build()
configs = scan_configs()
usage = ("""
%prog [options] all -- Build all targets
%prog [options] target target ... -- List specific targets
%prog [options] perf -- Build all perf targets
%prog [options] noperf -- Build all non-perf targets""")
parser = OptionParser(usage=usage, version=version)
parser.add_option('--configs', action='store_true',
dest='configs',
help="Copy configs back into tree")
parser.add_option('--list', action='store_true',
dest='list',
help='List available targets')
parser.add_option('-v', '--verbose', action='store_true',
dest='verbose',
help='Output to stdout in addition to log file')
parser.add_option('--oldconfig', action='store_true',
dest='oldconfig',
help='Only process "make oldconfig"')
parser.add_option('--updateconfigs',
dest='updateconfigs',
help="Update defconfigs with provided option setting, "
"e.g. --updateconfigs=\'CONFIG_USE_THING=y\'")
parser.add_option('-j', '--jobs', type='int', dest="jobs",
help="Number of simultaneous jobs")
parser.add_option('-l', '--load-average', type='int',
dest='load_average',
help="Don't start multiple jobs unless load is below LOAD_AVERAGE")
parser.add_option('-k', '--keep-going', action='store_true',
dest='keep_going', default=False,
help="Keep building other targets if a target fails")
parser.add_option('-m', '--make-target', action='append',
help='Build the indicated make target (default: %s)' %
' '.join(make_command))
(options, args) = parser.parse_args()
global all_options
all_options = options
if options.list:
print "Available targets:"
for target in configs:
print " %s" % target.name
sys.exit(0)
if options.oldconfig:
make_command = ["oldconfig"]
elif options.make_target:
make_command = options.make_target
if options.jobs:
make_command.append("-j%d" % options.jobs)
if options.load_average:
make_command.append("-l%d" % options.load_average)
if args == ['all']:
build_many(configs)
elif args == ['perf']:
targets = []
for t in configs:
if "perf" in t.name:
targets.append(t)
build_many(targets)
elif args == ['noperf']:
targets = []
for t in configs:
if "perf" not in t.name:
targets.append(t)
build_many(targets)
elif len(args) > 0:
all_configs = {}
for t in configs:
all_configs[t.name] = t
targets = []
for t in args:
if t not in all_configs:
parser.error("Target '%s' not one of %s" % (t, all_configs.keys()))
targets.append(all_configs[t])
build_many(targets)
else:
parser.error("Must specify a target to build, or 'all'")
if __name__ == "__main__":
main()
| gpl-2.0 |
jeroenseegers/git-history | setup.py | 1 | 1125 | import os
from setuptools import setup
def read(*paths):
"""Build a file path from *paths* and return the contents."""
with open(os.path.join(*paths), 'r') as f:
return f.read()
setup(
name='git-history',
version='0.0.1',
description='Keep a history of all your git commands.',
long_description=(read('README.md')),
url='http://github.com/jeroenseegers/git-history/',
license='MIT',
author='Jeroen Seegers',
author_email='[email protected]',
py_modules=['git-history'],
include_package_data=True,
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Topic :: Software Development :: Version Control',
'Topic :: Utilities',
],
entry_points={
'console_scripts': [
'git-history = git_history:track_history',
]
},
)
| mit |
jelugbo/tundex | lms/djangoapps/courseware/features/problems.py | 6 | 5985 | '''
Steps for problem.feature lettuce tests
'''
# pylint: disable=C0111
# pylint: disable=W0621
from lettuce import world, step
from common import i_am_registered_for_the_course, visit_scenario_item
from problems_setup import PROBLEM_DICT, answer_problem, problem_has_answer, add_problem_to_course
def _view_problem(step, problem_type, problem_settings=None):
i_am_registered_for_the_course(step, 'model_course')
# Ensure that the course has this problem type
add_problem_to_course(world.scenario_dict['COURSE'].number, problem_type, problem_settings)
# Go to the one section in the factory-created course
# which should be loaded with the correct problem
visit_scenario_item('SECTION')
@step(u'I am viewing a "([^"]*)" problem with "([^"]*)" attempt')
def view_problem_with_attempts(step, problem_type, attempts):
_view_problem(step, problem_type, {'max_attempts': attempts})
@step(u'I am viewing a "([^"]*)" that shows the answer "([^"]*)"')
def view_problem_with_show_answer(step, problem_type, answer):
_view_problem(step, problem_type, {'showanswer': answer})
@step(u'I am viewing a "([^"]*)" problem')
def view_problem(step, problem_type):
_view_problem(step, problem_type)
@step(u'External graders respond "([^"]*)"')
def set_external_grader_response(step, correctness):
assert(correctness in ['correct', 'incorrect'])
response_dict = {
'correct': True if correctness == 'correct' else False,
'score': 1 if correctness == 'correct' else 0,
'msg': 'Your problem was graded {0}'.format(correctness)
}
# Set the fake xqueue server to always respond
# correct/incorrect when asked to grade a problem
world.xqueue.config['default'] = response_dict
@step(u'I answer a "([^"]*)" problem "([^"]*)ly"')
def answer_problem_step(step, problem_type, correctness):
""" Mark a given problem type correct or incorrect, then submit it.
*problem_type* is a string representing the type of problem (e.g. 'drop down')
*correctness* is in ['correct', 'incorrect']
"""
# Change the answer on the page
input_problem_answer(step, problem_type, correctness)
# Submit the problem
check_problem(step)
@step(u'I input an answer on a "([^"]*)" problem "([^"]*)ly"')
def input_problem_answer(_, problem_type, correctness):
"""
Have the browser input an answer (either correct or incorrect)
"""
assert(correctness in ['correct', 'incorrect'])
assert(problem_type in PROBLEM_DICT)
answer_problem(world.scenario_dict['COURSE'].number, problem_type, correctness)
@step(u'I check a problem')
def check_problem(step):
# first scroll down so the loading mathjax button does not
# cover up the Check button
world.browser.execute_script("window.scrollTo(0,1024)")
world.css_click("input.check")
# Wait for the problem to finish re-rendering
world.wait_for_ajax_complete()
@step(u'The "([^"]*)" problem displays a "([^"]*)" answer')
def assert_problem_has_answer(step, problem_type, answer_class):
'''
Assert that the problem is displaying a particular answer.
These correspond to the same correct/incorrect
answers we set in answer_problem()
We can also check that a problem has been left blank
by setting answer_class='blank'
'''
assert answer_class in ['correct', 'incorrect', 'blank']
assert problem_type in PROBLEM_DICT
problem_has_answer(world.scenario_dict['COURSE'].number, problem_type, answer_class)
@step(u'I reset the problem')
def reset_problem(_step):
world.css_click('input.reset')
# Wait for the problem to finish re-rendering
world.wait_for_ajax_complete()
@step(u'I press the button with the label "([^"]*)"$')
def press_the_button_with_label(_step, buttonname):
button_css = 'button span.show-label'
elem = world.css_find(button_css).first
world.css_has_text(button_css, elem)
world.css_click(button_css)
@step(u'The "([^"]*)" button does( not)? appear')
def action_button_present(_step, buttonname, doesnt_appear):
button_css = 'div.action input[value*="%s"]' % buttonname
if bool(doesnt_appear):
assert world.is_css_not_present(button_css)
else:
assert world.is_css_present(button_css)
@step(u'the Show/Hide button label is "([^"]*)"$')
def show_hide_label_is(_step, label_name):
# The label text is changed by static/xmodule_js/src/capa/display.js
# so give it some time to change on the page.
label_css = 'button.show span.show-label'
world.wait_for(lambda _: world.css_has_text(label_css, label_name))
@step(u'I should see a score of "([^"]*)"$')
def see_score(_step, score):
# The problem progress is changed by
# cms/static/xmodule_js/src/capa/display.js
# so give it some time to render on the page.
score_css = 'div.problem-progress'
expected_text = '({})'.format(score)
world.wait_for(lambda _: world.css_has_text(score_css, expected_text))
@step(u'[Mm]y "([^"]*)" answer is( NOT)? marked "([^"]*)"')
def assert_answer_mark(_step, problem_type, isnt_marked, correctness):
"""
Assert that the expected answer mark is visible
for a given problem type.
*problem_type* is a string identifying the type of problem (e.g. 'drop down')
*correctness* is in ['correct', 'incorrect', 'unanswered']
"""
# Determine which selector(s) to look for based on correctness
assert(correctness in ['correct', 'incorrect', 'unanswered'])
assert(problem_type in PROBLEM_DICT)
# At least one of the correct selectors should be present
for sel in PROBLEM_DICT[problem_type][correctness]:
if bool(isnt_marked):
has_expected = world.is_css_not_present(sel)
else:
has_expected = world.is_css_present(sel)
# As soon as we find the selector, break out of the loop
if has_expected:
break
# Expect that we found the expected selector
assert(has_expected)
| agpl-3.0 |
codeforamerica/Change-By-Us | framework/page.py | 4 | 3570 | """
:copyright: (c) 2011 Local Projects, all rights reserved
:license: Affero GNU GPL v3, see LICENSE for more details.
"""
import cgi, os
import json as _json
import framework.filters as filters
from lib import jinja2
from framework.config import *
from framework.log import log
log.info("____________________________________________________________________________")
class render_jinja:
def __init__(self, *a, **kwargs):
extensions = kwargs.pop('extensions', [])
globals = kwargs.pop('globals', {})
from jinja2 import Environment, FileSystemLoader
self._lookup = Environment(loader=FileSystemLoader(*a, **kwargs), extensions=extensions)
self._lookup.globals.update(globals)
def __getitem__(self, name): # bh added
t = self._lookup.get_template(name)
return t.render
def render(template_name, template_values=None, suffix="html"):
if template_values is None: template_values = {}
log.info("TEMPLATE %s: %s" % (template_name, template_values))
config = Config.get_all()
config['base_url'] = Config.base_url()
for key in config:
if type(config[key]) is list:
for param in config[key][0]:
template_values[key + "_" + param] = config[key][0][param]
else:
template_values[key] = config[key]
template_values['template_name'] = template_name
renderer = render_jinja(os.path.dirname(__file__) + '/../')
renderer._lookup.filters.update(filters.filters)
print("Content-Type: text/plain\n")
log.info("200: text/html (%s)" % template_name)
print(renderer[template_name + "." + suffix](template_values))
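# Typical call from a controller (illustrative sketch):
#   render('templates/project', {'title': 'My Project'})
# looks up templates/project.html relative to the project root and prints
# it as a CGI response.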
def json(data):
    output = _json.dumps(data, indent=4)
    print("Content-Type: text/plain\n")
    log.info("200: text/plain (JSON)")
    print(output)
def xml(data):
print("Content-Type: application/xml\n")
output = "<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n"
output += data ## should make this use a real library
log.info("200: application/xml")
print(output)
def html(html):
print("Content-Type: text/html\n")
doc = "<html><head><meta http-equiv=\"Content-Type\" content=\"text/html; charset=UTF-8\" /></head><body>"
doc += html
doc += "</body></html>"
log.info("200: text/html")
print(doc)
def text(string):
print("Content-Type: text/plain\n")
log.info("200: text/plain")
print(string)
def csv(string, filename):
    print("Content-Type: text/csv")
    print("Content-Disposition: attachment; filename=%s\n" % filename)
    log.info("200: text/csv")
    print(string)
def image(image):
    print("Content-Type: image/png")
    print("Expires: Thu, 15 Apr 2050 20:00:00 GMT\n")
    log.info("200: image/png")
    print(image)
def temp_image(image):
    print("Content-Type: image/png")
    print("Cache-Control: no-cache\n")
    log.info("200: image/png (temporary)")
    print(image)
# def error(message):
# log.error("400: %s" % message)
# return web.BadRequest(message)
# def not_found(self):
# log.error("404: Page not found")
# return web.NotFound()
#
# def redirect(url):
# log.info("303: Redirecting to " + url)
# return web.SeeOther(url)
#
# def refresh(self):
# url = web.ctx.path
# log.info("303: Redirecting to " + url + " (refresh)")
# return web.SeeOther(url) | agpl-3.0 |
googleads/google-ads-python | google/ads/googleads/v8/errors/types/reach_plan_error.py | 1 | 1178 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v8.errors",
marshal="google.ads.googleads.v8",
manifest={"ReachPlanErrorEnum",},
)
class ReachPlanErrorEnum(proto.Message):
r"""Container for enum describing possible errors returned from
the ReachPlanService.
"""
class ReachPlanError(proto.Enum):
r"""Enum describing possible errors from ReachPlanService."""
UNSPECIFIED = 0
UNKNOWN = 1
NOT_FORECASTABLE_MISSING_RATE = 2
__all__ = tuple(sorted(__protobuf__.manifest))
| apache-2.0 |
zengenti/ansible | lib/ansible/modules/network/cumulus/_cl_license.py | 11 | 4949 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Cumulus Networks <[email protected]>
#
# This file is part of Ansible
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['deprecated'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: cl_license
version_added: "2.1"
author: "Cumulus Networks (@CumulusNetworks)"
short_description: Install Cumulus Linux license
deprecated: Deprecated in 2.3.
description:
- Installs a Cumulus Linux license. The module reports no change of status
when a license is installed.
For more details go the Cumulus Linux License Documentation at
U(http://docs.cumulusnetwork.com) and the Licensing KB Site at
U(https://support.cumulusnetworks.com/hc/en-us/sections/200507688)
notes:
- To activate a license for the FIRST time, the switchd service must be
restarted. This action is disruptive. The license renewal process occurs
via the Cumulus Networks Customer Portal -
U(http://customers.cumulusnetworks.com).
- A non-EULA license is REQUIRED for automation. Manually install the
license on a test switch, using the command "cl-license -i <license_file>"
to confirm the license is a Non-EULA license.
See EXAMPLES, for the proper way to issue this notify action.
options:
src:
description:
- The full path to the license. Can be local path or HTTP URL.
required: true
force:
description:
- Force installation of a license. Typically not needed.
It is recommended to manually run this command via the ansible
command. A reload of switchd is not required. Running the force
option in a playbook will break the idempotent state machine of
the module and cause the switchd notification to kick in all the
time, causing a disruption.
choices:
- yes
- no
'''
EXAMPLES = '''
# Example playbook using the cl_license module to manage licenses on Cumulus Linux
- hosts: all
tasks:
- name: install license using http url
cl_license:
src: http://10.1.1.1/license.txt
notify: restart switchd
- name: Triggers switchd to be restarted right away, before play, or role
is over. This is desired behaviour
meta: flush_handlers
- name: Configure interfaces
template:
src: interfaces.j2
dest: /etc/network/interfaces
notify: restart networking
handlers:
- name: restart switchd
service:
name: switchd
state: restarted
- name: restart networking
service:
name: networking
state: reloaded
# Force all switches to accept a new license. Typically not needed
# ansible -m cl_license -a "src='http://10.1.1.1/new_lic' force=yes" -u root all
'''
RETURN = '''
changed:
description: whether the interface was changed
returned: changed
type: bool
sample: True
msg:
description: human-readable report of success or failure
returned: always
type: string
sample: "interface bond0 config updated"
'''
CL_LICENSE_PATH='/usr/cumulus/bin/cl-license'
def install_license(module):
# license is not installed, install it
_url = module.params.get('src')
(_rc, out, _err) = module.run_command("%s -i %s" % (CL_LICENSE_PATH, _url))
if _rc > 0:
module.fail_json(msg=_err)
def main():
module = AnsibleModule(
argument_spec=dict(
src=dict(required=True, type='str'),
force=dict(type='bool', choices=BOOLEANS,
default=False)
),
)
# check if license is installed
# if force is enabled then set return code to nonzero
if module.params.get('force') is True:
_rc = 10
else:
(_rc, out, _err) = module.run_command(CL_LICENSE_PATH)
if _rc == 0:
module.msg = "No change. License already installed"
module.changed = False
else:
install_license(module)
module.msg = "License installation completed"
module.changed = True
module.exit_json(changed=module.changed, msg=module.msg)
# import module snippets
from ansible.module_utils.basic import *
# from ansible.module_utils.urls import *
if __name__ == '__main__':
main()
| gpl-3.0 |
NirBenTalLab/proorigami-cde-package | cde-root/usr/lib64/python2.4/site-packages/numpy/ctypeslib.py | 2 | 12373 | """
============================
``ctypes`` Utility Functions
============================
See Also
---------
load_library : Load a C library.
ndpointer : Array restype/argtype with verification.
as_ctypes : Create a ctypes array from an ndarray.
as_array : Create an ndarray from a ctypes array.
References
----------
.. [1] "SciPy Cookbook: ctypes", http://www.scipy.org/Cookbook/Ctypes
Examples
--------
Load the C library:
>>> _lib = np.ctypeslib.load_library('libmystuff', '.') #DOCTEST: +ignore
Our result type, an ndarray that must be of type double, be 1-dimensional
and is C-contiguous in memory:
>>> array_1d_double = np.ctypeslib.ndpointer(
... dtype=np.double,
... ndim=1, flags='CONTIGUOUS') #DOCTEST: +ignore
Our C-function typically takes an array and updates its values
in-place. For example::
void foo_func(double* x, int length)
{
int i;
for (i = 0; i < length; i++) {
x[i] = i*i;
}
}
We wrap it using:
>>> _lib.foo_func.restype = None #DOCTEST: +ignore
>>> _lib.foo_func.argtypes = [array_1d_double, c_int] #DOCTEST: +ignore
Then, we're ready to call ``foo_func``:
>>> out = np.empty(15, dtype=np.double)
>>> _lib.foo_func(out, len(out)) #DOCTEST: +ignore
"""
__all__ = ['load_library', 'ndpointer', 'test', 'ctypes_load_library',
'c_intp', 'as_ctypes', 'as_array']
import sys, os
from numpy import integer, ndarray, dtype as _dtype, deprecate, array
from numpy.core.multiarray import _flagdict, flagsobj
try:
import ctypes
except ImportError:
ctypes = None
if ctypes is None:
def _dummy(*args, **kwds):
"""
Dummy object that raises an ImportError if ctypes is not available.
Raises
------
ImportError
If ctypes is not available.
"""
raise ImportError, "ctypes is not available."
ctypes_load_library = _dummy
load_library = _dummy
as_ctypes = _dummy
as_array = _dummy
from numpy import intp as c_intp
_ndptr_base = object
else:
import numpy.core._internal as nic
c_intp = nic._getintp_ctype()
del nic
_ndptr_base = ctypes.c_void_p
# Adapted from Albert Strasheim
def load_library(libname, loader_path):
if ctypes.__version__ < '1.0.1':
import warnings
warnings.warn("All features of ctypes interface may not work " \
"with ctypes < 1.0.1")
ext = os.path.splitext(libname)[1]
if not ext:
# Try to load library with platform-specific name, otherwise
# default to libname.[so|pyd]. Sometimes, these files are built
# erroneously on non-linux platforms.
libname_ext = ['%s.so' % libname, '%s.pyd' % libname]
if sys.platform == 'win32':
libname_ext.insert(0, '%s.dll' % libname)
elif sys.platform == 'darwin':
libname_ext.insert(0, '%s.dylib' % libname)
else:
libname_ext = [libname]
loader_path = os.path.abspath(loader_path)
if not os.path.isdir(loader_path):
libdir = os.path.dirname(loader_path)
else:
libdir = loader_path
for ln in libname_ext:
try:
libpath = os.path.join(libdir, ln)
return ctypes.cdll[libpath]
except OSError, e:
pass
raise e
ctypes_load_library = deprecate(load_library, 'ctypes_load_library',
'load_library')
def _num_fromflags(flaglist):
num = 0
for val in flaglist:
num += _flagdict[val]
return num
_flagnames = ['C_CONTIGUOUS', 'F_CONTIGUOUS', 'ALIGNED', 'WRITEABLE',
'OWNDATA', 'UPDATEIFCOPY']
def _flags_fromnum(num):
res = []
for key in _flagnames:
value = _flagdict[key]
if (num & value):
res.append(key)
return res
class _ndptr(_ndptr_base):
def _check_retval_(self):
"""This method is called when this class is used as the .restype
        attribute for a shared-library function. It constructs a numpy
array from a void pointer."""
return array(self)
@property
def __array_interface__(self):
return {'descr': self._dtype_.descr,
'__ref': self,
'strides': None,
'shape': self._shape_,
'version': 3,
'typestr': self._dtype_.descr[0][1],
'data': (self.value, False),
}
@classmethod
def from_param(cls, obj):
if not isinstance(obj, ndarray):
raise TypeError, "argument must be an ndarray"
if cls._dtype_ is not None \
and obj.dtype != cls._dtype_:
raise TypeError, "array must have data type %s" % cls._dtype_
if cls._ndim_ is not None \
and obj.ndim != cls._ndim_:
raise TypeError, "array must have %d dimension(s)" % cls._ndim_
if cls._shape_ is not None \
and obj.shape != cls._shape_:
raise TypeError, "array must have shape %s" % str(cls._shape_)
if cls._flags_ is not None \
and ((obj.flags.num & cls._flags_) != cls._flags_):
raise TypeError, "array must have flags %s" % \
_flags_fromnum(cls._flags_)
return obj.ctypes
# Factory for an array-checking class with from_param defined for
# use with ctypes argtypes mechanism
_pointer_type_cache = {}
def ndpointer(dtype=None, ndim=None, shape=None, flags=None):
"""
Array-checking restype/argtypes.
An ndpointer instance is used to describe an ndarray in restypes
and argtypes specifications. This approach is more flexible than
using, for example, ``POINTER(c_double)``, since several restrictions
can be specified, which are verified upon calling the ctypes function.
These include data type, number of dimensions, shape and flags. If a
given array does not satisfy the specified restrictions,
a ``TypeError`` is raised.
Parameters
----------
dtype : data-type, optional
Array data-type.
ndim : int, optional
Number of array dimensions.
shape : tuple of ints, optional
Array shape.
flags : str or tuple of str
Array flags; may be one or more of:
- C_CONTIGUOUS / C / CONTIGUOUS
- F_CONTIGUOUS / F / FORTRAN
- OWNDATA / O
- WRITEABLE / W
- ALIGNED / A
- UPDATEIFCOPY / U
Returns
-------
klass : ndpointer type object
        A type object, which is an ``_ndptr`` instance containing
dtype, ndim, shape and flags information.
Raises
------
TypeError
If a given array does not satisfy the specified restrictions.
Examples
--------
>>> clib.somefunc.argtypes = [np.ctypeslib.ndpointer(dtype=np.float64,
... ndim=1,
... flags='C_CONTIGUOUS')]
>>> clib.somefunc(np.array([1, 2, 3], dtype=np.float64))
"""
if dtype is not None:
dtype = _dtype(dtype)
num = None
if flags is not None:
if isinstance(flags, str):
flags = flags.split(',')
elif isinstance(flags, (int, integer)):
num = flags
flags = _flags_fromnum(num)
elif isinstance(flags, flagsobj):
num = flags.num
flags = _flags_fromnum(num)
if num is None:
try:
flags = [x.strip().upper() for x in flags]
except:
raise TypeError, "invalid flags specification"
num = _num_fromflags(flags)
try:
return _pointer_type_cache[(dtype, ndim, shape, num)]
except KeyError:
pass
if dtype is None:
name = 'any'
elif dtype.names:
name = str(id(dtype))
else:
name = dtype.str
if ndim is not None:
name += "_%dd" % ndim
if shape is not None:
try:
strshape = [str(x) for x in shape]
except TypeError:
strshape = [str(shape)]
shape = (shape,)
shape = tuple(shape)
name += "_"+"x".join(strshape)
if flags is not None:
name += "_"+"_".join(flags)
else:
flags = []
klass = type("ndpointer_%s"%name, (_ndptr,),
{"_dtype_": dtype,
"_shape_" : shape,
"_ndim_" : ndim,
"_flags_" : num})
    _pointer_type_cache[(dtype, ndim, shape, num)] = klass
return klass
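# --- Illustrative usage (not part of the original module) ---
# A hedged sketch of wiring ndpointer into ctypes.  The shared library
# ("libsum.so") and its exported function ("dsum") are hypothetical; the
# point is that _ndptr.from_param validates dtype/ndim/flags on every call.
def _ndpointer_usage_sketch():
    import ctypes
    import numpy as np
    lib = ctypes.CDLL("libsum.so")  # hypothetical: double dsum(double *, int)
    lib.dsum.restype = ctypes.c_double
    lib.dsum.argtypes = [ndpointer(dtype=np.float64, ndim=1,
                                   flags='C_CONTIGUOUS'),
                         ctypes.c_int]
    a = np.arange(5, dtype=np.float64)
    # A non-contiguous or wrongly typed array would raise TypeError here.
    return lib.dsum(a, a.size)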
if ctypes is not None:
ct = ctypes
################################################################
# simple types
# maps the numpy typecodes like '<f8' to simple ctypes types like
# c_double. Filled in by prep_simple.
_typecodes = {}
def prep_simple(simple_type, typestr):
"""Given a ctypes simple type, construct and attach an
__array_interface__ property to it if it does not yet have one.
"""
try: simple_type.__array_interface__
except AttributeError: pass
else: return
_typecodes[typestr] = simple_type
def __array_interface__(self):
return {'descr': [('', typestr)],
'__ref': self,
'strides': None,
'shape': (),
'version': 3,
'typestr': typestr,
'data': (ct.addressof(self), False),
}
simple_type.__array_interface__ = property(__array_interface__)
if sys.byteorder == "little":
TYPESTR = "<%c%d"
else:
TYPESTR = ">%c%d"
simple_types = [
((ct.c_byte, ct.c_short, ct.c_int, ct.c_long, ct.c_longlong), "i"),
((ct.c_ubyte, ct.c_ushort, ct.c_uint, ct.c_ulong, ct.c_ulonglong), "u"),
((ct.c_float, ct.c_double), "f"),
]
    # Prep the numerical ctypes types:
for types, code in simple_types:
for tp in types:
prep_simple(tp, TYPESTR % (code, ct.sizeof(tp)))
################################################################
# array types
_ARRAY_TYPE = type(ct.c_int * 1)
def prep_array(array_type):
"""Given a ctypes array type, construct and attach an
__array_interface__ property to it if it does not yet have one.
"""
try: array_type.__array_interface__
except AttributeError: pass
else: return
shape = []
ob = array_type
while type(ob) == _ARRAY_TYPE:
shape.append(ob._length_)
ob = ob._type_
shape = tuple(shape)
ai = ob().__array_interface__
descr = ai['descr']
typestr = ai['typestr']
def __array_interface__(self):
return {'descr': descr,
'__ref': self,
'strides': None,
'shape': shape,
'version': 3,
'typestr': typestr,
'data': (ct.addressof(self), False),
}
array_type.__array_interface__ = property(__array_interface__)
################################################################
# public functions
def as_array(obj):
"""Create a numpy array from a ctypes array. The numpy array
shares the memory with the ctypes object."""
tp = type(obj)
try: tp.__array_interface__
except AttributeError: prep_array(tp)
return array(obj, copy=False)
def as_ctypes(obj):
"""Create and return a ctypes object from a numpy array. Actually
anything that exposes the __array_interface__ is accepted."""
ai = obj.__array_interface__
if ai["strides"]:
raise TypeError("strided arrays not supported")
if ai["version"] != 3:
raise TypeError("only __array_interface__ version 3 supported")
addr, readonly = ai["data"]
if readonly:
raise TypeError("readonly arrays unsupported")
tp = _typecodes[ai["typestr"]]
for dim in ai["shape"][::-1]:
tp = tp * dim
result = tp.from_address(addr)
result.__keep = ai
return result
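    # Sketch (illustrative; relies only on this module): as_array and
    # as_ctypes give two views over one buffer, so a write through either
    # side is visible through the other.
    def _shared_buffer_sketch():
        carr = (ct.c_int * 4)(1, 2, 3, 4)
        view = as_array(carr)       # numpy array sharing carr's memory
        view[0] = 99                # visible through the ctypes array too
        back = as_ctypes(view)      # ctypes object over the same buffer
        return carr[0], back[0]     # both are 99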
| mit |
jackjennings/Mechanic | src/lib/site-packages/requests/packages/urllib3/util/ssl_.py | 4 | 12101 | from __future__ import absolute_import
import errno
import warnings
import hmac
from binascii import hexlify, unhexlify
from hashlib import md5, sha1, sha256
from ..exceptions import SSLError, InsecurePlatformWarning, SNIMissingWarning
SSLContext = None
HAS_SNI = False
IS_PYOPENSSL = False
# Maps the length of a hex-encoded digest to a possible hash function
# producing a digest of that length
HASHFUNC_MAP = {
32: md5,
40: sha1,
64: sha256,
}
def _const_compare_digest_backport(a, b):
"""
Compare two digests of equal length in constant time.
The digests must be of type str/bytes.
Returns True if the digests match, and False otherwise.
"""
result = abs(len(a) - len(b))
for l, r in zip(bytearray(a), bytearray(b)):
result |= l ^ r
return result == 0
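# Worked example (illustrative): for a = b'ab' and b = b'ac' the length term
# is 0 and the XOR terms are (0x61 ^ 0x61) = 0 and (0x62 ^ 0x63) = 1, so
# result ends up 1 and the comparison returns False.  Every byte pair is
# inspected regardless of where the first mismatch sits, which is what makes
# the backport (approximately) constant-time.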
_const_compare_digest = getattr(hmac, 'compare_digest',
_const_compare_digest_backport)
try: # Test for SSL features
import ssl
from ssl import wrap_socket, CERT_NONE, PROTOCOL_SSLv23
from ssl import HAS_SNI # Has SNI?
except ImportError:
pass
try:
from ssl import OP_NO_SSLv2, OP_NO_SSLv3, OP_NO_COMPRESSION
except ImportError:
OP_NO_SSLv2, OP_NO_SSLv3 = 0x1000000, 0x2000000
OP_NO_COMPRESSION = 0x20000
# A secure default.
# Sources for more information on TLS ciphers:
#
# - https://wiki.mozilla.org/Security/Server_Side_TLS
# - https://www.ssllabs.com/projects/best-practices/index.html
# - https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/
#
# The general intent is:
# - Prefer cipher suites that offer perfect forward secrecy (DHE/ECDHE),
# - prefer ECDHE over DHE for better performance,
# - prefer any AES-GCM and ChaCha20 over any AES-CBC for better performance and
# security,
# - prefer AES-GCM over ChaCha20 because hardware-accelerated AES is common,
# - disable NULL authentication, MD5 MACs and DSS for security reasons.
DEFAULT_CIPHERS = ':'.join([
'ECDH+AESGCM',
'ECDH+CHACHA20',
'DH+AESGCM',
'DH+CHACHA20',
'ECDH+AES256',
'DH+AES256',
'ECDH+AES128',
'DH+AES',
'RSA+AESGCM',
'RSA+AES',
'!aNULL',
'!eNULL',
'!MD5',
])
try:
from ssl import SSLContext # Modern SSL?
except ImportError:
import sys
class SSLContext(object): # Platform-specific: Python 2 & 3.1
supports_set_ciphers = ((2, 7) <= sys.version_info < (3,) or
(3, 2) <= sys.version_info)
def __init__(self, protocol_version):
self.protocol = protocol_version
# Use default values from a real SSLContext
self.check_hostname = False
self.verify_mode = ssl.CERT_NONE
self.ca_certs = None
self.options = 0
self.certfile = None
self.keyfile = None
self.ciphers = None
def load_cert_chain(self, certfile, keyfile):
self.certfile = certfile
self.keyfile = keyfile
def load_verify_locations(self, cafile=None, capath=None):
self.ca_certs = cafile
if capath is not None:
raise SSLError("CA directories not supported in older Pythons")
def set_ciphers(self, cipher_suite):
if not self.supports_set_ciphers:
raise TypeError(
'Your version of Python does not support setting '
'a custom cipher suite. Please upgrade to Python '
'2.7, 3.2, or later if you need this functionality.'
)
self.ciphers = cipher_suite
def wrap_socket(self, socket, server_hostname=None, server_side=False):
warnings.warn(
'A true SSLContext object is not available. This prevents '
'urllib3 from configuring SSL appropriately and may cause '
'certain SSL connections to fail. You can upgrade to a newer '
'version of Python to solve this. For more information, see '
'https://urllib3.readthedocs.io/en/latest/advanced-usage.html'
'#ssl-warnings',
InsecurePlatformWarning
)
kwargs = {
'keyfile': self.keyfile,
'certfile': self.certfile,
'ca_certs': self.ca_certs,
'cert_reqs': self.verify_mode,
'ssl_version': self.protocol,
'server_side': server_side,
}
if self.supports_set_ciphers: # Platform-specific: Python 2.7+
return wrap_socket(socket, ciphers=self.ciphers, **kwargs)
else: # Platform-specific: Python 2.6
return wrap_socket(socket, **kwargs)
def assert_fingerprint(cert, fingerprint):
"""
Checks if given fingerprint matches the supplied certificate.
:param cert:
Certificate as bytes object.
:param fingerprint:
Fingerprint as string of hexdigits, can be interspersed by colons.
"""
fingerprint = fingerprint.replace(':', '').lower()
digest_length = len(fingerprint)
hashfunc = HASHFUNC_MAP.get(digest_length)
if not hashfunc:
raise SSLError(
'Fingerprint of invalid length: {0}'.format(fingerprint))
    # We need encode() here for py32; works on py2 and py33.
fingerprint_bytes = unhexlify(fingerprint.encode())
cert_digest = hashfunc(cert).digest()
if not _const_compare_digest(cert_digest, fingerprint_bytes):
raise SSLError('Fingerprints did not match. Expected "{0}", got "{1}".'
.format(fingerprint, hexlify(cert_digest)))
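# Sketch (illustrative): pinning a peer certificate by fingerprint.  In real
# use the DER bytes come from sock.getpeercert(binary_form=True) and the
# expected fingerprint from your own records; here it is derived on the spot
# just to show both the matching and the mismatching paths.
def _fingerprint_pinning_sketch(der_cert_bytes):
    expected = hexlify(sha256(der_cert_bytes).digest()).decode('ascii')
    assert_fingerprint(der_cert_bytes, expected)   # matches: returns quietly
    try:
        assert_fingerprint(der_cert_bytes, '00' * 32)  # 64 hex chars: sha256
    except SSLError:
        pass                                       # mismatch raises SSLError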
def resolve_cert_reqs(candidate):
"""
Resolves the argument to a numeric constant, which can be passed to
the wrap_socket function/method from the ssl module.
Defaults to :data:`ssl.CERT_NONE`.
    If given a string it is assumed to be the name of the constant in the
    :mod:`ssl` module or its abbreviation.
    (So you can specify `REQUIRED` instead of `CERT_REQUIRED`.)
    If it's neither `None` nor a string we assume it is already the numeric
    constant which can directly be passed to wrap_socket.
"""
if candidate is None:
return CERT_NONE
if isinstance(candidate, str):
res = getattr(ssl, candidate, None)
if res is None:
res = getattr(ssl, 'CERT_' + candidate)
return res
return candidate
def resolve_ssl_version(candidate):
"""
    Like resolve_cert_reqs, but resolves to an SSL protocol version and
    defaults to :data:`ssl.PROTOCOL_SSLv23`.
"""
if candidate is None:
return PROTOCOL_SSLv23
if isinstance(candidate, str):
res = getattr(ssl, candidate, None)
if res is None:
res = getattr(ssl, 'PROTOCOL_' + candidate)
return res
return candidate
def create_urllib3_context(ssl_version=None, cert_reqs=None,
options=None, ciphers=None):
"""All arguments have the same meaning as ``ssl_wrap_socket``.
By default, this function does a lot of the same work that
``ssl.create_default_context`` does on Python 3.4+. It:
- Disables SSLv2, SSLv3, and compression
- Sets a restricted set of server ciphers
If you wish to enable SSLv3, you can do::
from urllib3.util import ssl_
context = ssl_.create_urllib3_context()
context.options &= ~ssl_.OP_NO_SSLv3
You can do the same to enable compression (substituting ``COMPRESSION``
for ``SSLv3`` in the last line above).
:param ssl_version:
The desired protocol version to use. This will default to
PROTOCOL_SSLv23 which will negotiate the highest protocol that both
the server and your installation of OpenSSL support.
:param cert_reqs:
        Whether to require certificate verification. This defaults to
        ``ssl.CERT_REQUIRED``.
:param options:
Specific OpenSSL options. These default to ``ssl.OP_NO_SSLv2``,
``ssl.OP_NO_SSLv3``, ``ssl.OP_NO_COMPRESSION``.
:param ciphers:
Which cipher suites to allow the server to select.
:returns:
Constructed SSLContext object with specified options
:rtype: SSLContext
"""
context = SSLContext(ssl_version or ssl.PROTOCOL_SSLv23)
# Setting the default here, as we may have no ssl module on import
cert_reqs = ssl.CERT_REQUIRED if cert_reqs is None else cert_reqs
if options is None:
options = 0
# SSLv2 is easily broken and is considered harmful and dangerous
options |= OP_NO_SSLv2
# SSLv3 has several problems and is now dangerous
options |= OP_NO_SSLv3
# Disable compression to prevent CRIME attacks for OpenSSL 1.0+
# (issue #309)
options |= OP_NO_COMPRESSION
context.options |= options
if getattr(context, 'supports_set_ciphers', True): # Platform-specific: Python 2.6
context.set_ciphers(ciphers or DEFAULT_CIPHERS)
context.verify_mode = cert_reqs
if getattr(context, 'check_hostname', None) is not None: # Platform-specific: Python 3.2
# We do our own verification, including fingerprints and alternative
# hostnames. So disable it here
context.check_hostname = False
return context
def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
ca_certs=None, server_hostname=None,
ssl_version=None, ciphers=None, ssl_context=None,
ca_cert_dir=None):
"""
All arguments except for server_hostname, ssl_context, and ca_cert_dir have
the same meaning as they do when using :func:`ssl.wrap_socket`.
:param server_hostname:
When SNI is supported, the expected hostname of the certificate
:param ssl_context:
A pre-made :class:`SSLContext` object. If none is provided, one will
be created using :func:`create_urllib3_context`.
:param ciphers:
A string of ciphers we wish the client to support. This is not
supported on Python 2.6 as the ssl module does not support it.
:param ca_cert_dir:
A directory containing CA certificates in multiple separate files, as
supported by OpenSSL's -CApath flag or the capath argument to
SSLContext.load_verify_locations().
"""
context = ssl_context
if context is None:
# Note: This branch of code and all the variables in it are no longer
# used by urllib3 itself. We should consider deprecating and removing
# this code.
context = create_urllib3_context(ssl_version, cert_reqs,
ciphers=ciphers)
    if ca_certs or ca_cert_dir:
try:
context.load_verify_locations(ca_certs, ca_cert_dir)
except IOError as e: # Platform-specific: Python 2.6, 2.7, 3.2
raise SSLError(e)
# Py33 raises FileNotFoundError which subclasses OSError
# These are not equivalent unless we check the errno attribute
except OSError as e: # Platform-specific: Python 3.3 and beyond
if e.errno == errno.ENOENT:
raise SSLError(e)
raise
elif getattr(context, 'load_default_certs', None) is not None:
        # try to load OS default certs; works well on Windows (requires Python 3.4+)
context.load_default_certs()
if certfile:
context.load_cert_chain(certfile, keyfile)
if HAS_SNI: # Platform-specific: OpenSSL with enabled SNI
return context.wrap_socket(sock, server_hostname=server_hostname)
warnings.warn(
'An HTTPS request has been made, but the SNI (Subject Name '
'Indication) extension to TLS is not available on this platform. '
'This may cause the server to present an incorrect TLS '
'certificate, which can cause validation failures. You can upgrade to '
'a newer version of Python to solve this. For more information, see '
'https://urllib3.readthedocs.io/en/latest/advanced-usage.html'
'#ssl-warnings',
SNIMissingWarning
)
return context.wrap_socket(sock)
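# Sketch (illustrative path and hostname): a verifying client-side wrap using
# the helpers above.  The CA bundle location is an assumption for the
# example, not something this module mandates.
def _ssl_wrap_socket_sketch(sock):
    ctx = create_urllib3_context(cert_reqs=ssl.CERT_REQUIRED)
    return ssl_wrap_socket(sock, ssl_context=ctx,
                           ca_certs='/etc/ssl/certs/ca-certificates.crt',
                           server_hostname='example.org')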
| mit |
3dfxsoftware/cbss-addons | account_analytic_analysis/account_analytic_analysis.py | 1 | 46100 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from dateutil.relativedelta import relativedelta
import datetime
import logging
import time
import traceback
from openerp.osv import osv, fields
from openerp.osv.orm import intersect, except_orm
import openerp.tools
from openerp.tools.translate import _
from openerp.addons.decimal_precision import decimal_precision as dp
_logger = logging.getLogger(__name__)
class account_analytic_invoice_line(osv.osv):
_name = "account.analytic.invoice.line"
def _amount_line(self, cr, uid, ids, prop, unknow_none, unknow_dict, context=None):
res = {}
for line in self.browse(cr, uid, ids, context=context):
res[line.id] = line.quantity * line.price_unit
if line.analytic_account_id.pricelist_id:
cur = line.analytic_account_id.pricelist_id.currency_id
res[line.id] = self.pool.get('res.currency').round(cr, uid, cur, res[line.id])
return res
_columns = {
'product_id': fields.many2one('product.product','Product',required=True),
'analytic_account_id': fields.many2one('account.analytic.account', 'Analytic Account'),
'name': fields.text('Description', required=True),
'quantity': fields.float('Quantity', required=True),
'uom_id': fields.many2one('product.uom', 'Unit of Measure',required=True),
'price_unit': fields.float('Unit Price', required=True),
'price_subtotal': fields.function(_amount_line, string='Sub Total', type="float",digits_compute= dp.get_precision('Account')),
}
_defaults = {
'quantity' : 1,
}
def product_id_change(self, cr, uid, ids, product, uom_id, qty=0, name='', partner_id=False, price_unit=False, pricelist_id=False, company_id=None, context=None):
context = context or {}
uom_obj = self.pool.get('product.uom')
company_id = company_id or False
context.update({'company_id': company_id, 'force_company': company_id, 'pricelist_id': pricelist_id})
if not product:
return {'value': {'price_unit': 0.0}, 'domain':{'product_uom':[]}}
if partner_id:
part = self.pool.get('res.partner').browse(cr, uid, partner_id, context=context)
if part.lang:
context.update({'lang': part.lang})
result = {}
res = self.pool.get('product.product').browse(cr, uid, product, context=context)
result.update({'name': name or res.description or False,'uom_id': uom_id or res.uom_id.id or False, 'price_unit': price_unit or res.list_price or 0.0})
res_final = {'value':result}
if result['uom_id'] != res.uom_id.id:
selected_uom = uom_obj.browse(cr, uid, result['uom_id'], context=context)
new_price = uom_obj._compute_price(cr, uid, res.uom_id.id, res_final['value']['price_unit'], result['uom_id'])
res_final['value']['price_unit'] = new_price
return res_final
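    # Sketch of the onchange payload (values are illustrative): selecting a
    # product whose default UoM differs from the uom_id passed in yields
    # something like
    #   {'value': {'name': 'Monthly support', 'uom_id': 1,
    #              'price_unit': 150.0}}
    # where price_unit has been rescaled by product.uom's _compute_price().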
class account_analytic_account(osv.osv):
_name = "account.analytic.account"
_inherit = "account.analytic.account"
def _analysis_all(self, cr, uid, ids, fields, arg, context=None):
dp = 2
res = dict([(i, {}) for i in ids])
        parent_ids = tuple(ids)  # We don't want consolidation for each of these fields because these complex computations are resource-greedy.
accounts = self.browse(cr, uid, ids, context=context)
for f in fields:
if f == 'user_ids':
cr.execute('SELECT MAX(id) FROM res_users')
max_user = cr.fetchone()[0]
if parent_ids:
cr.execute('SELECT DISTINCT("user") FROM account_analytic_analysis_summary_user ' \
'WHERE account_id IN %s AND unit_amount <> 0.0', (parent_ids,))
result = cr.fetchall()
else:
result = []
for id in ids:
res[id][f] = [int((id * max_user) + x[0]) for x in result]
elif f == 'month_ids':
if parent_ids:
cr.execute('SELECT DISTINCT(month_id) FROM account_analytic_analysis_summary_month ' \
'WHERE account_id IN %s AND unit_amount <> 0.0', (parent_ids,))
result = cr.fetchall()
else:
result = []
for id in ids:
res[id][f] = [int(id * 1000000 + int(x[0])) for x in result]
elif f == 'last_worked_invoiced_date':
for id in ids:
res[id][f] = False
if parent_ids:
cr.execute("SELECT account_analytic_line.account_id, MAX(date) \
FROM account_analytic_line \
WHERE account_id IN %s \
AND invoice_id IS NOT NULL \
GROUP BY account_analytic_line.account_id;", (parent_ids,))
for account_id, sum in cr.fetchall():
if account_id not in res:
res[account_id] = {}
res[account_id][f] = sum
elif f == 'ca_to_invoice':
for id in ids:
res[id][f] = 0.0
res2 = {}
for account in accounts:
cr.execute("""
SELECT product_id, sum(amount), user_id, to_invoice, sum(unit_amount), product_uom_id, line.name
FROM account_analytic_line line
LEFT JOIN account_analytic_journal journal ON (journal.id = line.journal_id)
WHERE account_id = %s
AND journal.type != 'purchase'
AND invoice_id IS NULL
AND to_invoice IS NOT NULL
GROUP BY product_id, user_id, to_invoice, product_uom_id, line.name""", (account.id,))
res[account.id][f] = 0.0
for product_id, price, user_id, factor_id, qty, uom, line_name in cr.fetchall():
price = -price
if product_id:
price = self.pool.get('account.analytic.line')._get_invoice_price(cr, uid, account, product_id, user_id, qty, context)
factor = self.pool.get('hr_timesheet_invoice.factor').browse(cr, uid, factor_id, context=context)
res[account.id][f] += price * qty * (100-factor.factor or 0.0) / 100.0
                # sum both results on account_id
for id in ids:
res[id][f] = round(res.get(id, {}).get(f, 0.0), dp) + round(res2.get(id, 0.0), 2)
elif f == 'last_invoice_date':
for id in ids:
res[id][f] = False
if parent_ids:
cr.execute ("SELECT account_analytic_line.account_id, \
DATE(MAX(account_invoice.date_invoice)) \
FROM account_analytic_line \
JOIN account_invoice \
ON account_analytic_line.invoice_id = account_invoice.id \
WHERE account_analytic_line.account_id IN %s \
AND account_analytic_line.invoice_id IS NOT NULL \
GROUP BY account_analytic_line.account_id",(parent_ids,))
for account_id, lid in cr.fetchall():
res[account_id][f] = lid
elif f == 'last_worked_date':
for id in ids:
res[id][f] = False
if parent_ids:
cr.execute("SELECT account_analytic_line.account_id, MAX(date) \
FROM account_analytic_line \
WHERE account_id IN %s \
AND invoice_id IS NULL \
GROUP BY account_analytic_line.account_id",(parent_ids,))
for account_id, lwd in cr.fetchall():
if account_id not in res:
res[account_id] = {}
res[account_id][f] = lwd
elif f == 'hours_qtt_non_invoiced':
for id in ids:
res[id][f] = 0.0
if parent_ids:
cr.execute("SELECT account_analytic_line.account_id, COALESCE(SUM(unit_amount), 0.0) \
FROM account_analytic_line \
JOIN account_analytic_journal \
ON account_analytic_line.journal_id = account_analytic_journal.id \
WHERE account_analytic_line.account_id IN %s \
AND account_analytic_journal.type='general' \
AND invoice_id IS NULL \
AND to_invoice IS NOT NULL \
GROUP BY account_analytic_line.account_id;",(parent_ids,))
for account_id, sua in cr.fetchall():
if account_id not in res:
res[account_id] = {}
res[account_id][f] = round(sua, dp)
for id in ids:
res[id][f] = round(res[id][f], dp)
elif f == 'hours_quantity':
for id in ids:
res[id][f] = 0.0
if parent_ids:
cr.execute("SELECT account_analytic_line.account_id, COALESCE(SUM(unit_amount), 0.0) \
FROM account_analytic_line \
JOIN account_analytic_journal \
ON account_analytic_line.journal_id = account_analytic_journal.id \
WHERE account_analytic_line.account_id IN %s \
AND account_analytic_journal.type='general' \
GROUP BY account_analytic_line.account_id",(parent_ids,))
ff = cr.fetchall()
for account_id, hq in ff:
if account_id not in res:
res[account_id] = {}
res[account_id][f] = round(hq, dp)
for id in ids:
res[id][f] = round(res[id][f], dp)
elif f == 'ca_theorical':
# TODO Take care of pricelist and purchase !
for id in ids:
res[id][f] = 0.0
            # Warning: this computation doesn't take care of pricelists!
            # It only considers the list_price.
if parent_ids:
cr.execute("""SELECT account_analytic_line.account_id AS account_id, \
COALESCE(SUM((account_analytic_line.unit_amount * pt.list_price) \
- (account_analytic_line.unit_amount * pt.list_price \
* hr.factor)), 0.0) AS somme
FROM account_analytic_line \
LEFT JOIN account_analytic_journal \
ON (account_analytic_line.journal_id = account_analytic_journal.id) \
JOIN product_product pp \
ON (account_analytic_line.product_id = pp.id) \
JOIN product_template pt \
ON (pp.product_tmpl_id = pt.id) \
JOIN account_analytic_account a \
ON (a.id=account_analytic_line.account_id) \
JOIN hr_timesheet_invoice_factor hr \
ON (hr.id=a.to_invoice) \
WHERE account_analytic_line.account_id IN %s \
AND a.to_invoice IS NOT NULL \
AND account_analytic_journal.type IN ('purchase', 'general')
GROUP BY account_analytic_line.account_id""",(parent_ids,))
for account_id, sum in cr.fetchall():
res[account_id][f] = round(sum, dp)
return res
def _ca_invoiced_calc(self, cr, uid, ids, name, arg, context=None):
res = {}
res_final = {}
        child_ids = tuple(ids)  # We don't want consolidation for each of these fields because these complex computations are resource-greedy.
for i in child_ids:
res[i] = 0.0
if not child_ids:
return res
if child_ids:
#Search all invoice lines not in cancelled state that refer to this analytic account
inv_line_obj = self.pool.get("account.invoice.line")
inv_lines = inv_line_obj.search(cr, uid, ['&', ('account_analytic_id', 'in', child_ids), ('invoice_id.state', '!=', 'cancel')], context=context)
for line in inv_line_obj.browse(cr, uid, inv_lines, context=context):
res[line.account_analytic_id.id] += line.price_subtotal
for acc in self.browse(cr, uid, res.keys(), context=context):
res[acc.id] = res[acc.id] - (acc.timesheet_ca_invoiced or 0.0)
res_final = res
return res_final
def _total_cost_calc(self, cr, uid, ids, name, arg, context=None):
res = {}
res_final = {}
        child_ids = tuple(ids)  # We don't want consolidation for each of these fields because these complex computations are resource-greedy.
for i in child_ids:
res[i] = 0.0
if not child_ids:
return res
if child_ids:
cr.execute("""SELECT account_analytic_line.account_id, COALESCE(SUM(amount), 0.0) \
FROM account_analytic_line \
JOIN account_analytic_journal \
ON account_analytic_line.journal_id = account_analytic_journal.id \
WHERE account_analytic_line.account_id IN %s \
AND amount<0 \
GROUP BY account_analytic_line.account_id""",(child_ids,))
for account_id, sum in cr.fetchall():
res[account_id] = round(sum,2)
res_final = res
return res_final
def _remaining_hours_calc(self, cr, uid, ids, name, arg, context=None):
res = {}
for account in self.browse(cr, uid, ids, context=context):
if account.quantity_max != 0:
res[account.id] = account.quantity_max - account.hours_quantity
else:
res[account.id] = 0.0
for id in ids:
res[id] = round(res.get(id, 0.0),2)
return res
def _remaining_hours_to_invoice_calc(self, cr, uid, ids, name, arg, context=None):
res = {}
for account in self.browse(cr, uid, ids, context=context):
res[account.id] = max(account.hours_qtt_est - account.timesheet_ca_invoiced, account.ca_to_invoice)
return res
def _hours_qtt_invoiced_calc(self, cr, uid, ids, name, arg, context=None):
res = {}
for account in self.browse(cr, uid, ids, context=context):
res[account.id] = account.hours_quantity - account.hours_qtt_non_invoiced
if res[account.id] < 0:
res[account.id] = 0.0
for id in ids:
res[id] = round(res.get(id, 0.0),2)
return res
def _revenue_per_hour_calc(self, cr, uid, ids, name, arg, context=None):
res = {}
for account in self.browse(cr, uid, ids, context=context):
if account.hours_qtt_invoiced == 0:
res[account.id]=0.0
else:
res[account.id] = account.ca_invoiced / account.hours_qtt_invoiced
for id in ids:
res[id] = round(res.get(id, 0.0),2)
return res
def _real_margin_rate_calc(self, cr, uid, ids, name, arg, context=None):
res = {}
for account in self.browse(cr, uid, ids, context=context):
if account.ca_invoiced == 0:
res[account.id]=0.0
elif account.total_cost != 0.0:
res[account.id] = -(account.real_margin / account.total_cost) * 100
else:
res[account.id] = 0.0
for id in ids:
res[id] = round(res.get(id, 0.0),2)
return res
def _fix_price_to_invoice_calc(self, cr, uid, ids, name, arg, context=None):
sale_obj = self.pool.get('sale.order')
res = {}
for account in self.browse(cr, uid, ids, context=context):
res[account.id] = 0.0
sale_ids = sale_obj.search(cr, uid, [('project_id','=', account.id), ('state', '=', 'manual')], context=context)
for sale in sale_obj.browse(cr, uid, sale_ids, context=context):
res[account.id] += sale.amount_untaxed
for invoice in sale.invoice_ids:
if invoice.state != 'cancel':
res[account.id] -= invoice.amount_untaxed
return res
def _timesheet_ca_invoiced_calc(self, cr, uid, ids, name, arg, context=None):
lines_obj = self.pool.get('account.analytic.line')
res = {}
inv_ids = []
for account in self.browse(cr, uid, ids, context=context):
res[account.id] = 0.0
line_ids = lines_obj.search(cr, uid, [('account_id','=', account.id), ('invoice_id','!=',False), ('to_invoice','!=', False), ('journal_id.type', '=', 'general')], context=context)
for line in lines_obj.browse(cr, uid, line_ids, context=context):
if line.invoice_id not in inv_ids:
inv_ids.append(line.invoice_id)
res[account.id] += line.invoice_id.amount_untaxed
return res
def _remaining_ca_calc(self, cr, uid, ids, name, arg, context=None):
res = {}
for account in self.browse(cr, uid, ids, context=context):
res[account.id] = max(account.amount_max - account.ca_invoiced, account.fix_price_to_invoice)
return res
def _real_margin_calc(self, cr, uid, ids, name, arg, context=None):
res = {}
for account in self.browse(cr, uid, ids, context=context):
res[account.id] = account.ca_invoiced + account.total_cost
for id in ids:
res[id] = round(res.get(id, 0.0),2)
return res
def _theorical_margin_calc(self, cr, uid, ids, name, arg, context=None):
res = {}
for account in self.browse(cr, uid, ids, context=context):
res[account.id] = account.ca_theorical + account.total_cost
for id in ids:
res[id] = round(res.get(id, 0.0),2)
return res
def _is_overdue_quantity(self, cr, uid, ids, fieldnames, args, context=None):
result = dict.fromkeys(ids, 0)
for record in self.browse(cr, uid, ids, context=context):
if record.quantity_max > 0.0:
result[record.id] = int(record.hours_quantity >= record.quantity_max)
else:
result[record.id] = 0
return result
def _get_analytic_account(self, cr, uid, ids, context=None):
result = set()
for line in self.pool.get('account.analytic.line').browse(cr, uid, ids, context=context):
result.add(line.account_id.id)
return list(result)
def _get_total_estimation(self, account):
tot_est = 0.0
if account.fix_price_invoices:
tot_est += account.amount_max
if account.invoice_on_timesheets:
tot_est += account.hours_qtt_est
return tot_est
def _get_total_invoiced(self, account):
total_invoiced = 0.0
if account.fix_price_invoices:
total_invoiced += account.ca_invoiced
if account.invoice_on_timesheets:
total_invoiced += account.timesheet_ca_invoiced
return total_invoiced
def _get_total_remaining(self, account):
total_remaining = 0.0
if account.fix_price_invoices:
total_remaining += account.remaining_ca
if account.invoice_on_timesheets:
total_remaining += account.remaining_hours_to_invoice
return total_remaining
def _get_total_toinvoice(self, account):
total_toinvoice = 0.0
if account.fix_price_invoices:
total_toinvoice += account.fix_price_to_invoice
if account.invoice_on_timesheets:
total_toinvoice += account.ca_to_invoice
return total_toinvoice
def _sum_of_fields(self, cr, uid, ids, name, arg, context=None):
res = dict([(i, {}) for i in ids])
for account in self.browse(cr, uid, ids, context=context):
res[account.id]['est_total'] = self._get_total_estimation(account)
res[account.id]['invoiced_total'] = self._get_total_invoiced(account)
res[account.id]['remaining_total'] = self._get_total_remaining(account)
res[account.id]['toinvoice_total'] = self._get_total_toinvoice(account)
return res
_columns = {
'is_overdue_quantity' : fields.function(_is_overdue_quantity, method=True, type='boolean', string='Overdue Quantity',
store={
'account.analytic.line' : (_get_analytic_account, None, 20),
'account.analytic.account': (lambda self, cr, uid, ids, c=None: ids, ['quantity_max'], 10),
}),
'ca_invoiced': fields.function(_ca_invoiced_calc, type='float', string='Invoiced Amount',
help="Total customer invoiced amount for this account.",
digits_compute=dp.get_precision('Account')),
'total_cost': fields.function(_total_cost_calc, type='float', string='Total Costs',
help="Total of costs for this account. It includes real costs (from invoices) and indirect costs, like time spent on timesheets.",
digits_compute=dp.get_precision('Account')),
        'ca_to_invoice': fields.function(_analysis_all, multi='analytic_analysis', type='float', string='Uninvoiced Amount',
            help="If invoicing from the analytic account, the remaining amount you can invoice to the customer based on the total costs.",
digits_compute=dp.get_precision('Account')),
'ca_theorical': fields.function(_analysis_all, multi='analytic_analysis', type='float', string='Theoretical Revenue',
help="Based on the costs you had on the project, what would have been the revenue if all these costs have been invoiced at the normal sale price provided by the pricelist.",
digits_compute=dp.get_precision('Account')),
        'hours_quantity': fields.function(_analysis_all, multi='analytic_analysis', type='float', string='Total Worked Time',
            help="Amount of time spent on the analytic account (from timesheets). It computes quantities on all journals of type 'general'."),
        'last_invoice_date': fields.function(_analysis_all, multi='analytic_analysis', type='date', string='Last Invoice Date',
            help="If invoicing from the costs, this is the date of the latest invoice."),
        'last_worked_invoiced_date': fields.function(_analysis_all, multi='analytic_analysis', type='date', string='Date of Last Invoiced Cost',
            help="If invoicing from the costs, this is the date of the latest work or cost that has been invoiced."),
'last_worked_date': fields.function(_analysis_all, multi='analytic_analysis', type='date', string='Date of Last Cost/Work',
help="Date of the latest work done on this account."),
        'hours_qtt_non_invoiced': fields.function(_analysis_all, multi='analytic_analysis', type='float', string='Uninvoiced Time',
            help="Amount of time (hours/days) (from journals of type 'general') that can be invoiced if you invoice based on the analytic account."),
        'hours_qtt_invoiced': fields.function(_hours_qtt_invoiced_calc, type='float', string='Invoiced Time',
            help="Amount of time (hours/days) that can be invoiced plus the time that has already been invoiced."),
'remaining_hours': fields.function(_remaining_hours_calc, type='float', string='Remaining Time',
help="Computed using the formula: Maximum Time - Total Worked Time"),
'remaining_hours_to_invoice': fields.function(_remaining_hours_to_invoice_calc, type='float', string='Remaining Time',
help="Computed using the formula: Expected on timesheets - Total invoiced on timesheets"),
        'fix_price_to_invoice': fields.function(_fix_price_to_invoice_calc, type='float', string='Remaining Amount to Invoice (Fixed Price)',
            help="Sum of quotations for this contract."),
        'timesheet_ca_invoiced': fields.function(_timesheet_ca_invoiced_calc, type='float', string='Invoiced Amount (Timesheets)',
            help="Sum of timesheet lines invoiced for this contract."),
'remaining_ca': fields.function(_remaining_ca_calc, type='float', string='Remaining Revenue',
help="Computed using the formula: Max Invoice Price - Invoiced Amount.",
digits_compute=dp.get_precision('Account')),
'revenue_per_hour': fields.function(_revenue_per_hour_calc, type='float', string='Revenue per Time (real)',
help="Computed using the formula: Invoiced Amount / Total Time",
digits_compute=dp.get_precision('Account')),
'real_margin': fields.function(_real_margin_calc, type='float', string='Real Margin',
help="Computed using the formula: Invoiced Amount - Total Costs.",
digits_compute=dp.get_precision('Account')),
'theorical_margin': fields.function(_theorical_margin_calc, type='float', string='Theoretical Margin',
help="Computed using the formula: Theoretical Revenue - Total Costs",
digits_compute=dp.get_precision('Account')),
'real_margin_rate': fields.function(_real_margin_rate_calc, type='float', string='Real Margin Rate (%)',
help="Computes using the formula: (Real Margin / Total Costs) * 100.",
digits_compute=dp.get_precision('Account')),
'fix_price_invoices' : fields.boolean('Fixed Price'),
'invoice_on_timesheets' : fields.boolean("On Timesheets"),
'month_ids': fields.function(_analysis_all, multi='analytic_analysis', type='many2many', relation='account_analytic_analysis.summary.month', string='Month'),
'user_ids': fields.function(_analysis_all, multi='analytic_analysis', type="many2many", relation='account_analytic_analysis.summary.user', string='User'),
'hours_qtt_est': fields.float('Estimation of Hours to Invoice'),
'est_total' : fields.function(_sum_of_fields, type="float",multi="sum_of_all", string="Total Estimation"),
'invoiced_total' : fields.function(_sum_of_fields, type="float",multi="sum_of_all", string="Total Invoiced"),
'remaining_total' : fields.function(_sum_of_fields, type="float",multi="sum_of_all", string="Total Remaining", help="Expectation of remaining income for this contract. Computed as the sum of remaining subtotals which, in turn, are computed as the maximum between '(Estimation - Invoiced)' and 'To Invoice' amounts"),
'toinvoice_total' : fields.function(_sum_of_fields, type="float",multi="sum_of_all", string="Total to Invoice", help=" Sum of everything that could be invoiced for this contract."),
'recurring_invoice_line_ids': fields.one2many('account.analytic.invoice.line', 'analytic_account_id', 'Invoice Lines'),
'recurring_invoices' : fields.boolean('Generate recurring invoices automatically'),
'recurring_rule_type': fields.selection([
('daily', 'Day(s)'),
('weekly', 'Week(s)'),
('monthly', 'Month(s)'),
('yearly', 'Year(s)'),
        ], 'Recurrency', help="Invoices automatically repeat at the specified interval"),
'recurring_interval': fields.integer('Repeat Every', help="Repeat every (Days/Week/Month/Year)"),
'recurring_next_date': fields.date('Date of Next Invoice'),
}
_defaults = {
'recurring_interval': 1,
'recurring_next_date': lambda *a: time.strftime('%Y-%m-%d'),
'recurring_rule_type':'monthly'
}
def open_sale_order_lines(self,cr,uid,ids,context=None):
if context is None:
context = {}
sale_ids = self.pool.get('sale.order').search(cr,uid,[('project_id','=',context.get('search_default_project_id',False)),('partner_id','in',context.get('search_default_partner_id',False))])
names = [record.name for record in self.browse(cr, uid, ids, context=context)]
name = _('Sales Order Lines to Invoice of %s') % ','.join(names)
return {
'type': 'ir.actions.act_window',
'name': name,
'view_type': 'form',
'view_mode': 'tree,form',
'context': context,
'domain' : [('order_id','in',sale_ids)],
'res_model': 'sale.order.line',
'nodestroy': True,
}
def on_change_template(self, cr, uid, ids, template_id, date_start=False, fix_price_invoices=False, invoice_on_timesheets=False, recurring_invoices=False, context=None):
if not template_id:
return {}
obj_analytic_line = self.pool.get('account.analytic.invoice.line')
res = super(account_analytic_account, self).on_change_template(cr, uid, ids, template_id, date_start=date_start, context=context)
template = self.browse(cr, uid, template_id, context=context)
if not fix_price_invoices:
res['value']['fix_price_invoices'] = template.fix_price_invoices
res['value']['amount_max'] = template.amount_max
if not invoice_on_timesheets:
res['value']['invoice_on_timesheets'] = template.invoice_on_timesheets
res['value']['hours_qtt_est'] = template.hours_qtt_est
if template.to_invoice.id:
res['value']['to_invoice'] = template.to_invoice.id
if template.pricelist_id.id:
res['value']['pricelist_id'] = template.pricelist_id.id
if not recurring_invoices:
invoice_line_ids = []
for x in template.recurring_invoice_line_ids:
invoice_line_ids.append((0, 0, {
'product_id': x.product_id.id,
'uom_id': x.uom_id.id,
'name': x.name,
'quantity': x.quantity,
'price_unit': x.price_unit,
'analytic_account_id': x.analytic_account_id and x.analytic_account_id.id or False,
}))
res['value']['recurring_invoices'] = template.recurring_invoices
res['value']['recurring_interval'] = template.recurring_interval
res['value']['recurring_rule_type'] = template.recurring_rule_type
res['value']['recurring_invoice_line_ids'] = invoice_line_ids
return res
def onchange_recurring_invoices(self, cr, uid, ids, recurring_invoices, date_start=False, context=None):
value = {}
if date_start and recurring_invoices:
value = {'value': {'recurring_next_date': date_start}}
return value
def cron_account_analytic_account(self, cr, uid, context=None):
if context is None:
context = {}
remind = {}
def fill_remind(key, domain, write_pending=False):
base_domain = [
('type', '=', 'contract'),
('partner_id', '!=', False),
('manager_id', '!=', False),
('manager_id.email', '!=', False),
]
base_domain.extend(domain)
accounts_ids = self.search(cr, uid, base_domain, context=context, order='name asc')
accounts = self.browse(cr, uid, accounts_ids, context=context)
for account in accounts:
if write_pending:
account.write({'state' : 'pending'}, context=context)
remind_user = remind.setdefault(account.manager_id.id, {})
remind_type = remind_user.setdefault(key, {})
                remind_type.setdefault(account.partner_id, []).append(account)
# Already expired
fill_remind("old", [('state', 'in', ['pending'])])
# Expires now
fill_remind("new", [('state', 'in', ['draft', 'open']), '|', '&', ('date', '!=', False), ('date', '<=', time.strftime('%Y-%m-%d')), ('is_overdue_quantity', '=', True)], True)
# Expires in less than 30 days
fill_remind("future", [('state', 'in', ['draft', 'open']), ('date', '!=', False), ('date', '<', (datetime.datetime.now() + datetime.timedelta(30)).strftime("%Y-%m-%d"))])
context['base_url'] = self.pool.get('ir.config_parameter').get_param(cr, uid, 'web.base.url')
context['action_id'] = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'account_analytic_analysis', 'action_account_analytic_overdue_all')[1]
template_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'account_analytic_analysis', 'account_analytic_cron_email_template')[1]
for user_id, data in remind.items():
context["data"] = data
_logger.debug("Sending reminder to uid %s", user_id)
self.pool.get('email.template').send_mail(cr, uid, template_id, user_id, force_send=True, context=context)
return True
def onchange_invoice_on_timesheets(self, cr, uid, ids, invoice_on_timesheets, context=None):
if not invoice_on_timesheets:
return {'value': {'to_invoice': False}}
result = {'value': {'use_timesheets': True}}
try:
to_invoice = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'hr_timesheet_invoice', 'timesheet_invoice_factor1')
result['value']['to_invoice'] = to_invoice[1]
except ValueError:
pass
return result
def hr_to_invoice_timesheets(self, cr, uid, ids, context=None):
domain = [('invoice_id','=',False),('to_invoice','!=',False), ('journal_id.type', '=', 'general'), ('account_id', 'in', ids)]
names = [record.name for record in self.browse(cr, uid, ids, context=context)]
name = _('Timesheets to Invoice of %s') % ','.join(names)
return {
'type': 'ir.actions.act_window',
'name': name,
'view_type': 'form',
'view_mode': 'tree,form',
'domain' : domain,
'res_model': 'account.analytic.line',
'nodestroy': True,
}
def _prepare_invoice(self, cr, uid, contract, context=None):
context = context or {}
inv_obj = self.pool.get('account.invoice')
journal_obj = self.pool.get('account.journal')
fpos_obj = self.pool.get('account.fiscal.position')
if not contract.partner_id:
raise osv.except_osv(_('No Customer Defined!'),_("You must first select a Customer for Contract %s!") % contract.name )
fpos = contract.partner_id.property_account_position or False
journal_ids = journal_obj.search(cr, uid, [('type', '=','sale'),('company_id', '=', contract.company_id.id or False)], limit=1)
if not journal_ids:
raise osv.except_osv(_('Error!'),
_('Please define a sale journal for the company "%s".') % (contract.company_id.name or '', ))
partner_payment_term = contract.partner_id.property_payment_term and contract.partner_id.property_payment_term.id or False
currency_id = False
if contract.pricelist_id:
currency_id = contract.pricelist_id.currency_id.id
elif contract.partner_id.property_product_pricelist:
currency_id = contract.partner_id.property_product_pricelist.currency_id.id
elif contract.company_id:
currency_id = contract.company_id.currency_id.id
inv_data = {
'reference': contract.code or False,
'account_id': contract.partner_id.property_account_receivable.id,
'type': 'out_invoice',
'partner_id': contract.partner_id.id,
'currency_id': currency_id,
'journal_id': len(journal_ids) and journal_ids[0] or False,
'date_invoice': contract.recurring_next_date,
'origin': contract.name,
'fiscal_position': fpos and fpos.id,
'payment_term': partner_payment_term,
'company_id': contract.company_id.id or False,
}
invoice_id = inv_obj.create(cr, uid, inv_data, context=context)
for line in contract.recurring_invoice_line_ids:
res = line.product_id
account_id = res.property_account_income.id
if not account_id:
account_id = res.categ_id.property_account_income_categ.id
account_id = fpos_obj.map_account(cr, uid, fpos, account_id)
taxes = res.taxes_id or False
tax_id = fpos_obj.map_tax(cr, uid, fpos, taxes)
invoice_line_vals = {
'name': line.name,
'account_id': account_id,
'account_analytic_id': contract.id,
'price_unit': line.price_unit or 0.0,
'quantity': line.quantity,
'uos_id': line.uom_id.id or False,
'product_id': line.product_id.id or False,
'invoice_id' : invoice_id,
'invoice_line_tax_id': [(6, 0, tax_id)],
}
self.pool.get('account.invoice.line').create(cr, uid, invoice_line_vals, context=context)
inv_obj.button_compute(cr, uid, [invoice_id], context=context)
return invoice_id
def recurring_create_invoice(self, cr, uid, ids, context=None):
return self._recurring_create_invoice(cr, uid, ids, context=context)
def _cron_recurring_create_invoice(self, cr, uid, context=None):
return self._recurring_create_invoice(cr, uid, [], automatic=True, context=context)
def _recurring_create_invoice(self, cr, uid, ids, automatic=False, context=None):
context = context or {}
current_date = time.strftime('%Y-%m-%d')
if ids:
contract_ids = ids
else:
contract_ids = self.search(cr, uid, [('recurring_next_date','<=', current_date), ('state','=', 'open'), ('recurring_invoices','=', True), ('type', '=', 'contract')])
for contract in self.browse(cr, uid, contract_ids, context=context):
try:
invoice_id = self._prepare_invoice(cr, uid, contract, context=context)
next_date = datetime.datetime.strptime(contract.recurring_next_date or current_date, "%Y-%m-%d")
interval = contract.recurring_interval
                if contract.recurring_rule_type == 'daily':
                    new_date = next_date+relativedelta(days=+interval)
                elif contract.recurring_rule_type == 'weekly':
                    new_date = next_date+relativedelta(weeks=+interval)
                elif contract.recurring_rule_type == 'monthly':
                    new_date = next_date+relativedelta(months=+interval)
                else:
                    # 'yearly' is part of the selection field, so it must not
                    # fall through to the monthly case.
                    new_date = next_date+relativedelta(years=+interval)
self.write(cr, uid, [contract.id], {'recurring_next_date': new_date.strftime('%Y-%m-%d')}, context=context)
if automatic:
cr.commit()
except Exception:
if automatic:
cr.rollback()
_logger.error(traceback.format_exc())
else:
raise
return True
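    # Worked example (illustrative): with recurring_rule_type='monthly' and
    # recurring_interval=3, a contract whose recurring_next_date is
    # 2013-01-31 is re-scheduled to 2013-04-30 -- relativedelta clamps to
    # the last valid day of the target month, so no invalid dates are
    # produced.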
class account_analytic_account_summary_user(osv.osv):
_name = "account_analytic_analysis.summary.user"
_description = "Hours Summary by User"
_order='user'
_auto = False
_rec_name = 'user'
def _unit_amount(self, cr, uid, ids, name, arg, context=None):
res = {}
account_obj = self.pool.get('account.analytic.account')
cr.execute('SELECT MAX(id) FROM res_users')
max_user = cr.fetchone()[0]
account_ids = [int(str(x/max_user - (x%max_user == 0 and 1 or 0))) for x in ids]
user_ids = [int(str(x-((x/max_user - (x%max_user == 0 and 1 or 0)) *max_user))) for x in ids]
        parent_ids = tuple(account_ids)  # We don't want consolidation for each of these fields because these complex computations are resource-greedy.
if parent_ids:
cr.execute('SELECT id, unit_amount ' \
'FROM account_analytic_analysis_summary_user ' \
'WHERE account_id IN %s ' \
'AND "user" IN %s',(parent_ids, tuple(user_ids),))
for sum_id, unit_amount in cr.fetchall():
res[sum_id] = unit_amount
for id in ids:
res[id] = round(res.get(id, 0.0), 2)
return res
_columns = {
'account_id': fields.many2one('account.analytic.account', 'Analytic Account', readonly=True),
'unit_amount': fields.float('Total Time'),
'user': fields.many2one('res.users', 'User'),
}
def init(self, cr):
openerp.tools.sql.drop_view_if_exists(cr, 'account_analytic_analysis_summary_user')
cr.execute('''CREATE OR REPLACE VIEW account_analytic_analysis_summary_user AS (
with mu as
(select max(id) as max_user from res_users)
, lu AS
(SELECT
l.account_id AS account_id,
coalesce(l.user_id, 0) AS user_id,
SUM(l.unit_amount) AS unit_amount
FROM account_analytic_line AS l,
account_analytic_journal AS j
WHERE (j.type = 'general' ) and (j.id=l.journal_id)
GROUP BY l.account_id, l.user_id
)
select (lu.account_id * mu.max_user) + lu.user_id as id,
lu.account_id as account_id,
lu.user_id as "user",
unit_amount
from lu, mu)''')
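        # Worked example (illustrative): with max_user = 50, account_id = 3
        # and user_id = 4 encode to id = 3 * 50 + 4 = 154; _unit_amount()
        # above reverses this split to recover (account_id, user_id).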
class account_analytic_account_summary_month(osv.osv):
_name = "account_analytic_analysis.summary.month"
_description = "Hours summary by month"
_auto = False
_rec_name = 'month'
_columns = {
'account_id': fields.many2one('account.analytic.account', 'Analytic Account', readonly=True),
'unit_amount': fields.float('Total Time'),
'month': fields.char('Month', size=32, readonly=True),
}
def init(self, cr):
openerp.tools.sql.drop_view_if_exists(cr, 'account_analytic_analysis_summary_month')
cr.execute('CREATE VIEW account_analytic_analysis_summary_month AS (' \
'SELECT ' \
'(TO_NUMBER(TO_CHAR(d.month, \'YYYYMM\'), \'999999\') + (d.account_id * 1000000::bigint))::bigint AS id, ' \
'd.account_id AS account_id, ' \
'TO_CHAR(d.month, \'Mon YYYY\') AS month, ' \
'TO_NUMBER(TO_CHAR(d.month, \'YYYYMM\'), \'999999\') AS month_id, ' \
'COALESCE(SUM(l.unit_amount), 0.0) AS unit_amount ' \
'FROM ' \
'(SELECT ' \
'd2.account_id, ' \
'd2.month ' \
'FROM ' \
'(SELECT ' \
'a.id AS account_id, ' \
'l.month AS month ' \
'FROM ' \
'(SELECT ' \
'DATE_TRUNC(\'month\', l.date) AS month ' \
'FROM account_analytic_line AS l, ' \
'account_analytic_journal AS j ' \
'WHERE j.type = \'general\' ' \
'GROUP BY DATE_TRUNC(\'month\', l.date) ' \
') AS l, ' \
'account_analytic_account AS a ' \
'GROUP BY l.month, a.id ' \
') AS d2 ' \
'GROUP BY d2.account_id, d2.month ' \
') AS d ' \
'LEFT JOIN ' \
'(SELECT ' \
'l.account_id AS account_id, ' \
'DATE_TRUNC(\'month\', l.date) AS month, ' \
'SUM(l.unit_amount) AS unit_amount ' \
'FROM account_analytic_line AS l, ' \
'account_analytic_journal AS j ' \
'WHERE (j.type = \'general\') and (j.id=l.journal_id) ' \
'GROUP BY l.account_id, DATE_TRUNC(\'month\', l.date) ' \
') AS l '
'ON (' \
'd.account_id = l.account_id ' \
'AND d.month = l.month' \
') ' \
'GROUP BY d.month, d.account_id ' \
')')
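        # Worked example (illustrative): for account_id = 7 and the month
        # 2013-03, the synthetic id is 7 * 1000000 + 201303 = 7201303, the
        # same encoding _analysis_all reconstructs via
        # int(id * 1000000 + month_id).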
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| gpl-2.0 |
mhrivnak/pulp | server/test/unit/plugins/test_loader.py | 15 | 11811 | import atexit
import os
import shutil
import string
import sys
import tempfile
import traceback
import mock
from .. import base
from pulp.plugins.cataloger import Cataloger
from pulp.plugins.distributor import Distributor
from pulp.plugins.importer import Importer
from pulp.plugins.loader import exceptions, loading, manager
_generated_paths = []
def _delete_generated_paths():
for p in _generated_paths:
if p in sys.path:
sys.path.remove(p)
shutil.rmtree(p)
atexit.register(_delete_generated_paths)
# test file(s) generation
def gen_plugin_root():
path = tempfile.mkdtemp()
sys.path.insert(0, path)
_generated_paths.append(path)
return path
_PLUGIN_TEMPLATE = string.Template('''
from pulp.plugins.$BASE_NAME import $BASE_TITLE
class $PLUGIN_TITLE($BASE_TITLE):
@classmethod
def metadata(cls):
data = {'id': '$PLUGIN_NAME',
'types': $TYPE_LIST}
return data
''')
_MULTI_PLUGIN_TEMPLATE = string.Template('''
from pulp.plugins.$BASE_NAME import $BASE_TITLE
class Plugin1($BASE_TITLE):
@classmethod
def metadata(cls):
data = {'id': 'plugin1',
'types': $TYPE_LIST}
return data
class Plugin2($BASE_TITLE):
@classmethod
def metadata(cls):
data = {'id': 'plugin2',
'types': $TYPE_LIST}
return data
class Plugin3($BASE_TITLE):
@classmethod
def metadata(cls):
data = {'id': 'plugin3',
'types': $TYPE_LIST}
return data
# Should not be loaded as a valid plugin since it starts with _
class _BasePlugin($BASE_TITLE):
@classmethod
def metadata(cls):
data = {'id': 'base_plugin',
'types': $TYPE_LIST}
return data
''')
_CONF_TEMPLATE = string.Template('''
{"enabled": $ENABLED}
''')
_INVALID_CONF_TEMPLATE = string.Template('Not real JSON')
def gen_plugin(root, type_, name, types, enabled=True, conf_template=_CONF_TEMPLATE):
base_name = type_.lower()
base_title = type_.title()
plugin_name = name.lower()
plugin_title = name
type_list = '[%s]' % ', '.join('\'%s\'' % t for t in types)
# create the directory
plugin_dir = os.path.join(root, '%ss' % base_name, plugin_name)
os.makedirs(plugin_dir)
# write the package module
pck_name = os.path.join(plugin_dir, '__init__.py')
handle = open(pck_name, 'w')
handle.write('\n')
handle.close()
# write the plugin module
contents = _PLUGIN_TEMPLATE.safe_substitute({'BASE_NAME': base_name,
'BASE_TITLE': base_title,
'PLUGIN_TITLE': plugin_title,
'PLUGIN_NAME': plugin_name,
'TYPE_LIST': type_list})
mod_name = os.path.join(plugin_dir, '%s.py' % base_name)
handle = open(mod_name, 'w')
handle.write(contents)
handle.close()
# write plugin config
contents = conf_template.safe_substitute({'ENABLED': str(enabled).lower()})
cfg_name = os.path.join(plugin_dir, '%s.conf' % plugin_name)
handle = open(cfg_name, 'w')
handle.write(contents)
handle.close()
# return the top level directory
return os.path.join(root, '%ss' % base_name)
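# For example (illustrative names), gen_plugin(root, 'importer', 'MyImporter',
# ['rpm']) creates:
#   <root>/importers/myimporter/__init__.py
#   <root>/importers/myimporter/importer.py      (defines class MyImporter)
#   <root>/importers/myimporter/myimporter.conf  ('{"enabled": true}')
# and returns <root>/importers, ready to be placed on the plugin search path.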
def gen_multi_plugin(root, type_, name, types, enabled=True):
base_name = type_.lower()
base_title = type_.title()
plugin_name = name.lower()
type_list = '[%s]' % ', '.join('\'%s\'' % t for t in types)
# create the directory
plugin_dir = os.path.join(root, '%ss' % base_name, plugin_name)
os.makedirs(plugin_dir)
# write the package module
pck_name = os.path.join(plugin_dir, '__init__.py')
handle = open(pck_name, 'w')
handle.write('\n')
handle.close()
# write the plugin module
contents = _MULTI_PLUGIN_TEMPLATE.safe_substitute(
{'BASE_NAME': base_name, 'BASE_TITLE': base_title, 'TYPE_LIST': type_list})
mod_name = os.path.join(plugin_dir, '%s.py' % base_name)
handle = open(mod_name, 'w')
handle.write(contents)
handle.close()
# write plugin config
contents = _CONF_TEMPLATE.safe_substitute({'ENABLED': str(enabled).lower()})
cfg_name = os.path.join(plugin_dir, '%s.conf' % plugin_name)
handle = open(cfg_name, 'w')
handle.write(contents)
handle.close()
# return the top level directory
return os.path.join(root, '%ss' % base_name)
# test classes
class WebDistributor(Distributor):
@classmethod
def metadata(cls):
return {'types': ['http', 'https']}
class ExcellentImporter(Importer):
@classmethod
def metadata(cls):
return {'types': ['excellent_type']}
class BogusImporter(Importer):
@classmethod
def metadata(cls):
return {'types': ['excellent_type']}
class GoodProfiler(Importer):
@classmethod
def metadata(cls):
return {'types': ['good_type']}
class TestCataloger(Cataloger):
@classmethod
def metadata(cls):
return {'types': ['good_type']}
class PluginMapTests(base.PulpServerTests):
def setUp(self):
super(PluginMapTests, self).setUp()
self.plugin_map = manager._PluginMap()
def test_add_plugin(self):
name = 'excellent'
types = ExcellentImporter.metadata()['types']
self.plugin_map.add_plugin(name, ExcellentImporter, {}, types)
self.assertTrue(name in self.plugin_map.configs)
self.assertTrue(name in self.plugin_map.plugins)
def test_add_disabled(self):
name = 'disabled'
cfg = {'enabled': False}
self.plugin_map.add_plugin(name, BogusImporter, cfg)
self.assertFalse(name in self.plugin_map.configs)
self.assertFalse(name in self.plugin_map.plugins)
self.assertFalse(name in self.plugin_map.types)
def test_conflicting_names(self):
name = 'less_excellent'
types = ExcellentImporter.metadata()['types']
self.plugin_map.add_plugin(name, ExcellentImporter, {}, types)
self.assertRaises(exceptions.ConflictingPluginName,
self.plugin_map.add_plugin,
name, BogusImporter, {}, types)
def test_get_plugin_by_name(self):
name = 'excellent'
self.plugin_map.add_plugin(name, ExcellentImporter, {})
cls = self.plugin_map.get_plugin_by_id(name)[0]
self.assertTrue(cls is ExcellentImporter)
def test_get_plugin_by_type(self):
types = ExcellentImporter.metadata()['types']
self.plugin_map.add_plugin('excellent', ExcellentImporter, {}, types)
id = self.plugin_map.get_plugin_ids_by_type(types[0])[0]
self.assertEqual(id, 'excellent')
def test_name_not_found(self):
self.assertRaises(exceptions.PluginNotFound,
self.plugin_map.get_plugin_by_id,
'bogus')
def test_type_not_found(self):
self.assertRaises(exceptions.PluginNotFound,
self.plugin_map.get_plugin_ids_by_type,
'bogus_type')
def test_remove_plugin(self):
name = 'excellent'
self.plugin_map.add_plugin(name, ExcellentImporter, {})
self.assertTrue(name in self.plugin_map.plugins)
self.plugin_map.remove_plugin(name)
self.assertFalse(name in self.plugin_map.plugins)
class LoaderInstanceTest(base.PulpServerTests):
def test_loader_instantiation(self):
try:
manager.PluginManager()
        except Exception as e:
self.fail('\n'.join((repr(e), traceback.format_exc())))
class LoaderTest(base.PulpServerTests):
def setUp(self):
super(LoaderTest, self).setUp()
self.loader = manager.PluginManager()
def tearDown(self):
super(LoaderTest, self).tearDown()
self.loader = None
class LoaderDirectOperationsTests(LoaderTest):
def test_distributor(self):
name = 'spidey'
types = WebDistributor.metadata()['types']
self.loader.distributors.add_plugin(name, WebDistributor, {}, types)
cls = self.loader.distributors.get_plugin_by_id(name)[0]
self.assertTrue(cls is WebDistributor)
cls = self.loader.distributors.get_plugins_by_type(types[0])[0][0]
self.assertTrue(cls is WebDistributor)
cls = self.loader.distributors.get_plugins_by_type(types[1])[0][0]
self.assertTrue(cls is WebDistributor)
distributors = self.loader.distributors.get_loaded_plugins()
self.assertTrue(name in distributors)
self.loader.distributors.remove_plugin(name)
self.assertRaises(exceptions.PluginNotFound,
self.loader.distributors.get_plugin_by_id,
name)
def test_importer(self):
name = 'bill'
types = ExcellentImporter.metadata()['types']
self.loader.importers.add_plugin(name, ExcellentImporter, {}, types)
cls = self.loader.importers.get_plugin_by_id(name)[0]
self.assertTrue(cls is ExcellentImporter)
cls = self.loader.importers.get_plugins_by_type(types[0])[0][0]
self.assertTrue(cls is ExcellentImporter)
importers = self.loader.importers.get_loaded_plugins()
self.assertTrue(name in importers)
self.loader.importers.remove_plugin(name)
self.assertRaises(exceptions.PluginNotFound,
self.loader.importers.get_plugin_by_id,
name)
def test_profiler(self):
name = 'elmer'
types = GoodProfiler.metadata()['types']
self.loader.profilers.add_plugin(name, GoodProfiler, {}, types)
cls = self.loader.profilers.get_plugin_by_id(name)[0]
self.assertTrue(cls is GoodProfiler)
cls = self.loader.profilers.get_plugins_by_type(types[0])[0][0]
self.assertTrue(cls is GoodProfiler)
profilers = self.loader.profilers.get_loaded_plugins()
self.assertTrue(name in profilers)
self.loader.profilers.remove_plugin(name)
self.assertRaises(exceptions.PluginNotFound,
self.loader.profilers.get_plugin_by_id,
name)
def test_cataloger(self):
name = 'elmer'
types = TestCataloger.metadata()['types']
self.loader.catalogers.add_plugin(name, TestCataloger, {}, types)
cls = self.loader.catalogers.get_plugin_by_id(name)[0]
self.assertTrue(cls is TestCataloger)
cls = self.loader.catalogers.get_plugins_by_type(types[0])[0][0]
self.assertTrue(cls is TestCataloger)
catalogers = self.loader.catalogers.get_loaded_plugins()
self.assertTrue(name in catalogers)
self.loader.catalogers.remove_plugin(name)
self.assertRaises(exceptions.PluginNotFound,
self.loader.catalogers.get_plugin_by_id,
name)
cataloger = Cataloger()
self.assertRaises(NotImplementedError, cataloger.refresh, None, None, None)

class TestPluginLoader(base.PulpServerTests):
@mock.patch('pulp.plugins.loader.loading.add_plugin_to_map', autospec=True)
@mock.patch('pkg_resources.iter_entry_points', autospec=True)
def test_load_entry_points(self, mock_iter, mock_add):
ep = mock.MagicMock()
cls = mock.MagicMock()
cfg = mock.MagicMock()
ep.load.return_value.return_value = (cls, cfg)
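        # ep.load() returns a plugin factory; calling that factory yields
        # the (plugin class, config) pair, hence the chained return_value.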
mock_iter.return_value = [ep]
GROUP_NAME = 'abc'
plugin_map = mock.MagicMock()
        # exercise the loader against the mocked entry point
loading.load_plugins_from_entry_point(GROUP_NAME, plugin_map)
mock_iter.assert_called_once_with(GROUP_NAME)
mock_add.assert_called_once_with(cls, cfg, plugin_map)
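# Illustrative sketch (not part of the original module): given the mocks
# above, load_plugins_from_entry_point presumably does roughly the
# following for each advertised plugin:
#
#     for ep in pkg_resources.iter_entry_points(group_name):
#         plugin_cls, cfg = ep.load()()
#         add_plugin_to_map(plugin_cls, cfg, plugin_map)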
| gpl-2.0 |
jeffrey4l/nova | nova/tests/unit/fake_volume.py | 9 | 10049 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of a fake volume API."""
import uuid
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import timeutils
from nova import exception
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.import_opt('cross_az_attach',
'nova.volume.cinder', group='cinder')

class fake_volume(object):
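    """In-memory stand-in for a Cinder volume record used by the fake API."""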
user_uuid = '4a3cd440-b9c2-11e1-afa6-0800200c9a66'
instance_uuid = '4a3cd441-b9c2-11e1-afa6-0800200c9a66'
def __init__(self, size, name,
description, volume_id, snapshot,
volume_type, metadata,
availability_zone):
snapshot_id = None
if snapshot is not None:
snapshot_id = snapshot['id']
if volume_id is None:
volume_id = str(uuid.uuid4())
self.vol = {
'created_at': timeutils.utcnow(),
'deleted_at': None,
'updated_at': timeutils.utcnow(),
'uuid': 'WTF',
'deleted': False,
'id': volume_id,
'user_id': self.user_uuid,
'project_id': 'fake-project-id',
'snapshot_id': snapshot_id,
'host': None,
'size': size,
'availability_zone': availability_zone,
'instance_uuid': None,
'mountpoint': None,
'attach_time': timeutils.utcnow(),
'status': 'available',
'attach_status': 'detached',
'scheduled_at': None,
'launched_at': None,
'terminated_at': None,
'display_name': name,
'display_description': description,
'provider_location': 'fake-location',
'provider_auth': 'fake-auth',
'volume_type_id': 99
}
    def get(self, key, default=None):
        # Honor the declared default instead of raising KeyError.
        return self.vol.get(key, default)
def __setitem__(self, key, value):
self.vol[key] = value
    def __getitem__(self, key):
        return self.vol[key]

class fake_snapshot(object):
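    """In-memory stand-in for a Cinder volume snapshot record."""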
user_uuid = '4a3cd440-b9c2-11e1-afa6-0800200c9a66'
instance_uuid = '4a3cd441-b9c2-11e1-afa6-0800200c9a66'
def __init__(self, volume_id, size, name, desc, id=None):
if id is None:
id = str(uuid.uuid4())
self.snap = {
'created_at': timeutils.utcnow(),
'deleted_at': None,
'updated_at': timeutils.utcnow(),
'uuid': 'WTF',
'deleted': False,
'id': str(id),
'volume_id': volume_id,
'status': 'available',
'progress': '100%',
            'volume_size': size,
'display_name': name,
'display_description': desc,
'user_id': self.user_uuid,
'project_id': 'fake-project-id'
}
    def get(self, key, default=None):
        # Honor the declared default instead of raising KeyError.
        return self.snap.get(key, default)
def __setitem__(self, key, value):
self.snap[key] = value
    def __getitem__(self, key):
        return self.snap[key]

class API(object):
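    """Fake volume API backed by in-memory lists.

    Note that ``volume_list`` and ``snapshot_list`` are class attributes,
    so state is shared across API instances until ``reset_fake_api``
    clears it.
    """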
volume_list = []
snapshot_list = []
_instance = None
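    # The first API() call creates the shared Singleton marker object;
    # later instantiations reuse it via the class attribute.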
class Singleton(object):
def __init__(self):
self.API = None
def __init__(self):
if API._instance is None:
API._instance = API.Singleton()
self._EventHandler_instance = API._instance
def create(self, context, size, name, description, snapshot=None,
volume_type=None, metadata=None, availability_zone=None):
v = fake_volume(size, name,
description, None,
snapshot, volume_type,
metadata, availability_zone)
self.volume_list.append(v.vol)
LOG.info('creating volume %s', v.vol['id'])
return v.vol
def create_with_kwargs(self, context, **kwargs):
        volume_id = kwargs.get('volume_id', None)
        # Stringify only an explicit id; str(None) would yield the literal
        # id 'None' instead of letting fake_volume generate a new uuid.
        if volume_id is not None:
            volume_id = str(volume_id)
        v = fake_volume(kwargs['size'],
                        kwargs['name'],
                        kwargs['description'],
                        volume_id,
                        None,
                        None,
                        None,
                        None)
if kwargs.get('status', None) is not None:
v.vol['status'] = kwargs['status']
        if kwargs.get('host', None) is not None:
            v.vol['host'] = kwargs['host']
        if kwargs.get('attach_status', None) is not None:
            v.vol['attach_status'] = kwargs['attach_status']
if kwargs.get('snapshot_id', None) is not None:
v.vol['snapshot_id'] = kwargs['snapshot_id']
self.volume_list.append(v.vol)
return v.vol
def get(self, context, volume_id):
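        # '87654321' appears to be a sentinel id for tests that need an
        # already-attached, in-use volume without creating one first.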
if str(volume_id) == '87654321':
return {'id': volume_id,
'attach_time': '13:56:24',
'attach_status': 'attached',
'status': 'in-use'}
for v in self.volume_list:
if v['id'] == str(volume_id):
return v
raise exception.VolumeNotFound(volume_id=volume_id)
def get_all(self, context):
return self.volume_list
def delete(self, context, volume_id):
LOG.info('deleting volume %s', volume_id)
self.volume_list = [v for v in self.volume_list
if v['id'] != volume_id]
def check_attach(self, context, volume, instance=None):
if volume['status'] != 'available':
msg = "Status of volume '%s' must be available" % volume
raise exception.InvalidVolume(reason=msg)
if volume['attach_status'] == 'attached':
msg = "already attached"
raise exception.InvalidVolume(reason=msg)
if instance and not CONF.cinder.cross_az_attach:
if instance['availability_zone'] != volume['availability_zone']:
msg = "Instance and volume not in same availability_zone"
raise exception.InvalidVolume(reason=msg)
def check_detach(self, context, volume):
if volume['status'] == "available":
msg = "already detached"
raise exception.InvalidVolume(reason=msg)
def attach(self, context, volume_id, instance_uuid, mountpoint, mode='rw'):
LOG.info('attaching volume %s', volume_id)
volume = self.get(context, volume_id)
volume['status'] = 'in-use'
volume['mountpoint'] = mountpoint
volume['attach_status'] = 'attached'
volume['instance_uuid'] = instance_uuid
volume['attach_time'] = timeutils.utcnow()
def reset_fake_api(self, context):
del self.volume_list[:]
del self.snapshot_list[:]
def detach(self, context, volume_id):
LOG.info('detaching volume %s', volume_id)
volume = self.get(context, volume_id)
volume['status'] = 'available'
volume['mountpoint'] = None
volume['attach_status'] = 'detached'
volume['instance_uuid'] = None
def initialize_connection(self, context, volume_id, connector):
return {'driver_volume_type': 'iscsi', 'data': {}}
def terminate_connection(self, context, volume_id, connector):
return None
def get_snapshot(self, context, snapshot_id):
for snap in self.snapshot_list:
if snap['id'] == str(snapshot_id):
return snap
def get_all_snapshots(self, context):
return self.snapshot_list
def create_snapshot(self, context, volume_id, name, description, id=None):
volume = self.get(context, volume_id)
snapshot = fake_snapshot(volume['id'], volume['size'],
name, description, id)
self.snapshot_list.append(snapshot.snap)
return snapshot.snap
def create_snapshot_with_kwargs(self, context, **kwargs):
snapshot = fake_snapshot(kwargs.get('volume_id'),
kwargs.get('volume_size'),
kwargs.get('name'),
kwargs.get('description'),
kwargs.get('snap_id'))
        status = kwargs.get('status', None)
        if status is not None:
            snapshot.snap['status'] = status
self.snapshot_list.append(snapshot.snap)
return snapshot.snap
def create_snapshot_force(self, context, volume_id,
name, description, id=None):
volume = self.get(context, volume_id)
snapshot = fake_snapshot(volume['id'], volume['size'],
name, description, id)
self.snapshot_list.append(snapshot.snap)
return snapshot.snap
def delete_snapshot(self, context, snapshot_id):
self.snapshot_list = [s for s in self.snapshot_list
if s['id'] != snapshot_id]
def reserve_volume(self, context, volume_id):
LOG.info('reserving volume %s', volume_id)
volume = self.get(context, volume_id)
volume['status'] = 'attaching'
def unreserve_volume(self, context, volume_id):
LOG.info('unreserving volume %s', volume_id)
volume = self.get(context, volume_id)
volume['status'] = 'available'
def begin_detaching(self, context, volume_id):
LOG.info('begin detaching volume %s', volume_id)
volume = self.get(context, volume_id)
volume['status'] = 'detaching'
def roll_detaching(self, context, volume_id):
LOG.info('roll detaching volume %s', volume_id)
volume = self.get(context, volume_id)
volume['status'] = 'in-use'
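# Illustrative usage sketch (hypothetical names; ctxt and instance_uuid are
# assumed to exist in the caller, and this block is not part of the
# original module):
#
#     api = API()
#     vol = api.create(ctxt, 1, 'vol1', 'a test volume')
#     api.reserve_volume(ctxt, vol['id'])            # status -> 'attaching'
#     api.attach(ctxt, vol['id'], instance_uuid, '/dev/vdb')
#     snap = api.create_snapshot(ctxt, vol['id'], 'snap1', 'a snapshot')
#     api.detach(ctxt, vol['id'])                    # status -> 'available'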
| apache-2.0 |