repo_name (string, 5-100 chars) | ref (string, 12-67 chars) | path (string, 4-244 chars) | copies (string, 1-8 chars) | content (string, 0-1.05M chars, nullable) |
---|---|---|---|---|
ddico/odoo | refs/heads/master | setup/odoo-wsgi.example.py | 28 |
# WSGI Handler sample configuration file.
#
# Change the appropriate settings below, in order to provide the parameters
# that would normally be passed in the command-line.
# (at least conf['addons_path'])
#
# For generic wsgi handlers a global application is defined.
# For uwsgi this should work:
# $ uwsgi_python --http :9090 --pythonpath . --wsgi-file openerp-wsgi.py
#
# For gunicorn additional globals need to be defined in the Gunicorn section.
# Then the following command should run:
# $ gunicorn odoo:service.wsgi_server.application -c openerp-wsgi.py
import odoo
#----------------------------------------------------------
# Common
#----------------------------------------------------------
odoo.multi_process = True # Nah!
# Equivalent of --load command-line option
odoo.conf.server_wide_modules = ['base', 'web']
conf = odoo.tools.config
# Path to the OpenERP Addons repository (comma-separated for
# multiple locations)
conf['addons_path'] = '../../addons/trunk,../../web/trunk/addons'
# Optional database config if not using local socket
#conf['db_name'] = 'mycompany'
#conf['db_host'] = 'localhost'
#conf['db_user'] = 'foo'
#conf['db_port'] = 5432
#conf['db_password'] = 'secret'
#----------------------------------------------------------
# Generic WSGI handlers application
#----------------------------------------------------------
application = odoo.service.wsgi_server.application
odoo.service.server.load_server_wide_modules()
#----------------------------------------------------------
# Gunicorn
#----------------------------------------------------------
# Standard OpenERP XML-RPC port is 8069
bind = '127.0.0.1:8069'
pidfile = '.gunicorn.pid'
workers = 4
timeout = 240
max_requests = 2000
|
arnaud-morvan/QGIS | refs/heads/master | python/plugins/processing/algs/qgis/PostGISExecuteAndLoadSQL.py | 10 |
# -*- coding: utf-8 -*-
"""
***************************************************************************
PostGISExecuteAndLoadSQL.py
---------------------
Date : May 2018
Copyright : (C) 2018 by Anita Graser
Email : anitagraser at gmx dot at
---------------------
based on PostGISExecuteSQL.py by Victor Olaya and Carterix Geomatics
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Anita Graser'
__date__ = 'May 2018'
__copyright__ = '(C) 2018, Anita Graser'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from qgis.core import (Qgis,
QgsProcessingException,
QgsProcessingParameterString,
QgsApplication,
QgsVectorLayer,
QgsProject,
QgsProcessing,
QgsProcessingOutputVectorLayer,
QgsProcessingContext,
QgsProcessingFeedback)
from processing.algs.qgis.QgisAlgorithm import QgisAlgorithm
from processing.tools import postgis
class PostGISExecuteAndLoadSQL(QgisAlgorithm):
DATABASE = 'DATABASE'
SQL = 'SQL'
OUTPUT = 'OUTPUT'
ID_FIELD = 'ID_FIELD'
GEOMETRY_FIELD = 'GEOMETRY_FIELD'
def group(self):
return self.tr('Database')
def groupId(self):
return 'database'
def __init__(self):
super().__init__()
def initAlgorithm(self, config=None):
db_param = QgsProcessingParameterString(
self.DATABASE,
self.tr('Database (connection name)'))
db_param.setMetadata({
'widget_wrapper': {
'class': 'processing.gui.wrappers_postgis.ConnectionWidgetWrapper'}})
self.addParameter(db_param)
self.addParameter(QgsProcessingParameterString(
self.SQL,
self.tr('SQL query'),
multiLine=True))
self.addParameter(QgsProcessingParameterString(
self.ID_FIELD,
self.tr('Unique ID field name'),
defaultValue='id'))
self.addParameter(QgsProcessingParameterString(
self.GEOMETRY_FIELD,
self.tr('Geometry field name'),
defaultValue='geom',
optional=True))
self.addOutput(QgsProcessingOutputVectorLayer(
self.OUTPUT,
self.tr("Output layer"),
QgsProcessing.TypeVectorAnyGeometry))
def name(self):
return 'postgisexecuteandloadsql'
def displayName(self):
return self.tr('PostgreSQL execute and load SQL')
def shortDescription(self):
return self.tr('Executes a SQL command on a PostgreSQL database and loads the result as a table')
def tags(self):
return self.tr('postgis,table,database').split(',')
def processAlgorithm(self, parameters, context, feedback):
connection = self.parameterAsString(parameters, self.DATABASE, context)
id_field = self.parameterAsString(parameters, self.ID_FIELD, context)
geom_field = self.parameterAsString(
parameters, self.GEOMETRY_FIELD, context)
uri = postgis.uri_from_name(connection)
sql = self.parameterAsString(parameters, self.SQL, context)
sql = sql.replace('\n', ' ')
uri.setDataSource("", "(" + sql + ")", geom_field, "", id_field)
vlayer = QgsVectorLayer(uri.uri(), "layername", "postgres")
if not vlayer.isValid():
raise QgsProcessingException(self.tr("""This layer is invalid!
Please check the PostGIS log for error messages."""))
context.temporaryLayerStore().addMapLayer(vlayer)
context.addLayerToLoadOnCompletion(
vlayer.id(),
QgsProcessingContext.LayerDetails('SQL layer',
context.project(),
self.OUTPUT))
return {self.OUTPUT: vlayer.id()}
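# Illustrative sketch (editorial, not part of the original file): invoking this
# algorithm from the QGIS Python console. The 'qgis:' provider prefix and the
# connection name 'local_pg' are assumptions.
#
#   import processing
#   result = processing.run('qgis:postgisexecuteandloadsql', {
#       'DATABASE': 'local_pg',
#       'SQL': 'SELECT id, geom FROM my_table',
#       'ID_FIELD': 'id',
#       'GEOMETRY_FIELD': 'geom'})
#   layer_id = result['OUTPUT']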
|
yedidiaklein/moodle-local_video_directory | refs/heads/master | classes/task/googleSpeech/google/protobuf/python/compatibility_tests/v2.5.0/tests/__init__.py | 401 |
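# Editorial note: this is the standard namespace-package shim -- declare the
# namespace with setuptools' pkg_resources when it is available, otherwise fall
# back to pkgutil.extend_path.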
try:
__import__('pkg_resources').declare_namespace(__name__)
except ImportError:
__path__ = __import__('pkgutil').extend_path(__path__, __name__)
|
jjs0sbw/CSPLN | refs/heads/master | apps/scaffolding/win/web2py/gluon/contrib/login_methods/oauth20_account.py | 22 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Written by Michele Comitini <[email protected]>
License: LGPL v3
Adds support for OAuth 2.0 authentication to web2py.
OAuth 2.0 spec: http://tools.ietf.org/html/rfc6749
"""
import time
import cgi
import urllib2
from urllib import urlencode
from gluon import current, redirect, HTTP
import json
class OAuthAccount(object):
"""
Login will be done via OAuth Framework, instead of web2py's
login form.
You need to override the get_user method to match your auth provider's needs.
Example for facebook in your model (eg db.py)::
# define the auth_table before call to auth.define_tables()
auth_table = db.define_table(
auth.settings.table_user_name,
Field('first_name', length=128, default=""),
Field('last_name', length=128, default=""),
Field('username', length=128, default="", unique=True),
Field('password', 'password', length=256,
readable=False, label='Password'),
Field('registration_key', length=128, default= "",
writable=False, readable=False))
auth_table.username.requires = IS_NOT_IN_DB(db, auth_table.username)
auth.define_tables()
CLIENT_ID=\"<put your fb application id here>\"
CLIENT_SECRET=\"<put your fb application secret here>\"
AUTH_URL="http://..."
TOKEN_URL="http://..."
# remember to download and install facebook GraphAPI module in your app
from facebook import GraphAPI, GraphAPIError
from gluon.contrib.login_methods.oauth20_account import OAuthAccount
class FaceBookAccount(OAuthAccount):
'''OAuth impl for FaceBook'''
AUTH_URL="https://graph.facebook.com/oauth/authorize"
TOKEN_URL="https://graph.facebook.com/oauth/access_token"
def __init__(self):
OAuthAccount.__init__(self,
client_id=CLIENT_ID,
client_secret=CLIENT_SECRET,
auth_url=self.AUTH_URL,
token_url=self.TOKEN_URL,
scope='user_photos,friends_photos')
self.graph = None
def get_user(self):
'''
Returns the user using the Graph API.
'''
if not self.accessToken():
return None
if not self.graph:
self.graph = GraphAPI((self.accessToken()))
user = None
try:
user = self.graph.get_object("me")
except GraphAPIError, e:
self.session.token = None
self.graph = None
if user:
return dict(first_name = user['first_name'],
last_name = user['last_name'],
username = user['id'])
auth.settings.actions_disabled=['register',
'change_password','request_reset_password','profile']
auth.settings.login_form=FaceBookAccount()
Any optional arg in the constructor will be passed as-is to the remote
server for requests. It can be used, for example, for the optional "scope" parameter for Facebook.
"""
def __redirect_uri(self, next=None):
"""
Build the uri used by the authenticating server to redirect
the client back to the page originating the auth request.
Appends the _next action to the generated url so the flow continues.
"""
r = current.request
http_host = r.env.http_host
if r.env.https == 'on':
url_scheme = 'https'
else:
url_scheme = r.env.wsgi_url_scheme
if next:
path_info = next
else:
path_info = r.env.path_info
uri = '%s://%s%s' % (url_scheme, http_host, path_info)
if r.get_vars and not next:
uri += '?' + urlencode(r.get_vars)
return uri
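# Worked example (editorial): with http_host 'example.com', url scheme 'https'
# and path_info '/app/default/login', __redirect_uri() returns
# 'https://example.com/app/default/login' (plus the current GET vars when no
# explicit next target is given).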
def __build_url_opener(self, uri):
"""
Build the url opener for managing HTTP Basic Authentication.
"""
# Create an OpenerDirector with support
# for Basic HTTP Authentication...
password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
password_mgr.add_password(realm=None,
uri=uri,
user=self.client_id,
passwd=self.client_secret)
handler = urllib2.HTTPBasicAuthHandler(password_mgr)
opener = urllib2.build_opener(handler)
return opener
def accessToken(self):
"""
Return the access token generated by the authenticating server.
If token is already in the session that one will be used.
Otherwise the token is fetched from the auth server.
"""
if current.session.token and 'expires' in current.session.token:
expires = current.session.token['expires']
# reuse token until expiration
if expires == 0 or expires > time.time():
return current.session.token['access_token']
code = current.request.vars.code
if code:
data = dict(client_id=self.client_id,
client_secret=self.client_secret,
redirect_uri=current.session.redirect_uri,
code=code,
grant_type='authorization_code'
)
open_url = None
opener = self.__build_url_opener(self.token_url)
try:
open_url = opener.open(self.token_url, urlencode(data), self.socket_timeout)
except urllib2.HTTPError, e:
tmp = e.read()
raise Exception(tmp)
finally:
if current.session.code:
del current.session.code # throw it away
if open_url:
try:
data = open_url.read()
resp_type = open_url.info().gettype()
# try json style first
if not resp_type or resp_type[:16] == 'application/json':
try:
tokendata = json.loads(data)
current.session.token = tokendata
except Exception, e:
raise Exception("Cannot parse oauth server response %s %s" % (data, e))
else: # fall back to facebook style with x-www-form-urlencoded
tokendata = cgi.parse_qs(data)
current.session.token = \
dict([(k, v[-1]) for k, v in tokendata.items()])
if not tokendata: # parsing failed?
raise Exception("Cannot parse oauth server response %s" % data)
# set expiration absolute time try to avoid broken
# implementations where "expires_in" becomes "expires"
if 'expires_in' in current.session.token:
exps = 'expires_in'
elif 'expires' in current.session.token:
exps = 'expires'
else:
exps = None
current.session.token['expires'] = exps and \
int(current.session.token[exps]) + \
time.time()
finally:
opener.close()
return current.session.token['access_token']
current.session.token = None
return None
def __init__(self, g=None,
client_id=None, client_secret=None,
auth_url=None, token_url=None, socket_timeout=60, **args):
"""
The first argument is unused; it is kept only for legacy reasons.
"""
if [client_id, client_secret, auth_url, token_url].count(None) > 0:
raise RuntimeError("""Following args are mandatory:
client_id,
client_secret,
auth_url,
token_url.
""")
self.client_id = client_id
self.client_secret = client_secret
self.auth_url = auth_url
self.token_url = token_url
self.args = args
self.socket_timeout = socket_timeout
def login_url(self, next="/"):
self.__oauth_login(next)
return next
def logout_url(self, next="/"):
del current.session.token
return next
def get_user(self):
"""
Override this method by subclassing the class.
"""
if not current.session.token:
return None
return dict(first_name='Pinco',
last_name='Pallino',
username='pincopallino')
raise NotImplementedError("Must override get_user()")
# The following code is never executed. It can be used as an example
# for overriding in subclasses.
if not self.accessToken():
return None
if not self.graph:
self.graph = GraphAPI((self.accessToken()))
user = None
try:
user = self.graph.get_object("me")
except GraphAPIError:
current.session.token = None
self.graph = None
if user:
return dict(first_name=user['first_name'],
last_name=user['last_name'],
username=user['id'])
def __oauth_login(self, next):
"""
This method redirects the user to the authenticating form
on authentication server if the authentication code
and the authentication token are not available to the
application yet.
Once the authentication code has been received this method is
called to set the access token into the session by calling
accessToken()
"""
token = self.accessToken()
if not token:
current.session.redirect_uri = self.__redirect_uri(next)
data = dict(redirect_uri=current.session.redirect_uri,
response_type='code',
client_id=self.client_id)
if self.args:
data.update(self.args)
auth_request_url = self.auth_url + "?" + urlencode(data)
raise HTTP(302,
"You are not authenticated: you are being redirected to the <a href='" + auth_request_url + "'> authentication server</a>",
Location=auth_request_url)
return
|
Teagan42/home-assistant | refs/heads/dev | homeassistant/components/vultr/switch.py | 7 |
"""Support for interacting with Vultr subscriptions."""
import logging
import voluptuous as vol
from homeassistant.components.switch import PLATFORM_SCHEMA, SwitchDevice
from homeassistant.const import CONF_NAME
import homeassistant.helpers.config_validation as cv
from . import (
ATTR_ALLOWED_BANDWIDTH,
ATTR_AUTO_BACKUPS,
ATTR_COST_PER_MONTH,
ATTR_CREATED_AT,
ATTR_DISK,
ATTR_IPV4_ADDRESS,
ATTR_IPV6_ADDRESS,
ATTR_MEMORY,
ATTR_OS,
ATTR_REGION,
ATTR_SUBSCRIPTION_ID,
ATTR_SUBSCRIPTION_NAME,
ATTR_VCPUS,
CONF_SUBSCRIPTION,
DATA_VULTR,
)
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = "Vultr {}"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_SUBSCRIPTION): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}
)
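# Illustrative sketch (editorial, not part of the original file): a
# configuration.yaml entry this platform schema accepts. The subscription id is
# a made-up example.
#
#   switch:
#     - platform: vultr
#       subscription: "123456"
#       name: "Vultr {}"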
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Vultr subscription switch."""
vultr = hass.data[DATA_VULTR]
subscription = config.get(CONF_SUBSCRIPTION)
name = config.get(CONF_NAME)
if subscription not in vultr.data:
_LOGGER.error("Subscription %s not found", subscription)
return False
add_entities([VultrSwitch(vultr, subscription, name)], True)
class VultrSwitch(SwitchDevice):
"""Representation of a Vultr subscription switch."""
def __init__(self, vultr, subscription, name):
"""Initialize a new Vultr switch."""
self._vultr = vultr
self._name = name
self.subscription = subscription
self.data = None
@property
def name(self):
"""Return the name of the switch."""
try:
return self._name.format(self.data["label"])
except (TypeError, KeyError):
return self._name
@property
def is_on(self):
"""Return true if switch is on."""
return self.data["power_status"] == "running"
@property
def icon(self):
"""Return the icon of this server."""
return "mdi:server" if self.is_on else "mdi:server-off"
@property
def device_state_attributes(self):
"""Return the state attributes of the Vultr subscription."""
return {
ATTR_ALLOWED_BANDWIDTH: self.data.get("allowed_bandwidth_gb"),
ATTR_AUTO_BACKUPS: self.data.get("auto_backups"),
ATTR_COST_PER_MONTH: self.data.get("cost_per_month"),
ATTR_CREATED_AT: self.data.get("date_created"),
ATTR_DISK: self.data.get("disk"),
ATTR_IPV4_ADDRESS: self.data.get("main_ip"),
ATTR_IPV6_ADDRESS: self.data.get("v6_main_ip"),
ATTR_MEMORY: self.data.get("ram"),
ATTR_OS: self.data.get("os"),
ATTR_REGION: self.data.get("location"),
ATTR_SUBSCRIPTION_ID: self.data.get("SUBID"),
ATTR_SUBSCRIPTION_NAME: self.data.get("label"),
ATTR_VCPUS: self.data.get("vcpu_count"),
}
def turn_on(self, **kwargs):
"""Boot-up the subscription."""
if self.data["power_status"] != "running":
self._vultr.start(self.subscription)
def turn_off(self, **kwargs):
"""Halt the subscription."""
if self.data["power_status"] == "running":
self._vultr.halt(self.subscription)
def update(self):
"""Get the latest data from the device and update the data."""
self._vultr.update()
self.data = self._vultr.data[self.subscription]
|
Maximilian-Reuter/SickRage | refs/heads/master | lib/unidecode/x018.py | 252 |
data = (
' @ ', # 0x00
' ... ', # 0x01
', ', # 0x02
'. ', # 0x03
': ', # 0x04
' // ', # 0x05
'', # 0x06
'-', # 0x07
', ', # 0x08
'. ', # 0x09
'', # 0x0a
'', # 0x0b
'', # 0x0c
'', # 0x0d
'', # 0x0e
'[?]', # 0x0f
'0', # 0x10
'1', # 0x11
'2', # 0x12
'3', # 0x13
'4', # 0x14
'5', # 0x15
'6', # 0x16
'7', # 0x17
'8', # 0x18
'9', # 0x19
'[?]', # 0x1a
'[?]', # 0x1b
'[?]', # 0x1c
'[?]', # 0x1d
'[?]', # 0x1e
'[?]', # 0x1f
'a', # 0x20
'e', # 0x21
'i', # 0x22
'o', # 0x23
'u', # 0x24
'O', # 0x25
'U', # 0x26
'ee', # 0x27
'n', # 0x28
'ng', # 0x29
'b', # 0x2a
'p', # 0x2b
'q', # 0x2c
'g', # 0x2d
'm', # 0x2e
'l', # 0x2f
's', # 0x30
'sh', # 0x31
't', # 0x32
'd', # 0x33
'ch', # 0x34
'j', # 0x35
'y', # 0x36
'r', # 0x37
'w', # 0x38
'f', # 0x39
'k', # 0x3a
'kha', # 0x3b
'ts', # 0x3c
'z', # 0x3d
'h', # 0x3e
'zr', # 0x3f
'lh', # 0x40
'zh', # 0x41
'ch', # 0x42
'-', # 0x43
'e', # 0x44
'i', # 0x45
'o', # 0x46
'u', # 0x47
'O', # 0x48
'U', # 0x49
'ng', # 0x4a
'b', # 0x4b
'p', # 0x4c
'q', # 0x4d
'g', # 0x4e
'm', # 0x4f
't', # 0x50
'd', # 0x51
'ch', # 0x52
'j', # 0x53
'ts', # 0x54
'y', # 0x55
'w', # 0x56
'k', # 0x57
'g', # 0x58
'h', # 0x59
'jy', # 0x5a
'ny', # 0x5b
'dz', # 0x5c
'e', # 0x5d
'i', # 0x5e
'iy', # 0x5f
'U', # 0x60
'u', # 0x61
'ng', # 0x62
'k', # 0x63
'g', # 0x64
'h', # 0x65
'p', # 0x66
'sh', # 0x67
't', # 0x68
'd', # 0x69
'j', # 0x6a
'f', # 0x6b
'g', # 0x6c
'h', # 0x6d
'ts', # 0x6e
'z', # 0x6f
'r', # 0x70
'ch', # 0x71
'zh', # 0x72
'i', # 0x73
'k', # 0x74
'r', # 0x75
'f', # 0x76
'zh', # 0x77
'[?]', # 0x78
'[?]', # 0x79
'[?]', # 0x7a
'[?]', # 0x7b
'[?]', # 0x7c
'[?]', # 0x7d
'[?]', # 0x7e
'[?]', # 0x7f
'[?]', # 0x80
'H', # 0x81
'X', # 0x82
'W', # 0x83
'M', # 0x84
' 3 ', # 0x85
' 333 ', # 0x86
'a', # 0x87
'i', # 0x88
'k', # 0x89
'ng', # 0x8a
'c', # 0x8b
'tt', # 0x8c
'tth', # 0x8d
'dd', # 0x8e
'nn', # 0x8f
't', # 0x90
'd', # 0x91
'p', # 0x92
'ph', # 0x93
'ss', # 0x94
'zh', # 0x95
'z', # 0x96
'a', # 0x97
't', # 0x98
'zh', # 0x99
'gh', # 0x9a
'ng', # 0x9b
'c', # 0x9c
'jh', # 0x9d
'tta', # 0x9e
'ddh', # 0x9f
't', # 0xa0
'dh', # 0xa1
'ss', # 0xa2
'cy', # 0xa3
'zh', # 0xa4
'z', # 0xa5
'u', # 0xa6
'y', # 0xa7
'bh', # 0xa8
'\'', # 0xa9
'[?]', # 0xaa
'[?]', # 0xab
'[?]', # 0xac
'[?]', # 0xad
'[?]', # 0xae
'[?]', # 0xaf
'[?]', # 0xb0
'[?]', # 0xb1
'[?]', # 0xb2
'[?]', # 0xb3
'[?]', # 0xb4
'[?]', # 0xb5
'[?]', # 0xb6
'[?]', # 0xb7
'[?]', # 0xb8
'[?]', # 0xb9
'[?]', # 0xba
'[?]', # 0xbb
'[?]', # 0xbc
'[?]', # 0xbd
'[?]', # 0xbe
'[?]', # 0xbf
'[?]', # 0xc0
'[?]', # 0xc1
'[?]', # 0xc2
'[?]', # 0xc3
'[?]', # 0xc4
'[?]', # 0xc5
'[?]', # 0xc6
'[?]', # 0xc7
'[?]', # 0xc8
'[?]', # 0xc9
'[?]', # 0xca
'[?]', # 0xcb
'[?]', # 0xcc
'[?]', # 0xcd
'[?]', # 0xce
'[?]', # 0xcf
'[?]', # 0xd0
'[?]', # 0xd1
'[?]', # 0xd2
'[?]', # 0xd3
'[?]', # 0xd4
'[?]', # 0xd5
'[?]', # 0xd6
'[?]', # 0xd7
'[?]', # 0xd8
'[?]', # 0xd9
'[?]', # 0xda
'[?]', # 0xdb
'[?]', # 0xdc
'[?]', # 0xdd
'[?]', # 0xde
'[?]', # 0xdf
'[?]', # 0xe0
'[?]', # 0xe1
'[?]', # 0xe2
'[?]', # 0xe3
'[?]', # 0xe4
'[?]', # 0xe5
'[?]', # 0xe6
'[?]', # 0xe7
'[?]', # 0xe8
'[?]', # 0xe9
'[?]', # 0xea
'[?]', # 0xeb
'[?]', # 0xec
'[?]', # 0xed
'[?]', # 0xee
'[?]', # 0xef
'[?]', # 0xf0
'[?]', # 0xf1
'[?]', # 0xf2
'[?]', # 0xf3
'[?]', # 0xf4
'[?]', # 0xf5
'[?]', # 0xf6
'[?]', # 0xf7
'[?]', # 0xf8
'[?]', # 0xf9
'[?]', # 0xfa
'[?]', # 0xfb
'[?]', # 0xfc
'[?]', # 0xfd
'[?]', # 0xfe
)
|
axbaretto/beam | refs/heads/master | sdks/python/.tox/docs/lib/python2.7/site-packages/google/protobuf/internal/encoder.py | 90 |
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Code for encoding protocol message primitives.
Contains the logic for encoding every logical protocol field type
into one of the 5 physical wire types.
This code is designed to push the Python interpreter's performance to the
limits.
The basic idea is that at startup time, for every field (i.e. every
FieldDescriptor) we construct two functions: a "sizer" and an "encoder". The
sizer takes a value of this field's type and computes its byte size. The
encoder takes a writer function and a value. It encodes the value into byte
strings and invokes the writer function to write those strings. Typically the
writer function is the write() method of a BytesIO.
We try to do as much work as possible when constructing the writer and the
sizer rather than when calling them. In particular:
* We copy any needed global functions to local variables, so that we do not need
to do costly global table lookups at runtime.
* Similarly, we try to do any attribute lookups at startup time if possible.
* Every field's tag is encoded to bytes at startup, since it can't change at
runtime.
* Whatever component of the field size we can compute at startup, we do.
* We *avoid* sharing code if doing so would make the code slower and not sharing
does not burden us too much. For example, encoders for repeated fields do
not just call the encoders for singular fields in a loop because this would
add an extra function call overhead for every loop iteration; instead, we
manually inline the single-value encoder into the loop.
* If a Python function lacks a return statement, Python actually generates
instructions to pop the result of the last statement off the stack, push
None onto the stack, and then return that. If we really don't care what
value is returned, then we can save two instructions by returning the
result of the last statement. It looks funny but it helps.
* We assume that type and bounds checking has happened at a higher level.
"""
__author__ = '[email protected] (Kenton Varda)'
import struct
import six
from google.protobuf.internal import wire_format
# This will overflow and thus become IEEE-754 "infinity". We would use
# "float('inf')" but it doesn't work on Windows pre-Python-2.6.
_POS_INF = 1e10000
_NEG_INF = -_POS_INF
def _VarintSize(value):
"""Compute the size of a varint value."""
if value <= 0x7f: return 1
if value <= 0x3fff: return 2
if value <= 0x1fffff: return 3
if value <= 0xfffffff: return 4
if value <= 0x7ffffffff: return 5
if value <= 0x3ffffffffff: return 6
if value <= 0x1ffffffffffff: return 7
if value <= 0xffffffffffffff: return 8
if value <= 0x7fffffffffffffff: return 9
return 10
def _SignedVarintSize(value):
"""Compute the size of a signed varint value."""
if value < 0: return 10
if value <= 0x7f: return 1
if value <= 0x3fff: return 2
if value <= 0x1fffff: return 3
if value <= 0xfffffff: return 4
if value <= 0x7ffffffff: return 5
if value <= 0x3ffffffffff: return 6
if value <= 0x1ffffffffffff: return 7
if value <= 0xffffffffffffff: return 8
if value <= 0x7fffffffffffffff: return 9
return 10
def _TagSize(field_number):
"""Returns the number of bytes required to serialize a tag with this field
number."""
# Just pass in type 0, since the type won't affect the tag+type size.
return _VarintSize(wire_format.PackTag(field_number, 0))
# --------------------------------------------------------------------
# In this section we define some generic sizers. Each of these functions
# takes parameters specific to a particular field type, e.g. int32 or fixed64.
# It returns another function which in turn takes parameters specific to a
# particular field, e.g. the field number and whether it is repeated or packed.
# Look at the next section to see how these are used.
def _SimpleSizer(compute_value_size):
"""A sizer which uses the function compute_value_size to compute the size of
each value. Typically compute_value_size is _VarintSize."""
def SpecificSizer(field_number, is_repeated, is_packed):
tag_size = _TagSize(field_number)
if is_packed:
local_VarintSize = _VarintSize
def PackedFieldSize(value):
result = 0
for element in value:
result += compute_value_size(element)
return result + local_VarintSize(result) + tag_size
return PackedFieldSize
elif is_repeated:
def RepeatedFieldSize(value):
result = tag_size * len(value)
for element in value:
result += compute_value_size(element)
return result
return RepeatedFieldSize
else:
def FieldSize(value):
return tag_size + compute_value_size(value)
return FieldSize
return SpecificSizer
def _ModifiedSizer(compute_value_size, modify_value):
"""Like SimpleSizer, but modify_value is invoked on each value before it is
passed to compute_value_size. modify_value is typically ZigZagEncode."""
def SpecificSizer(field_number, is_repeated, is_packed):
tag_size = _TagSize(field_number)
if is_packed:
local_VarintSize = _VarintSize
def PackedFieldSize(value):
result = 0
for element in value:
result += compute_value_size(modify_value(element))
return result + local_VarintSize(result) + tag_size
return PackedFieldSize
elif is_repeated:
def RepeatedFieldSize(value):
result = tag_size * len(value)
for element in value:
result += compute_value_size(modify_value(element))
return result
return RepeatedFieldSize
else:
def FieldSize(value):
return tag_size + compute_value_size(modify_value(value))
return FieldSize
return SpecificSizer
def _FixedSizer(value_size):
"""Like _SimpleSizer except for a fixed-size field. The input is the size
of one value."""
def SpecificSizer(field_number, is_repeated, is_packed):
tag_size = _TagSize(field_number)
if is_packed:
local_VarintSize = _VarintSize
def PackedFieldSize(value):
result = len(value) * value_size
return result + local_VarintSize(result) + tag_size
return PackedFieldSize
elif is_repeated:
element_size = value_size + tag_size
def RepeatedFieldSize(value):
return len(value) * element_size
return RepeatedFieldSize
else:
field_size = value_size + tag_size
def FieldSize(value):
return field_size
return FieldSize
return SpecificSizer
# ====================================================================
# Here we declare a sizer constructor for each field type. Each "sizer
# constructor" is a function that takes (field_number, is_repeated, is_packed)
# as parameters and returns a sizer, which in turn takes a field value as
# a parameter and returns its encoded size.
Int32Sizer = Int64Sizer = EnumSizer = _SimpleSizer(_SignedVarintSize)
UInt32Sizer = UInt64Sizer = _SimpleSizer(_VarintSize)
SInt32Sizer = SInt64Sizer = _ModifiedSizer(
_SignedVarintSize, wire_format.ZigZagEncode)
Fixed32Sizer = SFixed32Sizer = FloatSizer = _FixedSizer(4)
Fixed64Sizer = SFixed64Sizer = DoubleSizer = _FixedSizer(8)
BoolSizer = _FixedSizer(1)
def StringSizer(field_number, is_repeated, is_packed):
"""Returns a sizer for a string field."""
tag_size = _TagSize(field_number)
local_VarintSize = _VarintSize
local_len = len
assert not is_packed
if is_repeated:
def RepeatedFieldSize(value):
result = tag_size * len(value)
for element in value:
l = local_len(element.encode('utf-8'))
result += local_VarintSize(l) + l
return result
return RepeatedFieldSize
else:
def FieldSize(value):
l = local_len(value.encode('utf-8'))
return tag_size + local_VarintSize(l) + l
return FieldSize
def BytesSizer(field_number, is_repeated, is_packed):
"""Returns a sizer for a bytes field."""
tag_size = _TagSize(field_number)
local_VarintSize = _VarintSize
local_len = len
assert not is_packed
if is_repeated:
def RepeatedFieldSize(value):
result = tag_size * len(value)
for element in value:
l = local_len(element)
result += local_VarintSize(l) + l
return result
return RepeatedFieldSize
else:
def FieldSize(value):
l = local_len(value)
return tag_size + local_VarintSize(l) + l
return FieldSize
def GroupSizer(field_number, is_repeated, is_packed):
"""Returns a sizer for a group field."""
tag_size = _TagSize(field_number) * 2
assert not is_packed
if is_repeated:
def RepeatedFieldSize(value):
result = tag_size * len(value)
for element in value:
result += element.ByteSize()
return result
return RepeatedFieldSize
else:
def FieldSize(value):
return tag_size + value.ByteSize()
return FieldSize
def MessageSizer(field_number, is_repeated, is_packed):
"""Returns a sizer for a message field."""
tag_size = _TagSize(field_number)
local_VarintSize = _VarintSize
assert not is_packed
if is_repeated:
def RepeatedFieldSize(value):
result = tag_size * len(value)
for element in value:
l = element.ByteSize()
result += local_VarintSize(l) + l
return result
return RepeatedFieldSize
else:
def FieldSize(value):
l = value.ByteSize()
return tag_size + local_VarintSize(l) + l
return FieldSize
# --------------------------------------------------------------------
# MessageSet is special: it needs custom logic to compute its size properly.
def MessageSetItemSizer(field_number):
"""Returns a sizer for extensions of MessageSet.
The message set message looks like this:
message MessageSet {
repeated group Item = 1 {
required int32 type_id = 2;
required string message = 3;
}
}
"""
static_size = (_TagSize(1) * 2 + _TagSize(2) + _VarintSize(field_number) +
_TagSize(3))
local_VarintSize = _VarintSize
def FieldSize(value):
l = value.ByteSize()
return static_size + local_VarintSize(l) + l
return FieldSize
# --------------------------------------------------------------------
# Map is special: it needs custom logic to compute its size properly.
def MapSizer(field_descriptor):
"""Returns a sizer for a map field."""
# Can't look at field_descriptor.message_type._concrete_class because it may
# not have been initialized yet.
message_type = field_descriptor.message_type
message_sizer = MessageSizer(field_descriptor.number, False, False)
def FieldSize(map_value):
total = 0
for key in map_value:
value = map_value[key]
# It's wasteful to create the messages and throw them away one second
# later since we'll do the same for the actual encode. But there's not an
# obvious way to avoid this within the current design without tons of code
# duplication.
entry_msg = message_type._concrete_class(key=key, value=value)
total += message_sizer(entry_msg)
return total
return FieldSize
# ====================================================================
# Encoders!
def _VarintEncoder():
"""Return an encoder for a basic varint value (does not include tag)."""
def EncodeVarint(write, value):
bits = value & 0x7f
value >>= 7
while value:
write(six.int2byte(0x80|bits))
bits = value & 0x7f
value >>= 7
return write(six.int2byte(bits))
return EncodeVarint
def _SignedVarintEncoder():
"""Return an encoder for a basic signed varint value (does not include
tag)."""
def EncodeSignedVarint(write, value):
if value < 0:
value += (1 << 64)
bits = value & 0x7f
value >>= 7
while value:
write(six.int2byte(0x80|bits))
bits = value & 0x7f
value >>= 7
return write(six.int2byte(bits))
return EncodeSignedVarint
_EncodeVarint = _VarintEncoder()
_EncodeSignedVarint = _SignedVarintEncoder()
def _VarintBytes(value):
"""Encode the given integer as a varint and return the bytes. This is only
called at startup time so it doesn't need to be fast."""
pieces = []
_EncodeVarint(pieces.append, value)
return b"".join(pieces)
def TagBytes(field_number, wire_type):
"""Encode the given tag and return the bytes. Only called at startup."""
return _VarintBytes(wire_format.PackTag(field_number, wire_type))
# --------------------------------------------------------------------
# As with sizers (see above), we have a number of common encoder
# implementations.
def _SimpleEncoder(wire_type, encode_value, compute_value_size):
"""Return a constructor for an encoder for fields of a particular type.
Args:
wire_type: The field's wire type, for encoding tags.
encode_value: A function which encodes an individual value, e.g.
_EncodeVarint().
compute_value_size: A function which computes the size of an individual
value, e.g. _VarintSize().
"""
def SpecificEncoder(field_number, is_repeated, is_packed):
if is_packed:
tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
local_EncodeVarint = _EncodeVarint
def EncodePackedField(write, value):
write(tag_bytes)
size = 0
for element in value:
size += compute_value_size(element)
local_EncodeVarint(write, size)
for element in value:
encode_value(write, element)
return EncodePackedField
elif is_repeated:
tag_bytes = TagBytes(field_number, wire_type)
def EncodeRepeatedField(write, value):
for element in value:
write(tag_bytes)
encode_value(write, element)
return EncodeRepeatedField
else:
tag_bytes = TagBytes(field_number, wire_type)
def EncodeField(write, value):
write(tag_bytes)
return encode_value(write, value)
return EncodeField
return SpecificEncoder
def _ModifiedEncoder(wire_type, encode_value, compute_value_size, modify_value):
"""Like SimpleEncoder but additionally invokes modify_value on every value
before passing it to encode_value. Usually modify_value is ZigZagEncode."""
def SpecificEncoder(field_number, is_repeated, is_packed):
if is_packed:
tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
local_EncodeVarint = _EncodeVarint
def EncodePackedField(write, value):
write(tag_bytes)
size = 0
for element in value:
size += compute_value_size(modify_value(element))
local_EncodeVarint(write, size)
for element in value:
encode_value(write, modify_value(element))
return EncodePackedField
elif is_repeated:
tag_bytes = TagBytes(field_number, wire_type)
def EncodeRepeatedField(write, value):
for element in value:
write(tag_bytes)
encode_value(write, modify_value(element))
return EncodeRepeatedField
else:
tag_bytes = TagBytes(field_number, wire_type)
def EncodeField(write, value):
write(tag_bytes)
return encode_value(write, modify_value(value))
return EncodeField
return SpecificEncoder
def _StructPackEncoder(wire_type, format):
"""Return a constructor for an encoder for a fixed-width field.
Args:
wire_type: The field's wire type, for encoding tags.
format: The format string to pass to struct.pack().
"""
value_size = struct.calcsize(format)
def SpecificEncoder(field_number, is_repeated, is_packed):
local_struct_pack = struct.pack
if is_packed:
tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
local_EncodeVarint = _EncodeVarint
def EncodePackedField(write, value):
write(tag_bytes)
local_EncodeVarint(write, len(value) * value_size)
for element in value:
write(local_struct_pack(format, element))
return EncodePackedField
elif is_repeated:
tag_bytes = TagBytes(field_number, wire_type)
def EncodeRepeatedField(write, value):
for element in value:
write(tag_bytes)
write(local_struct_pack(format, element))
return EncodeRepeatedField
else:
tag_bytes = TagBytes(field_number, wire_type)
def EncodeField(write, value):
write(tag_bytes)
return write(local_struct_pack(format, value))
return EncodeField
return SpecificEncoder
def _FloatingPointEncoder(wire_type, format):
"""Return a constructor for an encoder for float fields.
This is like StructPackEncoder, but catches errors that may be due to
passing non-finite floating-point values to struct.pack, and makes a
second attempt to encode those values.
Args:
wire_type: The field's wire type, for encoding tags.
format: The format string to pass to struct.pack().
"""
value_size = struct.calcsize(format)
if value_size == 4:
def EncodeNonFiniteOrRaise(write, value):
# Remember that the serialized form uses little-endian byte order.
if value == _POS_INF:
write(b'\x00\x00\x80\x7F')
elif value == _NEG_INF:
write(b'\x00\x00\x80\xFF')
elif value != value: # NaN
write(b'\x00\x00\xC0\x7F')
else:
raise
elif value_size == 8:
def EncodeNonFiniteOrRaise(write, value):
if value == _POS_INF:
write(b'\x00\x00\x00\x00\x00\x00\xF0\x7F')
elif value == _NEG_INF:
write(b'\x00\x00\x00\x00\x00\x00\xF0\xFF')
elif value != value: # NaN
write(b'\x00\x00\x00\x00\x00\x00\xF8\x7F')
else:
raise
else:
raise ValueError('Can\'t encode floating-point values that are '
'%d bytes long (only 4 or 8)' % value_size)
def SpecificEncoder(field_number, is_repeated, is_packed):
local_struct_pack = struct.pack
if is_packed:
tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
local_EncodeVarint = _EncodeVarint
def EncodePackedField(write, value):
write(tag_bytes)
local_EncodeVarint(write, len(value) * value_size)
for element in value:
# This try/except block is going to be faster than any code that
# we could write to check whether element is finite.
try:
write(local_struct_pack(format, element))
except SystemError:
EncodeNonFiniteOrRaise(write, element)
return EncodePackedField
elif is_repeated:
tag_bytes = TagBytes(field_number, wire_type)
def EncodeRepeatedField(write, value):
for element in value:
write(tag_bytes)
try:
write(local_struct_pack(format, element))
except SystemError:
EncodeNonFiniteOrRaise(write, element)
return EncodeRepeatedField
else:
tag_bytes = TagBytes(field_number, wire_type)
def EncodeField(write, value):
write(tag_bytes)
try:
write(local_struct_pack(format, value))
except SystemError:
EncodeNonFiniteOrRaise(write, value)
return EncodeField
return SpecificEncoder
# ====================================================================
# Here we declare an encoder constructor for each field type. These work
# very similarly to sizer constructors, described earlier.
Int32Encoder = Int64Encoder = EnumEncoder = _SimpleEncoder(
wire_format.WIRETYPE_VARINT, _EncodeSignedVarint, _SignedVarintSize)
UInt32Encoder = UInt64Encoder = _SimpleEncoder(
wire_format.WIRETYPE_VARINT, _EncodeVarint, _VarintSize)
SInt32Encoder = SInt64Encoder = _ModifiedEncoder(
wire_format.WIRETYPE_VARINT, _EncodeVarint, _VarintSize,
wire_format.ZigZagEncode)
# Note that Python conveniently guarantees that when using the '<' prefix on
# formats, they will also have the same size across all platforms (as opposed
# to without the prefix, where their sizes depend on the C compiler's basic
# type sizes).
Fixed32Encoder = _StructPackEncoder(wire_format.WIRETYPE_FIXED32, '<I')
Fixed64Encoder = _StructPackEncoder(wire_format.WIRETYPE_FIXED64, '<Q')
SFixed32Encoder = _StructPackEncoder(wire_format.WIRETYPE_FIXED32, '<i')
SFixed64Encoder = _StructPackEncoder(wire_format.WIRETYPE_FIXED64, '<q')
FloatEncoder = _FloatingPointEncoder(wire_format.WIRETYPE_FIXED32, '<f')
DoubleEncoder = _FloatingPointEncoder(wire_format.WIRETYPE_FIXED64, '<d')
def BoolEncoder(field_number, is_repeated, is_packed):
"""Returns an encoder for a boolean field."""
false_byte = b'\x00'
true_byte = b'\x01'
if is_packed:
tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
local_EncodeVarint = _EncodeVarint
def EncodePackedField(write, value):
write(tag_bytes)
local_EncodeVarint(write, len(value))
for element in value:
if element:
write(true_byte)
else:
write(false_byte)
return EncodePackedField
elif is_repeated:
tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_VARINT)
def EncodeRepeatedField(write, value):
for element in value:
write(tag_bytes)
if element:
write(true_byte)
else:
write(false_byte)
return EncodeRepeatedField
else:
tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_VARINT)
def EncodeField(write, value):
write(tag_bytes)
if value:
return write(true_byte)
return write(false_byte)
return EncodeField
def StringEncoder(field_number, is_repeated, is_packed):
"""Returns an encoder for a string field."""
tag = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
local_EncodeVarint = _EncodeVarint
local_len = len
assert not is_packed
if is_repeated:
def EncodeRepeatedField(write, value):
for element in value:
encoded = element.encode('utf-8')
write(tag)
local_EncodeVarint(write, local_len(encoded))
write(encoded)
return EncodeRepeatedField
else:
def EncodeField(write, value):
encoded = value.encode('utf-8')
write(tag)
local_EncodeVarint(write, local_len(encoded))
return write(encoded)
return EncodeField
def BytesEncoder(field_number, is_repeated, is_packed):
"""Returns an encoder for a bytes field."""
tag = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
local_EncodeVarint = _EncodeVarint
local_len = len
assert not is_packed
if is_repeated:
def EncodeRepeatedField(write, value):
for element in value:
write(tag)
local_EncodeVarint(write, local_len(element))
write(element)
return EncodeRepeatedField
else:
def EncodeField(write, value):
write(tag)
local_EncodeVarint(write, local_len(value))
return write(value)
return EncodeField
def GroupEncoder(field_number, is_repeated, is_packed):
"""Returns an encoder for a group field."""
start_tag = TagBytes(field_number, wire_format.WIRETYPE_START_GROUP)
end_tag = TagBytes(field_number, wire_format.WIRETYPE_END_GROUP)
assert not is_packed
if is_repeated:
def EncodeRepeatedField(write, value):
for element in value:
write(start_tag)
element._InternalSerialize(write)
write(end_tag)
return EncodeRepeatedField
else:
def EncodeField(write, value):
write(start_tag)
value._InternalSerialize(write)
return write(end_tag)
return EncodeField
def MessageEncoder(field_number, is_repeated, is_packed):
"""Returns an encoder for a message field."""
tag = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
local_EncodeVarint = _EncodeVarint
assert not is_packed
if is_repeated:
def EncodeRepeatedField(write, value):
for element in value:
write(tag)
local_EncodeVarint(write, element.ByteSize())
element._InternalSerialize(write)
return EncodeRepeatedField
else:
def EncodeField(write, value):
write(tag)
local_EncodeVarint(write, value.ByteSize())
return value._InternalSerialize(write)
return EncodeField
# --------------------------------------------------------------------
# As before, MessageSet is special.
def MessageSetItemEncoder(field_number):
"""Encoder for extensions of MessageSet.
The message set message looks like this:
message MessageSet {
repeated group Item = 1 {
required int32 type_id = 2;
required string message = 3;
}
}
"""
start_bytes = b"".join([
TagBytes(1, wire_format.WIRETYPE_START_GROUP),
TagBytes(2, wire_format.WIRETYPE_VARINT),
_VarintBytes(field_number),
TagBytes(3, wire_format.WIRETYPE_LENGTH_DELIMITED)])
end_bytes = TagBytes(1, wire_format.WIRETYPE_END_GROUP)
local_EncodeVarint = _EncodeVarint
def EncodeField(write, value):
write(start_bytes)
local_EncodeVarint(write, value.ByteSize())
value._InternalSerialize(write)
return write(end_bytes)
return EncodeField
# --------------------------------------------------------------------
# As before, Map is special.
def MapEncoder(field_descriptor):
"""Encoder for extensions of MessageSet.
Maps always have a wire format like this:
message MapEntry {
key_type key = 1;
value_type value = 2;
}
repeated MapEntry map = N;
"""
# Can't look at field_descriptor.message_type._concrete_class because it may
# not have been initialized yet.
message_type = field_descriptor.message_type
encode_message = MessageEncoder(field_descriptor.number, False, False)
def EncodeField(write, value):
for key in value:
entry_msg = message_type._concrete_class(key=key, value=value[key])
encode_message(write, entry_msg)
return EncodeField
|
anhstudios/swganh | refs/heads/develop | data/scripts/templates/object/building/poi/shared_creature_lair_forest_sacrifice_beast.py | 2 |
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Building()
result.template = "object/building/poi/shared_creature_lair_forest_sacrifice_beast.iff"
result.attribute_template_id = -1
result.stfName("poi_n","base_poi_building")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
akosel/incubator-airflow | refs/heads/master | airflow/hooks/S3_hook.py | 2 |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from botocore.exceptions import ClientError
from airflow.exceptions import AirflowException
from airflow.contrib.hooks.aws_hook import AwsHook
from six import BytesIO
from urllib.parse import urlparse
import re
import fnmatch
class S3Hook(AwsHook):
"""
Interact with AWS S3, using the boto3 library.
"""
def get_conn(self):
return self.get_client_type('s3')
@staticmethod
def parse_s3_url(s3url):
parsed_url = urlparse(s3url)
if not parsed_url.netloc:
raise AirflowException('Please provide a bucket_name instead of "%s"' % s3url)
else:
bucket_name = parsed_url.netloc
key = parsed_url.path.strip('/')
return bucket_name, key
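# Worked example (editorial): parse_s3_url('s3://my-bucket/path/to/file.csv')
# returns ('my-bucket', 'path/to/file.csv'); a URL without a netloc raises
# AirflowException.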
def check_for_bucket(self, bucket_name):
"""
Check if bucket_name exists.
:param bucket_name: the name of the bucket
:type bucket_name: str
"""
try:
self.get_conn().head_bucket(Bucket=bucket_name)
return True
except ClientError as e:
self.log.info(e.response["Error"]["Message"])
return False
def get_bucket(self, bucket_name):
"""
Returns a boto3.S3.Bucket object
:param bucket_name: the name of the bucket
:type bucket_name: str
"""
s3 = self.get_resource_type('s3')
return s3.Bucket(bucket_name)
def check_for_prefix(self, bucket_name, prefix, delimiter):
"""
Checks that a prefix exists in a bucket
"""
prefix = prefix + delimiter if prefix[-1] != delimiter else prefix
prefix_split = re.split(r'(\w+[{d}])$'.format(d=delimiter), prefix, 1)
previous_level = prefix_split[0]
plist = self.list_prefixes(bucket_name, previous_level, delimiter)
return False if plist is None else prefix in plist
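# Worked example (editorial): check_for_prefix('my-bucket', 'logs/2018', '/')
# normalises the prefix to 'logs/2018/', lists prefixes under 'logs/' and
# returns True only if 'logs/2018/' appears among them. The bucket and prefix
# names are made-up examples.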
def list_prefixes(self, bucket_name, prefix='', delimiter='',
page_size=None, max_items=None):
"""
Lists prefixes in a bucket under prefix
:param bucket_name: the name of the bucket
:type bucket_name: str
:param prefix: a key prefix
:type prefix: str
:param delimiter: the delimiter marks key hierarchy.
:type delimiter: str
:param page_size: pagination size
:type page_size: int
:param max_items: maximum items to return
:type max_items: int
"""
config = {
'PageSize': page_size,
'MaxItems': max_items,
}
paginator = self.get_conn().get_paginator('list_objects_v2')
response = paginator.paginate(Bucket=bucket_name,
Prefix=prefix,
Delimiter=delimiter,
PaginationConfig=config)
has_results = False
prefixes = []
for page in response:
if 'CommonPrefixes' in page:
has_results = True
for p in page['CommonPrefixes']:
prefixes.append(p['Prefix'])
if has_results:
return prefixes
def list_keys(self, bucket_name, prefix='', delimiter='',
page_size=None, max_items=None):
"""
Lists keys in a bucket under prefix and not containing delimiter
:param bucket_name: the name of the bucket
:type bucket_name: str
:param prefix: a key prefix
:type prefix: str
:param delimiter: the delimiter marks key hierarchy.
:type delimiter: str
:param page_size: pagination size
:type page_size: int
:param max_items: maximum items to return
:type max_items: int
"""
config = {
'PageSize': page_size,
'MaxItems': max_items,
}
paginator = self.get_conn().get_paginator('list_objects_v2')
response = paginator.paginate(Bucket=bucket_name,
Prefix=prefix,
Delimiter=delimiter,
PaginationConfig=config)
has_results = False
keys = []
for page in response:
if 'Contents' in page:
has_results = True
for k in page['Contents']:
keys.append(k['Key'])
if has_results:
return keys
def check_for_key(self, key, bucket_name=None):
"""
Checks if a key exists in a bucket
:param key: S3 key that will point to the file
:type key: str
:param bucket_name: Name of the bucket in which the file is stored
:type bucket_name: str
"""
if not bucket_name:
(bucket_name, key) = self.parse_s3_url(key)
try:
self.get_conn().head_object(Bucket=bucket_name, Key=key)
return True
except ClientError as e:
self.log.info(e.response["Error"]["Message"])
return False
def get_key(self, key, bucket_name=None):
"""
Returns a boto3.s3.Object
:param key: the path to the key
:type key: str
:param bucket_name: the name of the bucket
:type bucket_name: str
"""
if not bucket_name:
(bucket_name, key) = self.parse_s3_url(key)
obj = self.get_resource_type('s3').Object(bucket_name, key)
obj.load()
return obj
def read_key(self, key, bucket_name=None):
"""
Reads a key from S3
:param key: S3 key that will point to the file
:type key: str
:param bucket_name: Name of the bucket in which the file is stored
:type bucket_name: str
"""
obj = self.get_key(key, bucket_name)
return obj.get()['Body'].read().decode('utf-8')
def select_key(self, key, bucket_name=None,
expression='SELECT * FROM S3Object',
expression_type='SQL',
input_serialization=None,
output_serialization=None):
"""
Reads a key with S3 Select.
:param key: S3 key that will point to the file
:type key: str
:param bucket_name: Name of the bucket in which the file is stored
:type bucket_name: str
:param expression: S3 Select expression
:type expression: str
:param expression_type: S3 Select expression type
:type expression_type: str
:param input_serialization: S3 Select input data serialization format
:type input_serialization: dict
:param output_serialization: S3 Select output data serialization format
:type output_serialization: dict
:return: retrieved subset of original data by S3 Select
:rtype: str
.. seealso::
For more details about S3 Select parameters:
http://boto3.readthedocs.io/en/latest/reference/services/s3.html#S3.Client.select_object_content
"""
if input_serialization is None:
input_serialization = {'CSV': {}}
if output_serialization is None:
output_serialization = {'CSV': {}}
if not bucket_name:
(bucket_name, key) = self.parse_s3_url(key)
response = self.get_conn().select_object_content(
Bucket=bucket_name,
Key=key,
Expression=expression,
ExpressionType=expression_type,
InputSerialization=input_serialization,
OutputSerialization=output_serialization)
return ''.join(event['Records']['Payload']
for event in response['Payload']
if 'Records' in event)
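# Illustrative sketch (editorial, not part of the original file): reading a CSV
# object with S3 Select using the defaults above. The bucket/key and the
# aws_conn_id are made-up examples.
#
#   hook = S3Hook(aws_conn_id='aws_default')
#   rows = hook.select_key(
#       key='s3://my-bucket/data.csv',
#       expression='SELECT s._1, s._2 FROM S3Object s')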
def check_for_wildcard_key(self,
wildcard_key, bucket_name=None, delimiter=''):
"""
Checks that a key matching a wildcard expression exists in a bucket
"""
return self.get_wildcard_key(wildcard_key=wildcard_key,
bucket_name=bucket_name,
delimiter=delimiter) is not None
def get_wildcard_key(self, wildcard_key, bucket_name=None, delimiter=''):
"""
Returns a boto3.s3.Object object matching the wildcard expression
:param wildcard_key: the path to the key
:type wildcard_key: str
:param bucket_name: the name of the bucket
:type bucket_name: str
"""
if not bucket_name:
(bucket_name, wildcard_key) = self.parse_s3_url(wildcard_key)
prefix = re.split(r'[*]', wildcard_key, 1)[0]
klist = self.list_keys(bucket_name, prefix=prefix, delimiter=delimiter)
if klist:
key_matches = [k for k in klist if fnmatch.fnmatch(k, wildcard_key)]
if key_matches:
return self.get_key(key_matches[0], bucket_name)
def load_file(self,
filename,
key,
bucket_name=None,
replace=False,
encrypt=False):
"""
Loads a local file to S3
:param filename: name of the file to load.
:type filename: str
:param key: S3 key that will point to the file
:type key: str
:param bucket_name: Name of the bucket in which to store the file
:type bucket_name: str
:param replace: A flag to decide whether or not to overwrite the key
if it already exists. If replace is False and the key exists, an
error will be raised.
:type replace: bool
:param encrypt: If True, the file will be encrypted on the server-side
by S3 and will be stored in an encrypted form while at rest in S3.
:type encrypt: bool
"""
if not bucket_name:
(bucket_name, key) = self.parse_s3_url(key)
if not replace and self.check_for_key(key, bucket_name):
raise ValueError("The key {key} already exists.".format(key=key))
extra_args = {}
if encrypt:
extra_args['ServerSideEncryption'] = "AES256"
client = self.get_conn()
client.upload_file(filename, bucket_name, key, ExtraArgs=extra_args)
def load_string(self,
string_data,
key,
bucket_name=None,
replace=False,
encrypt=False,
encoding='utf-8'):
"""
Loads a string to S3
This is provided as a convenience to drop a string in S3. It uses the
boto infrastructure to ship a file to s3.
:param string_data: str to set as content for the key.
:type string_data: str
:param key: S3 key that will point to the file
:type key: str
:param bucket_name: Name of the bucket in which to store the file
:type bucket_name: str
:param replace: A flag to decide whether or not to overwrite the key
if it already exists
:type replace: bool
:param encrypt: If True, the file will be encrypted on the server-side
by S3 and will be stored in an encrypted form while at rest in S3.
:type encrypt: bool
"""
self.load_bytes(string_data.encode(encoding),
key=key,
bucket_name=bucket_name,
replace=replace,
encrypt=encrypt)
def load_bytes(self,
bytes_data,
key,
bucket_name=None,
replace=False,
encrypt=False):
"""
Loads bytes to S3
This is provided as a convenience to drop bytes in S3. It uses the
boto infrastructure to ship a file to s3.
:param bytes_data: bytes to set as content for the key.
:type bytes_data: bytes
:param key: S3 key that will point to the file
:type key: str
:param bucket_name: Name of the bucket in which to store the file
:type bucket_name: str
:param replace: A flag to decide whether or not to overwrite the key
if it already exists
:type replace: bool
:param encrypt: If True, the file will be encrypted on the server-side
by S3 and will be stored in an encrypted form while at rest in S3.
:type encrypt: bool
"""
if not bucket_name:
(bucket_name, key) = self.parse_s3_url(key)
if not replace and self.check_for_key(key, bucket_name):
raise ValueError("The key {key} already exists.".format(key=key))
extra_args = {}
if encrypt:
extra_args['ServerSideEncryption'] = "AES256"
filelike_buffer = BytesIO(bytes_data)
client = self.get_conn()
client.upload_fileobj(filelike_buffer, bucket_name, key, ExtraArgs=extra_args)
def copy_object(self,
source_bucket_key,
dest_bucket_key,
source_bucket_name=None,
dest_bucket_name=None,
source_version_id=None):
"""
Creates a copy of an object that is already stored in S3.
Note: the S3 connection used here needs to have access to both
source and destination bucket/key.
:param source_bucket_key: The key of the source object.
It can be either full s3:// style url or relative path from root level.
When it's specified as a full s3:// url, please omit source_bucket_name.
:type source_bucket_key: str
:param dest_bucket_key: The key of the object to copy to.
The convention to specify `dest_bucket_key` is the same
as `source_bucket_key`.
:type dest_bucket_key: str
:param source_bucket_name: Name of the S3 bucket where the source object is in.
It should be omitted when `source_bucket_key` is provided as a full s3:// url.
:type source_bucket_name: str
:param dest_bucket_name: Name of the S3 bucket to where the object is copied.
It should be omitted when `dest_bucket_key` is provided as a full s3:// url.
:type dest_bucket_name: str
:param source_version_id: Version ID of the source object (OPTIONAL)
:type source_version_id: str
"""
if dest_bucket_name is None:
dest_bucket_name, dest_bucket_key = self.parse_s3_url(dest_bucket_key)
else:
parsed_url = urlparse(dest_bucket_key)
if parsed_url.scheme != '' or parsed_url.netloc != '':
raise AirflowException('If dest_bucket_name is provided, ' +
'dest_bucket_key should be relative path ' +
'from root level, rather than a full s3:// url')
if source_bucket_name is None:
source_bucket_name, source_bucket_key = self.parse_s3_url(source_bucket_key)
else:
parsed_url = urlparse(source_bucket_key)
if parsed_url.scheme != '' or parsed_url.netloc != '':
raise AirflowException('If source_bucket_name is provided, ' +
'source_bucket_key should be relative path ' +
'from root level, rather than a full s3:// url')
CopySource = {'Bucket': source_bucket_name,
'Key': source_bucket_key,
'VersionId': source_version_id}
response = self.get_conn().copy_object(Bucket=dest_bucket_name,
Key=dest_bucket_key,
CopySource=CopySource)
return response
def delete_objects(self,
bucket,
keys):
"""
:param bucket: Name of the bucket in which you are going to delete object(s)
:type bucket: str
:param keys: The key(s) to delete from S3 bucket.
When ``keys`` is a string, it's supposed to be the key name of
the single object to delete.
When ``keys`` is a list, it's supposed to be the list of the
keys to delete.
:type keys: str or list
"""
        if not isinstance(keys, list):
            keys = [keys]
delete_dict = {"Objects": [{"Key": k} for k in keys]}
response = self.get_conn().delete_objects(Bucket=bucket,
Delete=delete_dict)
return response
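# --- Illustrative usage sketch (appended; not part of the original hook) ---
# A minimal smoke test for the methods above. The class name ``S3Hook`` and
# the Airflow connection id are assumptions about how this module is
# packaged; the bucket and keys below are placeholders.
if __name__ == '__main__':
    hook = S3Hook(aws_conn_id='aws_default')  # assumed class name
    hook.load_string('hello world',
                     key='s3://example-bucket/greetings/hello.txt',
                     replace=True,
                     encrypt=True)
    hook.copy_object(source_bucket_key='s3://example-bucket/greetings/hello.txt',
                     dest_bucket_key='s3://example-bucket/backup/hello.txt')
    hook.delete_objects(bucket='example-bucket', keys=['greetings/hello.txt'])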
|
lair-framework/drone-whois
|
refs/heads/master
|
drone-whois/__init__.py
|
21
|
__author__ = 'tom'
|
szymex/xbmc-finnish-tv
|
refs/heads/master
|
plugin.video.ruutu/html5lib/filters/optionaltags.py
|
1727
|
from __future__ import absolute_import, division, unicode_literals
from . import _base
class Filter(_base.Filter):
def slider(self):
previous1 = previous2 = None
for token in self.source:
if previous1 is not None:
yield previous2, previous1, token
previous2 = previous1
previous1 = token
yield previous2, previous1, None
def __iter__(self):
for previous, token, next in self.slider():
type = token["type"]
if type == "StartTag":
if (token["data"] or
not self.is_optional_start(token["name"], previous, next)):
yield token
elif type == "EndTag":
if not self.is_optional_end(token["name"], next):
yield token
else:
yield token
def is_optional_start(self, tagname, previous, next):
type = next and next["type"] or None
if tagname in 'html':
# An html element's start tag may be omitted if the first thing
# inside the html element is not a space character or a comment.
return type not in ("Comment", "SpaceCharacters")
elif tagname == 'head':
# A head element's start tag may be omitted if the first thing
# inside the head element is an element.
# XXX: we also omit the start tag if the head element is empty
if type in ("StartTag", "EmptyTag"):
return True
elif type == "EndTag":
return next["name"] == "head"
elif tagname == 'body':
# A body element's start tag may be omitted if the first thing
# inside the body element is not a space character or a comment,
# except if the first thing inside the body element is a script
# or style element and the node immediately preceding the body
# element is a head element whose end tag has been omitted.
if type in ("Comment", "SpaceCharacters"):
return False
elif type == "StartTag":
# XXX: we do not look at the preceding event, so we never omit
# the body element's start tag if it's followed by a script or
# a style element.
return next["name"] not in ('script', 'style')
else:
return True
elif tagname == 'colgroup':
# A colgroup element's start tag may be omitted if the first thing
# inside the colgroup element is a col element, and if the element
            # is not immediately preceded by another colgroup element whose
# end tag has been omitted.
if type in ("StartTag", "EmptyTag"):
# XXX: we do not look at the preceding event, so instead we never
# omit the colgroup element's end tag when it is immediately
# followed by another colgroup element. See is_optional_end.
return next["name"] == "col"
else:
return False
elif tagname == 'tbody':
# A tbody element's start tag may be omitted if the first thing
# inside the tbody element is a tr element, and if the element is
            # not immediately preceded by a tbody, thead, or tfoot element
# whose end tag has been omitted.
if type == "StartTag":
# omit the thead and tfoot elements' end tag when they are
# immediately followed by a tbody element. See is_optional_end.
if previous and previous['type'] == 'EndTag' and \
previous['name'] in ('tbody', 'thead', 'tfoot'):
return False
return next["name"] == 'tr'
else:
return False
return False
def is_optional_end(self, tagname, next):
type = next and next["type"] or None
if tagname in ('html', 'head', 'body'):
# An html element's end tag may be omitted if the html element
# is not immediately followed by a space character or a comment.
return type not in ("Comment", "SpaceCharacters")
elif tagname in ('li', 'optgroup', 'tr'):
# A li element's end tag may be omitted if the li element is
# immediately followed by another li element or if there is
# no more content in the parent element.
# An optgroup element's end tag may be omitted if the optgroup
# element is immediately followed by another optgroup element,
# or if there is no more content in the parent element.
# A tr element's end tag may be omitted if the tr element is
# immediately followed by another tr element, or if there is
# no more content in the parent element.
if type == "StartTag":
return next["name"] == tagname
else:
return type == "EndTag" or type is None
elif tagname in ('dt', 'dd'):
# A dt element's end tag may be omitted if the dt element is
# immediately followed by another dt element or a dd element.
# A dd element's end tag may be omitted if the dd element is
# immediately followed by another dd element or a dt element,
# or if there is no more content in the parent element.
if type == "StartTag":
return next["name"] in ('dt', 'dd')
elif tagname == 'dd':
return type == "EndTag" or type is None
else:
return False
elif tagname == 'p':
# A p element's end tag may be omitted if the p element is
# immediately followed by an address, article, aside,
# blockquote, datagrid, dialog, dir, div, dl, fieldset,
# footer, form, h1, h2, h3, h4, h5, h6, header, hr, menu,
# nav, ol, p, pre, section, table, or ul, element, or if
# there is no more content in the parent element.
if type in ("StartTag", "EmptyTag"):
return next["name"] in ('address', 'article', 'aside',
'blockquote', 'datagrid', 'dialog',
'dir', 'div', 'dl', 'fieldset', 'footer',
'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6',
'header', 'hr', 'menu', 'nav', 'ol',
'p', 'pre', 'section', 'table', 'ul')
else:
return type == "EndTag" or type is None
elif tagname == 'option':
# An option element's end tag may be omitted if the option
# element is immediately followed by another option element,
# or if it is immediately followed by an <code>optgroup</code>
# element, or if there is no more content in the parent
# element.
if type == "StartTag":
return next["name"] in ('option', 'optgroup')
else:
return type == "EndTag" or type is None
elif tagname in ('rt', 'rp'):
# An rt element's end tag may be omitted if the rt element is
# immediately followed by an rt or rp element, or if there is
# no more content in the parent element.
# An rp element's end tag may be omitted if the rp element is
# immediately followed by an rt or rp element, or if there is
# no more content in the parent element.
if type == "StartTag":
return next["name"] in ('rt', 'rp')
else:
return type == "EndTag" or type is None
elif tagname == 'colgroup':
# A colgroup element's end tag may be omitted if the colgroup
# element is not immediately followed by a space character or
# a comment.
if type in ("Comment", "SpaceCharacters"):
return False
elif type == "StartTag":
# XXX: we also look for an immediately following colgroup
# element. See is_optional_start.
return next["name"] != 'colgroup'
else:
return True
elif tagname in ('thead', 'tbody'):
# A thead element's end tag may be omitted if the thead element
# is immediately followed by a tbody or tfoot element.
# A tbody element's end tag may be omitted if the tbody element
# is immediately followed by a tbody or tfoot element, or if
# there is no more content in the parent element.
# A tfoot element's end tag may be omitted if the tfoot element
# is immediately followed by a tbody element, or if there is no
# more content in the parent element.
# XXX: we never omit the end tag when the following element is
# a tbody. See is_optional_start.
if type == "StartTag":
return next["name"] in ['tbody', 'tfoot']
elif tagname == 'tbody':
return type == "EndTag" or type is None
else:
return False
elif tagname == 'tfoot':
# A tfoot element's end tag may be omitted if the tfoot element
# is immediately followed by a tbody element, or if there is no
# more content in the parent element.
# XXX: we never omit the end tag when the following element is
# a tbody. See is_optional_start.
if type == "StartTag":
return next["name"] == 'tbody'
else:
return type == "EndTag" or type is None
elif tagname in ('td', 'th'):
# A td element's end tag may be omitted if the td element is
# immediately followed by a td or th element, or if there is
# no more content in the parent element.
# A th element's end tag may be omitted if the th element is
# immediately followed by a td or th element, or if there is
# no more content in the parent element.
if type == "StartTag":
return next["name"] in ('td', 'th')
else:
return type == "EndTag" or type is None
return False
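# --- Illustrative sketch (appended; not part of the original filter) ---
# ``Filter.slider`` walks the token stream with one token of lookbehind and
# one of lookahead, which is what lets ``__iter__`` ask what came before and
# what comes next when deciding whether a tag is optional. A standalone
# version of that windowing pattern, runnable without html5lib:
def _window(tokens):
    previous = current = None
    for token in tokens:
        if current is not None:
            yield previous, current, token
        previous, current = current, token
    yield previous, current, None

if __name__ == '__main__':
    for prev, cur, nxt in _window(['<html>', '<head>', '</head>', '<body>']):
        print((prev, cur, nxt))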
|
bdoner/SickRage
|
refs/heads/master
|
sickbeard/clients/rtorrent_client.py
|
15
|
# Author: jkaberg <[email protected]>, based on fuzemans work (https://github.com/RuudBurger/CouchPotatoServer/blob/develop/couchpotato/core/downloaders/rtorrent/main.py)
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
import traceback
import sickbeard
from sickbeard import logger
from .generic import GenericClient
from rtorrent import RTorrent
class rTorrentAPI(GenericClient):
def __init__(self, host=None, username=None, password=None):
super(rTorrentAPI, self).__init__('rTorrent', host, username, password)
def _get_auth(self):
self.auth = None
if self.auth is not None:
return self.auth
if not self.host:
return
tp_kwargs = {}
        if sickbeard.TORRENT_AUTH_TYPE != 'none':
tp_kwargs['authtype'] = sickbeard.TORRENT_AUTH_TYPE
if not sickbeard.TORRENT_VERIFY_CERT:
tp_kwargs['check_ssl_cert'] = False
if self.username and self.password:
self.auth = RTorrent(self.host, self.username, self.password, True, tp_kwargs=tp_kwargs)
else:
self.auth = RTorrent(self.host, None, None, True)
return self.auth
def _add_torrent_uri(self, result):
filedata = None
if not self.auth:
return False
if not result:
return False
try:
# Send magnet to rTorrent
torrent = self.auth.load_magnet(result.url, result.hash)
if not torrent:
return False
# Set label
label = sickbeard.TORRENT_LABEL
if result.show.is_anime:
label = sickbeard.TORRENT_LABEL_ANIME
if label:
torrent.set_custom(1, label.lower())
if sickbeard.TORRENT_PATH:
torrent.set_directory(sickbeard.TORRENT_PATH)
# Start torrent
torrent.start()
return True
except Exception as e:
logger.log(traceback.format_exc(), logger.DEBUG)
return False
def _add_torrent_file(self, result):
filedata = None
if not self.auth:
return False
if not result:
return False
# group_name = 'sb_test'.lower() ##### Use provider instead of _test
# if not self._set_torrent_ratio(group_name):
# return False
# Send request to rTorrent
try:
# Send torrent to rTorrent
torrent = self.auth.load_torrent(result.content)
if not torrent:
return False
# Set label
label = sickbeard.TORRENT_LABEL
if result.show.is_anime:
label = sickbeard.TORRENT_LABEL_ANIME
if label:
torrent.set_custom(1, label.lower())
if sickbeard.TORRENT_PATH:
torrent.set_directory(sickbeard.TORRENT_PATH)
# Set Ratio Group
# torrent.set_visible(group_name)
# Start torrent
torrent.start()
return True
except Exception as e:
logger.log(traceback.format_exc(), logger.DEBUG)
return False
def _set_torrent_ratio(self, name):
# if not name:
# return False
#
# if not self.auth:
# return False
#
# views = self.auth.get_views()
#
# if name not in views:
# self.auth.create_group(name)
# group = self.auth.get_group(name)
# ratio = int(float(sickbeard.TORRENT_RATIO) * 100)
#
# try:
# if ratio > 0:
#
# # Explicitly set all group options to ensure it is setup correctly
# group.set_upload('1M')
# group.set_min(ratio)
# group.set_max(ratio)
# group.set_command('d.stop')
# group.enable()
# else:
# # Reset group action and disable it
# group.set_command()
# group.disable()
#
# except:
# return False
return True
def testAuthentication(self):
try:
self._get_auth()
if self.auth is not None:
return True, 'Success: Connected and Authenticated'
else:
return False, 'Error: Unable to get ' + self.name + ' Authentication, check your config!'
except Exception:
return False, 'Error: Unable to connect to ' + self.name
api = rTorrentAPI()
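# --- Illustrative usage sketch (appended; not part of the original client) ---
# The client pulls most of its settings from the ``sickbeard`` module, so it
# only works inside a configured SickRage install; the host and credentials
# below are placeholders for a reachable rTorrent XML-RPC endpoint.
if __name__ == '__main__':
    client = rTorrentAPI(host='http://localhost:8080/RPC2',
                         username='rtorrent', password='secret')
    ok, message = client.testAuthentication()
    print(message)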
|
bcornwellmott/erpnext
|
refs/heads/develop
|
erpnext/accounts/doctype/payment_entry_reference/payment_entry_reference.py
|
56
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class PaymentEntryReference(Document):
pass
|
guijomatos/SickRage
|
refs/heads/master
|
lib/pyasn1/codec/ber/eoo.py
|
407
|
from pyasn1.type import base, tag
class EndOfOctets(base.AbstractSimpleAsn1Item):
defaultValue = 0
tagSet = tag.initTagSet(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x00)
)
endOfOctets = EndOfOctets()
|
snowch/todo-ng-pouchdb
|
refs/heads/master
|
node_modules/gulp-sass/node_modules/node-sass/node_modules/pangyp/gyp/pylib/gyp/MSVSUserFile.py
|
2710
|
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Visual Studio user preferences file writer."""
import os
import re
import socket # for gethostname
import gyp.common
import gyp.easy_xml as easy_xml
#------------------------------------------------------------------------------
def _FindCommandInPath(command):
"""If there are no slashes in the command given, this function
searches the PATH env to find the given command, and converts it
to an absolute path. We have to do this because MSVS is looking
for an actual file to launch a debugger on, not just a command
line. Note that this happens at GYP time, so anything needing to
be built needs to have a full path."""
if '/' in command or '\\' in command:
# If the command already has path elements (either relative or
# absolute), then assume it is constructed properly.
return command
else:
# Search through the path list and find an existing file that
# we can access.
paths = os.environ.get('PATH','').split(os.pathsep)
for path in paths:
item = os.path.join(path, command)
if os.path.isfile(item) and os.access(item, os.X_OK):
return item
return command
def _QuoteWin32CommandLineArgs(args):
new_args = []
for arg in args:
# Replace all double-quotes with double-double-quotes to escape
# them for cmd shell, and then quote the whole thing if there
# are any.
if arg.find('"') != -1:
arg = '""'.join(arg.split('"'))
arg = '"%s"' % arg
# Otherwise, if there are any spaces, quote the whole arg.
elif re.search(r'[ \t\n]', arg):
arg = '"%s"' % arg
new_args.append(arg)
return new_args
class Writer(object):
"""Visual Studio XML user user file writer."""
def __init__(self, user_file_path, version, name):
"""Initializes the user file.
Args:
user_file_path: Path to the user file.
version: Version info.
name: Name of the user file.
"""
self.user_file_path = user_file_path
self.version = version
self.name = name
self.configurations = {}
def AddConfig(self, name):
"""Adds a configuration to the project.
Args:
name: Configuration name.
"""
self.configurations[name] = ['Configuration', {'Name': name}]
def AddDebugSettings(self, config_name, command, environment = {},
working_directory=""):
"""Adds a DebugSettings node to the user file for a particular config.
Args:
command: command line to run. First element in the list is the
executable. All elements of the command will be quoted if
necessary.
      working_directory: working directory in which to run the command. (optional)
"""
command = _QuoteWin32CommandLineArgs(command)
abs_command = _FindCommandInPath(command[0])
if environment and isinstance(environment, dict):
env_list = ['%s="%s"' % (key, val)
for (key,val) in environment.iteritems()]
environment = ' '.join(env_list)
else:
environment = ''
n_cmd = ['DebugSettings',
{'Command': abs_command,
'WorkingDirectory': working_directory,
'CommandArguments': " ".join(command[1:]),
'RemoteMachine': socket.gethostname(),
'Environment': environment,
'EnvironmentMerge': 'true',
# Currently these are all "dummy" values that we're just setting
# in the default manner that MSVS does it. We could use some of
# these to add additional capabilities, I suppose, but they might
# not have parity with other platforms then.
'Attach': 'false',
'DebuggerType': '3', # 'auto' debugger
'Remote': '1',
'RemoteCommand': '',
'HttpUrl': '',
'PDBPath': '',
'SQLDebugging': '',
'DebuggerFlavor': '0',
'MPIRunCommand': '',
'MPIRunArguments': '',
'MPIRunWorkingDirectory': '',
'ApplicationCommand': '',
'ApplicationArguments': '',
'ShimCommand': '',
'MPIAcceptMode': '',
'MPIAcceptFilter': ''
}]
# Find the config, and add it if it doesn't exist.
if config_name not in self.configurations:
self.AddConfig(config_name)
# Add the DebugSettings onto the appropriate config.
self.configurations[config_name].append(n_cmd)
def WriteIfChanged(self):
"""Writes the user file."""
configs = ['Configurations']
for config, spec in sorted(self.configurations.iteritems()):
configs.append(spec)
content = ['VisualStudioUserFile',
{'Version': self.version.ProjectVersion(),
'Name': self.name
},
configs]
easy_xml.WriteXmlIfChanged(content, self.user_file_path,
encoding="Windows-1252")
|
facebookexperimental/eden
|
refs/heads/master
|
eden/hg-server/edenscm/mercurial/winutil.py
|
2
|
# Portions Copyright (c) Facebook, Inc. and its affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
# Copyright (c) Mercurial Contributors.
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
"""Utility functions related to Windows support, but which must be callable
on all platforms.
"""
from typing import Optional
from . import i18n
_ = i18n._
_winreservedchars = ':*?"<>|'
_winreservednames = {
"con",
"prn",
"aux",
"nul",
"com1",
"com2",
"com3",
"com4",
"com5",
"com6",
"com7",
"com8",
"com9",
"lpt1",
"lpt2",
"lpt3",
"lpt4",
"lpt5",
"lpt6",
"lpt7",
"lpt8",
"lpt9",
}
def checkwinfilename(path):
# type: (str) -> Optional[str]
r"""Check that the base-relative path is a valid filename on Windows.
Returns None if the path is ok, or a UI string describing the problem.
>>> checkwinfilename("just/a/normal/path")
>>> checkwinfilename("foo/bar/con.xml")
"filename contains 'con', which is reserved on Windows"
>>> checkwinfilename("foo/con.xml/bar")
"filename contains 'con', which is reserved on Windows"
>>> checkwinfilename("foo/bar/xml.con")
>>> checkwinfilename("foo/bar/AUX/bla.txt")
"filename contains 'AUX', which is reserved on Windows"
>>> checkwinfilename("foo/bar/bla:.txt")
"filename contains ':', which is reserved on Windows"
>>> checkwinfilename("foo/bar/b\07la.txt")
"filename contains '\\x07', which is invalid on Windows"
>>> checkwinfilename("foo/bar/bla ")
"filename ends with ' ', which is not allowed on Windows"
>>> checkwinfilename("../bar")
>>> checkwinfilename("foo\\")
"filename ends with '\\', which is invalid on Windows"
>>> checkwinfilename("foo\\/bar")
"directory name ends with '\\', which is invalid on Windows"
"""
if path.endswith("\\"):
return _("filename ends with '\\', which is invalid on Windows")
if "\\/" in path:
return _("directory name ends with '\\', which is invalid on Windows")
for n in path.replace("\\", "/").split("/"):
if not n:
continue
for c in n:
if c in _winreservedchars:
return _("filename contains '%s', which is reserved " "on Windows") % c
if ord(c) <= 31:
return _("filename contains %r, which is invalid " "on Windows") % c
base = n.split(".")[0]
if base and base.lower() in _winreservednames:
return _("filename contains '%s', which is reserved " "on Windows") % base
t = n[-1:]
if t in ". " and n not in "..":
return _("filename ends with '%s', which is not allowed " "on Windows") % t
|
davidt/reviewboard
|
refs/heads/master
|
reviewboard/reviews/markdown_utils.py
|
2
|
from __future__ import unicode_literals
import warnings
from django.core.exceptions import ObjectDoesNotExist
from django.db.models import Model
from django.utils.html import escape
from djblets import markdown as djblets_markdown
from djblets.siteconfig.models import SiteConfiguration
from markdown import markdown
# Keyword arguments used when calling a Markdown renderer function.
MARKDOWN_KWARGS = {
'safe_mode': 'escape',
'output_format': 'xhtml1',
'lazy_ol': False,
'extensions': [
'fenced_code', 'codehilite', 'sane_lists', 'smart_strong', 'nl2br',
'djblets.markdown.extensions.wysiwyg',
],
'extension_configs': {
'codehilite': {
'guess_lang': False,
},
},
}
def markdown_escape(text):
"""Escapes text for use in Markdown.
This will escape the provided text so that none of the characters will
be rendered specially by Markdown.
This is deprecated. Please use djblets.markdown.markdown_escape instead.
"""
warnings.warn('reviewboard.reviews.markdown_utils.markdown_escape is '
'deprecated. Please use djblets.markdown.markdown_escape.',
DeprecationWarning)
return djblets_markdown.markdown_escape(text)
def markdown_unescape(escaped_text):
"""Unescapes Markdown-escaped text.
This will unescape the provided Markdown-formatted text so that any
escaped characters will be unescaped.
This is deprecated. Please use djblets.markdown.markdown_unescape instead.
"""
warnings.warn('reviewboard.reviews.markdown_utils.markdown_unescape is '
'deprecated. Please use djblets.markdown.markdown_unescape.',
DeprecationWarning)
return djblets_markdown.markdown_unescape(escaped_text)
def markdown_escape_field(obj, field_name):
"""Escapes Markdown text in a model or dictionary's field.
This is a convenience around markdown_escape to escape the contents of
a particular field in a model or dictionary.
"""
if isinstance(obj, Model):
setattr(obj, field_name,
djblets_markdown.markdown_escape(getattr(obj, field_name)))
elif isinstance(obj, dict):
obj[field_name] = djblets_markdown.markdown_escape(obj[field_name])
else:
raise TypeError('Unexpected type %r passed to markdown_escape_field'
% obj)
def markdown_unescape_field(obj, field_name):
"""Unescapes Markdown text in a model or dictionary's field.
This is a convenience around markdown_unescape to unescape the contents of
a particular field in a model or dictionary.
"""
if isinstance(obj, Model):
setattr(obj, field_name, markdown_unescape(getattr(obj, field_name)))
elif isinstance(obj, dict):
obj[field_name] = markdown_unescape(obj[field_name])
else:
raise TypeError('Unexpected type %r passed to markdown_unescape_field'
% obj)
def normalize_text_for_edit(user, text, rich_text, escape_html=True):
"""Normalizes text, converting it for editing.
This will normalize text for editing based on the rich_text flag and
the user settings.
If the text is not in Markdown and the user edits in Markdown by default,
this will return the text escaped for edit. Otherwise, the text is
returned as-is.
"""
if text is None:
return ''
if not rich_text and is_rich_text_default_for_user(user):
# This isn't rich text, but it's going to be edited as rich text,
# so escape it.
text = djblets_markdown.markdown_escape(text)
if escape_html:
text = escape(text)
return text
def markdown_render_conditional(text, rich_text):
"""Return the escaped HTML content based on the rich_text flag."""
if rich_text:
return render_markdown(text)
else:
return escape(text)
def is_rich_text_default_for_user(user):
"""Returns whether the user edits in Markdown by default."""
if user.is_authenticated():
try:
return user.get_profile().should_use_rich_text
except ObjectDoesNotExist:
pass
siteconfig = SiteConfiguration.objects.get_current()
return siteconfig.get('default_use_rich_text')
def markdown_set_field_escaped(obj, field, escaped):
"""Escapes or unescapes the specified field in a model or dictionary."""
if escaped:
markdown_escape_field(obj, field)
else:
markdown_unescape_field(obj, field)
def iter_markdown_lines(markdown_html):
"""Iterates over lines of Markdown, normalizing for individual display.
    Generated Markdown HTML cannot by itself be handled on a per-line basis.
Code blocks, for example, will consist of multiple lines of content
contained within a <pre> tag. Likewise, lists will be a bunch of
<li> tags inside a <ul> tag, and individually do not form valid lists.
This function iterates through the Markdown tree and generates
self-contained lines of HTML that can be rendered individually.
This is deprecated. Please use djblets.markdown.iter_markdown_lines
instead.
"""
warnings.warn(
'reviewboard.reviews.markdown_utils.iter_markdown_lines is '
'deprecated. Please use djblets.markdown.iter_markdown_lines.',
DeprecationWarning)
return djblets_markdown.iter_markdown_lines(markdown_html)
def get_markdown_element_tree(markdown_html):
"""Returns an XML element tree for Markdown-generated HTML.
This will build the tree and return all nodes representing the rendered
Markdown content.
This is deprecated. Please use djblets.markdown.get_markdown_element_tree
instead.
"""
warnings.warn(
'reviewboard.reviews.markdown_utils.get_markdown_element_tree is '
'deprecated. Please use djblets.markdown.get_markdown_element_tree.',
DeprecationWarning)
return djblets_markdown.get_markdown_element_tree(markdown_html)
def sanitize_illegal_chars_for_xml(s):
"""Sanitize a string, removing characters illegal in XML.
This will remove a number of characters that would break the XML parser.
They may be in the string due to a copy/paste.
This code is courtesy of the XmlRpcPlugin developers, as documented
here: http://stackoverflow.com/a/22273639
This is deprecated. Please use
djblets.markdown.sanitize_illegal_chars_for_xml instead.
"""
warnings.warn(
'reviewboard.reviews.markdown_utils.sanitize_illegal_chars_for_xml '
'is deprecated. Please use '
'djblets.markdown.sanitize_illegal_chars_for_xml.',
DeprecationWarning)
return djblets_markdown.sanitize_illegal_chars_for_xml(s)
def render_markdown(text):
"""Renders Markdown text to HTML.
The Markdown text will be sanitized to prevent injecting custom HTML.
It will also enable a few plugins for code highlighting and sane lists.
"""
if isinstance(text, bytes):
text = text.decode('utf-8')
return markdown(text, **MARKDOWN_KWARGS)
def render_markdown_from_file(f):
"""Renders Markdown text to HTML.
The Markdown text will be sanitized to prevent injecting custom HTML.
It will also enable a few plugins for code highlighting and sane lists.
"""
return djblets_markdown.render_markdown_from_file(f, **MARKDOWN_KWARGS)
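# --- Illustrative usage sketch (appended; not part of the original module) ---
# Round-trips a dictionary field through escape/unescape and renders Markdown
# with the Review Board settings above. Running it requires the Review
# Board/djblets environment that the module-level imports assume.
if __name__ == '__main__':
    comment = {'text': '*not emphasis*, just literal asterisks'}
    markdown_set_field_escaped(comment, 'text', escaped=True)
    print(render_markdown(comment['text']))
    markdown_set_field_escaped(comment, 'text', escaped=False)
    print(comment['text'])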
|
sololuz/cibb-web
|
refs/heads/master
|
app/core/utils/date.py
|
1
|
from datetime import datetime, timedelta
from django.utils.timezone import utc
def now():
""" returns the current date and time in UTC format (datetime object) """
return datetime.utcnow().replace(tzinfo=utc)
def now_after(**kwargs):
""" returns the current date and time plus the time (seconds, minutes, hours, days, years) specified """
return now() + timedelta(**kwargs)
def ago(**kwargs):
""" returns the current date and time minus the time (seconds, minutes, hours, days, years) specified """
return now() - timedelta(**kwargs)
def after(date, **kwargs):
"""
    returns the given date plus the time (seconds, minutes, hours, days, weeks) specified
    :param date: datetime object to which to add more time
"""
return date + timedelta(**kwargs)
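# --- Illustrative usage sketch (appended; not part of the original module) ---
# Quick demonstration of the helpers above; any timedelta-compatible keywords
# (seconds, minutes, hours, days, weeks) can be combined freely.
if __name__ == '__main__':
    print(now())
    print(now_after(hours=2))        # two hours from now
    print(ago(days=7))               # one week ago
    print(after(now(), minutes=30))  # an arbitrary base date plus 30 minutes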
|
lmazuel/azure-sdk-for-python
|
refs/heads/master
|
azure-mgmt-datafactory/azure/mgmt/datafactory/models/odbc_linked_service.py
|
1
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .linked_service import LinkedService
class OdbcLinkedService(LinkedService):
"""Open Database Connectivity (ODBC) linked service.
:param additional_properties: Unmatched properties from the message are
     deserialized to this collection
:type additional_properties: dict[str, object]
:param connect_via: The integration runtime reference.
:type connect_via:
~azure.mgmt.datafactory.models.IntegrationRuntimeReference
:param description: Linked service description.
:type description: str
:param parameters: Parameters for linked service.
:type parameters: dict[str,
~azure.mgmt.datafactory.models.ParameterSpecification]
    :param annotations: List of tags that can be used for describing the
     linked service.
:type annotations: list[object]
:param type: Constant filled by server.
:type type: str
:param connection_string: The non-access credential portion of the
connection string as well as an optional encrypted credential.
:type connection_string: ~azure.mgmt.datafactory.models.SecretBase
:param authentication_type: Type of authentication used to connect to the
ODBC data store. Possible values are: Anonymous and Basic. Type: string
(or Expression with resultType string).
:type authentication_type: object
:param credential: The access credential portion of the connection string
specified in driver-specific property-value format.
:type credential: ~azure.mgmt.datafactory.models.SecretBase
:param user_name: User name for Basic authentication. Type: string (or
Expression with resultType string).
:type user_name: object
:param password: Password for Basic authentication.
:type password: ~azure.mgmt.datafactory.models.SecretBase
:param encrypted_credential: The encrypted credential used for
authentication. Credentials are encrypted using the integration runtime
credential manager. Type: string (or Expression with resultType string).
:type encrypted_credential: object
"""
_validation = {
'type': {'required': True},
'connection_string': {'required': True},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'connect_via': {'key': 'connectVia', 'type': 'IntegrationRuntimeReference'},
'description': {'key': 'description', 'type': 'str'},
'parameters': {'key': 'parameters', 'type': '{ParameterSpecification}'},
'annotations': {'key': 'annotations', 'type': '[object]'},
'type': {'key': 'type', 'type': 'str'},
'connection_string': {'key': 'typeProperties.connectionString', 'type': 'SecretBase'},
'authentication_type': {'key': 'typeProperties.authenticationType', 'type': 'object'},
'credential': {'key': 'typeProperties.credential', 'type': 'SecretBase'},
'user_name': {'key': 'typeProperties.userName', 'type': 'object'},
'password': {'key': 'typeProperties.password', 'type': 'SecretBase'},
'encrypted_credential': {'key': 'typeProperties.encryptedCredential', 'type': 'object'},
}
def __init__(self, connection_string, additional_properties=None, connect_via=None, description=None, parameters=None, annotations=None, authentication_type=None, credential=None, user_name=None, password=None, encrypted_credential=None):
super(OdbcLinkedService, self).__init__(additional_properties=additional_properties, connect_via=connect_via, description=description, parameters=parameters, annotations=annotations)
self.connection_string = connection_string
self.authentication_type = authentication_type
self.credential = credential
self.user_name = user_name
self.password = password
self.encrypted_credential = encrypted_credential
self.type = 'Odbc'
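# --- Illustrative usage sketch (appended; not part of the generated model) ---
# Constructs the model above. ``SecureString`` is assumed to be the
# SecretBase implementation shipped with this version of the generated SDK;
# the connection string and credentials are placeholders.
if __name__ == '__main__':
    from azure.mgmt.datafactory.models import SecureString  # assumed import
    service = OdbcLinkedService(
        connection_string=SecureString(value='Driver={SQL Server};Server=example;Database=exampledb;'),
        authentication_type='Basic',
        user_name='dbuser',
        password=SecureString(value='placeholder-password'),
        description='Example ODBC linked service')
    print(service.type)  # 'Odbc'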
|
gdgellatly/OCB1
|
refs/heads/7.0
|
openerp/workflow/instance.py
|
61
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import workitem
def create(cr, ident, wkf_id):
(uid,res_type,res_id) = ident
cr.execute('insert into wkf_instance (res_type,res_id,uid,wkf_id) values (%s,%s,%s,%s) RETURNING id', (res_type,res_id,uid,wkf_id))
id_new = cr.fetchone()[0]
cr.execute('select * from wkf_activity where flow_start=True and wkf_id=%s', (wkf_id,))
res = cr.dictfetchall()
stack = []
workitem.create(cr, res, id_new, ident, stack=stack)
update(cr, id_new, ident)
return id_new
def delete(cr, ident):
(uid,res_type,res_id) = ident
cr.execute('delete from wkf_instance where res_id=%s and res_type=%s', (res_id,res_type))
def validate(cr, inst_id, ident, signal, force_running=False):
cr.execute("select * from wkf_workitem where inst_id=%s", (inst_id,))
stack = []
for witem in cr.dictfetchall():
stack = []
workitem.process(cr, witem, ident, signal, force_running, stack=stack)
# An action is returned
_update_end(cr, inst_id, ident)
return stack and stack[0] or False
def update(cr, inst_id, ident):
cr.execute("select * from wkf_workitem where inst_id=%s", (inst_id,))
for witem in cr.dictfetchall():
stack = []
workitem.process(cr, witem, ident, stack=stack)
return _update_end(cr, inst_id, ident)
def _update_end(cr, inst_id, ident):
cr.execute('select wkf_id from wkf_instance where id=%s', (inst_id,))
wkf_id = cr.fetchone()[0]
cr.execute('select state,flow_stop from wkf_workitem w left join wkf_activity a on (a.id=w.act_id) where w.inst_id=%s', (inst_id,))
ok=True
for r in cr.fetchall():
if (r[0]<>'complete') or not r[1]:
ok=False
break
if ok:
cr.execute('select distinct a.name from wkf_activity a left join wkf_workitem w on (a.id=w.act_id) where w.inst_id=%s', (inst_id,))
act_names = cr.fetchall()
cr.execute("update wkf_instance set state='complete' where id=%s", (inst_id,))
cr.execute("update wkf_workitem set state='complete' where subflow_id=%s", (inst_id,))
cr.execute("select i.id,w.osv,i.res_id from wkf_instance i left join wkf w on (i.wkf_id=w.id) where i.id IN (select inst_id from wkf_workitem where subflow_id=%s)", (inst_id,))
for i in cr.fetchall():
for act_name in act_names:
validate(cr, i[0], (ident[0],i[1],i[2]), 'subflow.'+act_name[0])
return ok
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
shodoco/bcc
|
refs/heads/master
|
examples/tracing/strlen_hist.py
|
4
|
#!/usr/bin/python
#
# strlen_hist.py Histogram of system-wide strlen return values
#
# A basic example of using uprobes along with a histogram to show
# distributions.
#
# Runs until ctrl-c is pressed.
#
# Copyright (c) PLUMgrid, Inc.
# Licensed under the Apache License, Version 2.0 (the "License")
#
# Example output:
# $ sudo ./strlen_hist.py
# 22:12:52
# strlen return: : count distribution
# 0 -> 1 : 2106 |**************** |
# 2 -> 3 : 1172 |********* |
# 4 -> 7 : 3892 |****************************** |
# 8 -> 15 : 5096 |****************************************|
# 16 -> 31 : 2201 |***************** |
# 32 -> 63 : 547 |**** |
# 64 -> 127 : 106 | |
# 128 -> 255 : 13 | |
# 256 -> 511 : 27 | |
# 512 -> 1023 : 6 | |
# 1024 -> 2047 : 10 | |
# ^C$
#
from __future__ import print_function
import bcc
import time
text = """
#include <uapi/linux/ptrace.h>
BPF_HISTOGRAM(dist);
int count(struct pt_regs *ctx) {
dist.increment(bpf_log2l(PT_REGS_RC(ctx)));
return 0;
}
"""
b = bcc.BPF(text=text)
sym="strlen"
b.attach_uretprobe(name="c", sym=sym, fn_name="count")
dist = b["dist"]
try:
while True:
time.sleep(1)
print("%-8s\n" % time.strftime("%H:%M:%S"), end="")
dist.print_log2_hist(sym + " return:")
dist.clear()
except KeyboardInterrupt:
pass
|
eunchong/build
|
refs/heads/master
|
scripts/common/archive_utils_unittest.py
|
2
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import optparse
import os
import shutil
import sys
import tempfile
import unittest
import zipfile
BASE_DIR = os.path.join(
os.path.dirname(os.path.abspath(__file__)), '..', '..')
sys.path.append(os.path.join(BASE_DIR, 'scripts'))
sys.path.append(os.path.join(BASE_DIR, 'site_config'))
from common import archive_utils
DIR_LIST = ['foo',
os.path.join('fee', 'foo'),
os.path.join('fee', 'faa'),
os.path.join('fee', 'fie'),
os.path.join('foo', 'fee', 'faa')]
TEMP_FILES = ['foo.txt',
'bar.txt',
os.path.join('foo', 'buzz.txt'),
os.path.join('foo', 'bing'),
os.path.join('fee', 'foo', 'bar'),
os.path.join('fee', 'faa', 'bar'),
os.path.join('fee', 'fie', 'fo'),
os.path.join('foo', 'fee', 'faa', 'boo.txt')]
TEMP_FILES_WITH_WILDCARDS = ['foo.txt',
'bar.txt',
os.path.join('foo', '*'),
os.path.join('fee', '*', 'bar'),
os.path.join('fee', '*', 'fo'),
os.path.join('foo', 'fee', 'faa', 'boo.txt')]
# Sample FILES.cfg-style contents.
TEST_FILES_CFG = [
{
'filename': 'allany.txt',
'arch': ['32bit', '64bit', 'arm'],
'buildtype': ['dev', 'official'],
'filegroup': ['default', 'allany'],
},
{
'filename': 'allany2.txt',
'buildtype': ['dev', 'official'],
'filegroup': ['default', 'allany'],
},
{
'filename': 'subdirectory/allany.txt',
'arch': ['32bit', '64bit'],
'buildtype': ['dev', 'official'],
'filegroup': ['default', 'allany'],
},
{
'filename': 'official64.txt',
'arch': ['64bit'],
'buildtype': ['official'],
},
{
'filename': 'dev32.txt',
'arch': ['32bit'],
'buildtype': ['dev'],
},
{
'filename': 'archive_allany.txt',
'arch': ['32bit', '64bit'],
'buildtype': ['dev', 'official'],
'archive': 'static_archive.zip',
'filegroup': ['default', 'allany'],
},
{
'filename': 'subdirectory/archive_allany.txt',
'arch': ['32bit', '64bit'],
'buildtype': ['dev', 'official'],
'archive': 'static_archive.zip',
'filegroup': ['default', 'allany'],
},
{
'filename': 'subdirectory/archive_dev32.txt',
'arch': ['32bit'],
'buildtype': ['dev'],
'archive': 'static_archive.zip',
},
{
'filename': 'allany_dev_optional.txt',
'arch': ['32bit', '64bit'],
'buildtype': ['dev', 'official'],
'optional': ['dev'],
'filegroup': ['default', 'allany'],
},
{
'filename': 'dev64_direct_archive.txt',
'arch': ['64bit'],
'buildtype': ['dev'],
'archive': 'renamed_direct_archive.txt',
'direct_archive': 1,
},
{
'filename': 'dev64_implied_direct_archive.txt',
'arch': ['64bit'],
'buildtype': ['dev'],
'archive': 'dev64_implied_direct_archive.txt',
},
]
def CreateTestFilesCfg(path):
files_cfg = os.path.join(path, archive_utils.FILES_FILENAME)
f = open(files_cfg, 'w')
f.write('FILES = %s' % str(TEST_FILES_CFG))
f.close()
return files_cfg
def CreateFileSetInDir(out_dir, file_list):
for f in file_list:
dir_part = os.path.dirname(f)
if dir_part:
dir_path = os.path.join(out_dir, dir_part)
if not os.path.exists(dir_path):
os.makedirs(dir_path)
temp_file = open(os.path.join(out_dir, f), 'w')
temp_file.write('contents')
temp_file.close()
def BuildTestFilesTree(test_path):
for temp_file in TEMP_FILES:
temp_path = os.path.join(test_path, temp_file)
dir_name = os.path.dirname(temp_path)
if not os.path.exists(temp_path):
relative_dir_name = os.path.dirname(temp_file)
if relative_dir_name and not os.path.exists(dir_name):
os.makedirs(dir_name)
open(temp_path, 'a')
def FetchSvn(cfg_path, svn=None):
if not svn:
svn = pysvn.Client()
f, files_cfg = tempfile.mkstemp()
os.write(f, svn.cat(cfg_path))
os.close(f)
return files_cfg
def DiffFilesCfg(cfg_path, svn):
"""Parse local FILES.cfg and show changes so they can be manually verified."""
print '\nDiff parsing "%s" ...' % cfg_path
d = difflib.Differ()
def CompareLists(svnlist, newlist, msg):
diffs = []
for x in d.compare(svnlist, newlist):
if x.startswith('- '):
diffs.append(' DELETION: %s' % x[2:])
elif x.startswith('+ '):
diffs.append(' ADDITION: %s' % x[2:])
if diffs:
print msg
print '\n'.join(diffs)
svn_cfg = FetchSvn(RealFilesCfgTest.SVNBASE + cfg_path, svn)
svnparser = archive_utils.FilesCfgParser(svn_cfg, None, None)
os.unlink(svn_cfg)
newparser = archive_utils.FilesCfgParser(options.src_base + cfg_path, None,
None)
# Determine the "parsable values" in the two versions.
archs = []
buildtypes = []
groups = []
# pylint: disable=W0212
for item in newparser._files_cfg + svnparser._files_cfg:
# pylint: enable=W0212
if item.get('arch'):
archs.extend(item['arch'])
if item.get('buildtype'):
buildtypes.extend(item['buildtype'])
if item.get('filegroup'):
groups.extend(item['filegroup'])
archs = set(archs)
buildtypes = set(buildtypes)
groups = set(groups)
# Legacy list handling (i.e. default filegroup).
print '\nChecking ParseLegacyList() ...'
for arch, buildtype in itertools.product(archs, buildtypes):
msg = '%s:%s' % (arch, buildtype)
newparser.arch = svnparser.arch = arch
newparser.buildtype = svnparser.buildtype = buildtype
svn_legacy_list = svnparser.ParseLegacyList()
new_legacy_list = newparser.ParseLegacyList()
CompareLists(svn_legacy_list, new_legacy_list, msg)
print '\nChecking ParseGroup() ...'
for group, arch, buildtype in itertools.product(groups, archs, buildtypes):
msg = '%s:%s:%s' % (group, arch, buildtype)
newparser.arch = svnparser.arch = arch
newparser.buildtype = svnparser.buildtype = buildtype
svn_group_list = svnparser.ParseGroup(group)
new_group_list = newparser.ParseGroup(group)
CompareLists(svn_group_list, new_group_list, msg)
print '\nChecking Archives() ...'
for arch, buildtype in itertools.product(archs, buildtypes):
newparser.arch = svnparser.arch = arch
newparser.buildtype = svnparser.buildtype = buildtype
svn_archive_lists = svnparser.ParseArchiveLists()
new_archive_lists = newparser.ParseArchiveLists()
archives = set(svn_archive_lists.keys() + new_archive_lists.keys())
for archive in archives:
msg = '%s:%s:%s' % (archive, arch, buildtype)
CompareLists([x['filename'] for x in svn_archive_lists.get(archive, [])],
[x['filename'] for x in new_archive_lists.get(archive, [])],
msg)
class ArchiveUtilsTest(unittest.TestCase):
def setUp(self):
self.temp_dir = tempfile.mkdtemp()
self.src_dir = os.path.join(self.temp_dir, 'src')
self.build_dir = os.path.join(self.temp_dir, 'build')
self.tool_dir = os.path.join(self.src_dir, 'tools')
os.makedirs(self.src_dir)
os.makedirs(self.build_dir)
os.makedirs(self.tool_dir)
def tearDown(self):
shutil.rmtree(self.temp_dir)
# copied from python2.7 version of unittest
# TODO(sbc): remove once python2.7 is required.
def assertIn(self, member, container, msg=None):
"""Just like self.assertTrue(a in b), but with a nicer default message."""
if member not in container:
standardMsg = '%s not found in %s' % (repr(member),
repr(container))
self.fail(self._formatMessage(msg, standardMsg))
# copied from python2.7 version of unittest
# TODO(sbc): remove once python2.7 is required.
def assertNotIn(self, member, container, msg=None):
"""Just like self.assertTrue(a not in b), but with a nicer default
message."""
if member in container:
standardMsg = '%s unexpectedly found in %s' % (repr(member),
repr(container))
self.fail(self._formatMessage(msg, standardMsg))
def verifyZipFile(self, zip_dir, zip_file_path, archive_name, expected_files):
# Extract the files from the archive
extract_dir = os.path.join(zip_dir, 'extract')
os.makedirs(extract_dir)
zip_file = zipfile.ZipFile(zip_file_path)
# The extractall method is supported from V2.6
if hasattr(zip_file, 'extractall'):
zip_file.extractall(extract_dir) # pylint: disable=E1101
# Check that all expected files are there
def FindFiles(arg, dirname, names):
subdir = dirname[len(arg):].strip(os.path.sep)
extracted_files.extend([os.path.join(subdir, name) for name in names if
os.path.isfile(os.path.join(dirname, name))])
extracted_files = []
archive_path = os.path.join(extract_dir, archive_name)
os.path.walk(archive_path, FindFiles, archive_path)
self.assertEquals(len(expected_files), len(extracted_files))
for f in extracted_files:
self.assertIn(f, expected_files)
else:
test_result = zip_file.testzip()
self.assertTrue(not test_result)
zip_file.close()
def testParseFilesList(self):
files_cfg = CreateTestFilesCfg(self.temp_dir)
arch = '64bit'
buildtype = 'official'
files_list = archive_utils.ParseFilesList(files_cfg, buildtype, arch)
# Verify FILES.cfg was parsed correctly.
for i in TEST_FILES_CFG:
if buildtype not in i['buildtype']:
continue
if i.get('arch') and arch not in i['arch']:
continue
# 'archive' flagged files shouldn't be included in the default parse.
if i.get('archive'):
self.assertNotIn(i['filename'], files_list)
else:
self.assertIn(i['filename'], files_list)
files_list.remove(i['filename'])
# No duplicate files.
self.assertEqual(files_list.count(i['filename']), 0)
# No unexpected files.
self.assertEqual(len(files_list), 0)
def testParseLegacyList(self):
files_cfg = CreateTestFilesCfg(self.temp_dir)
arch = '64bit'
buildtype = 'official'
fparser = archive_utils.FilesCfgParser(files_cfg, buildtype, arch)
files_list = fparser.ParseLegacyList()
# Verify FILES.cfg was parsed correctly.
for i in TEST_FILES_CFG:
if buildtype not in i['buildtype']:
continue
if i.get('arch') and arch not in i['arch']:
continue
# 'archive' flagged files shouldn't be included in the default parse.
if i.get('archive'):
self.assertNotIn(i['filename'], files_list)
else:
self.assertIn(i['filename'], files_list)
files_list.remove(i['filename'])
# No duplicate files.
self.assertEqual(files_list.count(i['filename']), 0)
# No unexpected files.
self.assertEqual(len(files_list), 0)
def testParseArchiveLists(self):
ARCHIVENAME = 'static_archive.zip'
files_cfg = CreateTestFilesCfg(self.temp_dir)
arch = '64bit'
buildtype = 'official'
fparser = archive_utils.FilesCfgParser(files_cfg, buildtype, arch)
archives = fparser.ParseArchiveLists()
self.assertEqual(archives.keys(), [ARCHIVENAME])
self.assertEqual([x['filename'] for x in archives[ARCHIVENAME]],
['archive_allany.txt', 'subdirectory/archive_allany.txt'])
# 32bit dev has additional files under the same archive name.
arch = '32bit'
buildtype = 'dev'
fparser = archive_utils.FilesCfgParser(files_cfg, buildtype, arch)
archives = fparser.ParseArchiveLists()
self.assertEqual(archives.keys(), [ARCHIVENAME])
self.assertEqual([x['filename'] for x in archives[ARCHIVENAME]],
['archive_allany.txt', 'subdirectory/archive_allany.txt',
'subdirectory/archive_dev32.txt'])
def testOptionalFiles(self):
files_cfg = CreateTestFilesCfg(self.temp_dir)
optional_fn = 'allany_dev_optional.txt'
arch = '64bit'
buildtype = 'dev'
fparser = archive_utils.FilesCfgParser(files_cfg, buildtype, arch)
self.assertTrue(fparser.IsOptional(optional_fn))
# It's only optional for 'dev' builds.
buildtype = 'official'
fparser = archive_utils.FilesCfgParser(files_cfg, buildtype, arch)
self.assertFalse(fparser.IsOptional(optional_fn))
def testDirectArchive(self):
files_cfg = CreateTestFilesCfg(self.temp_dir)
arch = '64bit'
buildtype = 'dev'
fparser = archive_utils.FilesCfgParser(files_cfg, buildtype, arch)
archives = fparser.ParseArchiveLists()
self.assertTrue(fparser.IsDirectArchive(
archives['renamed_direct_archive.txt']))
self.assertTrue(fparser.IsDirectArchive(
archives['dev64_implied_direct_archive.txt']))
self.assertFalse(fparser.IsDirectArchive(archives['static_archive.zip']))
def testParserChange(self):
"""Changing parser criteria should be the same as creating a new one."""
files_cfg = CreateTestFilesCfg(self.temp_dir)
arch = '64bit'
buildtype = 'dev'
oldfparser = archive_utils.FilesCfgParser(files_cfg, buildtype, arch)
old_dev_list = oldfparser.ParseLegacyList()
buildtype = 'official'
oldfparser.buildtype = buildtype
old_official_list = oldfparser.ParseLegacyList()
# The changed parser should return different ParseLegacyList.
self.assertNotEqual(sorted(old_dev_list), sorted(old_official_list))
newfparser = archive_utils.FilesCfgParser(files_cfg, buildtype, arch)
new_official_list = newfparser.ParseLegacyList()
# The new parser and changed parser should return the same data.
self.assertEqual(sorted(old_official_list), sorted(new_official_list))
old_allany_list = oldfparser.ParseGroup('allany')
new_allany_list = oldfparser.ParseGroup('allany')
self.assertEqual(sorted(old_allany_list), sorted(new_allany_list))
def testExtractDirsFromPaths(self):
path_list = TEMP_FILES[:]
expected_dir_list = DIR_LIST[:]
expected_dir_list.sort()
dir_list = archive_utils.ExtractDirsFromPaths(path_list)
dir_list.sort()
self.assertEquals(expected_dir_list, dir_list)
def testExpandWildcards(self):
path_list = TEMP_FILES_WITH_WILDCARDS[:]
expected_path_list = TEMP_FILES[:]
expected_path_list.sort()
BuildTestFilesTree(self.temp_dir)
expanded_path_list = archive_utils.ExpandWildcards(self.temp_dir, path_list)
expanded_path_list.sort()
self.assertEquals(expected_path_list, expanded_path_list)
def testCreateArchive(self):
files_cfg = CreateTestFilesCfg(self.tool_dir)
CreateFileSetInDir(self.build_dir, [i['filename'] for i in TEST_FILES_CFG])
archive_name = 'test'
arch = '64bit'
buildtype = 'official'
fparser = archive_utils.FilesCfgParser(files_cfg, buildtype, arch)
files_list = fparser.ParseLegacyList()
zip_dir, zip_file_path = archive_utils.CreateArchive(
self.build_dir, self.temp_dir, files_list, archive_name)
self.assertTrue(zip_dir)
self.assertTrue(zip_file_path)
self.assertTrue(os.path.exists(zip_file_path))
self.assertEqual(os.path.basename(zip_file_path), archive_name)
self.verifyZipFile(zip_dir, zip_file_path, os.path.basename(zip_dir),
files_list)
# Creating the archive twice is wasteful, but shouldn't fail (e.g. due to
# conflicts with existing zip_dir or zip_file_path). This also tests the
# condition on the bots where they don't clean up their staging directory
# between runs.
zip_dir, zip_file_path = archive_utils.CreateArchive(
self.build_dir, self.temp_dir, files_list, archive_name)
self.assertTrue(zip_dir)
self.assertTrue(zip_file_path)
self.assertTrue(os.path.exists(zip_file_path))
self.verifyZipFile(zip_dir, zip_file_path, os.path.basename(zip_dir),
files_list)
def testCreateZipExtArchive(self):
files_cfg = CreateTestFilesCfg(self.tool_dir)
CreateFileSetInDir(self.build_dir, [i['filename'] for i in TEST_FILES_CFG])
archive_name = 'test_with_ext.zip'
arch = '64bit'
buildtype = 'official'
fparser = archive_utils.FilesCfgParser(files_cfg, buildtype, arch)
files_list = fparser.ParseLegacyList()
zip_dir, zip_file_path = archive_utils.CreateArchive(
self.build_dir, self.temp_dir, files_list, archive_name)
self.assertTrue(zip_dir)
self.assertTrue(zip_file_path)
self.assertTrue(os.path.exists(zip_file_path))
self.assertEqual(os.path.basename(zip_file_path), archive_name)
self.verifyZipFile(zip_dir, zip_file_path, os.path.basename(zip_dir),
files_list)
# Creating the archive twice is wasteful, but shouldn't fail (e.g. due to
# conflicts with existing zip_dir or zip_file_path). This also tests the
# condition on the bots where they don't clean up their staging directory
# between runs.
zip_dir, zip_file_path = archive_utils.CreateArchive(
self.build_dir, self.temp_dir, files_list, archive_name)
self.assertTrue(zip_dir)
self.assertTrue(zip_file_path)
self.assertTrue(os.path.exists(zip_file_path))
self.verifyZipFile(zip_dir, zip_file_path, os.path.basename(zip_dir),
files_list)
def testCreateEmptyArchive(self):
files_cfg = CreateTestFilesCfg(self.tool_dir)
archive_name = 'test_empty'
arch = '64bit'
buildtype = 'nosuchtype'
fparser = archive_utils.FilesCfgParser(files_cfg, buildtype, arch)
files_list = fparser.ParseLegacyList()
zip_dir, zip_file_path = archive_utils.CreateArchive(
self.build_dir, self.temp_dir, files_list, archive_name)
self.assertFalse(zip_dir)
self.assertFalse(zip_file_path)
self.assertFalse(os.path.exists(zip_file_path))
class RealFilesCfgTest(unittest.TestCase):
"""Basic sanity checks for the real FILES.cfg files."""
SVNBASE = 'svn://svn.chromium.org/chrome/trunk/src'
WIN_PATH = '/chrome/tools/build/win/FILES.cfg'
LINUX_PATH = '/chrome/tools/build/linux/FILES.cfg'
MAC_PATH = '/chrome/tools/build/mac/FILES.cfg'
CROS_PATH = '/chrome/tools/build/chromeos/FILES.cfg'
def setUp(self):
self.files_cfg = None
self.svn = pysvn.Client()
def tearDown(self):
if self.files_cfg:
os.unlink(self.files_cfg)
def ParseFilesCfg(self, cfg_path):
if cfg_path.startswith('svn://'):
# Store the svn file so it will be automatically cleaned up in tearDown().
self.files_cfg = FetchSvn(cfg_path, self.svn)
cfg_path = self.files_cfg
# There should always be some 32bit, official and dev files (otherwise
# there's nothing to archive).
arch = '32bit'
buildtype = 'official'
fparser = archive_utils.FilesCfgParser(cfg_path, buildtype, arch)
files_list = fparser.ParseLegacyList()
self.assertTrue(files_list)
fparser.buildtype = 'dev'
files_list = fparser.ParseLegacyList()
self.assertTrue(files_list)
# Arbitrary buildtype shouldn't return anything.
fparser.buildtype = 'bogus'
files_list = fparser.ParseLegacyList()
self.assertFalse(files_list)
# Check for incomplete/incorrect settings.
# buildtype must exist and be in ['dev', 'official']
self.assertFalse([f for f in fparser._files_cfg # pylint: disable=W0212
if not f['buildtype']
or set(f['buildtype']) - set(['dev', 'official'])])
def testWinParse(self):
self.ParseFilesCfg(options.src_base + RealFilesCfgTest.WIN_PATH)
def testWinParseSymbols(self):
files_cfg = options.src_base + RealFilesCfgTest.WIN_PATH
# There should be some official build symbols.
fparser = archive_utils.FilesCfgParser(files_cfg, 'official', '32bit')
official_list = fparser.ParseGroup('symsrc')
self.assertTrue(official_list)
# Windows symbols should be the same regardless of arch.
fparser = archive_utils.FilesCfgParser(files_cfg, 'official', '64bit')
official64_list = fparser.ParseGroup('symsrc')
self.assertEqual(official64_list, official_list)
def testMacParse(self):
self.ParseFilesCfg(options.src_base + RealFilesCfgTest.MAC_PATH)
def testLinuxParse(self):
self.ParseFilesCfg(options.src_base + RealFilesCfgTest.LINUX_PATH)
def testChromeosParse(self):
self.ParseFilesCfg(options.src_base + RealFilesCfgTest.CROS_PATH)
if __name__ == '__main__':
option_parser = optparse.OptionParser()
option_parser.add_option('--realfiles', action='store_true',
help='Also run tests on FILES.cfg files from chromium sources.')
option_parser.add_option('--realfiles-only', action='store_true',
help='Only run tests on FILES.cfg files from chromium sources.')
option_parser.add_option('--src-base', default=RealFilesCfgTest.SVNBASE,
help='Base file or svn path to the chromium sources.')
option_parser.add_option('--diffparse', action='store_true',
help='Compare parsing local FILES.cfg and latest SVN version. '
'(Requires a --realfiles* and --src-base flag.) '
'Use this to make sure any changes in file groupings, archive '
'contents, etc. are intentional.')
options, unused_args = option_parser.parse_args()
errors = False
if not options.realfiles_only:
suite = unittest.TestLoader().loadTestsFromTestCase(ArchiveUtilsTest)
# Run with a bit more output.
result = unittest.TextTestRunner(verbosity=2).run(suite)
if not errors:
errors = not result.wasSuccessful()
# These tests are a little slow due to the svn download, so only run them if
# explicitly requested.
if options.realfiles or options.realfiles_only:
import pysvn # pylint: disable=F0401
suite = unittest.TestLoader().loadTestsFromTestCase(RealFilesCfgTest)
result = unittest.TextTestRunner(verbosity=2).run(suite)
if not errors:
errors = not result.wasSuccessful()
if options.diffparse:
import difflib # pylint: disable=F0401
import itertools # pylint: disable=F0401
if options.src_base == RealFilesCfgTest.SVNBASE:
print ('WARNING: --diffparse requires --src-base set to your local src '
'path. Skipping because nothing to compare.')
else:
# Turn off stdout buffering to allow progress messages during slow svn.
sys.stdout.flush()
sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0)
svn_client = pysvn.Client()
DiffFilesCfg(RealFilesCfgTest.WIN_PATH, svn_client)
DiffFilesCfg(RealFilesCfgTest.LINUX_PATH, svn_client)
DiffFilesCfg(RealFilesCfgTest.MAC_PATH, svn_client)
DiffFilesCfg(RealFilesCfgTest.CROS_PATH, svn_client)
# Specify error return so caller (e.g. shell script) can easily detect
# failures.
sys.exit(errors)
|
gioman/QGIS
|
refs/heads/master
|
python/plugins/processing/algs/qgis/ExtentFromLayer.py
|
1
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
ExtentFromLayer.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from qgis.PyQt.QtGui import QIcon
from qgis.PyQt.QtCore import QVariant
from qgis.core import (QgsField,
QgsPoint,
QgsGeometry,
QgsFeature,
QgsWkbTypes,
QgsProcessingUtils,
QgsFields)
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.parameters import ParameterVector
from processing.core.parameters import ParameterBoolean
from processing.core.outputs import OutputVector
from processing.tools import dataobjects
pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0]
class ExtentFromLayer(GeoAlgorithm):
INPUT_LAYER = 'INPUT_LAYER'
BY_FEATURE = 'BY_FEATURE'
OUTPUT = 'OUTPUT'
def icon(self):
return QIcon(os.path.join(pluginPath, 'images', 'ftools', 'layer_extent.png'))
def tags(self):
return self.tr('extent,envelope,bounds,bounding,boundary,layer').split(',')
def group(self):
return self.tr('Vector general tools')
def name(self):
return 'polygonfromlayerextent'
def displayName(self):
return self.tr('Polygon from layer extent')
def defineCharacteristics(self):
self.addParameter(ParameterVector(self.INPUT_LAYER,
self.tr('Input layer')))
self.addParameter(ParameterBoolean(self.BY_FEATURE,
self.tr('Calculate extent for each feature separately'), False))
self.addOutput(OutputVector(self.OUTPUT, self.tr('Extent'), datatype=[dataobjects.TYPE_VECTOR_POLYGON]))
def processAlgorithm(self, context, feedback):
layer = QgsProcessingUtils.mapLayerFromString(self.getParameterValue(self.INPUT_LAYER), context)
byFeature = self.getParameterValue(self.BY_FEATURE)
fields = QgsFields()
fields.append(QgsField('MINX', QVariant.Double))
fields.append(QgsField('MINY', QVariant.Double))
fields.append(QgsField('MAXX', QVariant.Double))
fields.append(QgsField('MAXY', QVariant.Double))
fields.append(QgsField('CNTX', QVariant.Double))
fields.append(QgsField('CNTY', QVariant.Double))
fields.append(QgsField('AREA', QVariant.Double))
fields.append(QgsField('PERIM', QVariant.Double))
fields.append(QgsField('HEIGHT', QVariant.Double))
fields.append(QgsField('WIDTH', QVariant.Double))
writer = self.getOutputFromName(self.OUTPUT).getVectorWriter(fields, QgsWkbTypes.Polygon, layer.crs(), context)
if byFeature:
self.featureExtent(layer, context, writer, feedback)
else:
self.layerExtent(layer, writer, feedback)
del writer
def layerExtent(self, layer, writer, feedback):
rect = layer.extent()
minx = rect.xMinimum()
miny = rect.yMinimum()
maxx = rect.xMaximum()
maxy = rect.yMaximum()
height = rect.height()
width = rect.width()
cntx = minx + width / 2.0
cnty = miny + height / 2.0
area = width * height
perim = 2 * width + 2 * height
rect = [QgsPoint(minx, miny), QgsPoint(minx, maxy), QgsPoint(maxx,
maxy), QgsPoint(maxx, miny), QgsPoint(minx, miny)]
geometry = QgsGeometry().fromPolygon([rect])
feat = QgsFeature()
feat.setGeometry(geometry)
attrs = [
minx,
miny,
maxx,
maxy,
cntx,
cnty,
area,
perim,
height,
width,
]
feat.setAttributes(attrs)
writer.addFeature(feat)
def featureExtent(self, layer, context, writer, feedback):
features = QgsProcessingUtils.getFeatures(layer, context)
total = 100.0 / QgsProcessingUtils.featureCount(layer, context)
feat = QgsFeature()
for current, f in enumerate(features):
rect = f.geometry().boundingBox()
minx = rect.xMinimum()
miny = rect.yMinimum()
maxx = rect.xMaximum()
maxy = rect.yMaximum()
height = rect.height()
width = rect.width()
cntx = minx + width / 2.0
cnty = miny + height / 2.0
area = width * height
perim = 2 * width + 2 * height
rect = [QgsPoint(minx, miny), QgsPoint(minx, maxy), QgsPoint(maxx,
maxy), QgsPoint(maxx, miny), QgsPoint(minx, miny)]
geometry = QgsGeometry().fromPolygon([rect])
feat.setGeometry(geometry)
attrs = [
minx,
miny,
maxx,
maxy,
cntx,
cnty,
area,
perim,
height,
width,
]
feat.setAttributes(attrs)
writer.addFeature(feat)
feedback.setProgress(int(current * total))
|
drakuna/odoo
|
refs/heads/master
|
addons/purchase/migrations/9.0.1.2/pre-create-properties.py
|
101
|
# -*- coding: utf-8 -*-
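# Pre-migration helper: move a plain many2one column into company-dependent
# ir_property records, then drop the original column from the table.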
def convert_field(cr, model, field, target_model):
table = model.replace('.', '_')
cr.execute("""SELECT 1
FROM information_schema.columns
WHERE table_name = %s
AND column_name = %s
""", (table, field))
if not cr.fetchone():
return
cr.execute("SELECT id FROM ir_model_fields WHERE model=%s AND name=%s", (model, field))
[fields_id] = cr.fetchone()
cr.execute("""
INSERT INTO ir_property(name, type, fields_id, company_id, res_id, value_reference)
SELECT %(field)s, 'many2one', %(fields_id)s, company_id, CONCAT('{model},', id),
CONCAT('{target_model},', {field})
FROM {table} t
WHERE {field} IS NOT NULL
AND NOT EXISTS(SELECT 1
FROM ir_property
WHERE fields_id=%(fields_id)s
AND company_id=t.company_id
AND res_id=CONCAT('{model},', t.id))
""".format(**locals()), locals())
cr.execute('ALTER TABLE "{0}" DROP COLUMN "{1}" CASCADE'.format(table, field))
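# Entry point invoked by the Odoo migration machinery before the module update
# is applied; it converts the two company-dependent purchase fields.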
def migrate(cr, version):
convert_field(cr, 'res.partner', 'property_purchase_currency_id', 'res.currency')
convert_field(cr, 'product.template',
'property_account_creditor_price_difference', 'account.account')
|
xq262144/hue
|
refs/heads/master
|
desktop/core/ext-py/Django-1.6.10/django/contrib/gis/tests/geo3d/tests.py
|
109
|
from __future__ import absolute_import, unicode_literals
import os
import re
from django.contrib.gis.gdal import HAS_GDAL
from django.contrib.gis.geos import HAS_GEOS
from django.contrib.gis.tests.utils import postgis
from django.test import TestCase
from django.utils._os import upath
from django.utils.unittest import skipUnless
if HAS_GEOS:
from django.contrib.gis.db.models import Union, Extent3D
from django.contrib.gis.geos import GEOSGeometry, LineString, Point, Polygon
from .models import (City3D, Interstate2D, Interstate3D, InterstateProj2D,
InterstateProj3D, Point2D, Point3D, MultiPoint3D, Polygon2D, Polygon3D)
if HAS_GDAL:
from django.contrib.gis.utils import LayerMapping, LayerMapError
data_path = os.path.realpath(os.path.join(os.path.dirname(upath(__file__)), '..', 'data'))
city_file = os.path.join(data_path, 'cities', 'cities.shp')
vrt_file = os.path.join(data_path, 'test_vrt', 'test_vrt.vrt')
# The coordinates of each city, with Z values corresponding to their
# altitude in meters.
city_data = (
('Houston', (-95.363151, 29.763374, 18)),
('Dallas', (-96.801611, 32.782057, 147)),
('Oklahoma City', (-97.521157, 34.464642, 380)),
('Wellington', (174.783117, -41.315268, 14)),
('Pueblo', (-104.609252, 38.255001, 1433)),
('Lawrence', (-95.235060, 38.971823, 251)),
('Chicago', (-87.650175, 41.850385, 181)),
('Victoria', (-123.305196, 48.462611, 15)),
)
# Reference mapping of city name to its altitude (Z value).
city_dict = dict((name, coords) for name, coords in city_data)
# 3D freeway data derived from the National Elevation Dataset:
# http://seamless.usgs.gov/products/9arc.php
interstate_data = (
('I-45',
'LINESTRING(-95.3708481 29.7765870 11.339,-95.3694580 29.7787980 4.536,-95.3690305 29.7797359 9.762,-95.3691886 29.7812450 12.448,-95.3696447 29.7850144 10.457,-95.3702511 29.7868518 9.418,-95.3706724 29.7881286 14.858,-95.3711632 29.7896157 15.386,-95.3714525 29.7936267 13.168,-95.3717848 29.7955007 15.104,-95.3717719 29.7969804 16.516,-95.3717305 29.7982117 13.923,-95.3717254 29.8000778 14.385,-95.3719875 29.8013539 15.160,-95.3720575 29.8026785 15.544,-95.3721321 29.8040912 14.975,-95.3722074 29.8050998 15.688,-95.3722779 29.8060430 16.099,-95.3733818 29.8076750 15.197,-95.3741563 29.8103686 17.268,-95.3749458 29.8129927 19.857,-95.3763564 29.8144557 15.435)',
( 11.339, 4.536, 9.762, 12.448, 10.457, 9.418, 14.858,
15.386, 13.168, 15.104, 16.516, 13.923, 14.385, 15.16 ,
15.544, 14.975, 15.688, 16.099, 15.197, 17.268, 19.857,
15.435),
),
)
# Bounding box polygon for inner-loop of Houston (in projected coordinate
# system 32140), with elevation values from the National Elevation Dataset
# (see above).
bbox_data = (
'POLYGON((941527.97 4225693.20,962596.48 4226349.75,963152.57 4209023.95,942051.75 4208366.38,941527.97 4225693.20))',
(21.71, 13.21, 9.12, 16.40, 21.71)
)
@skipUnless(HAS_GEOS and HAS_GDAL and postgis, "Geos, GDAL and postgis are required.")
class Geo3DTest(TestCase):
"""
Only a subset of the PostGIS routines are 3D-enabled, and this TestCase
tries to test the features that can handle 3D and that are also
available within GeoDjango. For more information, see the PostGIS docs
on the routines that support 3D:
http://postgis.refractions.net/documentation/manual-1.4/ch08.html#PostGIS_3D_Functions
"""
def _load_interstate_data(self):
# Interstate (2D / 3D and Geographic/Projected variants)
for name, line, exp_z in interstate_data:
line_3d = GEOSGeometry(line, srid=4269)
line_2d = LineString([l[:2] for l in line_3d.coords], srid=4269)
# Creating a geographic and projected version of the
# interstate in both 2D and 3D.
Interstate3D.objects.create(name=name, line=line_3d)
InterstateProj3D.objects.create(name=name, line=line_3d)
Interstate2D.objects.create(name=name, line=line_2d)
InterstateProj2D.objects.create(name=name, line=line_2d)
def _load_city_data(self):
for name, pnt_data in city_data:
City3D.objects.create(name=name, point=Point(*pnt_data, srid=4326))
def _load_polygon_data(self):
bbox_wkt, bbox_z = bbox_data
bbox_2d = GEOSGeometry(bbox_wkt, srid=32140)
bbox_3d = Polygon(tuple((x, y, z) for (x, y), z in zip(bbox_2d[0].coords, bbox_z)), srid=32140)
Polygon2D.objects.create(name='2D BBox', poly=bbox_2d)
Polygon3D.objects.create(name='3D BBox', poly=bbox_3d)
def test_3d_hasz(self):
"""
Make sure data is 3D and has expected Z values -- shouldn't change
because of coordinate system.
"""
self._load_interstate_data()
for name, line, exp_z in interstate_data:
interstate = Interstate3D.objects.get(name=name)
interstate_proj = InterstateProj3D.objects.get(name=name)
for i in [interstate, interstate_proj]:
self.assertTrue(i.line.hasz)
self.assertEqual(exp_z, tuple(i.line.z))
self._load_city_data()
for name, pnt_data in city_data:
city = City3D.objects.get(name=name)
z = pnt_data[2]
self.assertTrue(city.point.hasz)
self.assertEqual(z, city.point.z)
def test_3d_polygons(self):
"""
Test the creation of polygon 3D models.
"""
self._load_polygon_data()
p3d = Polygon3D.objects.get(name='3D BBox')
self.assertTrue(p3d.poly.hasz)
self.assertIsInstance(p3d.poly, Polygon)
self.assertEqual(p3d.poly.srid, 32140)
def test_3d_layermapping(self):
"""
Testing LayerMapping on 3D models.
"""
point_mapping = {'point' : 'POINT'}
mpoint_mapping = {'mpoint' : 'MULTIPOINT'}
# The VRT is 3D, but should still be able to map sans the Z.
lm = LayerMapping(Point2D, vrt_file, point_mapping, transform=False)
lm.save()
self.assertEqual(3, Point2D.objects.count())
# The city shapefile is 2D, and won't be able to fill the coordinates
# in the 3D model -- thus, a LayerMapError is raised.
self.assertRaises(LayerMapError, LayerMapping,
Point3D, city_file, point_mapping, transform=False)
# 3D model should take 3D data just fine.
lm = LayerMapping(Point3D, vrt_file, point_mapping, transform=False)
lm.save()
self.assertEqual(3, Point3D.objects.count())
# Making sure LayerMapping.make_multi works right, by converting
# a Point25D into a MultiPoint25D.
lm = LayerMapping(MultiPoint3D, vrt_file, mpoint_mapping, transform=False)
lm.save()
self.assertEqual(3, MultiPoint3D.objects.count())
def test_kml(self):
"""
Test GeoQuerySet.kml() with Z values.
"""
self._load_city_data()
h = City3D.objects.kml(precision=6).get(name='Houston')
# KML should be 3D.
# `SELECT ST_AsKML(point, 6) FROM geo3d_city3d WHERE name = 'Houston';`
ref_kml_regex = re.compile(r'^<Point><coordinates>-95.363\d+,29.763\d+,18</coordinates></Point>$')
self.assertTrue(ref_kml_regex.match(h.kml))
def test_geojson(self):
"""
Test GeoQuerySet.geojson() with Z values.
"""
self._load_city_data()
h = City3D.objects.geojson(precision=6).get(name='Houston')
# GeoJSON should be 3D
# `SELECT ST_AsGeoJSON(point, 6) FROM geo3d_city3d WHERE name='Houston';`
ref_json_regex = re.compile(r'^{"type":"Point","coordinates":\[-95.363151,29.763374,18(\.0+)?\]}$')
self.assertTrue(ref_json_regex.match(h.geojson))
def test_union(self):
"""
Testing the Union aggregate of 3D models.
"""
# PostGIS query that returned the reference EWKT for this test:
# `SELECT ST_AsText(ST_Union(point)) FROM geo3d_city3d;`
self._load_city_data()
ref_ewkt = 'SRID=4326;MULTIPOINT(-123.305196 48.462611 15,-104.609252 38.255001 1433,-97.521157 34.464642 380,-96.801611 32.782057 147,-95.363151 29.763374 18,-95.23506 38.971823 251,-87.650175 41.850385 181,174.783117 -41.315268 14)'
ref_union = GEOSGeometry(ref_ewkt)
union = City3D.objects.aggregate(Union('point'))['point__union']
self.assertTrue(union.hasz)
self.assertEqual(ref_union, union)
def test_extent(self):
"""
Testing the Extent3D aggregate for 3D models.
"""
self._load_city_data()
# `SELECT ST_Extent3D(point) FROM geo3d_city3d;`
        ref_extent3d = (-123.305196, -41.315268, 14, 174.783117, 48.462611, 1433)
extent1 = City3D.objects.aggregate(Extent3D('point'))['point__extent3d']
extent2 = City3D.objects.extent3d()
def check_extent3d(extent3d, tol=6):
for ref_val, ext_val in zip(ref_extent3d, extent3d):
self.assertAlmostEqual(ref_val, ext_val, tol)
for e3d in [extent1, extent2]:
check_extent3d(e3d)
def test_perimeter(self):
"""
Testing GeoQuerySet.perimeter() on 3D fields.
"""
self._load_polygon_data()
# Reference query for values below:
# `SELECT ST_Perimeter3D(poly), ST_Perimeter2D(poly) FROM geo3d_polygon3d;`
ref_perim_3d = 76859.2620451
ref_perim_2d = 76859.2577803
tol = 6
self.assertAlmostEqual(ref_perim_2d,
Polygon2D.objects.perimeter().get(name='2D BBox').perimeter.m,
tol)
self.assertAlmostEqual(ref_perim_3d,
Polygon3D.objects.perimeter().get(name='3D BBox').perimeter.m,
tol)
def test_length(self):
"""
Testing GeoQuerySet.length() on 3D fields.
"""
        # ST_Length_Spheroid is Z-aware, and thus does not need to use
# a separate function internally.
# `SELECT ST_Length_Spheroid(line, 'SPHEROID["GRS 1980",6378137,298.257222101]')
# FROM geo3d_interstate[2d|3d];`
self._load_interstate_data()
tol = 3
ref_length_2d = 4368.1721949481
ref_length_3d = 4368.62547052088
self.assertAlmostEqual(ref_length_2d,
Interstate2D.objects.length().get(name='I-45').length.m,
tol)
self.assertAlmostEqual(ref_length_3d,
Interstate3D.objects.length().get(name='I-45').length.m,
tol)
        # Making sure `ST_Length3D` is used for a projected
# and 3D model rather than `ST_Length`.
# `SELECT ST_Length(line) FROM geo3d_interstateproj2d;`
ref_length_2d = 4367.71564892392
# `SELECT ST_Length3D(line) FROM geo3d_interstateproj3d;`
ref_length_3d = 4368.16897234101
self.assertAlmostEqual(ref_length_2d,
InterstateProj2D.objects.length().get(name='I-45').length.m,
tol)
self.assertAlmostEqual(ref_length_3d,
InterstateProj3D.objects.length().get(name='I-45').length.m,
tol)
def test_scale(self):
"""
Testing GeoQuerySet.scale() on Z values.
"""
self._load_city_data()
# Mapping of City name to reference Z values.
zscales = (-3, 4, 23)
for zscale in zscales:
for city in City3D.objects.scale(1.0, 1.0, zscale):
self.assertEqual(city_dict[city.name][2] * zscale, city.scale.z)
def test_translate(self):
"""
Testing GeoQuerySet.translate() on Z values.
"""
self._load_city_data()
ztranslations = (5.23, 23, -17)
for ztrans in ztranslations:
for city in City3D.objects.translate(0, 0, ztrans):
self.assertEqual(city_dict[city.name][2] + ztrans, city.translate.z)
|
chiefspace/udemy-rest-api
|
refs/heads/master
|
udemy_rest_api_section6/env/lib/python3.4/site-packages/setuptools/script template.py
|
486
|
# EASY-INSTALL-SCRIPT: %(spec)r,%(script_name)r
__requires__ = """%(spec)r"""
import pkg_resources
pkg_resources.run_script("""%(spec)r""", """%(script_name)r""")
|
rubyinhell/brython
|
refs/heads/master
|
www/src/Lib/test/double_const.py
|
203
|
from test.support import TestFailed
# A test for SF bug 422177: manifest float constants varied way too much in
# precision depending on whether Python was loading a module for the first
# time, or reloading it from a precompiled .pyc. The "expected" failure
# mode is that when test_import imports this after all .pyc files have been
# erased, it passes, but when test_import imports this from
# double_const.pyc, it fails. This indicates a woeful loss of precision in
# the marshal format for doubles. It's also possible that repr() doesn't
# produce enough digits to get reasonable precision for this box.
PI = 3.14159265358979324
TWOPI = 6.28318530717958648
PI_str = "3.14159265358979324"
TWOPI_str = "6.28318530717958648"
# Verify that the double x is within a few bits of eval(x_str).
def check_ok(x, x_str):
assert x > 0.0
x2 = eval(x_str)
assert x2 > 0.0
diff = abs(x - x2)
# If diff is no larger than 3 ULP (wrt x2), then diff/8 is no larger
# than 0.375 ULP, so adding diff/8 to x2 should have no effect.
if x2 + (diff / 8.) != x2:
raise TestFailed("Manifest const %s lost too much precision " % x_str)
check_ok(PI, PI_str)
check_ok(TWOPI, TWOPI_str)
|
richardcs/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/cloudstack/cs_portforward.py
|
73
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, René Moser <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cs_portforward
short_description: Manages port forwarding rules on Apache CloudStack based clouds.
description:
- Create, update and remove port forwarding rules.
version_added: '2.0'
author: "René Moser (@resmo)"
options:
ip_address:
description:
- Public IP address the rule is assigned to.
required: true
vm:
description:
- Name of virtual machine which we make the port forwarding rule for.
- Required if C(state=present).
state:
description:
- State of the port forwarding rule.
default: present
choices: [ present, absent ]
protocol:
description:
- Protocol of the port forwarding rule.
default: tcp
choices: [ tcp, udp ]
public_port:
description:
- Start public port for this rule.
required: true
public_end_port:
description:
- End public port for this rule.
      - If not specified, equal to C(public_port).
private_port:
description:
- Start private port for this rule.
required: true
private_end_port:
description:
- End private port for this rule.
      - If not specified, equal to C(private_port).
open_firewall:
description:
- Whether the firewall rule for public port should be created, while creating the new rule.
- Use M(cs_firewall) for managing firewall rules.
default: false
vm_guest_ip:
description:
- VM guest NIC secondary IP address for the port forwarding rule.
default: false
network:
description:
- Name of the network.
version_added: "2.3"
vpc:
description:
- Name of the VPC.
version_added: "2.3"
domain:
description:
- Domain the C(vm) is related to.
account:
description:
- Account the C(vm) is related to.
project:
description:
- Name of the project the C(vm) is located in.
zone:
description:
- Name of the zone in which the virtual machine is in.
- If not set, default zone is used.
poll_async:
description:
- Poll async jobs until job has finished.
default: true
tags:
description:
- List of tags. Tags are a list of dictionaries having keys C(key) and C(value).
- "To delete all tags, set a empty list e.g. C(tags: [])."
aliases: [ tag ]
version_added: "2.4"
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
- name: 1.2.3.4:80 -> web01:8080
local_action:
module: cs_portforward
ip_address: 1.2.3.4
vm: web01
public_port: 80
private_port: 8080
- name: forward SSH and open firewall
local_action:
module: cs_portforward
ip_address: '{{ public_ip }}'
vm: '{{ inventory_hostname }}'
public_port: '{{ ansible_ssh_port }}'
private_port: 22
open_firewall: true
- name: forward DNS traffic, but do not open firewall
local_action:
module: cs_portforward
ip_address: 1.2.3.4
vm: '{{ inventory_hostname }}'
public_port: 53
private_port: 53
protocol: udp
- name: remove ssh port forwarding
local_action:
module: cs_portforward
ip_address: 1.2.3.4
public_port: 22
private_port: 22
state: absent
'''
RETURN = '''
---
id:
description: UUID of the public IP address.
returned: success
type: string
sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f
ip_address:
description: Public IP address.
returned: success
type: string
sample: 1.2.3.4
protocol:
description: Protocol.
returned: success
type: string
sample: tcp
private_port:
description: Start port on the virtual machine's IP address.
returned: success
type: int
sample: 80
private_end_port:
description: End port on the virtual machine's IP address.
returned: success
type: int
public_port:
description: Start port on the public IP address.
returned: success
type: int
sample: 80
public_end_port:
description: End port on the public IP address.
returned: success
type: int
sample: 80
tags:
description: Tags related to the port forwarding.
returned: success
type: list
sample: []
vm_name:
description: Name of the virtual machine.
returned: success
type: string
sample: web-01
vm_display_name:
description: Display name of the virtual machine.
returned: success
type: string
sample: web-01
vm_guest_ip:
description: IP of the virtual machine.
returned: success
type: string
sample: 10.101.65.152
vpc:
description: Name of the VPC.
returned: success
type: string
sample: my_vpc
network:
description: Name of the network.
returned: success
type: string
sample: dmz
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.cloudstack import AnsibleCloudStack, cs_argument_spec, cs_required_together
class AnsibleCloudStackPortforwarding(AnsibleCloudStack):
def __init__(self, module):
super(AnsibleCloudStackPortforwarding, self).__init__(module)
self.returns = {
'virtualmachinedisplayname': 'vm_display_name',
'virtualmachinename': 'vm_name',
'ipaddress': 'ip_address',
'vmguestip': 'vm_guest_ip',
'publicip': 'public_ip',
'protocol': 'protocol',
}
        # these values will be cast to int
self.returns_to_int = {
'publicport': 'public_port',
'publicendport': 'public_end_port',
'privateport': 'private_port',
'privateendport': 'private_end_port',
}
self.portforwarding_rule = None
def get_portforwarding_rule(self):
if not self.portforwarding_rule:
protocol = self.module.params.get('protocol')
public_port = self.module.params.get('public_port')
args = {
'ipaddressid': self.get_ip_address(key='id'),
'account': self.get_account(key='name'),
'domainid': self.get_domain(key='id'),
'projectid': self.get_project(key='id'),
}
portforwarding_rules = self.query_api('listPortForwardingRules', **args)
if portforwarding_rules and 'portforwardingrule' in portforwarding_rules:
for rule in portforwarding_rules['portforwardingrule']:
if (protocol == rule['protocol'] and
public_port == int(rule['publicport'])):
self.portforwarding_rule = rule
break
return self.portforwarding_rule
def present_portforwarding_rule(self):
portforwarding_rule = self.get_portforwarding_rule()
if portforwarding_rule:
portforwarding_rule = self.update_portforwarding_rule(portforwarding_rule)
else:
portforwarding_rule = self.create_portforwarding_rule()
if portforwarding_rule:
portforwarding_rule = self.ensure_tags(resource=portforwarding_rule, resource_type='PortForwardingRule')
self.portforwarding_rule = portforwarding_rule
return portforwarding_rule
def create_portforwarding_rule(self):
args = {
'protocol': self.module.params.get('protocol'),
'publicport': self.module.params.get('public_port'),
'publicendport': self.get_or_fallback('public_end_port', 'public_port'),
'privateport': self.module.params.get('private_port'),
'privateendport': self.get_or_fallback('private_end_port', 'private_port'),
'openfirewall': self.module.params.get('open_firewall'),
'vmguestip': self.get_vm_guest_ip(),
'ipaddressid': self.get_ip_address(key='id'),
'virtualmachineid': self.get_vm(key='id'),
'account': self.get_account(key='name'),
'domainid': self.get_domain(key='id'),
'networkid': self.get_network(key='id'),
}
portforwarding_rule = None
self.result['changed'] = True
if not self.module.check_mode:
portforwarding_rule = self.query_api('createPortForwardingRule', **args)
poll_async = self.module.params.get('poll_async')
if poll_async:
portforwarding_rule = self.poll_job(portforwarding_rule, 'portforwardingrule')
return portforwarding_rule
def update_portforwarding_rule(self, portforwarding_rule):
args = {
'protocol': self.module.params.get('protocol'),
'publicport': self.module.params.get('public_port'),
'publicendport': self.get_or_fallback('public_end_port', 'public_port'),
'privateport': self.module.params.get('private_port'),
'privateendport': self.get_or_fallback('private_end_port', 'private_port'),
'vmguestip': self.get_vm_guest_ip(),
'ipaddressid': self.get_ip_address(key='id'),
'virtualmachineid': self.get_vm(key='id'),
'networkid': self.get_network(key='id'),
}
if self.has_changed(args, portforwarding_rule):
self.result['changed'] = True
if not self.module.check_mode:
# API broken in 4.2.1?, workaround using remove/create instead of update
# portforwarding_rule = self.query_api('updatePortForwardingRule', **args)
self.absent_portforwarding_rule()
portforwarding_rule = self.query_api('createPortForwardingRule', **args)
poll_async = self.module.params.get('poll_async')
if poll_async:
portforwarding_rule = self.poll_job(portforwarding_rule, 'portforwardingrule')
return portforwarding_rule
def absent_portforwarding_rule(self):
portforwarding_rule = self.get_portforwarding_rule()
if portforwarding_rule:
self.result['changed'] = True
args = {
'id': portforwarding_rule['id'],
}
if not self.module.check_mode:
res = self.query_api('deletePortForwardingRule', **args)
poll_async = self.module.params.get('poll_async')
if poll_async:
self.poll_job(res, 'portforwardingrule')
return portforwarding_rule
def get_result(self, portforwarding_rule):
super(AnsibleCloudStackPortforwarding, self).get_result(portforwarding_rule)
if portforwarding_rule:
for search_key, return_key in self.returns_to_int.items():
if search_key in portforwarding_rule:
self.result[return_key] = int(portforwarding_rule[search_key])
return self.result
def main():
argument_spec = cs_argument_spec()
argument_spec.update(dict(
ip_address=dict(required=True),
protocol=dict(choices=['tcp', 'udp'], default='tcp'),
public_port=dict(type='int', required=True),
public_end_port=dict(type='int'),
private_port=dict(type='int', required=True),
private_end_port=dict(type='int'),
state=dict(choices=['present', 'absent'], default='present'),
open_firewall=dict(type='bool', default=False),
vm_guest_ip=dict(),
vm=dict(),
vpc=dict(),
network=dict(),
zone=dict(),
domain=dict(),
account=dict(),
project=dict(),
poll_async=dict(type='bool', default=True),
tags=dict(type='list', aliases=['tag']),
))
module = AnsibleModule(
argument_spec=argument_spec,
required_together=cs_required_together(),
supports_check_mode=True
)
acs_pf = AnsibleCloudStackPortforwarding(module)
state = module.params.get('state')
if state in ['absent']:
pf_rule = acs_pf.absent_portforwarding_rule()
else:
pf_rule = acs_pf.present_portforwarding_rule()
result = acs_pf.get_result(pf_rule)
module.exit_json(**result)
if __name__ == '__main__':
main()
|
essanpupil/cashflow
|
refs/heads/master
|
cash/urls.py
|
1
|
from django.conf.urls import url
from . import views
app_name = 'cash'
urlpatterns = [
url(r'^$', views.index, name='index'),
url(r'^new_activity$', views.NewActivity.as_view(), name='new_activity'),
url(r'^activity_list$', views.ActivityList.as_view(), name='activity_list'),
]
|
hurricup/intellij-community
|
refs/heads/master
|
python/testData/quickFixes/PyRemoveArgumentQuickFixTest/duplicateKWArg_after.py
|
80
|
def foo(**args):
pass
a = {}
b = {}
foo(**a)
|
savi-dev/horizon
|
refs/heads/master
|
horizon/conf/panel_template/urls.py
|
46
|
from django.conf.urls.defaults import patterns, url
from .views import IndexView
urlpatterns = patterns('',
url(r'^$', IndexView.as_view(), name='index'),
)
|
sbidoul/odoo
|
refs/heads/8.0
|
addons/crm/wizard/crm_phonecall_to_phonecall.py
|
337
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
import time
class crm_phonecall2phonecall(osv.osv_memory):
_name = 'crm.phonecall2phonecall'
_description = 'Phonecall To Phonecall'
_columns = {
'name' : fields.char('Call summary', required=True, select=1),
'user_id' : fields.many2one('res.users',"Assign To"),
'contact_name':fields.char('Contact'),
'phone':fields.char('Phone'),
'categ_id': fields.many2one('crm.case.categ', 'Category', \
domain="['|',('section_id','=',False),('section_id','=',section_id),\
('object_id.model', '=', 'crm.phonecall')]"),
'date': fields.datetime('Date'),
'section_id':fields.many2one('crm.case.section','Sales Team'),
'action': fields.selection([('schedule','Schedule a call'), ('log','Log a call')], 'Action', required=True),
'partner_id' : fields.many2one('res.partner', "Partner"),
'note':fields.text('Note')
}
def action_cancel(self, cr, uid, ids, context=None):
"""
Closes Phonecall to Phonecall form
"""
return {'type':'ir.actions.act_window_close'}
def action_schedule(self, cr, uid, ids, context=None):
value = {}
if context is None:
context = {}
phonecall = self.pool.get('crm.phonecall')
phonecall_ids = context and context.get('active_ids') or []
for this in self.browse(cr, uid, ids, context=context):
phocall_ids = phonecall.schedule_another_phonecall(cr, uid, phonecall_ids, this.date, this.name, \
this.user_id and this.user_id.id or False, \
this.section_id and this.section_id.id or False, \
this.categ_id and this.categ_id.id or False, \
action=this.action, context=context)
return phonecall.redirect_phonecall_view(cr, uid, phocall_ids[phonecall_ids[0]], context=context)
def default_get(self, cr, uid, fields, context=None):
"""
This function gets default values
"""
res = super(crm_phonecall2phonecall, self).default_get(cr, uid, fields, context=context)
record_id = context and context.get('active_id', False) or False
res.update({'action': 'schedule', 'date': time.strftime('%Y-%m-%d %H:%M:%S')})
if record_id:
phonecall = self.pool.get('crm.phonecall').browse(cr, uid, record_id, context=context)
categ_id = False
data_obj = self.pool.get('ir.model.data')
try:
res_id = data_obj._get_id(cr, uid, 'crm', 'categ_phone2')
categ_id = data_obj.browse(cr, uid, res_id, context=context).res_id
except ValueError:
pass
if 'name' in fields:
res.update({'name': phonecall.name})
if 'user_id' in fields:
res.update({'user_id': phonecall.user_id and phonecall.user_id.id or False})
if 'date' in fields:
res.update({'date': False})
if 'section_id' in fields:
res.update({'section_id': phonecall.section_id and phonecall.section_id.id or False})
if 'categ_id' in fields:
res.update({'categ_id': categ_id})
if 'partner_id' in fields:
res.update({'partner_id': phonecall.partner_id and phonecall.partner_id.id or False})
return res
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
zchq88/DLminiProject
|
refs/heads/master
|
sentiment_network/Sentiment Classification.py
|
1
|
# Define how to display a review
def pretty_print_review_and_label(i):
print(labels[i] + "\t:\t" + reviews[i][:80] + "...")
# What we know! Load the review data
g = open('reviews.txt', 'r')
reviews = list(map(lambda x: x[:-1], g.readlines()))
g.close()
# What we WANT to know! Load the label data
g = open('labels.txt', 'r')
labels = list(map(lambda x: x[:-1].upper(), g.readlines()))
g.close()
from collections import Counter
import numpy as np
import time
import sys
'''
# Define word-frequency counters
positive_counts = Counter()
negative_counts = Counter()
total_counts = Counter()
# Count word frequencies for each label
for i in range(len(reviews)):
if (labels[i] == 'POSITIVE'):
for word in reviews[i].split(" "):
positive_counts[word] += 1
total_counts[word] += 1
else:
for word in reviews[i].split(" "):
negative_counts[word] += 1
total_counts[word] += 1
# Define a counter for the positive/negative ratios
pos_neg_ratios = Counter()
# Compute the positive-to-negative frequency ratio for each word
for term, cnt in list(total_counts.most_common()):
if (cnt > 100):
pos_neg_ratio = positive_counts[term] / float(negative_counts[term] + 1)
pos_neg_ratios[term] = pos_neg_ratio
# Normalize the ratios with a log transform
for word, ratio in pos_neg_ratios.most_common():
if (ratio > 1):
pos_neg_ratios[word] = np.log(ratio)
else:
pos_neg_ratios[word] = -np.log((1 / (ratio + 0.01)))
vocab = set(total_counts.keys())
vocab_size = len(vocab)
# Define the input layer vector
layer_0 = np.zeros((1, vocab_size))
# Define the word-to-index dictionary
word2index = {}
for i, word in enumerate(vocab):
word2index[word] = i
# Convert input text into word counts using word2index
def update_input_layer(review):
global layer_0
# clear out previous state, reset the layer to be all 0s
layer_0 *= 0
for word in review.split(" "):
layer_0[0][word2index[word]] += 1
# Convert the output label to a number
def get_target_for_label(label):
if (label == 'POSITIVE'):
return 1
else:
return 0
'''
# Define the neural network
class SentimentNetwork:
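    # A two-layer sentiment classifier: a binary bag-of-words input layer, a
    # linear hidden layer, and a sigmoid output unit trained with plain
    # gradient descent.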
def __init__(self, reviews, labels, min_count=10, polarity_cutoff=0.1, hidden_nodes=10, learning_rate=0.1):
# set our random number generator
np.random.seed(1)
        ## project6: pre-process the data and select features
self.pre_process_data(reviews, polarity_cutoff, min_count)
self.init_network(len(self.review_vocab), hidden_nodes, 1, learning_rate)
    # Initialize the word and label dictionaries from the reviews and labels
def pre_process_data(self, reviews, polarity_cutoff, min_count):
        # project6: compute the word-frequency ratios for each label
positive_counts = Counter()
negative_counts = Counter()
total_counts = Counter()
for i in range(len(reviews)):
if (labels[i] == 'POSITIVE'):
for word in reviews[i].split(" "):
positive_counts[word] += 1
total_counts[word] += 1
else:
for word in reviews[i].split(" "):
negative_counts[word] += 1
total_counts[word] += 1
pos_neg_ratios = Counter()
for term, cnt in list(total_counts.most_common()):
if (cnt >= 50):
pos_neg_ratio = positive_counts[term] / float(negative_counts[term] + 1)
pos_neg_ratios[term] = pos_neg_ratio
for word, ratio in pos_neg_ratios.most_common():
if (ratio > 1):
pos_neg_ratios[word] = np.log(ratio)
else:
pos_neg_ratios[word] = -np.log((1 / (ratio + 0.01)))
'''
review_vocab = set()
for review in reviews:
for word in review.split(" "):
review_vocab.add(word)
self.review_vocab = list(review_vocab)
'''
        # project6: select features
review_vocab = set()
for review in reviews:
for word in review.split(" "):
                if (total_counts[word] > min_count):  # keep the word if its frequency exceeds min_count
                    if (word in pos_neg_ratios.keys()):  # and its normalized ratio is beyond polarity_cutoff
if ((pos_neg_ratios[word] >= polarity_cutoff) or (pos_neg_ratios[word] <= -polarity_cutoff)):
                            review_vocab.add(word)  # add the word as a feature
else:
review_vocab.add(word)
self.review_vocab = list(review_vocab)
label_vocab = set()
for label in labels:
label_vocab.add(label)
self.label_vocab = list(label_vocab)
self.review_vocab_size = len(self.review_vocab)
self.label_vocab_size = len(self.label_vocab)
self.word2index = {}
for i, word in enumerate(self.review_vocab):
self.word2index[word] = i
self.label2index = {}
for i, label in enumerate(self.label_vocab):
self.label2index[label] = i
    # Initialize the network hyperparameters
def init_network(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
# Set number of nodes in input, hidden and output layers.
self.input_nodes = input_nodes
self.hidden_nodes = hidden_nodes
self.output_nodes = output_nodes
# Initialize weights
self.weights_0_1 = np.zeros((self.input_nodes, self.hidden_nodes))
self.weights_1_2 = np.random.normal(0.0, self.output_nodes ** -0.5,
(self.hidden_nodes, self.output_nodes))
self.learning_rate = learning_rate
self.layer_0 = np.zeros((1, input_nodes))
        # project5: add layer_1
self.layer_1 = np.zeros((1, hidden_nodes))
    # Convert a review into the input word vector
def update_input_layer(self, review):
# clear out previous state, reset the layer to be all 0s
self.layer_0 *= 0
for word in review.split(" "):
if (word in self.word2index.keys()):
                self.layer_0[0][self.word2index[word]] = 1  # project4: reduce noise by recording word presence instead of counts
    # Convert a label to a 0/1 target
def get_target_for_label(self, label):
if (label == 'POSITIVE'):
return 1
else:
return 0
def sigmoid(self, x):
return 1 / (1 + np.exp(-x))
def sigmoid_output_2_derivative(self, output):
return output * (1 - output)
def train(self, training_reviews_raw, training_labels):
        # project5: reduce noise in the weights by recording which word indices appear in each review
training_reviews = list()
for review in training_reviews_raw:
indices = set()
for word in review.split(" "):
if (word in self.word2index.keys()):
indices.add(self.word2index[word])
training_reviews.append(list(indices))
assert (len(training_reviews) == len(training_labels))
correct_so_far = 0
start = time.time()
for i in range(len(training_reviews)):
review = training_reviews[i]
label = training_labels[i]
#### Implement the forward pass here ####
### Forward pass ###
# Input Layer
            # project5: input handling changed
# self.update_input_layer(review)
# Hidden layer
# layer_1 = self.layer_0.dot(self.weights_0_1)
            # project5: reduce noise in the weights by only using the words that appear in the review
self.layer_1 *= 0
for index in review:
self.layer_1 += self.weights_0_1[index]
# Output layer
layer_2 = self.sigmoid(self.layer_1.dot(self.weights_1_2))
#### Implement the backward pass here ####
### Backward pass ###
layer_2_error = layer_2 - self.get_target_for_label(
label) # Output layer error is the difference between desired target and actual output.
layer_2_delta = layer_2_error * self.sigmoid_output_2_derivative(layer_2)
layer_1_error = layer_2_delta.dot(self.weights_1_2.T) # errors propagated to the hidden layer
layer_1_delta = layer_1_error # hidden layer gradients - no nonlinearity so it's the same as the error
self.weights_1_2 -= self.layer_1.T.dot(
layer_2_delta) * self.learning_rate # update hidden-to-output weights with gradient descent step
'''
self.weights_0_1 -= self.layer_0.T.dot(
layer_1_delta) * self.learning_rate # update input-to-hidden weights with gradient descent step
'''
for index in review:
self.weights_0_1[index] -= layer_1_delta[
0] * self.learning_rate # update input-to-hidden weights with gradient descent step
if (np.abs(layer_2_error) < 0.5):
correct_so_far += 1
reviews_per_second = i / float(time.time() - start)
sys.stdout.write(
"\rProgress:" + str(100 * i / float(len(training_reviews)))[:4] + "% Speed(reviews/sec):" + str(
reviews_per_second)[0:5] + " #Correct:" + str(correct_so_far) + " #Trained:" + str(
i + 1) + " Training Accuracy:" + str(correct_so_far * 100 / float(i + 1))[:4] + "%")
def test(self, testing_reviews, testing_labels):
correct = 0
start = time.time()
for i in range(len(testing_reviews)):
pred = self.run(testing_reviews[i])
if (pred == testing_labels[i]):
correct += 1
reviews_per_second = i / float(time.time() - start)
sys.stdout.write("\rProgress:" + str(100 * i / float(len(testing_reviews)))[:4] \
+ "% Speed(reviews/sec):" + str(reviews_per_second)[0:5] \
+ "% #Correct:" + str(correct) + " #Tested:" + str(i + 1) + " Testing Accuracy:" + str(
correct * 100 / float(i + 1))[:4] + "%")
def run(self, review):
# Input Layer
self.update_input_layer(review.lower())
# Hidden layer
layer_1 = self.layer_0.dot(self.weights_0_1)
# Output layer
layer_2 = self.sigmoid(layer_1.dot(self.weights_1_2))
if (layer_2[0] > 0.5):
return "POSITIVE"
else:
return "NEGATIVE"
mlp = SentimentNetwork(reviews[:-1000], labels[:-1000], min_count=20, polarity_cutoff=0.5, learning_rate=0.0001)
mlp.train(reviews[:-3000], labels[:-3000])
print('')
mlp.test(reviews[-1000:], labels[-1000:])
|
dario61081/koalixcrm
|
refs/heads/master
|
koalixcrm/crm/factories/factory_purchase_order.py
|
2
|
# -*- coding: utf-8 -*-
import factory
from koalixcrm.crm.models import PurchaseOrder
from koalixcrm.crm.factories.factory_supplier import StandardSupplierFactory
from koalixcrm.crm.factories.factory_sales_document import StandardSalesDocumentFactory
class StandardPurchaseOrderFactory(StandardSalesDocumentFactory):
class Meta:
model = PurchaseOrder
supplier = factory.SubFactory(StandardSupplierFactory)
status = "C"
|
rushiagr/keystone
|
refs/heads/master
|
keystone/contrib/ec2/routers.py
|
22
|
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
from keystone.common import json_home
from keystone.common import wsgi
from keystone.contrib.ec2 import controllers
build_resource_relation = functools.partial(
json_home.build_v3_extension_resource_relation, extension_name='OS-EC2',
extension_version='1.0')
build_parameter_relation = functools.partial(
json_home.build_v3_extension_parameter_relation, extension_name='OS-EC2',
extension_version='1.0')
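# Two routers follow: Ec2Extension wires the EC2 token and credential routes
# into the v2 API, while Ec2ExtensionV3 registers the same operations as a v3
# extension with JSON Home resource relations.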
class Ec2Extension(wsgi.ExtensionRouter):
def add_routes(self, mapper):
ec2_controller = controllers.Ec2Controller()
# validation
mapper.connect(
'/ec2tokens',
controller=ec2_controller,
action='authenticate',
conditions=dict(method=['POST']))
# crud
mapper.connect(
'/users/{user_id}/credentials/OS-EC2',
controller=ec2_controller,
action='create_credential',
conditions=dict(method=['POST']))
mapper.connect(
'/users/{user_id}/credentials/OS-EC2',
controller=ec2_controller,
action='get_credentials',
conditions=dict(method=['GET']))
mapper.connect(
'/users/{user_id}/credentials/OS-EC2/{credential_id}',
controller=ec2_controller,
action='get_credential',
conditions=dict(method=['GET']))
mapper.connect(
'/users/{user_id}/credentials/OS-EC2/{credential_id}',
controller=ec2_controller,
action='delete_credential',
conditions=dict(method=['DELETE']))
class Ec2ExtensionV3(wsgi.V3ExtensionRouter):
def add_routes(self, mapper):
ec2_controller = controllers.Ec2ControllerV3()
# validation
self._add_resource(
mapper, ec2_controller,
path='/ec2tokens',
post_action='authenticate',
rel=build_resource_relation(resource_name='ec2tokens'))
# crud
self._add_resource(
mapper, ec2_controller,
path='/users/{user_id}/credentials/OS-EC2',
get_action='ec2_list_credentials',
post_action='ec2_create_credential',
rel=build_resource_relation(resource_name='user_credentials'),
path_vars={
'user_id': json_home.Parameters.USER_ID,
})
self._add_resource(
mapper, ec2_controller,
path='/users/{user_id}/credentials/OS-EC2/{credential_id}',
get_action='ec2_get_credential',
delete_action='ec2_delete_credential',
rel=build_resource_relation(resource_name='user_credential'),
path_vars={
'credential_id':
build_parameter_relation(parameter_name='credential_id'),
'user_id': json_home.Parameters.USER_ID,
})
|
LouTheBrew/troposphere
|
refs/heads/master
|
troposphere/redshift.py
|
20
|
# Copyright (c) 2014, Guillem Anguera <[email protected]>
# All rights reserved.
#
# See LICENSE file for full license.
from . import AWSObject, AWSProperty
from .validators import boolean, integer
class Cluster(AWSObject):
resource_type = "AWS::Redshift::Cluster"
props = {
'AllowVersionUpgrade': (boolean, False),
'AutomatedSnapshotRetentionPeriod': (integer, False),
'AvailabilityZone': (basestring, False),
'ClusterParameterGroupName': (basestring, False),
'ClusterSecurityGroups': (list, False),
'ClusterSubnetGroupName': (basestring, False),
'ClusterType': (basestring, True),
'ClusterVersion': (basestring, False),
'DBName': (basestring, True),
'ElasticIp': (basestring, False),
'Encrypted': (boolean, False),
'HsmClientCertificateIdentifier': (basestring, False),
'HsmConfigurationIdentifier': (basestring, False),
'MasterUsername': (basestring, True),
'MasterUserPassword': (basestring, True),
'NodeType': (basestring, True),
'NumberOfNodes': (integer, False), # Conditional
'OwnerAccount': (basestring, False),
'Port': (integer, False),
'PreferredMaintenanceWindow': (basestring, False),
'PubliclyAccessible': (boolean, False),
'SnapshotClusterIdentifier': (basestring, False),
'SnapshotIdentifier': (basestring, False),
'VpcSecurityGroupIds': (list, False),
}
class AmazonRedshiftParameter(AWSProperty):
props = {
'ParameterName': (basestring, True),
'ParameterValue': (basestring, True),
}
class ClusterParameterGroup(AWSObject):
resource_type = "AWS::Redshift::ClusterParameterGroup"
props = {
'Description': (basestring, True),
'ParameterGroupFamily': (basestring, True),
'Parameters': ([AmazonRedshiftParameter], False),
}
class ClusterSecurityGroup(AWSObject):
resource_type = "AWS::Redshift::ClusterSecurityGroup"
props = {
'Description': (basestring, True),
}
class ClusterSecurityGroupIngress(AWSObject):
resource_type = "AWS::Redshift::ClusterSecurityGroupIngress"
props = {
'ClusterSecurityGroupName': (basestring, True),
'CIDRIP': (basestring, False),
'EC2SecurityGroupName': (basestring, False),
'EC2SecurityGroupOwnerId': (basestring, False),
}
class ClusterSubnetGroup(AWSObject):
resource_type = "AWS::Redshift::ClusterSubnetGroup"
props = {
'Description': (basestring, True),
'SubnetIds': (list, True),
}
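# A minimal usage sketch (illustrative values only, not part of this module):
#
#     from troposphere import Template
#     template = Template()
#     template.add_resource(Cluster(
#         "RedshiftCluster",
#         ClusterType="single-node",
#         DBName="mydb",
#         MasterUsername="admin",
#         MasterUserPassword="Secret123",
#         NodeType="dw2.large",
#     ))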
|
Arcanfel/whatToPlay
|
refs/heads/master
|
settings.py
|
1
|
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Django settings for google-app-engine-django project.
import os
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
DATABASE_ENGINE = 'appengine' # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
DATABASE_NAME = '' # Or path to database file if using sqlite3.
DATABASE_USER = '' # Not used with sqlite3.
DATABASE_PASSWORD = '' # Not used with sqlite3.
DATABASE_HOST = '' # Set to empty string for localhost. Not used with sqlite3.
DATABASE_PORT = '' # Set to empty string for default. Not used with sqlite3.
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'UTC'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = ''
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'hvhxfm5u=^*v&doo#oq8x*eg8+1&9sxbye@=umutgn^t_sg_nx'
# Ensure that email is not sent via SMTP by default to match the standard App
# Engine SDK behaviour. If you want to sent email via SMTP then add the name of
# your mailserver here.
EMAIL_HOST = ''
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.load_template_source',
'django.template.loaders.app_directories.load_template_source',
# 'django.template.loaders.eggs.load_template_source',
)
MIDDLEWARE_CLASSES = (
'openidgae.middleware.OpenIDMiddleware',
'django.middleware.common.CommonMiddleware',
# 'django.contrib.sessions.middleware.SessionMiddleware',
# 'django.contrib.auth.middleware.AuthenticationMiddleware',
# 'django.middleware.doc.XViewMiddleware',
)
TEMPLATE_CONTEXT_PROCESSORS = (
# 'django.core.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
# 'django.core.context_processors.media', # 0.97 only.
# 'django.core.context_processors.request',
)
ROOT_URLCONF = 'urls'
ROOT_PATH = os.path.dirname(__file__)
TEMPLATE_DIRS = (
    os.path.join(ROOT_PATH, 'templates'),  # trailing comma keeps this a tuple
)
INSTALLED_APPS = (
'openidgae',
'appengine_django',
# 'django.contrib.auth',
# 'django.contrib.contenttypes',
# 'django.contrib.sessions',
# 'django.contrib.sites',
)
|
carloscrespog/HookTemperature
|
refs/heads/master
|
node_modules/hook.io/node_modules/npm/node_modules/node-gyp/legacy/tools/gyp/pylib/gyp/generator/gypd.py
|
912
|
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""gypd output module
This module produces gyp input as its output. Output files are given the
.gypd extension to avoid overwriting the .gyp files that they are generated
from. Internal references to .gyp files (such as those found in
"dependencies" sections) are not adjusted to point to .gypd files instead;
unlike other paths, which are relative to the .gyp or .gypd file, such paths
are relative to the directory from which gyp was run to create the .gypd file.
This generator module is intended to be a sample and a debugging aid, hence
the "d" for "debug" in .gypd. It is useful to inspect the results of the
various merges, expansions, and conditional evaluations performed by gyp
and to see a representation of what would be fed to a generator module.
It's not advisable to rename .gypd files produced by this module to .gyp,
because they will have all merges, expansions, and evaluations already
performed and the relevant constructs not present in the output; paths to
dependencies may be wrong; and various sections that do not belong in .gyp
files such as "included_files" and "*_excluded" will be present.
Output will also be stripped of comments. This is not intended to be a
general-purpose gyp pretty-printer; for that, you probably just want to
run "pprint.pprint(eval(open('source.gyp').read()))", which will still strip
comments but won't do all of the other things done to this module's output.
The specific formatting of the output generated by this module is subject
to change.
"""
import gyp.common
import errno
import os
import pprint
# These variables should just be spit back out as variable references.
_generator_identity_variables = [
'EXECUTABLE_PREFIX',
'EXECUTABLE_SUFFIX',
'INTERMEDIATE_DIR',
'PRODUCT_DIR',
'RULE_INPUT_ROOT',
'RULE_INPUT_DIRNAME',
'RULE_INPUT_EXT',
'RULE_INPUT_NAME',
'RULE_INPUT_PATH',
'SHARED_INTERMEDIATE_DIR',
]
# gypd doesn't define a default value for OS like many other generator
# modules. Specify "-D OS=whatever" on the command line to provide a value.
generator_default_variables = {
}
# gypd supports multiple toolsets
generator_supports_multiple_toolsets = True
# TODO(mark): This always uses <, which isn't right. The input module should
# notify the generator to tell it which phase it is operating in, and this
# module should use < for the early phase and then switch to > for the late
# phase. Bonus points for carrying @ back into the output too.
for v in _generator_identity_variables:
generator_default_variables[v] = '<(%s)' % v
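# Write one pretty-printed .gypd file next to each input .gyp file, containing
# the fully merged and expanded data gyp produced for it.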
def GenerateOutput(target_list, target_dicts, data, params):
output_files = {}
for qualified_target in target_list:
[input_file, target] = \
gyp.common.ParseQualifiedTarget(qualified_target)[0:2]
if input_file[-4:] != '.gyp':
continue
input_file_stem = input_file[:-4]
output_file = input_file_stem + params['options'].suffix + '.gypd'
if not output_file in output_files:
output_files[output_file] = input_file
for output_file, input_file in output_files.iteritems():
output = open(output_file, 'w')
pprint.pprint(data[input_file], output)
output.close()
|
curtisstpierre/django
|
refs/heads/master
|
tests/i18n/patterns/urls/disabled.py
|
499
|
from django.conf.urls import url
from django.conf.urls.i18n import i18n_patterns
from django.views.generic import TemplateView
view = TemplateView.as_view(template_name='dummy.html')
urlpatterns = i18n_patterns(
url(r'^prefixed/$', view, name='prefixed'),
)
|
akurtakov/Pydev
|
refs/heads/master
|
plugins/org.python.pydev.refactoring/tests/python/codegenerator/constructorfield/testConstructorField2.py
|
8
|
class A:
try:
print "foo"
finally:
print "done."
attribute = "hello"
def my_method(self):
print self.attribute
a = A()
a.my_method()
##c
'''
<config>
<classSelection>0</classSelection>
<attributeSelection>
<int>0</int>
</attributeSelection>
<offsetStrategy>1</offsetStrategy>
</config>
'''
##r
class A:
def __init__(self, attribute):
self.attribute = attribute
try:
print "foo"
finally:
print "done."
attribute = "hello"
def my_method(self):
print self.attribute
a = A()
a.my_method()
|
0000-bigtree/construct
|
refs/heads/master
|
construct/protocols/layer3/icmpv4.py
|
9
|
"""
Internet Control Message Protocol for IPv4 (TCP/IP protocol stack)
"""
from construct import *
from ipv4 import IpAddress
from binascii import unhexlify
import six
echo_payload = Struct("echo_payload",
UBInt16("identifier"),
UBInt16("sequence"),
Bytes("data", 32), # length is implementation dependent...
# is anyone using more than 32 bytes?
)
dest_unreachable_payload = Struct("dest_unreachable_payload",
Padding(2),
UBInt16("next_hop_mtu"),
IpAddress("host"),
Bytes("echo", 8),
)
dest_unreachable_code = Enum(Byte("code"),
Network_unreachable_error = 0,
Host_unreachable_error = 1,
Protocol_unreachable_error = 2,
Port_unreachable_error = 3,
The_datagram_is_too_big = 4,
Source_route_failed_error = 5,
Destination_network_unknown_error = 6,
Destination_host_unknown_error = 7,
Source_host_isolated_error = 8,
    Destination_administratively_prohibited = 9,
Host_administratively_prohibited2 = 10,
Network_TOS_unreachable = 11,
Host_TOS_unreachable = 12,
)
icmp_header = Struct("icmp_header",
Enum(Byte("type"),
Echo_reply = 0,
Destination_unreachable = 3,
Source_quench = 4,
Redirect = 5,
Alternate_host_address = 6,
Echo_request = 8,
Router_advertisement = 9,
Router_solicitation = 10,
Time_exceeded = 11,
Parameter_problem = 12,
Timestamp_request = 13,
Timestamp_reply = 14,
Information_request = 15,
Information_reply = 16,
Address_mask_request = 17,
Address_mask_reply = 18,
_default_ = Pass,
),
Switch("code", lambda ctx: ctx.type,
{
"Destination_unreachable" : dest_unreachable_code,
},
default = Byte("code"),
),
UBInt16("crc"),
Switch("payload", lambda ctx: ctx.type,
{
"Echo_reply" : echo_payload,
"Echo_request" : echo_payload,
"Destination_unreachable" : dest_unreachable_payload,
},
default = Pass
)
)
if __name__ == "__main__":
cap1 = unhexlify(six.b("0800305c02001b006162636465666768696a6b6c6d6e6f70717273747576776162"
"63646566676869"))
cap2 = unhexlify(six.b("0000385c02001b006162636465666768696a6b6c6d6e6f70717273747576776162"
"63646566676869"))
cap3 = unhexlify(six.b("0301000000001122aabbccdd0102030405060708"))
print (icmp_header.parse(cap1))
print (icmp_header.parse(cap2))
print (icmp_header.parse(cap3))
|
krafczyk/spack
|
refs/heads/develop
|
lib/spack/spack/util/log_parse.py
|
4
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from __future__ import print_function
import sys
from six import StringIO
from ctest_log_parser import CTestLogParser, BuildError, BuildWarning
import llnl.util.tty as tty
from llnl.util.tty.color import cescape, colorize
__all__ = ['parse_log_events', 'make_log_context']
def parse_log_events(stream, context=6, jobs=None, profile=False):
"""Extract interesting events from a log file as a list of LogEvent.
Args:
stream (str or fileobject): build log name or file object
context (int): lines of context to extract around each log event
jobs (int): number of jobs to parse with; default ncpus
profile (bool): print out profile information for parsing
Returns:
        (tuple): two lists containing ``BuildError`` and
``BuildWarning`` objects.
This is a wrapper around ``ctest_log_parser.CTestLogParser`` that
lazily constructs a single ``CTestLogParser`` object. This ensures
that all the regex compilation is only done once.
"""
if parse_log_events.ctest_parser is None:
parse_log_events.ctest_parser = CTestLogParser(profile=profile)
result = parse_log_events.ctest_parser.parse(stream, context, jobs)
if profile:
parse_log_events.ctest_parser.print_timings()
return result
#: lazily constructed CTest log parser
parse_log_events.ctest_parser = None
def _wrap(text, width):
"""Break text into lines of specific width."""
lines = []
pos = 0
while pos < len(text):
lines.append(text[pos:pos + width])
pos += width
return lines
def make_log_context(log_events, width=None):
"""Get error context from a log file.
Args:
log_events (list of LogEvent): list of events created by
``ctest_log_parser.parse()``
width (int or None): wrap width; ``0`` for no limit; ``None`` to
auto-size for terminal
Returns:
str: context from the build log with errors highlighted
Parses the log file for lines containing errors, and prints them out
with line numbers and context. Errors are highlighted with '>>' and
with red highlighting (if color is enabled).
Events are sorted by line number before they are displayed.
"""
error_lines = set(e.line_no for e in log_events)
log_events = sorted(log_events, key=lambda e: e.line_no)
num_width = len(str(max(error_lines))) + 4
line_fmt = '%%-%dd%%s' % num_width
indent = ' ' * (5 + num_width)
if width is None:
_, width = tty.terminal_size()
if width <= 0:
width = sys.maxsize
wrap_width = width - num_width - 6
out = StringIO()
next_line = 1
for event in log_events:
start = event.start
if isinstance(event, BuildError):
color = 'R'
elif isinstance(event, BuildWarning):
color = 'Y'
else:
color = 'W'
if next_line != 1 and start > next_line:
out.write('\n ...\n\n')
if start < next_line:
start = next_line
for i in range(start, event.end):
# wrap to width
lines = _wrap(event[i], wrap_width)
lines[1:] = [indent + l for l in lines[1:]]
wrapped_line = line_fmt % (i, '\n'.join(lines))
if i in error_lines:
out.write(colorize(
' @%s{>> %s}\n' % (color, cescape(wrapped_line))))
else:
out.write(' %s\n' % wrapped_line)
next_line = event.end
return out.getvalue()
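# Minimal usage sketch (added for illustration; not part of the original
# module). It assumes a build log named 'spack-build.out' exists in the
# current directory -- that filename is hypothetical.
if __name__ == '__main__':
    errors, warnings = parse_log_events('spack-build.out', context=4)
    print('%d errors, %d warnings' % (len(errors), len(warnings)))
    if errors:
        print(make_log_context(errors, width=80))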
|
sicekit/sicekit
|
refs/heads/master
|
robots/login.py
|
1
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Script to log the robot in to a wiki account.
Suggestion is to make a special account to use for robot use only. Make
sure this robot account is well known on your home wikipedia before using.
Parameters:
-all Try to log in on all sites where a username is defined in
user-config.py.
-pass Useful in combination with -all when you have accounts for
several sites and use the same password for all of them.
Asks you for the password, then logs in on all given sites.
-pass:XXXX Uses XXXX as password. Be careful if you use this
parameter because your password will be shown on your
screen, and will probably be saved in your command line
history. This is NOT RECOMMENDED for use on computers
where others have either physical or remote access.
Use -pass instead.
-sysop Log in with your sysop account.
-force Ignores if the user is already logged in, and tries to log in.
    -v -v      Shows http requests made when logging in. This might leak
    (doubly    private data (password, session id), so make sure to check the
    verbose)   output. Using -log is recommended: this will output a lot of
               data
If not given as parameter, the script will ask for your username and password
(password entry will be hidden), log in to your home wiki using this
combination, and store the resulting cookies (containing your password hash,
so keep it secured!) in a file in the login-data subdirectory.
All scripts in this library will be looking for this cookie file and will use the
login information if it is present.
To log out, throw away the XX-login.data file that is created in the login-data
subdirectory.
"""
#
# (C) Rob W.W. Hooft, 2003
#
# Distributed under the terms of the MIT license.
#
__version__='$Id: login.py 7034 2009-07-09 10:11:29Z alexsh $'
import re
import urllib2
import wikipedia, config
# On some wikis you are only allowed to run a bot if there is a link to
# the bot's user page in a specific list.
botList = {
'wikipedia': {
'en': u'Wikipedia:Registered bots',
# Disabled because they are now using a template system which
# we can't check with our current code.
#'simple': u'Wikipedia:Bots',
},
'gentoo': {
'en': u'Help:Bots',
}
}
class LoginManager:
def __init__(self, password = None, sysop = False, site = None, username=None, verbose=False):
self.site = site or wikipedia.getSite()
if username:
self.username=username
# perform writeback.
if site.family.name not in config.usernames:
config.usernames[site.family.name]={}
config.usernames[site.family.name][self.site.lang]=username
else:
if sysop:
try:
self.username = config.sysopnames[self.site.family.name][self.site.lang]
except:
raise wikipedia.NoUsername(u'ERROR: Sysop username for %s:%s is undefined.\nIf you have a sysop account for that site, please add such a line to user-config.py:\n\nsysopnames[\'%s\'][\'%s\'] = \'myUsername\'' % (self.site.family.name, self.site.lang, self.site.family.name, self.site.lang))
else:
try:
self.username = config.usernames[self.site.family.name][self.site.lang]
except:
raise wikipedia.NoUsername(u'ERROR: Username for %s:%s is undefined.\nIf you have an account for that site, please add such a line to user-config.py:\n\nusernames[\'%s\'][\'%s\'] = \'myUsername\'' % (self.site.family.name, self.site.lang, self.site.family.name, self.site.lang))
self.password = password
self.verbose = verbose
if getattr(config, 'password_file', ''):
self.readPassword()
def botAllowed(self):
"""
Checks whether the bot is listed on a specific page to comply with
the policy on the respective wiki.
"""
if self.site.family.name in botList and self.site.language() in botList[self.site.family.name]:
botListPageTitle = wikipedia.translate(self.site.language(), botList)
botListPage = wikipedia.Page(self.site, botListPageTitle)
for linkedPage in botListPage.linkedPages():
if linkedPage.titleWithoutNamespace() == self.username:
return True
return False
else:
            # No bot policies on other sites
return True
def getCookie(self, api = config.use_api_login, remember=True, captcha = None):
"""
Login to the site.
remember Remember login (default: True)
        captcha    A dictionary containing the captcha id and answer, if any
        Returns cookie data if successful, None otherwise.
"""
if api:
predata = {
'action': 'login',
'lgname': self.username.encode(self.site.encoding()),
'lgpassword': self.password,
'lgdomain': self.site.family.ldapDomain,
}
address = self.site.api_address()
else:
predata = {
"wpName": self.username.encode(self.site.encoding()),
"wpPassword": self.password,
"wpDomain": self.site.family.ldapDomain, # VistaPrint fix
"wpLoginattempt": "Aanmelden & Inschrijven", # dutch button label seems to work for all wikis
"wpRemember": str(int(bool(remember))),
"wpSkipCookieCheck": '1'
}
if captcha:
predata["wpCaptchaId"] = captcha['id']
predata["wpCaptchaWord"] = captcha['answer']
login_address = self.site.login_address()
address = login_address + '&action=submit'
if self.site.hostname() in config.authenticate.keys():
headers = {
"Content-type": "application/x-www-form-urlencoded",
"User-agent": wikipedia.useragent
}
data = self.site.urlEncode(predata)
if self.verbose:
fakepredata = predata
fakepredata['wpPassword'] = u'XXXX'
wikipedia.output(u"urllib2.urlopen(urllib2.Request('%s', %s, %s)):" % (self.site.protocol() + '://' + self.site.hostname() + address, self.site.urlEncode(fakepredata), headers))
response = urllib2.urlopen(urllib2.Request(self.site.protocol() + '://' + self.site.hostname() + address, data, headers))
data = response.read()
if self.verbose:
fakedata = re.sub(r"(session|Token)=..........", r"session=XXXXXXXXXX", data)
trans = config.transliterate
config.transliterate = False #transliteration breaks for some reason
wikipedia.output(fakedata.decode(self.site.encoding()))
config.transliterate = trans
wikipedia.cj.save(wikipedia.COOKIEFILE)
return "Ok"
else:
response, data = self.site.postData(address, self.site.urlEncode(predata))
if self.verbose:
fakepredata = predata
fakepredata['wpPassword'] = fakepredata['lgpassword'] = u'XXXXX'
wikipedia.output(u"self.site.postData(%s, %s)" % (address, self.site.urlEncode(fakepredata)))
fakeresponsemsg = re.sub(r"(session|Token)=..........", r"session=XXXXXXXXXX", response.msg.__str__())
wikipedia.output(u"%s/%s\n%s" % (response.status, response.reason, fakeresponsemsg))
wikipedia.output(u"%s" % data)
Reat=re.compile(': (.*?);')
L = []
for eat in response.msg.getallmatchingheaders('set-cookie'):
m = Reat.search(eat)
if m:
L.append(m.group(1))
got_token = got_user = False
for Ldata in L:
if 'Token=' in Ldata:
got_token = True
if 'User=' in Ldata or 'UserName=' in Ldata:
got_user = True
if got_token and got_user:
return "\n".join(L)
elif not captcha:
solve = self.site.solveCaptcha(data)
if solve:
return self.getCookie(api = api, remember = remember, captcha = solve)
return None
def storecookiedata(self, data):
"""
Stores cookie data.
The argument data is the raw data, as returned by getCookie().
Returns nothing."""
filename = wikipedia.config.datafilepath('login-data',
'%s-%s-%s-login.data'
% (self.site.family.name, self.site.lang, self.username))
f = open(filename, 'w')
f.write(data)
f.close()
def readPassword(self):
"""
Reads passwords from a file. DO NOT FORGET TO REMOVE READ
ACCESS FOR OTHER USERS!!! Use chmod 600 password-file.
All lines below should be valid Python tuples in the form
(code, family, username, password) or (username, password)
    to set a default password for a username. Default usernames
should occur above specific usernames.
Example:
("my_username", "my_default_password")
("my_sysop_user", "my_sysop_password")
("en", "wikipedia", "my_en_user", "my_en_pass")
"""
file = open(wikipedia.config.datafilepath(config.password_file))
for line in file:
if not line.strip(): continue
entry = eval(line)
if len(entry) == 2: #for default userinfo
if entry[0] == self.username: self.password = entry[1]
elif len(entry) == 4: #for userinfo included code and family
if entry[0] == self.site.lang and \
entry[1] == self.site.family.name and \
entry[2] == self.username:
self.password = entry[3]
file.close()
def login(self, api = config.use_api_login, retry = False):
if not self.password:
# As we don't want the password to appear on the screen, we set
# password = True
self.password = wikipedia.input(u'Password for user %s on %s:' % (self.username, self.site), password = True)
self.password = self.password.encode(self.site.encoding())
wikipedia.output(u"Logging in to %s as %s" % (self.site, self.username))
try:
cookiedata = self.getCookie(api = api)
except NotImplementedError:
            wikipedia.output('API disabled because this site does not support it.')
config.use_api_login = api = False
cookiedata = self.getCookie(api = api)
if cookiedata:
self.storecookiedata(cookiedata)
wikipedia.output(u"Should be logged in now")
# Show a warning according to the local bot policy
if not self.botAllowed():
wikipedia.output(u'*** Your username is not listed on [[%s]].\n*** Please make sure you are allowed to use the robot before actually using it!' % botList[self.site.family.name][self.site.lang])
return True
else:
wikipedia.output(u"Login failed. Wrong password or CAPTCHA answer?")
if api:
wikipedia.output(u"API login failed, retrying using standard webpage.")
return self.login(api = False, retry = retry)
if retry:
self.password = None
return self.login(api = api, retry = True)
else:
return False
def showCaptchaWindow(self, url):
pass
def main():
username = password = None
sysop = False
logall = False
forceLogin = False
verbose = False
for arg in wikipedia.handleArgs():
if arg.startswith("-pass"):
if len(arg) == 5:
password = wikipedia.input(u'Password for all accounts:', password = True)
else:
password = arg[6:]
elif arg == "-sysop":
sysop = True
elif arg == "-all":
logall = True
elif arg == "-force":
forceLogin = True
else:
wikipedia.showHelp('login')
return
if wikipedia.verbose > 1:
wikipedia.output(u"WARNING: Using -v -v on login.py might leak private data. When sharing, please double check your password is not readable and log out your bots session.")
verbose = True # only use this verbose when running from login.py
if logall:
if sysop:
namedict = config.sysopnames
else:
namedict = config.usernames
for familyName in namedict.iterkeys():
for lang in namedict[familyName].iterkeys():
try:
site = wikipedia.getSite( code=lang, fam=familyName )
if not forceLogin and site.loggedInAs(sysop = sysop) is not None:
wikipedia.output(u'Already logged in on %s' % site)
else:
loginMan = LoginManager(password, sysop = sysop, site = site, verbose=verbose)
loginMan.login()
except wikipedia.NoSuchSite:
wikipedia.output(lang+ u'.' + familyName + u' is not a valid site, please remove it from your config')
else:
loginMan = LoginManager(password, sysop = sysop, verbose=verbose)
loginMan.login()
if __name__ == "__main__":
try:
main()
finally:
wikipedia.stopme()
|
utecuy/edx-platform
|
refs/heads/master
|
common/lib/xmodule/xmodule/tests/test_bulk_assertions.py
|
173
|
import ddt
import itertools
from xmodule.tests import BulkAssertionTest, BulkAssertionError
STATIC_PASSING_ASSERTIONS = (
('assertTrue', True),
('assertFalse', False),
('assertIs', 1, 1),
('assertEqual', 1, 1),
('assertEquals', 1, 1),
('assertIsNot', 1, 2),
('assertIsNone', None),
('assertIsNotNone', 1),
('assertIn', 1, (1, 2, 3)),
('assertNotIn', 5, (1, 2, 3)),
('assertIsInstance', 1, int),
('assertNotIsInstance', '1', int),
('assertItemsEqual', [1, 2, 3], [3, 2, 1])
)
STATIC_FAILING_ASSERTIONS = (
('assertTrue', False),
('assertFalse', True),
('assertIs', 1, 2),
('assertEqual', 1, 2),
('assertEquals', 1, 2),
('assertIsNot', 1, 1),
('assertIsNone', 1),
('assertIsNotNone', None),
('assertIn', 5, (1, 2, 3)),
('assertNotIn', 1, (1, 2, 3)),
('assertIsInstance', '1', int),
('assertNotIsInstance', 1, int),
('assertItemsEqual', [1, 1, 1], [1, 1])
)
CONTEXT_PASSING_ASSERTIONS = (
('assertRaises', KeyError, {}.__getitem__, '1'),
('assertRaisesRegexp', KeyError, "1", {}.__getitem__, '1'),
)
CONTEXT_FAILING_ASSERTIONS = (
('assertRaises', ValueError, lambda: None),
('assertRaisesRegexp', KeyError, "2", {}.__getitem__, '1'),
)
@ddt.ddt
class TestBulkAssertionTestCase(BulkAssertionTest):
# We have to use assertion methods from the base UnitTest class,
# so we make a number of super calls that skip BulkAssertionTest.
# pylint: disable=bad-super-call
def _run_assertion(self, assertion_tuple):
"""
Run the supplied tuple of (assertion, *args) as a method on this class.
"""
assertion, args = assertion_tuple[0], assertion_tuple[1:]
getattr(self, assertion)(*args)
def _raw_assert(self, assertion_name, *args, **kwargs):
"""
Run an un-modified assertion.
"""
        # Use super(BulkAssertionTest) to make sure we get unadulterated assertions
return getattr(super(BulkAssertionTest, self), 'assert' + assertion_name)(*args, **kwargs)
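        # For illustration: self._raw_assert('Equal', 1, 1) dispatches to the
        # base test case's assertEqual, bypassing the bulk-assertion wrapping.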
@ddt.data(*(STATIC_PASSING_ASSERTIONS + CONTEXT_PASSING_ASSERTIONS))
def test_passing_asserts_passthrough(self, assertion_tuple):
self._run_assertion(assertion_tuple)
@ddt.data(*(STATIC_FAILING_ASSERTIONS + CONTEXT_FAILING_ASSERTIONS))
def test_failing_asserts_passthrough(self, assertion_tuple):
with self._raw_assert('Raises', AssertionError) as context:
self._run_assertion(assertion_tuple)
self._raw_assert('NotIsInstance', context.exception, BulkAssertionError)
@ddt.data(*CONTEXT_PASSING_ASSERTIONS)
@ddt.unpack
def test_passing_context_assertion_passthrough(self, assertion, *args):
assertion_args = []
args = list(args)
exception = args.pop(0)
while not callable(args[0]):
assertion_args.append(args.pop(0))
function = args.pop(0)
with getattr(self, assertion)(exception, *assertion_args):
function(*args)
@ddt.data(*CONTEXT_FAILING_ASSERTIONS)
@ddt.unpack
def test_failing_context_assertion_passthrough(self, assertion, *args):
assertion_args = []
args = list(args)
exception = args.pop(0)
while not callable(args[0]):
assertion_args.append(args.pop(0))
function = args.pop(0)
with self._raw_assert('Raises', AssertionError) as context:
with getattr(self, assertion)(exception, *assertion_args):
function(*args)
self._raw_assert('NotIsInstance', context.exception, BulkAssertionError)
@ddt.data(*list(itertools.product(
CONTEXT_PASSING_ASSERTIONS,
CONTEXT_FAILING_ASSERTIONS,
CONTEXT_FAILING_ASSERTIONS
)))
@ddt.unpack
def test_bulk_assert(self, passing_assertion, failing_assertion1, failing_assertion2):
contextmanager = self.bulk_assertions()
contextmanager.__enter__()
self._run_assertion(passing_assertion)
self._run_assertion(failing_assertion1)
self._run_assertion(failing_assertion2)
with self._raw_assert('Raises', BulkAssertionError) as context:
contextmanager.__exit__(None, None, None)
self._raw_assert('Equals', len(context.exception.errors), 2)
@ddt.data(*list(itertools.product(
CONTEXT_FAILING_ASSERTIONS
)))
@ddt.unpack
def test_nested_bulk_asserts(self, failing_assertion):
with self._raw_assert('Raises', BulkAssertionError) as context:
with self.bulk_assertions():
self._run_assertion(failing_assertion)
with self.bulk_assertions():
self._run_assertion(failing_assertion)
self._run_assertion(failing_assertion)
self._raw_assert('Equal', len(context.exception.errors), 3)
@ddt.data(*list(itertools.product(
CONTEXT_PASSING_ASSERTIONS,
CONTEXT_FAILING_ASSERTIONS,
CONTEXT_FAILING_ASSERTIONS
)))
@ddt.unpack
def test_bulk_assert_closed(self, passing_assertion, failing_assertion1, failing_assertion2):
with self._raw_assert('Raises', BulkAssertionError) as context:
with self.bulk_assertions():
self._run_assertion(passing_assertion)
self._run_assertion(failing_assertion1)
self._raw_assert('Equals', len(context.exception.errors), 1)
with self._raw_assert('Raises', AssertionError) as context:
self._run_assertion(failing_assertion2)
self._raw_assert('NotIsInstance', context.exception, BulkAssertionError)
|
xiaoyongaa/ALL
|
refs/heads/master
|
网络编程第四周/cash.py
|
1
|
import os
# Print the size in bytes reported by os.stat for the local path "G:\疾风之刃";
# a raw string avoids accidental backslash escape sequences in the Windows path.
size = os.stat(r"G:\疾风之刃").st_size
print(size)
|
shakamunyi/nova
|
refs/heads/master
|
nova/virt/xenapi/host.py
|
2
|
# Copyright (c) 2012 Citrix Systems, Inc.
# Copyright 2010 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Management class for host-related functions (start, reboot, etc).
"""
import re
from oslo.config import cfg
from oslo.serialization import jsonutils
from nova.compute import arch
from nova.compute import hv_type
from nova.compute import task_states
from nova.compute import vm_mode
from nova.compute import vm_states
from nova import context
from nova import exception
from nova.i18n import _, _LE, _LI, _LW
from nova import objects
from nova.openstack.common import log as logging
from nova.virt.xenapi import pool_states
from nova.virt.xenapi import vm_utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class Host(object):
"""Implements host related operations."""
def __init__(self, session, virtapi):
self._session = session
self._virtapi = virtapi
def host_power_action(self, action):
"""Reboots or shuts down the host."""
args = {"action": jsonutils.dumps(action)}
methods = {"reboot": "host_reboot", "shutdown": "host_shutdown"}
response = call_xenhost(self._session, methods[action], args)
return response.get("power_action", response)
def host_maintenance_mode(self, host, mode):
"""Start/Stop host maintenance window. On start, it triggers
guest VMs evacuation.
"""
if not mode:
return 'off_maintenance'
host_list = [host_ref for host_ref in
self._session.host.get_all()
if host_ref != self._session.host_ref]
migrations_counter = vm_counter = 0
ctxt = context.get_admin_context()
for vm_ref, vm_rec in vm_utils.list_vms(self._session):
for host_ref in host_list:
try:
# Ensure only guest instances are migrated
uuid = vm_rec['other_config'].get('nova_uuid')
if not uuid:
name = vm_rec['name_label']
uuid = _uuid_find(ctxt, host, name)
if not uuid:
LOG.info(_LI('Instance %(name)s running on '
'%(host)s could not be found in '
'the database: assuming it is a '
                                     'worker VM and skipping migration '
'to a new host'),
{'name': name, 'host': host})
continue
instance = objects.Instance.get_by_uuid(ctxt, uuid)
vm_counter = vm_counter + 1
aggregate = objects.AggregateList.get_by_host(
ctxt, host, key=pool_states.POOL_FLAG)
if not aggregate:
                        msg = _('Aggregate for host %(host)s could not be'
' found.') % dict(host=host)
raise exception.NotFound(msg)
dest = _host_find(ctxt, self._session, aggregate[0],
host_ref)
instance.host = dest
instance.task_state = task_states.MIGRATING
instance.save()
self._session.VM.pool_migrate(vm_ref, host_ref,
{"live": "true"})
migrations_counter = migrations_counter + 1
instance.vm_state = vm_states.ACTIVE
instance.save()
break
except self._session.XenAPI.Failure:
LOG.exception(_LE('Unable to migrate VM %(vm_ref)s '
'from %(host)s'),
{'vm_ref': vm_ref, 'host': host})
instance.host = host
instance.vm_state = vm_states.ACTIVE
instance.save()
if vm_counter == migrations_counter:
return 'on_maintenance'
else:
raise exception.NoValidHost(reason='Unable to find suitable '
'host for VMs evacuation')
def set_host_enabled(self, enabled):
"""Sets the compute host's ability to accept new instances."""
# Since capabilities are gone, use service table to disable a node
# in scheduler
cntxt = context.get_admin_context()
service = objects.Service.get_by_args(cntxt, CONF.host,
'nova-compute')
service.disabled = not enabled
service.disabled_reason = 'set by xenapi host_state'
service.save()
args = {"enabled": jsonutils.dumps(enabled)}
response = call_xenhost(self._session, "set_host_enabled", args)
return response.get("status", response)
def get_host_uptime(self):
"""Returns the result of calling "uptime" on the target host."""
response = call_xenhost(self._session, "host_uptime", {})
return response.get("uptime", response)
class HostState(object):
"""Manages information about the XenServer host this compute
node is running on.
"""
def __init__(self, session):
super(HostState, self).__init__()
self._session = session
self._stats = {}
self.update_status()
def _get_passthrough_devices(self):
"""Get a list pci devices that are available for pci passthtough.
We use a plugin to get the output of the lspci command runs on dom0.
From this list we will extract pci devices that are using the pciback
kernel driver.
:returns: a list of pci devices on the node
"""
def _compile_hex(pattern):
"""Return a compiled regular expression pattern into which we have
replaced occurrences of hex by [\da-fA-F].
"""
return re.compile(pattern.replace("hex", r"[\da-fA-F]"))
def _parse_pci_device_string(dev_string):
"""Exctract information from the device string about the slot, the
vendor and the product ID. The string is as follow:
"Slot:\tBDF\nClass:\txxxx\nVendor:\txxxx\nDevice:\txxxx\n..."
Return a dictionary with informations about the device.
"""
slot_regex = _compile_hex(r"Slot:\t"
r"((?:hex{4}:)?" # Domain: (optional)
r"hex{2}:" # Bus:
r"hex{2}\." # Device.
r"hex{1})") # Function
vendor_regex = _compile_hex(r"\nVendor:\t(hex+)")
product_regex = _compile_hex(r"\nDevice:\t(hex+)")
slot_id = slot_regex.findall(dev_string)
vendor_id = vendor_regex.findall(dev_string)
product_id = product_regex.findall(dev_string)
if not slot_id or not vendor_id or not product_id:
raise exception.NovaException(
_("Failed to parse information about"
" a pci device for passthrough"))
type_pci = self._session.call_plugin_serialized(
'xenhost', 'get_pci_type', slot_id[0])
return {'label': '_'.join(['label',
vendor_id[0],
product_id[0]]),
'vendor_id': vendor_id[0],
'product_id': product_id[0],
'address': slot_id[0],
'dev_id': '_'.join(['pci', slot_id[0]]),
'dev_type': type_pci,
'status': 'available'}
# Devices are separated by a blank line. That is why we
# use "\n\n" as separator.
lspci_out = self._session.call_plugin_serialized(
'xenhost', 'get_pci_device_details')
pci_list = lspci_out.split("\n\n")
# For each device of the list, check if it uses the pciback
        # kernel driver and if it does, get information and add it
# to the list of passthrough_devices. Ignore it if the driver
# is not pciback.
passthrough_devices = []
for dev_string_info in pci_list:
if "Driver:\tpciback" in dev_string_info:
new_dev = _parse_pci_device_string(dev_string_info)
passthrough_devices.append(new_dev)
return passthrough_devices
def get_host_stats(self, refresh=False):
"""Return the current state of the host. If 'refresh' is
True, run the update first.
"""
if refresh or not self._stats:
self.update_status()
return self._stats
def update_status(self):
"""Since under Xenserver, a compute node runs on a given host,
we can get host status information using xenapi.
"""
LOG.debug("Updating host stats")
data = call_xenhost(self._session, "host_data", {})
if data:
sr_ref = vm_utils.scan_default_sr(self._session)
sr_rec = self._session.SR.get_record(sr_ref)
total = int(sr_rec["physical_size"])
used = int(sr_rec["physical_utilisation"])
data["disk_total"] = total
data["disk_used"] = used
data["disk_allocated"] = int(sr_rec["virtual_allocation"])
data["disk_available"] = total - used
data["supported_instances"] = to_supported_instances(
data.get("host_capabilities")
)
data["cpu_model"] = to_cpu_model(
data.get("host_cpu_info")
)
host_memory = data.get('host_memory', None)
if host_memory:
data["host_memory_total"] = host_memory.get('total', 0)
data["host_memory_overhead"] = host_memory.get('overhead', 0)
data["host_memory_free"] = host_memory.get('free', 0)
data["host_memory_free_computed"] = host_memory.get(
'free-computed', 0)
del data['host_memory']
if (data['host_hostname'] !=
self._stats.get('host_hostname', data['host_hostname'])):
LOG.error(_LE('Hostname has changed from %(old)s to %(new)s. '
'A restart is required to take effect.') %
{'old': self._stats['host_hostname'],
'new': data['host_hostname']})
data['host_hostname'] = self._stats['host_hostname']
data['hypervisor_hostname'] = data['host_hostname']
vcpus_used = 0
for vm_ref, vm_rec in vm_utils.list_vms(self._session):
vcpus_used = vcpus_used + int(vm_rec['VCPUs_max'])
data['vcpus_used'] = vcpus_used
data['pci_passthrough_devices'] = self._get_passthrough_devices()
self._stats = data
def to_supported_instances(host_capabilities):
if not host_capabilities:
return []
result = []
for capability in host_capabilities:
try:
            # 'capability' is unicode but we want arch/ostype
# to be strings to match the standard constants
capability = str(capability)
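            # For illustration (hypothetical value): a capability such as
            # "hvm-3.0-x86_64" follows the assumed "ostype-version-arch" layout,
            # so the split below yields ostype="hvm" and guestarch="x86_64".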
ostype, _version, guestarch = capability.split("-")
guestarch = arch.canonicalize(guestarch)
ostype = vm_mode.canonicalize(ostype)
result.append((guestarch, hv_type.XEN, ostype))
except ValueError:
LOG.warning(_LW("Failed to extract instance support from %s"),
capability)
return result
def to_cpu_model(host_cpu_info):
# The XenAPI driver returns data in the format
#
# {"physical_features": "0098e3fd-bfebfbff-00000001-28100800",
# "modelname": "Intel(R) Xeon(R) CPU X3430 @ 2.40GHz",
# "vendor": "GenuineIntel",
# "features": "0098e3fd-bfebfbff-00000001-28100800",
# "family": 6,
# "maskable": "full",
# "cpu_count": 4,
# "socket_count": "1",
# "flags": "fpu de tsc msr pae mce cx8 apic sep mtrr mca cmov
# pat clflush acpi mmx fxsr sse sse2 ss ht nx
# constant_tsc nonstop_tsc aperfmperf pni vmx est
# ssse3 sse4_1 sse4_2 popcnt hypervisor ida
# tpr_shadow vnmi flexpriority ept vpid",
# "stepping": 5,
# "model": 30,
# "features_after_reboot": "0098e3fd-bfebfbff-00000001-28100800",
# "speed": "2394.086"}
if host_cpu_info is None:
return None
cpu_info = dict()
# TODO(berrange) the data we're putting in model is not
# exactly comparable to what libvirt puts in model. The
# libvirt model names are a well defined short string
    # which is really an alias for a particular set of
# feature flags. The Xen model names are raw printable
# strings from the kernel with no specific semantics
cpu_info["model"] = host_cpu_info["modelname"]
cpu_info["vendor"] = host_cpu_info["vendor"]
# TODO(berrange) perhaps we could fill in 'arch' field too
# by looking at 'host_capabilities' for the Xen host ?
topology = dict()
topology["sockets"] = int(host_cpu_info["socket_count"])
topology["cores"] = (int(host_cpu_info["cpu_count"]) /
int(host_cpu_info["socket_count"]))
# TODO(berrange): if 'ht' is present in the 'flags' list
# is it possible to infer that the 'cpu_count' is in fact
# sockets * cores * threads ? Unclear if 'ht' would remain
# visible when threads are disabled in BIOS ?
topology["threads"] = 1
cpu_info["topology"] = topology
cpu_info["features"] = host_cpu_info["flags"].split(" ")
return cpu_info
def call_xenhost(session, method, arg_dict):
"""There will be several methods that will need this general
handling for interacting with the xenhost plugin, so this abstracts
out that behavior.
"""
# Create a task ID as something that won't match any instance ID
try:
result = session.call_plugin('xenhost', method, args=arg_dict)
if not result:
return ''
return jsonutils.loads(result)
except ValueError:
LOG.exception(_LE("Unable to get updated status"))
return None
except session.XenAPI.Failure as e:
LOG.error(_LE("The call to %(method)s returned "
"an error: %(e)s."), {'method': method, 'e': e})
return e.details[1]
def _uuid_find(context, host, name_label):
"""Return instance uuid by name_label."""
for i in objects.InstanceList.get_by_host(context, host):
if i.name == name_label:
return i.uuid
return None
def _host_find(context, session, src_aggregate, host_ref):
"""Return the host from the xenapi host reference.
:param src_aggregate: the aggregate that the compute host being put in
maintenance (source of VMs) belongs to
:param host_ref: the hypervisor host reference (destination of VMs)
:return: the compute host that manages host_ref
"""
# NOTE: this would be a lot simpler if nova-compute stored
# CONF.host in the XenServer host's other-config map.
# TODO(armando-migliaccio): improve according the note above
uuid = session.host.get_uuid(host_ref)
for compute_host, host_uuid in src_aggregate.metadetails.iteritems():
if host_uuid == uuid:
return compute_host
raise exception.NoValidHost(reason='Host %(host_uuid)s could not be found '
'from aggregate metadata: %(metadata)s.' %
{'host_uuid': uuid,
'metadata': src_aggregate.metadetails})
|
shinyvince/augmented-traffic-control
|
refs/heads/master
|
atc/django-atc-demo-ui/atc_demo_ui/views.py
|
18
|
#
# Copyright (c) 2014, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
#
#
from atc_demo_ui.settings import atc_demo_ui_settings
from django.shortcuts import render_to_response
from django.template import RequestContext
def index(request):
context = {'atc_demo_ui_settings': atc_demo_ui_settings}
return render_to_response(
'atc_demo_ui/index.html',
context,
RequestContext(request)
)
|
SanketDG/coala
|
refs/heads/master
|
coalib/bearlib/aspects/Smell.py
|
4
|
from coalib.bearlib.aspects import Taste, Root
@Root.subaspect
class Smell:
"""
Symptom in a piece of code that possibly indicates a deeper problem.
`Smells` are certain structures in a code that indicate violation of
fundamental design principles. They are usually not bugs; they are not
technically incorrect and do not currently prevent the program from
functioning.
"""
class Docs:
example = """
=begin
Example of Ruby code with data clumps and methods with too many
parameters.
=end
class Dummy
def x(y1, y2, y3, y4, y5, y6, y7, y8, a); end
def y(y1, y2, y3, y4, y5, y6, y7, y8); end
def z(y1, y2, y3, y4, y5, y6, y7, y8); end
end
"""
example_language = 'Ruby'
importance_reason = """
Even though they are not necessarily bugs, code smells increase the
risk of bugs or failure in the future and may slow down development.
"""
fix_suggestions = """
There are several `refactoring techniques` that can be used to deal
with `code smells` including:
* Composing methods
* Moving features between objects
* Organizing data
* Simplifying conditional expressions
* Simplifying method calls
* Dealing with generalisation
See <https://sourcemaking.com/refactoring/refactorings> for more
information.
"""
@Smell.subaspect
class ClassSmell:
"""
Code smells related to classes' definition.
Class-level code smells indicate poorly defined classes (including too
    large classes or God objects, data clumps, feature envy, etc.) in your
source code.
"""
class Docs:
example = """
class Warehouse
def sale_price(item)
item.price - item.rebate
end
end
# sale_price refers to item more than self.
"""
example_language = 'Ruby'
importance_reason = """
These classes should be refactored for better readability and
maintainability of your source code.
"""
fix_suggestions = """
When a class is wearing too many (functional) hats (too large
classes), you should probably think about splitting it up:
* Extract class
* Extract subclass
* Extract interface
"""
@Smell.subaspect
class MethodSmell:
"""
Code smells related to a method or function definition.
    Method-level code smells indicate poorly defined methods and/or
    functions (too long methods or functions, or functions with too many
parameters) in your source code.
"""
class Docs:
example = """
def do_nothing(var1, var2, var3, var4, var5, var6, var7):
pass
"""
example_language = 'Python'
importance_reason = """
Make your functions and methods unambiguous, easy to read and debug
by reducing the number of parameters and length of your methods and
functions.
"""
fix_suggestions = """
A fix for this would simply consist of redefining the functions
(and or method), making them shorter and reducing the number of
parameters (maybe by creating more functions or using libraries).
"""
@MethodSmell.subaspect
class MethodLength:
"""
Number of lines of code in a function or method definition.
Depending on the value of `method_length_count_comment`,
comments are considered. The `rule of 30
<https://dzone.com/articles/rule-30-%E2%80%93-when-method-class-or>`_
suggests that the maximum number of lines for a method is 30. ``PMD``
    defines a default value of 100 lines per method, ``checkstyle`` 150 and
60 (when comments are not considered), ``rubocop`` 10.
"""
class Docs:
example = """
def _is_positive(num):
if num > 0:
return True
else:
return False
# This function can be defined as follow:
def is_positive(num):
return num > 0
"""
example_language = 'Python'
importance_reason = """
    It is really important to stay DRY ("Don't Repeat Yourself") and
respect separation of concerns. Long methods are sometimes faster and
easier to write, and don't lead to maintenance problems. But most of
the time they are an easy way to detect that something *may* be wrong
in the code, and that special care is required while maintaining it.
"""
fix_suggestions = """
Refactoring methods into smaller more generic methods, making code more
compact by using inline and language specific syntax tricks,
implementing methods or functions to avoid redundant operation(s),
making use of methods provided in libraries rather than reimplementing
them.
"""
max_method_length = Taste[int](
        'Represents the max number of lines for a method or a function\'s '
'definition.',
(10, 30, 50, 60, 100), default=30)
method_length_count_comment = Taste[bool](
        'Allows, when set to `True`, to consider comments while calculating '
'methods\' length.',
(30, 60, 100, 150), default=60)
@MethodSmell.subaspect
class ParameterListLength:
"""
    Number of parameters a function or method has.
In the book "Code Complete", ISBN 0735619670 it is suggested that the
    maximum number of parameters per function or method is 7; ``rubocop``
suggests 5.
"""
class Docs:
example = """
def func(a, b, c, d, e, f, g, h, i, j, k):
pass
"""
example_language = 'Python'
importance_reason = """
Methods that take too many parameters are difficult to read, maintain
    and work with, and callers of those methods often have an awkward time
    assembling all of the data; the resulting code is usually not pretty.
"""
fix_suggestions = """
This can be fixed by:
Instead of passing a group of data received from an object as
parameters, pass the object itself to the method.
Sometimes you can merge several parameters into a single object etc.
"""
max_parameters = Taste[int](
'Represents the max number of parameters for a function or a method.',
(5, 7), default=7)
@ClassSmell.subaspect
class DataClump:
"""
Identical groups of variables found in many different part of a program.
"""
class Docs:
example = """
public static void main(String args[]) {
String firstName = args[0];
String lastName = args[1];
Integer age = new Integer(args[2]);
String gender = args[3];
String occupation = args[4];
String city = args[5];
welcomeNew(firstName,lastName,age,gender,occupation,city);
}
public static void welcomeNew(String firstName, String lastName,
Integer age, String gender,
String occupation, String city){
System.out.printf(
"Welcome %s %s, a %d-year-old %s from %s who works as a %s",
firstName, lastName, age, gender, city, occupation
);
}
"""
example_language = 'Java'
importance_reason = """
Data clumps make code difficult to read, understand, and reuse.
    They also spoil the program's architecture.
"""
fix_suggestions = """
Formally group the different variables together into a single object.
"""
@ClassSmell.subaspect
class ClassSize:
"""
Class size refers to the size of a class.
A class's size is based on:
* the number of fields it contains,
* the number of methods,
* and the number of lines of code.
"""
class Docs:
example = """
// This is large class given that the `max_class_length` is 20
public class Employee
{
private float salary;
private float bonusPercentage;
private EmployeeType employeeType;
public Employee(float salary, float bonusPercentage,
EmployeeType employeeType)
{
this.salary = salary;
this.bonusPercentage = bonusPercentage;
this.employeeType = employeeType;
}
public float CalculateSalary()
{
switch (employeeType)
{
case EmployeeType.Worker:
return salary;
case EmployeeType.Supervisor:
return salary + (bonusPercentage * 0.5F);
case EmployeeType.Manager:
return salary + (bonusPercentage * 0.7F);
}
return 0.0F;
}
}
"""
example_language = 'Java'
importance_reason = """
Refactoring large classes spares developers from the need to remember
a large number of attributes and methods. Splitting these classes
avoids duplication, and makes the code shorter and easier to maintain.
Sometimes a Lazy Class (a class that does too little) is created in
    order to delineate intentions for future development. In this case,
    try to maintain a balance between clarity and simplicity in your code,
    and delete such classes if they serve no purpose.
"""
fix_suggestions = """
    Usually splitting up large classes into other classes, or giving inline
    class treatment to components that are nearly useless, can solve this
problem.
"""
@ClassSize.subaspect
class ClassLength:
"""
Number of lines of code in class' definition.
"""
class Docs:
example = """
# Here is an example of a large class (in terms of number of lines) if
        # we assume that the maximum number of lines per class definition is 10
class Student:
def __init__(self, first_name, last_name, dob,
matricule, school, faculty, department,
level, courses):
self.first_name = first_name
self.last_name = last_name
self.dob = dob
self.matricule = matricule
self.school = school
self.faculty = faculty
self.department = department
self.level = level
self.courses = courses
"""
example_language = 'Python 3'
importance_reason = """
Too large classes are difficult to read and maintain, and can easily
introduce bugs or duplication in our code base.
"""
fix_suggestions = """
Usually splitting up those classes into other classes solves the
problem.
"""
max_class_length = Taste[int](
'Represents the max number of lines for a class\'s definition.',
(999, 900), default=900)
@ClassSize.subaspect
class ClassConstants:
"""
Number of constants in a class.
"""
class Docs:
example = """
// Here is an example of class with too many constants if we assume
// that the maximum number of constants per class is 4
class Aclass {
final public int a = 1;
final public int b = 2;
final public String c = "coala";
final public String d = "aspectsYEAH";
final public Boolean e = true;
public Aclass(){}
}
"""
example_language = 'Java'
importance_reason = """
    Avoids having too many constants to spare developers from the need
to remember too many of them.
"""
fix_suggestions = """
`ClassConstants` issues can be fixed by using data classes.
"""
max_constants = Taste[int](
'Represents the max number of constants for a class',
(3,), default=3)
@ClassSize.subaspect
class ClassInstanceVariables:
"""
Number of instance variables in a class.
"""
class Docs:
example = """
# Here is an example of a class with a large number of instance
        # variables if we assume that the maximum number of instance variables
# per class is 5.
class Student:
def __init__(self, first_name, last_name, dob,
matricule, school, faculty, department,
level, courses):
self.first_name = first_name
self.last_name = last_name
self.dob = dob
self.matricule = matricule
self.school = school
self.faculty = faculty
self.department = department
self.level = level
self.courses = courses
"""
example_language = 'Python 3'
importance_reason = """
Refactoring these classes spares developers from the need to remember
a large number of attributes.
"""
fix_suggestions = """
Usually splitting up those classes into other classes solves the
problem.
"""
max_instance_variables = Taste[int](
'Represents the max number of instance variables for a class',
(3,), default=3)
@ClassSize.subaspect
class ClassMethods:
"""
Number of class methods a class has.
"""
class Docs:
example = """
# Here is an example of a class with too many methods
# Assuming that the maximum per class is 5.
class AClass:
def x(self): pass
def y(self): pass
def z(self): pass
def p(self): pass
def q(self): pass
def r(self): pass
"""
example_language = 'Python 3'
importance_reason = """
Refactoring these classes spares developers from the need to remember
a large number of methods.
"""
fix_suggestions = """
Usually splitting up those classes into other classes solves the
problem.
"""
max_methods = Taste[int](
'Represents the max number of methods for a class',
(3,), default=3)
@ClassSmell.subaspect
class FeatureEnvy:
"""
Classes that excessively use methods of other classes.
"""
class Docs:
example = """
public class Phone {
private final String unformattedNumber;
public Phone(String unformattedNumber) {
this.unformattedNumber = unformattedNumber;
}
public String getAreaCode() {
return unformattedNumber.substring(0,3);
}
public String getPrefix() {
return unformattedNumber.substring(3,6);
}
public String getNumber() {
return unformattedNumber.substring(6,10);
}
}
public class Customer {
private Phone mobilePhone;
public String getMobilePhoneNumber() {
return "(" +
mobilePhone.getAreaCode() + ") " +
mobilePhone.getPrefix() + "-" +
mobilePhone.getNumber();
}
}
"""
example_language = 'Java'
importance_reason = """
    This smell may occur after fields are moved to a data class, which
    makes the code less readable and difficult to debug.
"""
fix_suggestions = """
    Move the operations on data to the newly defined class (given that
the fields of one class were moved to this class) as well.
"""
@Smell.subaspect
class Naming:
"""
`Naming` refers to the naming conventions to use for identifiers.
"""
class Docs:
example = """
dummyVar = None # lowerCamelCase naming convention
DummyVar = None # UpperCamelCase naming convention
dummy_var = None # snake_case naming convention
"""
example_language = 'Python 3'
importance_reason = """
    Consistent use of naming conventions makes the code easy to read
    and debug.
"""
fix_suggestions = """
Use the appropriate naming convention for each data type.
"""
variable_naming_convention = Taste[str](
'Naming convention to use for variables\'s identifiers',
('lowerCamelCase', 'snake_case', 'kebab-case', 'UpperCamelCase'),
default='snake_case')
function_naming_convention = Taste[str](
'Naming convention to use for functions\'s or methods\'s identifiers',
        ('lowerCamelCase', 'snake_case', 'kebab-case', 'UpperCamelCase'),
default='snake_case')
class_naming_convention = Taste[str](
'Naming convention to use for classes\'s identifiers',
('lowerCamelCase', 'snake_case', 'kebab-case', 'UpperCamelCase'),
default='UpperCamelCase')
max_identifier_length = Taste[int](
'The maximum number of character for an identifier.',
(31,), default=31)
@Smell.subaspect
class Complexity:
"""
Complexity of a code based on different complexity metrics.
"""
class Docs:
example = """
* McCabe's complexity
* Halstead complexity
* Elshof complexity
* Data complexity
etc...
Here is an example of complex code:
https://github.com/sympy/sympy/blob/master/sympy/solvers/solvers.py
"""
example_language = 'reStructuredText'
importance_reason = """
Complex programs are difficult to read and maintain. Reducing a code's
complexity improves its organization.
"""
fix_suggestions = """
Implementing simple methods, avoiding too many branches, avoiding too
much multilevel inheritance etc... can fix this.
"""
@Complexity.subaspect
class CyclomaticComplexity:
"""
Number of linearly independent paths through a program's source code.
The `Cyclomatic complexity
<https://wikipedia.org/wiki/Cyclomatic_complexity>`_ was developed by
Thomas J. McCabe in 1976 and it is based on a control flow representation
of the program.
"""
class Docs:
example = """
// The cyclomatic complexity of this program is 4.
int foo (int a, int b) {
if (a > 17 && b < 42 && a+b < 55) {
return 1;
}
return 2;
}
"""
example_language = 'C'
importance_reason = """
    Very complex code is difficult to read, debug and maintain.
It is always a good idea to keep things as simple as possible.
"""
fix_suggestions = """
This can be solved by breaking down complex functions into smaller
    ones.
"""
cyclomatic_complexity = Taste[int](
        'This is the maximum number of embedded branches or embedded loops'
' allowed.',
(6,), default=6)
@Complexity.subaspect
class MaintainabilityIndex:
"""
    Software metric which measures how maintainable a program is.
    The `maintainability index
    <www.projectcodemeter.com/cost_estimation/help/GL_maintainability.htm>`_
    is always in the range 0-100 and is ranked
    as follows:
    * A `MI` in the range 0-9 maps to code that is extremely difficult to maintain.
    * A `MI` in the range 10-19 maps to maintainable code.
    * A `MI` in the range 20-100 maps to highly maintainable code.
"""
class Docs:
example = """
'''
The maintainability index for the following piece of code is 100.
'''
def preorder(node):
if tree:
print(node.key)
preorder(node.left)
preorder(node.right)
"""
example_language = 'Python'
importance_reason = """
    Complex code is difficult to maintain.
"""
fix_suggestions = """
This can be solved by writing simpler functions and methods.
"""
maintainability_index = Taste[int](
'Maintainability index of your code',
tuple(range(100)), default=10)
|
google-research/dreamer
|
refs/heads/master
|
dreamer/control/random_episodes.py
|
1
|
# Copyright 2019 The Dreamer Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from dreamer import control
def random_episodes(
env_ctor, num_episodes, num_steps, outdir=None, isolate_envs='none'):
# If using environment processes or threads, we should also use them here to
# avoid loading their dependencies into the global name space. This way,
# their imports will be isolated from the main process and later created envs
# do not inherit them via global state but import their own copies.
env, _ = control.create_env(env_ctor, isolate_envs)
env = control.wrappers.CollectDataset(env, outdir)
episodes = [] if outdir else None
while num_episodes > 0 or num_steps > 0:
policy = lambda env, obs: env.action_space.sample()
done = False
obs = env.reset()
while not done:
action = policy(env, obs)
obs, _, done, info = env.step(action)
episode = env._get_episode()
episodes.append(episode)
num_episodes -= 1
num_steps -= len(episode['reward'])
try:
env.close()
except AttributeError:
pass
return episodes
|
lmazuel/azure-sdk-for-python
|
refs/heads/master
|
azure-mgmt-monitor/setup.py
|
1
|
#!/usr/bin/env python
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import re
import os.path
from io import open
from setuptools import find_packages, setup
try:
from azure_bdist_wheel import cmdclass
except ImportError:
from distutils import log as logger
logger.warn("Wheel is not available, disabling bdist_wheel hook")
cmdclass = {}
# Change the PACKAGE_NAME only to change folder and different name
PACKAGE_NAME = "azure-mgmt-monitor"
PACKAGE_PPRINT_NAME = "Monitor"
# a-b-c => a/b/c
package_folder_path = PACKAGE_NAME.replace('-', '/')
# a-b-c => a.b.c
namespace_name = PACKAGE_NAME.replace('-', '.')
# azure v0.x is not compatible with this package
# azure v0.x used to have a __version__ attribute (newer versions don't)
try:
import azure
try:
ver = azure.__version__
raise Exception(
'This package is incompatible with azure=={}. '.format(ver) +
'Uninstall it with "pip uninstall azure".'
)
except AttributeError:
pass
except ImportError:
pass
# Version extraction inspired from 'requests'
with open(os.path.join(package_folder_path, 'version.py'), 'r') as fd:
version = re.search(r'^VERSION\s*=\s*[\'"]([^\'"]*)[\'"]',
fd.read(), re.MULTILINE).group(1)
if not version:
raise RuntimeError('Cannot find version information')
with open('README.rst', encoding='utf-8') as f:
readme = f.read()
with open('HISTORY.rst', encoding='utf-8') as f:
history = f.read()
setup(
name=PACKAGE_NAME,
version=version,
description='Microsoft Azure {} Client Library for Python'.format(PACKAGE_PPRINT_NAME),
long_description=readme + '\n\n' + history,
license='MIT License',
author='Microsoft Corporation',
author_email='[email protected]',
url='https://github.com/Azure/azure-sdk-for-python',
classifiers=[
'Development Status :: 4 - Beta',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'License :: OSI Approved :: MIT License',
],
zip_safe=False,
packages=find_packages(exclude=["tests"]),
install_requires=[
'msrestazure>=0.4.20,<2.0.0',
'azure-common~=1.1',
],
cmdclass=cmdclass
)
|
santidediego/LearningDjango
|
refs/heads/master
|
lib/python3.5/site-packages/django/core/cache/backends/filebased.py
|
428
|
"File-based cache backend"
import errno
import glob
import hashlib
import io
import os
import random
import tempfile
import time
import zlib
from django.core.cache.backends.base import DEFAULT_TIMEOUT, BaseCache
from django.core.files.move import file_move_safe
from django.utils.encoding import force_bytes
try:
from django.utils.six.moves import cPickle as pickle
except ImportError:
import pickle
class FileBasedCache(BaseCache):
cache_suffix = '.djcache'
def __init__(self, dir, params):
super(FileBasedCache, self).__init__(params)
self._dir = os.path.abspath(dir)
self._createdir()
def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
if self.has_key(key, version):
return False
self.set(key, value, timeout, version)
return True
def get(self, key, default=None, version=None):
fname = self._key_to_file(key, version)
if os.path.exists(fname):
try:
with io.open(fname, 'rb') as f:
if not self._is_expired(f):
return pickle.loads(zlib.decompress(f.read()))
except IOError as e:
if e.errno == errno.ENOENT:
pass # Cache file was removed after the exists check
return default
def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
self._createdir() # Cache dir can be deleted at any time.
fname = self._key_to_file(key, version)
self._cull() # make some room if necessary
fd, tmp_path = tempfile.mkstemp(dir=self._dir)
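        # Write the pickled expiry and the compressed, pickled value to a
        # temporary file first, then move it over the final name below, so
        # readers never observe a partially written cache entry.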
renamed = False
try:
with io.open(fd, 'wb') as f:
expiry = self.get_backend_timeout(timeout)
f.write(pickle.dumps(expiry, -1))
f.write(zlib.compress(pickle.dumps(value), -1))
file_move_safe(tmp_path, fname, allow_overwrite=True)
renamed = True
finally:
if not renamed:
os.remove(tmp_path)
def delete(self, key, version=None):
self._delete(self._key_to_file(key, version))
def _delete(self, fname):
if not fname.startswith(self._dir) or not os.path.exists(fname):
return
try:
os.remove(fname)
except OSError as e:
# ENOENT can happen if the cache file is removed (by another
# process) after the os.path.exists check.
if e.errno != errno.ENOENT:
raise
def has_key(self, key, version=None):
fname = self._key_to_file(key, version)
if os.path.exists(fname):
with io.open(fname, 'rb') as f:
return not self._is_expired(f)
return False
def _cull(self):
"""
Removes random cache entries if max_entries is reached at a ratio
of num_entries / cull_frequency. A value of 0 for CULL_FREQUENCY means
that the entire cache will be purged.
"""
filelist = self._list_cache_files()
num_entries = len(filelist)
if num_entries < self._max_entries:
return # return early if no culling is required
if self._cull_frequency == 0:
return self.clear() # Clear the cache when CULL_FREQUENCY = 0
# Delete a random selection of entries
filelist = random.sample(filelist,
int(num_entries / self._cull_frequency))
for fname in filelist:
self._delete(fname)
def _createdir(self):
if not os.path.exists(self._dir):
try:
os.makedirs(self._dir, 0o700)
except OSError as e:
if e.errno != errno.EEXIST:
raise EnvironmentError(
"Cache directory '%s' does not exist "
"and could not be created'" % self._dir)
def _key_to_file(self, key, version=None):
"""
Convert a key into a cache file path. Basically this is the
root cache path joined with the md5sum of the key and a suffix.
"""
key = self.make_key(key, version=version)
self.validate_key(key)
return os.path.join(self._dir, ''.join(
[hashlib.md5(force_bytes(key)).hexdigest(), self.cache_suffix]))
def clear(self):
"""
Remove all the cache files.
"""
if not os.path.exists(self._dir):
return
for fname in self._list_cache_files():
self._delete(fname)
def _is_expired(self, f):
"""
Takes an open cache file and determines if it has expired,
        deletes the file if it has passed its expiry time.
"""
exp = pickle.load(f)
if exp is not None and exp < time.time():
f.close() # On Windows a file has to be closed before deleting
self._delete(f.name)
return True
return False
def _list_cache_files(self):
"""
Get a list of paths to all the cache files. These are all the files
in the root cache dir that end on the cache_suffix.
"""
if not os.path.exists(self._dir):
return []
filelist = [os.path.join(self._dir, fname) for fname
in glob.glob1(self._dir, '*%s' % self.cache_suffix)]
return filelist
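# --- Usage sketch (not part of Django; a minimal, hypothetical way to exercise
# the backend directly - in a project it is normally selected via the CACHES
# setting instead) ---
if __name__ == '__main__':
    import tempfile
    cache = FileBasedCache(tempfile.mkdtemp(), params={'TIMEOUT': 60})
    cache.set('greeting', 'hello', timeout=30)
    assert cache.get('greeting') == 'hello'
    cache.delete('greeting')
    assert cache.get('greeting', default='missing') == 'missing'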
|
dmitry-sobolev/ansible
|
refs/heads/devel
|
lib/ansible/plugins/cache/memcached.py
|
79
|
# (c) 2014, Brian Coca, Josh Drake, et al
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import collections
import os
import time
from multiprocessing import Lock
from itertools import chain
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.plugins.cache import BaseCacheModule
try:
import memcache
except ImportError:
raise AnsibleError("python-memcached is required for the memcached fact cache")
class ProxyClientPool(object):
"""
Memcached connection pooling for thread/fork safety. Inspired by py-redis
connection pool.
Available connections are maintained in a deque and released in a FIFO manner.
"""
def __init__(self, *args, **kwargs):
self.max_connections = kwargs.pop('max_connections', 1024)
self.connection_args = args
self.connection_kwargs = kwargs
self.reset()
def reset(self):
self.pid = os.getpid()
self._num_connections = 0
self._available_connections = collections.deque(maxlen=self.max_connections)
self._locked_connections = set()
self._lock = Lock()
def _check_safe(self):
if self.pid != os.getpid():
with self._lock:
if self.pid == os.getpid():
# bail out - another thread already acquired the lock
return
self.disconnect_all()
self.reset()
def get_connection(self):
self._check_safe()
try:
connection = self._available_connections.popleft()
except IndexError:
connection = self.create_connection()
self._locked_connections.add(connection)
return connection
def create_connection(self):
if self._num_connections >= self.max_connections:
raise RuntimeError("Too many memcached connections")
self._num_connections += 1
return memcache.Client(*self.connection_args, **self.connection_kwargs)
def release_connection(self, connection):
self._check_safe()
self._locked_connections.remove(connection)
self._available_connections.append(connection)
def disconnect_all(self):
for conn in chain(self._available_connections, self._locked_connections):
conn.disconnect_all()
def __getattr__(self, name):
def wrapped(*args, **kwargs):
return self._proxy_client(name, *args, **kwargs)
return wrapped
def _proxy_client(self, name, *args, **kwargs):
conn = self.get_connection()
try:
return getattr(conn, name)(*args, **kwargs)
finally:
self.release_connection(conn)
class CacheModuleKeys(collections.MutableSet):
"""
A set subclass that keeps track of insertion time and persists
the set in memcached.
"""
PREFIX = 'ansible_cache_keys'
def __init__(self, cache, *args, **kwargs):
self._cache = cache
self._keyset = dict(*args, **kwargs)
def __contains__(self, key):
return key in self._keyset
def __iter__(self):
return iter(self._keyset)
def __len__(self):
return len(self._keyset)
def add(self, key):
self._keyset[key] = time.time()
self._cache.set(self.PREFIX, self._keyset)
def discard(self, key):
del self._keyset[key]
self._cache.set(self.PREFIX, self._keyset)
def remove_by_timerange(self, s_min, s_max):
for k in self._keyset.keys():
t = self._keyset[k]
if s_min < t < s_max:
del self._keyset[k]
self._cache.set(self.PREFIX, self._keyset)
class CacheModule(BaseCacheModule):
def __init__(self, *args, **kwargs):
if C.CACHE_PLUGIN_CONNECTION:
connection = C.CACHE_PLUGIN_CONNECTION.split(',')
else:
connection = ['127.0.0.1:11211']
self._timeout = C.CACHE_PLUGIN_TIMEOUT
self._prefix = C.CACHE_PLUGIN_PREFIX
self._cache = ProxyClientPool(connection, debug=0)
self._keys = CacheModuleKeys(self._cache, self._cache.get(CacheModuleKeys.PREFIX) or [])
def _make_key(self, key):
return "{0}{1}".format(self._prefix, key)
def _expire_keys(self):
if self._timeout > 0:
expiry_age = time.time() - self._timeout
self._keys.remove_by_timerange(0, expiry_age)
def get(self, key):
value = self._cache.get(self._make_key(key))
# guard against the key not being removed from the keyset;
# this could happen in cases where the timeout value is changed
# between invocations
if value is None:
self.delete(key)
raise KeyError
return value
def set(self, key, value):
self._cache.set(self._make_key(key), value, time=self._timeout, min_compress_len=1)
self._keys.add(key)
def keys(self):
self._expire_keys()
return list(iter(self._keys))
def contains(self, key):
self._expire_keys()
return key in self._keys
def delete(self, key):
self._cache.delete(self._make_key(key))
self._keys.discard(key)
def flush(self):
for key in self.keys():
self.delete(key)
def copy(self):
return self._keys.copy()
def __getstate__(self):
return dict()
def __setstate__(self, data):
self.__init__()
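# --- Usage sketch (not part of Ansible; assumes a memcached server listening on
# 127.0.0.1:11211) ---
# ProxyClientPool proxies unknown attribute access to a pooled memcache.Client,
# so it can be used like a plain client while staying fork safe.
if __name__ == '__main__':
    pool = ProxyClientPool(['127.0.0.1:11211'], debug=0, max_connections=4)
    pool.set('example_key', {'answer': 42}, time=60)  # proxied memcache.Client.set
    print(pool.get('example_key'))                    # proxied memcache.Client.get
    pool.disconnect_all()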
|
castroflavio/ryu
|
refs/heads/master
|
ryu/tests/mininet/l3/icmp/test_icmp.py
|
63
|
# Copyright (C) 2012 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import struct
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller import dpset
from ryu.controller.handler import MAIN_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_2
from ryu.lib.mac import haddr_to_str
LOG = logging.getLogger(__name__)
class RunTestMininet(app_manager.RyuApp):
_CONTEXTS = {'dpset': dpset.DPSet}
OFP_VERSIONS = [ofproto_v1_2.OFP_VERSION]
def __init__(self, *args, **kwargs):
super(RunTestMininet, self).__init__(*args, **kwargs)
def _add_flow(self, dp, match, actions):
inst = [dp.ofproto_parser.OFPInstructionActions(
dp.ofproto.OFPIT_APPLY_ACTIONS, actions)]
mod = dp.ofproto_parser.OFPFlowMod(
dp, cookie=0, cookie_mask=0, table_id=0,
command=dp.ofproto.OFPFC_ADD, idle_timeout=0, hard_timeout=0,
priority=0xff, buffer_id=0xffffffff,
out_port=dp.ofproto.OFPP_ANY, out_group=dp.ofproto.OFPG_ANY,
flags=0, match=match, instructions=inst)
dp.send_msg(mod)
def _define_flow(self, dp):
in_port = 1
out_port = 2
# port:1 -> port:2
match = dp.ofproto_parser.OFPMatch()
match.set_in_port(in_port)
actions = [dp.ofproto_parser.OFPActionOutput(out_port, 0)]
self._add_flow(dp, match, actions)
# port:1 -> port:2
match = dp.ofproto_parser.OFPMatch()
match.set_in_port(out_port)
actions = [dp.ofproto_parser.OFPActionOutput(in_port, 0)]
self._add_flow(dp, match, actions)
@set_ev_cls(dpset.EventDP, dpset.DPSET_EV_DISPATCHER)
def handler_datapath(self, ev):
if ev.enter:
self._define_flow(ev.dp)
@set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
def packet_in_handler(self, ev):
msg = ev.msg
dst, src, eth_type = struct.unpack_from('!6s6sH', buffer(msg.data), 0)
in_port = msg.match.fields[0].value
LOG.info("----------------------------------------")
LOG.info("* PacketIn")
LOG.info("in_port=%d, eth_type: %s", in_port, hex(eth_type))
LOG.info("packet reason=%d buffer_id=%d", msg.reason, msg.buffer_id)
LOG.info("packet in datapath_id=%s src=%s dst=%s",
msg.datapath.id, haddr_to_str(src), haddr_to_str(dst))
|
vornne/pw_module_system
|
refs/heads/pw
|
header_debug.py
|
3
|
from header_common import reg
from header_operations import *
import header_lazy_evaluation as lazy
####################################################################################################################
# Short helper functions to generate operations for displaying debugging output.
####################################################################################################################
register_begin = 80
register_end = 100
current_register = register_begin
register_names = {}
# internal use only
def increment():
global current_register
if current_register >= register_end:
raise Exception("Too many variables for debug output.")
current_register += 1
# example: dbg.var(":player_id"),
def var(name, display_name=None):
global register_names
if display_name is None:
display_name = str.strip(name, ":$")
register_names[current_register] = display_name
op = (assign, reg(current_register), name)
increment()
return op
# example: dbg.op("pos2_x", position_get_x, pos2),
def op(name, operation, *args):
global register_names
register_names[current_register] = name
op = [operation, reg(current_register)]
op.extend(args)
increment()
return tuple(op)
# internal use only
def generate_string(message, reset):
global current_register
global register_names
string_list = ["@"]
if message is not None:
string_list.append(message)
string_list.append(" - ")
for i in xrange(register_begin, current_register):
if i > register_begin:
string_list.append(", ")
string_list.append("{0}: {{reg{1}}}".format(register_names[i], i))
debug_string = ''.join(string_list)
if reset is True:
current_register = register_begin
return debug_string
# example: dbg.display(),
def display(message=None, reset=True):
return (display_message, generate_string(message, reset))
# example: dbg.log(),
def log(message=None, reset=True):
return (server_add_message_to_log, generate_string(message, reset))
# example: dbg.update_pres(),
def update_pres(reset=True):
global current_register
global register_names
update_block = []
for i in xrange(register_begin, current_register):
update_block.append((str_store_string, i, "@{0}: {{reg{1}}}".format(register_names[i], i)))
update_block.extend([
(assign, "$g_dbg_presentation_registers_end", current_register),
(try_begin),
(neg|is_presentation_active, "prsnt_dbg_overlay"),
(start_presentation, "prsnt_dbg_overlay"),
(try_end),
])
if reset is True:
current_register = register_begin
return lazy.block(update_block)
# example: dbg.vars(":player_id", ":agent_id", ":gold"),
def vars(*args):
return lazy.block([var(arg) for arg in args])
# example: dbg.vars_display(":agent_id", ":horse_agent_id", ":distance"),
def vars_display(*args):
return lazy.block([var(arg) for arg in args] + [display()])
# initialize all debug registers in case some are not set
def reset_all(value=-989):
return lazy.block((assign, reg(r), value)
for r in range(register_begin, register_end + 1))
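# --- Combined usage sketch (the variable names below are placeholders) ---
# The helpers above are meant to be dropped into a module-system operation block;
# a minimal chained example using only functions defined in this file:
def example_debug_ops():
    return [
        var(":agent_id"),
        var("$g_round_counter", "round"),
        display("agent state"),
    ]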
|
ratnania/caid
|
refs/heads/master
|
caid/boxsplines/mesh.py
|
1
|
import numpy as np
from numpy import array, asarray, pi, sin, cos
import matplotlib.pyplot as plt
from scipy.spatial import Delaunay
nm = 2
ni = 6
radius = 1.
angle = pi / 3.
center = array([0,0])
centers = [center]
points = []
def create_points(X, pts):
for m in range(1, nm):
for i in range(0, ni):
P = m * radius * array([cos(i*angle)-X[0], sin(i*angle)-X[1]])
pts.append(P)
return pts
centers = create_points(center, centers)
for X in centers:
points = create_points(X, points)
points = array(points)
tri = Delaunay(points)
barycenters = []
for T in tri.simplices:
pts = points[T]
A = array(pts[0,:]) ; B = array(pts[1,:]) ; C = array(pts[2,:])
b = (A+B+C) / 3.
barycenters.append(b)
barycenters = array(barycenters)
tri_b = Delaunay(barycenters)
plt.triplot(points[:,0], points[:,1], tri.simplices.copy())
plt.plot(points[:,0], points[:,1], 'or')
plt.plot(barycenters[:,0], barycenters[:,1], 'ob')
xmin = points[:,0].min() ; xmax = points[:,0].max()
ymin = points[:,1].min() ; ymax = points[:,1].max()
plt.xlim([xmin-0.2, xmax+0.2])
plt.ylim([ymin-0.2, ymax+0.2])
plt.show()
|
bacaldwell/ironic
|
refs/heads/master
|
ironic/objects/fields.py
|
7
|
# Copyright 2015 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ast
import six
from oslo_versionedobjects import fields as object_fields
from ironic.common import utils
class IntegerField(object_fields.IntegerField):
pass
class UUIDField(object_fields.UUIDField):
pass
class StringField(object_fields.StringField):
pass
class DateTimeField(object_fields.DateTimeField):
pass
class BooleanField(object_fields.BooleanField):
pass
class ListOfStringsField(object_fields.ListOfStringsField):
pass
class FlexibleDict(object_fields.FieldType):
@staticmethod
def coerce(obj, attr, value):
if isinstance(value, six.string_types):
value = ast.literal_eval(value)
return dict(value)
class FlexibleDictField(object_fields.AutoTypedField):
AUTO_TYPE = FlexibleDict()
    # TODO(lucasagomes): In our code we've always translated None to {};
    # this method makes the field behave that way, but it probably won't
    # be accepted as-is in the oslo_versionedobjects library
def _null(self, obj, attr):
if self.nullable:
return {}
super(FlexibleDictField, self)._null(obj, attr)
class MACAddress(object_fields.FieldType):
@staticmethod
def coerce(obj, attr, value):
return utils.validate_and_normalize_mac(value)
class MACAddressField(object_fields.AutoTypedField):
AUTO_TYPE = MACAddress()
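# --- Coercion sketch (not part of Ironic; the attribute name is a placeholder) ---
# FlexibleDict accepts either a dict or its string representation:
if __name__ == '__main__':
    fd = FlexibleDict()
    print(fd.coerce(None, 'extra', "{'key': 'value'}"))  # string is literal_eval'd into a dict
    print(fd.coerce(None, 'extra', {'key': 'value'}))    # a dict passes through unchanged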
|
ansible/ansible-modules-core
|
refs/heads/devel
|
cloud/rackspace/rax_cbs.py
|
25
|
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: rax_cbs
short_description: Manipulate Rackspace Cloud Block Storage Volumes
description:
- Manipulate Rackspace Cloud Block Storage Volumes
version_added: 1.6
options:
description:
description:
- Description to give the volume being created
default: null
image:
description:
- image to use for bootable volumes. Can be an C(id), C(human_id) or
C(name). This option requires C(pyrax>=1.9.3)
default: null
version_added: 1.9
meta:
description:
- A hash of metadata to associate with the volume
default: null
name:
description:
- Name to give the volume being created
default: null
required: true
size:
description:
- Size of the volume to create in Gigabytes
default: 100
required: true
snapshot_id:
description:
- The id of the snapshot to create the volume from
default: null
state:
description:
- Indicate desired state of the resource
choices:
- present
- absent
default: present
required: true
volume_type:
description:
- Type of the volume being created
choices:
- SATA
- SSD
default: SATA
required: true
wait:
description:
- wait for the volume to be in state 'available' before returning
default: "no"
choices:
- "yes"
- "no"
wait_timeout:
description:
- how long before wait gives up, in seconds
default: 300
author:
- "Christopher H. Laco (@claco)"
- "Matt Martz (@sivel)"
extends_documentation_fragment: rackspace.openstack
'''
EXAMPLES = '''
- name: Build a Block Storage Volume
gather_facts: False
hosts: local
connection: local
tasks:
- name: Storage volume create request
local_action:
module: rax_cbs
credentials: ~/.raxpub
name: my-volume
description: My Volume
volume_type: SSD
size: 150
region: DFW
wait: yes
state: present
meta:
app: my-cool-app
register: my_volume
'''
from distutils.version import LooseVersion
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
def cloud_block_storage(module, state, name, description, meta, size,
snapshot_id, volume_type, wait, wait_timeout,
image):
changed = False
volume = None
instance = {}
cbs = pyrax.cloud_blockstorage
if cbs is None:
module.fail_json(msg='Failed to instantiate client. This '
'typically indicates an invalid region or an '
'incorrectly capitalized region name.')
if image:
# pyrax<1.9.3 did not have support for specifying an image when
# creating a volume which is required for bootable volumes
if LooseVersion(pyrax.version.version) < LooseVersion('1.9.3'):
module.fail_json(msg='Creating a bootable volume requires '
'pyrax>=1.9.3')
image = rax_find_image(module, pyrax, image)
volume = rax_find_volume(module, pyrax, name)
if state == 'present':
if not volume:
kwargs = dict()
if image:
kwargs['image'] = image
try:
volume = cbs.create(name, size=size, volume_type=volume_type,
description=description,
metadata=meta,
snapshot_id=snapshot_id, **kwargs)
changed = True
except Exception as e:
module.fail_json(msg='%s' % e.message)
else:
if wait:
attempts = wait_timeout / 5
pyrax.utils.wait_for_build(volume, interval=5,
attempts=attempts)
volume.get()
instance = rax_to_dict(volume)
result = dict(changed=changed, volume=instance)
if volume.status == 'error':
result['msg'] = '%s failed to build' % volume.id
elif wait and volume.status not in VOLUME_STATUS:
result['msg'] = 'Timeout waiting on %s' % volume.id
if 'msg' in result:
module.fail_json(**result)
else:
module.exit_json(**result)
elif state == 'absent':
if volume:
instance = rax_to_dict(volume)
try:
volume.delete()
changed = True
except Exception as e:
module.fail_json(msg='%s' % e.message)
module.exit_json(changed=changed, volume=instance)
def main():
argument_spec = rax_argument_spec()
argument_spec.update(
dict(
description=dict(type='str'),
image=dict(type='str'),
meta=dict(type='dict', default={}),
name=dict(required=True),
size=dict(type='int', default=100),
snapshot_id=dict(),
state=dict(default='present', choices=['present', 'absent']),
volume_type=dict(choices=['SSD', 'SATA'], default='SATA'),
wait=dict(type='bool', default=False),
wait_timeout=dict(type='int', default=300)
)
)
module = AnsibleModule(
argument_spec=argument_spec,
required_together=rax_required_together()
)
if not HAS_PYRAX:
module.fail_json(msg='pyrax is required for this module')
description = module.params.get('description')
image = module.params.get('image')
meta = module.params.get('meta')
name = module.params.get('name')
size = module.params.get('size')
snapshot_id = module.params.get('snapshot_id')
state = module.params.get('state')
volume_type = module.params.get('volume_type')
wait = module.params.get('wait')
wait_timeout = module.params.get('wait_timeout')
setup_rax_module(module, pyrax)
cloud_block_storage(module, state, name, description, meta, size,
snapshot_id, volume_type, wait, wait_timeout,
image)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.rax import *
# invoke the module
if __name__ == '__main__':
main()
|
jerrylei98/Dailydos
|
refs/heads/master
|
venv/lib/python2.7/site-packages/wheel/__init__.py
|
219
|
# __variables__ with double-quoted values will be available in setup.py:
__version__ = "0.24.0"
|
CouchPotato/CouchPotatoV1
|
refs/heads/master
|
cherrypy/process/win32.py
|
93
|
"""Windows service. Requires pywin32."""
import os
import win32api
import win32con
import win32event
import win32service
import win32serviceutil
from cherrypy.process import wspbus, plugins
class ConsoleCtrlHandler(plugins.SimplePlugin):
"""A WSPBus plugin for handling Win32 console events (like Ctrl-C)."""
def __init__(self, bus):
self.is_set = False
plugins.SimplePlugin.__init__(self, bus)
def start(self):
if self.is_set:
self.bus.log('Handler for console events already set.', level=40)
return
result = win32api.SetConsoleCtrlHandler(self.handle, 1)
if result == 0:
self.bus.log('Could not SetConsoleCtrlHandler (error %r)' %
win32api.GetLastError(), level=40)
else:
self.bus.log('Set handler for console events.', level=40)
self.is_set = True
def stop(self):
if not self.is_set:
self.bus.log('Handler for console events already off.', level=40)
return
try:
result = win32api.SetConsoleCtrlHandler(self.handle, 0)
except ValueError:
# "ValueError: The object has not been registered"
result = 1
if result == 0:
self.bus.log('Could not remove SetConsoleCtrlHandler (error %r)' %
win32api.GetLastError(), level=40)
else:
self.bus.log('Removed handler for console events.', level=40)
self.is_set = False
def handle(self, event):
"""Handle console control events (like Ctrl-C)."""
if event in (win32con.CTRL_C_EVENT, win32con.CTRL_LOGOFF_EVENT,
win32con.CTRL_BREAK_EVENT, win32con.CTRL_SHUTDOWN_EVENT,
win32con.CTRL_CLOSE_EVENT):
self.bus.log('Console event %s: shutting down bus' % event)
# Remove self immediately so repeated Ctrl-C doesn't re-call it.
try:
self.stop()
except ValueError:
pass
self.bus.exit()
# 'First to return True stops the calls'
return 1
return 0
class Win32Bus(wspbus.Bus):
"""A Web Site Process Bus implementation for Win32.
Instead of time.sleep, this bus blocks using native win32event objects.
"""
def __init__(self):
self.events = {}
wspbus.Bus.__init__(self)
def _get_state_event(self, state):
"""Return a win32event for the given state (creating it if needed)."""
try:
return self.events[state]
except KeyError:
event = win32event.CreateEvent(None, 0, 0,
"WSPBus %s Event (pid=%r)" %
(state.name, os.getpid()))
self.events[state] = event
return event
def _get_state(self):
return self._state
def _set_state(self, value):
self._state = value
event = self._get_state_event(value)
win32event.PulseEvent(event)
state = property(_get_state, _set_state)
def wait(self, state, interval=0.1, channel=None):
"""Wait for the given state(s), KeyboardInterrupt or SystemExit.
Since this class uses native win32event objects, the interval
argument is ignored.
"""
if isinstance(state, (tuple, list)):
# Don't wait for an event that beat us to the punch ;)
if self.state not in state:
events = tuple([self._get_state_event(s) for s in state])
win32event.WaitForMultipleObjects(events, 0, win32event.INFINITE)
else:
# Don't wait for an event that beat us to the punch ;)
if self.state != state:
event = self._get_state_event(state)
win32event.WaitForSingleObject(event, win32event.INFINITE)
class _ControlCodes(dict):
"""Control codes used to "signal" a service via ControlService.
User-defined control codes are in the range 128-255. We generally use
the standard Python value for the Linux signal and add 128. Example:
>>> signal.SIGUSR1
10
control_codes['graceful'] = 128 + 10
"""
def key_for(self, obj):
"""For the given value, return its corresponding key."""
for key, val in self.items():
if val is obj:
return key
raise ValueError("The given object could not be found: %r" % obj)
control_codes = _ControlCodes({'graceful': 138})
def signal_child(service, command):
if command == 'stop':
win32serviceutil.StopService(service)
elif command == 'restart':
win32serviceutil.RestartService(service)
else:
win32serviceutil.ControlService(service, control_codes[command])
class PyWebService(win32serviceutil.ServiceFramework):
"""Python Web Service."""
_svc_name_ = "Python Web Service"
_svc_display_name_ = "Python Web Service"
_svc_deps_ = None # sequence of service names on which this depends
_exe_name_ = "pywebsvc"
_exe_args_ = None # Default to no arguments
    # Only exists on Windows 2000 or later, ignored on Windows NT
_svc_description_ = "Python Web Service"
def SvcDoRun(self):
from cherrypy import process
process.bus.start()
process.bus.block()
def SvcStop(self):
from cherrypy import process
self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
process.bus.exit()
def SvcOther(self, control):
process.bus.publish(control_codes.key_for(control))
if __name__ == '__main__':
win32serviceutil.HandleCommandLine(PyWebService)
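# --- Usage sketch (not part of CherryPy; assumes the service above is installed) ---
# signal_child() maps a textual command onto the matching win32 service call.
def _example_service_control(service_name="Python Web Service"):
    signal_child(service_name, "stop")      # -> win32serviceutil.StopService
    signal_child(service_name, "graceful")  # -> ControlService with user code 138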
|
HoracioAlvarado/fwd
|
refs/heads/master
|
venv/Lib/site-packages/alembic/templates/generic/env.py
|
76
|
from __future__ import with_statement
from alembic import context
from sqlalchemy import engine_from_config, pool
from logging.config import fileConfig
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
target_metadata = None
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
url = config.get_main_option("sqlalchemy.url")
context.configure(
url=url, target_metadata=target_metadata, literal_binds=True)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
connectable = engine_from_config(
config.get_section(config.config_ini_section),
prefix='sqlalchemy.',
poolclass=pool.NullPool)
with connectable.connect() as connection:
context.configure(
connection=connection,
target_metadata=target_metadata
)
with context.begin_transaction():
context.run_migrations()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
|
aospx-kitkat/platform_external_chromium_org
|
refs/heads/kitkat
|
tools/telemetry/telemetry/test_runner.py
|
23
|
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Parses the command line, discovers the appropriate tests, and runs them.
Handles test configuration, but all the logic for
actually running the test is in Test and PageRunner."""
import copy
import inspect
import json
import optparse
import os
import sys
from telemetry import test
from telemetry.core import browser_options
from telemetry.core import discover
from telemetry.core import util
class Command(object):
usage = ''
@property
def name(self):
return self.__class__.__name__.lower()
@property
def description(self):
return self.__doc__
def CreateParser(self):
return optparse.OptionParser('%%prog %s %s' % (self.name, self.usage))
def AddParserOptions(self, parser):
pass
def ValidateCommandLine(self, parser, options, args):
pass
def Run(self, options, args):
raise NotImplementedError()
class Help(Command):
"""Display help information"""
def Run(self, options, args):
print ('usage: %s <command> [<args>]' % _GetScriptName())
print 'Available commands are:'
for command in COMMANDS:
print ' %-10s %s' % (command.name, command.description)
return 0
class List(Command):
"""Lists the available tests"""
def AddParserOptions(self, parser):
parser.add_option('-j', '--json', action='store_true')
def Run(self, options, args):
if options.json:
test_list = []
for test_name, test_class in sorted(_GetTests().items()):
test_list.append({
'name': test_name,
'description': test_class.__doc__,
'enabled': test_class.enabled,
'options': test_class.options,
})
print json.dumps(test_list)
else:
print 'Available tests are:'
for test_name, test_class in sorted(_GetTests().items()):
if test_class.__doc__:
print ' %-20s %s' % (test_name,
test_class.__doc__.splitlines()[0])
else:
print ' %-20s' % test_name
return 0
class Run(Command):
"""Run one or more tests"""
usage = 'test_name [...] [<args>]'
def CreateParser(self):
options = browser_options.BrowserOptions()
parser = options.CreateParser('%%prog %s %s' % (self.name, self.usage))
return parser
def ValidateCommandLine(self, parser, options, args):
if not args:
parser.error('Must provide at least one test name')
for test_name in args:
if test_name not in _GetTests():
parser.error('No test named "%s"' % test_name)
def Run(self, options, args):
total_failures = 0
for test_name in args:
test_failures = _GetTests()[test_name]().Run(copy.copy(options))
total_failures += test_failures
return min(255, total_failures)
COMMANDS = [cls() for _, cls in inspect.getmembers(sys.modules[__name__])
if inspect.isclass(cls)
and cls is not Command and issubclass(cls, Command)]
def _GetScriptName():
return os.path.basename(sys.argv[0])
def _GetTests():
# Lazy load and cache results.
if not hasattr(_GetTests, 'tests'):
base_dir = util.GetBaseDir()
_GetTests.tests = discover.DiscoverClasses(base_dir, base_dir, test.Test,
index_by_class_name=True)
return _GetTests.tests
def Main():
# Get the command name from the command line.
if len(sys.argv) > 1 and sys.argv[1] == '--help':
sys.argv[1] = 'help'
command_name = 'run'
for arg in sys.argv[1:]:
if not arg.startswith('-'):
command_name = arg
break
# Validate and interpret the command name.
commands = [command for command in COMMANDS
if command.name.startswith(command_name)]
if len(commands) > 1:
print >> sys.stderr, ('"%s" is not a %s command. Did you mean one of these?'
% (command_name, _GetScriptName()))
for command in commands:
print >> sys.stderr, ' %-10s %s' % (command.name, command.description)
return 1
if commands:
command = commands[0]
else:
command = Run()
# Parse and run the command.
parser = command.CreateParser()
command.AddParserOptions(parser)
options, args = parser.parse_args()
if commands:
args = args[1:]
command.ValidateCommandLine(parser, options, args)
return command.Run(options, args)
|
Outernet-Project/bottle-utils
|
refs/heads/master
|
bottle_utils/form/__init__.py
|
1
|
"""
.. module:: bottle_utils.form
:synopsis: Form processing and validation library
.. moduleauthor:: Outernet Inc <[email protected]>
"""
from .exceptions import ValidationError
from .fields import (DormantField,
Field,
StringField,
PasswordField,
HiddenField,
EmailField,
TextAreaField,
DateField,
FileField,
IntegerField,
FloatField,
BooleanField,
SelectField)
from .forms import Form
from .validators import (Validator, Required, DateValidator, InRangeValidator,
LengthValidator)
__all__ = ['ValidationError',
'DormantField',
'Field',
'StringField',
'PasswordField',
'HiddenField',
'EmailField',
'TextAreaField',
'DateField',
'FileField',
'IntegerField',
'FloatField',
'BooleanField',
'SelectField',
'Form',
'Validator',
'Required',
'DateValidator',
'InRangeValidator']
|
SiLab-Bonn/pyBAR_fei4_interpreter
|
refs/heads/master
|
examples/example.py
|
1
|
''' Example how to interpret raw data and how to histogram the hits.
'''
import numpy as np
from pybar_fei4_interpreter.data_interpreter import PyDataInterpreter
from pybar_fei4_interpreter.data_histograming import PyDataHistograming
# Initialize interpretation modules
interpreter = PyDataInterpreter()
histograming = PyDataHistograming()
# Create raw data
raw_data = np.array([67307647, 67645759, 67660079, 67541711, 67718111, 67913663, 67914223, 67847647, 67978655, 68081199, 68219119, 68219487, 68425615, 68311343, 68490719, 68373295, 68553519, 68693039, 68573503, 68709951, 68717058, 68734735, 68604719, 68753999, 68761151, 68847327, 69014799, 69079791, 69211359, 69221055, 69279567, 69499247, 69773183, 69788527, 69998559, 69868559, 69872655, 70003599, 69902527, 70274575, 70321471, 70429983, 70563295, 70574959, 70447631, 70584591, 70783023, 71091999, 70972687, 70985087, 71214815, 71382623, 71609135, 71643519, 71720527, 71897695, 72167199, 72040047, 72264927, 72423983, 77471983, 77602863, 77604383, 77485295, 77616415, 77618927, 77619231, 77639983, 77655871, 77544159, 77548303, 77338399, 77345567, 77346287, 77360399, 77255407, 77386211, 77268287, 77279215, 77409599, 77075983, 76951903, 76980527, 77117023, 76991055, 77011007, 77148127, 77148815, 76827167, 76700031, 76868895, 76758575, 76889567, 76558303, 76429599, 76584783, 76468191, 76610943, 76613743, 76620879, 76629375, 76285999, 76321908, 76194319, 76205599, 76233759, 76065391, 76075839, 76093759, 75801311, 75826319, 75829215, 75699231, 75403887, 75565039, 75439135, 75111711, 75115151, 75251487, 75258399, 75138015, 75303471, 74974111, 74868559, 75030047, 75050079, 74714591, 74722847, 74595103, 74649935, 74656815, 74796511, 74455519, 74391519, 74402607, 74534383, 74189695, 74064911, 74246271, 74116063, 74248719, 74133119, 73935183, 73941087, 73811295, 73663583, 73743423, 73449647, 73453391, 73323743, 73343471, 73474159, 73345087, 73206751, 72899295, 72958559, 72828447, 72542623, 82383232, 67374687, 67503967, 67766575, 68179999, 68052847, 68198239, 68104495, 68235759, 68238223, 68472415, 68490463, 68501279, 68621071, 68623903, 68821791, 68988639, 68864047, 69003183, 68876015, 69007423, 68891407, 69267743, 69272367, 69159567, 69666911, 69684447, 70003247, 70018895, 69898927, 69938543, 69942031, 70198863, 70339919, 70587455, 70462783, 70597679, 70796399, 70800015, 70703887, 71121183, 71323151, 71243535, 71578703, 71467695, 71622879, 71629359, 71831264, 71836511, 71710319, 71992943, 72353855, 72355039, 77606628, 77608287, 77622047, 77510223, 77653263, 77664319, 77546223, 77677471, 77549375, 77213519, 77219551, 77232207, 77234991, 77366511, 77373791, 77389647, 77404383, 77070655, 77087199, 76956975, 76996431, 77009183, 77015327, 76683567, 76840351, 76862255, 76888804, 76548975, 76554767, 76427087, 76560159, 76451967, 76456847, 76468015, 76627295, 76352831, 76354863, 76365887, 75923999, 76074175, 75955439, 76086063, 75774239, 75781535, 75792671, 75662111, 75793647, 75797167, 75827023, 75696543, 75390527, 75522031, 75533663, 75541775, 75432255, 75571535, 75115535, 75247999, 75145197, 75151391, 75160799, 74974991, 74852831, 74871839, 74882783, 75023199, 74896943, 75028767, 75046431, 74922463, 74725711, 74621199, 74658623, 74663183, 74336383, 74484559, 74364526, 74370287, 74370639, 74517983, 74393615, 74205471, 74217359, 74227263, 74231727, 74102559, 74237999, 74248735, 73953599, 73868591, 74000703, 74002975, 73877295, 73664910, 73695967, 73704751, 73579583, 73582639, 73719055, 73405998, 73448207, 73481951, 73008831, 73175087, 73044495, 73058863, 73194895, 73197919, 73093151, 72895567, 72918543, 72947039, 72957919, 82383481, 67392015, 67303135, 67312799, 67318303, 67453727, 67454767, 67634719, 67645887, 67717391, 67914111, 67947919, 67818463, 68052959, 68097215, 68500543, 68711909, 68584735, 68726975, 68741679, 68615471, 68750559, 68755487, 68629311, 68764687, 68765648, 68990175, 69022959, 69023727, 
69217327, 69547327, 69665839, 69809983, 69814815, 70006831, 70037807, 70055951, 70068511, 70184031, 70323999, 70334687, 70566095, 70588751, 70723935, 71049695, 70952031, 71084831, 71376863, 71256287, 71611039, 71487727, 71618591, 71623999, 71514239, 71891231, 71897327, 71897663, 72036783, 72391487, 77604975, 77608163, 77621327, 77501983, 77635039, 77646559, 77654671, 77655695, 77546543, 77678383, 77345471, 77224735, 77375519, 77385519, 77393967, 76944399, 76975663, 77114628, 77115231, 77127525, 77142959, 76677423, 76699967, 76722287, 76857647, 76739039, 76883567, 76891615, 76453343, 76584335, 76590623, 76594607, 76600031, 76611167, 76617743, 76622303, 76285999, 76329231, 76335839, 76348175, 76350351, 76356783, 75910383, 75639343, 75787615, 75660079, 75796895, 75797615, 75692559, 75827999, 75833487, 75836479, 75518943, 75568143, 75278943, 75290271, 75297903, 75309391, 75312479, 75315119, 74852223, 74987055, 74858047, 74992943, 74875439, 75008031, 74885407, 75027743, 75055583, 74927839, 74738719, 74629087, 74767391, 74779295, 74789343, 74791247, 74323183, 74454239, 74349455, 74364751, 74516047, 74528559, 74192207, 74201535, 74084367, 74220511, 74109039, 74263263, 74133215, 73807119, 73945313, 73868148, 74001631, 73536815, 73684815, 73711439, 73275407, 73408799, 73052767, 73190975, 73209823, 72788271, 72960607, 72487647, 82383730, 67407151, 67415583, 67322127, 67523871, 67700959, 67583039, 67905375, 67793199, 68159583, 68237791, 68306479, 68492399], np.uint32)
# Set settings
histograming.set_no_scan_parameter() # The data has no scan parameter, thus should not be histogrammed per scan parameter
histograming.create_occupancy_hist(True)  # Tell the histogrammer to create an occupancy hist
# Interpret the raw data (builds hits)
interpreter.interpret_raw_data(raw_data)
# Hits are buffered per event, since interpret_raw_data() does not have to be called event-aligned;
# store_event() has to be called to tell the interpreter that the last event is finished
interpreter.store_event()
# Histogram the hits
hits = interpreter.get_hits()
histograming.add_hits(hits)
# Get and show the occupancy hist
occ_hist = histograming.get_occupancy()[:, :, 0] # 0 because there is no scan parameter, otherwise histogramming is done per scan parameter
print hits
print np.where(occ_hist != 0)
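# Optional visualisation sketch (not part of the original example; assumes
# matplotlib is installed)
import matplotlib.pyplot as plt
plt.imshow(occ_hist.T, origin='lower', aspect='auto', interpolation='none')
plt.xlabel('column')
plt.ylabel('row')
plt.colorbar(label='# hits')
plt.show()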
|
yhoshino11/pytest_example
|
refs/heads/master
|
.tox/flake8/lib/python2.7/site-packages/pip/_vendor/colorama/__init__.py
|
445
|
# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
from .initialise import init, deinit, reinit
from .ansi import Fore, Back, Style, Cursor
from .ansitowin32 import AnsiToWin32
__version__ = '0.3.3'
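# --- Usage sketch (not part of the package) ---
# Minimal illustration of the public names re-exported above.
if __name__ == '__main__':
    init()                                    # wrap stdout/stderr on Windows
    print(Fore.GREEN + 'colorized' + Style.RESET_ALL)
    deinit()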
|
CCI-MOC/nova
|
refs/heads/k2k-liberty
|
nova/tests/unit/virt/ironic/test_driver.py
|
2
|
# Copyright 2015 Red Hat, Inc.
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for the ironic driver."""
from ironicclient import exc as ironic_exception
import mock
from oslo_config import cfg
from oslo_serialization import jsonutils
from oslo_service import loopingcall
from oslo_utils import uuidutils
import six
from testtools.matchers import HasLength
from nova.api.metadata import base as instance_metadata
from nova.compute import power_state as nova_states
from nova.compute import task_states
from nova.compute import vm_states
from nova import context as nova_context
from nova import exception
from nova import objects
from nova import test
from nova.tests.unit import fake_instance
from nova.tests.unit import utils
from nova.tests.unit.virt.ironic import utils as ironic_utils
from nova.virt import configdrive
from nova.virt import driver
from nova.virt import fake
from nova.virt import firewall
from nova.virt import hardware
from nova.virt.ironic import client_wrapper as cw
from nova.virt.ironic import driver as ironic_driver
from nova.virt.ironic import ironic_states
CONF = cfg.CONF
IRONIC_FLAGS = dict(
api_version=1,
group='ironic',
)
FAKE_CLIENT = ironic_utils.FakeClient()
class FakeClientWrapper(cw.IronicClientWrapper):
def _get_client(self):
return FAKE_CLIENT
class FakeLoopingCall(object):
def __init__(self):
self.wait = mock.MagicMock()
self.start = mock.MagicMock()
self.start.return_value = self
def _get_properties():
return {'cpus': 2,
'memory_mb': 512,
'local_gb': 10,
'cpu_arch': 'x86_64',
'capabilities': None}
def _get_instance_info():
return {'vcpus': 1,
'memory_mb': 1024,
'local_gb': 10}
def _get_stats():
return {'cpu_arch': 'x86_64'}
FAKE_CLIENT_WRAPPER = FakeClientWrapper()
@mock.patch.object(cw, 'IronicClientWrapper', lambda *_: FAKE_CLIENT_WRAPPER)
class IronicDriverTestCase(test.NoDBTestCase):
@mock.patch.object(cw, 'IronicClientWrapper',
lambda *_: FAKE_CLIENT_WRAPPER)
def setUp(self):
super(IronicDriverTestCase, self).setUp()
self.flags(**IRONIC_FLAGS)
# set client log config to exercise the code that manipulates it
CONF.set_override('client_log_level', 'DEBUG', group='ironic')
self.driver = ironic_driver.IronicDriver(None)
self.driver.virtapi = fake.FakeVirtAPI()
self.ctx = nova_context.get_admin_context()
self.instance_uuid = uuidutils.generate_uuid()
# mock retries configs to avoid sleeps and make tests run quicker
CONF.set_default('api_max_retries', default=1, group='ironic')
CONF.set_default('api_retry_interval', default=0, group='ironic')
def test_public_api_signatures(self):
self.assertPublicAPISignatures(driver.ComputeDriver(None), self.driver)
def test_validate_driver_loading(self):
self.assertIsInstance(self.driver, ironic_driver.IronicDriver)
def test_driver_capabilities(self):
self.assertFalse(self.driver.capabilities['has_imagecache'],
                         'Driver capabilities for \'has_imagecache\' '
'is invalid')
self.assertFalse(self.driver.capabilities['supports_recreate'],
                         'Driver capabilities for \'supports_recreate\' '
'is invalid')
def test__get_hypervisor_type(self):
self.assertEqual('ironic', self.driver._get_hypervisor_type())
def test__get_hypervisor_version(self):
self.assertEqual(1, self.driver._get_hypervisor_version())
@mock.patch.object(FAKE_CLIENT.node, 'get_by_instance_uuid')
def test__validate_instance_and_node(self, mock_gbiui):
node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
node = ironic_utils.get_test_node(uuid=node_uuid,
instance_uuid=self.instance_uuid)
instance = fake_instance.fake_instance_obj(self.ctx,
uuid=self.instance_uuid)
ironicclient = cw.IronicClientWrapper()
mock_gbiui.return_value = node
result = ironic_driver._validate_instance_and_node(ironicclient,
instance)
self.assertEqual(result.uuid, node_uuid)
@mock.patch.object(FAKE_CLIENT.node, 'get_by_instance_uuid')
def test__validate_instance_and_node_failed(self, mock_gbiui):
ironicclient = cw.IronicClientWrapper()
mock_gbiui.side_effect = ironic_exception.NotFound()
instance = fake_instance.fake_instance_obj(self.ctx,
uuid=self.instance_uuid)
self.assertRaises(exception.InstanceNotFound,
ironic_driver._validate_instance_and_node,
ironicclient, instance)
@mock.patch.object(objects.Instance, 'refresh')
@mock.patch.object(ironic_driver, '_validate_instance_and_node')
def test__wait_for_active_pass(self, fake_validate, fake_refresh):
instance = fake_instance.fake_instance_obj(self.ctx,
uuid=uuidutils.generate_uuid())
node = ironic_utils.get_test_node(
provision_state=ironic_states.DEPLOYING)
fake_validate.return_value = node
self.driver._wait_for_active(FAKE_CLIENT, instance)
fake_validate.assert_called_once_with(FAKE_CLIENT, instance)
fake_refresh.assert_called_once_with()
@mock.patch.object(objects.Instance, 'refresh')
@mock.patch.object(ironic_driver, '_validate_instance_and_node')
def test__wait_for_active_done(self, fake_validate, fake_refresh):
instance = fake_instance.fake_instance_obj(self.ctx,
uuid=uuidutils.generate_uuid())
node = ironic_utils.get_test_node(
provision_state=ironic_states.ACTIVE)
fake_validate.return_value = node
self.assertRaises(loopingcall.LoopingCallDone,
self.driver._wait_for_active,
FAKE_CLIENT, instance)
fake_validate.assert_called_once_with(FAKE_CLIENT, instance)
fake_refresh.assert_called_once_with()
@mock.patch.object(objects.Instance, 'refresh')
@mock.patch.object(ironic_driver, '_validate_instance_and_node')
def test__wait_for_active_fail(self, fake_validate, fake_refresh):
instance = fake_instance.fake_instance_obj(self.ctx,
uuid=uuidutils.generate_uuid())
node = ironic_utils.get_test_node(
provision_state=ironic_states.DEPLOYFAIL)
fake_validate.return_value = node
self.assertRaises(exception.InstanceDeployFailure,
self.driver._wait_for_active,
FAKE_CLIENT, instance)
fake_validate.assert_called_once_with(FAKE_CLIENT, instance)
fake_refresh.assert_called_once_with()
@mock.patch.object(objects.Instance, 'refresh')
@mock.patch.object(ironic_driver, '_validate_instance_and_node')
def _wait_for_active_abort(self, instance_params, fake_validate,
fake_refresh):
instance = fake_instance.fake_instance_obj(self.ctx,
uuid=uuidutils.generate_uuid(),
**instance_params)
self.assertRaises(exception.InstanceDeployFailure,
self.driver._wait_for_active,
FAKE_CLIENT, instance)
# Assert _validate_instance_and_node wasn't called
self.assertFalse(fake_validate.called)
fake_refresh.assert_called_once_with()
def test__wait_for_active_abort_deleting(self):
self._wait_for_active_abort({'task_state': task_states.DELETING})
def test__wait_for_active_abort_deleted(self):
self._wait_for_active_abort({'vm_state': vm_states.DELETED})
def test__wait_for_active_abort_error(self):
self._wait_for_active_abort({'vm_state': vm_states.ERROR})
@mock.patch.object(ironic_driver, '_validate_instance_and_node')
def test__wait_for_power_state_pass(self, fake_validate):
instance = fake_instance.fake_instance_obj(self.ctx,
uuid=uuidutils.generate_uuid())
node = ironic_utils.get_test_node(
target_power_state=ironic_states.POWER_OFF)
fake_validate.return_value = node
self.driver._wait_for_power_state(
FAKE_CLIENT, instance, 'fake message')
self.assertTrue(fake_validate.called)
@mock.patch.object(ironic_driver, '_validate_instance_and_node')
def test__wait_for_power_state_ok(self, fake_validate):
instance = fake_instance.fake_instance_obj(self.ctx,
uuid=uuidutils.generate_uuid())
node = ironic_utils.get_test_node(
target_power_state=ironic_states.NOSTATE)
fake_validate.return_value = node
self.assertRaises(loopingcall.LoopingCallDone,
self.driver._wait_for_power_state,
FAKE_CLIENT, instance, 'fake message')
self.assertTrue(fake_validate.called)
def _test__node_resource(self, has_inst_info):
node_uuid = uuidutils.generate_uuid()
props = _get_properties()
stats = _get_stats()
if has_inst_info:
instance_info = _get_instance_info()
else:
instance_info = {}
node = ironic_utils.get_test_node(uuid=node_uuid,
instance_uuid=self.instance_uuid,
instance_info=instance_info,
properties=props)
result = self.driver._node_resource(node)
wantkeys = ["hypervisor_hostname", "hypervisor_type",
"hypervisor_version", "cpu_info",
"vcpus", "vcpus_used",
"memory_mb", "memory_mb_used",
"local_gb", "local_gb_used",
"disk_available_least",
"supported_instances",
"stats",
"numa_topology"]
wantkeys.sort()
gotkeys = result.keys()
gotkeys.sort()
self.assertEqual(wantkeys, gotkeys)
if has_inst_info:
props_dict = instance_info
expected_cpus = instance_info['vcpus']
else:
props_dict = props
expected_cpus = props['cpus']
self.assertEqual(expected_cpus, result['vcpus'])
self.assertEqual(expected_cpus, result['vcpus_used'])
self.assertEqual(props_dict['memory_mb'], result['memory_mb'])
self.assertEqual(props_dict['memory_mb'], result['memory_mb_used'])
self.assertEqual(props_dict['local_gb'], result['local_gb'])
self.assertEqual(props_dict['local_gb'], result['local_gb_used'])
self.assertEqual(node_uuid, result['hypervisor_hostname'])
self.assertEqual(stats, jsonutils.loads(result['stats']))
self.assertIsNone(result['numa_topology'])
def test__node_resource(self):
self._test__node_resource(True)
def test__node_resource_no_instance_info(self):
self._test__node_resource(False)
def test__node_resource_canonicalizes_arch(self):
node_uuid = uuidutils.generate_uuid()
props = _get_properties()
props['cpu_arch'] = 'i386'
node = ironic_utils.get_test_node(uuid=node_uuid, properties=props)
result = self.driver._node_resource(node)
self.assertEqual('i686',
jsonutils.loads(result['supported_instances'])[0][0])
self.assertEqual('i386',
jsonutils.loads(result['stats'])['cpu_arch'])
def test__node_resource_unknown_arch(self):
node_uuid = uuidutils.generate_uuid()
props = _get_properties()
del props['cpu_arch']
node = ironic_utils.get_test_node(uuid=node_uuid, properties=props)
result = self.driver._node_resource(node)
self.assertEqual([], jsonutils.loads(result['supported_instances']))
def test__node_resource_exposes_capabilities(self):
props = _get_properties()
props['capabilities'] = 'test:capability, test2:value2'
node = ironic_utils.get_test_node(properties=props)
result = self.driver._node_resource(node)
stats = jsonutils.loads(result['stats'])
self.assertIsNone(stats.get('capabilities'))
self.assertEqual('capability', stats.get('test'))
self.assertEqual('value2', stats.get('test2'))
def test__node_resource_no_capabilities(self):
props = _get_properties()
props['capabilities'] = None
node = ironic_utils.get_test_node(properties=props)
result = self.driver._node_resource(node)
self.assertIsNone(jsonutils.loads(result['stats']).get('capabilities'))
def test__node_resource_malformed_capabilities(self):
props = _get_properties()
props['capabilities'] = 'test:capability,:no_key,no_val:'
node = ironic_utils.get_test_node(properties=props)
result = self.driver._node_resource(node)
stats = jsonutils.loads(result['stats'])
self.assertEqual('capability', stats.get('test'))
def test__node_resource_available(self):
node_uuid = uuidutils.generate_uuid()
props = _get_properties()
stats = _get_stats()
node = ironic_utils.get_test_node(
uuid=node_uuid,
instance_uuid=None,
power_state=ironic_states.POWER_OFF,
properties=props,
provision_state=ironic_states.AVAILABLE)
result = self.driver._node_resource(node)
self.assertEqual(props['cpus'], result['vcpus'])
self.assertEqual(0, result['vcpus_used'])
self.assertEqual(props['memory_mb'], result['memory_mb'])
self.assertEqual(0, result['memory_mb_used'])
self.assertEqual(props['local_gb'], result['local_gb'])
self.assertEqual(0, result['local_gb_used'])
self.assertEqual(node_uuid, result['hypervisor_hostname'])
self.assertEqual(stats, jsonutils.loads(result['stats']))
@mock.patch.object(ironic_driver.IronicDriver,
'_node_resources_unavailable')
def test__node_resource_unavailable_node_res(self, mock_res_unavail):
mock_res_unavail.return_value = True
node_uuid = uuidutils.generate_uuid()
props = _get_properties()
stats = _get_stats()
node = ironic_utils.get_test_node(uuid=node_uuid,
instance_uuid=None,
properties=props)
result = self.driver._node_resource(node)
self.assertEqual(0, result['vcpus'])
self.assertEqual(0, result['vcpus_used'])
self.assertEqual(0, result['memory_mb'])
self.assertEqual(0, result['memory_mb_used'])
self.assertEqual(0, result['local_gb'])
self.assertEqual(0, result['local_gb_used'])
self.assertEqual(node_uuid, result['hypervisor_hostname'])
self.assertEqual(stats, jsonutils.loads(result['stats']))
@mock.patch.object(ironic_driver.IronicDriver,
'_node_resources_used')
def test__node_resource_used_node_res(self, mock_res_used):
mock_res_used.return_value = True
node_uuid = uuidutils.generate_uuid()
props = _get_properties()
stats = _get_stats()
instance_info = _get_instance_info()
node = ironic_utils.get_test_node(
uuid=node_uuid,
instance_uuid=uuidutils.generate_uuid(),
provision_state=ironic_states.ACTIVE,
properties=props,
instance_info=instance_info)
result = self.driver._node_resource(node)
self.assertEqual(instance_info['vcpus'], result['vcpus'])
self.assertEqual(instance_info['vcpus'], result['vcpus_used'])
self.assertEqual(instance_info['memory_mb'], result['memory_mb'])
self.assertEqual(instance_info['memory_mb'], result['memory_mb_used'])
self.assertEqual(instance_info['local_gb'], result['local_gb'])
self.assertEqual(instance_info['local_gb'], result['local_gb_used'])
self.assertEqual(node_uuid, result['hypervisor_hostname'])
self.assertEqual(stats, jsonutils.loads(result['stats']))
@mock.patch.object(ironic_driver.LOG, 'warning')
def test__parse_node_properties(self, mock_warning):
props = _get_properties()
node = ironic_utils.get_test_node(
uuid=uuidutils.generate_uuid(),
properties=props)
# raw_cpu_arch is included because extra_specs filters do not
        # canonicalize the arch
props['raw_cpu_arch'] = props['cpu_arch']
parsed = self.driver._parse_node_properties(node)
self.assertEqual(props, parsed)
# Assert we didn't log any warning since all properties are
# correct
self.assertFalse(mock_warning.called)
@mock.patch.object(ironic_driver.LOG, 'warning')
def test__parse_node_properties_bad_values(self, mock_warning):
props = _get_properties()
props['cpus'] = 'bad-value'
props['memory_mb'] = 'bad-value'
props['local_gb'] = 'bad-value'
props['cpu_arch'] = 'bad-value'
node = ironic_utils.get_test_node(
uuid=uuidutils.generate_uuid(),
properties=props)
# raw_cpu_arch is included because extra_specs filters do not
        # canonicalize the arch
props['raw_cpu_arch'] = props['cpu_arch']
parsed = self.driver._parse_node_properties(node)
expected_props = props.copy()
expected_props['cpus'] = 0
expected_props['memory_mb'] = 0
expected_props['local_gb'] = 0
expected_props['cpu_arch'] = None
self.assertEqual(expected_props, parsed)
self.assertEqual(4, mock_warning.call_count)
@mock.patch.object(ironic_driver.LOG, 'warning')
def test__parse_node_instance_info(self, mock_warning):
props = _get_properties()
instance_info = _get_instance_info()
node = ironic_utils.get_test_node(
uuid=uuidutils.generate_uuid(),
instance_info=instance_info)
parsed = self.driver._parse_node_instance_info(node, props)
self.assertEqual(instance_info, parsed)
self.assertFalse(mock_warning.called)
@mock.patch.object(ironic_driver.LOG, 'warning')
def test__parse_node_instance_info_bad_values(self, mock_warning):
props = _get_properties()
instance_info = _get_instance_info()
instance_info['vcpus'] = 'bad-value'
instance_info['memory_mb'] = 'bad-value'
instance_info['local_gb'] = 'bad-value'
node = ironic_utils.get_test_node(
uuid=uuidutils.generate_uuid(),
instance_info=instance_info)
parsed = self.driver._parse_node_instance_info(node, props)
expected = {
'vcpus': props['cpus'],
'memory_mb': props['memory_mb'],
'local_gb': props['local_gb']
}
self.assertEqual(expected, parsed)
self.assertEqual(3, mock_warning.call_count)
@mock.patch.object(ironic_driver.LOG, 'warning')
def test__parse_node_properties_canonicalize_cpu_arch(self, mock_warning):
props = _get_properties()
props['cpu_arch'] = 'amd64'
node = ironic_utils.get_test_node(
uuid=uuidutils.generate_uuid(),
properties=props)
# raw_cpu_arch is included because extra_specs filters do not
        # canonicalize the arch
props['raw_cpu_arch'] = props['cpu_arch']
parsed = self.driver._parse_node_properties(node)
expected_props = props.copy()
        # Make sure cpu_arch was canonicalized
expected_props['cpu_arch'] = 'x86_64'
self.assertEqual(expected_props, parsed)
# Assert we didn't log any warning since all properties are
# correct
self.assertFalse(mock_warning.called)
@mock.patch.object(firewall.NoopFirewallDriver, 'prepare_instance_filter',
create=True)
@mock.patch.object(firewall.NoopFirewallDriver, 'setup_basic_filtering',
create=True)
@mock.patch.object(firewall.NoopFirewallDriver, 'apply_instance_filter',
create=True)
def test__start_firewall(self, mock_aif, mock_sbf, mock_pif):
fake_inst = 'fake-inst'
fake_net_info = utils.get_test_network_info()
self.driver._start_firewall(fake_inst, fake_net_info)
mock_aif.assert_called_once_with(fake_inst, fake_net_info)
mock_sbf.assert_called_once_with(fake_inst, fake_net_info)
mock_pif.assert_called_once_with(fake_inst, fake_net_info)
@mock.patch.object(firewall.NoopFirewallDriver, 'unfilter_instance',
create=True)
def test__stop_firewall(self, mock_ui):
fake_inst = 'fake-inst'
fake_net_info = utils.get_test_network_info()
self.driver._stop_firewall(fake_inst, fake_net_info)
mock_ui.assert_called_once_with(fake_inst, fake_net_info)
@mock.patch.object(cw.IronicClientWrapper, 'call')
def test_instance_exists(self, mock_call):
instance = fake_instance.fake_instance_obj(self.ctx,
uuid=self.instance_uuid)
self.assertTrue(self.driver.instance_exists(instance))
mock_call.assert_called_once_with('node.get_by_instance_uuid',
self.instance_uuid)
@mock.patch.object(cw.IronicClientWrapper, 'call')
def test_instance_exists_fail(self, mock_call):
mock_call.side_effect = ironic_exception.NotFound
instance = fake_instance.fake_instance_obj(self.ctx,
uuid=self.instance_uuid)
self.assertFalse(self.driver.instance_exists(instance))
mock_call.assert_called_once_with('node.get_by_instance_uuid',
self.instance_uuid)
@mock.patch.object(cw.IronicClientWrapper, 'call')
@mock.patch.object(objects.Instance, 'get_by_uuid')
def test_list_instances(self, mock_inst_by_uuid, mock_call):
nodes = []
instances = []
for i in range(2):
uuid = uuidutils.generate_uuid()
instances.append(fake_instance.fake_instance_obj(self.ctx,
id=i,
uuid=uuid))
nodes.append(ironic_utils.get_test_node(instance_uuid=uuid))
mock_inst_by_uuid.side_effect = instances
mock_call.return_value = nodes
response = self.driver.list_instances()
mock_call.assert_called_with("node.list", associated=True, limit=0)
expected_calls = [mock.call(mock.ANY, instances[0].uuid),
mock.call(mock.ANY, instances[1].uuid)]
mock_inst_by_uuid.assert_has_calls(expected_calls)
self.assertEqual(['instance-00000000', 'instance-00000001'],
sorted(response))
@mock.patch.object(cw.IronicClientWrapper, 'call')
@mock.patch.object(objects.Instance, 'get_by_uuid')
def test_list_instances_fail(self, mock_inst_by_uuid, mock_call):
mock_call.side_effect = exception.NovaException
response = self.driver.list_instances()
mock_call.assert_called_with("node.list", associated=True, limit=0)
self.assertFalse(mock_inst_by_uuid.called)
self.assertThat(response, HasLength(0))
@mock.patch.object(cw.IronicClientWrapper, 'call')
def test_list_instance_uuids(self, mock_call):
num_nodes = 2
nodes = []
for n in range(num_nodes):
nodes.append(ironic_utils.get_test_node(
instance_uuid=uuidutils.generate_uuid()))
mock_call.return_value = nodes
uuids = self.driver.list_instance_uuids()
mock_call.assert_called_with('node.list', associated=True, limit=0)
expected = [n.instance_uuid for n in nodes]
self.assertEqual(sorted(expected), sorted(uuids))
@mock.patch.object(FAKE_CLIENT.node, 'list')
@mock.patch.object(FAKE_CLIENT.node, 'get')
def test_node_is_available_empty_cache_empty_list(self, mock_get,
mock_list):
node = ironic_utils.get_test_node()
mock_get.return_value = node
mock_list.return_value = []
self.assertTrue(self.driver.node_is_available(node.uuid))
mock_get.assert_called_with(node.uuid)
mock_list.assert_called_with(detail=True, limit=0)
mock_get.side_effect = ironic_exception.NotFound
self.assertFalse(self.driver.node_is_available(node.uuid))
@mock.patch.object(FAKE_CLIENT.node, 'list')
@mock.patch.object(FAKE_CLIENT.node, 'get')
def test_node_is_available_empty_cache(self, mock_get, mock_list):
node = ironic_utils.get_test_node()
mock_get.return_value = node
mock_list.return_value = [node]
self.assertTrue(self.driver.node_is_available(node.uuid))
mock_list.assert_called_with(detail=True, limit=0)
self.assertEqual(0, mock_get.call_count)
@mock.patch.object(FAKE_CLIENT.node, 'list')
@mock.patch.object(FAKE_CLIENT.node, 'get')
def test_node_is_available_with_cache(self, mock_get, mock_list):
node = ironic_utils.get_test_node()
mock_get.return_value = node
mock_list.return_value = [node]
# populate the cache
self.driver.get_available_nodes(refresh=True)
# prove that zero calls are made after populating cache
mock_list.reset_mock()
self.assertTrue(self.driver.node_is_available(node.uuid))
self.assertEqual(0, mock_list.call_count)
self.assertEqual(0, mock_get.call_count)
def test__node_resources_unavailable(self):
node_dicts = [
# a node in maintenance /w no instance and power OFF
{'uuid': uuidutils.generate_uuid(),
'maintenance': True,
'power_state': ironic_states.POWER_OFF,
'provision_state': ironic_states.AVAILABLE},
# a node in maintenance /w no instance and ERROR power state
{'uuid': uuidutils.generate_uuid(),
'maintenance': True,
'power_state': ironic_states.ERROR,
'provision_state': ironic_states.AVAILABLE},
# a node not in maintenance /w no instance and bad power state
{'uuid': uuidutils.generate_uuid(),
'power_state': ironic_states.NOSTATE,
'provision_state': ironic_states.AVAILABLE},
# a node not in maintenance or bad power state, bad provision state
{'uuid': uuidutils.generate_uuid(),
'power_state': ironic_states.POWER_ON,
'provision_state': ironic_states.MANAGEABLE},
# a node in cleaning
{'uuid': uuidutils.generate_uuid(),
'power_state': ironic_states.POWER_ON,
'provision_state': ironic_states.CLEANING},
# a node in cleaning, waiting for a clean step to finish
{'uuid': uuidutils.generate_uuid(),
'power_state': ironic_states.POWER_ON,
'provision_state': ironic_states.CLEANWAIT},
# a node in deleting
{'uuid': uuidutils.generate_uuid(),
'power_state': ironic_states.POWER_ON,
'provision_state': ironic_states.DELETING},
# a node in deleted
{'uuid': uuidutils.generate_uuid(),
'power_state': ironic_states.POWER_ON,
'provision_state': ironic_states.DELETED}
]
for n in node_dicts:
node = ironic_utils.get_test_node(**n)
self.assertTrue(self.driver._node_resources_unavailable(node))
for ok_state in (ironic_states.AVAILABLE, ironic_states.NOSTATE):
# these are both ok and should present as available
avail_node = ironic_utils.get_test_node(
power_state=ironic_states.POWER_OFF,
provision_state=ok_state)
unavailable = self.driver._node_resources_unavailable(avail_node)
self.assertFalse(unavailable)
def test__node_resources_used(self):
node_dicts = [
# a node in maintenance /w instance and active
{'uuid': uuidutils.generate_uuid(),
'instance_uuid': uuidutils.generate_uuid(),
'provision_state': ironic_states.ACTIVE},
]
for n in node_dicts:
node = ironic_utils.get_test_node(**n)
self.assertTrue(self.driver._node_resources_used(node))
unused_node = ironic_utils.get_test_node(
instance_uuid=None,
provision_state=ironic_states.AVAILABLE)
self.assertFalse(self.driver._node_resources_used(unused_node))
@mock.patch.object(FAKE_CLIENT.node, 'list')
def test_get_available_nodes(self, mock_list):
node_dicts = [
# a node in maintenance /w no instance and power OFF
{'uuid': uuidutils.generate_uuid(),
'maintenance': True,
'power_state': ironic_states.POWER_OFF},
# a node /w instance and power ON
{'uuid': uuidutils.generate_uuid(),
'instance_uuid': self.instance_uuid,
'power_state': ironic_states.POWER_ON},
# a node not in maintenance /w no instance and bad power state
{'uuid': uuidutils.generate_uuid(),
'power_state': ironic_states.ERROR},
]
nodes = [ironic_utils.get_test_node(**n) for n in node_dicts]
mock_list.return_value = nodes
available_nodes = self.driver.get_available_nodes()
expected_uuids = [n['uuid'] for n in node_dicts]
self.assertEqual(sorted(expected_uuids), sorted(available_nodes))
@mock.patch.object(FAKE_CLIENT.node, 'get')
@mock.patch.object(FAKE_CLIENT.node, 'list')
@mock.patch.object(ironic_driver.IronicDriver, '_node_resource')
def test_get_available_resource(self, mock_nr, mock_list, mock_get):
node = ironic_utils.get_test_node()
node_2 = ironic_utils.get_test_node(uuid=uuidutils.generate_uuid())
fake_resource = 'fake-resource'
mock_get.return_value = node
# ensure cache gets populated without the node we want
mock_list.return_value = [node_2]
mock_nr.return_value = fake_resource
result = self.driver.get_available_resource(node.uuid)
self.assertEqual(fake_resource, result)
mock_nr.assert_called_once_with(node)
mock_get.assert_called_once_with(node.uuid)
@mock.patch.object(FAKE_CLIENT.node, 'get')
@mock.patch.object(FAKE_CLIENT.node, 'list')
@mock.patch.object(ironic_driver.IronicDriver, '_node_resource')
def test_get_available_resource_with_cache(self, mock_nr, mock_list,
mock_get):
node = ironic_utils.get_test_node()
fake_resource = 'fake-resource'
mock_list.return_value = [node]
mock_nr.return_value = fake_resource
# populate the cache
self.driver.get_available_nodes(refresh=True)
mock_list.reset_mock()
result = self.driver.get_available_resource(node.uuid)
self.assertEqual(fake_resource, result)
self.assertEqual(0, mock_list.call_count)
self.assertEqual(0, mock_get.call_count)
mock_nr.assert_called_once_with(node)
@mock.patch.object(FAKE_CLIENT.node, 'get_by_instance_uuid')
def test_get_info(self, mock_gbiu):
properties = {'memory_mb': 512, 'cpus': 2}
power_state = ironic_states.POWER_ON
node = ironic_utils.get_test_node(instance_uuid=self.instance_uuid,
properties=properties,
power_state=power_state)
mock_gbiu.return_value = node
# ironic_states.POWER_ON should be mapped to
# nova_states.RUNNING
memory_kib = properties['memory_mb'] * 1024
instance = fake_instance.fake_instance_obj('fake-context',
uuid=self.instance_uuid)
result = self.driver.get_info(instance)
self.assertEqual(hardware.InstanceInfo(state=nova_states.RUNNING,
max_mem_kb=memory_kib,
mem_kb=memory_kib,
num_cpu=properties['cpus']),
result)
@mock.patch.object(FAKE_CLIENT.node, 'get_by_instance_uuid')
def test_get_info_http_not_found(self, mock_gbiu):
mock_gbiu.side_effect = ironic_exception.NotFound()
instance = fake_instance.fake_instance_obj(
self.ctx, uuid=uuidutils.generate_uuid())
result = self.driver.get_info(instance)
self.assertEqual(hardware.InstanceInfo(state=nova_states.NOSTATE),
result)
@mock.patch.object(FAKE_CLIENT, 'node')
def test_macs_for_instance(self, mock_node):
node = ironic_utils.get_test_node()
port = ironic_utils.get_test_port()
mock_node.get.return_value = node
mock_node.list_ports.return_value = [port]
instance = fake_instance.fake_instance_obj(self.ctx,
node=node.uuid)
result = self.driver.macs_for_instance(instance)
self.assertEqual(set([port.address]), result)
mock_node.list_ports.assert_called_once_with(node.uuid)
@mock.patch.object(FAKE_CLIENT.node, 'get')
def test_macs_for_instance_http_not_found(self, mock_get):
mock_get.side_effect = ironic_exception.NotFound()
instance = fake_instance.fake_instance_obj(
self.ctx, node=uuidutils.generate_uuid())
result = self.driver.macs_for_instance(instance)
self.assertIsNone(result)
@mock.patch.object(objects.Instance, 'save')
@mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
@mock.patch.object(FAKE_CLIENT, 'node')
@mock.patch.object(ironic_driver.IronicDriver, '_wait_for_active')
@mock.patch.object(ironic_driver.IronicDriver, '_add_driver_fields')
@mock.patch.object(ironic_driver.IronicDriver, '_plug_vifs')
@mock.patch.object(ironic_driver.IronicDriver, '_start_firewall')
def _test_spawn(self, mock_sf, mock_pvifs, mock_adf, mock_wait_active,
mock_node, mock_looping, mock_save):
node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid)
instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
fake_flavor = objects.Flavor(ephemeral_gb=0)
instance.flavor = fake_flavor
mock_node.get.return_value = node
mock_node.validate.return_value = ironic_utils.get_test_validation()
mock_node.get_by_instance_uuid.return_value = node
mock_node.set_provision_state.return_value = mock.MagicMock()
fake_looping_call = FakeLoopingCall()
mock_looping.return_value = fake_looping_call
image_meta = ironic_utils.get_test_image_meta()
self.driver.spawn(self.ctx, instance, image_meta, [], None)
mock_node.get.assert_called_once_with(node_uuid)
mock_node.validate.assert_called_once_with(node_uuid)
mock_adf.assert_called_once_with(node, instance,
test.MatchType(objects.ImageMeta),
fake_flavor)
mock_pvifs.assert_called_once_with(node, instance, None)
mock_sf.assert_called_once_with(instance, None)
mock_node.set_provision_state.assert_called_once_with(node_uuid,
'active', configdrive=mock.ANY)
self.assertIsNone(instance.default_ephemeral_device)
self.assertFalse(mock_save.called)
mock_looping.assert_called_once_with(mock_wait_active,
FAKE_CLIENT_WRAPPER,
instance)
fake_looping_call.start.assert_called_once_with(
interval=CONF.ironic.api_retry_interval)
fake_looping_call.wait.assert_called_once_with()
@mock.patch.object(ironic_driver.IronicDriver, '_generate_configdrive')
@mock.patch.object(configdrive, 'required_by')
def test_spawn(self, mock_required_by, mock_configdrive):
mock_required_by.return_value = False
self._test_spawn()
# assert configdrive was not generated
self.assertFalse(mock_configdrive.called)
@mock.patch.object(ironic_driver.IronicDriver, '_generate_configdrive')
@mock.patch.object(configdrive, 'required_by')
def test_spawn_with_configdrive(self, mock_required_by, mock_configdrive):
mock_required_by.return_value = True
self._test_spawn()
# assert configdrive was generated
mock_configdrive.assert_called_once_with(mock.ANY, mock.ANY, mock.ANY,
extra_md={}, files=[])
@mock.patch.object(configdrive, 'required_by')
@mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
@mock.patch.object(FAKE_CLIENT, 'node')
@mock.patch.object(ironic_driver.IronicDriver, 'destroy')
@mock.patch.object(ironic_driver.IronicDriver, '_wait_for_active')
@mock.patch.object(ironic_driver.IronicDriver, '_add_driver_fields')
@mock.patch.object(ironic_driver.IronicDriver, '_plug_vifs')
@mock.patch.object(ironic_driver.IronicDriver, '_start_firewall')
def test_spawn_destroyed_after_failure(self, mock_sf, mock_pvifs, mock_adf,
mock_wait_active, mock_destroy,
mock_node, mock_looping,
mock_required_by):
mock_required_by.return_value = False
node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid)
fake_flavor = objects.Flavor(ephemeral_gb=0)
instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
instance.flavor = fake_flavor
mock_node.get.return_value = node
mock_node.validate.return_value = ironic_utils.get_test_validation()
mock_node.get_by_instance_uuid.return_value = node
mock_node.set_provision_state.return_value = mock.MagicMock()
fake_looping_call = FakeLoopingCall()
mock_looping.return_value = fake_looping_call
deploy_exc = exception.InstanceDeployFailure('foo')
fake_looping_call.wait.side_effect = deploy_exc
self.assertRaises(
exception.InstanceDeployFailure,
self.driver.spawn, self.ctx, instance, None, [], None)
mock_destroy.assert_called_once_with(self.ctx, instance, None)
@mock.patch.object(FAKE_CLIENT.node, 'update')
def test__add_driver_fields_good(self, mock_update):
node = ironic_utils.get_test_node(driver='fake')
instance = fake_instance.fake_instance_obj(self.ctx,
node=node.uuid)
image_meta = ironic_utils.get_test_image_meta_object()
flavor = ironic_utils.get_test_flavor()
self.driver._add_driver_fields(node, instance, image_meta, flavor)
expected_patch = [{'path': '/instance_info/image_source', 'op': 'add',
'value': image_meta.id},
{'path': '/instance_info/root_gb', 'op': 'add',
'value': str(instance.root_gb)},
{'path': '/instance_info/swap_mb', 'op': 'add',
'value': str(flavor['swap'])},
{'path': '/instance_info/display_name',
'value': instance.display_name, 'op': 'add'},
{'path': '/instance_info/vcpus', 'op': 'add',
'value': str(instance.vcpus)},
{'path': '/instance_info/memory_mb', 'op': 'add',
'value': str(instance.memory_mb)},
{'path': '/instance_info/local_gb', 'op': 'add',
'value': str(node.properties.get('local_gb', 0))},
{'path': '/instance_uuid', 'op': 'add',
'value': instance.uuid}]
mock_update.assert_called_once_with(node.uuid, expected_patch)
@mock.patch.object(FAKE_CLIENT.node, 'update')
def test__add_driver_fields_fail(self, mock_update):
mock_update.side_effect = ironic_exception.BadRequest()
node = ironic_utils.get_test_node(driver='fake')
instance = fake_instance.fake_instance_obj(self.ctx,
node=node.uuid)
image_meta = ironic_utils.get_test_image_meta_object()
flavor = ironic_utils.get_test_flavor()
self.assertRaises(exception.InstanceDeployFailure,
self.driver._add_driver_fields,
node, instance, image_meta, flavor)
@mock.patch.object(FAKE_CLIENT.node, 'update')
def test__cleanup_deploy_good_with_flavor(self, mock_update):
node = ironic_utils.get_test_node(driver='fake',
instance_uuid=self.instance_uuid)
instance = fake_instance.fake_instance_obj(self.ctx,
node=node.uuid)
flavor = ironic_utils.get_test_flavor(extra_specs={})
self.driver._cleanup_deploy(self.ctx, node, instance, None,
flavor=flavor)
expected_patch = [{'path': '/instance_uuid', 'op': 'remove'}]
mock_update.assert_called_once_with(node.uuid, expected_patch)
@mock.patch.object(ironic_driver, '_validate_instance_and_node')
@mock.patch.object(FAKE_CLIENT.node, 'update')
def test__cleanup_deploy_instance_already_removed(self, mock_update,
mock_validate):
mock_validate.side_effect = exception.InstanceNotFound(
instance_id='fake-instance')
node = ironic_utils.get_test_node(driver='fake',
instance_uuid=self.instance_uuid)
instance = fake_instance.fake_instance_obj(self.ctx,
node=node.uuid)
flavor = ironic_utils.get_test_flavor(extra_specs={})
self.driver._cleanup_deploy(self.ctx, node, instance, None,
flavor=flavor)
# assert node.update is not called
self.assertFalse(mock_update.called)
mock_validate.assert_called_once_with(mock.ANY, instance)
@mock.patch.object(FAKE_CLIENT.node, 'update')
def test__cleanup_deploy_without_flavor(self, mock_update):
node = ironic_utils.get_test_node(driver='fake',
instance_uuid=self.instance_uuid)
flavor = ironic_utils.get_test_flavor(extra_specs={})
instance = fake_instance.fake_instance_obj(self.ctx,
node=node.uuid)
instance.flavor = flavor
self.driver._cleanup_deploy(self.ctx, node, instance, None)
expected_patch = [{'path': '/instance_uuid', 'op': 'remove'}]
mock_update.assert_called_once_with(node.uuid, expected_patch)
@mock.patch.object(FAKE_CLIENT.node, 'update')
def test__cleanup_deploy_fail(self, mock_update):
mock_update.side_effect = ironic_exception.BadRequest()
node = ironic_utils.get_test_node(driver='fake',
instance_uuid=self.instance_uuid)
flavor = ironic_utils.get_test_flavor(extra_specs={})
instance = fake_instance.fake_instance_obj(self.ctx,
node=node.uuid)
instance.flavor = flavor
self.assertRaises(exception.InstanceTerminationFailure,
self.driver._cleanup_deploy,
self.ctx, node, instance, None)
@mock.patch.object(configdrive, 'required_by')
@mock.patch.object(FAKE_CLIENT, 'node')
def test_spawn_node_driver_validation_fail(self, mock_node,
mock_required_by):
mock_required_by.return_value = False
node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid)
flavor = ironic_utils.get_test_flavor()
instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
instance.flavor = flavor
mock_node.validate.return_value = ironic_utils.get_test_validation(
power=False, deploy=False)
mock_node.get.return_value = node
image_meta = ironic_utils.get_test_image_meta()
self.assertRaises(exception.ValidationError, self.driver.spawn,
self.ctx, instance, image_meta, [], None)
mock_node.get.assert_called_once_with(node_uuid)
mock_node.validate.assert_called_once_with(node_uuid)
@mock.patch.object(configdrive, 'required_by')
@mock.patch.object(FAKE_CLIENT, 'node')
@mock.patch.object(ironic_driver.IronicDriver, '_start_firewall')
@mock.patch.object(ironic_driver.IronicDriver, '_plug_vifs')
@mock.patch.object(ironic_driver.IronicDriver, '_cleanup_deploy')
def test_spawn_node_prepare_for_deploy_fail(self, mock_cleanup_deploy,
mock_pvifs, mock_sf,
mock_node, mock_required_by):
mock_required_by.return_value = False
node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid)
flavor = ironic_utils.get_test_flavor()
instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
instance.flavor = flavor
mock_node.get.return_value = node
mock_node.validate.return_value = ironic_utils.get_test_validation()
image_meta = ironic_utils.get_test_image_meta()
class TestException(Exception):
pass
mock_sf.side_effect = TestException()
self.assertRaises(TestException, self.driver.spawn,
self.ctx, instance, image_meta, [], None)
mock_node.get.assert_called_once_with(node_uuid)
mock_node.validate.assert_called_once_with(node_uuid)
mock_cleanup_deploy.assert_called_with(self.ctx, node, instance, None,
flavor=flavor)
@mock.patch.object(configdrive, 'required_by')
@mock.patch.object(FAKE_CLIENT, 'node')
@mock.patch.object(ironic_driver.IronicDriver, '_start_firewall')
@mock.patch.object(ironic_driver.IronicDriver, '_plug_vifs')
@mock.patch.object(ironic_driver.IronicDriver, '_cleanup_deploy')
def test_spawn_node_trigger_deploy_fail(self, mock_cleanup_deploy,
mock_pvifs, mock_sf,
mock_node, mock_required_by):
mock_required_by.return_value = False
node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid)
flavor = ironic_utils.get_test_flavor()
instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
instance.flavor = flavor
image_meta = ironic_utils.get_test_image_meta()
mock_node.get.return_value = node
mock_node.validate.return_value = ironic_utils.get_test_validation()
mock_node.set_provision_state.side_effect = exception.NovaException()
self.assertRaises(exception.NovaException, self.driver.spawn,
self.ctx, instance, image_meta, [], None)
mock_node.get.assert_called_once_with(node_uuid)
mock_node.validate.assert_called_once_with(node_uuid)
mock_cleanup_deploy.assert_called_once_with(self.ctx, node,
instance, None,
flavor=flavor)
@mock.patch.object(configdrive, 'required_by')
@mock.patch.object(FAKE_CLIENT, 'node')
@mock.patch.object(ironic_driver.IronicDriver, '_start_firewall')
@mock.patch.object(ironic_driver.IronicDriver, '_plug_vifs')
@mock.patch.object(ironic_driver.IronicDriver, '_cleanup_deploy')
def test_spawn_node_trigger_deploy_fail2(self, mock_cleanup_deploy,
mock_pvifs, mock_sf,
mock_node, mock_required_by):
mock_required_by.return_value = False
node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid)
flavor = ironic_utils.get_test_flavor()
instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
instance.flavor = flavor
image_meta = ironic_utils.get_test_image_meta()
mock_node.get.return_value = node
mock_node.validate.return_value = ironic_utils.get_test_validation()
mock_node.set_provision_state.side_effect = ironic_exception.BadRequest
self.assertRaises(ironic_exception.BadRequest,
self.driver.spawn,
self.ctx, instance, image_meta, [], None)
mock_node.get.assert_called_once_with(node_uuid)
mock_node.validate.assert_called_once_with(node_uuid)
mock_cleanup_deploy.assert_called_once_with(self.ctx, node,
instance, None,
flavor=flavor)
@mock.patch.object(configdrive, 'required_by')
@mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
@mock.patch.object(FAKE_CLIENT, 'node')
@mock.patch.object(ironic_driver.IronicDriver, '_start_firewall')
@mock.patch.object(ironic_driver.IronicDriver, '_plug_vifs')
@mock.patch.object(ironic_driver.IronicDriver, 'destroy')
def test_spawn_node_trigger_deploy_fail3(self, mock_destroy,
mock_pvifs, mock_sf,
mock_node, mock_looping,
mock_required_by):
mock_required_by.return_value = False
node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid)
flavor = ironic_utils.get_test_flavor()
instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
instance.flavor = flavor
image_meta = ironic_utils.get_test_image_meta()
mock_node.get.return_value = node
mock_node.validate.return_value = ironic_utils.get_test_validation()
fake_looping_call = FakeLoopingCall()
mock_looping.return_value = fake_looping_call
fake_looping_call.wait.side_effect = ironic_exception.BadRequest
fake_net_info = utils.get_test_network_info()
self.assertRaises(ironic_exception.BadRequest,
self.driver.spawn, self.ctx, instance,
image_meta, [], None, fake_net_info)
mock_destroy.assert_called_once_with(self.ctx, instance,
fake_net_info)
@mock.patch.object(configdrive, 'required_by')
@mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
@mock.patch.object(objects.Instance, 'save')
@mock.patch.object(FAKE_CLIENT, 'node')
@mock.patch.object(ironic_driver.IronicDriver, '_wait_for_active')
@mock.patch.object(ironic_driver.IronicDriver, '_plug_vifs')
@mock.patch.object(ironic_driver.IronicDriver, '_start_firewall')
def test_spawn_sets_default_ephemeral_device(self, mock_sf, mock_pvifs,
mock_wait, mock_node,
mock_save, mock_looping,
mock_required_by):
mock_required_by.return_value = False
node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid)
flavor = ironic_utils.get_test_flavor(ephemeral_gb=1)
instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
instance.flavor = flavor
mock_node.get_by_instance_uuid.return_value = node
mock_node.set_provision_state.return_value = mock.MagicMock()
image_meta = ironic_utils.get_test_image_meta()
self.driver.spawn(self.ctx, instance, image_meta, [], None)
self.assertTrue(mock_save.called)
self.assertEqual('/dev/sda1', instance.default_ephemeral_device)
@mock.patch.object(FAKE_CLIENT, 'node')
@mock.patch.object(ironic_driver.IronicDriver, '_cleanup_deploy')
def _test_destroy(self, state, mock_cleanup_deploy, mock_node):
node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
network_info = 'foo'
node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid,
provision_state=state)
instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
def fake_set_provision_state(*_):
node.provision_state = None
mock_node.get_by_instance_uuid.return_value = node
mock_node.set_provision_state.side_effect = fake_set_provision_state
self.driver.destroy(self.ctx, instance, network_info, None)
mock_node.get_by_instance_uuid.assert_called_with(instance.uuid)
mock_cleanup_deploy.assert_called_with(self.ctx, node,
instance, network_info)
        # For states where it makes sense, check that set_provision_state has
# been called
if state in ironic_driver._UNPROVISION_STATES:
mock_node.set_provision_state.assert_called_once_with(
node_uuid, 'deleted')
else:
self.assertFalse(mock_node.set_provision_state.called)
def test_destroy(self):
for state in ironic_states.PROVISION_STATE_LIST:
self._test_destroy(state)
@mock.patch.object(FAKE_CLIENT.node, 'set_provision_state')
@mock.patch.object(ironic_driver, '_validate_instance_and_node')
def test_destroy_trigger_undeploy_fail(self, fake_validate, mock_sps):
node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid,
provision_state=ironic_states.ACTIVE)
fake_validate.return_value = node
instance = fake_instance.fake_instance_obj(self.ctx,
node=node_uuid)
mock_sps.side_effect = exception.NovaException()
self.assertRaises(exception.NovaException, self.driver.destroy,
self.ctx, instance, None, None)
@mock.patch.object(ironic_driver, '_validate_instance_and_node')
def _test__unprovision_instance(self, mock_validate_inst, state=None):
fake_ironic_client = mock.Mock()
node = ironic_utils.get_test_node(
driver='fake',
provision_state=state)
instance = fake_instance.fake_instance_obj(self.ctx, node=node.uuid)
mock_validate_inst.return_value = node
self.driver._unprovision(fake_ironic_client, instance, node)
mock_validate_inst.assert_called_once_with(fake_ironic_client,
instance)
fake_ironic_client.call.assert_called_once_with(
"node.set_provision_state", node.uuid, "deleted")
def test__unprovision_cleaning(self):
self._test__unprovision_instance(state=ironic_states.CLEANING)
def test__unprovision_cleanwait(self):
self._test__unprovision_instance(state=ironic_states.CLEANWAIT)
@mock.patch.object(ironic_driver, '_validate_instance_and_node')
def test__unprovision_fail_max_retries(self, mock_validate_inst):
CONF.set_default('api_max_retries', default=2, group='ironic')
fake_ironic_client = mock.Mock()
node = ironic_utils.get_test_node(
driver='fake',
provision_state=ironic_states.ACTIVE)
instance = fake_instance.fake_instance_obj(self.ctx, node=node.uuid)
mock_validate_inst.return_value = node
self.assertRaises(exception.NovaException, self.driver._unprovision,
fake_ironic_client, instance, node)
expected_calls = (mock.call(mock.ANY, instance),
mock.call(mock.ANY, instance))
mock_validate_inst.assert_has_calls(expected_calls)
fake_ironic_client.call.assert_called_once_with(
"node.set_provision_state", node.uuid, "deleted")
@mock.patch.object(ironic_driver, '_validate_instance_and_node')
def test__unprovision_instance_not_found(self, mock_validate_inst):
fake_ironic_client = mock.Mock()
node = ironic_utils.get_test_node(
driver='fake', provision_state=ironic_states.DELETING)
instance = fake_instance.fake_instance_obj(self.ctx, node=node.uuid)
mock_validate_inst.side_effect = exception.InstanceNotFound(
instance_id='fake')
self.driver._unprovision(fake_ironic_client, instance, node)
mock_validate_inst.assert_called_once_with(fake_ironic_client,
instance)
fake_ironic_client.call.assert_called_once_with(
"node.set_provision_state", node.uuid, "deleted")
@mock.patch.object(FAKE_CLIENT, 'node')
def test_destroy_unassociate_fail(self, mock_node):
node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid,
provision_state=ironic_states.ACTIVE)
instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
mock_node.get_by_instance_uuid.return_value = node
mock_node.update.side_effect = exception.NovaException()
self.assertRaises(exception.NovaException, self.driver.destroy,
self.ctx, instance, None, None)
mock_node.set_provision_state.assert_called_once_with(node_uuid,
'deleted')
mock_node.get_by_instance_uuid.assert_called_with(instance.uuid)
@mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
@mock.patch.object(ironic_driver, '_validate_instance_and_node')
@mock.patch.object(FAKE_CLIENT.node, 'set_power_state')
def test_reboot(self, mock_sp, fake_validate, mock_looping):
node = ironic_utils.get_test_node()
fake_validate.side_effect = [node, node]
fake_looping_call = FakeLoopingCall()
mock_looping.return_value = fake_looping_call
instance = fake_instance.fake_instance_obj(self.ctx,
node=node.uuid)
self.driver.reboot(self.ctx, instance, None, None)
mock_sp.assert_called_once_with(node.uuid, 'reboot')
@mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
@mock.patch.object(ironic_driver, '_validate_instance_and_node')
@mock.patch.object(FAKE_CLIENT.node, 'set_power_state')
def test_power_off(self, mock_sp, fake_validate, mock_looping):
self._test_power_on_off(mock_sp, fake_validate, mock_looping,
method_name='power_off')
@mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
@mock.patch.object(ironic_driver, '_validate_instance_and_node')
@mock.patch.object(FAKE_CLIENT.node, 'set_power_state')
def test_power_on(self, mock_sp, fake_validate, mock_looping):
self._test_power_on_off(mock_sp, fake_validate, mock_looping,
method_name='power_on')
def _test_power_on_off(self, mock_sp, fake_validate, mock_looping,
method_name=None):
node = ironic_utils.get_test_node()
fake_validate.side_effect = [node, node]
fake_looping_call = FakeLoopingCall()
mock_looping.return_value = fake_looping_call
instance = fake_instance.fake_instance_obj(self.ctx,
node=self.instance_uuid)
# Call the method under test here
if method_name == 'power_on':
self.driver.power_on(self.ctx, instance,
utils.get_test_network_info())
mock_sp.assert_called_once_with(node.uuid, 'on')
elif method_name == 'power_off':
self.driver.power_off(instance)
mock_sp.assert_called_once_with(node.uuid, 'off')
@mock.patch.object(FAKE_CLIENT.node, 'list_ports')
@mock.patch.object(FAKE_CLIENT.port, 'update')
@mock.patch.object(ironic_driver.IronicDriver, '_unplug_vifs')
def test_plug_vifs_with_port(self, mock_uvifs, mock_port_udt, mock_lp):
node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
node = ironic_utils.get_test_node(uuid=node_uuid)
        # make the address consistent with network_info's
port = ironic_utils.get_test_port(address='fake')
mock_lp.return_value = [port]
instance = fake_instance.fake_instance_obj(self.ctx,
node=node_uuid)
network_info = utils.get_test_network_info()
port_id = six.text_type(network_info[0]['id'])
expected_patch = [{'op': 'add',
'path': '/extra/vif_port_id',
'value': port_id}]
self.driver._plug_vifs(node, instance, network_info)
# asserts
mock_uvifs.assert_called_once_with(node, instance, network_info)
mock_lp.assert_called_once_with(node_uuid)
mock_port_udt.assert_called_with(port.uuid, expected_patch)
@mock.patch.object(FAKE_CLIENT.node, 'get')
@mock.patch.object(ironic_driver.IronicDriver, '_plug_vifs')
def test_plug_vifs(self, mock__plug_vifs, mock_get):
node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
node = ironic_utils.get_test_node(uuid=node_uuid)
mock_get.return_value = node
instance = fake_instance.fake_instance_obj(self.ctx,
node=node_uuid)
network_info = utils.get_test_network_info()
self.driver.plug_vifs(instance, network_info)
mock_get.assert_called_once_with(node_uuid)
mock__plug_vifs.assert_called_once_with(node, instance, network_info)
@mock.patch.object(FAKE_CLIENT.port, 'update')
@mock.patch.object(FAKE_CLIENT.node, 'list_ports')
@mock.patch.object(ironic_driver.IronicDriver, '_unplug_vifs')
def test_plug_vifs_multiple_ports(self, mock_uvifs, mock_lp,
mock_port_udt):
node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
node = ironic_utils.get_test_node(uuid=node_uuid)
first_ironic_port_uuid = 'aaaaaaaa-bbbb-1111-dddd-eeeeeeeeeeee'
first_port = ironic_utils.get_test_port(uuid=first_ironic_port_uuid,
node_uuid=node_uuid,
address='11:FF:FF:FF:FF:FF')
second_ironic_port_uuid = 'aaaaaaaa-bbbb-2222-dddd-eeeeeeeeeeee'
second_port = ironic_utils.get_test_port(uuid=second_ironic_port_uuid,
node_uuid=node_uuid,
address='22:FF:FF:FF:FF:FF')
mock_lp.return_value = [second_port, first_port]
instance = fake_instance.fake_instance_obj(self.ctx,
node=node_uuid)
first_vif_id = 'aaaaaaaa-vv11-cccc-dddd-eeeeeeeeeeee'
second_vif_id = 'aaaaaaaa-vv22-cccc-dddd-eeeeeeeeeeee'
first_vif = ironic_utils.get_test_vif(
address='22:FF:FF:FF:FF:FF',
id=second_vif_id)
second_vif = ironic_utils.get_test_vif(
address='11:FF:FF:FF:FF:FF',
id=first_vif_id)
network_info = [first_vif, second_vif]
self.driver._plug_vifs(node, instance, network_info)
# asserts
mock_uvifs.assert_called_once_with(node, instance, network_info)
mock_lp.assert_called_once_with(node_uuid)
calls = (mock.call(first_ironic_port_uuid,
[{'op': 'add', 'path': '/extra/vif_port_id',
'value': first_vif_id}]),
mock.call(second_ironic_port_uuid,
[{'op': 'add', 'path': '/extra/vif_port_id',
'value': second_vif_id}]))
mock_port_udt.assert_has_calls(calls, any_order=True)
@mock.patch.object(FAKE_CLIENT.port, 'update')
@mock.patch.object(FAKE_CLIENT.node, 'list_ports')
@mock.patch.object(ironic_driver.IronicDriver, '_unplug_vifs')
def test_plug_vifs_count_mismatch(self, mock_uvifs, mock_lp,
mock_port_udt):
node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
node = ironic_utils.get_test_node(uuid=node_uuid)
port = ironic_utils.get_test_port()
mock_lp.return_value = [port]
instance = fake_instance.fake_instance_obj(self.ctx,
node=node_uuid)
# len(network_info) > len(ports)
network_info = (utils.get_test_network_info() +
utils.get_test_network_info())
self.assertRaises(exception.NovaException,
self.driver._plug_vifs, node, instance,
network_info)
# asserts
mock_uvifs.assert_called_once_with(node, instance, network_info)
mock_lp.assert_called_once_with(node_uuid)
# assert port.update() was not called
self.assertFalse(mock_port_udt.called)
@mock.patch.object(FAKE_CLIENT.port, 'update')
@mock.patch.object(FAKE_CLIENT.node, 'list_ports')
@mock.patch.object(ironic_driver.IronicDriver, '_unplug_vifs')
def test_plug_vifs_no_network_info(self, mock_uvifs, mock_lp,
mock_port_udt):
node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
node = ironic_utils.get_test_node(uuid=node_uuid)
port = ironic_utils.get_test_port()
mock_lp.return_value = [port]
instance = fake_instance.fake_instance_obj(self.ctx,
node=node_uuid)
network_info = []
self.driver._plug_vifs(node, instance, network_info)
# asserts
mock_uvifs.assert_called_once_with(node, instance, network_info)
mock_lp.assert_called_once_with(node_uuid)
# assert port.update() was not called
self.assertFalse(mock_port_udt.called)
@mock.patch.object(FAKE_CLIENT.port, 'update')
@mock.patch.object(FAKE_CLIENT, 'node')
def test_unplug_vifs(self, mock_node, mock_update):
node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
node = ironic_utils.get_test_node(uuid=node_uuid)
port = ironic_utils.get_test_port(extra={'vif_port_id': 'fake-vif'})
mock_node.get.return_value = node
mock_node.list_ports.return_value = [port]
instance = fake_instance.fake_instance_obj(self.ctx,
node=node_uuid)
expected_patch = [{'op': 'remove', 'path':
'/extra/vif_port_id'}]
self.driver.unplug_vifs(instance,
utils.get_test_network_info())
# asserts
mock_node.get.assert_called_once_with(node_uuid)
mock_node.list_ports.assert_called_once_with(node_uuid, detail=True)
mock_update.assert_called_once_with(port.uuid, expected_patch)
@mock.patch.object(FAKE_CLIENT.port, 'update')
@mock.patch.object(FAKE_CLIENT, 'node')
def test_unplug_vifs_port_not_associated(self, mock_node, mock_update):
node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
node = ironic_utils.get_test_node(uuid=node_uuid)
port = ironic_utils.get_test_port(extra={})
mock_node.get.return_value = node
mock_node.list_ports.return_value = [port]
instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
self.driver.unplug_vifs(instance, utils.get_test_network_info())
mock_node.get.assert_called_once_with(node_uuid)
mock_node.list_ports.assert_called_once_with(node_uuid, detail=True)
# assert port.update() was not called
self.assertFalse(mock_update.called)
@mock.patch.object(FAKE_CLIENT.port, 'update')
def test_unplug_vifs_no_network_info(self, mock_update):
instance = fake_instance.fake_instance_obj(self.ctx)
network_info = []
self.driver.unplug_vifs(instance, network_info)
# assert port.update() was not called
self.assertFalse(mock_update.called)
@mock.patch.object(firewall.NoopFirewallDriver, 'unfilter_instance',
create=True)
def test_unfilter_instance(self, mock_ui):
instance = fake_instance.fake_instance_obj(self.ctx)
network_info = utils.get_test_network_info()
self.driver.unfilter_instance(instance, network_info)
mock_ui.assert_called_once_with(instance, network_info)
@mock.patch.object(firewall.NoopFirewallDriver, 'setup_basic_filtering',
create=True)
@mock.patch.object(firewall.NoopFirewallDriver, 'prepare_instance_filter',
create=True)
def test_ensure_filtering_rules_for_instance(self, mock_pif, mock_sbf):
instance = fake_instance.fake_instance_obj(self.ctx)
network_info = utils.get_test_network_info()
self.driver.ensure_filtering_rules_for_instance(instance,
network_info)
mock_sbf.assert_called_once_with(instance, network_info)
mock_pif.assert_called_once_with(instance, network_info)
@mock.patch.object(firewall.NoopFirewallDriver,
'refresh_instance_security_rules', create=True)
def test_refresh_instance_security_rules(self, mock_risr):
instance = fake_instance.fake_instance_obj(self.ctx)
self.driver.refresh_instance_security_rules(instance)
mock_risr.assert_called_once_with(instance)
@mock.patch.object(firewall.NoopFirewallDriver,
'refresh_provider_fw_rules', create=True)
def test_refresh_provider_fw_rules(self, mock_rpfr):
fake_instance.fake_instance_obj(self.ctx)
self.driver.refresh_provider_fw_rules()
mock_rpfr.assert_called_once_with()
@mock.patch.object(firewall.NoopFirewallDriver,
'refresh_security_group_members', create=True)
def test_refresh_security_group_members(self, mock_rsgm):
fake_group = 'fake-security-group-members'
self.driver.refresh_security_group_members(fake_group)
mock_rsgm.assert_called_once_with(fake_group)
@mock.patch.object(firewall.NoopFirewallDriver,
'refresh_instance_security_rules', create=True)
def test_refresh_security_group_rules(self, mock_risr):
fake_group = 'fake-security-group-members'
self.driver.refresh_instance_security_rules(fake_group)
mock_risr.assert_called_once_with(fake_group)
@mock.patch.object(ironic_driver.IronicDriver, '_wait_for_active')
@mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
@mock.patch.object(FAKE_CLIENT.node, 'set_provision_state')
@mock.patch.object(ironic_driver.IronicDriver, '_add_driver_fields')
@mock.patch.object(FAKE_CLIENT.node, 'get')
@mock.patch.object(objects.Instance, 'save')
def _test_rebuild(self, mock_save, mock_get, mock_driver_fields,
mock_set_pstate, mock_looping, mock_wait_active,
preserve=False):
node_uuid = uuidutils.generate_uuid()
node = ironic_utils.get_test_node(uuid=node_uuid,
instance_uuid=self.instance_uuid,
instance_type_id=5)
mock_get.return_value = node
image_meta = ironic_utils.get_test_image_meta()
flavor_id = 5
flavor = objects.Flavor(flavor_id=flavor_id, name='baremetal')
instance = fake_instance.fake_instance_obj(self.ctx,
uuid=self.instance_uuid,
node=node_uuid,
instance_type_id=flavor_id)
instance.flavor = flavor
fake_looping_call = FakeLoopingCall()
mock_looping.return_value = fake_looping_call
self.driver.rebuild(
context=self.ctx, instance=instance, image_meta=image_meta,
injected_files=None, admin_password=None, bdms=None,
detach_block_devices=None, attach_block_devices=None,
preserve_ephemeral=preserve)
mock_save.assert_called_once_with(
expected_task_state=[task_states.REBUILDING])
mock_driver_fields.assert_called_once_with(
node, instance,
test.MatchType(objects.ImageMeta),
flavor, preserve)
mock_set_pstate.assert_called_once_with(node_uuid,
ironic_states.REBUILD)
mock_looping.assert_called_once_with(mock_wait_active,
FAKE_CLIENT_WRAPPER,
instance)
fake_looping_call.start.assert_called_once_with(
interval=CONF.ironic.api_retry_interval)
fake_looping_call.wait.assert_called_once_with()
def test_rebuild_preserve_ephemeral(self):
self._test_rebuild(preserve=True)
def test_rebuild_no_preserve_ephemeral(self):
self._test_rebuild(preserve=False)
@mock.patch.object(FAKE_CLIENT.node, 'set_provision_state')
@mock.patch.object(ironic_driver.IronicDriver, '_add_driver_fields')
@mock.patch.object(FAKE_CLIENT.node, 'get')
@mock.patch.object(objects.Instance, 'save')
def test_rebuild_failures(self, mock_save, mock_get, mock_driver_fields,
mock_set_pstate):
node_uuid = uuidutils.generate_uuid()
node = ironic_utils.get_test_node(uuid=node_uuid,
instance_uuid=self.instance_uuid,
instance_type_id=5)
mock_get.return_value = node
image_meta = ironic_utils.get_test_image_meta()
flavor_id = 5
flavor = objects.Flavor(flavor_id=flavor_id, name='baremetal')
instance = fake_instance.fake_instance_obj(self.ctx,
uuid=self.instance_uuid,
node=node_uuid,
instance_type_id=flavor_id)
instance.flavor = flavor
exceptions = [
exception.NovaException(),
ironic_exception.BadRequest(),
ironic_exception.InternalServerError(),
]
for e in exceptions:
mock_set_pstate.side_effect = e
self.assertRaises(exception.InstanceDeployFailure,
self.driver.rebuild,
context=self.ctx, instance=instance, image_meta=image_meta,
injected_files=None, admin_password=None, bdms=None,
detach_block_devices=None, attach_block_devices=None)
@mock.patch.object(instance_metadata, 'InstanceMetadata')
@mock.patch.object(configdrive, 'ConfigDriveBuilder')
class IronicDriverGenerateConfigDriveTestCase(test.NoDBTestCase):
@mock.patch.object(cw, 'IronicClientWrapper',
lambda *_: FAKE_CLIENT_WRAPPER)
def setUp(self):
super(IronicDriverGenerateConfigDriveTestCase, self).setUp()
self.flags(**IRONIC_FLAGS)
self.driver = ironic_driver.IronicDriver(None)
self.driver.virtapi = fake.FakeVirtAPI()
self.ctx = nova_context.get_admin_context()
node_uuid = uuidutils.generate_uuid()
self.node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid)
self.instance = fake_instance.fake_instance_obj(self.ctx,
node=node_uuid)
self.network_info = utils.get_test_network_info()
def test_generate_configdrive(self, mock_cd_builder, mock_instance_meta):
mock_instance_meta.return_value = 'fake-instance'
mock_make_drive = mock.MagicMock(make_drive=lambda *_: None)
mock_cd_builder.return_value.__enter__.return_value = mock_make_drive
self.driver._generate_configdrive(self.instance, self.node,
self.network_info)
mock_cd_builder.assert_called_once_with(instance_md='fake-instance')
mock_instance_meta.assert_called_once_with(self.instance,
network_info=self.network_info, extra_md={}, content=None)
def test_generate_configdrive_fail(self, mock_cd_builder,
mock_instance_meta):
mock_cd_builder.side_effect = exception.ConfigDriveMountFailed(
operation='foo', error='error')
mock_instance_meta.return_value = 'fake-instance'
mock_make_drive = mock.MagicMock(make_drive=lambda *_: None)
mock_cd_builder.return_value.__enter__.return_value = mock_make_drive
self.assertRaises(exception.ConfigDriveMountFailed,
self.driver._generate_configdrive,
self.instance, self.node, self.network_info)
mock_cd_builder.assert_called_once_with(instance_md='fake-instance')
mock_instance_meta.assert_called_once_with(self.instance,
network_info=self.network_info, extra_md={}, content=None)
|
suncycheng/intellij-community
|
refs/heads/master
|
python/testData/refactoring/invertBoolean/my_file.py
|
83
|
BOOL = True
|
tmerrick1/spack
|
refs/heads/develop
|
var/spack/repos/builtin/packages/r-partykit/package.py
|
5
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RPartykit(RPackage):
"""A toolkit with infrastructure for representing, summarizing, and
visualizing tree-structured regression and classification models. This
unified infrastructure can be used for reading/coercing tree models from
different sources ('rpart', 'RWeka', 'PMML') yielding objects that share
functionality for print()/plot()/predict() methods. Furthermore, new and
improved reimplementations of conditional inference trees (ctree()) and
model-based recursive partitioning (mob()) from the 'party' package are
provided based on the new infrastructure."""
homepage = "http://partykit.r-forge.r-project.org/partykit"
url = "https://cran.r-project.org/src/contrib/partykit_1.1-1.tar.gz"
list_url = "https://cran.r-project.org/src/contrib/Archive/partykit"
version('1.1-1', '8fcb31d73ec1b8cd3bcd9789639a9277')
depends_on('r-survival', type=('build', 'run'))
depends_on('r-formula', type=('build', 'run'))
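# Hedged usage note (added; not part of the upstream recipe): with this package
# on the Spack repo path, the pinned release above would typically be built via
#   spack install [email protected]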
|
Flaburgan/the-federation.info
|
refs/heads/master
|
manage.py
|
4
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.local")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
dushu1203/chromium.src
|
refs/heads/nw12
|
third_party/cython/src/Cython/Build/Cythonize.py
|
90
|
#!/usr/bin/env python
import os
import shutil
import tempfile
from distutils.core import setup
from Cython.Build.Dependencies import cythonize, extended_iglob
from Cython.Utils import is_package_dir
from Cython.Compiler import Options
try:
import multiprocessing
parallel_compiles = int(multiprocessing.cpu_count() * 1.5)
except ImportError:
multiprocessing = None
parallel_compiles = 0
class _FakePool(object):
def map_async(self, func, args):
from itertools import imap
for _ in imap(func, args):
pass
def close(self): pass
def terminate(self): pass
def join(self): pass
def parse_directives(option, name, value, parser):
dest = option.dest
old_directives = dict(getattr(parser.values, dest,
Options.directive_defaults))
directives = Options.parse_directive_list(
value, relaxed_bool=True, current_settings=old_directives)
setattr(parser.values, dest, directives)
def parse_options(option, name, value, parser):
dest = option.dest
options = dict(getattr(parser.values, dest, {}))
for opt in value.split(','):
if '=' in opt:
n, v = opt.split('=', 1)
v = v.lower() not in ('false', 'f', '0', 'no')
else:
n, v = opt, True
options[n] = v
setattr(parser.values, dest, options)
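# Illustrative note (added; not part of the upstream source): every value passed
# through -s/--option is coerced to a boolean by the truthiness check above, so
#   cythonize -s annotate=true,embedsignature=false foo.pyx
# leaves parser.values.options == {'annotate': True, 'embedsignature': False}.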
def find_package_base(path):
base_dir, package_path = os.path.split(path)
while os.path.isfile(os.path.join(base_dir, '__init__.py')):
base_dir, parent = os.path.split(base_dir)
package_path = '%s/%s' % (parent, package_path)
return base_dir, package_path
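# Worked example (added for clarity; assumes /base/pkg/__init__.py exists while
# /base/__init__.py does not): find_package_base('/base/pkg/mod.py') walks up
# past the package directory and returns ('/base', 'pkg/mod.py').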
def cython_compile(path_pattern, options):
pool = None
paths = map(os.path.abspath, extended_iglob(path_pattern))
try:
for path in paths:
if options.build_inplace:
base_dir = path
while not os.path.isdir(base_dir) or is_package_dir(base_dir):
base_dir = os.path.dirname(base_dir)
else:
base_dir = None
if os.path.isdir(path):
# recursively compiling a package
paths = [os.path.join(path, '**', '*.%s' % ext)
for ext in ('py', 'pyx')]
else:
# assume it's a file(-like thing)
paths = [path]
ext_modules = cythonize(
paths,
nthreads=options.parallel,
exclude_failures=options.keep_going,
exclude=options.excludes,
compiler_directives=options.directives,
force=options.force,
quiet=options.quiet,
**options.options)
if ext_modules and options.build:
if len(ext_modules) > 1 and options.parallel > 1:
if pool is None:
try:
pool = multiprocessing.Pool(options.parallel)
except OSError:
pool = _FakePool()
pool.map_async(run_distutils, [
(base_dir, [ext]) for ext in ext_modules])
else:
run_distutils((base_dir, ext_modules))
except:
if pool is not None:
pool.terminate()
raise
else:
if pool is not None:
pool.close()
pool.join()
def run_distutils(args):
base_dir, ext_modules = args
script_args = ['build_ext', '-i']
cwd = os.getcwd()
temp_dir = None
try:
if base_dir:
os.chdir(base_dir)
temp_dir = tempfile.mkdtemp(dir=base_dir)
script_args.extend(['--build-temp', temp_dir])
setup(
script_name='setup.py',
script_args=script_args,
ext_modules=ext_modules,
)
finally:
if base_dir:
os.chdir(cwd)
if temp_dir and os.path.isdir(temp_dir):
shutil.rmtree(temp_dir)
def parse_args(args):
from optparse import OptionParser
parser = OptionParser(usage='%prog [options] [sources and packages]+')
parser.add_option('-X', '--directive', metavar='NAME=VALUE,...', dest='directives',
type=str, action='callback', callback=parse_directives, default={},
help='set a compiler directive')
parser.add_option('-s', '--option', metavar='NAME=VALUE', dest='options',
type=str, action='callback', callback=parse_options, default={},
help='set a cythonize option')
parser.add_option('-3', dest='python3_mode', action='store_true',
help='use Python 3 syntax mode by default')
parser.add_option('-x', '--exclude', metavar='PATTERN', dest='excludes',
action='append', default=[],
help='exclude certain file patterns from the compilation')
parser.add_option('-b', '--build', dest='build', action='store_true',
help='build extension modules using distutils')
parser.add_option('-i', '--inplace', dest='build_inplace', action='store_true',
help='build extension modules in place using distutils (implies -b)')
parser.add_option('-j', '--parallel', dest='parallel', metavar='N',
type=int, default=parallel_compiles,
                      help=('run builds in N parallel jobs (default: %d)' %
                            (parallel_compiles or 1)))
parser.add_option('-f', '--force', dest='force', action='store_true',
help='force recompilation')
parser.add_option('-q', '--quiet', dest='quiet', action='store_true',
help='be less verbose during compilation')
parser.add_option('--lenient', dest='lenient', action='store_true',
help='increase Python compatibility by ignoring some compile time errors')
parser.add_option('-k', '--keep-going', dest='keep_going', action='store_true',
help='compile as much as possible, ignore compilation failures')
options, args = parser.parse_args(args)
if not args:
parser.error("no source files provided")
if options.build_inplace:
options.build = True
if multiprocessing is None:
options.parallel = 0
if options.python3_mode:
options.options['language_level'] = 3
return options, args
def main(args=None):
options, paths = parse_args(args)
if options.lenient:
# increase Python compatibility by ignoring compile time errors
Options.error_on_unknown_names = False
Options.error_on_uninitialized = False
for path in paths:
cython_compile(path, options)
if __name__ == '__main__':
main()
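# Hedged usage sketch (added; not part of the original module): installed as the
# `cythonize` console script, an in-place parallel build exercises the options
# parsed above, for example:
#   cythonize -i -j 4 src/**/*.pyx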
|
DimensionDataCBUSydney/libcloud
|
refs/heads/trunk
|
docs/examples/compute/cloudstack/instantiate_driver_url.py
|
63
|
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
apikey = 'your api key'
secretkey = 'your secret key'
url = 'http://example.com/path/to/api'
Driver = get_driver(Provider.CLOUDSTACK)
conn = Driver(key=apikey, secret=secretkey, url=url)
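# Hedged follow-up (added; not part of the original docs example): once the
# driver is instantiated, the standard libcloud compute API is available, e.g.
nodes = conn.list_nodes()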
|
ComNets-Bremen/TWIN
|
refs/heads/master
|
Sprinkler/global_variables.py
|
1
|
#!/usr/bin/python3
# TWIN node - A Flexible Testbed for Wireless Sensor Networks
# Copyright (C) 2016, Communication Networks, University of Bremen, Germany
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; version 3 of the License.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, see <http://www.gnu.org/licenses/>
#
# This file is part of TWIN
"""Global Variables Used Through Out the Module
"""
# Version for Trickle Algorithm
# initial Version value is 0
VERSION = 0
# Instance of Class trickleTimer
tt = None
# Instance of Multicast Socket
mcastSock = None
# Multicast to all ipv6-nodes
# Link-Local IPv6 Address
MCAST_GRP = "ff02::1"
# Multicast Port
# Chosen as an arbitrary value
MCAST_PORT = 30001
# TTL value for Multicasting
MCAST_TTL = 2
# Luby-Transform Block Size
BLOCKSIZE = 1452
# Filename for Encoding
# Default name chosen due to design
FILENAME = "incomingData0.tar"
# Path Variable for the Filename
# Path is for Raspberry Pi
PATH = "/home/pi/incoming"
# Dictionary Cache for a pseudo-route table
# Python List : JSON file for neighboring nodes
rCache = {'fountain': '', 'neighbors': []}
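# Hedged sketch (added; not part of the original module): elsewhere in the
# package, mcastSock is presumably initialised from the constants above along
# these lines:
#
#   import socket
#   import struct
#   sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
#   sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
#   sock.bind(('', MCAST_PORT))
#   mreq = socket.inet_pton(socket.AF_INET6, MCAST_GRP) + struct.pack('@I', 0)
#   sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_JOIN_GROUP, mreq)
#   sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_MULTICAST_HOPS, MCAST_TTL)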
|
Maximilian-Reuter/SickRage-1
|
refs/heads/master
|
lib/hachoir_core/tools.py
|
58
|
# -*- coding: utf-8 -*-
"""
Various utilities.
"""
from hachoir_core.i18n import _, ngettext
import re
import stat
from datetime import datetime, timedelta, MAXYEAR
from warnings import warn
def deprecated(comment=None):
"""
This is a decorator which can be used to mark functions
    as deprecated. It will result in a warning being emitted
when the function is used.
Examples: ::
        @deprecated()
def oldfunc(): ...
@deprecated("use newfunc()!")
def oldfunc2(): ...
Code from: http://code.activestate.com/recipes/391367/
"""
def _deprecated(func):
def newFunc(*args, **kwargs):
message = "Call to deprecated function %s" % func.__name__
if comment:
message += ": " + comment
warn(message, category=DeprecationWarning, stacklevel=2)
return func(*args, **kwargs)
newFunc.__name__ = func.__name__
newFunc.__doc__ = func.__doc__
newFunc.__dict__.update(func.__dict__)
return newFunc
return _deprecated
def paddingSize(value, align):
"""
Compute size of a padding field.
>>> paddingSize(31, 4)
1
>>> paddingSize(32, 4)
0
>>> paddingSize(33, 4)
3
Note: (value + paddingSize(value, align)) == alignValue(value, align)
"""
if value % align != 0:
return align - (value % align)
else:
return 0
def alignValue(value, align):
"""
Align a value to next 'align' multiple.
>>> alignValue(31, 4)
32
>>> alignValue(32, 4)
32
>>> alignValue(33, 4)
36
Note: alignValue(value, align) == (value + paddingSize(value, align))
"""
if value % align != 0:
return value + align - (value % align)
else:
return value
def timedelta2seconds(delta):
"""
    Convert a datetime.timedelta() object to a number of seconds
    (floating point number).
>>> timedelta2seconds(timedelta(seconds=2, microseconds=40000))
2.04
>>> timedelta2seconds(timedelta(minutes=1, milliseconds=250))
60.25
"""
return delta.microseconds / 1000000.0 \
+ delta.seconds + delta.days * 60*60*24
def humanDurationNanosec(nsec):
"""
    Convert a duration in nanoseconds to human natural representation.
    Returns a unicode string.
>>> humanDurationNanosec(60417893)
u'60.42 ms'
"""
# Nano second
if nsec < 1000:
return u"%u nsec" % nsec
# Micro seconds
usec, nsec = divmod(nsec, 1000)
if usec < 1000:
return u"%.2f usec" % (usec+float(nsec)/1000)
# Milli seconds
msec, usec = divmod(usec, 1000)
if msec < 1000:
return u"%.2f ms" % (msec + float(usec)/1000)
return humanDuration(msec)
def humanDuration(delta):
"""
    Convert a duration in milliseconds to human natural representation.
    Returns a unicode string.
>>> humanDuration(0)
u'0 ms'
>>> humanDuration(213)
u'213 ms'
>>> humanDuration(4213)
u'4 sec 213 ms'
>>> humanDuration(6402309)
u'1 hour 46 min 42 sec'
"""
if not isinstance(delta, timedelta):
delta = timedelta(microseconds=delta*1000)
# Milliseconds
text = []
if 1000 <= delta.microseconds:
text.append(u"%u ms" % (delta.microseconds//1000))
# Seconds
minutes, seconds = divmod(delta.seconds, 60)
hours, minutes = divmod(minutes, 60)
if seconds:
text.append(u"%u sec" % seconds)
if minutes:
text.append(u"%u min" % minutes)
if hours:
text.append(ngettext("%u hour", "%u hours", hours) % hours)
# Days
years, days = divmod(delta.days, 365)
if days:
text.append(ngettext("%u day", "%u days", days) % days)
if years:
text.append(ngettext("%u year", "%u years", years) % years)
if 3 < len(text):
text = text[-3:]
elif not text:
return u"0 ms"
return u" ".join(reversed(text))
def humanFilesize(size):
"""
    Convert a file size in bytes to human natural representation.
It uses the values: 1 KB is 1024 bytes, 1 MB is 1024 KB, etc.
    The result is a unicode string.
>>> humanFilesize(1)
u'1 byte'
>>> humanFilesize(790)
u'790 bytes'
>>> humanFilesize(256960)
u'250.9 KB'
"""
if size < 10000:
return ngettext("%u byte", "%u bytes", size) % size
units = [_("KB"), _("MB"), _("GB"), _("TB")]
size = float(size)
divisor = 1024
for unit in units:
size = size / divisor
if size < divisor:
return "%.1f %s" % (size, unit)
return "%u %s" % (size, unit)
def humanBitSize(size):
"""
    Convert a size in bits to human classic representation.
It uses the values: 1 Kbit is 1000 bits, 1 Mbit is 1000 Kbit, etc.
    The result is a unicode string.
>>> humanBitSize(1)
u'1 bit'
>>> humanBitSize(790)
u'790 bits'
>>> humanBitSize(256960)
u'257.0 Kbit'
"""
divisor = 1000
if size < divisor:
return ngettext("%u bit", "%u bits", size) % size
units = [u"Kbit", u"Mbit", u"Gbit", u"Tbit"]
size = float(size)
for unit in units:
size = size / divisor
if size < divisor:
return "%.1f %s" % (size, unit)
return u"%u %s" % (size, unit)
def humanBitRate(size):
"""
Convert a bit rate to human classic representation. It uses humanBitSize()
    to convert the size into a human representation. The result is a unicode string.
>>> humanBitRate(790)
u'790 bits/sec'
>>> humanBitRate(256960)
u'257.0 Kbit/sec'
"""
return "".join((humanBitSize(size), "/sec"))
def humanFrequency(hertz):
"""
Convert a frequency in hertz to human classic representation.
    It uses the values: 1 kHz is 1000 Hz, 1 MHz is 1000 kHz, etc.
    The result is a unicode string.
>>> humanFrequency(790)
u'790 Hz'
>>> humanFrequency(629469)
u'629.5 kHz'
"""
divisor = 1000
if hertz < divisor:
return u"%u Hz" % hertz
units = [u"kHz", u"MHz", u"GHz", u"THz"]
hertz = float(hertz)
for unit in units:
hertz = hertz / divisor
if hertz < divisor:
return u"%.1f %s" % (hertz, unit)
return u"%s %s" % (hertz, unit)
regex_control_code = re.compile(r"([\x00-\x1f\x7f])")
controlchars = tuple({
# Don't use "\0", because "\0"+"0"+"1" = "\001" = "\1" (1 character)
    # Same reason not to use octal syntax ("\1")
ord("\n"): r"\n",
ord("\r"): r"\r",
ord("\t"): r"\t",
ord("\a"): r"\a",
ord("\b"): r"\b",
}.get(code, '\\x%02x' % code)
for code in xrange(128)
)
def makePrintable(data, charset, quote=None, to_unicode=False, smart=True):
r"""
Prepare a string to make it printable in the specified charset.
It escapes control characters. Characters with code bigger than 127
are escaped if data type is 'str' or if charset is "ASCII".
Examples with Unicode:
>>> aged = unicode("âgé", "UTF-8")
>>> repr(aged) # text type is 'unicode'
"u'\\xe2g\\xe9'"
>>> makePrintable("abc\0", "UTF-8")
'abc\\0'
>>> makePrintable(aged, "latin1")
'\xe2g\xe9'
>>> makePrintable(aged, "latin1", quote='"')
'"\xe2g\xe9"'
Examples with string encoded in latin1:
>>> aged_latin = unicode("âgé", "UTF-8").encode("latin1")
>>> repr(aged_latin) # text type is 'str'
"'\\xe2g\\xe9'"
>>> makePrintable(aged_latin, "latin1")
'\\xe2g\\xe9'
>>> makePrintable("", "latin1")
''
>>> makePrintable("a", "latin1", quote='"')
'"a"'
>>> makePrintable("", "latin1", quote='"')
'(empty)'
>>> makePrintable("abc", "latin1", quote="'")
"'abc'"
Control codes:
>>> makePrintable("\0\x03\x0a\x10 \x7f", "latin1")
'\\0\\3\\n\\x10 \\x7f'
Quote character may also be escaped (only ' and "):
>>> print makePrintable("a\"b", "latin-1", quote='"')
"a\"b"
>>> print makePrintable("a\"b", "latin-1", quote="'")
'a"b'
>>> print makePrintable("a'b", "latin-1", quote="'")
'a\'b'
"""
if data:
if not isinstance(data, unicode):
data = unicode(data, "ISO-8859-1")
charset = "ASCII"
data = regex_control_code.sub(
lambda regs: controlchars[ord(regs.group(1))], data)
if quote:
if quote in "\"'":
data = data.replace(quote, '\\' + quote)
data = ''.join((quote, data, quote))
elif quote:
data = "(empty)"
data = data.encode(charset, "backslashreplace")
if smart:
# Replace \x00\x01 by \0\1
data = re.sub(r"\\x0([0-7])(?=[^0-7]|$)", r"\\\1", data)
if to_unicode:
data = unicode(data, charset)
return data
def makeUnicode(text):
r"""
    Convert text to a printable Unicode string. For a byte string (type 'str'),
    the charset ISO-8859-1 is used for the conversion to Unicode.
>>> makeUnicode(u'abc\0d')
u'abc\\0d'
>>> makeUnicode('a\xe9')
u'a\xe9'
"""
if isinstance(text, str):
text = unicode(text, "ISO-8859-1")
elif not isinstance(text, unicode):
try:
text = unicode(text)
except UnicodeError:
try:
text = str(text)
except Exception:
text = repr(text)
return makeUnicode(text)
text = regex_control_code.sub(
lambda regs: controlchars[ord(regs.group(1))], text)
text = re.sub(r"\\x0([0-7])(?=[^0-7]|$)", r"\\\1", text)
return text
def binarySearch(seq, cmp_func):
"""
Search a value in a sequence using binary search. Returns index of the
value, or None if the value doesn't exist.
    'seq' has to be sorted in ascending order according to the
    comparison function;
'cmp_func', prototype func(x), is the compare function:
- Return strictly positive value if we have to search forward ;
- Return strictly negative value if we have to search backward ;
- Otherwise (zero) we got the value.
>>> # Search number 5 (search forward)
... binarySearch([0, 4, 5, 10], lambda x: 5-x)
2
>>> # Backward search
... binarySearch([10, 5, 4, 0], lambda x: x-5)
1
"""
lower = 0
upper = len(seq)
while lower < upper:
index = (lower + upper) >> 1
diff = cmp_func(seq[index])
if diff < 0:
upper = index
elif diff > 0:
lower = index + 1
else:
return index
return None
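# lowerBound(): binary search helper. Assuming 'seq' is partitioned so that
# every element satisfying cmp_func() comes before every element that does
# not, it returns the index of the first element for which cmp_func() is
# false (similar in spirit to C++ std::lower_bound).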
def lowerBound(seq, cmp_func):
f = 0
l = len(seq)
while l > 0:
h = l >> 1
m = f + h
if cmp_func(seq[m]):
f = m
f += 1
l -= h + 1
else:
l = h
return f
def humanUnixAttributes(mode):
"""
    Convert Unix file attributes (or "file mode") to a unicode string.
Original source code:
http://cvs.savannah.gnu.org/viewcvs/coreutils/lib/filemode.c?root=coreutils
>>> humanUnixAttributes(0644)
u'-rw-r--r-- (644)'
>>> humanUnixAttributes(02755)
u'-rwxr-sr-x (2755)'
"""
def ftypelet(mode):
if stat.S_ISREG (mode) or not stat.S_IFMT(mode):
return '-'
if stat.S_ISBLK (mode): return 'b'
if stat.S_ISCHR (mode): return 'c'
if stat.S_ISDIR (mode): return 'd'
if stat.S_ISFIFO(mode): return 'p'
if stat.S_ISLNK (mode): return 'l'
if stat.S_ISSOCK(mode): return 's'
return '?'
chars = [ ftypelet(mode), 'r', 'w', 'x', 'r', 'w', 'x', 'r', 'w', 'x' ]
for i in xrange(1, 10):
if not mode & 1 << 9 - i:
chars[i] = '-'
if mode & stat.S_ISUID:
if chars[3] != 'x':
chars[3] = 'S'
else:
chars[3] = 's'
if mode & stat.S_ISGID:
if chars[6] != 'x':
chars[6] = 'S'
else:
chars[6] = 's'
if mode & stat.S_ISVTX:
if chars[9] != 'x':
chars[9] = 'T'
else:
chars[9] = 't'
return u"%s (%o)" % (''.join(chars), mode)
def createDict(data, index):
"""
    Create a new dictionary from a dictionary of key=>values:
    just keep value number 'index' from each value tuple.
>>> data={10: ("dix", 100, "a"), 20: ("vingt", 200, "b")}
>>> createDict(data, 0)
{10: 'dix', 20: 'vingt'}
>>> createDict(data, 2)
{10: 'a', 20: 'b'}
"""
return dict( (key,values[index]) for key, values in data.iteritems() )
# Start of UNIX timestamp (Epoch): 1st January 1970 at 00:00
UNIX_TIMESTAMP_T0 = datetime(1970, 1, 1)
def timestampUNIX(value):
"""
    Convert a UNIX (32-bit) timestamp to a datetime object. The timestamp value
    is the number of seconds since the 1st January 1970 at 00:00. Maximum
    value is 2147483647: 19 January 2038 at 03:14:07.
    May raise ValueError for an invalid value: value has to be in 0..2147483647.
>>> timestampUNIX(0)
datetime.datetime(1970, 1, 1, 0, 0)
>>> timestampUNIX(1154175644)
datetime.datetime(2006, 7, 29, 12, 20, 44)
>>> timestampUNIX(1154175644.37)
datetime.datetime(2006, 7, 29, 12, 20, 44, 370000)
>>> timestampUNIX(2147483647)
datetime.datetime(2038, 1, 19, 3, 14, 7)
"""
if not isinstance(value, (float, int, long)):
raise TypeError("timestampUNIX(): an integer or float is required")
if not(0 <= value <= 2147483647):
raise ValueError("timestampUNIX(): value have to be in 0..2147483647")
return UNIX_TIMESTAMP_T0 + timedelta(seconds=value)
# Start of Macintosh timestamp: 1st January 1904 at 00:00
MAC_TIMESTAMP_T0 = datetime(1904, 1, 1)
def timestampMac32(value):
"""
    Convert a Mac (32-bit) timestamp to a datetime object. The value is the
    number of seconds since the 1st January 1904 (valid up to 2040).
>>> timestampMac32(0)
datetime.datetime(1904, 1, 1, 0, 0)
>>> timestampMac32(2843043290)
datetime.datetime(1994, 2, 2, 14, 14, 50)
"""
if not isinstance(value, (float, int, long)):
raise TypeError("an integer or float is required")
if not(0 <= value <= 4294967295):
return _("invalid Mac timestamp (%s)") % value
return MAC_TIMESTAMP_T0 + timedelta(seconds=value)
def durationWin64(value):
"""
    Convert a Windows 64-bit duration to a timedelta object. The duration is
    a 64-bit number of 100 ns intervals. See also timestampWin64().
>>> str(durationWin64(1072580000))
'0:01:47.258000'
>>> str(durationWin64(2146280000))
'0:03:34.628000'
"""
if not isinstance(value, (float, int, long)):
raise TypeError("an integer or float is required")
if value < 0:
raise ValueError("value have to be a positive or nul integer")
return timedelta(microseconds=value/10)
# Start of 64-bit Windows timestamp: 1st January 1601 at 00:00
WIN64_TIMESTAMP_T0 = datetime(1601, 1, 1, 0, 0, 0)
def timestampWin64(value):
"""
    Convert a Windows 64-bit timestamp to a datetime object. The timestamp is
    a 64-bit number which represents the number of 100 ns intervals since the
    1st January 1601 at 00:00.
    See also durationWin64(). Maximum date is 28 May 60056.
>>> timestampWin64(0)
datetime.datetime(1601, 1, 1, 0, 0)
>>> timestampWin64(127840491566710000)
datetime.datetime(2006, 2, 10, 12, 45, 56, 671000)
"""
try:
return WIN64_TIMESTAMP_T0 + durationWin64(value)
except OverflowError:
raise ValueError(_("date newer than year %s (value=%s)") % (MAXYEAR, value))
# Start of 60-bit UUID timestamp: 15 October 1582 at 00:00
UUID60_TIMESTAMP_T0 = datetime(1582, 10, 15, 0, 0, 0)
def timestampUUID60(value):
"""
    Convert a UUID 60-bit timestamp to a datetime object. The timestamp is
    a 60-bit number which represents the number of 100 ns intervals since
    the 15 October 1582 at 00:00.
>>> timestampUUID60(0)
datetime.datetime(1582, 10, 15, 0, 0)
>>> timestampUUID60(130435676263032368)
datetime.datetime(1996, 2, 14, 5, 13, 46, 303236)
"""
if not isinstance(value, (float, int, long)):
raise TypeError("an integer or float is required")
if value < 0:
raise ValueError("value have to be a positive or nul integer")
try:
return UUID60_TIMESTAMP_T0 + timedelta(microseconds=value/10)
except OverflowError:
raise ValueError(_("timestampUUID60() overflow (value=%s)") % value)
def humanDatetime(value, strip_microsecond=True):
"""
Convert a timestamp to Unicode string: use ISO format with space separator.
>>> humanDatetime( datetime(2006, 7, 29, 12, 20, 44) )
u'2006-07-29 12:20:44'
>>> humanDatetime( datetime(2003, 6, 30, 16, 0, 5, 370000) )
u'2003-06-30 16:00:05'
>>> humanDatetime( datetime(2003, 6, 30, 16, 0, 5, 370000), False )
u'2003-06-30 16:00:05.370000'
"""
text = unicode(value.isoformat())
text = text.replace('T', ' ')
if strip_microsecond and "." in text:
text = text.split(".")[0]
return text
NEWLINES_REGEX = re.compile("\n+")
def normalizeNewline(text):
r"""
Replace Windows and Mac newlines with Unix newlines.
Replace multiple consecutive newlines with one newline.
>>> normalizeNewline('a\r\nb')
'a\nb'
>>> normalizeNewline('a\r\rb')
'a\nb'
>>> normalizeNewline('a\n\nb')
'a\nb'
"""
text = text.replace("\r\n", "\n")
text = text.replace("\r", "\n")
return NEWLINES_REGEX.sub("\n", text)
|
impedimentToProgress/Ratchet
|
refs/heads/master
|
llvm/utils/lit/tests/shell-parsing.py
|
131
|
# Just run the ShUtil unit tests.
#
# RUN: %{python} -m lit.ShUtil
|
caterinaurban/Typpete
|
refs/heads/master
|
typpete/unittests/inference/except_clause.py
|
1
|
class MyException(Exception):
def __init__(self):
self.val = 15
try:
a = 23
except MyException as e:
b = e.val
a = b + 2
# a := int
# b := int
|
kdarnell/hydrateflash
|
refs/heads/master
|
flashalgorithm.py
|
1
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Algorithm for determining phase stability given P, T, and composition
The algorithmic framework presented here constitutes a hydrate
flash algorithm, such that the amount and presence of hydrate can
be predicted given simple input. It follows the general procedure
of CSMGem.
Functions
----------
stability_func :
Calculation of phase stability compatibility
objective :
Calculation to determine objective function that must be minimized
jacobian :
Calculation to determine jacobian of objective function that must be minimized
ideal_LV :
Calculation of ideal partition coefficients between liquid and vapor phases
ideal_VAq :
Calculation of ideal partition coefficients between vapor and aqueous phases
ideal_IceAq :
Calculation of ideal partition coefficients between ice and aqueous phases
ideal_VHs1 :
Calculation of ideal partition coefficients between vapor and structure 1 hydrate phases
ideal_VHs2 :
Calculation of ideal partition coefficients between vapor and structure 2 hydrate phases
make_ideal_K_allmat :
Use ideal partition coefficient functions to construct a matrix of coefficients
"""
import numpy as np
import time
import component_properties as cp
import aq_hb_eos as aq
import h_vdwpm_eos as h
import vlhc_srk_eos as hc
import pdb
"""Mapping from columns of K_all_mat to corresponding partition coefficient
First phase is numerator, second phase is denominator"""
K_dict = {0: ('lhc', 'vapor'),
1: ('vapor', 'aqueous'),
2: ('vapor', 's1'),
3: ('vapor', 's2'),
          4: ('ice', 'aqueous')}
"""Transformation from K_all_mat to partition coefficients of the form
K_{j, ref_phase}, where keys in K_transform refer to the reference phase,
subsequent keys refer to phase j, and the tuple describes the algebraic
manipulation of K_all_mat. 9 refers to the value 1; 0-4 refer to a
column"""
K_transform = {'aqueous': {'vapor': (1),
'lhc': (1, 0),
'aqueous': (9),
'ice': (4),
's1': (1, 2),
's2': (1, 3)},
'vapor': {'vapor': (9),
'lhc': (9, 0),
'aqueous': (9, 1),
'ice': (4, 1),
's1': (9, 2),
's2': (9, 3)},
'lhc': {'vapor': (0),
'lhc': (9),
'aqueous': (0, 1),
'ice': ((0, 4), 1),
's1': (0, 2),
's2': (0, 3)},
'ice': {'vapor': (1, 4),
'lhc': (1, (0, 4)),
'aqueous': (9, 4),
'ice': (9),
's1': (1, (2, 4)),
's2': (1, (3, 4))}}
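# How make_ideal_K_mat() (further below) reads the K_transform entries:
#   9            -> the constant 1 (the phase is the reference phase itself)
#   a single int -> that column of K_all_mat
#   (a, b)       -> K_all_mat[:, a] / K_all_mat[:, b]; 9 in the numerator
#                   means 1 / K_all_mat[:, b], and a nested tuple means the
#                   corresponding columns are multiplied before the division.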
def stability_func(alpha, theta):
    """Constraint function that must equal zero at the solution
Parameters
----------
alpha : float, numpy array
        Molar phase fraction
theta : float, numpy array
Stability of phase
Returns
----------
Calculation output of size identical to alpha, theta
"""
return alpha * theta / (alpha + theta)
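# The objective() / jacobian() pair below implements the multiphase
# formulation also referred to later in this module as the technique of
# Gupta: for each phase j the residual is
#     E_j = sum_c z_c * (K_cj * exp(theta_j) - 1)
#               / (1 + sum_k alpha_k * (K_ck * exp(theta_k) - 1))
# together with the constraint stability_func(alpha_j, theta_j) = 0, so that
# each phase is either present (alpha_j > 0, theta_j = 0) or unstable
# (alpha_j = 0, theta_j > 0).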
def objective(z, alpha, theta, K):
"""Objective function to be minimized
Parameters
----------
z : list, numpy array
Total composition of each component with size Nc
alpha : list, numpy array
Molar phase fractions with size Np
theta : list, numpy array
Stability of phases with size Np
K : list, numpy array
Partition coefficients for each component
in each phase with size Nc x Np
Returns
----------
cost : numpy array
Numerical "cost" or residual of size 2*Np
"""
if type(z) != np.ndarray:
z = np.asarray(z)
if type(alpha) != np.ndarray:
alpha = np.asarray(alpha)
if type(theta) != np.ndarray:
theta = np.asarray(theta)
if type(K) != np.ndarray:
K = np.asarray(K)
numerator = z[:, np.newaxis] * (K * np.exp(theta[np.newaxis, :]) - 1)
denominator = 1 + np.sum(
alpha[np.newaxis, :]
* (K * np.exp(theta[np.newaxis, :]) - 1),
axis=1)
e_cost = np.sum(numerator / denominator[:, np.newaxis], axis=0)
y_cost = stability_func(alpha, theta)
cost = np.concatenate((e_cost, y_cost))
return cost
def jacobian(z, alpha, theta, K):
"""Jacobian of objective function to be minimized
Parameters
----------
z : list, numpy array
Total composition of each component with size Nc
alpha : list, numpy array
Molar phase fractions with size Np
theta : list, numpy array
Stability of phases with size Np
K : list, numpy array
Partition coefficients for each component
in each phase with size Nc x Np
Returns
----------
jacobian : numpy array
Jacobian matrix of objective function of size 2*Np x 2 *Np
"""
if type(z) != np.ndarray:
z = np.asarray(z)
if type(alpha) != np.ndarray:
alpha = np.asarray(alpha)
if type(theta) != np.ndarray:
theta = np.asarray(theta)
if type(K) != np.ndarray:
K = np.asarray(K)
stability_mat = (K*np.exp(theta[np.newaxis, :]) - 1.0)
alpha_numerator = (
z[:, np.newaxis, np.newaxis]
* stability_mat[:, :, np.newaxis]
* stability_mat[:, np.newaxis, :])
theta_numerator = (
z[:, np.newaxis, np.newaxis]
* stability_mat[:, :, np.newaxis]
* K[:, np.newaxis,:]
* alpha[np.newaxis, np.newaxis, :]
* np.exp(theta[np.newaxis, np.newaxis, :]))
denominator = (
1.0 + (np.sum(alpha[np.newaxis, :]
* stability_mat,
axis=1))
)**2
jac_alpha = -np.sum(
alpha_numerator / denominator[:, np.newaxis, np.newaxis],
axis = 0)
jac_theta = -np.sum(
theta_numerator / denominator[:, np.newaxis, np.newaxis],
axis = 0)
diag_denom = 1.0 + np.sum((K * np.exp(theta[np.newaxis, :]) - 1.0)
* alpha[np.newaxis, :],
axis=1)
diag = np.sum(z[:, np.newaxis] * K * np.exp(theta[np.newaxis, :])
/ diag_denom[:, np.newaxis],
axis=0)
jac_theta += np.diag(diag)
jac_cost = np.concatenate((jac_alpha, jac_theta), axis=1)
jac_alpha_y = (theta/(alpha + theta)
- alpha*theta/(alpha + theta)**2)
jac_theta_y = (alpha/(alpha + theta)
- alpha*theta/(alpha + theta)**2)
jac_stability = np.concatenate((np.diag(jac_alpha_y),
np.diag(jac_theta_y)),
axis=1)
jacobian = np.concatenate((jac_cost, jac_stability), axis=0)
return jacobian
#TODO: Convert all the ideal stuff into a separate class.
def ideal_LV(compobjs, T, P):
"""Ideal partition coefficients for liquid and vapor phases
Parameters
----------
compobjs : list, tuple
List of components
T : float
Temperature in Kelvin
P : float
Pressure in bar
Returns
----------
K : numpy array
Array of partition coefficients for each component
"""
if not hasattr(compobjs, '__iter__'):
compobjs = [compobjs]
K = np.ones([len(compobjs)])
for ii, comp in enumerate(compobjs):
if comp.compname != 'h2o':
K[ii] = ((comp.Pc/P)
* np.exp(5.373*(1.0 + comp.SRK['omega'])
*(1 - comp.Tc/T)))
else:
K[ii] = (-133.67 + 0.63288*T)/P + 3.19211e-3*P
return K
def ideal_VAq(compobjs, T, P):
"""Ideal partition coefficients for vapor and aqueous phases
Parameters
----------
compobjs : list, tuple
List of components
T : float
Temperature in Kelvin
P : float
Pressure in bar
Returns
----------
K : numpy array
Array of partition coefficients for each component
"""
if not hasattr(compobjs, '__iter__'):
compobjs = [compobjs]
K = np.ones([len(compobjs)])
for ii, comp in enumerate(compobjs):
if comp.compname != 'h2o':
gamma_inf = np.exp(0.688 - 0.642*comp.N_carb)
a1 = (5.927140 - 6.096480*(comp.Tc/T)
- 1.288620*np.log(T/comp.Tc) + 0.169347*T**6/comp.Tc**6)
a2 = (15.25180 - 15.68750*(comp.Tc/T)
- 13.47210*np.log(T/comp.Tc) + 0.43577*T**6/comp.Tc**6)
P_sat = comp.Pc*np.exp(a1 + comp.SRK['omega']*a2)
else:
gamma_inf = 1.0
P_sat = np.exp(12.048399 - 4030.18425/(T + -38.15))
K[ii] = (P_sat/P)*gamma_inf
return K
def ideal_IceAq(compobjs, T, P):
"""Ideal partition coefficients for ice and aqueous phases
Parameters
----------
compobjs : list, tuple
List of components
T : float
Temperature in Kelvin
P : float
Pressure in bar
Returns
----------
K : numpy array
Array of partition coefficients for each component
"""
if not hasattr(compobjs, '__iter__'):
compobjs = [compobjs]
K = np.ones([len(compobjs)])
for ii, comp in enumerate(compobjs):
if comp.compname != 'h2o':
K[ii] = 0
else:
T_0 = 273.1576
P_0 = 6.11457e-3
T_ice = T_0 - 7.404e-3*(P - P_0) - 1.461e-6*(P - P_0)**2
xw_aq = 1 + 8.33076e-3*(T - T_ice) + 3.91416e-5*(T - T_ice)**2
K[ii] = 1.0/xw_aq
return K
def ideal_VHs1(compobjs, T, P):
    """Ideal partition coefficients for vapor and structure 1 hydrate phases
Parameters
----------
compobjs : list, tuple
List of components
T : float
Temperature in Kelvin
P : float
Pressure in bar
Returns
----------
K : numpy array
Array of partition coefficients for each component
"""
if not hasattr(compobjs, '__iter__'):
compobjs = [compobjs]
K = np.ones([len(compobjs)])
for ii, comp in enumerate(compobjs):
if comp.compname != 'h2o':
s = comp.ideal['Hs1']
K_wf = np.exp(
s['a1'] + s['a2']*np.log(P) + s['a3']*np.log(P)**2
- (s['a4'] + s['a5']*np.log(P) + s['a6']*np.log(P)**2
+ s['a7']*np.log(P)**3)/T
+ s['a8']/P + s['a9']/P**2 + s['a10']*T + s['a11']*P
+ s['a12']*np.log(P/T**2) + s['a13']/T**2)
K[ii] = K_wf/(1 - 0.88)
else:
K[ii] = (ideal_VAq(comp, T, P)
/ (0.88 * ideal_IceAq(comp, T, P)))
return np.abs(K)
def ideal_VHs2(compobjs, T, P):
    """Ideal partition coefficients for vapor and structure 2 hydrate phases
Parameters
----------
compobjs : list, tuple
List of components
T : float
Temperature in Kelvin
P : float
Pressure in bar
Returns
----------
K : numpy array
Array of partition coefficients for each component
"""
if not hasattr(compobjs, '__iter__'):
compobjs = [compobjs]
K = np.ones([len(compobjs)])
T_Kelvin = T
T = T*9.0/5.0 - 459.67
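    # The line above converts T from Kelvin to degrees Fahrenheit for the s2
    # correlation below; T_Kelvin keeps the original Kelvin value for the
    # water branch (ideal_VAq / ideal_IceAq).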
for ii, comp in enumerate(compobjs):
if comp.compname != 'h2o':
s = comp.ideal['Hs2']
K_wf = np.exp(
s['a1'] + s['a2']*T + s['a3']*P + s['a4']/T
+ s['a5']/P + s['a6']*T*P + s['a6']*T**2
+ s['a8']*P**2 + s['a9']*P/T + s['a10']*np.log(P/T)
+ s['a11']/P**2 + s['a12']*T/P + s['a13']*T**2/P
+ s['a14']*P/T**2 + s['a15']*T/P**3 + s['a16']*T**3
+ s['a17']*P**3/T**2 + s['a18']*T**4
+ s['a19']*np.log(P))
K[ii] = K_wf/(1 - 0.90)
else:
K[ii] = (ideal_VAq(comp, T_Kelvin, P)
/ (0.90 * ideal_IceAq(comp, T_Kelvin, P)))
return K
def make_ideal_K_allmat(compobjs, T, P):
    """Matrix of ideal partition coefficients between all phase pairs
Parameters
----------
compobjs : list, tuple
List of components
T : float
Temperature in Kelvin
P : float
Pressure in bar
Returns
----------
K_all_mat : numpy array
Matrix of all possible partition coefficients for each component
"""
if not hasattr(compobjs, '__iter__'):
compobjs = [compobjs]
K_all_mat = np.zeros([len(compobjs), 5])
K_all_mat[:, 0] = ideal_LV(compobjs, T, P)
K_all_mat[:, 1] = ideal_VAq(compobjs, T, P)
K_all_mat[:, 2] = ideal_VHs1(compobjs, T, P)
K_all_mat[:, 3] = ideal_VHs2(compobjs, T, P)
K_all_mat[:, 4] = ideal_IceAq(compobjs, T, P)
return K_all_mat
class FlashController(object):
"""Flash calculation and auxiliary components
Attributes
----------
phase_menu : dict
Dictionary relating possible phases (keys) and aliases
for those phases (values).
eos_menu : dict
Dictionary relating possible phases (keys) and different
eos's (values) for each phase that might be used.
eos_default : dict
Dictionary setting default eos (values) for each possible
phase (key).
"""
phase_menu = {'aqueous': ('aqueous', 'aq', 'water', 'liquid'),
'vapor': ('vapor', 'v', 'gas', 'vaporhc', 'hc'),
'lhc': ('lhc', 'liquidco2', 'liquid_co2', 'l',
'liquidhc'),
's1': ('s1', 's1hydrate', 's1h', 'hs1', 'h1',
'hydrate', 'hydrate s1', 'structure 1',
'structure 1 hydrate', 'str 1'),
's2': ('s2', 's2hydrate', 's2h', 'hs2', 'h2',
'hydrate s2', 'structure 2',
'structure 2 hydrate', 'str 2'),
'ice': ('ice')}
eos_menu = {'aqueous': ('aqhb', 'henryslaw'),
'vapor': ('srk', 'pr'),
'lhc': ('srk', 'pr'),
's1': ('hvdwpm', 'hvdwpped', 'hvdwpvel'),
's2': ('hvdwpm', 'hvdwpped', 'hvdwpvel'),
'ice': ('ice')}
eos_default={'aqueous':'aqhb',
'vapor': 'srk',
'lhc': 'srk',
's1': 'hvdwpm',
's2': 'hvdwpm',
'ice': 'ice'}
def __init__(self,
components,
phases=['aqueous', 'vapor', 'lhc', 's1', 's2'],
eos=eos_default,
T=298.15,
P=1.0):
"""Flash controller for a fixed set of components with allowable modification
to pressure, temperature, or composition
Parameters
----------
components : list, tuple
Set of components to be used for the flash controller. This is not
allowed to change
phases : list, tuple
Set of phases to consider during flash
eos : dict
Dictionary for relating phases to a specific type of eos
T : float
Temperature in Kelvin
P : float
Pressure in bar
Attributes
----------
T : float
Temperature in Kelvin
P : float
Pressure in bar
ref_phase : str
Name of phase that is used for partition coefficient denominator
compobjs : list
List of components as 'Component' objects created with
'component_properties.py'
compname : list
List of proper component names
h2oexists : bool
Boolean variable that describes whether water is one of the components
h2oind : int
Index to water in all component-indexed lists
phases : list
List of phases to consider in calculation
fug_list : list
List of phase-based eos objects for fugacity calculations
hyd_phases : dictionary
Dictionary hydrate phases and corresponding indices in 'self.phases'
nonhyd_phases : list
            List of non-hydrate phases where each element is the corresponding index in 'self.phases'
feed : list, array
Total composition (z) of calculation
        ref_comp : numpy array
            Composition of reference phase
        ref_fug : numpy array
            Fugacity of reference phase
ref_phases_tried : list
List of reference phases tried during calculation starting empty
K_calc : numpy array
            Array of reference-phase-relative partition coefficients with size Nc x Np
x_calc : numpy array
Array of compositions in each phase with size Nc x Np
alpha_calc : numpy array
Array of molar phase fraction of each phase with size Np
theta_calc : numpy array
Array of phase stability of each phase with size Np
Methods
----------
set_feed :
        Take a list or array of size Nc and set the
        total composition such that sum(z) == 1
set_ref_index :
Determine the index within self.phases of the new reference phases
set_phases :
Re-assign phases from a list based argument of phase types
change_ref_phase :
Cycles through possible unused reference phases
main_handler :
Primary method for calculation logic
calc_x :
Calculation of composition for auxiliary variables
calc_K :
Calculation of partition coefficients using output of fugacity calculations
calc_fugacity :
Calculation of fugacity of each component in each phase
find_alphatheta_min :
Calculation that performs minimization of objective function at fixed x and K
make_ideal_K_mat :
Determine initial partition coefficients independent of composition
"""
self.T = T
self.P = P
self.ref_phase = None
self.completed = False
self.monitor = []
self.iter_output = {}
# Check that components exceed 1.
if type(components) is str or len(components) == 1:
raise ValueError("""More than one component is necessary
to run flash algorithm.""")
elif type(components[0]) is str:
# Use list or array of component names to populate a new list of
# component objects
self.compobjs = []
for compname in components:
self.compobjs.append(cp.Component(compname))
elif isinstance(components[0], cp.Component):
self.compobjs = components
else:
            raise ValueError("""Components are not properly defined.
                                Pass a list of strings
                                or a list of component objects.""")
self.compname = []
self.h2oexists = False
self.h2oind = None
for ii, comp in enumerate(self.compobjs):
self.compname.append(comp.compname)
if comp.compname == 'h2o':
self.h2oexists = True
self.h2oind = ii
self.Nc = len(self.compobjs)
# Check that phases exceed 1
if type(phases) is str or len(phases) == 1:
            raise ValueError("""More than one phase is necessary
                                to run flash algorithm.""")
else:
# Populate phase list and make sure it's a valid phase.
self.phases = []
for phase in phases:
if phase.lower() not in self.phases:
phase_found = False
for real_phase, alias in self.phase_menu.items():
if phase.lower() in alias:
self.phases.append(real_phase)
phase_found = True
if not phase_found:
raise ValueError(phase + """
is not a supported phase!! \nConsult
"FlashController.phase_menu" attribute for
                    \nvalid phases and associated call strings.""")
else:
# Perhaps, I should just print warning that eliminates
# duplicate phases.
raise ValueError("""One or more phases are repeated.
Distinct phases are necessary to
run flash algorithm.""")
# Allow option for changing the default eos for any given phase.
# Check to make sure the eos is supported and that the phase being
# modified is a valid phase.
if eos != self.eos_default:
if type(eos) is not dict:
raise TypeError("""
"eos" specifies equation of state to be used with
a specific phase. Pass a dictionary as eos
(e.g., eos={"vapor": "pr"}).""")
else:
self.eos = self.eos_default.copy()
for phase_tmp, eos_tmp in eos.items():
if phase_tmp in self.eos_default.keys():
if eos_tmp in self.eos_menu[phase_tmp]:
self.eos[phase_tmp] = eos_tmp
else:
raise ValueError(
eos_tmp + ' is not a valid eos for '
+ phase_tmp + ' phase.')
else:
for valid_phase, alias in self.phase_menu.items():
phase_found = False
if phase_tmp.lower() in alias:
if eos_tmp in self.eos_menu[valid_phase]:
self.eos[valid_phase] = eos_tmp
phase_found = True
else:
raise ValueError(
eos_tmp + ' is not a valid eos for '
+ valid_phase + ' phase.')
if not phase_found:
raise ValueError(
phase_tmp + ' is not a valid phase.')
else:
self.eos = eos
# Build list for fugacity calculations
fug_list = list()
hyd_phases = dict()
phase_check = list(self.phases)
for ii, phase in enumerate(phase_check):
if phase in ('aqueous', 's1', 's2'):
if self.h2oexists:
if phase == 'aqueous':
aq_obj = aq.HegBromEos(self.compobjs, self.T, self.P)
fug_list.append(aq_obj)
self.ref_phase = phase
else:
hyd_phases[phase] = ii
h_obj = h.HvdwpmEos(self.compobjs, self.T, self.P,
structure=phase)
fug_list.append(h_obj)
else:
self.phases.remove(phase)
elif phase == 'ice':
print('Not currently supported for an ice phase.')
self.phases.remove(phase)
if not hyd_phases:
for hyd_phase, hyd_ind in hyd_phases.items():
hyd_ind += -1
else:
hc_obj = hc.SrkEos(self.compobjs, self.T, self.P)
fug_list.append(hc_obj)
if (self.ref_phase is None) and ('vapor' in self.phases):
self.ref_phase = 'vapor'
elif (self.ref_phase is None) and ('lhc' in self.phases):
self.ref_phase = 'lhc'
self.fug_list = fug_list
self.hyd_phases = hyd_phases
self.nonhyd_phases = [ii for ii in range(len(self.phases))
if ii not in self.hyd_phases.values()]
self.Np = len(self.phases)
self.feed = None
self.ref_phases_tried = []
self.ref_comp = np.zeros([len(self.compobjs)])
self.ref_fug = np.zeros([len(self.compobjs)])
self.set_ref_index()
self.ref_phase_iter = 0
self.K_calc = np.zeros([len(self.compobjs), len(self.phases)])
self.x_calc = np.zeros([len(self.compobjs), len(self.phases)])
self.alpha_calc = np.zeros([len(self.phases)])
self.theta_calc = np.zeros([len(self.phases)])
def set_feed(self, z, setref=True):
"""Utility for setting the feed and reference phase based on feed
Parameters
----------
z : list, tuple, numpy array
Mole fraction of each components
setref : bool
Flag for setting reference phase
"""
self.feed = np.asarray(z) / sum(z)
if setref:
if len(z) != len(self.compobjs):
raise ValueError("""Feed fraction has different dimension than
initial component list!""")
elif self.h2oexists:
if ((z[self.h2oind] > 0.8)
or (('vapor' not in self.phases)
and ('lhc' not in self.phases))):
self.ref_phase = 'aqueous'
else:
if 'vapor' in self.phases:
self.ref_phase = 'vapor'
elif 'lhc' in self.phases:
self.ref_phase = 'lhc'
elif 'vapor' in self.phases:
# This is true for now. In practice, this sometimes needs to be
# lhc, but we will handle that within a different method.
self.ref_phase = 'vapor'
elif 'lhc' in self.phases:
self.ref_phase = 'lhc'
self.ref_phases_tried = []
# TODO: Refactor this to be called at initialization and make all the same checks.
def set_phases(self, phases):
"""Utility to reset phases
Parameters
----------
phases : list, tuple
List of new phases
"""
self.phases = phases
def set_ref_index(self):
"""Utility to set index of reference phases"""
if self.ref_phase not in self.phases:
self.phases.append(self.ref_phase)
self.ref_ind = [ii for ii, phase in enumerate(self.phases)
if phase == self.ref_phase].pop()
    # TODO: Make this more robust; there is a possibility of it breaking!
def change_ref_phase(self):
"""Utility to change reference phase on calculation stall"""
self.ref_phases_tried.append(self.ref_phase)
self.ref_phase_list = [
phase for phase in self.phases
if (phase not in self.ref_phases_tried)
and (phase not in ['s1', 's2'])
]
if self.ref_phase_list == []:
self.ref_phase = self.ref_phases_tried[
np.mod(self.ref_phase_iter,
len(self.ref_phases_tried))
]
self.ref_phase_iter += 1
else:
self.ref_phase_iter = 0
if len(self.ref_phase_list) > 1:
if self.ref_phase == 'vapor':
self.ref_phase = 'lhc'
elif self.ref_phase == 'lhc':
self.ref_phase = 'vapor'
elif self.ref_phase == 'aqueous':
                    self.ref_phase = 'lhc'
else:
self.ref_phase = self.ref_phase_list[0]
self.set_ref_index()
def main_handler(self, compobjs, z, T, P,
K_init=[], verbose=False,
initialize=True, run_diagnostics=False,
incipient_calc=False, monitor_calc=False,
**kwargs):
"""Primary logical utility for performing flash calculation
Parameters
----------
compobjs : list, tuple
List of components
z : list, tuple, numpy array
Molar composition of each component
T : float
Temperature in Kelvin
P : float
Pressure in bar
K_init : numpy array
Partition coefficient matrix to use at start of calculation
verbose : bool
Flag for printing to screen
initialize : bool
Flag for initializing the calculation using ideal partition coefficients
run_diagnostics : bool
Flag for doing debugging
Returns
----------
values : list
List of calculation output of gibbs energy minimum
values[0] : numpy array
Composition (x) with size Nc x Np
values[1] : numpy array
Molar phase fraction (\alpha) with size Np
values[2] : numpy array
Partition coefficient matrix of each component
in each phase (K) with size Nc x Np
values[3] : int
Number of iterations required for convergence
values[4] : float
Maximum error on any variable from minimization calculation
"""
# z = np.asarray(z)
self.set_feed(z)
self.set_ref_index()
if type(z) != np.ndarray:
z = np.asarray(z)
if verbose:
tstart = time.time()
if monitor_calc:
self.monitor = []
self.iter_output = {}
if initialize or not self.completed:
alpha_0 = np.ones([self.Np]) / self.Np
theta_0 = np.zeros([self.Np])
# TODO: Rewrite so that ideal K doesn't have to be re-calculated!
if not incipient_calc:
if K_init == []:
K_0 = self.make_ideal_K_mat(compobjs, T, P)
else:
# Add more code to allow the specification of a partition coefficient
K_0 = np.asarray(K_init)
K_0 = K_0 / K_0[:, self.ref_ind][:, np.newaxis]
if verbose:
print('K is not the default')
else:
K_0 = self.incipient_calc(T, P)
if monitor_calc:
self.monitor.append([{'alpha': alpha_0,
'theta': theta_0,
'K': K_0,
'step': 0,
'x': np.zeros_like(K_0),
'inner': [],
'error': []}])
alpha_new, theta_new = self.find_alphatheta_min(z, alpha_0,
theta_0, K_0,
monitor_calc=monitor_calc)
x_new = self.calc_x(z, alpha_new, theta_new, K_0, T, P)
fug_new = self.calc_fugacity(T, P, x_new)
x_new = self.calc_x(z, alpha_new, theta_new, K_0, T, P)
K_new = self.calc_K(T, P, x_new)
if monitor_calc:
self.monitor.append([{'alpha': alpha_new,
'theta': theta_new,
'K': K_new,
'step': 0.5,
'x': x_new,
'inner': self.iter_output,
'error': []}])
if run_diagnostics:
print('Initial K:\n', K_0)
print('First iter K:\n', K_new)
print('First x:\n', x_new)
print('First fugacity :\n', fug_new)
print('First alpha:\n', alpha_new)
print('First theta:\n', theta_new)
else:
K_0 = self.K_calc / self.K_calc[:, self.ref_ind][:, np.newaxis]
alpha_0 = np.ones([self.Np]) / self.Np
theta_0 = np.zeros([self.Np])
alpha_new, theta_new = self.find_alphatheta_min(z, alpha_0,
theta_0, K_0,
monitor_calc=monitor_calc)
x_new = self.calc_x(z, alpha_new, theta_new, K_0, T, P)
fug_new = self.calc_fugacity(T, P, x_new)
x_new = self.calc_x(z, alpha_new, theta_new, K_0, T, P)
K_new = self.calc_K(T, P, x_new)
# alpha_new = self.alpha_calc.copy()
# theta_new = self.theta_calc.copy()
# K_new = self.K_calc.copy()
# x_new = self.x_calc.copy()
if monitor_calc:
self.monitor.append([{'alpha': alpha_new,
'theta': theta_new,
'K': K_new,
'step': 0,
'x': x_new,
'inner': self.iter_output,
'error': []}])
error = 1e6
Obj_error = 1e6
TOL = 1e-6
itercount = 0
refphase_itercount = 0
iterlim = 100
alpha_old = alpha_new.copy()
theta_old = theta_new.copy()
x_old = x_new.copy()
K_old = K_new.copy()
while error > TOL and itercount < iterlim:
# Perform newton iteration to update alpha and theta at
# a fixed x and K
self.iter_output = {}
alpha_new, theta_new = self.find_alphatheta_min(z, alpha_old,
theta_old, K_new,
monitor_calc=monitor_calc)
phase_diff = np.linalg.norm(alpha_new - alpha_old) + np.linalg.norm(theta_new - theta_old)
# Perform one iteration of successive substitution to update
# x and K at the new alpha and theta.
x_error = 1e6
x_counter = 0
if (itercount == 0) or (phase_diff > TOL / 10):
x_counter_lim = 1
else:
x_counter_lim = 1
if monitor_calc:
x_iter_out = []
while (x_error > TOL) and (x_counter < x_counter_lim):
x_new = self.calc_x(z, alpha_new, theta_new, K_new, T, P)
K_new = self.calc_K(T, P, x_new)
x_error = np.linalg.norm(x_new - x_old)
x_counter += 1
if monitor_calc:
x_iter_out.append([x_counter, {'x': x_new, 'K': K_new}])
if run_diagnostics:
print('Iter K:\n', K_new)
print('Iter x:\n', x_new)
print('Iter alpha:\n', alpha_new)
print('Iter theta:\n', theta_new)
print('Iter fug:\n:', self.calc_fugacity(T, P, x_new))
print('Iter z:\n:', z)
# Determine error associated new x and K and change in x
# Set iteration error to the maximum of the two.
Obj_error = np.linalg.norm(objective(z, alpha_new,
theta_new, K_new))
error = max(Obj_error, x_error)
if monitor_calc:
self.iter_output['comp'] = x_iter_out
self.monitor.append([{'alpha': alpha_new,
'theta': theta_new,
'K': K_new,
'step': itercount,
'x': x_new,
'inner': self.iter_output,
'error': {'Obj': Obj_error, 'max': error}}])
itercount += 1
refphase_itercount += 1
nan_occur = (np.isnan(x_new).any() or np.isnan(K_new).any()
or np.isnan(alpha_new).any() or np.isnan(theta_new).any())
# if (
# (((refphase_itercount > 25) or (error < TOL))
# and ((alpha_new[self.ref_ind] < 0.001)
# or (theta_new[self.ref_ind] == 1e-10)))
# or nan_occur
# ):
if (
(((refphase_itercount > 25) or (error > TOL))
and (alpha_new[self.ref_ind] < 0.0001))
or nan_occur
):
error = 1e6
# alpha_new = np.ones([self.Np]) / (1 - self.Np)
# alpha_new[self.ref_ind] = 0.0
# theta_new = np.zeros([self.Np])
# theta_new[self.ref_ind] = 2.0
self.change_ref_phase()
refphase_itercount = 0
# TODO change these 3 lines to investigate the effect of changing the reference phase
# K_new = self.make_ideal_K_mat(compobjs, T, P)
# alpha_new = np.ones([self.Np])/self.Np
# theta_new = np.zeros([self.Np])
# K_new = self.calc_K(T, P, x_new)
if verbose:
print('Changed reference phase')
# Set old values using copy
# (NOT direct assignment due to
# Python quirkiness of memory indexing)
alpha_old = alpha_new.copy()
theta_old = theta_new.copy()
x_old = x_new.copy()
K_old = K_new.copy()
# Print a bunch of crap if desired.
if verbose:
print('\nIteration no: ', itercount)
print('alpha = ', alpha_new)
print('theta = ', theta_new)
print('x = \n', x_new)
print('K = \n', K_new)
print('Composition error: ', x_error)
print('objective function error: ', Obj_error)
if verbose:
print('\nElapsed time =', time.time() - tstart, '\n')
self.K_calc = K_new.copy()
self.x_calc = x_new.copy()
self.alpha_calc = alpha_new.copy()
self.theta_calc = theta_new.copy()
self.completed = True
values = [x_new, alpha_new, K_new, itercount, error]
return values
def calc_x(self, z, alpha, theta, K, T, P):
"""Composition of each component in each phases
Parameters
----------
z : list, numpy array
Total composition of each component with size Nc
alpha : list, numpy array
Molar phase fractions with size Np
theta : list, numpy array
Stability of phases with size Np
K : list, numpy array
Partition coefficients for each component
in each phase with size Nc x Np
T : float
Temperature in Kelvin
P : float
Pressure in bar
Returns
----------
x : numpy array
Composition of each component in each phase
at fixed alpha and theta with size Nc x Np
"""
if type(z) != np.ndarray:
z = np.asarray(z)
if type(alpha) != np.ndarray:
alpha = np.asarray(alpha)
if type(theta) != np.ndarray:
theta = np.asarray(theta)
if type(K) != np.ndarray:
K = np.asarray(K)
x_numerator = z[:, np.newaxis]*K*np.exp(theta[np.newaxis, :])
x_denominator = 1 + np.sum(
alpha[np.newaxis, :]*(K*np.exp(theta[np.newaxis, :]) - 1),
axis=1)
x_mat = x_numerator / x_denominator[:, np.newaxis]
fug_mat = self.calc_fugacity(T, P, x_mat)
for hyd_phase, ind in self.hyd_phases.items():
x_mat[:, ind] = self.fug_list[ind].hyd_comp()
x = np.minimum(1, np.abs(x_mat))
x = x / np.sum(x, axis=0)[np.newaxis, :]
return x
def calc_K(self, T, P, x_mat):
"""Partition coefficients of each component in each phase
Parameters
----------
T : float
Temperature in Kelvin
P : float
Pressure in bar
x_mat : numpy array
Composition of each component in each phase
Returns
----------
K : numpy array
Partition coefficient matrix of each component
in each phase at fixed alpha and theta with size Nc x Np
"""
fug_mat = self.calc_fugacity(T, P, x_mat)
K_mat = np.ones_like(x_mat)
for ii, phase in enumerate(self.phases):
if phase != self.ref_phase:
K_mat[:, ii] = (fug_mat[:, self.ref_ind]/fug_mat[:, ii]
* x_mat[:, ii]/x_mat[:, self.ref_ind])
K = np.real(np.abs(K_mat))
return K
def calc_fugacity(self, T, P, x_mat):
"""Fugacity of each component in each phase
Parameters
----------
T : float
Temperature in Kelvin
P : float
Pressure in bar
x_mat : numpy array
Composition of each component in each phase
Returns
----------
fug_out : numpy array
Fugacity matrix of each component in each phase
at fixed alpha and theta with size Nc x Np
"""
fug_out = np.zeros_like(x_mat)
for ii, phase in enumerate(self.phases):
if phase == 'aqueous':
fug_out[:,ii] = self.fug_list[ii].calc(self.compobjs,
T,
P,
x_mat[:,ii])
elif phase == 'vapor' or phase == 'lhc':
fug_out[:,ii] = self.fug_list[ii].calc(self.compobjs,
T,
P,
x_mat[:, ii],
phase=phase)
# Update the reference phase fugacity, which cannot be hydrate.
self.ref_fug = fug_out[:, self.ref_ind]
self.ref_comp = x_mat[:, self.ref_ind]
# Do this separately because we need the reference phase fugacity.
for hyd_phase, ind in self.hyd_phases.items():
fug_out[:, ind] = self.fug_list[ind].calc(self.compobjs,
T,
P,
[],
self.ref_fug)
return fug_out
def find_alphatheta_min(self, z, alpha0, theta0, K, print_iter_info=False, monitor_calc=False):
"""Algorithm for determining objective function minimization at fixed K
Parameters
----------
z : list, numpy array
Molar fraction of each component with size Nc
alpha0 : list, numpy array
Initial molar phase fraction with size Np
theta0 : list, numpy array
Initial molar phase stability with size Np
K : list, numpy array
Partition coefficient matrix with size Nc x Np
print_iter_info : bool
Flag to print minimization progress
Returns
----------
new_values : list
Result of gibbs energy minimization
new_values[0] : numpy array
Molar phase fractions at gibbs energy minimum
at fixed x and K with size Np
        new_values[1] : numpy array
Phase stabilities at gibbs energy minimum
at fixed x and K with size Np
"""
if not hasattr(self, 'ref_ind'):
self.ref_ind = 0
# Set iteration parameters
nres = 1e6
ndx = 1e6
TOL = 1e-6
kmax = 100
k = 0
dx = np.zeros([2*self.Np])
if type(z) != np.ndarray:
z = np.asarray(z)
if type(alpha0) != np.ndarray:
alpha0 = np.asarray(alpha0)
if type(theta0) != np.ndarray:
theta0 = np.asarray(theta0)
if type(K) != np.ndarray:
K = np.asarray(K)
# Mask arrays to avoid the reference phase.
alf_mask = np.ones([2*self.Np], dtype=bool)
theta_mask = np.ones([2*self.Np], dtype=bool)
arr_mask = np.ones([self.Np], dtype=bool)
arrdbl_mask = np.ones([2*self.Np], dtype=bool)
mat_mask = np.ones([2*self.Np, 2*self.Np], dtype=bool)
# Populate masked arrays for 4 different types.
# Mask reference phase in alpha array
alf_mask[self.ref_ind] = 0
alf_mask[self.Np:] = 0
# Mask reference phase in theta array
theta_mask[0:self.Np] = 0
theta_mask[self.ref_ind + self.Np] = 0
# Mask reference phase in x array
arr_mask[self.ref_ind] = 0
arrdbl_mask[self.ref_ind] = 0
arrdbl_mask[self.ref_ind + self.Np] = 0
# Mask reference phase rows and columns in jacobian matrix
mat_mask[self.ref_ind, :] = 0
mat_mask[self.ref_ind + self.Np, :] = 0
mat_mask[:, self.ref_ind] = 0
mat_mask[:, self.ref_ind + self.Np] = 0
# Define 'x' as the concatenation of alpha and theta
# x = np.concatenate((alpha0, theta0))
alpha_old = alpha0.copy()
theta_old = theta0.copy()
alpha_new = alpha_old.copy()
theta_new = theta_old.copy()
if monitor_calc:
iter_monitor = []
values_changed = True
# Iterate until converged
while ((nres > TOL) and (ndx > TOL/100) and (k < kmax)) and (values_changed):
if monitor_calc:
iter_monitor.append([k, {'alpha': alpha_new, 'theta': theta_new,
'res': nres, 'delta': ndx}])
# Solve for change in variables using non-reference phases
res = objective(z, alpha_old, theta_old, K)
J = jacobian(z, alpha_old, theta_old, K)
J_mod = J[mat_mask].reshape([2*(self.Np - 1), 2*(self.Np - 1)])
res_mod = res[arrdbl_mask]
# try:
# dx_tmp = -np.linalg.solve(J_mod, res_mod)
# except:
# dx_tmp = -np.matmul(np.linalg.pinv(J_mod), res_mod)
dx_tmp = -np.matmul(np.linalg.pinv(J_mod), res_mod)
# Populate dx for non-reference phases
dx[arrdbl_mask] = dx_tmp
# Determine error
nres = np.linalg.norm(res)
ndx = np.linalg.norm(dx)
# Adjust alpha using a maximum change of
# the larger of 0.5*alpha_i or 0.001.
alpha_new[arr_mask] = np.minimum(1,
np.maximum(0, (
alpha_old[arr_mask]
+ np.sign(dx[alf_mask])
* np.minimum(np.maximum(1e-4, 0.25*alpha_old[arr_mask]),
np.abs(dx[alf_mask])))))
# Limit alpha to exist between 0 and 1 and adjust alpha_{ref_ind}
alpha_new[self.ref_ind] = np.minimum(1,
np.maximum(0,
1 - np.sum(alpha_new[arr_mask])))
alpha_new *= np.sum(alpha_new)**(-1)
# Adjust theta and limit it to a positive value
theta_new[arr_mask] = theta_old[arr_mask] + dx[theta_mask]
theta_new[arr_mask] = np.minimum(1.5, np.maximum(0, theta_new[arr_mask]))
# Use technique of Gupta to enforce that theta_i*alpha_i = 0
# or that theta_i = alpha_i = 1e-10, which will kick one of them
# to zero on the next iteration.
alpha_new[alpha_new < 1e-10] = 0
theta_new[theta_new < 1e-10] = 0
change_ind = (((alpha_new < 1e-10)
& (theta_new == 0))
| ((alpha_new == 0)
& (theta_new < 1e-10))
| ((alpha_new < 1e-10)
& (theta_new < 1e-10)))
alpha_new[change_ind] = 1e-10
theta_new[change_ind] = 1e-10
k += 1
values_changed = (TOL < (np.linalg.norm(alpha_old - alpha_new)
+ np.linalg.norm(theta_old - theta_new)))
alpha_old = alpha_new.copy()
theta_old = theta_new.copy()
if print_iter_info:
print('k=', k)
print('error=', nres)
print('param change=', ndx)
if monitor_calc:
self.iter_output['phase'] = iter_monitor
new_values = [alpha_new, theta_new]
return new_values
# Initialize the partition coefficient matrix based on P, T and components
# Provide the option to specify the feed to predict the appropriate
# reference phase or the option to specify the reference phase explicitly.
def make_ideal_K_mat(self, compobjs, T, P, **kwargs):
"""Ideal partition coefficient initialization routine
Parameters
----------
compobjs : list
List of components
T : float
Temperature in Kelvin
P : float
Pressure in bar
Returns
----------
K_mat_ref : numpy array
Ideal partition coefficients for
each component in each phase with size Nc x Np
"""
if not hasattr(compobjs, '__iter__'):
compobjs = [compobjs]
if kwargs is not None:
            if ('z' in kwargs) or ('feed' in kwargs):
try:
self.set_feed(kwargs['z'])
except:
self.set_feed(kwargs['feed'])
elif 'ref_phase' in kwargs:
self.ref_phase = kwargs['ref_phase']
if self.ref_phase not in self.phases:
self.phases.append(self.ref_phase)
self.set_ref_index()
if 'phases' in kwargs:
phase_return = kwargs['phases']
else:
phase_return = self.phases
else:
phase_return = self.phases
K_all_mat = make_ideal_K_allmat(compobjs, T, P)
K_mat_ref = np.zeros([len(compobjs), len(phase_return)])
K_refdict = K_transform[self.ref_phase]
for ii, phase in enumerate(phase_return):
trans_tuple = K_refdict[phase]
if type(trans_tuple) is int:
if trans_tuple == 9:
K_mat_ref[:, ii] = 1
else:
K_mat_ref[:, ii] = K_all_mat[:, trans_tuple]
else:
if type(trans_tuple[0]) is int:
if type(trans_tuple[1]) is int:
if trans_tuple[0] == 9:
K_mat_ref[:, ii] = 1/K_all_mat[:, trans_tuple[1]]
else:
K_mat_ref[:, ii] = (K_all_mat[:, trans_tuple[0]]
/ K_all_mat[:, trans_tuple[1]])
else:
K_mat_ref[:, ii] = (
K_all_mat[:, trans_tuple[0]]
/ (K_all_mat[:, trans_tuple[1][0]]
* K_all_mat[:, trans_tuple[1][1]]))
else:
K_mat_ref[:, ii] = (
(K_all_mat[:, trans_tuple[0][0]]
* K_all_mat[:, trans_tuple[0][1]])
/ K_all_mat[:, trans_tuple[1]])
return K_mat_ref
# TODO fill in documentation here...
def incipient_calc(self, T, P):
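        # Builds composition-based initial K estimates by running a sequence
        # of two-phase flashes: vapor/lhc on a water-free feed, then
        # aqueous/vapor, aqueous/lhc, aqueous/s1 and aqueous/s2 on the full
        # feed. The resulting phase compositions are assembled into x_tmp and
        # returned normalized by the reference-phase column; without water
        # (or with two or fewer components) it falls back to
        # make_ideal_K_mat().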
if (self.h2oexists) and (len(self.feed) > 2):
z_wf = []
comp_wf = []
wf_comp_map = {}
for ii in range(len(self.feed)):
if ii != self.h2oind:
z_wf.append(self.feed[ii])
comp_wf.append(self.compname[ii])
wf_comp_map.update({ii: len(z_wf) - 1})
z_wf = np.asarray(z_wf) / sum(z_wf)
vlhc_flash = FlashController(comp_wf, phases=['vapor', 'lhc'])
vlhc_output = vlhc_flash.main_handler(vlhc_flash.compobjs, z=z_wf, T=T, P=P)
z_aqv = []
z_aqlhc = []
for ii in range(self.Nc):
if ii == self.h2oind:
z_aqv.append(self.feed[ii])
z_aqlhc.append(self.feed[ii])
else:
z_aqv.append(vlhc_output[0][wf_comp_map[ii], 0] / (1 - self.feed[self.h2oind]))
z_aqlhc.append(vlhc_output[0][wf_comp_map[ii], 1] / (1 - self.feed[self.h2oind]))
if 'vapor' in self.phases:
aqv_flash = FlashController(self.compname, phases=['aqueous', 'vapor'])
aqv_output = aqv_flash.main_handler(aqv_flash.compobjs, z=np.asarray(z_aqv), T=T, P=P)
if 'lhc' in self.phases:
aqlhc_flash = FlashController(self.compname, phases=['aqueous', 'lhc'])
aqlhc_output = aqlhc_flash.main_handler(aqlhc_flash.compobjs, z=np.asarray(z_aqlhc), T=T, P=P)
if 's1' in self.phases:
aqs1_flash = FlashController(self.compname, phases=['aqueous', 's1'])
aqs1_output = aqs1_flash.main_handler(aqs1_flash.compobjs, z=self.feed, T=T, P=P)
if 's2' in self.phases:
aqs2_flash = FlashController(self.compname, phases=['aqueous', 's2'])
aqs2_output = aqs2_flash.main_handler(aqs2_flash.compobjs, z=self.feed, T=T, P=P)
x_tmp = np.zeros([self.Nc, self.Np])
for jj, phase in enumerate(self.phases):
if phase == 'vapor':
x_tmp[:, jj] = aqv_output[0][:, 1]
elif phase == 'lhc':
x_tmp[:, jj] = aqlhc_output[0][:, 1]
elif phase == 'aqueous':
if (vlhc_output[1][0] >= vlhc_output[1][1]) and ('vapor' in self.phases):
x_tmp[:, jj] = aqv_output[0][:, 0]
else:
x_tmp[:, jj] = aqlhc_output[0][:, 0]
elif phase == 's1':
x_tmp[:, jj] = aqs1_output[0][:, 1]
elif phase == 's2':
x_tmp[:, jj] = aqs2_output[0][:, 1]
return x_tmp / x_tmp[:, self.ref_ind][:, np.newaxis]
else:
return self.make_ideal_K_mat(self.compobjs, T, P)
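# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module). It assumes that
# component_properties.Component accepts the names used below; only 'h2o' is
# confirmed by this file, 'ch4' is an assumption, as are the feed, T and P
# values.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    flasher = FlashController(['h2o', 'ch4'])
    x, alpha, K, n_iter, err = flasher.main_handler(
        flasher.compobjs, z=[0.6, 0.4], T=280.0, P=70.0)
    print('phase list:', flasher.phases)
    print('phase fractions:', alpha)
    print('iterations:', n_iter, 'max error:', err)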
|
harisibrahimkv/django
|
refs/heads/master
|
django/conf/locale/pt/formats.py
|
65
|
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = r'j \d\e F \d\e Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = r'j \d\e F \d\e Y à\s H:i'
YEAR_MONTH_FORMAT = r'F \d\e Y'
MONTH_DAY_FORMAT = r'j \d\e F'
SHORT_DATE_FORMAT = 'd/m/Y'
SHORT_DATETIME_FORMAT = 'd/m/Y H:i'
FIRST_DAY_OF_WEEK = 0 # Sunday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# Kept ISO formats as they are in first position
DATE_INPUT_FORMATS = [
'%Y-%m-%d', '%d/%m/%Y', '%d/%m/%y', # '2006-10-25', '25/10/2006', '25/10/06'
# '%d de %b de %Y', '%d de %b, %Y', # '25 de Out de 2006', '25 Out, 2006'
# '%d de %B de %Y', '%d de %B, %Y', # '25 de Outubro de 2006', '25 de Outubro, 2006'
]
DATETIME_INPUT_FORMATS = [
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%d/%m/%Y %H:%M:%S', # '25/10/2006 14:30:59'
'%d/%m/%Y %H:%M:%S.%f', # '25/10/2006 14:30:59.000200'
'%d/%m/%Y %H:%M', # '25/10/2006 14:30'
'%d/%m/%Y', # '25/10/2006'
'%d/%m/%y %H:%M:%S', # '25/10/06 14:30:59'
'%d/%m/%y %H:%M:%S.%f', # '25/10/06 14:30:59.000200'
'%d/%m/%y %H:%M', # '25/10/06 14:30'
'%d/%m/%y', # '25/10/06'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
|
bt3gl/Project-Euler
|
refs/heads/master
|
016-power_digit_sum.py
|
2
|
#!/usr/bin/python3
# mari von steinkirch @2013
# steinkirch at gmail
def power_digit_sum(n):
number = str(2**n)
sum_res = 0
for i in number:
sum_res += int(i)
return sum_res
def test_():
assert(power_digit_sum(15) == 26)
print(power_digit_sum(1000))
print('Tests Passed!')
if __name__ == '__main__':
test_()
|
campbe13/openhatch
|
refs/heads/master
|
vendor/packages/zope.interface/src/zope/interface/common/interfaces.py
|
22
|
##############################################################################
#
# Copyright (c) 2003 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Interfaces for standard python exceptions
"""
from zope.interface import Interface
from zope.interface import classImplements
class IException(Interface): pass
class IStandardError(IException): pass
class IWarning(IException): pass
class ISyntaxError(IStandardError): pass
class ILookupError(IStandardError): pass
class IValueError(IStandardError): pass
class IRuntimeError(IStandardError): pass
class IArithmeticError(IStandardError): pass
class IAssertionError(IStandardError): pass
class IAttributeError(IStandardError): pass
class IDeprecationWarning(IWarning): pass
class IEOFError(IStandardError): pass
class IEnvironmentError(IStandardError): pass
class IFloatingPointError(IArithmeticError): pass
class IIOError(IEnvironmentError): pass
class IImportError(IStandardError): pass
class IIndentationError(ISyntaxError): pass
class IIndexError(ILookupError): pass
class IKeyError(ILookupError): pass
class IKeyboardInterrupt(IStandardError): pass
class IMemoryError(IStandardError): pass
class INameError(IStandardError): pass
class INotImplementedError(IRuntimeError): pass
class IOSError(IEnvironmentError): pass
class IOverflowError(IArithmeticError): pass
class IOverflowWarning(IWarning): pass
class IReferenceError(IStandardError): pass
class IRuntimeWarning(IWarning): pass
class IStopIteration(IException): pass
class ISyntaxWarning(IWarning): pass
class ISystemError(IStandardError): pass
class ISystemExit(IException): pass
class ITabError(IIndentationError): pass
class ITypeError(IStandardError): pass
class IUnboundLocalError(INameError): pass
class IUnicodeError(IValueError): pass
class IUserWarning(IWarning): pass
class IZeroDivisionError(IArithmeticError): pass
classImplements(ArithmeticError, IArithmeticError)
classImplements(AssertionError, IAssertionError)
classImplements(AttributeError, IAttributeError)
classImplements(DeprecationWarning, IDeprecationWarning)
classImplements(EnvironmentError, IEnvironmentError)
classImplements(EOFError, IEOFError)
classImplements(Exception, IException)
classImplements(FloatingPointError, IFloatingPointError)
classImplements(ImportError, IImportError)
classImplements(IndentationError, IIndentationError)
classImplements(IndexError, IIndexError)
classImplements(IOError, IIOError)
classImplements(KeyboardInterrupt, IKeyboardInterrupt)
classImplements(KeyError, IKeyError)
classImplements(LookupError, ILookupError)
classImplements(MemoryError, IMemoryError)
classImplements(NameError, INameError)
classImplements(NotImplementedError, INotImplementedError)
classImplements(OSError, IOSError)
classImplements(OverflowError, IOverflowError)
try:
classImplements(OverflowWarning, IOverflowWarning)
except NameError:
pass # OverflowWarning was removed in Python 2.5
classImplements(ReferenceError, IReferenceError)
classImplements(RuntimeError, IRuntimeError)
classImplements(RuntimeWarning, IRuntimeWarning)
try:
classImplements(StandardError, IStandardError)
except NameError:
pass # StandardError does not exist in Python 3
classImplements(StopIteration, IStopIteration)
classImplements(SyntaxError, ISyntaxError)
classImplements(SyntaxWarning, ISyntaxWarning)
classImplements(SystemError, ISystemError)
classImplements(SystemExit, ISystemExit)
classImplements(TabError, ITabError)
classImplements(TypeError, ITypeError)
classImplements(UnboundLocalError, IUnboundLocalError)
classImplements(UnicodeError, IUnicodeError)
classImplements(UserWarning, IUserWarning)
classImplements(ValueError, IValueError)
classImplements(Warning, IWarning)
classImplements(ZeroDivisionError, IZeroDivisionError)
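# Illustrative usage sketch (not part of the original module): after the
# classImplements() declarations above, instances of the built-in exceptions
# provide the corresponding interfaces, which can be checked with providedBy().
def _example_provided_by():
    err = ValueError("bad value")
    assert IValueError.providedBy(err)
    assert ILookupError.providedBy(KeyError("missing"))
    assert IException.providedBy(err)  # base interfaces are provided as well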
|
adammenges/statsmodels
|
refs/heads/master
|
statsmodels/sandbox/tsa/diffusion.py
|
31
|
'''getting started with diffusions, continuous time stochastic processes
Author: josef-pktd
License: BSD
References
----------
An Algorithmic Introduction to Numerical Simulation of Stochastic Differential
Equations
Author(s): Desmond J. Higham
Source: SIAM Review, Vol. 43, No. 3 (Sep., 2001), pp. 525-546
Published by: Society for Industrial and Applied Mathematics
Stable URL: http://www.jstor.org/stable/3649798
http://www.sitmo.com/ especially the formula collection
Notes
-----
OU process: use same trick for ARMA with constant (non-zero mean) and drift
some of the processes have easy multivariate extensions
*Open Issues*
include xzero in returned sample or not? currently not
*TODOS*
* Milstein from Higham paper, for which processes does it apply
* Maximum Likelihood estimation
* more statistical properties (useful for tests)
* helper functions for display and MonteCarlo summaries (also for testing/checking)
* more processes for the menagerie (e.g. from empirical papers)
* characteristic functions
* transformations, non-linear e.g. log
* special estimators, e.g. Ait Sahalia, empirical characteristic functions
* fft examples
* check naming of methods, "simulate", "sample", "simexact", ... ?
stochastic volatility models: estimation unclear
finance applications ? option pricing, interest rate models
'''
from __future__ import print_function
import numpy as np
from scipy import stats, signal
import matplotlib.pyplot as plt
#np.random.seed(987656789)
class Diffusion(object):
'''Wiener Process, Brownian Motion with mu=0 and sigma=1
'''
def __init__(self):
pass
def simulateW(self, nobs=100, T=1, dt=None, nrepl=1):
'''generate sample of Wiener Process
'''
dt = T*1.0/nobs
t = np.linspace(dt, 1, nobs)
dW = np.sqrt(dt)*np.random.normal(size=(nrepl, nobs))
W = np.cumsum(dW,1)
self.dW = dW
return W, t
def expectedsim(self, func, nobs=100, T=1, dt=None, nrepl=1):
'''get expectation of a function of a Wiener Process by simulation
initially test example from
'''
W, t = self.simulateW(nobs=nobs, T=T, dt=dt, nrepl=nrepl)
U = func(t, W)
Umean = U.mean(0)
return U, Umean, t
class AffineDiffusion(Diffusion):
'''
differential equation:
:math::
dx_t = f(t,x)dt + \sigma(t,x)dW_t
integral:
:math::
x_T = x_0 + \\int_{0}^{T}f(t,S)dt + \\int_0^T \\sigma(t,S)dW_t
TODO: check definition, affine, what about jump diffusion?
'''
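    # Illustrative note (sketch, not in the original): the Euler-Maruyama scheme that
    # simEM is based on (Higham, 2001) advances the state as
    #     X_{j+1} = X_j + f(t_j, X_j) * Dt + sigma(t_j, X_j) * DW_j,
    # where DW_j ~ N(0, Dt) is built by summing Tratio of the finer dW increments.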
def __init__(self):
pass
def sim(self, nobs=100, T=1, dt=None, nrepl=1):
# this doesn't look correct if drift or sig depend on x
# see arithmetic BM
W, t = self.simulateW(nobs=nobs, T=T, dt=dt, nrepl=nrepl)
dx = self._drift() + self._sig() * W
x = np.cumsum(dx,1)
xmean = x.mean(0)
return x, xmean, t
def simEM(self, xzero=None, nobs=100, T=1, dt=None, nrepl=1, Tratio=4):
'''
from Higham 2001
TODO: reverse parameterization to start with final nobs and DT
TODO: check if I can skip the loop using my way from exactprocess
problem might be Winc (reshape into 3d and sum)
TODO: (later) check memory efficiency for large simulations
'''
#TODO: reverse parameterization to start with final nobs and DT
nobs = nobs * Tratio # simple way to change parameter
# maybe wrong parameterization,
# drift too large, variance too small ? which dt/Dt
# _drift, _sig independent of dt is wrong
if xzero is None:
xzero = self.xzero
if dt is None:
dt = T*1.0/nobs
W, t = self.simulateW(nobs=nobs, T=T, dt=dt, nrepl=nrepl)
dW = self.dW
t = np.linspace(dt, 1, nobs)
        Dt = Tratio*dt
        L = nobs // Tratio          # L EM steps of size Dt = R*dt (integer division for array size)
        Xem = np.zeros((nrepl, L))  # preallocate for efficiency
Xtemp = xzero
Xem[:,0] = xzero
for j in np.arange(1,L):
#Winc = np.sum(dW[:,Tratio*(j-1)+1:Tratio*j],1)
Winc = np.sum(dW[:,np.arange(Tratio*(j-1)+1,Tratio*j)],1)
#Xtemp = Xtemp + Dt*lamda*Xtemp + mu*Xtemp*Winc;
Xtemp = Xtemp + self._drift(x=Xtemp) + self._sig(x=Xtemp) * Winc
#Dt*lamda*Xtemp + mu*Xtemp*Winc;
Xem[:,j] = Xtemp
return Xem
'''
R = 4; Dt = R*dt; L = N/R; % L EM steps of size Dt = R*dt
Xem = zeros(1,L); % preallocate for efficiency
Xtemp = Xzero;
for j = 1:L
Winc = sum(dW(R*(j-1)+1:R*j));
Xtemp = Xtemp + Dt*lambda*Xtemp + mu*Xtemp*Winc;
Xem(j) = Xtemp;
end
'''
class ExactDiffusion(AffineDiffusion):
'''Diffusion that has an exact integral representation
this is currently mainly for geometric, log processes
'''
def __init__(self):
pass
def exactprocess(self, xzero, nobs, ddt=1., nrepl=2):
'''ddt : discrete delta t
should be the same as an AR(1)
not tested yet
'''
t = np.linspace(ddt, nobs*ddt, nobs)
#expnt = np.exp(-self.lambd * t)
expddt = np.exp(-self.lambd * ddt)
normrvs = np.random.normal(size=(nrepl,nobs))
#do I need lfilter here AR(1) ? if mean reverting lag-coeff<1
#lfilter doesn't handle 2d arrays, it does?
inc = self._exactconst(expddt) + self._exactstd(expddt) * normrvs
return signal.lfilter([1.], [1.,-expddt], inc)
def exactdist(self, xzero, t):
expnt = np.exp(-self.lambd * t)
meant = xzero * expnt + self._exactconst(expnt)
stdt = self._exactstd(expnt)
return stats.norm(loc=meant, scale=stdt)
class ArithmeticBrownian(AffineDiffusion):
'''
:math::
dx_t &= \\mu dt + \\sigma dW_t
'''
def __init__(self, xzero, mu, sigma):
self.xzero = xzero
self.mu = mu
self.sigma = sigma
def _drift(self, *args, **kwds):
return self.mu
def _sig(self, *args, **kwds):
return self.sigma
def exactprocess(self, nobs, xzero=None, ddt=1., nrepl=2):
'''ddt : discrete delta t
not tested yet
'''
if xzero is None:
xzero = self.xzero
t = np.linspace(ddt, nobs*ddt, nobs)
normrvs = np.random.normal(size=(nrepl,nobs))
        # increment of arithmetic BM over ddt: mu*ddt + sigma*sqrt(ddt)*Z
        inc = self.mu * ddt + self.sigma * np.sqrt(ddt) * normrvs
#return signal.lfilter([1.], [1.,-1], inc)
return xzero + np.cumsum(inc,1)
def exactdist(self, xzero, t):
        # exact marginal distribution of arithmetic BM: x_t ~ N(xzero + mu*t, sigma**2 * t)
        meant = xzero + self.mu * t
        stdt = self.sigma * np.sqrt(t)
return stats.norm(loc=meant, scale=stdt)
class GeometricBrownian(AffineDiffusion):
'''Geometric Brownian Motion
:math::
dx_t &= \\mu x_t dt + \\sigma x_t dW_t
$x_t $ stochastic process of Geometric Brownian motion,
$\mu $ is the drift,
$\sigma $ is the Volatility,
$W$ is the Wiener process (Brownian motion).
'''
def __init__(self, xzero, mu, sigma):
self.xzero = xzero
self.mu = mu
self.sigma = sigma
def _drift(self, *args, **kwds):
x = kwds['x']
return self.mu * x
def _sig(self, *args, **kwds):
x = kwds['x']
return self.sigma * x
class OUprocess(AffineDiffusion):
'''Ornstein-Uhlenbeck
:math::
dx_t&=\\lambda(\\mu - x_t)dt+\\sigma dW_t
mean reverting process
TODO: move exact higher up in class hierarchy
'''
def __init__(self, xzero, mu, lambd, sigma):
self.xzero = xzero
self.lambd = lambd
self.mu = mu
self.sigma = sigma
def _drift(self, *args, **kwds):
x = kwds['x']
return self.lambd * (self.mu - x)
def _sig(self, *args, **kwds):
x = kwds['x']
return self.sigma * x
def exact(self, xzero, t, normrvs):
#TODO: aggregate over time for process with observations for all t
# i.e. exact conditional distribution for discrete time increment
# -> exactprocess
#TODO: for single t, return stats.norm -> exactdist
expnt = np.exp(-self.lambd * t)
return (xzero * expnt + self.mu * (1-expnt) +
self.sigma * np.sqrt((1-expnt*expnt)/2./self.lambd) * normrvs)
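    # Illustrative note: exact() above is the closed-form OU transition
    #     X_t = x_0 e^{-lambda t} + mu (1 - e^{-lambda t})
    #           + sigma * sqrt((1 - e^{-2 lambda t}) / (2 lambda)) * Z,   Z ~ N(0, 1),
    # i.e. X_t given x_0 is normal with that mean and standard deviation.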
def exactprocess(self, xzero, nobs, ddt=1., nrepl=2):
'''ddt : discrete delta t
should be the same as an AR(1)
not tested yet
# after writing this I saw the same use of lfilter in sitmo
'''
t = np.linspace(ddt, nobs*ddt, nobs)
expnt = np.exp(-self.lambd * t)
expddt = np.exp(-self.lambd * ddt)
normrvs = np.random.normal(size=(nrepl,nobs))
#do I need lfilter here AR(1) ? lfilter doesn't handle 2d arrays, it does?
from scipy import signal
#xzero * expnt
inc = ( self.mu * (1-expddt) +
self.sigma * np.sqrt((1-expddt*expddt)/2./self.lambd) * normrvs )
return signal.lfilter([1.], [1.,-expddt], inc)
def exactdist(self, xzero, t):
#TODO: aggregate over time for process with observations for all t
#TODO: for single t, return stats.norm
expnt = np.exp(-self.lambd * t)
meant = xzero * expnt + self.mu * (1-expnt)
stdt = self.sigma * np.sqrt((1-expnt*expnt)/2./self.lambd)
from scipy import stats
return stats.norm(loc=meant, scale=stdt)
def fitls(self, data, dt):
'''assumes data is 1d, univariate time series
formula from sitmo
'''
# brute force, no parameter estimation errors
nobs = len(data)-1
exog = np.column_stack((np.ones(nobs), data[:-1]))
parest, res, rank, sing = np.linalg.lstsq(exog, data[1:])
const, slope = parest
errvar = res/(nobs-2.)
lambd = -np.log(slope)/dt
sigma = np.sqrt(-errvar * 2.*np.log(slope)/ (1-slope**2)/dt)
mu = const / (1-slope)
return mu, lambd, sigma
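    # Illustrative derivation (sketch, not part of the original): sampled at step dt,
    # the exact OU process is an AR(1),
    #     x_{t+dt} = mu*(1 - s) + s*x_t + e_t,   s = exp(-lambd*dt),
    #     Var(e_t) = sigma**2 * (1 - s**2) / (2*lambd),
    # so the least-squares intercept and slope in fitls map back to the parameters as
    #     lambd = -log(s)/dt,  mu = const/(1 - s),  sigma**2 = Var(e_t)*2*lambd/(1 - s**2).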
class SchwartzOne(ExactDiffusion):
'''the Schwartz type 1 stochastic process
:math::
dx_t = \\kappa (\\mu - \\ln x_t) x_t dt + \\sigma x_tdW \\
The Schwartz type 1 process is a log of the Ornstein-Uhlenbeck stochastic
process.
'''
def __init__(self, xzero, mu, kappa, sigma):
self.xzero = xzero
self.mu = mu
self.kappa = kappa
self.lambd = kappa #alias until I fix exact
self.sigma = sigma
def _exactconst(self, expnt):
return (1-expnt) * (self.mu - self.sigma**2 / 2. /self.kappa)
def _exactstd(self, expnt):
return self.sigma * np.sqrt((1-expnt*expnt)/2./self.kappa)
def exactprocess(self, xzero, nobs, ddt=1., nrepl=2):
'''uses exact solution for log of process
'''
lnxzero = np.log(xzero)
        lnx = super(SchwartzOne, self).exactprocess(lnxzero, nobs, ddt=ddt, nrepl=nrepl)
return np.exp(lnx)
def exactdist(self, xzero, t):
expnt = np.exp(-self.lambd * t)
#TODO: check this is still wrong, just guessing
meant = np.log(xzero) * expnt + self._exactconst(expnt)
stdt = self._exactstd(expnt)
return stats.lognorm(loc=meant, scale=stdt)
def fitls(self, data, dt):
'''assumes data is 1d, univariate time series
formula from sitmo
'''
# brute force, no parameter estimation errors
nobs = len(data)-1
exog = np.column_stack((np.ones(nobs),np.log(data[:-1])))
parest, res, rank, sing = np.linalg.lstsq(exog, np.log(data[1:]))
const, slope = parest
errvar = res/(nobs-2.) #check denominator estimate, of sigma too low
kappa = -np.log(slope)/dt
sigma = np.sqrt(errvar * kappa / (1-np.exp(-2*kappa*dt)))
mu = const / (1-np.exp(-kappa*dt)) + sigma**2/2./kappa
if np.shape(mu)== (1,): mu = mu[0] # how to remove scalar array ?
if np.shape(sigma)== (1,): sigma = sigma[0]
#mu, kappa are good, sigma too small
return mu, kappa, sigma
class BrownianBridge(object):
def __init__(self):
pass
def simulate(self, x0, x1, nobs, nrepl=1, ddt=1., sigma=1.):
nobs=nobs+1
dt = ddt*1./nobs
t = np.linspace(dt, ddt-dt, nobs)
t = np.linspace(dt, ddt, nobs)
wm = [t/ddt, 1-t/ddt]
#wmi = wm[1]
#wm1 = x1*wm[0]
wmi = 1-dt/(ddt-t)
wm1 = x1*(dt/(ddt-t))
su = sigma* np.sqrt(t*(1-t)/ddt)
s = sigma* np.sqrt(dt*(ddt-t-dt)/(ddt-t))
x = np.zeros((nrepl, nobs))
x[:,0] = x0
rvs = s*np.random.normal(size=(nrepl,nobs))
for i in range(1,nobs):
x[:,i] = x[:,i-1]*wmi[i] + wm1[i] + rvs[:,i]
return x, t, su
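    # Illustrative note (sketch, not from the original): the loop in simulate uses the
    # sequential bridge construction
    #     X_{t+dt} | X_t, X_T = x1  ~  N( X_t + (x1 - X_t)*dt/(T - t),
    #                                     sigma**2 * dt * (T - t - dt)/(T - t) ),
    # with T = ddt; wmi and wm1 are the weights of that conditional mean and s its std.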
class CompoundPoisson(object):
'''nobs iid compound poisson distributions, not a process in time
'''
def __init__(self, lambd, randfn=np.random.normal):
if len(lambd) != len(randfn):
raise ValueError('lambd and randfn need to have the same number of elements')
self.nobj = len(lambd)
self.randfn = randfn
self.lambd = np.asarray(lambd)
def simulate(self, nobs, nrepl=1):
nobj = self.nobj
x = np.zeros((nrepl, nobs, nobj))
N = np.random.poisson(self.lambd[None,None,:], size=(nrepl,nobs,nobj))
for io in range(nobj):
randfnc = self.randfn[io]
nc = N[:,:,io]
#print nrepl,nobs,nc
#xio = randfnc(size=(nrepl,nobs,np.max(nc))).cumsum(-1)[np.arange(nrepl)[:,None],np.arange(nobs),nc-1]
rvs = randfnc(size=(nrepl,nobs,np.max(nc)))
print('rvs.sum()', rvs.sum(), rvs.shape)
xio = rvs.cumsum(-1)[np.arange(nrepl)[:,None],np.arange(nobs),nc-1]
#print xio.shape
x[:,:,io] = xio
x[N==0] = 0
return x, N
'''
randn('state',100) % set the state of randn
T = 1; N = 500; dt = T/N; t = [dt:dt:1];
M = 1000; % M paths simultaneously
dW = sqrt(dt)*randn(M,N); % increments
W = cumsum(dW,2); % cumulative sum
U = exp(repmat(t,[M 1]) + 0.5*W);
Umean = mean(U);
plot([0,t],[1,Umean],'b-'), hold on % plot mean over M paths
plot([0,t],[ones(5,1),U(1:5,:)],'r--'), hold off % plot 5 individual paths
xlabel('t','FontSize',16)
ylabel('U(t)','FontSize',16,'Rotation',0,'HorizontalAlignment','right')
legend('mean of 1000 paths','5 individual paths',2)
averr = norm((Umean - exp(9*t/8)),'inf') % sample error
'''
if __name__ == '__main__':
doplot = 1
nrepl = 1000
examples = []#['all']
if 'all' in examples:
w = Diffusion()
# Wiener Process
# ^^^^^^^^^^^^^^
ws = w.simulateW(1000, nrepl=nrepl)
if doplot:
plt.figure()
tmp = plt.plot(ws[0].T)
tmp = plt.plot(ws[0].mean(0), linewidth=2)
plt.title('Standard Brownian Motion (Wiener Process)')
func = lambda t, W: np.exp(t + 0.5*W)
us = w.expectedsim(func, nobs=500, nrepl=nrepl)
if doplot:
plt.figure()
tmp = plt.plot(us[0].T)
tmp = plt.plot(us[1], linewidth=2)
plt.title('Brownian Motion - exp')
#plt.show()
averr = np.linalg.norm(us[1] - np.exp(9*us[2]/8.), np.inf)
print(averr)
#print us[1][:10]
#print np.exp(9.*us[2][:10]/8.)
# Geometric Brownian
# ^^^^^^^^^^^^^^^^^^
gb = GeometricBrownian(xzero=1., mu=0.01, sigma=0.5)
gbs = gb.simEM(nobs=100, nrepl=100)
if doplot:
plt.figure()
tmp = plt.plot(gbs.T)
tmp = plt.plot(gbs.mean(0), linewidth=2)
plt.title('Geometric Brownian')
plt.figure()
tmp = plt.plot(np.log(gbs).T)
tmp = plt.plot(np.log(gbs.mean(0)), linewidth=2)
plt.title('Geometric Brownian - log-transformed')
ab = ArithmeticBrownian(xzero=1, mu=0.05, sigma=1)
abs = ab.simEM(nobs=100, nrepl=100)
if doplot:
plt.figure()
tmp = plt.plot(abs.T)
tmp = plt.plot(abs.mean(0), linewidth=2)
plt.title('Arithmetic Brownian')
# Ornstein-Uhlenbeck
# ^^^^^^^^^^^^^^^^^^
ou = OUprocess(xzero=2, mu=1, lambd=0.5, sigma=0.1)
ous = ou.simEM()
oue = ou.exact(1, 1, np.random.normal(size=(5,10)))
ou.exact(0, np.linspace(0,10,10/0.1), 0)
ou.exactprocess(0,10)
print(ou.exactprocess(0,10, ddt=0.1,nrepl=10).mean(0))
#the following looks good, approaches mu
oues = ou.exactprocess(0,100, ddt=0.1,nrepl=100)
if doplot:
plt.figure()
tmp = plt.plot(oues.T)
tmp = plt.plot(oues.mean(0), linewidth=2)
plt.title('Ornstein-Uhlenbeck')
# SchwartsOne
# ^^^^^^^^^^^
so = SchwartzOne(xzero=0, mu=1, kappa=0.5, sigma=0.1)
sos = so.exactprocess(0,50, ddt=0.1,nrepl=100)
print(sos.mean(0))
print(np.log(sos.mean(0)))
doplot = 1
if doplot:
plt.figure()
tmp = plt.plot(sos.T)
tmp = plt.plot(sos.mean(0), linewidth=2)
plt.title('Schwartz One')
print(so.fitls(sos[0,:],dt=0.1))
sos2 = so.exactprocess(0,500, ddt=0.1,nrepl=5)
print('true: mu=1, kappa=0.5, sigma=0.1')
for i in range(5):
print(so.fitls(sos2[i],dt=0.1))
# Brownian Bridge
# ^^^^^^^^^^^^^^^
bb = BrownianBridge()
#bbs = bb.sample(x0, x1, nobs, nrepl=1, ddt=1., sigma=1.)
bbs, t, wm = bb.simulate(0, 0.5, 99, nrepl=500, ddt=1., sigma=0.1)
if doplot:
plt.figure()
tmp = plt.plot(bbs.T)
tmp = plt.plot(bbs.mean(0), linewidth=2)
plt.title('Brownian Bridge')
plt.figure()
plt.plot(wm,'r', label='theoretical')
plt.plot(bbs.std(0), label='simulated')
plt.title('Brownian Bridge - Variance')
plt.legend()
# Compound Poisson
# ^^^^^^^^^^^^^^^^
cp = CompoundPoisson([1,1], [np.random.normal,np.random.normal])
cps = cp.simulate(nobs=20000,nrepl=3)
print(cps[0].sum(-1).sum(-1))
print(cps[0].sum())
print(cps[0].mean(-1).mean(-1))
print(cps[0].mean())
print(cps[1].size)
print(cps[1].sum())
#Note Y = sum^{N} X is compound poisson of iid x, then
#E(Y) = E(N)*E(X) eg. eq. (6.37) page 385 in http://ee.stanford.edu/~gray/sp.html
#plt.show()
|
Kunalpod/codewars
|
refs/heads/master
|
build_a_pile_of_cubes.py
|
1
|
#Kunal Gautam
#Codewars : @Kunalpod
#Problem name: Build a pile of Cubes
#Problem level: 6 kyu
def find_nb(m):
i = int((2*(m**0.5))**0.5)
if ((i**2)*((i+1)**2))//4 == m:
return i
return -1
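# Clarifying sketch (not part of the original solution): a pile of n cubes has total
# volume 1**3 + 2**3 + ... + n**3 = (n*(n+1)/2)**2, so n is approximately
# (2*sqrt(m))**0.5, which is the initial guess used above before verifying it exactly.
# Example: find_nb(1071225) == 45, because (45*46//2)**2 == 1035**2 == 1071225.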
|
hastalafiesta/Samsung_STE_Kernel
|
refs/heads/master
|
scripts/tracing/draw_functrace.py
|
14679
|
#!/usr/bin/python
"""
Copyright 2008 (c) Frederic Weisbecker <[email protected]>
Licensed under the terms of the GNU GPL License version 2
This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulting trace is processed into a tree to produce a more human
readable view of the call stack, drawn as a textual but hierarchical tree of
calls. Only the functions' names and the call time are provided.
Usage:
Be sure that you have CONFIG_FUNCTION_TRACER
# mount -t debugfs nodev /sys/kernel/debug
# echo function > /sys/kernel/debug/tracing/current_tracer
$ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
Wait some time, but not too much: the script is a bit slow.
Break the pipe (Ctrl + Z)
$ scripts/draw_functrace.py < raw_trace_func > draw_functrace
Then you have your drawn trace in draw_functrace
"""
import sys, re
class CallTree:
""" This class provides a tree representation of the functions
call stack. If a function has no parent in the kernel (interrupt,
syscall, kernel thread...) then it is attached to a virtual parent
called ROOT.
"""
ROOT = None
def __init__(self, func, time = None, parent = None):
self._func = func
self._time = time
if parent is None:
self._parent = CallTree.ROOT
else:
self._parent = parent
self._children = []
def calls(self, func, calltime):
""" If a function calls another one, call this method to insert it
into the tree at the appropriate place.
@return: A reference to the newly created child node.
"""
child = CallTree(func, calltime, self)
self._children.append(child)
return child
def getParent(self, func):
""" Retrieve the last parent of the current node that
		has the name given by func. If this function is not
		found among the parents, it is created as a new child of ROOT.
@return: A reference to the parent.
"""
tree = self
while tree != CallTree.ROOT and tree._func != func:
tree = tree._parent
if tree == CallTree.ROOT:
child = CallTree.ROOT.calls(func, None)
return child
return tree
def __repr__(self):
return self.__toString("", True)
def __toString(self, branch, lastChild):
if self._time is not None:
s = "%s----%s (%s)\n" % (branch, self._func, self._time)
else:
s = "%s----%s\n" % (branch, self._func)
i = 0
if lastChild:
branch = branch[:-1] + " "
while i < len(self._children):
if i != len(self._children) - 1:
s += "%s" % self._children[i].__toString(branch +\
" |", False)
else:
s += "%s" % self._children[i].__toString(branch +\
" |", True)
i += 1
return s
class BrokenLineException(Exception):
"""If the last line is not complete because of the pipe breakage,
we want to stop the processing and ignore this line.
"""
pass
class CommentLineException(Exception):
""" If the line is a comment (as in the beginning of the trace file),
just ignore it.
"""
pass
def parseLine(line):
line = line.strip()
if line.startswith("#"):
raise CommentLineException
m = re.match("[^]]+?\\] +([0-9.]+): (\\w+) <-(\\w+)", line)
if m is None:
raise BrokenLineException
return (m.group(1), m.group(2), m.group(3))
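# Illustrative example (hypothetical trace line, not taken from a real capture):
# a function-tracer line such as
#     "  bash-4251  [001]  1134.689611: dup_mm <-copy_mm"
# is parsed by parseLine into ('1134.689611', 'dup_mm', 'copy_mm'),
# i.e. (call time, callee, caller).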
def main():
CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
tree = CallTree.ROOT
for line in sys.stdin:
try:
calltime, callee, caller = parseLine(line)
except BrokenLineException:
break
except CommentLineException:
continue
tree = tree.getParent(caller)
tree = tree.calls(callee, calltime)
print CallTree.ROOT
if __name__ == "__main__":
main()
|
xzturn/tensorflow
|
refs/heads/master
|
tensorflow/python/eager/device_placement_test.py
|
3
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for device placement."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import remote
from tensorflow.python.eager import test
from tensorflow.python.framework import config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
class SoftDevicePlacementTest(test.TestCase, parameterized.TestCase):
def setUp(self):
super(SoftDevicePlacementTest, self).setUp()
context._reset_context()
config.set_soft_device_placement(enabled=True)
context.context().log_device_placement = True
@test_util.run_gpu_only
def testDefaultPlacement(self):
a = constant_op.constant(1)
b = constant_op.constant(2)
c = a + b
with ops.device('CPU'):
d = a + b
self.assertIn('GPU', c.device)
self.assertIn('CPU', d.device)
@test_util.run_gpu_only
def testUnsupportedDevice(self):
a = constant_op.constant(1)
b = constant_op.constant(2)
s = constant_op.constant(list('hello world'))
with ops.device('GPU:0'):
c = a + b
t = s[a]
self.assertIn('GPU:0', c.device)
self.assertIn('CPU', t.device)
@test_util.run_gpu_only
def testUnknownDevice(self):
a = constant_op.constant(1)
b = constant_op.constant(2)
with ops.device('GPU:42'):
c = a + b
self.assertIn('GPU:0', c.device)
def testNoGpu(self):
if test_util.is_gpu_available():
# CPU only test.
return
a = constant_op.constant(1)
b = constant_op.constant(2)
c = a + b
with ops.device('GPU'):
d = a + b
self.assertIn('CPU', c.device)
self.assertIn('CPU', d.device)
@test_util.run_gpu_only
def testNestedDeviceScope(self):
a = constant_op.constant(1)
b = constant_op.constant(2)
with ops.device('CPU:0'):
with ops.device('GPU:42'):
c = a + b
# We don't support nested device placement right now.
self.assertIn('GPU:0', c.device)
@parameterized.named_parameters(('float', 1.0, None),
('int32', [1], dtypes.int32),
('string', ['a'], None))
def testSoftPlacedCPUConstant(self, value, dtype):
with ops.device('GPU:0'):
a = constant_op.constant(value, dtype=dtype)
self.assertIn('CPU:0', a.device)
self.assertIn('CPU:0', a.backing_device)
class HardDevicePlacementTest(test.TestCase, parameterized.TestCase):
def setUp(self):
super(HardDevicePlacementTest, self).setUp()
context._reset_context()
config.set_soft_device_placement(enabled=False)
context.context().log_device_placement = True
self.assertEqual(config.get_soft_device_placement(), False)
self.assertEqual(context.context().soft_device_placement, False)
@test_util.run_gpu_only
def testIdentityCanCopy(self):
config.set_device_policy('explicit')
with ops.device('CPU:0'):
x = constant_op.constant(1.0)
self.assertIn('CPU:0', x.device)
self.assertIn('CPU:0', x.backing_device)
with ops.device('GPU:0'):
y = array_ops.identity(x)
self.assertIn('GPU:0', y.device)
self.assertIn('GPU:0', y.backing_device)
@parameterized.named_parameters(('float_cpu0', 'CPU:0', 1.0, None),
('int32_cpu0', 'CPU:0', [1], dtypes.int32),
('string_cpu0', 'CPU:0', ['a'], None),
('float_gpu0', 'GPU:0', 1.0, None),
('int32_gpu0', 'GPU:0', [1], dtypes.int32),
('string_gpu0', 'GPU:0', ['a'], None),
('float_gpu99', 'GPU:99', 1.0, None),
('int32_gpu99', 'GPU:99', [1], dtypes.int32),
('string_gpu99', 'GPU:99', ['a'], None))
def testHardPlacedCPUConstant(self, device, value, dtype):
with ops.device(device):
a = constant_op.constant(value, dtype=dtype)
self.assertIn('CPU:0', a.device)
self.assertIn('CPU:0', a.backing_device)
class ClusterPlacementTest(test.TestCase):
def setUp(self):
super(ClusterPlacementTest, self).setUp()
context._reset_context()
config.set_soft_device_placement(enabled=True)
context.context().log_device_placement = True
workers, _ = test_util.create_local_cluster(2, 0)
remote.connect_to_remote_host([workers[0].target, workers[1].target])
def testNotFullySpecifiedTask(self):
a = constant_op.constant(1)
b = constant_op.constant(2)
with ops.device('/job:worker'):
c = a + b
self.assertIn('/job:worker/replica:0/task:0', c.device)
def testRemoteUnknownDevice(self):
a = constant_op.constant(1)
b = constant_op.constant(2)
    # Right now we don't support soft device placement on remote workers.
with self.assertRaises(errors.InvalidArgumentError) as cm:
with ops.device('/job:worker/replica:0/task:0/device:GPU:42'):
c = a + b
del c
self.assertIn('unknown device', cm.exception.message)
def testUnknownDeviceInFunctionReturnUnknowDevice(self):
@def_function.function
def f():
with ops.device('GPU:42'):
return constant_op.constant(1) + constant_op.constant(2)
gpus = config.list_physical_devices('GPU')
if not gpus:
self.assertIn('CPU:0', f().device)
else:
self.assertIn('GPU:0', f().device)
def testUnknownDeviceInFunction(self):
@def_function.function
def f():
with ops.device('GPU:42'):
a = constant_op.constant(1) + constant_op.constant(2)
return a + constant_op.constant(2)
gpus = config.list_physical_devices('GPU')
if not gpus:
self.assertIn('CPU:0', f().device)
else:
self.assertIn('GPU:0', f().device)
if __name__ == '__main__':
test.main()
|
zentralopensource/zentral
|
refs/heads/main
|
zentral/core/actions/backends/base.py
|
1
|
from django import forms
from zentral.conf import contact_groups
class BaseActionForm(forms.Form):
def __init__(self, *args, **kwargs):
self.config_d = kwargs.pop("config_d")
super(BaseActionForm, self).__init__(*args, **kwargs)
def get_action_config_d(self):
return {k: v for k, v in self.cleaned_data.items() if v}
class BaseAction(object):
action_form_class = BaseActionForm
probe_config_template_name = "core/probes/_action_probe_config.html"
def __init__(self, config_d):
self.name = config_d.pop("action_name")
self.config_d = config_d
def can_be_updated(self):
return self.action_form_class != BaseActionForm
def get_action_form(self, action_config_d=None):
args = []
kwargs = {"config_d": self.config_d}
if action_config_d is not None:
args.append(action_config_d)
return self.action_form_class(*args, **kwargs)
@staticmethod
def get_probe_context_action_config_d(action_config_d):
"""prepare a dict for the display of the action_config_d in the probe view"""
pacd = {}
for key, val in action_config_d.items():
if not val:
continue
if isinstance(val, list):
val = ', '.join([str(v) for v in val])
pacd[key.replace("_", " ")] = val
return pacd
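    # Illustrative example (hypothetical values, not from the original code):
    #     get_probe_context_action_config_d({"contact_groups": ["ops", "dev"], "note": ""})
    # returns {"contact groups": "ops, dev"}: empty values are dropped, lists are joined,
    # and underscores in keys are replaced by spaces for display.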
class ContactGroupForm(BaseActionForm):
groups = forms.MultipleChoiceField(choices=[], required=True,
help_text="Select one or more configured contact groups")
def __init__(self, *args, **kwargs):
super(ContactGroupForm, self).__init__(*args, **kwargs)
self.fields['groups'].choices = [(g, g) for g in contact_groups]
|
blackPantherOS/packagemanagement
|
refs/heads/master
|
smartpm/smart/interfaces/qt4/mirrors.py
|
1
|
#-*- coding: utf-8 -*-
#
# Copyright (c) 2004 Conectiva, Inc.
#
# Written by Anders F Bjorklund <[email protected]>
#
# This file is part of Smart Package Manager.
#
# Smart Package Manager is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# Smart Package Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Smart Package Manager; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
from smart.interfaces.qt4 import getPixmap, centerWindow
from smart import *
from PyQt4 import QtGui as QtGui
from PyQt4 import QtCore as QtCore
class TextListViewItem(QtGui.QTreeWidgetItem):
def __init__(self, parent):
QtGui.QTreeWidgetItem.__init__(self, parent)
self._text = {}
self._oldtext = {}
def setText(self, col, text):
QtGui.QTreeWidgetItem.setText(self, col, text)
if col in self._text:
self._oldtext[col] = self._text[col]
self._text[col] = text
def oldtext(self, col):
return self._oldtext.get(col, None)
class QtMirrors(object):
def __init__(self, parent=None):
self._window = QtGui.QDialog(None)
self._window.setWindowIcon(QtGui.QIcon(getPixmap("smart")))
self._window.setWindowTitle(_("Mirrors"))
self._window.setModal(True)
self._window.setMinimumSize(600, 400)
layout = QtGui.QVBoxLayout(self._window)
#layout.setResizeMode(QtGui.QLayout.FreeResize)
vbox = QtGui.QWidget(self._window)
QtGui.QVBoxLayout(vbox)
vbox.layout().setMargin(10)
vbox.layout().setSpacing(10)
vbox.show()
layout.addWidget(vbox)
self._treeview = QtGui.QTreeWidget(vbox)
self._treeview.setHeaderHidden(True)
self._treeview.show()
vbox.layout().addWidget(self._treeview)
#self._treeview.addColumn(_("Mirror"))
self._treeview.setHeaderLabels([_("Mirror")])
QtCore.QObject.connect(self._treeview, QtCore.SIGNAL("itemChanged(QTreeWidgetItem *, int)"), self.itemChanged)
QtCore.QObject.connect(self._treeview, QtCore.SIGNAL("itemSelectionChanged()"), self.selectionChanged)
bbox = QtGui.QWidget(vbox)
QtGui.QHBoxLayout(bbox)
bbox.layout().setSpacing(10)
bbox.layout().addStretch(1)
bbox.show()
vbox.layout().addWidget(bbox)
button = QtGui.QPushButton(_("New"), bbox)
button.setEnabled(True)
button.setIcon(QtGui.QIcon(getPixmap("crystal-add")))
button.show()
QtCore.QObject.connect(button, QtCore.SIGNAL("clicked()"), self.newMirror)
self._newmirror = button
bbox.layout().addWidget(button)
button = QtGui.QPushButton(_("Delete"), bbox)
button.setEnabled(False)
button.setIcon(QtGui.QIcon(getPixmap("crystal-delete")))
button.show()
QtCore.QObject.connect(button, QtCore.SIGNAL("clicked()"), self.delMirror)
self._delmirror = button
bbox.layout().addWidget(button)
button = QtGui.QPushButton(_("Close"), bbox)
QtCore.QObject.connect(button, QtCore.SIGNAL("clicked()"), self._window, QtCore.SLOT("accept()"))
bbox.layout().addWidget(button)
button.setDefault(True)
def fill(self):
self._treeview.clear()
mirrors = sysconf.get("mirrors", {})
for origin in mirrors:
parent = TextListViewItem(self._treeview)
parent.setText(0, origin)
#parent.setRenameEnabled(0, True)
for mirror in mirrors[origin]:
item = TextListViewItem(parent)
item.setText(0, mirror)
#item.setRenameEnabled(0, True)
parent.setExpanded(True)
def show(self):
self.fill()
self._window.show()
centerWindow(self._window)
self._window.raise_()
self._window.exec_()
self._window.hide()
def newMirror(self):
item = self._treeview.selectedItems()
if item:
item = item[0]
if item.childCount() == 2:
item = item.parent()
origin = str(item.text(0))
else:
origin = ""
origin, mirror = MirrorCreator(self._window).show(origin)
if origin and mirror:
sysconf.add(("mirrors", origin), mirror, unique=True)
self.fill()
def delMirror(self):
item = self._treeview.selectedItems()
if not item:
return
item = item[0]
if item.parent() is None:
origin = str(item.text(0))
sysconf.remove(("mirrors", origin))
else:
print
mirror = str(item.text(0))
origin = str(item.parent().text(0))
print "%s %s" % (mirror, origin)
sysconf.remove(("mirrors", origin), mirror)
self.fill()
def selectionChanged(self):
item = self._treeview.selectedItems()
self._delmirror.setEnabled(bool(item))
def itemChanged(self, item, col):
newtext = item.text(col)
oldtext = item.oldtext(col)
if not oldtext:
return
if not item.parent():
if sysconf.has(("mirrors", str(newtext))):
iface.error(_("Origin already exists!"))
else:
sysconf.move(("mirrors", str(oldtext)), ("mirrors", str(newtext)))
else:
origin = item.parent().text(0)
if sysconf.has(("mirrors", str(origin)), str(newtext)):
iface.error(_("Mirror already exists!"))
else:
sysconf.remove(("mirrors", str(origin)), oldtext)
sysconf.add(("mirrors", str(origin)), str(newtext), unique=True)
class MirrorCreator(object):
def __init__(self, parent=None):
self._window = QtGui.QDialog(parent)
self._window.setWindowIcon(QtGui.QIcon(getPixmap("smart")))
self._window.setWindowTitle(_("New Mirror"))
self._window.setModal(True)
#self._window.setMinimumSize(600, 400)
vbox = QtGui.QWidget(self._window)
QtGui.QVBoxLayout(vbox)
vbox.layout().setMargin(10)
vbox.layout().setSpacing(10)
vbox.show()
table = QtGui.QWidget(vbox)
QtGui.QGridLayout(table)
table.layout().setSpacing(10)
table.show()
vbox.layout().addWidget(table)
label = QtGui.QLabel(_("Origin URL:"), table)
label.show()
table.layout().addWidget(label)
self._origin = QtGui.QLineEdit(table)
self._origin.setMaxLength(40)
self._origin.show()
table.layout().addWidget(self._origin)
label = QtGui.QLabel(_("Mirror URL:"), table)
label.show()
table.layout().addWidget(label)
self._mirror = QtGui.QLineEdit(table)
self._mirror.setMaxLength(40)
self._mirror.show()
table.layout().addWidget(self._mirror)
sep = QtGui.QFrame(vbox)
sep.setFrameStyle(QtGui.QFrame.HLine)
sep.show()
vbox.layout().addWidget(sep)
bbox = QtGui.QWidget(self._window)
QtGui.QHBoxLayout(bbox)
bbox.layout().setSpacing(10)
bbox.layout().addStretch(1)
bbox.show()
vbox.layout().addWidget(bbox)
button = QtGui.QPushButton(_("OK"), bbox)
QtCore.QObject.connect(button, QtCore.SIGNAL("clicked()"), self._window, QtCore.SLOT("accept()"))
bbox.layout().addWidget(button)
button = QtGui.QPushButton(_("Cancel"), bbox)
QtCore.QObject.connect(button, QtCore.SIGNAL("clicked()"), self._window, QtCore.SLOT("reject()"))
bbox.layout().addWidget(button)
vbox.adjustSize()
self._window.adjustSize()
def show(self, origin="", mirror=""):
self._origin.setText(origin)
self._mirror.setText(mirror)
origin = mirror = None
self._window.show()
self._window.raise_()
while True:
self._result = self._window.exec_()
if self._result == QtGui.QDialog.Accepted:
origin = str(self._origin.text()).strip()
if not origin:
iface.error(_("No origin provided!"))
continue
mirror = str(self._mirror.text()).strip()
if not mirror:
iface.error(_("No mirror provided!"))
continue
break
origin = mirror = None
break
self._window.hide()
return origin, mirror
# vim:ts=4:sw=4:et
|
googleapis/python-talent
|
refs/heads/master
|
samples/snippets/job_search_create_tenant.py
|
1
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START job_search_create_tenant]
from google.cloud import talent
import six
def create_tenant(project_id, external_id):
"""Create Tenant for scoping resources, e.g. companies and jobs"""
client = talent.TenantServiceClient()
# project_id = 'Your Google Cloud Project ID'
# external_id = 'Your Unique Identifier for Tenant'
if isinstance(project_id, six.binary_type):
project_id = project_id.decode("utf-8")
if isinstance(external_id, six.binary_type):
external_id = external_id.decode("utf-8")
parent = f"projects/{project_id}"
tenant = talent.Tenant(external_id=external_id)
response = client.create_tenant(parent=parent, tenant=tenant)
print("Created Tenant")
print(f"Name: {response.name}")
print(f"External ID: {response.external_id}")
return response.name
# [END job_search_create_tenant]
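# Illustrative call (placeholder values, not part of the original sample):
# create_tenant("my-project-id", "my-tenant-external-id")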
|
bluevoda/BloggyBlog
|
refs/heads/master
|
lib/python3.4/site-packages/django/contrib/gis/db/backends/mysql/base.py
|
444
|
from django.db.backends.mysql.base import \
DatabaseWrapper as MySQLDatabaseWrapper
from .features import DatabaseFeatures
from .introspection import MySQLIntrospection
from .operations import MySQLOperations
from .schema import MySQLGISSchemaEditor
class DatabaseWrapper(MySQLDatabaseWrapper):
SchemaEditorClass = MySQLGISSchemaEditor
def __init__(self, *args, **kwargs):
super(DatabaseWrapper, self).__init__(*args, **kwargs)
self.features = DatabaseFeatures(self)
self.ops = MySQLOperations(self)
self.introspection = MySQLIntrospection(self)
|
japeto/Vigtech-Services
|
refs/heads/master
|
env/lib/python2.7/site-packages/django/contrib/contenttypes/management.py
|
476
|
from django.apps import apps
from django.db import DEFAULT_DB_ALIAS, router
from django.utils import six
from django.utils.six.moves import input
def update_contenttypes(app_config, verbosity=2, interactive=True, using=DEFAULT_DB_ALIAS, **kwargs):
"""
Creates content types for models in the given app, removing any model
entries that no longer have a matching model class.
"""
if not app_config.models_module:
return
try:
ContentType = apps.get_model('contenttypes', 'ContentType')
except LookupError:
return
if not router.allow_migrate_model(using, ContentType):
return
ContentType.objects.clear_cache()
app_label = app_config.label
app_models = {
model._meta.model_name: model
for model in app_config.get_models()}
if not app_models:
return
# Get all the content types
content_types = {
ct.model: ct
for ct in ContentType.objects.using(using).filter(app_label=app_label)
}
to_remove = [
ct
for (model_name, ct) in six.iteritems(content_types)
if model_name not in app_models
]
cts = [
ContentType(
app_label=app_label,
model=model_name,
)
for (model_name, model) in six.iteritems(app_models)
if model_name not in content_types
]
ContentType.objects.using(using).bulk_create(cts)
if verbosity >= 2:
for ct in cts:
print("Adding content type '%s | %s'" % (ct.app_label, ct.model))
# Confirm that the content type is stale before deletion.
if to_remove:
if interactive:
content_type_display = '\n'.join(
' %s | %s' % (ct.app_label, ct.model)
for ct in to_remove
)
ok_to_delete = input("""The following content types are stale and need to be deleted:
%s
Any objects related to these content types by a foreign key will also
be deleted. Are you sure you want to delete these content types?
If you're unsure, answer 'no'.
Type 'yes' to continue, or 'no' to cancel: """ % content_type_display)
else:
ok_to_delete = False
if ok_to_delete == 'yes':
for ct in to_remove:
if verbosity >= 2:
print("Deleting stale content type '%s | %s'" % (ct.app_label, ct.model))
ct.delete()
else:
if verbosity >= 2:
print("Stale content types remain.")
|
Matt-Deacalion/django
|
refs/heads/master
|
django/conf/locale/el/formats.py
|
446
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'd/m/Y'
TIME_FORMAT = 'P'
DATETIME_FORMAT = 'd/m/Y P'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'd/m/Y'
SHORT_DATETIME_FORMAT = 'd/m/Y P'
FIRST_DAY_OF_WEEK = 0 # Sunday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
'%d/%m/%Y', '%d/%m/%y', '%Y-%m-%d', # '25/10/2006', '25/10/06', '2006-10-25',
]
DATETIME_INPUT_FORMATS = [
'%d/%m/%Y %H:%M:%S', # '25/10/2006 14:30:59'
'%d/%m/%Y %H:%M:%S.%f', # '25/10/2006 14:30:59.000200'
'%d/%m/%Y %H:%M', # '25/10/2006 14:30'
'%d/%m/%Y', # '25/10/2006'
'%d/%m/%y %H:%M:%S', # '25/10/06 14:30:59'
'%d/%m/%y %H:%M:%S.%f', # '25/10/06 14:30:59.000200'
'%d/%m/%y %H:%M', # '25/10/06 14:30'
'%d/%m/%y', # '25/10/06'
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '\xa0' # non-breaking space
NUMBER_GROUPING = 3
|
romain-dartigues/ansible
|
refs/heads/devel
|
test/units/modules/network/nxos/test_nxos_feature.py
|
68
|
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from units.compat.mock import patch
from ansible.modules.network.nxos import nxos_feature
from .nxos_module import TestNxosModule, load_fixture, set_module_args
class TestNxosFeatureModule(TestNxosModule):
module = nxos_feature
def setUp(self):
super(TestNxosFeatureModule, self).setUp()
self.mock_run_commands = patch('ansible.modules.network.nxos.nxos_feature.run_commands')
self.run_commands = self.mock_run_commands.start()
self.mock_load_config = patch('ansible.modules.network.nxos.nxos_feature.load_config')
self.load_config = self.mock_load_config.start()
self.mock_get_capabilities = patch('ansible.modules.network.nxos.nxos_feature.get_capabilities')
self.get_capabilities = self.mock_get_capabilities.start()
self.get_capabilities.return_value = {'network_api': 'cliconf'}
def tearDown(self):
super(TestNxosFeatureModule, self).tearDown()
self.mock_run_commands.stop()
self.mock_load_config.stop()
self.mock_get_capabilities.stop()
def load_fixtures(self, commands=None, device=''):
def load_from_file(*args, **kwargs):
module, commands = args
output = list()
for item in commands:
try:
obj = json.loads(item['command'])
command = obj['command']
except ValueError:
command = item['command']
filename = '%s.txt' % str(command).replace(' ', '_')
output.append(load_fixture('nxos_feature', filename))
return output
self.run_commands.side_effect = load_from_file
self.load_config.return_value = None
def test_nxos_feature_enable(self):
set_module_args(dict(feature='nve', state='enabled'))
result = self.execute_module(changed=True)
self.assertEqual(result['commands'], ['terminal dont-ask', 'feature nv overlay'])
def test_nxos_feature_disable(self):
set_module_args(dict(feature='ospf', state='disabled'))
result = self.execute_module(changed=True)
self.assertEqual(result['commands'], ['terminal dont-ask', 'no feature ospf'])
|
fabiobatalha/processing
|
refs/heads/master
|
export/kbart.py
|
1
|
# coding: utf-8
"""
This processing generates a tabulated list of journals following the KBART format.
Output format:
"Título do Periódico","ISSN impresso","ISSN online","Data do primeiro número","volume","número","Data do último número publicado","volume","número","url issues","ID SciELO"
"""
import argparse
import logging
import codecs
import utils
logger = logging.getLogger(__name__)
def _config_logging(logging_level='INFO', logging_file=None):
allowed_levels = {
'DEBUG': logging.DEBUG,
'INFO': logging.INFO,
'WARNING': logging.WARNING,
'ERROR': logging.ERROR,
'CRITICAL': logging.CRITICAL
}
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger.setLevel(allowed_levels.get(logging_level, 'INFO'))
if logging_file:
hl = logging.FileHandler(logging_file, mode='a')
else:
hl = logging.StreamHandler()
hl.setFormatter(formatter)
hl.setLevel(allowed_levels.get(logging_level, 'INFO'))
logger.addHandler(hl)
return logger
class Dumper(object):
def __init__(self, collection, issns=None, output_file=None):
self._ratchet = utils.ratchet_server()
self._articlemeta = utils.articlemeta_server()
self._publicationstats = utils.publicationstats_server()
self.collection = collection
self.issns = issns
self.output_file = codecs.open(output_file, 'w', encoding='utf-8') if output_file else output_file
header = [
u"Título do Periódico (publication_title)",
u"ISSN impresso (print_identifier)",
u"ISSN online (online_identifier)",
u"Data do primeiro fascículo (date_first_issue_online)",
u"volume do primeiro fascículo (num_first_vol_online)",
u"número do primeiro fascículo (num_first_issue_online)",
u"Data do último fascículo publicado (date_last_issue_online)",
u"volume do último fascículo publicado (num_last_vol_online)",
u"número do último fascículo publicado (num_last_issue_online)",
u"url de fascículos (title_url)",
u"primeiro autor (first_author)",
u"ID do periódico no SciELO (title_id)",
u"informação de embargo (embargo_info)",
u"cobertura (coverage_depth)",
u"informação sobre cobertura (coverage_notes)",
u"nome do publicador (publisher_name)",
u"tipo de publicação (publication_type)",
u"data de publicação monográfica impressa (date_monograph_published_print)",
u"data de publicação monográfica online (date_monograph_published_online)",
u"volume de monografia (monograph_volume)",
u"edição de monografia (monograph_edition)",
u"primeiro editor (first_editor)",
u"ID de publicação pai (parent_publication_title_id)",
u"ID de publicação prévia (preceding_publication_title_id)",
u"tipo de acesso (access_type)"
]
self.write(','.join(header))
def _first_included_document_by_journal(self, issn, collection):
fid = self._publicationstats.first_included_document_by_journal(issn, collection)
if not fid:
return None
document = self._articlemeta.document(fid['pid'], fid['collection'])
return document
def _last_included_document_by_journal(self, issn, collection):
lid = self._publicationstats.last_included_document_by_journal(issn, collection)
if not lid:
return None
document = self._articlemeta.document(lid['pid'], lid['collection'])
return document
def write(self, line):
if not self.output_file:
print(line.encode('utf-8'))
else:
self.output_file.write('%s\r\n' % line)
def run(self):
for item in self.items():
self.write(item)
def items(self):
if not self.issns:
self.issns = [None]
for issn in self.issns:
for data in self._articlemeta.journals(collection=self.collection, issn=issn):
logger.debug('Reading document: %s' % data.scielo_issn)
yield self.fmt_csv(data)
def fmt_csv(self, data):
line = []
first_document = self._first_included_document_by_journal(data.scielo_issn, data.collection_acronym)
last_document = self._last_included_document_by_journal(data.scielo_issn, data.collection_acronym)
line.append(data.title)
line.append(data.print_issn or '')
line.append(data.electronic_issn or '')
line.append(first_document.publication_date or '' if first_document else '')
line.append(first_document.volume or '' if first_document else '')
line.append(first_document.issue or '' if first_document else '')
if data.current_status != 'current':
line.append(last_document.publication_date or '' if last_document else '')
line.append(last_document.volume or '' if last_document else '')
line.append(last_document.issue or '' if last_document else '')
else:
line += ['', '', '']
line.append(data.url().replace('sci_serial', 'sci_issues'))
line.append('') # first_author
line.append(data.scielo_issn or '')
line.append('') # embargo_info
line.append('') # coverage_depth
line.append('') # coverage_notes
line.append(data.publisher_name if data.publisher_name else '') # publisher_name
line.append('Serial') # publication_type
line.append('') # date_monograph_published_print
line.append('') # date_monograph_published_online
line.append('') # monograph_volume
line.append('') # monograph_edition
line.append('') # first_editor
line.append('') # parent_publication_title_id
line.append('') # preceding_publication_title_id
line.append('F') # access_type
joined_line = ','.join(['"%s"' % i.replace('"', '""') for i in line])
return joined_line
def main():
parser = argparse.ArgumentParser(
        description='Export the journal list in KBART format'
)
parser.add_argument(
'issns',
nargs='*',
        help='ISSNs separated by spaces'
)
parser.add_argument(
'--collection',
'-c',
help='Collection Acronym'
)
parser.add_argument(
'--output_file',
'-r',
help='File to receive the dumped data'
)
parser.add_argument(
'--logging_file',
'-o',
help='Full path to the log file'
)
parser.add_argument(
'--logging_level',
'-l',
default='DEBUG',
choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
        help='Logging level'
)
args = parser.parse_args()
_config_logging(args.logging_level, args.logging_file)
logger.info('Dumping data for: %s' % args.collection)
issns = None
if len(args.issns) > 0:
issns = utils.ckeck_given_issns(args.issns)
dumper = Dumper(args.collection, issns, args.output_file)
dumper.run()
|
jimsize/PySolFC
|
refs/heads/master
|
pysollib/winsystems/__init__.py
|
2
|
#!/usr/bin/env python
# -*- mode: python; coding: utf-8; -*-
# ---------------------------------------------------------------------------##
#
# Copyright (C) 1998-2003 Markus Franz Xaver Johannes Oberhumer
# Copyright (C) 2003 Mt. Hood Playing Card Co.
# Copyright (C) 2005-2009 Skomoroh
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# ---------------------------------------------------------------------------##
from pysollib.settings import WIN_SYSTEM
if WIN_SYSTEM == 'win32':
from pysollib.winsystems import win32 as gui
elif WIN_SYSTEM == 'aqua':
from pysollib.winsystems import aqua as gui
else: # 'x11'
from pysollib.winsystems import x11 as gui
init_root_window = gui.init_root_window
TkSettings = gui.TkSettings
|
ualikhansars/Gwent
|
refs/heads/master
|
lib/python2.7/site-packages/pip/req/req_set.py
|
338
|
from __future__ import absolute_import
from collections import defaultdict
from itertools import chain
import logging
import os
from pip._vendor import pkg_resources
from pip._vendor import requests
from pip.compat import expanduser
from pip.download import (is_file_url, is_dir_url, is_vcs_url, url_to_path,
unpack_url)
from pip.exceptions import (InstallationError, BestVersionAlreadyInstalled,
DistributionNotFound, PreviousBuildDirError,
HashError, HashErrors, HashUnpinned,
DirectoryUrlHashUnsupported, VcsHashUnsupported,
UnsupportedPythonVersion)
from pip.req.req_install import InstallRequirement
from pip.utils import (
display_path, dist_in_usersite, ensure_dir, normalize_path)
from pip.utils.hashes import MissingHashes
from pip.utils.logging import indent_log
from pip.utils.packaging import check_dist_requires_python
from pip.vcs import vcs
from pip.wheel import Wheel
logger = logging.getLogger(__name__)
class Requirements(object):
def __init__(self):
self._keys = []
self._dict = {}
def keys(self):
return self._keys
def values(self):
return [self._dict[key] for key in self._keys]
def __contains__(self, item):
return item in self._keys
def __setitem__(self, key, value):
if key not in self._keys:
self._keys.append(key)
self._dict[key] = value
def __getitem__(self, key):
return self._dict[key]
def __repr__(self):
values = ['%s: %s' % (repr(k), repr(self[k])) for k in self.keys()]
return 'Requirements({%s})' % ', '.join(values)
class DistAbstraction(object):
"""Abstracts out the wheel vs non-wheel prepare_files logic.
The requirements for anything installable are as follows:
- we must be able to determine the requirement name
(or we can't correctly handle the non-upgrade case).
- we must be able to generate a list of run-time dependencies
without installing any additional packages (or we would
have to either burn time by doing temporary isolated installs
         or alternatively violate pip's 'don't start installing unless
all requirements are available' rule - neither of which are
desirable).
- for packages with setup requirements, we must also be able
to determine their requirements without installing additional
packages (for the same reason as run-time dependencies)
- we must be able to create a Distribution object exposing the
above metadata.
"""
def __init__(self, req_to_install):
self.req_to_install = req_to_install
def dist(self, finder):
"""Return a setuptools Dist object."""
raise NotImplementedError(self.dist)
def prep_for_dist(self):
"""Ensure that we can get a Dist for this requirement."""
raise NotImplementedError(self.dist)
def make_abstract_dist(req_to_install):
"""Factory to make an abstract dist object.
Preconditions: Either an editable req with a source_dir, or satisfied_by or
a wheel link, or a non-editable req with a source_dir.
:return: A concrete DistAbstraction.
"""
if req_to_install.editable:
return IsSDist(req_to_install)
elif req_to_install.link and req_to_install.link.is_wheel:
return IsWheel(req_to_install)
else:
return IsSDist(req_to_install)
class IsWheel(DistAbstraction):
def dist(self, finder):
return list(pkg_resources.find_distributions(
self.req_to_install.source_dir))[0]
def prep_for_dist(self):
# FIXME:https://github.com/pypa/pip/issues/1112
pass
class IsSDist(DistAbstraction):
def dist(self, finder):
dist = self.req_to_install.get_dist()
# FIXME: shouldn't be globally added:
if dist.has_metadata('dependency_links.txt'):
finder.add_dependency_links(
dist.get_metadata_lines('dependency_links.txt')
)
return dist
def prep_for_dist(self):
self.req_to_install.run_egg_info()
self.req_to_install.assert_source_matches_version()
class Installed(DistAbstraction):
def dist(self, finder):
return self.req_to_install.satisfied_by
def prep_for_dist(self):
pass
class RequirementSet(object):
def __init__(self, build_dir, src_dir, download_dir, upgrade=False,
upgrade_strategy=None, ignore_installed=False, as_egg=False,
target_dir=None, ignore_dependencies=False,
force_reinstall=False, use_user_site=False, session=None,
pycompile=True, isolated=False, wheel_download_dir=None,
wheel_cache=None, require_hashes=False,
ignore_requires_python=False):
"""Create a RequirementSet.
:param wheel_download_dir: Where still-packed .whl files should be
written to. If None they are written to the download_dir parameter.
Separate to download_dir to permit only keeping wheel archives for
pip wheel.
:param download_dir: Where still packed archives should be written to.
If None they are not saved, and are deleted immediately after
unpacking.
:param wheel_cache: The pip wheel cache, for passing to
InstallRequirement.
"""
if session is None:
raise TypeError(
"RequirementSet() missing 1 required keyword argument: "
"'session'"
)
self.build_dir = build_dir
self.src_dir = src_dir
# XXX: download_dir and wheel_download_dir overlap semantically and may
# be combined if we're willing to have non-wheel archives present in
# the wheelhouse output by 'pip wheel'.
self.download_dir = download_dir
self.upgrade = upgrade
self.upgrade_strategy = upgrade_strategy
self.ignore_installed = ignore_installed
self.force_reinstall = force_reinstall
self.requirements = Requirements()
# Mapping of alias: real_name
self.requirement_aliases = {}
self.unnamed_requirements = []
self.ignore_dependencies = ignore_dependencies
self.ignore_requires_python = ignore_requires_python
self.successfully_downloaded = []
self.successfully_installed = []
self.reqs_to_cleanup = []
self.as_egg = as_egg
self.use_user_site = use_user_site
self.target_dir = target_dir # set from --target option
self.session = session
self.pycompile = pycompile
self.isolated = isolated
if wheel_download_dir:
wheel_download_dir = normalize_path(wheel_download_dir)
self.wheel_download_dir = wheel_download_dir
self._wheel_cache = wheel_cache
self.require_hashes = require_hashes
# Maps from install_req -> dependencies_of_install_req
self._dependencies = defaultdict(list)
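# Illustrative sketch (not part of pip): a minimal construction. The keyword
# names mirror the signature above; `session` is assumed to be a pip
# PipSession (or any requests-compatible session object).
#
#   requirement_set = RequirementSet(
#       build_dir='/tmp/pip-build',
#       src_dir='/tmp/pip-src',
#       download_dir=None,
#       session=session,
#   )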
def __str__(self):
reqs = [req for req in self.requirements.values()
if not req.comes_from]
reqs.sort(key=lambda req: req.name.lower())
return ' '.join([str(req.req) for req in reqs])
def __repr__(self):
reqs = [req for req in self.requirements.values()]
reqs.sort(key=lambda req: req.name.lower())
reqs_str = ', '.join([str(req.req) for req in reqs])
return ('<%s object; %d requirement(s): %s>'
% (self.__class__.__name__, len(reqs), reqs_str))
def add_requirement(self, install_req, parent_req_name=None,
extras_requested=None):
"""Add install_req as a requirement to install.
:param parent_req_name: The name of the requirement that needed this
added. The name is used because when multiple unnamed requirements
resolve to the same name, we could otherwise end up with dependency
links that point outside the Requirements set. parent_req must
already be added. Note that None implies that this is a user
supplied requirement, vs an inferred one.
:param extras_requested: an iterable of extras used to evaluate the
environment markers.
:return: Additional requirements to scan. That is either [] if
the requirement is not applicable, or [install_req] if the
requirement is applicable and has just been added.
"""
name = install_req.name
if not install_req.match_markers(extras_requested):
logger.warning("Ignoring %s: markers '%s' don't match your "
"environment", install_req.name,
install_req.markers)
return []
# This check has to come after we filter requirements with the
# environment markers.
if install_req.link and install_req.link.is_wheel:
wheel = Wheel(install_req.link.filename)
if not wheel.supported():
raise InstallationError(
"%s is not a supported wheel on this platform." %
wheel.filename
)
install_req.as_egg = self.as_egg
install_req.use_user_site = self.use_user_site
install_req.target_dir = self.target_dir
install_req.pycompile = self.pycompile
install_req.is_direct = (parent_req_name is None)
if not name:
# url or path requirement w/o an egg fragment
self.unnamed_requirements.append(install_req)
return [install_req]
else:
try:
existing_req = self.get_requirement(name)
except KeyError:
existing_req = None
if (parent_req_name is None and existing_req and not
existing_req.constraint and
existing_req.extras == install_req.extras and not
existing_req.req.specifier == install_req.req.specifier):
raise InstallationError(
'Double requirement given: %s (already in %s, name=%r)'
% (install_req, existing_req, name))
if not existing_req:
# Add requirement
self.requirements[name] = install_req
# FIXME: what about other normalizations? E.g., _ vs. -?
if name.lower() != name:
self.requirement_aliases[name.lower()] = name
result = [install_req]
else:
# Assume there's no need to scan, and that we've already
# encountered this for scanning.
result = []
if not install_req.constraint and existing_req.constraint:
if (install_req.link and not (existing_req.link and
install_req.link.path == existing_req.link.path)):
self.reqs_to_cleanup.append(install_req)
raise InstallationError(
"Could not satisfy constraints for '%s': "
"installation from path or url cannot be "
"constrained to a version" % name)
# If we're now installing a constraint, mark the existing
# object for real installation.
existing_req.constraint = False
existing_req.extras = tuple(
sorted(set(existing_req.extras).union(
set(install_req.extras))))
logger.debug("Setting %s extras to: %s",
existing_req, existing_req.extras)
# And now we need to scan this.
result = [existing_req]
# Canonicalise to the already-added object for the backref
# check below.
install_req = existing_req
if parent_req_name:
parent_req = self.get_requirement(parent_req_name)
self._dependencies[parent_req].append(install_req)
return result
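# Illustrative sketch (not part of pip): the return value of add_requirement
# drives discovery -- an applicable requirement comes back in a list so the
# caller can scan it, while one filtered out by markers yields [].
#
#   to_scan = requirement_set.add_requirement(install_req)
#   for req in to_scan:
#       pass  # later prepared via _prepare_file(), which adds its dependencies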
def has_requirement(self, project_name):
name = project_name.lower()
if (name in self.requirements and
not self.requirements[name].constraint or
name in self.requirement_aliases and
not self.requirements[self.requirement_aliases[name]].constraint):
return True
return False
@property
def has_requirements(self):
return list(req for req in self.requirements.values() if not
req.constraint) or self.unnamed_requirements
@property
def is_download(self):
if self.download_dir:
self.download_dir = expanduser(self.download_dir)
if os.path.exists(self.download_dir):
return True
else:
logger.critical('Could not find download directory')
raise InstallationError(
"Could not find or access download directory '%s'"
% display_path(self.download_dir))
return False
def get_requirement(self, project_name):
for name in project_name, project_name.lower():
if name in self.requirements:
return self.requirements[name]
if name in self.requirement_aliases:
return self.requirements[self.requirement_aliases[name]]
raise KeyError("No project with the name %r" % project_name)
def uninstall(self, auto_confirm=False):
for req in self.requirements.values():
if req.constraint:
continue
req.uninstall(auto_confirm=auto_confirm)
req.commit_uninstall()
def prepare_files(self, finder):
"""
Prepare process. Create temp directories, download and/or unpack files.
"""
# make the wheelhouse
if self.wheel_download_dir:
ensure_dir(self.wheel_download_dir)
# If any top-level requirement has a hash specified, enter
# hash-checking mode, which requires hashes from all.
root_reqs = self.unnamed_requirements + self.requirements.values()
require_hashes = (self.require_hashes or
any(req.has_hash_options for req in root_reqs))
if require_hashes and self.as_egg:
raise InstallationError(
'--egg is not allowed with --require-hashes mode, since it '
'delegates dependency resolution to setuptools and could thus '
'result in installation of unhashed packages.')
# Actually prepare the files, and collect any exceptions. Most hash
# exceptions cannot be checked ahead of time, because
# req.populate_link() needs to be called before we can make decisions
# based on link type.
discovered_reqs = []
hash_errors = HashErrors()
for req in chain(root_reqs, discovered_reqs):
try:
discovered_reqs.extend(self._prepare_file(
finder,
req,
require_hashes=require_hashes,
ignore_dependencies=self.ignore_dependencies))
except HashError as exc:
exc.req = req
hash_errors.append(exc)
if hash_errors:
raise hash_errors
def _is_upgrade_allowed(self, req):
return self.upgrade and (
self.upgrade_strategy == "eager" or (
self.upgrade_strategy == "only-if-needed" and req.is_direct
)
)
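# Illustrative summary (not part of pip) of the logic above:
#
#   upgrade  strategy          is_direct  -> upgrade allowed
#   False    (any)             (any)      -> False
#   True     "eager"           (any)      -> True
#   True     "only-if-needed"  True       -> True
#   True     "only-if-needed"  False      -> False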
def _check_skip_installed(self, req_to_install, finder):
"""Check if req_to_install should be skipped.
This will check if the req is installed, and whether we should upgrade
or reinstall it, taking into account all the relevant user options.
After calling this req_to_install will only have satisfied_by set to
None if the req_to_install is to be upgraded/reinstalled etc. Any
other value will be a dist recording the current thing installed that
satisfies the requirement.
Note that for vcs urls and the like we can't assess skipping in this
routine - we simply identify that we need to pull the thing down,
then later on it is pulled down and introspected to assess upgrade/
reinstalls etc.
:return: A text reason for why it was skipped, or None.
"""
# Check whether to upgrade/reinstall this req or not.
req_to_install.check_if_exists()
if req_to_install.satisfied_by:
upgrade_allowed = self._is_upgrade_allowed(req_to_install)
# Check whether the best version is already installed.
best_installed = False
if upgrade_allowed:
# For link-based requirements we have to pull the
# tree down and inspect it to assess the version number, so
# that is handled further down.
if not (self.force_reinstall or req_to_install.link):
try:
finder.find_requirement(
req_to_install, upgrade_allowed)
except BestVersionAlreadyInstalled:
best_installed = True
except DistributionNotFound:
# No distribution found, so we squash the
# error - it will be raised later when we
# retry the install.
# Why don't we just raise here?
pass
if not best_installed:
# don't uninstall conflict if user install and
# conflict is not user install
if not (self.use_user_site and not
dist_in_usersite(req_to_install.satisfied_by)):
req_to_install.conflicts_with = \
req_to_install.satisfied_by
req_to_install.satisfied_by = None
# Figure out a nice message to say why we're skipping this.
if best_installed:
skip_reason = 'already up-to-date'
elif self.upgrade_strategy == "only-if-needed":
skip_reason = 'not upgraded as not directly required'
else:
skip_reason = 'already satisfied'
return skip_reason
else:
return None
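# Illustrative summary (not part of pip) of the skip reasons returned above
# (None is returned only when nothing satisfying the req is installed):
#   'already up-to-date'                    -- best version already installed
#   'not upgraded as not directly required' -- only-if-needed, indirect req
#   'already satisfied'                     -- installed; no upgrade requested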
def _prepare_file(self,
finder,
req_to_install,
require_hashes=False,
ignore_dependencies=False):
"""Prepare a single requirements file.
:return: A list of additional InstallRequirements to also install.
"""
# Tell user what we are doing for this requirement:
# obtain (editable), skipping, processing (local url), collecting
# (remote url or package name)
if req_to_install.constraint or req_to_install.prepared:
return []
req_to_install.prepared = True
# ###################### #
# # print log messages # #
# ###################### #
if req_to_install.editable:
logger.info('Obtaining %s', req_to_install)
else:
# satisfied_by is only evaluated by calling _check_skip_installed,
# so it must be None here.
assert req_to_install.satisfied_by is None
if not self.ignore_installed:
skip_reason = self._check_skip_installed(
req_to_install, finder)
if req_to_install.satisfied_by:
assert skip_reason is not None, (
'_check_skip_installed returned None but '
'req_to_install.satisfied_by is set to %r'
% (req_to_install.satisfied_by,))
logger.info(
'Requirement %s: %s', skip_reason,
req_to_install)
else:
if (req_to_install.link and
req_to_install.link.scheme == 'file'):
path = url_to_path(req_to_install.link.url)
logger.info('Processing %s', display_path(path))
else:
logger.info('Collecting %s', req_to_install)
with indent_log():
# ################################ #
# # vcs update or unpack archive # #
# ################################ #
if req_to_install.editable:
if require_hashes:
raise InstallationError(
'The editable requirement %s cannot be installed when '
'requiring hashes, because there is no single file to '
'hash.' % req_to_install)
req_to_install.ensure_has_source_dir(self.src_dir)
req_to_install.update_editable(not self.is_download)
abstract_dist = make_abstract_dist(req_to_install)
abstract_dist.prep_for_dist()
if self.is_download:
req_to_install.archive(self.download_dir)
req_to_install.check_if_exists()
elif req_to_install.satisfied_by:
if require_hashes:
logger.debug(
'Since it is already installed, we are trusting this '
'package without checking its hash. To ensure a '
'completely repeatable environment, install into an '
'empty virtualenv.')
abstract_dist = Installed(req_to_install)
else:
# @@ if filesystem packages are not marked
# editable in a req, a non deterministic error
# occurs when the script attempts to unpack the
# build directory
req_to_install.ensure_has_source_dir(self.build_dir)
# If a checkout exists, it's unwise to keep going. Version
# inconsistencies are logged later, but do not fail the
# installation.
# FIXME: this won't upgrade when there's an existing
# package unpacked in `req_to_install.source_dir`
if os.path.exists(
os.path.join(req_to_install.source_dir, 'setup.py')):
raise PreviousBuildDirError(
"pip can't proceed with requirements '%s' due to a"
" pre-existing build directory (%s). This is "
"likely due to a previous installation that failed"
". pip is being responsible and not assuming it "
"can delete this. Please delete it and try again."
% (req_to_install, req_to_install.source_dir)
)
req_to_install.populate_link(
finder,
self._is_upgrade_allowed(req_to_install),
require_hashes
)
# We can't hit this spot and have populate_link return None.
# req_to_install.satisfied_by is None here (because we're
# guarded) and upgrade has no impact except when satisfied_by
# is not None.
# Then inside find_requirement existing_applicable -> False
# If no new versions are found, DistributionNotFound is raised,
# otherwise a result is guaranteed.
assert req_to_install.link
link = req_to_install.link
# Now that we have the real link, we can tell what kind of
# requirements we have and raise some more informative errors
# than otherwise. (For example, we can raise VcsHashUnsupported
# for a VCS URL rather than HashMissing.)
if require_hashes:
# We could check these first 2 conditions inside
# unpack_url and save repetition of conditions, but then
# we would report less-useful error messages for
# unhashable requirements, complaining that there's no
# hash provided.
if is_vcs_url(link):
raise VcsHashUnsupported()
elif is_file_url(link) and is_dir_url(link):
raise DirectoryUrlHashUnsupported()
if (not req_to_install.original_link and
not req_to_install.is_pinned):
# Unpinned packages are asking for trouble when a new
# version is uploaded. This isn't a security check, but
# it saves users a surprising hash mismatch in the
# future.
#
# file:/// URLs aren't pinnable, so don't complain
# about them not being pinned.
raise HashUnpinned()
hashes = req_to_install.hashes(
trust_internet=not require_hashes)
if require_hashes and not hashes:
# Known-good hashes are missing for this requirement, so
# shim it with a facade object that will provoke hash
# computation and then raise a HashMissing exception
# showing the user what the hash should be.
hashes = MissingHashes()
try:
download_dir = self.download_dir
# We always delete unpacked sdists after pip has run.
autodelete_unpacked = True
if req_to_install.link.is_wheel \
and self.wheel_download_dir:
# When doing 'pip wheel' we download wheels to a
# dedicated dir.
download_dir = self.wheel_download_dir
if req_to_install.link.is_wheel:
if download_dir:
# When downloading, we only unpack wheels to get
# metadata.
autodelete_unpacked = True
else:
# When installing a wheel, we use the unpacked
# wheel.
autodelete_unpacked = False
unpack_url(
req_to_install.link, req_to_install.source_dir,
download_dir, autodelete_unpacked,
session=self.session, hashes=hashes)
except requests.HTTPError as exc:
logger.critical(
'Could not install requirement %s because '
'of error %s',
req_to_install,
exc,
)
raise InstallationError(
'Could not install requirement %s because '
'of HTTP error %s for URL %s' %
(req_to_install, exc, req_to_install.link)
)
abstract_dist = make_abstract_dist(req_to_install)
abstract_dist.prep_for_dist()
if self.is_download:
# Make a .zip of the source_dir we already created.
if req_to_install.link.scheme in vcs.all_schemes:
req_to_install.archive(self.download_dir)
# req_to_install.req is only available after unpacking for URL
# packages, so repeat check_if_exists to uninstall on upgrade
# (#14)
if not self.ignore_installed:
req_to_install.check_if_exists()
if req_to_install.satisfied_by:
if self.upgrade or self.ignore_installed:
# don't uninstall conflict if user install and
# conflict is not user install
if not (self.use_user_site and not
dist_in_usersite(
req_to_install.satisfied_by)):
req_to_install.conflicts_with = \
req_to_install.satisfied_by
req_to_install.satisfied_by = None
else:
logger.info(
'Requirement already satisfied (use '
'--upgrade to upgrade): %s',
req_to_install,
)
# ###################### #
# # parse dependencies # #
# ###################### #
dist = abstract_dist.dist(finder)
try:
check_dist_requires_python(dist)
except UnsupportedPythonVersion as e:
if self.ignore_requires_python:
logger.warning(e.args[0])
else:
req_to_install.remove_temporary_source()
raise
more_reqs = []
def add_req(subreq, extras_requested):
sub_install_req = InstallRequirement(
str(subreq),
req_to_install,
isolated=self.isolated,
wheel_cache=self._wheel_cache,
)
more_reqs.extend(self.add_requirement(
sub_install_req, req_to_install.name,
extras_requested=extras_requested))
# We add req_to_install before its dependencies, so that we
# can refer to it when adding dependencies.
if not self.has_requirement(req_to_install.name):
# 'unnamed' requirements will get added here
self.add_requirement(req_to_install, None)
if not ignore_dependencies:
if (req_to_install.extras):
logger.debug(
"Installing extra requirements: %r",
','.join(req_to_install.extras),
)
missing_requested = sorted(
set(req_to_install.extras) - set(dist.extras)
)
for missing in missing_requested:
logger.warning(
'%s does not provide the extra \'%s\'',
dist, missing
)
available_requested = sorted(
set(dist.extras) & set(req_to_install.extras)
)
for subreq in dist.requires(available_requested):
add_req(subreq, extras_requested=available_requested)
# cleanup tmp src
self.reqs_to_cleanup.append(req_to_install)
if not req_to_install.editable and not req_to_install.satisfied_by:
# XXX: --no-install leads this to report 'Successfully
# downloaded' for only non-editable reqs, even though we took
# action on them.
self.successfully_downloaded.append(req_to_install)
return more_reqs
def cleanup_files(self):
"""Clean up files, remove builds."""
logger.debug('Cleaning up...')
with indent_log():
for req in self.reqs_to_cleanup:
req.remove_temporary_source()
def _to_install(self):
"""Create the installation order.
The installation order is topological - requirements are installed
before the requiring thing. We break cycles at an arbitrary point,
and make no other guarantees.
"""
# The current implementation, which we may change at any point
# installs the user specified things in the order given, except when
# dependencies must come earlier to achieve topological order.
order = []
ordered_reqs = set()
def schedule(req):
if req.satisfied_by or req in ordered_reqs:
return
if req.constraint:
return
ordered_reqs.add(req)
for dep in self._dependencies[req]:
schedule(dep)
order.append(req)
for install_req in self.requirements.values():
schedule(install_req)
return order
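# Illustrative sketch (not part of pip): the topological order produced by
# _to_install(). If the user asked for A, and A depends on B which depends
# on C, the recorded _dependencies give an install order of C, B, A.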
def install(self, install_options, global_options=(), *args, **kwargs):
"""
Install everything in this set (after having downloaded and unpacked
the packages)
"""
to_install = self._to_install()
if to_install:
logger.info(
'Installing collected packages: %s',
', '.join([req.name for req in to_install]),
)
with indent_log():
for requirement in to_install:
if requirement.conflicts_with:
logger.info(
'Found existing installation: %s',
requirement.conflicts_with,
)
with indent_log():
requirement.uninstall(auto_confirm=True)
try:
requirement.install(
install_options,
global_options,
*args,
**kwargs
)
except:
# if install did not succeed, rollback previous uninstall
if (requirement.conflicts_with and not
requirement.install_succeeded):
requirement.rollback_uninstall()
raise
else:
if (requirement.conflicts_with and
requirement.install_succeeded):
requirement.commit_uninstall()
requirement.remove_temporary_source()
self.successfully_installed = to_install
|
nzavagli/UnrealPy
|
refs/heads/master
|
UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/Pillow-2.9.0/Tests/test_image_rotate.py
|
12
|
from helper import unittest, PillowTestCase, hopper
class TestImageRotate(PillowTestCase):
def test_rotate(self):
def rotate(mode):
im = hopper(mode)
out = im.rotate(45)
self.assertEqual(out.mode, mode)
self.assertEqual(out.size, im.size) # default rotate clips output
out = im.rotate(45, expand=1)
self.assertEqual(out.mode, mode)
self.assertNotEqual(out.size, im.size)
for mode in "1", "P", "L", "RGB", "I", "F":
rotate(mode)
if __name__ == '__main__':
unittest.main()
# End of file
|
novafloss/django-json-dbindex
|
refs/heads/master
|
json_dbindex/pgcommands.py
|
1
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2014 Rodolphe Quiédeville <[email protected]>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of django-json-dbindex nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
import logging
from django.db import connections
def index_exists(index, database='default'):
"""Execute raw sql
"""
cursor = connections[database].cursor()
qry = "SELECT COUNT(indexname) FROM pg_indexes WHERE indexname = %s"
cursor.execute(qry, [index['name']])
row = cursor.fetchone()
cursor.close()
return row[0] == 1
def execute_raw(sql, database='default', parms=None):
"""
Execute a raw SQL command
sql (string) : SQL command
database (string): the database name configured in settings
"""
try:
cursor = connections[database].cursor()
if parms is not None:
cursor.execute(sql, parms)
else:
cursor.execute(sql)
cursor.close()
return 0
except Exception, e:
logging.error("Can't execute %s -- Exception raised %s" % (sql, e))
return 1
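# Illustrative sketch (not part of this module): parameterized execution,
# assuming a configured 'default' database alias.
#
#   execute_raw("UPDATE auth_user SET is_active = %s WHERE id = %s",
#               database='default', parms=[True, 42])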
def drop_index(index, database='default'):
"""
Check if index exists and drop it
index (dict) : index description
"""
if 'database' in index:
database = index['database']
if index_exists(index, database):
logging.info("Will drop %s" % index['name'])
res = execute_raw(index['cmd'], database)
logging.info("%s dropped" % index['name'])
else:
res = 1
logging.info("%s doesn't exist" % index['name'])
return res
def create_index(index, database='default'):
"""
Create an index
index (dict) : index description
{"name": "foo",
"database": "default",
"cmd": "CREATE INDEX foo_idx ON table (column)"
}
"""
if 'database' in index:
database = index['database']
if index_exists(index, database):
logging.info("%s already exists" % index['name'])
res = 1
else:
logging.info("Will create %s" % index['name'])
res = execute_raw(index['cmd'], database)
logging.info("%s created" % index['name'])
return res
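# Illustrative sketch (not part of this module): creating the index described
# by the dict format shown in the docstring above.
#
#   index = {"name": "foo_idx",
#            "database": "default",
#            "cmd": "CREATE INDEX foo_idx ON table (column)"}
#   create_index(index)   # returns 0 on success, 1 if it already exists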
def create_extensions(extensions, database='default'):
"""
Create all extensions
"""
for extension in extensions:
cmd = "CREATE EXTENSION IF NOT EXISTS %s" % (extension)
logging.info("Will create extension %s on database %s" % (extension,
database))
res = execute_raw(cmd,
database=database)
logging.info("%s created" % extension)
return res
|
minghuascode/pyj
|
refs/heads/master
|
library/pyjamas/ui/MenuBar.py
|
1
|
# Copyright 2006 James Tauber and contributors
# Copyright (C) 2009 Luke Kenneth Casson Leighton <[email protected]>
# Copyright (C) 2009 Pavel Mironchyk <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pyjamas import DOM
from pyjamas import Factory
from pyjamas import DeferredCommand
from pyjamas.ui.Widget import Widget
from pyjamas.ui.MenuItem import MenuItem
from pyjamas.ui.MenuBarPopupPanel import MenuBarPopupPanel
from pyjamas.ui import Event
from pyjamas.ui.MultiListener import MultiListener
class MenuBar(Widget):
_props = [("vertical", "Vertical", "Vertical", None),
("itemsPerRow", "ItemsPerRow", "ItemsPerRow", None),
]
def __init__(self, vertical=False, **kwargs):
self.setVertical(kwargs.pop('Vertical', vertical))
if 'StyleName' not in kwargs or kwargs['StyleName'] == 'gwt-MenuBar':
kwargs['StyleName'] = self.getDefaultStyleName()
self.body = None
self.items = []
self.parentMenu = None
self.popup = None
self.selectedItem = None
self.shownChildMenu = None
self.autoOpen = False
self.itemsPerRow = None
if kwargs.has_key('Element'):
table = kwargs.pop('Element')
fc = DOM.getFirstChild(table)
if fc:
self.body = fc
else:
self.body = DOM.createTBody()
DOM.appendChild(table, self.body)
else:
table = DOM.createTable()
DOM.setAttribute(table, "cellPadding", "0")
DOM.setAttribute(table, "cellSpacing", "0")
self.body = DOM.createTBody()
DOM.appendChild(table, self.body)
outer = DOM.createDiv()
DOM.appendChild(outer, table)
self.setElement(outer)
Widget.__init__(self, **kwargs)
@classmethod
def _getProps(self):
return Widget._getProps() + self._props
def _setWeirdProps(self, props, builderstate):
""" covers creating the sub-menus and linking the event handlers.
"""
self.clearItems() # really tricky to update, so just blow away.
items = {}
for (k, v) in props.items():
if not isinstance(k, int):
continue
items[int(k)] = v
items = items.items()
items.sort()
last_level = 0
menu = self
menus = [menu]
for prop in items:
print prop
level, name, label, handler = prop[1]
if level < last_level:
menus = menus[:level+1]
menu = menus[level]
elif level > last_level:
menu = MenuBar(vertical=True)
lastitem = menus[-1].items[-1]
lastitem.setSubMenu(menu)
setattr(lastitem, name, menu)
menus.append(menu)
item = menu.addItem(label)
if handler and builderstate is not None and \
builderstate.eventTarget is not None:
# add a menu listener callback
menuItemListener = MultiListener(builderstate.eventTarget,
execute=handler)
item.setCommand(menuItemListener)
last_level = level
def setVertical(self, vertical):
self.vertical = vertical
def getVertical(self):
return self.vertical
def _checkVerticalContainer(self):
""" use this to delay effect of self.vertical being set.
self.setVertical can now be used, rather than self.vertical
being force-set in the constructor.
"""
if DOM.getChildCount(self.body) == 0:
DOM.appendChild(self.body, DOM.createTR())
def getDefaultStyleName(self):
if self.vertical:
return "gwt-MenuBar " + "gwt-MenuBar-vertical"
return "gwt-MenuBar " + "gwt-MenuBar-horizontal"
def setStyleName(self, StyleName, **kwargs):
if not StyleName or StyleName == 'gwt-MenuBar':
StyleName = self.getDefaultStyleName()
super(MenuBar, self).setStyleName(StyleName, **kwargs)
# also callable as:
# addItem(item)
# addItem(text, cmd)
# addItem(text, popup)
# addItem(text, asHTML, cmd)
def addItem(self, item, asHTML=None, popup=None):
if not hasattr(item, "setSubMenu"):
item = MenuItem(item, asHTML, popup)
if self.vertical:
tr = DOM.createTR()
DOM.appendChild(self.body, tr)
else:
self._checkVerticalContainer()
if len(self.items) == self.itemsPerRow:
DOM.appendChild(self.body, DOM.createTR())
count = DOM.getChildCount(self.body)
tr = DOM.getChild(self.body, count-1)
DOM.appendChild(tr, item.getElement())
item.setParentMenu(self)
item.setSelectionStyle(False)
self.items.append(item)
return item
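# Illustrative sketch (not part of pyjamas): the call forms listed in the
# comment above, with hypothetical command/submenu objects.
#
#   menu = MenuBar(vertical=True)
#   menu.addItem(MenuItem("Open"))                   # addItem(item)
#   menu.addItem("Save", save_command)               # addItem(text, cmd)
#   menu.addItem("Recent", recent_submenu)           # addItem(text, popup)
#   menu.addItem("<b>Exit</b>", True, quit_command)  # addItem(text, asHTML, cmd)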
def clearItems(self):
while self.items:
self.removeItem(0)
self.items = []
def setItemsPerRow(self, items):
self.itemsPerRow = items
def getItemsPerRow(self):
return self.itemsPerRow
def getAutoOpen(self):
return self.autoOpen
def onBrowserEvent(self, event):
Widget.onBrowserEvent(self, event)
item = self.findItem(DOM.eventGetTarget(event))
if item is None:
return False
type = DOM.eventGetType(event)
if type == "click":
self.doItemAction(item, True)
return True
elif type == "mouseover":
self.itemOver(item)
elif type == "mouseout":
self.itemOver(None)
return False
def onPopupClosed(self, sender, autoClosed):
if autoClosed:
self.closeAllParents()
self.onHide()
self.shownChildMenu = None
self.popup = None
def removeItem(self, item):
if isinstance(item, int):
idx = item
else:
try:
idx = self.items.index(item)
except ValueError:
return
container = self.getItemContainerElement(idx)
DOM.removeChild(container, DOM.getChild(container, idx))
del self.items[idx]
def setAutoOpen(self, autoOpen):
self.autoOpen = autoOpen
def closeAllParents(self):
curMenu = self
while curMenu is not None:
curMenu.close()
if curMenu.parentMenu is None and \
curMenu.selectedItem is not None:
curMenu.selectedItem.setSelectionStyle(False)
curMenu.selectedItem = None
curMenu = curMenu.parentMenu
def doItemAction(self, item, fireCommand):
if (self.shownChildMenu is not None) and \
(item.getSubMenu() == self.shownChildMenu):
return
if (self.shownChildMenu is not None):
self.shownChildMenu.onHide()
self.popup.hide()
if item.getSubMenu() is None:
if fireCommand:
self.closeAllParents()
cmd = item.getCommand()
if cmd is not None:
DeferredCommand.add(cmd)
return
self.selectItem(item)
self.popup = MenuBarPopupPanel(item)
self.popup.addPopupListener(self)
if self.vertical:
self.popup.setPopupPosition(self.getAbsoluteLeft() +
self.getOffsetWidth() - 1,
item.getAbsoluteTop())
else:
self.popup.setPopupPosition(item.getAbsoluteLeft(),
self.getAbsoluteTop() +
self.getOffsetHeight() - 1)
self.shownChildMenu = item.getSubMenu()
sub_menu = item.getSubMenu()
sub_menu.parentMenu = self
self.popup.show()
def onDetach(self):
if self.popup is not None:
self.popup.hide()
Widget.onDetach(self)
def itemOver(self, item):
if item is None:
if (self.selectedItem is not None):
if self.selectedItem.getSubMenu() != None:
if (self.shownChildMenu == self.selectedItem.getSubMenu()):
return
else:
self.selectItem(item)
return
self.selectItem(item)
if item is not None:
if (self.shownChildMenu is not None) or \
(self.parentMenu is not None) or self.autoOpen:
self.doItemAction(item, False)
def close(self):
if self.parentMenu is not None:
self.parentMenu.popup.hide()
def findItem(self, hItem):
for item in self.items:
if DOM.isOrHasChild(item.getElement(), hItem):
return item
return None
def getItemContainerElement(self, item):
if self.vertical:
return self.body
else:
self._checkVerticalContainer()
if self.itemsPerRow:
row = item / self.itemsPerRow
else:
row = 0
return DOM.getChild(self.body, row)
def onHide(self):
if self.shownChildMenu is not None:
self.shownChildMenu.onHide()
self.popup.hide()
def onShow(self):
if len(self.items) > 0:
self.selectItem(self.items[0])
def selectItem(self, item):
if item == self.selectedItem:
return
if self.selectedItem is not None:
self.selectedItem.setSelectionStyle(False)
if item is not None:
item.setSelectionStyle(True)
self.selectedItem = item
Factory.registerClass('pyjamas.ui.MenuBar', 'MenuBar', MenuBar)
|
dulems/hue
|
refs/heads/master
|
desktop/core/ext-py/python-ldap-2.3.13/Demo/rename.py
|
40
|
import ldap
from getpass import getpass
# Create LDAPObject instance
l = ldap.initialize('ldap://localhost:1389',trace_level=1)
print 'Password:'
cred = getpass()
try:
# Set LDAP protocol version used
l.set_option(ldap.OPT_PROTOCOL_VERSION,3)
# Try a bind to provoke failure if protocol version is not supported
l.bind_s('cn=root,dc=stroeder,dc=com',cred,ldap.AUTH_SIMPLE)
print 'Using rename_s():'
l.rename_s(
'uid=fred,ou=Unstructured testing tree,dc=stroeder,dc=com',
'cn=Fred Feuerstein',
'dc=stroeder,dc=com',
0
)
l.rename_s(
'cn=Fred Feuerstein,dc=stroeder,dc=com',
'uid=fred',
'ou=Unstructured testing tree,dc=stroeder,dc=com',
0
)
m = l.rename(
'uid=fred,ou=Unstructured testing tree,dc=stroeder,dc=com',
'cn=Fred Feuerstein',
'dc=stroeder,dc=com',
0
)
r = l.result(m,1)
m = l.rename(
'cn=Fred Feuerstein,dc=stroeder,dc=com',
'uid=fred',
'ou=Unstructured testing tree,dc=stroeder,dc=com',
0
)
r = l.result(m,1)
finally:
l.unbind_s()
|
masci/toodo
|
refs/heads/master
|
toodo/appengine_config.py
|
1
|
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), 'lib'))
|
hardikk/newfies-dialer
|
refs/heads/master
|
newfies/api/api_playgrounds/survey_playground.py
|
4
|
#
# Newfies-Dialer License
# http://www.newfies-dialer.org
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (C) 2011-2013 Star2Billing S.L.
#
# The Initial Developer of the Original Code is
# Arezqui Belaid <[email protected]>
#
from django.utils.translation import gettext as _
from apiplayground import APIPlayground
class SurveyAPIPlayground(APIPlayground):
schema = {
"title": _("survey"),
"base_url": "http://localhost/api/v1/",
"resources": [
{
"name": "/survey/",
"description": _("this resource allows you to manage survey."),
"endpoints": [
{
"method": "GET",
"url": "/api/v1/survey/",
"description": _("returns all surveys")
},
{
"method": "GET",
"url": "/api/v1/survey/{survey-id}/",
"description": _("returns a specific survey")
},
{
"method": "POST",
"url": "/api/v1/survey/",
"description": _("create new survey"),
"parameters": [{
"name": "name",
"type": "string",
"is_required": True,
"default": "Sample Campaign"
},
{
"name": "description",
"type": "string"
},
]
},
{
"method": "PUT",
"url": "/api/v1/survey/{survey-id}/",
"description": _("update survey"),
"parameters": [{
"name": "name",
"type": "string",
"is_required": True,
"default": "Sample Campaign"
},
{
"name": "description",
"type": "string"
},
]
},
{
"method": "DELETE",
"url": "/api/v1/survey/{survey-id}/",
"description": _("delete survey"),
}
]
},
]
}
|
miconof/headphones
|
refs/heads/master
|
lib/unidecode/x05e.py
|
250
|
data = (
'Za ', # 0x00
'Bi ', # 0x01
'Shi ', # 0x02
'Bu ', # 0x03
'Ding ', # 0x04
'Shuai ', # 0x05
'Fan ', # 0x06
'Nie ', # 0x07
'Shi ', # 0x08
'Fen ', # 0x09
'Pa ', # 0x0a
'Zhi ', # 0x0b
'Xi ', # 0x0c
'Hu ', # 0x0d
'Dan ', # 0x0e
'Wei ', # 0x0f
'Zhang ', # 0x10
'Tang ', # 0x11
'Dai ', # 0x12
'Ma ', # 0x13
'Pei ', # 0x14
'Pa ', # 0x15
'Tie ', # 0x16
'Fu ', # 0x17
'Lian ', # 0x18
'Zhi ', # 0x19
'Zhou ', # 0x1a
'Bo ', # 0x1b
'Zhi ', # 0x1c
'Di ', # 0x1d
'Mo ', # 0x1e
'Yi ', # 0x1f
'Yi ', # 0x20
'Ping ', # 0x21
'Qia ', # 0x22
'Juan ', # 0x23
'Ru ', # 0x24
'Shuai ', # 0x25
'Dai ', # 0x26
'Zheng ', # 0x27
'Shui ', # 0x28
'Qiao ', # 0x29
'Zhen ', # 0x2a
'Shi ', # 0x2b
'Qun ', # 0x2c
'Xi ', # 0x2d
'Bang ', # 0x2e
'Dai ', # 0x2f
'Gui ', # 0x30
'Chou ', # 0x31
'Ping ', # 0x32
'Zhang ', # 0x33
'Sha ', # 0x34
'Wan ', # 0x35
'Dai ', # 0x36
'Wei ', # 0x37
'Chang ', # 0x38
'Sha ', # 0x39
'Qi ', # 0x3a
'Ze ', # 0x3b
'Guo ', # 0x3c
'Mao ', # 0x3d
'Du ', # 0x3e
'Hou ', # 0x3f
'Zheng ', # 0x40
'Xu ', # 0x41
'Mi ', # 0x42
'Wei ', # 0x43
'Wo ', # 0x44
'Fu ', # 0x45
'Yi ', # 0x46
'Bang ', # 0x47
'Ping ', # 0x48
'Tazuna ', # 0x49
'Gong ', # 0x4a
'Pan ', # 0x4b
'Huang ', # 0x4c
'Dao ', # 0x4d
'Mi ', # 0x4e
'Jia ', # 0x4f
'Teng ', # 0x50
'Hui ', # 0x51
'Zhong ', # 0x52
'Shan ', # 0x53
'Man ', # 0x54
'Mu ', # 0x55
'Biao ', # 0x56
'Guo ', # 0x57
'Ze ', # 0x58
'Mu ', # 0x59
'Bang ', # 0x5a
'Zhang ', # 0x5b
'Jiong ', # 0x5c
'Chan ', # 0x5d
'Fu ', # 0x5e
'Zhi ', # 0x5f
'Hu ', # 0x60
'Fan ', # 0x61
'Chuang ', # 0x62
'Bi ', # 0x63
'Hei ', # 0x64
'[?] ', # 0x65
'Mi ', # 0x66
'Qiao ', # 0x67
'Chan ', # 0x68
'Fen ', # 0x69
'Meng ', # 0x6a
'Bang ', # 0x6b
'Chou ', # 0x6c
'Mie ', # 0x6d
'Chu ', # 0x6e
'Jie ', # 0x6f
'Xian ', # 0x70
'Lan ', # 0x71
'Gan ', # 0x72
'Ping ', # 0x73
'Nian ', # 0x74
'Qian ', # 0x75
'Bing ', # 0x76
'Bing ', # 0x77
'Xing ', # 0x78
'Gan ', # 0x79
'Yao ', # 0x7a
'Huan ', # 0x7b
'You ', # 0x7c
'You ', # 0x7d
'Ji ', # 0x7e
'Yan ', # 0x7f
'Pi ', # 0x80
'Ting ', # 0x81
'Ze ', # 0x82
'Guang ', # 0x83
'Zhuang ', # 0x84
'Mo ', # 0x85
'Qing ', # 0x86
'Bi ', # 0x87
'Qin ', # 0x88
'Dun ', # 0x89
'Chuang ', # 0x8a
'Gui ', # 0x8b
'Ya ', # 0x8c
'Bai ', # 0x8d
'Jie ', # 0x8e
'Xu ', # 0x8f
'Lu ', # 0x90
'Wu ', # 0x91
'[?] ', # 0x92
'Ku ', # 0x93
'Ying ', # 0x94
'Di ', # 0x95
'Pao ', # 0x96
'Dian ', # 0x97
'Ya ', # 0x98
'Miao ', # 0x99
'Geng ', # 0x9a
'Ci ', # 0x9b
'Fu ', # 0x9c
'Tong ', # 0x9d
'Pang ', # 0x9e
'Fei ', # 0x9f
'Xiang ', # 0xa0
'Yi ', # 0xa1
'Zhi ', # 0xa2
'Tiao ', # 0xa3
'Zhi ', # 0xa4
'Xiu ', # 0xa5
'Du ', # 0xa6
'Zuo ', # 0xa7
'Xiao ', # 0xa8
'Tu ', # 0xa9
'Gui ', # 0xaa
'Ku ', # 0xab
'Pang ', # 0xac
'Ting ', # 0xad
'You ', # 0xae
'Bu ', # 0xaf
'Ding ', # 0xb0
'Cheng ', # 0xb1
'Lai ', # 0xb2
'Bei ', # 0xb3
'Ji ', # 0xb4
'An ', # 0xb5
'Shu ', # 0xb6
'Kang ', # 0xb7
'Yong ', # 0xb8
'Tuo ', # 0xb9
'Song ', # 0xba
'Shu ', # 0xbb
'Qing ', # 0xbc
'Yu ', # 0xbd
'Yu ', # 0xbe
'Miao ', # 0xbf
'Sou ', # 0xc0
'Ce ', # 0xc1
'Xiang ', # 0xc2
'Fei ', # 0xc3
'Jiu ', # 0xc4
'He ', # 0xc5
'Hui ', # 0xc6
'Liu ', # 0xc7
'Sha ', # 0xc8
'Lian ', # 0xc9
'Lang ', # 0xca
'Sou ', # 0xcb
'Jian ', # 0xcc
'Pou ', # 0xcd
'Qing ', # 0xce
'Jiu ', # 0xcf
'Jiu ', # 0xd0
'Qin ', # 0xd1
'Ao ', # 0xd2
'Kuo ', # 0xd3
'Lou ', # 0xd4
'Yin ', # 0xd5
'Liao ', # 0xd6
'Dai ', # 0xd7
'Lu ', # 0xd8
'Yi ', # 0xd9
'Chu ', # 0xda
'Chan ', # 0xdb
'Tu ', # 0xdc
'Si ', # 0xdd
'Xin ', # 0xde
'Miao ', # 0xdf
'Chang ', # 0xe0
'Wu ', # 0xe1
'Fei ', # 0xe2
'Guang ', # 0xe3
'Koc ', # 0xe4
'Kuai ', # 0xe5
'Bi ', # 0xe6
'Qiang ', # 0xe7
'Xie ', # 0xe8
'Lin ', # 0xe9
'Lin ', # 0xea
'Liao ', # 0xeb
'Lu ', # 0xec
'[?] ', # 0xed
'Ying ', # 0xee
'Xian ', # 0xef
'Ting ', # 0xf0
'Yong ', # 0xf1
'Li ', # 0xf2
'Ting ', # 0xf3
'Yin ', # 0xf4
'Xun ', # 0xf5
'Yan ', # 0xf6
'Ting ', # 0xf7
'Di ', # 0xf8
'Po ', # 0xf9
'Jian ', # 0xfa
'Hui ', # 0xfb
'Nai ', # 0xfc
'Hui ', # 0xfd
'Gong ', # 0xfe
'Nian ', # 0xff
)
|
nishad-jobsglobal/odoo-marriot
|
refs/heads/master
|
addons/mail/tests/common.py
|
56
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2012-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.tests import common
class TestMail(common.SavepointCase):
@classmethod
def _init_mock_build_email(cls):
cls._build_email_args_list = []
cls._build_email_kwargs_list = []
def setUp(self):
super(TestMail, self).setUp()
self._build_email_args_list[:] = []
self._build_email_kwargs_list[:] = []
@classmethod
def setUpClass(cls):
super(TestMail, cls).setUpClass()
cr, uid = cls.cr, cls.uid
def build_email(self, *args, **kwargs):
cls._build_email_args_list.append(args)
cls._build_email_kwargs_list.append(kwargs)
return build_email.origin(self, *args, **kwargs)
def send_email(self, cr, uid, message, *args, **kwargs):
return message['Message-Id']
def mail_group_message_get_recipient_values(self, cr, uid, id, notif_message=None, recipient_ids=None, context=None):
return self.pool['mail.thread'].message_get_recipient_values(cr, uid, id, notif_message=notif_message, recipient_ids=recipient_ids, context=context)
cls._init_mock_build_email()
cls.registry('ir.mail_server')._patch_method('build_email', build_email)
cls.registry('ir.mail_server')._patch_method('send_email', send_email)
cls.registry('mail.group')._patch_method('message_get_recipient_values', mail_group_message_get_recipient_values)
# Useful models
cls.ir_model = cls.registry('ir.model')
cls.ir_model_data = cls.registry('ir.model.data')
cls.ir_attachment = cls.registry('ir.attachment')
cls.mail_alias = cls.registry('mail.alias')
cls.mail_thread = cls.registry('mail.thread')
cls.mail_group = cls.registry('mail.group')
cls.mail_mail = cls.registry('mail.mail')
cls.mail_message = cls.registry('mail.message')
cls.mail_notification = cls.registry('mail.notification')
cls.mail_followers = cls.registry('mail.followers')
cls.mail_message_subtype = cls.registry('mail.message.subtype')
cls.res_users = cls.registry('res.users')
cls.res_partner = cls.registry('res.partner')
# Find Employee group
cls.group_employee_id = cls.env.ref('base.group_user').id or False
# Partner Data
# User Data: employee, noone
cls.user_employee_id = cls.res_users.create(cr, uid, {
'name': 'Ernest Employee',
'login': 'ernest',
'alias_name': 'ernest',
'email': '[email protected]',
'signature': '--\nErnest',
'notify_email': 'always',
'groups_id': [(6, 0, [cls.group_employee_id])]
}, {'no_reset_password': True})
cls.user_noone_id = cls.res_users.create(cr, uid, {
'name': 'Noemie NoOne',
'login': 'noemie',
'alias_name': 'noemie',
'email': '[email protected]',
'signature': '--\nNoemie',
'notify_email': 'always',
'groups_id': [(6, 0, [])]
}, {'no_reset_password': True})
# Test users to use through the various tests
cls.res_users.write(cr, uid, uid, {'name': 'Administrator'})
cls.user_raoul_id = cls.res_users.create(cr, uid, {
'name': 'Raoul Grosbedon',
'signature': 'SignRaoul',
'email': '[email protected]',
'login': 'raoul',
'alias_name': 'raoul',
'groups_id': [(6, 0, [cls.group_employee_id])]
}, {'no_reset_password': True})
cls.user_bert_id = cls.res_users.create(cr, uid, {
'name': 'Bert Tartignole',
'signature': 'SignBert',
'email': '[email protected]',
'login': 'bert',
'alias_name': 'bert',
'groups_id': [(6, 0, [])]
}, {'no_reset_password': True})
cls.user_raoul = cls.res_users.browse(cr, uid, cls.user_raoul_id)
cls.user_bert = cls.res_users.browse(cr, uid, cls.user_bert_id)
cls.user_admin = cls.res_users.browse(cr, uid, uid)
cls.partner_admin_id = cls.user_admin.partner_id.id
cls.partner_raoul_id = cls.user_raoul.partner_id.id
cls.partner_bert_id = cls.user_bert.partner_id.id
# Test 'pigs' group to use through the various tests
cls.group_pigs_id = cls.mail_group.create(
cr, uid,
{'name': 'Pigs', 'description': 'Fans of Pigs, unite !', 'alias_name': 'group+pigs'},
{'mail_create_nolog': True}
)
cls.group_pigs = cls.mail_group.browse(cr, uid, cls.group_pigs_id)
# Test mail.group: public to provide access to everyone
cls.group_jobs_id = cls.mail_group.create(cr, uid, {'name': 'Jobs', 'public': 'public'})
# Test mail.group: private to restrict access
cls.group_priv_id = cls.mail_group.create(cr, uid, {'name': 'Private', 'public': 'private'})
@classmethod
def tearDownClass(cls):
# Remove mocks
cls.registry('ir.mail_server')._revert_method('build_email')
cls.registry('ir.mail_server')._revert_method('send_email')
cls.registry('mail.group')._revert_method('message_get_recipient_values')
super(TestMail, cls).tearDownClass()
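# Illustrative sketch (not part of these tests): with build_email patched in
# setUpClass above, a test can inspect the captured outgoing mail, e.g.
#
#   sent = cls._build_email_kwargs_list
#   # each entry holds the keyword arguments of one build_email() call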
|