repo_name | path | copies | size | content | license
---|---|---|---|---|---
ubc/edx-platform | common/djangoapps/cors_csrf/migrations/0001_initial.py | 98 | 4870 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'XDomainProxyConfiguration'
db.create_table('cors_csrf_xdomainproxyconfiguration', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('change_date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('changed_by', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True, on_delete=models.PROTECT)),
('enabled', self.gf('django.db.models.fields.BooleanField')(default=False)),
('whitelist', self.gf('django.db.models.fields.TextField')()),
))
db.send_create_signal('cors_csrf', ['XDomainProxyConfiguration'])
def backwards(self, orm):
# Deleting model 'XDomainProxyConfiguration'
db.delete_table('cors_csrf_xdomainproxyconfiguration')
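# With south installed, this migration would typically be applied with
# (illustrative, standard south usage, not part of this file):
#   ./manage.py migrate cors_csrf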
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'cors_csrf.xdomainproxyconfiguration': {
'Meta': {'object_name': 'XDomainProxyConfiguration'},
'change_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'whitelist': ('django.db.models.fields.TextField', [], {})
}
}
complete_apps = ['cors_csrf']
| agpl-3.0 |
fw1121/BDA_py_demos | demos_ch3/demo3_2.py | 19 | 6319 | """Bayesian Data Analysis, 3rd ed
Chapter 3, demo 2
Visualise factored sampling and the corresponding marginal and conditional densities.
"""
from __future__ import division
import os
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
# import from utilities
util_path = '../utilities_and_data' # provide path to utilities
util_path = os.path.abspath(util_path)
if util_path not in os.sys.path and os.path.exists(util_path):
os.sys.path.insert(0, util_path)
import sinvchi2
# Edit default plot settings
plt.rc('font', size=14)
# data
y = np.array([93, 112, 122, 135, 122, 150, 118, 90, 124, 114])
# sufficient statistics
n = len(y)
s2 = np.var(y, ddof=1) # Here ddof=1 is used to get the sample estimate.
my = np.mean(y)
# Factorize the joint posterior p(mu,sigma2|y) to p(sigma2|y)p(mu|sigma2,y)
# Sample from the joint posterior using this factorization
# sample from p(sigma2|y)
nsamp = 1000
sigma2 = sinvchi2.rvs(n-1, s2, size=nsamp)
# sample from p(mu|sigma2,y)
mu = my + np.sqrt(sigma2/n)*np.random.randn(*sigma2.shape)
# display sigma instead of sigma2
sigma = np.sqrt(sigma2)
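# Quick sanity check of the draws (illustrative, not part of the original
# demo): the sample means should sit near the analytic posterior means,
# E[mu|y] = mean(y) and E[sigma2|y] = (n-1)*s2/(n-3) for n > 3.
# print('mu: sample mean %.2f vs analytic %.2f' % (mu.mean(), my))
# print('sigma2: sample mean %.1f vs analytic %.1f'
#       % (sigma2.mean(), (n-1)*s2/(n-3)))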
# For mu compute the density in these points
tl1 = [90, 150]
t1 = np.linspace(tl1[0], tl1[1], 1000)
# For sigma compute the density in these points
tl2 = [10, 60]
t2 = np.linspace(tl2[0], tl2[1], 1000)
# evaluate the joint density in grid
# note that the following is not normalized, but for plotting
# contours it does not matter
Z = stats.norm.pdf(t1, my, t2[:,np.newaxis]/np.sqrt(n))
Z *= (sinvchi2.pdf(t2**2, n-1, s2)*2*t2)[:,np.newaxis]
# compute the exact marginal density for sigma
# multiplication by 2*t2 is due to the transformation of variable
# z=t2^2, see BDA3 p. 21
pm_sigma = sinvchi2.pdf(t2**2, n-1, s2)*2*t2
# N.B. this was already calculated in the joint distribution case
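# Sanity check (illustrative): the transformed density should integrate
# to ~1 over this grid, which a quick trapezoidal rule confirms:
# print('integral of p(sigma|y): %.3f' % np.trapz(pm_sigma, t2))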
# ====== Illustrate the sampling with interactive plot
# create figure
plotgrid = gridspec.GridSpec(1, 2, width_ratios=[3,2])
fig = plt.figure(figsize=(12,8))
# plot the joint distribution
ax0 = plt.subplot(plotgrid[0,0])
# plot the contour plot of the exact posterior (c_levels gives a
# vector of linearly spaced values at which contour levels are drawn)
c_levels = np.linspace(1e-5, Z.max(), 6)[:-1]
plt.contour(t1, t2, Z, c_levels, colors='blue')
# decorate
plt.xlim(tl1)
plt.ylim(tl2)
plt.xlabel('$\mu$', fontsize=20)
plt.ylabel('$\sigma$', fontsize=20)
plt.title('joint posterior')
plt.legend((plt.Line2D([], [], color='blue'),), ('exact contour plot',))
# plot the marginal of sigma
ax1 = plt.subplot(plotgrid[0,1])
plt.plot(pm_sigma, t2, 'b', linewidth=1.5)
# decorate
plt.ylim(tl2)
plt.title('marginal of $\sigma$')
plt.xticks(())
# Function for interactively updating the figure
def update_figure(event):
if icontainer.stage == 0:
icontainer.stage += 1
# first sample of sigma2
line, = ax0.plot(tl1, [sigma[0], sigma[0]], 'k--', linewidth=1.5)
icontainer.legend_h.append(line)
icontainer.legend_s.append('sample from the marginal of $\sigma$')
icontainer.prev_line1 = line
ax0.legend(icontainer.legend_h, icontainer.legend_s)
fig.canvas.draw()
elif icontainer.stage == 1:
icontainer.stage += 1
# the conditional distribution of mu given sigma2
line, = ax0.plot(
t1,
sigma[0] + stats.norm.pdf(t1, my, np.sqrt(sigma2[0]/n))*100,
'g--',
linewidth=1.5
)
icontainer.legend_h.append(line)
icontainer.legend_s.append('conditional distribution of $\mu$')
icontainer.prev_line2 = line
ax0.legend(icontainer.legend_h, icontainer.legend_s)
fig.canvas.draw()
elif icontainer.stage == 2:
icontainer.stage += 1
# sample mu given sigma2
scat = ax0.scatter(mu[0], sigma[0], 40, color='g')
icontainer.legend_h.append(scat)
icontainer.legend_s.append('sample from joint posterior')
icontainer.prev_scat = scat
ax0.legend(icontainer.legend_h, icontainer.legend_s)
fig.canvas.draw()
elif icontainer.stage == 3:
# remove the previous lines
ax0.lines.remove(icontainer.prev_line1)
ax0.lines.remove(icontainer.prev_line2)
# resize the last scatter sample
icontainer.prev_scat.get_sizes()[0] = 8
# draw next sample
icontainer.i1 += 1
i1 = icontainer.i1
# first sample of sigma2
icontainer.prev_line1, = ax0.plot(
tl1, [sigma[i1], sigma[i1]], 'k--', linewidth=1.5
)
# the conditional distribution of mu given sigma2
icontainer.prev_line2, = ax0.plot(
t1,
sigma[i1] + stats.norm.pdf(t1, my, np.sqrt(sigma2[i1]/n))*100,
'g--',
linewidth=1.5
)
# sample mu given sigma2
icontainer.prev_scat = ax0.scatter(mu[i1], sigma[i1], 40, color='g')
# check if the last sample
if icontainer.i1 == icontainer.ndraw-1:
icontainer.stage += 1
fig.canvas.draw()
elif icontainer.stage == 4:
icontainer.stage += 1
# remove the previous lines
ax0.lines.remove(icontainer.prev_line1)
ax0.lines.remove(icontainer.prev_line2)
# resize the last scatter sample
icontainer.prev_scat.get_sizes()[0] = 8
# remove the helper text
plt.suptitle('')
# remove the extra legend entries
icontainer.legend_h.pop(2)
icontainer.legend_h.pop(1)
icontainer.legend_s.pop(2)
icontainer.legend_s.pop(1)
ax0.legend(icontainer.legend_h, icontainer.legend_s)
# plot the remaining samples
icontainer.i1 += 1
i1 = icontainer.i1
ax0.scatter(mu[i1:], sigma[i1:], 8, color='g')
fig.canvas.draw()
# Store the information of the current stage of the figure
class icontainer(object):
stage = 0
i1 = 0
legend_h = [plt.Line2D([], [], color='blue'),]
legend_s = ['exact contour plot',]
prev_line1 = None
prev_line2 = None
prev_scat = None
ndraw = 6
plt.suptitle('Press any key to continue', fontsize=20)
fig.canvas.mpl_connect('key_press_event', update_figure)
plt.show()
| gpl-3.0 |
jrversteegh/softsailor | deps/numpy-1.6.1/numpy/f2py/cb_rules.py | 36 | 20543 | #!/usr/bin/env python
"""
Build call-back mechanism for f2py2e.
Copyright 2000 Pearu Peterson all rights reserved,
Pearu Peterson <[email protected]>
Permission to use, modify, and distribute this software is given under the
terms of the NumPy License.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Date: 2005/07/20 11:27:58 $
Pearu Peterson
"""
__version__ = "$Revision: 1.53 $"[10:-1]
import __version__
f2py_version = __version__.version
import pprint
import sys
import types
errmess=sys.stderr.write
outmess=sys.stdout.write
show=pprint.pprint
from auxfuncs import *
import cfuncs
################## Rules for callback function ##############
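# The dictionaries below are string templates: tokens such as #name#,
# #rctype# and #ctype# are substituted by applyrules() (used near the end
# of this file) when the C wrapper source is generated. For instance,
# with name='fun' and rctype='double', the 'cbtypedefs' entry would
# expand to roughly (illustrative):
#   typedef double(*fun_typedef)(...);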
cb_routine_rules={
'cbtypedefs':'typedef #rctype#(*#name#_typedef)(#optargs_td##args_td##strarglens_td##noargs#);',
'body':"""
#begintitle#
PyObject *#name#_capi = NULL;/*was Py_None*/
PyTupleObject *#name#_args_capi = NULL;
int #name#_nofargs = 0;
jmp_buf #name#_jmpbuf;
/*typedef #rctype#(*#name#_typedef)(#optargs_td##args_td##strarglens_td##noargs#);*/
#static# #rctype# #callbackname# (#optargs##args##strarglens##noargs#) {
\tPyTupleObject *capi_arglist = #name#_args_capi;
\tPyObject *capi_return = NULL;
\tPyObject *capi_tmp = NULL;
\tint capi_j,capi_i = 0;
\tint capi_longjmp_ok = 1;
#decl#
#ifdef F2PY_REPORT_ATEXIT
f2py_cb_start_clock();
#endif
\tCFUNCSMESS(\"cb:Call-back function #name# (maxnofargs=#maxnofargs#(-#nofoptargs#))\\n\");
\tCFUNCSMESSPY(\"cb:#name#_capi=\",#name#_capi);
\tif (#name#_capi==NULL) {
\t\tcapi_longjmp_ok = 0;
\t\t#name#_capi = PyObject_GetAttrString(#modulename#_module,\"#argname#\");
\t}
\tif (#name#_capi==NULL) {
\t\tPyErr_SetString(#modulename#_error,\"cb: Callback #argname# not defined (as an argument or module #modulename# attribute).\\n\");
\t\tgoto capi_fail;
\t}
\tif (F2PyCapsule_Check(#name#_capi)) {
\t#name#_typedef #name#_cptr;
\t#name#_cptr = F2PyCapsule_AsVoidPtr(#name#_capi);
\t#returncptr#(*#name#_cptr)(#optargs_nm##args_nm##strarglens_nm#);
\t#return#
\t}
\tif (capi_arglist==NULL) {
\t\tcapi_longjmp_ok = 0;
\t\tcapi_tmp = PyObject_GetAttrString(#modulename#_module,\"#argname#_extra_args\");
\t\tif (capi_tmp) {
\t\t\tcapi_arglist = (PyTupleObject *)PySequence_Tuple(capi_tmp);
\t\t\tif (capi_arglist==NULL) {
\t\t\t\tPyErr_SetString(#modulename#_error,\"Failed to convert #modulename#.#argname#_extra_args to tuple.\\n\");
\t\t\t\tgoto capi_fail;
\t\t\t}
\t\t} else {
\t\t\tPyErr_Clear();
\t\t\tcapi_arglist = (PyTupleObject *)Py_BuildValue(\"()\");
\t\t}
\t}
\tif (capi_arglist == NULL) {
\t\tPyErr_SetString(#modulename#_error,\"Callback #argname# argument list is not set.\\n\");
\t\tgoto capi_fail;
\t}
#setdims#
#pyobjfrom#
\tCFUNCSMESSPY(\"cb:capi_arglist=\",capi_arglist);
\tCFUNCSMESS(\"cb:Call-back calling Python function #argname#.\\n\");
#ifdef F2PY_REPORT_ATEXIT
f2py_cb_start_call_clock();
#endif
\tcapi_return = PyObject_CallObject(#name#_capi,(PyObject *)capi_arglist);
#ifdef F2PY_REPORT_ATEXIT
f2py_cb_stop_call_clock();
#endif
\tCFUNCSMESSPY(\"cb:capi_return=\",capi_return);
\tif (capi_return == NULL) {
\t\tfprintf(stderr,\"capi_return is NULL\\n\");
\t\tgoto capi_fail;
\t}
\tif (capi_return == Py_None) {
\t\tPy_DECREF(capi_return);
\t\tcapi_return = Py_BuildValue(\"()\");
\t}
\telse if (!PyTuple_Check(capi_return)) {
\t\tcapi_return = Py_BuildValue(\"(N)\",capi_return);
\t}
\tcapi_j = PyTuple_Size(capi_return);
\tcapi_i = 0;
#frompyobj#
\tCFUNCSMESS(\"cb:#name#:successful\\n\");
\tPy_DECREF(capi_return);
#ifdef F2PY_REPORT_ATEXIT
f2py_cb_stop_clock();
#endif
\tgoto capi_return_pt;
capi_fail:
\tfprintf(stderr,\"Call-back #name# failed.\\n\");
\tPy_XDECREF(capi_return);
\tif (capi_longjmp_ok)
\t\tlongjmp(#name#_jmpbuf,-1);
capi_return_pt:
\t;
#return#
}
#endtitle#
""",
'need':['setjmp.h','CFUNCSMESS'],
'maxnofargs':'#maxnofargs#',
'nofoptargs':'#nofoptargs#',
'docstr':"""\
\tdef #argname#(#docsignature#): return #docreturn#\\n\\
#docstrsigns#""",
'latexdocstr':"""
{{}\\verb@def #argname#(#latexdocsignature#): return #docreturn#@{}}
#routnote#
#latexdocstrsigns#""",
'docstrshort':'def #argname#(#docsignature#): return #docreturn#'
}
cb_rout_rules=[
{# Init
'separatorsfor':{'decl':'\n',
'args':',','optargs':'','pyobjfrom':'\n','freemem':'\n',
'args_td':',','optargs_td':'',
'args_nm':',','optargs_nm':'',
'frompyobj':'\n','setdims':'\n',
'docstrsigns':'\\n"\n"',
'latexdocstrsigns':'\n',
'latexdocstrreq':'\n','latexdocstropt':'\n',
'latexdocstrout':'\n','latexdocstrcbs':'\n',
},
'decl':'/*decl*/','pyobjfrom':'/*pyobjfrom*/','frompyobj':'/*frompyobj*/',
'args':[],'optargs':'','return':'','strarglens':'','freemem':'/*freemem*/',
'args_td':[],'optargs_td':'','strarglens_td':'',
'args_nm':[],'optargs_nm':'','strarglens_nm':'',
'noargs':'',
'setdims':'/*setdims*/',
'docstrsigns':'','latexdocstrsigns':'',
'docstrreq':'\tRequired arguments:',
'docstropt':'\tOptional arguments:',
'docstrout':'\tReturn objects:',
'docstrcbs':'\tCall-back functions:',
'docreturn':'','docsign':'','docsignopt':'',
'latexdocstrreq':'\\noindent Required arguments:',
'latexdocstropt':'\\noindent Optional arguments:',
'latexdocstrout':'\\noindent Return objects:',
'latexdocstrcbs':'\\noindent Call-back functions:',
'routnote':{hasnote:'--- #note#',l_not(hasnote):''},
},{ # Function
'decl':'\t#ctype# return_value;',
'frompyobj':[{debugcapi:'\tCFUNCSMESS("cb:Getting return_value->");'},
'\tif (capi_j>capi_i)\n\t\tGETSCALARFROMPYTUPLE(capi_return,capi_i++,&return_value,#ctype#,"#ctype#_from_pyobj failed in converting return_value of call-back function #name# to C #ctype#\\n");',
{debugcapi:'\tfprintf(stderr,"#showvalueformat#.\\n",return_value);'}
],
'need':['#ctype#_from_pyobj',{debugcapi:'CFUNCSMESS'},'GETSCALARFROMPYTUPLE'],
'return':'\treturn return_value;',
'_check':l_and(isfunction,l_not(isstringfunction),l_not(iscomplexfunction))
},
{# String function
'pyobjfrom':{debugcapi:'\tfprintf(stderr,"debug-capi:cb:#name#:%d:\\n",return_value_len);'},
'args':'#ctype# return_value,int return_value_len',
'args_nm':'return_value,&return_value_len',
'args_td':'#ctype# ,int',
'frompyobj':[{debugcapi:'\tCFUNCSMESS("cb:Getting return_value->\\"");'},
"""\tif (capi_j>capi_i)
\t\tGETSTRFROMPYTUPLE(capi_return,capi_i++,return_value,return_value_len);""",
{debugcapi:'\tfprintf(stderr,"#showvalueformat#\\".\\n",return_value);'}
],
'need':['#ctype#_from_pyobj',{debugcapi:'CFUNCSMESS'},
'string.h','GETSTRFROMPYTUPLE'],
'return':'return;',
'_check':isstringfunction
},
{# Complex function
'optargs':"""
#ifndef F2PY_CB_RETURNCOMPLEX
#ctype# *return_value
#endif
""",
'optargs_nm':"""
#ifndef F2PY_CB_RETURNCOMPLEX
return_value
#endif
""",
'optargs_td':"""
#ifndef F2PY_CB_RETURNCOMPLEX
#ctype# *
#endif
""",
'decl':"""
#ifdef F2PY_CB_RETURNCOMPLEX
\t#ctype# return_value;
#endif
""",
'frompyobj':[{debugcapi:'\tCFUNCSMESS("cb:Getting return_value->");'},
"""\
\tif (capi_j>capi_i)
#ifdef F2PY_CB_RETURNCOMPLEX
\t\tGETSCALARFROMPYTUPLE(capi_return,capi_i++,&return_value,#ctype#,\"#ctype#_from_pyobj failed in converting return_value of call-back function #name# to C #ctype#\\n\");
#else
\t\tGETSCALARFROMPYTUPLE(capi_return,capi_i++,return_value,#ctype#,\"#ctype#_from_pyobj failed in converting return_value of call-back function #name# to C #ctype#\\n\");
#endif
""",
{debugcapi:"""
#ifdef F2PY_CB_RETURNCOMPLEX
\tfprintf(stderr,\"#showvalueformat#.\\n\",(return_value).r,(return_value).i);
#else
\tfprintf(stderr,\"#showvalueformat#.\\n\",(*return_value).r,(*return_value).i);
#endif
"""}
],
'return':"""
#ifdef F2PY_CB_RETURNCOMPLEX
\treturn return_value;
#else
\treturn;
#endif
""",
'need':['#ctype#_from_pyobj',{debugcapi:'CFUNCSMESS'},
'string.h','GETSCALARFROMPYTUPLE','#ctype#'],
'_check':iscomplexfunction
},
{'docstrout':'\t\t#pydocsignout#',
'latexdocstrout':['\\item[]{{}\\verb@#pydocsignout#@{}}',
{hasnote:'--- #note#'}],
'docreturn':'#rname#,',
'_check':isfunction},
{'_check':issubroutine,'return':'return;'}
]
cb_arg_rules=[
{ # Doc
'docstropt':{l_and(isoptional,isintent_nothide):'\t\t#pydocsign#'},
'docstrreq':{l_and(isrequired,isintent_nothide):'\t\t#pydocsign#'},
'docstrout':{isintent_out:'\t\t#pydocsignout#'},
'latexdocstropt':{l_and(isoptional,isintent_nothide):['\\item[]{{}\\verb@#pydocsign#@{}}',
{hasnote:'--- #note#'}]},
'latexdocstrreq':{l_and(isrequired,isintent_nothide):['\\item[]{{}\\verb@#pydocsign#@{}}',
{hasnote:'--- #note#'}]},
'latexdocstrout':{isintent_out:['\\item[]{{}\\verb@#pydocsignout#@{}}',
{l_and(hasnote,isintent_hide):'--- #note#',
l_and(hasnote,isintent_nothide):'--- See above.'}]},
'docsign':{l_and(isrequired,isintent_nothide):'#varname#,'},
'docsignopt':{l_and(isoptional,isintent_nothide):'#varname#,'},
'depend':''
},
{
'args':{
l_and (isscalar,isintent_c):'#ctype# #varname_i#',
l_and (isscalar,l_not(isintent_c)):'#ctype# *#varname_i#_cb_capi',
isarray:'#ctype# *#varname_i#',
isstring:'#ctype# #varname_i#'
},
'args_nm':{
l_and (isscalar,isintent_c):'#varname_i#',
l_and (isscalar,l_not(isintent_c)):'#varname_i#_cb_capi',
isarray:'#varname_i#',
isstring:'#varname_i#'
},
'args_td':{
l_and (isscalar,isintent_c):'#ctype#',
l_and (isscalar,l_not(isintent_c)):'#ctype# *',
isarray:'#ctype# *',
isstring:'#ctype#'
},
'strarglens':{isstring:',int #varname_i#_cb_len'}, # untested with multiple args
'strarglens_td':{isstring:',int'}, # untested with multiple args
'strarglens_nm':{isstring:',#varname_i#_cb_len'}, # untested with multiple args
},
{ # Scalars
'decl':{l_not(isintent_c):'\t#ctype# #varname_i#=(*#varname_i#_cb_capi);'},
'error': {l_and(isintent_c,isintent_out,
throw_error('intent(c,out) is forbidden for callback scalar arguments')):\
''},
'frompyobj':[{debugcapi:'\tCFUNCSMESS("cb:Getting #varname#->");'},
{isintent_out:'\tif (capi_j>capi_i)\n\t\tGETSCALARFROMPYTUPLE(capi_return,capi_i++,#varname_i#_cb_capi,#ctype#,"#ctype#_from_pyobj failed in converting argument #varname# of call-back function #name# to C #ctype#\\n");'},
{l_and(debugcapi,l_and(l_not(iscomplex),isintent_c)):'\tfprintf(stderr,"#showvalueformat#.\\n",#varname_i#);'},
{l_and(debugcapi,l_and(l_not(iscomplex),l_not(isintent_c))):'\tfprintf(stderr,"#showvalueformat#.\\n",*#varname_i#_cb_capi);'},
{l_and(debugcapi,l_and(iscomplex,isintent_c)):'\tfprintf(stderr,"#showvalueformat#.\\n",(#varname_i#).r,(#varname_i#).i);'},
{l_and(debugcapi,l_and(iscomplex,l_not(isintent_c))):'\tfprintf(stderr,"#showvalueformat#.\\n",(*#varname_i#_cb_capi).r,(*#varname_i#_cb_capi).i);'},
],
'need':[{isintent_out:['#ctype#_from_pyobj','GETSCALARFROMPYTUPLE']},
{debugcapi:'CFUNCSMESS'}],
'_check':isscalar
},{
'pyobjfrom':[{isintent_in:"""\
\tif (#name#_nofargs>capi_i)
\t\tif (PyTuple_SetItem((PyObject *)capi_arglist,capi_i++,pyobj_from_#ctype#1(#varname_i#)))
\t\t\tgoto capi_fail;"""},
{isintent_inout:"""\
\tif (#name#_nofargs>capi_i)
\t\tif (PyTuple_SetItem((PyObject *)capi_arglist,capi_i++,pyarr_from_p_#ctype#1(#varname_i#_cb_capi)))
\t\t\tgoto capi_fail;"""}],
'need':[{isintent_in:'pyobj_from_#ctype#1'},
{isintent_inout:'pyarr_from_p_#ctype#1'},
{iscomplex:'#ctype#'}],
'_check':l_and(isscalar,isintent_nothide),
'_optional':''
},{# String
'frompyobj':[{debugcapi:'\tCFUNCSMESS("cb:Getting #varname#->\\"");'},
"""\tif (capi_j>capi_i)
\t\tGETSTRFROMPYTUPLE(capi_return,capi_i++,#varname_i#,#varname_i#_cb_len);""",
{debugcapi:'\tfprintf(stderr,"#showvalueformat#\\":%d:.\\n",#varname_i#,#varname_i#_cb_len);'},
],
'need':['#ctype#','GETSTRFROMPYTUPLE',
{debugcapi:'CFUNCSMESS'},'string.h'],
'_check':l_and(isstring,isintent_out)
},{
'pyobjfrom':[{debugcapi:'\tfprintf(stderr,"debug-capi:cb:#varname#=\\"#showvalueformat#\\":%d:\\n",#varname_i#,#varname_i#_cb_len);'},
{isintent_in:"""\
\tif (#name#_nofargs>capi_i)
\t\tif (PyTuple_SetItem((PyObject *)capi_arglist,capi_i++,pyobj_from_#ctype#1size(#varname_i#,#varname_i#_cb_len)))
\t\t\tgoto capi_fail;"""},
{isintent_inout:"""\
\tif (#name#_nofargs>capi_i) {
\t\tint #varname_i#_cb_dims[] = {#varname_i#_cb_len};
\t\tif (PyTuple_SetItem((PyObject *)capi_arglist,capi_i++,pyarr_from_p_#ctype#1(#varname_i#,#varname_i#_cb_dims)))
\t\t\tgoto capi_fail;
\t}"""}],
'need':[{isintent_in:'pyobj_from_#ctype#1size'},
{isintent_inout:'pyarr_from_p_#ctype#1'}],
'_check':l_and(isstring,isintent_nothide),
'_optional':''
},
# Array ...
{
'decl':'\tnpy_intp #varname_i#_Dims[#rank#] = {#rank*[-1]#};',
'setdims':'\t#cbsetdims#;',
'_check':isarray,
'_depend':''
},
{
'pyobjfrom':[{debugcapi:'\tfprintf(stderr,"debug-capi:cb:#varname#\\n");'},
{isintent_c:"""\
\tif (#name#_nofargs>capi_i) {
\t\tPyArrayObject *tmp_arr = (PyArrayObject *)PyArray_New(&PyArray_Type,#rank#,#varname_i#_Dims,#atype#,NULL,(char*)#varname_i#,0,NPY_CARRAY,NULL); /*XXX: Hmm, what will destroy this array??? */
""",
l_not(isintent_c):"""\
\tif (#name#_nofargs>capi_i) {
\t\tPyArrayObject *tmp_arr = (PyArrayObject *)PyArray_New(&PyArray_Type,#rank#,#varname_i#_Dims,#atype#,NULL,(char*)#varname_i#,0,NPY_FARRAY,NULL); /*XXX: Hmm, what will destroy this array??? */
""",
},
"""
\t\tif (tmp_arr==NULL)
\t\t\tgoto capi_fail;
\t\tif (PyTuple_SetItem((PyObject *)capi_arglist,capi_i++,(PyObject *)tmp_arr))
\t\t\tgoto capi_fail;
}"""],
'_check':l_and(isarray,isintent_nothide,l_or(isintent_in,isintent_inout)),
'_optional':'',
},{
'frompyobj':[{debugcapi:'\tCFUNCSMESS("cb:Getting #varname#->");'},
"""\tif (capi_j>capi_i) {
\t\tPyArrayObject *rv_cb_arr = NULL;
\t\tif ((capi_tmp = PyTuple_GetItem(capi_return,capi_i++))==NULL) goto capi_fail;
\t\trv_cb_arr = array_from_pyobj(#atype#,#varname_i#_Dims,#rank#,F2PY_INTENT_IN""",
{isintent_c:'|F2PY_INTENT_C'},
""",capi_tmp);
\t\tif (rv_cb_arr == NULL) {
\t\t\tfprintf(stderr,\"rv_cb_arr is NULL\\n\");
\t\t\tgoto capi_fail;
\t\t}
\t\tMEMCOPY(#varname_i#,rv_cb_arr->data,PyArray_NBYTES(rv_cb_arr));
\t\tif (capi_tmp != (PyObject *)rv_cb_arr) {
\t\t\tPy_DECREF(rv_cb_arr);
\t\t}
\t}""",
{debugcapi:'\tfprintf(stderr,"<-.\\n");'},
],
'need':['MEMCOPY',{iscomplexarray:'#ctype#'}],
'_check':l_and(isarray,isintent_out)
},{
'docreturn':'#varname#,',
'_check':isintent_out
}
]
################## Build call-back module #############
cb_map={}
def buildcallbacks(m):
global cb_map
cb_map[m['name']]=[]
for bi in m['body']:
if bi['block']=='interface':
for b in bi['body']:
if b:
buildcallback(b,m['name'])
else:
errmess('warning: empty body for %s\n' % (m['name']))
def buildcallback(rout,um):
global cb_map
import capi_maps
outmess('\tConstructing call-back function "cb_%s_in_%s"\n'%(rout['name'],um))
args,depargs=getargs(rout)
capi_maps.depargs=depargs
var=rout['vars']
vrd=capi_maps.cb_routsign2map(rout,um)
rd=dictappend({},vrd)
cb_map[um].append([rout['name'],rd['name']])
for r in cb_rout_rules:
if ('_check' in r and r['_check'](rout)) or ('_check' not in r):
ar=applyrules(r,vrd,rout)
rd=dictappend(rd,ar)
savevrd={}
for i,a in enumerate(args):
vrd=capi_maps.cb_sign2map(a,var[a], index=i)
savevrd[a]=vrd
for r in cb_arg_rules:
if '_depend' in r:
continue
if '_optional' in r and isoptional(var[a]):
continue
if ('_check' in r and r['_check'](var[a])) or ('_check' not in r):
ar=applyrules(r,vrd,var[a])
rd=dictappend(rd,ar)
if '_break' in r:
break
for a in args:
vrd=savevrd[a]
for r in cb_arg_rules:
if '_depend' in r:
continue
if ('_optional' not in r) or ('_optional' in r and isrequired(var[a])):
continue
if ('_check' in r and r['_check'](var[a])) or ('_check' not in r):
ar=applyrules(r,vrd,var[a])
rd=dictappend(rd,ar)
if '_break' in r:
break
for a in depargs:
vrd=savevrd[a]
for r in cb_arg_rules:
if '_depend' not in r:
continue
if '_optional' in r:
continue
if ('_check' in r and r['_check'](var[a])) or ('_check' not in r):
ar=applyrules(r,vrd,var[a])
rd=dictappend(rd,ar)
if '_break' in r:
break
if 'args' in rd and 'optargs' in rd:
if type(rd['optargs'])==type([]):
rd['optargs']=rd['optargs']+["""
#ifndef F2PY_CB_RETURNCOMPLEX
,
#endif
"""]
rd['optargs_nm']=rd['optargs_nm']+["""
#ifndef F2PY_CB_RETURNCOMPLEX
,
#endif
"""]
rd['optargs_td']=rd['optargs_td']+["""
#ifndef F2PY_CB_RETURNCOMPLEX
,
#endif
"""]
if type(rd['docreturn'])==types.ListType:
rd['docreturn']=stripcomma(replace('#docreturn#',{'docreturn':rd['docreturn']}))
optargs=stripcomma(replace('#docsignopt#',
{'docsignopt':rd['docsignopt']}
))
if optargs=='':
rd['docsignature']=stripcomma(replace('#docsign#',{'docsign':rd['docsign']}))
else:
rd['docsignature']=replace('#docsign#[#docsignopt#]',
{'docsign':rd['docsign'],
'docsignopt':optargs,
})
rd['latexdocsignature']=rd['docsignature'].replace('_','\\_')
rd['latexdocsignature']=rd['latexdocsignature'].replace(',',', ')
rd['docstrsigns']=[]
rd['latexdocstrsigns']=[]
for k in ['docstrreq','docstropt','docstrout','docstrcbs']:
if k in rd and type(rd[k])==types.ListType:
rd['docstrsigns']=rd['docstrsigns']+rd[k]
k='latex'+k
if k in rd and type(rd[k])==types.ListType:
rd['latexdocstrsigns']=rd['latexdocstrsigns']+rd[k][0:1]+\
['\\begin{description}']+rd[k][1:]+\
['\\end{description}']
if 'args' not in rd:
rd['args']=''
rd['args_td']=''
rd['args_nm']=''
if not (rd.get('args') or rd.get('optargs') or rd.get('strarglens')):
rd['noargs'] = 'void'
ar=applyrules(cb_routine_rules,rd)
cfuncs.callbacks[rd['name']]=ar['body']
if type(ar['need'])==str:
ar['need']=[ar['need']]
if 'need' in rd:
for t in cfuncs.typedefs.keys():
if t in rd['need']:
ar['need'].append(t)
cfuncs.typedefs_generated[rd['name']+'_typedef'] = ar['cbtypedefs']
ar['need'].append(rd['name']+'_typedef')
cfuncs.needs[rd['name']]=ar['need']
capi_maps.lcb2_map[rd['name']]={'maxnofargs':ar['maxnofargs'],
'nofoptargs':ar['nofoptargs'],
'docstr':ar['docstr'],
'latexdocstr':ar['latexdocstr'],
'argname':rd['argname']
}
outmess('\t %s\n'%(ar['docstrshort']))
#print ar['body']
return
################## Build call-back function #############
| gpl-3.0 |
fretscha/pfa | config/settings/local.py | 1 | 2385 | # -*- coding: utf-8 -*-
'''
Local settings
- Run in Debug mode
- Use console backend for emails
- Add Django Debug Toolbar
- Add django-extensions as app
'''
from .common import * # noqa
# DEBUG
# ------------------------------------------------------------------------------
DEBUG = env.bool('DJANGO_DEBUG', default=True)
TEMPLATE_DEBUG = DEBUG
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key only used for development and testing.
SECRET_KEY = env("DJANGO_SECRET_KEY", default='CHANGEME!!!')
# Mail settings
# ------------------------------------------------------------------------------
EMAIL_HOST = 'localhost'
EMAIL_PORT = 1025
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND',
default='django.core.mail.backends.console.EmailBackend')
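# To inspect real SMTP traffic instead (illustrative; the console backend
# above just prints messages), a local debugging server matching the
# EMAIL_HOST/EMAIL_PORT settings can be started with:
#   python -m smtpd -n -c DebuggingServer localhost:1025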
# CACHING
# ------------------------------------------------------------------------------
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': ''
}
}
# django-debug-toolbar
# ------------------------------------------------------------------------------
MIDDLEWARE_CLASSES += ('debug_toolbar.middleware.DebugToolbarMiddleware',)
INSTALLED_APPS += ('debug_toolbar', )
INTERNAL_IPS = ('127.0.0.1', '10.0.2.2',)
DEBUG_TOOLBAR_CONFIG = {
'DISABLE_PANELS': [
'debug_toolbar.panels.redirects.RedirectsPanel',
],
'SHOW_TEMPLATE_CONTEXT': True,
}
# django-extensions
# ------------------------------------------------------------------------------
INSTALLED_APPS += ('django_extensions', )
# TESTING
# ------------------------------------------------------------------------------
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
# Your local stuff: Below this line define 3rd party library settings
# Session Security inactivity
# ------------------------------------------------------------------------------
SESSION_EXPIRE_AT_BROWSER_CLOSE = env.bool("DJANGO_SESSION_EXPIRE_AT_BROWSER_CLOSE", default=True)
SESSION_SECURITY_WARN_AFTER = env.int("DJANGO_SESSION_SECURITY_WARN_AFTER", default=120)
SESSION_SECURITY_EXPIRE_AFTER = env.int("DJANGO_SESSION_SECURITY_EXPIRE_AFTER", default=180)
#SESSION_SECURITY_PASSIVE_URLS = env.int("DJANGO_SESSION_SECURITY_PASSIVE_URLS", default = "")
| bsd-3-clause |
ihatevim/spotbot | plugins/media.py | 1 | 8305 | # IMDb lookup plugin by Ghetto Wizard (2011).
import re
import datetime
from urllib2 import URLError
from zipfile import ZipFile
from cStringIO import StringIO
from lxml import etree
from util import hook, http, web
base_url = "http://thetvdb.com/api/"
api_key = "469B73127CA0C411"
# http://thetvdb.com/api/GetSeries.php?seriesname=clannad
def get_zipped_xml(*args, **kwargs):
try:
path = kwargs.pop("path")
except KeyError:
raise KeyError("must specify a path for the zipped file to be read")
zip_buffer = StringIO(http.get(*args, **kwargs))
return etree.parse(ZipFile(zip_buffer, "r").open(path))
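# Example use (mirrors the call made in get_episodes_for_series below):
#   series = get_zipped_xml(base_url + '%s/series/%s/all/en.zip'
#                           % (api_key, series_id), path="en.xml")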
def get_series_info(seriesname):
res = {"error": None, "ended": False, "episodes": None, "name": None}
# http://thetvdb.com/wiki/index.php/API:GetSeries
try:
query = http.get_xml(base_url + 'GetSeries.php', seriesname=seriesname)
except URLError:
res["error"] = "error contacting thetvdb.com"
return res
series_id = ""
try: series_id = query.xpath('//id/text()')
except Exception: print "Failed to read series id from the response"
if not series_id:
result = "\x02Could not find show:\x02 %s" % seriesname
else:
series_name = query.xpath('//SeriesName/text()')[0]
overview = query.xpath('//Overview/text()')[0]
firstaired = query.xpath('//FirstAired/text()')[0]
#imdb_id = query.xpath('//IMDB_ID/text()')[0]
#imdb_url = web.isgd("http://www.imdb.com/title/%s" % imdb_id)
tvdb_url = web.isgd("http://thetvdb.com/?tab=series&id=%s" % series_id[0])
status = tv_next(seriesname)
result = '\x02%s\x02 (%s) \x02-\x02 \x02%s\x02 - [%s] - %s' % (series_name, firstaired, status, tvdb_url, overview)
return result
def get_episodes_for_series(seriesname):
res = {"error": None, "ended": False, "episodes": None, "name": None}
# http://thetvdb.com/wiki/index.php/API:GetSeries
try:
query = http.get_xml(base_url + 'GetSeries.php', seriesname=seriesname)
except URLError:
res["error"] = "error contacting thetvdb.com"
return res
series_id = query.xpath('//seriesid/text()')
if not series_id:
res["error"] = "unknown tv series (using www.thetvdb.com)"
return res
series_id = series_id[0]
try:
series = get_zipped_xml(base_url + '%s/series/%s/all/en.zip' %
(api_key, series_id), path="en.xml")
except URLError:
res["error"] = "error contacting thetvdb.com"
return res
series_name = series.xpath('//SeriesName/text()')[0]
if series.xpath('//Status/text()')[0] == 'Ended':
res["ended"] = True
res["episodes"] = series.xpath('//Episode')
res["name"] = series_name
return res
def get_episode_info(episode):
first_aired = episode.findtext("FirstAired")
try:
airdate = datetime.date(*map(int, first_aired.split('-')))
except (ValueError, TypeError):
return None
episode_num = "S%02dE%02d" % (int(episode.findtext("SeasonNumber")),
int(episode.findtext("EpisodeNumber")))
episode_name = episode.findtext("EpisodeName")
# in the event of an unannounced episode title, users either leave the
# field out (None) or fill it with TBA
if episode_name == "TBA":
episode_name = None
episode_desc = '%s' % episode_num
if episode_name:
episode_desc += ' - %s' % episode_name
return (first_aired, airdate, episode_desc)
@hook.command
@hook.command('show')
@hook.command('series')
def tv(inp):
".tv <series> -- get info for the <series>"
return get_series_info(inp)
@hook.command('next')
@hook.command
def tv_next(inp):
".tv_next <series> -- get the next episode of <series>"
episodes = get_episodes_for_series(inp)
if episodes["error"]:
return episodes["error"]
series_name = episodes["name"]
ended = episodes["ended"]
episodes = episodes["episodes"]
if ended:
return "%s has ended." % series_name
next_eps = []
today = datetime.date.today()
for episode in reversed(episodes):
ep_info = get_episode_info(episode)
if ep_info is None:
continue
(first_aired, airdate, episode_desc) = ep_info
if airdate > today:
next_eps = ['%s (%s)' % (first_aired, episode_desc)]
elif airdate == today:
next_eps = ['Today (%s)' % episode_desc] + next_eps
else:
# we're iterating in reverse order with newest episodes last,
# so as soon as we're past today, break out of the loop
break
if not next_eps:
return "No new episodes scheduled for %s" % series_name
if len(next_eps) == 1:
#return "The next episode of %s airs %s" % (series_name, next_eps[0])
return "Next episode: %s" % (next_eps[0])
else:
next_eps = ', '.join(next_eps)
return "Airs: %s" % (next_eps)
@hook.command
@hook.command('tv_prev')
@hook.command('prev')
@hook.command('last')
def tv_last(inp):
".tv_last <series> -- gets the most recently aired episode of <series>"
episodes = get_episodes_for_series(inp)
if episodes["error"]:
return episodes["error"]
series_name = episodes["name"]
ended = episodes["ended"]
episodes = episodes["episodes"]
prev_ep = None
today = datetime.date.today()
for episode in reversed(episodes):
ep_info = get_episode_info(episode)
if ep_info is None:
continue
(first_aired, airdate, episode_desc) = ep_info
if airdate < today:
# iterating in reverse order, so the first episode encountered
# before today was the most recently aired
prev_ep = '%s (%s)' % (first_aired, episode_desc)
break
if not prev_ep:
return "There are no previously aired episodes for %s" % series_name
if ended:
return '%s ended. The last episode aired %s' % (series_name, prev_ep)
return "The last episode of %s aired %s" % (series_name, prev_ep)
id_re = re.compile("tt\d+")
@hook.command('movie')
@hook.command
def imdb(inp):
"imdb <movie> -- Gets information about <movie> from IMDb."
strip = inp.strip()
if id_re.match(strip):
content = http.get_json("http://www.omdbapi.com/", i=strip)
else:
content = http.get_json("http://www.omdbapi.com/", t=strip)
if content.get('Error', None) == 'Movie not found!':
return 'Movie not found!'
elif content['Response'] == 'True':
content['URL'] = 'http://www.imdb.com/title/%(imdbID)s' % content
out = '\x02%(Title)s\x02 (%(Year)s) (%(Genre)s): %(Plot)s'
if content['Runtime'] != 'N/A':
out += ' \x02%(Runtime)s\x02.'
if content['imdbRating'] != 'N/A' and content['imdbVotes'] != 'N/A':
out += ' \x02%(imdbRating)s/10\x02 with \x02%(imdbVotes)s\x02' \
' votes.'
out += ' %(URL)s'
return out % content
else:
return 'Unknown error.'
api_root = 'http://api.rottentomatoes.com/api/public/v1.0/'
movie_search_url = api_root + 'movies.json'
movie_reviews_url = api_root + 'movies/%s/reviews.json'
@hook.command('rt')
@hook.command
def rottentomatoes(inp,bot=None):
'.rt <title> -- gets ratings for <title> from Rotten Tomatoes'
api_key = bot.config.get("api_keys", {}).get("rottentomatoes", None)
if not api_key:
return "error: no api key set"
results = http.get_json(movie_search_url, q=inp, apikey=api_key)
if results['total'] == 0:
return 'no results'
movie = results['movies'][0]
title = movie['title']
id = movie['id']
critics_score = movie['ratings']['critics_score']
audience_score = movie['ratings']['audience_score']
url = movie['links']['alternate']
if critics_score == -1:
return
reviews = http.get_json(movie_reviews_url % id, apikey=api_key, review_type='all')
review_count = reviews['total']
fresh = critics_score * review_count / 100
rotten = review_count - fresh
return u"%s - critics: \x02%d%%\x02 (%d\u2191/%d\u2193) audience: \x02%d%%\x02 - %s" % (title, critics_score, fresh, rotten, audience_score, url)
| gpl-3.0 |
pombredanne/mitmproxy | test/test_script.py | 24 | 3236 | import os
import time
import mock
from libmproxy import script, flow
import tutils
def test_simple():
s = flow.State()
fm = flow.FlowMaster(None, s)
sp = tutils.test_data.path("scripts/a.py")
p = script.Script("%s --var 40" % sp, fm)
assert "here" in p.ns
assert p.run("here") == 41
assert p.run("here") == 42
tutils.raises(script.ScriptError, p.run, "errargs")
# Check reload
p.load()
assert p.run("here") == 41
def test_duplicate_flow():
s = flow.State()
fm = flow.FlowMaster(None, s)
fm.load_script(tutils.test_data.path("scripts/duplicate_flow.py"))
f = tutils.tflow()
fm.handle_request(f)
assert fm.state.flow_count() == 2
assert not fm.state.view[0].request.is_replay
assert fm.state.view[1].request.is_replay
def test_err():
s = flow.State()
fm = flow.FlowMaster(None, s)
tutils.raises(
"not found",
script.Script, "nonexistent", fm
)
tutils.raises(
"not a file",
script.Script, tutils.test_data.path("scripts"), fm
)
tutils.raises(
script.ScriptError,
script.Script, tutils.test_data.path("scripts/syntaxerr.py"), fm
)
tutils.raises(
script.ScriptError,
script.Script, tutils.test_data.path("scripts/loaderr.py"), fm
)
scr = script.Script(tutils.test_data.path("scripts/unloaderr.py"), fm)
tutils.raises(script.ScriptError, scr.unload)
def test_concurrent():
s = flow.State()
fm = flow.FlowMaster(None, s)
fm.load_script(tutils.test_data.path("scripts/concurrent_decorator.py"))
with mock.patch("libmproxy.controller.DummyReply.__call__") as m:
f1, f2 = tutils.tflow(), tutils.tflow()
t_start = time.time()
fm.handle_request(f1)
f1.reply()
fm.handle_request(f2)
f2.reply()
# Two instantiations
assert m.call_count == 0 # No calls yet.
assert (time.time() - t_start) < 0.09
def test_concurrent2():
s = flow.State()
fm = flow.FlowMaster(None, s)
s = script.Script(
tutils.test_data.path("scripts/concurrent_decorator.py"),
fm)
s.load()
m = mock.Mock()
class Dummy:
def __init__(self):
self.response = self
self.error = self
self.reply = m
t_start = time.time()
for hook in ("clientconnect",
"serverconnect",
"response",
"error",
"clientconnect"):
d = Dummy()
s.run(hook, d)
d.reply()
while (time.time() - t_start) < 20 and m.call_count <= 5:
if m.call_count == 5:
return
time.sleep(0.001)
assert False
def test_concurrent_err():
s = flow.State()
fm = flow.FlowMaster(None, s)
tutils.raises(
"Concurrent decorator not supported for 'start' method",
script.Script,
tutils.test_data.path("scripts/concurrent_decorator_err.py"),
fm)
def test_command_parsing():
s = flow.State()
fm = flow.FlowMaster(None, s)
absfilepath = os.path.normcase(tutils.test_data.path("scripts/a.py"))
s = script.Script(absfilepath, fm)
assert os.path.isfile(s.args[0])
| mit |
cosmo-ethz/CosmoHammer | Tests/test_SampleFileUtil.py | 1 | 2747 | """
Tests for the SampleFileUtil module.
Execute with py.test -v
"""
from __future__ import print_function, division, absolute_import, unicode_literals
import tempfile
import os
import numpy
import cosmoHammer.Constants as c
from cosmoHammer.util.SampleFileUtil import SampleFileUtil
class TestSampleFileUtil(object):
prefix = "test"
def createFileUtil(self):
tempPath = tempfile.mkdtemp()
tempPath = os.path.join(tempPath, self.prefix)
fileUtil = SampleFileUtil(tempPath, True)
return fileUtil, tempPath
def test_not_master(self):
tempPath = tempfile.mkdtemp()
SampleFileUtil(tempPath, False)
fileList = os.listdir(tempPath)
assert len(fileList) == 0
def test_persistBurninValues(self):
fileUtil, tempPath = self.createFileUtil()
pos = numpy.ones((10,5))
prob = numpy.zeros(10)
fileUtil.persistBurninValues(pos, prob, None)
cPos = numpy.loadtxt(tempPath + c.BURNIN_SUFFIX)
cProb = numpy.loadtxt(tempPath + c.BURNIN_PROB_SUFFIX)
assert (pos == cPos).all()
assert (prob == cProb).all()
def test_persistSamplingValues(self):
fileUtil, tempPath = self.createFileUtil()
pos = numpy.ones((10,5))
prob = numpy.zeros(10)
fileUtil.persistSamplingValues(pos, prob, None)
cPos = numpy.loadtxt(tempPath + c.FILE_SUFFIX)
cProb = numpy.loadtxt(tempPath + c.PROB_SUFFIX)
assert (pos == cPos).all()
assert (prob == cProb).all()
def test_importFromFile(self):
fileUtil, tempPath = self.createFileUtil()
pos = numpy.ones((10,5))
prob = numpy.zeros(10)
fileUtil.persistSamplingValues(pos, prob, None)
cPos = fileUtil.importFromFile(tempPath + c.FILE_SUFFIX)
cProb = fileUtil.importFromFile(tempPath + c.PROB_SUFFIX)
assert (pos == cPos).all()
assert (prob == cProb).all()
def test_storeRandomState(self):
fileUtil, tempPath = self.createFileUtil()
rstate = numpy.random.mtrand.RandomState()
fileUtil.storeRandomState(tempPath+c.BURNIN_STATE_SUFFIX, rstate)
cRstate = fileUtil.importRandomState(tempPath+c.BURNIN_STATE_SUFFIX)
print(rstate.get_state())
oState = rstate.get_state()
nState = cRstate.get_state()
assert oState[0] == nState[0]
assert all(oState[1] == nState[1])
assert oState[2] == nState[2]
assert oState[3] == nState[3]
assert oState[4] == nState[4]
| gpl-3.0 |
sigma-random/avmplus | test/cmdline/testVMbaseConcurrencySelfTest.py | 8 | 2383 | #!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from cmdutils import *
import os
def run():
r=RunTestLib()
r.run_test('VMbaseConcurrency(-Dselftest=vmbase,concurrency)','%s -Dselftest=vmbase,concurrency' % r.avm,expectedcode=0,
expectedout=[
"\['start', 'vmbase', 'concurrency'\]",
"\['test', 'vmbase', 'concurrency', 'mutexes'\]",
"\['pass', 'vmbase', 'concurrency', 'mutexes'\]",
"\['test', 'vmbase', 'concurrency', 'conditions'\]",
"\['pass', 'vmbase', 'concurrency', 'conditions'\]",
"\['test', 'vmbase', 'concurrency', 'atomic_counter'\]",
"\['pass', 'vmbase', 'concurrency', 'atomic_counter'\]",
"\['test', 'vmbase', 'concurrency', 'compare_and_swap_without_barrier'\]",
"\['pass', 'vmbase', 'concurrency', 'compare_and_swap_without_barrier'\]",
"\['test', 'vmbase', 'concurrency', 'compare_and_swap_with_barrier'\]",
"\['pass', 'vmbase', 'concurrency', 'compare_and_swap_with_barrier'\]",
"\['test', 'vmbase', 'concurrency', 'memory_barrier'\]",
"\['pass', 'vmbase', 'concurrency', 'memory_barrier'\]",
"\['test', 'vmbase', 'concurrency', 'condition_with_wait'\]",
"\['pass', 'vmbase', 'concurrency', 'condition_with_wait'\]",
"\['test', 'vmbase', 'concurrency', 'sleep'\]",
"\['pass', 'vmbase', 'concurrency', 'sleep'\]",
"\['test', 'vmbase', 'concurrency', 'vmthreadlocal'\]",
"\['pass', 'vmbase', 'concurrency', 'vmthreadlocal'\]",
"\['test', 'vmbase', 'concurrency', 'join'\]",
"\['pass', 'vmbase', 'concurrency', 'join'\]",
"\['end', 'vmbase', 'concurrency'\]"
]
)
if __name__ == '__main__':
r=RunTestLib()
run()
| mpl-2.0 |
lepistone/odoo | addons/plugin_thunderbird/plugin_thunderbird.py | 92 | 2075 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class plugin_thunderbird_installer(osv.osv_memory):
_name = 'plugin_thunderbird.installer'
_inherit = 'res.config.installer'
_columns = {
'thunderbird': fields.boolean('Thunderbird Plug-in', help="Allows you to select an object that you would like to add to your email and its attachments."),
'plugin_name': fields.char('File name', size=64),
'plugin_file': fields.char('Thunderbird Plug-in', size=256, readonly=True, help="Thunderbird plug-in file. Save this file and install it in Thunderbird."),
}
_defaults = {
'thunderbird': True,
'plugin_name': 'openerp_plugin.xpi',
}
def default_get(self, cr, uid, fields, context=None):
res = super(plugin_thunderbird_installer, self).default_get(cr, uid, fields, context)
base_url = self.pool.get('ir.config_parameter').get_param(cr, uid, 'web.base.url')
res['plugin_file'] = base_url + '/plugin_thunderbird/static/openerp_plugin.xpi'
return res
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
datalogics/scons | test/Scanner/scan-once.py | 2 | 2747 | #!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Verify that Scanners are called just once.
"""
import TestSCons
test = TestSCons.TestSCons()
test.write('SConstruct', r"""
import os.path
def scan(node, env, envkey, arg):
print 'XScanner: node =', os.path.split(str(node))[1]
return []
def exists_check(node, env):
return os.path.exists(str(node))
XScanner = Scanner(name = 'XScanner',
function = scan,
argument = None,
scan_check = exists_check,
skeys = ['.x'])
def echo(env, target, source):
t = os.path.split(str(target[0]))[1]
s = os.path.split(str(source[0]))[1]
print 'create %s from %s' % (t, s)
Echo = Builder(action = Action(echo, None),
src_suffix = '.x',
suffix = '.x')
env = Environment(BUILDERS = {'Echo':Echo}, SCANNERS = [XScanner])
f1 = env.Echo(source=['file1'], target=['file2'])
f2 = env.Echo(source=['file2'], target=['file3'])
f3 = env.Echo(source=['file3'], target=['file4'])
""")
test.write('file1.x', 'file1.x\n')
test.run(stdout = test.wrap_stdout("""\
XScanner: node = file1.x
create file2.x from file1.x
create file3.x from file2.x
create file4.x from file3.x
"""))
test.write('file2.x', 'file2.x\n')
test.run(stdout = test.wrap_stdout("""\
XScanner: node = file1.x
XScanner: node = file2.x
create file3.x from file2.x
create file4.x from file3.x
"""))
test.write('file3.x', 'file3.x\n')
test.run(stdout = test.wrap_stdout("""\
XScanner: node = file1.x
XScanner: node = file2.x
XScanner: node = file3.x
create file4.x from file3.x
"""))
test.pass_test()
| mit |
Arable/evepod | lib/python2.7/site-packages/gunicorn/config.py | 23 | 37678 | # -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
import copy
import grp
import inspect
try:
import argparse
except ImportError: # python 2.6
from . import argparse_compat as argparse
import os
import pwd
import sys
import textwrap
import types
from gunicorn import __version__
from gunicorn.errors import ConfigError
from gunicorn import six
from gunicorn import util
KNOWN_SETTINGS = []
PLATFORM = sys.platform
def wrap_method(func):
def _wrapped(instance, *args, **kwargs):
return func(*args, **kwargs)
return _wrapped
def make_settings(ignore=None):
settings = {}
ignore = ignore or ()
for s in KNOWN_SETTINGS:
setting = s()
if setting.name in ignore:
continue
settings[setting.name] = setting.copy()
return settings
class Config(object):
def __init__(self, usage=None, prog=None):
self.settings = make_settings()
self.usage = usage
self.prog = prog or os.path.basename(sys.argv[0])
self.env_orig = os.environ.copy()
def __getattr__(self, name):
if name not in self.settings:
raise AttributeError("No configuration setting for: %s" % name)
return self.settings[name].get()
def __setattr__(self, name, value):
if name != "settings" and name in self.settings:
raise AttributeError("Invalid access!")
super(Config, self).__setattr__(name, value)
def set(self, name, value):
if name not in self.settings:
raise AttributeError("No configuration setting for: %s" % name)
self.settings[name].set(value)
def parser(self):
kwargs = {
"usage": self.usage,
"prog": self.prog
}
parser = argparse.ArgumentParser(**kwargs)
parser.add_argument("-v", "--version",
action="version", default=argparse.SUPPRESS,
version="%(prog)s (version " + __version__ + ")\n",
help="show program's version number and exit")
parser.add_argument("args", nargs="*", help=argparse.SUPPRESS)
keys = list(self.settings)
def sorter(k):
return (self.settings[k].section, self.settings[k].order)
keys = sorted(self.settings, key=self.settings.__getitem__)
for k in keys:
self.settings[k].add_option(parser)
return parser
@property
def worker_class(self):
uri = self.settings['worker_class'].get()
worker_class = util.load_class(uri)
if hasattr(worker_class, "setup"):
worker_class.setup()
return worker_class
@property
def workers(self):
return self.settings['workers'].get()
@property
def address(self):
s = self.settings['bind'].get()
return [util.parse_address(six.bytes_to_str(bind)) for bind in s]
@property
def uid(self):
return self.settings['user'].get()
@property
def gid(self):
return self.settings['group'].get()
@property
def proc_name(self):
pn = self.settings['proc_name'].get()
if pn is not None:
return pn
else:
return self.settings['default_proc_name'].get()
@property
def logger_class(self):
uri = self.settings['logger_class'].get()
logger_class = util.load_class(uri, default="simple",
section="gunicorn.loggers")
if hasattr(logger_class, "install"):
logger_class.install()
return logger_class
@property
def is_ssl(self):
return self.certfile or self.keyfile
@property
def ssl_options(self):
opts = {}
if self.certfile:
opts['certfile'] = self.certfile
if self.keyfile:
opts['keyfile'] = self.keyfile
return opts
@property
def env(self):
raw_env = self.settings['raw_env'].get()
env = {}
if not raw_env:
return env
for e in raw_env:
s = six.bytes_to_str(e)
try:
k, v = s.split('=', 1)  # split once so '=' may appear in the value
except ValueError:
raise RuntimeError("environement setting %r invalid" % s)
env[k] = v
return env
class SettingMeta(type):
def __new__(cls, name, bases, attrs):
super_new = super(SettingMeta, cls).__new__
parents = [b for b in bases if isinstance(b, SettingMeta)]
if not parents:
return super_new(cls, name, bases, attrs)
attrs["order"] = len(KNOWN_SETTINGS)
attrs["validator"] = wrap_method(attrs["validator"])
new_class = super_new(cls, name, bases, attrs)
new_class.fmt_desc(attrs.get("desc", ""))
KNOWN_SETTINGS.append(new_class)
return new_class
def fmt_desc(cls, desc):
desc = textwrap.dedent(desc).strip()
setattr(cls, "desc", desc)
setattr(cls, "short", desc.splitlines()[0])
class Setting(object):
name = None
value = None
section = None
cli = None
validator = None
type = None
meta = None
action = None
default = None
short = None
desc = None
nargs = None
const = None
def __init__(self):
if self.default is not None:
self.set(self.default)
def add_option(self, parser):
if not self.cli:
return
args = tuple(self.cli)
help_txt = "%s [%s]" % (self.short, self.default)
help_txt = help_txt.replace("%", "%%")
kwargs = {
"dest": self.name,
"action": self.action or "store",
"type": self.type or str,
"default": None,
"help": help_txt
}
if self.meta is not None:
kwargs['metavar'] = self.meta
if kwargs["action"] != "store":
kwargs.pop("type")
if self.nargs is not None:
kwargs["nargs"] = self.nargs
if self.const is not None:
kwargs["const"] = self.const
parser.add_argument(*args, **kwargs)
def copy(self):
return copy.copy(self)
def get(self):
return self.value
def set(self, val):
assert six.callable(self.validator), "Invalid validator: %s" % self.name
self.value = self.validator(val)
def __lt__(self, other):
return (self.section == other.section and
self.order < other.order)
__cmp__ = __lt__
Setting = SettingMeta('Setting', (Setting,), {})
def validate_bool(val):
if isinstance(val, bool):
return val
if not isinstance(val, six.string_types):
raise TypeError("Invalid type for casting: %s" % val)
if val.lower().strip() == "true":
return True
elif val.lower().strip() == "false":
return False
else:
raise ValueError("Invalid boolean: %s" % val)
def validate_dict(val):
if not isinstance(val, dict):
raise TypeError("Value is not a dictionary: %s " % val)
return val
def validate_pos_int(val):
if not isinstance(val, six.integer_types):
val = int(val, 0)
else:
# Booleans are ints!
val = int(val)
if val < 0:
raise ValueError("Value must be positive: %s" % val)
return val
def validate_string(val):
if val is None:
return None
if not isinstance(val, six.string_types):
raise TypeError("Not a string: %s" % val)
return val.strip()
def validate_list_string(val):
if not val:
return []
# legacy syntax
if isinstance(val, six.string_types):
val = [val]
return [validate_string(v) for v in val]
def validate_string_to_list(val):
val = validate_string(val)
if not val:
return []
return [v.strip() for v in val.split(",") if v]
def validate_class(val):
if inspect.isfunction(val) or inspect.ismethod(val):
val = val()
if inspect.isclass(val):
return val
return validate_string(val)
def validate_callable(arity):
def _validate_callable(val):
if isinstance(val, six.string_types):
try:
mod_name, obj_name = val.rsplit(".", 1)
except ValueError:
raise TypeError("Value '%s' is not import string. "
"Format: module[.submodules...].object" % val)
try:
mod = __import__(mod_name, fromlist=[obj_name])
val = getattr(mod, obj_name)
except ImportError as e:
raise TypeError(str(e))
except AttributeError:
raise TypeError("Can not load '%s' from '%s'"
"" % (obj_name, mod_name))
if not six.callable(val):
raise TypeError("Value is not six.callable: %s" % val)
if arity != -1 and arity != len(inspect.getargspec(val)[0]):
raise TypeError("Value must have an arity of: %s" % arity)
return val
return _validate_callable
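# Illustrative use (not part of gunicorn itself): a dotted import string
# resolves to the callable it names, while a wrong arity raises TypeError:
#   validate_callable(-1)("os.path.join") is os.path.join  # -> True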
def validate_user(val):
if val is None:
return os.geteuid()
if isinstance(val, int):
return val
elif val.isdigit():
return int(val)
else:
try:
return pwd.getpwnam(val).pw_uid
except KeyError:
raise ConfigError("No such user: '%s'" % val)
def validate_group(val):
if val is None:
return os.getegid()
if isinstance(val, int):
return val
elif val.isdigit():
return int(val)
else:
try:
return grp.getgrnam(val).gr_gid
except KeyError:
raise ConfigError("No such group: '%s'" % val)
def validate_post_request(val):
val = validate_callable(-1)(val)
largs = len(inspect.getargspec(val)[0])
if largs == 4:
return val
elif largs == 3:
return lambda worker, req, env, _r: val(worker, req, env)
elif largs == 2:
return lambda worker, req, _e, _r: val(worker, req)
else:
raise TypeError("Value must have an arity of: 4")
def validate_chdir(val):
# valid if the value is a string
val = validate_string(val)
# transform relative paths
path = os.path.abspath(os.path.normpath(os.path.join(util.getcwd(), val)))
# test if the path exists
if not os.path.exists(path):
raise ConfigError("can't chdir to %r" % val)
return path
def validate_file(val):
if val is None:
return None
# valid if the value is a string
val = validate_string(val)
# transform relative paths
path = os.path.abspath(os.path.normpath(os.path.join(util.getcwd(), val)))
# test if the path exists
if not os.path.exists(path):
raise ConfigError("%r not found" % val)
return path
def get_default_config_file():
config_path = os.path.join(os.path.abspath(os.getcwd()),
'gunicorn.conf.py')
if os.path.exists(config_path):
return config_path
return None
class ConfigFile(Setting):
name = "config"
section = "Config File"
cli = ["-c", "--config"]
meta = "FILE"
validator = validate_string
default = None
desc = """\
The path to a Gunicorn config file.
Only has an effect when specified on the command line or as part of an
application specific configuration.
"""
class Bind(Setting):
name = "bind"
action = "append"
section = "Server Socket"
cli = ["-b", "--bind"]
meta = "ADDRESS"
validator = validate_list_string
if 'PORT' in os.environ:
default = ['0.0.0.0:{0}'.format(os.environ.get('PORT'))]
else:
default = ['127.0.0.1:8000']
desc = """\
The socket to bind.
A string of the form: 'HOST', 'HOST:PORT', 'unix:PATH'. An IP is a valid
HOST.
Multiple addresses can be bound. ex.::
$ gunicorn -b 127.0.0.1:8000 -b [::1]:8000 test:app
        will bind the `test:app` application on localhost, on both IPv4
        and IPv6 interfaces.
"""
class Backlog(Setting):
name = "backlog"
section = "Server Socket"
cli = ["--backlog"]
meta = "INT"
validator = validate_pos_int
type = int
default = 2048
desc = """\
The maximum number of pending connections.
This refers to the number of clients that can be waiting to be served.
Exceeding this number results in the client getting an error when
attempting to connect. It should only affect servers under significant
load.
Must be a positive integer. Generally set in the 64-2048 range.
"""
class Workers(Setting):
name = "workers"
section = "Worker Processes"
cli = ["-w", "--workers"]
meta = "INT"
validator = validate_pos_int
type = int
default = 1
desc = """\
        The number of worker processes for handling requests.
A positive integer generally in the 2-4 x $(NUM_CORES) range. You'll
want to vary this a bit to find the best for your particular
application's work load.
"""
class WorkerClass(Setting):
name = "worker_class"
section = "Worker Processes"
cli = ["-k", "--worker-class"]
meta = "STRING"
validator = validate_class
default = "sync"
desc = """\
The type of workers to use.
The default class (sync) should handle most 'normal' types of
workloads. You'll want to read
http://docs.gunicorn.org/en/latest/design.html for information
on when you might want to choose one of the other worker
classes.
A string referring to one of the following bundled classes:
* ``sync``
* ``eventlet`` - Requires eventlet >= 0.9.7
* ``gevent`` - Requires gevent >= 0.12.2 (?)
* ``tornado`` - Requires tornado >= 0.2
        Optionally, you can provide your own worker by giving Gunicorn a
        Python path to a subclass of gunicorn.workers.base.Worker. For
        example, this alternative syntax will load the gevent class:
        ``gunicorn.workers.ggevent.GeventWorker``. The same class can also
        be loaded with the egg syntax: ``egg:gunicorn#gevent``
"""
class WorkerConnections(Setting):
name = "worker_connections"
section = "Worker Processes"
cli = ["--worker-connections"]
meta = "INT"
validator = validate_pos_int
type = int
default = 1000
desc = """\
The maximum number of simultaneous clients.
This setting only affects the Eventlet and Gevent worker types.
"""
class MaxRequests(Setting):
name = "max_requests"
section = "Worker Processes"
cli = ["--max-requests"]
meta = "INT"
validator = validate_pos_int
type = int
default = 0
desc = """\
The maximum number of requests a worker will process before restarting.
        Any value greater than zero will limit the number of requests a worker
will process before automatically restarting. This is a simple method
to help limit the damage of memory leaks.
If this is set to zero (the default) then the automatic worker
restarts are disabled.
"""
class Timeout(Setting):
name = "timeout"
section = "Worker Processes"
cli = ["-t", "--timeout"]
meta = "INT"
validator = validate_pos_int
type = int
default = 30
desc = """\
Workers silent for more than this many seconds are killed and restarted.
Generally set to thirty seconds. Only set this noticeably higher if
        you're sure of the repercussions for sync workers. For the non-sync
workers it just means that the worker process is still communicating and
is not tied to the length of time required to handle a single request.
"""
class GracefulTimeout(Setting):
name = "graceful_timeout"
section = "Worker Processes"
cli = ["--graceful-timeout"]
meta = "INT"
validator = validate_pos_int
type = int
default = 30
desc = """\
        Timeout for graceful worker restarts.
        Generally set to thirty seconds. After receiving a restart signal,
        workers have this much time to finish serving requests; workers
        still alive after the timeout are forcefully killed.
"""
class Keepalive(Setting):
name = "keepalive"
section = "Worker Processes"
cli = ["--keep-alive"]
meta = "INT"
validator = validate_pos_int
type = int
default = 2
desc = """\
The number of seconds to wait for requests on a Keep-Alive connection.
Generally set in the 1-5 seconds range.
"""
class LimitRequestLine(Setting):
name = "limit_request_line"
section = "Security"
cli = ["--limit-request-line"]
meta = "INT"
validator = validate_pos_int
type = int
default = 4094
desc = """\
The maximum size of HTTP request line in bytes.
This parameter is used to limit the allowed size of a client's
HTTP request-line. Since the request-line consists of the HTTP
method, URI, and protocol version, this directive places a
restriction on the length of a request-URI allowed for a request
on the server. A server needs this value to be large enough to
hold any of its resource names, including any information that
might be passed in the query part of a GET request. Value is a number
from 0 (unlimited) to 8190.
        This parameter can be used to help mitigate DDoS attacks.
"""
class LimitRequestFields(Setting):
name = "limit_request_fields"
section = "Security"
cli = ["--limit-request-fields"]
meta = "INT"
validator = validate_pos_int
type = int
default = 100
desc = """\
Limit the number of HTTP headers fields in a request.
        This parameter is used to limit the number of headers in a request to
        help mitigate DDoS attacks. Used together with `limit_request_field_size`,
        it improves safety. By default this value is 100, and it can't be larger
        than 32768.
"""
class LimitRequestFieldSize(Setting):
name = "limit_request_field_size"
section = "Security"
cli = ["--limit-request-field_size"]
meta = "INT"
validator = validate_pos_int
type = int
default = 8190
desc = """\
Limit the allowed size of an HTTP request header field.
        Value is a number from 0 (unlimited) to 8190, setting the limit on
        the allowed size of an HTTP request header field.
"""
class Debug(Setting):
name = "debug"
section = "Debugging"
cli = ["--debug"]
validator = validate_bool
action = "store_true"
default = False
desc = """\
Turn on debugging in the server.
This limits the number of worker processes to 1 and changes some error
handling that's sent to clients.
"""
class Spew(Setting):
name = "spew"
section = "Debugging"
cli = ["--spew"]
validator = validate_bool
action = "store_true"
default = False
desc = """\
Install a trace function that spews every line executed by the server.
This is the nuclear option.
"""
class ConfigCheck(Setting):
name = "check_config"
section = "Debugging"
cli = ["--check-config", ]
validator = validate_bool
action = "store_true"
default = False
desc = """\
        Check the configuration.
"""
class PreloadApp(Setting):
name = "preload_app"
section = "Server Mechanics"
cli = ["--preload"]
validator = validate_bool
action = "store_true"
default = False
desc = """\
Load application code before the worker processes are forked.
By preloading an application you can save some RAM resources as well as
        speed up server boot times. However, if you defer application loading
to each worker process, you can reload your application code easily by
restarting workers.
"""
class Chdir(Setting):
name = "chdir"
section = "Server Mechanics"
cli = ["--chdir"]
validator = validate_chdir
default = util.getcwd()
desc = """\
        Change directory to the specified directory before loading apps.
"""
class Daemon(Setting):
name = "daemon"
section = "Server Mechanics"
cli = ["-D", "--daemon"]
validator = validate_bool
action = "store_true"
default = False
desc = """\
Daemonize the Gunicorn process.
Detaches the server from the controlling terminal and enters the
background.
"""
class Env(Setting):
name = "raw_env"
action = "append"
section = "Server Mechanic"
cli = ["-e", "--env"]
meta = "ENV"
validator = validate_list_string
default = []
desc = """\
Set environment variable (key=value).
Pass variables to the execution environment. Ex.::
$ gunicorn -b 127.0.0.1:8000 --env FOO=1 test:app
        and test for the FOO environment variable in your application.
"""
class Pidfile(Setting):
name = "pidfile"
section = "Server Mechanics"
cli = ["-p", "--pid"]
meta = "FILE"
validator = validate_string
default = None
desc = """\
A filename to use for the PID file.
If not set, no PID file will be written.
"""
class User(Setting):
name = "user"
section = "Server Mechanics"
cli = ["-u", "--user"]
meta = "USER"
validator = validate_user
default = os.geteuid()
desc = """\
Switch worker processes to run as this user.
A valid user id (as an integer) or the name of a user that can be
retrieved with a call to pwd.getpwnam(value) or None to not change
the worker process user.
"""
class Group(Setting):
name = "group"
section = "Server Mechanics"
cli = ["-g", "--group"]
meta = "GROUP"
validator = validate_group
default = os.getegid()
desc = """\
Switch worker process to run as this group.
        A valid group id (as an integer) or the name of a group that can be
        retrieved with a call to grp.getgrnam(value) or None to not change
        the worker processes group.
"""
class Umask(Setting):
name = "umask"
section = "Server Mechanics"
cli = ["-m", "--umask"]
meta = "INT"
validator = validate_pos_int
type = int
default = 0
desc = """\
A bit mask for the file mode on files written by Gunicorn.
Note that this affects unix socket permissions.
A valid value for the os.umask(mode) call or a string compatible with
int(value, 0) (0 means Python guesses the base, so values like "0",
"0xFF", "0022" are valid for decimal, hex, and octal representations)
"""
class TmpUploadDir(Setting):
name = "tmp_upload_dir"
section = "Server Mechanics"
meta = "DIR"
validator = validate_string
default = None
desc = """\
Directory to store temporary request data as they are read.
This may disappear in the near future.
This path should be writable by the process permissions set for Gunicorn
workers. If not specified, Gunicorn will choose a system generated
temporary directory.
"""
class SecureSchemeHeader(Setting):
name = "secure_scheme_headers"
section = "Server Mechanics"
validator = validate_dict
default = {
"X-FORWARDED-PROTOCOL": "ssl",
"X-FORWARDED-PROTO": "https",
"X-FORWARDED-SSL": "on"
}
desc = """\
A dictionary containing headers and values that the front-end proxy
uses to indicate HTTPS requests. These tell gunicorn to set
wsgi.url_scheme to "https", so your application can tell that the
request is secure.
The dictionary should map upper-case header names to exact string
values. The value comparisons are case-sensitive, unlike the header
names, so make sure they're exactly what your front-end proxy sends
when handling HTTPS requests.
It is important that your front-end proxy configuration ensures that
the headers defined here can not be passed directly from the client.
"""
class XForwardedFor(Setting):
name = "x_forwarded_for_header"
section = "Server Mechanics"
meta = "STRING"
validator = validate_string
default = 'X-FORWARDED-FOR'
desc = """\
        Set the X-Forwarded-For header that identifies the originating IP
address of the client connection to gunicorn via a proxy.
"""
class ForwardedAllowIPS(Setting):
name = "forwarded_allow_ips"
section = "Server Mechanics"
meta = "STRING"
validator = validate_string_to_list
default = "127.0.0.1"
desc = """\
        Front-end's IPs from which it is allowed to handle X-Forwarded-*
        headers (comma-separated).
Set to "*" to disable checking of Front-end IPs (useful for setups
where you don't know in advance the IP address of Front-end, but
you still trust the environment)
"""
class AccessLog(Setting):
name = "accesslog"
section = "Logging"
cli = ["--access-logfile"]
meta = "FILE"
validator = validate_string
default = None
desc = """\
The Access log file to write to.
"-" means log to stderr.
"""
class AccessLogFormat(Setting):
name = "access_log_format"
section = "Logging"
cli = ["--access-logformat"]
meta = "STRING"
validator = validate_string
default = '%(h)s %(l)s %(u)s %(t)s "%(r)s" %(s)s %(b)s "%(f)s" "%(a)s"'
desc = """\
        The Access log format.
By default:
%(h)s %(l)s %(u)s %(t)s "%(r)s" %(s)s %(b)s "%(f)s" "%(a)s"
h: remote address
l: '-'
u: currently '-', may be user name in future releases
t: date of the request
r: status line (ex: GET / HTTP/1.1)
s: status
b: response length or '-'
f: referer
a: user agent
T: request time in seconds
        D: request time in microseconds
p: process ID
{Header}i: request header
{Header}o: response header
"""
class ErrorLog(Setting):
name = "errorlog"
section = "Logging"
cli = ["--error-logfile", "--log-file"]
meta = "FILE"
validator = validate_string
default = "-"
desc = """\
The Error log file to write to.
"-" means log to stderr.
"""
class Loglevel(Setting):
name = "loglevel"
section = "Logging"
cli = ["--log-level"]
meta = "LEVEL"
validator = validate_string
default = "info"
desc = """\
The granularity of Error log outputs.
Valid level names are:
* debug
* info
* warning
* error
* critical
"""
class LoggerClass(Setting):
name = "logger_class"
section = "Logging"
cli = ["--logger-class"]
meta = "STRING"
validator = validate_class
default = "simple"
desc = """\
The logger you want to use to log events in gunicorn.
        The default class (``gunicorn.glogging.Logger``) handles most
        normal usages in logging. It provides error and access logging.
        You can provide your own logger by giving Gunicorn a Python path
        to a subclass like gunicorn.glogging.Logger.
Alternatively the syntax can also load the Logger class
with `egg:gunicorn#simple`
"""
class LogConfig(Setting):
name = "logconfig"
section = "Logging"
cli = ["--log-config"]
meta = "FILE"
validator = validate_string
default = None
desc = """\
The log config file to use.
Gunicorn uses the standard Python logging module's Configuration
file format.
"""
class SyslogTo(Setting):
name = "syslog_addr"
section = "Logging"
cli = ["--log-syslog-to"]
meta = "SYSLOG_ADDR"
validator = validate_string
if PLATFORM == "darwin":
default = "unix:///var/run/syslog"
elif PLATFORM in ('freebsd', 'dragonfly', ):
default = "unix:///var/run/log"
elif PLATFORM == "openbsd":
default = "unix:///dev/log"
else:
default = "udp://localhost:514"
desc = """\
        Address to send syslog messages to.
"""
class Syslog(Setting):
name = "syslog"
section = "Logging"
cli = ["--log-syslog"]
validator = validate_bool
action = 'store_true'
default = False
desc = """\
Log to syslog.
"""
class SyslogPrefix(Setting):
name = "syslog_prefix"
section = "Logging"
cli = ["--log-syslog-prefix"]
meta = "SYSLOG_PREFIX"
validator = validate_string
default = None
desc = """\
        Makes Gunicorn use the parameter as the program name in the syslog entries.
All entries will be prefixed by gunicorn.<prefix>. By default the program
name is the name of the process.
"""
class SyslogFacility(Setting):
name = "syslog_facility"
section = "Logging"
cli = ["--log-syslog-facility"]
meta = "SYSLOG_FACILITY"
validator = validate_string
default = "user"
desc = """\
Syslog facility name
"""
class EnableStdioInheritance(Setting):
name = "enable_stdio_inheritance"
section = "Logging"
cli = ["-R", "--enable-stdio-inheritance"]
validator = validate_bool
default = False
action = "store_true"
desc = """\
        Enable stdio inheritance.
        Enable inheritance for stdio file descriptors in daemon mode.
        Note: To disable Python's stdout buffering, you can set the
        ``PYTHONUNBUFFERED`` environment variable.
"""
class Procname(Setting):
name = "proc_name"
section = "Process Naming"
cli = ["-n", "--name"]
meta = "STRING"
validator = validate_string
default = None
desc = """\
A base to use with setproctitle for process naming.
This affects things like ``ps`` and ``top``. If you're going to be
running more than one instance of Gunicorn you'll probably want to set a
name to tell them apart. This requires that you install the setproctitle
module.
It defaults to 'gunicorn'.
"""
class DefaultProcName(Setting):
name = "default_proc_name"
section = "Process Naming"
validator = validate_string
default = "gunicorn"
desc = """\
Internal setting that is adjusted for each type of application.
"""
class DjangoSettings(Setting):
name = "django_settings"
section = "Django"
cli = ["--settings"]
meta = "STRING"
validator = validate_string
default = None
desc = """\
The Python path to a Django settings module. (deprecated)
e.g. 'myproject.settings.main'. If this isn't provided, the
DJANGO_SETTINGS_MODULE environment variable will be used.
**DEPRECATED**: use the --env argument instead.
"""
class PythonPath(Setting):
name = "pythonpath"
section = "Server Mechanics"
cli = ["--pythonpath"]
meta = "STRING"
validator = validate_string
default = None
desc = """\
A directory to add to the Python path.
e.g.
'/home/djangoprojects/myproject'.
"""
class Paste(Setting):
name = "paste"
section = "Server Mechanics"
cli = ["--paster"]
meta = "STRING"
validator = validate_string
default = None
desc = """\
Load a paste.deploy config file.
"""
class OnStarting(Setting):
name = "on_starting"
section = "Server Hooks"
validator = validate_callable(1)
type = six.callable
def on_starting(server):
pass
default = staticmethod(on_starting)
desc = """\
Called just before the master process is initialized.
The callable needs to accept a single instance variable for the Arbiter.
"""
class OnReload(Setting):
name = "on_reload"
section = "Server Hooks"
validator = validate_callable(1)
type = six.callable
def on_reload(server):
pass
default = staticmethod(on_reload)
desc = """\
Called to recycle workers during a reload via SIGHUP.
The callable needs to accept a single instance variable for the Arbiter.
"""
class WhenReady(Setting):
name = "when_ready"
section = "Server Hooks"
validator = validate_callable(1)
type = six.callable
def when_ready(server):
pass
default = staticmethod(when_ready)
desc = """\
Called just after the server is started.
The callable needs to accept a single instance variable for the Arbiter.
"""
class Prefork(Setting):
name = "pre_fork"
section = "Server Hooks"
validator = validate_callable(2)
type = six.callable
def pre_fork(server, worker):
pass
default = staticmethod(pre_fork)
desc = """\
Called just before a worker is forked.
The callable needs to accept two instance variables for the Arbiter and
new Worker.
"""
class Postfork(Setting):
name = "post_fork"
section = "Server Hooks"
validator = validate_callable(2)
type = six.callable
def post_fork(server, worker):
pass
default = staticmethod(post_fork)
desc = """\
Called just after a worker has been forked.
The callable needs to accept two instance variables for the Arbiter and
new Worker.
"""
class PostWorkerInit(Setting):
name = "post_worker_init"
section = "Server Hooks"
validator = validate_callable(1)
type = six.callable
def post_worker_init(worker):
pass
default = staticmethod(post_worker_init)
desc = """\
Called just after a worker has initialized the application.
The callable needs to accept one instance variable for the initialized
Worker.
"""
class PreExec(Setting):
name = "pre_exec"
section = "Server Hooks"
validator = validate_callable(1)
type = six.callable
def pre_exec(server):
pass
default = staticmethod(pre_exec)
desc = """\
Called just before a new master process is forked.
The callable needs to accept a single instance variable for the Arbiter.
"""
class PreRequest(Setting):
name = "pre_request"
section = "Server Hooks"
validator = validate_callable(2)
type = six.callable
def pre_request(worker, req):
worker.log.debug("%s %s" % (req.method, req.path))
default = staticmethod(pre_request)
desc = """\
Called just before a worker processes the request.
The callable needs to accept two instance variables for the Worker and
the Request.
"""
class PostRequest(Setting):
name = "post_request"
section = "Server Hooks"
validator = validate_post_request
type = six.callable
def post_request(worker, req, environ, resp):
pass
default = staticmethod(post_request)
desc = """\
Called after a worker processes the request.
        The callable needs to accept four arguments: the Worker, the
        Request, the WSGI environ, and the response.
"""
class WorkerExit(Setting):
name = "worker_exit"
section = "Server Hooks"
validator = validate_callable(2)
type = six.callable
def worker_exit(server, worker):
pass
default = staticmethod(worker_exit)
desc = """\
        Called just after a worker has exited.
The callable needs to accept two instance variables for the Arbiter and
the just-exited Worker.
"""
class NumWorkersChanged(Setting):
name = "nworkers_changed"
section = "Server Hooks"
validator = validate_callable(3)
type = six.callable
def nworkers_changed(server, new_value, old_value):
pass
default = staticmethod(nworkers_changed)
desc = """\
Called just after num_workers has been changed.
The callable needs to accept an instance variable of the Arbiter and
two integers of number of workers after and before change.
If the number of workers is set for the first time, old_value would be
None.
"""
class ProxyProtocol(Setting):
name = "proxy_protocol"
section = "Server Mechanics"
cli = ["--proxy-protocol"]
validator = validate_bool
default = False
action = "store_true"
desc = """\
        Enable detection of the PROXY protocol (PROXY mode).
        Allows using HTTP and the PROXY protocol together. This may be useful
        when working with stunnel as an HTTPS frontend and Gunicorn as the
        HTTP server.
PROXY protocol: http://haproxy.1wt.eu/download/1.5/doc/proxy-protocol.txt
Example for stunnel config::
[https]
protocol = proxy
accept = 443
connect = 80
cert = /etc/ssl/certs/stunnel.pem
key = /etc/ssl/certs/stunnel.key
"""
class ProxyAllowFrom(Setting):
name = "proxy_allow_ips"
section = "Server Mechanics"
cli = ["--proxy-allow-from"]
validator = validate_string_to_list
default = "127.0.0.1"
desc = """\
        Front-end's IPs from which it is allowed to accept proxy requests (comma-separated).
"""
class KeyFile(Setting):
name = "keyfile"
section = "Ssl"
cli = ["--keyfile"]
meta = "FILE"
validator = validate_string
default = None
desc = """\
SSL key file
"""
class CertFile(Setting):
name = "certfile"
section = "Ssl"
cli = ["--certfile"]
meta = "FILE"
validator = validate_string
default = None
desc = """\
SSL certificate file
"""
| apache-2.0 |
prashanthr/wakatime | wakatime/packages/pygments_py2/pygments/formatters/latex.py | 72 | 17615 | # -*- coding: utf-8 -*-
"""
pygments.formatters.latex
~~~~~~~~~~~~~~~~~~~~~~~~~
Formatter for LaTeX fancyvrb output.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import division
from pygments.formatter import Formatter
from pygments.lexer import Lexer
from pygments.token import Token, STANDARD_TYPES
from pygments.util import get_bool_opt, get_int_opt, StringIO, xrange, \
iteritems
__all__ = ['LatexFormatter']
def escape_tex(text, commandprefix):
return text.replace('\\', '\x00'). \
replace('{', '\x01'). \
replace('}', '\x02'). \
replace('\x00', r'\%sZbs{}' % commandprefix). \
replace('\x01', r'\%sZob{}' % commandprefix). \
replace('\x02', r'\%sZcb{}' % commandprefix). \
replace('^', r'\%sZca{}' % commandprefix). \
replace('_', r'\%sZus{}' % commandprefix). \
replace('&', r'\%sZam{}' % commandprefix). \
replace('<', r'\%sZlt{}' % commandprefix). \
replace('>', r'\%sZgt{}' % commandprefix). \
replace('#', r'\%sZsh{}' % commandprefix). \
replace('%', r'\%sZpc{}' % commandprefix). \
replace('$', r'\%sZdl{}' % commandprefix). \
replace('-', r'\%sZhy{}' % commandprefix). \
replace("'", r'\%sZsq{}' % commandprefix). \
replace('"', r'\%sZdq{}' % commandprefix). \
replace('~', r'\%sZti{}' % commandprefix)
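# Rough example of the escaping above, shown for the default command prefix
# "PY" (the \x00-\x02 placeholders keep the backslash and brace replacements
# from clobbering each other):
#
#     escape_tex(r'50% of {x}', 'PY')
#     # -> '50\PYZpc{} of \PYZob{}x\PYZcb{}'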
DOC_TEMPLATE = r'''
\documentclass{%(docclass)s}
\usepackage{fancyvrb}
\usepackage{color}
\usepackage[%(encoding)s]{inputenc}
%(preamble)s
%(styledefs)s
\begin{document}
\section*{%(title)s}
%(code)s
\end{document}
'''
## Small explanation of the mess below :)
#
# The previous version of the LaTeX formatter just assigned a command to
# each token type defined in the current style. That obviously is
# problematic if the highlighted code is produced for a different style
# than the style commands themselves.
#
# This version works much like the HTML formatter which assigns multiple
# CSS classes to each <span> tag, from the most specific to the least
# specific token type, thus falling back to the parent token type if one
# is not defined. Here, the classes are there too and use the same short
# forms given in token.STANDARD_TYPES.
#
# Highlighted code now only uses one custom command, which by default is
# \PY and selectable by the commandprefix option (and in addition the
# escapes \PYZat, \PYZlb and \PYZrb which haven't been renamed for
# backwards compatibility purposes).
#
# \PY has two arguments: the classes, separated by +, and the text to
# render in that style. The classes are resolved into the respective
# style commands by magic, which serves to ignore unknown classes.
#
# The magic macros are:
# * \PY@it, \PY@bf, etc. are unconditionally wrapped around the text
# to render in \PY@do. Their definition determines the style.
# * \PY@reset resets \PY@it etc. to do nothing.
# * \PY@toks parses the list of classes, using magic inspired by the
# keyval package (but modified to use plusses instead of commas
# because fancyvrb redefines commas inside its environments).
# * \PY@tok processes one class, calling the \PY@tok@classname command
# if it exists.
# * \PY@tok@classname sets the \PY@it etc. to reflect the chosen style
# for its class.
# * \PY resets the style, parses the classnames and then calls \PY@do.
#
# Tip: to read this code, print it out in substituted form using e.g.
# >>> print STYLE_TEMPLATE % {'cp': 'PY'}
STYLE_TEMPLATE = r'''
\makeatletter
\def\%(cp)s@reset{\let\%(cp)s@it=\relax \let\%(cp)s@bf=\relax%%
\let\%(cp)s@ul=\relax \let\%(cp)s@tc=\relax%%
\let\%(cp)s@bc=\relax \let\%(cp)s@ff=\relax}
\def\%(cp)s@tok#1{\csname %(cp)s@tok@#1\endcsname}
\def\%(cp)s@toks#1+{\ifx\relax#1\empty\else%%
\%(cp)s@tok{#1}\expandafter\%(cp)s@toks\fi}
\def\%(cp)s@do#1{\%(cp)s@bc{\%(cp)s@tc{\%(cp)s@ul{%%
\%(cp)s@it{\%(cp)s@bf{\%(cp)s@ff{#1}}}}}}}
\def\%(cp)s#1#2{\%(cp)s@reset\%(cp)s@toks#1+\relax+\%(cp)s@do{#2}}
%(styles)s
\def\%(cp)sZbs{\char`\\}
\def\%(cp)sZus{\char`\_}
\def\%(cp)sZob{\char`\{}
\def\%(cp)sZcb{\char`\}}
\def\%(cp)sZca{\char`\^}
\def\%(cp)sZam{\char`\&}
\def\%(cp)sZlt{\char`\<}
\def\%(cp)sZgt{\char`\>}
\def\%(cp)sZsh{\char`\#}
\def\%(cp)sZpc{\char`\%%}
\def\%(cp)sZdl{\char`\$}
\def\%(cp)sZhy{\char`\-}
\def\%(cp)sZsq{\char`\'}
\def\%(cp)sZdq{\char`\"}
\def\%(cp)sZti{\char`\~}
%% for compatibility with earlier versions
\def\%(cp)sZat{@}
\def\%(cp)sZlb{[}
\def\%(cp)sZrb{]}
\makeatother
'''
def _get_ttype_name(ttype):
fname = STANDARD_TYPES.get(ttype)
if fname:
return fname
aname = ''
while fname is None:
aname = ttype[-1] + aname
ttype = ttype.parent
fname = STANDARD_TYPES.get(ttype)
return fname + aname
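# Hedged example of the fallback above: a token type missing from
# STANDARD_TYPES borrows its nearest ancestor's short name plus the
# remaining subtype names (the Foo subtype is hypothetical):
#
#     _get_ttype_name(Token.Keyword)      # -> 'k' (direct hit)
#     _get_ttype_name(Token.Keyword.Foo)  # -> 'kFoo' (parent 'k' + 'Foo')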
class LatexFormatter(Formatter):
r"""
Format tokens as LaTeX code. This needs the `fancyvrb` and `color`
standard packages.
Without the `full` option, code is formatted as one ``Verbatim``
environment, like this:
.. sourcecode:: latex
\begin{Verbatim}[commandchars=\\\{\}]
\PY{k}{def }\PY{n+nf}{foo}(\PY{n}{bar}):
\PY{k}{pass}
\end{Verbatim}
The special command used here (``\PY``) and all the other macros it needs
are output by the `get_style_defs` method.
With the `full` option, a complete LaTeX document is output, including
the command definitions in the preamble.
The `get_style_defs()` method of a `LatexFormatter` returns a string
containing ``\def`` commands defining the macros needed inside the
``Verbatim`` environments.
Additional options accepted:
`style`
The style to use, can be a string or a Style subclass (default:
``'default'``).
`full`
Tells the formatter to output a "full" document, i.e. a complete
self-contained document (default: ``False``).
`title`
If `full` is true, the title that should be used to caption the
document (default: ``''``).
`docclass`
If the `full` option is enabled, this is the document class to use
(default: ``'article'``).
`preamble`
If the `full` option is enabled, this can be further preamble commands,
e.g. ``\usepackage`` (default: ``''``).
`linenos`
If set to ``True``, output line numbers (default: ``False``).
`linenostart`
The line number for the first line (default: ``1``).
`linenostep`
If set to a number n > 1, only every nth line number is printed.
`verboptions`
Additional options given to the Verbatim environment (see the *fancyvrb*
docs for possible values) (default: ``''``).
`commandprefix`
The LaTeX commands used to produce colored output are constructed
using this prefix and some letters (default: ``'PY'``).
.. versionadded:: 0.7
.. versionchanged:: 0.10
The default is now ``'PY'`` instead of ``'C'``.
`texcomments`
        If set to ``True``, enables LaTeX comment lines. That is, LaTeX markup
in comment tokens is not escaped so that LaTeX can render it (default:
``False``).
.. versionadded:: 1.2
`mathescape`
If set to ``True``, enables LaTeX math mode escape in comments. That
is, ``'$...$'`` inside a comment will trigger math mode (default:
``False``).
.. versionadded:: 1.2
`escapeinside`
If set to a string of length 2, enables escaping to LaTeX. Text
delimited by these 2 characters is read as LaTeX code and
typeset accordingly. It has no effect in string literals. It has
no effect in comments if `texcomments` or `mathescape` is
set. (default: ``''``).
.. versionadded:: 2.0
`envname`
Allows you to pick an alternative environment name replacing Verbatim.
The alternate environment still has to support Verbatim's option syntax.
(default: ``'Verbatim'``).
.. versionadded:: 2.0
"""
name = 'LaTeX'
aliases = ['latex', 'tex']
filenames = ['*.tex']
def __init__(self, **options):
Formatter.__init__(self, **options)
self.docclass = options.get('docclass', 'article')
self.preamble = options.get('preamble', '')
self.linenos = get_bool_opt(options, 'linenos', False)
self.linenostart = abs(get_int_opt(options, 'linenostart', 1))
self.linenostep = abs(get_int_opt(options, 'linenostep', 1))
self.verboptions = options.get('verboptions', '')
self.nobackground = get_bool_opt(options, 'nobackground', False)
self.commandprefix = options.get('commandprefix', 'PY')
self.texcomments = get_bool_opt(options, 'texcomments', False)
self.mathescape = get_bool_opt(options, 'mathescape', False)
self.escapeinside = options.get('escapeinside', '')
if len(self.escapeinside) == 2:
self.left = self.escapeinside[0]
self.right = self.escapeinside[1]
else:
self.escapeinside = ''
self.envname = options.get('envname', u'Verbatim')
self._create_stylesheet()
def _create_stylesheet(self):
t2n = self.ttype2name = {Token: ''}
c2d = self.cmd2def = {}
cp = self.commandprefix
def rgbcolor(col):
if col:
return ','.join(['%.2f' % (int(col[i] + col[i + 1], 16) / 255.0)
for i in (0, 2, 4)])
else:
return '1,1,1'
for ttype, ndef in self.style:
name = _get_ttype_name(ttype)
cmndef = ''
if ndef['bold']:
cmndef += r'\let\$$@bf=\textbf'
if ndef['italic']:
cmndef += r'\let\$$@it=\textit'
if ndef['underline']:
cmndef += r'\let\$$@ul=\underline'
if ndef['roman']:
cmndef += r'\let\$$@ff=\textrm'
if ndef['sans']:
cmndef += r'\let\$$@ff=\textsf'
if ndef['mono']:
cmndef += r'\let\$$@ff=\textsf'
if ndef['color']:
cmndef += (r'\def\$$@tc##1{\textcolor[rgb]{%s}{##1}}' %
rgbcolor(ndef['color']))
if ndef['border']:
cmndef += (r'\def\$$@bc##1{\setlength{\fboxsep}{0pt}'
r'\fcolorbox[rgb]{%s}{%s}{\strut ##1}}' %
(rgbcolor(ndef['border']),
rgbcolor(ndef['bgcolor'])))
elif ndef['bgcolor']:
cmndef += (r'\def\$$@bc##1{\setlength{\fboxsep}{0pt}'
r'\colorbox[rgb]{%s}{\strut ##1}}' %
rgbcolor(ndef['bgcolor']))
if cmndef == '':
continue
cmndef = cmndef.replace('$$', cp)
t2n[ttype] = name
c2d[name] = cmndef
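    # Hedged illustration of what the mappings built above might contain for
    # a bold, red Keyword style with the default "PY" prefix (the exact
    # entries depend entirely on the chosen style definition):
    #
    #     t2n[Token.Keyword] == 'k'
    #     c2d['k'] == (r'\let\PY@bf=\textbf'
    #                  r'\def\PY@tc##1{\textcolor[rgb]{1.00,0.00,0.00}{##1}}')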
def get_style_defs(self, arg=''):
"""
Return the command sequences needed to define the commands
used to format text in the verbatim environment. ``arg`` is ignored.
"""
cp = self.commandprefix
styles = []
for name, definition in iteritems(self.cmd2def):
styles.append(r'\expandafter\def\csname %s@tok@%s\endcsname{%s}' %
(cp, name, definition))
return STYLE_TEMPLATE % {'cp': self.commandprefix,
'styles': '\n'.join(styles)}
def format_unencoded(self, tokensource, outfile):
# TODO: add support for background colors
t2n = self.ttype2name
cp = self.commandprefix
if self.full:
realoutfile = outfile
outfile = StringIO()
outfile.write(u'\\begin{' + self.envname + u'}[commandchars=\\\\\\{\\}')
if self.linenos:
start, step = self.linenostart, self.linenostep
outfile.write(u',numbers=left' +
(start and u',firstnumber=%d' % start or u'') +
(step and u',stepnumber=%d' % step or u''))
if self.mathescape or self.texcomments or self.escapeinside:
outfile.write(u',codes={\\catcode`\\$=3\\catcode`\\^=7\\catcode`\\_=8}')
if self.verboptions:
outfile.write(u',' + self.verboptions)
outfile.write(u']\n')
for ttype, value in tokensource:
if ttype in Token.Comment:
if self.texcomments:
# Try to guess comment starting lexeme and escape it ...
start = value[0:1]
for i in xrange(1, len(value)):
if start[0] != value[i]:
break
start += value[i]
value = value[len(start):]
start = escape_tex(start, self.commandprefix)
# ... but do not escape inside comment.
value = start + value
elif self.mathescape:
# Only escape parts not inside a math environment.
parts = value.split('$')
in_math = False
for i, part in enumerate(parts):
if not in_math:
parts[i] = escape_tex(part, self.commandprefix)
in_math = not in_math
value = '$'.join(parts)
elif self.escapeinside:
text = value
value = ''
while len(text) > 0:
a, sep1, text = text.partition(self.left)
if len(sep1) > 0:
b, sep2, text = text.partition(self.right)
if len(sep2) > 0:
value += escape_tex(a, self.commandprefix) + b
else:
value += escape_tex(a + sep1 + b, self.commandprefix)
else:
value = value + escape_tex(a, self.commandprefix)
else:
value = escape_tex(value, self.commandprefix)
elif ttype not in Token.Escape:
value = escape_tex(value, self.commandprefix)
styles = []
while ttype is not Token:
try:
styles.append(t2n[ttype])
except KeyError:
# not in current style
styles.append(_get_ttype_name(ttype))
ttype = ttype.parent
styleval = '+'.join(reversed(styles))
if styleval:
spl = value.split('\n')
for line in spl[:-1]:
if line:
outfile.write("\\%s{%s}{%s}" % (cp, styleval, line))
outfile.write('\n')
if spl[-1]:
outfile.write("\\%s{%s}{%s}" % (cp, styleval, spl[-1]))
else:
outfile.write(value)
outfile.write(u'\\end{' + self.envname + u'}\n')
if self.full:
realoutfile.write(DOC_TEMPLATE %
dict(docclass = self.docclass,
preamble = self.preamble,
title = self.title,
encoding = self.encoding or 'utf8',
styledefs = self.get_style_defs(),
code = outfile.getvalue()))
class LatexEmbeddedLexer(Lexer):
r"""
This lexer takes one lexer as argument, the lexer for the language
being formatted, and the left and right delimiters for escaped text.
First everything is scanned using the language lexer to obtain
strings and comments. All other consecutive tokens are merged and
the resulting text is scanned for escaped segments, which are given
the Token.Escape type. Finally text that is not escaped is scanned
again with the language lexer.
"""
def __init__(self, left, right, lang, **options):
self.left = left
self.right = right
self.lang = lang
Lexer.__init__(self, **options)
def get_tokens_unprocessed(self, text):
buf = ''
idx = 0
for i, t, v in self.lang.get_tokens_unprocessed(text):
if t in Token.Comment or t in Token.String:
if buf:
for x in self.get_tokens_aux(idx, buf):
yield x
buf = ''
yield i, t, v
else:
if not buf:
idx = i
buf += v
if buf:
for x in self.get_tokens_aux(idx, buf):
yield x
def get_tokens_aux(self, index, text):
while text:
a, sep1, text = text.partition(self.left)
if a:
for i, t, v in self.lang.get_tokens_unprocessed(a):
yield index + i, t, v
index += len(a)
if sep1:
b, sep2, text = text.partition(self.right)
if sep2:
yield index + len(sep1), Token.Escape, b
index += len(sep1) + len(b) + len(sep2)
else:
yield index, Token.Error, sep1
index += len(sep1)
text = b
| bsd-3-clause |
cjwagner/test-infra | gubernator/third_party/defusedxml/common.py | 55 | 6086 | # defusedxml
#
# Copyright (c) 2013 by Christian Heimes <[email protected]>
# Licensed to PSF under a Contributor Agreement.
# See http://www.python.org/psf/license for licensing details.
"""Common constants, exceptions and helpe functions
"""
import sys
from types import MethodType
PY3 = sys.version_info[0] == 3
PY26 = sys.version_info[:2] == (2, 6)
PY31 = sys.version_info[:2] == (3, 1)
class DefusedXmlException(ValueError):
"""Base exception
"""
def __repr__(self):
return str(self)
class DTDForbidden(DefusedXmlException):
"""Document type definition is forbidden
"""
def __init__(self, name, sysid, pubid):
super(DTDForbidden, self).__init__()
self.name = name
self.sysid = sysid
self.pubid = pubid
def __str__(self):
tpl = "DTDForbidden(name='{}', system_id={!r}, public_id={!r})"
return tpl.format(self.name, self.sysid, self.pubid)
class EntitiesForbidden(DefusedXmlException):
"""Entity definition is forbidden
"""
def __init__(self, name, value, base, sysid, pubid, notation_name):
super(EntitiesForbidden, self).__init__()
self.name = name
self.value = value
self.base = base
self.sysid = sysid
self.pubid = pubid
self.notation_name = notation_name
def __str__(self):
tpl = "EntitiesForbidden(name='{}', system_id={!r}, public_id={!r})"
return tpl.format(self.name, self.sysid, self.pubid)
class ExternalReferenceForbidden(DefusedXmlException):
"""Resolving an external reference is forbidden
"""
def __init__(self, context, base, sysid, pubid):
super(ExternalReferenceForbidden, self).__init__()
self.context = context
self.base = base
self.sysid = sysid
self.pubid = pubid
def __str__(self):
tpl = "ExternalReferenceForbidden(system_id='{}', public_id={})"
return tpl.format(self.sysid, self.pubid)
class NotSupportedError(DefusedXmlException):
"""The operation is not supported
"""
def _apply_defusing(defused_mod):
assert defused_mod is sys.modules[defused_mod.__name__]
stdlib_name = defused_mod.__origin__
__import__(stdlib_name, {}, {}, ["*"])
stdlib_mod = sys.modules[stdlib_name]
stdlib_names = set(dir(stdlib_mod))
for name, obj in vars(defused_mod).items():
if name.startswith("_") or name not in stdlib_names:
continue
setattr(stdlib_mod, name, obj)
return stdlib_mod
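# Sketch of the intended usage (an assumption based on the __origin__
# convention above; the defused module itself is not defined in this file):
#
#     import defusedxml.ElementTree  # declares __origin__ = 'xml.etree.ElementTree'
#     _apply_defusing(defusedxml.ElementTree)  # patches the stdlib module in place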
def _generate_etree_functions(DefusedXMLParser, _TreeBuilder,
_IterParseIterator, _parse, _iterparse):
"""Factory for functions needed by etree, dependent on whether
cElementTree or ElementTree is used."""
def parse(source, parser=None, forbid_dtd=False, forbid_entities=True,
forbid_external=True):
if parser is None:
parser = DefusedXMLParser(target=_TreeBuilder(),
forbid_dtd=forbid_dtd,
forbid_entities=forbid_entities,
forbid_external=forbid_external)
return _parse(source, parser)
if PY26 or PY31:
def bind(xmlparser, funcname, hookname):
func = getattr(DefusedXMLParser, funcname)
if PY26:
# unbound -> function
func = func.__func__
method = MethodType(func, xmlparser, xmlparser.__class__)
else:
method = MethodType(func, xmlparser)
# set hook
setattr(xmlparser._parser, hookname, method)
def iterparse(source, events=None, forbid_dtd=False,
forbid_entities=True, forbid_external=True):
it = _iterparse(source, events)
xmlparser = it._parser
if forbid_dtd:
bind(xmlparser, "defused_start_doctype_decl",
"StartDoctypeDeclHandler")
if forbid_entities:
bind(xmlparser, "defused_entity_decl",
"EntityDeclHandler")
bind(xmlparser, "defused_unparsed_entity_decl",
"UnparsedEntityDeclHandler")
if forbid_external:
bind(xmlparser, "defused_external_entity_ref_handler",
"ExternalEntityRefHandler")
return it
elif PY3:
def iterparse(source, events=None, parser=None, forbid_dtd=False,
forbid_entities=True, forbid_external=True):
close_source = False
if not hasattr(source, "read"):
source = open(source, "rb")
close_source = True
if not parser:
parser = DefusedXMLParser(target=_TreeBuilder(),
forbid_dtd=forbid_dtd,
forbid_entities=forbid_entities,
forbid_external=forbid_external)
return _IterParseIterator(source, events, parser, close_source)
else:
# Python 2.7
def iterparse(source, events=None, parser=None, forbid_dtd=False,
forbid_entities=True, forbid_external=True):
if parser is None:
parser = DefusedXMLParser(target=_TreeBuilder(),
forbid_dtd=forbid_dtd,
forbid_entities=forbid_entities,
forbid_external=forbid_external)
return _iterparse(source, events, parser)
def fromstring(text, forbid_dtd=False, forbid_entities=True,
forbid_external=True):
parser = DefusedXMLParser(target=_TreeBuilder(),
forbid_dtd=forbid_dtd,
forbid_entities=forbid_entities,
forbid_external=forbid_external)
parser.feed(text)
return parser.close()
return parse, iterparse, fromstring
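# Hedged usage sketch: the ElementTree/cElementTree wrapper modules are
# expected to call this factory with their implementation-specific private
# helpers (the names passed here are assumptions about the caller):
#
#     parse, iterparse, fromstring = _generate_etree_functions(
#         DefusedXMLParser, _TreeBuilder, _IterParseIterator,
#         _parse, _iterparse)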
| apache-2.0 |
abujehad139/damnvid | ui/dMainFrame/dMainGoPanel.py | 12 | 1325 | # -*- coding: utf-8 -*-
from ..dUI import *
def DamnMainStopButton(panel=None, parent=None):
stopbutton = wx.Button(panel, -1, DV.l('Stop'))
stopbutton.Disable()
if parent is not None:
panel.Bind(wx.EVT_BUTTON, parent.onStop, stopbutton)
return stopbutton
class DamnMainGoPanel(wx.Panel):
def __init__(self, parent):
Damnlog('Initializing DamnMainGoPanel.')
wx.Panel.__init__(self, parent, -1)
self.parent = parent
hboxwrapper4 = wx.BoxSizer(wx.HORIZONTAL)
hbox4 = wx.BoxSizer(wx.VERTICAL)
hboxwrapper4.Add(hbox4)
hboxwrapper4.Add((0, DV.border_padding))
self.SetSizer(hboxwrapper4)
parent.stopbutton = DamnMainStopButton(panel=self, parent=parent)
for button in (parent.addByFile, parent.addByURL, parent.btnRename, parent.btnMoveUp, parent.btnMoveDown, parent.deletebutton, parent.gobutton1, parent.stopbutton, parent.btnSearch):
button.SetMinSize((parent.getNiceDimensions()[0], button.GetSizeTuple()[1]))
parent.gauge1.SetMaxSize((-1, parent.stopbutton.GetSizeTuple()[1]))
parent.profiledropdown.SetMinSize(parent.getNiceDimensions())
parent.profiledropdown.Bind(wx.EVT_CHOICE, parent.onChangeProfileDropdown)
parent.profilepanel.Show()
hbox4.Add(parent.stopbutton)
hbox4.Add((0, DV.border_padding))
#vbox.Add((0,DV.border_padding))
Damnlog('DamnMainGoPanel initialized.')
| gpl-3.0 |
dieterich-lab/rp-bp | rpbp/reference_preprocessing/label_orfs.py | 1 | 21436 | #! /usr/bin/env python3
"""This script labels the ORFs based on their exon
transcript structure with respect to annotated coding sequences
"""
import argparse
import logging
import pbio.misc.logging_utils as logging_utils
import pbio.utils.bed_utils as bed_utils
from rpbp.defaults import default_num_cpus
logger = logging.getLogger(__name__)
def main():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='''Label the ORFs based on their transcript
exon structure wrt the annotated transcripts.''')
parser.add_argument('annotated_transcripts', help='''The annotated transcripts for the genome
in BED12+ format.''')
parser.add_argument('extracted_orfs', help='''The ORFs extracted from the transcripts
in BED12+ format.''')
parser.add_argument('out', help='''The output (BED12+.gz) file.''')
parser.add_argument('-e', '--annotated-exons', help='''The annotated transcript
exons can be passed with this option. If they are not given, they will be
split from the annotated transcripts.''', default=None)
parser.add_argument('-o', '--orf-exons', help='''The exon blocks for the ORFs, in BED6+ format,
obtained from "split-bed12-blocks". If they are not given, they will be split from the
extracted ORFs.''', default=None)
parser.add_argument('-n', '--nonoverlapping-label', help='''If this option is given,
then the ORFs which do not overlap the annotated transcripts at all will be given this label.
By default, remaining oof overlapping ORFs are assigned the "overlap" label.
If not given, the ORFs outside of annotated regions are labeled as "suspect".''',
default=None)
parser.add_argument('-l', '--label-prefix', help='''This string is prepended to all labels
assigned to ORFs, e.g. to indicate ORFs from a de novo assembly (Rp-Bp assigns the label
"novel" to these, however the string is not prepended to "canonical ORFs").''',
default='')
parser.add_argument('-f', '--filter', help='''If this flag is given, then ORFs
which are completely covered by an annotated transcript are discarded. Use to filter
uninteresting ORFs from a de novo assembly.''', action='store_true')
parser.add_argument('-p', '--num-cpus', help='''The number of CPUs to use to perform
BED operations.''', type=int, default=default_num_cpus)
logging_utils.add_logging_options(parser)
args = parser.parse_args()
logging_utils.update_logging(args)
msg = "Reading annotated transcripts"
logger.info(msg)
annotated_transcripts = bed_utils.read_bed(args.annotated_transcripts)
# get the annotated transcript exons
if args.annotated_exons is None:
msg = "Splitting the annotated transcripts into exon blocks"
logger.info(msg)
annotated_exons = bed_utils.split_bed12(annotated_transcripts,
num_cpus=args.num_cpus,
progress_bar=True)
else:
msg = "Reading the annotated transcript exons"
logger.info(msg)
annotated_exons = bed_utils.read_bed(args.annotated_exons)
msg = "Reading extracted ORFs"
logger.info(msg)
extracted_orfs = bed_utils.read_bed(args.extracted_orfs)
if args.orf_exons is None:
msg = "Splitting the extracted ORFs into exon blocks"
logger.info(msg)
extracted_orf_exons = bed_utils.split_bed12(extracted_orfs,
num_cpus=args.num_cpus,
progress_bar=True)
else:
msg = "Reading the extracted ORFs exons"
logger.info(msg)
extracted_orf_exons = bed_utils.read_bed(args.orf_exons)
msg = "Found {} extracted ORFs with {} exons".format(len(extracted_orfs),
len(extracted_orf_exons))
logger.debug(msg)
# filter out the ORFs that are entirely within annotated transcripts
if args.filter:
msg = "Removing ORFs which are completely covered by the annotated transcripts"
logger.info(msg)
nonoverlapping_ids = bed_utils.subtract_bed(extracted_orf_exons,
annotated_exons,
min_a_overlap=1)
m_unfiltered = extracted_orfs['id'].isin(nonoverlapping_ids)
extracted_orfs = extracted_orfs[m_unfiltered]
# discard the unnecessary exons
m_unfiltered = extracted_orf_exons['id'].isin(nonoverlapping_ids)
extracted_orf_exons = extracted_orf_exons[m_unfiltered]
msg = "After filtering, {} extracted ORFs remain".format(len(extracted_orfs))
logger.info(msg)
# annotate and remove the ORFs which do not at all overlap the annotations
if args.nonoverlapping_label is not None:
nonoverlapping_ids = bed_utils.subtract_bed(extracted_orfs,
annotated_transcripts,
exons_a=extracted_orf_exons,
exons_b=annotated_exons)
m_nonoverlapping = extracted_orf_exons['id'].isin(nonoverlapping_ids)
extracted_orf_exons = extracted_orf_exons[~m_nonoverlapping]
m_nonoverlapping = extracted_orfs['id'].isin(nonoverlapping_ids)
extracted_orfs.loc[m_nonoverlapping, 'orf_type'] = args.nonoverlapping_label
msg = ("Found {} ORFs completely non-overlapping annotated transcripts".
format(len(nonoverlapping_ids)))
logger.info(msg)
msg = "Removing the annotated UTRs from the transcripts"
logger.info(msg)
canonical_orfs = bed_utils.retain_all_thick_only(annotated_transcripts,
num_cpus=args.num_cpus)
msg = "Splitting the canonical ORFs into exons"
logger.info(msg)
canonical_orf_exons = bed_utils.split_bed12(canonical_orfs,
num_cpus=args.num_cpus,
progress_bar=True)
msg = "Extracting annotated 5' leader regions"
logger.info(msg)
five_prime_regions = bed_utils.retain_all_five_prime_of_thick(
annotated_transcripts, num_cpus=args.num_cpus)
if len(five_prime_regions) == 0:
msg = "No annotated 5' leader regions were found"
logger.warning(msg)
msg = "Splitting the 5' leaders into exons"
logger.info(msg)
five_prime_exons = bed_utils.split_bed12(five_prime_regions,
num_cpus=args.num_cpus,
progress_bar=True)
msg = "Extracting annotated 3' trailer regions"
logger.info(msg)
three_prime_regions = bed_utils.retain_all_three_prime_of_thick(
annotated_transcripts, num_cpus=args.num_cpus)
if len(three_prime_regions) == 0:
msg = "No annotated 3' trailer regions were found"
logger.warning(msg)
msg = "Splitting the 3' trailers into exons"
logger.info(msg)
three_prime_exons = bed_utils.split_bed12(three_prime_regions,
num_cpus=args.num_cpus,
progress_bar=True)
msg = "Splitting non-coding transcripts into exons"
logger.info(msg)
m_no_thick_start = annotated_transcripts['thick_start'] == -1
m_no_thick_end = annotated_transcripts['thick_end'] == -1
m_no_thick = m_no_thick_start & m_no_thick_end
noncoding_transcripts = annotated_transcripts[m_no_thick]
noncoding_exons = bed_utils.split_bed12(noncoding_transcripts,
num_cpus=args.num_cpus,
progress_bar=True)
    # First, remove all in-frame (canonical, canonical variants), and also within and out-of-frame ORFs
msg = "Marking canonical and extracted ORFs with the same stop codon"
logger.info(msg)
# first, add the "true" ORF end
m_reverse_canonical = canonical_orfs['strand'] == '-'
canonical_orfs['orf_end'] = canonical_orfs['end']
canonical_orfs.loc[m_reverse_canonical, 'orf_end'] = canonical_orfs.loc[m_reverse_canonical, 'start']
m_reverse_extracted = extracted_orfs['strand'] == '-'
extracted_orfs['orf_end'] = extracted_orfs['end']
extracted_orfs.loc[m_reverse_extracted, 'orf_end'] = extracted_orfs.loc[m_reverse_extracted, 'start']
# then, find extracted ORFs with the same "orf_end" (and seqname, strand) as canonical ORFs
merge_fields = ['seqname', 'strand', 'orf_end']
canonical_extracted_orf_ends = canonical_orfs.merge(extracted_orfs,
on=merge_fields,
suffixes=['_canonical', '_extracted'])
# finally, pull this into a set
zip_it = zip(canonical_extracted_orf_ends['id_canonical'],
canonical_extracted_orf_ends['id_extracted'])
canonical_extracted_matching_ends = {(c, a) for c, a in zip_it}
msg = "Finding ORFs which exactly overlap the canonical ORFs"
logger.info(msg)
exact_matches = bed_utils.get_bed_overlaps(canonical_orf_exons,
extracted_orf_exons,
min_a_overlap=1,
min_b_overlap=1)
exact_match_orf_ids = {m.b_info for m in exact_matches}
m_exact_orf_matches = extracted_orf_exons['id'].isin(exact_match_orf_ids)
extracted_orf_exons = extracted_orf_exons[~m_exact_orf_matches]
m_canonical = extracted_orfs['id'].isin(exact_match_orf_ids)
label = 'canonical'
extracted_orfs.loc[m_canonical, 'orf_type'] = label
msg = "Found {} canonical ORFs".format(len(exact_match_orf_ids))
logger.info(msg)
msg = "Finding truncated canonical ORFs"
logger.info(msg)
truncated_matches = bed_utils.get_bed_overlaps(canonical_orf_exons,
extracted_orf_exons,
min_b_overlap=1)
truncated_match_ids = {m.b_info for m in truncated_matches
if (m.a_info, m.b_info) in canonical_extracted_matching_ends}
m_truncated_matches = extracted_orf_exons['id'].isin(truncated_match_ids)
extracted_orf_exons = extracted_orf_exons[~m_truncated_matches]
m_canonical_truncated = extracted_orfs['id'].isin(truncated_match_ids)
msg = "Finding extended canonical ORFs"
logger.info(msg)
extended_matches = bed_utils.get_bed_overlaps(canonical_orf_exons,
extracted_orf_exons,
min_a_overlap=1)
# For standard assembly, we also need to make sure that
# all extended matches are fully contained within the
# transcript structure (i.e start upstream but otherwise
# have the same structure).
if args.nonoverlapping_label is None:
transcript_matches = bed_utils.get_bed_overlaps(annotated_exons,
extracted_orf_exons,
min_b_overlap=1)
transcript_match_pairs = {(m.a_info, m.b_info) for m in transcript_matches}
extended_match_ids = {m.b_info for m in extended_matches
if (m.a_info, m.b_info) in transcript_match_pairs
and (m.a_info, m.b_info) in canonical_extracted_matching_ends}
else:
extended_match_ids = {m.b_info for m in extended_matches
if (m.a_info, m.b_info) in canonical_extracted_matching_ends}
m_extended_matches = extracted_orf_exons['id'].isin(extended_match_ids)
extracted_orf_exons = extracted_orf_exons[~m_extended_matches]
m_canonical_extended = extracted_orfs['id'].isin(extended_match_ids)
m_canonical_variants = m_canonical_truncated | m_canonical_extended
label = "{}canonical_variant".format(args.label_prefix)
extracted_orfs.loc[m_canonical_variants, 'orf_type'] = label
msg = "Found {} canonical_variant ORFs".\
format(len(extended_match_ids | truncated_match_ids))
logger.info(msg)
msg = ("Finding within canonical ORFs that do not share an "
"annotated stop codon with a canonical ORF (e.g. in "
"frame stop, out-of-frame)")
logger.info(msg)
within_ids = {m.b_info for m in truncated_matches
if m.b_info not in truncated_match_ids}
m_within_matches = extracted_orf_exons['id'].isin(within_ids)
extracted_orf_exons = extracted_orf_exons[~m_within_matches]
m_within = extracted_orfs['id'].isin(within_ids)
label = "{}within".format(args.label_prefix)
extracted_orfs.loc[m_within, 'orf_type'] = label
msg = "Found {} within ORFs".format(len(within_ids))
logger.info(msg)
# find all overlapping ORFs
msg = "Finding all UTR overlap matches"
logger.info(msg)
out_of_frame_matches = bed_utils.get_bed_overlaps(canonical_orf_exons,
extracted_orf_exons)
leader_matches = bed_utils.get_bed_overlaps(five_prime_exons,
extracted_orf_exons)
trailer_matches = bed_utils.get_bed_overlaps(three_prime_exons,
extracted_orf_exons)
msg = ("Labeling ORFs which have (out-of-frame) overlaps with both a "
"canonical ORF and annotated leaders or trailers")
logger.info(msg)
# We need to choose how to ensure that up-/downstream overlaps are unique.
    # Where an ORF overlaps both the 5'UTR and the 3'UTR of different
    # same-sense overlapping transcripts, it is assigned by default to the downstream overlap.
# For de novo, everything is labeled as overlap.
leader_match_pairs = {(m.a_info, m.b_info) for m in leader_matches}
trailer_match_pairs = {(m.a_info, m.b_info) for m in trailer_matches}
if args.nonoverlapping_label is None:
# For standard assembly, we also need to make sure that
# all overlap matches are fully contained within the
# transcript structure.
transcript_matches = bed_utils.get_bed_overlaps(annotated_exons,
extracted_orf_exons,
min_b_overlap=1)
transcript_match_pairs = {(m.a_info, m.b_info) for m in transcript_matches}
leader_overlap_pairs = {(m.a_info, m.b_info) for m in out_of_frame_matches
if (m.a_info, m.b_info) in leader_match_pairs
and (m.a_info, m.b_info) not in trailer_match_pairs
and (m.a_info, m.b_info) in transcript_match_pairs}
trailer_overlap_pairs = {(m.a_info, m.b_info) for m in out_of_frame_matches
if (m.a_info, m.b_info) in trailer_match_pairs
and (m.a_info, m.b_info) not in leader_match_pairs
and (m.a_info, m.b_info) in transcript_match_pairs}
# We do not assign preference where the ORF overlaps both sides
# of the coding sequence on the same transcript, any ORF
# satisfying both will be labeled simply as overlap.
overlap_ids = {m.b_info for m in out_of_frame_matches
if (m.a_info, m.b_info) in leader_match_pairs
and (m.a_info, m.b_info) in trailer_match_pairs
and (m.a_info, m.b_info) in transcript_match_pairs}
trailer_overlap_ids = {pair[1] for pair in trailer_overlap_pairs
if pair[1] not in overlap_ids}
leader_overlap_ids = {pair[1] for pair in leader_overlap_pairs
if pair[1] not in trailer_overlap_ids
and pair[1] not in overlap_ids}
m_overlap_matches = extracted_orf_exons['id'].isin(overlap_ids)
extracted_orf_exons = extracted_orf_exons[~m_overlap_matches]
m_leader_overlap_matches = extracted_orf_exons['id'].isin(leader_overlap_ids)
extracted_orf_exons = extracted_orf_exons[~m_leader_overlap_matches]
m_five_prime_overlap = extracted_orfs['id'].isin(leader_overlap_ids)
label = "{}five_prime_overlap".format(args.label_prefix)
extracted_orfs.loc[m_five_prime_overlap, 'orf_type'] = label
m_trailer_overlap_matches = extracted_orf_exons['id'].isin(trailer_overlap_ids)
extracted_orf_exons = extracted_orf_exons[~m_trailer_overlap_matches]
m_three_prime_overlap = extracted_orfs['id'].isin(trailer_overlap_ids)
label = "{}three_prime_overlap".format(args.label_prefix)
extracted_orfs.loc[m_three_prime_overlap, 'orf_type'] = label
msg = "Found {} five_prime_overlap ORFs".format(len(leader_overlap_ids))
logger.info(msg)
msg = "Found {} three_prime_overlap ORFs".format(len(trailer_overlap_ids))
logger.info(msg)
else:
overlap_ids = {m.b_info for m in out_of_frame_matches}
overlap_ids |= {m.b_info for m in leader_matches}
overlap_ids |= {m.b_info for m in trailer_matches}
m_overlap_matches = extracted_orf_exons['id'].isin(overlap_ids)
extracted_orf_exons = extracted_orf_exons[~m_overlap_matches]
m_overlap = extracted_orfs['id'].isin(overlap_ids)
label = "{}overlap".format(args.label_prefix)
extracted_orfs.loc[m_overlap, 'orf_type'] = label
msg = "Found {} overlap ORFs".format(len(overlap_ids))
logger.info(msg)
msg = "Finding ORFs completely within 5' or 3' leaders"
logger.info(msg)
leader_matches = bed_utils.get_bed_overlaps(five_prime_exons,
extracted_orf_exons,
min_b_overlap=1)
leader_ids = {m.b_info for m in leader_matches}
m_leader_matches = extracted_orf_exons['id'].isin(leader_ids)
extracted_orf_exons = extracted_orf_exons[~m_leader_matches]
m_five_prime = extracted_orfs['id'].isin(leader_ids)
label = "{}five_prime".format(args.label_prefix)
extracted_orfs.loc[m_five_prime, 'orf_type'] = label
msg = "Found {} five_prime ORFs".format(len(leader_ids))
logger.info(msg)
trailer_matches = bed_utils.get_bed_overlaps(three_prime_exons,
extracted_orf_exons,
min_b_overlap=1)
trailer_ids = {m.b_info for m in trailer_matches}
m_trailer_matches = extracted_orf_exons['id'].isin(trailer_ids)
extracted_orf_exons = extracted_orf_exons[~m_trailer_matches]
m_three_prime = extracted_orfs['id'].isin(trailer_ids)
label = "{}three_prime".format(args.label_prefix)
extracted_orfs.loc[m_three_prime, 'orf_type'] = label
msg = "Found {} three_prime ORFs".format(len(trailer_ids))
logger.info(msg)
msg = "Finding ORFs completely within annotated, non-coding transcripts"
logger.info(msg)
noncoding_matches = bed_utils.get_bed_overlaps(noncoding_exons,
extracted_orf_exons,
min_b_overlap=1)
noncoding_ids = {m.b_info for m in noncoding_matches}
m_noncoding_matches = extracted_orf_exons['id'].isin(noncoding_ids)
extracted_orf_exons = extracted_orf_exons[~m_noncoding_matches]
m_noncoding = extracted_orfs['id'].isin(noncoding_ids)
label = "{}noncoding".format(args.label_prefix)
extracted_orfs.loc[m_noncoding, 'orf_type'] = label
msg = "Found {} noncoding ORFs".format(len(noncoding_ids))
logger.info(msg)
# all of the remaining ORFs fall into the "suspect" category
suspect_ids = set(extracted_orf_exons['id'])
m_suspect = extracted_orfs['id'].isin(suspect_ids)
label = "{}suspect".format(args.label_prefix)
extracted_orfs.loc[m_suspect, 'orf_type'] = label
n_suspect_ids = len(suspect_ids)
msg = "Remaining {} ORFs labeled as suspect".format(n_suspect_ids)
logger.info(msg)
m_no_orf_type = extracted_orfs['orf_type'].isnull()
msg = "Found {} unlabeled ORFs".format(sum(m_no_orf_type))
logger.info(msg)
msg = "Writing ORFs with labels to disk"
logger.info(msg)
extracted_orfs = bed_utils.sort(extracted_orfs)
msg = ("The ORF labels will be written to {} in the next major release.".
format(args.out))
logger.warning(msg)
additional_columns = ['orf_num', 'orf_len', 'orf_type']
fields = bed_utils.bed12_field_names + additional_columns
orfs_genomic = extracted_orfs[fields]
bed_utils.write_bed(orfs_genomic, args.extracted_orfs)
label_columns = ['id', 'duplicates', 'orf_type']
extracted_orfs = extracted_orfs[label_columns]
bed_utils.write_bed(extracted_orfs, args.out)
if __name__ == '__main__':
main()
| mit |
umitproject/tease-o-matic | django/templatetags/future.py | 226 | 3486 | from django.conf import settings
from django.template import Library, Node, Template, TemplateSyntaxError
from django.template.defaulttags import kwarg_re, include_is_allowed, SsiNode, URLNode
from django.utils.encoding import smart_str
register = Library()
@register.tag
def ssi(parser, token):
"""
Outputs the contents of a given file into the page.
Like a simple "include" tag, the ``ssi`` tag includes the contents
of another file -- which must be specified using an absolute path --
in the current page::
{% ssi "/home/html/ljworld.com/includes/right_generic.html" %}
If the optional "parsed" parameter is given, the contents of the included
file are evaluated as template code, with the current context::
{% ssi "/home/html/ljworld.com/includes/right_generic.html" parsed %}
"""
bits = token.contents.split()
parsed = False
if len(bits) not in (2, 3):
raise TemplateSyntaxError("'ssi' tag takes one argument: the path to"
" the file to be included")
if len(bits) == 3:
if bits[2] == 'parsed':
parsed = True
else:
raise TemplateSyntaxError("Second (optional) argument to %s tag"
" must be 'parsed'" % bits[0])
filepath = parser.compile_filter(bits[1])
return SsiNode(filepath, parsed, legacy_filepath=False)
@register.tag
def url(parser, token):
"""
Returns an absolute URL matching given view with its parameters.
This is a way to define links that aren't tied to a particular URL
configuration::
{% url "path.to.some_view" arg1 arg2 %}
or
{% url "path.to.some_view" name1=value1 name2=value2 %}
The first argument is a path to a view. It can be an absolute python path
or just ``app_name.view_name`` without the project name if the view is
located inside the project. Other arguments are comma-separated values
that will be filled in place of positional and keyword arguments in the
URL. All arguments for the URL should be present.
For example if you have a view ``app_name.client`` taking client's id and
the corresponding line in a URLconf looks like this::
('^client/(\d+)/$', 'app_name.client')
and this app's URLconf is included into the project's URLconf under some
path::
('^clients/', include('project_name.app_name.urls'))
then in a template you can create a link for a certain client like this::
{% url "app_name.client" client.id %}
The URL will look like ``/clients/client/123/``.
"""
bits = token.split_contents()
if len(bits) < 2:
raise TemplateSyntaxError("'%s' takes at least one argument"
" (path to a view)" % bits[0])
viewname = parser.compile_filter(bits[1])
args = []
kwargs = {}
asvar = None
bits = bits[2:]
if len(bits) >= 2 and bits[-2] == 'as':
asvar = bits[-1]
bits = bits[:-2]
if len(bits):
for bit in bits:
match = kwarg_re.match(bit)
if not match:
raise TemplateSyntaxError("Malformed arguments to url tag")
name, value = match.groups()
if name:
kwargs[name] = parser.compile_filter(value)
else:
args.append(parser.compile_filter(value))
return URLNode(viewname, args, kwargs, asvar, legacy_view_name=False)
| bsd-3-clause |
OwaJawa/kaggle_diabetic | config.py | 5 | 2232 | from datetime import datetime
import pprint
import os
import numpy as np
from util import mkdir
from data import FEATURE_DIR
mkdir(FEATURE_DIR)
class Config(object):
def __init__(self, layers, cnf=None):
self.layers = layers
self.cnf = cnf
pprint.pprint(cnf)
def get(self, k, default=None):
return self.cnf.get(k, default)
@property
def weights_epoch(self):
path = "weights/{}/epochs".format(self.cnf['name'])
mkdir(path)
return os.path.join(path, '{epoch}_{timestamp}_{loss}.pkl')
@property
def weights_best(self):
path = "weights/{}/best".format(self.cnf['name'])
mkdir(path)
return os.path.join(path, '{epoch}_{timestamp}_{loss}.pkl')
@property
def weights_file(self):
path = "weights/{}".format(self.cnf['name'])
mkdir(path)
return os.path.join(path, 'weights.pkl')
@property
def retrain_weights_file(self):
path = "weights/{}/retrain".format(self.cnf['name'])
mkdir(path)
return os.path.join(path, 'weights.pkl')
@property
def final_weights_file(self):
path = "weights/{}".format(self.cnf['name'])
mkdir(path)
return os.path.join(path, 'weights_final.pkl')
def get_features_fname(self, n_iter, skip=0, test=False):
fname = '{}_{}_mean_iter_{}_skip_{}.npy'.format(
self.cnf['name'], ('test' if test else 'train'), n_iter, skip)
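        # e.g. a hypothetical cnf name "c_128_5x5" with n_iter=20, skip=0 and
        # test=False yields "c_128_5x5_train_mean_iter_20_skip_0.npy"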
return os.path.join(FEATURE_DIR, fname)
def get_std_fname(self, n_iter, skip=0, test=False):
fname = '{}_{}_std_iter_{}_skip_{}.npy'.format(
self.cnf['name'], ('test' if test else 'train'), n_iter, skip)
return os.path.join(FEATURE_DIR, fname)
def save_features(self, X, n_iter, skip=0, test=False):
np.save(open(self.get_features_fname(n_iter, skip=skip,
test=test), 'wb'), X)
def save_std(self, X, n_iter, skip=0, test=False):
np.save(open(self.get_std_fname(n_iter, skip=skip,
test=test), 'wb'), X)
    def load_features(self, n_iter, skip=0, test=False):
        return np.load(open(self.get_features_fname(n_iter, skip=skip,
                                                    test=test), 'rb'))
| mit |
pstratem/python-bitcoinlib | examples/make-bootstrap-rpc.py | 5 | 1552 | #!/usr/bin/env python3
# Copyright (C) 2013-2014 The python-bitcoinlib developers
#
# This file is part of python-bitcoinlib.
#
# It is subject to the license terms in the LICENSE file found in the top-level
# directory of this distribution.
#
# No part of python-bitcoinlib, including this file, may be copied, modified,
# propagated, or distributed except according to the terms contained in the
# LICENSE file.
"""Make a boostrap.dat file by getting the blocks from the RPC interface."""
import sys
if sys.version_info.major < 3:
sys.stderr.write('Sorry, Python 3.x required by this example.\n')
sys.exit(1)
import bitcoin
import bitcoin.rpc
import struct
import sys
import time
try:
if len(sys.argv) not in (2, 3):
raise Exception
n = int(sys.argv[1])
if len(sys.argv) == 3:
bitcoin.SelectParams(sys.argv[2])
except Exception as ex:
print('Usage: %s <block-height> [network=(mainnet|testnet|regtest)] > bootstrap.dat' % sys.argv[0], file=sys.stderr)
sys.exit(1)
proxy = bitcoin.rpc.Proxy()
total_bytes = 0
start_time = time.time()
fd = sys.stdout.buffer
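# Each bootstrap.dat record written below is: the network magic bytes,
# a little-endian int32 block length, then the serialized block itself.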
for i in range(n + 1):
block = proxy.getblock(proxy.getblockhash(i))
block_bytes = block.serialize()
total_bytes += len(block_bytes)
print('%.2f KB/s, height %d, %d bytes' %
((total_bytes / 1000) / (time.time() - start_time),
i, len(block_bytes)),
file=sys.stderr)
fd.write(bitcoin.params.MESSAGE_START)
fd.write(struct.pack('<i', len(block_bytes)))
fd.write(block_bytes)
| lgpl-3.0 |
rishab96/chessEndGame | userTest.py | 1 | 6281 | import copy
import chess
import chess.syzygy
import chess.gaviota
import collections
from collections import Counter
import features
from features import *
import os.path
weight_vector_3_piece = {'black_king_closer_to_winning_square': -1.400000000000001, 'black_opposition': -1.9200000000000015, 'cant catch pawn': 1.0000000000000007, 'can catch pawn': -0.27, 'white_king_closer': 1.0700000000000007, 'white_king_closer_to_winning_square': -0.22000000000000008, 'black_can_capture': -0.38000000000000017, 'white_king_blocked_down': 0.04000000000000009, 'white_king_blocked_side': -0.5200000000000002, 'white king ahead': 0.42000000000000015, 'black_king_wrong_side': 1.5400000000000011, 'white_king_blocked': -0.6000000000000003, 'white_king_wrong_side': -1.570000000000001, 'black_king_closer': -1.340000000000001, 'h_pawn': -2.6499999999999875}
weight_vector_4_piece = {'black_king_closer_to_winning_square': -1.400000000000001, 'black_opposition': -1.9200000000000015, 'cant catch pawn': 1.0000000000000007, 'can catch pawn': -0.27, 'white_king_closer': 1.0700000000000007, 'white_king_closer_to_winning_square': -0.22000000000000008, 'black_can_capture': -0.38000000000000017, 'white_king_blocked_down': 0.04000000000000009, 'white_king_blocked_side': -0.5200000000000002, 'white king ahead': 0.42000000000000015, 'black_king_wrong_side': 1.5400000000000011, 'white_king_blocked': -0.6000000000000003, 'white_king_wrong_side': -1.570000000000001, 'black_king_closer': -1.340000000000001, 'h_pawn': -2.6499999999999875}
weight_vector_5_piece = {'black_king_closer_to_winning_square': -1.400000000000001, 'black_opposition': -1.9200000000000015, 'cant catch pawn': 1.0000000000000007, 'can catch pawn': -0.27, 'white_king_closer': 1.0700000000000007, 'white_king_closer_to_winning_square': -0.22000000000000008, 'black_can_capture': -0.38000000000000017, 'white_king_blocked_down': 0.04000000000000009, 'white_king_blocked_side': -0.5200000000000002, 'white king ahead': 0.42000000000000015, 'black_king_wrong_side': 1.5400000000000011, 'white_king_blocked': -0.6000000000000003, 'white_king_wrong_side': -1.570000000000001, 'black_king_closer': -1.340000000000001, 'h_pawn': -2.6499999999999875}
def featureExtractor_3(x):
"""
Chess Features
"""
features = {}
features = Counter()
features.update(canCatchPawn(x))
if (features['cant catch pawn'] == 1):
return features
features.update(canBeCaptured(x))
# if (features['black_can_capture'] == 1):
# return {'black_can_capture':1}
features.update(isWhiteKingAhead(x))
features.update(isOpposition(x))
features.update(move_distances(x))
features.update(wrongSide(x))
features.update(ishPawn(x))
return features
def userInput():
print ""
print "This is an endgame solver designed to solve 3,4 and 5 piece chess"
print "Currently it can solve the following board combinations: " #fill
print "Please enter the board in algebraic notation, enter . when you are done"
print "Uppercase letters define white's pieces, while lowecase define black's pieces"
print "If there are 2 pieces on the same row, please enter them in ascending order (ka1 before Pa3)"
print ""
pieces = []
while True:
newPiece = raw_input('Enter new piece: ')
if newPiece == ".":
break
else:
pieces.append(newPiece)
FEN = processInput(pieces)
turn = raw_input('Whose move is it (b or w): ')
board = chess.Board(FEN + " " + turn + " - - 0 1")
syzygy = chess.syzygy.Tablebases()
num = 0
num += syzygy.open_directory(os.path.join(os.path.dirname(__file__), "four-men"))
output = predictOutput(board, syzygy, len(pieces))
actualOutput = output[0]
predictedOutput = output[1]
if actualOutput != -100:
if actualOutput == -1:
print ""
print "The actual value according to databases is: DRAW"
else:
print ""
print "The actual value according to databases is: WIN"
if actualOutput == -1:
print "The predicted output according our algorithm is: DRAW"
else:
print "The predicted output according our algorithm is: WIN"
print ""
#problem
#Enter new piece: Ka2
# Enter new piece: ka6
# Enter new piece: pd2
# Enter new piece: .
# Whose move is it (b or w): w
# 3
def predictOutput(board, syzygy, numPieces):
features = {}
if numPieces == 3:
weights = weight_vector_3_piece
features = featureExtractor_3(board)
elif numPieces == 4:
weights = weight_vector_4_piece
elif numPieces == 5:
weights = weight_vector_5_piece
print board
expectVal = syzygy.probe_wdl(board)
if (expectVal is None):
print "Illegal move"
return (-100, -100)
elif (expectVal == 0):
expectVal = -1
else:
expectVal = 1
ourVal = 0
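    # Linear model: score = sum(weights[f] * features[f]); a non-positive
    # score predicts DRAW (-1), a positive score predicts WIN (+1).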
for val in features:
ourVal += weights[val] * features[val]
if (ourVal <= 0):
ourVal = -1
elif (ourVal > 0):
ourVal = 1
return (expectVal, ourVal)
def processInput(pieces):
FEN = ""
board = [ [] for i in range(8)]
Error = ""
wasError = False
for piece in pieces:
if len(piece) > 3:
Error = piece + " incorrect"
wasError = True
break
pieceType = piece[0]
column = int(piece[2]) - 1
row = piece[1]
row = ord(row) - ord('a') + 1
#row = row - 1
if column > 7 or row > 8:
wasError = True
Error = "Row or column invalid"
break
column = 7 - column
board[column].append((pieceType, row))
FEN = processBoard(board)
return FEN
def processBoard(board):
FEN = ""
for j, row in enumerate(board):
rowFEN = ""
curFilled = 0
prevPos = 0
for i,piece in enumerate(row):
if (piece[1] == 1):
rowFEN = piece[0]
curFilled = 1
elif i == 0:
rowFEN = str(piece[1] - 1) + piece[0]
curFilled = int(piece[1])
else:
diff = int(piece[1]) - curFilled - 1
if (diff != 0):
rowFEN += str(diff)
rowFEN += piece[0]
curFilled = int(piece[1])
left = 8 - curFilled
if left > 0:
rowFEN += str(left)
FEN += rowFEN
if j != 7:
FEN += "/"
return FEN
# curPos = 0
# while True:
# if numSlash == row:
# break
# if FEN[curPos] == '/':
# numSlash += 1
# curPos += 1
# curPosTemp = curPos
# FENofRow = ""
# while FEN[curPosTemp] != "/":
# FENofRow += FEN[curPosTemp]
# curPosTemp += 1
def tester():
print ord('z') - ord('a')
userInput()
| gpl-3.0 |
tod31/pyload | module/plugins/crypter/DataHuFolder.py | 5 | 1721 | # -*- coding: utf-8 -*-
import re
from module.plugins.internal.SimpleCrypter import SimpleCrypter
class DataHuFolder(SimpleCrypter):
__name__ = "DataHuFolder"
__type__ = "crypter"
__version__ = "0.12"
__status__ = "testing"
__pattern__ = r'http://(?:www\.)?data\.hu/dir/\w+'
__config__ = [("activated" , "bool" , "Activated" , True ),
("use_premium" , "bool" , "Use premium account if available" , True ),
("folder_per_package", "Default;Yes;No", "Create folder for each package" , "Default"),
("max_wait" , "int" , "Reconnect if waiting time is greater than minutes", 10 )]
__description__ = """Data.hu folder decrypter plugin"""
__license__ = "GPLv3"
__authors__ = [("crash", None),
("stickell", "[email protected]")]
LINK_PATTERN = r'<a href=\'(http://data\.hu/get/.+)\' target=\'_blank\'>\1</a>'
NAME_PATTERN = ur'<title>(?P<N>.+?) Let\xf6lt\xe9se</title>'
def _prepare(self):
super(DataHuFolder, self)._prepare()
if u'K\xe9rlek add meg a jelsz\xf3t' in self.data: #: Password protected
password = self.get_password()
if not password:
self.fail(_("Password required"))
self.log_debug("The folder is password protected', 'Using password: " + password)
self.data = self.load(self.pyfile.url, post={'mappa_pass': password})
if u'Hib\xe1s jelsz\xf3' in self.data: #: Wrong password
self.fail(_("Wrong password"))
| gpl-3.0 |
zcoinui/zetacoin | contrib/devtools/symbol-check.py | 138 | 4151 | #!/usr/bin/python
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
A script to check that the (Linux) executables produced by gitian only contain
allowed gcc, glibc and libstdc++ version symbols. This makes sure they are
still compatible with the minimum supported Linux distribution versions.
Example usage:
find ../gitian-builder/build -type f -executable | xargs python contrib/devtools/symbol-check.py
'''
from __future__ import division, print_function
import subprocess
import re
import sys
# Debian 6.0.9 (Squeeze) has:
#
# - g++ version 4.4.5 (https://packages.debian.org/search?suite=default§ion=all&arch=any&searchon=names&keywords=g%2B%2B)
# - libc version 2.11.3 (https://packages.debian.org/search?suite=default§ion=all&arch=any&searchon=names&keywords=libc6)
# - libstdc++ version 4.4.5 (https://packages.debian.org/search?suite=default§ion=all&arch=any&searchon=names&keywords=libstdc%2B%2B6)
#
# Ubuntu 10.04.4 (Lucid Lynx) has:
#
# - g++ version 4.4.3 (http://packages.ubuntu.com/search?keywords=g%2B%2B&searchon=names&suite=lucid§ion=all)
# - libc version 2.11.1 (http://packages.ubuntu.com/search?keywords=libc6&searchon=names&suite=lucid§ion=all)
# - libstdc++ version 4.4.3 (http://packages.ubuntu.com/search?suite=lucid§ion=all&arch=any&keywords=libstdc%2B%2B&searchon=names)
#
# Taking the minimum of these as our target.
#
# According to GNU ABI document (http://gcc.gnu.org/onlinedocs/libstdc++/manual/abi.html) this corresponds to:
# GCC 4.4.0: GCC_4.4.0
# GCC 4.4.2: GLIBCXX_3.4.13, CXXABI_1.3.3
# (glibc) GLIBC_2_11
#
MAX_VERSIONS = {
'GCC': (4,4,0),
'CXXABI': (1,3,3),
'GLIBCXX': (3,4,13),
'GLIBC': (2,11)
}
READELF_CMD = '/usr/bin/readelf'
CPPFILT_CMD = '/usr/bin/c++filt'
class CPPFilt(object):
'''
Demangle C++ symbol names.
Use a pipe to the 'c++filt' command.
'''
def __init__(self):
self.proc = subprocess.Popen(CPPFILT_CMD, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
def __call__(self, mangled):
self.proc.stdin.write(mangled + '\n')
return self.proc.stdout.readline().rstrip()
def close(self):
self.proc.stdin.close()
self.proc.stdout.close()
self.proc.wait()
def read_symbols(executable, imports=True):
'''
Parse an ELF executable and return a list of (symbol,version) tuples
for dynamic, imported symbols.
'''
p = subprocess.Popen([READELF_CMD, '--dyn-syms', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Could not read symbols for %s: %s' % (executable, stderr.strip()))
syms = []
for line in stdout.split('\n'):
line = line.split()
if len(line)>7 and re.match('[0-9]+:$', line[0]):
(sym, _, version) = line[7].partition('@')
is_import = line[6] == 'UND'
if version.startswith('@'):
version = version[1:]
if is_import == imports:
syms.append((sym, version))
return syms
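# A hypothetical `readelf --dyn-syms -W` line parsed above, such as
#    2: 0000000000000000   0 FUNC GLOBAL DEFAULT UND memcpy@GLIBC_2.14 (3)
# splits into more than 7 fields; field 7 partitions on '@' into
# ('memcpy', 'GLIBC_2.14'), and field 6 == 'UND' marks it as an import.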
def check_version(max_versions, version):
if '_' in version:
(lib, _, ver) = version.rpartition('_')
else:
lib = version
ver = '0'
ver = tuple([int(x) for x in ver.split('.')])
if not lib in max_versions:
return False
return ver <= max_versions[lib]
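# For example, check_version(MAX_VERSIONS, 'GLIBC_2.11') is True, while
# check_version(MAX_VERSIONS, 'GLIBC_2.14') is False since (2, 14) > (2, 11).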
if __name__ == '__main__':
cppfilt = CPPFilt()
retval = 0
for filename in sys.argv[1:]:
# Check imported symbols
for sym,version in read_symbols(filename, True):
if version and not check_version(MAX_VERSIONS, version):
print('%s: symbol %s from unsupported version %s' % (filename, cppfilt(sym), version))
retval = 1
# Check exported symbols
for sym,version in read_symbols(filename, False):
print('%s: export of symbol %s not allowed' % (filename, cppfilt(sym)))
retval = 1
exit(retval)
| mit |
six600110/xmms2 | waftools/plugin.py | 7 | 1326 | # Stock plugin configuration and build methods. These factor out the
# common tasks carried out by plugins in order to configure and build
# themselves.
from waflib.Errors import ConfigurationError
from copy import copy
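# A plugin wscript typically does something like (hypothetical plugin name):
#   configure, build = plugin('flac', libs=['flac'])
# and waf then invokes the returned hooks during its configure/build phases.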
def plugin(name, source=None, configure=False, build=False,
build_replace=False, libs=[],
tool='c', broken=False, output_prio=None):
def stock_configure(conf):
if broken:
conf.msg('%s plugin' % name, 'disabled (broken)', color='RED')
return
if configure:
configure(conf)
conf.env.XMMS_PLUGINS_ENABLED.append(name)
if output_prio:
conf.env.XMMS_OUTPUT_PLUGINS.append((output_prio, name))
def stock_build(bld):
pat = tool=='c' and '*.c' or '*.cpp'
obj = bld(
features = '%(tool)s %(tool)sshlib' % dict(tool=tool),
target = 'xmms_%s' % name,
source = copy(source) or bld.path.ant_glob(pat),
includes = '../../.. ../../include',
uselib = ['glib2'] + libs,
use = bld.env.xmms_shared_library and 'xmms2core' or '',
install_path = '${PLUGINDIR}',
mac_bundle = bld.env.mac_bundle_enabled,
)
if build:
build(bld, obj)
return stock_configure, stock_build
| lgpl-2.1 |
Mchakravartula/rockstor-core | src/rockstor/scripts/bootstrap.py | 2 | 3111 | """
Copyright (c) 2012-2015 RockStor, Inc. <http://rockstor.com>
This file is part of RockStor.
RockStor is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published
by the Free Software Foundation; either version 2 of the License,
or (at your option) any later version.
RockStor is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import os
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
import sys
import time
from cli.api_wrapper import APIWrapper
from fs.btrfs import device_scan
from system.osi import run_command
import requests
from django.conf import settings
from storageadmin.models import Setup
BASE_DIR = settings.ROOT_DIR
BASE_BIN = '%sbin' % BASE_DIR
QGROUP_CLEAN = '%s/qgroup-clean' % BASE_BIN
QGROUP_MAXOUT_LIMIT = '%s/qgroup-maxout-limit' % BASE_BIN
def main():
try:
device_scan()
except Exception, e:
print ('BTRFS device scan failed due to an exception. This indicates '
'a serious problem. Aborting. Exception: %s' % e.__str__())
sys.exit(1)
print('BTRFS device scan complete')
#if the appliance is not setup, there's nothing more to do beyond
#device scan
setup = Setup.objects.first()
if (setup is None or setup.setup_user is False):
print('Appliance is not yet setup.')
return
num_attempts = 0
while True:
try:
aw = APIWrapper()
aw.api_call('network')
aw.api_call('commands/bootstrap', calltype='post')
break
except Exception, e:
#Retry on every exception, primarily because of django-oauth related
#code behaving unpredictably while setting tokens. Retrying is a
#decent workaround for now(11302015).
if (num_attempts > 15):
print('Max attempts(15) reached. Connection errors persist. '
'Failed to bootstrap. Error: %s' % e.__str__())
sys.exit(1)
            print('Exception occurred while bootstrapping. This could be because '
'rockstor.service is still starting up. will wait 2 seconds '
'and try again. Exception: %s' % e.__str__())
time.sleep(2)
num_attempts += 1
print('Bootstrapping complete')
try:
print('Running qgroup cleanup. %s' % QGROUP_CLEAN)
run_command([QGROUP_CLEAN])
except Exception, e:
print('Exception while running %s: %s' % (QGROUP_CLEAN, e.__str__()))
try:
print('Running qgroup limit maxout. %s' % QGROUP_MAXOUT_LIMIT)
run_command([QGROUP_MAXOUT_LIMIT])
except Exception, e:
print('Exception while running %s: %s' % (QGROUP_MAXOUT_LIMIT, e.__str__()))
if __name__ == '__main__':
main()
| gpl-3.0 |
liavkoren/djangoDev | tests/bash_completion/tests.py | 19 | 3224 | """
A series of tests to establish that the command-line bash completion works.
"""
import os
import sys
import unittest
from django.apps import apps
from django.core.management import ManagementUtility
from django.utils.six import StringIO
class BashCompletionTests(unittest.TestCase):
"""
Testing the Python level bash completion code.
This requires setting up the environment as if we got passed data
from bash.
"""
def setUp(self):
self.old_DJANGO_AUTO_COMPLETE = os.environ.get('DJANGO_AUTO_COMPLETE')
os.environ['DJANGO_AUTO_COMPLETE'] = '1'
self.output = StringIO()
self.old_stdout = sys.stdout
sys.stdout = self.output
def tearDown(self):
sys.stdout = self.old_stdout
if self.old_DJANGO_AUTO_COMPLETE:
os.environ['DJANGO_AUTO_COMPLETE'] = self.old_DJANGO_AUTO_COMPLETE
else:
del os.environ['DJANGO_AUTO_COMPLETE']
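    # Bash exports COMP_WORDS (the full command line) and COMP_CWORD (the
    # index of the word being completed); the helpers below fake that
    # environment so ManagementUtility.autocomplete() behaves as if invoked
    # from a real shell completion.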
def _user_input(self, input_str):
os.environ['COMP_WORDS'] = input_str
os.environ['COMP_CWORD'] = str(len(input_str.split()) - 1)
sys.argv = input_str.split(' ')
def _run_autocomplete(self):
util = ManagementUtility(argv=sys.argv)
try:
util.autocomplete()
except SystemExit:
pass
return self.output.getvalue().strip().split('\n')
def test_django_admin_py(self):
"django_admin.py will autocomplete option flags"
self._user_input('django-admin.py sqlall --v')
output = self._run_autocomplete()
self.assertEqual(output, ['--verbosity='])
def test_manage_py(self):
"manage.py will autocomplete option flags"
self._user_input('manage.py sqlall --v')
output = self._run_autocomplete()
self.assertEqual(output, ['--verbosity='])
def test_custom_command(self):
"A custom command can autocomplete option flags"
self._user_input('django-admin.py test_command --l')
output = self._run_autocomplete()
self.assertEqual(output, ['--list'])
def test_subcommands(self):
"Subcommands can be autocompleted"
self._user_input('django-admin.py sql')
output = self._run_autocomplete()
self.assertEqual(output, ['sql sqlall sqlclear sqlcustom sqldropindexes sqlflush sqlindexes sqlinitialdata sqlmigrate sqlsequencereset'])
def test_help(self):
"No errors, just an empty list if there are no autocomplete options"
self._user_input('django-admin.py help --')
output = self._run_autocomplete()
self.assertEqual(output, [''])
def test_runfcgi(self):
"Command arguments will be autocompleted"
self._user_input('django-admin.py runfcgi h')
output = self._run_autocomplete()
self.assertEqual(output, ['host='])
def test_app_completion(self):
"Application names will be autocompleted for an AppCommand"
self._user_input('django-admin.py sqlall a')
output = self._run_autocomplete()
a_labels = sorted(app_config.label
for app_config in apps.get_app_configs()
if app_config.label.startswith('a'))
self.assertEqual(output, a_labels)
| bsd-3-clause |
doismellburning/edx-platform | common/test/acceptance/tests/lms/test_lms_user_preview.py | 59 | 15999 | # -*- coding: utf-8 -*-
"""
Tests the "preview" selector in the LMS that allows changing between Staff, Student, and Content Groups.
"""
from ..helpers import UniqueCourseTest, create_user_partition_json
from ...pages.studio.auto_auth import AutoAuthPage
from ...pages.lms.courseware import CoursewarePage
from ...pages.lms.instructor_dashboard import InstructorDashboardPage
from ...pages.lms.staff_view import StaffPage
from ...fixtures.course import CourseFixture, XBlockFixtureDesc
from bok_choy.promise import EmptyPromise
from xmodule.partitions.partitions import Group
from textwrap import dedent
class StaffViewTest(UniqueCourseTest):
"""
Tests that verify the staff view.
"""
USERNAME = "STAFF_TESTER"
EMAIL = "[email protected]"
def setUp(self):
super(StaffViewTest, self).setUp()
self.courseware_page = CoursewarePage(self.browser, self.course_id)
# Install a course with sections/problems, tabs, updates, and handouts
self.course_fixture = CourseFixture(
self.course_info['org'], self.course_info['number'],
self.course_info['run'], self.course_info['display_name']
)
self.populate_course_fixture(self.course_fixture) # pylint: disable=no-member
self.course_fixture.install()
# Auto-auth register for the course.
# Do this as global staff so that you will see the Staff View
AutoAuthPage(self.browser, username=self.USERNAME, email=self.EMAIL,
course_id=self.course_id, staff=True).visit()
def _goto_staff_page(self):
"""
Open staff page with assertion
"""
self.courseware_page.visit()
staff_page = StaffPage(self.browser, self.course_id)
self.assertEqual(staff_page.staff_view_mode, 'Staff')
return staff_page
class CourseWithoutContentGroupsTest(StaffViewTest):
"""
Setup for tests that have no content restricted to specific content groups.
"""
def populate_course_fixture(self, course_fixture):
"""
Populates test course with chapter, sequential, and 2 problems.
"""
problem_data = dedent("""
<problem markdown="Simple Problem" max_attempts="" weight="">
<p>Choose Yes.</p>
<choiceresponse>
<checkboxgroup>
<choice correct="true">Yes</choice>
</checkboxgroup>
</choiceresponse>
</problem>
""")
course_fixture.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('problem', 'Test Problem 1', data=problem_data),
XBlockFixtureDesc('problem', 'Test Problem 2', data=problem_data)
)
)
)
class StaffViewToggleTest(CourseWithoutContentGroupsTest):
"""
Tests for the staff view toggle button.
"""
def test_instructor_tab_visibility(self):
"""
Test that the instructor tab is hidden when viewing as a student.
"""
course_page = self._goto_staff_page()
self.assertTrue(course_page.has_tab('Instructor'))
course_page.set_staff_view_mode('Student')
self.assertEqual(course_page.staff_view_mode, 'Student')
self.assertFalse(course_page.has_tab('Instructor'))
class StaffDebugTest(CourseWithoutContentGroupsTest):
"""
Tests that verify the staff debug info.
"""
def test_reset_attempts_empty(self):
"""
Test that we reset even when there is no student state
"""
staff_debug_page = self._goto_staff_page().open_staff_debug_info()
staff_debug_page.reset_attempts()
msg = staff_debug_page.idash_msg[0]
self.assertEqual(u'Successfully reset the attempts '
'for user {}'.format(self.USERNAME), msg)
def test_delete_state_empty(self):
"""
Test that we delete properly even when there isn't state to delete.
"""
staff_debug_page = self._goto_staff_page().open_staff_debug_info()
staff_debug_page.delete_state()
msg = staff_debug_page.idash_msg[0]
self.assertEqual(u'Successfully deleted student state '
'for user {}'.format(self.USERNAME), msg)
def test_reset_attempts_state(self):
"""
Successfully reset the student attempts
"""
staff_page = self._goto_staff_page()
staff_page.answer_problem()
staff_debug_page = staff_page.open_staff_debug_info()
staff_debug_page.reset_attempts()
msg = staff_debug_page.idash_msg[0]
self.assertEqual(u'Successfully reset the attempts '
'for user {}'.format(self.USERNAME), msg)
def test_rescore_state(self):
"""
Rescore the student
"""
staff_page = self._goto_staff_page()
staff_page.answer_problem()
staff_debug_page = staff_page.open_staff_debug_info()
staff_debug_page.rescore()
msg = staff_debug_page.idash_msg[0]
self.assertEqual(u'Successfully rescored problem for user STAFF_TESTER', msg)
def test_student_state_delete(self):
"""
Successfully delete the student state with an answer
"""
staff_page = self._goto_staff_page()
staff_page.answer_problem()
staff_debug_page = staff_page.open_staff_debug_info()
staff_debug_page.delete_state()
msg = staff_debug_page.idash_msg[0]
self.assertEqual(u'Successfully deleted student state '
'for user {}'.format(self.USERNAME), msg)
def test_student_by_email(self):
"""
Successfully reset the student attempts using their email address
"""
staff_page = self._goto_staff_page()
staff_page.answer_problem()
staff_debug_page = staff_page.open_staff_debug_info()
staff_debug_page.reset_attempts(self.EMAIL)
msg = staff_debug_page.idash_msg[0]
self.assertEqual(u'Successfully reset the attempts '
'for user {}'.format(self.EMAIL), msg)
def test_bad_student(self):
"""
Test negative response with invalid user
"""
staff_page = self._goto_staff_page()
staff_page.answer_problem()
staff_debug_page = staff_page.open_staff_debug_info()
staff_debug_page.delete_state('INVALIDUSER')
msg = staff_debug_page.idash_msg[0]
self.assertEqual(u'Failed to delete student state. '
'User does not exist.', msg)
def test_reset_attempts_for_problem_loaded_via_ajax(self):
"""
Successfully reset the student attempts for problem loaded via ajax.
"""
staff_page = self._goto_staff_page()
staff_page.load_problem_via_ajax()
staff_page.answer_problem()
staff_debug_page = staff_page.open_staff_debug_info()
staff_debug_page.reset_attempts()
msg = staff_debug_page.idash_msg[0]
self.assertEqual(u'Successfully reset the attempts '
'for user {}'.format(self.USERNAME), msg)
def test_rescore_state_for_problem_loaded_via_ajax(self):
"""
Rescore the student for problem loaded via ajax.
"""
staff_page = self._goto_staff_page()
staff_page.load_problem_via_ajax()
staff_page.answer_problem()
staff_debug_page = staff_page.open_staff_debug_info()
staff_debug_page.rescore()
msg = staff_debug_page.idash_msg[0]
self.assertEqual(u'Successfully rescored problem for user STAFF_TESTER', msg)
def test_student_state_delete_for_problem_loaded_via_ajax(self):
"""
Successfully delete the student state for problem loaded via ajax.
"""
staff_page = self._goto_staff_page()
staff_page.load_problem_via_ajax()
staff_page.answer_problem()
staff_debug_page = staff_page.open_staff_debug_info()
staff_debug_page.delete_state()
msg = staff_debug_page.idash_msg[0]
self.assertEqual(u'Successfully deleted student state '
'for user {}'.format(self.USERNAME), msg)
class CourseWithContentGroupsTest(StaffViewTest):
"""
Verifies that changing the "View this course as" selector works properly for content groups.
"""
def setUp(self):
super(CourseWithContentGroupsTest, self).setUp()
# pylint: disable=protected-access
self.course_fixture._update_xblock(self.course_fixture._course_location, {
"metadata": {
u"user_partitions": [
create_user_partition_json(
0,
'Configuration alpha,beta',
'Content Group Partition',
[Group("0", 'alpha'), Group("1", 'beta')],
scheme="cohort"
)
],
},
})
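        # Partition id 0 uses the "cohort" scheme, so group_access metadata of
        # the form {0: [group_id]} on a block restricts it to learners whose
        # cohort is linked to that content group.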
def populate_course_fixture(self, course_fixture):
"""
Populates test course with chapter, sequential, and 3 problems.
One problem is visible to all, one problem is visible only to Group "alpha", and
one problem is visible only to Group "beta".
"""
problem_data = dedent("""
<problem markdown="Simple Problem" max_attempts="" weight="">
<p>Choose Yes.</p>
<choiceresponse>
<checkboxgroup>
<choice correct="true">Yes</choice>
</checkboxgroup>
</choiceresponse>
</problem>
""")
self.alpha_text = "VISIBLE TO ALPHA"
self.beta_text = "VISIBLE TO BETA"
self.everyone_text = "VISIBLE TO EVERYONE"
course_fixture.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('vertical', 'Test Unit').add_children(
XBlockFixtureDesc(
'problem', self.alpha_text, data=problem_data, metadata={"group_access": {0: [0]}}
),
XBlockFixtureDesc(
'problem', self.beta_text, data=problem_data, metadata={"group_access": {0: [1]}}
),
XBlockFixtureDesc('problem', self.everyone_text, data=problem_data)
)
)
)
)
def test_staff_sees_all_problems(self):
"""
Scenario: Staff see all problems
Given I have a course with a cohort user partition
And problems that are associated with specific groups in the user partition
When I view the courseware in the LMS with staff access
Then I see all the problems, regardless of their group_access property
"""
course_page = self._goto_staff_page()
verify_expected_problem_visibility(self, course_page, [self.alpha_text, self.beta_text, self.everyone_text])
def test_student_not_in_content_group(self):
"""
Scenario: When previewing as a student, only content visible to all is shown
Given I have a course with a cohort user partition
And problems that are associated with specific groups in the user partition
When I view the courseware in the LMS with staff access
And I change to previewing as a Student
Then I see only problems visible to all users
"""
course_page = self._goto_staff_page()
course_page.set_staff_view_mode('Student')
verify_expected_problem_visibility(self, course_page, [self.everyone_text])
def test_as_student_in_alpha(self):
"""
Scenario: When previewing as a student in group alpha, only content visible to alpha is shown
Given I have a course with a cohort user partition
And problems that are associated with specific groups in the user partition
When I view the courseware in the LMS with staff access
And I change to previewing as a Student in group alpha
Then I see only problems visible to group alpha
"""
course_page = self._goto_staff_page()
course_page.set_staff_view_mode('Student in alpha')
verify_expected_problem_visibility(self, course_page, [self.alpha_text, self.everyone_text])
def test_as_student_in_beta(self):
"""
Scenario: When previewing as a student in group beta, only content visible to beta is shown
Given I have a course with a cohort user partition
And problems that are associated with specific groups in the user partition
When I view the courseware in the LMS with staff access
And I change to previewing as a Student in group beta
Then I see only problems visible to group beta
"""
course_page = self._goto_staff_page()
course_page.set_staff_view_mode('Student in beta')
verify_expected_problem_visibility(self, course_page, [self.beta_text, self.everyone_text])
def create_cohorts_and_assign_students(self, student_a_username, student_b_username):
"""
Adds 2 manual cohorts, linked to content groups, to the course.
Each cohort is assigned one student.
"""
instructor_dashboard_page = InstructorDashboardPage(self.browser, self.course_id)
instructor_dashboard_page.visit()
cohort_management_page = instructor_dashboard_page.select_cohort_management()
cohort_management_page.is_cohorted = True
def add_cohort_with_student(cohort_name, content_group, student):
""" Create cohort and assign student to it. """
cohort_management_page.add_cohort(cohort_name, content_group=content_group)
# After adding the cohort, it should automatically be selected
EmptyPromise(
lambda: cohort_name == cohort_management_page.get_selected_cohort(), "Waiting for new cohort"
).fulfill()
cohort_management_page.add_students_to_selected_cohort([student])
add_cohort_with_student("Cohort Alpha", "alpha", student_a_username)
add_cohort_with_student("Cohort Beta", "beta", student_b_username)
cohort_management_page.wait_for_ajax()
def test_as_specific_student(self):
student_a_username = 'tass_student_a'
student_b_username = 'tass_student_b'
AutoAuthPage(self.browser, username=student_a_username, course_id=self.course_id, no_login=True).visit()
AutoAuthPage(self.browser, username=student_b_username, course_id=self.course_id, no_login=True).visit()
self.create_cohorts_and_assign_students(student_a_username, student_b_username)
# Masquerade as student in alpha cohort:
course_page = self._goto_staff_page()
course_page.set_staff_view_mode_specific_student(student_a_username)
verify_expected_problem_visibility(self, course_page, [self.alpha_text, self.everyone_text])
# Masquerade as student in beta cohort:
course_page.set_staff_view_mode_specific_student(student_b_username)
verify_expected_problem_visibility(self, course_page, [self.beta_text, self.everyone_text])
def verify_expected_problem_visibility(test, courseware_page, expected_problems):
"""
Helper method that checks that the expected problems are visible on the current page.
"""
test.assertEqual(
len(expected_problems), courseware_page.num_xblock_components, "Incorrect number of visible problems"
)
for index, expected_problem in enumerate(expected_problems):
test.assertIn(expected_problem, courseware_page.xblock_components[index].text)
| agpl-3.0 |
hacksterio/pygments.rb | vendor/simplejson/simplejson/tests/test_decimal.py | 45 | 2357 | import decimal
from decimal import Decimal
from unittest import TestCase
from StringIO import StringIO
import simplejson as json
class TestDecimal(TestCase):
NUMS = "1.0", "10.00", "1.1", "1234567890.1234567890", "500"
def dumps(self, obj, **kw):
sio = StringIO()
json.dump(obj, sio, **kw)
res = json.dumps(obj, **kw)
self.assertEquals(res, sio.getvalue())
return res
def loads(self, s, **kw):
sio = StringIO(s)
res = json.loads(s, **kw)
self.assertEquals(res, json.load(sio, **kw))
return res
def test_decimal_encode(self):
for d in map(Decimal, self.NUMS):
self.assertEquals(self.dumps(d, use_decimal=True), str(d))
def test_decimal_decode(self):
for s in self.NUMS:
self.assertEquals(self.loads(s, parse_float=Decimal), Decimal(s))
def test_decimal_roundtrip(self):
for d in map(Decimal, self.NUMS):
# The type might not be the same (int and Decimal) but they
# should still compare equal.
self.assertEquals(
self.loads(
self.dumps(d, use_decimal=True), parse_float=Decimal),
d)
self.assertEquals(
self.loads(
self.dumps([d], use_decimal=True), parse_float=Decimal),
[d])
def test_decimal_defaults(self):
d = Decimal('1.1')
# use_decimal=True is the default
self.assertRaises(TypeError, json.dumps, d, use_decimal=False)
self.assertEqual('1.1', json.dumps(d))
self.assertEqual('1.1', json.dumps(d, use_decimal=True))
self.assertRaises(TypeError, json.dump, d, StringIO(),
use_decimal=False)
sio = StringIO()
json.dump(d, sio)
self.assertEqual('1.1', sio.getvalue())
sio = StringIO()
json.dump(d, sio, use_decimal=True)
self.assertEqual('1.1', sio.getvalue())
def test_decimal_reload(self):
# Simulate a subinterpreter that reloads the Python modules but not
# the C code https://github.com/simplejson/simplejson/issues/34
global Decimal
Decimal = reload(decimal).Decimal
import simplejson.encoder
simplejson.encoder.Decimal = Decimal
self.test_decimal_roundtrip()
| mit |
suiyuan2009/tensorflow | tensorflow/contrib/ndlstm/python/lstm2d.py | 51 | 8016 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A small library of functions dealing with LSTMs applied to images.
Tensors in this library generally have the shape (num_images, height, width,
depth).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.ndlstm.python import lstm1d
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variable_scope
def _shape(tensor):
"""Get the shape of a tensor as an int list."""
return tensor.get_shape().as_list()
def images_to_sequence(tensor):
"""Convert a batch of images into a batch of sequences.
Args:
tensor: a (num_images, height, width, depth) tensor
Returns:
(width, num_images*height, depth) sequence tensor
"""
num_image_batches, height, width, depth = _shape(tensor)
transposed = array_ops.transpose(tensor, [2, 0, 1, 3])
return array_ops.reshape(transposed,
[width, num_image_batches * height, depth])
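# Shape walk-through with hypothetical sizes: a (2, 3, 4, 5) image batch is
# transposed to (4, 2, 3, 5) and reshaped to (4, 6, 5), making width the step
# axis and num_images * height the new batch axis.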
def sequence_to_images(tensor, num_image_batches):
"""Convert a batch of sequences into a batch of images.
Args:
tensor: (num_steps, num_batches, depth) sequence tensor
num_image_batches: the number of image batches
Returns:
(num_images, height, width, depth) tensor
"""
width, num_batches, depth = _shape(tensor)
height = num_batches // num_image_batches
reshaped = array_ops.reshape(tensor,
[width, num_image_batches, height, depth])
return array_ops.transpose(reshaped, [1, 2, 0, 3])
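# sequence_to_images above inverts images_to_sequence: with
# num_image_batches=2, a (4, 6, 5) sequence reshapes to (4, 2, 3, 5) and
# transposes back to (2, 3, 4, 5).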
def horizontal_lstm(images, num_filters_out, scope=None):
"""Run an LSTM bidirectionally over all the rows of each image.
Args:
images: (num_images, height, width, depth) tensor
num_filters_out: output depth
scope: optional scope name
Returns:
(num_images, height, width, num_filters_out) tensor, where
num_steps is width and new num_batches is num_image_batches * height
"""
with variable_scope.variable_scope(scope, "HorizontalLstm", [images]):
batch_size, _, _, _ = _shape(images)
sequence = images_to_sequence(images)
with variable_scope.variable_scope("lr"):
hidden_sequence_lr = lstm1d.ndlstm_base(sequence, num_filters_out // 2)
with variable_scope.variable_scope("rl"):
hidden_sequence_rl = (lstm1d.ndlstm_base(
sequence, num_filters_out - num_filters_out // 2, reverse=1))
output_sequence = array_ops.concat([hidden_sequence_lr, hidden_sequence_rl],
2)
output = sequence_to_images(output_sequence, batch_size)
return output
def get_blocks(images, kernel_size):
"""Split images in blocks
Args:
images: (num_images, height, width, depth) tensor
    kernel_size: A list of length 2 holding the [kernel_height, kernel_width]
      of the blocks.
Returns:
(num_images, height/kernel_height, width/kernel_width,
depth*kernel_height*kernel_width) tensor
"""
with variable_scope.variable_scope("image_blocks"):
batch_size, height, width, chanels = _shape(images)
if height % kernel_size[0] != 0:
offset = array_ops.zeros([batch_size,
kernel_size[0] - (height % kernel_size[0]),
width,
chanels])
images = array_ops.concat([images, offset], 1)
batch_size, height, width, chanels = _shape(images)
if width % kernel_size[1] != 0:
offset = array_ops.zeros([batch_size,
height,
kernel_size[1] - (width % kernel_size[1]),
chanels])
images = array_ops.concat([images, offset], 2)
batch_size, height, width, chanels = _shape(images)
h, w = int(height / kernel_size[0]), int(width / kernel_size[1])
features = kernel_size[1] * kernel_size[0] * chanels
lines = array_ops.split(images, h, axis=1)
line_blocks = []
for line in lines:
line = array_ops.transpose(line, [0, 2, 3, 1])
line = array_ops.reshape(line, [batch_size, w, features])
line_blocks.append(line)
return array_ops.stack(line_blocks, axis=1)
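# Hypothetical example for get_blocks above: kernel_size=(2, 2) on a
# (1, 4, 6, 3) tensor needs no padding and yields (1, 2, 3, 12), with each
# 2x2x3 block flattened into 12 features.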
def separable_lstm(images, num_filters_out,
kernel_size=None, nhidden=None, scope=None):
"""Run bidirectional LSTMs first horizontally then vertically.
Args:
images: (num_images, height, width, depth) tensor
num_filters_out: output layer depth
    kernel_size: A list of length 2 holding the [kernel_height, kernel_width]
      of the blocks. Set to None to not use blocks.
nhidden: hidden layer depth
scope: optional scope name
Returns:
(num_images, height/kernel_height, width/kernel_width,
num_filters_out) tensor
"""
with variable_scope.variable_scope(scope, "SeparableLstm", [images]):
if nhidden is None:
nhidden = num_filters_out
if kernel_size is not None:
images = get_blocks(images, kernel_size)
hidden = horizontal_lstm(images, nhidden)
with variable_scope.variable_scope("vertical"):
transposed = array_ops.transpose(hidden, [0, 2, 1, 3])
output_transposed = horizontal_lstm(transposed, num_filters_out)
output = array_ops.transpose(output_transposed, [0, 2, 1, 3])
return output
def reduce_to_sequence(images, num_filters_out, scope=None):
"""Reduce an image to a sequence by scanning an LSTM vertically.
Args:
images: (num_images, height, width, depth) tensor
num_filters_out: output layer depth
scope: optional scope name
Returns:
A (width, num_images, num_filters_out) sequence.
"""
with variable_scope.variable_scope(scope, "ReduceToSequence", [images]):
batch_size, height, width, depth = _shape(images)
transposed = array_ops.transpose(images, [1, 0, 2, 3])
reshaped = array_ops.reshape(transposed,
[height, batch_size * width, depth])
reduced = lstm1d.sequence_to_final(reshaped, num_filters_out)
output = array_ops.reshape(reduced, [batch_size, width, num_filters_out])
return output
def reduce_to_final(images, num_filters_out, nhidden=None, scope=None):
"""Reduce an image to a final state by running two LSTMs.
Args:
images: (num_images, height, width, depth) tensor
num_filters_out: output layer depth
nhidden: hidden layer depth (defaults to num_filters_out)
scope: optional scope name
Returns:
A (num_images, num_filters_out) batch.
"""
with variable_scope.variable_scope(scope, "ReduceToFinal", [images]):
nhidden = nhidden or num_filters_out
batch_size, height, width, depth = _shape(images)
transposed = array_ops.transpose(images, [1, 0, 2, 3])
reshaped = array_ops.reshape(transposed,
[height, batch_size * width, depth])
with variable_scope.variable_scope("reduce1"):
reduced = lstm1d.sequence_to_final(reshaped, nhidden)
transposed_hidden = array_ops.reshape(reduced,
[batch_size, width, nhidden])
hidden = array_ops.transpose(transposed_hidden, [1, 0, 2])
with variable_scope.variable_scope("reduce2"):
output = lstm1d.sequence_to_final(hidden, num_filters_out)
return output
| apache-2.0 |
asser/django | tests/admin_custom_urls/tests.py | 8 | 5656 | from __future__ import unicode_literals
from django.contrib.admin.utils import quote
from django.contrib.auth.models import User
from django.template.response import TemplateResponse
from django.test import TestCase, override_settings
from django.urls import reverse
from .models import Action, Car, Person
@override_settings(ROOT_URLCONF='admin_custom_urls.urls',)
class AdminCustomUrlsTest(TestCase):
"""
Remember that:
* The Action model has a CharField PK.
* The ModelAdmin for Action customizes the add_view URL, it's
'<app name>/<model name>/!add/'
"""
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(username='super', password='secret', email='[email protected]')
Action.objects.create(name='delete', description='Remove things.')
Action.objects.create(name='rename', description='Gives things other names.')
Action.objects.create(name='add', description='Add things.')
Action.objects.create(name='path/to/file/', description="An action with '/' in its name.")
Action.objects.create(
name='path/to/html/document.html',
description='An action with a name similar to a HTML doc path.'
)
Action.objects.create(
name='javascript:alert(\'Hello world\');">Click here</a>',
description='An action with a name suspected of being a XSS attempt'
)
def setUp(self):
self.client.force_login(self.superuser)
def test_basic_add_GET(self):
"""
Ensure GET on the add_view works.
"""
add_url = reverse('admin_custom_urls:admin_custom_urls_action_add')
self.assertTrue(add_url.endswith('/!add/'))
response = self.client.get(add_url)
self.assertIsInstance(response, TemplateResponse)
self.assertEqual(response.status_code, 200)
def test_add_with_GET_args(self):
"""
Ensure GET on the add_view plus specifying a field value in the query
string works.
"""
response = self.client.get(reverse('admin_custom_urls:admin_custom_urls_action_add'), {'name': 'My Action'})
self.assertContains(response, 'value="My Action"')
def test_basic_add_POST(self):
"""
Ensure POST on add_view works.
"""
post_data = {
'_popup': '1',
"name": 'Action added through a popup',
"description": "Description of added action",
}
response = self.client.post(reverse('admin_custom_urls:admin_custom_urls_action_add'), post_data)
self.assertContains(response, 'Action added through a popup')
def test_admin_URLs_no_clash(self):
"""
Test that some admin URLs work correctly.
"""
# Should get the change_view for model instance with PK 'add', not show
# the add_view
url = reverse('admin_custom_urls:%s_action_change' % Action._meta.app_label,
args=(quote('add'),))
response = self.client.get(url)
self.assertContains(response, 'Change action')
# Should correctly get the change_view for the model instance with the
# funny-looking PK (the one with a 'path/to/html/document.html' value)
url = reverse('admin_custom_urls:%s_action_change' % Action._meta.app_label,
args=(quote("path/to/html/document.html"),))
response = self.client.get(url)
self.assertContains(response, 'Change action')
self.assertContains(response, 'value="path/to/html/document.html"')
def test_post_save_add_redirect(self):
"""
Ensures that ModelAdmin.response_post_save_add() controls the
redirection after the 'Save' button has been pressed when adding a
new object.
Refs 8001, 18310, 19505.
"""
post_data = {'name': 'John Doe'}
self.assertEqual(Person.objects.count(), 0)
response = self.client.post(
reverse('admin_custom_urls:admin_custom_urls_person_add'), post_data)
persons = Person.objects.all()
self.assertEqual(len(persons), 1)
self.assertRedirects(
response, reverse('admin_custom_urls:admin_custom_urls_person_history', args=[persons[0].pk]))
def test_post_save_change_redirect(self):
"""
Ensures that ModelAdmin.response_post_save_change() controls the
redirection after the 'Save' button has been pressed when editing an
existing object.
Refs 8001, 18310, 19505.
"""
Person.objects.create(name='John Doe')
self.assertEqual(Person.objects.count(), 1)
person = Person.objects.all()[0]
post_data = {'name': 'Jack Doe'}
response = self.client.post(
reverse('admin_custom_urls:admin_custom_urls_person_change', args=[person.pk]), post_data)
self.assertRedirects(
response, reverse('admin_custom_urls:admin_custom_urls_person_delete', args=[person.pk]))
def test_post_url_continue(self):
"""
Ensures that the ModelAdmin.response_add()'s parameter `post_url_continue`
controls the redirection after an object has been created.
"""
post_data = {'name': 'SuperFast', '_continue': '1'}
self.assertEqual(Car.objects.count(), 0)
response = self.client.post(
reverse('admin_custom_urls:admin_custom_urls_car_add'), post_data)
cars = Car.objects.all()
self.assertEqual(len(cars), 1)
self.assertRedirects(
response, reverse('admin_custom_urls:admin_custom_urls_car_history', args=[cars[0].pk]))
| bsd-3-clause |
cyx1231st/nova | nova/tests/unit/test_wsgi.py | 7 | 11923 | # Copyright 2011 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for `nova.wsgi`."""
import os.path
import socket
import tempfile
import eventlet
import eventlet.wsgi
import mock
from oslo_config import cfg
import requests
import testtools
import webob
import nova.exception
from nova import test
from nova.tests.unit import utils
import nova.wsgi
SSL_CERT_DIR = os.path.normpath(os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'ssl_cert'))
CONF = cfg.CONF
class TestLoaderNothingExists(test.NoDBTestCase):
"""Loader tests where os.path.exists always returns False."""
def setUp(self):
super(TestLoaderNothingExists, self).setUp()
self.stub_out('os.path.exists', lambda _: False)
def test_relpath_config_not_found(self):
self.flags(api_paste_config='api-paste.ini')
self.assertRaises(
nova.exception.ConfigNotFound,
nova.wsgi.Loader,
)
def test_asbpath_config_not_found(self):
self.flags(api_paste_config='/etc/nova/api-paste.ini')
self.assertRaises(
nova.exception.ConfigNotFound,
nova.wsgi.Loader,
)
class TestLoaderNormalFilesystem(test.NoDBTestCase):
"""Loader tests with normal filesystem (unmodified os.path module)."""
_paste_config = """
[app:test_app]
use = egg:Paste#static
document_root = /tmp
"""
def setUp(self):
super(TestLoaderNormalFilesystem, self).setUp()
self.config = tempfile.NamedTemporaryFile(mode="w+t")
self.config.write(self._paste_config.lstrip())
self.config.seek(0)
self.config.flush()
self.loader = nova.wsgi.Loader(self.config.name)
def test_config_found(self):
self.assertEqual(self.config.name, self.loader.config_path)
def test_app_not_found(self):
self.assertRaises(
nova.exception.PasteAppNotFound,
self.loader.load_app,
"nonexistent app",
)
def test_app_found(self):
url_parser = self.loader.load_app("test_app")
self.assertEqual("/tmp", url_parser.directory)
def tearDown(self):
self.config.close()
super(TestLoaderNormalFilesystem, self).tearDown()
class TestWSGIServer(test.NoDBTestCase):
"""WSGI server tests."""
def test_no_app(self):
server = nova.wsgi.Server("test_app", None)
self.assertEqual("test_app", server.name)
def test_custom_max_header_line(self):
self.flags(max_header_line=4096) # Default value is 16384.
nova.wsgi.Server("test_custom_max_header_line", None)
self.assertEqual(CONF.max_header_line, eventlet.wsgi.MAX_HEADER_LINE)
def test_start_random_port(self):
server = nova.wsgi.Server("test_random_port", None,
host="127.0.0.1", port=0)
server.start()
self.assertNotEqual(0, server.port)
server.stop()
server.wait()
@testtools.skipIf(not utils.is_ipv6_supported(), "no ipv6 support")
def test_start_random_port_with_ipv6(self):
server = nova.wsgi.Server("test_random_port", None,
host="::1", port=0)
server.start()
self.assertEqual("::1", server.host)
self.assertNotEqual(0, server.port)
server.stop()
server.wait()
@testtools.skipIf(not utils.is_linux(), 'SO_REUSEADDR behaves differently '
'on OSX and BSD, see bugs '
'1436895 and 1467145')
def test_socket_options_for_simple_server(self):
# Test that the normal socket options are set properly.
self.flags(tcp_keepidle=500)
server = nova.wsgi.Server("test_socket_options", None,
host="127.0.0.1", port=0)
server.start()
sock = server._socket
self.assertEqual(1, sock.getsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR))
self.assertEqual(1, sock.getsockopt(socket.SOL_SOCKET,
socket.SO_KEEPALIVE))
if hasattr(socket, 'TCP_KEEPIDLE'):
self.assertEqual(CONF.tcp_keepidle,
sock.getsockopt(socket.IPPROTO_TCP,
socket.TCP_KEEPIDLE))
server.stop()
server.wait()
def test_server_pool_waitall(self):
# Test that the pool's waitall() method is called while stopping the server.
server = nova.wsgi.Server("test_server", None,
host="127.0.0.1")
server.start()
with mock.patch.object(server._pool,
'waitall') as mock_waitall:
server.stop()
server.wait()
mock_waitall.assert_called_once_with()
def test_uri_length_limit(self):
server = nova.wsgi.Server("test_uri_length_limit", None,
host="127.0.0.1", max_url_len=16384)
server.start()
uri = "http://127.0.0.1:%d/%s" % (server.port, 10000 * 'x')
resp = requests.get(uri, proxies={"http": ""})
eventlet.sleep(0)
self.assertNotEqual(resp.status_code,
requests.codes.REQUEST_URI_TOO_LARGE)
uri = "http://127.0.0.1:%d/%s" % (server.port, 20000 * 'x')
resp = requests.get(uri, proxies={"http": ""})
eventlet.sleep(0)
self.assertEqual(resp.status_code,
requests.codes.REQUEST_URI_TOO_LARGE)
server.stop()
server.wait()
def test_reset_pool_size_to_default(self):
server = nova.wsgi.Server("test_resize", None,
host="127.0.0.1", max_url_len=16384)
server.start()
# Stopping the server, which in turn sets pool size to 0
server.stop()
self.assertEqual(server._pool.size, 0)
# Resetting pool size to default
server.reset()
server.start()
self.assertEqual(server._pool.size, CONF.wsgi_default_pool_size)
def test_client_socket_timeout(self):
self.flags(client_socket_timeout=5)
# Mock the eventlet spawn method to check that it is called with the
# configured 'client_socket_timeout' value.
with mock.patch.object(eventlet,
'spawn') as mock_spawn:
server = nova.wsgi.Server("test_app", None,
host="127.0.0.1", port=0)
server.start()
_, kwargs = mock_spawn.call_args
self.assertEqual(CONF.client_socket_timeout,
kwargs['socket_timeout'])
server.stop()
def test_wsgi_keep_alive(self):
self.flags(wsgi_keep_alive=False)
# Mock the eventlet spawn method to check that it is called with the
# configured 'wsgi_keep_alive' value.
with mock.patch.object(eventlet,
'spawn') as mock_spawn:
server = nova.wsgi.Server("test_app", None,
host="127.0.0.1", port=0)
server.start()
_, kwargs = mock_spawn.call_args
self.assertEqual(CONF.wsgi_keep_alive,
kwargs['keepalive'])
server.stop()
class TestWSGIServerWithSSL(test.NoDBTestCase):
"""WSGI server with SSL tests."""
def setUp(self):
super(TestWSGIServerWithSSL, self).setUp()
self.flags(enabled_ssl_apis=['fake_ssl'],
ssl_cert_file=os.path.join(SSL_CERT_DIR, 'certificate.crt'),
ssl_key_file=os.path.join(SSL_CERT_DIR, 'privatekey.key'))
def test_ssl_server(self):
def test_app(env, start_response):
start_response('200 OK', {})
return ['PONG']
fake_ssl_server = nova.wsgi.Server("fake_ssl", test_app,
host="127.0.0.1", port=0,
use_ssl=True)
fake_ssl_server.start()
self.assertNotEqual(0, fake_ssl_server.port)
response = requests.post(
'https://127.0.0.1:%s/' % fake_ssl_server.port,
verify=os.path.join(SSL_CERT_DIR, 'ca.crt'), data='PING')
self.assertEqual(response.text, 'PONG')
fake_ssl_server.stop()
fake_ssl_server.wait()
def test_two_servers(self):
def test_app(env, start_response):
start_response('200 OK', {})
return ['PONG']
fake_ssl_server = nova.wsgi.Server("fake_ssl", test_app,
host="127.0.0.1", port=0, use_ssl=True)
fake_ssl_server.start()
self.assertNotEqual(0, fake_ssl_server.port)
fake_server = nova.wsgi.Server("fake", test_app,
host="127.0.0.1", port=0)
fake_server.start()
self.assertNotEqual(0, fake_server.port)
response = requests.post(
'https://127.0.0.1:%s/' % fake_ssl_server.port,
verify=os.path.join(SSL_CERT_DIR, 'ca.crt'), data='PING')
self.assertEqual(response.text, 'PONG')
response = requests.post('http://127.0.0.1:%s/' % fake_server.port,
data='PING')
self.assertEqual(response.text, 'PONG')
fake_ssl_server.stop()
fake_ssl_server.wait()
fake_server.stop()
fake_server.wait()
@testtools.skipIf(not utils.is_linux(), 'SO_REUSEADDR behaves differently '
'on OSX and BSD, see bugs '
'1436895 and 1467145')
def test_socket_options_for_ssl_server(self):
# Test that the normal socket options are set properly.
self.flags(tcp_keepidle=500)
server = nova.wsgi.Server("test_socket_options", None,
host="127.0.0.1", port=0,
use_ssl=True)
server.start()
sock = server._socket
self.assertEqual(1, sock.getsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR))
self.assertEqual(1, sock.getsockopt(socket.SOL_SOCKET,
socket.SO_KEEPALIVE))
if hasattr(socket, 'TCP_KEEPIDLE'):
self.assertEqual(CONF.tcp_keepidle,
sock.getsockopt(socket.IPPROTO_TCP,
socket.TCP_KEEPIDLE))
server.stop()
server.wait()
@testtools.skipIf(not utils.is_ipv6_supported(), "no ipv6 support")
def test_app_using_ipv6_and_ssl(self):
greetings = 'Hello, World!!!'
@webob.dec.wsgify
def hello_world(req):
return greetings
server = nova.wsgi.Server("fake_ssl",
hello_world,
host="::1",
port=0,
use_ssl=True)
server.start()
response = requests.get('https://[::1]:%d/' % server.port,
verify=os.path.join(SSL_CERT_DIR, 'ca.crt'))
self.assertEqual(greetings, response.text)
server.stop()
server.wait()
| apache-2.0 |
cneill/designate | designate/api/v2/controllers/limits.py | 7 | 1830 | # Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Author: Kiall Mac Innes <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pecan
from oslo_log import log as logging
from oslo_config import cfg
from designate.api.v2.controllers import rest
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class LimitsController(rest.RestController):
@pecan.expose(template='json:', content_type='application/json')
def get_all(self):
context = pecan.request.environ['context']
absolute_limits = self.central_api.get_absolute_limits(context)
return {
# Resource Creation Limits
"max_zones": absolute_limits['domains'],
"max_zone_recordsets": absolute_limits['domain_recordsets'],
"max_zone_records": absolute_limits['domain_records'],
"max_recordset_records": absolute_limits['recordset_records'],
# Resource Field Value Limits
"min_ttl": CONF['service:central'].min_ttl,
"max_zone_name_length":
CONF['service:central'].max_domain_name_len,
"max_recordset_name_length":
CONF['service:central'].max_recordset_name_len,
# Resource Fetching Limits
"max_page_limit": CONF['service:api'].max_limit_v2,
}
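# Illustrative shape of the JSON document this controller returns for
# GET /v2/limits (the values below are hypothetical; real ones come from
# whatever the central service and configuration report):
#
#   {
#       "max_zones": 10,
#       "max_zone_recordsets": 500,
#       "max_zone_records": 500,
#       "max_recordset_records": 20,
#       "min_ttl": 60,
#       "max_zone_name_length": 255,
#       "max_recordset_name_length": 255,
#       "max_page_limit": 1000
#   }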
| apache-2.0 |
drewandersonnz/openshift-tools | openshift/installer/vendored/openshift-ansible-3.9.40/roles/openshift_health_checker/library/ocutil.py | 49 | 2182 | #!/usr/bin/python
"""Interface to OpenShift oc command"""
import os
import shlex
import shutil
import subprocess
from ansible.module_utils.basic import AnsibleModule
ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')]
def locate_oc_binary():
"""Find and return oc binary file"""
# https://github.com/openshift/openshift-ansible/issues/3410
# oc can be in /usr/local/bin in some cases, but that may not
# be in $PATH due to ansible/sudo
paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS
oc_binary = 'oc'
# Use shutil.which if it is available, otherwise fall back to a naive path search
try:
which_result = shutil.which(oc_binary, path=os.pathsep.join(paths))
if which_result is not None:
oc_binary = which_result
except AttributeError:
for path in paths:
if os.path.exists(os.path.join(path, oc_binary)):
oc_binary = os.path.join(path, oc_binary)
break
return oc_binary
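# Illustrative behaviour, assuming a host where `oc` is installed in
# /usr/local/bin but that directory is stripped from $PATH under
# ansible/sudo: the extra lookup paths above let locate_oc_binary()
# return '/usr/local/bin/oc' rather than the bare command name, which
# `subprocess` could then fail to resolve.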
def main():
"""Module that executes commands on a remote OpenShift cluster"""
module = AnsibleModule(
argument_spec=dict(
namespace=dict(type="str", required=False),
config_file=dict(type="str", required=True),
cmd=dict(type="str", required=True),
extra_args=dict(type="list", default=[]),
),
)
cmd = [locate_oc_binary(), '--config', module.params["config_file"]]
if module.params["namespace"]:
cmd += ['-n', module.params["namespace"]]
cmd += shlex.split(module.params["cmd"]) + module.params["extra_args"]
failed = True
try:
cmd_result = subprocess.check_output(list(cmd), stderr=subprocess.STDOUT)
failed = False
except subprocess.CalledProcessError as exc:
cmd_result = '[rc {}] {}\n{}'.format(exc.returncode, ' '.join(exc.cmd), exc.output)
except OSError as exc:
# we get this when 'oc' is not there
cmd_result = str(exc)
module.exit_json(
changed=False,
failed=failed,
result=cmd_result,
)
if __name__ == '__main__':
main()
| apache-2.0 |
Microvellum/Fluid-Designer | win64-vc/2.78/python/lib/heapq.py | 19 | 22930 | """Heap queue algorithm (a.k.a. priority queue).
Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for
all k, counting elements from 0. For the sake of comparison,
non-existing elements are considered to be infinite. The interesting
property of a heap is that a[0] is always its smallest element.
Usage:
heap = [] # creates an empty heap
heappush(heap, item) # pushes a new item on the heap
item = heappop(heap) # pops the smallest item from the heap
item = heap[0] # smallest item on the heap without popping it
heapify(x) # transforms list into a heap, in-place, in linear time
item = heapreplace(heap, item) # pops and returns smallest item, and adds
# new item; the heap size is unchanged
Our API differs from textbook heap algorithms as follows:
- We use 0-based indexing. This makes the relationship between the
index for a node and the indexes for its children slightly less
obvious, but is more suitable since Python uses 0-based indexing.
- Our heappop() method returns the smallest item, not the largest.
These two make it possible to view the heap as a regular Python list
without surprises: heap[0] is the smallest item, and heap.sort()
maintains the heap invariant!
"""
# Original code by Kevin O'Connor, augmented by Tim Peters and Raymond Hettinger
__about__ = """Heap queues
[explanation by François Pinard]
Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for
all k, counting elements from 0. For the sake of comparison,
non-existing elements are considered to be infinite. The interesting
property of a heap is that a[0] is always its smallest element.
The strange invariant above is meant to be an efficient memory
representation for a tournament. The numbers below are `k', not a[k]:
0
1 2
3 4 5 6
7 8 9 10 11 12 13 14
15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30
In the tree above, each cell `k' is topping `2*k+1' and `2*k+2'. In
a usual binary tournament we see in sports, each cell is the winner
over the two cells it tops, and we can trace the winner down the tree
to see all opponents s/he had. However, in many computer applications
of such tournaments, we do not need to trace the history of a winner.
To be more memory efficient, when a winner is promoted, we try to
replace it by something else at a lower level, and the rule becomes
that a cell and the two cells it tops contain three different items,
but the top cell "wins" over the two topped cells.
If this heap invariant is protected at all time, index 0 is clearly
the overall winner. The simplest algorithmic way to remove it and
find the "next" winner is to move some loser (let's say cell 30 in the
diagram above) into the 0 position, and then percolate this new 0 down
the tree, exchanging values, until the invariant is re-established.
This is clearly logarithmic on the total number of items in the tree.
By iterating over all items, you get an O(n ln n) sort.
A nice feature of this sort is that you can efficiently insert new
items while the sort is going on, provided that the inserted items are
not "better" than the last 0'th element you extracted. This is
especially useful in simulation contexts, where the tree holds all
incoming events, and the "win" condition means the smallest scheduled
time. When an event schedules other events for execution, they are
scheduled into the future, so they can easily go into the heap. So, a
heap is a good structure for implementing schedulers (this is what I
used for my MIDI sequencer :-).
Various structures for implementing schedulers have been extensively
studied, and heaps are good for this, as they are reasonably speedy,
the speed is almost constant, and the worst case is not much different
than the average case. However, there are other representations which
are more efficient overall, yet the worst cases might be terrible.
Heaps are also very useful in big disk sorts. You most probably all
know that a big sort implies producing "runs" (which are pre-sorted
sequences, whose size is usually related to the amount of CPU memory),
followed by merging passes for these runs, and this merging is often
very cleverly organised[1]. It is very important that the initial
sort produces the longest runs possible. Tournaments are a good way
to achieve that. If, using all the memory available to hold a tournament, you
replace and percolate items that happen to fit the current run, you'll
produce runs which are twice the size of the memory for random input,
and much better for input fuzzily ordered.
Moreover, if you output the 0'th item on disk and get an input which
may not fit in the current tournament (because the value "wins" over
the last output value), it cannot fit in the heap, so the size of the
heap decreases. The freed memory could be cleverly reused immediately
for progressively building a second heap, which grows at exactly the
same rate the first heap is melting. When the first heap completely
vanishes, you switch heaps and start a new run. Clever and quite
effective!
In a word, heaps are useful memory structures to know. I use them in
a few applications, and I think it is good to keep a `heap' module
around. :-)
--------------------
[1] The disk balancing algorithms which are current, nowadays, are
more annoying than clever, and this is a consequence of the seeking
capabilities of the disks. On devices which cannot seek, like big
tape drives, the story was quite different, and one had to be very
clever to ensure (far in advance) that each tape movement will be the
most effective possible (that is, will best participate at
"progressing" the merge). Some tapes were even able to read
backwards, and this was also used to avoid the rewinding time.
Believe me, real good tape sorts were quite spectacular to watch!
From all times, sorting has always been a Great Art! :-)
"""
__all__ = ['heappush', 'heappop', 'heapify', 'heapreplace', 'merge',
'nlargest', 'nsmallest', 'heappushpop']
def heappush(heap, item):
"""Push item onto heap, maintaining the heap invariant."""
heap.append(item)
_siftdown(heap, 0, len(heap)-1)
def heappop(heap):
"""Pop the smallest item off the heap, maintaining the heap invariant."""
lastelt = heap.pop() # raises appropriate IndexError if heap is empty
if heap:
returnitem = heap[0]
heap[0] = lastelt
_siftup(heap, 0)
return returnitem
return lastelt
def heapreplace(heap, item):
"""Pop and return the current smallest value, and add the new item.
This is more efficient than heappop() followed by heappush(), and can be
more appropriate when using a fixed-size heap. Note that the value
returned may be larger than item! That constrains reasonable uses of
this routine unless written as part of a conditional replacement:
if item > heap[0]:
item = heapreplace(heap, item)
"""
returnitem = heap[0] # raises appropriate IndexError if heap is empty
heap[0] = item
_siftup(heap, 0)
return returnitem
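# A short sketch of the conditional-replacement pattern from the docstring
# above, keeping the three largest items seen so far in a min-heap:
#
#   >>> h = [1, 2, 3]
#   >>> for item in [0, 5, 4]:
#   ...     if item > h[0]:
#   ...         item = heapreplace(h, item)
#   >>> sorted(h)
#   [3, 4, 5]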
def heappushpop(heap, item):
"""Fast version of a heappush followed by a heappop."""
if heap and heap[0] < item:
item, heap[0] = heap[0], item
_siftup(heap, 0)
return item
def heapify(x):
"""Transform list into a heap, in-place, in O(len(x)) time."""
n = len(x)
# Transform bottom-up. The largest index there's any point to looking at
# is the largest with a child index in-range, so must have 2*i + 1 < n,
# or i < (n-1)/2. If n is even = 2*j, this is (2*j-1)/2 = j-1/2 so
# j-1 is the largest, which is n//2 - 1. If n is odd = 2*j+1, this is
# (2*j+1-1)/2 = j so j-1 is the largest, and that's again n//2-1.
for i in reversed(range(n//2)):
_siftup(x, i)
def _heappop_max(heap):
"""Maxheap version of a heappop."""
lastelt = heap.pop() # raises appropriate IndexError if heap is empty
if heap:
returnitem = heap[0]
heap[0] = lastelt
_siftup_max(heap, 0)
return returnitem
return lastelt
def _heapreplace_max(heap, item):
"""Maxheap version of a heappop followed by a heappush."""
returnitem = heap[0] # raises appropriate IndexError if heap is empty
heap[0] = item
_siftup_max(heap, 0)
return returnitem
def _heapify_max(x):
"""Transform list into a maxheap, in-place, in O(len(x)) time."""
n = len(x)
for i in reversed(range(n//2)):
_siftup_max(x, i)
# 'heap' is a heap at all indices >= startpos, except possibly for pos. pos
# is the index of a leaf with a possibly out-of-order value. Restore the
# heap invariant.
def _siftdown(heap, startpos, pos):
newitem = heap[pos]
# Follow the path to the root, moving parents down until finding a place
# newitem fits.
while pos > startpos:
parentpos = (pos - 1) >> 1
parent = heap[parentpos]
if newitem < parent:
heap[pos] = parent
pos = parentpos
continue
break
heap[pos] = newitem
# The child indices of heap index pos are already heaps, and we want to make
# a heap at index pos too. We do this by bubbling the smaller child of
# pos up (and so on with that child's children, etc) until hitting a leaf,
# then using _siftdown to move the oddball originally at index pos into place.
#
# We *could* break out of the loop as soon as we find a pos where newitem <=
# both its children, but turns out that's not a good idea, and despite that
# many books write the algorithm that way. During a heap pop, the last array
# element is sifted in, and that tends to be large, so that comparing it
# against values starting from the root usually doesn't pay (= usually doesn't
# get us out of the loop early). See Knuth, Volume 3, where this is
# explained and quantified in an exercise.
#
# Cutting the # of comparisons is important, since these routines have no
# way to extract "the priority" from an array element, so that intelligence
# is likely to be hiding in custom comparison methods, or in array elements
# storing (priority, record) tuples. Comparisons are thus potentially
# expensive.
#
# On random arrays of length 1000, making this change cut the number of
# comparisons made by heapify() a little, and those made by exhaustive
# heappop() a lot, in accord with theory. Here are typical results from 3
# runs (3 just to demonstrate how small the variance is):
#
# Compares needed by heapify Compares needed by 1000 heappops
# -------------------------- --------------------------------
# 1837 cut to 1663 14996 cut to 8680
# 1855 cut to 1659 14966 cut to 8678
# 1847 cut to 1660 15024 cut to 8703
#
# Building the heap by using heappush() 1000 times instead required
# 2198, 2148, and 2219 compares: heapify() is more efficient, when
# you can use it.
#
# The total compares needed by list.sort() on the same lists were 8627,
# 8627, and 8632 (this should be compared to the sum of heapify() and
# heappop() compares): list.sort() is (unsurprisingly!) more efficient
# for sorting.
def _siftup(heap, pos):
endpos = len(heap)
startpos = pos
newitem = heap[pos]
# Bubble up the smaller child until hitting a leaf.
childpos = 2*pos + 1 # leftmost child position
while childpos < endpos:
# Set childpos to index of smaller child.
rightpos = childpos + 1
if rightpos < endpos and not heap[childpos] < heap[rightpos]:
childpos = rightpos
# Move the smaller child up.
heap[pos] = heap[childpos]
pos = childpos
childpos = 2*pos + 1
# The leaf at pos is empty now. Put newitem there, and bubble it up
# to its final resting place (by sifting its parents down).
heap[pos] = newitem
_siftdown(heap, startpos, pos)
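# A tiny worked trace of the bubble-to-leaf strategy described in the
# comments above, assuming the input [9, 1, 2] (both children are already
# heaps, only the root is out of order): the smaller child 1 bubbles up,
# 9 lands in the vacated leaf, and the final _siftdown leaves it there.
#
#   >>> h = [9, 1, 2]
#   >>> _siftup(h, 0)
#   >>> h
#   [1, 9, 2]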
def _siftdown_max(heap, startpos, pos):
'Maxheap variant of _siftdown'
newitem = heap[pos]
# Follow the path to the root, moving parents down until finding a place
# newitem fits.
while pos > startpos:
parentpos = (pos - 1) >> 1
parent = heap[parentpos]
if parent < newitem:
heap[pos] = parent
pos = parentpos
continue
break
heap[pos] = newitem
def _siftup_max(heap, pos):
'Maxheap variant of _siftup'
endpos = len(heap)
startpos = pos
newitem = heap[pos]
# Bubble up the larger child until hitting a leaf.
childpos = 2*pos + 1 # leftmost child position
while childpos < endpos:
# Set childpos to index of larger child.
rightpos = childpos + 1
if rightpos < endpos and not heap[rightpos] < heap[childpos]:
childpos = rightpos
# Move the larger child up.
heap[pos] = heap[childpos]
pos = childpos
childpos = 2*pos + 1
# The leaf at pos is empty now. Put newitem there, and bubble it up
# to its final resting place (by sifting its parents down).
heap[pos] = newitem
_siftdown_max(heap, startpos, pos)
def merge(*iterables, key=None, reverse=False):
'''Merge multiple sorted inputs into a single sorted output.
Similar to sorted(itertools.chain(*iterables)) but returns a generator,
does not pull the data into memory all at once, and assumes that each of
the input streams is already sorted (smallest to largest).
>>> list(merge([1,3,5,7], [0,2,4,8], [5,10,15,20], [], [25]))
[0, 1, 2, 3, 4, 5, 5, 7, 8, 10, 15, 20, 25]
If *key* is not None, applies a key function to each element to determine
its sort order.
>>> list(merge(['dog', 'horse'], ['cat', 'fish', 'kangaroo'], key=len))
['dog', 'cat', 'fish', 'horse', 'kangaroo']
'''
h = []
h_append = h.append
if reverse:
_heapify = _heapify_max
_heappop = _heappop_max
_heapreplace = _heapreplace_max
direction = -1
else:
_heapify = heapify
_heappop = heappop
_heapreplace = heapreplace
direction = 1
if key is None:
for order, it in enumerate(map(iter, iterables)):
try:
next = it.__next__
h_append([next(), order * direction, next])
except StopIteration:
pass
_heapify(h)
while len(h) > 1:
try:
while True:
value, order, next = s = h[0]
yield value
s[0] = next() # raises StopIteration when exhausted
_heapreplace(h, s) # restore heap condition
except StopIteration:
_heappop(h) # remove empty iterator
if h:
# fast case when only a single iterator remains
value, order, next = h[0]
yield value
yield from next.__self__
return
for order, it in enumerate(map(iter, iterables)):
try:
next = it.__next__
value = next()
h_append([key(value), order * direction, value, next])
except StopIteration:
pass
_heapify(h)
while len(h) > 1:
try:
while True:
key_value, order, value, next = s = h[0]
yield value
value = next()
s[0] = key(value)
s[2] = value
_heapreplace(h, s)
except StopIteration:
_heappop(h)
if h:
key_value, order, value, next = h[0]
yield value
yield from next.__self__
# Algorithm notes for nlargest() and nsmallest()
# ==============================================
#
# Make a single pass over the data while keeping the k most extreme values
# in a heap. Memory consumption is limited to keeping k values in a list.
#
# Measured performance for random inputs:
#
# number of comparisons
# n inputs k-extreme values (average of 5 trials) % more than min()
# ------------- ---------------- --------------------- -----------------
# 1,000 100 3,317 231.7%
# 10,000 100 14,046 40.5%
# 100,000 100 105,749 5.7%
# 1,000,000 100 1,007,751 0.8%
# 10,000,000 100 10,009,401 0.1%
#
# Theoretical number of comparisons for k smallest of n random inputs:
#
# Step Comparisons Action
# ---- -------------------------- ---------------------------
# 1 1.66 * k heapify the first k-inputs
# 2 n - k compare remaining elements to top of heap
# 3 k * (1 + lg2(k)) * ln(n/k) replace the topmost value on the heap
# 4 k * lg2(k) - (k/2) final sort of the k most extreme values
#
# Combining and simplifying for a rough estimate gives:
#
# comparisons = n + k * (log(k, 2) * log(n/k) + log(k, 2) + log(n/k))
#
# Computing the number of comparisons for step 3:
# -----------------------------------------------
# * For the i-th new value from the iterable, the probability of being in the
# k most extreme values is k/i. For example, the probability of the 101st
# value seen being in the 100 most extreme values is 100/101.
# * If the value is a new extreme value, the cost of inserting it into the
# heap is 1 + log(k, 2).
# * The probability times the cost gives:
# (k/i) * (1 + log(k, 2))
# * Summing across the remaining n-k elements gives:
# sum((k/i) * (1 + log(k, 2)) for i in range(k+1, n+1))
# * This reduces to:
# (H(n) - H(k)) * k * (1 + log(k, 2))
# * Where H(n) is the n-th harmonic number estimated by:
# gamma = 0.5772156649
# H(n) = log(n, e) + gamma + 1 / (2 * n)
# http://en.wikipedia.org/wiki/Harmonic_series_(mathematics)#Rate_of_divergence
# * Substituting the H(n) formula:
# comparisons = k * (1 + log(k, 2)) * (log(n/k, e) + (1/n - 1/k) / 2)
#
# Worst-case for step 3:
# ----------------------
# In the worst case, the input data is reverse-sorted so that every new element
# must be inserted in the heap:
#
# comparisons = 1.66 * k + log(k, 2) * (n - k)
#
# Alternative Algorithms
# ----------------------
# Other algorithms were not used because they:
# 1) Took much more auxiliary memory,
# 2) Made multiple passes over the data.
# 3) Made more comparisons in common cases (small k, large n, semi-random input).
# See the more detailed comparison of approach at:
# http://code.activestate.com/recipes/577573-compare-algorithms-for-heapqsmallest
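#
# A quick sanity check of the rough estimate above, reading log(k, 2) as a
# base-2 log and log(n/k) as natural (matching the step-3 derivation): for
# n = 10,000 and k = 100, log2(100) ~= 6.64 and ln(100) ~= 4.61, giving
# comparisons ~= 10000 + 100 * (6.64 * 4.61 + 6.64 + 4.61) ~= 14,200,
# close to the 14,046 measured in the table above.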
def nsmallest(n, iterable, key=None):
"""Find the n smallest elements in a dataset.
Equivalent to: sorted(iterable, key=key)[:n]
"""
# Short-cut for n==1 is to use min()
if n == 1:
it = iter(iterable)
sentinel = object()
if key is None:
result = min(it, default=sentinel)
else:
result = min(it, default=sentinel, key=key)
return [] if result is sentinel else [result]
# When n>=size, it's faster to use sorted()
try:
size = len(iterable)
except (TypeError, AttributeError):
pass
else:
if n >= size:
return sorted(iterable, key=key)[:n]
# When key is None, use simpler decoration
if key is None:
it = iter(iterable)
# put the range(n) first so that zip() doesn't
# consume one too many elements from the iterator
result = [(elem, i) for i, elem in zip(range(n), it)]
if not result:
return result
_heapify_max(result)
top = result[0][0]
order = n
_heapreplace = _heapreplace_max
for elem in it:
if elem < top:
_heapreplace(result, (elem, order))
top = result[0][0]
order += 1
result.sort()
return [r[0] for r in result]
# General case, slowest method
it = iter(iterable)
result = [(key(elem), i, elem) for i, elem in zip(range(n), it)]
if not result:
return result
_heapify_max(result)
top = result[0][0]
order = n
_heapreplace = _heapreplace_max
for elem in it:
k = key(elem)
if k < top:
_heapreplace(result, (k, order, elem))
top = result[0][0]
order += 1
result.sort()
return [r[2] for r in result]
def nlargest(n, iterable, key=None):
"""Find the n largest elements in a dataset.
Equivalent to: sorted(iterable, key=key, reverse=True)[:n]
"""
# Short-cut for n==1 is to use max()
if n == 1:
it = iter(iterable)
sentinel = object()
if key is None:
result = max(it, default=sentinel)
else:
result = max(it, default=sentinel, key=key)
return [] if result is sentinel else [result]
# When n>=size, it's faster to use sorted()
try:
size = len(iterable)
except (TypeError, AttributeError):
pass
else:
if n >= size:
return sorted(iterable, key=key, reverse=True)[:n]
# When key is None, use simpler decoration
if key is None:
it = iter(iterable)
result = [(elem, i) for i, elem in zip(range(0, -n, -1), it)]
if not result:
return result
heapify(result)
top = result[0][0]
order = -n
_heapreplace = heapreplace
for elem in it:
if top < elem:
_heapreplace(result, (elem, order))
top = result[0][0]
order -= 1
result.sort(reverse=True)
return [r[0] for r in result]
# General case, slowest method
it = iter(iterable)
result = [(key(elem), i, elem) for i, elem in zip(range(0, -n, -1), it)]
if not result:
return result
heapify(result)
top = result[0][0]
order = -n
_heapreplace = heapreplace
for elem in it:
k = key(elem)
if top < k:
_heapreplace(result, (k, order, elem))
top = result[0][0]
order -= 1
result.sort(reverse=True)
return [r[2] for r in result]
# If available, use C implementation
try:
from _heapq import *
except ImportError:
pass
try:
from _heapq import _heapreplace_max
except ImportError:
pass
try:
from _heapq import _heapify_max
except ImportError:
pass
try:
from _heapq import _heappop_max
except ImportError:
pass
if __name__ == "__main__":
import doctest
print(doctest.testmod())
| gpl-3.0 |
schristakidis/p2ner | p2ner/components/overlay/completeclient/completeclient/messages/validationmessages.py | 1 | 2128 | # -*- coding: utf-8 -*-
# Copyright 2012 Loris Corazza, Sakis Christakidis
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from p2ner.base.ControlMessage import ControlMessage, trap_sent,probe_all,BaseControlMessage
from p2ner.base.Consts import MessageCodes as MSG
from construct import Container
class ValidateNeighboursMessage(ControlMessage):
type = "subsidmessage"
code = MSG.VALIDATE_NEIGHS_SUB
ack = True
def trigger(self, message):
if self.stream.id != message.streamid or not self.subOverlay.checkTriggerMessage(message.superOverlay,message.interOverlay):
return False
return True
def action(self, message, peer):
self.subOverlay.ansValidateNeighs(peer)
@classmethod
def send(cls, sid,sover,iover, peer, out):
d=out.send(cls, Container(streamid = sid, superOverlay=sover, interOverlay=iover), peer)
d.addErrback(trap_sent)
return d
class ReplyValidateNeighboursMessage(ControlMessage):
type='sublockmessage'
code = MSG.REPLY_VALIDATE_NEIGHS_SUB
ack=True
def trigger(self, message):
if self.stream.id != message.streamid or not self.subOverlay.checkTriggerMessage(message.superOverlay,message.interOverlay):
return False
return True
def action(self,message,peer):
self.subOverlay.checkValidateNeighs(message.lock,peer)
return
@classmethod
def send(cls, sid, sover, iover, ans , peer, out):
return out.send(cls, Container(streamid=sid, superOverlay=sover, interOverlay=iover, swapid=0, lock=ans), peer).addErrback(trap_sent)
| apache-2.0 |
wonder-sk/inasafe | safe/impact_functions/volcanic/volcano_point_building/parameter_definitions.py | 10 | 2338 | # coding=utf-8
"""InaSAFE Disaster risk tool by Australian Aid - Parameter definition for
Flood Vector on Building QGIS IF
Contact : [email protected]
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
from safe_extras.parameters.boolean_parameter import BooleanParameter
from safe_extras.parameters.string_parameter import StringParameter
def target_field():
"""Generator for the flooded target field parameter."""
field = StringParameter()
field.name = 'Target Field'
field.is_required = True
field.help_text = (
'This field of the impact layer marks inundated roads with the '
'value \'1\'')
field.description = (
'This field of the impact layer marks inundated roads with the '
'value \'1\'. This is the longer description of this parameter.')
field.value = 'INUNDATED' # default value
return field
def affected_field():
""""Generator for selection of affected field parameter."""
field = StringParameter()
field.name = 'Affected Field'
field.is_required = True
field.help_text = (
'This field of the hazard layer contains information about inundated '
'areas')
field.description = (
'This field of the hazard layer contains information about inundated '
'areas. This is the longer description of this parameter.')
field.value = 'affected' # default value
return field
def affected_value():
"""Generator for parameter stating what values constitute 'affected'."""
field = StringParameter()
field.name = 'Affected Value'
field.is_required = True
field.help_text = (
'This value in \'affected_field\' of the hazard layer marks the areas '
'as inundated')
field.description = (
'This value in \'affected_field\' of the hazard layer marks the areas '
'as inundated. This is the longer description of this parameter.')
field.value = '1' # default value
return field
def building_type_field():
"""Generator for the building type field parameter."""
field = BooleanParameter()
field.name = 'Building Type Field'
field.is_required = True
field.value = True
return field
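# A minimal sketch of how these generators would typically be composed
# (assumed usage, not shown in this module): an impact function exposes
# them as its configurable parameters list, e.g.
#
#   parameters = [target_field(), affected_field(),
#                 affected_value(), building_type_field()]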
| gpl-3.0 |
sansna/PythonWidgets.py | lib3/web/client.py | 1 | 2235 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Author: user
# Date : 2021 Jun 27 10:16:32 PM
import os
import sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "../../")))
import time
import ujson
import datetime
import requests
from lib3.decorator.safe_run import safe_run_wrap
# App Config
# XXX: https://stackoverflow.com/questions/3536620/how-to-change-a-module-variable-from-another-module
#if __name__ == "__main__":
# import config.base
# if not config.base.Configured:
# config.base.Configured = True
# config.base.App = "client"
# config.base.Env = config.base.ENV_PRODUCTION
now = int(time.time())
today = (now + 8*3600) // 86400 * 86400 - 8*3600  # midnight today in UTC+8; floor division keeps an int timestamp
dayts = 86400
hourts = 3600
mints = 60
yesterday = today - dayts
nowdate = datetime.datetime.fromtimestamp(now)
Year = nowdate.year
Month = nowdate.month
Day = nowdate.day
BeginOfCurrentMonth = datetime.date(Year, Month, 1)
BeginOfLastMonth = (BeginOfCurrentMonth - datetime.timedelta(1)).replace(day=1)
BeginOfCurrentYear = BeginOfCurrentMonth.replace(month=1)
BeginOfLastYear = (BeginOfCurrentYear - datetime.timedelta(1)).replace(month=1, day=1)
BeginOfCurrentMonth = int(time.mktime(BeginOfCurrentMonth.timetuple()))
BeginOfLastMonth = int(time.mktime(BeginOfLastMonth.timetuple()))
BeginOfCurrentYear = int(time.mktime(BeginOfCurrentYear.timetuple()))
BeginOfLastYear = int(time.mktime(BeginOfLastYear.timetuple()))
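# Worked example of the boundary arithmetic above (illustrative date): if
# `now` falls on 2021-06-27, BeginOfCurrentMonth is the timestamp of
# 2021-06-01, BeginOfLastMonth of 2021-05-01, BeginOfCurrentYear of
# 2021-01-01 and BeginOfLastYear of 2020-01-01, all in the local timezone.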
def YMD(ts):
return time.strftime("%Y%m%d", time.localtime(ts))
def YM(ts):
return time.strftime("%Y%m", time.localtime(ts))
def DAY(ts):
return time.strftime("%d", time.localtime(ts))
session = requests.session()
@safe_run_wrap
def Get(url="http://www.baidu.com", json={}, headers={}):
"""
Usage:
ret = Get(url, json, headers)
Returns the response body (bytes).
"""
resp = session.get(url, json=json, headers=headers)
return resp.content
@safe_run_wrap
def Post(url="http://www.baidu.com", json={}, headers={}):
"""
Usage:
ret = Post(url, json, headers)
Returns the response body (bytes).
"""
resp = session.post(url, json=json, headers=headers)
return resp.content
def main():
a = Get()
print(a)
#a = Post()
#print(a)
if __name__ == "__main__":
main()
| lgpl-3.0 |
technologiescollege/s2a_fr | s2a/Python/Lib/test/test_deque.py | 60 | 25329 | from collections import deque
import unittest
from test import test_support, seq_tests
import gc
import weakref
import copy
import cPickle as pickle
import random
import struct
BIG = 100000
def fail():
raise SyntaxError
yield 1
class BadCmp:
def __eq__(self, other):
raise RuntimeError
class MutateCmp:
def __init__(self, deque, result):
self.deque = deque
self.result = result
def __eq__(self, other):
self.deque.clear()
return self.result
class TestBasic(unittest.TestCase):
def test_basics(self):
d = deque(xrange(-5125, -5000))
d.__init__(xrange(200))
for i in xrange(200, 400):
d.append(i)
for i in reversed(xrange(-200, 0)):
d.appendleft(i)
self.assertEqual(list(d), range(-200, 400))
self.assertEqual(len(d), 600)
left = [d.popleft() for i in xrange(250)]
self.assertEqual(left, range(-200, 50))
self.assertEqual(list(d), range(50, 400))
right = [d.pop() for i in xrange(250)]
right.reverse()
self.assertEqual(right, range(150, 400))
self.assertEqual(list(d), range(50, 150))
def test_maxlen(self):
self.assertRaises(ValueError, deque, 'abc', -1)
self.assertRaises(ValueError, deque, 'abc', -2)
it = iter(range(10))
d = deque(it, maxlen=3)
self.assertEqual(list(it), [])
self.assertEqual(repr(d), 'deque([7, 8, 9], maxlen=3)')
self.assertEqual(list(d), range(7, 10))
self.assertEqual(d, deque(range(10), 3))
d.append(10)
self.assertEqual(list(d), range(8, 11))
d.appendleft(7)
self.assertEqual(list(d), range(7, 10))
d.extend([10, 11])
self.assertEqual(list(d), range(9, 12))
d.extendleft([8, 7])
self.assertEqual(list(d), range(7, 10))
d = deque(xrange(200), maxlen=10)
d.append(d)
test_support.unlink(test_support.TESTFN)
fo = open(test_support.TESTFN, "wb")
try:
print >> fo, d,
fo.close()
fo = open(test_support.TESTFN, "rb")
self.assertEqual(fo.read(), repr(d))
finally:
fo.close()
test_support.unlink(test_support.TESTFN)
d = deque(range(10), maxlen=None)
self.assertEqual(repr(d), 'deque([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])')
fo = open(test_support.TESTFN, "wb")
try:
print >> fo, d,
fo.close()
fo = open(test_support.TESTFN, "rb")
self.assertEqual(fo.read(), repr(d))
finally:
fo.close()
test_support.unlink(test_support.TESTFN)
def test_maxlen_zero(self):
it = iter(range(100))
deque(it, maxlen=0)
self.assertEqual(list(it), [])
it = iter(range(100))
d = deque(maxlen=0)
d.extend(it)
self.assertEqual(list(it), [])
it = iter(range(100))
d = deque(maxlen=0)
d.extendleft(it)
self.assertEqual(list(it), [])
def test_maxlen_attribute(self):
self.assertEqual(deque().maxlen, None)
self.assertEqual(deque('abc').maxlen, None)
self.assertEqual(deque('abc', maxlen=4).maxlen, 4)
self.assertEqual(deque('abc', maxlen=2).maxlen, 2)
self.assertEqual(deque('abc', maxlen=0).maxlen, 0)
with self.assertRaises(AttributeError):
d = deque('abc')
d.maxlen = 10
def test_count(self):
for s in ('', 'abracadabra', 'simsalabim'*500+'abc'):
s = list(s)
d = deque(s)
for letter in 'abcdefghijklmnopqrstuvwxyz':
self.assertEqual(s.count(letter), d.count(letter), (s, d, letter))
self.assertRaises(TypeError, d.count) # too few args
self.assertRaises(TypeError, d.count, 1, 2) # too many args
class BadCompare:
def __eq__(self, other):
raise ArithmeticError
d = deque([1, 2, BadCompare(), 3])
self.assertRaises(ArithmeticError, d.count, 2)
d = deque([1, 2, 3])
self.assertRaises(ArithmeticError, d.count, BadCompare())
class MutatingCompare:
def __eq__(self, other):
self.d.pop()
return True
m = MutatingCompare()
d = deque([1, 2, 3, m, 4, 5])
m.d = d
self.assertRaises(RuntimeError, d.count, 3)
# Test issue #11004: block advance failed after rotation aligned
# elements on the right side of a block.
d = deque([None]*16)
for i in range(len(d)):
d.rotate(-1)
d.rotate(1)
self.assertEqual(d.count(1), 0)
self.assertEqual(d.count(None), 16)
def test_comparisons(self):
d = deque('xabc'); d.popleft()
for e in [d, deque('abc'), deque('ab'), deque(), list(d)]:
self.assertEqual(d==e, type(d)==type(e) and list(d)==list(e))
self.assertEqual(d!=e, not(type(d)==type(e) and list(d)==list(e)))
args = map(deque, ('', 'a', 'b', 'ab', 'ba', 'abc', 'xba', 'xabc', 'cba'))
for x in args:
for y in args:
self.assertEqual(x == y, list(x) == list(y), (x,y))
self.assertEqual(x != y, list(x) != list(y), (x,y))
self.assertEqual(x < y, list(x) < list(y), (x,y))
self.assertEqual(x <= y, list(x) <= list(y), (x,y))
self.assertEqual(x > y, list(x) > list(y), (x,y))
self.assertEqual(x >= y, list(x) >= list(y), (x,y))
self.assertEqual(cmp(x,y), cmp(list(x),list(y)), (x,y))
def test_extend(self):
d = deque('a')
self.assertRaises(TypeError, d.extend, 1)
d.extend('bcd')
self.assertEqual(list(d), list('abcd'))
d.extend(d)
self.assertEqual(list(d), list('abcdabcd'))
def test_iadd(self):
d = deque('a')
d += 'bcd'
self.assertEqual(list(d), list('abcd'))
d += d
self.assertEqual(list(d), list('abcdabcd'))
def test_extendleft(self):
d = deque('a')
self.assertRaises(TypeError, d.extendleft, 1)
d.extendleft('bcd')
self.assertEqual(list(d), list(reversed('abcd')))
d.extendleft(d)
self.assertEqual(list(d), list('abcddcba'))
d = deque()
d.extendleft(range(1000))
self.assertEqual(list(d), list(reversed(range(1000))))
self.assertRaises(SyntaxError, d.extendleft, fail())
def test_getitem(self):
n = 200
d = deque(xrange(n))
l = range(n)
for i in xrange(n):
d.popleft()
l.pop(0)
if random.random() < 0.5:
d.append(i)
l.append(i)
for j in xrange(1-len(l), len(l)):
assert d[j] == l[j]
d = deque('superman')
self.assertEqual(d[0], 's')
self.assertEqual(d[-1], 'n')
d = deque()
self.assertRaises(IndexError, d.__getitem__, 0)
self.assertRaises(IndexError, d.__getitem__, -1)
def test_setitem(self):
n = 200
d = deque(xrange(n))
for i in xrange(n):
d[i] = 10 * i
self.assertEqual(list(d), [10*i for i in xrange(n)])
l = list(d)
for i in xrange(1-n, 0, -1):
d[i] = 7*i
l[i] = 7*i
self.assertEqual(list(d), l)
def test_delitem(self):
n = 500 # O(n**2) test, don't make this too big
d = deque(xrange(n))
self.assertRaises(IndexError, d.__delitem__, -n-1)
self.assertRaises(IndexError, d.__delitem__, n)
for i in xrange(n):
self.assertEqual(len(d), n-i)
j = random.randrange(-len(d), len(d))
val = d[j]
self.assertIn(val, d)
del d[j]
self.assertNotIn(val, d)
self.assertEqual(len(d), 0)
def test_reverse(self):
n = 500 # O(n**2) test, don't make this too big
data = [random.random() for i in range(n)]
for i in range(n):
d = deque(data[:i])
r = d.reverse()
self.assertEqual(list(d), list(reversed(data[:i])))
self.assertIs(r, None)
d.reverse()
self.assertEqual(list(d), data[:i])
self.assertRaises(TypeError, d.reverse, 1) # Arity is zero
def test_rotate(self):
s = tuple('abcde')
n = len(s)
d = deque(s)
d.rotate(1) # verify rot(1)
self.assertEqual(''.join(d), 'eabcd')
d = deque(s)
d.rotate(-1) # verify rot(-1)
self.assertEqual(''.join(d), 'bcdea')
d.rotate() # check default to 1
self.assertEqual(tuple(d), s)
for i in xrange(n*3):
d = deque(s)
e = deque(d)
d.rotate(i) # check vs. rot(1) n times
for j in xrange(i):
e.rotate(1)
self.assertEqual(tuple(d), tuple(e))
d.rotate(-i) # check that it works in reverse
self.assertEqual(tuple(d), s)
e.rotate(n-i) # check that it wraps forward
self.assertEqual(tuple(e), s)
for i in xrange(n*3):
d = deque(s)
e = deque(d)
d.rotate(-i)
for j in xrange(i):
e.rotate(-1) # check vs. rot(-1) n times
self.assertEqual(tuple(d), tuple(e))
d.rotate(i) # check that it works in reverse
self.assertEqual(tuple(d), s)
e.rotate(i-n) # check that it wraps back around
self.assertEqual(tuple(e), s)
d = deque(s)
e = deque(s)
e.rotate(BIG+17) # verify on long series of rotates
dr = d.rotate
for i in xrange(BIG+17):
dr()
self.assertEqual(tuple(d), tuple(e))
self.assertRaises(TypeError, d.rotate, 'x') # Wrong arg type
self.assertRaises(TypeError, d.rotate, 1, 10) # Too many args
d = deque()
d.rotate() # rotate an empty deque
self.assertEqual(d, deque())
def test_len(self):
d = deque('ab')
self.assertEqual(len(d), 2)
d.popleft()
self.assertEqual(len(d), 1)
d.pop()
self.assertEqual(len(d), 0)
self.assertRaises(IndexError, d.pop)
self.assertEqual(len(d), 0)
d.append('c')
self.assertEqual(len(d), 1)
d.appendleft('d')
self.assertEqual(len(d), 2)
d.clear()
self.assertEqual(len(d), 0)
def test_underflow(self):
d = deque()
self.assertRaises(IndexError, d.pop)
self.assertRaises(IndexError, d.popleft)
def test_clear(self):
d = deque(xrange(100))
self.assertEqual(len(d), 100)
d.clear()
self.assertEqual(len(d), 0)
self.assertEqual(list(d), [])
d.clear() # clear an empty deque
self.assertEqual(list(d), [])
def test_remove(self):
d = deque('abcdefghcij')
d.remove('c')
self.assertEqual(d, deque('abdefghcij'))
d.remove('c')
self.assertEqual(d, deque('abdefghij'))
self.assertRaises(ValueError, d.remove, 'c')
self.assertEqual(d, deque('abdefghij'))
# Handle comparison errors
d = deque(['a', 'b', BadCmp(), 'c'])
e = deque(d)
self.assertRaises(RuntimeError, d.remove, 'c')
for x, y in zip(d, e):
# verify that original order and values are retained.
self.assertTrue(x is y)
# Handle evil mutator
for match in (True, False):
d = deque(['ab'])
d.extend([MutateCmp(d, match), 'c'])
self.assertRaises(IndexError, d.remove, 'c')
self.assertEqual(d, deque())
def test_repr(self):
d = deque(xrange(200))
e = eval(repr(d))
self.assertEqual(list(d), list(e))
d.append(d)
self.assertIn('...', repr(d))
def test_print(self):
d = deque(xrange(200))
d.append(d)
test_support.unlink(test_support.TESTFN)
fo = open(test_support.TESTFN, "wb")
try:
print >> fo, d,
fo.close()
fo = open(test_support.TESTFN, "rb")
self.assertEqual(fo.read(), repr(d))
finally:
fo.close()
test_support.unlink(test_support.TESTFN)
def test_init(self):
self.assertRaises(TypeError, deque, 'abc', 2, 3);
self.assertRaises(TypeError, deque, 1);
def test_hash(self):
self.assertRaises(TypeError, hash, deque('abc'))
def test_long_steadystate_queue_popleft(self):
for size in (0, 1, 2, 100, 1000):
d = deque(xrange(size))
append, pop = d.append, d.popleft
for i in xrange(size, BIG):
append(i)
x = pop()
if x != i - size:
self.assertEqual(x, i-size)
self.assertEqual(list(d), range(BIG-size, BIG))
def test_long_steadystate_queue_popright(self):
for size in (0, 1, 2, 100, 1000):
d = deque(reversed(xrange(size)))
append, pop = d.appendleft, d.pop
for i in xrange(size, BIG):
append(i)
x = pop()
if x != i - size:
self.assertEqual(x, i-size)
self.assertEqual(list(reversed(list(d))), range(BIG-size, BIG))
def test_big_queue_popleft(self):
d = deque()
append, pop = d.append, d.popleft
for i in xrange(BIG):
append(i)
for i in xrange(BIG):
x = pop()
if x != i:
self.assertEqual(x, i)
def test_big_queue_popright(self):
d = deque()
append, pop = d.appendleft, d.pop
for i in xrange(BIG):
append(i)
for i in xrange(BIG):
x = pop()
if x != i:
self.assertEqual(x, i)
def test_big_stack_right(self):
d = deque()
append, pop = d.append, d.pop
for i in xrange(BIG):
append(i)
for i in reversed(xrange(BIG)):
x = pop()
if x != i:
self.assertEqual(x, i)
self.assertEqual(len(d), 0)
def test_big_stack_left(self):
d = deque()
append, pop = d.appendleft, d.popleft
for i in xrange(BIG):
append(i)
for i in reversed(xrange(BIG)):
x = pop()
if x != i:
self.assertEqual(x, i)
self.assertEqual(len(d), 0)
def test_roundtrip_iter_init(self):
d = deque(xrange(200))
e = deque(d)
self.assertNotEqual(id(d), id(e))
self.assertEqual(list(d), list(e))
def test_pickle(self):
d = deque(xrange(200))
for i in range(pickle.HIGHEST_PROTOCOL + 1):
s = pickle.dumps(d, i)
e = pickle.loads(s)
self.assertNotEqual(id(d), id(e))
self.assertEqual(list(d), list(e))
## def test_pickle_recursive(self):
## d = deque('abc')
## d.append(d)
## for i in range(pickle.HIGHEST_PROTOCOL + 1):
## e = pickle.loads(pickle.dumps(d, i))
## self.assertNotEqual(id(d), id(e))
## self.assertEqual(id(e), id(e[-1]))
def test_deepcopy(self):
mut = [10]
d = deque([mut])
e = copy.deepcopy(d)
self.assertEqual(list(d), list(e))
mut[0] = 11
self.assertNotEqual(id(d), id(e))
self.assertNotEqual(list(d), list(e))
def test_copy(self):
mut = [10]
d = deque([mut])
e = copy.copy(d)
self.assertEqual(list(d), list(e))
mut[0] = 11
self.assertNotEqual(id(d), id(e))
self.assertEqual(list(d), list(e))
def test_reversed(self):
for s in ('abcd', xrange(2000)):
self.assertEqual(list(reversed(deque(s))), list(reversed(s)))
def test_gc_doesnt_blowup(self):
import gc
# This used to assert-fail in deque_traverse() under a debug
# build, or run wild with a NULL pointer in a release build.
d = deque()
for i in xrange(100):
d.append(1)
gc.collect()
def test_container_iterator(self):
# Bug #3680: tp_traverse was not implemented for deque iterator objects
class C(object):
pass
for i in range(2):
obj = C()
ref = weakref.ref(obj)
if i == 0:
container = deque([obj, 1])
else:
container = reversed(deque([obj, 1]))
obj.x = iter(container)
del obj, container
gc.collect()
self.assertTrue(ref() is None, "Cycle was not collected")
check_sizeof = test_support.check_sizeof
@test_support.cpython_only
def test_sizeof(self):
BLOCKLEN = 62
basesize = test_support.calcobjsize('2P4PlP')
blocksize = struct.calcsize('2P%dP' % BLOCKLEN)
self.assertEqual(object.__sizeof__(deque()), basesize)
check = self.check_sizeof
check(deque(), basesize + blocksize)
check(deque('a'), basesize + blocksize)
check(deque('a' * (BLOCKLEN // 2)), basesize + blocksize)
check(deque('a' * (BLOCKLEN // 2 + 1)), basesize + 2 * blocksize)
check(deque('a' * (42 * BLOCKLEN)), basesize + 43 * blocksize)
class TestVariousIteratorArgs(unittest.TestCase):
def test_constructor(self):
for s in ("123", "", range(1000), ('do', 1.2), xrange(2000,2200,5)):
for g in (seq_tests.Sequence, seq_tests.IterFunc,
seq_tests.IterGen, seq_tests.IterFuncStop,
seq_tests.itermulti, seq_tests.iterfunc):
self.assertEqual(list(deque(g(s))), list(g(s)))
self.assertRaises(TypeError, deque, seq_tests.IterNextOnly(s))
self.assertRaises(TypeError, deque, seq_tests.IterNoNext(s))
self.assertRaises(ZeroDivisionError, deque, seq_tests.IterGenExc(s))
def test_iter_with_altered_data(self):
d = deque('abcdefg')
it = iter(d)
d.pop()
self.assertRaises(RuntimeError, it.next)
def test_runtime_error_on_empty_deque(self):
d = deque()
it = iter(d)
d.append(10)
self.assertRaises(RuntimeError, it.next)
class Deque(deque):
pass
class DequeWithBadIter(deque):
def __iter__(self):
raise TypeError
class TestSubclass(unittest.TestCase):
def test_basics(self):
d = Deque(xrange(25))
d.__init__(xrange(200))
for i in xrange(200, 400):
d.append(i)
for i in reversed(xrange(-200, 0)):
d.appendleft(i)
self.assertEqual(list(d), range(-200, 400))
self.assertEqual(len(d), 600)
left = [d.popleft() for i in xrange(250)]
self.assertEqual(left, range(-200, 50))
self.assertEqual(list(d), range(50, 400))
right = [d.pop() for i in xrange(250)]
right.reverse()
self.assertEqual(right, range(150, 400))
self.assertEqual(list(d), range(50, 150))
d.clear()
self.assertEqual(len(d), 0)
def test_copy_pickle(self):
d = Deque('abc')
e = d.__copy__()
self.assertEqual(type(d), type(e))
self.assertEqual(list(d), list(e))
e = Deque(d)
self.assertEqual(type(d), type(e))
self.assertEqual(list(d), list(e))
s = pickle.dumps(d)
e = pickle.loads(s)
self.assertNotEqual(id(d), id(e))
self.assertEqual(type(d), type(e))
self.assertEqual(list(d), list(e))
d = Deque('abcde', maxlen=4)
e = d.__copy__()
self.assertEqual(type(d), type(e))
self.assertEqual(list(d), list(e))
e = Deque(d)
self.assertEqual(type(d), type(e))
self.assertEqual(list(d), list(e))
s = pickle.dumps(d)
e = pickle.loads(s)
self.assertNotEqual(id(d), id(e))
self.assertEqual(type(d), type(e))
self.assertEqual(list(d), list(e))
## def test_pickle(self):
## d = Deque('abc')
## d.append(d)
##
## e = pickle.loads(pickle.dumps(d))
## self.assertNotEqual(id(d), id(e))
## self.assertEqual(type(d), type(e))
## dd = d.pop()
## ee = e.pop()
## self.assertEqual(id(e), id(ee))
## self.assertEqual(d, e)
##
## d.x = d
## e = pickle.loads(pickle.dumps(d))
## self.assertEqual(id(e), id(e.x))
##
## d = DequeWithBadIter('abc')
## self.assertRaises(TypeError, pickle.dumps, d)
def test_weakref(self):
d = deque('gallahad')
p = weakref.proxy(d)
self.assertEqual(str(p), str(d))
d = None
self.assertRaises(ReferenceError, str, p)
def test_strange_subclass(self):
class X(deque):
def __iter__(self):
return iter([])
d1 = X([1,2,3])
d2 = X([4,5,6])
d1 == d2 # not clear if this is supposed to be True or False,
# but it used to give a SystemError
class SubclassWithKwargs(deque):
def __init__(self, newarg=1):
deque.__init__(self)
class TestSubclassWithKwargs(unittest.TestCase):
def test_subclass_with_kwargs(self):
# SF bug #1486663 -- this used to erroneously raise a TypeError
SubclassWithKwargs(newarg=1)
#==============================================================================
libreftest = """
Example from the Library Reference: Doc/lib/libcollections.tex
>>> from collections import deque
>>> d = deque('ghi') # make a new deque with three items
>>> for elem in d: # iterate over the deque's elements
... print elem.upper()
G
H
I
>>> d.append('j') # add a new entry to the right side
>>> d.appendleft('f') # add a new entry to the left side
>>> d # show the representation of the deque
deque(['f', 'g', 'h', 'i', 'j'])
>>> d.pop() # return and remove the rightmost item
'j'
>>> d.popleft() # return and remove the leftmost item
'f'
>>> list(d) # list the contents of the deque
['g', 'h', 'i']
>>> d[0] # peek at leftmost item
'g'
>>> d[-1] # peek at rightmost item
'i'
>>> list(reversed(d)) # list the contents of a deque in reverse
['i', 'h', 'g']
>>> 'h' in d # search the deque
True
>>> d.extend('jkl') # add multiple elements at once
>>> d
deque(['g', 'h', 'i', 'j', 'k', 'l'])
>>> d.rotate(1) # right rotation
>>> d
deque(['l', 'g', 'h', 'i', 'j', 'k'])
>>> d.rotate(-1) # left rotation
>>> d
deque(['g', 'h', 'i', 'j', 'k', 'l'])
>>> deque(reversed(d)) # make a new deque in reverse order
deque(['l', 'k', 'j', 'i', 'h', 'g'])
>>> d.clear() # empty the deque
>>> d.pop() # cannot pop from an empty deque
Traceback (most recent call last):
File "<pyshell#6>", line 1, in -toplevel-
d.pop()
IndexError: pop from an empty deque
>>> d.extendleft('abc') # extendleft() reverses the input order
>>> d
deque(['c', 'b', 'a'])
>>> def delete_nth(d, n):
... d.rotate(-n)
... d.popleft()
... d.rotate(n)
...
>>> d = deque('abcdef')
>>> delete_nth(d, 2) # remove the entry at d[2]
>>> d
deque(['a', 'b', 'd', 'e', 'f'])
>>> def roundrobin(*iterables):
... pending = deque(iter(i) for i in iterables)
... while pending:
... task = pending.popleft()
... try:
... yield task.next()
... except StopIteration:
... continue
... pending.append(task)
...
>>> for value in roundrobin('abc', 'd', 'efgh'):
... print value
...
a
d
e
b
f
c
g
h
>>> def maketree(iterable):
... d = deque(iterable)
... while len(d) > 1:
... pair = [d.popleft(), d.popleft()]
... d.append(pair)
... return list(d)
...
>>> print maketree('abcdefgh')
[[[['a', 'b'], ['c', 'd']], [['e', 'f'], ['g', 'h']]]]
"""
#==============================================================================
__test__ = {'libreftest' : libreftest}
def test_main(verbose=None):
import sys
test_classes = (
TestBasic,
TestVariousIteratorArgs,
TestSubclass,
TestSubclassWithKwargs,
)
test_support.run_unittest(*test_classes)
# verify reference counting
if verbose and hasattr(sys, "gettotalrefcount"):
import gc
counts = [None] * 5
for i in xrange(len(counts)):
test_support.run_unittest(*test_classes)
gc.collect()
counts[i] = sys.gettotalrefcount()
print counts
# doctests
from test import test_deque
test_support.run_doctest(test_deque, verbose)
if __name__ == "__main__":
test_main(verbose=True)
| gpl-3.0 |
joone/chromium-crosswalk | tools/telemetry/telemetry/core/memory_cache_http_server_unittest.py | 8 | 2630 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from telemetry.core import util
from telemetry.testing import tab_test_case
class MemoryCacheHTTPServerTest(tab_test_case.TabTestCase):
def setUp(self):
super(MemoryCacheHTTPServerTest, self).setUp()
self._test_filename = 'bear.webm'
_test_file = os.path.join(util.GetUnittestDataDir(), 'bear.webm')
self._test_file_size = os.stat(_test_file).st_size
def testBasicHostingAndRangeRequests(self):
self.Navigate('blank.html')
x = self._tab.EvaluateJavaScript('document.body.innerHTML')
x = x.strip()
# Test basic html hosting.
self.assertEquals(x, 'Hello world')
file_size = self._test_file_size
last_byte = file_size - 1
# Test byte range request: no end byte.
self.CheckContentHeaders('0-', '0-%d' % last_byte, file_size)
# Test byte range request: greater than zero start byte.
self.CheckContentHeaders('100-', '100-%d' % last_byte, file_size - 100)
# Test byte range request: explicit byte range.
self.CheckContentHeaders('2-500', '2-500', '499')
# Test byte range request: no start byte.
self.CheckContentHeaders('-228', '%d-%d' % (file_size - 228, last_byte),
'228')
# Test byte range request: end byte less than start byte.
self.CheckContentHeaders('100-5', '100-%d' % last_byte, file_size - 100)
def CheckContentHeaders(self, content_range_request, content_range_response,
content_length_response):
self._tab.ExecuteJavaScript("""
var loaded = false;
var xmlhttp = new XMLHttpRequest();
xmlhttp.onload = function(e) {
loaded = true;
};
// Avoid cached content by appending unique URL param.
xmlhttp.open('GET', "%s?t=" + Date.now(), true);
xmlhttp.setRequestHeader('Range', 'bytes=%s');
xmlhttp.send();
""" % (self.UrlOfUnittestFile(self._test_filename), content_range_request))
self._tab.WaitForJavaScriptExpression('loaded', 5)
content_range = self._tab.EvaluateJavaScript(
'xmlhttp.getResponseHeader("Content-Range");')
content_range_response = 'bytes %s/%d' % (content_range_response,
self._test_file_size)
self.assertEquals(content_range, content_range_response)
content_length = self._tab.EvaluateJavaScript(
'xmlhttp.getResponseHeader("Content-Length");')
self.assertEquals(content_length, str(content_length_response))
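# Editor's illustration (not part of the original test): for a 1000-byte file,
# a request header 'Range: bytes=100-' should produce
# 'Content-Range: bytes 100-999/1000' and 'Content-Length: 900', which is the
# arithmetic the CheckContentHeaders() calls above encode.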
| bsd-3-clause |
apixandru/intellij-community | python/lib/Lib/site-packages/django/db/models/sql/compiler.py | 71 | 43002 | from django.core.exceptions import FieldError
from django.db import connections
from django.db.backends.util import truncate_name
from django.db.models.sql.constants import *
from django.db.models.sql.datastructures import EmptyResultSet
from django.db.models.sql.expressions import SQLEvaluator
from django.db.models.sql.query import get_proxied_model, get_order_dir, \
select_related_descend, Query
class SQLCompiler(object):
def __init__(self, query, connection, using):
self.query = query
self.connection = connection
self.using = using
self.quote_cache = {}
def pre_sql_setup(self):
"""
Does any necessary class setup immediately prior to producing SQL. This
is for things that can't necessarily be done in __init__ because we
might not have all the pieces in place at that time.
"""
if not self.query.tables:
self.query.join((None, self.query.model._meta.db_table, None, None))
if (not self.query.select and self.query.default_cols and not
self.query.included_inherited_models):
self.query.setup_inherited_models()
if self.query.select_related and not self.query.related_select_cols:
self.fill_related_selections()
def quote_name_unless_alias(self, name):
"""
A wrapper around connection.ops.quote_name that doesn't quote aliases
for table names. This avoids problems with some SQL dialects that treat
quoted strings specially (e.g. PostgreSQL).
"""
if name in self.quote_cache:
return self.quote_cache[name]
if ((name in self.query.alias_map and name not in self.query.table_map) or
name in self.query.extra_select):
self.quote_cache[name] = name
return name
r = self.connection.ops.quote_name(name)
self.quote_cache[name] = r
return r
def as_sql(self, with_limits=True, with_col_aliases=False):
"""
Creates the SQL for this query. Returns the SQL string and list of
parameters.
If 'with_limits' is False, any limit/offset information is not included
in the query.
"""
if with_limits and self.query.low_mark == self.query.high_mark:
return '', ()
self.pre_sql_setup()
out_cols = self.get_columns(with_col_aliases)
ordering, ordering_group_by = self.get_ordering()
# This must come after 'select' and 'ordering' -- see docstring of
# get_from_clause() for details.
from_, f_params = self.get_from_clause()
qn = self.quote_name_unless_alias
where, w_params = self.query.where.as_sql(qn=qn, connection=self.connection)
having, h_params = self.query.having.as_sql(qn=qn, connection=self.connection)
params = []
for val in self.query.extra_select.itervalues():
params.extend(val[1])
result = ['SELECT']
if self.query.distinct:
result.append('DISTINCT')
result.append(', '.join(out_cols + self.query.ordering_aliases))
result.append('FROM')
result.extend(from_)
params.extend(f_params)
if where:
result.append('WHERE %s' % where)
params.extend(w_params)
grouping, gb_params = self.get_grouping()
if grouping:
if ordering:
# If the backend can't group by PK (i.e., any database
# other than MySQL), then any fields mentioned in the
                # ordering clause need to be in the group by clause.
if not self.connection.features.allows_group_by_pk:
for col, col_params in ordering_group_by:
if col not in grouping:
grouping.append(str(col))
gb_params.extend(col_params)
else:
ordering = self.connection.ops.force_no_ordering()
result.append('GROUP BY %s' % ', '.join(grouping))
params.extend(gb_params)
if having:
result.append('HAVING %s' % having)
params.extend(h_params)
if ordering:
result.append('ORDER BY %s' % ', '.join(ordering))
if with_limits:
if self.query.high_mark is not None:
result.append('LIMIT %d' % (self.query.high_mark - self.query.low_mark))
if self.query.low_mark:
if self.query.high_mark is None:
val = self.connection.ops.no_limit_value()
if val:
result.append('LIMIT %d' % val)
result.append('OFFSET %d' % self.query.low_mark)
return ' '.join(result), tuple(params)
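    # Editor's illustration (assumed, simplified): for a sliced queryset such
    # as Entry.objects.all()[5:15] on SQLite, this method would return roughly
    #   ('SELECT "app_entry"."id", ... FROM "app_entry" LIMIT 10 OFFSET 5', ())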
def as_nested_sql(self):
"""
Perform the same functionality as the as_sql() method, returning an
SQL string and parameters. However, the alias prefixes are bumped
beforehand (in a copy -- the current query isn't changed), and any
ordering is removed if the query is unsliced.
Used when nesting this query inside another.
"""
obj = self.query.clone()
if obj.low_mark == 0 and obj.high_mark is None:
# If there is no slicing in use, then we can safely drop all ordering
obj.clear_ordering(True)
obj.bump_prefix()
return obj.get_compiler(connection=self.connection).as_sql()
def get_columns(self, with_aliases=False):
"""
Returns the list of columns to use in the select statement. If no
columns have been specified, returns all columns relating to fields in
the model.
If 'with_aliases' is true, any column names that are duplicated
(without the table names) are given unique aliases. This is needed in
some cases to avoid ambiguity with nested queries.
"""
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
result = ['(%s) AS %s' % (col[0], qn2(alias)) for alias, col in self.query.extra_select.iteritems()]
aliases = set(self.query.extra_select.keys())
if with_aliases:
col_aliases = aliases.copy()
else:
col_aliases = set()
if self.query.select:
only_load = self.deferred_to_columns()
for col in self.query.select:
if isinstance(col, (list, tuple)):
alias, column = col
table = self.query.alias_map[alias][TABLE_NAME]
if table in only_load and col not in only_load[table]:
continue
r = '%s.%s' % (qn(alias), qn(column))
if with_aliases:
if col[1] in col_aliases:
c_alias = 'Col%d' % len(col_aliases)
result.append('%s AS %s' % (r, c_alias))
aliases.add(c_alias)
col_aliases.add(c_alias)
else:
result.append('%s AS %s' % (r, qn2(col[1])))
aliases.add(r)
col_aliases.add(col[1])
else:
result.append(r)
aliases.add(r)
col_aliases.add(col[1])
else:
result.append(col.as_sql(qn, self.connection))
if hasattr(col, 'alias'):
aliases.add(col.alias)
col_aliases.add(col.alias)
elif self.query.default_cols:
cols, new_aliases = self.get_default_columns(with_aliases,
col_aliases)
result.extend(cols)
aliases.update(new_aliases)
max_name_length = self.connection.ops.max_name_length()
result.extend([
'%s%s' % (
aggregate.as_sql(qn, self.connection),
alias is not None
and ' AS %s' % qn(truncate_name(alias, max_name_length))
or ''
)
for alias, aggregate in self.query.aggregate_select.items()
])
for table, col in self.query.related_select_cols:
r = '%s.%s' % (qn(table), qn(col))
if with_aliases and col in col_aliases:
c_alias = 'Col%d' % len(col_aliases)
result.append('%s AS %s' % (r, c_alias))
aliases.add(c_alias)
col_aliases.add(c_alias)
else:
result.append(r)
aliases.add(r)
col_aliases.add(col)
self._select_aliases = aliases
return result
def get_default_columns(self, with_aliases=False, col_aliases=None,
start_alias=None, opts=None, as_pairs=False, local_only=False):
"""
Computes the default columns for selecting every field in the base
model. Will sometimes be called to pull in related models (e.g. via
select_related), in which case "opts" and "start_alias" will be given
to provide a starting point for the traversal.
Returns a list of strings, quoted appropriately for use in SQL
directly, as well as a set of aliases used in the select statement (if
'as_pairs' is True, returns a list of (alias, col_name) pairs instead
of strings as the first component and None as the second component).
"""
result = []
if opts is None:
opts = self.query.model._meta
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
aliases = set()
only_load = self.deferred_to_columns()
        # Skip all proxy models back to the root proxied model
proxied_model = get_proxied_model(opts)
if start_alias:
seen = {None: start_alias}
for field, model in opts.get_fields_with_model():
if local_only and model is not None:
continue
if start_alias:
try:
alias = seen[model]
except KeyError:
if model is proxied_model:
alias = start_alias
else:
link_field = opts.get_ancestor_link(model)
alias = self.query.join((start_alias, model._meta.db_table,
link_field.column, model._meta.pk.column))
seen[model] = alias
else:
# If we're starting from the base model of the queryset, the
# aliases will have already been set up in pre_sql_setup(), so
# we can save time here.
alias = self.query.included_inherited_models[model]
table = self.query.alias_map[alias][TABLE_NAME]
if table in only_load and field.column not in only_load[table]:
continue
if as_pairs:
result.append((alias, field.column))
aliases.add(alias)
continue
if with_aliases and field.column in col_aliases:
c_alias = 'Col%d' % len(col_aliases)
result.append('%s.%s AS %s' % (qn(alias),
qn2(field.column), c_alias))
col_aliases.add(c_alias)
aliases.add(c_alias)
else:
r = '%s.%s' % (qn(alias), qn2(field.column))
result.append(r)
aliases.add(r)
if with_aliases:
col_aliases.add(field.column)
return result, aliases
def get_ordering(self):
"""
Returns a tuple containing a list representing the SQL elements in the
"order by" clause, and the list of SQL elements that need to be added
to the GROUP BY clause as a result of the ordering.
Also sets the ordering_aliases attribute on this instance to a list of
extra aliases needed in the select.
Determining the ordering SQL can change the tables we need to include,
so this should be run *before* get_from_clause().
"""
if self.query.extra_order_by:
ordering = self.query.extra_order_by
elif not self.query.default_ordering:
ordering = self.query.order_by
else:
ordering = self.query.order_by or self.query.model._meta.ordering
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
distinct = self.query.distinct
select_aliases = self._select_aliases
result = []
group_by = []
ordering_aliases = []
if self.query.standard_ordering:
asc, desc = ORDER_DIR['ASC']
else:
asc, desc = ORDER_DIR['DESC']
# It's possible, due to model inheritance, that normal usage might try
# to include the same field more than once in the ordering. We track
# the table/column pairs we use and discard any after the first use.
processed_pairs = set()
for field in ordering:
if field == '?':
result.append(self.connection.ops.random_function_sql())
continue
if isinstance(field, int):
if field < 0:
order = desc
field = -field
else:
order = asc
result.append('%s %s' % (field, order))
group_by.append((field, []))
continue
col, order = get_order_dir(field, asc)
if col in self.query.aggregate_select:
result.append('%s %s' % (col, order))
continue
if '.' in field:
# This came in through an extra(order_by=...) addition. Pass it
# on verbatim.
table, col = col.split('.', 1)
if (table, col) not in processed_pairs:
elt = '%s.%s' % (qn(table), col)
processed_pairs.add((table, col))
if not distinct or elt in select_aliases:
result.append('%s %s' % (elt, order))
group_by.append((elt, []))
elif get_order_dir(field)[0] not in self.query.extra_select:
# 'col' is of the form 'field' or 'field1__field2' or
# '-field1__field2__field', etc.
for table, col, order in self.find_ordering_name(field,
self.query.model._meta, default_order=asc):
if (table, col) not in processed_pairs:
elt = '%s.%s' % (qn(table), qn2(col))
processed_pairs.add((table, col))
if distinct and elt not in select_aliases:
ordering_aliases.append(elt)
result.append('%s %s' % (elt, order))
group_by.append((elt, []))
else:
elt = qn2(col)
if distinct and col not in select_aliases:
ordering_aliases.append(elt)
result.append('%s %s' % (elt, order))
group_by.append(self.query.extra_select[col])
self.query.ordering_aliases = ordering_aliases
return result, group_by
def find_ordering_name(self, name, opts, alias=None, default_order='ASC',
already_seen=None):
"""
Returns the table alias (the name might be ambiguous, the alias will
not be) and column name for ordering by the given 'name' parameter.
The 'name' is of the form 'field1__field2__...__fieldN'.
"""
name, order = get_order_dir(name, default_order)
pieces = name.split(LOOKUP_SEP)
if not alias:
alias = self.query.get_initial_alias()
field, target, opts, joins, last, extra = self.query.setup_joins(pieces,
opts, alias, False)
alias = joins[-1]
col = target.column
if not field.rel:
# To avoid inadvertent trimming of a necessary alias, use the
# refcount to show that we are referencing a non-relation field on
# the model.
self.query.ref_alias(alias)
# Must use left outer joins for nullable fields and their relations.
self.query.promote_alias_chain(joins,
self.query.alias_map[joins[0]][JOIN_TYPE] == self.query.LOUTER)
# If we get to this point and the field is a relation to another model,
# append the default ordering for that model.
if field.rel and len(joins) > 1 and opts.ordering:
# Firstly, avoid infinite loops.
if not already_seen:
already_seen = set()
join_tuple = tuple([self.query.alias_map[j][TABLE_NAME] for j in joins])
if join_tuple in already_seen:
raise FieldError('Infinite loop caused by ordering.')
already_seen.add(join_tuple)
results = []
for item in opts.ordering:
results.extend(self.find_ordering_name(item, opts, alias,
order, already_seen))
return results
if alias:
# We have to do the same "final join" optimisation as in
# add_filter, since the final column might not otherwise be part of
# the select set (so we can't order on it).
while 1:
join = self.query.alias_map[alias]
if col != join[RHS_JOIN_COL]:
break
self.query.unref_alias(alias)
alias = join[LHS_ALIAS]
col = join[LHS_JOIN_COL]
return [(alias, col, order)]
def get_from_clause(self):
"""
Returns a list of strings that are joined together to go after the
"FROM" part of the query, as well as a list any extra parameters that
need to be included. Sub-classes, can override this to create a
from-clause via a "select".
This should only be called after any SQL construction methods that
might change the tables we need. This means the select columns and
ordering must be done first.
"""
result = []
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
first = True
for alias in self.query.tables:
if not self.query.alias_refcount[alias]:
continue
try:
name, alias, join_type, lhs, lhs_col, col, nullable = self.query.alias_map[alias]
except KeyError:
# Extra tables can end up in self.tables, but not in the
# alias_map if they aren't in a join. That's OK. We skip them.
continue
alias_str = (alias != name and ' %s' % alias or '')
if join_type and not first:
result.append('%s %s%s ON (%s.%s = %s.%s)'
% (join_type, qn(name), alias_str, qn(lhs),
qn2(lhs_col), qn(alias), qn2(col)))
else:
connector = not first and ', ' or ''
result.append('%s%s%s' % (connector, qn(name), alias_str))
first = False
for t in self.query.extra_tables:
alias, unused = self.query.table_alias(t)
# Only add the alias if it's not already present (the table_alias()
            # call increments the refcount, so an alias refcount of one means
            # this is the only reference).
if alias not in self.query.alias_map or self.query.alias_refcount[alias] == 1:
connector = not first and ', ' or ''
result.append('%s%s' % (connector, qn(alias)))
first = False
return result, []
def get_grouping(self):
"""
Returns a tuple representing the SQL elements in the "group by" clause.
"""
qn = self.quote_name_unless_alias
result, params = [], []
if self.query.group_by is not None:
if (len(self.query.model._meta.fields) == len(self.query.select) and
self.connection.features.allows_group_by_pk):
self.query.group_by = [
(self.query.model._meta.db_table, self.query.model._meta.pk.column)
]
group_by = self.query.group_by or []
extra_selects = []
for extra_select, extra_params in self.query.extra_select.itervalues():
extra_selects.append(extra_select)
params.extend(extra_params)
cols = (group_by + self.query.select +
self.query.related_select_cols + extra_selects)
for col in cols:
if isinstance(col, (list, tuple)):
result.append('%s.%s' % (qn(col[0]), qn(col[1])))
elif hasattr(col, 'as_sql'):
result.append(col.as_sql(qn, self.connection))
else:
result.append('(%s)' % str(col))
return result, params
def fill_related_selections(self, opts=None, root_alias=None, cur_depth=1,
used=None, requested=None, restricted=None, nullable=None,
dupe_set=None, avoid_set=None):
"""
Fill in the information needed for a select_related query. The current
depth is measured as the number of connections away from the root model
(for example, cur_depth=1 means we are looking at models with direct
connections to the root model).
"""
if not restricted and self.query.max_depth and cur_depth > self.query.max_depth:
# We've recursed far enough; bail out.
return
if not opts:
opts = self.query.get_meta()
root_alias = self.query.get_initial_alias()
self.query.related_select_cols = []
self.query.related_select_fields = []
if not used:
used = set()
if dupe_set is None:
dupe_set = set()
if avoid_set is None:
avoid_set = set()
orig_dupe_set = dupe_set
# Setup for the case when only particular related fields should be
# included in the related selection.
if requested is None:
if isinstance(self.query.select_related, dict):
requested = self.query.select_related
restricted = True
else:
restricted = False
for f, model in opts.get_fields_with_model():
if not select_related_descend(f, restricted, requested):
continue
# The "avoid" set is aliases we want to avoid just for this
# particular branch of the recursion. They aren't permanently
# forbidden from reuse in the related selection tables (which is
# what "used" specifies).
avoid = avoid_set.copy()
dupe_set = orig_dupe_set.copy()
table = f.rel.to._meta.db_table
promote = nullable or f.null
if model:
int_opts = opts
alias = root_alias
alias_chain = []
for int_model in opts.get_base_chain(model):
                    # Proxy models have elements in the base chain with no
                    # parents; assign the new options object and skip to the
                    # next base in that case.
if not int_opts.parents[int_model]:
int_opts = int_model._meta
continue
lhs_col = int_opts.parents[int_model].column
dedupe = lhs_col in opts.duplicate_targets
if dedupe:
avoid.update(self.query.dupe_avoidance.get((id(opts), lhs_col),
()))
dupe_set.add((opts, lhs_col))
int_opts = int_model._meta
alias = self.query.join((alias, int_opts.db_table, lhs_col,
int_opts.pk.column), exclusions=used,
promote=promote)
alias_chain.append(alias)
for (dupe_opts, dupe_col) in dupe_set:
self.query.update_dupe_avoidance(dupe_opts, dupe_col, alias)
if self.query.alias_map[root_alias][JOIN_TYPE] == self.query.LOUTER:
self.query.promote_alias_chain(alias_chain, True)
else:
alias = root_alias
dedupe = f.column in opts.duplicate_targets
if dupe_set or dedupe:
avoid.update(self.query.dupe_avoidance.get((id(opts), f.column), ()))
if dedupe:
dupe_set.add((opts, f.column))
alias = self.query.join((alias, table, f.column,
f.rel.get_related_field().column),
exclusions=used.union(avoid), promote=promote)
used.add(alias)
columns, aliases = self.get_default_columns(start_alias=alias,
opts=f.rel.to._meta, as_pairs=True)
self.query.related_select_cols.extend(columns)
if self.query.alias_map[alias][JOIN_TYPE] == self.query.LOUTER:
self.query.promote_alias_chain(aliases, True)
self.query.related_select_fields.extend(f.rel.to._meta.fields)
if restricted:
next = requested.get(f.name, {})
else:
next = False
new_nullable = f.null or promote
for dupe_opts, dupe_col in dupe_set:
self.query.update_dupe_avoidance(dupe_opts, dupe_col, alias)
self.fill_related_selections(f.rel.to._meta, alias, cur_depth + 1,
used, next, restricted, new_nullable, dupe_set, avoid)
if restricted:
related_fields = [
(o.field, o.model)
for o in opts.get_all_related_objects()
if o.field.unique
]
for f, model in related_fields:
if not select_related_descend(f, restricted, requested, reverse=True):
continue
# The "avoid" set is aliases we want to avoid just for this
# particular branch of the recursion. They aren't permanently
# forbidden from reuse in the related selection tables (which is
# what "used" specifies).
avoid = avoid_set.copy()
dupe_set = orig_dupe_set.copy()
table = model._meta.db_table
int_opts = opts
alias = root_alias
alias_chain = []
chain = opts.get_base_chain(f.rel.to)
if chain is not None:
for int_model in chain:
                        # Proxy models have elements in the base chain with no
                        # parents; assign the new options object and skip to
                        # the next base in that case.
if not int_opts.parents[int_model]:
int_opts = int_model._meta
continue
lhs_col = int_opts.parents[int_model].column
dedupe = lhs_col in opts.duplicate_targets
if dedupe:
                        avoid.update(self.query.dupe_avoidance.get((id(opts), lhs_col),
                                     ()))
dupe_set.add((opts, lhs_col))
int_opts = int_model._meta
alias = self.query.join(
(alias, int_opts.db_table, lhs_col, int_opts.pk.column),
exclusions=used, promote=True, reuse=used
)
alias_chain.append(alias)
for dupe_opts, dupe_col in dupe_set:
self.query.update_dupe_avoidance(dupe_opts, dupe_col, alias)
dedupe = f.column in opts.duplicate_targets
if dupe_set or dedupe:
avoid.update(self.query.dupe_avoidance.get((id(opts), f.column), ()))
if dedupe:
dupe_set.add((opts, f.column))
alias = self.query.join(
(alias, table, f.rel.get_related_field().column, f.column),
exclusions=used.union(avoid),
promote=True
)
used.add(alias)
columns, aliases = self.get_default_columns(start_alias=alias,
opts=model._meta, as_pairs=True, local_only=True)
self.query.related_select_cols.extend(columns)
self.query.related_select_fields.extend(model._meta.fields)
next = requested.get(f.related_query_name(), {})
new_nullable = f.null or None
self.fill_related_selections(model._meta, table, cur_depth+1,
used, next, restricted, new_nullable)
def deferred_to_columns(self):
"""
Converts the self.deferred_loading data structure to mapping of table
names to sets of column names which are to be loaded. Returns the
dictionary.
"""
columns = {}
self.query.deferred_to_data(columns, self.query.deferred_to_columns_cb)
return columns
def results_iter(self):
"""
Returns an iterator over the results from executing this query.
"""
resolve_columns = hasattr(self, 'resolve_columns')
fields = None
has_aggregate_select = bool(self.query.aggregate_select)
for rows in self.execute_sql(MULTI):
for row in rows:
if resolve_columns:
if fields is None:
# We only set this up here because
# related_select_fields isn't populated until
# execute_sql() has been called.
if self.query.select_fields:
fields = self.query.select_fields + self.query.related_select_fields
else:
fields = self.query.model._meta.fields
# If the field was deferred, exclude it from being passed
# into `resolve_columns` because it wasn't selected.
only_load = self.deferred_to_columns()
if only_load:
db_table = self.query.model._meta.db_table
fields = [f for f in fields if db_table in only_load and
f.column in only_load[db_table]]
row = self.resolve_columns(row, fields)
if has_aggregate_select:
aggregate_start = len(self.query.extra_select.keys()) + len(self.query.select)
aggregate_end = aggregate_start + len(self.query.aggregate_select)
row = tuple(row[:aggregate_start]) + tuple([
self.query.resolve_aggregate(value, aggregate, self.connection)
for (alias, aggregate), value
in zip(self.query.aggregate_select.items(), row[aggregate_start:aggregate_end])
]) + tuple(row[aggregate_end:])
yield row
def execute_sql(self, result_type=MULTI):
"""
Run the query against the database and returns the result(s). The
return value is a single data item if result_type is SINGLE, or an
iterator over the results if the result_type is MULTI.
result_type is either MULTI (use fetchmany() to retrieve all rows),
SINGLE (only retrieve a single row), or None. In this last case, the
cursor is returned if any query is executed, since it's used by
        subclasses such as InsertQuery. It's possible, however, that no query
is needed, as the filters describe an empty set. In that case, None is
returned, to avoid any unnecessary database interaction.
"""
try:
sql, params = self.as_sql()
if not sql:
raise EmptyResultSet
except EmptyResultSet:
if result_type == MULTI:
return empty_iter()
else:
return
cursor = self.connection.cursor()
cursor.execute(sql, params)
if not result_type:
return cursor
if result_type == SINGLE:
if self.query.ordering_aliases:
return cursor.fetchone()[:-len(self.query.ordering_aliases)]
return cursor.fetchone()
# The MULTI case.
if self.query.ordering_aliases:
result = order_modified_iter(cursor, len(self.query.ordering_aliases),
self.connection.features.empty_fetchmany_value)
else:
result = iter((lambda: cursor.fetchmany(GET_ITERATOR_CHUNK_SIZE)),
self.connection.features.empty_fetchmany_value)
if not self.connection.features.can_use_chunked_reads:
# If we are using non-chunked reads, we return the same data
# structure as normally, but ensure it is all read into memory
# before going any further.
return list(result)
return result
class SQLInsertCompiler(SQLCompiler):
def placeholder(self, field, val):
if field is None:
# A field value of None means the value is raw.
return val
elif hasattr(field, 'get_placeholder'):
# Some fields (e.g. geo fields) need special munging before
# they can be inserted.
return field.get_placeholder(val, self.connection)
else:
# Return the common case for the placeholder
return '%s'
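    # Editor's note on the behavior above (derived from the code, with
    # CharField standing in for any field lacking a get_placeholder() hook):
    #   placeholder(None, 'NOW()')     -> 'NOW()'  (raw value passes through)
    #   placeholder(CharField(), 'x')  -> '%s'     (normal parameterization)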
def as_sql(self):
# We don't need quote_name_unless_alias() here, since these are all
# going to be column names (so we can avoid the extra overhead).
qn = self.connection.ops.quote_name
opts = self.query.model._meta
result = ['INSERT INTO %s' % qn(opts.db_table)]
result.append('(%s)' % ', '.join([qn(c) for c in self.query.columns]))
values = [self.placeholder(*v) for v in self.query.values]
result.append('VALUES (%s)' % ', '.join(values))
params = self.query.params
if self.return_id and self.connection.features.can_return_id_from_insert:
col = "%s.%s" % (qn(opts.db_table), qn(opts.pk.column))
r_fmt, r_params = self.connection.ops.return_insert_id()
result.append(r_fmt % col)
params = params + r_params
return ' '.join(result), params
def execute_sql(self, return_id=False):
self.return_id = return_id
cursor = super(SQLInsertCompiler, self).execute_sql(None)
if not (return_id and cursor):
return
if self.connection.features.can_return_id_from_insert:
return self.connection.ops.fetch_returned_insert_id(cursor)
return self.connection.ops.last_insert_id(cursor,
self.query.model._meta.db_table, self.query.model._meta.pk.column)
class SQLDeleteCompiler(SQLCompiler):
def as_sql(self):
"""
Creates the SQL for this query. Returns the SQL string and list of
parameters.
"""
assert len(self.query.tables) == 1, \
"Can only delete from one table at a time."
qn = self.quote_name_unless_alias
result = ['DELETE FROM %s' % qn(self.query.tables[0])]
where, params = self.query.where.as_sql(qn=qn, connection=self.connection)
result.append('WHERE %s' % where)
return ' '.join(result), tuple(params)
class SQLUpdateCompiler(SQLCompiler):
def as_sql(self):
"""
Creates the SQL for this query. Returns the SQL string and list of
parameters.
"""
from django.db.models.base import Model
self.pre_sql_setup()
if not self.query.values:
return '', ()
table = self.query.tables[0]
qn = self.quote_name_unless_alias
result = ['UPDATE %s' % qn(table)]
result.append('SET')
values, update_params = [], []
for field, model, val in self.query.values:
if hasattr(val, 'prepare_database_save'):
val = val.prepare_database_save(field)
else:
val = field.get_db_prep_save(val, connection=self.connection)
# Getting the placeholder for the field.
if hasattr(field, 'get_placeholder'):
placeholder = field.get_placeholder(val, self.connection)
else:
placeholder = '%s'
if hasattr(val, 'evaluate'):
val = SQLEvaluator(val, self.query, allow_joins=False)
name = field.column
if hasattr(val, 'as_sql'):
sql, params = val.as_sql(qn, self.connection)
values.append('%s = %s' % (qn(name), sql))
update_params.extend(params)
elif val is not None:
values.append('%s = %s' % (qn(name), placeholder))
update_params.append(val)
else:
values.append('%s = NULL' % qn(name))
if not values:
return '', ()
result.append(', '.join(values))
where, params = self.query.where.as_sql(qn=qn, connection=self.connection)
if where:
result.append('WHERE %s' % where)
return ' '.join(result), tuple(update_params + params)
def execute_sql(self, result_type):
"""
Execute the specified update. Returns the number of rows affected by
the primary update query. The "primary update query" is the first
non-empty query that is executed. Row counts for any subsequent,
related queries are not available.
"""
cursor = super(SQLUpdateCompiler, self).execute_sql(result_type)
rows = cursor and cursor.rowcount or 0
is_empty = cursor is None
del cursor
for query in self.query.get_related_updates():
aux_rows = query.get_compiler(self.using).execute_sql(result_type)
if is_empty:
rows = aux_rows
is_empty = False
return rows
def pre_sql_setup(self):
"""
If the update depends on results from other tables, we need to do some
munging of the "where" conditions to match the format required for
(portable) SQL updates. That is done here.
Further, if we are going to be running multiple updates, we pull out
the id values to update at this point so that they don't change as a
result of the progressive updates.
"""
self.query.select_related = False
self.query.clear_ordering(True)
super(SQLUpdateCompiler, self).pre_sql_setup()
count = self.query.count_active_tables()
if not self.query.related_updates and count == 1:
return
# We need to use a sub-select in the where clause to filter on things
# from other tables.
query = self.query.clone(klass=Query)
query.bump_prefix()
query.extra = {}
query.select = []
query.add_fields([query.model._meta.pk.name])
must_pre_select = count > 1 and not self.connection.features.update_can_self_select
# Now we adjust the current query: reset the where clause and get rid
# of all the tables we don't need (since they're in the sub-select).
self.query.where = self.query.where_class()
if self.query.related_updates or must_pre_select:
# Either we're using the idents in multiple update queries (so
# don't want them to change), or the db backend doesn't support
# selecting from the updating table (e.g. MySQL).
idents = []
for rows in query.get_compiler(self.using).execute_sql(MULTI):
idents.extend([r[0] for r in rows])
self.query.add_filter(('pk__in', idents))
self.query.related_ids = idents
else:
# The fast path. Filters and updates in one query.
self.query.add_filter(('pk__in', query))
for alias in self.query.tables[1:]:
self.query.alias_refcount[alias] = 0
class SQLAggregateCompiler(SQLCompiler):
def as_sql(self, qn=None):
"""
Creates the SQL for this query. Returns the SQL string and list of
parameters.
"""
if qn is None:
qn = self.quote_name_unless_alias
sql = ('SELECT %s FROM (%s) subquery' % (
', '.join([
aggregate.as_sql(qn, self.connection)
for aggregate in self.query.aggregate_select.values()
]),
self.query.subquery)
)
params = self.query.sub_params
return (sql, params)
class SQLDateCompiler(SQLCompiler):
def results_iter(self):
"""
Returns an iterator over the results from executing this query.
"""
resolve_columns = hasattr(self, 'resolve_columns')
if resolve_columns:
from django.db.models.fields import DateTimeField
fields = [DateTimeField()]
else:
from django.db.backends.util import typecast_timestamp
needs_string_cast = self.connection.features.needs_datetime_string_cast
offset = len(self.query.extra_select)
for rows in self.execute_sql(MULTI):
for row in rows:
date = row[offset]
if resolve_columns:
date = self.resolve_columns(row, fields)[offset]
elif needs_string_cast:
date = typecast_timestamp(str(date))
yield date
def empty_iter():
"""
Returns an iterator containing no results.
"""
yield iter([]).next()
def order_modified_iter(cursor, trim, sentinel):
"""
Yields blocks of rows from a cursor. We use this iterator in the special
case when extra output columns have been added to support ordering
requirements. We must trim those extra columns before anything else can use
the results, since they're only needed to make the SQL valid.
"""
for rows in iter((lambda: cursor.fetchmany(GET_ITERATOR_CHUNK_SIZE)),
sentinel):
yield [r[:-trim] for r in rows]
| apache-2.0 |
Vogeltak/pauselan | lib/python3.4/site-packages/sqlalchemy/testing/schema.py | 79 | 3446 | # testing/schema.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from . import exclusions
from .. import schema, event
from . import config
__all__ = 'Table', 'Column',
table_options = {}
def Table(*args, **kw):
"""A schema.Table wrapper/hook for dialect-specific tweaks."""
test_opts = dict([(k, kw.pop(k)) for k in list(kw)
if k.startswith('test_')])
kw.update(table_options)
if exclusions.against(config._current, 'mysql'):
if 'mysql_engine' not in kw and 'mysql_type' not in kw:
if 'test_needs_fk' in test_opts or 'test_needs_acid' in test_opts:
kw['mysql_engine'] = 'InnoDB'
else:
kw['mysql_engine'] = 'MyISAM'
# Apply some default cascading rules for self-referential foreign keys.
    # MySQL InnoDB has some issues around selecting self-refs too.
if exclusions.against(config._current, 'firebird'):
table_name = args[0]
unpack = (config.db.dialect.
identifier_preparer.unformat_identifiers)
# Only going after ForeignKeys in Columns. May need to
# expand to ForeignKeyConstraint too.
fks = [fk
for col in args if isinstance(col, schema.Column)
for fk in col.foreign_keys]
for fk in fks:
# root around in raw spec
ref = fk._colspec
if isinstance(ref, schema.Column):
name = ref.table.name
else:
# take just the table name: on FB there cannot be
# a schema, so the first element is always the
# table name, possibly followed by the field name
name = unpack(ref)[0]
if name == table_name:
if fk.ondelete is None:
fk.ondelete = 'CASCADE'
if fk.onupdate is None:
fk.onupdate = 'CASCADE'
return schema.Table(*args, **kw)
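# A hedged usage sketch (editor's addition; Integer and metadata are assumed to
# be supplied by the calling test suite, they are not defined in this module):
#
#   users = Table('users', metadata,
#                 Column('id', Integer, primary_key=True,
#                        test_needs_autoincrement=True),
#                 test_needs_fk=True)
#
# The test_* keywords are stripped into test_opts before the real
# schema.Table/schema.Column constructors run.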
def Column(*args, **kw):
"""A schema.Column wrapper/hook for dialect-specific tweaks."""
test_opts = dict([(k, kw.pop(k)) for k in list(kw)
if k.startswith('test_')])
if not config.requirements.foreign_key_ddl.enabled_for_config(config):
args = [arg for arg in args if not isinstance(arg, schema.ForeignKey)]
col = schema.Column(*args, **kw)
if 'test_needs_autoincrement' in test_opts and \
kw.get('primary_key', False):
# allow any test suite to pick up on this
col.info['test_needs_autoincrement'] = True
# hardcoded rule for firebird, oracle; this should
# be moved out
if exclusions.against(config._current, 'firebird', 'oracle'):
def add_seq(c, tbl):
c._init_items(
schema.Sequence(_truncate_name(
config.db.dialect, tbl.name + '_' + c.name + '_seq'),
optional=True)
)
event.listen(col, 'after_parent_attach', add_seq, propagate=True)
return col
def _truncate_name(dialect, name):
if len(name) > dialect.max_identifier_length:
return name[0:max(dialect.max_identifier_length - 6, 0)] + \
"_" + hex(hash(name) % 64)[2:]
else:
return name
| gpl-2.0 |
cloudnull/ansible | lib/ansible/module_utils/rax.py | 280 | 11974 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by
# Ansible still belong to the author of the module, and may assign their own
# license to the complete work.
#
# Copyright (c), Michael DeHaan <[email protected]>, 2012-2013
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os
import re
from uuid import UUID
FINAL_STATUSES = ('ACTIVE', 'ERROR')
VOLUME_STATUS = ('available', 'attaching', 'creating', 'deleting', 'in-use',
'error', 'error_deleting')
CLB_ALGORITHMS = ['RANDOM', 'LEAST_CONNECTIONS', 'ROUND_ROBIN',
'WEIGHTED_LEAST_CONNECTIONS', 'WEIGHTED_ROUND_ROBIN']
CLB_PROTOCOLS = ['DNS_TCP', 'DNS_UDP', 'FTP', 'HTTP', 'HTTPS', 'IMAPS',
'IMAPv4', 'LDAP', 'LDAPS', 'MYSQL', 'POP3', 'POP3S', 'SMTP',
'TCP', 'TCP_CLIENT_FIRST', 'UDP', 'UDP_STREAM', 'SFTP']
NON_CALLABLES = (basestring, bool, dict, int, list, type(None))
PUBLIC_NET_ID = "00000000-0000-0000-0000-000000000000"
SERVICE_NET_ID = "11111111-1111-1111-1111-111111111111"
def rax_slugify(value):
"""Prepend a key with rax_ and normalize the key name"""
return 'rax_%s' % (re.sub('[^\w-]', '_', value).lower().lstrip('_'))
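# Editor's example of the slugify step (derived from the regex above):
#   rax_slugify('accessIPv4')  ->  'rax_accessipv4'
# which is exactly how rax_to_dict() builds the rax_-prefixed server facts.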
def rax_clb_node_to_dict(obj):
"""Function to convert a CLB Node object to a dict"""
if not obj:
return {}
node = obj.to_dict()
node['id'] = obj.id
node['weight'] = obj.weight
return node
def rax_to_dict(obj, obj_type='standard'):
"""Generic function to convert a pyrax object to a dict
obj_type values:
standard
clb
server
"""
instance = {}
for key in dir(obj):
value = getattr(obj, key)
if obj_type == 'clb' and key == 'nodes':
instance[key] = []
for node in value:
instance[key].append(rax_clb_node_to_dict(node))
elif (isinstance(value, list) and len(value) > 0 and
not isinstance(value[0], NON_CALLABLES)):
instance[key] = []
for item in value:
instance[key].append(rax_to_dict(item))
elif (isinstance(value, NON_CALLABLES) and not key.startswith('_')):
if obj_type == 'server':
if key == 'image':
if not value:
instance['rax_boot_source'] = 'volume'
else:
instance['rax_boot_source'] = 'local'
key = rax_slugify(key)
instance[key] = value
if obj_type == 'server':
for attr in ['id', 'accessIPv4', 'name', 'status']:
instance[attr] = instance.get(rax_slugify(attr))
return instance
def rax_find_bootable_volume(module, rax_module, server, exit=True):
"""Find a servers bootable volume"""
cs = rax_module.cloudservers
cbs = rax_module.cloud_blockstorage
server_id = rax_module.utils.get_id(server)
volumes = cs.volumes.get_server_volumes(server_id)
bootable_volumes = []
for volume in volumes:
vol = cbs.get(volume)
if module.boolean(vol.bootable):
bootable_volumes.append(vol)
if not bootable_volumes:
if exit:
module.fail_json(msg='No bootable volumes could be found for '
'server %s' % server_id)
else:
return False
elif len(bootable_volumes) > 1:
if exit:
module.fail_json(msg='Multiple bootable volumes found for server '
'%s' % server_id)
else:
return False
return bootable_volumes[0]
def rax_find_image(module, rax_module, image, exit=True):
"""Find a server image by ID or Name"""
cs = rax_module.cloudservers
try:
UUID(image)
except ValueError:
try:
image = cs.images.find(human_id=image)
except(cs.exceptions.NotFound,
cs.exceptions.NoUniqueMatch):
try:
image = cs.images.find(name=image)
except (cs.exceptions.NotFound,
cs.exceptions.NoUniqueMatch):
if exit:
module.fail_json(msg='No matching image found (%s)' %
image)
else:
return False
return rax_module.utils.get_id(image)
def rax_find_volume(module, rax_module, name):
"""Find a Block storage volume by ID or name"""
cbs = rax_module.cloud_blockstorage
try:
UUID(name)
volume = cbs.get(name)
except ValueError:
try:
volume = cbs.find(name=name)
except rax_module.exc.NotFound:
volume = None
except Exception, e:
module.fail_json(msg='%s' % e)
return volume
def rax_find_network(module, rax_module, network):
"""Find a cloud network by ID or name"""
cnw = rax_module.cloud_networks
try:
UUID(network)
except ValueError:
if network.lower() == 'public':
return cnw.get_server_networks(PUBLIC_NET_ID)
elif network.lower() == 'private':
return cnw.get_server_networks(SERVICE_NET_ID)
else:
try:
network_obj = cnw.find_network_by_label(network)
except (rax_module.exceptions.NetworkNotFound,
rax_module.exceptions.NetworkLabelNotUnique):
module.fail_json(msg='No matching network found (%s)' %
network)
else:
return cnw.get_server_networks(network_obj)
else:
return cnw.get_server_networks(network)
def rax_find_server(module, rax_module, server):
"""Find a Cloud Server by ID or name"""
cs = rax_module.cloudservers
try:
UUID(server)
server = cs.servers.get(server)
except ValueError:
servers = cs.servers.list(search_opts=dict(name='^%s$' % server))
if not servers:
module.fail_json(msg='No Server was matched by name, '
'try using the Server ID instead')
if len(servers) > 1:
module.fail_json(msg='Multiple servers matched by name, '
'try using the Server ID instead')
# We made it this far, grab the first and hopefully only server
# in the list
server = servers[0]
return server
def rax_find_loadbalancer(module, rax_module, loadbalancer):
"""Find a Cloud Load Balancer by ID or name"""
clb = rax_module.cloud_loadbalancers
try:
found = clb.get(loadbalancer)
except:
found = []
for lb in clb.list():
if loadbalancer == lb.name:
found.append(lb)
if not found:
module.fail_json(msg='No loadbalancer was matched')
if len(found) > 1:
module.fail_json(msg='Multiple loadbalancers matched')
# We made it this far, grab the first and hopefully only item
# in the list
found = found[0]
return found
def rax_argument_spec():
"""Return standard base dictionary used for the argument_spec
argument in AnsibleModule
"""
return dict(
api_key=dict(type='str', aliases=['password'], no_log=True),
auth_endpoint=dict(type='str'),
credentials=dict(type='str', aliases=['creds_file']),
env=dict(type='str'),
identity_type=dict(type='str', default='rackspace'),
region=dict(type='str'),
tenant_id=dict(type='str'),
tenant_name=dict(type='str'),
username=dict(type='str'),
verify_ssl=dict(choices=BOOLEANS, type='bool'),
)
def rax_required_together():
"""Return the default list used for the required_together argument to
AnsibleModule"""
return [['api_key', 'username']]
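# A hedged sketch of how a module consumes these helpers (editor's addition;
# AnsibleModule comes from the embedding module at runtime, and the 'name'
# option and pyrax import are purely illustrative):
#
#   argument_spec = rax_argument_spec()
#   argument_spec.update(dict(name=dict(type='str')))
#   module = AnsibleModule(argument_spec=argument_spec,
#                          required_together=rax_required_together())
#   pyrax = setup_rax_module(module, pyrax)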
def setup_rax_module(module, rax_module, region_required=True):
"""Set up pyrax in a standard way for all modules"""
rax_module.USER_AGENT = 'ansible/%s %s' % (ANSIBLE_VERSION,
rax_module.USER_AGENT)
api_key = module.params.get('api_key')
auth_endpoint = module.params.get('auth_endpoint')
credentials = module.params.get('credentials')
env = module.params.get('env')
identity_type = module.params.get('identity_type')
region = module.params.get('region')
tenant_id = module.params.get('tenant_id')
tenant_name = module.params.get('tenant_name')
username = module.params.get('username')
verify_ssl = module.params.get('verify_ssl')
if env is not None:
rax_module.set_environment(env)
rax_module.set_setting('identity_type', identity_type)
if verify_ssl is not None:
rax_module.set_setting('verify_ssl', verify_ssl)
if auth_endpoint is not None:
rax_module.set_setting('auth_endpoint', auth_endpoint)
if tenant_id is not None:
rax_module.set_setting('tenant_id', tenant_id)
if tenant_name is not None:
rax_module.set_setting('tenant_name', tenant_name)
try:
username = username or os.environ.get('RAX_USERNAME')
if not username:
username = rax_module.get_setting('keyring_username')
if username:
api_key = 'USE_KEYRING'
if not api_key:
api_key = os.environ.get('RAX_API_KEY')
credentials = (credentials or os.environ.get('RAX_CREDENTIALS') or
os.environ.get('RAX_CREDS_FILE'))
region = (region or os.environ.get('RAX_REGION') or
rax_module.get_setting('region'))
except KeyError, e:
module.fail_json(msg='Unable to load %s' % e.message)
try:
if api_key and username:
if api_key == 'USE_KEYRING':
rax_module.keyring_auth(username, region=region)
else:
rax_module.set_credentials(username, api_key=api_key,
region=region)
elif credentials:
credentials = os.path.expanduser(credentials)
rax_module.set_credential_file(credentials, region=region)
else:
raise Exception('No credentials supplied!')
except Exception, e:
if e.message:
msg = str(e.message)
else:
msg = repr(e)
module.fail_json(msg=msg)
if region_required and region not in rax_module.regions:
module.fail_json(msg='%s is not a valid region, must be one of: %s' %
(region, ','.join(rax_module.regions)))
return rax_module
| gpl-3.0 |
Elektropippo/kernel_852i | tools/perf/scripts/python/sched-migration.py | 11215 | 11670 | #!/usr/bin/python
#
# Cpu task migration overview toy
#
# Copyright (C) 2010 Frederic Weisbecker <[email protected]>
#
# perf script event handlers have been generated by perf script -g python
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import os
import sys
from collections import defaultdict
from UserList import UserList
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
sys.path.append('scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from SchedGui import *
threads = { 0 : "idle"}
def thread_name(pid):
return "%s:%d" % (threads[pid], pid)
class RunqueueEventUnknown:
@staticmethod
def color():
return None
def __repr__(self):
return "unknown"
class RunqueueEventSleep:
@staticmethod
def color():
return (0, 0, 0xff)
def __init__(self, sleeper):
self.sleeper = sleeper
def __repr__(self):
return "%s gone to sleep" % thread_name(self.sleeper)
class RunqueueEventWakeup:
@staticmethod
def color():
return (0xff, 0xff, 0)
def __init__(self, wakee):
self.wakee = wakee
def __repr__(self):
return "%s woke up" % thread_name(self.wakee)
class RunqueueEventFork:
@staticmethod
def color():
return (0, 0xff, 0)
def __init__(self, child):
self.child = child
def __repr__(self):
return "new forked task %s" % thread_name(self.child)
class RunqueueMigrateIn:
@staticmethod
def color():
return (0, 0xf0, 0xff)
def __init__(self, new):
self.new = new
def __repr__(self):
return "task migrated in %s" % thread_name(self.new)
class RunqueueMigrateOut:
@staticmethod
def color():
return (0xff, 0, 0xff)
def __init__(self, old):
self.old = old
def __repr__(self):
return "task migrated out %s" % thread_name(self.old)
class RunqueueSnapshot:
def __init__(self, tasks = [0], event = RunqueueEventUnknown()):
self.tasks = tuple(tasks)
self.event = event
def sched_switch(self, prev, prev_state, next):
event = RunqueueEventUnknown()
if taskState(prev_state) == "R" and next in self.tasks \
and prev in self.tasks:
return self
if taskState(prev_state) != "R":
event = RunqueueEventSleep(prev)
next_tasks = list(self.tasks[:])
if prev in self.tasks:
if taskState(prev_state) != "R":
next_tasks.remove(prev)
elif taskState(prev_state) == "R":
next_tasks.append(prev)
if next not in next_tasks:
next_tasks.append(next)
return RunqueueSnapshot(next_tasks, event)
def migrate_out(self, old):
if old not in self.tasks:
return self
next_tasks = [task for task in self.tasks if task != old]
return RunqueueSnapshot(next_tasks, RunqueueMigrateOut(old))
def __migrate_in(self, new, event):
if new in self.tasks:
self.event = event
return self
next_tasks = self.tasks[:] + tuple([new])
return RunqueueSnapshot(next_tasks, event)
def migrate_in(self, new):
return self.__migrate_in(new, RunqueueMigrateIn(new))
def wake_up(self, new):
return self.__migrate_in(new, RunqueueEventWakeup(new))
def wake_up_new(self, new):
return self.__migrate_in(new, RunqueueEventFork(new))
def load(self):
""" Provide the number of tasks on the runqueue.
Don't count idle"""
return len(self.tasks) - 1
def __repr__(self):
ret = self.tasks.__repr__()
ret += self.origin_tostring()
return ret
class TimeSlice:
def __init__(self, start, prev):
self.start = start
self.prev = prev
self.end = start
# cpus that triggered the event
self.event_cpus = []
if prev is not None:
self.total_load = prev.total_load
self.rqs = prev.rqs.copy()
else:
self.rqs = defaultdict(RunqueueSnapshot)
self.total_load = 0
def __update_total_load(self, old_rq, new_rq):
diff = new_rq.load() - old_rq.load()
self.total_load += diff
def sched_switch(self, ts_list, prev, prev_state, next, cpu):
old_rq = self.prev.rqs[cpu]
new_rq = old_rq.sched_switch(prev, prev_state, next)
if old_rq is new_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def migrate(self, ts_list, new, old_cpu, new_cpu):
if old_cpu == new_cpu:
return
old_rq = self.prev.rqs[old_cpu]
out_rq = old_rq.migrate_out(new)
self.rqs[old_cpu] = out_rq
self.__update_total_load(old_rq, out_rq)
new_rq = self.prev.rqs[new_cpu]
in_rq = new_rq.migrate_in(new)
self.rqs[new_cpu] = in_rq
self.__update_total_load(new_rq, in_rq)
ts_list.append(self)
if old_rq is not out_rq:
self.event_cpus.append(old_cpu)
self.event_cpus.append(new_cpu)
def wake_up(self, ts_list, pid, cpu, fork):
old_rq = self.prev.rqs[cpu]
if fork:
new_rq = old_rq.wake_up_new(pid)
else:
new_rq = old_rq.wake_up(pid)
if new_rq is old_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def next(self, t):
self.end = t
return TimeSlice(t, self)
class TimeSliceList(UserList):
def __init__(self, arg = []):
self.data = arg
def get_time_slice(self, ts):
if len(self.data) == 0:
slice = TimeSlice(ts, TimeSlice(-1, None))
else:
slice = self.data[-1].next(ts)
return slice
def find_time_slice(self, ts):
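		# Editor's note: a plain bisection over the time-ordered slices;
		# returns the index of the slice whose [start, end] interval
		# contains ts, or -1 if ts falls outside every recorded slice.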
start = 0
end = len(self.data)
found = -1
searching = True
while searching:
if start == end or start == end - 1:
searching = False
i = (end + start) / 2
if self.data[i].start <= ts and self.data[i].end >= ts:
found = i
end = i
continue
if self.data[i].end < ts:
start = i
elif self.data[i].start > ts:
end = i
return found
def set_root_win(self, win):
self.root_win = win
def mouse_down(self, cpu, t):
idx = self.find_time_slice(t)
if idx == -1:
return
ts = self[idx]
rq = ts.rqs[cpu]
raw = "CPU: %d\n" % cpu
raw += "Last event : %s\n" % rq.event.__repr__()
raw += "Timestamp : %d.%06d\n" % (ts.start / (10 ** 9), (ts.start % (10 ** 9)) / 1000)
raw += "Duration : %6d us\n" % ((ts.end - ts.start) / (10 ** 6))
raw += "Load = %d\n" % rq.load()
for t in rq.tasks:
raw += "%s \n" % thread_name(t)
self.root_win.update_summary(raw)
def update_rectangle_cpu(self, slice, cpu):
rq = slice.rqs[cpu]
if slice.total_load != 0:
load_rate = rq.load() / float(slice.total_load)
else:
load_rate = 0
red_power = int(0xff - (0xff * load_rate))
color = (0xff, red_power, red_power)
top_color = None
if cpu in slice.event_cpus:
top_color = rq.event.color()
self.root_win.paint_rectangle_zone(cpu, color, top_color, slice.start, slice.end)
def fill_zone(self, start, end):
i = self.find_time_slice(start)
if i == -1:
return
for i in xrange(i, len(self.data)):
timeslice = self.data[i]
if timeslice.start > end:
return
for cpu in timeslice.rqs:
self.update_rectangle_cpu(timeslice, cpu)
def interval(self):
if len(self.data) == 0:
return (0, 0)
return (self.data[0].start, self.data[-1].end)
def nr_rectangles(self):
last_ts = self.data[-1]
max_cpu = 0
for cpu in last_ts.rqs:
if cpu > max_cpu:
max_cpu = cpu
return max_cpu
class SchedEventProxy:
def __init__(self):
self.current_tsk = defaultdict(lambda : -1)
self.timeslices = TimeSliceList()
def sched_switch(self, headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
""" Ensure the task we sched out this cpu is really the one
we logged. Otherwise we may have missed traces """
on_cpu_task = self.current_tsk[headers.cpu]
if on_cpu_task != -1 and on_cpu_task != prev_pid:
print "Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" % \
(headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid)
threads[prev_pid] = prev_comm
threads[next_pid] = next_comm
self.current_tsk[headers.cpu] = next_pid
ts = self.timeslices.get_time_slice(headers.ts())
ts.sched_switch(self.timeslices, prev_pid, prev_state, next_pid, headers.cpu)
def migrate(self, headers, pid, prio, orig_cpu, dest_cpu):
ts = self.timeslices.get_time_slice(headers.ts())
ts.migrate(self.timeslices, pid, orig_cpu, dest_cpu)
def wake_up(self, headers, comm, pid, success, target_cpu, fork):
if success == 0:
return
ts = self.timeslices.get_time_slice(headers.ts())
ts.wake_up(self.timeslices, pid, target_cpu, fork)
def trace_begin():
global parser
parser = SchedEventProxy()
def trace_end():
app = wx.App(False)
timeslices = parser.timeslices
frame = RootFrame(timeslices, "Migration")
app.MainLoop()
def sched__sched_stat_runtime(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, runtime, vruntime):
pass
def sched__sched_stat_iowait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_stat_sleep(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_stat_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_process_fork(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
parent_comm, parent_pid, child_comm, child_pid):
pass
def sched__sched_process_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_process_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_process_free(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_migrate_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, orig_cpu,
dest_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.migrate(headers, pid, prio, orig_cpu, dest_cpu)
def sched__sched_switch(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.sched_switch(headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio)
def sched__sched_wakeup_new(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.wake_up(headers, comm, pid, success, target_cpu, 1)
def sched__sched_wakeup(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.wake_up(headers, comm, pid, success, target_cpu, 0)
def sched__sched_wait_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_kthread_stop_ret(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
ret):
pass
def sched__sched_kthread_stop(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid):
pass
def trace_unhandled(event_name, context, common_cpu, common_secs, common_nsecs,
common_pid, common_comm):
pass
| gpl-2.0 |
arbrandes/edx-platform | cms/djangoapps/contentstore/tests/test_clone_course.py | 4 | 6454 | """
Unit tests for cloning a course between the same and different module stores.
"""
import json
from unittest.mock import Mock, patch
from django.conf import settings
from opaque_keys.edx.locator import CourseLocator
from cms.djangoapps.contentstore.tasks import rerun_course
from cms.djangoapps.contentstore.tests.utils import CourseTestCase
from common.djangoapps.course_action_state.managers import CourseRerunUIStateManager
from common.djangoapps.course_action_state.models import CourseRerunState
from common.djangoapps.student.auth import has_course_author_access
from xmodule.contentstore.content import StaticContent
from xmodule.contentstore.django import contentstore
from xmodule.modulestore import EdxJSONEncoder, ModuleStoreEnum
from xmodule.modulestore.tests.factories import CourseFactory
TEST_DATA_DIR = settings.COMMON_TEST_DATA_ROOT
class CloneCourseTest(CourseTestCase):
"""
Unit tests for cloning a course
"""
def test_clone_course(self):
"""Tests cloning of a course as follows: XML -> Mongo (+ data) -> Mongo -> Split -> Split"""
# 1. import and populate test toy course
mongo_course1_id = self.import_and_populate_course()
        mongo_course2_id = mongo_course1_id  # step 2 (mongo -> mongo clone) is not exercised here
# 3. clone course (mongo -> split)
with self.store.default_store(ModuleStoreEnum.Type.split):
split_course3_id = CourseLocator(
org="edx3", course="split3", run="2013_Fall"
)
self.store.clone_course(mongo_course2_id, split_course3_id, self.user.id)
self.assertCoursesEqual(mongo_course2_id, split_course3_id)
# 4. clone course (split -> split)
split_course4_id = CourseLocator(
org="edx4", course="split4", run="2013_Fall"
)
self.store.clone_course(split_course3_id, split_course4_id, self.user.id)
self.assertCoursesEqual(split_course3_id, split_course4_id)
def test_space_in_asset_name_for_rerun_course(self):
"""
        Tests that a course whose asset name contains a percent sign (%)
        re-runs successfully.
"""
org = 'edX'
course_number = 'CS101'
course_run = '2015_Q1'
display_name = 'rerun'
fields = {'display_name': display_name}
course_assets = {'subs_Introduction%20To%20New.srt.sjson'}
# Create a course using split modulestore
course = CourseFactory.create(
org=org,
number=course_number,
run=course_run,
display_name=display_name,
default_store=ModuleStoreEnum.Type.split
)
# add an asset
asset_key = course.id.make_asset_key('asset', 'subs_Introduction%20To%20New.srt.sjson')
content = StaticContent(
            asset_key, 'Dummy asset', 'application/json', 'dummy data',
)
contentstore().save(content)
# Get & verify all assets of the course
assets, count = contentstore().get_all_content_for_course(course.id)
self.assertEqual(count, 1)
self.assertEqual({asset['asset_key'].block_id for asset in assets}, course_assets) # lint-amnesty, pylint: disable=consider-using-set-comprehension
# rerun from split into split
split_rerun_id = CourseLocator(org=org, course=course_number, run="2012_Q2")
CourseRerunState.objects.initiated(course.id, split_rerun_id, self.user, fields['display_name'])
result = rerun_course.delay(
str(course.id),
str(split_rerun_id),
self.user.id,
json.dumps(fields, cls=EdxJSONEncoder)
)
# Check if re-run was successful
self.assertEqual(result.get(), "succeeded")
rerun_state = CourseRerunState.objects.find_first(course_key=split_rerun_id)
self.assertEqual(rerun_state.state, CourseRerunUIStateManager.State.SUCCEEDED)
def test_rerun_course(self):
"""
Unit tests for :meth: `contentstore.tasks.rerun_course`
"""
mongo_course1_id = self.import_and_populate_course()
# rerun from mongo into split
split_course3_id = CourseLocator(
org="edx3", course="split3", run="rerun_test"
)
# Mark the action as initiated
fields = {'display_name': 'rerun'}
CourseRerunState.objects.initiated(mongo_course1_id, split_course3_id, self.user, fields['display_name'])
result = rerun_course.delay(str(mongo_course1_id), str(split_course3_id), self.user.id,
json.dumps(fields, cls=EdxJSONEncoder))
self.assertEqual(result.get(), "succeeded")
self.assertTrue(has_course_author_access(self.user, split_course3_id), "Didn't grant access")
rerun_state = CourseRerunState.objects.find_first(course_key=split_course3_id)
self.assertEqual(rerun_state.state, CourseRerunUIStateManager.State.SUCCEEDED)
        # try rerunning again with the same name and ensure it generates an error
result = rerun_course.delay(str(mongo_course1_id), str(split_course3_id), self.user.id)
self.assertEqual(result.get(), "duplicate course")
# the below will raise an exception if the record doesn't exist
CourseRerunState.objects.find_first(
course_key=split_course3_id,
state=CourseRerunUIStateManager.State.FAILED
)
# try to hit the generic exception catch
with patch('xmodule.modulestore.split_mongo.mongo_connection.MongoConnection.insert_course_index', Mock(side_effect=Exception)): # lint-amnesty, pylint: disable=line-too-long
split_course4_id = CourseLocator(org="edx3", course="split3", run="rerun_fail")
fields = {'display_name': 'total failure'}
CourseRerunState.objects.initiated(split_course3_id, split_course4_id, self.user, fields['display_name'])
result = rerun_course.delay(str(split_course3_id), str(split_course4_id), self.user.id,
json.dumps(fields, cls=EdxJSONEncoder))
self.assertIn("exception: ", result.get())
self.assertIsNone(self.store.get_course(split_course4_id), "Didn't delete course after error")
CourseRerunState.objects.find_first(
course_key=split_course4_id,
state=CourseRerunUIStateManager.State.FAILED
)
| agpl-3.0 |
ariakerstein/twitterFlaskClone | project/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/fields.py | 1007 | 5833 | import email.utils
import mimetypes
from .packages import six
def guess_content_type(filename, default='application/octet-stream'):
"""
Guess the "Content-Type" of a file.
:param filename:
The filename to guess the "Content-Type" of using :mod:`mimetypes`.
:param default:
If no "Content-Type" can be guessed, default to `default`.
"""
if filename:
return mimetypes.guess_type(filename)[0] or default
return default
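# Illustrative doctest-style sketch (not part of the original file); the
# exact result for a known extension depends on the platform's mimetypes
# tables:
#
#     >>> guess_content_type('report.pdf')
#     'application/pdf'
#     >>> guess_content_type('archive.unknown-ext')
#     'application/octet-stream'
#     >>> guess_content_type(None)
#     'application/octet-stream'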
def format_header_param(name, value):
"""
Helper function to format and quote a single header parameter.
Particularly useful for header parameters which might contain
non-ASCII values, like file names. This follows RFC 2231, as
suggested by RFC 2388 Section 4.4.
:param name:
The name of the parameter, a string expected to be ASCII only.
:param value:
The value of the parameter, provided as a unicode string.
"""
if not any(ch in value for ch in '"\\\r\n'):
result = '%s="%s"' % (name, value)
try:
result.encode('ascii')
except UnicodeEncodeError:
pass
else:
return result
if not six.PY3: # Python 2:
value = value.encode('utf-8')
value = email.utils.encode_rfc2231(value, 'utf-8')
value = '%s*=%s' % (name, value)
return value
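# Illustrative sketch (not in the original source): an ASCII-only value is
# emitted as a plain quoted parameter, while a non-ASCII value falls back
# to the RFC 2231 "name*=" encoded form:
#
#     >>> format_header_param('filename', 'report.txt')
#     'filename="report.txt"'
#     >>> format_header_param('filename', u'r\xe9sum\xe9.txt')
#     "filename*=utf-8''r%C3%A9sum%C3%A9.txt"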
class RequestField(object):
"""
A data container for request body parameters.
:param name:
The name of this request field.
:param data:
The data/value body.
:param filename:
An optional filename of the request field.
:param headers:
An optional dict-like object of headers to initially use for the field.
"""
def __init__(self, name, data, filename=None, headers=None):
self._name = name
self._filename = filename
self.data = data
self.headers = {}
if headers:
self.headers = dict(headers)
@classmethod
def from_tuples(cls, fieldname, value):
"""
A :class:`~urllib3.fields.RequestField` factory from old-style tuple parameters.
Supports constructing :class:`~urllib3.fields.RequestField` from
parameter of key/value strings AND key/filetuple. A filetuple is a
(filename, data, MIME type) tuple where the MIME type is optional.
For example::
'foo': 'bar',
'fakefile': ('foofile.txt', 'contents of foofile'),
'realfile': ('barfile.txt', open('realfile').read()),
'typedfile': ('bazfile.bin', open('bazfile').read(), 'image/jpeg'),
'nonamefile': 'contents of nonamefile field',
Field names and filenames must be unicode.
"""
if isinstance(value, tuple):
if len(value) == 3:
filename, data, content_type = value
else:
filename, data = value
content_type = guess_content_type(filename)
else:
filename = None
content_type = None
data = value
request_param = cls(fieldname, data, filename=filename)
request_param.make_multipart(content_type=content_type)
return request_param
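    # Illustrative usage (hypothetical field and file names, not from the
    # original file):
    #
    #     >>> rf = RequestField.from_tuples('typedfile',
    #     ...                               ('baz.bin', 'data', 'image/jpeg'))
    #     >>> rf.headers['Content-Type']
    #     'image/jpeg'
    #     >>> 'form-data; name="typedfile"; filename="baz.bin"' in rf.render_headers()
    #     True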
def _render_part(self, name, value):
"""
Overridable helper function to format a single header parameter.
:param name:
The name of the parameter, a string expected to be ASCII only.
:param value:
The value of the parameter, provided as a unicode string.
"""
return format_header_param(name, value)
def _render_parts(self, header_parts):
"""
Helper function to format and quote a single header.
Useful for single headers that are composed of multiple items. E.g.,
'Content-Disposition' fields.
:param header_parts:
            A sequence of (k, v) tuples or a :class:`dict` of (k, v) to format
as `k1="v1"; k2="v2"; ...`.
"""
parts = []
iterable = header_parts
if isinstance(header_parts, dict):
iterable = header_parts.items()
for name, value in iterable:
if value:
parts.append(self._render_part(name, value))
return '; '.join(parts)
def render_headers(self):
"""
Renders the headers for this request field.
"""
lines = []
sort_keys = ['Content-Disposition', 'Content-Type', 'Content-Location']
for sort_key in sort_keys:
if self.headers.get(sort_key, False):
lines.append('%s: %s' % (sort_key, self.headers[sort_key]))
for header_name, header_value in self.headers.items():
if header_name not in sort_keys:
if header_value:
lines.append('%s: %s' % (header_name, header_value))
lines.append('\r\n')
return '\r\n'.join(lines)
def make_multipart(self, content_disposition=None, content_type=None,
content_location=None):
"""
Makes this request field into a multipart request field.
        This method overrides the "Content-Disposition", "Content-Type" and
        "Content-Location" headers of the request parameter.
:param content_type:
The 'Content-Type' of the request body.
:param content_location:
The 'Content-Location' of the request body.
"""
self.headers['Content-Disposition'] = content_disposition or 'form-data'
self.headers['Content-Disposition'] += '; '.join([
'', self._render_parts(
(('name', self._name), ('filename', self._filename))
)
])
self.headers['Content-Type'] = content_type
self.headers['Content-Location'] = content_location
| mit |
2013Commons/HUE-SHARK | desktop/core/ext-py/Mako-0.7.2/build/lib.linux-i686-2.7/mako/pygen.py | 19 | 9485 | # mako/pygen.py
# Copyright (C) 2006-2012 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""utilities for generating and formatting literal Python code."""
import re, string
from StringIO import StringIO
from mako import exceptions
class PythonPrinter(object):
def __init__(self, stream):
# indentation counter
self.indent = 0
# a stack storing information about why we incremented
# the indentation counter, to help us determine if we
# should decrement it
self.indent_detail = []
# the string of whitespace multiplied by the indent
# counter to produce a line
self.indentstring = " "
# the stream we are writing to
self.stream = stream
# a list of lines that represents a buffered "block" of code,
# which can be later printed relative to an indent level
self.line_buffer = []
self.in_indent_lines = False
self._reset_multi_line_flags()
def write(self, text):
self.stream.write(text)
def write_indented_block(self, block):
"""print a line or lines of python which already contain indentation.
The indentation of the total block of lines will be adjusted to that of
the current indent level."""
self.in_indent_lines = False
for l in re.split(r'\r?\n', block):
self.line_buffer.append(l)
def writelines(self, *lines):
"""print a series of lines of python."""
for line in lines:
self.writeline(line)
def writeline(self, line):
"""print a line of python, indenting it according to the current
indent level.
this also adjusts the indentation counter according to the
content of the line.
"""
if not self.in_indent_lines:
self._flush_adjusted_lines()
self.in_indent_lines = True
if (line is None or
re.match(r"^\s*#",line) or
re.match(r"^\s*$", line)
):
hastext = False
else:
hastext = True
is_comment = line and len(line) and line[0] == '#'
# see if this line should decrease the indentation level
if (not is_comment and
(not hastext or self._is_unindentor(line))
):
if self.indent > 0:
                self.indent -= 1
# if the indent_detail stack is empty, the user
# probably put extra closures - the resulting
                # module won't compile.
if len(self.indent_detail) == 0:
raise exceptions.SyntaxException(
"Too many whitespace closures")
self.indent_detail.pop()
if line is None:
return
# write the line
self.stream.write(self._indent_line(line) + "\n")
# see if this line should increase the indentation level.
        # note that a line can both decrease (before printing) and
# then increase (after printing) the indentation level.
if re.search(r":[ \t]*(?:#.*)?$", line):
# increment indentation count, and also
# keep track of what the keyword was that indented us,
# if it is a python compound statement keyword
# where we might have to look for an "unindent" keyword
match = re.match(r"^\s*(if|try|elif|while|for|with)", line)
if match:
                # it's a "compound" keyword, so we will check for "unindentors"
indentor = match.group(1)
                self.indent += 1
self.indent_detail.append(indentor)
else:
indentor = None
                # it's not a "compound" keyword, but let's also
                # test for valid Python keywords that might be indenting us;
                # else assume it's a non-indenting line
m2 = re.match(r"^\s*(def|class|else|elif|except|finally)",
line)
if m2:
self.indent += 1
self.indent_detail.append(indentor)
def close(self):
"""close this printer, flushing any remaining lines."""
self._flush_adjusted_lines()
def _is_unindentor(self, line):
"""return true if the given line is an 'unindentor',
relative to the last 'indent' event received.
"""
# no indentation detail has been pushed on; return False
if len(self.indent_detail) == 0:
return False
indentor = self.indent_detail[-1]
# the last indent keyword we grabbed is not a
# compound statement keyword; return False
if indentor is None:
return False
        # if the current line doesn't have one of the "unindentor" keywords,
# return False
match = re.match(r"^\s*(else|elif|except|finally).*\:", line)
if not match:
return False
# whitespace matches up, we have a compound indentor,
# and this line has an unindentor, this
# is probably good enough
return True
        # should we decide that it's not good enough, here's
# more stuff to check.
#keyword = match.group(1)
# match the original indent keyword
#for crit in [
# (r'if|elif', r'else|elif'),
# (r'try', r'except|finally|else'),
# (r'while|for', r'else'),
#]:
# if re.match(crit[0], indentor) and re.match(crit[1], keyword):
# return True
#return False
def _indent_line(self, line, stripspace=''):
"""indent the given line according to the current indent level.
stripspace is a string of space that will be truncated from the
start of the line before indenting."""
return re.sub(r"^%s" % stripspace, self.indentstring
* self.indent, line)
def _reset_multi_line_flags(self):
"""reset the flags which would indicate we are in a backslashed
or triple-quoted section."""
self.backslashed, self.triplequoted = False, False
def _in_multi_line(self, line):
"""return true if the given line is part of a multi-line block,
via backslash or triple-quote."""
# we are only looking for explicitly joined lines here, not
# implicit ones (i.e. brackets, braces etc.). this is just to
# guard against the possibility of modifying the space inside of
# a literal multiline string with unfortunately placed
# whitespace
current_state = (self.backslashed or self.triplequoted)
if re.search(r"\\$", line):
self.backslashed = True
else:
self.backslashed = False
triples = len(re.findall(r"\"\"\"|\'\'\'", line))
if triples == 1 or triples % 2 != 0:
self.triplequoted = not self.triplequoted
return current_state
def _flush_adjusted_lines(self):
stripspace = None
self._reset_multi_line_flags()
for entry in self.line_buffer:
if self._in_multi_line(entry):
self.stream.write(entry + "\n")
else:
entry = entry.expandtabs()
if stripspace is None and re.search(r"^[ \t]*[^# \t]", entry):
stripspace = re.match(r"^([ \t]*)", entry).group(1)
self.stream.write(self._indent_line(entry, stripspace) + "\n")
self.line_buffer = []
self._reset_multi_line_flags()
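# Illustrative sketch (not part of the original file), assuming the default
# four-space ``indentstring``: the printer re-indents a flat stream of lines
# based on the colons and dedenting keywords it sees.
#
#     >>> buf = StringIO()
#     >>> printer = PythonPrinter(buf)
#     >>> printer.writelines("if x:", "y = 1", "else:", "y = 2")
#     >>> printer.close()
#     >>> print buf.getvalue()
#     if x:
#         y = 1
#     else:
#         y = 2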
def adjust_whitespace(text):
"""remove the left-whitespace margin of a block of Python code."""
state = [False, False]
(backslashed, triplequoted) = (0, 1)
def in_multi_line(line):
start_state = (state[backslashed] or state[triplequoted])
if re.search(r"\\$", line):
state[backslashed] = True
else:
state[backslashed] = False
def match(reg, t):
m = re.match(reg, t)
if m:
return m, t[len(m.group(0)):]
else:
return None, t
while line:
if state[triplequoted]:
m, line = match(r"%s" % state[triplequoted], line)
if m:
state[triplequoted] = False
else:
m, line = match(r".*?(?=%s|$)" % state[triplequoted], line)
else:
m, line = match(r'#', line)
if m:
return start_state
m, line = match(r"\"\"\"|\'\'\'", line)
if m:
state[triplequoted] = m.group(0)
continue
m, line = match(r".*?(?=\"\"\"|\'\'\'|#|$)", line)
return start_state
def _indent_line(line, stripspace = ''):
return re.sub(r"^%s" % stripspace, '', line)
lines = []
stripspace = None
for line in re.split(r'\r?\n', text):
if in_multi_line(line):
lines.append(line)
else:
line = line.expandtabs()
if stripspace is None and re.search(r"^[ \t]*[^# \t]", line):
stripspace = re.match(r"^([ \t]*)", line).group(1)
lines.append(_indent_line(line, stripspace))
return "\n".join(lines)
| apache-2.0 |
sajuptpm/contrail-controller | src/config/svc-monitor/svc_monitor/physical_service_manager.py | 1 | 4391 | import uuid
from vnc_api.vnc_api import *
from instance_manager import InstanceManager
from config_db import (
VirtualMachineSM,
VirtualMachineInterfaceSM,
ServiceApplianceSetSM,
ServiceApplianceSM,
PhysicalInterfaceSM,
ServiceInstanceSM,
PortTupleSM)
from cfgm_common import svc_info
class PhysicalServiceManager(InstanceManager):
def create_service(self, st, si):
if not self.validate_network_config(st, si):
return
# get service appliances from service template
sa_set = st.service_appliance_set
if not sa_set:
self.logger.log_error("Can't find service appliances set")
return
service_appliance_set = ServiceApplianceSetSM.get(sa_set)
service_appliances = service_appliance_set.service_appliances
# validation
if not service_appliances:
self.logger.log_error("Can't find service appliances")
return
service_appliances = list(service_appliances)
si_obj = ServiceInstanceSM.get(si.uuid)
        # create a fake VM for the schema transfer to use
vm_uuid_list = list(si_obj.virtual_machines)
vm_list = [None]*si.max_instances
for vm_uuid in vm_uuid_list:
vm = VirtualMachineSM.get(vm_uuid)
if not vm:
continue
if (vm.index + 1) > si.max_instances:
self.delete_service(vm)
continue
vm_list[vm.index] = vm_uuid
# get the port-tuple
pt_list = [None]*si.max_instances
pts = list(si.port_tuples)
for i in range(0, len(pts)):
pt_list[i] = pts[i]
        if si.max_instances > len(service_appliances):
            self.logger.log_info(
                "There are not enough service appliances "
                "for service instance " + si.uuid)
            return
for idx, sa_uuid in enumerate(service_appliances):
            if idx >= si.max_instances:  # vm_list/pt_list only hold max_instances entries
return
vm_uuid = vm_list[idx]
if not vm_uuid:
vm_uuid = str(uuid.uuid4())
vm_obj = self.link_si_to_vm(si, st, idx, vm_uuid)
pt_uuid = pt_list[idx]
if not pt_uuid:
pt_uuid = str(uuid.uuid4())
pt_obj = self.create_port_tuple(si, st, idx, pt_uuid)
instance_name = self._get_instance_name(si, idx)
si.state = 'launching'
sa = ServiceApplianceSM.get(sa_uuid)
for nic in si.vn_info:
pi_uuid = sa.physical_interfaces.get(nic['type'], None)
pi_obj = PhysicalInterfaceSM.get(pi_uuid)
if not pi_obj:
return
vmi_obj = self._create_svc_vm_port(nic,
instance_name, si, st,
vm_obj=vm_obj,
pi=pi_obj,
pt=pt_obj)
si.state = "active"
def delete_service(self, vm):
if not vm.virtual_machine_interfaces:
return
vmi_list = list(vm.virtual_machine_interfaces)
pt_uuid = VirtualMachineInterfaceSM.get(vmi_list[0]).port_tuple
self.cleanup_pi_connections(vmi_list)
self.cleanup_svc_vm_ports(vmi_list)
try:
self._vnc_lib.port_tuple_delete(id=pt_uuid)
PortTupleSM.delete(pt_uuid)
except NoIdError:
pass
try:
self._vnc_lib.virtual_machine_delete(id=vm.uuid)
VirtualMachineSM.delete(vm.uuid)
except NoIdError:
pass
def cleanup_pi_connections(self, vmi_list):
for vmi_id in vmi_list:
try:
vmi = VirtualMachineInterfaceSM.get(vmi_id)
self._vnc_lib.ref_update('virtual-machine-interface',
vmi.uuid,
'physical_interface_refs',
vmi.physical_interface,
None,
'DELETE')
PhysicalInterfaceSM.locate(vmi.physical_interface)
            except Exception:
pass
def check_service(self, si):
return True
| apache-2.0 |
uclmr/inferbeddings | scripts/guo-fb122/UCL_ARRAY_GUO-FB122_adv_KALE_v2.py | 1 | 4329 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import itertools
import os
import os.path
import sys
import argparse
import logging
def cartesian_product(dicts):
return (dict(zip(dicts, x)) for x in itertools.product(*dicts.values()))
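# Illustrative (hypothetical values, not from the original script): each
# combination of the hyper-parameter lists becomes one configuration dict
# (iteration order of the keys may vary on older Python versions).
#
#     >>> list(cartesian_product(dict(margin=[1, 2], loss=['hinge'])))
#     [{'margin': 1, 'loss': 'hinge'}, {'margin': 2, 'loss': 'hinge'}]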
def summary(configuration):
kvs = sorted([(k, v) for k, v in configuration.items()], key=lambda e: e[0])
return '_'.join([('%s=%s' % (k, v)) for (k, v) in kvs])
def to_cmd(c, _path=None):
if _path is None:
_path = '/home/pminervi/workspace/inferbeddings/'
command = 'python3 {}/bin/kbp-cli.py' \
' --train {}/data/guo-emnlp16/fb122/fb122_triples.train' \
' --valid {}/data/guo-emnlp16/fb122/fb122_triples.valid' \
' --test {}/data/guo-emnlp16/fb122/fb122_triples.test' \
' --clauses {}/data/guo-emnlp16/fb122/clauses/fb122-clauses.pl' \
' --nb-epochs {}' \
' --lr 0.1' \
' --nb-batches 10' \
' --model {}' \
' --similarity {}' \
' --margin {}' \
' --embedding-size {}' \
' --loss {}' \
' --unit-cube --adv-lr {} --adv-init-ground --adversary-epochs {}' \
' --discriminator-epochs {} --adv-weight {} --adv-batch-size {} --adv-pooling {}' \
''.format(_path, _path, _path, _path, _path,
c['epochs'],
c['model'], c['similarity'],
c['margin'], c['embedding_size'],
c['loss'],
c['adv_lr'], c['adv_epochs'],
c['disc_epochs'], c['adv_weight'], c['adv_batch_size'], c['adv_pooling'])
return command
def to_logfile(c, path):
outfile = "%s/ucl_guo-fb122_adv_KALE_v2.%s.log" % (path, summary(c))
return outfile
def main(argv):
def formatter(prog):
return argparse.HelpFormatter(prog, max_help_position=100, width=200)
argparser = argparse.ArgumentParser('Generating experiments for the UCL cluster', formatter_class=formatter)
argparser.add_argument('--debug', '-D', action='store_true', help='Debug flag')
argparser.add_argument('--path', '-p', action='store', type=str, default=None, help='Path')
args = argparser.parse_args(argv)
hyperparameters_space_distmult_complex = dict(
epochs=[100],
model=['DistMult', 'ComplEx'],
similarity=['dot'],
margin=[1, 2, 5, 10],
embedding_size=[20, 50, 100, 150, 200],
loss=['hinge'],
adv_lr=[.1],
adv_epochs=[0, 10],
disc_epochs=[10],
adv_weight=[0, 1, 100, 10000, 1000000],
adv_batch_size=[1, 10, 100],
adv_pooling=['sum', 'mean', 'max'] # adv_pooling=['sum', 'mean', 'max', 'logsumexp']
)
configurations_distmult_complex = cartesian_product(hyperparameters_space_distmult_complex)
path = '/home/pminervi/workspace/inferbeddings/logs/ucl_guo-fb122_adv_KALE_v2/'
# Check that we are on the UCLCS cluster first
if os.path.exists('/home/pminervi/'):
# If the folder that will contain logs does not exist, create it
if not os.path.exists(path):
os.makedirs(path)
configurations = list(configurations_distmult_complex)
command_lines = set()
for cfg in configurations:
logfile = to_logfile(cfg, path)
completed = False
if os.path.isfile(logfile):
with open(logfile, 'r', encoding='utf-8', errors='ignore') as f:
content = f.read()
completed = '### MICRO (test filtered)' in content
if not completed:
command_line = '{} >> {} 2>&1'.format(to_cmd(cfg, _path=args.path), logfile)
command_lines |= {command_line}
# Sort command lines and remove duplicates
sorted_command_lines = sorted(command_lines)
nb_jobs = len(sorted_command_lines)
header = """#!/bin/bash
#$ -cwd
#$ -S /bin/bash
#$ -o /dev/null
#$ -e /dev/null
#$ -t 1-{}
#$ -l h_vmem=6G,tmem=6G
#$ -l h_rt=4:00:00
""".format(nb_jobs)
print(header)
for job_id, command_line in enumerate(sorted_command_lines, 1):
print('test $SGE_TASK_ID -eq {} && {}'.format(job_id, command_line))
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
main(sys.argv[1:])
| mit |
devs1991/test_edx_docmode | venv/lib/python2.7/site-packages/nltk/sem/lfg.py | 12 | 6372 | # Natural Language Toolkit: Lexical Functional Grammar
#
# Author: Dan Garrette <[email protected]>
#
# Copyright (C) 2001-2012 NLTK Project
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
from collections import defaultdict
from nltk.internals import Counter
class FStructure(dict):
def safeappend(self, key, item):
"""
Append 'item' to the list at 'key'. If no list exists for 'key', then
construct one.
"""
if key not in self:
self[key] = []
self[key].append(item)
def __setitem__(self, key, value):
dict.__setitem__(self, key.lower(), value)
def __getitem__(self, key):
return dict.__getitem__(self, key.lower())
def __contains__(self, key):
return dict.__contains__(self, key.lower())
def to_glueformula_list(self, glue_dict):
depgraph = self.to_depgraph()
return glue_dict.to_glueformula_list(depgraph)
def to_depgraph(self, rel=None):
from nltk.parse.dependencygraph import DependencyGraph
depgraph = DependencyGraph()
nodelist = depgraph.nodelist
self._to_depgraph(nodelist, 0, 'ROOT')
#Add all the dependencies for all the nodes
for node_addr, node in enumerate(nodelist):
for n2 in nodelist[1:]:
if n2['head'] == node_addr:
node['deps'].append(n2['address'])
depgraph.root = nodelist[1]
return depgraph
def _to_depgraph(self, nodelist, head, rel):
index = len(nodelist)
nodelist.append({'address': index,
'word': self.pred[0],
'tag': self.pred[1],
'head': head,
'rel': rel,
'deps': []})
for feature in self:
for item in self[feature]:
if isinstance(item, FStructure):
item._to_depgraph(nodelist, index, feature)
elif isinstance(item, tuple):
nodelist.append({'address': len(nodelist),
'word': item[0],
'tag': item[1],
'head': index,
'rel': feature,
'deps': []})
elif isinstance(item, list):
for n in item:
n._to_depgraph(nodelist, index, feature)
else: # ERROR
raise Exception, 'feature %s is not an FStruct, a list, or a tuple' % feature
@staticmethod
def read_depgraph(depgraph):
return FStructure._read_depgraph(depgraph.root, depgraph)
@staticmethod
def _read_depgraph(node, depgraph, label_counter=None, parent=None):
if not label_counter:
label_counter = Counter()
if node['rel'].lower() in ['spec', 'punct']:
# the value of a 'spec' entry is a word, not an FStructure
return (node['word'], node['tag'])
else:
fstruct = FStructure()
fstruct.pred = None
fstruct.label = FStructure._make_label(label_counter.get())
fstruct.parent = parent
word, tag = node['word'], node['tag']
if tag[:2] == 'VB':
if tag[2:3] == 'D':
fstruct.safeappend('tense', ('PAST', 'tense'))
fstruct.pred = (word, tag[:2])
if not fstruct.pred:
fstruct.pred = (word, tag)
children = [depgraph.nodelist[idx] for idx in node['deps']]
for child in children:
fstruct.safeappend(child['rel'], FStructure._read_depgraph(child, depgraph, label_counter, fstruct))
return fstruct
@staticmethod
def _make_label(value):
"""
Pick an alphabetic character as identifier for an entity in the model.
:param value: where to index into the list of characters
:type value: int
"""
letter = ['f','g','h','i','j','k','l','m','n','o','p','q','r','s',
't','u','v','w','x','y','z','a','b','c','d','e'][value-1]
num = int(value) / 26
if num > 0:
return letter + str(num)
else:
return letter
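    # Illustrative (not in the original source): values 1, 2 and 3 map to
    # 'f', 'g' and 'h'; value 26 reaches the end of the cycle and yields
    # 'e1'. Values above 26 would index past the 26-letter list, so callers
    # are assumed to stay within that range.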
def __repr__(self):
return str(self).replace('\n', '')
def __str__(self, indent=3):
try:
accum = '%s:[' % self.label
        except AttributeError:
accum = '['
try:
accum += 'pred \'%s\'' % (self.pred[0])
        except (AttributeError, TypeError):
pass
for feature in self:
for item in self[feature]:
if isinstance(item, FStructure):
next_indent = indent+len(feature)+3+len(self.label)
accum += '\n%s%s %s' % (' '*(indent), feature, item.__str__(next_indent))
elif isinstance(item, tuple):
accum += '\n%s%s \'%s\'' % (' '*(indent), feature, item[0])
elif isinstance(item, list):
accum += '\n%s%s {%s}' % (' '*(indent), feature, ('\n%s' % (' '*(indent+len(feature)+2))).join(item))
else: # ERROR
raise Exception, 'feature %s is not an FStruct, a list, or a tuple' % feature
return accum+']'
def demo_read_depgraph():
from nltk.parse.dependencygraph import DependencyGraph
dg1 = DependencyGraph("""\
Esso NNP 2 SUB
said VBD 0 ROOT
the DT 5 NMOD
Whiting NNP 5 NMOD
field NN 6 SUB
started VBD 2 VMOD
production NN 6 OBJ
Tuesday NNP 6 VMOD
""")
dg2 = DependencyGraph("""\
John NNP 2 SUB
sees VBP 0 ROOT
Mary NNP 2 OBJ
""")
dg3 = DependencyGraph("""\
a DT 2 SPEC
man NN 3 SUBJ
walks VB 0 ROOT
""")
dg4 = DependencyGraph("""\
every DT 2 SPEC
girl NN 3 SUBJ
chases VB 0 ROOT
a DT 5 SPEC
dog NN 3 OBJ
""")
depgraphs = [dg1,dg2,dg3,dg4]
for dg in depgraphs:
print FStructure.read_depgraph(dg)
if __name__ == '__main__':
demo_read_depgraph()
| agpl-3.0 |
stevenvo/rpi-gpio-scripts | 06_rgb.py | 2 | 1605 | #!/usr/bin/env python
import RPi.GPIO as GPIO
import time
colors = [0xFF0000, 0x00FF00, 0x0000FF, 0xFFFF00, 0xFF00FF, 0x00FFFF]
LedPinRed = 11
LedPinGreen = 12
LedPinBlue = 13
def setup(Rpin, Gpin, Bpin):
global pins
global p_R, p_G, p_B
pins = {'pin_R': Rpin, 'pin_G': Gpin, 'pin_B': Bpin}
GPIO.setmode(GPIO.BOARD) # Numbers GPIOs by physical location
for i in pins:
        GPIO.setup(pins[i], GPIO.OUT)    # Set pins' mode to output
        GPIO.output(pins[i], GPIO.HIGH)  # Set pins high (+3.3V) to turn the LEDs off
    p_R = GPIO.PWM(pins['pin_R'], 2000)  # set frequency to 2 kHz
p_G = GPIO.PWM(pins['pin_G'], 1999)
p_B = GPIO.PWM(pins['pin_B'], 5000)
    p_R.start(100)      # Initial duty cycle = 100 (LEDs off; pins are active-low)
p_G.start(100)
p_B.start(100)
def map(x, in_min, in_max, out_min, out_max):
return (x - in_min) * (out_max - out_min) / (in_max - in_min) + out_min
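# Worked example (not in the original script): map(128, 0, 255, 0, 100)
# = (128 - 0) * (100 - 0) / (255 - 0) + 0 = 50 under Python 2 integer
# division, i.e. a 0-255 colour channel becomes a 0-100 duty cycle.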
def off():
for i in pins:
GPIO.output(pins[i], GPIO.HIGH) # Turn off all leds
def setColor(col): # For example : col = 0x112233
R_val = (col & 0xff0000) >> 16
G_val = (col & 0x00ff00) >> 8
B_val = (col & 0x0000ff) >> 0
R_val = map(R_val, 0, 255, 0, 100)
G_val = map(G_val, 0, 255, 0, 100)
B_val = map(B_val, 0, 255, 0, 100)
p_R.ChangeDutyCycle(100-R_val) # Change duty cycle
p_G.ChangeDutyCycle(100-G_val)
p_B.ChangeDutyCycle(100-B_val)
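# Worked example (not part of the original script): setColor(0x112233)
# extracts R=0x11=17, G=0x22=34, B=0x33=51; map() scales these to duty
# values 6, 13 and 20, and the complements 94, 87 and 80 are written
# because the pins drive the LED active-low.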
def loop():
while True:
for col in colors:
setColor(col)
time.sleep(1)
def destroy():
p_R.stop()
p_G.stop()
p_B.stop()
off()
GPIO.cleanup()
if __name__ == "__main__":
try:
setup(LedPinRed, LedPinGreen, LedPinBlue)
loop()
except KeyboardInterrupt:
destroy()
| gpl-2.0 |
plaes/numpy | numpy/lib/tests/test_ufunclike.py | 5 | 1776 | """
>>> import numpy.core as nx
>>> import numpy.lib.ufunclike as U
Test fix:
>>> a = nx.array([[1.0, 1.1, 1.5, 1.8], [-1.0, -1.1, -1.5, -1.8]])
>>> U.fix(a)
array([[ 1., 1., 1., 1.],
[-1., -1., -1., -1.]])
>>> y = nx.zeros(a.shape, float)
>>> U.fix(a, y)
array([[ 1., 1., 1., 1.],
[-1., -1., -1., -1.]])
>>> y
array([[ 1., 1., 1., 1.],
[-1., -1., -1., -1.]])
Test isposinf, isneginf, sign
>>> a = nx.array([nx.Inf, -nx.Inf, nx.NaN, 0.0, 3.0, -3.0])
>>> U.isposinf(a)
array([ True, False, False, False, False, False], dtype=bool)
>>> U.isneginf(a)
array([False, True, False, False, False, False], dtype=bool)
>>> olderr = nx.seterr(invalid='ignore')
>>> nx.sign(a)
array([ 1., -1., NaN, 0., 1., -1.])
>>> olderr = nx.seterr(**olderr)
Same thing with an output array:
>>> y = nx.zeros(a.shape, bool)
>>> U.isposinf(a, y)
array([ True, False, False, False, False, False], dtype=bool)
>>> y
array([ True, False, False, False, False, False], dtype=bool)
>>> U.isneginf(a, y)
array([False, True, False, False, False, False], dtype=bool)
>>> y
array([False, True, False, False, False, False], dtype=bool)
>>> olderr = nx.seterr(invalid='ignore')
>>> nx.sign(a, y)
array([ True, True, True, False, True, True], dtype=bool)
>>> olderr = nx.seterr(**olderr)
>>> y
array([ True, True, True, False, True, True], dtype=bool)
Now log2:
>>> a = nx.array([4.5, 2.3, 6.5])
>>> U.log2(a)
array([ 2.169925 , 1.20163386, 2.70043972])
>>> 2**_
array([ 4.5, 2.3, 6.5])
>>> y = nx.zeros(a.shape, float)
>>> U.log2(a, y)
array([ 2.169925 , 1.20163386, 2.70043972])
>>> y
array([ 2.169925 , 1.20163386, 2.70043972])
"""
from numpy.testing import *
def test():
return rundocs()
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause |
meteorcloudy/tensorflow | tensorflow/python/kernel_tests/lrn_op_test.py | 138 | 5675 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for local response normalization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import nn
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
class LRNOpTest(test.TestCase):
def _LRN(self, input_image, lrn_depth_radius=5, bias=1.0, alpha=1.0,
beta=0.5):
"""Compute expected result."""
output = copy.deepcopy(input_image)
batch_size = input_image.shape[0]
rows = input_image.shape[1]
cols = input_image.shape[2]
depth = input_image.shape[3]
for b in range(batch_size):
for r in range(rows):
for c in range(cols):
for d in range(depth):
begin = max(0, d - lrn_depth_radius)
end = min(depth, d + lrn_depth_radius + 1)
patch = input_image[b, r, c, begin:end]
output[b, r, c, d] /= (
np.power(bias + alpha * np.sum(patch * patch), beta))
return output
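  # Worked example (not in the original test): with lrn_depth_radius=1,
  # bias=1.0, alpha=1.0, beta=0.5 and a depth slice [1, 2, 3], the output
  # at d=1 is 2 / (1 + (1 + 4 + 9))**0.5 = 2 / sqrt(15) ~= 0.516.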
def _RunAndVerify(self, dtype):
with self.test_session(use_gpu=True):
# random shape
shape = np.random.randint(1, 16, size=4)
# Make depth at least 2 to make it meaningful
shape[3] += 1
p = array_ops.placeholder(dtype, shape=shape)
# random depth_radius, bias, alpha, beta. cuDNN requires depth_radius to
# be in [1, 7].
lrn_depth_radius = np.random.randint(1, min(8, shape[3]))
bias = 1.0 + np.random.rand()
alpha = 2.0 * np.random.rand()
# cuDNN requires beta >= 0.01.
beta = 0.01 + 2.0 * np.random.rand()
lrn_t = nn.local_response_normalization(
p,
name="lrn",
depth_radius=lrn_depth_radius,
bias=bias,
alpha=alpha,
beta=beta)
params = {p: np.random.rand(*shape).astype("f")}
result = lrn_t.eval(feed_dict=params)
expected = self._LRN(
params[p],
lrn_depth_radius=lrn_depth_radius,
bias=bias,
alpha=alpha,
beta=beta)
err = np.amax(np.abs(result - expected))
print("LRN error for bias ", bias, "alpha ", alpha, " beta ", beta, " is ",
err)
if dtype == dtypes.float32:
self.assertTrue(err < 1e-4)
else:
self.assertTrue(err < 1e-2)
self.assertShapeEqual(expected, lrn_t)
def testCompute(self):
for _ in range(2):
self._RunAndVerify(dtypes.float32)
# Enable when LRN supports tf.float16 on GPU.
if not test.is_gpu_available():
self._RunAndVerify(dtypes.float16)
def testGradientsZeroInput(self):
with self.test_session(use_gpu=True):
shape = [4, 4, 4, 4]
p = array_ops.placeholder(dtypes.float32, shape=shape)
inp_array = np.zeros(shape).astype("f")
lrn_op = nn.local_response_normalization(p, 2, 1.0, 0.0, 1.0, name="lrn")
grad = gradients_impl.gradients([lrn_op], [p])[0]
params = {p: inp_array}
r = grad.eval(feed_dict=params)
expected = np.ones(shape).astype("f")
self.assertAllClose(r, expected)
self.assertShapeEqual(expected, grad)
def _RunAndVerifyGradients(self, dtype):
with self.test_session(use_gpu=True):
# random shape
shape = np.random.randint(1, 5, size=4)
# Make depth at least 2 to make it meaningful
shape[3] += 1
# random depth_radius, bias, alpha, beta. cuDNN requires depth_radius to
# be in [1, 7].
lrn_depth_radius = np.random.randint(1, min(8, shape[3]))
bias = 1.0 + np.random.rand()
alpha = 1.0 * np.random.rand()
# cuDNN requires beta >= 0.01.
beta = 0.01 + 1.0 * np.random.rand()
if dtype == dtypes.float32:
inp_array = np.random.rand(*shape).astype(np.float32)
else:
inp_array = np.random.rand(*shape).astype(np.float16)
inp = constant_op.constant(
list(inp_array.ravel(order="C")), shape=shape, dtype=dtype)
lrn_op = nn.local_response_normalization(
inp,
name="lrn",
depth_radius=lrn_depth_radius,
bias=bias,
alpha=alpha,
beta=beta)
err = gradient_checker.compute_gradient_error(inp, shape, lrn_op, shape)
print("LRN Gradient error for bias ", bias, "alpha ", alpha, " beta ", beta,
" is ", err)
if dtype == dtypes.float32:
self.assertLess(err, 1e-4)
else:
self.assertLess(err, 1.0)
def testGradients(self):
for _ in range(2):
self._RunAndVerifyGradients(dtypes.float32)
# Enable when LRN supports tf.float16 on GPU.
if not test.is_gpu_available():
self._RunAndVerifyGradients(dtypes.float16)
if __name__ == "__main__":
test.main()
| apache-2.0 |
gautamkrishnar/hatter | venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/etree.py | 310 | 4625 | from __future__ import absolute_import, division, unicode_literals
try:
from collections import OrderedDict
except ImportError:
try:
from ordereddict import OrderedDict
except ImportError:
OrderedDict = dict
import gettext
_ = gettext.gettext
import re
from pip._vendor.six import text_type
from . import _base
from ..utils import moduleFactoryFactory
tag_regexp = re.compile("{([^}]*)}(.*)")
def getETreeBuilder(ElementTreeImplementation):
ElementTree = ElementTreeImplementation
ElementTreeCommentType = ElementTree.Comment("asd").tag
class TreeWalker(_base.NonRecursiveTreeWalker):
"""Given the particular ElementTree representation, this implementation,
to avoid using recursion, returns "nodes" as tuples with the following
content:
1. The current element
2. The index of the element relative to its parent
3. A stack of ancestor elements
4. A flag "text", "tail" or None to indicate if the current node is a
text node; either the text or tail of the current element (1)
"""
def getNodeDetails(self, node):
if isinstance(node, tuple): # It might be the root Element
elt, key, parents, flag = node
if flag in ("text", "tail"):
return _base.TEXT, getattr(elt, flag)
else:
node = elt
if not(hasattr(node, "tag")):
node = node.getroot()
if node.tag in ("DOCUMENT_ROOT", "DOCUMENT_FRAGMENT"):
return (_base.DOCUMENT,)
elif node.tag == "<!DOCTYPE>":
return (_base.DOCTYPE, node.text,
node.get("publicId"), node.get("systemId"))
elif node.tag == ElementTreeCommentType:
return _base.COMMENT, node.text
else:
assert type(node.tag) == text_type, type(node.tag)
# This is assumed to be an ordinary element
match = tag_regexp.match(node.tag)
if match:
namespace, tag = match.groups()
else:
namespace = None
tag = node.tag
attrs = OrderedDict()
for name, value in list(node.attrib.items()):
match = tag_regexp.match(name)
if match:
attrs[(match.group(1), match.group(2))] = value
else:
attrs[(None, name)] = value
return (_base.ELEMENT, namespace, tag,
attrs, len(node) or node.text)
def getFirstChild(self, node):
if isinstance(node, tuple):
element, key, parents, flag = node
else:
element, key, parents, flag = node, None, [], None
if flag in ("text", "tail"):
return None
else:
if element.text:
return element, key, parents, "text"
elif len(element):
parents.append(element)
return element[0], 0, parents, None
else:
return None
def getNextSibling(self, node):
if isinstance(node, tuple):
element, key, parents, flag = node
else:
return None
if flag == "text":
if len(element):
parents.append(element)
return element[0], 0, parents, None
else:
return None
else:
if element.tail and flag != "tail":
return element, key, parents, "tail"
elif key < len(parents[-1]) - 1:
return parents[-1][key + 1], key + 1, parents, None
else:
return None
def getParentNode(self, node):
if isinstance(node, tuple):
element, key, parents, flag = node
else:
return None
if flag == "text":
if not parents:
return element
else:
return element, key, parents, None
else:
parent = parents.pop()
if not parents:
return parent
else:
return parent, list(parents[-1]).index(parent), parents, None
return locals()
getETreeModule = moduleFactoryFactory(getETreeBuilder)
| gpl-3.0 |
jmartinm/invenio-master | modules/websubmit/lib/websubmitadmincli.py | 21 | 21304 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
WebSubmitAdmin CLI tool.
Usage: websubmitadmin [options]
Options:
-v, --verbose Verbose level (0=min, 2=default, 3=max).
-h, --help Prints this help
-d, --dump=DOCTYPE Dump given DOCTYPE from database
-c, --clean={y|n} Create dump that includes lines to remove
submission from database before
insertion (`y', default) or not (`n'). Default 'y'
-n, --no-fail-insert Create dump that does not fail when inserting
duplicate rows
-f, --diff=DOCTYPE Diff given DOCTYPE from database with standard input
-i, --ignore={d|o|p} Ignore some differences (d=date, o=order, p=page). Use with --diff
-m, --method=METHOD Type of dumps: NAMES (default) or RELATIONS:
- NAMES: includes functions and elements (including
definitions) with a name starting with doctype,
even if not used by the submission. Might then miss
functions and elements (mostly ``generic'' ones) and
add some unwanted elements.
- RELATIONS: include all functions and elements used
by the submission. Might leave aside
elements that are defined, but not
used.
Dump submission:
Eg: websubmitadmin --dump=DEMOART > DEMOART_db_dump.sql
Dump submission including all used functions and elements definitions:
Eg: websubmitadmin --dump=DEMOART -m relations > DEMOART_db_dump.sql
Diff submission with given dump:
Eg: websubmitadmin --diff=DEMOART < DEMOART_db_dump.sql
Diff between the latest version in the 'master' branch of your Git repo
and the version in the database:
Eg: git show master:websubmit/DEMOART_db_dump.sql | ../websubmitadmin --diff=DEMOART | less -S
Diff between CVS version and submission in database, ignoring dates
and ordering of submission fields on the page:
Eg: cvs update -p DEMOART_db_dump.sql | ./websubmitadmin -i d,o --diff=DEMOART | less -S
"""
__revision__ = "$Id$"
import os
import sys
import getopt
import difflib
import re
import time
import tempfile
from MySQLdb.converters import conversions
from MySQLdb import escape, escape_string
from invenio.config import CFG_PREFIX, CFG_TMPDIR
from invenio.dbquery import run_sql
from invenio.shellutils import run_shell_command
CFG_WEBSUBMIT_DUMPER_DEFAULT_METHOD = "NAMES"
CFG_WEBSUBMIT_DUMPER_DB_SCHEMA_VERSION = 1
def dump_submission(doctype, method=None, include_cleaning=True,
ignore_duplicate_insert=False):
"""Returns a .sql dump of submission with given doctype"""
def build_table_dump(table_name, rows_with_desc, ignore_duplicate_insert):
"Build a dump-like output from the given table and rows"
table_dump = ''
for row in rows_with_desc[0]:
table_dump += 'INSERT%s INTO %s VALUES (%s);\n' % \
(ignore_duplicate_insert and ' IGNORE' or '',
table_name,
','.join([escape(column, conversions) for column in row]))
return table_dump
if not method:
method = CFG_WEBSUBMIT_DUMPER_DEFAULT_METHOD
dump_header = "-- %s dump %s v%i\n" % (doctype,
time.strftime("%Y-%m-%d %H:%M:%S"),
CFG_WEBSUBMIT_DUMPER_DB_SCHEMA_VERSION)
if method == "NAMES":
dump_header += "-- Extra:NAMES (the following dump contains rows in sbmALLFUNCDESCR, sbmFUNDESC, sbmFIELD and sbmFIELDDESC tables which are not specific to this submission, but that include keyword %s)\n" % doctype
elif method == "RELATIONS":
dump_header += "-- Extra:RELATIONS (the following dump contains rows in sbmALLFUNCDESCR, sbmFUNDESC, sbmFIELD and sbmFIELDDESC tables that are not specific to doctype %s\n" % doctype
else:
dump_header += "-- Extra:None (the following dump only has rows specific to submission %s i.e. does not contains rows from sbmALLFUNCDESCR, sbmFUNDESC, sbmFIELD and sbmFIELDDESC tables\n" % doctype
if include_cleaning:
if method == 'NAMES':
dump_header += """
DELETE FROM sbmFUNDESC WHERE function LIKE '%(doctype)s%%';
DELETE FROM sbmFIELD WHERE subname LIKE '%%%(doctype)s';
DELETE FROM sbmFIELDDESC WHERE name LIKE '%(doctype)s%%';
DELETE FROM sbmALLFUNCDESCR WHERE function LIKE '%(doctype)s%%';
""" % {'doctype': escape_string(doctype)}
elif method == "RELATIONS":
dump_header += """
DELETE sbmALLFUNCDESCR.* FROM sbmALLFUNCDESCR, sbmFUNCTIONS WHERE sbmALLFUNCDESCR.function=sbmFUNCTIONS.function and sbmFUNCTIONS.doctype='%(doctype)s';
DELETE sbmFUNDESC.* FROM sbmFUNDESC, sbmFUNCTIONS WHERE sbmFUNDESC.function=sbmFUNCTIONS.function and sbmFUNCTIONS.doctype='%(doctype)s';
DELETE sbmFIELDDESC.* FROM sbmFIELDDESC, sbmFIELD, sbmIMPLEMENT WHERE sbmFIELD.fidesc=sbmFIELDDESC.name AND sbmFIELD.subname=sbmIMPLEMENT.subname AND sbmIMPLEMENT.docname='%(doctype)s';
DELETE sbmFIELD.* FROM sbmFIELD, sbmIMPLEMENT WHERE sbmFIELD.subname=sbmIMPLEMENT.subname AND sbmIMPLEMENT.docname='%(doctype)s';
""" % {'doctype': escape_string(doctype)}
dump_header += """DELETE FROM sbmDOCTYPE WHERE sdocname='%(doctype)s';
DELETE FROM sbmCATEGORIES WHERE doctype ='%(doctype)s';
DELETE FROM sbmFUNCTIONS WHERE doctype='%(doctype)s';
DELETE FROM sbmIMPLEMENT WHERE docname='%(doctype)s';
DELETE FROM sbmPARAMETERS WHERE doctype='%(doctype)s';
""" % {'doctype': escape_string(doctype)}
dump_output = ''
res = run_sql('SELECT * FROM sbmDOCTYPE WHERE sdocname=%s', (doctype,), with_desc=1)
dump_output += build_table_dump('sbmDOCTYPE', res, ignore_duplicate_insert)
res = run_sql('SELECT * FROM sbmCATEGORIES WHERE doctype=%s', (doctype,), with_desc=1)
dump_output += build_table_dump('sbmCATEGORIES', res, ignore_duplicate_insert)
# res = run_sql("SELECT * FROM sbmFIELD WHERE subname like '%%%s'" % (escape_string(doctype),), with_desc=1)
# dump_output += build_table_dump('sbmFIELD', res)
# res = run_sql("SELECT * FROM sbmFIELDDESC WHERE name like '%s%%'" % (escape_string(doctype),), with_desc=1)
# dump_output += build_table_dump('sbmFIELDDESC', res)
res = run_sql('SELECT * FROM sbmFUNCTIONS WHERE doctype=%s', (doctype,), with_desc=1)
dump_output += build_table_dump('sbmFUNCTIONS', res, ignore_duplicate_insert)
res = run_sql('SELECT * FROM sbmIMPLEMENT WHERE docname=%s', (doctype,), with_desc=1)
dump_output += build_table_dump('sbmIMPLEMENT', res, ignore_duplicate_insert)
res = run_sql('SELECT * FROM sbmPARAMETERS WHERE doctype=%s', (doctype,), with_desc=1)
dump_output += build_table_dump('sbmPARAMETERS', res, ignore_duplicate_insert)
if method == "NAMES":
res = run_sql("SELECT * FROM sbmALLFUNCDESCR WHERE function LIKE '%s%%'" % (escape_string(doctype),), with_desc=1)
dump_output += build_table_dump('sbmALLFUNCDESCR', res, ignore_duplicate_insert)
res = run_sql("SELECT * FROM sbmFUNDESC WHERE function LIKE '%s%%'" % (escape_string(doctype),), with_desc=1)
dump_output += build_table_dump('sbmFUNDESC', res, ignore_duplicate_insert)
res = run_sql("SELECT * FROM sbmFIELD WHERE subname LIKE '%%%s'" % (escape_string(doctype),), with_desc=1)
dump_output += build_table_dump('sbmFIELD', res, ignore_duplicate_insert)
res = run_sql("SELECT * FROM sbmFIELDDESC WHERE name LIKE '%s%%'" % (escape_string(doctype),), with_desc=1)
dump_output += build_table_dump('sbmFIELDDESC', res, ignore_duplicate_insert)
elif method == "RELATIONS":
res = run_sql("SELECT DISTINCT sbmALLFUNCDESCR.* FROM sbmALLFUNCDESCR, sbmFUNCTIONS WHERE sbmALLFUNCDESCR.function=sbmFUNCTIONS.function and sbmFUNCTIONS.doctype=%s", \
(doctype,), with_desc=1)
dump_output += build_table_dump('sbmALLFUNCDESCR', res, ignore_duplicate_insert)
res = run_sql("SELECT DISTINCT sbmFUNDESC.* FROM sbmFUNDESC, sbmFUNCTIONS WHERE sbmFUNDESC.function=sbmFUNCTIONS.function and sbmFUNCTIONS.doctype=%s", \
(doctype,), with_desc=1)
dump_output += build_table_dump('sbmFUNDESC', res, ignore_duplicate_insert)
res = run_sql("SELECT DISTINCT sbmFIELD.* FROM sbmFIELD, sbmIMPLEMENT WHERE sbmFIELD.subname=sbmIMPLEMENT.subname AND sbmIMPLEMENT.docname=%s", \
(doctype,), with_desc=1)
dump_output += build_table_dump('sbmFIELD', res, ignore_duplicate_insert)
# check:
res = run_sql("SELECT DISTINCT sbmFIELDDESC.* FROM sbmFIELDDESC, sbmFIELD, sbmIMPLEMENT WHERE sbmFIELD.fidesc=sbmFIELDDESC.name AND sbmFIELD.subname=sbmIMPLEMENT.subname AND sbmIMPLEMENT.docname=%s", \
(doctype,), with_desc=1)
#res = run_sql("SELECT DISTINCT sbmFIELDDESC.* FROM sbmFIELDDESC, sbmFIELD, sbmIMPLEMENT WHERE sbmFIELD.fidesc=sbmFIELDDESC.name AND sbmFIELDDESC.name=sbmIMPLEMENT.subname AND sbmIMPLEMENT.docname=%s", \
# (doctype,), with_desc=1)
dump_output += build_table_dump('sbmFIELDDESC', res, ignore_duplicate_insert)
# Sort
dump_output_lines = dump_output.splitlines()
dump_output_lines.sort()
return dump_header + '\n'.join(dump_output_lines)
def remove_submission(doctype, method=CFG_WEBSUBMIT_DUMPER_DEFAULT_METHOD):
"Remove submission from database"
# NOT TESTED
if method == "NAMES":
run_sql("DELETE FROM sbmFUNDESC WHERE function LIKE '%s%%'" % (doctype,))
run_sql("DELETE FROM sbmFIELD WHERE subname LIKE '%%%s'" % (doctype,))
run_sql("DELETE FROM sbmFIELDDESC WHERE name LIKE '%s%%'" % (doctype,))
run_sql("DELETE FROM sbmALLFUNCDESCR WHERE function LIKE '%s%%'" % (doctype,))
elif method == "RELATIONS":
run_sql("DELETE sbmALLFUNCDESCR.* FROM sbmALLFUNCDESCR, sbmFUNCTIONS WHERE sbmALLFUNCDESCR.function=sbmFUNCTIONS.function and sbmFUNCTIONS.doctype=%s", (doctype,))
run_sql("DELETE sbmFUNDESC.* FROM sbmFUNDESC, sbmFUNCTIONS WHERE sbmFUNDESC.function=sbmFUNCTIONS.function and sbmFUNCTIONS.doctype=%s", (doctype,))
run_sql("DELETE sbmFIELDDESC.* FROM sbmFIELDDESC, sbmFIELD, sbmIMPLEMENT WHERE sbmFIELD.fidesc=sbmFIELDDESC.name AND sbmFIELD.subname=sbmIMPLEMENT.subname AND sbmIMPLEMENT.docname=%s", (doctype,))
run_sql("DELETE sbmFIELD.* FROM sbmFIELD, sbmIMPLEMENT WHERE sbmFIELD.subname=sbmIMPLEMENT.subname AND sbmIMPLEMENT.docname=%s", (doctype,))
run_sql("DELETE FROM sbmDOCTYPE WHERE sdocname=%s", (doctype,))
run_sql("DELETE FROM sbmCATEGORIES WHERE doctype=%s", (doctype,))
run_sql("DELETE FROM sbmFUNCTIONS WHERE doctype=%s", (doctype,))
run_sql("DELETE FROM sbmIMPLEMENT WHERE docname=%s", (doctype,))
run_sql("DELETE FROM sbmPARAMETERS WHERE doctype=%s", (doctype,))
re_method_pattern = re.compile(r"-- Extra:(?P<method>\S*)\s")
def load_submission(doctype, dump, method=None):
"Insert submission into database. Return tuple(error code, msg)"
# NOT TESTED
messages = []
def guess_dump_method(dump):
"""Guess which method was used to dump this file (i.e. if it contains all the submission rows or not)"""
match_obj = re_method_pattern.search(dump)
if match_obj:
return match_obj.group('method')
else:
return None
def guess_dump_has_delete_statements(dump):
"""Guess if given submission dump already contain delete statements"""
return "DELETE FROM sbmDOCTYPE WHERE sdocname".lower() in dump.lower()
if not method:
method = guess_dump_method(dump)
if method is None:
method = CFG_WEBSUBMIT_DUMPER_DEFAULT_METHOD
messages.append("WARNING: method could not be guessed. Using method %s" % method)
else:
messages.append("Used method %s to load data" % method)
(dump_code, dump_path) = tempfile.mkstemp(prefix=doctype, dir=CFG_TMPDIR)
dump_fd = open(dump_path, 'w')
dump_fd.write(dump)
dump_fd.close()
# We need to remove the submission. But let's create a backup first.
submission_backup = dump_submission(doctype, method)
submission_backup_path = "%s_db_dump%s.sql" % (doctype, time.strftime("%Y%m%d_%H%M%S"))
fd = file(os.path.join(CFG_TMPDIR, submission_backup_path), "w")
fd.write(submission_backup)
fd.close()
if not guess_dump_has_delete_statements(dump):
remove_submission(doctype, method)
# Load the dump
(exit_code, out_msg, err_msg) = run_shell_command("%s/bin/dbexec < %s", (CFG_PREFIX, os.path.abspath(dump_path)))
if exit_code:
messages.append("ERROR: failed to load submission:" + err_msg)
return (1, messages)
messages.append("Submission loaded. Previous submission saved to %s" % os.path.join(CFG_TMPDIR, submission_backup_path))
return (0, messages)
def diff_submission(submission1_dump, submission2_dump, verbose=2,
ignore_dates=False, ignore_positions=False, ignore_pages=False):
"Output diff between submissions"
def clean_line(line, ignore_dates, ignore_positions, ignore_pages):
"Clean one line of the submission"
updated_line = line
if ignore_dates:
if line.startswith('INSERT INTO sbmFIELD VALUES'):
args = updated_line.split(",")
args[-3] = ''
args[-4] = ''
updated_line = ','.join(args)
elif line.startswith('INSERT INTO sbmFIELDDESC VALUES'):
args = updated_line.split(",")
args[-4] = ''
args[-5] = ''
updated_line = ','.join(args)
elif line.startswith('INSERT INTO sbmIMPLEMENT VALUES '):
args = updated_line.split(",")
args[-6] = ''
args[-7] = ''
updated_line = ','.join(args)
if ignore_positions:
if line.startswith('INSERT INTO sbmFIELD VALUES'):
args = updated_line.split(",")
args[2] = ''
updated_line = ','.join(args)
if ignore_pages:
if line.startswith('INSERT INTO sbmFIELD VALUES'):
args = updated_line.split(",")
args[1] = ''
updated_line = ','.join(args)
if line.startswith('INSERT INTO sbmIMPLEMENT VALUES '):
args = updated_line.split(",")
args[4] = ''
updated_line = ','.join(args)
return updated_line
file1 = [line.strip() for line in submission1_dump.splitlines() if line]
file2 = [line.strip() for line in submission2_dump.splitlines() if line]
file1 = [clean_line(line, ignore_dates, ignore_positions, ignore_pages) for line in file1]
file2 = [clean_line(line, ignore_dates, ignore_positions, ignore_pages) for line in file2]
file1.sort()
file2.sort()
d = difflib.Differ()
result = d.compare(file2, file1)
result = [line for line in result if not line.startswith(' ')]
if verbose > 1:
result = [line.rstrip().replace('? ', ' ', 1) for line in result]
else:
result = [line for line in result if not line.startswith('? ')]
return '\n'.join(result)
def usage(exitcode=1, msg=""):
"Print usage"
print __doc__
print msg
sys.exit(exitcode)
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], "hv:i:d:l:f:r:m:c:n",
["help",
"verbose=",
"ignore=",
"dump=",
"load=",
"diff=",
"remove=",
"method=",
"clean=",
"no-fail-insert",
"yes-i-know"])
except getopt.GetoptError, err:
print err
usage(1)
_ignore_date = False
_ignore_position = False
_ignore_page = False
_doctype = None
_verbose = 2
_action = None
_method = None
_clean = True
_no_fail_insert = False
_yes_i_know = False
try:
for opt in opts:
if opt[0] in ["-h", "--help"]:
usage()
elif opt[0] in ["-v", "--verbose"]:
_verbose = opt[1]
elif opt[0] in ["-m", "--method"]:
_method = opt[1].upper()
if not _method in ["NAMES", "RELATIONS"]:
usage("Parameter --method must be 'NAMES' or 'RELATIONS'")
elif opt[0] in ["-c", "--clean"]:
_clean = opt[1].lower()
if not _clean in ["y", "n"]:
usage("Parameter --clean must be 'y' or 'n'")
_clean = _clean == 'y' and True or False
elif opt[0] in ["-n", "--no-fail-insert"]:
_no_fail_insert = True
elif opt[0] in ["--yes-i-know"]:
_yes_i_know = True
elif opt[0] in ["-i", "--ignore"]:
ignore = opt[1].split(',')
if 'd' in ignore:
_ignore_date = True
if 'p' in ignore:
_ignore_page = True
if 'o' in ignore:
_ignore_position = True
elif opt[0] in ["-d", "--dump"]:
if _action:
usage("Choose only one action among --dump, --load, --diff and --remove")
_action = 'dump'
_doctype = opt[1]
elif opt[0] in ["-l", "--load"]:
if _action:
usage("Choose only one action among --dump, --load, --diff and --remove")
_action = 'load'
_doctype = opt[1]
elif opt[0] in ["-f", "--diff"]:
if _action:
usage("Choose only one action among --dump, --load, --diff and --remove")
_action = 'diff'
_doctype = opt[1]
elif opt[0] in ["-r", "--remove"]:
if _action:
usage("Choose only one action among --dump, --load, --diff and --remove")
_action = 'remove'
_doctype = opt[1]
except StandardError, _exception:
print _exception
usage(1)
if not _action:
usage(1, 'You must specify an action among --dump, --load, --diff and --remove')
if not _doctype:
usage(1, 'You must specify a doctype')
if _action == 'dump':
print dump_submission(doctype=_doctype,
method=_method,
include_cleaning=_clean,
ignore_duplicate_insert=_no_fail_insert)
elif _action == 'load':
if _yes_i_know:
input_stream = sys.stdin.read()
(code, messages) = load_submission(doctype=_doctype, dump=input_stream, method=_method)
print '\n'.join(messages)
sys.exit(code)
else:
print "Loading submission dumps using this tool is experimental. Please use 'dbexec' instead, or run with '--yes-i-know' if you really want to proceed."
sys.exit(1)
elif _action == 'diff':
if not sys.stdin.isatty():
input_stream = sys.stdin.read()
dump1 = dump_submission(doctype=_doctype,
method=_method,
include_cleaning=_clean,
ignore_duplicate_insert=_no_fail_insert)
print diff_submission(dump1, input_stream, _verbose, _ignore_date, _ignore_position, _ignore_page)
elif _action == 'remove':
if not _method:
usage(1, 'You must specify option --method')
if _yes_i_know:
remove_submission(doctype=_doctype, method=_method)
else:
print "Removing submissions using this tool is experimental. Run with '--yes-i-know' if you really want to proceed."
sys.exit(1)
if __name__ == '__main__':
main()
| gpl-2.0 |
martonw/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests.py | 113 | 20678 | # Copyright (C) 2010 Google Inc. All rights reserved.
# Copyright (C) 2010 Gabor Rapcsanyi ([email protected]), University of Szeged
# Copyright (C) 2011 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import optparse
import os
import signal
import sys
import traceback
from webkitpy.common.host import Host
from webkitpy.layout_tests.controllers.manager import Manager
from webkitpy.port import configuration_options, platform_options
from webkitpy.layout_tests.views import buildbot_results
from webkitpy.layout_tests.views import printing
_log = logging.getLogger(__name__)
# This mirrors what the shell normally does.
INTERRUPTED_EXIT_STATUS = signal.SIGINT + 128
# This is a randomly chosen exit code that can be tested against to
# indicate that an unexpected exception occurred.
EXCEPTIONAL_EXIT_STATUS = 254
def main(argv, stdout, stderr):
options, args = parse_args(argv)
if options.platform and 'test' in options.platform:
# It's a bit lame to import mocks into real code, but this allows the user
# to run tests against the test platform interactively, which is useful for
# debugging test failures.
from webkitpy.common.host_mock import MockHost
host = MockHost()
else:
host = Host()
if options.lint_test_files:
from webkitpy.layout_tests.lint_test_expectations import lint
return lint(host, options, stderr)
try:
port = host.port_factory.get(options.platform, options)
except NotImplementedError, e:
# FIXME: is this the best way to handle unsupported port names?
print >> stderr, str(e)
return EXCEPTIONAL_EXIT_STATUS
try:
run_details = run(port, options, args, stderr)
if run_details.exit_code != -1:
bot_printer = buildbot_results.BuildBotPrinter(stdout, options.debug_rwt_logging)
bot_printer.print_results(run_details)
return run_details.exit_code
except KeyboardInterrupt:
return INTERRUPTED_EXIT_STATUS
except BaseException as e:
if isinstance(e, Exception):
print >> stderr, '\n%s raised: %s' % (e.__class__.__name__, str(e))
traceback.print_exc(file=stderr)
return EXCEPTIONAL_EXIT_STATUS
def parse_args(args):
option_group_definitions = []
option_group_definitions.append(("Platform options", platform_options()))
option_group_definitions.append(("Configuration options", configuration_options()))
option_group_definitions.append(("Printing Options", printing.print_options()))
option_group_definitions.append(("EFL-specific Options", [
optparse.make_option("--webprocess-cmd-prefix", type="string",
default=False, help="Prefix used when spawning the Web process (Debug mode only)"),
]))
option_group_definitions.append(("WebKit Options", [
optparse.make_option("--gc-between-tests", action="store_true", default=False,
help="Force garbage collection between each test"),
optparse.make_option("--complex-text", action="store_true", default=False,
help="Use the complex text code path for all text (Mac OS X and Windows only)"),
optparse.make_option("-l", "--leaks", action="store_true", default=False,
help="Enable leaks checking (Mac OS X only)"),
optparse.make_option("-g", "--guard-malloc", action="store_true", default=False,
help="Enable Guard Malloc (Mac OS X only)"),
optparse.make_option("--threaded", action="store_true", default=False,
help="Run a concurrent JavaScript thread with each test"),
optparse.make_option("--webkit-test-runner", "-2", action="store_true",
help="Use WebKitTestRunner rather than DumpRenderTree."),
# FIXME: We should merge this w/ --build-directory and only have one flag.
optparse.make_option("--root", action="store",
help="Path to a directory containing the executables needed to run tests."),
]))
option_group_definitions.append(("Results Options", [
optparse.make_option("-p", "--pixel", "--pixel-tests", action="store_true",
dest="pixel_tests", help="Enable pixel-to-pixel PNG comparisons"),
optparse.make_option("--no-pixel", "--no-pixel-tests", action="store_false",
dest="pixel_tests", help="Disable pixel-to-pixel PNG comparisons"),
optparse.make_option("--no-sample-on-timeout", action="store_false",
dest="sample_on_timeout", help="Don't run sample on timeout (Mac OS X only)"),
optparse.make_option("--no-ref-tests", action="store_true",
dest="no_ref_tests", help="Skip all ref tests"),
optparse.make_option("--tolerance",
help="Ignore image differences less than this percentage (some "
"ports may ignore this option)", type="float"),
optparse.make_option("--results-directory", help="Location of test results"),
optparse.make_option("--build-directory",
help="Path to the directory under which build files are kept (should not include configuration)"),
optparse.make_option("--add-platform-exceptions", action="store_true", default=False,
help="Save generated results into the *most-specific-platform* directory rather than the *generic-platform* directory"),
optparse.make_option("--new-baseline", action="store_true",
default=False, help="Save generated results as new baselines "
"into the *most-specific-platform* directory, overwriting whatever's "
"already there. Equivalent to --reset-results --add-platform-exceptions"),
optparse.make_option("--reset-results", action="store_true",
default=False, help="Reset expectations to the "
"generated results in their existing location."),
optparse.make_option("--no-new-test-results", action="store_false",
dest="new_test_results", default=True,
help="Don't create new baselines when no expected results exist"),
#FIXME: we should support a comma separated list with --pixel-test-directory as well.
optparse.make_option("--pixel-test-directory", action="append", default=[], dest="pixel_test_directories",
help="A directory where it is allowed to execute tests as pixel tests. "
"Specify multiple times to add multiple directories. "
"This option implies --pixel-tests. If specified, only those tests "
"will be executed as pixel tests that are located in one of the "
"directories enumerated with the option. Some ports may ignore this "
"option while others can have a default value that can be overridden here."),
optparse.make_option("--skip-failing-tests", action="store_true",
default=False, help="Skip tests that are expected to fail. "
"Note: When using this option, you might miss new crashes "
"in these tests."),
optparse.make_option("--additional-drt-flag", action="append",
default=[], help="Additional command line flag to pass to DumpRenderTree "
"Specify multiple times to add multiple flags."),
optparse.make_option("--driver-name", type="string",
help="Alternative DumpRenderTree binary to use"),
optparse.make_option("--additional-platform-directory", action="append",
default=[], help="Additional directory where to look for test "
"baselines (will take precendence over platform baselines). "
"Specify multiple times to add multiple search path entries."),
optparse.make_option("--additional-expectations", action="append", default=[],
help="Path to a test_expectations file that will override previous expectations. "
"Specify multiple times for multiple sets of overrides."),
optparse.make_option("--compare-port", action="store", default=None,
help="Use the specified port's baselines first"),
optparse.make_option("--no-show-results", action="store_false",
default=True, dest="show_results",
help="Don't launch a browser with results after the tests "
"are done"),
optparse.make_option("--full-results-html", action="store_true",
default=False,
help="Show all failures in results.html, rather than only regressions"),
optparse.make_option("--clobber-old-results", action="store_true",
default=False, help="Clobbers test results from previous runs."),
optparse.make_option("--http", action="store_true", dest="http",
default=True, help="Run HTTP and WebSocket tests (default)"),
optparse.make_option("--no-http", action="store_false", dest="http",
help="Don't run HTTP and WebSocket tests"),
optparse.make_option("--ignore-metrics", action="store_true", dest="ignore_metrics",
default=False, help="Ignore rendering metrics related information from test "
"output, only compare the structure of the rendertree."),
optparse.make_option("--nocheck-sys-deps", action="store_true",
default=False,
help="Don't check the system dependencies (themes)"),
optparse.make_option("--nojava", action="store_true",
default=False,
help="Don't build java support files"),
]))
option_group_definitions.append(("Testing Options", [
optparse.make_option("--build", dest="build",
action="store_true", default=True,
help="Check to ensure the DumpRenderTree build is up-to-date "
"(default)."),
optparse.make_option("--no-build", dest="build",
action="store_false", help="Don't check to see if the "
"DumpRenderTree build is up-to-date."),
optparse.make_option("-n", "--dry-run", action="store_true",
default=False,
help="Do everything but actually run the tests or upload results."),
optparse.make_option("--wrapper",
help="wrapper command to insert before invocations of "
"DumpRenderTree; option is split on whitespace before "
"running. (Example: --wrapper='valgrind --smc-check=all')"),
optparse.make_option("-i", "--ignore-tests", action="append", default=[],
help="directories or test to ignore (may specify multiple times)"),
optparse.make_option("--test-list", action="append",
help="read list of tests to run from file", metavar="FILE"),
optparse.make_option("--skipped", action="store", default="default",
help=("control how tests marked SKIP are run. "
"'default' == Skip tests unless explicitly listed on the command line, "
"'ignore' == Run them anyway, "
"'only' == only run the SKIP tests, "
"'always' == always skip, even if listed on the command line.")),
optparse.make_option("--force", dest="skipped", action="store_const", const='ignore',
help="Run all tests, even those marked SKIP in the test list (same as --skipped=ignore)"),
optparse.make_option("--time-out-ms",
help="Set the timeout for each test"),
optparse.make_option("--order", action="store", default="natural",
help=("determine the order in which the test cases will be run. "
"'none' == use the order in which the tests were listed either in arguments or test list, "
"'natural' == use the natural order (default), "
"'random' == randomize the test order.")),
optparse.make_option("--run-chunk",
help=("Run a specified chunk (n:l), the nth of len l, "
"of the layout tests")),
optparse.make_option("--run-part", help=("Run a specified part (n:m), "
"the nth of m parts, of the layout tests")),
optparse.make_option("--batch-size",
help=("Run a the tests in batches (n), after every n tests, "
"DumpRenderTree is relaunched."), type="int", default=None),
optparse.make_option("--run-singly", action="store_true",
default=False, help="run a separate DumpRenderTree for each test (implies --verbose)"),
optparse.make_option("--child-processes",
help="Number of DumpRenderTrees to run in parallel."),
# FIXME: Display default number of child processes that will run.
optparse.make_option("-f", "--fully-parallel", action="store_true",
help="run all tests in parallel"),
optparse.make_option("--exit-after-n-failures", type="int", default=None,
help="Exit after the first N failures instead of running all "
"tests"),
optparse.make_option("--exit-after-n-crashes-or-timeouts", type="int",
default=None, help="Exit after the first N crashes instead of "
"running all tests"),
optparse.make_option("--iterations", type="int", default=1, help="Number of times to run the set of tests (e.g. ABCABCABC)"),
optparse.make_option("--repeat-each", type="int", default=1, help="Number of times to run each test (e.g. AAABBBCCC)"),
optparse.make_option("--retry-failures", action="store_true",
default=True,
help="Re-try any tests that produce unexpected results (default)"),
optparse.make_option("--no-retry-failures", action="store_false",
dest="retry_failures",
help="Don't re-try any tests that produce unexpected results."),
optparse.make_option("--max-locked-shards", type="int", default=0,
help="Set the maximum number of locked shards"),
optparse.make_option("--additional-env-var", type="string", action="append", default=[],
help="Passes that environment variable to the tests (--additional-env-var=NAME=VALUE)"),
optparse.make_option("--profile", action="store_true",
help="Output per-test profile information."),
optparse.make_option("--profiler", action="store",
help="Output per-test profile information, using the specified profiler."),
]))
option_group_definitions.append(("Miscellaneous Options", [
optparse.make_option("--lint-test-files", action="store_true",
default=False, help=("Makes sure the test files parse for all "
"configurations. Does not run any tests.")),
]))
# FIXME: Move these into json_results_generator.py
option_group_definitions.append(("Result JSON Options", [
optparse.make_option("--master-name", help="The name of the buildbot master."),
optparse.make_option("--builder-name", default="",
help=("The name of the builder shown on the waterfall running "
"this script e.g. WebKit.")),
optparse.make_option("--build-name", default="DUMMY_BUILD_NAME",
help=("The name of the builder used in its path, e.g. "
"webkit-rel.")),
optparse.make_option("--build-number", default="DUMMY_BUILD_NUMBER",
help=("The build number of the builder running this script.")),
optparse.make_option("--test-results-server", default="",
help=("If specified, upload results json files to this appengine "
"server.")),
]))
option_parser = optparse.OptionParser()
for group_name, group_options in option_group_definitions:
option_group = optparse.OptionGroup(option_parser, group_name)
option_group.add_options(group_options)
option_parser.add_option_group(option_group)
return option_parser.parse_args(args)
def _set_up_derived_options(port, options):
"""Sets the options values that depend on other options values."""
if not options.child_processes:
options.child_processes = os.environ.get("WEBKIT_TEST_CHILD_PROCESSES",
str(port.default_child_processes()))
if not options.max_locked_shards:
options.max_locked_shards = int(os.environ.get("WEBKIT_TEST_MAX_LOCKED_SHARDS",
str(port.default_max_locked_shards())))
if not options.configuration:
options.configuration = port.default_configuration()
if options.pixel_tests is None:
options.pixel_tests = port.default_pixel_tests()
if not options.time_out_ms:
options.time_out_ms = str(port.default_timeout_ms())
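    # Slow tests are given five times the regular per-test timeout.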
options.slow_time_out_ms = str(5 * int(options.time_out_ms))
if options.additional_platform_directory:
additional_platform_directories = []
for path in options.additional_platform_directory:
additional_platform_directories.append(port.host.filesystem.abspath(path))
options.additional_platform_directory = additional_platform_directories
if not options.http and options.skipped in ('ignore', 'only'):
_log.warning("--force/--skipped=%s overrides --no-http." % (options.skipped))
options.http = True
if options.ignore_metrics and (options.new_baseline or options.reset_results):
_log.warning("--ignore-metrics has no effect with --new-baselines or with --reset-results")
if options.new_baseline:
options.reset_results = True
options.add_platform_exceptions = True
if options.pixel_test_directories:
options.pixel_tests = True
        verified_dirs = set()
pixel_test_directories = options.pixel_test_directories
for directory in pixel_test_directories:
# FIXME: we should support specifying the directories all the ways we support it for additional
# arguments specifying which tests and directories to run. We should also move the logic for that
# to Port.
filesystem = port.host.filesystem
if not filesystem.isdir(filesystem.join(port.layout_tests_dir(), directory)):
_log.warning("'%s' was passed to --pixel-test-directories, which doesn't seem to be a directory" % str(directory))
else:
                verified_dirs.add(directory)
        options.pixel_test_directories = list(verified_dirs)
if options.run_singly:
options.verbose = True
def run(port, options, args, logging_stream):
logger = logging.getLogger()
logger.setLevel(logging.DEBUG if options.debug_rwt_logging else logging.INFO)
    # Create the printer outside the try block so the finally clause can
    # safely call cleanup() even if later setup raises.
    printer = printing.Printer(port, options, logging_stream, logger=logger)
    try:
_set_up_derived_options(port, options)
manager = Manager(port, options, printer)
printer.print_config(port.results_directory())
run_details = manager.run(args)
_log.debug("Testing completed, Exit status: %d" % run_details.exit_code)
return run_details
finally:
printer.cleanup()
if __name__ == '__main__':
sys.exit(main(sys.argv[1:], sys.stdout, sys.stderr))
| bsd-3-clause |
seize-the-dave/XlsxWriter | xlsxwriter/test/comparison/test_chart_column03.py | 8 | 1586 | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, [email protected]
#
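# NB: 'excel_comparsion_test' is the actual (misspelled) module filename in
# XlsxWriter's test suite, so the import below must keep the typo.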
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'chart_column03.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = []
self.ignore_elements = {}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'column', 'subtype': 'percent_stacked'})
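        # Pin the axis IDs so the generated chart XML matches the reference file.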
chart.axis_ids = [49388544, 69387008]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column('A1', data[0])
worksheet.write_column('B1', data[1])
worksheet.write_column('C1', data[2])
chart.add_series({'values': '=Sheet1!$A$1:$A$5'})
chart.add_series({'values': '=Sheet1!$B$1:$B$5'})
chart.add_series({'values': '=Sheet1!$C$1:$C$5'})
worksheet.insert_chart('E9', chart)
workbook.close()
self.assertExcelEqual()
| bsd-2-clause |
dorotan/pythontraining | env/Lib/site-packages/pip/_vendor/html5lib/treebuilders/etree_lxml.py | 353 | 14161 | """Module for supporting the lxml.etree library. The idea here is to use as much
of the native library as possible, without using fragile hacks like custom element
names that break between releases. The downside of this is that we cannot represent
all possible trees; specifically the following are known to cause problems:
Text or comments as siblings of the root element
Doctypes with no name
When any of these things occur, we emit a DataLossWarning
"""
from __future__ import absolute_import, division, unicode_literals
# pylint:disable=protected-access
import warnings
import re
import sys
from . import base
from ..constants import DataLossWarning
from .. import constants
from . import etree as etree_builders
from .. import _ihatexml
import lxml.etree as etree
fullTree = True
tag_regexp = re.compile("{([^}]*)}(.*)")
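# lxml comment elements expose the etree.Comment factory itself as their tag;
# capture it once so comment nodes can be recognised by comparison.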
comment_type = etree.Comment("asd").tag
class DocumentType(object):
def __init__(self, name, publicId, systemId):
self.name = name
self.publicId = publicId
self.systemId = systemId
class Document(object):
def __init__(self):
self._elementTree = None
self._childNodes = []
def appendChild(self, element):
self._elementTree.getroot().addnext(element._element)
def _getChildNodes(self):
return self._childNodes
childNodes = property(_getChildNodes)
def testSerializer(element):
rv = []
infosetFilter = _ihatexml.InfosetFilter(preventDoubleDashComments=True)
def serializeElement(element, indent=0):
if not hasattr(element, "tag"):
if hasattr(element, "getroot"):
# Full tree case
rv.append("#document")
if element.docinfo.internalDTD:
if not (element.docinfo.public_id or
element.docinfo.system_url):
dtd_str = "<!DOCTYPE %s>" % element.docinfo.root_name
else:
dtd_str = """<!DOCTYPE %s "%s" "%s">""" % (
element.docinfo.root_name,
element.docinfo.public_id,
element.docinfo.system_url)
rv.append("|%s%s" % (' ' * (indent + 2), dtd_str))
next_element = element.getroot()
while next_element.getprevious() is not None:
next_element = next_element.getprevious()
while next_element is not None:
serializeElement(next_element, indent + 2)
next_element = next_element.getnext()
elif isinstance(element, str) or isinstance(element, bytes):
# Text in a fragment
assert isinstance(element, str) or sys.version_info[0] == 2
rv.append("|%s\"%s\"" % (' ' * indent, element))
else:
# Fragment case
rv.append("#document-fragment")
for next_element in element:
serializeElement(next_element, indent + 2)
elif element.tag == comment_type:
rv.append("|%s<!-- %s -->" % (' ' * indent, element.text))
if hasattr(element, "tail") and element.tail:
rv.append("|%s\"%s\"" % (' ' * indent, element.tail))
else:
assert isinstance(element, etree._Element)
nsmatch = etree_builders.tag_regexp.match(element.tag)
if nsmatch is not None:
ns = nsmatch.group(1)
tag = nsmatch.group(2)
prefix = constants.prefixes[ns]
rv.append("|%s<%s %s>" % (' ' * indent, prefix,
infosetFilter.fromXmlName(tag)))
else:
rv.append("|%s<%s>" % (' ' * indent,
infosetFilter.fromXmlName(element.tag)))
if hasattr(element, "attrib"):
attributes = []
for name, value in element.attrib.items():
nsmatch = tag_regexp.match(name)
if nsmatch is not None:
ns, name = nsmatch.groups()
name = infosetFilter.fromXmlName(name)
prefix = constants.prefixes[ns]
attr_string = "%s %s" % (prefix, name)
else:
attr_string = infosetFilter.fromXmlName(name)
attributes.append((attr_string, value))
for name, value in sorted(attributes):
rv.append('|%s%s="%s"' % (' ' * (indent + 2), name, value))
if element.text:
rv.append("|%s\"%s\"" % (' ' * (indent + 2), element.text))
indent += 2
for child in element:
serializeElement(child, indent)
if hasattr(element, "tail") and element.tail:
rv.append("|%s\"%s\"" % (' ' * (indent - 2), element.tail))
serializeElement(element, 0)
return "\n".join(rv)
def tostring(element):
"""Serialize an element and its child nodes to a string"""
rv = []
def serializeElement(element):
if not hasattr(element, "tag"):
if element.docinfo.internalDTD:
if element.docinfo.doctype:
dtd_str = element.docinfo.doctype
else:
dtd_str = "<!DOCTYPE %s>" % element.docinfo.root_name
rv.append(dtd_str)
serializeElement(element.getroot())
elif element.tag == comment_type:
rv.append("<!--%s-->" % (element.text,))
else:
# This is assumed to be an ordinary element
if not element.attrib:
rv.append("<%s>" % (element.tag,))
else:
attr = " ".join(["%s=\"%s\"" % (name, value)
for name, value in element.attrib.items()])
rv.append("<%s %s>" % (element.tag, attr))
if element.text:
rv.append(element.text)
for child in element:
serializeElement(child)
rv.append("</%s>" % (element.tag,))
if hasattr(element, "tail") and element.tail:
rv.append(element.tail)
serializeElement(element)
return "".join(rv)
class TreeBuilder(base.TreeBuilder):
documentClass = Document
doctypeClass = DocumentType
elementClass = None
commentClass = None
fragmentClass = Document
implementation = etree
def __init__(self, namespaceHTMLElements, fullTree=False):
builder = etree_builders.getETreeModule(etree, fullTree=fullTree)
infosetFilter = self.infosetFilter = _ihatexml.InfosetFilter(preventDoubleDashComments=True)
self.namespaceHTMLElements = namespaceHTMLElements
class Attributes(dict):
def __init__(self, element, value=None):
if value is None:
value = {}
self._element = element
dict.__init__(self, value) # pylint:disable=non-parent-init-called
for key, value in self.items():
if isinstance(key, tuple):
name = "{%s}%s" % (key[2], infosetFilter.coerceAttribute(key[1]))
else:
name = infosetFilter.coerceAttribute(key)
self._element._element.attrib[name] = value
def __setitem__(self, key, value):
dict.__setitem__(self, key, value)
if isinstance(key, tuple):
name = "{%s}%s" % (key[2], infosetFilter.coerceAttribute(key[1]))
else:
name = infosetFilter.coerceAttribute(key)
self._element._element.attrib[name] = value
class Element(builder.Element):
def __init__(self, name, namespace):
name = infosetFilter.coerceElement(name)
builder.Element.__init__(self, name, namespace=namespace)
self._attributes = Attributes(self)
def _setName(self, name):
self._name = infosetFilter.coerceElement(name)
self._element.tag = self._getETreeTag(
self._name, self._namespace)
def _getName(self):
return infosetFilter.fromXmlName(self._name)
name = property(_getName, _setName)
def _getAttributes(self):
return self._attributes
def _setAttributes(self, attributes):
self._attributes = Attributes(self, attributes)
attributes = property(_getAttributes, _setAttributes)
def insertText(self, data, insertBefore=None):
data = infosetFilter.coerceCharacters(data)
builder.Element.insertText(self, data, insertBefore)
def appendChild(self, child):
builder.Element.appendChild(self, child)
class Comment(builder.Comment):
def __init__(self, data):
data = infosetFilter.coerceComment(data)
builder.Comment.__init__(self, data)
def _setData(self, data):
data = infosetFilter.coerceComment(data)
self._element.text = data
def _getData(self):
return self._element.text
data = property(_getData, _setData)
self.elementClass = Element
self.commentClass = Comment
# self.fragmentClass = builder.DocumentFragment
base.TreeBuilder.__init__(self, namespaceHTMLElements)
def reset(self):
base.TreeBuilder.reset(self)
self.insertComment = self.insertCommentInitial
self.initial_comments = []
self.doctype = None
def testSerializer(self, element):
return testSerializer(element)
def getDocument(self):
if fullTree:
return self.document._elementTree
else:
return self.document._elementTree.getroot()
def getFragment(self):
fragment = []
element = self.openElements[0]._element
if element.text:
fragment.append(element.text)
fragment.extend(list(element))
if element.tail:
fragment.append(element.tail)
return fragment
def insertDoctype(self, token):
name = token["name"]
publicId = token["publicId"]
systemId = token["systemId"]
if not name:
warnings.warn("lxml cannot represent empty doctype", DataLossWarning)
self.doctype = None
else:
coercedName = self.infosetFilter.coerceElement(name)
if coercedName != name:
warnings.warn("lxml cannot represent non-xml doctype", DataLossWarning)
doctype = self.doctypeClass(coercedName, publicId, systemId)
self.doctype = doctype
def insertCommentInitial(self, data, parent=None):
assert parent is None or parent is self.document
assert self.document._elementTree is None
self.initial_comments.append(data)
def insertCommentMain(self, data, parent=None):
if (parent == self.document and
self.document._elementTree.getroot()[-1].tag == comment_type):
warnings.warn("lxml cannot represent adjacent comments beyond the root elements", DataLossWarning)
super(TreeBuilder, self).insertComment(data, parent)
def insertRoot(self, token):
"""Create the document root"""
# Because of the way libxml2 works, it doesn't seem to be possible to
# alter information like the doctype after the tree has been parsed.
# Therefore we need to use the built-in parser to create our initial
# tree, after which we can add elements like normal
docStr = ""
if self.doctype:
assert self.doctype.name
docStr += "<!DOCTYPE %s" % self.doctype.name
if (self.doctype.publicId is not None or
self.doctype.systemId is not None):
docStr += (' PUBLIC "%s" ' %
(self.infosetFilter.coercePubid(self.doctype.publicId or "")))
if self.doctype.systemId:
sysid = self.doctype.systemId
if sysid.find("'") >= 0 and sysid.find('"') >= 0:
warnings.warn("DOCTYPE system cannot contain single and double quotes", DataLossWarning)
sysid = sysid.replace("'", 'U00027')
if sysid.find("'") >= 0:
docStr += '"%s"' % sysid
else:
docStr += "'%s'" % sysid
else:
docStr += "''"
docStr += ">"
if self.doctype.name != token["name"]:
warnings.warn("lxml cannot represent doctype with a different name to the root element", DataLossWarning)
docStr += "<THIS_SHOULD_NEVER_APPEAR_PUBLICLY/>"
root = etree.fromstring(docStr)
# Append the initial comments:
for comment_token in self.initial_comments:
comment = self.commentClass(comment_token["data"])
root.addprevious(comment._element)
# Create the root document and add the ElementTree to it
self.document = self.documentClass()
self.document._elementTree = root.getroottree()
# Give the root element the right name
name = token["name"]
namespace = token.get("namespace", self.defaultNamespace)
if namespace is None:
etree_tag = name
else:
etree_tag = "{%s}%s" % (namespace, name)
root.tag = etree_tag
# Add the root element to the internal child/open data structures
root_element = self.elementClass(name, namespace)
root_element._element = root
self.document._childNodes.append(root_element)
self.openElements.append(root_element)
# Reset to the default insert comment function
self.insertComment = self.insertCommentMain
| apache-2.0 |
huiyi1990/grab | grab/util/module.py | 12 | 2512 | import logging
import six
from grab.spider.base import Spider
from grab.spider.error import SpiderInternalError
from grab.util.config import build_root_config, build_spider_config
SPIDER_REGISTRY = {}
logger = logging.getLogger('grab.util.module')
def build_spider_registry(config):
SPIDER_REGISTRY.clear()
    opt_modules = config['global'].get('spider_modules', [])
for path in opt_modules:
if ':' in path:
path, cls_name = path.split(':')
else:
cls_name = None
try:
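            # A non-empty fromlist makes __import__ return the leaf module
            # rather than the top-level package.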
mod = __import__(path, None, None, ['foo'])
except ImportError as ex:
if path not in six.text_type(ex):
logging.error('', exc_info=ex)
else:
for key in dir(mod):
if key == 'Spider':
continue
if cls_name is None or key == cls_name:
val = getattr(mod, key)
if isinstance(val, type) and issubclass(val, Spider):
                        if not val.Meta.abstract:
spider_name = val.get_spider_name()
logger.debug(
'Module `%s`, found spider `%s` '
'with name `%s`' % (
path, val.__name__, spider_name))
if spider_name in SPIDER_REGISTRY:
                                existing_mod = SPIDER_REGISTRY[spider_name].__module__
                                raise SpiderInternalError(
                                    'There are two different spiders with '
                                    'the same name "%s". '
                                    'Modules: %s and %s' % (
                                        spider_name, existing_mod, val.__module__))
else:
SPIDER_REGISTRY[spider_name] = val
return SPIDER_REGISTRY
def load_spider_class(config, spider_name):
if not SPIDER_REGISTRY:
build_spider_registry(config)
if spider_name not in SPIDER_REGISTRY:
raise SpiderInternalError('Unknown spider: %s' % spider_name)
else:
return SPIDER_REGISTRY[spider_name]
def build_spider_instance(cls, settings_module, **kwargs):
root_config = build_root_config(settings_module)
spider_config = build_spider_config(cls, root_config)
return cls(config=spider_config)
| mit |
gameduell/duell | bin/mac/python2.7.9/lib/python2.7/encodings/iso8859_11.py | 593 | 12591 | """ Python Character Mapping Codec iso8859_11 generated from 'MAPPINGS/ISO8859/8859-11.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='iso8859-11',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\x80' # 0x80 -> <control>
u'\x81' # 0x81 -> <control>
u'\x82' # 0x82 -> <control>
u'\x83' # 0x83 -> <control>
u'\x84' # 0x84 -> <control>
u'\x85' # 0x85 -> <control>
u'\x86' # 0x86 -> <control>
u'\x87' # 0x87 -> <control>
u'\x88' # 0x88 -> <control>
u'\x89' # 0x89 -> <control>
u'\x8a' # 0x8A -> <control>
u'\x8b' # 0x8B -> <control>
u'\x8c' # 0x8C -> <control>
u'\x8d' # 0x8D -> <control>
u'\x8e' # 0x8E -> <control>
u'\x8f' # 0x8F -> <control>
u'\x90' # 0x90 -> <control>
u'\x91' # 0x91 -> <control>
u'\x92' # 0x92 -> <control>
u'\x93' # 0x93 -> <control>
u'\x94' # 0x94 -> <control>
u'\x95' # 0x95 -> <control>
u'\x96' # 0x96 -> <control>
u'\x97' # 0x97 -> <control>
u'\x98' # 0x98 -> <control>
u'\x99' # 0x99 -> <control>
u'\x9a' # 0x9A -> <control>
u'\x9b' # 0x9B -> <control>
u'\x9c' # 0x9C -> <control>
u'\x9d' # 0x9D -> <control>
u'\x9e' # 0x9E -> <control>
u'\x9f' # 0x9F -> <control>
u'\xa0' # 0xA0 -> NO-BREAK SPACE
u'\u0e01' # 0xA1 -> THAI CHARACTER KO KAI
u'\u0e02' # 0xA2 -> THAI CHARACTER KHO KHAI
u'\u0e03' # 0xA3 -> THAI CHARACTER KHO KHUAT
u'\u0e04' # 0xA4 -> THAI CHARACTER KHO KHWAI
u'\u0e05' # 0xA5 -> THAI CHARACTER KHO KHON
u'\u0e06' # 0xA6 -> THAI CHARACTER KHO RAKHANG
u'\u0e07' # 0xA7 -> THAI CHARACTER NGO NGU
u'\u0e08' # 0xA8 -> THAI CHARACTER CHO CHAN
u'\u0e09' # 0xA9 -> THAI CHARACTER CHO CHING
u'\u0e0a' # 0xAA -> THAI CHARACTER CHO CHANG
u'\u0e0b' # 0xAB -> THAI CHARACTER SO SO
u'\u0e0c' # 0xAC -> THAI CHARACTER CHO CHOE
u'\u0e0d' # 0xAD -> THAI CHARACTER YO YING
u'\u0e0e' # 0xAE -> THAI CHARACTER DO CHADA
u'\u0e0f' # 0xAF -> THAI CHARACTER TO PATAK
u'\u0e10' # 0xB0 -> THAI CHARACTER THO THAN
u'\u0e11' # 0xB1 -> THAI CHARACTER THO NANGMONTHO
u'\u0e12' # 0xB2 -> THAI CHARACTER THO PHUTHAO
u'\u0e13' # 0xB3 -> THAI CHARACTER NO NEN
u'\u0e14' # 0xB4 -> THAI CHARACTER DO DEK
u'\u0e15' # 0xB5 -> THAI CHARACTER TO TAO
u'\u0e16' # 0xB6 -> THAI CHARACTER THO THUNG
u'\u0e17' # 0xB7 -> THAI CHARACTER THO THAHAN
u'\u0e18' # 0xB8 -> THAI CHARACTER THO THONG
u'\u0e19' # 0xB9 -> THAI CHARACTER NO NU
u'\u0e1a' # 0xBA -> THAI CHARACTER BO BAIMAI
u'\u0e1b' # 0xBB -> THAI CHARACTER PO PLA
u'\u0e1c' # 0xBC -> THAI CHARACTER PHO PHUNG
u'\u0e1d' # 0xBD -> THAI CHARACTER FO FA
u'\u0e1e' # 0xBE -> THAI CHARACTER PHO PHAN
u'\u0e1f' # 0xBF -> THAI CHARACTER FO FAN
u'\u0e20' # 0xC0 -> THAI CHARACTER PHO SAMPHAO
u'\u0e21' # 0xC1 -> THAI CHARACTER MO MA
u'\u0e22' # 0xC2 -> THAI CHARACTER YO YAK
u'\u0e23' # 0xC3 -> THAI CHARACTER RO RUA
u'\u0e24' # 0xC4 -> THAI CHARACTER RU
u'\u0e25' # 0xC5 -> THAI CHARACTER LO LING
u'\u0e26' # 0xC6 -> THAI CHARACTER LU
u'\u0e27' # 0xC7 -> THAI CHARACTER WO WAEN
u'\u0e28' # 0xC8 -> THAI CHARACTER SO SALA
u'\u0e29' # 0xC9 -> THAI CHARACTER SO RUSI
u'\u0e2a' # 0xCA -> THAI CHARACTER SO SUA
u'\u0e2b' # 0xCB -> THAI CHARACTER HO HIP
u'\u0e2c' # 0xCC -> THAI CHARACTER LO CHULA
u'\u0e2d' # 0xCD -> THAI CHARACTER O ANG
u'\u0e2e' # 0xCE -> THAI CHARACTER HO NOKHUK
u'\u0e2f' # 0xCF -> THAI CHARACTER PAIYANNOI
u'\u0e30' # 0xD0 -> THAI CHARACTER SARA A
u'\u0e31' # 0xD1 -> THAI CHARACTER MAI HAN-AKAT
u'\u0e32' # 0xD2 -> THAI CHARACTER SARA AA
u'\u0e33' # 0xD3 -> THAI CHARACTER SARA AM
u'\u0e34' # 0xD4 -> THAI CHARACTER SARA I
u'\u0e35' # 0xD5 -> THAI CHARACTER SARA II
u'\u0e36' # 0xD6 -> THAI CHARACTER SARA UE
u'\u0e37' # 0xD7 -> THAI CHARACTER SARA UEE
u'\u0e38' # 0xD8 -> THAI CHARACTER SARA U
u'\u0e39' # 0xD9 -> THAI CHARACTER SARA UU
u'\u0e3a' # 0xDA -> THAI CHARACTER PHINTHU
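    # 0xDB-0xDE are undefined in ISO 8859-11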
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\u0e3f' # 0xDF -> THAI CURRENCY SYMBOL BAHT
u'\u0e40' # 0xE0 -> THAI CHARACTER SARA E
u'\u0e41' # 0xE1 -> THAI CHARACTER SARA AE
u'\u0e42' # 0xE2 -> THAI CHARACTER SARA O
u'\u0e43' # 0xE3 -> THAI CHARACTER SARA AI MAIMUAN
u'\u0e44' # 0xE4 -> THAI CHARACTER SARA AI MAIMALAI
u'\u0e45' # 0xE5 -> THAI CHARACTER LAKKHANGYAO
u'\u0e46' # 0xE6 -> THAI CHARACTER MAIYAMOK
u'\u0e47' # 0xE7 -> THAI CHARACTER MAITAIKHU
u'\u0e48' # 0xE8 -> THAI CHARACTER MAI EK
u'\u0e49' # 0xE9 -> THAI CHARACTER MAI THO
u'\u0e4a' # 0xEA -> THAI CHARACTER MAI TRI
u'\u0e4b' # 0xEB -> THAI CHARACTER MAI CHATTAWA
u'\u0e4c' # 0xEC -> THAI CHARACTER THANTHAKHAT
u'\u0e4d' # 0xED -> THAI CHARACTER NIKHAHIT
u'\u0e4e' # 0xEE -> THAI CHARACTER YAMAKKAN
u'\u0e4f' # 0xEF -> THAI CHARACTER FONGMAN
u'\u0e50' # 0xF0 -> THAI DIGIT ZERO
u'\u0e51' # 0xF1 -> THAI DIGIT ONE
u'\u0e52' # 0xF2 -> THAI DIGIT TWO
u'\u0e53' # 0xF3 -> THAI DIGIT THREE
u'\u0e54' # 0xF4 -> THAI DIGIT FOUR
u'\u0e55' # 0xF5 -> THAI DIGIT FIVE
u'\u0e56' # 0xF6 -> THAI DIGIT SIX
u'\u0e57' # 0xF7 -> THAI DIGIT SEVEN
u'\u0e58' # 0xF8 -> THAI DIGIT EIGHT
u'\u0e59' # 0xF9 -> THAI DIGIT NINE
u'\u0e5a' # 0xFA -> THAI CHARACTER ANGKHANKHU
u'\u0e5b' # 0xFB -> THAI CHARACTER KHOMUT
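    # 0xFC-0xFF are undefined in ISO 8859-11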
u'\ufffe'
u'\ufffe'
u'\ufffe'
u'\ufffe'
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
| bsd-2-clause |
xiaoixa/python | ddkangfu/0003/0003.py | 40 | 1351 | #coding=utf-8
import uuid
import redis
"""
003, 将 0001 题生成的 200 个激活码(或者优惠券)保存到 **Redis** 非关系型数据库中.
"""
def get_redis_instance(host='localhost', port=6379):
return redis.StrictRedis(host=host, port=port)
def generate_activation_code(count):
code_list = []
for i in xrange(count):
code = str(uuid.uuid4()).replace('-', '').upper()
if not code in code_list:
code_list.append(code)
return code_list
def store_to_redise(codes):
if codes:
cache = get_redis_instance()
try:
cache.set('code:count', len(codes))
for i in xrange(len(codes)):
cache.set('code:{0}'.format(i), codes[i])
cache.save()
return True
except:
print 'Can not connect to redis server !!!'
return False
def print_activation_code():
cache = get_redis_instance()
try:
count = cache.get('code:count')
count = 0 if count is None else int(count)
for i in xrange(count):
print cache.get('code:%d'%i)
except:
print 'Can not connect to redis server !!!'
if __name__ == "__main__":
code_list = generate_activation_code(200)
if store_to_redise(code_list):
print_activation_code()
| mit |
cmoutard/mne-python | logo/generate_mne_logos.py | 12 | 6091 | # -*- coding: utf-8 -*-
"""
===============================================================================
Script 'mne logo'
===============================================================================
This script makes the logo for MNE.
"""
# @author: drmccloy
# Created on Mon Jul 20 11:28:16 2015
# License: BSD (3-clause)
import numpy as np
import os.path as op
import matplotlib.pyplot as plt
from matplotlib import rcParams
from matplotlib.mlab import bivariate_normal
from matplotlib.path import Path
from matplotlib.text import TextPath
from matplotlib.patches import PathPatch
from matplotlib.colors import LinearSegmentedColormap
from matplotlib.transforms import Bbox
# manually set values
dpi = 72.
center_fudge = np.array([2, 0]) # compensate for font bounding box padding
tagline_scale_fudge = 0.98 # to get justification right
tagline_offset_fudge = np.array([0.4, 0])
static_dir = op.join('..', 'doc', '_static')
# font, etc
rcp = {'font.sans-serif': ['Primetime'], 'font.style': 'normal',
'font.weight': 'black', 'font.variant': 'normal', 'figure.dpi': dpi,
'savefig.dpi': dpi, 'contour.negative_linestyle': 'solid'}
plt.rcdefaults()
rcParams.update(rcp)
# initialize figure (no axes, margins, etc)
fig = plt.figure(1, figsize=(5, 3), frameon=False, dpi=dpi)
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
fig.add_axes(ax)
# fake field data
delta = 0.1
x = np.arange(-8.0, 8.0, delta)
y = np.arange(-3.0, 3.0, delta)
X, Y = np.meshgrid(x, y)
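# The weighted difference of two offset Gaussians mimics a dipolar field map.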
Z1 = bivariate_normal(X, Y, 8.0, 7.0, -5.0, 0.9, 1.0)
Z2 = bivariate_normal(X, Y, 15.0, 2.5, 2.6, -2.5, 2.5)
Z = Z2 - 0.7 * Z1
# color map: field gradient (yellow-red-transparent-blue-cyan)
yrtbc = {'red': ((0.0, 1.0, 1.0), (0.5, 1.0, 0.0), (1.0, 0.0, 0.0)),
'blue': ((0.0, 0.0, 0.0), (0.5, 0.0, 1.0), (1.0, 1.0, 1.0)),
'green': ((0.0, 1.0, 1.0), (0.5, 0.0, 0.0), (1.0, 1.0, 1.0)),
'alpha': ((0.0, 1.0, 1.0), (0.4, 0.8, 0.8), (0.5, 0.2, 0.2),
(0.6, 0.8, 0.8), (1.0, 1.0, 1.0))}
# color map: field lines (red | blue)
redbl = {'red': ((0., 1., 1.), (0.5, 1., 0.), (1., 0., 0.)),
'blue': ((0., 0., 0.), (0.5, 0., 1.), (1., 1., 1.)),
'green': ((0., 0., 0.), (1., 0., 0.)),
'alpha': ((0., 0.4, 0.4), (1., 0.4, 0.4))}
mne_field_grad_cols = LinearSegmentedColormap('mne_grad', yrtbc)
mne_field_line_cols = LinearSegmentedColormap('mne_line', redbl)
# plot gradient and contour lines
im = plt.imshow(Z, cmap=mne_field_grad_cols, aspect='equal')
cs = plt.contour(Z, 9, cmap=mne_field_line_cols, linewidths=1)
plot_dims = np.r_[np.diff(ax.get_xbound()), np.diff(ax.get_ybound())]
# create MNE clipping mask
mne_path = TextPath((0, 0), 'MNE')
dims = mne_path.vertices.max(0) - mne_path.vertices.min(0)
vert = mne_path.vertices - dims / 2.
mult = (plot_dims / dims).min()
mult = [mult, -mult] # y axis is inverted (origin at top left)
offset = plot_dims / 2. - center_fudge
mne_clip = Path(offset + vert * mult, mne_path.codes)
# apply clipping mask to field gradient and lines
im.set_clip_path(mne_clip, transform=im.get_transform())
for coll in cs.collections:
coll.set_clip_path(mne_clip, transform=im.get_transform())
# get final position of clipping mask
mne_corners = mne_clip.get_extents().corners()
# add tagline
rcParams.update({'font.sans-serif': ['Cooper Hewitt'], 'font.weight': 'light'})
tag_path = TextPath((0, 0), 'MEG + EEG ANALYSIS & VISUALIZATION')
dims = tag_path.vertices.max(0) - tag_path.vertices.min(0)
vert = tag_path.vertices - dims / 2.
mult = tagline_scale_fudge * (plot_dims / dims).min()
mult = [mult, -mult] # y axis is inverted
offset = mne_corners[-1] - np.array([mne_clip.get_extents().size[0] / 2.,
-dims[1]]) - tagline_offset_fudge
tag_clip = Path(offset + vert * mult, tag_path.codes)
tag_patch = PathPatch(tag_clip, facecolor='k', edgecolor='none', zorder=10)
ax.add_patch(tag_patch)
yl = ax.get_ylim()
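# Crop the bottom of the figure to just below the tagline (the y axis is inverted).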
yy = np.max([tag_clip.vertices.max(0)[-1],
tag_clip.vertices.min(0)[-1]])
ax.set_ylim(np.ceil(yy), yl[-1])
# only save actual image extent plus a bit of padding
extent = Bbox(np.c_[ax.get_xlim(), ax.get_ylim()])
extent = extent.transformed(ax.transData + fig.dpi_scale_trans.inverted())
plt.draw()
plt.savefig(op.join(static_dir, 'mne_logo.png'),
bbox_inches=extent.expanded(1.2, 1.))
plt.close()
# 92x22 image
w_px = 92
h_px = 22
center_fudge = np.array([12, 0.5])
scale_fudge = 2.1
rcParams.update({'font.sans-serif': ['Primetime'], 'font.weight': 'black'})
x = np.linspace(-8., 8., w_px / 2.)
y = np.linspace(-3., 3., h_px / 2.)
X, Y = np.meshgrid(x, y)
# initialize figure (no axes, margins, etc)
fig = plt.figure(1, figsize=(w_px / dpi, h_px / dpi), frameon=False, dpi=dpi)
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
fig.add_axes(ax)
# plot rainbow
im = plt.imshow(X, cmap=mne_field_grad_cols, aspect='equal')
plot_dims = np.r_[np.diff(ax.get_xbound()), np.diff(ax.get_ybound())]
# MNE text in white
mne_path = TextPath((0, 0), 'MNE')
dims = mne_path.vertices.max(0) - mne_path.vertices.min(0)
vert = mne_path.vertices - dims / 2.
mult = scale_fudge * (plot_dims / dims).min()
mult = [mult, -mult] # y axis is inverted (origin at top left)
offset = np.array([scale_fudge, 1.]) * \
np.array([-dims[0], plot_dims[-1]]) / 2. - center_fudge
mne_clip = Path(offset + vert * mult, mne_path.codes)
mne_patch = PathPatch(mne_clip, facecolor='w', edgecolor='none', zorder=10)
ax.add_patch(mne_patch)
# adjust xlim and ylim
mne_corners = mne_clip.get_extents().corners()
xmin, ymin = np.min(mne_corners, axis=0)
xmax, ymax = np.max(mne_corners, axis=0)
xl = ax.get_xlim()
yl = ax.get_ylim()
xpad = np.abs(np.diff([xmin, xl[1]])) / 20.
ypad = np.abs(np.diff([ymax, ymin])) / 20.
ax.set_xlim(xmin - xpad, xl[1] + xpad)
ax.set_ylim(ymax + ypad, ymin - ypad)
extent = Bbox(np.c_[ax.get_xlim(), ax.get_ylim()])
extent = extent.transformed(ax.transData + fig.dpi_scale_trans.inverted())
plt.draw()
plt.savefig(op.join(static_dir, 'mne_logo_small.png'), transparent=True,
bbox_inches=extent)
plt.close()
| bsd-3-clause |
JianyuWang/nova | nova/tests/unit/volume/encryptors/test_cryptsetup.py | 21 | 3918 | # Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import array
import mock
import six
from nova import exception
from nova.keymgr import key
from nova.tests.unit.volume.encryptors import test_base
from nova.volume.encryptors import cryptsetup
def fake__get_key(context):
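    # 64 hex zeros decode to 32 zero bytes, i.e. an all-zero 256-bit test key.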
raw = array.array('B', ('0' * 64).decode('hex')).tolist()
symmetric_key = key.SymmetricKey('AES', raw)
return symmetric_key
class CryptsetupEncryptorTestCase(test_base.VolumeEncryptorTestCase):
def _create(self, connection_info):
return cryptsetup.CryptsetupEncryptor(connection_info)
def setUp(self):
super(CryptsetupEncryptorTestCase, self).setUp()
self.dev_path = self.connection_info['data']['device_path']
self.dev_name = self.dev_path.split('/')[-1]
self.symlink_path = self.dev_path
@mock.patch('nova.utils.execute')
def test__open_volume(self, mock_execute):
self.encryptor._open_volume("passphrase")
mock_execute.assert_has_calls([
mock.call('cryptsetup', 'create', '--key-file=-', self.dev_name,
self.dev_path, process_input='passphrase',
run_as_root=True, check_exit_code=True),
])
self.assertEqual(1, mock_execute.call_count)
@mock.patch('nova.utils.execute')
def test_attach_volume(self, mock_execute):
self.encryptor._get_key = mock.MagicMock()
self.encryptor._get_key.return_value = fake__get_key(None)
self.encryptor.attach_volume(None)
mock_execute.assert_has_calls([
mock.call('cryptsetup', 'create', '--key-file=-', self.dev_name,
self.dev_path, process_input='0' * 32,
run_as_root=True, check_exit_code=True),
mock.call('ln', '--symbolic', '--force',
'/dev/mapper/%s' % self.dev_name, self.symlink_path,
run_as_root=True, check_exit_code=True),
])
self.assertEqual(2, mock_execute.call_count)
@mock.patch('nova.utils.execute')
def test__close_volume(self, mock_execute):
self.encryptor.detach_volume()
mock_execute.assert_has_calls([
mock.call('cryptsetup', 'remove', self.dev_name,
run_as_root=True, check_exit_code=True),
])
self.assertEqual(1, mock_execute.call_count)
@mock.patch('nova.utils.execute')
def test_detach_volume(self, mock_execute):
self.encryptor.detach_volume()
mock_execute.assert_has_calls([
mock.call('cryptsetup', 'remove', self.dev_name,
run_as_root=True, check_exit_code=True),
])
self.assertEqual(1, mock_execute.call_count)
def test_init_volume_encryption_not_supported(self):
# Tests that creating a CryptsetupEncryptor fails if there is no
# device_path key.
type = 'unencryptable'
data = dict(volume_id='a194699b-aa07-4433-a945-a5d23802043e')
connection_info = dict(driver_volume_type=type, data=data)
exc = self.assertRaises(exception.VolumeEncryptionNotSupported,
cryptsetup.CryptsetupEncryptor,
connection_info)
self.assertIn(type, six.text_type(exc))
| apache-2.0 |
Eric-Zhong/odoo | addons/website_membership/controllers/main.py | 115 | 9181 | # -*- coding: utf-8 -*-
from openerp import SUPERUSER_ID
from openerp.addons.web import http
from openerp.addons.web.http import request
from openerp.addons.website.models.website import unslug
from openerp.tools.translate import _
import werkzeug.urls
class WebsiteMembership(http.Controller):
_references_per_page = 20
@http.route([
'/members',
'/members/page/<int:page>',
'/members/association/<membership_id>',
'/members/association/<membership_id>/page/<int:page>',
'/members/country/<int:country_id>',
'/members/country/<country_name>-<int:country_id>',
'/members/country/<int:country_id>/page/<int:page>',
'/members/country/<country_name>-<int:country_id>/page/<int:page>',
'/members/association/<membership_id>/country/<country_name>-<int:country_id>',
'/members/association/<membership_id>/country/<int:country_id>',
'/members/association/<membership_id>/country/<country_name>-<int:country_id>/page/<int:page>',
'/members/association/<membership_id>/country/<int:country_id>/page/<int:page>',
], type='http', auth="public", website=True)
def members(self, membership_id=None, country_name=None, country_id=0, page=1, **post):
cr, uid, context = request.cr, request.uid, request.context
product_obj = request.registry['product.product']
country_obj = request.registry['res.country']
membership_line_obj = request.registry['membership.membership_line']
partner_obj = request.registry['res.partner']
post_name = post.get('name', '')
current_country = None
# base domain for groupby / searches
base_line_domain = [("partner.website_published", "=", True), ('state', 'in', ['free', 'paid'])]
if membership_id and membership_id != 'free':
membership_id = int(membership_id)
base_line_domain.append(('membership_id', '=', membership_id))
membership = product_obj.browse(cr, uid, membership_id, context=context)
else:
membership = None
if post_name:
base_line_domain += ['|', ('partner.name', 'ilike', post_name),
('partner.website_description', 'ilike', post_name)]
# group by country, based on all customers (base domain)
if membership_id != 'free':
membership_line_ids = membership_line_obj.search(cr, SUPERUSER_ID, base_line_domain, context=context)
country_domain = [('member_lines', 'in', membership_line_ids)]
else:
membership_line_ids = []
country_domain = [('membership_state', '=', 'free')]
if post_name:
country_domain += ['|', ('name', 'ilike', post_name),
('website_description', 'ilike', post_name)]
countries = partner_obj.read_group(
cr, SUPERUSER_ID, country_domain + [("website_published", "=", True)], ["id", "country_id"],
groupby="country_id", orderby="country_id", context=request.context)
countries_total = sum(country_dict['country_id_count'] for country_dict in countries)
line_domain = list(base_line_domain)
if country_id:
line_domain.append(('partner.country_id', '=', country_id))
current_country = country_obj.read(cr, uid, country_id, ['id', 'name'], context)
if not any(x['country_id'][0] == country_id for x in countries if x['country_id']):
countries.append({
'country_id_count': 0,
'country_id': (country_id, current_country["name"])
})
        countries = filter(lambda d: d['country_id'], countries)
countries.sort(key=lambda d: d['country_id'][1])
countries.insert(0, {
'country_id_count': countries_total,
'country_id': (0, _("All Countries"))
})
# format domain for group_by and memberships
membership_ids = product_obj.search(cr, uid, [('membership', '=', True)], order="website_sequence", context=context)
memberships = product_obj.browse(cr, uid, membership_ids, context=context)
        # make sure we don't access lines with unpublished memberships
line_domain.append(('membership_id', 'in', membership_ids))
limit = self._references_per_page
offset = limit * (page - 1)
count_members = 0
membership_line_ids = []
# displayed non-free membership lines
if membership_id != 'free':
count_members = membership_line_obj.search_count(cr, SUPERUSER_ID, line_domain, context=context)
if offset <= count_members:
membership_line_ids = tuple(membership_line_obj.search(cr, SUPERUSER_ID, line_domain, offset, limit, context=context))
membership_lines = membership_line_obj.browse(cr, uid, membership_line_ids, context=context)
# TODO: Following line can be deleted in master. Kept for retrocompatibility.
membership_lines = sorted(membership_lines, key=lambda x: x.membership_id.website_sequence)
page_partner_ids = set(m.partner.id for m in membership_lines)
google_map_partner_ids = []
if request.env.ref('website_membership.opt_index_google_map').customize_show:
membership_lines_ids = membership_line_obj.search(cr, uid, line_domain, context=context)
            google_map_partner_ids = membership_line_obj.get_published_companies(cr, uid, membership_lines_ids, limit=2000, context=context)
search_domain = [('membership_state', '=', 'free'), ('website_published', '=', True)]
if post_name:
search_domain += ['|', ('name', 'ilike', post_name), ('website_description', 'ilike', post_name)]
if country_id:
search_domain += [('country_id', '=', country_id)]
free_partner_ids = partner_obj.search(cr, SUPERUSER_ID, search_domain, context=context)
memberships_data = []
for membership_record in memberships:
memberships_data.append({'id': membership_record.id, 'name': membership_record.name})
memberships_partner_ids = {}
for line in membership_lines:
memberships_partner_ids.setdefault(line.membership_id.id, []).append(line.partner.id)
if free_partner_ids:
memberships_data.append({'id': 'free', 'name': _('Free Members')})
if not membership_id or membership_id == 'free':
if count_members < offset + limit:
free_start = max(offset - count_members, 0)
free_end = max(offset + limit - count_members, 0)
memberships_partner_ids['free'] = free_partner_ids[free_start:free_end]
page_partner_ids |= set(memberships_partner_ids['free'])
google_map_partner_ids += free_partner_ids[:2000-len(google_map_partner_ids)]
count_members += len(free_partner_ids)
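        # Worked example for the free-member fill above (illustrative numbers
        # only, not from the original source): with limit=20, page=2 (so
        # offset=20) and count_members=25 paid lines, the paid lines occupy
        # the first 5 rows of this page; free_start = max(20 - 25, 0) = 0 and
        # free_end = max(40 - 25, 0) = 15, so free_partner_ids[0:15] supplies
        # the remaining 15 rows.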
google_map_partner_ids = ",".join(map(str, google_map_partner_ids))
partners = { p.id: p for p in partner_obj.browse(request.cr, SUPERUSER_ID, list(page_partner_ids), request.context)}
base_url = '/members%s%s' % ('/association/%s' % membership_id if membership_id else '',
'/country/%s' % country_id if country_id else '')
# request pager for lines
pager = request.website.pager(url=base_url, total=count_members, page=page, step=limit, scope=7, url_args=post)
values = {
'partners': partners,
'membership_lines': membership_lines, # TODO: This line can be deleted in master. Kept for retrocompatibility.
'memberships': memberships, # TODO: This line too.
'membership': membership, # TODO: This line too.
'memberships_data': memberships_data,
'memberships_partner_ids': memberships_partner_ids,
'membership_id': membership_id,
'countries': countries,
'current_country': current_country and [current_country['id'], current_country['name']] or None,
'current_country_id': current_country and current_country['id'] or 0,
'google_map_partner_ids': google_map_partner_ids,
'pager': pager,
'post': post,
'search': "?%s" % werkzeug.url_encode(post),
}
return request.website.render("website_membership.index", values)
# Do not use semantic controller due to SUPERUSER_ID
@http.route(['/members/<partner_id>'], type='http', auth="public", website=True)
def partners_detail(self, partner_id, **post):
_, partner_id = unslug(partner_id)
if partner_id:
partner = request.registry['res.partner'].browse(request.cr, SUPERUSER_ID, partner_id, context=request.context)
if partner.exists() and partner.website_published:
values = {}
values['main_object'] = values['partner'] = partner
return request.website.render("website_membership.partner", values)
return self.members(**post)
| agpl-3.0 |
varunarya10/nova_test_latest | nova/tests/unit/virt/hyperv/test_networkutilsv2.py | 68 | 1724 | # Copyright 2013 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova.tests.unit.virt.hyperv import test_networkutils
from nova.virt.hyperv import networkutilsv2
class NetworkUtilsV2TestCase(test_networkutils.NetworkUtilsTestCase):
"""Unit tests for the Hyper-V NetworkUtilsV2 class."""
_MSVM_VIRTUAL_SWITCH = 'Msvm_VirtualEthernetSwitch'
def setUp(self):
super(NetworkUtilsV2TestCase, self).setUp()
self._networkutils = networkutilsv2.NetworkUtilsV2()
self._networkutils._conn = mock.MagicMock()
def _prepare_external_port(self, mock_vswitch, mock_ext_port):
mock_lep = mock_ext_port.associators()[0]
mock_lep1 = mock_lep.associators()[0]
mock_esw = mock_lep1.associators()[0]
mock_esw.associators.return_value = [mock_vswitch]
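        # The chained associators() calls above mirror the WMI association
        # traversal performed by the utils code under test (roughly: external
        # port -> LAN endpoint -> peer endpoint -> switch port -> virtual
        # switch); the mocks only need to reproduce the hops, not the actual
        # WMI class names.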
def test_create_vswitch_port(self):
self.assertRaises(
NotImplementedError,
self._networkutils.create_vswitch_port,
mock.sentinel.FAKE_VSWITCH_PATH,
mock.sentinel.FAKE_PORT_NAME)
def test_vswitch_port_needed(self):
self.assertFalse(self._networkutils.vswitch_port_needed())
| apache-2.0 |
cad-lab/blog | plugin/liquid_tags/b64img.py | 312 | 3085 | """
Image Tag
---------
This implements a Liquid-style image tag for Pelican,
based on the liquid img tag which is based on the octopress image tag [1]_
Syntax
------
{% b64img [class name(s)] [http[s]:/]/path/to/image [width [height]] [title text | "title text" ["alt text"]] %}
Examples
--------
{% b64img /images/ninja.png Ninja Attack! %}
{% b64img left half http://site.com/images/ninja.png Ninja Attack! %}
{% b64img left half http://site.com/images/ninja.png 150 150 "Ninja Attack!" "Ninja in attack posture" %}
Output
------
<img src="data:;base64,....">
<img class="left half" src="data:;base64,..." title="Ninja Attack!" alt="Ninja Attack!">
<img class="left half" src="data:;base64,..." width="150" height="150" title="Ninja Attack!" alt="Ninja in attack posture">
[1] https://github.com/imathis/octopress/blob/master/plugins/image_tag.rb
"""
import re
import base64
import urllib2
from .mdx_liquid_tags import LiquidTags
import six
SYNTAX = '{% b64img [class name(s)] [http[s]:/]/path/to/image [width [height]] [title text | "title text" ["alt text"]] %}'
# Regular expression to match the entire syntax
ReImg = re.compile("""(?P<class>\S.*\s+)?(?P<src>(?:https?:\/\/|\/|\S+\/)\S+)(?:\s+(?P<width>\d+))?(?:\s+(?P<height>\d+))?(?P<title>\s+.+)?""")
# Regular expression to split the title and alt text
ReTitleAlt = re.compile("""(?:"|')(?P<title>[^"']+)?(?:"|')\s+(?:"|')(?P<alt>[^"']+)?(?:"|')""")
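# Illustrative trace (not part of the original plugin): for the markup
#     left half http://site.com/images/ninja.png 150 150 "Ninja!" "posture"
# ReImg yields roughly class='left half ', src='http://site.com/images/ninja.png',
# width='150', height='150', title=' "Ninja!" "posture"', and ReTitleAlt then
# splits that trailing group into title='Ninja!' and alt='posture'.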
def _get_file(src):
""" Return content from local or remote file. """
try:
if '://' in src or src[0:2] == '//': # Most likely this is remote file
response = urllib2.urlopen(src)
return response.read()
else:
with open(src, 'rb') as fh:
return fh.read()
except Exception as e:
raise RuntimeError('Error generating base64image: {}'.format(e))
def base64image(src):
""" Generate base64 encoded image from srouce file. """
return base64.b64encode(_get_file(src))
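# Usage sketch (assumed file names, not from the original source):
#     base64image('images/ninja.png')           # read from local disk
#     base64image('http://site.com/ninja.png')  # fetched via urllib2
# Either way, the b64img tag below splices the result into a data URI such
# as 'data:;base64,iVBORw0KGgo...'.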
@LiquidTags.register('b64img')
def b64img(preprocessor, tag, markup):
attrs = None
# Parse the markup string
match = ReImg.search(markup)
if match:
attrs = dict([(key, val.strip())
for (key, val) in six.iteritems(match.groupdict()) if val])
else:
raise ValueError('Error processing input. '
'Expected syntax: {0}'.format(SYNTAX))
# Check if alt text is present -- if so, split it from title
if 'title' in attrs:
match = ReTitleAlt.search(attrs['title'])
if match:
attrs.update(match.groupdict())
if not attrs.get('alt'):
attrs['alt'] = attrs['title']
attrs['src'] = 'data:;base64,{}'.format(base64image(attrs['src']))
# Return the formatted text
return "<img {0}>".format(' '.join('{0}="{1}"'.format(key, val)
for (key, val) in six.iteritems(attrs)))
#----------------------------------------------------------------------
# This import allows image tag to be a Pelican plugin
from .liquid_tags import register
| agpl-3.0 |
giorgiop/scikit-learn | sklearn/metrics/tests/test_pairwise.py | 13 | 26241 | import numpy as np
from numpy import linalg
from scipy.sparse import dok_matrix, csr_matrix, issparse
from scipy.spatial.distance import cosine, cityblock, minkowski, wminkowski
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import ignore_warnings
from sklearn.externals.six import iteritems
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.metrics.pairwise import manhattan_distances
from sklearn.metrics.pairwise import linear_kernel
from sklearn.metrics.pairwise import chi2_kernel, additive_chi2_kernel
from sklearn.metrics.pairwise import polynomial_kernel
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics.pairwise import laplacian_kernel
from sklearn.metrics.pairwise import sigmoid_kernel
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics.pairwise import cosine_distances
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_distances_argmin_min
from sklearn.metrics.pairwise import pairwise_distances_argmin
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.metrics.pairwise import PAIRWISE_KERNEL_FUNCTIONS
from sklearn.metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from sklearn.metrics.pairwise import PAIRWISE_BOOLEAN_FUNCTIONS
from sklearn.metrics.pairwise import PAIRED_DISTANCES
from sklearn.metrics.pairwise import check_pairwise_arrays
from sklearn.metrics.pairwise import check_paired_arrays
from sklearn.metrics.pairwise import paired_distances
from sklearn.metrics.pairwise import paired_euclidean_distances
from sklearn.metrics.pairwise import paired_manhattan_distances
from sklearn.preprocessing import normalize
from sklearn.exceptions import DataConversionWarning
def test_pairwise_distances():
    # Test the pairwise_distances helper function.
rng = np.random.RandomState(0)
# Euclidean distance should be equivalent to calling the function.
X = rng.random_sample((5, 4))
S = pairwise_distances(X, metric="euclidean")
S2 = euclidean_distances(X)
assert_array_almost_equal(S, S2)
# Euclidean distance, with Y != X.
Y = rng.random_sample((2, 4))
S = pairwise_distances(X, Y, metric="euclidean")
S2 = euclidean_distances(X, Y)
assert_array_almost_equal(S, S2)
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
S2 = pairwise_distances(X_tuples, Y_tuples, metric="euclidean")
assert_array_almost_equal(S, S2)
# "cityblock" uses scikit-learn metric, cityblock (function) is
# scipy.spatial.
S = pairwise_distances(X, metric="cityblock")
S2 = pairwise_distances(X, metric=cityblock)
assert_equal(S.shape[0], S.shape[1])
assert_equal(S.shape[0], X.shape[0])
assert_array_almost_equal(S, S2)
# The manhattan metric should be equivalent to cityblock.
S = pairwise_distances(X, Y, metric="manhattan")
S2 = pairwise_distances(X, Y, metric=cityblock)
assert_equal(S.shape[0], X.shape[0])
assert_equal(S.shape[1], Y.shape[0])
assert_array_almost_equal(S, S2)
# Low-level function for manhattan can divide in blocks to avoid
# using too much memory during the broadcasting
S3 = manhattan_distances(X, Y, size_threshold=10)
assert_array_almost_equal(S, S3)
# Test cosine as a string metric versus cosine callable
# The string "cosine" uses sklearn.metric,
# while the function cosine is scipy.spatial
S = pairwise_distances(X, Y, metric="cosine")
S2 = pairwise_distances(X, Y, metric=cosine)
assert_equal(S.shape[0], X.shape[0])
assert_equal(S.shape[1], Y.shape[0])
assert_array_almost_equal(S, S2)
# Test with sparse X and Y,
# currently only supported for Euclidean, L1 and cosine.
X_sparse = csr_matrix(X)
Y_sparse = csr_matrix(Y)
S = pairwise_distances(X_sparse, Y_sparse, metric="euclidean")
S2 = euclidean_distances(X_sparse, Y_sparse)
assert_array_almost_equal(S, S2)
S = pairwise_distances(X_sparse, Y_sparse, metric="cosine")
S2 = cosine_distances(X_sparse, Y_sparse)
assert_array_almost_equal(S, S2)
S = pairwise_distances(X_sparse, Y_sparse.tocsc(), metric="manhattan")
S2 = manhattan_distances(X_sparse.tobsr(), Y_sparse.tocoo())
assert_array_almost_equal(S, S2)
S2 = manhattan_distances(X, Y)
assert_array_almost_equal(S, S2)
# Test with scipy.spatial.distance metric, with a kwd
kwds = {"p": 2.0}
S = pairwise_distances(X, Y, metric="minkowski", **kwds)
S2 = pairwise_distances(X, Y, metric=minkowski, **kwds)
assert_array_almost_equal(S, S2)
# same with Y = None
kwds = {"p": 2.0}
S = pairwise_distances(X, metric="minkowski", **kwds)
S2 = pairwise_distances(X, metric=minkowski, **kwds)
assert_array_almost_equal(S, S2)
# Test that scipy distance metrics throw an error if sparse matrix given
assert_raises(TypeError, pairwise_distances, X_sparse, metric="minkowski")
assert_raises(TypeError, pairwise_distances, X, Y_sparse,
metric="minkowski")
# Test that a value error is raised if the metric is unknown
assert_raises(ValueError, pairwise_distances, X, Y, metric="blah")
# ignore conversion to boolean in pairwise_distances
@ignore_warnings(category=DataConversionWarning)
def test_pairwise_boolean_distance():
# test that we convert to boolean arrays for boolean distances
rng = np.random.RandomState(0)
X = rng.randn(5, 4)
Y = X.copy()
Y[0, 0] = 1 - Y[0, 0]
for metric in PAIRWISE_BOOLEAN_FUNCTIONS:
for Z in [Y, None]:
res = pairwise_distances(X, Z, metric=metric)
res[np.isnan(res)] = 0
assert_true(np.sum(res != 0) == 0)
def test_pairwise_precomputed():
for func in [pairwise_distances, pairwise_kernels]:
# Test correct shape
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), metric='precomputed')
# with two args
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), np.zeros((4, 4)),
metric='precomputed')
        # even if shape[1] agrees (although this second arg is spurious)
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), np.zeros((4, 3)),
metric='precomputed')
# Test not copied (if appropriate dtype)
S = np.zeros((5, 5))
S2 = func(S, metric="precomputed")
assert_true(S is S2)
# with two args
S = np.zeros((5, 3))
S2 = func(S, np.zeros((3, 3)), metric="precomputed")
assert_true(S is S2)
# Test always returns float dtype
S = func(np.array([[1]], dtype='int'), metric='precomputed')
assert_equal('f', S.dtype.kind)
# Test converts list to array-like
S = func([[1.]], metric='precomputed')
assert_true(isinstance(S, np.ndarray))
def check_pairwise_parallel(func, metric, kwds):
rng = np.random.RandomState(0)
for make_data in (np.array, csr_matrix):
X = make_data(rng.random_sample((5, 4)))
Y = make_data(rng.random_sample((3, 4)))
try:
S = func(X, metric=metric, n_jobs=1, **kwds)
except (TypeError, ValueError) as exc:
# Not all metrics support sparse input
# ValueError may be triggered by bad callable
if make_data is csr_matrix:
assert_raises(type(exc), func, X, metric=metric,
n_jobs=2, **kwds)
continue
else:
raise
S2 = func(X, metric=metric, n_jobs=2, **kwds)
assert_array_almost_equal(S, S2)
S = func(X, Y, metric=metric, n_jobs=1, **kwds)
S2 = func(X, Y, metric=metric, n_jobs=2, **kwds)
assert_array_almost_equal(S, S2)
def test_pairwise_parallel():
wminkowski_kwds = {'w': np.arange(1, 5).astype('double'), 'p': 1}
metrics = [(pairwise_distances, 'euclidean', {}),
(pairwise_distances, wminkowski, wminkowski_kwds),
(pairwise_distances, 'wminkowski', wminkowski_kwds),
(pairwise_kernels, 'polynomial', {'degree': 1}),
(pairwise_kernels, callable_rbf_kernel, {'gamma': .1}),
]
for func, metric, kwds in metrics:
yield check_pairwise_parallel, func, metric, kwds
def test_pairwise_callable_nonstrict_metric():
    # pairwise_distances should allow callable metric where metric(x, x) != 0
# Knowing that the callable is a strict metric would allow the diagonal to
# be left uncalculated and set to 0.
assert_equal(pairwise_distances([[1.]], metric=lambda x, y: 5)[0, 0], 5)
def callable_rbf_kernel(x, y, **kwds):
# Callable version of pairwise.rbf_kernel.
K = rbf_kernel(np.atleast_2d(x), np.atleast_2d(y), **kwds)
return K
def test_pairwise_kernels(): # Test the pairwise_kernels helper function.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
# Test with all metrics that should be in PAIRWISE_KERNEL_FUNCTIONS.
test_metrics = ["rbf", "laplacian", "sigmoid", "polynomial", "linear",
"chi2", "additive_chi2"]
for metric in test_metrics:
function = PAIRWISE_KERNEL_FUNCTIONS[metric]
# Test with Y=None
K1 = pairwise_kernels(X, metric=metric)
K2 = function(X)
assert_array_almost_equal(K1, K2)
# Test with Y=Y
K1 = pairwise_kernels(X, Y=Y, metric=metric)
K2 = function(X, Y=Y)
assert_array_almost_equal(K1, K2)
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
K2 = pairwise_kernels(X_tuples, Y_tuples, metric=metric)
assert_array_almost_equal(K1, K2)
# Test with sparse X and Y
X_sparse = csr_matrix(X)
Y_sparse = csr_matrix(Y)
if metric in ["chi2", "additive_chi2"]:
# these don't support sparse matrices yet
assert_raises(ValueError, pairwise_kernels,
X_sparse, Y=Y_sparse, metric=metric)
continue
K1 = pairwise_kernels(X_sparse, Y=Y_sparse, metric=metric)
assert_array_almost_equal(K1, K2)
# Test with a callable function, with given keywords.
metric = callable_rbf_kernel
kwds = {'gamma': 0.1}
K1 = pairwise_kernels(X, Y=Y, metric=metric, **kwds)
K2 = rbf_kernel(X, Y=Y, **kwds)
assert_array_almost_equal(K1, K2)
# callable function, X=Y
K1 = pairwise_kernels(X, Y=X, metric=metric, **kwds)
K2 = rbf_kernel(X, Y=X, **kwds)
assert_array_almost_equal(K1, K2)
def test_pairwise_kernels_filter_param():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
K = rbf_kernel(X, Y, gamma=0.1)
params = {"gamma": 0.1, "blabla": ":)"}
K2 = pairwise_kernels(X, Y, metric="rbf", filter_params=True, **params)
assert_array_almost_equal(K, K2)
assert_raises(TypeError, pairwise_kernels, X, Y, "rbf", **params)
def test_paired_distances():
    # Test the paired_distances helper function.
rng = np.random.RandomState(0)
# Euclidean distance should be equivalent to calling the function.
X = rng.random_sample((5, 4))
# Euclidean distance, with Y != X.
Y = rng.random_sample((5, 4))
for metric, func in iteritems(PAIRED_DISTANCES):
S = paired_distances(X, Y, metric=metric)
S2 = func(X, Y)
assert_array_almost_equal(S, S2)
S3 = func(csr_matrix(X), csr_matrix(Y))
assert_array_almost_equal(S, S3)
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
# Check the pairwise_distances implementation
# gives the same value
distances = PAIRWISE_DISTANCE_FUNCTIONS[metric](X, Y)
distances = np.diag(distances)
assert_array_almost_equal(distances, S)
# Check the callable implementation
S = paired_distances(X, Y, metric='manhattan')
S2 = paired_distances(X, Y, metric=lambda x, y: np.abs(x - y).sum(axis=0))
assert_array_almost_equal(S, S2)
    # Test that a ValueError is raised when the lengths of X and Y differ
Y = rng.random_sample((3, 4))
assert_raises(ValueError, paired_distances, X, Y)
def test_pairwise_distances_argmin_min():
# Check pairwise minimum distances computation for any metric
X = [[0], [1]]
Y = [[-1], [2]]
Xsp = dok_matrix(X)
Ysp = csr_matrix(Y, dtype=np.float32)
# euclidean metric
D, E = pairwise_distances_argmin_min(X, Y, metric="euclidean")
D2 = pairwise_distances_argmin(X, Y, metric="euclidean")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(D2, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# sparse matrix case
Dsp, Esp = pairwise_distances_argmin_min(Xsp, Ysp, metric="euclidean")
assert_array_equal(Dsp, D)
assert_array_equal(Esp, E)
# We don't want np.matrix here
assert_equal(type(Dsp), np.ndarray)
assert_equal(type(Esp), np.ndarray)
# Non-euclidean scikit-learn metric
D, E = pairwise_distances_argmin_min(X, Y, metric="manhattan")
D2 = pairwise_distances_argmin(X, Y, metric="manhattan")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(D2, [0, 1])
assert_array_almost_equal(E, [1., 1.])
D, E = pairwise_distances_argmin_min(Xsp, Ysp, metric="manhattan")
D2 = pairwise_distances_argmin(Xsp, Ysp, metric="manhattan")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Non-euclidean Scipy distance (callable)
D, E = pairwise_distances_argmin_min(X, Y, metric=minkowski,
metric_kwargs={"p": 2})
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Non-euclidean Scipy distance (string)
D, E = pairwise_distances_argmin_min(X, Y, metric="minkowski",
metric_kwargs={"p": 2})
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Compare with naive implementation
rng = np.random.RandomState(0)
X = rng.randn(97, 149)
Y = rng.randn(111, 149)
dist = pairwise_distances(X, Y, metric="manhattan")
dist_orig_ind = dist.argmin(axis=0)
dist_orig_val = dist[dist_orig_ind, range(len(dist_orig_ind))]
dist_chunked_ind, dist_chunked_val = pairwise_distances_argmin_min(
X, Y, axis=0, metric="manhattan", batch_size=50)
np.testing.assert_almost_equal(dist_orig_ind, dist_chunked_ind, decimal=7)
np.testing.assert_almost_equal(dist_orig_val, dist_chunked_val, decimal=7)
def test_euclidean_distances():
# Check the pairwise Euclidean distances computation
X = [[0]]
Y = [[1], [2]]
D = euclidean_distances(X, Y)
assert_array_almost_equal(D, [[1., 2.]])
X = csr_matrix(X)
Y = csr_matrix(Y)
D = euclidean_distances(X, Y)
assert_array_almost_equal(D, [[1., 2.]])
rng = np.random.RandomState(0)
X = rng.random_sample((10, 4))
Y = rng.random_sample((20, 4))
X_norm_sq = (X ** 2).sum(axis=1).reshape(1, -1)
Y_norm_sq = (Y ** 2).sum(axis=1).reshape(1, -1)
# check that we still get the right answers with {X,Y}_norm_squared
D1 = euclidean_distances(X, Y)
D2 = euclidean_distances(X, Y, X_norm_squared=X_norm_sq)
D3 = euclidean_distances(X, Y, Y_norm_squared=Y_norm_sq)
D4 = euclidean_distances(X, Y, X_norm_squared=X_norm_sq,
Y_norm_squared=Y_norm_sq)
assert_array_almost_equal(D2, D1)
assert_array_almost_equal(D3, D1)
assert_array_almost_equal(D4, D1)
# check we get the wrong answer with wrong {X,Y}_norm_squared
X_norm_sq *= 0.5
Y_norm_sq *= 0.5
wrong_D = euclidean_distances(X, Y,
X_norm_squared=np.zeros_like(X_norm_sq),
Y_norm_squared=np.zeros_like(Y_norm_sq))
assert_greater(np.max(np.abs(wrong_D - D1)), .01)
# Paired distances
def test_paired_euclidean_distances():
# Check the paired Euclidean distances computation
X = [[0], [0]]
Y = [[1], [2]]
D = paired_euclidean_distances(X, Y)
assert_array_almost_equal(D, [1., 2.])
def test_paired_manhattan_distances():
# Check the paired manhattan distances computation
X = [[0], [0]]
Y = [[1], [2]]
D = paired_manhattan_distances(X, Y)
assert_array_almost_equal(D, [1., 2.])
def test_chi_square_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((10, 4))
K_add = additive_chi2_kernel(X, Y)
gamma = 0.1
K = chi2_kernel(X, Y, gamma=gamma)
assert_equal(K.dtype, np.float)
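    # The loop below checks the closed forms entrywise:
    #     K_add[i, j] = -sum_k (x[k] - y[k]) ** 2 / (x[k] + y[k])
    #     K[i, j]     = exp(gamma * K_add[i, j])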
for i, x in enumerate(X):
for j, y in enumerate(Y):
chi2 = -np.sum((x - y) ** 2 / (x + y))
chi2_exp = np.exp(gamma * chi2)
assert_almost_equal(K_add[i, j], chi2)
assert_almost_equal(K[i, j], chi2_exp)
# check diagonal is ones for data with itself
K = chi2_kernel(Y)
assert_array_equal(np.diag(K), 1)
# check off-diagonal is < 1 but > 0:
assert_true(np.all(K > 0))
assert_true(np.all(K - np.diag(np.diag(K)) < 1))
# check that float32 is preserved
X = rng.random_sample((5, 4)).astype(np.float32)
Y = rng.random_sample((10, 4)).astype(np.float32)
K = chi2_kernel(X, Y)
assert_equal(K.dtype, np.float32)
# check integer type gets converted,
# check that zeros are handled
X = rng.random_sample((10, 4)).astype(np.int32)
K = chi2_kernel(X, X)
assert_true(np.isfinite(K).all())
assert_equal(K.dtype, np.float)
# check that kernel of similar things is greater than dissimilar ones
X = [[.3, .7], [1., 0]]
Y = [[0, 1], [.9, .1]]
K = chi2_kernel(X, Y)
assert_greater(K[0, 0], K[0, 1])
assert_greater(K[1, 1], K[1, 0])
# test negative input
assert_raises(ValueError, chi2_kernel, [[0, -1]])
assert_raises(ValueError, chi2_kernel, [[0, -1]], [[-1, -1]])
assert_raises(ValueError, chi2_kernel, [[0, 1]], [[-1, -1]])
# different n_features in X and Y
assert_raises(ValueError, chi2_kernel, [[0, 1]], [[.2, .2, .6]])
# sparse matrices
assert_raises(ValueError, chi2_kernel, csr_matrix(X), csr_matrix(Y))
assert_raises(ValueError, additive_chi2_kernel,
csr_matrix(X), csr_matrix(Y))
def test_kernel_symmetry():
# Valid kernels should be symmetric
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
laplacian_kernel, sigmoid_kernel, cosine_similarity):
K = kernel(X, X)
assert_array_almost_equal(K, K.T, 15)
def test_kernel_sparse():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
X_sparse = csr_matrix(X)
for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
laplacian_kernel, sigmoid_kernel, cosine_similarity):
K = kernel(X, X)
K2 = kernel(X_sparse, X_sparse)
assert_array_almost_equal(K, K2)
def test_linear_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = linear_kernel(X, X)
# the diagonal elements of a linear kernel are their squared norm
assert_array_almost_equal(K.flat[::6], [linalg.norm(x) ** 2 for x in X])
def test_rbf_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = rbf_kernel(X, X)
# the diagonal elements of a rbf kernel are 1
assert_array_almost_equal(K.flat[::6], np.ones(5))
def test_laplacian_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = laplacian_kernel(X, X)
# the diagonal elements of a laplacian kernel are 1
assert_array_almost_equal(np.diag(K), np.ones(5))
# off-diagonal elements are < 1 but > 0:
assert_true(np.all(K > 0))
assert_true(np.all(K - np.diag(np.diag(K)) < 1))
def test_cosine_similarity_sparse_output():
# Test if cosine_similarity correctly produces sparse output.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((3, 4))
Xcsr = csr_matrix(X)
Ycsr = csr_matrix(Y)
K1 = cosine_similarity(Xcsr, Ycsr, dense_output=False)
assert_true(issparse(K1))
K2 = pairwise_kernels(Xcsr, Y=Ycsr, metric="cosine")
assert_array_almost_equal(K1.todense(), K2)
def test_cosine_similarity():
# Test the cosine_similarity.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((3, 4))
Xcsr = csr_matrix(X)
Ycsr = csr_matrix(Y)
for X_, Y_ in ((X, None), (X, Y),
(Xcsr, None), (Xcsr, Ycsr)):
        # Test that the cosine kernel is equal to a linear kernel when data
# has been previously normalized by L2-norm.
K1 = pairwise_kernels(X_, Y=Y_, metric="cosine")
X_ = normalize(X_)
if Y_ is not None:
Y_ = normalize(Y_)
K2 = pairwise_kernels(X_, Y=Y_, metric="linear")
assert_array_almost_equal(K1, K2)
def test_check_dense_matrices():
# Ensure that pairwise array check works for dense matrices.
# Check that if XB is None, XB is returned as reference to XA
XA = np.resize(np.arange(40), (5, 8))
XA_checked, XB_checked = check_pairwise_arrays(XA, None)
assert_true(XA_checked is XB_checked)
assert_array_equal(XA, XA_checked)
def test_check_XB_returned():
# Ensure that if XA and XB are given correctly, they return as equal.
# Check that if XB is not None, it is returned equal.
# Note that the second dimension of XB is the same as XA.
XA = np.resize(np.arange(40), (5, 8))
XB = np.resize(np.arange(32), (4, 8))
XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
assert_array_equal(XA, XA_checked)
assert_array_equal(XB, XB_checked)
XB = np.resize(np.arange(40), (5, 8))
XA_checked, XB_checked = check_paired_arrays(XA, XB)
assert_array_equal(XA, XA_checked)
assert_array_equal(XB, XB_checked)
def test_check_different_dimensions():
# Ensure an error is raised if the dimensions are different.
XA = np.resize(np.arange(45), (5, 9))
XB = np.resize(np.arange(32), (4, 8))
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
XB = np.resize(np.arange(4 * 9), (4, 9))
assert_raises(ValueError, check_paired_arrays, XA, XB)
def test_check_invalid_dimensions():
# Ensure an error is raised on 1D input arrays.
    # The modified tests are not 1D; in the old test, the array was internally
    # converted to 2D anyway.
XA = np.arange(45).reshape(9, 5)
XB = np.arange(32).reshape(4, 8)
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
XA = np.arange(45).reshape(9, 5)
XB = np.arange(32).reshape(4, 8)
    assert_raises(ValueError, check_paired_arrays, XA, XB)
def test_check_sparse_arrays():
# Ensures that checks return valid sparse matrices.
rng = np.random.RandomState(0)
XA = rng.random_sample((5, 4))
XA_sparse = csr_matrix(XA)
XB = rng.random_sample((5, 4))
XB_sparse = csr_matrix(XB)
XA_checked, XB_checked = check_pairwise_arrays(XA_sparse, XB_sparse)
# compare their difference because testing csr matrices for
# equality with '==' does not work as expected.
assert_true(issparse(XA_checked))
assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
assert_true(issparse(XB_checked))
assert_equal(abs(XB_sparse - XB_checked).sum(), 0)
XA_checked, XA_2_checked = check_pairwise_arrays(XA_sparse, XA_sparse)
assert_true(issparse(XA_checked))
assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
assert_true(issparse(XA_2_checked))
assert_equal(abs(XA_2_checked - XA_checked).sum(), 0)
def tuplify(X):
# Turns a numpy matrix (any n-dimensional array) into tuples.
s = X.shape
if len(s) > 1:
# Tuplify each sub-array in the input.
return tuple(tuplify(row) for row in X)
else:
# Single dimension input, just return tuple of contents.
return tuple(r for r in X)
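# Illustrative: tuplify(np.array([[1, 2], [3, 4]])) returns ((1, 2), (3, 4)),
# recursing once per array dimension.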
def test_check_tuple_input():
# Ensures that checks return valid tuples.
rng = np.random.RandomState(0)
XA = rng.random_sample((5, 4))
XA_tuples = tuplify(XA)
XB = rng.random_sample((5, 4))
XB_tuples = tuplify(XB)
XA_checked, XB_checked = check_pairwise_arrays(XA_tuples, XB_tuples)
assert_array_equal(XA_tuples, XA_checked)
assert_array_equal(XB_tuples, XB_checked)
def test_check_preserve_type():
# Ensures that type float32 is preserved.
XA = np.resize(np.arange(40), (5, 8)).astype(np.float32)
XB = np.resize(np.arange(40), (5, 8)).astype(np.float32)
XA_checked, XB_checked = check_pairwise_arrays(XA, None)
assert_equal(XA_checked.dtype, np.float32)
# both float32
XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
assert_equal(XA_checked.dtype, np.float32)
assert_equal(XB_checked.dtype, np.float32)
# mismatched A
XA_checked, XB_checked = check_pairwise_arrays(XA.astype(np.float),
XB)
assert_equal(XA_checked.dtype, np.float)
assert_equal(XB_checked.dtype, np.float)
# mismatched B
XA_checked, XB_checked = check_pairwise_arrays(XA,
XB.astype(np.float))
assert_equal(XA_checked.dtype, np.float)
assert_equal(XB_checked.dtype, np.float)
| bsd-3-clause |
frdb194/django | django/apps/config.py | 224 | 8284 | import os
from importlib import import_module
from django.core.exceptions import AppRegistryNotReady, ImproperlyConfigured
from django.utils._os import upath
from django.utils.module_loading import module_has_submodule
MODELS_MODULE_NAME = 'models'
class AppConfig(object):
"""
Class representing a Django application and its configuration.
"""
def __init__(self, app_name, app_module):
# Full Python path to the application eg. 'django.contrib.admin'.
self.name = app_name
# Root module for the application eg. <module 'django.contrib.admin'
# from 'django/contrib/admin/__init__.pyc'>.
self.module = app_module
# The following attributes could be defined at the class level in a
# subclass, hence the test-and-set pattern.
# Last component of the Python path to the application eg. 'admin'.
# This value must be unique across a Django project.
if not hasattr(self, 'label'):
self.label = app_name.rpartition(".")[2]
# Human-readable name for the application eg. "Admin".
if not hasattr(self, 'verbose_name'):
self.verbose_name = self.label.title()
# Filesystem path to the application directory eg.
# u'/usr/lib/python2.7/dist-packages/django/contrib/admin'. Unicode on
# Python 2 and a str on Python 3.
if not hasattr(self, 'path'):
self.path = self._path_from_module(app_module)
# Module containing models eg. <module 'django.contrib.admin.models'
# from 'django/contrib/admin/models.pyc'>. Set by import_models().
# None if the application doesn't have a models module.
self.models_module = None
# Mapping of lower case model names to model classes. Initially set to
# None to prevent accidental access before import_models() runs.
self.models = None
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self.label)
def _path_from_module(self, module):
"""Attempt to determine app's filesystem path from its module."""
# See #21874 for extended discussion of the behavior of this method in
# various cases.
# Convert paths to list because Python 3's _NamespacePath does not
# support indexing.
paths = list(getattr(module, '__path__', []))
if len(paths) != 1:
filename = getattr(module, '__file__', None)
if filename is not None:
paths = [os.path.dirname(filename)]
else:
# For unknown reasons, sometimes the list returned by __path__
# contains duplicates that must be removed (#25246).
paths = list(set(paths))
if len(paths) > 1:
raise ImproperlyConfigured(
"The app module %r has multiple filesystem locations (%r); "
"you must configure this app with an AppConfig subclass "
"with a 'path' class attribute." % (module, paths))
elif not paths:
raise ImproperlyConfigured(
"The app module %r has no filesystem location, "
"you must configure this app with an AppConfig subclass "
"with a 'path' class attribute." % (module,))
return upath(paths[0])
@classmethod
def create(cls, entry):
"""
Factory that creates an app config from an entry in INSTALLED_APPS.
"""
try:
# If import_module succeeds, entry is a path to an app module,
# which may specify an app config class with default_app_config.
# Otherwise, entry is a path to an app config class or an error.
module = import_module(entry)
except ImportError:
# Track that importing as an app module failed. If importing as an
# app config class fails too, we'll trigger the ImportError again.
module = None
mod_path, _, cls_name = entry.rpartition('.')
# Raise the original exception when entry cannot be a path to an
# app config class.
if not mod_path:
raise
else:
try:
# If this works, the app module specifies an app config class.
entry = module.default_app_config
except AttributeError:
# Otherwise, it simply uses the default app config class.
return cls(entry, module)
else:
mod_path, _, cls_name = entry.rpartition('.')
# If we're reaching this point, we must attempt to load the app config
# class located at <mod_path>.<cls_name>
mod = import_module(mod_path)
try:
cls = getattr(mod, cls_name)
except AttributeError:
if module is None:
# If importing as an app module failed, that error probably
# contains the most informative traceback. Trigger it again.
import_module(entry)
else:
raise
# Check for obvious errors. (This check prevents duck typing, but
# it could be removed if it became a problem in practice.)
if not issubclass(cls, AppConfig):
raise ImproperlyConfigured(
"'%s' isn't a subclass of AppConfig." % entry)
# Obtain app name here rather than in AppClass.__init__ to keep
# all error checking for entries in INSTALLED_APPS in one place.
try:
app_name = cls.name
except AttributeError:
raise ImproperlyConfigured(
"'%s' must supply a name attribute." % entry)
# Ensure app_name points to a valid module.
app_module = import_module(app_name)
# Entry is a path to an app config class.
return cls(app_name, app_module)
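    # Illustrative only (not part of the original module): both entry forms
    # handled above may appear in INSTALLED_APPS, e.g.
    #     INSTALLED_APPS = [
    #         'django.contrib.admin',                   # app module path
    #         'django.contrib.admin.apps.AdminConfig',  # app config class path
    #     ]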
def check_models_ready(self):
"""
Raises an exception if models haven't been imported yet.
"""
if self.models is None:
raise AppRegistryNotReady(
"Models for app '%s' haven't been imported yet." % self.label)
def get_model(self, model_name):
"""
Returns the model with the given case-insensitive model_name.
Raises LookupError if no model exists with this name.
"""
self.check_models_ready()
try:
return self.models[model_name.lower()]
except KeyError:
raise LookupError(
"App '%s' doesn't have a '%s' model." % (self.label, model_name))
def get_models(self, include_auto_created=False,
include_deferred=False, include_swapped=False):
"""
Returns an iterable of models.
By default, the following models aren't included:
- auto-created models for many-to-many relations without
an explicit intermediate table,
- models created to satisfy deferred attribute queries,
- models that have been swapped out.
Set the corresponding keyword argument to True to include such models.
Keyword arguments aren't documented; they're a private API.
"""
self.check_models_ready()
for model in self.models.values():
if model._deferred and not include_deferred:
continue
if model._meta.auto_created and not include_auto_created:
continue
if model._meta.swapped and not include_swapped:
continue
yield model
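    # Typical use (a sketch, not from the original source):
    #     from django.apps import apps
    #     for model in apps.get_app_config('admin').get_models():
    #         print(model.__name__)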
def import_models(self, all_models):
# Dictionary of models for this app, primarily maintained in the
# 'all_models' attribute of the Apps this AppConfig is attached to.
# Injected as a parameter because it gets populated when models are
# imported, which might happen before populate() imports models.
self.models = all_models
if module_has_submodule(self.module, MODELS_MODULE_NAME):
models_module_name = '%s.%s' % (self.name, MODELS_MODULE_NAME)
self.models_module = import_module(models_module_name)
def ready(self):
"""
Override this method in subclasses to run code when Django starts.
"""
| bsd-3-clause |
joongh/robotframework | utest/writer/test_filewriters.py | 7 | 2607 | import unittest
from robot.parsing import TestCaseFile
from robot.parsing.model import TestCaseTable
from robot.utils import ET, ETSource, StringIO
from robot.utils.asserts import assert_equal
def create_test_case_file():
data = TestCaseFile(source='foo.txt')
table = TestCaseTable(data)
data.testcase_table = table
table.set_header(['test case', 'some', 'and other'])
test = table.add('A test')
test.add_step(['A kw', 'an arg'])
return data
class _WriterTestCase(unittest.TestCase):
def _test_rows_are_not_split_if_there_are_headers(self, format='txt'):
output = self._add_long_step_and_save(format)
assert_equal(len(output.splitlines()), 3)
def _add_long_step_and_save(self, format):
data = create_test_case_file()
data.testcase_table.tests[0].add_step(['A kw', '1', '2', '3', '4', '6', '7', '8'])
output = StringIO()
data.save(format=format, output=output)
return output.getvalue().strip()
class TestSpaceSeparatedWriter(_WriterTestCase):
def test_end_of_line_whitespace_is_removed(self):
output = StringIO()
create_test_case_file().save(output=output)
expected = '''\
*** test case *** some and other
A test A kw an arg
'''
assert_equal(output.getvalue(), expected)
def test_rows_are_not_split_if_there_are_headers(self):
self._test_rows_are_not_split_if_there_are_headers()
def test_configuring_number_of_separating_spaces(self):
output = StringIO()
create_test_case_file().save(output=output, txt_separating_spaces=8)
expected = '''\
*** test case *** some and other
A test A kw an arg
'''
assert_equal(output.getvalue(), expected)
class TestTsvWriter(_WriterTestCase):
def test_rows_are_not_split_if_there_are_headers(self):
try:
import csv
except ImportError:
pass # csv not available on IronPython 2.7
else:
self._test_rows_are_not_split_if_there_are_headers('tsv')
class TestHtmlWriter(_WriterTestCase):
def test_rows_are_not_split_if_there_are_headers(self):
output = self._add_long_step_and_save('html')
with ETSource('\n'.join(output.splitlines()[1:])) as source:
tree = ET.parse(source)
lines = tree.findall('body/table/tr')
assert_equal(len(lines), 3)
for l in lines:
cols = l.findall('td') or l.findall('th')
assert_equal(len(cols), 9)
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
treejames/shaka-player | third_party/gjslint/python-gflags-2.0/tests/flags_modules_for_testing/module_foo.py | 139 | 5150 | #!/usr/bin/env python
#
# Copyright (c) 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Auxiliary module for testing gflags.py.
The purpose of this module is to define a few flags, and declare some
other flags as being important. We want to make sure the unit tests
for gflags.py involve more than one module.
"""
__author__ = '[email protected] (Alex Salcianu)'
__pychecker__ = 'no-local' # for unittest
import gflags
from flags_modules_for_testing import module_bar
FLAGS = gflags.FLAGS
DECLARED_KEY_FLAGS = ['tmod_bar_x', 'tmod_bar_z', 'tmod_bar_t',
# Special (not user-defined) flag:
'flagfile']
def DefineFlags(flag_values=FLAGS):
"""Defines a few flags."""
module_bar.DefineFlags(flag_values=flag_values)
# The 'tmod_foo_' prefix (short for 'test_module_foo') ensures that we
# have no name clash with existing flags.
gflags.DEFINE_boolean('tmod_foo_bool', True, 'Boolean flag from module foo.',
flag_values=flag_values)
gflags.DEFINE_string('tmod_foo_str', 'default', 'String flag.',
flag_values=flag_values)
gflags.DEFINE_integer('tmod_foo_int', 3, 'Sample int flag.',
flag_values=flag_values)
def DeclareKeyFlags(flag_values=FLAGS):
"""Declares a few key flags."""
for flag_name in DECLARED_KEY_FLAGS:
gflags.DECLARE_key_flag(flag_name, flag_values=flag_values)
def DeclareExtraKeyFlags(flag_values=FLAGS):
"""Declares some extra key flags."""
gflags.ADOPT_module_key_flags(module_bar, flag_values=flag_values)
def NamesOfDefinedFlags():
"""Returns: list of names of flags defined by this module."""
return ['tmod_foo_bool', 'tmod_foo_str', 'tmod_foo_int']
def NamesOfDeclaredKeyFlags():
"""Returns: list of names of key flags for this module."""
return NamesOfDefinedFlags() + DECLARED_KEY_FLAGS
def NamesOfDeclaredExtraKeyFlags():
"""Returns the list of names of additional key flags for this module.
These are the flags that became key for this module only as a result
of a call to DeclareExtraKeyFlags() above. I.e., the flags declared
by module_bar, that were not already declared as key for this
module.
Returns:
The list of names of additional key flags for this module.
"""
names_of_extra_key_flags = list(module_bar.NamesOfDefinedFlags())
for flag_name in NamesOfDeclaredKeyFlags():
while flag_name in names_of_extra_key_flags:
names_of_extra_key_flags.remove(flag_name)
return names_of_extra_key_flags
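# Worked example (hypothetical module_bar flags): if module_bar defined
# ['tmod_bar_x', 'tmod_bar_y', 'tmod_bar_z', 'tmod_bar_t'], the loop above
# strips the three names already present in DECLARED_KEY_FLAGS, leaving
# ['tmod_bar_y'] as the only extra key flag.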
def RemoveFlags(flag_values=FLAGS):
"""Deletes the flag definitions done by the above DefineFlags()."""
for flag_name in NamesOfDefinedFlags():
module_bar.RemoveOneFlag(flag_name, flag_values=flag_values)
module_bar.RemoveFlags(flag_values=flag_values)
def GetModuleName():
"""Uses gflags._GetCallingModule() to return the name of this module.
For checking that _GetCallingModule works as expected.
Returns:
A string, the name of this module.
"""
# Calling the protected _GetCallingModule generates a lint warning,
# but we do not have any other alternative to test that function.
return gflags._GetCallingModule()
def DuplicateFlags(flagnames=None):
"""Returns a new FlagValues object with the requested flagnames.
Used to test DuplicateFlagError detection.
Args:
    flagnames: list of str, the flag names to create.
Returns:
A FlagValues object with one boolean flag for each name in flagnames.
"""
flag_values = gflags.FlagValues()
for name in flagnames:
gflags.DEFINE_boolean(name, False, 'Flag named %s' % (name,),
flag_values=flag_values)
return flag_values
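# Usage sketch (illustrative): DuplicateFlags(['dup', 'dup']) raises
# gflags.DuplicateFlagError when the second DEFINE_boolean('dup', ...) runs,
# which is the condition callers of this helper want to exercise.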
| apache-2.0 |
hachreak/invenio-records | setup.py | 1 | 4671 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015, 2016 CERN.
#
# Invenio is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Invenio-Records is a metadata storage module."""
import os
from setuptools import find_packages, setup
readme = open('README.rst').read()
history = open('CHANGES.rst').read()
tests_require = [
'check-manifest>=0.25',
'coverage>=4.0',
'isort>=4.2.2',
'mock>=1.0.0',
'pydocstyle>=1.0.0',
'pytest-cache>=1.0',
'pytest-cov>=1.8.0',
'pytest-pep8>=1.0.6',
'pytest>=2.8.0',
]
extras_require = {
'pidstore': [
'invenio-pidstore>=1.0.0b1',
],
'docs': [
'Sphinx>=1.5.1',
],
'mysql': [
'invenio-db[mysql,versioning]>=1.0.0b3',
],
'postgresql': [
'invenio-db[postgresql,versioning]>=1.0.0b3',
],
'sqlite': [
'invenio-db[versioning]>=1.0.0b3',
],
'admin': [
'Flask-Admin>=1.3.0',
],
'tests': tests_require,
}
extras_require['all'] = []
for name, reqs in extras_require.items():
if name in ('mysql', 'postgresql', 'sqlite'):
continue
extras_require['all'].extend(reqs)
setup_requires = [
'Babel>=1.3',
'pytest-runner>=2.6.2',
]
install_requires = [
'blinker>=1.4',
'flask-celeryext>=0.2.2',
'Flask>=0.11.1',
'jsonpatch>=1.11',
'jsonresolver>=0.1.0',
'jsonref>=0.1',
'jsonschema>=2.5.1',
'sqlalchemy-utils>=0.31.0',
]
packages = find_packages()
# Get the version string. Cannot be done with import!
g = {}
with open(os.path.join('invenio_records', 'version.py'), 'rt') as fp:
exec(fp.read(), g)
version = g['__version__']
setup(
name='invenio-records',
version=version,
description=__doc__,
long_description=readme + '\n\n' + history,
keywords='invenio metadata',
license='GPLv2',
author='CERN',
author_email='[email protected]',
url='https://github.com/inveniosoftware/invenio-records',
packages=packages,
zip_safe=False,
include_package_data=True,
platforms='any',
entry_points={
'flask.commands': [
'records = invenio_records.cli:records',
],
'invenio_admin.views': [
'invenio_records = invenio_records.admin:record_adminview',
],
'invenio_base.apps': [
'invenio_records = invenio_records:InvenioRecords',
],
'invenio_base.api_apps': [
'invenio_records = invenio_records:InvenioRecords',
],
'invenio_celery.tasks': [
'invenio_records = invenio_records.tasks.api',
],
'invenio_db.alembic': [
'invenio_records = invenio_records:alembic',
],
'invenio_db.models': [
'invenio_records = invenio_records.models',
],
'invenio_i18n.translations': [
'messages = invenio_records',
],
},
extras_require=extras_require,
install_requires=install_requires,
setup_requires=setup_requires,
tests_require=tests_require,
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU General Public License v2 (GPLv2)',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Development Status :: 4 - Beta',
],
)
| gpl-2.0 |
luofei98/qgis | python/plugins/processing/gui/ProcessingToolbox.py | 1 | 16100 | # -*- coding: utf-8 -*-
"""
***************************************************************************
ProcessingToolbox.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from qgis.utils import iface
from processing.modeler.ModelerUtils import ModelerUtils
from processing.core.Processing import Processing
from processing.core.ProcessingLog import ProcessingLog
from processing.core.ProcessingConfig import ProcessingConfig
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.gui.MissingDependencyDialog import MissingDependencyDialog
from processing.gui.AlgorithmClassification import AlgorithmDecorator
from processing.gui.ParametersDialog import ParametersDialog
from processing.gui.BatchProcessingDialog import BatchProcessingDialog
from processing.gui.EditRenderingStylesDialog import EditRenderingStylesDialog
from processing.ui.ui_ProcessingToolbox import Ui_ProcessingToolbox
class ProcessingToolbox(QDockWidget, Ui_ProcessingToolbox):
USE_CATEGORIES = '/Processing/UseSimplifiedInterface'
def __init__(self):
QDockWidget.__init__(self, None)
self.setupUi(self)
self.setAllowedAreas(Qt.LeftDockWidgetArea | Qt.RightDockWidgetArea)
self.modeComboBox.clear()
self.modeComboBox.addItems(['Simplified interface',
'Advanced interface'])
settings = QSettings()
if not settings.contains(self.USE_CATEGORIES):
settings.setValue(self.USE_CATEGORIES, True)
useCategories = settings.value(self.USE_CATEGORIES, type=bool)
if useCategories:
self.modeComboBox.setCurrentIndex(0)
else:
self.modeComboBox.setCurrentIndex(1)
self.modeComboBox.currentIndexChanged.connect(self.modeHasChanged)
self.searchBox.textChanged.connect(self.textChanged)
self.algorithmTree.customContextMenuRequested.connect(
self.showPopupMenu)
self.algorithmTree.doubleClicked.connect(self.executeAlgorithm)
if hasattr(self.searchBox, 'setPlaceholderText'):
self.searchBox.setPlaceholderText(self.tr('Search...'))
self.fillTree()
def textChanged(self):
text = self.searchBox.text().strip(' ').lower()
self._filterItem(self.algorithmTree.invisibleRootItem(), text)
if text:
self.algorithmTree.expandAll()
else:
self.algorithmTree.collapseAll()
self.algorithmTree.invisibleRootItem().child(0).setExpanded(True)
def _filterItem(self, item, text):
if (item.childCount() > 0):
show = False
for i in xrange(item.childCount()):
child = item.child(i)
showChild = self._filterItem(child, text)
show = showChild or show
item.setHidden(not show)
return show
elif isinstance(item, (TreeAlgorithmItem, TreeActionItem)):
hide = bool(text) and (text not in item.text(0).lower())
item.setHidden(hide)
return not hide
else:
item.setHidden(True)
return False
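    # Net effect of the recursion above (illustrative): a branch node stays
    # visible if any descendant algorithm/action item matches the search text
    # case-insensitively, so e.g. searching "buffer" keeps only the provider
    # and group rows that contain a matching algorithm.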
def modeHasChanged(self):
idx = self.modeComboBox.currentIndex()
settings = QSettings()
if idx == 0:
# Simplified
settings.setValue(self.USE_CATEGORIES, True)
else:
settings.setValue(self.USE_CATEGORIES, False)
self.fillTree()
def algsListHasChanged(self):
self.fillTree()
    def updateProvider(self, providerName, updateAlgsList=True):
if updateAlgsList:
Processing.updateAlgsList()
for i in xrange(self.algorithmTree.invisibleRootItem().childCount()):
child = self.algorithmTree.invisibleRootItem().child(i)
if isinstance(child, TreeProviderItem):
if child.providerName == providerName:
child.refresh()
# sort categories and items in categories
child.sortChildren(0, Qt.AscendingOrder)
for i in xrange(child.childCount()):
child.child(i).sortChildren(0, Qt.AscendingOrder)
break
def showPopupMenu(self, point):
item = self.algorithmTree.itemAt(point)
if isinstance(item, TreeAlgorithmItem):
alg = item.alg
popupmenu = QMenu()
executeAction = QAction(self.tr('Execute'), self.algorithmTree)
executeAction.triggered.connect(self.executeAlgorithm)
popupmenu.addAction(executeAction)
if alg.canRunInBatchMode and not alg.allowOnlyOpenedLayers:
executeBatchAction = QAction(
self.tr('Execute as batch process'),
self.algorithmTree)
executeBatchAction.triggered.connect(
self.executeAlgorithmAsBatchProcess)
popupmenu.addAction(executeBatchAction)
popupmenu.addSeparator()
editRenderingStylesAction = QAction(
self.tr('Edit rendering styles for outputs'),
self.algorithmTree)
editRenderingStylesAction.triggered.connect(
self.editRenderingStyles)
popupmenu.addAction(editRenderingStylesAction)
actions = Processing.contextMenuActions
if len(actions) > 0:
popupmenu.addSeparator()
for action in actions:
action.setData(alg, self)
if action.isEnabled():
contextMenuAction = QAction(action.name,
self.algorithmTree)
contextMenuAction.triggered.connect(action.execute)
popupmenu.addAction(contextMenuAction)
popupmenu.exec_(self.algorithmTree.mapToGlobal(point))
def editRenderingStyles(self):
item = self.algorithmTree.currentItem()
if isinstance(item, TreeAlgorithmItem):
alg = Processing.getAlgorithm(item.alg.commandLineName())
dlg = EditRenderingStylesDialog(alg)
dlg.exec_()
def executeAlgorithmAsBatchProcess(self):
item = self.algorithmTree.currentItem()
if isinstance(item, TreeAlgorithmItem):
alg = Processing.getAlgorithm(item.alg.commandLineName())
dlg = BatchProcessingDialog(alg)
dlg.exec_()
def executeAlgorithm(self):
item = self.algorithmTree.currentItem()
if isinstance(item, TreeAlgorithmItem):
alg = Processing.getAlgorithm(item.alg.commandLineName())
message = alg.checkBeforeOpeningParametersDialog()
if message:
dlg = MissingDependencyDialog(message)
dlg.exec_()
return
alg = alg.getCopy()
dlg = alg.getCustomParametersDialog()
if not dlg:
dlg = ParametersDialog(alg)
canvas = iface.mapCanvas()
prevMapTool = canvas.mapTool()
dlg.show()
dlg.exec_()
if canvas.mapTool() != prevMapTool:
try:
canvas.mapTool().reset()
except Exception:  # a bare except would also swallow KeyboardInterrupt / SystemExit
pass
canvas.setMapTool(prevMapTool)
if dlg.executed:
showRecent = ProcessingConfig.getSetting(
ProcessingConfig.SHOW_RECENT_ALGORITHMS)
if showRecent:
self.addRecentAlgorithms(True)
if isinstance(item, TreeActionItem):
action = item.action
action.setData(self)
action.execute()
def fillTree(self):
settings = QSettings()
useCategories = settings.value(self.USE_CATEGORIES, type=bool)
if useCategories:
self.fillTreeUsingCategories()
else:
self.fillTreeUsingProviders()
self.algorithmTree.sortItems(0, Qt.AscendingOrder)
self.addRecentAlgorithms(False)
def addRecentAlgorithms(self, updating):
showRecent = ProcessingConfig.getSetting(
ProcessingConfig.SHOW_RECENT_ALGORITHMS)
if showRecent:
recent = ProcessingLog.getRecentAlgorithms()
if len(recent) != 0:
found = False
if updating:
recentItem = self.algorithmTree.topLevelItem(0)
treeWidget = recentItem.treeWidget()
treeWidget.takeTopLevelItem(
treeWidget.indexOfTopLevelItem(recentItem))
recentItem = QTreeWidgetItem()
recentItem.setText(0, self.tr('Recently used algorithms'))
for algname in recent:
alg = Processing.getAlgorithm(algname)
if alg is not None:
algItem = TreeAlgorithmItem(alg)
recentItem.addChild(algItem)
found = True
if found:
self.algorithmTree.insertTopLevelItem(0, recentItem)
recentItem.setExpanded(True)
self.algorithmTree.setWordWrap(True)
def fillTreeUsingCategories(self):
providersToExclude = ['model', 'script']
self.algorithmTree.clear()
text = unicode(self.searchBox.text())
groups = {}
for providerName in Processing.algs.keys():
provider = Processing.algs[providerName]
name = 'ACTIVATE_' + providerName.upper().replace(' ', '_')
if not ProcessingConfig.getSetting(name):
continue
if providerName in providersToExclude \
or len(ModelerUtils.providers[providerName].actions) != 0:
continue
algs = provider.values()
# add algorithms
for alg in algs:
if not alg.showInToolbox:
continue
(altgroup, altsubgroup, altname) = \
AlgorithmDecorator.getGroupsAndName(alg)
if altgroup is None:
continue
if text == '' or text.lower() in altname.lower():
if altgroup not in groups:
groups[altgroup] = {}
group = groups[altgroup]
if altsubgroup not in group:
groups[altgroup][altsubgroup] = []
subgroup = groups[altgroup][altsubgroup]
subgroup.append(alg)
if len(groups) > 0:
mainItem = QTreeWidgetItem()
mainItem.setText(0, 'Geoalgorithms')
mainItem.setIcon(0, GeoAlgorithm.getDefaultIcon())
mainItem.setToolTip(0, mainItem.text(0))
for (groupname, group) in groups.items():
groupItem = QTreeWidgetItem()
groupItem.setText(0, groupname)
groupItem.setIcon(0, GeoAlgorithm.getDefaultIcon())
groupItem.setToolTip(0, groupItem.text(0))
mainItem.addChild(groupItem)
for (subgroupname, subgroup) in group.items():
subgroupItem = QTreeWidgetItem()
subgroupItem.setText(0, subgroupname)
subgroupItem.setIcon(0, GeoAlgorithm.getDefaultIcon())
subgroupItem.setToolTip(0, subgroupItem.text(0))
groupItem.addChild(subgroupItem)
for alg in subgroup:
algItem = TreeAlgorithmItem(alg)
subgroupItem.addChild(algItem)
self.algorithmTree.addTopLevelItem(mainItem)
for providerName in Processing.algs.keys():
if providerName not in providersToExclude:
continue
name = 'ACTIVATE_' + providerName.upper().replace(' ', '_')
if not ProcessingConfig.getSetting(name):
continue
providerItem = TreeProviderItem(providerName)
self.algorithmTree.addTopLevelItem(providerItem)
def fillTreeUsingProviders(self):
self.algorithmTree.clear()
for providerName in Processing.algs.keys():
name = 'ACTIVATE_' + providerName.upper().replace(' ', '_')
if not ProcessingConfig.getSetting(name):
continue
providerItem = TreeProviderItem(providerName)
self.algorithmTree.addTopLevelItem(providerItem)
providerItem.setHidden(providerItem.childCount() == 0)
class TreeAlgorithmItem(QTreeWidgetItem):
def __init__(self, alg):
settings = QSettings()
useCategories = settings.value(ProcessingToolbox.USE_CATEGORIES,
type=bool)
QTreeWidgetItem.__init__(self)
self.alg = alg
icon = alg.getIcon()
name = alg.name
if useCategories:
icon = GeoAlgorithm.getDefaultIcon()
(group, subgroup, name) = AlgorithmDecorator.getGroupsAndName(alg)
self.setIcon(0, icon)
self.setToolTip(0, name)
self.setText(0, name)
class TreeActionItem(QTreeWidgetItem):
def __init__(self, action):
QTreeWidgetItem.__init__(self)
self.action = action
self.setText(0, action.name)
self.setIcon(0, action.getIcon())
class TreeProviderItem(QTreeWidgetItem):
def __init__(self, providerName):
QTreeWidgetItem.__init__(self)
self.providerName = providerName
self.provider = Processing.getProviderFromName(providerName)
self.setIcon(0, self.provider.getIcon())
self.populate()
def refresh(self):
self.takeChildren()
self.populate()
def populate(self):
groups = {}
count = 0
provider = Processing.algs[self.providerName]
algs = provider.values()
# Add algorithms
for alg in algs:
if not alg.showInToolbox:
continue
if alg.group in groups:
groupItem = groups[alg.group]
else:
groupItem = QTreeWidgetItem()
groupItem.setText(0, alg.group)
groupItem.setToolTip(0, alg.group)
groups[alg.group] = groupItem
algItem = TreeAlgorithmItem(alg)
groupItem.addChild(algItem)
count += 1
actions = Processing.actions[self.providerName]
for action in actions:
if action.group in groups:
groupItem = groups[action.group]
else:
groupItem = QTreeWidgetItem()
groupItem.setText(0, action.group)
groups[action.group] = groupItem
algItem = TreeActionItem(action)
groupItem.addChild(algItem)
self.setText(0, self.provider.getDescription()
+ ' [' + str(count) + ' geoalgorithms]')
self.setToolTip(0, self.text(0))
for groupItem in groups.values():
self.addChild(groupItem)
| gpl-2.0 |
m1ck/bookadoptions | django/views/decorators/vary.py | 307 | 1285 | try:
from functools import wraps
except ImportError:
from django.utils.functional import wraps # Python 2.4 fallback.
from django.utils.cache import patch_vary_headers
from django.utils.decorators import available_attrs
def vary_on_headers(*headers):
"""
A view decorator that adds the specified headers to the Vary header of the
response. Usage:
@vary_on_headers('Cookie', 'Accept-language')
def index(request):
...
Note that the header names are not case-sensitive.
"""
def decorator(func):
def inner_func(*args, **kwargs):
response = func(*args, **kwargs)
patch_vary_headers(response, headers)
return response
return wraps(func, assigned=available_attrs(func))(inner_func)
return decorator
def vary_on_cookie(func):
"""
A view decorator that adds "Cookie" to the Vary header of a response. This
indicates that a page's contents depend on cookies. Usage:
@vary_on_cookie
def index(request):
...
"""
def inner_func(*args, **kwargs):
response = func(*args, **kwargs)
patch_vary_headers(response, ('Cookie',))
return response
return wraps(func, assigned=available_attrs(func))(inner_func)
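# A minimal usage sketch, not part of Django itself; the view below is
# hypothetical and assumes a configured Django project. Stacking both
# decorators yields "Vary: Cookie, Accept-language" on the response, so
# caches key on both request headers.
def _example_vary_usage():
    from django.http import HttpResponse

    @vary_on_headers('Accept-language')
    @vary_on_cookie
    def greeting(request):
        return HttpResponse('hello')

    return greeting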
| bsd-3-clause |
jiangzhuo/kbengine | kbe/res/scripts/common/Lib/pickletools.py | 74 | 91170 | '''"Executable documentation" for the pickle module.
Extensive comments about the pickle protocols and pickle-machine opcodes
can be found here. Some functions meant for external use:
genops(pickle)
Generate all the opcodes in a pickle, as (opcode, arg, position) triples.
dis(pickle, out=None, memo=None, indentlevel=4)
Print a symbolic disassembly of a pickle.
'''
import codecs
import io
import pickle
import re
import sys
__all__ = ['dis', 'genops', 'optimize']
bytes_types = pickle.bytes_types
# Other ideas:
#
# - A pickle verifier: read a pickle and check it exhaustively for
# well-formedness. dis() does a lot of this already.
#
# - A protocol identifier: examine a pickle and return its protocol number
# (== the highest .proto attr value among all the opcodes in the pickle).
# dis() already prints this info at the end.
#
# - A pickle optimizer: for example, tuple-building code is sometimes more
# elaborate than necessary, catering for the possibility that the tuple
# is recursive. Or lots of times a PUT is generated that's never accessed
# by a later GET.
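# The optimizer idea above exists in this module as optimize(), which
# removes PUT opcodes that no later GET refers to. A sketch (run by
# hand; the helper name is illustrative):
def _example_optimize():
    import pickle as _pickle
    shared = [1, 2]
    data = _pickle.dumps([shared, shared, [3, 4]])
    slim = optimize(data)
    assert _pickle.loads(slim) == _pickle.loads(data)
    assert len(slim) <= len(data)  # only removals, never additions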
# "A pickle" is a program for a virtual pickle machine (PM, but more accurately
# called an unpickling machine). It's a sequence of opcodes, interpreted by the
# PM, building an arbitrarily complex Python object.
#
# For the most part, the PM is very simple: there are no looping, testing, or
# conditional instructions, no arithmetic and no function calls. Opcodes are
# executed once each, from first to last, until a STOP opcode is reached.
#
# The PM has two data areas, "the stack" and "the memo".
#
# Many opcodes push Python objects onto the stack; e.g., INT pushes a Python
# integer object on the stack, whose value is gotten from a decimal string
# literal immediately following the INT opcode in the pickle bytestream. Other
# opcodes take Python objects off the stack. The result of unpickling is
# whatever object is left on the stack when the final STOP opcode is executed.
#
# The memo is simply an array of objects, or it can be implemented as a dict
# mapping little integers to objects. The memo serves as the PM's "long term
# memory", and the little integers indexing the memo are akin to variable
# names. Some opcodes pop a stack object into the memo at a given index,
# and others push a memo object at a given index onto the stack again.
#
# At heart, that's all the PM has. Subtleties arise for these reasons:
#
# + Object identity. Objects can be arbitrarily complex, and subobjects
# may be shared (for example, the list [a, a] refers to the same object a
# twice). It can be vital that unpickling recreate an isomorphic object
# graph, faithfully reproducing sharing.
#
# + Recursive objects. For example, after "L = []; L.append(L)", L is a
# list, and L[0] is the same list. This is related to the object identity
# point, and some sequences of pickle opcodes are subtle in order to
# get the right result in all cases.
#
# + Things pickle doesn't know everything about. Examples of things pickle
# does know everything about are Python's builtin scalar and container
# types, like ints and tuples. They generally have opcodes dedicated to
# them. For things like module references and instances of user-defined
# classes, pickle's knowledge is limited. Historically, many enhancements
# have been made to the pickle protocol in order to do a better (faster,
# and/or more compact) job on those.
#
# + Backward compatibility and micro-optimization. As explained below,
# pickle opcodes never go away, not even when better ways to do a thing
# get invented. The repertoire of the PM just keeps growing over time.
# For example, protocol 0 had two opcodes for building Python integers (INT
# and LONG), protocol 1 added three more for more-efficient pickling of short
# integers, and protocol 2 added two more for more-efficient pickling of
# long integers (before protocol 2, the only ways to pickle a Python long
# took time quadratic in the number of digits, for both pickling and
# unpickling). "Opcode bloat" isn't so much a subtlety as a source of
# wearying complication.
#
#
# Pickle protocols:
#
# For compatibility, the meaning of a pickle opcode never changes. Instead new
# pickle opcodes get added, and each version's unpickler can handle all the
# pickle opcodes in all protocol versions to date. So old pickles continue to
# be readable forever. The pickler can generally be told to restrict itself to
# the subset of opcodes available under previous protocol versions too, so that
# users can create pickles under the current version readable by older
# versions. However, a pickle does not contain its version number embedded
# within it. If an older unpickler tries to read a pickle using a later
# protocol, the result is most likely an exception due to seeing an unknown (in
# the older unpickler) opcode.
#
# The original pickle used what's now called "protocol 0", and what was called
# "text mode" before Python 2.3. The entire pickle bytestream is made up of
# printable 7-bit ASCII characters, plus the newline character, in protocol 0.
# That's why it was called text mode. Protocol 0 is small and elegant, but
# sometimes painfully inefficient.
#
# The second major set of additions is now called "protocol 1", and was called
# "binary mode" before Python 2.3. This added many opcodes with arguments
# consisting of arbitrary bytes, including NUL bytes and unprintable "high bit"
# bytes. Binary mode pickles can be substantially smaller than equivalent
# text mode pickles, and sometimes faster too; e.g., BININT represents a 4-byte
# int as 4 bytes following the opcode, which is cheaper to unpickle than the
# (perhaps) 11-character decimal string attached to INT. Protocol 1 also added
# a number of opcodes that operate on many stack elements at once (like APPENDS
# and SETITEMS), and "shortcut" opcodes (like EMPTY_DICT and EMPTY_TUPLE).
#
# The third major set of additions came in Python 2.3, and is called "protocol
# 2". This added:
#
# - A better way to pickle instances of new-style classes (NEWOBJ).
#
# - A way for a pickle to identify its protocol (PROTO).
#
# - Time- and space- efficient pickling of long ints (LONG{1,4}).
#
# - Shortcuts for small tuples (TUPLE{1,2,3}}.
#
# - Dedicated opcodes for bools (NEWTRUE, NEWFALSE).
#
# - The "extension registry", a vector of popular objects that can be pushed
# efficiently by index (EXT{1,2,4}). This is akin to the memo and GET, but
# the registry contents are predefined (there's nothing akin to the memo's
# PUT).
#
# Another independent change with Python 2.3 is the abandonment of any
# pretense that it might be safe to load pickles received from untrusted
# parties -- no sufficient security analysis has been done to guarantee
# this and there isn't a use case that warrants the expense of such an
# analysis.
#
# To this end, all tests for __safe_for_unpickling__ or for
# copyreg.safe_constructors are removed from the unpickling code.
# References to these variables in the descriptions below are to be seen
# as describing unpickling in Python 2.2 and before.
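# A short illustration of the protocol differences described above (a
# sketch; run by hand). Protocol 0 is printable ASCII plus newline,
# while the binary protocols are usually much smaller.
def _example_protocols():
    import pickle as _pickle
    obj = [1, 2, 3] * 10
    p0 = _pickle.dumps(obj, protocol=0)
    p2 = _pickle.dumps(obj, protocol=2)
    assert all(b == 10 or 32 <= b < 128 for b in p0)  # text-mode bytes
    assert len(p2) < len(p0)
    return len(p0), len(p2)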
# Meta-rule: Descriptions are stored in instances of descriptor objects,
# with plain constructors. No meta-language is defined from which
# descriptors could be constructed. If you want, e.g., XML, write a little
# program to generate XML from the objects.
##############################################################################
# Some pickle opcodes have an argument, following the opcode in the
# bytestream. An argument is of a specific type, described by an instance
# of ArgumentDescriptor. These are not to be confused with arguments taken
# off the stack -- ArgumentDescriptor applies only to arguments embedded in
# the opcode stream, immediately following an opcode.
# Represents the number of bytes consumed by an argument delimited by the
# next newline character.
UP_TO_NEWLINE = -1
# Represents the number of bytes consumed by a two-argument opcode where
# the first argument gives the number of bytes in the second argument.
TAKEN_FROM_ARGUMENT1 = -2 # num bytes is 1-byte unsigned int
TAKEN_FROM_ARGUMENT4 = -3 # num bytes is 4-byte signed little-endian int
TAKEN_FROM_ARGUMENT4U = -4 # num bytes is 4-byte unsigned little-endian int
TAKEN_FROM_ARGUMENT8U = -5 # num bytes is 8-byte unsigned little-endian int
class ArgumentDescriptor(object):
__slots__ = (
# name of descriptor record, also a module global name; a string
'name',
# length of argument, in bytes; an int; UP_TO_NEWLINE and
# TAKEN_FROM_ARGUMENT{1,4,8} are negative values for variable-length
# cases
'n',
# a function taking a file-like object, reading this kind of argument
# from the object at the current position, advancing the current
# position by n bytes, and returning the value of the argument
'reader',
# human-readable docs for this arg descriptor; a string
'doc',
)
def __init__(self, name, n, reader, doc):
assert isinstance(name, str)
self.name = name
assert isinstance(n, int) and (n >= 0 or
n in (UP_TO_NEWLINE,
TAKEN_FROM_ARGUMENT1,
TAKEN_FROM_ARGUMENT4,
TAKEN_FROM_ARGUMENT4U,
TAKEN_FROM_ARGUMENT8U))
self.n = n
self.reader = reader
assert isinstance(doc, str)
self.doc = doc
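# A toy descriptor, to illustrate the record layout (hypothetical, not
# used by the opcode tables below): the reader consumes bytes from a
# file-like object positioned just past the opcode.
def _example_descriptor():
    import io as _io

    def read_pair(f):
        data = f.read(2)
        if len(data) != 2:
            raise ValueError("not enough data in stream to read a pair")
        return data[0], data[1]

    pair = ArgumentDescriptor(
        name='pair',
        n=2,
        reader=read_pair,
        doc="Two consecutive unsigned bytes.")
    assert pair.reader(_io.BytesIO(b'\x01\x02junk')) == (1, 2)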
from struct import unpack as _unpack
def read_uint1(f):
r"""
>>> import io
>>> read_uint1(io.BytesIO(b'\xff'))
255
"""
data = f.read(1)
if data:
return data[0]
raise ValueError("not enough data in stream to read uint1")
uint1 = ArgumentDescriptor(
name='uint1',
n=1,
reader=read_uint1,
doc="One-byte unsigned integer.")
def read_uint2(f):
r"""
>>> import io
>>> read_uint2(io.BytesIO(b'\xff\x00'))
255
>>> read_uint2(io.BytesIO(b'\xff\xff'))
65535
"""
data = f.read(2)
if len(data) == 2:
return _unpack("<H", data)[0]
raise ValueError("not enough data in stream to read uint2")
uint2 = ArgumentDescriptor(
name='uint2',
n=2,
reader=read_uint2,
doc="Two-byte unsigned integer, little-endian.")
def read_int4(f):
r"""
>>> import io
>>> read_int4(io.BytesIO(b'\xff\x00\x00\x00'))
255
>>> read_int4(io.BytesIO(b'\x00\x00\x00\x80')) == -(2**31)
True
"""
data = f.read(4)
if len(data) == 4:
return _unpack("<i", data)[0]
raise ValueError("not enough data in stream to read int4")
int4 = ArgumentDescriptor(
name='int4',
n=4,
reader=read_int4,
doc="Four-byte signed integer, little-endian, 2's complement.")
def read_uint4(f):
r"""
>>> import io
>>> read_uint4(io.BytesIO(b'\xff\x00\x00\x00'))
255
>>> read_uint4(io.BytesIO(b'\x00\x00\x00\x80')) == 2**31
True
"""
data = f.read(4)
if len(data) == 4:
return _unpack("<I", data)[0]
raise ValueError("not enough data in stream to read uint4")
uint4 = ArgumentDescriptor(
name='uint4',
n=4,
reader=read_uint4,
doc="Four-byte unsigned integer, little-endian.")
def read_uint8(f):
r"""
>>> import io
>>> read_uint8(io.BytesIO(b'\xff\x00\x00\x00\x00\x00\x00\x00'))
255
>>> read_uint8(io.BytesIO(b'\xff' * 8)) == 2**64-1
True
"""
data = f.read(8)
if len(data) == 8:
return _unpack("<Q", data)[0]
raise ValueError("not enough data in stream to read uint8")
uint8 = ArgumentDescriptor(
name='uint8',
n=8,
reader=read_uint8,
doc="Eight-byte unsigned integer, little-endian.")
def read_stringnl(f, decode=True, stripquotes=True):
r"""
>>> import io
>>> read_stringnl(io.BytesIO(b"'abcd'\nefg\n"))
'abcd'
>>> read_stringnl(io.BytesIO(b"\n"))
Traceback (most recent call last):
...
ValueError: no string quotes around b''
>>> read_stringnl(io.BytesIO(b"\n"), stripquotes=False)
''
>>> read_stringnl(io.BytesIO(b"''\n"))
''
>>> read_stringnl(io.BytesIO(b'"abcd"'))
Traceback (most recent call last):
...
ValueError: no newline found when trying to read stringnl
Embedded escapes are undone in the result.
>>> read_stringnl(io.BytesIO(br"'a\n\\b\x00c\td'" + b"\n'e'"))
'a\n\\b\x00c\td'
"""
data = f.readline()
if not data.endswith(b'\n'):
raise ValueError("no newline found when trying to read stringnl")
data = data[:-1] # lose the newline
if stripquotes:
for q in (b'"', b"'"):
if data.startswith(q):
if not data.endswith(q):
raise ValueError("strinq quote %r not found at both "
"ends of %r" % (q, data))
data = data[1:-1]
break
else:
raise ValueError("no string quotes around %r" % data)
if decode:
data = codecs.escape_decode(data)[0].decode("ascii")
return data
stringnl = ArgumentDescriptor(
name='stringnl',
n=UP_TO_NEWLINE,
reader=read_stringnl,
doc="""A newline-terminated string.
This is a repr-style string, with embedded escapes, and
bracketing quotes.
""")
def read_stringnl_noescape(f):
return read_stringnl(f, stripquotes=False)
stringnl_noescape = ArgumentDescriptor(
name='stringnl_noescape',
n=UP_TO_NEWLINE,
reader=read_stringnl_noescape,
doc="""A newline-terminated string.
This is a str-style string, without embedded escapes,
or bracketing quotes. It should consist solely of
printable ASCII characters.
""")
def read_stringnl_noescape_pair(f):
r"""
>>> import io
>>> read_stringnl_noescape_pair(io.BytesIO(b"Queue\nEmpty\njunk"))
'Queue Empty'
"""
return "%s %s" % (read_stringnl_noescape(f), read_stringnl_noescape(f))
stringnl_noescape_pair = ArgumentDescriptor(
name='stringnl_noescape_pair',
n=UP_TO_NEWLINE,
reader=read_stringnl_noescape_pair,
doc="""A pair of newline-terminated strings.
These are str-style strings, without embedded
escapes, or bracketing quotes. They should
consist solely of printable ASCII characters.
The pair is returned as a single string, with
a single blank separating the two strings.
""")
def read_string1(f):
r"""
>>> import io
>>> read_string1(io.BytesIO(b"\x00"))
''
>>> read_string1(io.BytesIO(b"\x03abcdef"))
'abc'
"""
n = read_uint1(f)
assert n >= 0
data = f.read(n)
if len(data) == n:
return data.decode("latin-1")
raise ValueError("expected %d bytes in a string1, but only %d remain" %
(n, len(data)))
string1 = ArgumentDescriptor(
name="string1",
n=TAKEN_FROM_ARGUMENT1,
reader=read_string1,
doc="""A counted string.
The first argument is a 1-byte unsigned int giving the number
of bytes in the string, and the second argument is that many
bytes.
""")
def read_string4(f):
r"""
>>> import io
>>> read_string4(io.BytesIO(b"\x00\x00\x00\x00abc"))
''
>>> read_string4(io.BytesIO(b"\x03\x00\x00\x00abcdef"))
'abc'
>>> read_string4(io.BytesIO(b"\x00\x00\x00\x03abcdef"))
Traceback (most recent call last):
...
ValueError: expected 50331648 bytes in a string4, but only 6 remain
"""
n = read_int4(f)
if n < 0:
raise ValueError("string4 byte count < 0: %d" % n)
data = f.read(n)
if len(data) == n:
return data.decode("latin-1")
raise ValueError("expected %d bytes in a string4, but only %d remain" %
(n, len(data)))
string4 = ArgumentDescriptor(
name="string4",
n=TAKEN_FROM_ARGUMENT4,
reader=read_string4,
doc="""A counted string.
The first argument is a 4-byte little-endian signed int giving
the number of bytes in the string, and the second argument is
that many bytes.
""")
def read_bytes1(f):
r"""
>>> import io
>>> read_bytes1(io.BytesIO(b"\x00"))
b''
>>> read_bytes1(io.BytesIO(b"\x03abcdef"))
b'abc'
"""
n = read_uint1(f)
assert n >= 0
data = f.read(n)
if len(data) == n:
return data
raise ValueError("expected %d bytes in a bytes1, but only %d remain" %
(n, len(data)))
bytes1 = ArgumentDescriptor(
name="bytes1",
n=TAKEN_FROM_ARGUMENT1,
reader=read_bytes1,
doc="""A counted bytes string.
The first argument is a 1-byte unsigned int giving the number
of bytes in the string, and the second argument is that many
bytes.
""")
def read_bytes4(f):
r"""
>>> import io
>>> read_bytes4(io.BytesIO(b"\x00\x00\x00\x00abc"))
b''
>>> read_bytes4(io.BytesIO(b"\x03\x00\x00\x00abcdef"))
b'abc'
>>> read_bytes4(io.BytesIO(b"\x00\x00\x00\x03abcdef"))
Traceback (most recent call last):
...
ValueError: expected 50331648 bytes in a bytes4, but only 6 remain
"""
n = read_uint4(f)
assert n >= 0
if n > sys.maxsize:
raise ValueError("bytes4 byte count > sys.maxsize: %d" % n)
data = f.read(n)
if len(data) == n:
return data
raise ValueError("expected %d bytes in a bytes4, but only %d remain" %
(n, len(data)))
bytes4 = ArgumentDescriptor(
name="bytes4",
n=TAKEN_FROM_ARGUMENT4U,
reader=read_bytes4,
doc="""A counted bytes string.
The first argument is a 4-byte little-endian unsigned int giving
the number of bytes, and the second argument is that many bytes.
""")
def read_bytes8(f):
r"""
>>> import io, struct, sys
>>> read_bytes8(io.BytesIO(b"\x00\x00\x00\x00\x00\x00\x00\x00abc"))
b''
>>> read_bytes8(io.BytesIO(b"\x03\x00\x00\x00\x00\x00\x00\x00abcdef"))
b'abc'
>>> bigsize8 = struct.pack("<Q", sys.maxsize//3)
>>> read_bytes8(io.BytesIO(bigsize8 + b"abcdef")) #doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError: expected ... bytes in a bytes8, but only 6 remain
"""
n = read_uint8(f)
assert n >= 0
if n > sys.maxsize:
raise ValueError("bytes8 byte count > sys.maxsize: %d" % n)
data = f.read(n)
if len(data) == n:
return data
raise ValueError("expected %d bytes in a bytes8, but only %d remain" %
(n, len(data)))
bytes8 = ArgumentDescriptor(
name="bytes8",
n=TAKEN_FROM_ARGUMENT8U,
reader=read_bytes8,
doc="""A counted bytes string.
The first argument is an 8-byte little-endian unsigned int giving
the number of bytes, and the second argument is that many bytes.
""")
def read_unicodestringnl(f):
r"""
>>> import io
>>> read_unicodestringnl(io.BytesIO(b"abc\\uabcd\njunk")) == 'abc\uabcd'
True
"""
data = f.readline()
if not data.endswith(b'\n'):
raise ValueError("no newline found when trying to read "
"unicodestringnl")
data = data[:-1] # lose the newline
return str(data, 'raw-unicode-escape')
unicodestringnl = ArgumentDescriptor(
name='unicodestringnl',
n=UP_TO_NEWLINE,
reader=read_unicodestringnl,
doc="""A newline-terminated Unicode string.
This is raw-unicode-escape encoded, so consists of
printable ASCII characters, and may contain embedded
escape sequences.
""")
def read_unicodestring1(f):
r"""
>>> import io
>>> s = 'abcd\uabcd'
>>> enc = s.encode('utf-8')
>>> enc
b'abcd\xea\xaf\x8d'
>>> n = bytes([len(enc)]) # little-endian 1-byte length
>>> t = read_unicodestring1(io.BytesIO(n + enc + b'junk'))
>>> s == t
True
>>> read_unicodestring1(io.BytesIO(n + enc[:-1]))
Traceback (most recent call last):
...
ValueError: expected 7 bytes in a unicodestring1, but only 6 remain
"""
n = read_uint1(f)
assert n >= 0
data = f.read(n)
if len(data) == n:
return str(data, 'utf-8', 'surrogatepass')
raise ValueError("expected %d bytes in a unicodestring1, but only %d "
"remain" % (n, len(data)))
unicodestring1 = ArgumentDescriptor(
name="unicodestring1",
n=TAKEN_FROM_ARGUMENT1,
reader=read_unicodestring1,
doc="""A counted Unicode string.
The first argument is a 1-byte unsigned int
giving the number of bytes in the string, and the second
argument-- the UTF-8 encoding of the Unicode string --
contains that many bytes.
""")
def read_unicodestring4(f):
r"""
>>> import io
>>> s = 'abcd\uabcd'
>>> enc = s.encode('utf-8')
>>> enc
b'abcd\xea\xaf\x8d'
>>> n = bytes([len(enc), 0, 0, 0]) # little-endian 4-byte length
>>> t = read_unicodestring4(io.BytesIO(n + enc + b'junk'))
>>> s == t
True
>>> read_unicodestring4(io.BytesIO(n + enc[:-1]))
Traceback (most recent call last):
...
ValueError: expected 7 bytes in a unicodestring4, but only 6 remain
"""
n = read_uint4(f)
assert n >= 0
if n > sys.maxsize:
raise ValueError("unicodestring4 byte count > sys.maxsize: %d" % n)
data = f.read(n)
if len(data) == n:
return str(data, 'utf-8', 'surrogatepass')
raise ValueError("expected %d bytes in a unicodestring4, but only %d "
"remain" % (n, len(data)))
unicodestring4 = ArgumentDescriptor(
name="unicodestring4",
n=TAKEN_FROM_ARGUMENT4U,
reader=read_unicodestring4,
doc="""A counted Unicode string.
The first argument is a 4-byte little-endian unsigned int
giving the number of bytes in the string, and the second
argument-- the UTF-8 encoding of the Unicode string --
contains that many bytes.
""")
def read_unicodestring8(f):
r"""
>>> import io
>>> s = 'abcd\uabcd'
>>> enc = s.encode('utf-8')
>>> enc
b'abcd\xea\xaf\x8d'
>>> n = bytes([len(enc)]) + bytes(7) # little-endian 8-byte length
>>> t = read_unicodestring8(io.BytesIO(n + enc + b'junk'))
>>> s == t
True
>>> read_unicodestring8(io.BytesIO(n + enc[:-1]))
Traceback (most recent call last):
...
ValueError: expected 7 bytes in a unicodestring8, but only 6 remain
"""
n = read_uint8(f)
assert n >= 0
if n > sys.maxsize:
raise ValueError("unicodestring8 byte count > sys.maxsize: %d" % n)
data = f.read(n)
if len(data) == n:
return str(data, 'utf-8', 'surrogatepass')
raise ValueError("expected %d bytes in a unicodestring8, but only %d "
"remain" % (n, len(data)))
unicodestring8 = ArgumentDescriptor(
name="unicodestring8",
n=TAKEN_FROM_ARGUMENT8U,
reader=read_unicodestring8,
doc="""A counted Unicode string.
The first argument is an 8-byte little-endian unsigned int
giving the number of bytes in the string, and the second
argument-- the UTF-8 encoding of the Unicode string --
contains that many bytes.
""")
def read_decimalnl_short(f):
r"""
>>> import io
>>> read_decimalnl_short(io.BytesIO(b"1234\n56"))
1234
>>> read_decimalnl_short(io.BytesIO(b"1234L\n56"))
Traceback (most recent call last):
...
ValueError: invalid literal for int() with base 10: b'1234L'
"""
s = read_stringnl(f, decode=False, stripquotes=False)
# There's a hack for True and False here.
if s == b"00":
return False
elif s == b"01":
return True
return int(s)
def read_decimalnl_long(f):
r"""
>>> import io
>>> read_decimalnl_long(io.BytesIO(b"1234L\n56"))
1234
>>> read_decimalnl_long(io.BytesIO(b"123456789012345678901234L\n6"))
123456789012345678901234
"""
s = read_stringnl(f, decode=False, stripquotes=False)
if s[-1:] == b'L':
s = s[:-1]
return int(s)
decimalnl_short = ArgumentDescriptor(
name='decimalnl_short',
n=UP_TO_NEWLINE,
reader=read_decimalnl_short,
doc="""A newline-terminated decimal integer literal.
This never has a trailing 'L', and the integer fit
in a short Python int on the box where the pickle
was written -- but there's no guarantee it will fit
in a short Python int on the box where the pickle
is read.
""")
decimalnl_long = ArgumentDescriptor(
name='decimalnl_long',
n=UP_TO_NEWLINE,
reader=read_decimalnl_long,
doc="""A newline-terminated decimal integer literal.
This has a trailing 'L', and can represent integers
of any size.
""")
def read_floatnl(f):
r"""
>>> import io
>>> read_floatnl(io.BytesIO(b"-1.25\n6"))
-1.25
"""
s = read_stringnl(f, decode=False, stripquotes=False)
return float(s)
floatnl = ArgumentDescriptor(
name='floatnl',
n=UP_TO_NEWLINE,
reader=read_floatnl,
doc="""A newline-terminated decimal floating literal.
In general this requires 17 significant digits for roundtrip
identity, and pickling then unpickling infinities, NaNs, and
minus zero doesn't work across boxes, or on some boxes even
on itself (e.g., Windows can't read the strings it produces
for infinities or NaNs).
""")
def read_float8(f):
r"""
>>> import io, struct
>>> raw = struct.pack(">d", -1.25)
>>> raw
b'\xbf\xf4\x00\x00\x00\x00\x00\x00'
>>> read_float8(io.BytesIO(raw + b"\n"))
-1.25
"""
data = f.read(8)
if len(data) == 8:
return _unpack(">d", data)[0]
raise ValueError("not enough data in stream to read float8")
float8 = ArgumentDescriptor(
name='float8',
n=8,
reader=read_float8,
doc="""An 8-byte binary representation of a float, big-endian.
The format is unique to Python, and shared with the struct
module (format string '>d') "in theory" (the struct and pickle
implementations don't share the code -- they should). It's
strongly related to the IEEE-754 double format, and, in normal
cases, is in fact identical to the big-endian 754 double format.
On other boxes the dynamic range is limited to that of a 754
double, and "add a half and chop" rounding is used to reduce
the precision to 53 bits. However, even on a 754 box,
infinities, NaNs, and minus zero may not be handled correctly
(may not survive roundtrip pickling intact).
""")
# Protocol 2 formats
from pickle import decode_long
def read_long1(f):
r"""
>>> import io
>>> read_long1(io.BytesIO(b"\x00"))
0
>>> read_long1(io.BytesIO(b"\x02\xff\x00"))
255
>>> read_long1(io.BytesIO(b"\x02\xff\x7f"))
32767
>>> read_long1(io.BytesIO(b"\x02\x00\xff"))
-256
>>> read_long1(io.BytesIO(b"\x02\x00\x80"))
-32768
"""
n = read_uint1(f)
data = f.read(n)
if len(data) != n:
raise ValueError("not enough data in stream to read long1")
return decode_long(data)
long1 = ArgumentDescriptor(
name="long1",
n=TAKEN_FROM_ARGUMENT1,
reader=read_long1,
doc="""A binary long, little-endian, using 1-byte size.
This first reads one byte as an unsigned size, then reads that
many bytes and interprets them as a little-endian 2's-complement long.
If the size is 0, that's taken as a shortcut for the long 0L.
""")
def read_long4(f):
r"""
>>> import io
>>> read_long4(io.BytesIO(b"\x02\x00\x00\x00\xff\x00"))
255
>>> read_long4(io.BytesIO(b"\x02\x00\x00\x00\xff\x7f"))
32767
>>> read_long4(io.BytesIO(b"\x02\x00\x00\x00\x00\xff"))
-256
>>> read_long4(io.BytesIO(b"\x02\x00\x00\x00\x00\x80"))
-32768
>>> read_long1(io.BytesIO(b"\x00\x00\x00\x00"))
0
"""
n = read_int4(f)
if n < 0:
raise ValueError("long4 byte count < 0: %d" % n)
data = f.read(n)
if len(data) != n:
raise ValueError("not enough data in stream to read long4")
return decode_long(data)
long4 = ArgumentDescriptor(
name="long4",
n=TAKEN_FROM_ARGUMENT4,
reader=read_long4,
doc="""A binary representation of a long, little-endian.
This first reads four bytes as a signed size (but requires the
size to be >= 0), then reads that many bytes and interprets them
as a little-endian 2's-complement long. If the size is 0, that's taken
as a shortcut for the int 0, although LONG1 should really be used
then instead (and in any case where # of bytes < 256).
""")
##############################################################################
# Object descriptors. The stack used by the pickle machine holds objects,
# and in the stack_before and stack_after attributes of OpcodeInfo
# descriptors we need names to describe the various types of objects that can
# appear on the stack.
class StackObject(object):
__slots__ = (
# name of descriptor record, for info only
'name',
# type of object, or tuple of type objects (meaning the object can
# be of any type in the tuple)
'obtype',
# human-readable docs for this kind of stack object; a string
'doc',
)
def __init__(self, name, obtype, doc):
assert isinstance(name, str)
self.name = name
assert isinstance(obtype, type) or isinstance(obtype, tuple)
if isinstance(obtype, tuple):
for contained in obtype:
assert isinstance(contained, type)
self.obtype = obtype
assert isinstance(doc, str)
self.doc = doc
def __repr__(self):
return self.name
pyint = pylong = StackObject(
name='int',
obtype=int,
doc="A Python integer object.")
pyinteger_or_bool = StackObject(
name='int_or_bool',
obtype=(int, bool),
doc="A Python integer or boolean object.")
pybool = StackObject(
name='bool',
obtype=bool,
doc="A Python boolean object.")
pyfloat = StackObject(
name='float',
obtype=float,
doc="A Python float object.")
pybytes_or_str = pystring = StackObject(
name='bytes_or_str',
obtype=(bytes, str),
doc="A Python bytes or (Unicode) string object.")
pybytes = StackObject(
name='bytes',
obtype=bytes,
doc="A Python bytes object.")
pyunicode = StackObject(
name='str',
obtype=str,
doc="A Python (Unicode) string object.")
pynone = StackObject(
name="None",
obtype=type(None),
doc="The Python None object.")
pytuple = StackObject(
name="tuple",
obtype=tuple,
doc="A Python tuple object.")
pylist = StackObject(
name="list",
obtype=list,
doc="A Python list object.")
pydict = StackObject(
name="dict",
obtype=dict,
doc="A Python dict object.")
pyset = StackObject(
name="set",
obtype=set,
doc="A Python set object.")
pyfrozenset = StackObject(
name="frozenset",
obtype=frozenset,
doc="A Python frozenset object.")
anyobject = StackObject(
name='any',
obtype=object,
doc="Any kind of object whatsoever.")
markobject = StackObject(
name="mark",
obtype=StackObject,
doc="""'The mark' is a unique object.
Opcodes that operate on a variable number of objects
generally don't embed the count of objects in the opcode,
or pull it off the stack. Instead the MARK opcode is used
to push a special marker object on the stack, and then
some other opcodes grab all the objects from the top of
the stack down to (but not including) the topmost marker
object.
""")
stackslice = StackObject(
name="stackslice",
obtype=StackObject,
doc="""An object representing a contiguous slice of the stack.
This is used in conjunction with markobject, to represent all
of the stack following the topmost markobject. For example,
the POP_MARK opcode changes the stack from
[..., markobject, stackslice]
to
[...]
No matter how many object are on the stack after the topmost
markobject, POP_MARK gets rid of all of them (including the
topmost markobject too).
""")
##############################################################################
# Descriptors for pickle opcodes.
class OpcodeInfo(object):
__slots__ = (
# symbolic name of opcode; a string
'name',
# the code used in a bytestream to represent the opcode; a
# one-character string
'code',
# If the opcode has an argument embedded in the byte string, an
# instance of ArgumentDescriptor specifying its type. Note that
# arg.reader(s) can be used to read and decode the argument from
# the bytestream s, and arg.doc documents the format of the raw
# argument bytes. If the opcode doesn't have an argument embedded
# in the bytestream, arg should be None.
'arg',
# what the stack looks like before this opcode runs; a list
'stack_before',
# what the stack looks like after this opcode runs; a list
'stack_after',
# the protocol number in which this opcode was introduced; an int
'proto',
# human-readable docs for this opcode; a string
'doc',
)
def __init__(self, name, code, arg,
stack_before, stack_after, proto, doc):
assert isinstance(name, str)
self.name = name
assert isinstance(code, str)
assert len(code) == 1
self.code = code
assert arg is None or isinstance(arg, ArgumentDescriptor)
self.arg = arg
assert isinstance(stack_before, list)
for x in stack_before:
assert isinstance(x, StackObject)
self.stack_before = stack_before
assert isinstance(stack_after, list)
for x in stack_after:
assert isinstance(x, StackObject)
self.stack_after = stack_after
assert isinstance(proto, int) and 0 <= proto <= pickle.HIGHEST_PROTOCOL
self.proto = proto
assert isinstance(doc, str)
self.doc = doc
I = OpcodeInfo
opcodes = [
# Ways to spell integers.
I(name='INT',
code='I',
arg=decimalnl_short,
stack_before=[],
stack_after=[pyinteger_or_bool],
proto=0,
doc="""Push an integer or bool.
The argument is a newline-terminated decimal literal string.
The intent may have been that this always fit in a short Python int,
but INT can be generated in pickles written on a 64-bit box that
require a Python long on a 32-bit box. The difference between this
and LONG then is that INT skips a trailing 'L', and produces a short
int whenever possible.
Another difference is due to that, when bool was introduced as a
distinct type in 2.3, builtin names True and False were also added to
2.2.2, mapping to ints 1 and 0. For compatibility in both directions,
True gets pickled as INT + "I01\\n", and False as INT + "I00\\n".
Leading zeroes are never produced for a genuine integer. The 2.3
(and later) unpicklers special-case these and return bool instead;
earlier unpicklers ignore the leading "0" and return the int.
"""),
I(name='BININT',
code='J',
arg=int4,
stack_before=[],
stack_after=[pyint],
proto=1,
doc="""Push a four-byte signed integer.
This handles the full range of Python (short) integers on a 32-bit
box, directly as binary bytes (1 for the opcode and 4 for the integer).
If the integer is non-negative and fits in 1 or 2 bytes, pickling via
BININT1 or BININT2 saves space.
"""),
I(name='BININT1',
code='K',
arg=uint1,
stack_before=[],
stack_after=[pyint],
proto=1,
doc="""Push a one-byte unsigned integer.
This is a space optimization for pickling very small non-negative ints,
in range(256).
"""),
I(name='BININT2',
code='M',
arg=uint2,
stack_before=[],
stack_after=[pyint],
proto=1,
doc="""Push a two-byte unsigned integer.
This is a space optimization for pickling small positive ints, in
range(256, 2**16). Integers in range(256) can also be pickled via
BININT2, but BININT1 instead saves a byte.
"""),
I(name='LONG',
code='L',
arg=decimalnl_long,
stack_before=[],
stack_after=[pyint],
proto=0,
doc="""Push a long integer.
The same as INT, except that the literal ends with 'L', and always
unpickles to a Python long. There doesn't seem to be a real purpose to the
trailing 'L'.
Note that LONG takes time quadratic in the number of digits when
unpickling (this is simply due to the nature of decimal->binary
conversion). Proto 2 added linear-time (in C; still quadratic-time
in Python) LONG1 and LONG4 opcodes.
"""),
I(name="LONG1",
code='\x8a',
arg=long1,
stack_before=[],
stack_after=[pyint],
proto=2,
doc="""Long integer using one-byte length.
A more efficient encoding of a Python long; the long1 encoding
says it all."""),
I(name="LONG4",
code='\x8b',
arg=long4,
stack_before=[],
stack_after=[pyint],
proto=2,
doc="""Long integer using found-byte length.
A more efficient encoding of a Python long; the long4 encoding
says it all."""),
# Ways to spell strings (8-bit, not Unicode).
I(name='STRING',
code='S',
arg=stringnl,
stack_before=[],
stack_after=[pybytes_or_str],
proto=0,
doc="""Push a Python string object.
The argument is a repr-style string, with bracketing quote characters,
and perhaps embedded escapes. The argument extends until the next
newline character. These are usually decoded into a str instance
using the encoding given to the Unpickler constructor, or the default,
'ASCII'. If the encoding given was 'bytes' however, they will be
decoded as bytes object instead.
"""),
I(name='BINSTRING',
code='T',
arg=string4,
stack_before=[],
stack_after=[pybytes_or_str],
proto=1,
doc="""Push a Python string object.
There are two arguments: the first is a 4-byte little-endian
signed int giving the number of bytes in the string, and the
second is that many bytes, which are taken literally as the string
content. These are usually decoded into a str instance using the
encoding given to the Unpickler constructor, or the default,
'ASCII'. If the encoding given was 'bytes' however, they will be
decoded as bytes object instead.
"""),
I(name='SHORT_BINSTRING',
code='U',
arg=string1,
stack_before=[],
stack_after=[pybytes_or_str],
proto=1,
doc="""Push a Python string object.
There are two arguments: the first is a 1-byte unsigned int giving
the number of bytes in the string, and the second is that many
bytes, which are taken literally as the string content. These are
usually decoded into a str instance using the encoding given to
the Unpickler constructor, or the default, 'ASCII'. If the
encoding given was 'bytes' however, they will be decoded as bytes
object instead.
"""),
# Bytes (protocol 3 only; older protocols don't support bytes at all)
I(name='BINBYTES',
code='B',
arg=bytes4,
stack_before=[],
stack_after=[pybytes],
proto=3,
doc="""Push a Python bytes object.
There are two arguments: the first is a 4-byte little-endian unsigned int
giving the number of bytes, and the second is that many bytes, which are
taken literally as the bytes content.
"""),
I(name='SHORT_BINBYTES',
code='C',
arg=bytes1,
stack_before=[],
stack_after=[pybytes],
proto=3,
doc="""Push a Python bytes object.
There are two arguments: the first is a 1-byte unsigned int giving
the number of bytes, and the second is that many bytes, which are taken
literally as the string content.
"""),
I(name='BINBYTES8',
code='\x8e',
arg=bytes8,
stack_before=[],
stack_after=[pybytes],
proto=4,
doc="""Push a Python bytes object.
There are two arguments: the first is an 8-byte unsigned int giving
the number of bytes in the string, and the second is that many bytes,
which are taken literally as the string content.
"""),
# Ways to spell None.
I(name='NONE',
code='N',
arg=None,
stack_before=[],
stack_after=[pynone],
proto=0,
doc="Push None on the stack."),
# Ways to spell bools, starting with proto 2. See INT for how this was
# done before proto 2.
I(name='NEWTRUE',
code='\x88',
arg=None,
stack_before=[],
stack_after=[pybool],
proto=2,
doc="""True.
Push True onto the stack."""),
I(name='NEWFALSE',
code='\x89',
arg=None,
stack_before=[],
stack_after=[pybool],
proto=2,
doc="""True.
Push False onto the stack."""),
# Ways to spell Unicode strings.
I(name='UNICODE',
code='V',
arg=unicodestringnl,
stack_before=[],
stack_after=[pyunicode],
proto=0, # this may be pure-text, but it's a later addition
doc="""Push a Python Unicode string object.
The argument is a raw-unicode-escape encoding of a Unicode string,
and so may contain embedded escape sequences. The argument extends
until the next newline character.
"""),
I(name='SHORT_BINUNICODE',
code='\x8c',
arg=unicodestring1,
stack_before=[],
stack_after=[pyunicode],
proto=4,
doc="""Push a Python Unicode string object.
There are two arguments: the first is a 1-byte unsigned int
giving the number of bytes in the string. The second is that many
bytes, and is the UTF-8 encoding of the Unicode string.
"""),
I(name='BINUNICODE',
code='X',
arg=unicodestring4,
stack_before=[],
stack_after=[pyunicode],
proto=1,
doc="""Push a Python Unicode string object.
There are two arguments: the first is a 4-byte little-endian unsigned int
giving the number of bytes in the string. The second is that many
bytes, and is the UTF-8 encoding of the Unicode string.
"""),
I(name='BINUNICODE8',
code='\x8d',
arg=unicodestring8,
stack_before=[],
stack_after=[pyunicode],
proto=4,
doc="""Push a Python Unicode string object.
There are two arguments: the first is an 8-byte little-endian unsigned int
giving the number of bytes in the string. The second is that many
bytes, and is the UTF-8 encoding of the Unicode string.
"""),
# Ways to spell floats.
I(name='FLOAT',
code='F',
arg=floatnl,
stack_before=[],
stack_after=[pyfloat],
proto=0,
doc="""Newline-terminated decimal float literal.
The argument is repr(a_float), and in general requires 17 significant
digits for roundtrip conversion to be an identity (this is so for
IEEE-754 double precision values, which is what Python float maps to
on most boxes).
In general, FLOAT cannot be used to transport infinities, NaNs, or
minus zero across boxes (or even on a single box, if the platform C
library can't read the strings it produces for such things -- Windows
is like that), but may do less damage than BINFLOAT on boxes with
greater precision or dynamic range than IEEE-754 double.
"""),
I(name='BINFLOAT',
code='G',
arg=float8,
stack_before=[],
stack_after=[pyfloat],
proto=1,
doc="""Float stored in binary form, with 8 bytes of data.
This generally requires less than half the space of FLOAT encoding.
In general, BINFLOAT cannot be used to transport infinities, NaNs, or
minus zero, raises an exception if the exponent exceeds the range of
an IEEE-754 double, and retains no more than 53 bits of precision (if
there are more than that, "add a half and chop" rounding is used to
cut it back to 53 significant bits).
"""),
# Ways to build lists.
I(name='EMPTY_LIST',
code=']',
arg=None,
stack_before=[],
stack_after=[pylist],
proto=1,
doc="Push an empty list."),
I(name='APPEND',
code='a',
arg=None,
stack_before=[pylist, anyobject],
stack_after=[pylist],
proto=0,
doc="""Append an object to a list.
Stack before: ... pylist anyobject
Stack after: ... pylist+[anyobject]
although pylist is really extended in-place.
"""),
I(name='APPENDS',
code='e',
arg=None,
stack_before=[pylist, markobject, stackslice],
stack_after=[pylist],
proto=1,
doc="""Extend a list by a slice of stack objects.
Stack before: ... pylist markobject stackslice
Stack after: ... pylist+stackslice
although pylist is really extended in-place.
"""),
I(name='LIST',
code='l',
arg=None,
stack_before=[markobject, stackslice],
stack_after=[pylist],
proto=0,
doc="""Build a list out of the topmost stack slice, after markobject.
All the stack entries following the topmost markobject are placed into
a single Python list, which single list object replaces all of the
stack from the topmost markobject onward. For example,
Stack before: ... markobject 1 2 3 'abc'
Stack after: ... [1, 2, 3, 'abc']
"""),
# Ways to build tuples.
I(name='EMPTY_TUPLE',
code=')',
arg=None,
stack_before=[],
stack_after=[pytuple],
proto=1,
doc="Push an empty tuple."),
I(name='TUPLE',
code='t',
arg=None,
stack_before=[markobject, stackslice],
stack_after=[pytuple],
proto=0,
doc="""Build a tuple out of the topmost stack slice, after markobject.
All the stack entries following the topmost markobject are placed into
a single Python tuple, which single tuple object replaces all of the
stack from the topmost markobject onward. For example,
Stack before: ... markobject 1 2 3 'abc'
Stack after: ... (1, 2, 3, 'abc')
"""),
I(name='TUPLE1',
code='\x85',
arg=None,
stack_before=[anyobject],
stack_after=[pytuple],
proto=2,
doc="""Build a one-tuple out of the topmost item on the stack.
This code pops one value off the stack and pushes a tuple of
length 1 whose one item is that value back onto it. In other
words:
stack[-1] = tuple(stack[-1:])
"""),
I(name='TUPLE2',
code='\x86',
arg=None,
stack_before=[anyobject, anyobject],
stack_after=[pytuple],
proto=2,
doc="""Build a two-tuple out of the top two items on the stack.
This code pops two values off the stack and pushes a tuple of
length 2 whose items are those values back onto it. In other
words:
stack[-2:] = [tuple(stack[-2:])]
"""),
I(name='TUPLE3',
code='\x87',
arg=None,
stack_before=[anyobject, anyobject, anyobject],
stack_after=[pytuple],
proto=2,
doc="""Build a three-tuple out of the top three items on the stack.
This code pops three values off the stack and pushes a tuple of
length 3 whose items are those values back onto it. In other
words:
stack[-3:] = [tuple(stack[-3:])]
"""),
# Ways to build dicts.
I(name='EMPTY_DICT',
code='}',
arg=None,
stack_before=[],
stack_after=[pydict],
proto=1,
doc="Push an empty dict."),
I(name='DICT',
code='d',
arg=None,
stack_before=[markobject, stackslice],
stack_after=[pydict],
proto=0,
doc="""Build a dict out of the topmost stack slice, after markobject.
All the stack entries following the topmost markobject are placed into
a single Python dict, which single dict object replaces all of the
stack from the topmost markobject onward. The stack slice alternates
key, value, key, value, .... For example,
Stack before: ... markobject 1 2 3 'abc'
Stack after: ... {1: 2, 3: 'abc'}
"""),
I(name='SETITEM',
code='s',
arg=None,
stack_before=[pydict, anyobject, anyobject],
stack_after=[pydict],
proto=0,
doc="""Add a key+value pair to an existing dict.
Stack before: ... pydict key value
Stack after: ... pydict
where pydict has been modified via pydict[key] = value.
"""),
I(name='SETITEMS',
code='u',
arg=None,
stack_before=[pydict, markobject, stackslice],
stack_after=[pydict],
proto=1,
doc="""Add an arbitrary number of key+value pairs to an existing dict.
The slice of the stack following the topmost markobject is taken as
an alternating sequence of keys and values, added to the dict
immediately under the topmost markobject. Everything at and after the
topmost markobject is popped, leaving the mutated dict at the top
of the stack.
Stack before: ... pydict markobject key_1 value_1 ... key_n value_n
Stack after: ... pydict
where pydict has been modified via pydict[key_i] = value_i for i in
1, 2, ..., n, and in that order.
"""),
# Ways to build sets
I(name='EMPTY_SET',
code='\x8f',
arg=None,
stack_before=[],
stack_after=[pyset],
proto=4,
doc="Push an empty set."),
I(name='ADDITEMS',
code='\x90',
arg=None,
stack_before=[pyset, markobject, stackslice],
stack_after=[pyset],
proto=4,
doc="""Add an arbitrary number of items to an existing set.
The slice of the stack following the topmost markobject is taken as
a sequence of items, added to the set immediately under the topmost
markobject. Everything at and after the topmost markobject is popped,
leaving the mutated set at the top of the stack.
Stack before: ... pyset markobject item_1 ... item_n
Stack after: ... pyset
where pyset has been modified via pyset.add(item_i) for i in
1, 2, ..., n, and in that order.
"""),
# Way to build frozensets
I(name='FROZENSET',
code='\x91',
arg=None,
stack_before=[markobject, stackslice],
stack_after=[pyfrozenset],
proto=4,
doc="""Build a frozenset out of the topmost slice, after markobject.
All the stack entries following the topmost markobject are placed into
a single Python frozenset, which single frozenset object replaces all
of the stack from the topmost markobject onward. For example,
Stack before: ... markobject 1 2 3
Stack after: ... frozenset({1, 2, 3})
"""),
# Stack manipulation.
I(name='POP',
code='0',
arg=None,
stack_before=[anyobject],
stack_after=[],
proto=0,
doc="Discard the top stack item, shrinking the stack by one item."),
I(name='DUP',
code='2',
arg=None,
stack_before=[anyobject],
stack_after=[anyobject, anyobject],
proto=0,
doc="Push the top stack item onto the stack again, duplicating it."),
I(name='MARK',
code='(',
arg=None,
stack_before=[],
stack_after=[markobject],
proto=0,
doc="""Push markobject onto the stack.
markobject is a unique object, used by other opcodes to identify a
region of the stack containing a variable number of objects for them
to work on. See markobject.doc for more detail.
"""),
I(name='POP_MARK',
code='1',
arg=None,
stack_before=[markobject, stackslice],
stack_after=[],
proto=1,
doc="""Pop all the stack objects at and above the topmost markobject.
When an opcode using a variable number of stack objects is done,
POP_MARK is used to remove those objects, and to remove the markobject
that delimited their starting position on the stack.
"""),
# Memo manipulation. There are really only two operations (get and put),
# each in all-text, "short binary", and "long binary" flavors.
I(name='GET',
code='g',
arg=decimalnl_short,
stack_before=[],
stack_after=[anyobject],
proto=0,
doc="""Read an object from the memo and push it on the stack.
The index of the memo object to push is given by the newline-terminated
decimal string following. BINGET and LONG_BINGET are space-optimized
versions.
"""),
I(name='BINGET',
code='h',
arg=uint1,
stack_before=[],
stack_after=[anyobject],
proto=1,
doc="""Read an object from the memo and push it on the stack.
The index of the memo object to push is given by the 1-byte unsigned
integer following.
"""),
I(name='LONG_BINGET',
code='j',
arg=uint4,
stack_before=[],
stack_after=[anyobject],
proto=1,
doc="""Read an object from the memo and push it on the stack.
The index of the memo object to push is given by the 4-byte unsigned
little-endian integer following.
"""),
I(name='PUT',
code='p',
arg=decimalnl_short,
stack_before=[],
stack_after=[],
proto=0,
doc="""Store the stack top into the memo. The stack is not popped.
The index of the memo location to write into is given by the newline-
terminated decimal string following. BINPUT and LONG_BINPUT are
space-optimized versions.
"""),
I(name='BINPUT',
code='q',
arg=uint1,
stack_before=[],
stack_after=[],
proto=1,
doc="""Store the stack top into the memo. The stack is not popped.
The index of the memo location to write into is given by the 1-byte
unsigned integer following.
"""),
I(name='LONG_BINPUT',
code='r',
arg=uint4,
stack_before=[],
stack_after=[],
proto=1,
doc="""Store the stack top into the memo. The stack is not popped.
The index of the memo location to write into is given by the 4-byte
unsigned little-endian integer following.
"""),
I(name='MEMOIZE',
code='\x94',
arg=None,
stack_before=[anyobject],
stack_after=[anyobject],
proto=4,
doc="""Store the stack top into the memo. The stack is not popped.
The index of the memo location to write is the number of
elements currently present in the memo.
"""),
# Access the extension registry (predefined objects). Akin to the GET
# family.
I(name='EXT1',
code='\x82',
arg=uint1,
stack_before=[],
stack_after=[anyobject],
proto=2,
doc="""Extension code.
This code and the similar EXT2 and EXT4 allow using a registry
of popular objects that are pickled by name, typically classes.
It is envisioned that through a global negotiation and
registration process, third parties can set up a mapping between
ints and object names.
In order to guarantee pickle interchangeability, the extension
code registry ought to be global, although a range of codes may
be reserved for private use.
EXT1 has a 1-byte integer argument. This is used to index into the
extension registry, and the object at that index is pushed on the stack.
"""),
I(name='EXT2',
code='\x83',
arg=uint2,
stack_before=[],
stack_after=[anyobject],
proto=2,
doc="""Extension code.
See EXT1. EXT2 has a two-byte integer argument.
"""),
I(name='EXT4',
code='\x84',
arg=int4,
stack_before=[],
stack_after=[anyobject],
proto=2,
doc="""Extension code.
See EXT1. EXT4 has a four-byte integer argument.
"""),
# Push a class object, or module function, on the stack, via its module
# and name.
I(name='GLOBAL',
code='c',
arg=stringnl_noescape_pair,
stack_before=[],
stack_after=[anyobject],
proto=0,
doc="""Push a global object (module.attr) on the stack.
Two newline-terminated strings follow the GLOBAL opcode. The first is
taken as a module name, and the second as a class name. The class
object module.class is pushed on the stack. More accurately, the
object returned by self.find_class(module, class) is pushed on the
stack, so unpickling subclasses can override this form of lookup.
"""),
I(name='STACK_GLOBAL',
code='\x93',
arg=None,
stack_before=[pyunicode, pyunicode],
stack_after=[anyobject],
proto=4,
doc="""Push a global object (module.attr) on the stack.
"""),
# Ways to build objects of classes pickle doesn't know about directly
# (user-defined classes). I despair of documenting this accurately
# and comprehensibly -- you really have to read the pickle code to
# find all the special cases.
I(name='REDUCE',
code='R',
arg=None,
stack_before=[anyobject, anyobject],
stack_after=[anyobject],
proto=0,
doc="""Push an object built from a callable and an argument tuple.
The opcode is named to remind of the __reduce__() method.
Stack before: ... callable pytuple
Stack after: ... callable(*pytuple)
The callable and the argument tuple are the first two items returned
by a __reduce__ method. Applying the callable to the argtuple is
supposed to reproduce the original object, or at least get it started.
If the __reduce__ method returns a 3-tuple, the last component is an
argument to be passed to the object's __setstate__, and then the REDUCE
opcode is followed by code to create setstate's argument, and then a
BUILD opcode to apply __setstate__ to that argument.
If not isinstance(callable, type), REDUCE complains unless the
callable has been registered with the copyreg module's
safe_constructors dict, or the callable has a magic
'__safe_for_unpickling__' attribute with a true value. I'm not sure
why it does this, but I've sure seen this complaint often enough when
I didn't want to <wink>.
"""),
I(name='BUILD',
code='b',
arg=None,
stack_before=[anyobject, anyobject],
stack_after=[anyobject],
proto=0,
doc="""Finish building an object, via __setstate__ or dict update.
Stack before: ... anyobject argument
Stack after: ... anyobject
where anyobject may have been mutated, as follows:
If the object has a __setstate__ method,
anyobject.__setstate__(argument)
is called.
Else the argument must be a dict, the object must have a __dict__, and
the object is updated via
anyobject.__dict__.update(argument)
"""),
I(name='INST',
code='i',
arg=stringnl_noescape_pair,
stack_before=[markobject, stackslice],
stack_after=[anyobject],
proto=0,
doc="""Build a class instance.
This is the protocol 0 version of protocol 1's OBJ opcode.
INST is followed by two newline-terminated strings, giving a
module and class name, just as for the GLOBAL opcode (and see
GLOBAL for more details about that). self.find_class(module, name)
is used to get a class object.
In addition, all the objects on the stack following the topmost
markobject are gathered into a tuple and popped (along with the
topmost markobject), just as for the TUPLE opcode.
Now it gets complicated. If all of these are true:
+ The argtuple is empty (markobject was at the top of the stack
at the start).
+ The class object does not have a __getinitargs__ attribute.
then we want to create an old-style class instance without invoking
its __init__() method (pickle has waffled on this over the years; not
calling __init__() is current wisdom). In this case, an instance of
an old-style dummy class is created, and then we try to rebind its
__class__ attribute to the desired class object. If this succeeds,
the new instance object is pushed on the stack, and we're done.
Else (the argtuple is not empty, it's not an old-style class object,
or the class object does have a __getinitargs__ attribute), the code
first insists that the class object have a __safe_for_unpickling__
attribute. Unlike as for the __safe_for_unpickling__ check in REDUCE,
it doesn't matter whether this attribute has a true or false value, it
only matters whether it exists (XXX this is a bug). If
__safe_for_unpickling__ doesn't exist, UnpicklingError is raised.
Else (the class object does have a __safe_for_unpickling__ attr),
the class object obtained from INST's arguments is applied to the
argtuple obtained from the stack, and the resulting instance object
is pushed on the stack.
NOTE: checks for __safe_for_unpickling__ went away in Python 2.3.
NOTE: the distinction between old-style and new-style classes does
not make sense in Python 3.
"""),
I(name='OBJ',
code='o',
arg=None,
stack_before=[markobject, anyobject, stackslice],
stack_after=[anyobject],
proto=1,
doc="""Build a class instance.
This is the protocol 1 version of protocol 0's INST opcode, and is
very much like it. The major difference is that the class object
is taken off the stack, allowing it to be retrieved from the memo
repeatedly if several instances of the same class are created. This
can be much more efficient (in both time and space) than repeatedly
embedding the module and class names in INST opcodes.
Unlike INST, OBJ takes no arguments from the opcode stream. Instead
the class object is taken off the stack, immediately above the
topmost markobject:
Stack before: ... markobject classobject stackslice
Stack after: ... new_instance_object
As for INST, the remainder of the stack above the markobject is
gathered into an argument tuple, and then the logic seems identical,
except that no __safe_for_unpickling__ check is done (XXX this is
a bug). See INST for the gory details.
NOTE: In Python 2.3, INST and OBJ are identical except for how they
get the class object. That was always the intent; the implementations
had diverged for accidental reasons.
"""),
I(name='NEWOBJ',
code='\x81',
arg=None,
stack_before=[anyobject, anyobject],
stack_after=[anyobject],
proto=2,
doc="""Build an object instance.
The stack before should be thought of as containing a class
object followed by an argument tuple (the tuple being the stack
top). Call these cls and args. They are popped off the stack,
and the value returned by cls.__new__(cls, *args) is pushed back
onto the stack.
"""),
I(name='NEWOBJ_EX',
code='\x92',
arg=None,
stack_before=[anyobject, anyobject, anyobject],
stack_after=[anyobject],
proto=4,
doc="""Build an object instance.
The stack before should be thought of as containing a class
object followed by an argument tuple and by a keyword argument dict
(the dict being the stack top). Call these cls, args and kwargs. They
are popped off the stack, and the value returned by
cls.__new__(cls, *args, **kwargs) is pushed back onto the stack.
"""),
# Machine control.
I(name='PROTO',
code='\x80',
arg=uint1,
stack_before=[],
stack_after=[],
proto=2,
doc="""Protocol version indicator.
For protocol 2 and above, a pickle must start with this opcode.
The argument is the protocol version, an int in range(2, 256).
"""),
I(name='STOP',
code='.',
arg=None,
stack_before=[anyobject],
stack_after=[],
proto=0,
doc="""Stop the unpickling machine.
Every pickle ends with this opcode. The object at the top of the stack
is popped, and that's the result of unpickling. The stack should be
empty then.
"""),
# Framing support.
I(name='FRAME',
code='\x95',
arg=uint8,
stack_before=[],
stack_after=[],
proto=4,
doc="""Indicate the beginning of a new frame.
The unpickler may use this opcode to safely prefetch data from its
underlying stream.
"""),
# Ways to deal with persistent IDs.
I(name='PERSID',
code='P',
arg=stringnl_noescape,
stack_before=[],
stack_after=[anyobject],
proto=0,
doc="""Push an object identified by a persistent ID.
The pickle module doesn't define what a persistent ID means. PERSID's
argument is a newline-terminated str-style (no embedded escapes, no
bracketing quote characters) string, which *is* "the persistent ID".
The unpickler passes this string to self.persistent_load(). Whatever
object that returns is pushed on the stack. There is no implementation
of persistent_load() in Python's unpickler: it must be supplied by an
unpickler subclass.
"""),
I(name='BINPERSID',
code='Q',
arg=None,
stack_before=[anyobject],
stack_after=[anyobject],
proto=1,
doc="""Push an object identified by a persistent ID.
Like PERSID, except the persistent ID is popped off the stack (instead
of being a string embedded in the opcode bytestream). The persistent
ID is passed to self.persistent_load(), and whatever object that
returns is pushed on the stack. See PERSID for more detail.
"""),
]
del I
# Verify uniqueness of .name and .code members.
name2i = {}
code2i = {}
for i, d in enumerate(opcodes):
if d.name in name2i:
raise ValueError("repeated name %r at indices %d and %d" %
(d.name, name2i[d.name], i))
if d.code in code2i:
raise ValueError("repeated code %r at indices %d and %d" %
(d.code, code2i[d.code], i))
name2i[d.name] = i
code2i[d.code] = i
del name2i, code2i, i, d
##############################################################################
# Build a code2op dict, mapping opcode characters to OpcodeInfo records.
# Also ensure we've got the same stuff as pickle.py, although the
# introspection here is dicey.
code2op = {}
for d in opcodes:
code2op[d.code] = d
del d
def assure_pickle_consistency(verbose=False):
copy = code2op.copy()
for name in pickle.__all__:
if not re.match("[A-Z][A-Z0-9_]+$", name):
if verbose:
print("skipping %r: it doesn't look like an opcode name" % name)
continue
picklecode = getattr(pickle, name)
if not isinstance(picklecode, bytes) or len(picklecode) != 1:
if verbose:
print(("skipping %r: value %r doesn't look like a pickle "
"code" % (name, picklecode)))
continue
picklecode = picklecode.decode("latin-1")
if picklecode in copy:
if verbose:
print("checking name %r w/ code %r for consistency" % (
name, picklecode))
d = copy[picklecode]
if d.name != name:
raise ValueError("for pickle code %r, pickle.py uses name %r "
"but we're using name %r" % (picklecode,
name,
d.name))
# Forget this one. Any left over in copy at the end are a problem
# of a different kind.
del copy[picklecode]
else:
raise ValueError("pickle.py appears to have a pickle opcode with "
"name %r and code %r, but we don't" %
(name, picklecode))
if copy:
msg = ["we appear to have pickle opcodes that pickle.py doesn't have:"]
for code, d in copy.items():
msg.append(" name %r with code %r" % (d.name, code))
raise ValueError("\n".join(msg))
assure_pickle_consistency()
del assure_pickle_consistency
##############################################################################
# A pickle opcode generator.
def _genops(data, yield_end_pos=False):
if isinstance(data, bytes_types):
data = io.BytesIO(data)
if hasattr(data, "tell"):
getpos = data.tell
else:
getpos = lambda: None
while True:
pos = getpos()
code = data.read(1)
opcode = code2op.get(code.decode("latin-1"))
if opcode is None:
if code == b"":
raise ValueError("pickle exhausted before seeing STOP")
else:
raise ValueError("at position %s, opcode %r unknown" % (
"<unknown>" if pos is None else pos,
code))
if opcode.arg is None:
arg = None
else:
arg = opcode.arg.reader(data)
if yield_end_pos:
yield opcode, arg, pos, getpos()
else:
yield opcode, arg, pos
if code == b'.':
assert opcode.name == 'STOP'
break
def genops(pickle):
"""Generate all the opcodes in a pickle.
'pickle' is a file-like object, or string, containing the pickle.
Each opcode in the pickle is generated, from the current pickle position,
stopping after a STOP opcode is delivered. A triple is generated for
each opcode:
opcode, arg, pos
opcode is an OpcodeInfo record, describing the current opcode.
If the opcode has an argument embedded in the pickle, arg is its decoded
value, as a Python object. If the opcode doesn't have an argument, arg
is None.
If the pickle has a tell() method, pos was the value of pickle.tell()
before reading the current opcode. If the pickle is a bytes object,
it's wrapped in a BytesIO object, and the latter's tell() result is
used. Else (the pickle doesn't have a tell(), and it's not obvious how
to query its current position) pos is None.
"""
return _genops(pickle)
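# A minimal usage sketch for genops(); the pickled value and the helper
# name are illustrative assumptions, not part of the opcode machinery.
def _genops_example():
    blob = pickle.dumps((1, 2), protocol=2)
    # Each iteration yields (OpcodeInfo, decoded arg or None, offset).
    for info, arg, pos in genops(blob):
        print(pos, info.name, arg)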
##############################################################################
# A pickle optimizer.
def optimize(p):
'Optimize a pickle string by removing unused PUT opcodes'
not_a_put = object()
gets = { not_a_put } # set of args used by a GET opcode
opcodes = [] # (startpos, stoppos, putid)
proto = 0
for opcode, arg, pos, end_pos in _genops(p, yield_end_pos=True):
if 'PUT' in opcode.name:
opcodes.append((pos, end_pos, arg))
elif 'FRAME' in opcode.name:
pass
else:
if 'GET' in opcode.name:
gets.add(arg)
elif opcode.name == 'PROTO':
assert pos == 0, pos
proto = arg
opcodes.append((pos, end_pos, not_a_put))
prevpos, prevarg = pos, None
# Copy the opcodes except for PUTS without a corresponding GET
out = io.BytesIO()
opcodes = iter(opcodes)
if proto >= 2:
# Write the PROTO header before any framing
start, stop, _ = next(opcodes)
out.write(p[start:stop])
buf = pickle._Framer(out.write)
if proto >= 4:
buf.start_framing()
for start, stop, putid in opcodes:
if putid in gets:
buf.commit_frame()
buf.write(p[start:stop])
if proto >= 4:
buf.end_framing()
return out.getvalue()
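# A hedged sketch of optimize() at work: a freshly dumped pickle of a
# non-self-referential object carries PUT opcodes that no GET consumes,
# so the optimized copy is no larger yet loads to an equal value. The
# helper name and the sample object are assumptions for illustration.
def _optimize_example():
    original = pickle.dumps([1, 2, 3], protocol=2)
    slimmed = optimize(original)
    assert pickle.loads(slimmed) == [1, 2, 3]
    assert len(slimmed) <= len(original)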
##############################################################################
# A symbolic pickle disassembler.
def dis(pickle, out=None, memo=None, indentlevel=4, annotate=0):
"""Produce a symbolic disassembly of a pickle.
'pickle' is a file-like object, or string, containing a (at least one)
pickle. The pickle is disassembled from the current position, through
the first STOP opcode encountered.
Optional arg 'out' is a file-like object to which the disassembly is
printed. It defaults to sys.stdout.
Optional arg 'memo' is a Python dict, used as the pickle's memo. It
may be mutated by dis(), if the pickle contains PUT or BINPUT opcodes.
Passing the same memo object to another dis() call then allows disassembly
to proceed across multiple pickles that were all created by the same
pickler with the same memo. Ordinarily you don't need to worry about this.
Optional arg 'indentlevel' is the number of blanks by which to indent
a new MARK level. It defaults to 4.
Optional arg 'annotate' if nonzero instructs dis() to add short
description of the opcode on each line of disassembled output.
The value given to 'annotate' must be an integer and is used as a
hint for the column where annotation should start. The default
value is 0, meaning no annotations.
In addition to printing the disassembly, some sanity checks are made:
+ All embedded opcode arguments "make sense".
+ Explicit and implicit pop operations have enough items on the stack.
+ When an opcode implicitly refers to a markobject, a markobject is
actually on the stack.
+ A memo entry isn't referenced before it's defined.
+ The markobject isn't stored in the memo.
+ A memo entry isn't redefined.
"""
# Most of the hair here is for sanity checks, but most of it is needed
# anyway to detect when a protocol 0 POP takes a MARK off the stack
# (which in turn is needed to indent MARK blocks correctly).
stack = [] # crude emulation of unpickler stack
if memo is None:
memo = {} # crude emulation of unpickler memo
maxproto = -1 # max protocol number seen
markstack = [] # bytecode positions of MARK opcodes
indentchunk = ' ' * indentlevel
errormsg = None
annocol = annotate # column hint for annotations
for opcode, arg, pos in genops(pickle):
if pos is not None:
print("%5d:" % pos, end=' ', file=out)
line = "%-4s %s%s" % (repr(opcode.code)[1:-1],
indentchunk * len(markstack),
opcode.name)
maxproto = max(maxproto, opcode.proto)
before = opcode.stack_before # don't mutate
after = opcode.stack_after # don't mutate
numtopop = len(before)
# See whether a MARK should be popped.
markmsg = None
if markobject in before or (opcode.name == "POP" and
stack and
stack[-1] is markobject):
assert markobject not in after
if __debug__:
if markobject in before:
assert before[-1] is stackslice
if markstack:
markpos = markstack.pop()
if markpos is None:
markmsg = "(MARK at unknown opcode offset)"
else:
markmsg = "(MARK at %d)" % markpos
# Pop everything at and after the topmost markobject.
while stack[-1] is not markobject:
stack.pop()
stack.pop()
# Stop later code from popping too much.
try:
numtopop = before.index(markobject)
except ValueError:
assert opcode.name == "POP"
numtopop = 0
else:
errormsg = markmsg = "no MARK exists on stack"
# Check for correct memo usage.
if opcode.name in ("PUT", "BINPUT", "LONG_BINPUT", "MEMOIZE"):
if opcode.name == "MEMOIZE":
memo_idx = len(memo)
else:
assert arg is not None
memo_idx = arg
if memo_idx in memo:
errormsg = "memo key %r already defined" % arg
elif not stack:
errormsg = "stack is empty -- can't store into memo"
elif stack[-1] is markobject:
errormsg = "can't store markobject in the memo"
else:
memo[memo_idx] = stack[-1]
elif opcode.name in ("GET", "BINGET", "LONG_BINGET"):
if arg in memo:
assert len(after) == 1
after = [memo[arg]] # for better stack emulation
else:
errormsg = "memo key %r has never been stored into" % arg
if arg is not None or markmsg:
# make a mild effort to align arguments
line += ' ' * (10 - len(opcode.name))
if arg is not None:
line += ' ' + repr(arg)
if markmsg:
line += ' ' + markmsg
if annotate:
line += ' ' * (annocol - len(line))
# make a mild effort to align annotations
annocol = len(line)
if annocol > 50:
annocol = annotate
line += ' ' + opcode.doc.split('\n', 1)[0]
print(line, file=out)
if errormsg:
# Note that we delayed complaining until the offending opcode
# was printed.
raise ValueError(errormsg)
# Emulate the stack effects.
if len(stack) < numtopop:
raise ValueError("tries to pop %d items from stack with "
"only %d items" % (numtopop, len(stack)))
if numtopop:
del stack[-numtopop:]
if markobject in after:
assert markobject not in before
markstack.append(pos)
stack.extend(after)
print("highest protocol among opcodes =", maxproto, file=out)
if stack:
raise ValueError("stack not empty after STOP: %r" % stack)
# For use in the doctest, simply as an example of a class to pickle.
class _Example:
def __init__(self, value):
self.value = value
_dis_test = r"""
>>> import pickle
>>> x = [1, 2, (3, 4), {b'abc': "def"}]
>>> pkl0 = pickle.dumps(x, 0)
>>> dis(pkl0)
0: ( MARK
1: l LIST (MARK at 0)
2: p PUT 0
5: L LONG 1
9: a APPEND
10: L LONG 2
14: a APPEND
15: ( MARK
16: L LONG 3
20: L LONG 4
24: t TUPLE (MARK at 15)
25: p PUT 1
28: a APPEND
29: ( MARK
30: d DICT (MARK at 29)
31: p PUT 2
34: c GLOBAL '_codecs encode'
50: p PUT 3
53: ( MARK
54: V UNICODE 'abc'
59: p PUT 4
62: V UNICODE 'latin1'
70: p PUT 5
73: t TUPLE (MARK at 53)
74: p PUT 6
77: R REDUCE
78: p PUT 7
81: V UNICODE 'def'
86: p PUT 8
89: s SETITEM
90: a APPEND
91: . STOP
highest protocol among opcodes = 0
Try again with a "binary" pickle.
>>> pkl1 = pickle.dumps(x, 1)
>>> dis(pkl1)
0: ] EMPTY_LIST
1: q BINPUT 0
3: ( MARK
4: K BININT1 1
6: K BININT1 2
8: ( MARK
9: K BININT1 3
11: K BININT1 4
13: t TUPLE (MARK at 8)
14: q BINPUT 1
16: } EMPTY_DICT
17: q BINPUT 2
19: c GLOBAL '_codecs encode'
35: q BINPUT 3
37: ( MARK
38: X BINUNICODE 'abc'
46: q BINPUT 4
48: X BINUNICODE 'latin1'
59: q BINPUT 5
61: t TUPLE (MARK at 37)
62: q BINPUT 6
64: R REDUCE
65: q BINPUT 7
67: X BINUNICODE 'def'
75: q BINPUT 8
77: s SETITEM
78: e APPENDS (MARK at 3)
79: . STOP
highest protocol among opcodes = 1
Exercise the INST/OBJ/BUILD family.
>>> import pickletools
>>> dis(pickle.dumps(pickletools.dis, 0))
0: c GLOBAL 'pickletools dis'
17: p PUT 0
20: . STOP
highest protocol among opcodes = 0
>>> from pickletools import _Example
>>> x = [_Example(42)] * 2
>>> dis(pickle.dumps(x, 0))
0: ( MARK
1: l LIST (MARK at 0)
2: p PUT 0
5: c GLOBAL 'copy_reg _reconstructor'
30: p PUT 1
33: ( MARK
34: c GLOBAL 'pickletools _Example'
56: p PUT 2
59: c GLOBAL '__builtin__ object'
79: p PUT 3
82: N NONE
83: t TUPLE (MARK at 33)
84: p PUT 4
87: R REDUCE
88: p PUT 5
91: ( MARK
92: d DICT (MARK at 91)
93: p PUT 6
96: V UNICODE 'value'
103: p PUT 7
106: L LONG 42
111: s SETITEM
112: b BUILD
113: a APPEND
114: g GET 5
117: a APPEND
118: . STOP
highest protocol among opcodes = 0
>>> dis(pickle.dumps(x, 1))
0: ] EMPTY_LIST
1: q BINPUT 0
3: ( MARK
4: c GLOBAL 'copy_reg _reconstructor'
29: q BINPUT 1
31: ( MARK
32: c GLOBAL 'pickletools _Example'
54: q BINPUT 2
56: c GLOBAL '__builtin__ object'
76: q BINPUT 3
78: N NONE
79: t TUPLE (MARK at 31)
80: q BINPUT 4
82: R REDUCE
83: q BINPUT 5
85: } EMPTY_DICT
86: q BINPUT 6
88: X BINUNICODE 'value'
98: q BINPUT 7
100: K BININT1 42
102: s SETITEM
103: b BUILD
104: h BINGET 5
106: e APPENDS (MARK at 3)
107: . STOP
highest protocol among opcodes = 1
Try "the canonical" recursive-object test.
>>> L = []
>>> T = L,
>>> L.append(T)
>>> L[0] is T
True
>>> T[0] is L
True
>>> L[0][0] is L
True
>>> T[0][0] is T
True
>>> dis(pickle.dumps(L, 0))
0: ( MARK
1: l LIST (MARK at 0)
2: p PUT 0
5: ( MARK
6: g GET 0
9: t TUPLE (MARK at 5)
10: p PUT 1
13: a APPEND
14: . STOP
highest protocol among opcodes = 0
>>> dis(pickle.dumps(L, 1))
0: ] EMPTY_LIST
1: q BINPUT 0
3: ( MARK
4: h BINGET 0
6: t TUPLE (MARK at 3)
7: q BINPUT 1
9: a APPEND
10: . STOP
highest protocol among opcodes = 1
Note that, in the protocol 0 pickle of the recursive tuple, the disassembler
has to emulate the stack in order to realize that the POP opcode at 16 gets
rid of the MARK at 0.
>>> dis(pickle.dumps(T, 0))
0: ( MARK
1: ( MARK
2: l LIST (MARK at 1)
3: p PUT 0
6: ( MARK
7: g GET 0
10: t TUPLE (MARK at 6)
11: p PUT 1
14: a APPEND
15: 0 POP
16: 0 POP (MARK at 0)
17: g GET 1
20: . STOP
highest protocol among opcodes = 0
>>> dis(pickle.dumps(T, 1))
0: ( MARK
1: ] EMPTY_LIST
2: q BINPUT 0
4: ( MARK
5: h BINGET 0
7: t TUPLE (MARK at 4)
8: q BINPUT 1
10: a APPEND
11: 1 POP_MARK (MARK at 0)
12: h BINGET 1
14: . STOP
highest protocol among opcodes = 1
Try protocol 2.
>>> dis(pickle.dumps(L, 2))
0: \x80 PROTO 2
2: ] EMPTY_LIST
3: q BINPUT 0
5: h BINGET 0
7: \x85 TUPLE1
8: q BINPUT 1
10: a APPEND
11: . STOP
highest protocol among opcodes = 2
>>> dis(pickle.dumps(T, 2))
0: \x80 PROTO 2
2: ] EMPTY_LIST
3: q BINPUT 0
5: h BINGET 0
7: \x85 TUPLE1
8: q BINPUT 1
10: a APPEND
11: 0 POP
12: h BINGET 1
14: . STOP
highest protocol among opcodes = 2
Try protocol 3 with annotations:
>>> dis(pickle.dumps(T, 3), annotate=1)
0: \x80 PROTO 3 Protocol version indicator.
2: ] EMPTY_LIST Push an empty list.
3: q BINPUT 0 Store the stack top into the memo. The stack is not popped.
5: h BINGET 0 Read an object from the memo and push it on the stack.
7: \x85 TUPLE1 Build a one-tuple out of the topmost item on the stack.
8: q BINPUT 1 Store the stack top into the memo. The stack is not popped.
10: a APPEND Append an object to a list.
11: 0 POP Discard the top stack item, shrinking the stack by one item.
12: h BINGET 1 Read an object from the memo and push it on the stack.
14: . STOP Stop the unpickling machine.
highest protocol among opcodes = 2
"""
_memo_test = r"""
>>> import pickle
>>> import io
>>> f = io.BytesIO()
>>> p = pickle.Pickler(f, 2)
>>> x = [1, 2, 3]
>>> p.dump(x)
>>> p.dump(x)
>>> f.seek(0)
0
>>> memo = {}
>>> dis(f, memo=memo)
0: \x80 PROTO 2
2: ] EMPTY_LIST
3: q BINPUT 0
5: ( MARK
6: K BININT1 1
8: K BININT1 2
10: K BININT1 3
12: e APPENDS (MARK at 5)
13: . STOP
highest protocol among opcodes = 2
>>> dis(f, memo=memo)
14: \x80 PROTO 2
16: h BINGET 0
18: . STOP
highest protocol among opcodes = 2
"""
__test__ = {'disassembler_test': _dis_test,
'disassembler_memo_test': _memo_test,
}
def _test():
import doctest
return doctest.testmod()
if __name__ == "__main__":
import sys, argparse
parser = argparse.ArgumentParser(
description='disassemble one or more pickle files')
parser.add_argument(
'pickle_file', type=argparse.FileType('br'),
nargs='*', help='the pickle file')
parser.add_argument(
'-o', '--output', default=sys.stdout, type=argparse.FileType('w'),
help='the file where the output should be written')
parser.add_argument(
'-m', '--memo', action='store_true',
help='preserve memo between disassemblies')
parser.add_argument(
'-l', '--indentlevel', default=4, type=int,
help='the number of blanks by which to indent a new MARK level')
parser.add_argument(
'-a', '--annotate', action='store_true',
help='annotate each line with a short opcode description')
parser.add_argument(
'-p', '--preamble', default="==> {name} <==",
help='if more than one pickle file is specified, print this before'
' each disassembly')
parser.add_argument(
'-t', '--test', action='store_true',
help='run self-test suite')
parser.add_argument(
'-v', action='store_true',
help='run verbosely; only affects self-test run')
args = parser.parse_args()
if args.test:
_test()
else:
annotate = 30 if args.annotate else 0
if not args.pickle_file:
parser.print_help()
elif len(args.pickle_file) == 1:
dis(args.pickle_file[0], args.output, None,
args.indentlevel, annotate)
else:
memo = {} if args.memo else None
for f in args.pickle_file:
preamble = args.preamble.format(name=f.name)
args.output.write(preamble + '\n')
dis(f, args.output, memo, args.indentlevel, annotate)
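# Example invocation (hedged; built from the flags defined above):
#     python -m pickletools -a -l 2 data.pkl
# disassembles data.pkl with annotations and 2-space MARK indentation.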
| lgpl-3.0 |
niteoweb/libcloud | libcloud/compute/drivers/ecs.py | 5 | 56343 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Node driver for Aliyun.
"""
try:
import simplejson as json
except ImportError:
import json
import time
from libcloud.common.aliyun import AliyunXmlResponse, SignedAliyunConnection
from libcloud.common.types import LibcloudError
from libcloud.compute.base import Node, NodeDriver, NodeImage, NodeSize, \
StorageVolume, VolumeSnapshot, NodeLocation
from libcloud.compute.types import NodeState, StorageVolumeState, \
VolumeSnapshotState
from libcloud.utils.py3 import _real_unicode as u
from libcloud.utils.xml import findall, findattr, findtext
__all__ = [
'DiskCategory',
'InternetChargeType',
'ECS_API_VERSION',
'ECSDriver',
'ECSSecurityGroup',
'ECSZone'
]
ECS_API_VERSION = '2014-05-26'
ECS_API_ENDPOINT = 'ecs.aliyuncs.com'
DEFAULT_SIGNATURE_VERSION = '1.0'
def _parse_bool(value):
if isinstance(value, bool):
return value
if u(value).lower() == 'true':
return True
return False
"""
Define the extra dictionary for specific resources
"""
RESOURCE_EXTRA_ATTRIBUTES_MAP = {
'node': {
'description': {
'xpath': 'Description',
'transform_func': u
},
'image_id': {
'xpath': 'ImageId',
'transform_func': u
},
'zone_id': {
'xpath': 'ZoneId',
'transform_func': u
},
'instance_type': {
'xpath': 'InstanceType',
'transform_func': u
},
'instance_type_family': {
'xpath': 'InstanceTypeFamily',
'transform_func': u
},
'hostname': {
'xpath': 'HostName',
'transform_func': u
},
'serial_number': {
'xpath': 'SerialNumber',
'transform_func': u
},
'internet_charge_type': {
'xpath': 'InternetChargeType',
'transform_func': u
},
'creation_time': {
'xpath': 'CreationTime',
'transform_func': u
},
'instance_network_type': {
'xpath': 'InstanceNetworkType',
'transform_func': u
},
'instance_charge_type': {
'xpath': 'InstanceChargeType',
'transform_func': u
},
'device_available': {
'xpath': 'DeviceAvailable',
'transform_func': u
},
'io_optimized': {
'xpath': 'IoOptimized',
'transform_func': u
},
'expired_time': {
'xpath': 'ExpiredTime',
'transform_func': u
}
},
'vpc_attributes': {
'vpc_id': {
'xpath': 'VpcId',
'transform_func': u
},
'vswitch_id': {
'xpath': 'VSwitchId',
'transform_func': u
},
'private_ip_address': {
'xpath': 'PrivateIpAddress/IpAddress',
'transform_func': u
},
'nat_ip_address': {
'xpath': 'NatIpAddress',
'transform_func': u
}
},
'eip_address_associate': {
'allocation_id': {
'xpath': 'AllocationId',
'transform_func': u
},
'ip_address': {
'xpath': 'IpAddress',
'transform_func': u
},
'bandwidth': {
'xpath': 'Bandwidth',
'transform_func': int
},
'internet_charge_type': {
'xpath': 'InternetChargeType',
'transform_func': u
}
},
'operation_locks': {
'lock_reason': {
'xpath': 'LockReason',
'transform_func': u
}
},
'volume': {
'region_id': {
'xpath': 'RegionId',
'transform_func': u
},
'zone_id': {
'xpath': 'ZoneId',
'transform_func': u
},
'description': {
'xpath': 'Description',
'transform_func': u
},
'type': {
'xpath': 'Type',
'transform_func': u
},
'category': {
'xpath': 'Category',
'transform_func': u
},
'image_id': {
'xpath': 'ImageId',
'transform_func': u
},
'source_snapshot_id': {
'xpath': 'SourceSnapshotId',
'transform_func': u
},
'product_code': {
'xpath': 'ProductCode',
'transform_func': u
},
'portable': {
'xpath': 'Portable',
'transform_func': _parse_bool
},
'instance_id': {
'xpath': 'InstanceId',
'transform_func': u
},
'device': {
'xpath': 'Device',
'transform_func': u
},
'delete_with_instance': {
'xpath': 'DeleteWithInstance',
'transform_func': _parse_bool
},
'enable_auto_snapshot': {
'xpath': 'EnableAutoSnapshot',
'transform_func': _parse_bool
},
'creation_time': {
'xpath': 'CreationTime',
'transform_func': u
},
'attached_time': {
'xpath': 'AttachedTime',
'transform_func': u
},
'detached_time': {
'xpath': 'DetachedTime',
'transform_func': u
},
'disk_charge_type': {
'xpath': 'DiskChargeType',
'transform_func': u
}
},
'snapshot': {
'snapshot_name': {
'xpath': 'SnapshotName',
'transform_func': u
},
'description': {
'xpath': 'Description',
'transform_func': u
},
'progress': {
'xpath': 'Progress',
'transform_func': u
},
'source_disk_id': {
'xpath': 'SourceDiskId',
'transform_func': u
},
'source_disk_size': {
'xpath': 'SourceDiskSize',
'transform_func': int
},
'source_disk_type': {
'xpath': 'SourceDiskType',
'transform_func': u
},
'product_code': {
'xpath': 'ProductCode',
'transform_func': u
},
'usage': {
'xpath': 'Usage',
'transform_func': u
}
},
'image': {
'image_version': {
'xpath': 'ImageVersion',
'transform_func': u
},
'os_type': {
'xpath': 'OSType',
'transform_func': u
},
'platform': {
'xpath': 'Platform',
'transform_func': u
},
'architecture': {
'xpath': 'Architecture',
'transform_func': u
},
'description': {
'xpath': 'Description',
'transform_func': u
},
'size': {
'xpath': 'Size',
'transform_func': int
},
'image_owner_alias': {
'xpath': 'ImageOwnerAlias',
'transform_func': u
},
'os_name': {
'xpath': 'OSName',
'transform_func': u
},
'product_code': {
'xpath': 'ProductCode',
'transform_func': u
},
'is_subscribed': {
'xpath': 'IsSubscribed',
'transform_func': _parse_bool
},
'progress': {
'xpath': 'Progress',
'transform_func': u
},
'creation_time': {
'xpath': 'CreationTime',
'transform_func': u
},
'usage': {
'xpath': 'Usage',
'transform_func': u
},
'is_copied': {
'xpath': 'IsCopied',
'transform_func': _parse_bool
}
},
'disk_device_mapping': {
'snapshot_id': {
'xpath': 'SnapshotId',
'transform_func': u
},
'size': {
'xpath': 'Size',
'transform_func': int
},
'device': {
'xpath': 'Device',
'transform_func': u
},
'format': {
'xpath': 'Format',
'transform_func': u
},
'import_oss_bucket': {
'xpath': 'ImportOSSBucket',
'transform_func': u
},
'import_oss_object': {
'xpath': 'ImportOSSObject',
'transform_func': u
}
}
}
class ECSConnection(SignedAliyunConnection):
"""
Represents a single connection to the Aliyun ECS Endpoint.
"""
version = ECS_API_VERSION
host = ECS_API_ENDPOINT
responseCls = AliyunXmlResponse
service_name = 'ecs'
class ECSSecurityGroup(object):
"""
Security group used to control nodes' internet and intranet accessibility.
"""
def __init__(self, id, name, description=None, driver=None, vpc_id=None,
creation_time=None):
self.id = id
self.name = name
self.description = description
self.driver = driver
self.vpc_id = vpc_id
self.creation_time = creation_time
def __repr__(self):
return ('<ECSSecurityGroup: id=%s, name=%s, driver=%s ...>' %
(self.id, self.name, self.driver.name))
class ECSZone(object):
"""
ECSZone used to represent an availability zone in a region.
"""
def __init__(self, id, name, driver=None,
available_resource_types=None,
available_instance_types=None,
available_disk_categories=None):
self.id = id
self.name = name
self.driver = driver
self.available_resource_types = available_resource_types
self.available_instance_types = available_instance_types
self.available_disk_categories = available_disk_categories
def __repr__(self):
return ('<ECSZone: id=%s, name=%s, driver=%s>' %
(self.id, self.name, self.driver))
class InternetChargeType(object):
"""
Internet connection billing types for Aliyun Nodes.
"""
BY_BANDWIDTH = 'PayByBandwidth'
BY_TRAFFIC = 'PayByTraffic'
class DiskCategory(object):
"""
Enum defined disk types supported by Aliyun system and data disks.
"""
CLOUD = 'cloud'
CLOUD_EFFICIENCY = 'cloud_efficiency'
CLOUD_SSD = 'cloud_ssd'
EPHEMERAL_SSD = 'ephemeral_ssd'
class Pagination(object):
"""
Pagination used to describe the multiple pages results.
"""
def __init__(self, total, size, current):
"""
Create a pagination.
:param total: the total count of the results
:param size: the page size of each page
:param current: the current page number, 1-based
"""
self.total = total
self.size = size
self.current = current
def next(self):
"""
Switch to the next page.
:return: the new pagination or None when there are no more pages
:rtype: ``Pagination``
"""
if self.total is None or (self.size * self.current >= self.total):
return None
self.current += 1
return self
def to_dict(self):
return {'PageNumber': self.current,
'PageSize': self.size}
def __repr__(self):
return ('<Pagination total=%d, size=%d, current page=%d>' %
(self.total, self.size, self.current))
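# A minimal sketch of driving Pagination by hand; the totals and the
# commented request step are illustrative assumptions, not ECS calls.
def _pagination_example():
    page = Pagination(total=25, size=10, current=1)
    while page is not None:
        params = page.to_dict()  # e.g. {'PageNumber': 1, 'PageSize': 10}
        # ... merge ``params`` into a request here ...
        page = page.next()  # None after the third page (ceil(25 / 10))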
class ECSDriver(NodeDriver):
"""
Aliyun ECS node driver.
Used for Aliyun ECS service.
TODO:
Create public IP address
Get guest OS root password
Adjust internet bandwidth settings
Manage security groups and rules
"""
name = 'Aliyun ECS'
website = 'https://www.aliyun.com/product/ecs'
connectionCls = ECSConnection
features = {'create_node': ['password']}
namespace = None
path = '/'
internet_charge_types = InternetChargeType
disk_categories = DiskCategory
NODE_STATE_MAPPING = {
'Starting': NodeState.PENDING,
'Running': NodeState.RUNNING,
'Stopping': NodeState.PENDING,
'Stopped': NodeState.STOPPED
}
VOLUME_STATE_MAPPING = {
'In_use': StorageVolumeState.INUSE,
'Available': StorageVolumeState.AVAILABLE,
'Attaching': StorageVolumeState.ATTACHING,
'Detaching': StorageVolumeState.INUSE,
'Creating': StorageVolumeState.CREATING,
'ReIniting': StorageVolumeState.CREATING}
SNAPSHOT_STATE_MAPPING = {
'progressing': VolumeSnapshotState.CREATING,
'accomplished': VolumeSnapshotState.AVAILABLE,
'failed': VolumeSnapshotState.ERROR}
def list_nodes(self, ex_node_ids=None, ex_filters=None):
"""
List all nodes.
@inherits: :class:`NodeDriver.list_nodes`
:keyword ex_node_ids: a list of node ids used to filter nodes.
Only the nodes whose ids are in this list
will be returned.
:type ex_node_ids: ``list`` of ``str``
:keyword ex_filters: node attribute and value pairs to filter nodes.
Only the nodes which match all the pairs will
be returned.
If a filter attribute needs a JSON array value,
use a ``list`` object; the driver will convert it.
:type ex_filters: ``dict``
"""
params = {'Action': 'DescribeInstances',
'RegionId': self.region}
if ex_node_ids:
if isinstance(ex_node_ids, list):
params['InstanceIds'] = self._list_to_json_array(ex_node_ids)
else:
raise AttributeError('ex_node_ids should be a list of '
'node ids.')
if ex_filters:
if isinstance(ex_filters, dict):
params.update(ex_filters)
else:
raise AttributeError('ex_filters should be a dict of '
'node attributes.')
nodes = self._request_multiple_pages(self.path, params,
self._to_nodes)
return nodes
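    # Usage sketch (the node id and filter key below are hypothetical):
    #     driver.list_nodes(ex_node_ids=['i-abc123'],
    #                       ex_filters={'InstanceNetworkType': 'vpc'})
    # builds DescribeInstances parameters and pages through all results.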
def list_sizes(self, location=None):
params = {'Action': 'DescribeInstanceTypes'}
resp_body = self.connection.request(self.path, params).object
size_elements = findall(resp_body, 'InstanceTypes/InstanceType',
namespace=self.namespace)
sizes = [self._to_size(each) for each in size_elements]
return sizes
def list_locations(self):
params = {'Action': 'DescribeRegions'}
resp_body = self.connection.request(self.path, params).object
location_elements = findall(resp_body, 'Regions/Region',
namespace=self.namespace)
locations = [self._to_location(each) for each in location_elements]
return locations
def create_node(self, name, size, image, auth=None,
ex_security_group_id=None, ex_description=None,
ex_internet_charge_type=None,
ex_internet_max_bandwidth_out=None,
ex_internet_max_bandwidth_in=None,
ex_hostname=None, ex_io_optimized=None,
ex_system_disk=None, ex_data_disks=None,
ex_vswitch_id=None, ex_private_ip_address=None,
ex_client_token=None, **kwargs):
"""
@inherits: :class:`NodeDriver.create_node`
:param name: The name for this new node (required)
:type name: ``str``
:param image: The image to use when creating this node (required)
:type image: `NodeImage`
:param size: The size of the node to create (required)
:type size: `NodeSize`
:keyword auth: Initial authentication information for the node
(optional)
:type auth: :class:`NodeAuthSSHKey` or :class:`NodeAuthPassword`
:keyword ex_security_group_id: The id of the security group the
new created node is attached to.
(required)
:type ex_security_group_id: ``str``
:keyword ex_description: A description string for this node (optional)
:type ex_description: ``str``
:keyword ex_internet_charge_type: The internet charge type (optional)
:type ex_internet_charge_type: a ``str`` of 'PayByTraffic'
or 'PayByBandwidth'
:keyword ex_internet_max_bandwidth_out: The max output bandwidth,
in Mbps (optional)
Required for 'PayByTraffic'
internet charge type
:type ex_internet_max_bandwidth_out: an ``int`` in range [0, 100];
an ``int`` in range [1, 100] for
the 'PayByTraffic' internet
charge type
:keyword ex_internet_max_bandwidth_in: The max input bandwidth,
in Mbps (optional)
:type ex_internet_max_bandwidth_in: an ``int`` in range [1, 200];
defaults to 200 on the server side
:keyword ex_hostname: The hostname for the node (optional)
:type ex_hostname: ``str``
:keyword ex_io_optimized: Whether the node is IO optimized (optional)
:type ex_io_optimized: ``bool``
:keyword ex_system_disk: The system disk for the node (optional)
:type ex_system_disk: ``dict``
:keyword ex_data_disks: The data disks for the node (optional)
:type ex_data_disks: a `list` of `dict`
:keyword ex_vswitch_id: The id of vswitch for a VPC type node
(optional)
:type ex_vswitch_id: ``str``
:keyword ex_private_ip_address: The IP address in private network
(optional)
:type ex_private_ip_address: ``str``
:keyword ex_client_token: A token generated by client to keep
requests idempotency (optional)
:type ex_client_token: ``str``
"""
params = {'Action': 'CreateInstance',
'RegionId': self.region,
'ImageId': image.id,
'InstanceType': size.id,
'InstanceName': name}
if not ex_security_group_id:
raise AttributeError('ex_security_group_id is mandatory')
params['SecurityGroupId'] = ex_security_group_id
if ex_description:
params['Description'] = ex_description
inet_params = self._get_internet_related_params(
ex_internet_charge_type,
ex_internet_max_bandwidth_in,
ex_internet_max_bandwidth_out)
if inet_params:
params.update(inet_params)
if ex_hostname:
params['HostName'] = ex_hostname
if auth:
auth = self._get_and_check_auth(auth)
params['Password'] = auth.password
if ex_io_optimized is not None:
optimized = ex_io_optimized
if not isinstance(optimized, bool):
optimized = str(optimized).lower() == 'true'
params['IoOptimized'] = 'true' if optimized else 'false'
if ex_system_disk:
system_disk = self._get_system_disk(ex_system_disk)
if system_disk:
params.update(system_disk)
if ex_data_disks:
data_disks = self._get_data_disks(ex_data_disks)
if data_disks:
params.update(data_disks)
if ex_vswitch_id:
params['VSwitchId'] = ex_vswitch_id
if ex_private_ip_address:
if not ex_vswitch_id:
raise AttributeError('must provide ex_private_ip_address '
'and ex_vswitch_id at the same time')
else:
params['PrivateIpAddress'] = ex_private_ip_address
if ex_client_token:
params['ClientToken'] = ex_client_token
resp = self.connection.request(self.path, params=params)
node_id = findtext(resp.object, xpath='InstanceId',
namespace=self.namespace)
nodes = self.list_nodes(ex_node_ids=[node_id])
if len(nodes) != 1:
raise LibcloudError('could not find the newly created node '
'with id %s.' % node_id,
driver=self)
node = nodes[0]
self.ex_start_node(node)
self._wait_until_state(nodes, NodeState.RUNNING)
return node
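    # A hedged call sketch; every literal below (ids, bandwidth, disks)
    # is an invented placeholder:
    #     node = driver.create_node(
    #         name='demo', size=size, image=image,
    #         ex_security_group_id='sg-xxxx',
    #         ex_internet_charge_type='PayByTraffic',
    #         ex_internet_max_bandwidth_out=1,
    #         ex_data_disks=[{'size': 100, 'category': 'cloud_ssd'}])
    # After CreateInstance succeeds, the driver starts the node and
    # blocks until it reaches the RUNNING state.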
def reboot_node(self, node, ex_force_stop=False):
"""
Reboot the given node
@inherits :class:`NodeDriver.reboot_node`
:keyword ex_force_stop: if ``True``, force-stop the node (data may
be lost); otherwise, stop the node normally.
Defaults to ``False``
:type ex_force_stop: ``bool``
"""
params = {'Action': 'RebootInstance',
'InstanceId': node.id,
'ForceStop': u(ex_force_stop).lower()}
resp = self.connection.request(self.path, params=params)
return resp.success() and \
self._wait_until_state([node], NodeState.RUNNING)
def destroy_node(self, node):
nodes = self.list_nodes(ex_node_ids=[node.id])
if len(nodes) != 1 or node.id != nodes[0].id:
raise LibcloudError('could not find the node with id %s.'
% node.id)
current = nodes[0]
if current.state == NodeState.RUNNING:
# stop node first
self.ex_stop_node(node)
self._wait_until_state(nodes, NodeState.STOPPED)
params = {'Action': 'DeleteInstance',
'InstanceId': node.id}
resp = self.connection.request(self.path, params)
return resp.success()
def ex_start_node(self, node):
"""
Start node to running state.
:param node: the ``Node`` object to start
:type node: ``Node``
:return: starting operation result.
:rtype: ``bool``
"""
params = {'Action': 'StartInstance',
'InstanceId': node.id}
resp = self.connection.request(self.path, params)
return resp.success() and \
self._wait_until_state([node], NodeState.RUNNING)
def ex_stop_node(self, node, ex_force_stop=False):
"""
Stop a running node.
:param node: The node to stop
:type node: :class:`Node`
:keyword ex_force_stop: if ``True``, force-stop the node (data may
be lost); otherwise, stop the node normally.
Defaults to ``False``
:type ex_force_stop: ``bool``
:return: stopping operation result.
:rtype: ``bool``
"""
params = {'Action': 'StopInstance',
'InstanceId': node.id,
'ForceStop': u(ex_force_stop).lower()}
resp = self.connection.request(self.path, params)
return resp.success() and \
self._wait_until_state([node], NodeState.STOPPED)
def ex_list_security_groups(self, ex_filters=None):
"""
List security groups in the current region.
:keyword ex_filters: security group attributes to filter results.
:type ex_filters: ``dict``
:return: a list of defined security groups
:rtype: ``list`` of ``ECSSecurityGroup``
"""
params = {'Action': 'DescribeSecurityGroups',
'RegionId': self.region}
if ex_filters and isinstance(ex_filters, dict):
ex_filters.update(params)
params = ex_filters
def _parse_response(resp_object):
sg_elements = findall(resp_object, 'SecurityGroups/SecurityGroup',
namespace=self.namespace)
sgs = [self._to_security_group(el) for el in sg_elements]
return sgs
return self._request_multiple_pages(self.path, params,
_parse_response)
def ex_list_zones(self, region_id=None):
"""
List availability zones in the given region or the current region.
:keyword region_id: the id of the region to query zones from
:type region_id: ``str``
:return: list of zones
:rtype: ``list`` of ``ECSZone``
"""
params = {'Action': 'DescribeZones'}
if region_id:
params['RegionId'] = region_id
else:
params['RegionId'] = self.region
resp_body = self.connection.request(self.path, params).object
zone_elements = findall(resp_body, 'Zones/Zone',
namespace=self.namespace)
zones = [self._to_zone(el) for el in zone_elements]
return zones
##
# Volume and snapshot management methods
##
def list_volumes(self, ex_volume_ids=None, ex_filters=None):
"""
List all volumes.
@inherits: :class:`NodeDriver.list_volumes`
:keyword ex_volume_ids: a list of volume ids used to filter volumes.
Only the volumes whose ids are in this list
will be returned.
:type ex_volume_ids: ``list`` of ``str``
:keyword ex_filters: volume attribute and value pairs to filter
volumes. Only the volumes which match all the
pairs will be returned.
If a filter attribute needs a JSON array value,
use a ``list`` object; the driver will convert it.
:type ex_filters: ``dict``
"""
params = {'Action': 'DescribeDisks',
'RegionId': self.region}
if ex_volume_ids:
if isinstance(ex_volume_ids, list):
params['DiskIds'] = self._list_to_json_array(ex_volume_ids)
else:
raise AttributeError('ex_volume_ids should be a list of '
'volume ids.')
if ex_filters:
if not isinstance(ex_filters, dict):
raise AttributeError('ex_filters should be a dict of '
'volume attributes.')
else:
for key in ex_filters.keys():
params[key] = ex_filters[key]
def _parse_response(resp_object):
disk_elements = findall(resp_object, 'Disks/Disk',
namespace=self.namespace)
volumes = [self._to_volume(each) for each in disk_elements]
return volumes
return self._request_multiple_pages(self.path, params,
_parse_response)
def list_volume_snapshots(self, volume, ex_snapshot_ids=[],
ex_filters=None):
"""
List snapshots for a storage volume.
@inherits :class:`NodeDriver.list_volume_snapshots`
:keyword ex_snapshot_ids: a list of snapshot ids to filter the
snapshots returned.
:type ex_snapshot_ids: ``list`` of ``str``
:keyword ex_filters: snapshot attribute and value pairs to filter
snapshots. Only the snapshots which match all
the pairs will be returned.
If a filter attribute needs a JSON array value,
use a ``list`` object; the driver will convert it.
:type ex_filters: ``dict``
"""
params = {'Action': 'DescribeSnapshots',
'RegionId': self.region}
if volume:
params['DiskId'] = volume.id
if ex_snapshot_ids and isinstance(ex_snapshot_ids, list):
params['SnapshotIds'] = self._list_to_json_array(ex_snapshot_ids)
if ex_filters and isinstance(ex_filters, dict):
for key in ex_filters.keys():
params[key] = ex_filters[key]
def _parse_response(resp_body):
snapshot_elements = findall(resp_body, 'Snapshots/Snapshot',
namespace=self.namespace)
snapshots = [self._to_snapshot(each) for each in snapshot_elements]
return snapshots
return self._request_multiple_pages(self.path, params,
_parse_response)
def create_volume(self, size, name, location=None, snapshot=None,
ex_zone_id=None, ex_description=None,
ex_disk_category=None, ex_client_token=None):
"""
Create a new volume.
@inherits :class:`NodeDriver.create_volume`
:keyword ex_zone_id: the availability zone id (required)
:type ex_zone_id: ``str``
:keyword ex_description: volume description
:type ex_description: ``unicode``
:keyword ex_disk_category: disk category for data disk
:type ex_disk_category: ``str``
:keyword ex_client_token: a token generated by client to identify
each request.
:type ex_client_token: ``str``
"""
params = {'Action': 'CreateDisk',
'RegionId': self.region,
'DiskName': name,
'Size': size}
if ex_zone_id is None:
raise AttributeError('ex_zone_id is required')
params['ZoneId'] = ex_zone_id
if snapshot is not None and isinstance(snapshot, VolumeSnapshot):
params['SnapshotId'] = snapshot.id
if ex_description:
params['Description'] = ex_description
if ex_disk_category:
params['DiskCategory'] = ex_disk_category
if ex_client_token:
params['ClientToken'] = ex_client_token
resp = self.connection.request(self.path, params).object
volume_id = findtext(resp, 'DiskId', namespace=self.namespace)
volumes = self.list_volumes(ex_volume_ids=[volume_id])
if len(volumes) != 1:
raise LibcloudError('could not find the newly created volume '
'with id %s.' % volume_id,
driver=self)
return volumes[0]
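    # Usage sketch; the zone id and volume name are placeholders:
    #     volume = driver.create_volume(
    #         size=20, name='data-disk', ex_zone_id='cn-hangzhou-b',
    #         ex_disk_category=DiskCategory.CLOUD_SSD)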
def create_volume_snapshot(self, volume, name=None, ex_description=None,
ex_client_token=None):
"""
Creates a snapshot of the storage volume.
@inherits :class:`NodeDriver.create_volume_snapshot`
:keyword ex_description: description of the snapshot.
:type ex_description: ``unicode``
:keyword ex_client_token: a token generated by client to identify
each request.
:type ex_client_token: ``str``
"""
params = {'Action': 'CreateSnapshot',
'DiskId': volume.id}
if name:
params['SnapshotName'] = name
if ex_description:
params['Description'] = ex_description
if ex_client_token:
params['ClientToken'] = ex_client_token
snapshot_elements = self.connection.request(self.path, params).object
snapshot_id = findtext(snapshot_elements, 'SnapshotId',
namespace=self.namespace)
snapshots = self.list_volume_snapshots(volume=None,
ex_snapshot_ids=[snapshot_id])
if len(snapshots) != 1:
raise LibcloudError('could not find the newly created snapshot with '
'id %s.' % snapshot_id, driver=self)
return snapshots[0]
def attach_volume(self, node, volume, device=None,
ex_delete_with_instance=None):
"""
Attaches volume to node.
@inherits :class:`NodeDriver.attach_volume`
:keyword device: device path allocated for this attached volume
:type device: ``str``, from /dev/xvdb to /dev/xvdz;
if empty, allocated by the system
:keyword ex_delete_with_instance: whether to delete this volume when
the instance is deleted.
:type ex_delete_with_instance: ``bool``
"""
params = {'Action': 'AttachDisk',
'InstanceId': node.id,
'DiskId': volume.id}
if device:
params['Device'] = device
if ex_delete_with_instance:
params['DeleteWithInstance'] = \
str(bool(ex_delete_with_instance)).lower()
resp = self.connection.request(self.path, params)
return resp.success()
def detach_volume(self, volume, ex_instance_id=None):
"""
Detaches a volume from a node.
@inherits :class:`NodeDriver.detach_volume`
:keyword ex_instance_id: the id of the instance from which the volume
is detached.
:type ex_instance_id: ``str``
"""
params = {'Action': 'DetachDisk',
'DiskId': volume.id}
if ex_instance_id:
params['InstanceId'] = ex_instance_id
else:
volumes = self.list_volumes(ex_volume_ids=[volume.id])
if len(volumes) != 1:
raise AttributeError('could not find the instance id '
'the volume %s is attached to; '
'ex_instance_id is required.' %
volume.id)
params['InstanceId'] = volumes[0].extra['instance_id']
resp = self.connection.request(self.path, params)
return resp.success()
def destroy_volume(self, volume):
params = {'Action': 'DeleteDisk',
'DiskId': volume.id}
volumes = self.list_volumes(ex_volume_ids=[volume.id])
if len(volumes) != 1:
raise LibcloudError('could not find the volume with id %s.' %
volume.id,
driver=self)
if volumes[0].state != StorageVolumeState.AVAILABLE:
raise LibcloudError('only volume in AVAILABLE state could be '
'destroyed.', driver=self)
resp = self.connection.request(self.path, params)
return resp.success()
def destroy_volume_snapshot(self, snapshot):
params = {'Action': 'DeleteSnapshot'}
if snapshot and isinstance(snapshot, VolumeSnapshot):
params['SnapshotId'] = snapshot.id
else:
raise AttributeError('snapshot is required and must be a '
'VolumeSnapshot')
resp = self.connection.request(self.path, params)
return resp.success()
##
# Image management methods
##
def list_images(self, location=None, ex_image_ids=None, ex_filters=None):
"""
List images on a provider.
@inherits :class:`NodeDriver.list_images`
:keyword ex_image_ids: a list of image ids to filter the images to
be returned.
:type ex_image_ids: ``list`` of ``str``
:keyword ex_filters: image attribute and value pairs to filter
images. Only the images which match all
the pairs will be returned.
If a filter attribute needs a JSON array value,
use a ``list`` object; the driver will convert it.
:type ex_filters: ``dict``
"""
if location and isinstance(location, NodeLocation):
region = location.id
else:
region = self.region
params = {'Action': 'DescribeImages',
'RegionId': region}
if ex_image_ids:
if isinstance(ex_image_ids, list):
params['ImageId'] = ','.join(ex_image_ids)
else:
raise AttributeError('ex_image_ids should be a list of '
'image ids')
if ex_filters and isinstance(ex_filters, dict):
for key in ex_filters.keys():
params[key] = ex_filters[key]
def _parse_response(resp_body):
image_elements = findall(resp_body, 'Images/Image',
namespace=self.namespace)
images = [self._to_image(each) for each in image_elements]
return images
return self._request_multiple_pages(self.path, params,
_parse_response)
def create_image(self, node, name, description=None, ex_snapshot_id=None,
ex_image_version=None, ex_client_token=None):
"""
Creates an image from a system disk snapshot.
@inherits :class:`NodeDriver.create_image`
:keyword ex_snapshot_id: the id of the snapshot to create the image.
(required)
:type ex_snapshot_id: ``str``
:keyword ex_image_version: the version number of the image
:type ex_image_version: ``str``
:keyword ex_client_token: a token generated by client to identify
each request.
:type ex_client_token: ``str``
"""
params = {'Action': 'CreateImage',
'RegionId': self.region}
if name:
params['ImageName'] = name
if description:
params['Description'] = description
if ex_snapshot_id:
params['SnapshotId'] = ex_snapshot_id
else:
raise AttributeError('ex_snapshot_id is required')
if ex_image_version:
params['ImageVersion'] = ex_image_version
if ex_client_token:
params['ClientToken'] = ex_client_token
resp = self.connection.request(self.path, params)
image_id = findtext(resp.object, 'ImageId', namespace=self.namespace)
return self.get_image(image_id=image_id)
def delete_image(self, node_image):
params = {'Action': 'DeleteImage',
'RegionId': self.region,
'ImageId': node_image.id}
resp = self.connection.request(self.path, params)
return resp.success()
def get_image(self, image_id, ex_region_id=None):
if ex_region_id:
region = ex_region_id
else:
region = self.region
location = NodeLocation(id=region, name=None, country=None,
driver=self)
images = self.list_images(location, ex_image_ids=[image_id])
if len(images) != 1:
raise LibcloudError('could not find the image with id %s' %
image_id,
driver=self)
return images[0]
def copy_image(self, source_region, node_image, name, description=None,
ex_destination_region_id=None, ex_client_token=None):
"""
Copies an image from a source region to the destination region.
If a destination region is not provided, it defaults to the current region.
@inherits :class:`NodeDriver.copy_image`
:keyword ex_destination_region_id: id of the destination region
:type ex_destination_region_id: ``str``
:keyword ex_client_token: a token generated by client to identify
each request.
:type ex_client_token: ``str``
"""
params = {'Action': 'CopyImage',
'RegionId': source_region,
'ImageId': node_image.id}
if ex_destination_region_id is not None:
params['DestinationRegionId'] = ex_destination_region_id
else:
params['DestinationRegionId'] = self.region
if name:
params['DestinationImageName'] = name
if description:
params['DestinationDescription'] = description
if ex_client_token:
params['ClientToken'] = ex_client_token
resp = self.connection.request(self.path, params)
image_id = findtext(resp.object, 'ImageId', namespace=self.namespace)
return self.get_image(image_id=image_id)
def _to_nodes(self, object):
"""
Convert response to Node object list
:param object: parsed response object
:return: a list of ``Node``
:rtype: ``list``
"""
node_elements = findall(object, 'Instances/Instance', self.namespace)
return [self._to_node(el) for el in node_elements]
def _to_node(self, instance):
"""
Convert an InstanceAttributesType object to ``Node`` object
:param instance: a xml element represents an instance
:return: a ``Node`` object
:rtype: ``Node``
"""
_id = findtext(element=instance, xpath='InstanceId',
namespace=self.namespace)
name = findtext(element=instance, xpath='InstanceName',
namespace=self.namespace)
instance_status = findtext(element=instance, xpath='Status',
namespace=self.namespace)
state = self.NODE_STATE_MAPPING.get(instance_status, NodeState.UNKNOWN)
def _get_ips(ip_address_els):
return [each.text for each in ip_address_els]
public_ip_els = findall(element=instance,
xpath='PublicIpAddress/IpAddress',
namespace=self.namespace)
public_ips = _get_ips(public_ip_els)
private_ip_els = findall(element=instance,
xpath='InnerIpAddress/IpAddress',
namespace=self.namespace)
private_ips = _get_ips(private_ip_els)
# Extra properties
extra = self._get_extra_dict(instance,
RESOURCE_EXTRA_ATTRIBUTES_MAP['node'])
extra['vpc_attributes'] = self._get_vpc_attributes(instance)
extra['eip_address'] = self._get_eip_address(instance)
extra['operation_locks'] = self._get_operation_locks(instance)
node = Node(id=_id, name=name, state=state,
public_ips=public_ips, private_ips=private_ips,
driver=self.connection.driver, extra=extra)
return node
def _get_extra_dict(self, element, mapping):
"""
Extract attributes from the element based on rules provided in the
mapping dictionary.
:param element: Element to parse the values from.
:type element: xml.etree.ElementTree.Element.
:param mapping: Dictionary with the extra layout
:type mapping: ``dict``
:rtype: ``dict``
"""
extra = {}
for attribute, values in mapping.items():
transform_func = values['transform_func']
value = findattr(element=element,
xpath=values['xpath'],
namespace=self.namespace)
if value:
try:
extra[attribute] = transform_func(value)
except Exception:
extra[attribute] = None
else:
extra[attribute] = value
return extra
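    # Illustrative application of the mapping (element contents are
    # invented): with mapping
    #     {'image_id': {'xpath': 'ImageId', 'transform_func': u}}
    # and an element <Instance><ImageId>ubuntu_x64</ImageId></Instance>,
    # the returned dict is {'image_id': u'ubuntu_x64'}.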
def _get_internet_related_params(self, ex_internet_charge_type,
ex_internet_max_bandwidth_in,
ex_internet_max_bandwidth_out):
params = {}
if ex_internet_charge_type:
params['InternetChargeType'] = ex_internet_charge_type
if ex_internet_charge_type.lower() == 'paybytraffic':
if ex_internet_max_bandwidth_out:
params['InternetMaxBandwidthOut'] = \
ex_internet_max_bandwidth_out
else:
raise AttributeError('ex_internet_max_bandwidth_out is '
'mandatory for PayByTraffic internet'
' charge type.')
if ex_internet_max_bandwidth_in:
params['InternetMaxBandwidthIn'] = \
ex_internet_max_bandwidth_in
return params
def _get_system_disk(self, ex_system_disk):
if not isinstance(ex_system_disk, dict):
raise AttributeError('ex_system_disk is not a dict')
sys_disk_dict = ex_system_disk
key_base = 'SystemDisk.'
# TODO(samsong8610): Use a type instead of dict
mappings = {'category': 'Category',
'disk_name': 'DiskName',
'description': 'Description'}
params = {}
for attr in mappings.keys():
if attr in sys_disk_dict:
params[key_base + mappings[attr]] = sys_disk_dict[attr]
return params
def _get_data_disks(self, ex_data_disks):
if isinstance(ex_data_disks, dict):
data_disks = [ex_data_disks]
elif isinstance(ex_data_disks, list):
data_disks = ex_data_disks
else:
raise AttributeError('ex_data_disks should be a list of dict')
# TODO(samsong8610): Use a type instead of dict
mappings = {'size': 'Size',
'category': 'Category',
'snapshot_id': 'SnapshotId',
'disk_name': 'DiskName',
'description': 'Description',
'device': 'Device',
'delete_with_instance': 'DeleteWithInstance'}
params = {}
for idx, disk in enumerate(data_disks):
key_base = 'DataDisk.{0}.'.format(idx + 1)
for attr in mappings.keys():
if attr in disk:
if attr == 'delete_with_instance':
# Convert bool value to str
value = str(disk[attr]).lower()
else:
value = disk[attr]
params[key_base + mappings[attr]] = value
return params
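# Illustrative sketch (not part of the original driver): the flattened,
# one-based query parameters produced by _get_data_disks. The input values
# here are assumptions for the example.
#
#   disks = [{'size': 100, 'category': 'cloud', 'delete_with_instance': True}]
#   self._get_data_disks(disks)
#   # => {'DataDisk.1.Size': 100,
#   #     'DataDisk.1.Category': 'cloud',
#   #     'DataDisk.1.DeleteWithInstance': 'true'}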
def _get_vpc_attributes(self, instance):
vpcs = findall(instance, xpath='VpcAttributes',
namespace=self.namespace)
if len(vpcs) <= 0:
return None
return self._get_extra_dict(
vpcs[0], RESOURCE_EXTRA_ATTRIBUTES_MAP['vpc_attributes'])
def _get_eip_address(self, instance):
eips = findall(instance, xpath='EipAddress',
namespace=self.namespace)
if len(eips) <= 0:
return None
return self._get_extra_dict(
eips[0], RESOURCE_EXTRA_ATTRIBUTES_MAP['eip_address_associate'])
def _get_operation_locks(self, instance):
locks = findall(instance, xpath='OperationLocks',
namespace=self.namespace)
if len(locks) <= 0:
return None
return self._get_extra_dict(
locks[0], RESOURCE_EXTRA_ATTRIBUTES_MAP['operation_locks'])
def _wait_until_state(self, nodes, state, wait_period=3, timeout=600):
"""
Block until the provided nodes are in the desired state.
:param nodes: List of nodes to wait for
:type nodes: ``list`` of :class:`.Node`
:param state: desired state
:type state: ``NodeState``
:param wait_period: How many seconds to wait between each loop
iteration. (default is 3)
:type wait_period: ``int``
:param timeout: How many seconds to wait before giving up.
(default is 600)
:type timeout: ``int``
:return: ``True`` if all nodes reach the desired state before the timeout.
:rtype: ``bool``
"""
start = time.time()
end = start + timeout
node_ids = [node.id for node in nodes]
while time.time() < end:
matched_nodes = self.list_nodes(ex_node_ids=node_ids)
if len(matched_nodes) > len(node_ids):
found_ids = [node.id for node in matched_nodes]
msg = ('found multiple nodes with same ids, '
'desired ids: %(ids)s, found ids: %(found_ids)s' %
{'ids': node_ids, 'found_ids': found_ids})
raise LibcloudError(value=msg, driver=self)
desired_nodes = [node for node in matched_nodes
if node.state == state]
if len(desired_nodes) == len(node_ids):
return True
else:
time.sleep(wait_period)
continue
raise LibcloudError(value='Timed out after %s seconds' % (timeout),
driver=self)
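# Illustrative sketch (not part of the original driver): a typical polling
# call after an operation that changes instance state. NodeState.RUNNING and
# the timing values are assumptions for the example.
#
#   nodes = self.list_nodes(ex_node_ids=[node.id])
#   self._wait_until_state(nodes, NodeState.RUNNING,
#                          wait_period=5, timeout=300)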
def _to_volume(self, element):
_id = findtext(element, 'DiskId', namespace=self.namespace)
name = findtext(element, 'DiskName', namespace=self.namespace)
size = int(findtext(element, 'Size', namespace=self.namespace))
status_str = findtext(element, 'Status', namespace=self.namespace)
status = self.VOLUME_STATE_MAPPING.get(status_str,
StorageVolumeState.UNKNOWN)
extra = self._get_extra_dict(element,
RESOURCE_EXTRA_ATTRIBUTES_MAP['volume'])
extra['operation_locks'] = self._get_operation_locks(element)
return StorageVolume(_id, name, size, self, state=status, extra=extra)
def _list_to_json_array(self, value):
try:
return json.dumps(value)
except Exception:
raise AttributeError('could not convert list to json array')
def _to_snapshot(self, element):
_id = findtext(element, 'SnapshotId', namespace=self.namespace)
created = findtext(element, 'CreationTime', namespace=self.namespace)
status_str = findtext(element, 'Status', namespace=self.namespace)
state = self.SNAPSHOT_STATE_MAPPING.get(status_str,
VolumeSnapshotState.UNKNOWN)
extra = self._get_extra_dict(element,
RESOURCE_EXTRA_ATTRIBUTES_MAP['snapshot'])
return VolumeSnapshot(id=_id, driver=self, extra=extra,
created=created, state=state)
def _to_size(self, element):
_id = findtext(element, 'InstanceTypeId', namespace=self.namespace)
ram = float(findtext(element, 'MemorySize', namespace=self.namespace))
extra = {}
extra['cpu_core_count'] = int(findtext(element, 'CpuCoreCount',
namespace=self.namespace))
extra['instance_type_family'] = findtext(element, 'InstanceTypeFamily',
namespace=self.namespace)
return NodeSize(id=_id, name=_id, ram=ram, disk=None, bandwidth=None,
price=None, driver=self, extra=extra)
def _to_location(self, element):
_id = findtext(element, 'RegionId', namespace=self.namespace)
localname = findtext(element, 'LocalName', namespace=self.namespace)
return NodeLocation(id=_id, name=localname, country=None, driver=self)
def _to_image(self, element):
_id = findtext(element, 'ImageId', namespace=self.namespace)
name = findtext(element, 'ImageName', namespace=self.namespace)
extra = self._get_extra_dict(element,
RESOURCE_EXTRA_ATTRIBUTES_MAP['image'])
extra['disk_device_mappings'] = self._get_disk_device_mappings(
element.find('DiskDeviceMappings'))
return NodeImage(id=_id, name=name, driver=self, extra=extra)
def _get_disk_device_mappings(self, element):
if element is None:
return None
mapping_element = element.find('DiskDeviceMapping')
if mapping_element is not None:
return self._get_extra_dict(
mapping_element,
RESOURCE_EXTRA_ATTRIBUTES_MAP['disk_device_mapping'])
return None
def _to_security_group(self, element):
_id = findtext(element, 'SecurityGroupId', namespace=self.namespace)
name = findtext(element, 'SecurityGroupName',
namespace=self.namespace)
description = findtext(element, 'Description',
namespace=self.namespace)
vpc_id = findtext(element, 'VpcId', namespace=self.namespace)
creation_time = findtext(element, 'CreationTime',
namespace=self.namespace)
return ECSSecurityGroup(_id, name, description=description,
driver=self, vpc_id=vpc_id,
creation_time=creation_time)
def _to_zone(self, element):
_id = findtext(element, 'ZoneId', namespace=self.namespace)
local_name = findtext(element, 'LocalName', namespace=self.namespace)
resource_types = findall(element,
'AvailableResourceCreation/ResourceTypes',
namespace=self.namespace)
instance_types = findall(element,
'AvailableInstanceTypes/InstanceTypes',
namespace=self.namespace)
disk_categories = findall(element,
'AvailableDiskCategories/DiskCategories',
namespace=self.namespace)
def _text(element):
return element.text
return ECSZone(id=_id, name=local_name, driver=self,
available_resource_types=list(
map(_text, resource_types)),
available_instance_types=list(
map(_text, instance_types)),
available_disk_categories=list(
map(_text, disk_categories)))
def _get_pagination(self, element):
page_number = int(findtext(element, 'PageNumber'))
total_count = int(findtext(element, 'TotalCount'))
page_size = int(findtext(element, 'PageSize'))
return Pagination(total=total_count, size=page_size,
current=page_number)
def _request_multiple_pages(self, path, params, parse_func):
"""
Request all resources by multiple pages.
:param path: the resource path
:type path: ``str``
:param params: the query parameters
:type params: ``dict``
:param parse_func: the function object to parse the response body
:type parse_func: ``function``
:return: a list of resource objects; ``[]`` if none are found
:rtype: ``list``
"""
results = []
while True:
one_page = self.connection.request(path, params).object
resources = parse_func(one_page)
results += resources
pagination = self._get_pagination(one_page)
if pagination.next() is None:
break
params.update(pagination.to_dict())
return results
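# Illustrative sketch (not part of the original driver): fetching every page
# of a DescribeInstances-style listing. The action name and parser below are
# assumptions for the example.
#
#   params = {'Action': 'DescribeInstances', 'RegionId': self.region}
#   nodes = self._request_multiple_pages(self.path, params, self._to_nodes)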
| apache-2.0 |
jvkops/django | django/utils/html_parser.py | 348 | 5155 | import re
import sys
from django.utils import six
from django.utils.six.moves import html_parser as _html_parser
current_version = sys.version_info
use_workaround = current_version < (2, 7, 3)
try:
HTMLParseError = _html_parser.HTMLParseError
except AttributeError:
# create a dummy class for Python 3.5+ where it's been removed
class HTMLParseError(Exception):
pass
if not use_workaround:
if six.PY3:
class HTMLParser(_html_parser.HTMLParser):
"""Explicitly set convert_charrefs to be False.
This silences a deprecation warning on Python 3.4, but we can't do
it at call time because Python 2.7 does not have the keyword
argument.
"""
def __init__(self, convert_charrefs=False, **kwargs):
_html_parser.HTMLParser.__init__(self, convert_charrefs=convert_charrefs, **kwargs)
else:
HTMLParser = _html_parser.HTMLParser
else:
tagfind = re.compile(r'([a-zA-Z][-.a-zA-Z0-9:_]*)(?:\s|/(?!>))*')
class HTMLParser(_html_parser.HTMLParser):
"""
Patched version of stdlib's HTMLParser with patch from:
http://bugs.python.org/issue670664
"""
def __init__(self):
_html_parser.HTMLParser.__init__(self)
self.cdata_tag = None
def set_cdata_mode(self, tag):
try:
self.interesting = _html_parser.interesting_cdata
except AttributeError:
self.interesting = re.compile(r'</\s*%s\s*>' % tag.lower(), re.I)
self.cdata_tag = tag.lower()
def clear_cdata_mode(self):
self.interesting = _html_parser.interesting_normal
self.cdata_tag = None
# Internal -- handle starttag, return end or -1 if not terminated
def parse_starttag(self, i):
self.__starttag_text = None
endpos = self.check_for_whole_start_tag(i)
if endpos < 0:
return endpos
rawdata = self.rawdata
self.__starttag_text = rawdata[i:endpos]
# Now parse the data between i+1 and j into a tag and attrs
attrs = []
match = tagfind.match(rawdata, i + 1)
assert match, 'unexpected call to parse_starttag()'
k = match.end()
self.lasttag = tag = match.group(1).lower()
while k < endpos:
m = _html_parser.attrfind.match(rawdata, k)
if not m:
break
attrname, rest, attrvalue = m.group(1, 2, 3)
if not rest:
attrvalue = None
elif (attrvalue[:1] == '\'' == attrvalue[-1:] or
attrvalue[:1] == '"' == attrvalue[-1:]):
attrvalue = attrvalue[1:-1]
if attrvalue:
attrvalue = self.unescape(attrvalue)
attrs.append((attrname.lower(), attrvalue))
k = m.end()
end = rawdata[k:endpos].strip()
if end not in (">", "/>"):
lineno, offset = self.getpos()
if "\n" in self.__starttag_text:
lineno = lineno + self.__starttag_text.count("\n")
offset = (len(self.__starttag_text)
- self.__starttag_text.rfind("\n"))
else:
offset = offset + len(self.__starttag_text)
self.error("junk characters in start tag: %r"
% (rawdata[k:endpos][:20],))
if end.endswith('/>'):
# XHTML-style empty tag: <span attr="value" />
self.handle_startendtag(tag, attrs)
else:
self.handle_starttag(tag, attrs)
if tag in self.CDATA_CONTENT_ELEMENTS:
self.set_cdata_mode(tag) # <--------------------------- Changed
return endpos
# Internal -- parse endtag, return end or -1 if incomplete
def parse_endtag(self, i):
rawdata = self.rawdata
assert rawdata[i:i + 2] == "</", "unexpected call to parse_endtag"
match = _html_parser.endendtag.search(rawdata, i + 1) # >
if not match:
return -1
j = match.end()
match = _html_parser.endtagfind.match(rawdata, i) # </ + tag + >
if not match:
if self.cdata_tag is not None: # *** add ***
self.handle_data(rawdata[i:j]) # *** add ***
return j # *** add ***
self.error("bad end tag: %r" % (rawdata[i:j],))
# --- changed start ---------------------------------------------------
tag = match.group(1).strip()
if self.cdata_tag is not None:
if tag.lower() != self.cdata_tag:
self.handle_data(rawdata[i:j])
return j
# --- changed end -----------------------------------------------------
self.handle_endtag(tag.lower())
self.clear_cdata_mode()
return j
| bsd-3-clause |
neithere/monk | tests/molding_tests.py | 1 | 1746 | # coding: utf-8
#
# Monk is an unobtrusive data modeling, manipulation and validation library.
# Copyright © 2011—2015 Andrey Mikhaylenko
#
# This file is part of Monk.
#
# Monk is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Monk is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Monk. If not, see <http://gnu.org/licenses/>.
"""
~~~~~~~~~~~~~~~~~~~~~~
Tests for Data Molding
~~~~~~~~~~~~~~~~~~~~~~
"""
from monk.compat import text_type as t
from monk import manipulation
def test_normalize_to_list():
f = manipulation.normalize_to_list
assert [1] == f(1)
assert [1] == f([1])
def test_normalize_list_of_dicts():
f = manipulation.normalize_list_of_dicts
assert [{'x': 'a'}] == f([{'x': 'a'}], default_key='x')
assert [{'x': 'a'}] == f( {'x': 'a'}, default_key='x')
assert [{'x': 'a'}] == f( t('a'), default_key='x')
assert [{'x': 'a'}, {'x': 'b'}] == f([{'x': 'a'}, t('b')], default_key='x')
assert [] == f(None, default_key='x')
assert [{'x': t('y')}] == f(None, default_key='x', default_value=t('y'))
# edge cases (may need revision)
assert [{'x': 1}] == f({'x': 1}, default_key='y')
assert [] == f(None, default_key='y')
assert 123 == f(123, default_key='x')
| gpl-3.0 |
kagetsu-claire/IzuminShogiBot | izumin/math.py | 2 | 1802 | # -*- coding: utf-8 -*-
import importlib
from math import sqrt
import psycopg2
import psycopg2.extras
from izumin import config
if config.IS_PRODUCTION_ENVIRONMENT:
db = importlib.import_module("izumin.db")
else:
db = importlib.import_module("izumin.db_local")
def is_prime(x):
"""
引数が素数であればTrue、そうでなければFalseを返す。
:param x: 素数判定を行う数値
:return: 引数xが素数であるか
"""
if x < 2: # Numbers below 2 are not prime.
return False
if x == 2 or x == 3 or x == 5: # 2, 3 and 5 are prime.
return True
if x % 2 == 0 or x % 3 == 0 or x % 5 == 0: # Divisible by 2, 3 or 5, so not prime.
return False
# Trial division over 6k±1 candidates (7, 11, 13, 17, ...)
prime = 7
step = 4
while prime <= sqrt(x):
if x % prime == 0:
return False
prime += step
step = 6 - step
return True
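# Illustrative sketch (not part of the original module): the loop above only
# tests candidates of the form 6k±1 (7, 11, 13, 17, 19, ...) because the
# alternating steps of 4 and 2 skip every multiple of 2 and 3.
#
#   [n for n in range(30) if is_prime(n)]
#   # => [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]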
def is_perfect_number(x):
"""
引数が完全数であればTrue、そうでなければFalseを返す。
:param x: 完全数判定を行う数値
:return: 引数xが完全数であるか
"""
connection = psycopg2.connect(dbname=db.DATABASE_NAME,
user=db.DATABASE_USER,
password=db.DATABASE_PASSWORD,
host=db.DATABASE_HOST,
port=db.DATABASE_PORT)
cur = connection.cursor(cursor_factory=psycopg2.extras.DictCursor)
cur.execute("SELECT number FROM perfect_number")
for row in cur:
if x == row["number"]:
return True
else:
# for/else: runs only when the loop exhausts the rows without a match
return False
if __name__ == '__main__':
if is_perfect_number(28):
print("True")
else:
print("False")
| mit |
raptorz/userga | config.py | 1 | 1121 | # -*- coding: utf-8 -*-
"""
default config file
:copyright: 20160204 by [email protected].
"""
#from __future__ import unicode_literals
import sys
PY3 = sys.version_info[0] >= 3
from os.path import dirname, abspath, expanduser, join as joinpath
import json
import logging
logger = logging.getLogger(__name__)
config_default = {
"db_url": "sqlite:///userga.dat",
"web_path": "userga",
"web_addr": "127.0.0.1",
"web_port": 8001,
"smtp_server": "",
"smtp_port": 25,
"smtp_user": "",
"smtp_pass": "",
"debug": True,
}
def get_fullname(*args):
root = dirname(abspath(__file__))
return joinpath(root, joinpath(*args)) if len(args) > 0 else root
def uniencode(s, coding="utf-8"):
return s.encode(coding) if s and (PY3 or not isinstance(s, str)) else s
def unidecode(s, coding="utf-8"):
return unicode(s, coding) if s and (not PY3 or isinstance(s, str)) else s
try:
with open(get_fullname("config.json"), "r") as f:
config = json.loads(f.read())
config_default.update(config)
config = config_default
except IOError:
config = config_default
| mit |
eyalfa/spark | python/pyspark/ml/common.py | 119 | 4372 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
if sys.version >= '3':
long = int
unicode = str
import py4j.protocol
from py4j.protocol import Py4JJavaError
from py4j.java_gateway import JavaObject
from py4j.java_collections import JavaArray, JavaList
from pyspark import RDD, SparkContext
from pyspark.serializers import PickleSerializer, AutoBatchedSerializer
from pyspark.sql import DataFrame, SQLContext
# Hack for support float('inf') in Py4j
_old_smart_decode = py4j.protocol.smart_decode
_float_str_mapping = {
'nan': 'NaN',
'inf': 'Infinity',
'-inf': '-Infinity',
}
def _new_smart_decode(obj):
if isinstance(obj, float):
s = str(obj)
return _float_str_mapping.get(s, s)
return _old_smart_decode(obj)
py4j.protocol.smart_decode = _new_smart_decode
_picklable_classes = [
'SparseVector',
'DenseVector',
'SparseMatrix',
'DenseMatrix',
]
# this will call the ML version of pythonToJava()
def _to_java_object_rdd(rdd):
""" Return an JavaRDD of Object by unpickling
It will convert each Python object into Java object by Pyrolite, whenever the
RDD is serialized in batch or not.
"""
rdd = rdd._reserialize(AutoBatchedSerializer(PickleSerializer()))
return rdd.ctx._jvm.org.apache.spark.ml.python.MLSerDe.pythonToJava(rdd._jrdd, True)
def _py2java(sc, obj):
""" Convert Python object into Java """
if isinstance(obj, RDD):
obj = _to_java_object_rdd(obj)
elif isinstance(obj, DataFrame):
obj = obj._jdf
elif isinstance(obj, SparkContext):
obj = obj._jsc
elif isinstance(obj, list):
obj = [_py2java(sc, x) for x in obj]
elif isinstance(obj, JavaObject):
pass
elif isinstance(obj, (int, long, float, bool, bytes, unicode)):
pass
else:
data = bytearray(PickleSerializer().dumps(obj))
obj = sc._jvm.org.apache.spark.ml.python.MLSerDe.loads(data)
return obj
def _java2py(sc, r, encoding="bytes"):
if isinstance(r, JavaObject):
clsName = r.getClass().getSimpleName()
# convert RDD into JavaRDD
if clsName != 'JavaRDD' and clsName.endswith("RDD"):
r = r.toJavaRDD()
clsName = 'JavaRDD'
if clsName == 'JavaRDD':
jrdd = sc._jvm.org.apache.spark.ml.python.MLSerDe.javaToPython(r)
return RDD(jrdd, sc)
if clsName == 'Dataset':
return DataFrame(r, SQLContext.getOrCreate(sc))
if clsName in _picklable_classes:
r = sc._jvm.org.apache.spark.ml.python.MLSerDe.dumps(r)
elif isinstance(r, (JavaArray, JavaList)):
try:
r = sc._jvm.org.apache.spark.ml.python.MLSerDe.dumps(r)
except Py4JJavaError:
pass # not pickable
if isinstance(r, (bytearray, bytes)):
r = PickleSerializer().loads(bytes(r), encoding=encoding)
return r
def callJavaFunc(sc, func, *args):
""" Call Java Function """
args = [_py2java(sc, a) for a in args]
return _java2py(sc, func(*args))
def inherit_doc(cls):
"""
A decorator that makes a class inherit documentation from its parents.
"""
for name, func in vars(cls).items():
# only inherit docstring for public functions
if name.startswith("_"):
continue
if not func.__doc__:
for parent in cls.__bases__:
parent_func = getattr(parent, name, None)
if parent_func and getattr(parent_func, "__doc__", None):
func.__doc__ = parent_func.__doc__
break
return cls
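# Illustrative sketch (not part of the original module): inherit_doc copies
# docstrings from a base class onto undocumented public methods. The class
# names below are assumptions for the example.
#
#   class Base(object):
#       def fit(self):
#           """Fit the model."""
#
#   @inherit_doc
#   class Child(Base):
#       def fit(self):
#           pass
#
#   Child.fit.__doc__  # => "Fit the model."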
| apache-2.0 |
7kbird/chrome | tools/telemetry/telemetry/core/browser_info.py | 55 | 1147 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
_check_webgl_supported_script = """
(function () {
var c = document.createElement('canvas');
var gl = c.getContext('webgl');
if (gl == null) {
gl = c.getContext("experimental-webgl");
if (gl == null) {
return false;
}
}
return true;
})();
"""
class BrowserInfo(object):
"""A wrapper around browser object that allows looking up infos of the
browser.
"""
def __init__(self, browser):
self._browser = browser
def HasWebGLSupport(self):
result = False
# If no tab is opened, open one and close it after evaluate
# _check_webgl_supported_script
if len(self._browser.tabs) == 0 and self._browser.supports_tab_control:
self._browser.tabs.New()
tab = self._browser.tabs[0]
result = tab.EvaluateJavaScript(_check_webgl_supported_script)
tab.Close()
elif len(self._browser.tabs) > 0:
tab = self._browser.tabs[0]
result = tab.EvaluateJavaScript(_check_webgl_supported_script)
return result
| bsd-3-clause |
agconti/njode | env/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/contrib/pyopenssl.py | 304 | 15086 | '''SSL with SNI_-support for Python 2. Follow these instructions if you would
like to verify SSL certificates in Python 2. Note, the default libraries do
*not* do certificate checking; you need to do additional work to validate
certificates yourself.
This needs the following packages installed:
* pyOpenSSL (tested with 0.13)
* ndg-httpsclient (tested with 0.3.2)
* pyasn1 (tested with 0.1.6)
You can install them with the following command:
pip install pyopenssl ndg-httpsclient pyasn1
To activate certificate checking, call
:func:`~urllib3.contrib.pyopenssl.inject_into_urllib3` from your Python code
before you begin making HTTP requests. This can be done in a ``sitecustomize``
module, or at any other time before your application begins using ``urllib3``,
like this::
try:
import urllib3.contrib.pyopenssl
urllib3.contrib.pyopenssl.inject_into_urllib3()
except ImportError:
pass
Now you can use :mod:`urllib3` as you normally would, and it will support SNI
when the required modules are installed.
Activating this module also has the positive side effect of disabling SSL/TLS
compression in Python 2 (see `CRIME attack`_).
If you want to configure the default list of supported cipher suites, you can
set the ``urllib3.contrib.pyopenssl.DEFAULT_SSL_CIPHER_LIST`` variable.
Module Variables
----------------
:var DEFAULT_SSL_CIPHER_LIST: The list of supported SSL/TLS cipher suites.
Default: ``ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:
ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:RSA+3DES:!aNULL:!MD5:!DSS``
.. _sni: https://en.wikipedia.org/wiki/Server_Name_Indication
.. _crime attack: https://en.wikipedia.org/wiki/CRIME_(security_exploit)
'''
from ndg.httpsclient.ssl_peer_verification import SUBJ_ALT_NAME_SUPPORT
from ndg.httpsclient.subj_alt_name import SubjectAltName as BaseSubjectAltName
import OpenSSL.SSL
from pyasn1.codec.der import decoder as der_decoder
from pyasn1.type import univ, constraint
from socket import _fileobject, timeout
import ssl
import select
from cStringIO import StringIO
from .. import connection
from .. import util
__all__ = ['inject_into_urllib3', 'extract_from_urllib3']
# SNI only *really* works if we can read the subjectAltName of certificates.
HAS_SNI = SUBJ_ALT_NAME_SUPPORT
# Map from urllib3 to PyOpenSSL compatible parameter-values.
_openssl_versions = {
ssl.PROTOCOL_SSLv23: OpenSSL.SSL.SSLv23_METHOD,
ssl.PROTOCOL_SSLv3: OpenSSL.SSL.SSLv3_METHOD,
ssl.PROTOCOL_TLSv1: OpenSSL.SSL.TLSv1_METHOD,
}
_openssl_verify = {
ssl.CERT_NONE: OpenSSL.SSL.VERIFY_NONE,
ssl.CERT_OPTIONAL: OpenSSL.SSL.VERIFY_PEER,
ssl.CERT_REQUIRED: OpenSSL.SSL.VERIFY_PEER
+ OpenSSL.SSL.VERIFY_FAIL_IF_NO_PEER_CERT,
}
# A secure default.
# Sources for more information on TLS ciphers:
#
# - https://wiki.mozilla.org/Security/Server_Side_TLS
# - https://www.ssllabs.com/projects/best-practices/index.html
# - https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/
#
# The general intent is:
# - Prefer cipher suites that offer perfect forward secrecy (DHE/ECDHE),
# - prefer ECDHE over DHE for better performance,
# - prefer any AES-GCM over any AES-CBC for better performance and security,
# - use 3DES as fallback which is secure but slow,
# - disable NULL authentication, MD5 MACs and DSS for security reasons.
DEFAULT_SSL_CIPHER_LIST = "ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:" + \
"ECDH+AES128:DH+AES:ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:RSA+3DES:" + \
"!aNULL:!MD5:!DSS"
orig_util_HAS_SNI = util.HAS_SNI
orig_connection_ssl_wrap_socket = connection.ssl_wrap_socket
def inject_into_urllib3():
'Monkey-patch urllib3 with PyOpenSSL-backed SSL-support.'
connection.ssl_wrap_socket = ssl_wrap_socket
util.HAS_SNI = HAS_SNI
def extract_from_urllib3():
'Undo monkey-patching by :func:`inject_into_urllib3`.'
connection.ssl_wrap_socket = orig_connection_ssl_wrap_socket
util.HAS_SNI = orig_util_HAS_SNI
### Note: This is a slightly bug-fixed version of the same class from ndg-httpsclient.
class SubjectAltName(BaseSubjectAltName):
'''ASN.1 implementation for subjectAltNames support'''
# There is no limit to how many SAN entries a certificate may have,
# however this needs to have some limit so we'll set an arbitrarily high
# limit.
sizeSpec = univ.SequenceOf.sizeSpec + \
constraint.ValueSizeConstraint(1, 1024)
### Note: This is a slightly bug-fixed version of the same function from ndg-httpsclient.
def get_subj_alt_name(peer_cert):
# Search through extensions
dns_name = []
if not SUBJ_ALT_NAME_SUPPORT:
return dns_name
general_names = SubjectAltName()
for i in range(peer_cert.get_extension_count()):
ext = peer_cert.get_extension(i)
ext_name = ext.get_short_name()
if ext_name != 'subjectAltName':
continue
# PyOpenSSL returns extension data in ASN.1 encoded form
ext_dat = ext.get_data()
decoded_dat = der_decoder.decode(ext_dat,
asn1Spec=general_names)
for name in decoded_dat:
if not isinstance(name, SubjectAltName):
continue
for entry in range(len(name)):
component = name.getComponentByPosition(entry)
if component.getName() != 'dNSName':
continue
dns_name.append(str(component.getComponent()))
return dns_name
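# Illustrative sketch (not part of the original module): extracting SAN DNS
# names from a peer certificate obtained during a handshake. The connection
# object and the returned names are assumptions for the example.
#
#   x509 = cnx.get_peer_certificate()
#   get_subj_alt_name(x509)
#   # => ['example.com', 'www.example.com'] for a typical certificate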
class fileobject(_fileobject):
def _wait_for_sock(self):
rd, wd, ed = select.select([self._sock], [], [],
self._sock.gettimeout())
if not rd:
raise timeout()
def read(self, size=-1):
# Use max, disallow tiny reads in a loop as they are very inefficient.
# We never leave read() with any leftover data from a new recv() call
# in our internal buffer.
rbufsize = max(self._rbufsize, self.default_bufsize)
# Our use of StringIO rather than lists of string objects returned by
# recv() minimizes memory usage and fragmentation that occurs when
# rbufsize is large compared to the typical return value of recv().
buf = self._rbuf
buf.seek(0, 2) # seek end
if size < 0:
# Read until EOF
self._rbuf = StringIO() # reset _rbuf. we consume it via buf.
while True:
try:
data = self._sock.recv(rbufsize)
except OpenSSL.SSL.WantReadError:
self._wait_for_sock()
continue
if not data:
break
buf.write(data)
return buf.getvalue()
else:
# Read until size bytes or EOF seen, whichever comes first
buf_len = buf.tell()
if buf_len >= size:
# Already have size bytes in our buffer? Extract and return.
buf.seek(0)
rv = buf.read(size)
self._rbuf = StringIO()
self._rbuf.write(buf.read())
return rv
self._rbuf = StringIO() # reset _rbuf. we consume it via buf.
while True:
left = size - buf_len
# recv() will malloc the amount of memory given as its
# parameter even though it often returns much less data
# than that. The returned data string is short lived
# as we copy it into a StringIO and free it. This avoids
# fragmentation issues on many platforms.
try:
data = self._sock.recv(left)
except OpenSSL.SSL.WantReadError:
self._wait_for_sock()
continue
if not data:
break
n = len(data)
if n == size and not buf_len:
# Shortcut. Avoid buffer data copies when:
# - We have no data in our buffer.
# AND
# - Our call to recv returned exactly the
# number of bytes we were asked to read.
return data
if n == left:
buf.write(data)
del data # explicit free
break
assert n <= left, "recv(%d) returned %d bytes" % (left, n)
buf.write(data)
buf_len += n
del data # explicit free
#assert buf_len == buf.tell()
return buf.getvalue()
def readline(self, size=-1):
buf = self._rbuf
buf.seek(0, 2) # seek end
if buf.tell() > 0:
# check if we already have it in our buffer
buf.seek(0)
bline = buf.readline(size)
if bline.endswith('\n') or len(bline) == size:
self._rbuf = StringIO()
self._rbuf.write(buf.read())
return bline
del bline
if size < 0:
# Read until \n or EOF, whichever comes first
if self._rbufsize <= 1:
# Speed up unbuffered case
buf.seek(0)
buffers = [buf.read()]
self._rbuf = StringIO() # reset _rbuf. we consume it via buf.
data = None
recv = self._sock.recv
while True:
try:
while data != "\n":
data = recv(1)
if not data:
break
buffers.append(data)
except OpenSSL.SSL.WantReadError:
self._wait_for_sock()
continue
break
return "".join(buffers)
buf.seek(0, 2) # seek end
self._rbuf = StringIO() # reset _rbuf. we consume it via buf.
while True:
try:
data = self._sock.recv(self._rbufsize)
except OpenSSL.SSL.WantReadError:
self._wait_for_sock()
continue
if not data:
break
nl = data.find('\n')
if nl >= 0:
nl += 1
buf.write(data[:nl])
self._rbuf.write(data[nl:])
del data
break
buf.write(data)
return buf.getvalue()
else:
# Read until size bytes or \n or EOF seen, whichever comes first
buf.seek(0, 2) # seek end
buf_len = buf.tell()
if buf_len >= size:
buf.seek(0)
rv = buf.read(size)
self._rbuf = StringIO()
self._rbuf.write(buf.read())
return rv
self._rbuf = StringIO() # reset _rbuf. we consume it via buf.
while True:
try:
data = self._sock.recv(self._rbufsize)
except OpenSSL.SSL.WantReadError:
self._wait_for_sock()
continue
if not data:
break
left = size - buf_len
# did we just receive a newline?
nl = data.find('\n', 0, left)
if nl >= 0:
nl += 1
# save the excess data to _rbuf
self._rbuf.write(data[nl:])
if buf_len:
buf.write(data[:nl])
break
else:
# Shortcut. Avoid data copy through buf when returning
# a substring of our first recv().
return data[:nl]
n = len(data)
if n == size and not buf_len:
# Shortcut. Avoid data copy through buf when
# returning exactly all of our first recv().
return data
if n >= left:
buf.write(data[:left])
self._rbuf.write(data[left:])
break
buf.write(data)
buf_len += n
#assert buf_len == buf.tell()
return buf.getvalue()
class WrappedSocket(object):
'''API-compatibility wrapper for Python OpenSSL's Connection-class.'''
def __init__(self, connection, socket):
self.connection = connection
self.socket = socket
def fileno(self):
return self.socket.fileno()
def makefile(self, mode, bufsize=-1):
return fileobject(self.connection, mode, bufsize)
def settimeout(self, timeout):
return self.socket.settimeout(timeout)
def sendall(self, data):
return self.connection.sendall(data)
def close(self):
return self.connection.shutdown()
def getpeercert(self, binary_form=False):
x509 = self.connection.get_peer_certificate()
if not x509:
return x509
if binary_form:
return OpenSSL.crypto.dump_certificate(
OpenSSL.crypto.FILETYPE_ASN1,
x509)
return {
'subject': (
(('commonName', x509.get_subject().CN),),
),
'subjectAltName': [
('DNS', value)
for value in get_subj_alt_name(x509)
]
}
def _verify_callback(cnx, x509, err_no, err_depth, return_code):
return err_no == 0
def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
ca_certs=None, server_hostname=None,
ssl_version=None):
ctx = OpenSSL.SSL.Context(_openssl_versions[ssl_version])
if certfile:
ctx.use_certificate_file(certfile)
if keyfile:
ctx.use_privatekey_file(keyfile)
if cert_reqs != ssl.CERT_NONE:
ctx.set_verify(_openssl_verify[cert_reqs], _verify_callback)
if ca_certs:
try:
ctx.load_verify_locations(ca_certs, None)
except OpenSSL.SSL.Error as e:
raise ssl.SSLError('bad ca_certs: %r' % ca_certs, e)
else:
ctx.set_default_verify_paths()
# Disable TLS compression to mitigate CRIME attack (issue #309)
OP_NO_COMPRESSION = 0x20000
ctx.set_options(OP_NO_COMPRESSION)
# Set list of supported ciphersuites.
ctx.set_cipher_list(DEFAULT_SSL_CIPHER_LIST)
cnx = OpenSSL.SSL.Connection(ctx, sock)
cnx.set_tlsext_host_name(server_hostname)
cnx.set_connect_state()
while True:
try:
cnx.do_handshake()
except OpenSSL.SSL.WantReadError:
select.select([sock], [], [])
continue
except OpenSSL.SSL.Error as e:
raise ssl.SSLError('bad handshake', e)
break
return WrappedSocket(cnx, sock)
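# Illustrative sketch (not part of the original module): wrapping a plain TCP
# socket with SNI support. The host name, CA bundle path and verification
# settings are assumptions for the example.
#
#   import socket
#   sock = socket.create_connection(('example.com', 443))
#   wrapped = ssl_wrap_socket(sock, cert_reqs=ssl.CERT_REQUIRED,
#                             ca_certs='/etc/ssl/certs/ca-certificates.crt',
#                             server_hostname='example.com',
#                             ssl_version=ssl.PROTOCOL_SSLv23)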
| bsd-3-clause |
CloudVLab/professional-services | examples/ml-audio-content-profiling/tests/test_perspective_api_function.py | 2 | 5636 | #!/usr/bin/env python3
# Copyright 2019 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit test file for perspective_api_function.main.py."""
import unittest
from perspective_api_function import main
class TestPerspectiveAPIFunction(unittest.TestCase):
"""Tests the logic in perspective_api_function module."""
def test_format_api_result(self):
"""Test for get_api_result"""
response = {
'attributeScores': {
'TOXICITY': {
'spanScores': [
{
'begin': 0,
'end': 1003,
'score': {
'value': 0.049772695,
'type': 'PROBABILITY'}}
],
'summaryScore': {
'value': 0.049772695,
'type': 'PROBABILITY'}}},
'languages': ['en'],
'detectedLanguages': ['en']}
text = {'transcript': """
Hi and welcome to episode number 83 of the weekly
Google Cloud platform podcast. I am princess Campo
and I'm here with my colleague Mark Mendel. Hey,
Mark, how are you doing? I'm doing very very very
well. How you doing Friends Ask? I'm pretty good. It's
been a long week, but you know pretty good very
excited about a very cool episode about public data
sets. Yeah, it's super cool where we talk about how
we host all these really large data sets for all
these people to play with and do fun things with
yeah. I've used some of them before my favorite one
is we have a lot of good from GitHub. You can do
cool things with it, but it's not the only one. So
we're going to be talking about a bunch of different
ones that you can just go and use and have fun with
them. And after that we have a question of the week
that is actually a question of the week whose answer
comes from someone. We interviewed in the future. So
at some point we'll have him on the podcast. But for
now, we have something that he
""",
'start_time': '00:00:04',
'end_time': '00:01:00'}
actual_result = main.format_api_results(response, text)
expected_result = {'text': """
Hi and welcome to episode number 83 of the
weekly Google Cloud platform podcast. I am
princess Campo and I'm here with my colleague
Mark Mendel. Hey, Mark, how are you doing? I'm
doing very very very well. How you doing Friends
Ask? I'm pretty good. It's been a long week, but
you know pretty good very excited about a very
cool episode about public data sets. Yeah, it's
super cool where we talk about how we host all
these really large data sets for all these
people to play with and do fun things with yeah.
I've used some of them before my favorite one is
we have a lot of good from GitHub. You can do
cool things with it, but it's not the only one.
So we're going to be talking about a bunch of
different ones that you can just go and use and
have fun with them. And after that we have a
question of the week that is actually a question
of the week whose answer comes from someone. We
interviewed in the future. So at some point
we'll have him on the podcast. But for now, we
have something that he
""",
'start_time': '00:00:04',
'end_time': '00:01:00',
'toxicity': 0.05}
self.assertEqual(expected_result['toxicity'], actual_result['toxicity'])
self.assertEqual(expected_result['start_time'], actual_result['start_time'])
self.assertEqual(expected_result['end_time'], actual_result['end_time'])
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
huanchenz/STX-h-store | third_party/python/boto/exception.py | 9 | 13323 | # Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010, Eucalyptus Systems, Inc.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Exception classes - Subclassing allows you to check for specific errors
"""
import base64
import xml.sax
from boto import handler
from boto.resultset import ResultSet
class BotoClientError(StandardError):
"""
General Boto Client error (error accessing AWS)
"""
def __init__(self, reason, *args):
StandardError.__init__(self, reason, *args)
self.reason = reason
def __repr__(self):
return 'BotoClientError: %s' % self.reason
def __str__(self):
return 'BotoClientError: %s' % self.reason
class SDBPersistenceError(StandardError):
pass
class StoragePermissionsError(BotoClientError):
"""
Permissions error when accessing a bucket or key on a storage service.
"""
pass
class S3PermissionsError(StoragePermissionsError):
"""
Permissions error when accessing a bucket or key on S3.
"""
pass
class GSPermissionsError(StoragePermissionsError):
"""
Permissions error when accessing a bucket or key on GS.
"""
pass
class BotoServerError(StandardError):
def __init__(self, status, reason, body=None, *args):
StandardError.__init__(self, status, reason, body, *args)
self.status = status
self.reason = reason
self.body = body or ''
self.request_id = None
self.error_code = None
self.error_message = None
self.box_usage = None
# Attempt to parse the error response. If body isn't present,
# then just ignore the error response.
if self.body:
try:
h = handler.XmlHandler(self, self)
xml.sax.parseString(self.body, h)
except xml.sax.SAXParseException, pe:
# Go ahead and clean up anything that may have
# managed to get into the error data so we
# don't get partial garbage.
print "Warning: failed to parse error message from AWS: %s" % pe
self._cleanupParsedProperties()
def __getattr__(self, name):
if name == 'message':
return self.error_message
if name == 'code':
return self.error_code
raise AttributeError
def __repr__(self):
return '%s: %s %s\n%s' % (self.__class__.__name__,
self.status, self.reason, self.body)
def __str__(self):
return '%s: %s %s\n%s' % (self.__class__.__name__,
self.status, self.reason, self.body)
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name in ('RequestId', 'RequestID'):
self.request_id = value
elif name == 'Code':
self.error_code = value
elif name == 'Message':
self.error_message = value
elif name == 'BoxUsage':
self.box_usage = value
return None
def _cleanupParsedProperties(self):
self.request_id = None
self.error_code = None
self.error_message = None
self.box_usage = None
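# Illustrative sketch (not part of the original module): BotoServerError
# parses an XML error body on construction via boto.handler. The body below
# is an assumption shaped like a typical AWS error response.
#
#   body = ('<Response><Errors><Error><Code>Throttling</Code>'
#           '<Message>Rate exceeded</Message></Error></Errors>'
#           '<RequestID>abc-123</RequestID></Response>')
#   err = BotoServerError(400, 'Bad Request', body)
#   err.error_code     # => 'Throttling'
#   err.error_message  # => 'Rate exceeded'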
class ConsoleOutput:
def __init__(self, parent=None):
self.parent = parent
self.instance_id = None
self.timestamp = None
self.comment = None
self.output = None
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'instanceId':
self.instance_id = value
elif name == 'output':
self.output = base64.b64decode(value)
else:
setattr(self, name, value)
class StorageCreateError(BotoServerError):
"""
Error creating a bucket or key on a storage service.
"""
def __init__(self, status, reason, body=None):
self.bucket = None
BotoServerError.__init__(self, status, reason, body)
def endElement(self, name, value, connection):
if name == 'BucketName':
self.bucket = value
else:
return BotoServerError.endElement(self, name, value, connection)
class S3CreateError(StorageCreateError):
"""
Error creating a bucket or key on S3.
"""
pass
class GSCreateError(StorageCreateError):
"""
Error creating a bucket or key on GS.
"""
pass
class StorageCopyError(BotoServerError):
"""
Error copying a key on a storage service.
"""
pass
class S3CopyError(StorageCopyError):
"""
Error copying a key on S3.
"""
pass
class GSCopyError(StorageCopyError):
"""
Error copying a key on GS.
"""
pass
class SQSError(BotoServerError):
"""
General Error on Simple Queue Service.
"""
def __init__(self, status, reason, body=None):
self.detail = None
self.type = None
BotoServerError.__init__(self, status, reason, body)
def startElement(self, name, attrs, connection):
return BotoServerError.startElement(self, name, attrs, connection)
def endElement(self, name, value, connection):
if name == 'Detail':
self.detail = value
elif name == 'Type':
self.type = value
else:
return BotoServerError.endElement(self, name, value, connection)
def _cleanupParsedProperties(self):
BotoServerError._cleanupParsedProperties(self)
for p in ('detail', 'type'):
setattr(self, p, None)
class SQSDecodeError(BotoClientError):
"""
Error when decoding an SQS message.
"""
def __init__(self, reason, message):
BotoClientError.__init__(self, reason, message)
self.message = message
def __repr__(self):
return 'SQSDecodeError: %s' % self.reason
def __str__(self):
return 'SQSDecodeError: %s' % self.reason
class StorageResponseError(BotoServerError):
"""
Error in response from a storage service.
"""
def __init__(self, status, reason, body=None):
self.resource = None
BotoServerError.__init__(self, status, reason, body)
def startElement(self, name, attrs, connection):
return BotoServerError.startElement(self, name, attrs, connection)
def endElement(self, name, value, connection):
if name == 'Resource':
self.resource = value
else:
return BotoServerError.endElement(self, name, value, connection)
def _cleanupParsedProperties(self):
BotoServerError._cleanupParsedProperties(self)
for p in ('resource',): # trailing comma: iterate a tuple, not the string's characters
setattr(self, p, None)
class S3ResponseError(StorageResponseError):
"""
Error in response from S3.
"""
pass
class GSResponseError(StorageResponseError):
"""
Error in response from GS.
"""
pass
class EC2ResponseError(BotoServerError):
"""
Error in response from EC2.
"""
def __init__(self, status, reason, body=None):
self.errors = None
self._errorResultSet = []
BotoServerError.__init__(self, status, reason, body)
self.errors = [ (e.error_code, e.error_message) \
for e in self._errorResultSet ]
if len(self.errors):
self.error_code, self.error_message = self.errors[0]
def startElement(self, name, attrs, connection):
if name == 'Errors':
self._errorResultSet = ResultSet([('Error', _EC2Error)])
return self._errorResultSet
else:
return None
def endElement(self, name, value, connection):
if name == 'RequestID':
self.request_id = value
else:
return None # don't call subclass here
def _cleanupParsedProperties(self):
BotoServerError._cleanupParsedProperties(self)
self._errorResultSet = []
for p in ('errors',): # trailing comma: iterate a tuple, not the string's characters
setattr(self, p, None)
class EmrResponseError(BotoServerError):
"""
Error in response from EMR
"""
pass
class _EC2Error:
def __init__(self, connection=None):
self.connection = connection
self.error_code = None
self.error_message = None
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'Code':
self.error_code = value
elif name == 'Message':
self.error_message = value
else:
return None
class SDBResponseError(BotoServerError):
"""
Error in responses from SDB.
"""
pass
class AWSConnectionError(BotoClientError):
"""
General error connecting to Amazon Web Services.
"""
pass
class StorageDataError(BotoClientError):
"""
Error receiving data from a storage service.
"""
pass
class S3DataError(StorageDataError):
"""
Error receiving data from S3.
"""
pass
class GSDataError(StorageDataError):
"""
Error receiving data from GS.
"""
pass
class FPSResponseError(BotoServerError):
pass
class InvalidUriError(Exception):
"""Exception raised when URI is invalid."""
def __init__(self, message):
Exception.__init__(self, message)
self.message = message
class InvalidAclError(Exception):
"""Exception raised when ACL XML is invalid."""
def __init__(self, message):
Exception.__init__(self, message)
self.message = message
class NoAuthHandlerFound(Exception):
"""Is raised when no auth handlers were found ready to authenticate."""
pass
class TooManyAuthHandlerReadyToAuthenticate(Exception):
"""Is raised when there are more than one auth handler ready.
In normal situation there should only be one auth handler that is ready to
authenticate. In case where more than one auth handler is ready to
authenticate, we raise this exception, to prevent unpredictable behavior
when multiple auth handlers can handle a particular case and the one chosen
depends on the order they were checked.
"""
pass
# Enum class for resumable upload failure disposition.
class ResumableTransferDisposition(object):
# START_OVER means an attempt to resume an existing transfer failed,
# and a new resumable upload should be attempted (without delay).
START_OVER = 'START_OVER'
# WAIT_BEFORE_RETRY means the resumable transfer failed but that it can
# be retried after a time delay within the current process.
WAIT_BEFORE_RETRY = 'WAIT_BEFORE_RETRY'
# ABORT_CUR_PROCESS means the resumable transfer failed and that
# delaying/retrying within the current process will not help. If
# resumable transfer included a state tracker file the upload can be
# retried again later, in another process (e.g., a later run of gsutil).
ABORT_CUR_PROCESS = 'ABORT_CUR_PROCESS'
# ABORT means the resumable transfer failed in a way that it does not
# make sense to continue in the current process, and further that the
# current tracker ID should not be preserved (in a tracker file if one
# was specified at resumable upload start time). If the user tries again
# later (e.g., a separate run of gsutil) it will get a new resumable
# upload ID.
ABORT = 'ABORT'
class ResumableUploadException(Exception):
"""
Exception raised for various resumable upload problems.
self.disposition is of type ResumableTransferDisposition.
"""
def __init__(self, message, disposition):
Exception.__init__(self, message, disposition)
self.message = message
self.disposition = disposition
def __repr__(self):
return 'ResumableUploadException("%s", %s)' % (
self.message, self.disposition)
class ResumableDownloadException(Exception):
"""
Exception raised for various resumable download problems.
self.disposition is of type ResumableTransferDisposition.
"""
def __init__(self, message, disposition):
Exception.__init__(self, message, disposition)
self.message = message
self.disposition = disposition
def __repr__(self):
return 'ResumableDownloadException("%s", %s)' % (
self.message, self.disposition)
| gpl-3.0 |
Mappy/mapnik | bindings/python/build.py | 2 | 8178 | #
# This file is part of Mapnik (c++ mapping toolkit)
#
# Copyright (C) 2013 Artem Pavlenko
#
# Mapnik is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
#
import os, re, sys, glob
from subprocess import Popen, PIPE
Import('env')
def call(cmd, silent=True):
stdout, stderr = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE).communicate()
if not stderr:
return stdout.strip()
elif not silent:
print stderr
def run_2to3(*args,**kwargs):
call('2to3 -w %s' % os.path.dirname(kwargs['target'][0].path))
def is_py3():
return 'True' in os.popen('''%s -c "import sys as s;s.stdout.write(str(s.version_info[0] == 3))"''' % env['PYTHON']).read().strip()
prefix = env['PREFIX']
target_path = os.path.normpath(env['PYTHON_INSTALL_LOCATION'] + os.path.sep + 'mapnik')
target_path_deprecated = os.path.normpath(env['PYTHON_INSTALL_LOCATION'] + os.path.sep + 'mapnik2')
py_env = env.Clone()
py_env.Append(CPPPATH = env['PYTHON_INCLUDES'])
py_env.Append(CPPDEFINES = env['LIBMAPNIK_DEFINES'])
py_env['LIBS'] = ['mapnik',env['BOOST_PYTHON_LIB']]
link_all_libs = env['LINKING'] == 'static' or env['RUNTIME_LINK'] == 'static' or (env['PLATFORM'] == 'Darwin' and not env['PYTHON_DYNAMIC_LOOKUP'])
if link_all_libs:
py_env.AppendUnique(LIBS=env['LIBMAPNIK_LIBS'])
if env['RUNTIME_LINK'] == 'static' and env['PLATFORM'] == 'Linux':
py_env.AppendUnique(LIBS='rt')
# TODO - do solaris/fedora need direct linking too?
if env['PLATFORM'] == 'Darwin':
##### Python linking on OS X is tricky ###
# Confounding problems are:
# 1) likelihood of multiple python installs of the same major.minor version
# because apple supplies python built-in and many users may have installed
# further versions using macports
# 2) boost python directly links to a python version
# 3) the below will directly link _mapnik.so to a python version
# 4) _mapnik.so must link to the same python lib as boost_python.dylib otherwise
# python will Abort with a Version Mismatch error.
# See https://github.com/mapnik/mapnik/issues/453 for the seeds of a better approach
# for now we offer control over method of direct linking...
# The default below is to link against the python dylib in the form of
#/path/to/Python.framework/Python instead of -lpython
# http://developer.apple.com/mac/library/DOCUMENTATION/Darwin/Reference/ManPages/man1/ld.1.html
if env['PYTHON_DYNAMIC_LOOKUP']:
python_link_flag = '-undefined dynamic_lookup'
elif env['FRAMEWORK_PYTHON']:
if env['FRAMEWORK_SEARCH_PATH']:
# if the user has supplied a custom root path to search for
# a given Python framework, then use that to direct the linker
python_link_flag = '-F%s -framework Python -Z' % env['FRAMEWORK_SEARCH_PATH']
else:
# otherwise be as explicit as possible for linking to the same Framework
# as the executable we are building with (or is pointed to by the PYTHON variable)
# otherwise we may accidentally link against either:
# /System/Library/Frameworks/Python.framework/Python/Versions/
# or
# /Library/Frameworks/Python.framework/Python/Versions/
# See: https://github.com/mapnik/mapnik/issues/380
link_prefix = env['PYTHON_SYS_PREFIX']
if '.framework' in link_prefix:
python_link_flag = '-F%s -framework Python -Z' % os.path.dirname(link_prefix.split('.')[0])
elif '/System' in link_prefix:
python_link_flag = '-F/System/Library/Frameworks/ -framework Python -Z'
else:
# should we fall back to -lpython here?
python_link_flag = '-F/ -framework Python'
# if we are not linking to a framework then use the *nix standard approach
else:
# TODO - do we need to pass -L/?
python_link_flag = '-lpython%s' % env['PYTHON_VERSION']
elif env['PLATFORM'] == 'SunOS':
# make sure to explicitly link mapnik.so against
# libmapnik in its installed location
python_link_flag = '-R%s' % env['MAPNIK_LIB_BASE']
else:
# all other platforms we don't directly link python
python_link_flag = ''
if env['CUSTOM_LDFLAGS']:
linkflags = '%s %s' % (env['CUSTOM_LDFLAGS'], python_link_flag)
else:
linkflags = python_link_flag
paths = '''
"""Configuration paths of Mapnik fonts and input plugins (auto-generated by SCons)."""
from os.path import normpath,join,dirname
mapniklibpath = '%s'
mapniklibpath = normpath(join(dirname(__file__),mapniklibpath))
'''
paths += "inputpluginspath = join(mapniklibpath,'input')\n"
if env['SYSTEM_FONTS']:
paths += "fontscollectionpath = normpath('%s')\n" % env['SYSTEM_FONTS']
else:
paths += "fontscollectionpath = join(mapniklibpath,'fonts')\n"
paths += "__all__ = [mapniklibpath,inputpluginspath,fontscollectionpath]\n"
if not os.path.exists('mapnik'):
os.mkdir('mapnik')
file('mapnik/paths.py','w').write(paths % (env['MAPNIK_LIB_DIR']))
# force open perms temporarily so that `sudo scons install`
# does not later break simple non-install non-sudo rebuild
try:
os.chmod('mapnik/paths.py',0666)
except: pass
# install the shared object beside the module directory
sources = glob.glob('*.cpp')
if 'install' in COMMAND_LINE_TARGETS:
# install the core mapnik python files, including '__init__.py'
init_files = glob.glob('mapnik/*.py')
if 'mapnik/paths.py' in init_files:
init_files.remove('mapnik/paths.py')
init_module = env.Install(target_path, init_files)
env.Alias(target='install', source=init_module)
# install mapnik2 module which redirects to mapnik and issues DeprecatedWarning
init_mapnik2 = env.Install(target_path_deprecated, 'mapnik2/__init__.py')
env.Alias(target='install', source=init_mapnik2)
# fix perms and install the custom generated 'paths.py'
targetp = os.path.join(target_path,'paths.py')
env.Alias("install", targetp)
# use env.Command rather than env.Install
# to enable setting proper perms on `paths.py`
env.Command( targetp, 'mapnik/paths.py',
[
Copy("$TARGET","$SOURCE"),
Chmod("$TARGET", 0644),
])
if 'uninstall' not in COMMAND_LINE_TARGETS:
if env['HAS_CAIRO']:
py_env.Append(CPPPATH = env['CAIRO_CPPPATHS'])
py_env.Append(CPPDEFINES = '-DHAVE_CAIRO')
if link_all_libs:
py_env.Append(LIBS=env['CAIRO_ALL_LIBS'])
if env['HAS_PYCAIRO']:
py_env.ParseConfig('pkg-config --cflags pycairo')
py_env.Append(CPPDEFINES = '-DHAVE_PYCAIRO')
py_env.AppendUnique(LIBS = 'boost_thread%s' % env['BOOST_APPEND'])
_mapnik = py_env.LoadableModule('mapnik/_mapnik', sources, LDMODULEPREFIX='', LDMODULESUFFIX='.so',LINKFLAGS=linkflags)
Depends(_mapnik, env.subst('../../src/%s' % env['MAPNIK_LIB_NAME']))
if env['PLATFORM'] == 'SunOS' and env['PYTHON_IS_64BIT']:
# http://mail.python.org/pipermail/python-dev/2006-August/068528.html
cxx_module_path = os.path.join(target_path,'64')
else:
cxx_module_path = target_path
if 'uninstall' not in COMMAND_LINE_TARGETS:
pymapniklib = env.Install(cxx_module_path,_mapnik)
py_env.Alias(target='install',source=pymapniklib)
if 'install' in COMMAND_LINE_TARGETS:
if is_py3():
env.AddPostAction(pymapniklib, run_2to3)
env['create_uninstall_target'](env, target_path)
env['create_uninstall_target'](env, target_path_deprecated)
| lgpl-2.1 |
nkgilley/home-assistant | homeassistant/components/mfi/sensor.py | 19 | 3314 | """Support for Ubiquiti mFi sensors."""
import logging
from mficlient.client import FailedToLogin, MFiClient
import requests
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_HOST,
CONF_PASSWORD,
CONF_PORT,
CONF_SSL,
CONF_USERNAME,
CONF_VERIFY_SSL,
STATE_OFF,
STATE_ON,
TEMP_CELSIUS,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
DEFAULT_SSL = True
DEFAULT_VERIFY_SSL = True
DIGITS = {"volts": 1, "amps": 1, "active_power": 0, "temperature": 1}
SENSOR_MODELS = [
"Ubiquiti mFi-THS",
"Ubiquiti mFi-CS",
"Ubiquiti mFi-DS",
"Outlet",
"Input Analog",
"Input Digital",
]
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_PORT): cv.port,
vol.Optional(CONF_SSL, default=DEFAULT_SSL): cv.boolean,
vol.Optional(CONF_VERIFY_SSL, default=DEFAULT_VERIFY_SSL): cv.boolean,
}
)
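# Illustrative sketch (not part of the original module): a minimal YAML
# configuration matching PLATFORM_SCHEMA above. The host and credentials are
# assumptions for the example.
#
#   sensor:
#     - platform: mfi
#       host: 192.168.1.10
#       username: admin
#       password: secret
#       ssl: true
#       verify_ssl: false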
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up mFi sensors."""
host = config.get(CONF_HOST)
username = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
use_tls = config.get(CONF_SSL)
verify_tls = config.get(CONF_VERIFY_SSL)
default_port = 6443 if use_tls else 6080
port = int(config.get(CONF_PORT, default_port))
try:
client = MFiClient(
host, username, password, port=port, use_tls=use_tls, verify=verify_tls
)
except (FailedToLogin, requests.exceptions.ConnectionError) as ex:
_LOGGER.error("Unable to connect to mFi: %s", str(ex))
return False
add_entities(
MfiSensor(port, hass)
for device in client.get_devices()
for port in device.ports.values()
if port.model in SENSOR_MODELS
)
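# A minimal YAML sketch (hypothetical values) matching PLATFORM_SCHEMA above;
# host, username and password are required, the rest fall back to their
# defaults (port defaults to 6443 when ssl is on, 6080 otherwise):
#
#   sensor:
#     - platform: mfi
#       host: 192.168.1.10
#       username: admin
#       password: secret
#       ssl: true
#       verify_ssl: false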
class MfiSensor(Entity):
"""Representation of a mFi sensor."""
def __init__(self, port, hass):
"""Initialize the sensor."""
self._port = port
self._hass = hass
@property
def name(self):
"""Return the name of th sensor."""
return self._port.label
@property
def state(self):
"""Return the state of the sensor."""
try:
tag = self._port.tag
except ValueError:
tag = None
if tag is None:
return STATE_OFF
if self._port.model == "Input Digital":
return STATE_ON if self._port.value > 0 else STATE_OFF
digits = DIGITS.get(self._port.tag, 0)
return round(self._port.value, digits)
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
try:
tag = self._port.tag
except ValueError:
return "State"
if tag == "temperature":
return TEMP_CELSIUS
if tag == "active_pwr":
return "Watts"
if self._port.model == "Input Digital":
return "State"
return tag
def update(self):
"""Get the latest data."""
self._port.refresh()
| apache-2.0 |
kaithar/muhubot | utils/plumbing/http_sink/jisho.py | 1 | 1391 | from __future__ import print_function, unicode_literals
import json
from utils.protocol import Socket as sock
from funchain import AsyncCall
import traceback
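# `tracker` maps a correlation id (cid) to its pending AsyncCall: search()
# below stores the AsyncCall under a fresh cid before raising it, and
# receiver() pops the entry when the reply carrying that cid comes back on
# 'plumbing/jisho/search'.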
tracker = {}
cid = 0
def receiver(body):
asc = tracker.get(body['cid'], None)
if asc:
del tracker[body['cid']]
asc.fire_callback(body['result'])
from utils.plumbing import command_registry
import re
def register_search():
try:
sock.get_sock().chain('plumbing/jisho/search') >> receiver
def search(body):
global cid
# {'match': match, 'user': user, 'instruction': instruction, 'body': body, 'send_reply': send_reply}
query = body['match'].group(2)
if not query:
return "usage: jisho words [keywords]"
cid += 1
my_cid = cid
body = {'cid': my_cid, 'return_path': 'plumbing/jisho/search', 'query': query}
asc = AsyncCall()
tracker[my_cid] = asc
sock.get_sock().send_multipart('MSG', 'api/jisho/words', json.dumps(body).encode('utf-8'))
raise asc
command_registry.getRegistry().registerInstruction(re.compile(r'jisho (words) ?(.*)?'), search, ("jisho words [keywords] - Search jisho.org for words",))
    except Exception:
print("Failed to register macros")
traceback.print_exc(None)
register_search()
__all__ = ['receiver']
| gpl-2.0 |
sgerhart/ansible | lib/ansible/modules/cloud/google/gcp_compute_target_vpn_gateway.py | 8 | 11842 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ["preview"],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_compute_target_vpn_gateway
description:
- Represents a VPN gateway running in GCP. This virtual device is managed by Google,
but used only by you.
short_description: Creates a GCP TargetVpnGateway
version_added: 2.7
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
state:
description:
- Whether the given object should exist in GCP
choices: ['present', 'absent']
default: 'present'
description:
description:
- An optional description of this resource.
required: false
name:
description:
- Name of the resource. Provided by the client when the resource is created. The name
must be 1-63 characters long, and comply with RFC1035. Specifically, the name must
be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`
which means the first character must be a lowercase letter, and all following characters
must be a dash, lowercase letter, or digit, except the last character, which cannot
be a dash.
required: true
network:
description:
- The network this VPN gateway is accepting traffic for.
- 'This field represents a link to a Network resource in GCP. It can be specified
in two ways. You can add `register: name-of-resource` to a gcp_compute_network task
    and then set this network field to "{{ name-of-resource }}". Alternatively, you can
set this network to a dictionary with the selfLink key where the value is the selfLink
of your Network.'
required: true
region:
description:
- The region this gateway should sit in.
required: true
extends_documentation_fragment: gcp
notes:
- "API Reference: U(https://cloud.google.com/compute/docs/reference/rest/v1/targetVpnGateways)"
'''
EXAMPLES = '''
- name: create an address
gcp_compute_address:
name: "address-vpngateway"
region: us-west1
project: "{{ gcp_project }}"
auth_kind: "{{ gcp_cred_kind }}"
service_account_file: "{{ gcp_cred_file }}"
state: present
register: address
- name: create a network
gcp_compute_network:
name: "network-vpngateway"
project: "{{ gcp_project }}"
auth_kind: "{{ gcp_cred_kind }}"
service_account_file: "{{ gcp_cred_file }}"
state: present
register: network
- name: create a target vpn gateway
gcp_compute_target_vpn_gateway:
name: "test_object"
region: us-west1
network: "{{ network }}"
project: "test_project"
auth_kind: "serviceaccount"
service_account_file: "/tmp/auth.pem"
state: present
'''
RETURN = '''
creationTimestamp:
description:
- Creation timestamp in RFC3339 text format.
returned: success
type: str
description:
description:
- An optional description of this resource.
returned: success
type: str
name:
description:
- Name of the resource. Provided by the client when the resource is created. The name
must be 1-63 characters long, and comply with RFC1035. Specifically, the name must
be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`
which means the first character must be a lowercase letter, and all following characters
must be a dash, lowercase letter, or digit, except the last character, which cannot
be a dash.
returned: success
type: str
id:
description:
- The unique identifier for the resource.
returned: success
type: int
network:
description:
- The network this VPN gateway is accepting traffic for.
returned: success
type: dict
tunnels:
description:
- A list of references to VpnTunnel resources associated to this VPN gateway.
returned: success
type: list
forwardingRules:
description:
- A list of references to the ForwardingRule resources associated to this VPN gateway.
returned: success
type: list
region:
description:
- The region this gateway should sit in.
returned: success
type: str
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, replace_resource_dict
import json
import time
################################################################################
# Main
################################################################################
def main():
"""Main function"""
module = GcpModule(
argument_spec=dict(
state=dict(default='present', choices=['present', 'absent'], type='str'),
description=dict(type='str'),
name=dict(required=True, type='str'),
network=dict(required=True, type='dict'),
region=dict(required=True, type='str')
)
)
if not module.params['scopes']:
module.params['scopes'] = ['https://www.googleapis.com/auth/compute']
state = module.params['state']
kind = 'compute#targetVpnGateway'
fetch = fetch_resource(module, self_link(module), kind)
changed = False
if fetch:
if state == 'present':
if is_different(module, fetch):
update(module, self_link(module), kind)
fetch = fetch_resource(module, self_link(module), kind)
changed = True
else:
delete(module, self_link(module), kind)
fetch = {}
changed = True
else:
if state == 'present':
fetch = create(module, collection(module), kind)
changed = True
else:
fetch = {}
fetch.update({'changed': changed})
module.exit_json(**fetch)
def create(module, link, kind):
auth = GcpSession(module, 'compute')
return wait_for_operation(module, auth.post(link, resource_to_request(module)))
def update(module, link, kind):
module.fail_json(msg="TargetVpnGateway cannot be edited")
def delete(module, link, kind):
auth = GcpSession(module, 'compute')
return wait_for_operation(module, auth.delete(link))
def resource_to_request(module):
request = {
u'kind': 'compute#targetVpnGateway',
u'description': module.params.get('description'),
u'name': module.params.get('name'),
u'network': replace_resource_dict(module.params.get(u'network', {}), 'selfLink')
}
return_vals = {}
for k, v in request.items():
if v:
return_vals[k] = v
return return_vals
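# For example, when 'description' is unset, the pruned request reduces to the
# truthy keys only (network already resolved to its selfLink), roughly:
#   {'kind': 'compute#targetVpnGateway', 'name': 'test_object',
#    'network': '<selfLink of the network>'}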
def fetch_resource(module, link, kind, allow_not_found=True):
auth = GcpSession(module, 'compute')
return return_if_object(module, auth.get(link), kind, allow_not_found)
def self_link(module):
return "https://www.googleapis.com/compute/v1/projects/{project}/regions/{region}/targetVpnGateways/{name}".format(**module.params)
def collection(module):
return "https://www.googleapis.com/compute/v1/projects/{project}/regions/{region}/targetVpnGateways".format(**module.params)
def return_if_object(module, response, kind, allow_not_found=False):
# If not found, return nothing.
if allow_not_found and response.status_code == 404:
return None
# If no content, return nothing.
if response.status_code == 204:
return None
try:
module.raise_for_status(response)
result = response.json()
except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
module.fail_json(msg="Invalid JSON response with error: %s" % inst)
if navigate_hash(result, ['error', 'errors']):
module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
return result
def is_different(module, response):
request = resource_to_request(module)
response = response_to_hash(module, response)
# Remove all output-only from response.
response_vals = {}
for k, v in response.items():
if k in request:
response_vals[k] = v
request_vals = {}
for k, v in request.items():
if k in response:
request_vals[k] = v
return GcpRequest(request_vals) != GcpRequest(response_vals)
# Remove unnecessary properties from the response.
# This is for doing comparisons with Ansible's current parameters.
def response_to_hash(module, response):
return {
u'creationTimestamp': response.get(u'creationTimestamp'),
u'description': module.params.get('description'),
u'name': module.params.get('name'),
u'id': response.get(u'id'),
u'network': replace_resource_dict(module.params.get(u'network', {}), 'selfLink'),
u'tunnels': response.get(u'tunnels'),
u'forwardingRules': response.get(u'forwardingRules')
}
def async_op_url(module, extra_data=None):
if extra_data is None:
extra_data = {}
url = "https://www.googleapis.com/compute/v1/projects/{project}/regions/{region}/operations/{op_id}"
combined = extra_data.copy()
combined.update(module.params)
return url.format(**combined)
def wait_for_operation(module, response):
op_result = return_if_object(module, response, 'compute#operation')
if op_result is None:
return {}
status = navigate_hash(op_result, ['status'])
wait_done = wait_for_completion(status, op_result, module)
return fetch_resource(module, navigate_hash(wait_done, ['targetLink']), 'compute#targetVpnGateway')
def wait_for_completion(status, op_result, module):
op_id = navigate_hash(op_result, ['name'])
op_uri = async_op_url(module, {'op_id': op_id})
while status != 'DONE':
        raise_if_errors(op_result, ['error', 'errors'], module)
time.sleep(1.0)
if status not in ['PENDING', 'RUNNING', 'DONE']:
module.fail_json(msg="Invalid result %s" % status)
op_result = fetch_resource(module, op_uri, 'compute#operation')
status = navigate_hash(op_result, ['status'])
return op_result
def raise_if_errors(response, err_path, module):
errors = navigate_hash(response, err_path)
if errors is not None:
module.fail_json(msg=errors)
if __name__ == '__main__':
main()
| mit |
sumedh123/debatify | venv/lib/python2.7/site-packages/pip/_vendor/html5lib/constants.py | 497 | 86873 | from __future__ import absolute_import, division, unicode_literals
import string
EOF = None
E = {
"null-character":
"Null character in input stream, replaced with U+FFFD.",
"invalid-codepoint":
"Invalid codepoint in stream.",
"incorrectly-placed-solidus":
"Solidus (/) incorrectly placed in tag.",
"incorrect-cr-newline-entity":
"Incorrect CR newline entity, replaced with LF.",
"illegal-windows-1252-entity":
"Entity used with illegal number (windows-1252 reference).",
"cant-convert-numeric-entity":
"Numeric entity couldn't be converted to character "
"(codepoint U+%(charAsInt)08x).",
"illegal-codepoint-for-numeric-entity":
"Numeric entity represents an illegal codepoint: "
"U+%(charAsInt)08x.",
"numeric-entity-without-semicolon":
"Numeric entity didn't end with ';'.",
"expected-numeric-entity-but-got-eof":
"Numeric entity expected. Got end of file instead.",
"expected-numeric-entity":
"Numeric entity expected but none found.",
"named-entity-without-semicolon":
"Named entity didn't end with ';'.",
"expected-named-entity":
"Named entity expected. Got none.",
"attributes-in-end-tag":
"End tag contains unexpected attributes.",
'self-closing-flag-on-end-tag':
"End tag contains unexpected self-closing flag.",
"expected-tag-name-but-got-right-bracket":
"Expected tag name. Got '>' instead.",
"expected-tag-name-but-got-question-mark":
"Expected tag name. Got '?' instead. (HTML doesn't "
"support processing instructions.)",
"expected-tag-name":
"Expected tag name. Got something else instead",
"expected-closing-tag-but-got-right-bracket":
"Expected closing tag. Got '>' instead. Ignoring '</>'.",
"expected-closing-tag-but-got-eof":
"Expected closing tag. Unexpected end of file.",
"expected-closing-tag-but-got-char":
"Expected closing tag. Unexpected character '%(data)s' found.",
"eof-in-tag-name":
"Unexpected end of file in the tag name.",
"expected-attribute-name-but-got-eof":
"Unexpected end of file. Expected attribute name instead.",
"eof-in-attribute-name":
"Unexpected end of file in attribute name.",
"invalid-character-in-attribute-name":
"Invalid character in attribute name",
"duplicate-attribute":
"Dropped duplicate attribute on tag.",
"expected-end-of-tag-name-but-got-eof":
"Unexpected end of file. Expected = or end of tag.",
"expected-attribute-value-but-got-eof":
"Unexpected end of file. Expected attribute value.",
"expected-attribute-value-but-got-right-bracket":
"Expected attribute value. Got '>' instead.",
'equals-in-unquoted-attribute-value':
"Unexpected = in unquoted attribute",
'unexpected-character-in-unquoted-attribute-value':
"Unexpected character in unquoted attribute",
"invalid-character-after-attribute-name":
"Unexpected character after attribute name.",
"unexpected-character-after-attribute-value":
"Unexpected character after attribute value.",
"eof-in-attribute-value-double-quote":
"Unexpected end of file in attribute value (\").",
"eof-in-attribute-value-single-quote":
"Unexpected end of file in attribute value (').",
"eof-in-attribute-value-no-quotes":
"Unexpected end of file in attribute value.",
"unexpected-EOF-after-solidus-in-tag":
"Unexpected end of file in tag. Expected >",
"unexpected-character-after-solidus-in-tag":
"Unexpected character after / in tag. Expected >",
"expected-dashes-or-doctype":
"Expected '--' or 'DOCTYPE'. Not found.",
"unexpected-bang-after-double-dash-in-comment":
"Unexpected ! after -- in comment",
"unexpected-space-after-double-dash-in-comment":
"Unexpected space after -- in comment",
"incorrect-comment":
"Incorrect comment.",
"eof-in-comment":
"Unexpected end of file in comment.",
"eof-in-comment-end-dash":
"Unexpected end of file in comment (-)",
"unexpected-dash-after-double-dash-in-comment":
"Unexpected '-' after '--' found in comment.",
"eof-in-comment-double-dash":
"Unexpected end of file in comment (--).",
"eof-in-comment-end-space-state":
"Unexpected end of file in comment.",
"eof-in-comment-end-bang-state":
"Unexpected end of file in comment.",
"unexpected-char-in-comment":
"Unexpected character in comment found.",
"need-space-after-doctype":
"No space after literal string 'DOCTYPE'.",
"expected-doctype-name-but-got-right-bracket":
"Unexpected > character. Expected DOCTYPE name.",
"expected-doctype-name-but-got-eof":
"Unexpected end of file. Expected DOCTYPE name.",
"eof-in-doctype-name":
"Unexpected end of file in DOCTYPE name.",
"eof-in-doctype":
"Unexpected end of file in DOCTYPE.",
"expected-space-or-right-bracket-in-doctype":
"Expected space or '>'. Got '%(data)s'",
"unexpected-end-of-doctype":
"Unexpected end of DOCTYPE.",
"unexpected-char-in-doctype":
"Unexpected character in DOCTYPE.",
"eof-in-innerhtml":
"XXX innerHTML EOF",
"unexpected-doctype":
"Unexpected DOCTYPE. Ignored.",
"non-html-root":
"html needs to be the first start tag.",
"expected-doctype-but-got-eof":
"Unexpected End of file. Expected DOCTYPE.",
"unknown-doctype":
"Erroneous DOCTYPE.",
"expected-doctype-but-got-chars":
"Unexpected non-space characters. Expected DOCTYPE.",
"expected-doctype-but-got-start-tag":
"Unexpected start tag (%(name)s). Expected DOCTYPE.",
"expected-doctype-but-got-end-tag":
"Unexpected end tag (%(name)s). Expected DOCTYPE.",
"end-tag-after-implied-root":
"Unexpected end tag (%(name)s) after the (implied) root element.",
"expected-named-closing-tag-but-got-eof":
"Unexpected end of file. Expected end tag (%(name)s).",
"two-heads-are-not-better-than-one":
"Unexpected start tag head in existing head. Ignored.",
"unexpected-end-tag":
"Unexpected end tag (%(name)s). Ignored.",
"unexpected-start-tag-out-of-my-head":
"Unexpected start tag (%(name)s) that can be in head. Moved.",
"unexpected-start-tag":
"Unexpected start tag (%(name)s).",
"missing-end-tag":
"Missing end tag (%(name)s).",
"missing-end-tags":
"Missing end tags (%(name)s).",
"unexpected-start-tag-implies-end-tag":
"Unexpected start tag (%(startName)s) "
"implies end tag (%(endName)s).",
"unexpected-start-tag-treated-as":
"Unexpected start tag (%(originalName)s). Treated as %(newName)s.",
"deprecated-tag":
"Unexpected start tag %(name)s. Don't use it!",
"unexpected-start-tag-ignored":
"Unexpected start tag %(name)s. Ignored.",
"expected-one-end-tag-but-got-another":
"Unexpected end tag (%(gotName)s). "
"Missing end tag (%(expectedName)s).",
"end-tag-too-early":
"End tag (%(name)s) seen too early. Expected other end tag.",
"end-tag-too-early-named":
"Unexpected end tag (%(gotName)s). Expected end tag (%(expectedName)s).",
"end-tag-too-early-ignored":
"End tag (%(name)s) seen too early. Ignored.",
"adoption-agency-1.1":
"End tag (%(name)s) violates step 1, "
"paragraph 1 of the adoption agency algorithm.",
"adoption-agency-1.2":
"End tag (%(name)s) violates step 1, "
"paragraph 2 of the adoption agency algorithm.",
"adoption-agency-1.3":
"End tag (%(name)s) violates step 1, "
"paragraph 3 of the adoption agency algorithm.",
"adoption-agency-4.4":
"End tag (%(name)s) violates step 4, "
"paragraph 4 of the adoption agency algorithm.",
"unexpected-end-tag-treated-as":
"Unexpected end tag (%(originalName)s). Treated as %(newName)s.",
"no-end-tag":
"This element (%(name)s) has no end tag.",
"unexpected-implied-end-tag-in-table":
"Unexpected implied end tag (%(name)s) in the table phase.",
"unexpected-implied-end-tag-in-table-body":
"Unexpected implied end tag (%(name)s) in the table body phase.",
"unexpected-char-implies-table-voodoo":
"Unexpected non-space characters in "
"table context caused voodoo mode.",
"unexpected-hidden-input-in-table":
"Unexpected input with type hidden in table context.",
"unexpected-form-in-table":
"Unexpected form in table context.",
"unexpected-start-tag-implies-table-voodoo":
"Unexpected start tag (%(name)s) in "
"table context caused voodoo mode.",
"unexpected-end-tag-implies-table-voodoo":
"Unexpected end tag (%(name)s) in "
"table context caused voodoo mode.",
"unexpected-cell-in-table-body":
"Unexpected table cell start tag (%(name)s) "
"in the table body phase.",
"unexpected-cell-end-tag":
"Got table cell end tag (%(name)s) "
"while required end tags are missing.",
"unexpected-end-tag-in-table-body":
"Unexpected end tag (%(name)s) in the table body phase. Ignored.",
"unexpected-implied-end-tag-in-table-row":
"Unexpected implied end tag (%(name)s) in the table row phase.",
"unexpected-end-tag-in-table-row":
"Unexpected end tag (%(name)s) in the table row phase. Ignored.",
"unexpected-select-in-select":
"Unexpected select start tag in the select phase "
"treated as select end tag.",
"unexpected-input-in-select":
"Unexpected input start tag in the select phase.",
"unexpected-start-tag-in-select":
"Unexpected start tag token (%(name)s in the select phase. "
"Ignored.",
"unexpected-end-tag-in-select":
"Unexpected end tag (%(name)s) in the select phase. Ignored.",
"unexpected-table-element-start-tag-in-select-in-table":
"Unexpected table element start tag (%(name)s) in the select in table phase.",
"unexpected-table-element-end-tag-in-select-in-table":
"Unexpected table element end tag (%(name)s) in the select in table phase.",
"unexpected-char-after-body":
"Unexpected non-space characters in the after body phase.",
"unexpected-start-tag-after-body":
"Unexpected start tag token (%(name)s)"
" in the after body phase.",
"unexpected-end-tag-after-body":
"Unexpected end tag token (%(name)s)"
" in the after body phase.",
"unexpected-char-in-frameset":
"Unexpected characters in the frameset phase. Characters ignored.",
"unexpected-start-tag-in-frameset":
"Unexpected start tag token (%(name)s)"
" in the frameset phase. Ignored.",
"unexpected-frameset-in-frameset-innerhtml":
"Unexpected end tag token (frameset) "
"in the frameset phase (innerHTML).",
"unexpected-end-tag-in-frameset":
"Unexpected end tag token (%(name)s)"
" in the frameset phase. Ignored.",
"unexpected-char-after-frameset":
"Unexpected non-space characters in the "
"after frameset phase. Ignored.",
"unexpected-start-tag-after-frameset":
"Unexpected start tag (%(name)s)"
" in the after frameset phase. Ignored.",
"unexpected-end-tag-after-frameset":
"Unexpected end tag (%(name)s)"
" in the after frameset phase. Ignored.",
"unexpected-end-tag-after-body-innerhtml":
"Unexpected end tag after body(innerHtml)",
"expected-eof-but-got-char":
"Unexpected non-space characters. Expected end of file.",
"expected-eof-but-got-start-tag":
"Unexpected start tag (%(name)s)"
". Expected end of file.",
"expected-eof-but-got-end-tag":
"Unexpected end tag (%(name)s)"
". Expected end of file.",
"eof-in-table":
"Unexpected end of file. Expected table content.",
"eof-in-select":
"Unexpected end of file. Expected select content.",
"eof-in-frameset":
"Unexpected end of file. Expected frameset content.",
"eof-in-script-in-script":
"Unexpected end of file. Expected script content.",
"eof-in-foreign-lands":
"Unexpected end of file. Expected foreign content",
"non-void-element-with-trailing-solidus":
"Trailing solidus not allowed on element %(name)s",
"unexpected-html-element-in-foreign-content":
"Element %(name)s not allowed in a non-html context",
"unexpected-end-tag-before-html":
"Unexpected end tag (%(name)s) before html.",
"XXX-undefined-error":
"Undefined error (this sucks and should be fixed)",
}
namespaces = {
"html": "http://www.w3.org/1999/xhtml",
"mathml": "http://www.w3.org/1998/Math/MathML",
"svg": "http://www.w3.org/2000/svg",
"xlink": "http://www.w3.org/1999/xlink",
"xml": "http://www.w3.org/XML/1998/namespace",
"xmlns": "http://www.w3.org/2000/xmlns/"
}
scopingElements = frozenset([
(namespaces["html"], "applet"),
(namespaces["html"], "caption"),
(namespaces["html"], "html"),
(namespaces["html"], "marquee"),
(namespaces["html"], "object"),
(namespaces["html"], "table"),
(namespaces["html"], "td"),
(namespaces["html"], "th"),
(namespaces["mathml"], "mi"),
(namespaces["mathml"], "mo"),
(namespaces["mathml"], "mn"),
(namespaces["mathml"], "ms"),
(namespaces["mathml"], "mtext"),
(namespaces["mathml"], "annotation-xml"),
(namespaces["svg"], "foreignObject"),
(namespaces["svg"], "desc"),
(namespaces["svg"], "title"),
])
formattingElements = frozenset([
(namespaces["html"], "a"),
(namespaces["html"], "b"),
(namespaces["html"], "big"),
(namespaces["html"], "code"),
(namespaces["html"], "em"),
(namespaces["html"], "font"),
(namespaces["html"], "i"),
(namespaces["html"], "nobr"),
(namespaces["html"], "s"),
(namespaces["html"], "small"),
(namespaces["html"], "strike"),
(namespaces["html"], "strong"),
(namespaces["html"], "tt"),
(namespaces["html"], "u")
])
specialElements = frozenset([
(namespaces["html"], "address"),
(namespaces["html"], "applet"),
(namespaces["html"], "area"),
(namespaces["html"], "article"),
(namespaces["html"], "aside"),
(namespaces["html"], "base"),
(namespaces["html"], "basefont"),
(namespaces["html"], "bgsound"),
(namespaces["html"], "blockquote"),
(namespaces["html"], "body"),
(namespaces["html"], "br"),
(namespaces["html"], "button"),
(namespaces["html"], "caption"),
(namespaces["html"], "center"),
(namespaces["html"], "col"),
(namespaces["html"], "colgroup"),
(namespaces["html"], "command"),
(namespaces["html"], "dd"),
(namespaces["html"], "details"),
(namespaces["html"], "dir"),
(namespaces["html"], "div"),
(namespaces["html"], "dl"),
(namespaces["html"], "dt"),
(namespaces["html"], "embed"),
(namespaces["html"], "fieldset"),
(namespaces["html"], "figure"),
(namespaces["html"], "footer"),
(namespaces["html"], "form"),
(namespaces["html"], "frame"),
(namespaces["html"], "frameset"),
(namespaces["html"], "h1"),
(namespaces["html"], "h2"),
(namespaces["html"], "h3"),
(namespaces["html"], "h4"),
(namespaces["html"], "h5"),
(namespaces["html"], "h6"),
(namespaces["html"], "head"),
(namespaces["html"], "header"),
(namespaces["html"], "hr"),
(namespaces["html"], "html"),
(namespaces["html"], "iframe"),
# Note that image is commented out in the spec as "this isn't an
# element that can end up on the stack, so it doesn't matter,"
(namespaces["html"], "image"),
(namespaces["html"], "img"),
(namespaces["html"], "input"),
(namespaces["html"], "isindex"),
(namespaces["html"], "li"),
(namespaces["html"], "link"),
(namespaces["html"], "listing"),
(namespaces["html"], "marquee"),
(namespaces["html"], "menu"),
(namespaces["html"], "meta"),
(namespaces["html"], "nav"),
(namespaces["html"], "noembed"),
(namespaces["html"], "noframes"),
(namespaces["html"], "noscript"),
(namespaces["html"], "object"),
(namespaces["html"], "ol"),
(namespaces["html"], "p"),
(namespaces["html"], "param"),
(namespaces["html"], "plaintext"),
(namespaces["html"], "pre"),
(namespaces["html"], "script"),
(namespaces["html"], "section"),
(namespaces["html"], "select"),
(namespaces["html"], "style"),
(namespaces["html"], "table"),
(namespaces["html"], "tbody"),
(namespaces["html"], "td"),
(namespaces["html"], "textarea"),
(namespaces["html"], "tfoot"),
(namespaces["html"], "th"),
(namespaces["html"], "thead"),
(namespaces["html"], "title"),
(namespaces["html"], "tr"),
(namespaces["html"], "ul"),
(namespaces["html"], "wbr"),
(namespaces["html"], "xmp"),
(namespaces["svg"], "foreignObject")
])
htmlIntegrationPointElements = frozenset([
(namespaces["mathml"], "annotaion-xml"),
(namespaces["svg"], "foreignObject"),
(namespaces["svg"], "desc"),
(namespaces["svg"], "title")
])
mathmlTextIntegrationPointElements = frozenset([
(namespaces["mathml"], "mi"),
(namespaces["mathml"], "mo"),
(namespaces["mathml"], "mn"),
(namespaces["mathml"], "ms"),
(namespaces["mathml"], "mtext")
])
adjustForeignAttributes = {
"xlink:actuate": ("xlink", "actuate", namespaces["xlink"]),
"xlink:arcrole": ("xlink", "arcrole", namespaces["xlink"]),
"xlink:href": ("xlink", "href", namespaces["xlink"]),
"xlink:role": ("xlink", "role", namespaces["xlink"]),
"xlink:show": ("xlink", "show", namespaces["xlink"]),
"xlink:title": ("xlink", "title", namespaces["xlink"]),
"xlink:type": ("xlink", "type", namespaces["xlink"]),
"xml:base": ("xml", "base", namespaces["xml"]),
"xml:lang": ("xml", "lang", namespaces["xml"]),
"xml:space": ("xml", "space", namespaces["xml"]),
"xmlns": (None, "xmlns", namespaces["xmlns"]),
"xmlns:xlink": ("xmlns", "xlink", namespaces["xmlns"])
}
unadjustForeignAttributes = dict([((ns, local), qname) for qname, (prefix, local, ns) in
adjustForeignAttributes.items()])
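# For example, adjustForeignAttributes["xlink:href"] is
# ("xlink", "href", namespaces["xlink"]), and the inverse map recovers the
# qualified name:
#   unadjustForeignAttributes[(namespaces["xlink"], "href")] == "xlink:href"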
spaceCharacters = frozenset([
"\t",
"\n",
"\u000C",
" ",
"\r"
])
tableInsertModeElements = frozenset([
"table",
"tbody",
"tfoot",
"thead",
"tr"
])
asciiLowercase = frozenset(string.ascii_lowercase)
asciiUppercase = frozenset(string.ascii_uppercase)
asciiLetters = frozenset(string.ascii_letters)
digits = frozenset(string.digits)
hexDigits = frozenset(string.hexdigits)
asciiUpper2Lower = dict([(ord(c), ord(c.lower()))
for c in string.ascii_uppercase])
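# asciiUpper2Lower maps the ordinals of A-Z to those of a-z, in the mapping
# form accepted by text.translate(), e.g.:
#   "Content-TYPE".translate(asciiUpper2Lower) == "content-type"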
# Heading elements need to be ordered
headingElements = (
"h1",
"h2",
"h3",
"h4",
"h5",
"h6"
)
voidElements = frozenset([
"base",
"command",
"event-source",
"link",
"meta",
"hr",
"br",
"img",
"embed",
"param",
"area",
"col",
"input",
"source",
"track"
])
cdataElements = frozenset(['title', 'textarea'])
rcdataElements = frozenset([
'style',
'script',
'xmp',
'iframe',
'noembed',
'noframes',
'noscript'
])
booleanAttributes = {
"": frozenset(["irrelevant"]),
"style": frozenset(["scoped"]),
"img": frozenset(["ismap"]),
"audio": frozenset(["autoplay", "controls"]),
"video": frozenset(["autoplay", "controls"]),
"script": frozenset(["defer", "async"]),
"details": frozenset(["open"]),
"datagrid": frozenset(["multiple", "disabled"]),
"command": frozenset(["hidden", "disabled", "checked", "default"]),
"hr": frozenset(["noshade"]),
"menu": frozenset(["autosubmit"]),
"fieldset": frozenset(["disabled", "readonly"]),
"option": frozenset(["disabled", "readonly", "selected"]),
"optgroup": frozenset(["disabled", "readonly"]),
"button": frozenset(["disabled", "autofocus"]),
"input": frozenset(["disabled", "readonly", "required", "autofocus", "checked", "ismap"]),
"select": frozenset(["disabled", "readonly", "autofocus", "multiple"]),
"output": frozenset(["disabled", "readonly"]),
}
# entitiesWindows1252 has to be _ordered_ and needs to have an index. It
# therefore can't be a frozenset.
entitiesWindows1252 = (
8364, # 0x80 0x20AC EURO SIGN
65533, # 0x81 UNDEFINED
8218, # 0x82 0x201A SINGLE LOW-9 QUOTATION MARK
402, # 0x83 0x0192 LATIN SMALL LETTER F WITH HOOK
8222, # 0x84 0x201E DOUBLE LOW-9 QUOTATION MARK
8230, # 0x85 0x2026 HORIZONTAL ELLIPSIS
8224, # 0x86 0x2020 DAGGER
8225, # 0x87 0x2021 DOUBLE DAGGER
710, # 0x88 0x02C6 MODIFIER LETTER CIRCUMFLEX ACCENT
8240, # 0x89 0x2030 PER MILLE SIGN
352, # 0x8A 0x0160 LATIN CAPITAL LETTER S WITH CARON
8249, # 0x8B 0x2039 SINGLE LEFT-POINTING ANGLE QUOTATION MARK
338, # 0x8C 0x0152 LATIN CAPITAL LIGATURE OE
65533, # 0x8D UNDEFINED
381, # 0x8E 0x017D LATIN CAPITAL LETTER Z WITH CARON
65533, # 0x8F UNDEFINED
65533, # 0x90 UNDEFINED
8216, # 0x91 0x2018 LEFT SINGLE QUOTATION MARK
8217, # 0x92 0x2019 RIGHT SINGLE QUOTATION MARK
8220, # 0x93 0x201C LEFT DOUBLE QUOTATION MARK
8221, # 0x94 0x201D RIGHT DOUBLE QUOTATION MARK
8226, # 0x95 0x2022 BULLET
8211, # 0x96 0x2013 EN DASH
8212, # 0x97 0x2014 EM DASH
732, # 0x98 0x02DC SMALL TILDE
8482, # 0x99 0x2122 TRADE MARK SIGN
353, # 0x9A 0x0161 LATIN SMALL LETTER S WITH CARON
8250, # 0x9B 0x203A SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
339, # 0x9C 0x0153 LATIN SMALL LIGATURE OE
65533, # 0x9D UNDEFINED
382, # 0x9E 0x017E LATIN SMALL LETTER Z WITH CARON
376 # 0x9F 0x0178 LATIN CAPITAL LETTER Y WITH DIAERESIS
)
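# The table is indexed by a byte's offset from 0x80; e.g. a numeric character
# reference to 0x93 resolves as:
#   entitiesWindows1252[0x93 - 0x80] == 8220  # U+201C LEFT DOUBLE QUOTATION MARK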
xmlEntities = frozenset(['lt;', 'gt;', 'amp;', 'apos;', 'quot;'])
entities = {
"AElig": "\xc6",
"AElig;": "\xc6",
"AMP": "&",
"AMP;": "&",
"Aacute": "\xc1",
"Aacute;": "\xc1",
"Abreve;": "\u0102",
"Acirc": "\xc2",
"Acirc;": "\xc2",
"Acy;": "\u0410",
"Afr;": "\U0001d504",
"Agrave": "\xc0",
"Agrave;": "\xc0",
"Alpha;": "\u0391",
"Amacr;": "\u0100",
"And;": "\u2a53",
"Aogon;": "\u0104",
"Aopf;": "\U0001d538",
"ApplyFunction;": "\u2061",
"Aring": "\xc5",
"Aring;": "\xc5",
"Ascr;": "\U0001d49c",
"Assign;": "\u2254",
"Atilde": "\xc3",
"Atilde;": "\xc3",
"Auml": "\xc4",
"Auml;": "\xc4",
"Backslash;": "\u2216",
"Barv;": "\u2ae7",
"Barwed;": "\u2306",
"Bcy;": "\u0411",
"Because;": "\u2235",
"Bernoullis;": "\u212c",
"Beta;": "\u0392",
"Bfr;": "\U0001d505",
"Bopf;": "\U0001d539",
"Breve;": "\u02d8",
"Bscr;": "\u212c",
"Bumpeq;": "\u224e",
"CHcy;": "\u0427",
"COPY": "\xa9",
"COPY;": "\xa9",
"Cacute;": "\u0106",
"Cap;": "\u22d2",
"CapitalDifferentialD;": "\u2145",
"Cayleys;": "\u212d",
"Ccaron;": "\u010c",
"Ccedil": "\xc7",
"Ccedil;": "\xc7",
"Ccirc;": "\u0108",
"Cconint;": "\u2230",
"Cdot;": "\u010a",
"Cedilla;": "\xb8",
"CenterDot;": "\xb7",
"Cfr;": "\u212d",
"Chi;": "\u03a7",
"CircleDot;": "\u2299",
"CircleMinus;": "\u2296",
"CirclePlus;": "\u2295",
"CircleTimes;": "\u2297",
"ClockwiseContourIntegral;": "\u2232",
"CloseCurlyDoubleQuote;": "\u201d",
"CloseCurlyQuote;": "\u2019",
"Colon;": "\u2237",
"Colone;": "\u2a74",
"Congruent;": "\u2261",
"Conint;": "\u222f",
"ContourIntegral;": "\u222e",
"Copf;": "\u2102",
"Coproduct;": "\u2210",
"CounterClockwiseContourIntegral;": "\u2233",
"Cross;": "\u2a2f",
"Cscr;": "\U0001d49e",
"Cup;": "\u22d3",
"CupCap;": "\u224d",
"DD;": "\u2145",
"DDotrahd;": "\u2911",
"DJcy;": "\u0402",
"DScy;": "\u0405",
"DZcy;": "\u040f",
"Dagger;": "\u2021",
"Darr;": "\u21a1",
"Dashv;": "\u2ae4",
"Dcaron;": "\u010e",
"Dcy;": "\u0414",
"Del;": "\u2207",
"Delta;": "\u0394",
"Dfr;": "\U0001d507",
"DiacriticalAcute;": "\xb4",
"DiacriticalDot;": "\u02d9",
"DiacriticalDoubleAcute;": "\u02dd",
"DiacriticalGrave;": "`",
"DiacriticalTilde;": "\u02dc",
"Diamond;": "\u22c4",
"DifferentialD;": "\u2146",
"Dopf;": "\U0001d53b",
"Dot;": "\xa8",
"DotDot;": "\u20dc",
"DotEqual;": "\u2250",
"DoubleContourIntegral;": "\u222f",
"DoubleDot;": "\xa8",
"DoubleDownArrow;": "\u21d3",
"DoubleLeftArrow;": "\u21d0",
"DoubleLeftRightArrow;": "\u21d4",
"DoubleLeftTee;": "\u2ae4",
"DoubleLongLeftArrow;": "\u27f8",
"DoubleLongLeftRightArrow;": "\u27fa",
"DoubleLongRightArrow;": "\u27f9",
"DoubleRightArrow;": "\u21d2",
"DoubleRightTee;": "\u22a8",
"DoubleUpArrow;": "\u21d1",
"DoubleUpDownArrow;": "\u21d5",
"DoubleVerticalBar;": "\u2225",
"DownArrow;": "\u2193",
"DownArrowBar;": "\u2913",
"DownArrowUpArrow;": "\u21f5",
"DownBreve;": "\u0311",
"DownLeftRightVector;": "\u2950",
"DownLeftTeeVector;": "\u295e",
"DownLeftVector;": "\u21bd",
"DownLeftVectorBar;": "\u2956",
"DownRightTeeVector;": "\u295f",
"DownRightVector;": "\u21c1",
"DownRightVectorBar;": "\u2957",
"DownTee;": "\u22a4",
"DownTeeArrow;": "\u21a7",
"Downarrow;": "\u21d3",
"Dscr;": "\U0001d49f",
"Dstrok;": "\u0110",
"ENG;": "\u014a",
"ETH": "\xd0",
"ETH;": "\xd0",
"Eacute": "\xc9",
"Eacute;": "\xc9",
"Ecaron;": "\u011a",
"Ecirc": "\xca",
"Ecirc;": "\xca",
"Ecy;": "\u042d",
"Edot;": "\u0116",
"Efr;": "\U0001d508",
"Egrave": "\xc8",
"Egrave;": "\xc8",
"Element;": "\u2208",
"Emacr;": "\u0112",
"EmptySmallSquare;": "\u25fb",
"EmptyVerySmallSquare;": "\u25ab",
"Eogon;": "\u0118",
"Eopf;": "\U0001d53c",
"Epsilon;": "\u0395",
"Equal;": "\u2a75",
"EqualTilde;": "\u2242",
"Equilibrium;": "\u21cc",
"Escr;": "\u2130",
"Esim;": "\u2a73",
"Eta;": "\u0397",
"Euml": "\xcb",
"Euml;": "\xcb",
"Exists;": "\u2203",
"ExponentialE;": "\u2147",
"Fcy;": "\u0424",
"Ffr;": "\U0001d509",
"FilledSmallSquare;": "\u25fc",
"FilledVerySmallSquare;": "\u25aa",
"Fopf;": "\U0001d53d",
"ForAll;": "\u2200",
"Fouriertrf;": "\u2131",
"Fscr;": "\u2131",
"GJcy;": "\u0403",
"GT": ">",
"GT;": ">",
"Gamma;": "\u0393",
"Gammad;": "\u03dc",
"Gbreve;": "\u011e",
"Gcedil;": "\u0122",
"Gcirc;": "\u011c",
"Gcy;": "\u0413",
"Gdot;": "\u0120",
"Gfr;": "\U0001d50a",
"Gg;": "\u22d9",
"Gopf;": "\U0001d53e",
"GreaterEqual;": "\u2265",
"GreaterEqualLess;": "\u22db",
"GreaterFullEqual;": "\u2267",
"GreaterGreater;": "\u2aa2",
"GreaterLess;": "\u2277",
"GreaterSlantEqual;": "\u2a7e",
"GreaterTilde;": "\u2273",
"Gscr;": "\U0001d4a2",
"Gt;": "\u226b",
"HARDcy;": "\u042a",
"Hacek;": "\u02c7",
"Hat;": "^",
"Hcirc;": "\u0124",
"Hfr;": "\u210c",
"HilbertSpace;": "\u210b",
"Hopf;": "\u210d",
"HorizontalLine;": "\u2500",
"Hscr;": "\u210b",
"Hstrok;": "\u0126",
"HumpDownHump;": "\u224e",
"HumpEqual;": "\u224f",
"IEcy;": "\u0415",
"IJlig;": "\u0132",
"IOcy;": "\u0401",
"Iacute": "\xcd",
"Iacute;": "\xcd",
"Icirc": "\xce",
"Icirc;": "\xce",
"Icy;": "\u0418",
"Idot;": "\u0130",
"Ifr;": "\u2111",
"Igrave": "\xcc",
"Igrave;": "\xcc",
"Im;": "\u2111",
"Imacr;": "\u012a",
"ImaginaryI;": "\u2148",
"Implies;": "\u21d2",
"Int;": "\u222c",
"Integral;": "\u222b",
"Intersection;": "\u22c2",
"InvisibleComma;": "\u2063",
"InvisibleTimes;": "\u2062",
"Iogon;": "\u012e",
"Iopf;": "\U0001d540",
"Iota;": "\u0399",
"Iscr;": "\u2110",
"Itilde;": "\u0128",
"Iukcy;": "\u0406",
"Iuml": "\xcf",
"Iuml;": "\xcf",
"Jcirc;": "\u0134",
"Jcy;": "\u0419",
"Jfr;": "\U0001d50d",
"Jopf;": "\U0001d541",
"Jscr;": "\U0001d4a5",
"Jsercy;": "\u0408",
"Jukcy;": "\u0404",
"KHcy;": "\u0425",
"KJcy;": "\u040c",
"Kappa;": "\u039a",
"Kcedil;": "\u0136",
"Kcy;": "\u041a",
"Kfr;": "\U0001d50e",
"Kopf;": "\U0001d542",
"Kscr;": "\U0001d4a6",
"LJcy;": "\u0409",
"LT": "<",
"LT;": "<",
"Lacute;": "\u0139",
"Lambda;": "\u039b",
"Lang;": "\u27ea",
"Laplacetrf;": "\u2112",
"Larr;": "\u219e",
"Lcaron;": "\u013d",
"Lcedil;": "\u013b",
"Lcy;": "\u041b",
"LeftAngleBracket;": "\u27e8",
"LeftArrow;": "\u2190",
"LeftArrowBar;": "\u21e4",
"LeftArrowRightArrow;": "\u21c6",
"LeftCeiling;": "\u2308",
"LeftDoubleBracket;": "\u27e6",
"LeftDownTeeVector;": "\u2961",
"LeftDownVector;": "\u21c3",
"LeftDownVectorBar;": "\u2959",
"LeftFloor;": "\u230a",
"LeftRightArrow;": "\u2194",
"LeftRightVector;": "\u294e",
"LeftTee;": "\u22a3",
"LeftTeeArrow;": "\u21a4",
"LeftTeeVector;": "\u295a",
"LeftTriangle;": "\u22b2",
"LeftTriangleBar;": "\u29cf",
"LeftTriangleEqual;": "\u22b4",
"LeftUpDownVector;": "\u2951",
"LeftUpTeeVector;": "\u2960",
"LeftUpVector;": "\u21bf",
"LeftUpVectorBar;": "\u2958",
"LeftVector;": "\u21bc",
"LeftVectorBar;": "\u2952",
"Leftarrow;": "\u21d0",
"Leftrightarrow;": "\u21d4",
"LessEqualGreater;": "\u22da",
"LessFullEqual;": "\u2266",
"LessGreater;": "\u2276",
"LessLess;": "\u2aa1",
"LessSlantEqual;": "\u2a7d",
"LessTilde;": "\u2272",
"Lfr;": "\U0001d50f",
"Ll;": "\u22d8",
"Lleftarrow;": "\u21da",
"Lmidot;": "\u013f",
"LongLeftArrow;": "\u27f5",
"LongLeftRightArrow;": "\u27f7",
"LongRightArrow;": "\u27f6",
"Longleftarrow;": "\u27f8",
"Longleftrightarrow;": "\u27fa",
"Longrightarrow;": "\u27f9",
"Lopf;": "\U0001d543",
"LowerLeftArrow;": "\u2199",
"LowerRightArrow;": "\u2198",
"Lscr;": "\u2112",
"Lsh;": "\u21b0",
"Lstrok;": "\u0141",
"Lt;": "\u226a",
"Map;": "\u2905",
"Mcy;": "\u041c",
"MediumSpace;": "\u205f",
"Mellintrf;": "\u2133",
"Mfr;": "\U0001d510",
"MinusPlus;": "\u2213",
"Mopf;": "\U0001d544",
"Mscr;": "\u2133",
"Mu;": "\u039c",
"NJcy;": "\u040a",
"Nacute;": "\u0143",
"Ncaron;": "\u0147",
"Ncedil;": "\u0145",
"Ncy;": "\u041d",
"NegativeMediumSpace;": "\u200b",
"NegativeThickSpace;": "\u200b",
"NegativeThinSpace;": "\u200b",
"NegativeVeryThinSpace;": "\u200b",
"NestedGreaterGreater;": "\u226b",
"NestedLessLess;": "\u226a",
"NewLine;": "\n",
"Nfr;": "\U0001d511",
"NoBreak;": "\u2060",
"NonBreakingSpace;": "\xa0",
"Nopf;": "\u2115",
"Not;": "\u2aec",
"NotCongruent;": "\u2262",
"NotCupCap;": "\u226d",
"NotDoubleVerticalBar;": "\u2226",
"NotElement;": "\u2209",
"NotEqual;": "\u2260",
"NotEqualTilde;": "\u2242\u0338",
"NotExists;": "\u2204",
"NotGreater;": "\u226f",
"NotGreaterEqual;": "\u2271",
"NotGreaterFullEqual;": "\u2267\u0338",
"NotGreaterGreater;": "\u226b\u0338",
"NotGreaterLess;": "\u2279",
"NotGreaterSlantEqual;": "\u2a7e\u0338",
"NotGreaterTilde;": "\u2275",
"NotHumpDownHump;": "\u224e\u0338",
"NotHumpEqual;": "\u224f\u0338",
"NotLeftTriangle;": "\u22ea",
"NotLeftTriangleBar;": "\u29cf\u0338",
"NotLeftTriangleEqual;": "\u22ec",
"NotLess;": "\u226e",
"NotLessEqual;": "\u2270",
"NotLessGreater;": "\u2278",
"NotLessLess;": "\u226a\u0338",
"NotLessSlantEqual;": "\u2a7d\u0338",
"NotLessTilde;": "\u2274",
"NotNestedGreaterGreater;": "\u2aa2\u0338",
"NotNestedLessLess;": "\u2aa1\u0338",
"NotPrecedes;": "\u2280",
"NotPrecedesEqual;": "\u2aaf\u0338",
"NotPrecedesSlantEqual;": "\u22e0",
"NotReverseElement;": "\u220c",
"NotRightTriangle;": "\u22eb",
"NotRightTriangleBar;": "\u29d0\u0338",
"NotRightTriangleEqual;": "\u22ed",
"NotSquareSubset;": "\u228f\u0338",
"NotSquareSubsetEqual;": "\u22e2",
"NotSquareSuperset;": "\u2290\u0338",
"NotSquareSupersetEqual;": "\u22e3",
"NotSubset;": "\u2282\u20d2",
"NotSubsetEqual;": "\u2288",
"NotSucceeds;": "\u2281",
"NotSucceedsEqual;": "\u2ab0\u0338",
"NotSucceedsSlantEqual;": "\u22e1",
"NotSucceedsTilde;": "\u227f\u0338",
"NotSuperset;": "\u2283\u20d2",
"NotSupersetEqual;": "\u2289",
"NotTilde;": "\u2241",
"NotTildeEqual;": "\u2244",
"NotTildeFullEqual;": "\u2247",
"NotTildeTilde;": "\u2249",
"NotVerticalBar;": "\u2224",
"Nscr;": "\U0001d4a9",
"Ntilde": "\xd1",
"Ntilde;": "\xd1",
"Nu;": "\u039d",
"OElig;": "\u0152",
"Oacute": "\xd3",
"Oacute;": "\xd3",
"Ocirc": "\xd4",
"Ocirc;": "\xd4",
"Ocy;": "\u041e",
"Odblac;": "\u0150",
"Ofr;": "\U0001d512",
"Ograve": "\xd2",
"Ograve;": "\xd2",
"Omacr;": "\u014c",
"Omega;": "\u03a9",
"Omicron;": "\u039f",
"Oopf;": "\U0001d546",
"OpenCurlyDoubleQuote;": "\u201c",
"OpenCurlyQuote;": "\u2018",
"Or;": "\u2a54",
"Oscr;": "\U0001d4aa",
"Oslash": "\xd8",
"Oslash;": "\xd8",
"Otilde": "\xd5",
"Otilde;": "\xd5",
"Otimes;": "\u2a37",
"Ouml": "\xd6",
"Ouml;": "\xd6",
"OverBar;": "\u203e",
"OverBrace;": "\u23de",
"OverBracket;": "\u23b4",
"OverParenthesis;": "\u23dc",
"PartialD;": "\u2202",
"Pcy;": "\u041f",
"Pfr;": "\U0001d513",
"Phi;": "\u03a6",
"Pi;": "\u03a0",
"PlusMinus;": "\xb1",
"Poincareplane;": "\u210c",
"Popf;": "\u2119",
"Pr;": "\u2abb",
"Precedes;": "\u227a",
"PrecedesEqual;": "\u2aaf",
"PrecedesSlantEqual;": "\u227c",
"PrecedesTilde;": "\u227e",
"Prime;": "\u2033",
"Product;": "\u220f",
"Proportion;": "\u2237",
"Proportional;": "\u221d",
"Pscr;": "\U0001d4ab",
"Psi;": "\u03a8",
"QUOT": "\"",
"QUOT;": "\"",
"Qfr;": "\U0001d514",
"Qopf;": "\u211a",
"Qscr;": "\U0001d4ac",
"RBarr;": "\u2910",
"REG": "\xae",
"REG;": "\xae",
"Racute;": "\u0154",
"Rang;": "\u27eb",
"Rarr;": "\u21a0",
"Rarrtl;": "\u2916",
"Rcaron;": "\u0158",
"Rcedil;": "\u0156",
"Rcy;": "\u0420",
"Re;": "\u211c",
"ReverseElement;": "\u220b",
"ReverseEquilibrium;": "\u21cb",
"ReverseUpEquilibrium;": "\u296f",
"Rfr;": "\u211c",
"Rho;": "\u03a1",
"RightAngleBracket;": "\u27e9",
"RightArrow;": "\u2192",
"RightArrowBar;": "\u21e5",
"RightArrowLeftArrow;": "\u21c4",
"RightCeiling;": "\u2309",
"RightDoubleBracket;": "\u27e7",
"RightDownTeeVector;": "\u295d",
"RightDownVector;": "\u21c2",
"RightDownVectorBar;": "\u2955",
"RightFloor;": "\u230b",
"RightTee;": "\u22a2",
"RightTeeArrow;": "\u21a6",
"RightTeeVector;": "\u295b",
"RightTriangle;": "\u22b3",
"RightTriangleBar;": "\u29d0",
"RightTriangleEqual;": "\u22b5",
"RightUpDownVector;": "\u294f",
"RightUpTeeVector;": "\u295c",
"RightUpVector;": "\u21be",
"RightUpVectorBar;": "\u2954",
"RightVector;": "\u21c0",
"RightVectorBar;": "\u2953",
"Rightarrow;": "\u21d2",
"Ropf;": "\u211d",
"RoundImplies;": "\u2970",
"Rrightarrow;": "\u21db",
"Rscr;": "\u211b",
"Rsh;": "\u21b1",
"RuleDelayed;": "\u29f4",
"SHCHcy;": "\u0429",
"SHcy;": "\u0428",
"SOFTcy;": "\u042c",
"Sacute;": "\u015a",
"Sc;": "\u2abc",
"Scaron;": "\u0160",
"Scedil;": "\u015e",
"Scirc;": "\u015c",
"Scy;": "\u0421",
"Sfr;": "\U0001d516",
"ShortDownArrow;": "\u2193",
"ShortLeftArrow;": "\u2190",
"ShortRightArrow;": "\u2192",
"ShortUpArrow;": "\u2191",
"Sigma;": "\u03a3",
"SmallCircle;": "\u2218",
"Sopf;": "\U0001d54a",
"Sqrt;": "\u221a",
"Square;": "\u25a1",
"SquareIntersection;": "\u2293",
"SquareSubset;": "\u228f",
"SquareSubsetEqual;": "\u2291",
"SquareSuperset;": "\u2290",
"SquareSupersetEqual;": "\u2292",
"SquareUnion;": "\u2294",
"Sscr;": "\U0001d4ae",
"Star;": "\u22c6",
"Sub;": "\u22d0",
"Subset;": "\u22d0",
"SubsetEqual;": "\u2286",
"Succeeds;": "\u227b",
"SucceedsEqual;": "\u2ab0",
"SucceedsSlantEqual;": "\u227d",
"SucceedsTilde;": "\u227f",
"SuchThat;": "\u220b",
"Sum;": "\u2211",
"Sup;": "\u22d1",
"Superset;": "\u2283",
"SupersetEqual;": "\u2287",
"Supset;": "\u22d1",
"THORN": "\xde",
"THORN;": "\xde",
"TRADE;": "\u2122",
"TSHcy;": "\u040b",
"TScy;": "\u0426",
"Tab;": "\t",
"Tau;": "\u03a4",
"Tcaron;": "\u0164",
"Tcedil;": "\u0162",
"Tcy;": "\u0422",
"Tfr;": "\U0001d517",
"Therefore;": "\u2234",
"Theta;": "\u0398",
"ThickSpace;": "\u205f\u200a",
"ThinSpace;": "\u2009",
"Tilde;": "\u223c",
"TildeEqual;": "\u2243",
"TildeFullEqual;": "\u2245",
"TildeTilde;": "\u2248",
"Topf;": "\U0001d54b",
"TripleDot;": "\u20db",
"Tscr;": "\U0001d4af",
"Tstrok;": "\u0166",
"Uacute": "\xda",
"Uacute;": "\xda",
"Uarr;": "\u219f",
"Uarrocir;": "\u2949",
"Ubrcy;": "\u040e",
"Ubreve;": "\u016c",
"Ucirc": "\xdb",
"Ucirc;": "\xdb",
"Ucy;": "\u0423",
"Udblac;": "\u0170",
"Ufr;": "\U0001d518",
"Ugrave": "\xd9",
"Ugrave;": "\xd9",
"Umacr;": "\u016a",
"UnderBar;": "_",
"UnderBrace;": "\u23df",
"UnderBracket;": "\u23b5",
"UnderParenthesis;": "\u23dd",
"Union;": "\u22c3",
"UnionPlus;": "\u228e",
"Uogon;": "\u0172",
"Uopf;": "\U0001d54c",
"UpArrow;": "\u2191",
"UpArrowBar;": "\u2912",
"UpArrowDownArrow;": "\u21c5",
"UpDownArrow;": "\u2195",
"UpEquilibrium;": "\u296e",
"UpTee;": "\u22a5",
"UpTeeArrow;": "\u21a5",
"Uparrow;": "\u21d1",
"Updownarrow;": "\u21d5",
"UpperLeftArrow;": "\u2196",
"UpperRightArrow;": "\u2197",
"Upsi;": "\u03d2",
"Upsilon;": "\u03a5",
"Uring;": "\u016e",
"Uscr;": "\U0001d4b0",
"Utilde;": "\u0168",
"Uuml": "\xdc",
"Uuml;": "\xdc",
"VDash;": "\u22ab",
"Vbar;": "\u2aeb",
"Vcy;": "\u0412",
"Vdash;": "\u22a9",
"Vdashl;": "\u2ae6",
"Vee;": "\u22c1",
"Verbar;": "\u2016",
"Vert;": "\u2016",
"VerticalBar;": "\u2223",
"VerticalLine;": "|",
"VerticalSeparator;": "\u2758",
"VerticalTilde;": "\u2240",
"VeryThinSpace;": "\u200a",
"Vfr;": "\U0001d519",
"Vopf;": "\U0001d54d",
"Vscr;": "\U0001d4b1",
"Vvdash;": "\u22aa",
"Wcirc;": "\u0174",
"Wedge;": "\u22c0",
"Wfr;": "\U0001d51a",
"Wopf;": "\U0001d54e",
"Wscr;": "\U0001d4b2",
"Xfr;": "\U0001d51b",
"Xi;": "\u039e",
"Xopf;": "\U0001d54f",
"Xscr;": "\U0001d4b3",
"YAcy;": "\u042f",
"YIcy;": "\u0407",
"YUcy;": "\u042e",
"Yacute": "\xdd",
"Yacute;": "\xdd",
"Ycirc;": "\u0176",
"Ycy;": "\u042b",
"Yfr;": "\U0001d51c",
"Yopf;": "\U0001d550",
"Yscr;": "\U0001d4b4",
"Yuml;": "\u0178",
"ZHcy;": "\u0416",
"Zacute;": "\u0179",
"Zcaron;": "\u017d",
"Zcy;": "\u0417",
"Zdot;": "\u017b",
"ZeroWidthSpace;": "\u200b",
"Zeta;": "\u0396",
"Zfr;": "\u2128",
"Zopf;": "\u2124",
"Zscr;": "\U0001d4b5",
"aacute": "\xe1",
"aacute;": "\xe1",
"abreve;": "\u0103",
"ac;": "\u223e",
"acE;": "\u223e\u0333",
"acd;": "\u223f",
"acirc": "\xe2",
"acirc;": "\xe2",
"acute": "\xb4",
"acute;": "\xb4",
"acy;": "\u0430",
"aelig": "\xe6",
"aelig;": "\xe6",
"af;": "\u2061",
"afr;": "\U0001d51e",
"agrave": "\xe0",
"agrave;": "\xe0",
"alefsym;": "\u2135",
"aleph;": "\u2135",
"alpha;": "\u03b1",
"amacr;": "\u0101",
"amalg;": "\u2a3f",
"amp": "&",
"amp;": "&",
"and;": "\u2227",
"andand;": "\u2a55",
"andd;": "\u2a5c",
"andslope;": "\u2a58",
"andv;": "\u2a5a",
"ang;": "\u2220",
"ange;": "\u29a4",
"angle;": "\u2220",
"angmsd;": "\u2221",
"angmsdaa;": "\u29a8",
"angmsdab;": "\u29a9",
"angmsdac;": "\u29aa",
"angmsdad;": "\u29ab",
"angmsdae;": "\u29ac",
"angmsdaf;": "\u29ad",
"angmsdag;": "\u29ae",
"angmsdah;": "\u29af",
"angrt;": "\u221f",
"angrtvb;": "\u22be",
"angrtvbd;": "\u299d",
"angsph;": "\u2222",
"angst;": "\xc5",
"angzarr;": "\u237c",
"aogon;": "\u0105",
"aopf;": "\U0001d552",
"ap;": "\u2248",
"apE;": "\u2a70",
"apacir;": "\u2a6f",
"ape;": "\u224a",
"apid;": "\u224b",
"apos;": "'",
"approx;": "\u2248",
"approxeq;": "\u224a",
"aring": "\xe5",
"aring;": "\xe5",
"ascr;": "\U0001d4b6",
"ast;": "*",
"asymp;": "\u2248",
"asympeq;": "\u224d",
"atilde": "\xe3",
"atilde;": "\xe3",
"auml": "\xe4",
"auml;": "\xe4",
"awconint;": "\u2233",
"awint;": "\u2a11",
"bNot;": "\u2aed",
"backcong;": "\u224c",
"backepsilon;": "\u03f6",
"backprime;": "\u2035",
"backsim;": "\u223d",
"backsimeq;": "\u22cd",
"barvee;": "\u22bd",
"barwed;": "\u2305",
"barwedge;": "\u2305",
"bbrk;": "\u23b5",
"bbrktbrk;": "\u23b6",
"bcong;": "\u224c",
"bcy;": "\u0431",
"bdquo;": "\u201e",
"becaus;": "\u2235",
"because;": "\u2235",
"bemptyv;": "\u29b0",
"bepsi;": "\u03f6",
"bernou;": "\u212c",
"beta;": "\u03b2",
"beth;": "\u2136",
"between;": "\u226c",
"bfr;": "\U0001d51f",
"bigcap;": "\u22c2",
"bigcirc;": "\u25ef",
"bigcup;": "\u22c3",
"bigodot;": "\u2a00",
"bigoplus;": "\u2a01",
"bigotimes;": "\u2a02",
"bigsqcup;": "\u2a06",
"bigstar;": "\u2605",
"bigtriangledown;": "\u25bd",
"bigtriangleup;": "\u25b3",
"biguplus;": "\u2a04",
"bigvee;": "\u22c1",
"bigwedge;": "\u22c0",
"bkarow;": "\u290d",
"blacklozenge;": "\u29eb",
"blacksquare;": "\u25aa",
"blacktriangle;": "\u25b4",
"blacktriangledown;": "\u25be",
"blacktriangleleft;": "\u25c2",
"blacktriangleright;": "\u25b8",
"blank;": "\u2423",
"blk12;": "\u2592",
"blk14;": "\u2591",
"blk34;": "\u2593",
"block;": "\u2588",
"bne;": "=\u20e5",
"bnequiv;": "\u2261\u20e5",
"bnot;": "\u2310",
"bopf;": "\U0001d553",
"bot;": "\u22a5",
"bottom;": "\u22a5",
"bowtie;": "\u22c8",
"boxDL;": "\u2557",
"boxDR;": "\u2554",
"boxDl;": "\u2556",
"boxDr;": "\u2553",
"boxH;": "\u2550",
"boxHD;": "\u2566",
"boxHU;": "\u2569",
"boxHd;": "\u2564",
"boxHu;": "\u2567",
"boxUL;": "\u255d",
"boxUR;": "\u255a",
"boxUl;": "\u255c",
"boxUr;": "\u2559",
"boxV;": "\u2551",
"boxVH;": "\u256c",
"boxVL;": "\u2563",
"boxVR;": "\u2560",
"boxVh;": "\u256b",
"boxVl;": "\u2562",
"boxVr;": "\u255f",
"boxbox;": "\u29c9",
"boxdL;": "\u2555",
"boxdR;": "\u2552",
"boxdl;": "\u2510",
"boxdr;": "\u250c",
"boxh;": "\u2500",
"boxhD;": "\u2565",
"boxhU;": "\u2568",
"boxhd;": "\u252c",
"boxhu;": "\u2534",
"boxminus;": "\u229f",
"boxplus;": "\u229e",
"boxtimes;": "\u22a0",
"boxuL;": "\u255b",
"boxuR;": "\u2558",
"boxul;": "\u2518",
"boxur;": "\u2514",
"boxv;": "\u2502",
"boxvH;": "\u256a",
"boxvL;": "\u2561",
"boxvR;": "\u255e",
"boxvh;": "\u253c",
"boxvl;": "\u2524",
"boxvr;": "\u251c",
"bprime;": "\u2035",
"breve;": "\u02d8",
"brvbar": "\xa6",
"brvbar;": "\xa6",
"bscr;": "\U0001d4b7",
"bsemi;": "\u204f",
"bsim;": "\u223d",
"bsime;": "\u22cd",
"bsol;": "\\",
"bsolb;": "\u29c5",
"bsolhsub;": "\u27c8",
"bull;": "\u2022",
"bullet;": "\u2022",
"bump;": "\u224e",
"bumpE;": "\u2aae",
"bumpe;": "\u224f",
"bumpeq;": "\u224f",
"cacute;": "\u0107",
"cap;": "\u2229",
"capand;": "\u2a44",
"capbrcup;": "\u2a49",
"capcap;": "\u2a4b",
"capcup;": "\u2a47",
"capdot;": "\u2a40",
"caps;": "\u2229\ufe00",
"caret;": "\u2041",
"caron;": "\u02c7",
"ccaps;": "\u2a4d",
"ccaron;": "\u010d",
"ccedil": "\xe7",
"ccedil;": "\xe7",
"ccirc;": "\u0109",
"ccups;": "\u2a4c",
"ccupssm;": "\u2a50",
"cdot;": "\u010b",
"cedil": "\xb8",
"cedil;": "\xb8",
"cemptyv;": "\u29b2",
"cent": "\xa2",
"cent;": "\xa2",
"centerdot;": "\xb7",
"cfr;": "\U0001d520",
"chcy;": "\u0447",
"check;": "\u2713",
"checkmark;": "\u2713",
"chi;": "\u03c7",
"cir;": "\u25cb",
"cirE;": "\u29c3",
"circ;": "\u02c6",
"circeq;": "\u2257",
"circlearrowleft;": "\u21ba",
"circlearrowright;": "\u21bb",
"circledR;": "\xae",
"circledS;": "\u24c8",
"circledast;": "\u229b",
"circledcirc;": "\u229a",
"circleddash;": "\u229d",
"cire;": "\u2257",
"cirfnint;": "\u2a10",
"cirmid;": "\u2aef",
"cirscir;": "\u29c2",
"clubs;": "\u2663",
"clubsuit;": "\u2663",
"colon;": ":",
"colone;": "\u2254",
"coloneq;": "\u2254",
"comma;": ",",
"commat;": "@",
"comp;": "\u2201",
"compfn;": "\u2218",
"complement;": "\u2201",
"complexes;": "\u2102",
"cong;": "\u2245",
"congdot;": "\u2a6d",
"conint;": "\u222e",
"copf;": "\U0001d554",
"coprod;": "\u2210",
"copy": "\xa9",
"copy;": "\xa9",
"copysr;": "\u2117",
"crarr;": "\u21b5",
"cross;": "\u2717",
"cscr;": "\U0001d4b8",
"csub;": "\u2acf",
"csube;": "\u2ad1",
"csup;": "\u2ad0",
"csupe;": "\u2ad2",
"ctdot;": "\u22ef",
"cudarrl;": "\u2938",
"cudarrr;": "\u2935",
"cuepr;": "\u22de",
"cuesc;": "\u22df",
"cularr;": "\u21b6",
"cularrp;": "\u293d",
"cup;": "\u222a",
"cupbrcap;": "\u2a48",
"cupcap;": "\u2a46",
"cupcup;": "\u2a4a",
"cupdot;": "\u228d",
"cupor;": "\u2a45",
"cups;": "\u222a\ufe00",
"curarr;": "\u21b7",
"curarrm;": "\u293c",
"curlyeqprec;": "\u22de",
"curlyeqsucc;": "\u22df",
"curlyvee;": "\u22ce",
"curlywedge;": "\u22cf",
"curren": "\xa4",
"curren;": "\xa4",
"curvearrowleft;": "\u21b6",
"curvearrowright;": "\u21b7",
"cuvee;": "\u22ce",
"cuwed;": "\u22cf",
"cwconint;": "\u2232",
"cwint;": "\u2231",
"cylcty;": "\u232d",
"dArr;": "\u21d3",
"dHar;": "\u2965",
"dagger;": "\u2020",
"daleth;": "\u2138",
"darr;": "\u2193",
"dash;": "\u2010",
"dashv;": "\u22a3",
"dbkarow;": "\u290f",
"dblac;": "\u02dd",
"dcaron;": "\u010f",
"dcy;": "\u0434",
"dd;": "\u2146",
"ddagger;": "\u2021",
"ddarr;": "\u21ca",
"ddotseq;": "\u2a77",
"deg": "\xb0",
"deg;": "\xb0",
"delta;": "\u03b4",
"demptyv;": "\u29b1",
"dfisht;": "\u297f",
"dfr;": "\U0001d521",
"dharl;": "\u21c3",
"dharr;": "\u21c2",
"diam;": "\u22c4",
"diamond;": "\u22c4",
"diamondsuit;": "\u2666",
"diams;": "\u2666",
"die;": "\xa8",
"digamma;": "\u03dd",
"disin;": "\u22f2",
"div;": "\xf7",
"divide": "\xf7",
"divide;": "\xf7",
"divideontimes;": "\u22c7",
"divonx;": "\u22c7",
"djcy;": "\u0452",
"dlcorn;": "\u231e",
"dlcrop;": "\u230d",
"dollar;": "$",
"dopf;": "\U0001d555",
"dot;": "\u02d9",
"doteq;": "\u2250",
"doteqdot;": "\u2251",
"dotminus;": "\u2238",
"dotplus;": "\u2214",
"dotsquare;": "\u22a1",
"doublebarwedge;": "\u2306",
"downarrow;": "\u2193",
"downdownarrows;": "\u21ca",
"downharpoonleft;": "\u21c3",
"downharpoonright;": "\u21c2",
"drbkarow;": "\u2910",
"drcorn;": "\u231f",
"drcrop;": "\u230c",
"dscr;": "\U0001d4b9",
"dscy;": "\u0455",
"dsol;": "\u29f6",
"dstrok;": "\u0111",
"dtdot;": "\u22f1",
"dtri;": "\u25bf",
"dtrif;": "\u25be",
"duarr;": "\u21f5",
"duhar;": "\u296f",
"dwangle;": "\u29a6",
"dzcy;": "\u045f",
"dzigrarr;": "\u27ff",
"eDDot;": "\u2a77",
"eDot;": "\u2251",
"eacute": "\xe9",
"eacute;": "\xe9",
"easter;": "\u2a6e",
"ecaron;": "\u011b",
"ecir;": "\u2256",
"ecirc": "\xea",
"ecirc;": "\xea",
"ecolon;": "\u2255",
"ecy;": "\u044d",
"edot;": "\u0117",
"ee;": "\u2147",
"efDot;": "\u2252",
"efr;": "\U0001d522",
"eg;": "\u2a9a",
"egrave": "\xe8",
"egrave;": "\xe8",
"egs;": "\u2a96",
"egsdot;": "\u2a98",
"el;": "\u2a99",
"elinters;": "\u23e7",
"ell;": "\u2113",
"els;": "\u2a95",
"elsdot;": "\u2a97",
"emacr;": "\u0113",
"empty;": "\u2205",
"emptyset;": "\u2205",
"emptyv;": "\u2205",
"emsp13;": "\u2004",
"emsp14;": "\u2005",
"emsp;": "\u2003",
"eng;": "\u014b",
"ensp;": "\u2002",
"eogon;": "\u0119",
"eopf;": "\U0001d556",
"epar;": "\u22d5",
"eparsl;": "\u29e3",
"eplus;": "\u2a71",
"epsi;": "\u03b5",
"epsilon;": "\u03b5",
"epsiv;": "\u03f5",
"eqcirc;": "\u2256",
"eqcolon;": "\u2255",
"eqsim;": "\u2242",
"eqslantgtr;": "\u2a96",
"eqslantless;": "\u2a95",
"equals;": "=",
"equest;": "\u225f",
"equiv;": "\u2261",
"equivDD;": "\u2a78",
"eqvparsl;": "\u29e5",
"erDot;": "\u2253",
"erarr;": "\u2971",
"escr;": "\u212f",
"esdot;": "\u2250",
"esim;": "\u2242",
"eta;": "\u03b7",
"eth": "\xf0",
"eth;": "\xf0",
"euml": "\xeb",
"euml;": "\xeb",
"euro;": "\u20ac",
"excl;": "!",
"exist;": "\u2203",
"expectation;": "\u2130",
"exponentiale;": "\u2147",
"fallingdotseq;": "\u2252",
"fcy;": "\u0444",
"female;": "\u2640",
"ffilig;": "\ufb03",
"fflig;": "\ufb00",
"ffllig;": "\ufb04",
"ffr;": "\U0001d523",
"filig;": "\ufb01",
"fjlig;": "fj",
"flat;": "\u266d",
"fllig;": "\ufb02",
"fltns;": "\u25b1",
"fnof;": "\u0192",
"fopf;": "\U0001d557",
"forall;": "\u2200",
"fork;": "\u22d4",
"forkv;": "\u2ad9",
"fpartint;": "\u2a0d",
"frac12": "\xbd",
"frac12;": "\xbd",
"frac13;": "\u2153",
"frac14": "\xbc",
"frac14;": "\xbc",
"frac15;": "\u2155",
"frac16;": "\u2159",
"frac18;": "\u215b",
"frac23;": "\u2154",
"frac25;": "\u2156",
"frac34": "\xbe",
"frac34;": "\xbe",
"frac35;": "\u2157",
"frac38;": "\u215c",
"frac45;": "\u2158",
"frac56;": "\u215a",
"frac58;": "\u215d",
"frac78;": "\u215e",
"frasl;": "\u2044",
"frown;": "\u2322",
"fscr;": "\U0001d4bb",
"gE;": "\u2267",
"gEl;": "\u2a8c",
"gacute;": "\u01f5",
"gamma;": "\u03b3",
"gammad;": "\u03dd",
"gap;": "\u2a86",
"gbreve;": "\u011f",
"gcirc;": "\u011d",
"gcy;": "\u0433",
"gdot;": "\u0121",
"ge;": "\u2265",
"gel;": "\u22db",
"geq;": "\u2265",
"geqq;": "\u2267",
"geqslant;": "\u2a7e",
"ges;": "\u2a7e",
"gescc;": "\u2aa9",
"gesdot;": "\u2a80",
"gesdoto;": "\u2a82",
"gesdotol;": "\u2a84",
"gesl;": "\u22db\ufe00",
"gesles;": "\u2a94",
"gfr;": "\U0001d524",
"gg;": "\u226b",
"ggg;": "\u22d9",
"gimel;": "\u2137",
"gjcy;": "\u0453",
"gl;": "\u2277",
"glE;": "\u2a92",
"gla;": "\u2aa5",
"glj;": "\u2aa4",
"gnE;": "\u2269",
"gnap;": "\u2a8a",
"gnapprox;": "\u2a8a",
"gne;": "\u2a88",
"gneq;": "\u2a88",
"gneqq;": "\u2269",
"gnsim;": "\u22e7",
"gopf;": "\U0001d558",
"grave;": "`",
"gscr;": "\u210a",
"gsim;": "\u2273",
"gsime;": "\u2a8e",
"gsiml;": "\u2a90",
"gt": ">",
"gt;": ">",
"gtcc;": "\u2aa7",
"gtcir;": "\u2a7a",
"gtdot;": "\u22d7",
"gtlPar;": "\u2995",
"gtquest;": "\u2a7c",
"gtrapprox;": "\u2a86",
"gtrarr;": "\u2978",
"gtrdot;": "\u22d7",
"gtreqless;": "\u22db",
"gtreqqless;": "\u2a8c",
"gtrless;": "\u2277",
"gtrsim;": "\u2273",
"gvertneqq;": "\u2269\ufe00",
"gvnE;": "\u2269\ufe00",
"hArr;": "\u21d4",
"hairsp;": "\u200a",
"half;": "\xbd",
"hamilt;": "\u210b",
"hardcy;": "\u044a",
"harr;": "\u2194",
"harrcir;": "\u2948",
"harrw;": "\u21ad",
"hbar;": "\u210f",
"hcirc;": "\u0125",
"hearts;": "\u2665",
"heartsuit;": "\u2665",
"hellip;": "\u2026",
"hercon;": "\u22b9",
"hfr;": "\U0001d525",
"hksearow;": "\u2925",
"hkswarow;": "\u2926",
"hoarr;": "\u21ff",
"homtht;": "\u223b",
"hookleftarrow;": "\u21a9",
"hookrightarrow;": "\u21aa",
"hopf;": "\U0001d559",
"horbar;": "\u2015",
"hscr;": "\U0001d4bd",
"hslash;": "\u210f",
"hstrok;": "\u0127",
"hybull;": "\u2043",
"hyphen;": "\u2010",
"iacute": "\xed",
"iacute;": "\xed",
"ic;": "\u2063",
"icirc": "\xee",
"icirc;": "\xee",
"icy;": "\u0438",
"iecy;": "\u0435",
"iexcl": "\xa1",
"iexcl;": "\xa1",
"iff;": "\u21d4",
"ifr;": "\U0001d526",
"igrave": "\xec",
"igrave;": "\xec",
"ii;": "\u2148",
"iiiint;": "\u2a0c",
"iiint;": "\u222d",
"iinfin;": "\u29dc",
"iiota;": "\u2129",
"ijlig;": "\u0133",
"imacr;": "\u012b",
"image;": "\u2111",
"imagline;": "\u2110",
"imagpart;": "\u2111",
"imath;": "\u0131",
"imof;": "\u22b7",
"imped;": "\u01b5",
"in;": "\u2208",
"incare;": "\u2105",
"infin;": "\u221e",
"infintie;": "\u29dd",
"inodot;": "\u0131",
"int;": "\u222b",
"intcal;": "\u22ba",
"integers;": "\u2124",
"intercal;": "\u22ba",
"intlarhk;": "\u2a17",
"intprod;": "\u2a3c",
"iocy;": "\u0451",
"iogon;": "\u012f",
"iopf;": "\U0001d55a",
"iota;": "\u03b9",
"iprod;": "\u2a3c",
"iquest": "\xbf",
"iquest;": "\xbf",
"iscr;": "\U0001d4be",
"isin;": "\u2208",
"isinE;": "\u22f9",
"isindot;": "\u22f5",
"isins;": "\u22f4",
"isinsv;": "\u22f3",
"isinv;": "\u2208",
"it;": "\u2062",
"itilde;": "\u0129",
"iukcy;": "\u0456",
"iuml": "\xef",
"iuml;": "\xef",
"jcirc;": "\u0135",
"jcy;": "\u0439",
"jfr;": "\U0001d527",
"jmath;": "\u0237",
"jopf;": "\U0001d55b",
"jscr;": "\U0001d4bf",
"jsercy;": "\u0458",
"jukcy;": "\u0454",
"kappa;": "\u03ba",
"kappav;": "\u03f0",
"kcedil;": "\u0137",
"kcy;": "\u043a",
"kfr;": "\U0001d528",
"kgreen;": "\u0138",
"khcy;": "\u0445",
"kjcy;": "\u045c",
"kopf;": "\U0001d55c",
"kscr;": "\U0001d4c0",
"lAarr;": "\u21da",
"lArr;": "\u21d0",
"lAtail;": "\u291b",
"lBarr;": "\u290e",
"lE;": "\u2266",
"lEg;": "\u2a8b",
"lHar;": "\u2962",
"lacute;": "\u013a",
"laemptyv;": "\u29b4",
"lagran;": "\u2112",
"lambda;": "\u03bb",
"lang;": "\u27e8",
"langd;": "\u2991",
"langle;": "\u27e8",
"lap;": "\u2a85",
"laquo": "\xab",
"laquo;": "\xab",
"larr;": "\u2190",
"larrb;": "\u21e4",
"larrbfs;": "\u291f",
"larrfs;": "\u291d",
"larrhk;": "\u21a9",
"larrlp;": "\u21ab",
"larrpl;": "\u2939",
"larrsim;": "\u2973",
"larrtl;": "\u21a2",
"lat;": "\u2aab",
"latail;": "\u2919",
"late;": "\u2aad",
"lates;": "\u2aad\ufe00",
"lbarr;": "\u290c",
"lbbrk;": "\u2772",
"lbrace;": "{",
"lbrack;": "[",
"lbrke;": "\u298b",
"lbrksld;": "\u298f",
"lbrkslu;": "\u298d",
"lcaron;": "\u013e",
"lcedil;": "\u013c",
"lceil;": "\u2308",
"lcub;": "{",
"lcy;": "\u043b",
"ldca;": "\u2936",
"ldquo;": "\u201c",
"ldquor;": "\u201e",
"ldrdhar;": "\u2967",
"ldrushar;": "\u294b",
"ldsh;": "\u21b2",
"le;": "\u2264",
"leftarrow;": "\u2190",
"leftarrowtail;": "\u21a2",
"leftharpoondown;": "\u21bd",
"leftharpoonup;": "\u21bc",
"leftleftarrows;": "\u21c7",
"leftrightarrow;": "\u2194",
"leftrightarrows;": "\u21c6",
"leftrightharpoons;": "\u21cb",
"leftrightsquigarrow;": "\u21ad",
"leftthreetimes;": "\u22cb",
"leg;": "\u22da",
"leq;": "\u2264",
"leqq;": "\u2266",
"leqslant;": "\u2a7d",
"les;": "\u2a7d",
"lescc;": "\u2aa8",
"lesdot;": "\u2a7f",
"lesdoto;": "\u2a81",
"lesdotor;": "\u2a83",
"lesg;": "\u22da\ufe00",
"lesges;": "\u2a93",
"lessapprox;": "\u2a85",
"lessdot;": "\u22d6",
"lesseqgtr;": "\u22da",
"lesseqqgtr;": "\u2a8b",
"lessgtr;": "\u2276",
"lesssim;": "\u2272",
"lfisht;": "\u297c",
"lfloor;": "\u230a",
"lfr;": "\U0001d529",
"lg;": "\u2276",
"lgE;": "\u2a91",
"lhard;": "\u21bd",
"lharu;": "\u21bc",
"lharul;": "\u296a",
"lhblk;": "\u2584",
"ljcy;": "\u0459",
"ll;": "\u226a",
"llarr;": "\u21c7",
"llcorner;": "\u231e",
"llhard;": "\u296b",
"lltri;": "\u25fa",
"lmidot;": "\u0140",
"lmoust;": "\u23b0",
"lmoustache;": "\u23b0",
"lnE;": "\u2268",
"lnap;": "\u2a89",
"lnapprox;": "\u2a89",
"lne;": "\u2a87",
"lneq;": "\u2a87",
"lneqq;": "\u2268",
"lnsim;": "\u22e6",
"loang;": "\u27ec",
"loarr;": "\u21fd",
"lobrk;": "\u27e6",
"longleftarrow;": "\u27f5",
"longleftrightarrow;": "\u27f7",
"longmapsto;": "\u27fc",
"longrightarrow;": "\u27f6",
"looparrowleft;": "\u21ab",
"looparrowright;": "\u21ac",
"lopar;": "\u2985",
"lopf;": "\U0001d55d",
"loplus;": "\u2a2d",
"lotimes;": "\u2a34",
"lowast;": "\u2217",
"lowbar;": "_",
"loz;": "\u25ca",
"lozenge;": "\u25ca",
"lozf;": "\u29eb",
"lpar;": "(",
"lparlt;": "\u2993",
"lrarr;": "\u21c6",
"lrcorner;": "\u231f",
"lrhar;": "\u21cb",
"lrhard;": "\u296d",
"lrm;": "\u200e",
"lrtri;": "\u22bf",
"lsaquo;": "\u2039",
"lscr;": "\U0001d4c1",
"lsh;": "\u21b0",
"lsim;": "\u2272",
"lsime;": "\u2a8d",
"lsimg;": "\u2a8f",
"lsqb;": "[",
"lsquo;": "\u2018",
"lsquor;": "\u201a",
"lstrok;": "\u0142",
"lt": "<",
"lt;": "<",
"ltcc;": "\u2aa6",
"ltcir;": "\u2a79",
"ltdot;": "\u22d6",
"lthree;": "\u22cb",
"ltimes;": "\u22c9",
"ltlarr;": "\u2976",
"ltquest;": "\u2a7b",
"ltrPar;": "\u2996",
"ltri;": "\u25c3",
"ltrie;": "\u22b4",
"ltrif;": "\u25c2",
"lurdshar;": "\u294a",
"luruhar;": "\u2966",
"lvertneqq;": "\u2268\ufe00",
"lvnE;": "\u2268\ufe00",
"mDDot;": "\u223a",
"macr": "\xaf",
"macr;": "\xaf",
"male;": "\u2642",
"malt;": "\u2720",
"maltese;": "\u2720",
"map;": "\u21a6",
"mapsto;": "\u21a6",
"mapstodown;": "\u21a7",
"mapstoleft;": "\u21a4",
"mapstoup;": "\u21a5",
"marker;": "\u25ae",
"mcomma;": "\u2a29",
"mcy;": "\u043c",
"mdash;": "\u2014",
"measuredangle;": "\u2221",
"mfr;": "\U0001d52a",
"mho;": "\u2127",
"micro": "\xb5",
"micro;": "\xb5",
"mid;": "\u2223",
"midast;": "*",
"midcir;": "\u2af0",
"middot": "\xb7",
"middot;": "\xb7",
"minus;": "\u2212",
"minusb;": "\u229f",
"minusd;": "\u2238",
"minusdu;": "\u2a2a",
"mlcp;": "\u2adb",
"mldr;": "\u2026",
"mnplus;": "\u2213",
"models;": "\u22a7",
"mopf;": "\U0001d55e",
"mp;": "\u2213",
"mscr;": "\U0001d4c2",
"mstpos;": "\u223e",
"mu;": "\u03bc",
"multimap;": "\u22b8",
"mumap;": "\u22b8",
"nGg;": "\u22d9\u0338",
"nGt;": "\u226b\u20d2",
"nGtv;": "\u226b\u0338",
"nLeftarrow;": "\u21cd",
"nLeftrightarrow;": "\u21ce",
"nLl;": "\u22d8\u0338",
"nLt;": "\u226a\u20d2",
"nLtv;": "\u226a\u0338",
"nRightarrow;": "\u21cf",
"nVDash;": "\u22af",
"nVdash;": "\u22ae",
"nabla;": "\u2207",
"nacute;": "\u0144",
"nang;": "\u2220\u20d2",
"nap;": "\u2249",
"napE;": "\u2a70\u0338",
"napid;": "\u224b\u0338",
"napos;": "\u0149",
"napprox;": "\u2249",
"natur;": "\u266e",
"natural;": "\u266e",
"naturals;": "\u2115",
"nbsp": "\xa0",
"nbsp;": "\xa0",
"nbump;": "\u224e\u0338",
"nbumpe;": "\u224f\u0338",
"ncap;": "\u2a43",
"ncaron;": "\u0148",
"ncedil;": "\u0146",
"ncong;": "\u2247",
"ncongdot;": "\u2a6d\u0338",
"ncup;": "\u2a42",
"ncy;": "\u043d",
"ndash;": "\u2013",
"ne;": "\u2260",
"neArr;": "\u21d7",
"nearhk;": "\u2924",
"nearr;": "\u2197",
"nearrow;": "\u2197",
"nedot;": "\u2250\u0338",
"nequiv;": "\u2262",
"nesear;": "\u2928",
"nesim;": "\u2242\u0338",
"nexist;": "\u2204",
"nexists;": "\u2204",
"nfr;": "\U0001d52b",
"ngE;": "\u2267\u0338",
"nge;": "\u2271",
"ngeq;": "\u2271",
"ngeqq;": "\u2267\u0338",
"ngeqslant;": "\u2a7e\u0338",
"nges;": "\u2a7e\u0338",
"ngsim;": "\u2275",
"ngt;": "\u226f",
"ngtr;": "\u226f",
"nhArr;": "\u21ce",
"nharr;": "\u21ae",
"nhpar;": "\u2af2",
"ni;": "\u220b",
"nis;": "\u22fc",
"nisd;": "\u22fa",
"niv;": "\u220b",
"njcy;": "\u045a",
"nlArr;": "\u21cd",
"nlE;": "\u2266\u0338",
"nlarr;": "\u219a",
"nldr;": "\u2025",
"nle;": "\u2270",
"nleftarrow;": "\u219a",
"nleftrightarrow;": "\u21ae",
"nleq;": "\u2270",
"nleqq;": "\u2266\u0338",
"nleqslant;": "\u2a7d\u0338",
"nles;": "\u2a7d\u0338",
"nless;": "\u226e",
"nlsim;": "\u2274",
"nlt;": "\u226e",
"nltri;": "\u22ea",
"nltrie;": "\u22ec",
"nmid;": "\u2224",
"nopf;": "\U0001d55f",
"not": "\xac",
"not;": "\xac",
"notin;": "\u2209",
"notinE;": "\u22f9\u0338",
"notindot;": "\u22f5\u0338",
"notinva;": "\u2209",
"notinvb;": "\u22f7",
"notinvc;": "\u22f6",
"notni;": "\u220c",
"notniva;": "\u220c",
"notnivb;": "\u22fe",
"notnivc;": "\u22fd",
"npar;": "\u2226",
"nparallel;": "\u2226",
"nparsl;": "\u2afd\u20e5",
"npart;": "\u2202\u0338",
"npolint;": "\u2a14",
"npr;": "\u2280",
"nprcue;": "\u22e0",
"npre;": "\u2aaf\u0338",
"nprec;": "\u2280",
"npreceq;": "\u2aaf\u0338",
"nrArr;": "\u21cf",
"nrarr;": "\u219b",
"nrarrc;": "\u2933\u0338",
"nrarrw;": "\u219d\u0338",
"nrightarrow;": "\u219b",
"nrtri;": "\u22eb",
"nrtrie;": "\u22ed",
"nsc;": "\u2281",
"nsccue;": "\u22e1",
"nsce;": "\u2ab0\u0338",
"nscr;": "\U0001d4c3",
"nshortmid;": "\u2224",
"nshortparallel;": "\u2226",
"nsim;": "\u2241",
"nsime;": "\u2244",
"nsimeq;": "\u2244",
"nsmid;": "\u2224",
"nspar;": "\u2226",
"nsqsube;": "\u22e2",
"nsqsupe;": "\u22e3",
"nsub;": "\u2284",
"nsubE;": "\u2ac5\u0338",
"nsube;": "\u2288",
"nsubset;": "\u2282\u20d2",
"nsubseteq;": "\u2288",
"nsubseteqq;": "\u2ac5\u0338",
"nsucc;": "\u2281",
"nsucceq;": "\u2ab0\u0338",
"nsup;": "\u2285",
"nsupE;": "\u2ac6\u0338",
"nsupe;": "\u2289",
"nsupset;": "\u2283\u20d2",
"nsupseteq;": "\u2289",
"nsupseteqq;": "\u2ac6\u0338",
"ntgl;": "\u2279",
"ntilde": "\xf1",
"ntilde;": "\xf1",
"ntlg;": "\u2278",
"ntriangleleft;": "\u22ea",
"ntrianglelefteq;": "\u22ec",
"ntriangleright;": "\u22eb",
"ntrianglerighteq;": "\u22ed",
"nu;": "\u03bd",
"num;": "#",
"numero;": "\u2116",
"numsp;": "\u2007",
"nvDash;": "\u22ad",
"nvHarr;": "\u2904",
"nvap;": "\u224d\u20d2",
"nvdash;": "\u22ac",
"nvge;": "\u2265\u20d2",
"nvgt;": ">\u20d2",
"nvinfin;": "\u29de",
"nvlArr;": "\u2902",
"nvle;": "\u2264\u20d2",
"nvlt;": "<\u20d2",
"nvltrie;": "\u22b4\u20d2",
"nvrArr;": "\u2903",
"nvrtrie;": "\u22b5\u20d2",
"nvsim;": "\u223c\u20d2",
"nwArr;": "\u21d6",
"nwarhk;": "\u2923",
"nwarr;": "\u2196",
"nwarrow;": "\u2196",
"nwnear;": "\u2927",
"oS;": "\u24c8",
"oacute": "\xf3",
"oacute;": "\xf3",
"oast;": "\u229b",
"ocir;": "\u229a",
"ocirc": "\xf4",
"ocirc;": "\xf4",
"ocy;": "\u043e",
"odash;": "\u229d",
"odblac;": "\u0151",
"odiv;": "\u2a38",
"odot;": "\u2299",
"odsold;": "\u29bc",
"oelig;": "\u0153",
"ofcir;": "\u29bf",
"ofr;": "\U0001d52c",
"ogon;": "\u02db",
"ograve": "\xf2",
"ograve;": "\xf2",
"ogt;": "\u29c1",
"ohbar;": "\u29b5",
"ohm;": "\u03a9",
"oint;": "\u222e",
"olarr;": "\u21ba",
"olcir;": "\u29be",
"olcross;": "\u29bb",
"oline;": "\u203e",
"olt;": "\u29c0",
"omacr;": "\u014d",
"omega;": "\u03c9",
"omicron;": "\u03bf",
"omid;": "\u29b6",
"ominus;": "\u2296",
"oopf;": "\U0001d560",
"opar;": "\u29b7",
"operp;": "\u29b9",
"oplus;": "\u2295",
"or;": "\u2228",
"orarr;": "\u21bb",
"ord;": "\u2a5d",
"order;": "\u2134",
"orderof;": "\u2134",
"ordf": "\xaa",
"ordf;": "\xaa",
"ordm": "\xba",
"ordm;": "\xba",
"origof;": "\u22b6",
"oror;": "\u2a56",
"orslope;": "\u2a57",
"orv;": "\u2a5b",
"oscr;": "\u2134",
"oslash": "\xf8",
"oslash;": "\xf8",
"osol;": "\u2298",
"otilde": "\xf5",
"otilde;": "\xf5",
"otimes;": "\u2297",
"otimesas;": "\u2a36",
"ouml": "\xf6",
"ouml;": "\xf6",
"ovbar;": "\u233d",
"par;": "\u2225",
"para": "\xb6",
"para;": "\xb6",
"parallel;": "\u2225",
"parsim;": "\u2af3",
"parsl;": "\u2afd",
"part;": "\u2202",
"pcy;": "\u043f",
"percnt;": "%",
"period;": ".",
"permil;": "\u2030",
"perp;": "\u22a5",
"pertenk;": "\u2031",
"pfr;": "\U0001d52d",
"phi;": "\u03c6",
"phiv;": "\u03d5",
"phmmat;": "\u2133",
"phone;": "\u260e",
"pi;": "\u03c0",
"pitchfork;": "\u22d4",
"piv;": "\u03d6",
"planck;": "\u210f",
"planckh;": "\u210e",
"plankv;": "\u210f",
"plus;": "+",
"plusacir;": "\u2a23",
"plusb;": "\u229e",
"pluscir;": "\u2a22",
"plusdo;": "\u2214",
"plusdu;": "\u2a25",
"pluse;": "\u2a72",
"plusmn": "\xb1",
"plusmn;": "\xb1",
"plussim;": "\u2a26",
"plustwo;": "\u2a27",
"pm;": "\xb1",
"pointint;": "\u2a15",
"popf;": "\U0001d561",
"pound": "\xa3",
"pound;": "\xa3",
"pr;": "\u227a",
"prE;": "\u2ab3",
"prap;": "\u2ab7",
"prcue;": "\u227c",
"pre;": "\u2aaf",
"prec;": "\u227a",
"precapprox;": "\u2ab7",
"preccurlyeq;": "\u227c",
"preceq;": "\u2aaf",
"precnapprox;": "\u2ab9",
"precneqq;": "\u2ab5",
"precnsim;": "\u22e8",
"precsim;": "\u227e",
"prime;": "\u2032",
"primes;": "\u2119",
"prnE;": "\u2ab5",
"prnap;": "\u2ab9",
"prnsim;": "\u22e8",
"prod;": "\u220f",
"profalar;": "\u232e",
"profline;": "\u2312",
"profsurf;": "\u2313",
"prop;": "\u221d",
"propto;": "\u221d",
"prsim;": "\u227e",
"prurel;": "\u22b0",
"pscr;": "\U0001d4c5",
"psi;": "\u03c8",
"puncsp;": "\u2008",
"qfr;": "\U0001d52e",
"qint;": "\u2a0c",
"qopf;": "\U0001d562",
"qprime;": "\u2057",
"qscr;": "\U0001d4c6",
"quaternions;": "\u210d",
"quatint;": "\u2a16",
"quest;": "?",
"questeq;": "\u225f",
"quot": "\"",
"quot;": "\"",
"rAarr;": "\u21db",
"rArr;": "\u21d2",
"rAtail;": "\u291c",
"rBarr;": "\u290f",
"rHar;": "\u2964",
"race;": "\u223d\u0331",
"racute;": "\u0155",
"radic;": "\u221a",
"raemptyv;": "\u29b3",
"rang;": "\u27e9",
"rangd;": "\u2992",
"range;": "\u29a5",
"rangle;": "\u27e9",
"raquo": "\xbb",
"raquo;": "\xbb",
"rarr;": "\u2192",
"rarrap;": "\u2975",
"rarrb;": "\u21e5",
"rarrbfs;": "\u2920",
"rarrc;": "\u2933",
"rarrfs;": "\u291e",
"rarrhk;": "\u21aa",
"rarrlp;": "\u21ac",
"rarrpl;": "\u2945",
"rarrsim;": "\u2974",
"rarrtl;": "\u21a3",
"rarrw;": "\u219d",
"ratail;": "\u291a",
"ratio;": "\u2236",
"rationals;": "\u211a",
"rbarr;": "\u290d",
"rbbrk;": "\u2773",
"rbrace;": "}",
"rbrack;": "]",
"rbrke;": "\u298c",
"rbrksld;": "\u298e",
"rbrkslu;": "\u2990",
"rcaron;": "\u0159",
"rcedil;": "\u0157",
"rceil;": "\u2309",
"rcub;": "}",
"rcy;": "\u0440",
"rdca;": "\u2937",
"rdldhar;": "\u2969",
"rdquo;": "\u201d",
"rdquor;": "\u201d",
"rdsh;": "\u21b3",
"real;": "\u211c",
"realine;": "\u211b",
"realpart;": "\u211c",
"reals;": "\u211d",
"rect;": "\u25ad",
"reg": "\xae",
"reg;": "\xae",
"rfisht;": "\u297d",
"rfloor;": "\u230b",
"rfr;": "\U0001d52f",
"rhard;": "\u21c1",
"rharu;": "\u21c0",
"rharul;": "\u296c",
"rho;": "\u03c1",
"rhov;": "\u03f1",
"rightarrow;": "\u2192",
"rightarrowtail;": "\u21a3",
"rightharpoondown;": "\u21c1",
"rightharpoonup;": "\u21c0",
"rightleftarrows;": "\u21c4",
"rightleftharpoons;": "\u21cc",
"rightrightarrows;": "\u21c9",
"rightsquigarrow;": "\u219d",
"rightthreetimes;": "\u22cc",
"ring;": "\u02da",
"risingdotseq;": "\u2253",
"rlarr;": "\u21c4",
"rlhar;": "\u21cc",
"rlm;": "\u200f",
"rmoust;": "\u23b1",
"rmoustache;": "\u23b1",
"rnmid;": "\u2aee",
"roang;": "\u27ed",
"roarr;": "\u21fe",
"robrk;": "\u27e7",
"ropar;": "\u2986",
"ropf;": "\U0001d563",
"roplus;": "\u2a2e",
"rotimes;": "\u2a35",
"rpar;": ")",
"rpargt;": "\u2994",
"rppolint;": "\u2a12",
"rrarr;": "\u21c9",
"rsaquo;": "\u203a",
"rscr;": "\U0001d4c7",
"rsh;": "\u21b1",
"rsqb;": "]",
"rsquo;": "\u2019",
"rsquor;": "\u2019",
"rthree;": "\u22cc",
"rtimes;": "\u22ca",
"rtri;": "\u25b9",
"rtrie;": "\u22b5",
"rtrif;": "\u25b8",
"rtriltri;": "\u29ce",
"ruluhar;": "\u2968",
"rx;": "\u211e",
"sacute;": "\u015b",
"sbquo;": "\u201a",
"sc;": "\u227b",
"scE;": "\u2ab4",
"scap;": "\u2ab8",
"scaron;": "\u0161",
"sccue;": "\u227d",
"sce;": "\u2ab0",
"scedil;": "\u015f",
"scirc;": "\u015d",
"scnE;": "\u2ab6",
"scnap;": "\u2aba",
"scnsim;": "\u22e9",
"scpolint;": "\u2a13",
"scsim;": "\u227f",
"scy;": "\u0441",
"sdot;": "\u22c5",
"sdotb;": "\u22a1",
"sdote;": "\u2a66",
"seArr;": "\u21d8",
"searhk;": "\u2925",
"searr;": "\u2198",
"searrow;": "\u2198",
"sect": "\xa7",
"sect;": "\xa7",
"semi;": ";",
"seswar;": "\u2929",
"setminus;": "\u2216",
"setmn;": "\u2216",
"sext;": "\u2736",
"sfr;": "\U0001d530",
"sfrown;": "\u2322",
"sharp;": "\u266f",
"shchcy;": "\u0449",
"shcy;": "\u0448",
"shortmid;": "\u2223",
"shortparallel;": "\u2225",
"shy": "\xad",
"shy;": "\xad",
"sigma;": "\u03c3",
"sigmaf;": "\u03c2",
"sigmav;": "\u03c2",
"sim;": "\u223c",
"simdot;": "\u2a6a",
"sime;": "\u2243",
"simeq;": "\u2243",
"simg;": "\u2a9e",
"simgE;": "\u2aa0",
"siml;": "\u2a9d",
"simlE;": "\u2a9f",
"simne;": "\u2246",
"simplus;": "\u2a24",
"simrarr;": "\u2972",
"slarr;": "\u2190",
"smallsetminus;": "\u2216",
"smashp;": "\u2a33",
"smeparsl;": "\u29e4",
"smid;": "\u2223",
"smile;": "\u2323",
"smt;": "\u2aaa",
"smte;": "\u2aac",
"smtes;": "\u2aac\ufe00",
"softcy;": "\u044c",
"sol;": "/",
"solb;": "\u29c4",
"solbar;": "\u233f",
"sopf;": "\U0001d564",
"spades;": "\u2660",
"spadesuit;": "\u2660",
"spar;": "\u2225",
"sqcap;": "\u2293",
"sqcaps;": "\u2293\ufe00",
"sqcup;": "\u2294",
"sqcups;": "\u2294\ufe00",
"sqsub;": "\u228f",
"sqsube;": "\u2291",
"sqsubset;": "\u228f",
"sqsubseteq;": "\u2291",
"sqsup;": "\u2290",
"sqsupe;": "\u2292",
"sqsupset;": "\u2290",
"sqsupseteq;": "\u2292",
"squ;": "\u25a1",
"square;": "\u25a1",
"squarf;": "\u25aa",
"squf;": "\u25aa",
"srarr;": "\u2192",
"sscr;": "\U0001d4c8",
"ssetmn;": "\u2216",
"ssmile;": "\u2323",
"sstarf;": "\u22c6",
"star;": "\u2606",
"starf;": "\u2605",
"straightepsilon;": "\u03f5",
"straightphi;": "\u03d5",
"strns;": "\xaf",
"sub;": "\u2282",
"subE;": "\u2ac5",
"subdot;": "\u2abd",
"sube;": "\u2286",
"subedot;": "\u2ac3",
"submult;": "\u2ac1",
"subnE;": "\u2acb",
"subne;": "\u228a",
"subplus;": "\u2abf",
"subrarr;": "\u2979",
"subset;": "\u2282",
"subseteq;": "\u2286",
"subseteqq;": "\u2ac5",
"subsetneq;": "\u228a",
"subsetneqq;": "\u2acb",
"subsim;": "\u2ac7",
"subsub;": "\u2ad5",
"subsup;": "\u2ad3",
"succ;": "\u227b",
"succapprox;": "\u2ab8",
"succcurlyeq;": "\u227d",
"succeq;": "\u2ab0",
"succnapprox;": "\u2aba",
"succneqq;": "\u2ab6",
"succnsim;": "\u22e9",
"succsim;": "\u227f",
"sum;": "\u2211",
"sung;": "\u266a",
"sup1": "\xb9",
"sup1;": "\xb9",
"sup2": "\xb2",
"sup2;": "\xb2",
"sup3": "\xb3",
"sup3;": "\xb3",
"sup;": "\u2283",
"supE;": "\u2ac6",
"supdot;": "\u2abe",
"supdsub;": "\u2ad8",
"supe;": "\u2287",
"supedot;": "\u2ac4",
"suphsol;": "\u27c9",
"suphsub;": "\u2ad7",
"suplarr;": "\u297b",
"supmult;": "\u2ac2",
"supnE;": "\u2acc",
"supne;": "\u228b",
"supplus;": "\u2ac0",
"supset;": "\u2283",
"supseteq;": "\u2287",
"supseteqq;": "\u2ac6",
"supsetneq;": "\u228b",
"supsetneqq;": "\u2acc",
"supsim;": "\u2ac8",
"supsub;": "\u2ad4",
"supsup;": "\u2ad6",
"swArr;": "\u21d9",
"swarhk;": "\u2926",
"swarr;": "\u2199",
"swarrow;": "\u2199",
"swnwar;": "\u292a",
"szlig": "\xdf",
"szlig;": "\xdf",
"target;": "\u2316",
"tau;": "\u03c4",
"tbrk;": "\u23b4",
"tcaron;": "\u0165",
"tcedil;": "\u0163",
"tcy;": "\u0442",
"tdot;": "\u20db",
"telrec;": "\u2315",
"tfr;": "\U0001d531",
"there4;": "\u2234",
"therefore;": "\u2234",
"theta;": "\u03b8",
"thetasym;": "\u03d1",
"thetav;": "\u03d1",
"thickapprox;": "\u2248",
"thicksim;": "\u223c",
"thinsp;": "\u2009",
"thkap;": "\u2248",
"thksim;": "\u223c",
"thorn": "\xfe",
"thorn;": "\xfe",
"tilde;": "\u02dc",
"times": "\xd7",
"times;": "\xd7",
"timesb;": "\u22a0",
"timesbar;": "\u2a31",
"timesd;": "\u2a30",
"tint;": "\u222d",
"toea;": "\u2928",
"top;": "\u22a4",
"topbot;": "\u2336",
"topcir;": "\u2af1",
"topf;": "\U0001d565",
"topfork;": "\u2ada",
"tosa;": "\u2929",
"tprime;": "\u2034",
"trade;": "\u2122",
"triangle;": "\u25b5",
"triangledown;": "\u25bf",
"triangleleft;": "\u25c3",
"trianglelefteq;": "\u22b4",
"triangleq;": "\u225c",
"triangleright;": "\u25b9",
"trianglerighteq;": "\u22b5",
"tridot;": "\u25ec",
"trie;": "\u225c",
"triminus;": "\u2a3a",
"triplus;": "\u2a39",
"trisb;": "\u29cd",
"tritime;": "\u2a3b",
"trpezium;": "\u23e2",
"tscr;": "\U0001d4c9",
"tscy;": "\u0446",
"tshcy;": "\u045b",
"tstrok;": "\u0167",
"twixt;": "\u226c",
"twoheadleftarrow;": "\u219e",
"twoheadrightarrow;": "\u21a0",
"uArr;": "\u21d1",
"uHar;": "\u2963",
"uacute": "\xfa",
"uacute;": "\xfa",
"uarr;": "\u2191",
"ubrcy;": "\u045e",
"ubreve;": "\u016d",
"ucirc": "\xfb",
"ucirc;": "\xfb",
"ucy;": "\u0443",
"udarr;": "\u21c5",
"udblac;": "\u0171",
"udhar;": "\u296e",
"ufisht;": "\u297e",
"ufr;": "\U0001d532",
"ugrave": "\xf9",
"ugrave;": "\xf9",
"uharl;": "\u21bf",
"uharr;": "\u21be",
"uhblk;": "\u2580",
"ulcorn;": "\u231c",
"ulcorner;": "\u231c",
"ulcrop;": "\u230f",
"ultri;": "\u25f8",
"umacr;": "\u016b",
"uml": "\xa8",
"uml;": "\xa8",
"uogon;": "\u0173",
"uopf;": "\U0001d566",
"uparrow;": "\u2191",
"updownarrow;": "\u2195",
"upharpoonleft;": "\u21bf",
"upharpoonright;": "\u21be",
"uplus;": "\u228e",
"upsi;": "\u03c5",
"upsih;": "\u03d2",
"upsilon;": "\u03c5",
"upuparrows;": "\u21c8",
"urcorn;": "\u231d",
"urcorner;": "\u231d",
"urcrop;": "\u230e",
"uring;": "\u016f",
"urtri;": "\u25f9",
"uscr;": "\U0001d4ca",
"utdot;": "\u22f0",
"utilde;": "\u0169",
"utri;": "\u25b5",
"utrif;": "\u25b4",
"uuarr;": "\u21c8",
"uuml": "\xfc",
"uuml;": "\xfc",
"uwangle;": "\u29a7",
"vArr;": "\u21d5",
"vBar;": "\u2ae8",
"vBarv;": "\u2ae9",
"vDash;": "\u22a8",
"vangrt;": "\u299c",
"varepsilon;": "\u03f5",
"varkappa;": "\u03f0",
"varnothing;": "\u2205",
"varphi;": "\u03d5",
"varpi;": "\u03d6",
"varpropto;": "\u221d",
"varr;": "\u2195",
"varrho;": "\u03f1",
"varsigma;": "\u03c2",
"varsubsetneq;": "\u228a\ufe00",
"varsubsetneqq;": "\u2acb\ufe00",
"varsupsetneq;": "\u228b\ufe00",
"varsupsetneqq;": "\u2acc\ufe00",
"vartheta;": "\u03d1",
"vartriangleleft;": "\u22b2",
"vartriangleright;": "\u22b3",
"vcy;": "\u0432",
"vdash;": "\u22a2",
"vee;": "\u2228",
"veebar;": "\u22bb",
"veeeq;": "\u225a",
"vellip;": "\u22ee",
"verbar;": "|",
"vert;": "|",
"vfr;": "\U0001d533",
"vltri;": "\u22b2",
"vnsub;": "\u2282\u20d2",
"vnsup;": "\u2283\u20d2",
"vopf;": "\U0001d567",
"vprop;": "\u221d",
"vrtri;": "\u22b3",
"vscr;": "\U0001d4cb",
"vsubnE;": "\u2acb\ufe00",
"vsubne;": "\u228a\ufe00",
"vsupnE;": "\u2acc\ufe00",
"vsupne;": "\u228b\ufe00",
"vzigzag;": "\u299a",
"wcirc;": "\u0175",
"wedbar;": "\u2a5f",
"wedge;": "\u2227",
"wedgeq;": "\u2259",
"weierp;": "\u2118",
"wfr;": "\U0001d534",
"wopf;": "\U0001d568",
"wp;": "\u2118",
"wr;": "\u2240",
"wreath;": "\u2240",
"wscr;": "\U0001d4cc",
"xcap;": "\u22c2",
"xcirc;": "\u25ef",
"xcup;": "\u22c3",
"xdtri;": "\u25bd",
"xfr;": "\U0001d535",
"xhArr;": "\u27fa",
"xharr;": "\u27f7",
"xi;": "\u03be",
"xlArr;": "\u27f8",
"xlarr;": "\u27f5",
"xmap;": "\u27fc",
"xnis;": "\u22fb",
"xodot;": "\u2a00",
"xopf;": "\U0001d569",
"xoplus;": "\u2a01",
"xotime;": "\u2a02",
"xrArr;": "\u27f9",
"xrarr;": "\u27f6",
"xscr;": "\U0001d4cd",
"xsqcup;": "\u2a06",
"xuplus;": "\u2a04",
"xutri;": "\u25b3",
"xvee;": "\u22c1",
"xwedge;": "\u22c0",
"yacute": "\xfd",
"yacute;": "\xfd",
"yacy;": "\u044f",
"ycirc;": "\u0177",
"ycy;": "\u044b",
"yen": "\xa5",
"yen;": "\xa5",
"yfr;": "\U0001d536",
"yicy;": "\u0457",
"yopf;": "\U0001d56a",
"yscr;": "\U0001d4ce",
"yucy;": "\u044e",
"yuml": "\xff",
"yuml;": "\xff",
"zacute;": "\u017a",
"zcaron;": "\u017e",
"zcy;": "\u0437",
"zdot;": "\u017c",
"zeetrf;": "\u2128",
"zeta;": "\u03b6",
"zfr;": "\U0001d537",
"zhcy;": "\u0436",
"zigrarr;": "\u21dd",
"zopf;": "\U0001d56b",
"zscr;": "\U0001d4cf",
"zwj;": "\u200d",
"zwnj;": "\u200c",
}
replacementCharacters = {
0x0: "\uFFFD",
0x0d: "\u000D",
0x80: "\u20AC",
0x81: "\u0081",
0x82: "\u201A",
0x83: "\u0192",
0x84: "\u201E",
0x85: "\u2026",
0x86: "\u2020",
0x87: "\u2021",
0x88: "\u02C6",
0x89: "\u2030",
0x8A: "\u0160",
0x8B: "\u2039",
0x8C: "\u0152",
0x8D: "\u008D",
0x8E: "\u017D",
0x8F: "\u008F",
0x90: "\u0090",
0x91: "\u2018",
0x92: "\u2019",
0x93: "\u201C",
0x94: "\u201D",
0x95: "\u2022",
0x96: "\u2013",
0x97: "\u2014",
0x98: "\u02DC",
0x99: "\u2122",
0x9A: "\u0161",
0x9B: "\u203A",
0x9C: "\u0153",
0x9D: "\u009D",
0x9E: "\u017E",
0x9F: "\u0178",
}
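# Illustrative helper (a sketch, not part of html5lib): numeric character
# references in the Windows-1252 "C1" range are remapped through the table
# above, e.g. 0x80 -> u"\u20AC" (EURO SIGN). The U+FFFD fallback for
# unlisted code points is an assumption of this sketch only.
def _demo_replacement_character(codepoint):
    return replacementCharacters.get(codepoint, u"\uFFFD")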
encodings = {
'437': 'cp437',
'850': 'cp850',
'852': 'cp852',
'855': 'cp855',
'857': 'cp857',
'860': 'cp860',
'861': 'cp861',
'862': 'cp862',
'863': 'cp863',
'865': 'cp865',
'866': 'cp866',
'869': 'cp869',
'ansix341968': 'ascii',
'ansix341986': 'ascii',
'arabic': 'iso8859-6',
'ascii': 'ascii',
'asmo708': 'iso8859-6',
'big5': 'big5',
'big5hkscs': 'big5hkscs',
'chinese': 'gbk',
'cp037': 'cp037',
'cp1026': 'cp1026',
'cp154': 'ptcp154',
'cp367': 'ascii',
'cp424': 'cp424',
'cp437': 'cp437',
'cp500': 'cp500',
'cp775': 'cp775',
'cp819': 'windows-1252',
'cp850': 'cp850',
'cp852': 'cp852',
'cp855': 'cp855',
'cp857': 'cp857',
'cp860': 'cp860',
'cp861': 'cp861',
'cp862': 'cp862',
'cp863': 'cp863',
'cp864': 'cp864',
'cp865': 'cp865',
'cp866': 'cp866',
'cp869': 'cp869',
'cp936': 'gbk',
'cpgr': 'cp869',
'cpis': 'cp861',
'csascii': 'ascii',
'csbig5': 'big5',
'cseuckr': 'cp949',
'cseucpkdfmtjapanese': 'euc_jp',
'csgb2312': 'gbk',
'cshproman8': 'hp-roman8',
'csibm037': 'cp037',
'csibm1026': 'cp1026',
'csibm424': 'cp424',
'csibm500': 'cp500',
'csibm855': 'cp855',
'csibm857': 'cp857',
'csibm860': 'cp860',
'csibm861': 'cp861',
'csibm863': 'cp863',
'csibm864': 'cp864',
'csibm865': 'cp865',
'csibm866': 'cp866',
'csibm869': 'cp869',
'csiso2022jp': 'iso2022_jp',
'csiso2022jp2': 'iso2022_jp_2',
'csiso2022kr': 'iso2022_kr',
'csiso58gb231280': 'gbk',
'csisolatin1': 'windows-1252',
'csisolatin2': 'iso8859-2',
'csisolatin3': 'iso8859-3',
'csisolatin4': 'iso8859-4',
'csisolatin5': 'windows-1254',
'csisolatin6': 'iso8859-10',
'csisolatinarabic': 'iso8859-6',
'csisolatincyrillic': 'iso8859-5',
'csisolatingreek': 'iso8859-7',
'csisolatinhebrew': 'iso8859-8',
'cskoi8r': 'koi8-r',
'csksc56011987': 'cp949',
'cspc775baltic': 'cp775',
'cspc850multilingual': 'cp850',
'cspc862latinhebrew': 'cp862',
'cspc8codepage437': 'cp437',
'cspcp852': 'cp852',
'csptcp154': 'ptcp154',
'csshiftjis': 'shift_jis',
'csunicode11utf7': 'utf-7',
'cyrillic': 'iso8859-5',
'cyrillicasian': 'ptcp154',
'ebcdiccpbe': 'cp500',
'ebcdiccpca': 'cp037',
'ebcdiccpch': 'cp500',
'ebcdiccphe': 'cp424',
'ebcdiccpnl': 'cp037',
'ebcdiccpus': 'cp037',
'ebcdiccpwt': 'cp037',
'ecma114': 'iso8859-6',
'ecma118': 'iso8859-7',
'elot928': 'iso8859-7',
'eucjp': 'euc_jp',
'euckr': 'cp949',
'extendedunixcodepackedformatforjapanese': 'euc_jp',
'gb18030': 'gb18030',
'gb2312': 'gbk',
'gb231280': 'gbk',
'gbk': 'gbk',
'greek': 'iso8859-7',
'greek8': 'iso8859-7',
'hebrew': 'iso8859-8',
'hproman8': 'hp-roman8',
'hzgb2312': 'hz',
'ibm037': 'cp037',
'ibm1026': 'cp1026',
'ibm367': 'ascii',
'ibm424': 'cp424',
'ibm437': 'cp437',
'ibm500': 'cp500',
'ibm775': 'cp775',
'ibm819': 'windows-1252',
'ibm850': 'cp850',
'ibm852': 'cp852',
'ibm855': 'cp855',
'ibm857': 'cp857',
'ibm860': 'cp860',
'ibm861': 'cp861',
'ibm862': 'cp862',
'ibm863': 'cp863',
'ibm864': 'cp864',
'ibm865': 'cp865',
'ibm866': 'cp866',
'ibm869': 'cp869',
'iso2022jp': 'iso2022_jp',
'iso2022jp2': 'iso2022_jp_2',
'iso2022kr': 'iso2022_kr',
'iso646irv1991': 'ascii',
'iso646us': 'ascii',
'iso88591': 'windows-1252',
'iso885910': 'iso8859-10',
'iso8859101992': 'iso8859-10',
'iso885911987': 'windows-1252',
'iso885913': 'iso8859-13',
'iso885914': 'iso8859-14',
'iso8859141998': 'iso8859-14',
'iso885915': 'iso8859-15',
'iso885916': 'iso8859-16',
'iso8859162001': 'iso8859-16',
'iso88592': 'iso8859-2',
'iso885921987': 'iso8859-2',
'iso88593': 'iso8859-3',
'iso885931988': 'iso8859-3',
'iso88594': 'iso8859-4',
'iso885941988': 'iso8859-4',
'iso88595': 'iso8859-5',
'iso885951988': 'iso8859-5',
'iso88596': 'iso8859-6',
'iso885961987': 'iso8859-6',
'iso88597': 'iso8859-7',
'iso885971987': 'iso8859-7',
'iso88598': 'iso8859-8',
'iso885981988': 'iso8859-8',
'iso88599': 'windows-1254',
'iso885991989': 'windows-1254',
'isoceltic': 'iso8859-14',
'isoir100': 'windows-1252',
'isoir101': 'iso8859-2',
'isoir109': 'iso8859-3',
'isoir110': 'iso8859-4',
'isoir126': 'iso8859-7',
'isoir127': 'iso8859-6',
'isoir138': 'iso8859-8',
'isoir144': 'iso8859-5',
'isoir148': 'windows-1254',
'isoir149': 'cp949',
'isoir157': 'iso8859-10',
'isoir199': 'iso8859-14',
'isoir226': 'iso8859-16',
'isoir58': 'gbk',
'isoir6': 'ascii',
'koi8r': 'koi8-r',
'koi8u': 'koi8-u',
'korean': 'cp949',
'ksc5601': 'cp949',
'ksc56011987': 'cp949',
'ksc56011989': 'cp949',
'l1': 'windows-1252',
'l10': 'iso8859-16',
'l2': 'iso8859-2',
'l3': 'iso8859-3',
'l4': 'iso8859-4',
'l5': 'windows-1254',
'l6': 'iso8859-10',
'l8': 'iso8859-14',
'latin1': 'windows-1252',
'latin10': 'iso8859-16',
'latin2': 'iso8859-2',
'latin3': 'iso8859-3',
'latin4': 'iso8859-4',
'latin5': 'windows-1254',
'latin6': 'iso8859-10',
'latin8': 'iso8859-14',
'latin9': 'iso8859-15',
'ms936': 'gbk',
'mskanji': 'shift_jis',
'pt154': 'ptcp154',
'ptcp154': 'ptcp154',
'r8': 'hp-roman8',
'roman8': 'hp-roman8',
'shiftjis': 'shift_jis',
'tis620': 'cp874',
'unicode11utf7': 'utf-7',
'us': 'ascii',
'usascii': 'ascii',
'utf16': 'utf-16',
'utf16be': 'utf-16-be',
'utf16le': 'utf-16-le',
'utf8': 'utf-8',
'windows1250': 'cp1250',
'windows1251': 'cp1251',
'windows1252': 'cp1252',
'windows1253': 'cp1253',
'windows1254': 'cp1254',
'windows1255': 'cp1255',
'windows1256': 'cp1256',
'windows1257': 'cp1257',
'windows1258': 'cp1258',
'windows936': 'gbk',
'x-x-big5': 'big5'}
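# Illustrative helper (a sketch, not part of html5lib): encoding labels are
# looked up after a rough normalisation (lower-case, punctuation removed);
# the normalisation html5lib really applies may differ in detail, e.g. the
# 'x-x-big5' key above keeps its hyphens.
def _demo_lookup_codec(label):
    normalized = u"".join(ch for ch in label.lower() if ch.isalnum())
    return encodings.get(normalized)  # e.g. "ISO-8859-1" -> "windows-1252"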
tokenTypes = {
"Doctype": 0,
"Characters": 1,
"SpaceCharacters": 2,
"StartTag": 3,
"EndTag": 4,
"EmptyTag": 5,
"Comment": 6,
"ParseError": 7
}
tagTokenTypes = frozenset([tokenTypes["StartTag"], tokenTypes["EndTag"],
tokenTypes["EmptyTag"]])
prefixes = dict([(v, k) for k, v in namespaces.items()])
prefixes["http://www.w3.org/1998/Math/MathML"] = "math"
class DataLossWarning(UserWarning):
pass
class ReparseException(Exception):
pass
| mit |
mivanov/editkit | editkit/users/models.py | 1 | 1488 | import urllib
from django.contrib.auth.models import User
from django.utils.encoding import smart_str
from django.db import models
from django.conf import settings
from django.contrib.sites.models import Site
#######################################################################
#
# "Monkey-pathcing is bad, but contib.auth is so badly designed, that
# using monkey-patching here should be no shame at all."
#
#######################################################################
User._meta.get_field_by_name('email')[0]._unique = True
def name_to_first_last(self, name):
"""
Takes 'name' and sets self.first_name, self.last_name.
Args:
name: A name string.
"""
split_name = name.split()
last_name = ''
if len(split_name) > 1:
last_name = ' '.join(split_name[1:])
first_name = split_name[0]
self.first_name = first_name
self.last_name = last_name
def first_last_to_name(self):
name = self.first_name
if self.last_name:
name = "%s %s" % (self.first_name, self.last_name)
return name
def get_absolute_url(self):
return "/Users/%s" % urllib.quote(smart_str(self.username))
User.name = property(first_last_to_name, name_to_first_last)
User.get_absolute_url = get_absolute_url
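# Illustrative round trip (a sketch; `user` is a hypothetical instance):
#     user.name = "Ada Lovelace"   # stores first_name="Ada", last_name="Lovelace"
#     user.name                    # -> "Ada Lovelace"
#     user.get_absolute_url()      # -> "/Users/ada" for username "ada"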
class UserProfile(models.Model):
# this field is required
user = models.OneToOneField(User)
subscribed = models.BooleanField(verbose_name=settings.SUBSCRIBE_MESSAGE)
import signals # to fire signals
| gpl-2.0 |
p4datasystems/CarnotKE | jyhton/lib-python/2.7/test/test_runpy.py | 43 | 16184 | # Test the runpy module
import unittest
import os
import os.path
import sys
import re
import tempfile
from test.test_support import verbose, run_unittest, forget
from test.script_helper import (temp_dir, make_script, compile_script,
make_pkg, make_zip_script, make_zip_pkg)
from runpy import _run_code, _run_module_code, run_module, run_path
# Note: This module can't safely test _run_module_as_main as it
# runs its tests in the current process, which would mess with the
# real __main__ module (usually test.regrtest)
# See test_cmd_line_script for a test that executes that code path
# Set up the test code and expected results
class RunModuleCodeTest(unittest.TestCase):
"""Unit tests for runpy._run_code and runpy._run_module_code"""
expected_result = ["Top level assignment", "Lower level reference"]
test_source = (
"# Check basic code execution\n"
"result = ['Top level assignment']\n"
"def f():\n"
" result.append('Lower level reference')\n"
"f()\n"
"# Check the sys module\n"
"import sys\n"
"run_argv0 = sys.argv[0]\n"
"run_name_in_sys_modules = __name__ in sys.modules\n"
"if run_name_in_sys_modules:\n"
" module_in_sys_modules = globals() is sys.modules[__name__].__dict__\n"
"# Check nested operation\n"
"import runpy\n"
"nested = runpy._run_module_code('x=1\\n', mod_name='<run>')\n"
)
def test_run_code(self):
saved_argv0 = sys.argv[0]
d = _run_code(self.test_source, {})
self.assertEqual(d["result"], self.expected_result)
self.assertIs(d["__name__"], None)
self.assertIs(d["__file__"], None)
self.assertIs(d["__loader__"], None)
self.assertIs(d["__package__"], None)
self.assertIs(d["run_argv0"], saved_argv0)
self.assertNotIn("run_name", d)
self.assertIs(sys.argv[0], saved_argv0)
def test_run_module_code(self):
initial = object()
name = "<Nonsense>"
file = "Some other nonsense"
loader = "Now you're just being silly"
package = '' # Treat as a top level module
d1 = dict(initial=initial)
saved_argv0 = sys.argv[0]
d2 = _run_module_code(self.test_source,
d1,
name,
file,
loader,
package)
self.assertNotIn("result", d1)
self.assertIs(d2["initial"], initial)
self.assertEqual(d2["result"], self.expected_result)
self.assertEqual(d2["nested"]["x"], 1)
self.assertIs(d2["__name__"], name)
self.assertTrue(d2["run_name_in_sys_modules"])
self.assertTrue(d2["module_in_sys_modules"])
self.assertIs(d2["__file__"], file)
self.assertIs(d2["run_argv0"], file)
self.assertIs(d2["__loader__"], loader)
self.assertIs(d2["__package__"], package)
self.assertIs(sys.argv[0], saved_argv0)
self.assertNotIn(name, sys.modules)
class RunModuleTest(unittest.TestCase):
"""Unit tests for runpy.run_module"""
def expect_import_error(self, mod_name):
try:
run_module(mod_name)
except ImportError:
pass
else:
self.fail("Expected import error for " + mod_name)
def test_invalid_names(self):
# Builtin module
self.expect_import_error("sys")
# Non-existent modules
self.expect_import_error("sys.imp.eric")
self.expect_import_error("os.path.half")
self.expect_import_error("a.bee")
self.expect_import_error(".howard")
self.expect_import_error("..eaten")
# Package without __main__.py
self.expect_import_error("multiprocessing")
def test_library_module(self):
run_module("runpy")
def _add_pkg_dir(self, pkg_dir):
os.mkdir(pkg_dir)
pkg_fname = os.path.join(pkg_dir, "__init__"+os.extsep+"py")
pkg_file = open(pkg_fname, "w")
pkg_file.close()
return pkg_fname
def _make_pkg(self, source, depth, mod_base="runpy_test"):
pkg_name = "__runpy_pkg__"
test_fname = mod_base+os.extsep+"py"
pkg_dir = sub_dir = tempfile.mkdtemp()
if verbose: print " Package tree in:", sub_dir
sys.path.insert(0, pkg_dir)
if verbose: print " Updated sys.path:", sys.path[0]
for i in range(depth):
sub_dir = os.path.join(sub_dir, pkg_name)
pkg_fname = self._add_pkg_dir(sub_dir)
if verbose: print " Next level in:", sub_dir
if verbose: print " Created:", pkg_fname
mod_fname = os.path.join(sub_dir, test_fname)
mod_file = open(mod_fname, "w")
mod_file.write(source)
mod_file.close()
if verbose: print " Created:", mod_fname
mod_name = (pkg_name+".")*depth + mod_base
return pkg_dir, mod_fname, mod_name
def _del_pkg(self, top, depth, mod_name):
for entry in list(sys.modules):
if entry.startswith("__runpy_pkg__"):
del sys.modules[entry]
if verbose: print " Removed sys.modules entries"
del sys.path[0]
if verbose: print " Removed sys.path entry"
for root, dirs, files in os.walk(top, topdown=False):
for name in files:
try:
os.remove(os.path.join(root, name))
except OSError, ex:
if verbose: print ex # Persist with cleaning up
for name in dirs:
fullname = os.path.join(root, name)
try:
os.rmdir(fullname)
except OSError, ex:
if verbose: print ex # Persist with cleaning up
try:
os.rmdir(top)
if verbose: print " Removed package tree"
except OSError, ex:
if verbose: print ex # Persist with cleaning up
def _check_module(self, depth):
pkg_dir, mod_fname, mod_name = (
self._make_pkg("x=1\n", depth))
forget(mod_name)
try:
if verbose: print "Running from source:", mod_name
d1 = run_module(mod_name) # Read from source
self.assertIn("x", d1)
self.assertTrue(d1["x"] == 1)
del d1 # Ensure __loader__ entry doesn't keep file open
__import__(mod_name)
os.remove(mod_fname)
if verbose: print "Running from compiled:", mod_name
d2 = run_module(mod_name) # Read from bytecode
self.assertIn("x", d2)
self.assertTrue(d2["x"] == 1)
del d2 # Ensure __loader__ entry doesn't keep file open
finally:
self._del_pkg(pkg_dir, depth, mod_name)
if verbose: print "Module executed successfully"
def _check_package(self, depth):
pkg_dir, mod_fname, mod_name = (
self._make_pkg("x=1\n", depth, "__main__"))
pkg_name, _, _ = mod_name.rpartition(".")
forget(mod_name)
try:
if verbose: print "Running from source:", pkg_name
d1 = run_module(pkg_name) # Read from source
self.assertIn("x", d1)
self.assertTrue(d1["x"] == 1)
del d1 # Ensure __loader__ entry doesn't keep file open
__import__(mod_name)
os.remove(mod_fname)
if verbose: print "Running from compiled:", pkg_name
d2 = run_module(pkg_name) # Read from bytecode
self.assertIn("x", d2)
self.assertTrue(d2["x"] == 1)
del d2 # Ensure __loader__ entry doesn't keep file open
finally:
self._del_pkg(pkg_dir, depth, pkg_name)
if verbose: print "Package executed successfully"
def _add_relative_modules(self, base_dir, source, depth):
if depth <= 1:
raise ValueError("Relative module test needs depth > 1")
pkg_name = "__runpy_pkg__"
module_dir = base_dir
for i in range(depth):
parent_dir = module_dir
module_dir = os.path.join(module_dir, pkg_name)
# Add sibling module
sibling_fname = os.path.join(module_dir, "sibling"+os.extsep+"py")
sibling_file = open(sibling_fname, "w")
sibling_file.close()
if verbose: print " Added sibling module:", sibling_fname
# Add nephew module
uncle_dir = os.path.join(parent_dir, "uncle")
self._add_pkg_dir(uncle_dir)
if verbose: print " Added uncle package:", uncle_dir
cousin_dir = os.path.join(uncle_dir, "cousin")
self._add_pkg_dir(cousin_dir)
if verbose: print " Added cousin package:", cousin_dir
nephew_fname = os.path.join(cousin_dir, "nephew"+os.extsep+"py")
nephew_file = open(nephew_fname, "w")
nephew_file.close()
if verbose: print " Added nephew module:", nephew_fname
def _check_relative_imports(self, depth, run_name=None):
contents = r"""\
from __future__ import absolute_import
from . import sibling
from ..uncle.cousin import nephew
"""
pkg_dir, mod_fname, mod_name = (
self._make_pkg(contents, depth))
try:
self._add_relative_modules(pkg_dir, contents, depth)
pkg_name = mod_name.rpartition('.')[0]
if verbose: print "Running from source:", mod_name
d1 = run_module(mod_name, run_name=run_name) # Read from source
self.assertIn("__package__", d1)
self.assertTrue(d1["__package__"] == pkg_name)
self.assertIn("sibling", d1)
self.assertIn("nephew", d1)
del d1 # Ensure __loader__ entry doesn't keep file open
__import__(mod_name)
os.remove(mod_fname)
if verbose: print "Running from compiled:", mod_name
d2 = run_module(mod_name, run_name=run_name) # Read from bytecode
self.assertIn("__package__", d2)
self.assertTrue(d2["__package__"] == pkg_name)
self.assertIn("sibling", d2)
self.assertIn("nephew", d2)
del d2 # Ensure __loader__ entry doesn't keep file open
finally:
self._del_pkg(pkg_dir, depth, mod_name)
if verbose: print "Module executed successfully"
def test_run_module(self):
for depth in range(4):
if verbose: print "Testing package depth:", depth
self._check_module(depth)
def test_run_package(self):
for depth in range(1, 4):
if verbose: print "Testing package depth:", depth
self._check_package(depth)
def test_explicit_relative_import(self):
for depth in range(2, 5):
if verbose: print "Testing relative imports at depth:", depth
self._check_relative_imports(depth)
def test_main_relative_import(self):
for depth in range(2, 5):
if verbose: print "Testing main relative imports at depth:", depth
self._check_relative_imports(depth, "__main__")
class RunPathTest(unittest.TestCase):
"""Unit tests for runpy.run_path"""
# Based on corresponding tests in test_cmd_line_script
test_source = """\
# Script may be run with optimisation enabled, so don't rely on assert
# statements being executed
def assertEqual(lhs, rhs):
if lhs != rhs:
raise AssertionError('%r != %r' % (lhs, rhs))
def assertIs(lhs, rhs):
if lhs is not rhs:
raise AssertionError('%r is not %r' % (lhs, rhs))
# Check basic code execution
result = ['Top level assignment']
def f():
result.append('Lower level reference')
f()
assertEqual(result, ['Top level assignment', 'Lower level reference'])
# Check the sys module
import sys
assertIs(globals(), sys.modules[__name__].__dict__)
argv0 = sys.argv[0]
"""
def _make_test_script(self, script_dir, script_basename, source=None):
if source is None:
source = self.test_source
return make_script(script_dir, script_basename, source)
def _check_script(self, script_name, expected_name, expected_file,
expected_argv0, expected_package):
result = run_path(script_name)
self.assertEqual(result["__name__"], expected_name)
self.assertEqual(result["__file__"], expected_file)
self.assertIn("argv0", result)
self.assertEqual(result["argv0"], expected_argv0)
self.assertEqual(result["__package__"], expected_package)
def _check_import_error(self, script_name, msg):
msg = re.escape(msg)
self.assertRaisesRegexp(ImportError, msg, run_path, script_name)
def test_basic_script(self):
with temp_dir() as script_dir:
mod_name = 'script'
script_name = self._make_test_script(script_dir, mod_name)
self._check_script(script_name, "<run_path>", script_name,
script_name, None)
def test_script_compiled(self):
with temp_dir() as script_dir:
mod_name = 'script'
script_name = self._make_test_script(script_dir, mod_name)
compiled_name = compile_script(script_name)
os.remove(script_name)
self._check_script(compiled_name, "<run_path>", compiled_name,
compiled_name, None)
def test_directory(self):
with temp_dir() as script_dir:
mod_name = '__main__'
script_name = self._make_test_script(script_dir, mod_name)
self._check_script(script_dir, "<run_path>", script_name,
script_dir, '')
def test_directory_compiled(self):
with temp_dir() as script_dir:
mod_name = '__main__'
script_name = self._make_test_script(script_dir, mod_name)
compiled_name = compile_script(script_name)
os.remove(script_name)
self._check_script(script_dir, "<run_path>", compiled_name,
script_dir, '')
def test_directory_error(self):
with temp_dir() as script_dir:
mod_name = 'not_main'
script_name = self._make_test_script(script_dir, mod_name)
msg = "can't find '__main__' module in %r" % script_dir
self._check_import_error(script_dir, msg)
def test_zipfile(self):
with temp_dir() as script_dir:
mod_name = '__main__'
script_name = self._make_test_script(script_dir, mod_name)
zip_name, fname = make_zip_script(script_dir, 'test_zip', script_name)
self._check_script(zip_name, "<run_path>", fname, zip_name, '')
def test_zipfile_compiled(self):
with temp_dir() as script_dir:
mod_name = '__main__'
script_name = self._make_test_script(script_dir, mod_name)
compiled_name = compile_script(script_name)
zip_name, fname = make_zip_script(script_dir, 'test_zip', compiled_name)
self._check_script(zip_name, "<run_path>", fname, zip_name, '')
def test_zipfile_error(self):
with temp_dir() as script_dir:
mod_name = 'not_main'
script_name = self._make_test_script(script_dir, mod_name)
zip_name, fname = make_zip_script(script_dir, 'test_zip', script_name)
msg = "can't find '__main__' module in %r" % zip_name
self._check_import_error(zip_name, msg)
def test_main_recursion_error(self):
with temp_dir() as script_dir, temp_dir() as dummy_dir:
mod_name = '__main__'
source = ("import runpy\n"
"runpy.run_path(%r)\n") % dummy_dir
script_name = self._make_test_script(script_dir, mod_name, source)
zip_name, fname = make_zip_script(script_dir, 'test_zip', script_name)
msg = "recursion depth exceeded"
self.assertRaisesRegexp(RuntimeError, msg, run_path, zip_name)
def test_main():
run_unittest(RunModuleCodeTest, RunModuleTest, RunPathTest)
if __name__ == "__main__":
test_main()
| apache-2.0 |
grantsewell/nzbToMedia | libs/unidecode/x082.py | 252 | 4649 | data = (
'Yao ', # 0x00
'Yu ', # 0x01
'Chong ', # 0x02
'Xi ', # 0x03
'Xi ', # 0x04
'Jiu ', # 0x05
'Yu ', # 0x06
'Yu ', # 0x07
'Xing ', # 0x08
'Ju ', # 0x09
'Jiu ', # 0x0a
'Xin ', # 0x0b
'She ', # 0x0c
'She ', # 0x0d
'Yadoru ', # 0x0e
'Jiu ', # 0x0f
'Shi ', # 0x10
'Tan ', # 0x11
'Shu ', # 0x12
'Shi ', # 0x13
'Tian ', # 0x14
'Dan ', # 0x15
'Pu ', # 0x16
'Pu ', # 0x17
'Guan ', # 0x18
'Hua ', # 0x19
'Tan ', # 0x1a
'Chuan ', # 0x1b
'Shun ', # 0x1c
'Xia ', # 0x1d
'Wu ', # 0x1e
'Zhou ', # 0x1f
'Dao ', # 0x20
'Gang ', # 0x21
'Shan ', # 0x22
'Yi ', # 0x23
'[?] ', # 0x24
'Pa ', # 0x25
'Tai ', # 0x26
'Fan ', # 0x27
'Ban ', # 0x28
'Chuan ', # 0x29
'Hang ', # 0x2a
'Fang ', # 0x2b
'Ban ', # 0x2c
'Que ', # 0x2d
'Hesaki ', # 0x2e
'Zhong ', # 0x2f
'Jian ', # 0x30
'Cang ', # 0x31
'Ling ', # 0x32
'Zhu ', # 0x33
'Ze ', # 0x34
'Duo ', # 0x35
'Bo ', # 0x36
'Xian ', # 0x37
'Ge ', # 0x38
'Chuan ', # 0x39
'Jia ', # 0x3a
'Lu ', # 0x3b
'Hong ', # 0x3c
'Pang ', # 0x3d
'Xi ', # 0x3e
'[?] ', # 0x3f
'Fu ', # 0x40
'Zao ', # 0x41
'Feng ', # 0x42
'Li ', # 0x43
'Shao ', # 0x44
'Yu ', # 0x45
'Lang ', # 0x46
'Ting ', # 0x47
'[?] ', # 0x48
'Wei ', # 0x49
'Bo ', # 0x4a
'Meng ', # 0x4b
'Nian ', # 0x4c
'Ju ', # 0x4d
'Huang ', # 0x4e
'Shou ', # 0x4f
'Zong ', # 0x50
'Bian ', # 0x51
'Mao ', # 0x52
'Die ', # 0x53
'[?] ', # 0x54
'Bang ', # 0x55
'Cha ', # 0x56
'Yi ', # 0x57
'Sao ', # 0x58
'Cang ', # 0x59
'Cao ', # 0x5a
'Lou ', # 0x5b
'Dai ', # 0x5c
'Sori ', # 0x5d
'Yao ', # 0x5e
'Tong ', # 0x5f
'Yofune ', # 0x60
'Dang ', # 0x61
'Tan ', # 0x62
'Lu ', # 0x63
'Yi ', # 0x64
'Jie ', # 0x65
'Jian ', # 0x66
'Huo ', # 0x67
'Meng ', # 0x68
'Qi ', # 0x69
'Lu ', # 0x6a
'Lu ', # 0x6b
'Chan ', # 0x6c
'Shuang ', # 0x6d
'Gen ', # 0x6e
'Liang ', # 0x6f
'Jian ', # 0x70
'Jian ', # 0x71
'Se ', # 0x72
'Yan ', # 0x73
'Fu ', # 0x74
'Ping ', # 0x75
'Yan ', # 0x76
'Yan ', # 0x77
'Cao ', # 0x78
'Cao ', # 0x79
'Yi ', # 0x7a
'Le ', # 0x7b
'Ting ', # 0x7c
'Qiu ', # 0x7d
'Ai ', # 0x7e
'Nai ', # 0x7f
'Tiao ', # 0x80
'Jiao ', # 0x81
'Jie ', # 0x82
'Peng ', # 0x83
'Wan ', # 0x84
'Yi ', # 0x85
'Chai ', # 0x86
'Mian ', # 0x87
'Mie ', # 0x88
'Gan ', # 0x89
'Qian ', # 0x8a
'Yu ', # 0x8b
'Yu ', # 0x8c
'Shuo ', # 0x8d
'Qiong ', # 0x8e
'Tu ', # 0x8f
'Xia ', # 0x90
'Qi ', # 0x91
'Mang ', # 0x92
'Zi ', # 0x93
'Hui ', # 0x94
'Sui ', # 0x95
'Zhi ', # 0x96
'Xiang ', # 0x97
'Bi ', # 0x98
'Fu ', # 0x99
'Tun ', # 0x9a
'Wei ', # 0x9b
'Wu ', # 0x9c
'Zhi ', # 0x9d
'Qi ', # 0x9e
'Shan ', # 0x9f
'Wen ', # 0xa0
'Qian ', # 0xa1
'Ren ', # 0xa2
'Fou ', # 0xa3
'Kou ', # 0xa4
'Jie ', # 0xa5
'Lu ', # 0xa6
'Xu ', # 0xa7
'Ji ', # 0xa8
'Qin ', # 0xa9
'Qi ', # 0xaa
'Yuan ', # 0xab
'Fen ', # 0xac
'Ba ', # 0xad
'Rui ', # 0xae
'Xin ', # 0xaf
'Ji ', # 0xb0
'Hua ', # 0xb1
'Hua ', # 0xb2
'Fang ', # 0xb3
'Wu ', # 0xb4
'Jue ', # 0xb5
'Gou ', # 0xb6
'Zhi ', # 0xb7
'Yun ', # 0xb8
'Qin ', # 0xb9
'Ao ', # 0xba
'Chu ', # 0xbb
'Mao ', # 0xbc
'Ya ', # 0xbd
'Fei ', # 0xbe
'Reng ', # 0xbf
'Hang ', # 0xc0
'Cong ', # 0xc1
'Yin ', # 0xc2
'You ', # 0xc3
'Bian ', # 0xc4
'Yi ', # 0xc5
'Susa ', # 0xc6
'Wei ', # 0xc7
'Li ', # 0xc8
'Pi ', # 0xc9
'E ', # 0xca
'Xian ', # 0xcb
'Chang ', # 0xcc
'Cang ', # 0xcd
'Meng ', # 0xce
'Su ', # 0xcf
'Yi ', # 0xd0
'Yuan ', # 0xd1
'Ran ', # 0xd2
'Ling ', # 0xd3
'Tai ', # 0xd4
'Tiao ', # 0xd5
'Di ', # 0xd6
'Miao ', # 0xd7
'Qiong ', # 0xd8
'Li ', # 0xd9
'Yong ', # 0xda
'Ke ', # 0xdb
'Mu ', # 0xdc
'Pei ', # 0xdd
'Bao ', # 0xde
'Gou ', # 0xdf
'Min ', # 0xe0
'Yi ', # 0xe1
'Yi ', # 0xe2
'Ju ', # 0xe3
'Pi ', # 0xe4
'Ruo ', # 0xe5
'Ku ', # 0xe6
'Zhu ', # 0xe7
'Ni ', # 0xe8
'Bo ', # 0xe9
'Bing ', # 0xea
'Shan ', # 0xeb
'Qiu ', # 0xec
'Yao ', # 0xed
'Xian ', # 0xee
'Ben ', # 0xef
'Hong ', # 0xf0
'Ying ', # 0xf1
'Zha ', # 0xf2
'Dong ', # 0xf3
'Ju ', # 0xf4
'Die ', # 0xf5
'Nie ', # 0xf6
'Gan ', # 0xf7
'Hu ', # 0xf8
'Ping ', # 0xf9
'Mei ', # 0xfa
'Fu ', # 0xfb
'Sheng ', # 0xfc
'Gu ', # 0xfd
'Bi ', # 0xfe
'Wei ', # 0xff
)
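# Illustrative lookup (a sketch, not part of unidecode): this table covers
# code points U+8200-U+82FF and is indexed by the low byte of the code
# point, so e.g. data[0x30] == 'Jian '.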
| gpl-3.0 |
s0enke/boto | boto/exception.py | 117 | 17106 | # Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010, Eucalyptus Systems, Inc.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Exception classes - Subclassing allows you to check for specific errors
"""
import base64
import xml.sax
import boto
from boto import handler
from boto.compat import json, StandardError
from boto.resultset import ResultSet
class BotoClientError(StandardError):
"""
General Boto Client error (error accessing AWS)
"""
def __init__(self, reason, *args):
super(BotoClientError, self).__init__(reason, *args)
self.reason = reason
def __repr__(self):
return 'BotoClientError: %s' % self.reason
def __str__(self):
return 'BotoClientError: %s' % self.reason
class SDBPersistenceError(StandardError):
pass
class StoragePermissionsError(BotoClientError):
"""
Permissions error when accessing a bucket or key on a storage service.
"""
pass
class S3PermissionsError(StoragePermissionsError):
"""
Permissions error when accessing a bucket or key on S3.
"""
pass
class GSPermissionsError(StoragePermissionsError):
"""
Permissions error when accessing a bucket or key on GS.
"""
pass
class BotoServerError(StandardError):
def __init__(self, status, reason, body=None, *args):
super(BotoServerError, self).__init__(status, reason, body, *args)
self.status = status
self.reason = reason
self.body = body or ''
self.request_id = None
self.error_code = None
self._error_message = None
self.message = ''
self.box_usage = None
if isinstance(self.body, bytes):
try:
self.body = self.body.decode('utf-8')
except UnicodeDecodeError:
boto.log.debug('Unable to decode body from bytes!')
# Attempt to parse the error response. If body isn't present,
# then just ignore the error response.
if self.body:
# Check if it looks like a ``dict``.
if hasattr(self.body, 'items'):
# It's not a string, so trying to parse it will fail.
# But since it's data, we can work with that.
self.request_id = self.body.get('RequestId', None)
if 'Error' in self.body:
# XML-style
error = self.body.get('Error', {})
self.error_code = error.get('Code', None)
self.message = error.get('Message', None)
else:
# JSON-style.
self.message = self.body.get('message', None)
else:
try:
h = handler.XmlHandlerWrapper(self, self)
h.parseString(self.body)
except (TypeError, xml.sax.SAXParseException):
# What if it's JSON? Let's try that.
try:
parsed = json.loads(self.body)
if 'RequestId' in parsed:
self.request_id = parsed['RequestId']
if 'Error' in parsed:
if 'Code' in parsed['Error']:
self.error_code = parsed['Error']['Code']
if 'Message' in parsed['Error']:
self.message = parsed['Error']['Message']
except (TypeError, ValueError):
# Remove unparsable message body so we don't include garbage
# in exception. But first, save self.body in self.error_message
# because occasionally we get error messages from Eucalyptus
# that are just text strings that we want to preserve.
self.message = self.body
self.body = None
def __getattr__(self, name):
if name == 'error_message':
return self.message
if name == 'code':
return self.error_code
raise AttributeError
def __setattr__(self, name, value):
if name == 'error_message':
self.message = value
else:
super(BotoServerError, self).__setattr__(name, value)
def __repr__(self):
return '%s: %s %s\n%s' % (self.__class__.__name__,
self.status, self.reason, self.body)
def __str__(self):
return '%s: %s %s\n%s' % (self.__class__.__name__,
self.status, self.reason, self.body)
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name in ('RequestId', 'RequestID'):
self.request_id = value
elif name == 'Code':
self.error_code = value
elif name == 'Message':
self.message = value
elif name == 'BoxUsage':
self.box_usage = value
return None
def _cleanupParsedProperties(self):
self.request_id = None
self.error_code = None
self.message = None
self.box_usage = None
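# Illustrative helper (a sketch, not part of boto): the attributes parsed
# above allow callers to log a structured summary of any server error.
def _demo_describe_server_error(exc):
    return "%s %s [%s] %s" % (exc.status, exc.reason,
                              exc.error_code, exc.message)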
class ConsoleOutput(object):
def __init__(self, parent=None):
self.parent = parent
self.instance_id = None
self.timestamp = None
self.comment = None
self.output = None
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'instanceId':
self.instance_id = value
elif name == 'output':
self.output = base64.b64decode(value)
else:
setattr(self, name, value)
class StorageCreateError(BotoServerError):
"""
Error creating a bucket or key on a storage service.
"""
def __init__(self, status, reason, body=None):
self.bucket = None
super(StorageCreateError, self).__init__(status, reason, body)
def endElement(self, name, value, connection):
if name == 'BucketName':
self.bucket = value
else:
return super(StorageCreateError, self).endElement(name, value, connection)
class S3CreateError(StorageCreateError):
"""
Error creating a bucket or key on S3.
"""
pass
class GSCreateError(StorageCreateError):
"""
Error creating a bucket or key on GS.
"""
pass
class StorageCopyError(BotoServerError):
"""
Error copying a key on a storage service.
"""
pass
class S3CopyError(StorageCopyError):
"""
Error copying a key on S3.
"""
pass
class GSCopyError(StorageCopyError):
"""
Error copying a key on GS.
"""
pass
class SQSError(BotoServerError):
"""
General Error on Simple Queue Service.
"""
def __init__(self, status, reason, body=None):
self.detail = None
self.type = None
super(SQSError, self).__init__(status, reason, body)
def startElement(self, name, attrs, connection):
return super(SQSError, self).startElement(name, attrs, connection)
def endElement(self, name, value, connection):
if name == 'Detail':
self.detail = value
elif name == 'Type':
self.type = value
else:
return super(SQSError, self).endElement(name, value, connection)
def _cleanupParsedProperties(self):
super(SQSError, self)._cleanupParsedProperties()
for p in ('detail', 'type'):
setattr(self, p, None)
class SQSDecodeError(BotoClientError):
"""
Error when decoding an SQS message.
"""
def __init__(self, reason, message):
super(SQSDecodeError, self).__init__(reason, message)
self.message = message
def __repr__(self):
return 'SQSDecodeError: %s' % self.reason
def __str__(self):
return 'SQSDecodeError: %s' % self.reason
class StorageResponseError(BotoServerError):
"""
Error in response from a storage service.
"""
def __init__(self, status, reason, body=None):
self.resource = None
super(StorageResponseError, self).__init__(status, reason, body)
def startElement(self, name, attrs, connection):
return super(StorageResponseError, self).startElement(
name, attrs, connection)
def endElement(self, name, value, connection):
if name == 'Resource':
self.resource = value
else:
return super(StorageResponseError, self).endElement(
name, value, connection)
def _cleanupParsedProperties(self):
super(StorageResponseError, self)._cleanupParsedProperties()
for p in ('resource',):
setattr(self, p, None)
class S3ResponseError(StorageResponseError):
"""
Error in response from S3.
"""
pass
class GSResponseError(StorageResponseError):
"""
Error in response from GS.
"""
pass
class EC2ResponseError(BotoServerError):
"""
Error in response from EC2.
"""
def __init__(self, status, reason, body=None):
self.errors = None
self._errorResultSet = []
super(EC2ResponseError, self).__init__(status, reason, body)
self.errors = [
(e.error_code, e.error_message) for e in self._errorResultSet]
if len(self.errors):
self.error_code, self.error_message = self.errors[0]
def startElement(self, name, attrs, connection):
if name == 'Errors':
self._errorResultSet = ResultSet([('Error', _EC2Error)])
return self._errorResultSet
else:
return None
def endElement(self, name, value, connection):
if name == 'RequestID':
self.request_id = value
else:
return None # don't call subclass here
def _cleanupParsedProperties(self):
super(EC2ResponseError, self)._cleanupParsedProperties()
self._errorResultSet = []
for p in ('errors',):
setattr(self, p, None)
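# Illustrative usage (a sketch, not part of boto): the aggregated
# (code, message) pairs collected above can be flattened for logging.
def _demo_collect_ec2_errors(exc):
    return ["%s: %s" % (code, message) for code, message in exc.errors]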
class JSONResponseError(BotoServerError):
"""
This exception expects the fully parsed and decoded JSON response
body to be passed as the body parameter.
:ivar status: The HTTP status code.
:ivar reason: The HTTP reason message.
:ivar body: The Python dict that represents the decoded JSON
response body.
:ivar error_message: The full description of the AWS error encountered.
:ivar error_code: A short string that identifies the AWS error
(e.g. ConditionalCheckFailedException)
"""
def __init__(self, status, reason, body=None, *args):
self.status = status
self.reason = reason
self.body = body
if self.body:
self.error_message = self.body.get('message', None)
self.error_code = self.body.get('__type', None)
if self.error_code:
self.error_code = self.error_code.split('#')[-1]
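# A brief sketch (hypothetical values) of how the decoded JSON body maps onto
# the attributes documented above:
#
#   body = {'__type': 'com.amazonaws.dynamodb.v20120810#ConditionalCheckFailedException',
#           'message': 'The conditional request failed'}
#   err = JSONResponseError(400, 'Bad Request', body)
#   err.error_code     # 'ConditionalCheckFailedException' (text after the '#')
#   err.error_message  # 'The conditional request failed'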
class DynamoDBResponseError(JSONResponseError):
pass
class SWFResponseError(JSONResponseError):
pass
class EmrResponseError(BotoServerError):
"""
Error in response from EMR
"""
pass
class _EC2Error(object):
def __init__(self, connection=None):
self.connection = connection
self.error_code = None
self.error_message = None
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'Code':
self.error_code = value
elif name == 'Message':
self.error_message = value
else:
return None
class SDBResponseError(BotoServerError):
"""
    Error in response from SDB.
"""
pass
class AWSConnectionError(BotoClientError):
"""
General error connecting to Amazon Web Services.
"""
pass
class StorageDataError(BotoClientError):
"""
Error receiving data from a storage service.
"""
pass
class S3DataError(StorageDataError):
"""
Error receiving data from S3.
"""
pass
class GSDataError(StorageDataError):
"""
Error receiving data from GS.
"""
pass
class InvalidUriError(Exception):
"""Exception raised when URI is invalid."""
def __init__(self, message):
super(InvalidUriError, self).__init__(message)
self.message = message
class InvalidAclError(Exception):
"""Exception raised when ACL XML is invalid."""
def __init__(self, message):
super(InvalidAclError, self).__init__(message)
self.message = message
class InvalidCorsError(Exception):
"""Exception raised when CORS XML is invalid."""
def __init__(self, message):
super(InvalidCorsError, self).__init__(message)
self.message = message
class NoAuthHandlerFound(Exception):
"""Is raised when no auth handlers were found ready to authenticate."""
pass
class InvalidLifecycleConfigError(Exception):
"""Exception raised when GCS lifecycle configuration XML is invalid."""
def __init__(self, message):
super(InvalidLifecycleConfigError, self).__init__(message)
self.message = message
# Enum class for resumable upload failure disposition.
class ResumableTransferDisposition(object):
# START_OVER means an attempt to resume an existing transfer failed,
# and a new resumable upload should be attempted (without delay).
START_OVER = 'START_OVER'
# WAIT_BEFORE_RETRY means the resumable transfer failed but that it can
# be retried after a time delay within the current process.
WAIT_BEFORE_RETRY = 'WAIT_BEFORE_RETRY'
# ABORT_CUR_PROCESS means the resumable transfer failed and that
# delaying/retrying within the current process will not help. If
# resumable transfer included a state tracker file the upload can be
# retried again later, in another process (e.g., a later run of gsutil).
ABORT_CUR_PROCESS = 'ABORT_CUR_PROCESS'
# ABORT means the resumable transfer failed in a way that it does not
# make sense to continue in the current process, and further that the
# current tracker ID should not be preserved (in a tracker file if one
# was specified at resumable upload start time). If the user tries again
# later (e.g., a separate run of gsutil) it will get a new resumable
# upload ID.
ABORT = 'ABORT'
class ResumableUploadException(Exception):
"""
Exception raised for various resumable upload problems.
self.disposition is of type ResumableTransferDisposition.
"""
def __init__(self, message, disposition):
super(ResumableUploadException, self).__init__(message, disposition)
self.message = message
self.disposition = disposition
def __repr__(self):
return 'ResumableUploadException("%s", %s)' % (
self.message, self.disposition)
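# A minimal sketch (not part of boto; the uploader object, delay value and
# restart_upload helper are illustrative assumptions) of how a caller might
# branch on the disposition carried by this exception:
#
#   try:
#       uploader.send_file(fp)
#   except ResumableUploadException as e:
#       if e.disposition == ResumableTransferDisposition.WAIT_BEFORE_RETRY:
#           time.sleep(delay)    # retry within this process after a pause
#       elif e.disposition == ResumableTransferDisposition.START_OVER:
#           restart_upload(fp)   # hypothetical: begin a fresh resumable upload
#       else:
#           raise                # ABORT / ABORT_CUR_PROCESS: stop trying here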
class ResumableDownloadException(Exception):
"""
Exception raised for various resumable download problems.
self.disposition is of type ResumableTransferDisposition.
"""
def __init__(self, message, disposition):
super(ResumableDownloadException, self).__init__(message, disposition)
self.message = message
self.disposition = disposition
def __repr__(self):
return 'ResumableDownloadException("%s", %s)' % (
self.message, self.disposition)
class TooManyRecordsException(Exception):
"""
Exception raised when a search of Route53 records returns more
records than requested.
"""
def __init__(self, message):
super(TooManyRecordsException, self).__init__(message)
self.message = message
class PleaseRetryException(Exception):
"""
Indicates a request should be retried.
"""
def __init__(self, message, response=None):
self.message = message
self.response = response
def __repr__(self):
return 'PleaseRetryException("%s", %s)' % (
self.message,
self.response
)
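# A minimal sketch (not boto's own retry machinery; make_request and the
# backoff policy are assumptions for illustration) of how callers are meant
# to react to PleaseRetryException:
#
#   for attempt in range(max_retries):
#       try:
#           return make_request()
#       except PleaseRetryException:
#           time.sleep(2 ** attempt)  # back off, then retry the request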
| mit |
j4s0nh4ck/you-get | src/you_get/extractors/ehow.py | 20 | 1099 | #!/usr/bin/env python
__all__ = ['ehow_download']
from ..common import *
def ehow_download(url, output_dir = '.', merge = True, info_only = False):
    assert re.search(r'http://www\.ehow\.com/video_', url), "URL you entered is not supported"
html = get_html(url)
contentid = r1(r'<meta name="contentid" scheme="DMINSTR2" content="([^"]+)" />', html)
vid = r1(r'"demand_ehow_videoid":"([^"]+)"', html)
assert vid
xml = get_html('http://www.ehow.com/services/video/series.xml?demand_ehow_videoid=%s' % vid)
from xml.dom.minidom import parseString
doc = parseString(xml)
tab = doc.getElementsByTagName('related')[0].firstChild
for video in tab.childNodes:
if re.search(contentid, video.attributes['link'].value):
url = video.attributes['flv'].value
break
title = video.attributes['title'].value
assert title
type, ext, size = url_info(url)
print_info(site_info, title, type, size)
if not info_only:
download_urls([url], title, ext, size, output_dir, merge = merge)
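# A minimal usage sketch of the extractor above (the URL is hypothetical but
# matches the pattern asserted at the top of the function):
#
#   ehow_download('http://www.ehow.com/video_12345_example.html',
#                 output_dir='/tmp', info_only=True)  # print info, skip download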
site_info = "ehow.com"
download = ehow_download
download_playlist = playlist_not_supported('ehow') | mit |
Yelp/paasta | setup.py | 1 | 5208 | #!/usr/bin/env python
# Copyright 2015-2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
from pkg_resources import yield_lines
from setuptools import find_packages
from setuptools import setup
from paasta_tools import __version__
def get_install_requires():
with open("requirements-minimal.txt", "r") as f:
minimal_reqs = list(yield_lines(f.read()))
return minimal_reqs
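# A brief illustration (assumed file contents) of what yield_lines strips:
# given a requirements-minimal.txt containing
#
#   # pinned for security
#   requests >= 2.20
#
#   boto3
#
# get_install_requires() returns ['requests >= 2.20', 'boto3'] -- blank and
# comment lines are dropped.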
setup(
name="paasta-tools",
version=__version__,
provides=["paasta_tools"],
author="Compute Infrastructure @ Yelp",
author_email="[email protected]",
    description="Tools for Yelp's SOA infrastructure",
packages=find_packages(exclude=("tests*", "scripts*")),
include_package_data=True,
install_requires=get_install_requires(),
scripts=[
"paasta_tools/am_i_mesos_leader.py",
"paasta_tools/apply_external_resources.py",
"paasta_tools/autoscale_all_services.py",
"paasta_tools/check_flink_services_health.py",
"paasta_tools/check_cassandracluster_services_replication.py",
"paasta_tools/check_marathon_services_replication.py",
"paasta_tools/check_kubernetes_api.py",
"paasta_tools/check_kubernetes_services_replication.py",
"paasta_tools/check_oom_events.py",
"paasta_tools/check_spark_jobs.py",
"paasta_tools/cleanup_marathon_jobs.py",
"paasta_tools/cleanup_kubernetes_cr.py",
"paasta_tools/cleanup_kubernetes_crd.py",
"paasta_tools/cleanup_kubernetes_jobs.py",
"paasta_tools/delete_kubernetes_deployments.py",
"paasta_tools/deploy_marathon_services",
"paasta_tools/paasta_deploy_tron_jobs",
"paasta_tools/generate_all_deployments",
"paasta_tools/generate_deployments_for_service.py",
"paasta_tools/generate_services_file.py",
"paasta_tools/generate_services_yaml.py",
"paasta_tools/get_mesos_leader.py",
"paasta_tools/kubernetes/bin/paasta_secrets_sync.py",
"paasta_tools/kubernetes/bin/paasta_cleanup_stale_nodes.py",
"paasta_tools/kubernetes/bin/kubernetes_remove_evicted_pods.py",
"paasta_tools/list_marathon_service_instances.py",
"paasta_tools/log_task_lifecycle_events.py",
"paasta_tools/marathon_dashboard.py",
"paasta_tools/monitoring/check_capacity.py",
"paasta_tools/monitoring/check_marathon_has_apps.py",
"paasta_tools/monitoring/check_mesos_active_frameworks.py",
"paasta_tools/monitoring/check_mesos_duplicate_frameworks.py",
"paasta_tools/monitoring/check_mesos_quorum.py",
"paasta_tools/monitoring/check_mesos_outdated_tasks.py",
"paasta_tools/monitoring/kill_orphaned_docker_containers.py",
"paasta_tools/cli/paasta_tabcomplete.sh",
"paasta_tools/paasta_cluster_boost.py",
"paasta_tools/paasta_execute_docker_command.py",
"paasta_tools/paasta_maintenance.py",
"paasta_tools/paasta_metastatus.py",
"paasta_tools/paasta_remote_run.py",
"paasta_tools/setup_kubernetes_job.py",
"paasta_tools/setup_kubernetes_crd.py",
"paasta_tools/setup_kubernetes_cr.py",
"paasta_tools/setup_marathon_job.py",
"paasta_tools/setup_prometheus_adapter_config.py",
"paasta_tools/synapse_srv_namespaces_fact.py",
]
+ glob.glob("paasta_tools/contrib/*.sh")
+ glob.glob("paasta_tools/contrib/[!_]*.py"),
entry_points={
"console_scripts": [
"paasta=paasta_tools.cli.cli:main",
"paasta-api=paasta_tools.api.api:main",
"paasta-deployd=paasta_tools.deployd.master:main",
"paasta-fsm=paasta_tools.cli.fsm_cmd:main",
"paasta_cleanup_tron_namespaces=paasta_tools.cleanup_tron_namespaces:main",
"paasta_list_kubernetes_service_instances=paasta_tools.list_kubernetes_service_instances:main",
"paasta_list_tron_namespaces=paasta_tools.list_tron_namespaces:main",
"paasta_setup_tron_namespace=paasta_tools.setup_tron_namespace:main",
"paasta_cleanup_maintenance=paasta_tools.cleanup_maintenance:main",
"paasta_docker_wrapper=paasta_tools.docker_wrapper:main",
"paasta_firewall_update=paasta_tools.firewall_update:main",
"paasta_firewall_logging=paasta_tools.firewall_logging:main",
"paasta_oom_logger=paasta_tools.oom_logger:main",
"paasta_broadcast_log=paasta_tools.broadcast_log_to_services:main",
"paasta_dump_locally_running_services=paasta_tools.dump_locally_running_services:main",
],
"paste.app_factory": ["paasta-api-config=paasta_tools.api.api:make_app"],
},
)
| apache-2.0 |
ahmed-mahran/hue | desktop/core/ext-py/Pygments-1.3.1/pygments/formatters/terminal.py | 75 | 4012 | # -*- coding: utf-8 -*-
"""
pygments.formatters.terminal
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Formatter for terminal output with ANSI sequences.
:copyright: Copyright 2006-2010 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.formatter import Formatter
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Token, Whitespace
from pygments.console import ansiformat
from pygments.util import get_choice_opt
__all__ = ['TerminalFormatter']
#: Map token types to a tuple of color values for light and dark
#: backgrounds.
TERMINAL_COLORS = {
Token: ('', ''),
Whitespace: ('lightgray', 'darkgray'),
Comment: ('lightgray', 'darkgray'),
Comment.Preproc: ('teal', 'turquoise'),
Keyword: ('darkblue', 'blue'),
Keyword.Type: ('teal', 'turquoise'),
Operator.Word: ('purple', 'fuchsia'),
Name.Builtin: ('teal', 'turquoise'),
Name.Function: ('darkgreen', 'green'),
Name.Namespace: ('_teal_', '_turquoise_'),
Name.Class: ('_darkgreen_', '_green_'),
Name.Exception: ('teal', 'turquoise'),
Name.Decorator: ('darkgray', 'lightgray'),
Name.Variable: ('darkred', 'red'),
Name.Constant: ('darkred', 'red'),
Name.Attribute: ('teal', 'turquoise'),
Name.Tag: ('blue', 'blue'),
String: ('brown', 'brown'),
Number: ('darkblue', 'blue'),
Generic.Deleted: ('red', 'red'),
Generic.Inserted: ('darkgreen', 'green'),
Generic.Heading: ('**', '**'),
Generic.Subheading: ('*purple*', '*fuchsia*'),
Generic.Error: ('red', 'red'),
Error: ('_red_', '_red_'),
}
class TerminalFormatter(Formatter):
r"""
Format tokens with ANSI color sequences, for output in a text console.
Color sequences are terminated at newlines, so that paging the output
works correctly.
The `get_style_defs()` method doesn't do anything special since there is
no support for common styles.
Options accepted:
`bg`
Set to ``"light"`` or ``"dark"`` depending on the terminal's background
(default: ``"light"``).
`colorscheme`
A dictionary mapping token types to (lightbg, darkbg) color names or
``None`` (default: ``None`` = use builtin colorscheme).
"""
name = 'Terminal'
aliases = ['terminal', 'console']
filenames = []
def __init__(self, **options):
Formatter.__init__(self, **options)
self.darkbg = get_choice_opt(options, 'bg',
['light', 'dark'], 'light') == 'dark'
self.colorscheme = options.get('colorscheme', None) or TERMINAL_COLORS
def format(self, tokensource, outfile):
# hack: if the output is a terminal and has an encoding set,
# use that to avoid unicode encode problems
if not self.encoding and hasattr(outfile, "encoding") and \
hasattr(outfile, "isatty") and outfile.isatty():
self.encoding = outfile.encoding
return Formatter.format(self, tokensource, outfile)
def format_unencoded(self, tokensource, outfile):
for ttype, value in tokensource:
color = self.colorscheme.get(ttype)
while color is None:
ttype = ttype[:-1]
color = self.colorscheme.get(ttype)
if color:
color = color[self.darkbg]
spl = value.split('\n')
for line in spl[:-1]:
if line:
outfile.write(ansiformat(color, line))
outfile.write('\n')
if spl[-1]:
outfile.write(ansiformat(color, spl[-1]))
else:
outfile.write(value)
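# A minimal usage sketch, via the standard pygments highlight() entry point:
#
#   from pygments import highlight
#   from pygments.lexers import PythonLexer
#
#   print highlight('print 42', PythonLexer(), TerminalFormatter(bg='dark'))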
| apache-2.0 |
cm-a7lte/device_samsung_a7lte | tools/perf/scripts/python/syscall-counts.py | 11181 | 1522 | # system call counts
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
usage = "perf script -s syscall-counts.py [comm]\n"
for_comm = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def print_syscall_totals():
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"-----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
print "%-40s %10d\n" % (syscall_name(id), val),
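# One plausible capture/report sequence (per the usage string above); the
# trace must include the raw_syscalls:sys_enter tracepoint for the handler
# defined here to fire:
#
#   $ perf record -e raw_syscalls:sys_enter -a sleep 5
#   $ perf script -s syscall-counts.py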
| gpl-2.0 |
nrsimha/wagtail | wagtail/wagtailimages/rich_text.py | 9 | 1466 | from wagtail.wagtailimages.models import get_image_model
from wagtail.wagtailimages.formats import get_image_format
class ImageEmbedHandler(object):
"""
ImageEmbedHandler will be invoked whenever we encounter an element in HTML content
with an attribute of data-embedtype="image". The resulting element in the database
representation will be:
<embed embedtype="image" id="42" format="thumb" alt="some custom alt text">
"""
@staticmethod
def get_db_attributes(tag):
"""
Given a tag that we've identified as an image embed (because it has a
data-embedtype="image" attribute), return a dict of the attributes we should
have on the resulting <embed> element.
"""
return {
'id': tag['data-id'],
'format': tag['data-format'],
'alt': tag['data-alt'],
}
@staticmethod
def expand_db_attributes(attrs, for_editor):
"""
Given a dict of attributes from the <embed> tag, return the real HTML
representation.
"""
Image = get_image_model()
try:
image = Image.objects.get(id=attrs['id'])
except Image.DoesNotExist:
return "<img>"
image_format = get_image_format(attrs['format'])
if for_editor:
return image_format.image_to_editor_html(image, attrs['alt'])
else:
return image_format.image_to_html(image, attrs['alt'])
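# A brief sketch (attribute values are hypothetical) of the round trip
# described in the class docstring:
#
#   editor HTML:  <img data-embedtype="image" data-id="42"
#                      data-format="thumb" data-alt="A cat">
#   get_db_attributes(tag)  -> {'id': '42', 'format': 'thumb', 'alt': 'A cat'}
#   stored form:  <embed embedtype="image" id="42" format="thumb" alt="A cat">
#   expand_db_attributes(attrs, for_editor=False) -> rendered <img ...> markup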
| bsd-3-clause |
WhireCrow/openwrt-mt7620 | staging_dir/target-mipsel_r2_uClibc-0.9.33.2/root-ralink/usr/lib/python2.7/fpformat.py | 322 | 4699 | """General floating point formatting functions.
Functions:
fix(x, digits_behind)
sci(x, digits_behind)
Each takes a number or a string and a number of digits as arguments.
Parameters:
x: number to be formatted; or a string resembling a number
digits_behind: number of digits behind the decimal point
"""
from warnings import warnpy3k
warnpy3k("the fpformat module has been removed in Python 3.0", stacklevel=2)
del warnpy3k
import re
__all__ = ["fix","sci","NotANumber"]
# Compiled regular expression to "decode" a number
decoder = re.compile(r'^([-+]?)0*(\d*)((?:\.\d*)?)(([eE][-+]?\d+)?)$')
# \0 the whole thing
# \1 leading sign or empty
# \2 digits left of decimal point
# \3 fraction (empty or begins with point)
# \4 exponent part (empty or begins with 'e' or 'E')
try:
class NotANumber(ValueError):
pass
except TypeError:
NotANumber = 'fpformat.NotANumber'
def extract(s):
"""Return (sign, intpart, fraction, expo) or raise an exception:
sign is '+' or '-'
intpart is 0 or more digits beginning with a nonzero
fraction is 0 or more digits
expo is an integer"""
res = decoder.match(s)
if res is None: raise NotANumber, s
sign, intpart, fraction, exppart = res.group(1,2,3,4)
if sign == '+': sign = ''
if fraction: fraction = fraction[1:]
if exppart: expo = int(exppart[1:])
else: expo = 0
return sign, intpart, fraction, expo
def unexpo(intpart, fraction, expo):
"""Remove the exponent by changing intpart and fraction."""
if expo > 0: # Move the point left
f = len(fraction)
intpart, fraction = intpart + fraction[:expo], fraction[expo:]
if expo > f:
intpart = intpart + '0'*(expo-f)
elif expo < 0: # Move the point right
i = len(intpart)
intpart, fraction = intpart[:expo], intpart[expo:] + fraction
if expo < -i:
fraction = '0'*(-expo-i) + fraction
return intpart, fraction
def roundfrac(intpart, fraction, digs):
"""Round or extend the fraction to size digs."""
f = len(fraction)
if f <= digs:
return intpart, fraction + '0'*(digs-f)
i = len(intpart)
if i+digs < 0:
return '0'*-digs, ''
total = intpart + fraction
nextdigit = total[i+digs]
if nextdigit >= '5': # Hard case: increment last digit, may have carry!
n = i + digs - 1
while n >= 0:
if total[n] != '9': break
n = n-1
else:
total = '0' + total
i = i+1
n = 0
total = total[:n] + chr(ord(total[n]) + 1) + '0'*(len(total)-n-1)
intpart, fraction = total[:i], total[i:]
if digs >= 0:
return intpart, fraction[:digs]
else:
return intpart[:digs] + '0'*-digs, ''
def fix(x, digs):
"""Format x as [-]ddd.ddd with 'digs' digits after the point
and at least one digit before.
If digs <= 0, the point is suppressed."""
if type(x) != type(''): x = repr(x)
try:
sign, intpart, fraction, expo = extract(x)
except NotANumber:
return x
intpart, fraction = unexpo(intpart, fraction, expo)
intpart, fraction = roundfrac(intpart, fraction, digs)
while intpart and intpart[0] == '0': intpart = intpart[1:]
if intpart == '': intpart = '0'
if digs > 0: return sign + intpart + '.' + fraction
else: return sign + intpart
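# A few worked examples for fix(), traced through the helpers above:
#
#   fix(1.2345, 2)   ->  '1.23'
#   fix('1.995', 2)  ->  '2.00'   (roundfrac carries past the decimal point)
#   fix(1.2345, 0)   ->  '1'      (digs <= 0 suppresses the point)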
def sci(x, digs):
"""Format x as [-]d.dddE[+-]ddd with 'digs' digits after the point
and exactly one digit before.
If digs is <= 0, one digit is kept and the point is suppressed."""
if type(x) != type(''): x = repr(x)
sign, intpart, fraction, expo = extract(x)
if not intpart:
while fraction and fraction[0] == '0':
fraction = fraction[1:]
expo = expo - 1
if fraction:
intpart, fraction = fraction[0], fraction[1:]
expo = expo - 1
else:
intpart = '0'
else:
expo = expo + len(intpart) - 1
intpart, fraction = intpart[0], intpart[1:] + fraction
digs = max(0, digs)
intpart, fraction = roundfrac(intpart, fraction, digs)
if len(intpart) > 1:
intpart, fraction, expo = \
intpart[0], intpart[1:] + fraction[:-1], \
expo + len(intpart) - 1
s = sign + intpart
if digs > 0: s = s + '.' + fraction
e = repr(abs(expo))
e = '0'*(3-len(e)) + e
if expo < 0: e = '-' + e
else: e = '+' + e
return s + 'e' + e
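# Worked examples for sci():
#
#   sci(110, 2)   ->  '1.10e+002'
#   sci(0.05, 1)  ->  '5.0e-002'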
def test():
"""Interactive test run."""
try:
while 1:
x, digs = input('Enter (x, digs): ')
print x, fix(x, digs), sci(x, digs)
except (EOFError, KeyboardInterrupt):
pass
| gpl-2.0 |
gautamMalu/rootfs_xen_arndale | usr/lib/python3/dist-packages/dbus/mainloop/glib.py | 10 | 1773 | # Copyright (C) 2004 Anders Carlsson
# Copyright (C) 2004-2006 Red Hat Inc. <http://www.redhat.com/>
# Copyright (C) 2005-2006 Collabora Ltd. <http://www.collabora.co.uk/>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use, copy,
# modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
"""GLib main loop integration using libdbus-glib."""
__all__ = ('DBusGMainLoop', 'threads_init')
from _dbus_glib_bindings import DBusGMainLoop, gthreads_init
_dbus_gthreads_initialized = False
def threads_init():
"""Initialize threads in dbus-glib, if this has not already been done.
This must be called before creating a second thread in a program that
uses this module.
"""
global _dbus_gthreads_initialized
if not _dbus_gthreads_initialized:
gthreads_init()
_dbus_gthreads_initialized = True
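# A minimal usage sketch: call threads_init() before creating a second
# thread, then install the GLib main loop as usual:
#
#   import dbus.mainloop.glib
#   dbus.mainloop.glib.threads_init()
#   loop = dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)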
| gpl-2.0 |
traveloka/ansible | lib/ansible/modules/cloud/rackspace/rax_keypair.py | 50 | 5128 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: rax_keypair
short_description: Create a keypair for use with Rackspace Cloud Servers
description:
- Create a keypair for use with Rackspace Cloud Servers
version_added: 1.5
options:
name:
description:
- Name of keypair
required: true
public_key:
description:
- Public Key string to upload. Can be a file path or string
default: null
state:
description:
- Indicate desired state of the resource
choices:
- present
- absent
default: present
author: "Matt Martz (@sivel)"
notes:
- Keypairs cannot be manipulated, only created and deleted. To "update" a
keypair you must first delete and then recreate.
- The ability to specify a file path for the public key was added in 1.7
extends_documentation_fragment: rackspace.openstack
'''
EXAMPLES = '''
- name: Create a keypair
hosts: localhost
gather_facts: False
tasks:
- name: keypair request
local_action:
module: rax_keypair
credentials: ~/.raxpub
name: my_keypair
region: DFW
register: keypair
- name: Create local public key
local_action:
module: copy
content: "{{ keypair.keypair.public_key }}"
dest: "{{ inventory_dir }}/{{ keypair.keypair.name }}.pub"
- name: Create local private key
local_action:
module: copy
content: "{{ keypair.keypair.private_key }}"
dest: "{{ inventory_dir }}/{{ keypair.keypair.name }}"
- name: Create a keypair
hosts: localhost
gather_facts: False
tasks:
- name: keypair request
local_action:
module: rax_keypair
credentials: ~/.raxpub
name: my_keypair
public_key: "{{ lookup('file', 'authorized_keys/id_rsa.pub') }}"
region: DFW
register: keypair
'''
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
def rax_keypair(module, name, public_key, state):
changed = False
cs = pyrax.cloudservers
if cs is None:
module.fail_json(msg='Failed to instantiate client. This '
'typically indicates an invalid region or an '
'incorrectly capitalized region name.')
keypair = {}
if state == 'present':
if public_key and os.path.isfile(public_key):
try:
f = open(public_key)
public_key = f.read()
f.close()
except Exception as e:
module.fail_json(msg='Failed to load %s' % public_key)
try:
keypair = cs.keypairs.find(name=name)
except cs.exceptions.NotFound:
try:
keypair = cs.keypairs.create(name, public_key)
changed = True
except Exception as e:
module.fail_json(msg='%s' % e.message)
except Exception as e:
module.fail_json(msg='%s' % e.message)
elif state == 'absent':
try:
keypair = cs.keypairs.find(name=name)
except:
pass
if keypair:
try:
keypair.delete()
changed = True
except Exception as e:
module.fail_json(msg='%s' % e.message)
module.exit_json(changed=changed, keypair=rax_to_dict(keypair))
def main():
argument_spec = rax_argument_spec()
argument_spec.update(
dict(
name=dict(required=True),
public_key=dict(),
state=dict(default='present', choices=['absent', 'present']),
)
)
module = AnsibleModule(
argument_spec=argument_spec,
required_together=rax_required_together(),
)
if not HAS_PYRAX:
module.fail_json(msg='pyrax is required for this module')
name = module.params.get('name')
public_key = module.params.get('public_key')
state = module.params.get('state')
setup_rax_module(module, pyrax)
rax_keypair(module, name, public_key, state)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.rax import *
### invoke the module
if __name__ == '__main__':
main()
| gpl-3.0 |
PetePriority/home-assistant | homeassistant/components/xiaomi_aqara/binary_sensor.py | 3 | 17451 | """Support for Xiaomi aqara binary sensors."""
import logging
from homeassistant.components.binary_sensor import BinarySensorDevice
from homeassistant.components.xiaomi_aqara import (PY_XIAOMI_GATEWAY,
XiaomiDevice)
from homeassistant.core import callback
from homeassistant.helpers.event import async_call_later
_LOGGER = logging.getLogger(__name__)
NO_CLOSE = 'no_close'
ATTR_OPEN_SINCE = 'Open since'
MOTION = 'motion'
NO_MOTION = 'no_motion'
ATTR_LAST_ACTION = 'last_action'
ATTR_NO_MOTION_SINCE = 'No motion since'
DENSITY = 'density'
ATTR_DENSITY = 'Density'
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Perform the setup for Xiaomi devices."""
devices = []
for (_, gateway) in hass.data[PY_XIAOMI_GATEWAY].gateways.items():
for device in gateway.devices['binary_sensor']:
model = device['model']
if model in ['motion', 'sensor_motion', 'sensor_motion.aq2']:
devices.append(XiaomiMotionSensor(device, hass, gateway))
elif model in ['magnet', 'sensor_magnet', 'sensor_magnet.aq2']:
devices.append(XiaomiDoorSensor(device, gateway))
elif model == 'sensor_wleak.aq1':
devices.append(XiaomiWaterLeakSensor(device, gateway))
elif model in ['smoke', 'sensor_smoke']:
devices.append(XiaomiSmokeSensor(device, gateway))
elif model in ['natgas', 'sensor_natgas']:
devices.append(XiaomiNatgasSensor(device, gateway))
elif model in ['switch', 'sensor_switch',
'sensor_switch.aq2', 'sensor_switch.aq3',
'remote.b1acn01']:
if 'proto' not in device or int(device['proto'][0:1]) == 1:
data_key = 'status'
else:
data_key = 'button_0'
devices.append(XiaomiButton(device, 'Switch', data_key,
hass, gateway))
elif model in ['86sw1', 'sensor_86sw1', 'sensor_86sw1.aq1',
'remote.b186acn01']:
if 'proto' not in device or int(device['proto'][0:1]) == 1:
data_key = 'channel_0'
else:
data_key = 'button_0'
devices.append(XiaomiButton(device, 'Wall Switch', data_key,
hass, gateway))
elif model in ['86sw2', 'sensor_86sw2', 'sensor_86sw2.aq1',
'remote.b286acn01']:
if 'proto' not in device or int(device['proto'][0:1]) == 1:
data_key_left = 'channel_0'
data_key_right = 'channel_1'
else:
data_key_left = 'button_0'
data_key_right = 'button_1'
devices.append(XiaomiButton(device, 'Wall Switch (Left)',
data_key_left, hass, gateway))
devices.append(XiaomiButton(device, 'Wall Switch (Right)',
data_key_right, hass, gateway))
devices.append(XiaomiButton(device, 'Wall Switch (Both)',
'dual_channel', hass, gateway))
elif model in ['cube', 'sensor_cube', 'sensor_cube.aqgl01']:
devices.append(XiaomiCube(device, hass, gateway))
elif model in ['vibration', 'vibration.aq1']:
devices.append(XiaomiVibration(device, 'Vibration',
'status', gateway))
else:
_LOGGER.warning('Unmapped Device Model %s', model)
add_entities(devices)
class XiaomiBinarySensor(XiaomiDevice, BinarySensorDevice):
"""Representation of a base XiaomiBinarySensor."""
def __init__(self, device, name, xiaomi_hub, data_key, device_class):
"""Initialize the XiaomiSmokeSensor."""
self._data_key = data_key
self._device_class = device_class
self._should_poll = False
self._density = 0
XiaomiDevice.__init__(self, device, name, xiaomi_hub)
@property
def should_poll(self):
"""Return True if entity has to be polled for state."""
return self._should_poll
@property
def is_on(self):
"""Return true if sensor is on."""
return self._state
@property
def device_class(self):
"""Return the class of binary sensor."""
return self._device_class
def update(self):
"""Update the sensor state."""
_LOGGER.debug('Updating xiaomi sensor (%s) by polling', self._sid)
self._get_from_hub(self._sid)
class XiaomiNatgasSensor(XiaomiBinarySensor):
"""Representation of a XiaomiNatgasSensor."""
def __init__(self, device, xiaomi_hub):
"""Initialize the XiaomiSmokeSensor."""
self._density = None
XiaomiBinarySensor.__init__(self, device, 'Natgas Sensor', xiaomi_hub,
'alarm', 'gas')
@property
def device_state_attributes(self):
"""Return the state attributes."""
attrs = {ATTR_DENSITY: self._density}
attrs.update(super().device_state_attributes)
return attrs
def parse_data(self, data, raw_data):
"""Parse data sent by gateway."""
if DENSITY in data:
self._density = int(data.get(DENSITY))
value = data.get(self._data_key)
if value is None:
return False
if value in ('1', '2'):
if self._state:
return False
self._state = True
return True
if value == '0':
if self._state:
self._state = False
return True
return False
class XiaomiMotionSensor(XiaomiBinarySensor):
"""Representation of a XiaomiMotionSensor."""
def __init__(self, device, hass, xiaomi_hub):
"""Initialize the XiaomiMotionSensor."""
self._hass = hass
self._no_motion_since = 0
self._unsub_set_no_motion = None
if 'proto' not in device or int(device['proto'][0:1]) == 1:
data_key = 'status'
else:
data_key = 'motion_status'
XiaomiBinarySensor.__init__(self, device, 'Motion Sensor', xiaomi_hub,
data_key, 'motion')
@property
def device_state_attributes(self):
"""Return the state attributes."""
attrs = {ATTR_NO_MOTION_SINCE: self._no_motion_since}
attrs.update(super().device_state_attributes)
return attrs
@callback
def _async_set_no_motion(self, now):
"""Set state to False."""
self._unsub_set_no_motion = None
self._state = False
self.async_schedule_update_ha_state()
def parse_data(self, data, raw_data):
"""Parse data sent by gateway.
Polling (proto v1, firmware version 1.4.1_159.0143)
>> { "cmd":"read","sid":"158..."}
<< {'model': 'motion', 'sid': '158...', 'short_id': 26331,
'cmd': 'read_ack', 'data': '{"voltage":3005}'}
Multicast messages (proto v1, firmware version 1.4.1_159.0143)
<< {'model': 'motion', 'sid': '158...', 'short_id': 26331,
'cmd': 'report', 'data': '{"status":"motion"}'}
<< {'model': 'motion', 'sid': '158...', 'short_id': 26331,
'cmd': 'report', 'data': '{"no_motion":"120"}'}
<< {'model': 'motion', 'sid': '158...', 'short_id': 26331,
'cmd': 'report', 'data': '{"no_motion":"180"}'}
<< {'model': 'motion', 'sid': '158...', 'short_id': 26331,
'cmd': 'report', 'data': '{"no_motion":"300"}'}
<< {'model': 'motion', 'sid': '158...', 'short_id': 26331,
'cmd': 'heartbeat', 'data': '{"voltage":3005}'}
"""
if raw_data['cmd'] == 'heartbeat':
_LOGGER.debug(
'Skipping heartbeat of the motion sensor. '
'It can introduce an incorrect state because of a firmware '
'bug (https://github.com/home-assistant/home-assistant/pull/'
'11631#issuecomment-357507744).')
return
if NO_MOTION in data:
self._no_motion_since = data[NO_MOTION]
self._state = False
return True
value = data.get(self._data_key)
if value is None:
return False
if value == MOTION:
if self._data_key == 'motion_status':
if self._unsub_set_no_motion:
self._unsub_set_no_motion()
self._unsub_set_no_motion = async_call_later(
self._hass,
120,
self._async_set_no_motion
)
if self.entity_id is not None:
self._hass.bus.fire('xiaomi_aqara.motion', {
'entity_id': self.entity_id
})
self._no_motion_since = 0
if self._state:
return False
self._state = True
return True
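# A brief sketch (payloads mirror the protocol examples in the docstring
# above) of how parse_data() reacts:
#
#   parse_data({'status': 'motion'}, {'cmd': 'report'})   -> True (sensor on)
#   parse_data({'no_motion': '120'}, {'cmd': 'report'})   -> True (off, 120 s idle)
#   parse_data({'voltage': 3005}, {'cmd': 'heartbeat'})   -> None (skipped)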
class XiaomiDoorSensor(XiaomiBinarySensor):
"""Representation of a XiaomiDoorSensor."""
def __init__(self, device, xiaomi_hub):
"""Initialize the XiaomiDoorSensor."""
self._open_since = 0
if 'proto' not in device or int(device['proto'][0:1]) == 1:
data_key = 'status'
else:
data_key = 'window_status'
XiaomiBinarySensor.__init__(self, device, 'Door Window Sensor',
xiaomi_hub, data_key, 'opening')
@property
def device_state_attributes(self):
"""Return the state attributes."""
attrs = {ATTR_OPEN_SINCE: self._open_since}
attrs.update(super().device_state_attributes)
return attrs
def parse_data(self, data, raw_data):
"""Parse data sent by gateway."""
self._should_poll = False
if NO_CLOSE in data: # handle push from the hub
self._open_since = data[NO_CLOSE]
return True
value = data.get(self._data_key)
if value is None:
return False
if value == 'open':
self._should_poll = True
if self._state:
return False
self._state = True
return True
if value == 'close':
self._open_since = 0
if self._state:
self._state = False
return True
return False
class XiaomiWaterLeakSensor(XiaomiBinarySensor):
"""Representation of a XiaomiWaterLeakSensor."""
def __init__(self, device, xiaomi_hub):
"""Initialize the XiaomiWaterLeakSensor."""
if 'proto' not in device or int(device['proto'][0:1]) == 1:
data_key = 'status'
else:
data_key = 'wleak_status'
XiaomiBinarySensor.__init__(self, device, 'Water Leak Sensor',
xiaomi_hub, data_key, 'moisture')
def parse_data(self, data, raw_data):
"""Parse data sent by gateway."""
self._should_poll = False
value = data.get(self._data_key)
if value is None:
return False
if value == 'leak':
self._should_poll = True
if self._state:
return False
self._state = True
return True
if value == 'no_leak':
if self._state:
self._state = False
return True
return False
class XiaomiSmokeSensor(XiaomiBinarySensor):
"""Representation of a XiaomiSmokeSensor."""
def __init__(self, device, xiaomi_hub):
"""Initialize the XiaomiSmokeSensor."""
self._density = 0
XiaomiBinarySensor.__init__(self, device, 'Smoke Sensor', xiaomi_hub,
'alarm', 'smoke')
@property
def device_state_attributes(self):
"""Return the state attributes."""
attrs = {ATTR_DENSITY: self._density}
attrs.update(super().device_state_attributes)
return attrs
def parse_data(self, data, raw_data):
"""Parse data sent by gateway."""
if DENSITY in data:
self._density = int(data.get(DENSITY))
value = data.get(self._data_key)
if value is None:
return False
if value in ('1', '2'):
if self._state:
return False
self._state = True
return True
if value == '0':
if self._state:
self._state = False
return True
return False
class XiaomiVibration(XiaomiBinarySensor):
"""Representation of a Xiaomi Vibration Sensor."""
def __init__(self, device, name, data_key, xiaomi_hub):
"""Initialize the XiaomiVibration."""
self._last_action = None
super().__init__(device, name, xiaomi_hub, data_key, None)
@property
def device_state_attributes(self):
"""Return the state attributes."""
attrs = {ATTR_LAST_ACTION: self._last_action}
attrs.update(super().device_state_attributes)
return attrs
def parse_data(self, data, raw_data):
"""Parse data sent by gateway."""
value = data.get(self._data_key)
if value is None:
return False
if value not in ('vibrate', 'tilt', 'free_fall'):
_LOGGER.warning("Unsupported movement_type detected: %s",
value)
return False
self.hass.bus.fire('xiaomi_aqara.movement', {
'entity_id': self.entity_id,
'movement_type': value
})
self._last_action = value
return True
class XiaomiButton(XiaomiBinarySensor):
"""Representation of a Xiaomi Button."""
def __init__(self, device, name, data_key, hass, xiaomi_hub):
"""Initialize the XiaomiButton."""
self._hass = hass
self._last_action = None
XiaomiBinarySensor.__init__(self, device, name, xiaomi_hub,
data_key, None)
@property
def device_state_attributes(self):
"""Return the state attributes."""
attrs = {ATTR_LAST_ACTION: self._last_action}
attrs.update(super().device_state_attributes)
return attrs
def parse_data(self, data, raw_data):
"""Parse data sent by gateway."""
value = data.get(self._data_key)
if value is None:
return False
if value == 'long_click_press':
self._state = True
click_type = 'long_click_press'
elif value == 'long_click_release':
self._state = False
click_type = 'hold'
elif value == 'click':
click_type = 'single'
elif value == 'double_click':
click_type = 'double'
elif value == 'both_click':
click_type = 'both'
elif value == 'double_both_click':
click_type = 'double_both'
elif value == 'shake':
click_type = 'shake'
elif value == 'long_click':
click_type = 'long'
elif value == 'long_both_click':
click_type = 'long_both'
else:
_LOGGER.warning("Unsupported click_type detected: %s", value)
return False
self._hass.bus.fire('xiaomi_aqara.click', {
'entity_id': self.entity_id,
'click_type': click_type
})
self._last_action = click_type
return True
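# For example, a single click ends up on the Home Assistant event bus as
# (the entity id is hypothetical):
#
#   event: xiaomi_aqara.click
#   data:  {'entity_id': 'binary_sensor.wall_switch', 'click_type': 'single'}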
class XiaomiCube(XiaomiBinarySensor):
"""Representation of a Xiaomi Cube."""
def __init__(self, device, hass, xiaomi_hub):
"""Initialize the Xiaomi Cube."""
self._hass = hass
self._last_action = None
self._state = False
if 'proto' not in device or int(device['proto'][0:1]) == 1:
data_key = 'status'
else:
data_key = 'cube_status'
XiaomiBinarySensor.__init__(self, device, 'Cube', xiaomi_hub,
data_key, None)
@property
def device_state_attributes(self):
"""Return the state attributes."""
attrs = {ATTR_LAST_ACTION: self._last_action}
attrs.update(super().device_state_attributes)
return attrs
def parse_data(self, data, raw_data):
"""Parse data sent by gateway."""
if self._data_key in data:
self._hass.bus.fire('xiaomi_aqara.cube_action', {
'entity_id': self.entity_id,
'action_type': data[self._data_key]
})
self._last_action = data[self._data_key]
if 'rotate' in data:
self._hass.bus.fire('xiaomi_aqara.cube_action', {
'entity_id': self.entity_id,
'action_type': 'rotate',
'action_value': float(data['rotate'].replace(",", "."))
})
self._last_action = 'rotate'
if 'rotate_degree' in data:
self._hass.bus.fire('xiaomi_aqara.cube_action', {
'entity_id': self.entity_id,
'action_type': 'rotate',
'action_value': float(data['rotate_degree'].replace(",", "."))
})
self._last_action = 'rotate'
return True
| apache-2.0 |
Lyleo/nupic | nupic/datafiles/extra/firstOrder/raw/makeDataset.py | 17 | 3488 | #! /usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Generate artificial datasets
"""
import numpy
from nupic.data.file import File
def createFirstOrderModel(numCategories=5, alpha=0.5):
categoryList = ['cat%02d' % i for i in range(numCategories)]
initProbability = numpy.ones(numCategories)/numCategories
transitionTable = numpy.random.dirichlet(alpha=[alpha]*numCategories,
size=numCategories)
return categoryList, initProbability, transitionTable
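# For example, createFirstOrderModel(numCategories=3) returns:
#   categoryList     -> ['cat00', 'cat01', 'cat02']
#   initProbability  -> shape (3,), uniform (each entry 1/3)
#   transitionTable  -> shape (3, 3); each row is a Dirichlet draw, so each
#                       row sums to 1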
def generateFirstOrderData(model, numIterations=10000, seqLength=5,
resets=True, suffix='train'):
print "Creating %d iteration file with seqLength %d" % (numIterations, seqLength)
categoryList, initProbability, transitionTable = model
initProbability = initProbability.cumsum()
transitionTable = transitionTable.cumsum(axis=1)
outputFile = 'fo_%d_%d_%s.csv' % (numIterations, seqLength, suffix)
print "Filename", outputFile
fields = [('reset', 'int', 'R'), ('name', 'string', '')]
o = File(outputFile, fields)
seqIdx = 0
rand = numpy.random.rand()
catIdx = numpy.searchsorted(initProbability, rand)
for i in xrange(numIterations):
rand = numpy.random.rand()
if seqIdx == 0 and resets:
catIdx = numpy.searchsorted(initProbability, rand)
reset = 1
else:
catIdx = numpy.searchsorted(transitionTable[catIdx], rand)
reset = 0
o.write([reset,categoryList[catIdx]])
seqIdx = (seqIdx+1)%seqLength
o.close()
if __name__=='__main__':
numpy.random.seed(1956)
model = createFirstOrderModel()
categoryList = model[0]
categoryFile = open("categories.txt", 'w')
for category in categoryList:
categoryFile.write(category+'\n')
categoryFile.close()
#import pylab
#pylab.imshow(model[2], interpolation='nearest')
#pylab.show()
for resets in [True, False]:
for seqLength in [2, 10]:
for numIterations in [1000, 10000, 100000]:
generateFirstOrderData(model,
numIterations=numIterations,
seqLength=seqLength,
resets=resets,
suffix='train_%s' % ('resets' if resets else 'noresets',))
generateFirstOrderData(model, numIterations=10000, seqLength=seqLength,
resets=resets,
suffix='test_%s' % ('resets' if resets else 'noresets',))
| gpl-3.0 |