repo_name (stringlengths 5-100) | path (stringlengths 4-299) | copies (stringclasses, 990 values) | size (stringlengths 4-7) | content (stringlengths 666-1.03M) | license (stringclasses, 15 values) | hash (int64, -9,223,351,895,964,839,000 to 9,223,297,778B) | line_mean (float64, 3.17-100) | line_max (int64, 7-1k) | alpha_frac (float64, 0.25-0.98) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---
pixelated-project/pixelated-user-agent | service/test/functional/features/page_objects/backup_account_page.py | 2 | 1111 | #
# Copyright (c) 2017 ThoughtWorks, Inc.
#
# Pixelated is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pixelated is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Pixelated. If not, see <http://www.gnu.org/licenses/>.
from base_page import BasePage
class BackupAccountPage(BasePage):
def __init__(self, context):
super(BackupAccountPage, self).__init__(context, context.backup_account_url)
self._locators = {
'logout_button': 'button[name="logout"]'
}
def logout(self):
logout_button = self.find_element_by_css_selector(self._locators['logout_button'])
logout_button.click()
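# A minimal usage sketch, not part of the original page object: it assumes a
# behave-style `context` carrying a Selenium driver plus a `backup_account_url`
# attribute, which is what BasePage appears to expect.
#
#     page = BackupAccountPage(context)
#     page.logout()   # clicks the element matching 'button[name="logout"]'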
| agpl-3.0 | -5,102,962,663,372,319,000 | 36.033333 | 90 | 0.715572 | false |
gnowledge/ncert_nroer | objectapp/cnlgb.py | 3 | 21411 | from objectapp.models import *
from gstudio.models import *
from django.template.defaultfilters import slugify
import inflect
def get_cnlgb_list(self):
z = []
k = get_lex_sentence(self)
if not k:
pass
else:
z.extend(k)
l = get_lex_sentence_optional(self)
if not l:
pass
else:
z.extend(l)
m = get_CNL_dependency(self)
if not m:
pass
else:
z.extend(m)
n = membership_sentence(self)
if not n:
pass
else:
z.extend(n)
o = get_attr_sentence(self)
if not o:
pass
else:
z.extend(o)
rel = get_rel(self)
if not rel:
pass
else:
z.extend(rel)
return z
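# Illustrative (made-up) output of get_cnlgb_list for a Gbobject titled
# "Oxygen" that is a member of the objecttype "Element" and carries an
# attribute "symbol" with value "O":
#
#     ["Oxygen is a proper-noun.",
#      "Oxygen is a member of a element.",
#      "The symbol of Oxygen is O."]
#
# Each helper below contributes zero or more such controlled-natural-language
# sentences; helpers that return nothing are simply skipped.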
def advanced_cnlgb(self):
zz=[]
p = get_CNL_sentence_authors(self)
if not p:
pass
else:
zz.extend(p)
"""Generates CNL sentence for RT/R"""
rt = get_RT_sentence(self)
if not rt:
pass
else:
zz.extend(rt)
return zz
def get_lex_sentence(self):
if not self.ref.__class__.__name__ is 'Gbobject':
pass
else:
cns=self.get_nbh
d=[]
for k in cns:
if k=='title':
title = str(cns['title'])
title_slug = slugify(title)
if not cns[k]:
pass
else:
g=str(title_slug)+" is a proper-noun."
d.append(g.capitalize())
return d
def get_lex_sentence_optional(self):
if not self.ref.__class__.__name__ is 'Gbobject':
pass
else:
cns=self.get_nbh
title = str(cns['title'])
title_slug = slugify(title)
d=[]
for k in cns:
if k =='altnames':
alt_name = str(cns['altnames'])
alt_slug = slugify(alt_name)
if not cns[k]:
pass
else:
i=str(cns[k])+" is an alternate name for "+str(title_slug)+"."
d.append(i.capitalize())
elif k == 'plural':
if not cns[k]:
pass
else:
pl = str(cns[k])
pl_slug = slugify(pl)
m = str(pl_slug)+" is a plural of "+str(title_slug)+"."
d.append(m.capitalize())
return d
def get_CNL_sentence_authors(self):
if not self.ref.__class__.__name__ is 'Gbobject':
pass
else:
title = self.title
title_slug = slugify(title)
d=[]
if self.authors.all():
auth = []
auth = self.authors.all()
len_auth=len(auth)
if len_auth == 1:
for each in auth:
aut=slugify(each)
e=str(aut).title()+" is an author to "+str(title_slug).title()+"."
d.append(e)
else:
##print "len not 1"
y=[]
for each in self.authors.all():
a=each
a_slug=slugify(a)
y.append(a_slug)
for e_i in y:
if y.index(e_i) == 0:
sen = str(e_i)
else:
sen = str(sen)+" and "+str(e_i)
e = str(sen).title()+" are all chosen authors to "+str(title_slug).title()+"."
d.append(e)
return d
def get_CNL_dependency(self):
if not self.ref.__class__.__name__ is 'Gbobject':
pass
else:
title = self.title
title_slug = slugify(title)
d=[]
if self.prior_nodes.all():
p_n_a = []
p_n_a = self.prior_nodes.all()
len_pna=len(p_n_a)
if len_pna == 1:
for each in p_n_a:
pn=slugify(each)
h=str(pn).title()+" is a prior_node for "+str(title_slug).title()+"."+str(title_slug).title()+" depends upon "+str(pn).title()+"."
d.append(h)
else:
sen = dependency_plural(p_n_a)
h = str(sen)+". It is the prior_node and required for the meaning of "+str(title_slug).title()+"."
d.append(h)
if self.posterior_nodes.all():
p_n_a = []
p_n_a = self.posterior_nodes.all()
len_pna=len(p_n_a)
if len_pna == 1:
for each in p_n_a:
pn = slugify(each)
p = str(pn).title()+" is a posterior_node for "+str(title_slug).title()+"."+str(title_slug).title()+" is required for the meaning of "+str(pn).title()+"."
d.append(p)
else:
sen = dependency_plural(p_n_a)
p = str(sen)+". It is the posterior_node and depends on the meaning of "+str(title_slug).title()+"."
d.append(p)
return d
#Generates dependency sentence for plural
def dependency_plural(p_n_a):
for each in p_n_a:
a = each
each_r = each.ref.__class__.__name__
a_slug = slugify(a)
y=[]
for a_slug in p_n_a:
if len(y) == 0:
#print "If Y is empty, for first item"
if each_r == 'Relationtype':
b_slug = str(a_slug).title()+" is an adjective"
else:
b_slug = str(a_slug).title()+" is a proper-noun"
y.append(b_slug)
else:
if each_r != 'Relationtype':
#print "Its not a relation_type, but a proper-noun"
aa_slug = str(a_slug).title()+" is a proper-noun"
y.append(aa_slug)
else:
#print "It is a relationtype"
ab_slug = str(a_slug).title()+" is an adjective"
y.append(ab_slug)
for e_i in y:
if y.index(e_i) == 0:
sen = str(e_i)
else:
sen = str(sen)+" and "+str(e_i)
return sen
def membership_sentence(self):
"""Returns CNL sentences for membership"""
if not self.ref.__class__.__name__ is 'Gbobject':
pass
else:
cns=self.get_nbh
title = str(cns['title'])
title_slug = slugify(title)
d=[]
for k in cns:
if self.ref.__class__.__name__ is 'Gbobject':
if k=='member_of':
if not cns[k]:
pass
else:
cmm = []
cmm = self.objecttypes.all()
len_cmm=len(cmm)
if len_cmm == 1:
for each in cmm:
cm=slugify(each)
j=str(title_slug)+" is a member of a "+str(cm)+"."
d.append(j.capitalize())
else:
#print "len not 1"
y=[]
for each in self.objecttypes.all():
a=each
a_slug=slugify(a)
y.append(a_slug)
for e_i in y:
if y.index(e_i) == 0:
sen = str(e_i)
else:
sen = str(sen)+" and a "+str(e_i)
j= str(title_slug)+" is a member of a "+sen+"."
d.append(j.capitalize())
return d
#Returns attributes for the given Gbobject
def get_attr_sentence(self):
if not self.ref.__class__.__name__ is 'Gbobject':
pass
else:
at = Gbobject.get_attributes(self)
if at:
a = []
title = self.title
for k,v in at.iteritems():
attr = k
for each in v:
value = each
sen = "The "+str(attr)+" of "+str(title)+" is "+str(value)+"."
a.append(sen)
return a
def get_list_relation(self, lr):
"""Returns the list of relations"""
lst = []
gbr = self.get_relations1
if not gbr:
pass
else:
for k,v in gbr.iteritems():
if k == 'lrelations':
val_l = v
if k == 'rrelations':
val_r = v
if lr == 0:
return val_l
elif lr == 1:
return val_r
def get_CNL_sentence_RT(self, lst, rst, detail_level):
if self.ref.__class__.__name__ is 'Relationtype':
core = [] #core data list
core_t = []
core_i = []
reflexive = []
adv = [] # advanced data list
title=self.title
title_slug=slugify(title)
inverse=self.inverse
inverse_slug=slugify(inverse)
is_symmetrical=self.is_symmetrical
is_reflexive=self.is_reflexive
llist = []
rlist = []
llist = lst
rlist = rst
#Flag variable that checks if plural-left,right
plural_l = 0
plural_r = 0
if isinstance(llist,list):
"""If llist is a list"""
ll = []
for each in llist:
if each.ref.__class__.__name__ is not 'Gbobject':
"""Common-noun"""
lst = "a "+str(each).lower()
else:
"""Proper-noun"""
lst = str(each).title()
ll.append(lst)
if len(ll) == 1:
for e in ll:
left_subtype = e
else:
"""Since left-ST is plural, flag value assigned 1 """
plural_l = 1
for e in ll:
if ll.index(e)==0:
sen = str(e)
else:
sen = str(sen)+" and "+str(e)
left_subtype = sen
else:
"""If llist is not a list"""
if lst.ref.__class__.__name__ is 'Gbobject':
left_subtype = lst
else:
left_subtype = "a "+str(lst)
if isinstance(rlist,list):
"""If rlist is a list"""
rl = []
#print "Is a list"
for each in rlist:
if each.ref.__class__.__name__ is not 'Gbobject':
"""Common-noun"""
rst = "a "+str(each)
else:
rst = each
rl.append(rst)
if len(rl) == 1:
for e in rl:
right_subtype = e
else:
"""Since right-ST is plural, flag value assigned 1 """
plural_r = 1
for e in rl:
if rl.index(e)==0:
sen = str(e)
else:
sen = str(sen)+" and "+str(e)
right_subtype = sen
else:
"""If Rlist is not a list"""
if rst.ref.__class__.__name__ is 'Gbobject':
right_subtype = rst
else:
right_subtype = "a "+str(rst)
#Core sentence - title
rel = rel_CNL(self, left_subtype, right_subtype, plural_l)
core_t.extend(rel)
rlex = rel_lex_sentence(self)
adv.extend(rlex)
app_NT = get_app_NT(self)
adv.extend(app_NT)
st = get_RT_subjecttype(self, left_subtype, right_subtype)
adv.extend(st)
#Is symmetrical
if is_symmetrical:
symm = is_symmetrical_RT(self, left_subtype, right_subtype, plural_r)
core_i.extend(symm)
else:
asymm = is_asymmetrical_RT(self, left_subtype, right_subtype, plural_r)
core_i.extend(asymm)
#Is reflexive
if not is_reflexive:
pass
else:
if detail_level == 1 or plural_l == 1:
st = right_subtype
else:
st = left_subtype
is_refl = is_reflexive_sentence(self, st)
reflexive.extend(is_refl)
if detail_level==0:
#Title,Reflexive
for e in core_t:
a = e
reflexive.insert(0, a)
return reflexive
elif detail_level==1:
#Inverse,reflexive
for e in core_i:
a = e
reflexive.insert(0, a)
return reflexive
elif detail_level==2:
#Title, Inverse & Reflexive
core.extend(core_t)
core.extend(core_i)
core.extend(reflexive)
return core
elif detail_level==3:
#Return advanced grammatical information
return adv
elif detail_level==4:
#Return all info - Core & Advanced Info
newlist = []
newlist.extend(core)
newlist.extend(adv)
return newlist
#Checks if RT-title is a transitive verb finite singular or an iterative adjective
def istv_title(self):
p = inflect.engine()
from django.template.defaultfilters import slugify
destination = open( "/home/user/gnowsys-studio/demo/aFile.pl", "r+" )
f = destination.read()
a_t = self.title
a = slugify(a_t)
if '-' not in a:
if a[-1] == 's':
a_s = p.singular_noun(a)
a_lex = "tv_finsg("+a+", "+str(a_s)+")."
strpos = f.find(a_lex)
if strpos != -1:
return True
else:
return False
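# Example of the lexicon lookup performed above (values are illustrative): for
# a relation type titled "contains", slugify gives "contains", inflect's
# singular_noun() yields roughly "contain", and the function searches aFile.pl
# for the literal Prolog fact
#
#     tv_finsg(contains, contain).
#
# returning True only if that exact string is present.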
#Checks if RT-inverse is a transitive verb finite singular or an iterative adjective
def istv_inverse(self):
p = inflect.engine()
destination = open( "/home/user/gnowsys-studio/demo/aFile.pl", "r+" )
f = destination.read()
a_t = self.inverse
a = slugify(a_t)
if '-' not in a:
if a[-1] == 's':
a_s = p.singular_noun(a)
a_lex = "tv_finsg("+a+", "+str(a_s)+")."
strpos = f.find(a_lex)
if strpos != -1:
return True
else:
return False
def rel_CNL(self, left_subtype, right_subtype, plural_l):
"""To generate sentence for relation"""
title = self.title
title_slug = slugify(title)
if self.ref.__class__.__name__ is 'Relationtype':
rel = []
if istv_title(self):
st = str(left_subtype)+" "+str(title).lower()+" "+str(right_subtype)+"."
else:
if plural_l == 0:
st = str(left_subtype)+" is "+str(title).lower()+" "+str(right_subtype)+"."
elif plural_l == 1:
st = str(left_subtype)+" are "+str(title).lower()+" "+str(right_subtype)+"."
rel.append(st)
return rel
def is_reflexive_sentence(self, st):
"""Generates reflexive sentence"""
refl = []
title = self.title
title_slug = slugify(title)
if istv_title(self):
j= "It is a reflexive sentence. "+str(st).title()+" "+str(title)+" "+str(st)+"."
else:
j= "It is a reflexive sentence. "+str(st).title()+" is "+str(title)+" "+str(st)+"."
refl.append(j)
return refl
def is_symmetrical_RT(self, left_subtype, right_subtype, plural_r):
"""Generates CNL Sentence for Relation/RT if symmetrical"""
symm = []
title = self.title
title_slug = slugify(title)
#if (type_lst == 'Gbobject' and type_rst == 'Gbobject'):
if istv_title(self):
g = str(right_subtype).title()+" "+str(title)+" "+str(left_subtype).title()+"."
else:
if plural_r == 0:
g = str(right_subtype).title()+" is "+str(title)+" "+str(left_subtype).title()+"."
else:
g = str(right_subtype).title()+" are "+str(title)+" "+str(left_subtype).title()+"."
symm.append(g)
return symm
def is_asymmetrical_RT(self, left_subtype, right_subtype, plural_r):
"""Generates CNL Sentence for Relation/RT if symmetrical"""
asymm = []
inverse = self.inverse
inverse_slug = slugify(inverse)
if istv_inverse(self):
g = str(right_subtype).title()+" "+str(inverse)+" "+str(left_subtype).title()+"."
else:
if plural_r == 0:
g = str(right_subtype).title()+" is "+str(inverse)+" "+str(left_subtype).title()+"."
elif plural_r == 1:
g = str(right_subtype).title()+" is "+str(inverse)+" "+str(left_subtype).title()+"."
asymm.append(g)
return asymm
def rel_lex_sentence(self):
"""Generates RT's title & inverse sentence"""
if self.ref.__class__.__name__ is 'Relationtype':
rlex = []
title=self.title
title_slug=slugify(title)
inverse=self.inverse
inverse_slug=slugify(inverse)
h="A relation_type's title is "+str(title_slug)+"."
rlex.append(h.capitalize())
if (title==inverse):
b="Its title and its inverse are equal."
else:
b="Its inverse is "+str(inverse_slug)+"."
rlex.append(b.capitalize())
return rlex
def get_app_NT(self):
"""Generates CNL Sentences for left & right applicable NT for RT"""
if self.ref.__class__.__name__ is 'Relationtype':
a = []
l_app = self.left_applicable_nodetypes
r_app=self.right_applicable_nodetypes
e = "Its left_applicable_nodetype is "+str(l_app).upper()+"."
a.append(e)
f = "Its right_applicable_nodetype is "+str(r_app).upper()+"."
a.append(f)
return a
def get_RT_sentence(self):
#Generates CNL Sentences in RT in detail
if self.ref.__class__.__name__ is 'Relationtype':
sentence = get_CNL_sentence_RT(self, self.left_subjecttype, self.right_subjecttype, 4)
return sentence
#Generates CNL sentences in Relation in detail
elif self.ref.__class__.__name__ is 'Relation':
sentence = get_CNL_sentence_RT(self.relationtype, self.left_subject, self.right_subject, 4)
return sentence
#Get relations for the Gbobject:Singular, plural
def get_rel(self):
sen = []
sentence = []
lr = Relation.objects.filter(left_subject = self.id)
rr = Relation.objects.filter(right_subject = self.id)
if lr:
"""List which stores each right subject"""
lst = get_list_relation(self, 0)
for k,v in lst.iteritems():
rel = Relationtype.objects.filter(title = k)
val = v
for rt in rel:
sen = get_CNL_sentence_RT(rt, self, val, 0)
sentence.extend(sen)
if rr:
"""List which stores each left subject"""
lst = get_list_relation(self, 1)
for k,v in lst.iteritems():
rel = Relationtype.objects.filter(inverse = k)
val = v
for rt in rel:
sen = get_CNL_sentence_RT(rt, val, self,1)
sentence.extend(sen)
return sentence
def get_RT_subjecttype(self, left_subtype, right_subtype):
"""Returns CNL sentence of left & right subject type of RT"""
if self.ref.__class__.__name__ is 'Relationtype':
st = []
ce = "Its left_subjecttype is "+str(left_subtype)+"."
c = ce.capitalize()
st.append(c)
de = "Its right_subjecttype is "+str(right_subtype)+"."
d = de.capitalize()
st.append(d)
return st
else:
pass
| agpl-3.0 | 7,259,495,591,314,798,000 | 32.824645 | 177 | 0.429499 | false |
dbbhattacharya/kitsune | vendor/packages/logilab-common/optparser.py | 6 | 3225 | # -*- coding: utf-8 -*-
# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:[email protected]
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
"""Extend OptionParser with commands.
Example:
>>> parser = OptionParser()
>>> parser.usage = '%prog COMMAND [options] <arg> ...'
>>> parser.add_command('build', 'mymod.build')
>>> parser.add_command('clean', run_clean, add_opt_clean)
>>> run, options, args = parser.parse_command(sys.argv[1:])
>>> return run(options, args[1:])
With mymod.build that defines two functions run and add_options
"""
__docformat__ = "restructuredtext en"
# XXX merge with optik_ext ? merge with clcommands ?
import sys
import optparse
class OptionParser(optparse.OptionParser):
def __init__(self, *args, **kwargs):
optparse.OptionParser.__init__(self, *args, **kwargs)
self._commands = {}
self.min_args, self.max_args = 0, 1
def add_command(self, name, mod_or_funcs, help=''):
"""name of the command
name of module or tuple of functions (run, add_options)
"""
assert isinstance(mod_or_funcs, str) or isinstance(mod_or_funcs, tuple), \
"mod_or_funcs has to be a module name or a tuple of functions"
self._commands[name] = (mod_or_funcs, help)
def print_main_help(self):
optparse.OptionParser.print_help(self)
print '\ncommands:'
for cmdname, (_, help) in self._commands.items():
print '% 10s - %s' % (cmdname, help)
def parse_command(self, args):
if len(args) == 0:
self.print_main_help()
sys.exit(1)
cmd = args[0]
args = args[1:]
if cmd not in self._commands:
if cmd in ('-h', '--help'):
self.print_main_help()
sys.exit(0)
elif self.version is not None and cmd == "--version":
self.print_version()
sys.exit(0)
self.error('unknown command')
self.prog = '%s %s' % (self.prog, cmd)
mod_or_f, help = self._commands[cmd]
# optparse inserts self.description between usage and options help
self.description = help
if isinstance(mod_or_f, str):
exec 'from %s import run, add_options' % mod_or_f
else:
run, add_options = mod_or_f
add_options(self)
(options, args) = self.parse_args(args)
if not (self.min_args <= len(args) <= self.max_args):
self.error('incorrect number of arguments')
return run, options, args
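# A minimal sketch of a command module usable with add_command() above; the
# module name `mymod.build` is hypothetical, it only needs to expose the two
# callables that parse_command() imports:
#
#     # mymod/build.py
#     def add_options(parser):
#         parser.add_option('--force', action='store_true', default=False)
#         parser.min_args, parser.max_args = 1, 2
#
#     def run(options, args):
#         print 'building %s (force=%s)' % (args, options.force)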
| bsd-3-clause | 2,284,299,466,963,705,900 | 34.054348 | 82 | 0.631628 | false |
agilemobiledev/spiderfoot | ext/dns/rdtypes/ANY/NSEC3PARAM.py | 85 | 3169 | # Copyright (C) 2004-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import cStringIO
import struct
import dns.exception
import dns.rdata
class NSEC3PARAM(dns.rdata.Rdata):
"""NSEC3PARAM record
@ivar algorithm: the hash algorithm number
@type algorithm: int
@ivar flags: the flags
@type flags: int
@ivar iterations: the number of iterations
@type iterations: int
@ivar salt: the salt
@type salt: string"""
__slots__ = ['algorithm', 'flags', 'iterations', 'salt']
def __init__(self, rdclass, rdtype, algorithm, flags, iterations, salt):
super(NSEC3PARAM, self).__init__(rdclass, rdtype)
self.algorithm = algorithm
self.flags = flags
self.iterations = iterations
self.salt = salt
def to_text(self, origin=None, relativize=True, **kw):
if self.salt == '':
salt = '-'
else:
salt = self.salt.encode('hex-codec')
return '%u %u %u %s' % (self.algorithm, self.flags, self.iterations, salt)
def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
algorithm = tok.get_uint8()
flags = tok.get_uint8()
iterations = tok.get_uint16()
salt = tok.get_string()
if salt == '-':
salt = ''
else:
salt = salt.decode('hex-codec')
return cls(rdclass, rdtype, algorithm, flags, iterations, salt)
from_text = classmethod(from_text)
def to_wire(self, file, compress = None, origin = None):
l = len(self.salt)
file.write(struct.pack("!BBHB", self.algorithm, self.flags,
self.iterations, l))
file.write(self.salt)
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
(algorithm, flags, iterations, slen) = struct.unpack('!BBHB',
wire[current : current + 5])
current += 5
rdlen -= 5
salt = wire[current : current + slen].unwrap()
current += slen
rdlen -= slen
if rdlen != 0:
raise dns.exception.FormError
return cls(rdclass, rdtype, algorithm, flags, iterations, salt)
from_wire = classmethod(from_wire)
def _cmp(self, other):
b1 = cStringIO.StringIO()
self.to_wire(b1)
b2 = cStringIO.StringIO()
other.to_wire(b2)
return cmp(b1.getvalue(), b2.getvalue())
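# Round-trip sketch using only the class above (the salt is given as hex text,
# with '-' meaning an empty salt, exactly as from_text() expects); the
# dnspython constants and Tokenizer used here are assumptions:
#
#     tok = dns.tokenizer.Tokenizer('1 0 10 aabbccdd')
#     rd = NSEC3PARAM.from_text(dns.rdataclass.IN, dns.rdatatype.NSEC3PARAM, tok)
#     rd.to_text()   # -> '1 0 10 aabbccdd'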
| gpl-2.0 | 8,172,667,477,668,317,000 | 35.011364 | 89 | 0.623541 | false |
ilastikdev/ilastik | ilastik/applets/objectClassification/opObjectClassification.py | 1 | 59400 | ###############################################################################
# ilastik: interactive learning and segmentation toolkit
#
# Copyright (C) 2011-2014, the ilastik developers
# <[email protected]>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# In addition, as a special exception, the copyright holders of
# ilastik give you permission to combine ilastik with applets,
# workflows and plugins which are not covered under the GNU
# General Public License.
#
# See the LICENSE file for details. License information is also available
# on the ilastik web site at:
# http://ilastik.org/license.html
###############################################################################
import numpy
import numpy.lib.recfunctions as rfn
import vigra
import time
import warnings
import itertools
from collections import defaultdict
from functools import partial
from lazyflow.graph import Operator, InputSlot, OutputSlot
from lazyflow.stype import Opaque
from lazyflow.rtype import List
from lazyflow.operators import OpValueCache, OpSlicedBlockedArrayCache, OperatorWrapper, OpMultiArrayStacker
from lazyflow.request import Request, RequestPool, RequestLock
from lazyflow.classifiers import ParallelVigraRfLazyflowClassifierFactory, ParallelVigraRfLazyflowClassifier
from ilastik.utility import OperatorSubView, MultiLaneOperatorABC, OpMultiLaneWrapper
from ilastik.utility.mode import mode
from ilastik.applets.objectExtraction.opObjectExtraction import default_features_key
from ilastik.applets.objectExtraction.opObjectExtraction import OpObjectExtraction
from ilastik.applets.base.applet import DatasetConstraintError
import logging
logger = logging.getLogger(__name__)
MISSING_VALUE = 0
class OpObjectClassification(Operator, MultiLaneOperatorABC):
"""The top-level operator for object classification.
Most functionality is handled by specialized operators such as
OpObjectTrain and OpObjectPredict.
Also transfers existing labels if the upstream object segmentation
changes. The transfer is conservative: labels only get transfered
from an old object to a new object if they overlap sufficiently,
and the label does not overlap with other objects.
"""
name = "OpObjectClassification"
category = "Top-level"
###############
# Input slots #
###############
BinaryImages = InputSlot(level=1) # for visualization
RawImages = InputSlot(level=1) # for visualization
SegmentationImages = InputSlot(level=1) #connected components
# the actual feature arrays
# same format as OpObjectExtraction.RegionFeatures
ObjectFeatures = InputSlot(rtype=List, stype=Opaque, level=1)
# the names of the features computed in the object extraction operator
# same format as OpObjectExtraction.ComputedFeatureNames
ComputedFeatureNames = InputSlot(rtype=List, stype=Opaque)
# the features selected in our own GUI
# same format as ComputedFeatureNames
SelectedFeatures = InputSlot(rtype=List, stype=Opaque)
LabelsAllowedFlags = InputSlot(stype='bool', level=1)
AllowDeleteLabels = InputSlot(stype='bool', value=True)
AllowDeleteLastLabelOnly = InputSlot(stype='bool', value=False)
AllowAddLabel = InputSlot(stype='bool', value=True)
SuggestedLabelNames = InputSlot(stype=Opaque, value=[])
LabelInputs = InputSlot(stype=Opaque, rtype=List, optional=True, level=1)
FreezePredictions = InputSlot(stype='bool', value=False)
EnableLabelTransfer = InputSlot(stype='bool', value=False)
# for reading from disk
InputProbabilities = InputSlot(level=1, stype=Opaque, rtype=List, optional=True)
################
# Output slots #
################
NumLabels = OutputSlot()
Classifier = OutputSlot()
LabelImages = OutputSlot(level=1)
Predictions = OutputSlot(level=1, stype=Opaque, rtype=List)
Probabilities = OutputSlot(level=1, stype=Opaque, rtype=List)
# pulls whatever is in the cache, but does not try to compute more.
CachedProbabilities = OutputSlot(level=1, stype=Opaque, rtype=List)
PredictionImages = OutputSlot(level=1) #Labels, by the majority vote
UncachedPredictionImages = OutputSlot(level=1)
PredictionProbabilityChannels = OutputSlot(level=2) # Classification predictions, enumerated by channel
ProbabilityChannelImage = OutputSlot(level=1)
SegmentationImagesOut = OutputSlot(level=1) #input connected components
BadObjects = OutputSlot(level=1, stype=Opaque, rtype=List) #Objects with NaN-like features
BadObjectImages = OutputSlot(level=1) #Images, where objects with NaN-like features are black
Warnings = OutputSlot(stype=Opaque) #Warnings about objects with NaN-like features encountered in training
# Used for labeling
Eraser = OutputSlot()
DeleteLabel = OutputSlot()
# GUI-only (not part of the pipeline, but saved to the project)
LabelNames = OutputSlot()
LabelColors = OutputSlot()
PmapColors = OutputSlot()
def __init__(self, *args, **kwargs):
super(OpObjectClassification, self).__init__(*args, **kwargs)
# internal operators
opkwargs = dict(parent=self)
self.opTrain = OpObjectTrain(parent=self)
self.opPredict = OpMultiLaneWrapper(OpObjectPredict, **opkwargs)
self.opLabelsToImage = OpMultiLaneWrapper(OpRelabelSegmentation, **opkwargs)
self.opPredictionsToImage = OpMultiLaneWrapper(OpRelabelSegmentation, **opkwargs)
self.opPredictionImageCache = OpMultiLaneWrapper(OpSlicedBlockedArrayCache, **opkwargs)
self.opPredictionImageCache.name="OpObjectClassification.opPredictionImageCache"
self.opProbabilityChannelsToImage = OpMultiLaneWrapper(OpMultiRelabelSegmentation, **opkwargs)
self.opBadObjectsToImage = OpMultiLaneWrapper(OpRelabelSegmentation, **opkwargs)
self.opBadObjectsToWarningMessage = OpBadObjectsToWarningMessage(parent=self)
self.classifier_cache = OpValueCache(parent=self)
self.classifier_cache.name = "OpObjectClassification.classifier_cache"
# connect inputs
self.opTrain.Features.connect(self.ObjectFeatures)
self.opTrain.Labels.connect(self.LabelInputs)
self.opTrain.FixClassifier.setValue(False)
self.opTrain.SelectedFeatures.connect(self.SelectedFeatures)
self.classifier_cache.Input.connect(self.opTrain.Classifier)
# Find the highest label in all the label images
self.opMaxLabel = OpMaxLabel( parent=self )
self.opMaxLabel.Inputs.connect( self.LabelInputs )
self.opPredict.Features.connect(self.ObjectFeatures)
self.opPredict.Classifier.connect(self.classifier_cache.Output)
self.opPredict.SelectedFeatures.connect(self.SelectedFeatures)
# Not directly connected. Must always use setValue() to update.
# See _updateNumClasses()
# self.opPredict.LabelsCount.connect(self.opMaxLabel.Output) # See _updateNumClasses()
self.opLabelsToImage.Image.connect(self.SegmentationImages)
self.opLabelsToImage.ObjectMap.connect(self.LabelInputs)
self.opLabelsToImage.Features.connect(self.ObjectFeatures)
self.opPredictionsToImage.Image.connect(self.SegmentationImages)
self.opPredictionsToImage.ObjectMap.connect(self.opPredict.Predictions)
self.opPredictionsToImage.Features.connect(self.ObjectFeatures)
#self.opPredictionImageCache.name = "prediction_image_cache"
self.opPredictionImageCache.fixAtCurrent.connect( self.FreezePredictions )
self.opPredictionImageCache.Input.connect( self.opPredictionsToImage.Output )
self.opProbabilityChannelsToImage.Image.connect(self.SegmentationImages)
self.opProbabilityChannelsToImage.ObjectMaps.connect(self.opPredict.ProbabilityChannels)
self.opProbabilityChannelsToImage.Features.connect(self.ObjectFeatures)
class OpWrappedCache(Operator):
"""
This quick hack is necessary because there's not currently a way to wrap an OperatorWrapper.
We need to double-wrap the cache, so we need this operator to provide the first level of wrapping.
"""
Input = InputSlot(level=1)
innerBlockShape = InputSlot()
outerBlockShape = InputSlot()
fixAtCurrent = InputSlot(value = False)
Output = OutputSlot(level=1)
def __init__(self, *args, **kwargs):
super( OpWrappedCache, self ).__init__( *args, **kwargs )
self._innerOperator = OperatorWrapper( OpSlicedBlockedArrayCache, parent=self )
self._innerOperator.Input.connect( self.Input )
self._innerOperator.fixAtCurrent.connect( self.fixAtCurrent )
self._innerOperator.innerBlockShape.connect( self.innerBlockShape )
self._innerOperator.outerBlockShape.connect( self.outerBlockShape )
self.Output.connect( self._innerOperator.Output )
def execute(self, slot, subindex, roi, destination):
assert False, "Shouldn't get here."
def propagateDirty(self, slot, subindex, roi):
pass # Nothing to do...
# Wrap the cache for probability channels twice TWICE.
self.opProbChannelsImageCache = OpMultiLaneWrapper( OpWrappedCache, parent=self )
self.opProbChannelsImageCache.name = "OpObjectClassification.opProbChannelsImageCache"
self.opProbChannelsImageCache.Input.connect(self.opProbabilityChannelsToImage.Output)
self.opProbChannelsImageCache.fixAtCurrent.connect( self.FreezePredictions )
self.opBadObjectsToImage.Image.connect(self.SegmentationImages)
self.opBadObjectsToImage.ObjectMap.connect(self.opPredict.BadObjects)
self.opBadObjectsToImage.Features.connect(self.ObjectFeatures)
self.opBadObjectsToWarningMessage.BadObjects.connect(self.opTrain.BadObjects)
self.opPredict.InputProbabilities.connect(self.InputProbabilities)
def _updateNumClasses(*args):
"""
When the number of labels changes, we MUST make sure that the prediction image changes its shape (the number of channels).
Since setupOutputs is not called for mere dirty notifications, but is called in response to setValue(),
we use this function to call setValue().
"""
numClasses = len(self.LabelNames.value)
self.opPredict.LabelsCount.setValue( numClasses )
self.opTrain.LabelsCount.setValue( numClasses )
self.NumLabels.setValue( numClasses )
self.LabelNames.notifyDirty( _updateNumClasses )
self.LabelNames.setValue( [] )
self.LabelColors.setValue( [] )
self.PmapColors.setValue( [] )
self.opStackProbabilities = OperatorWrapper( OpMultiArrayStacker, parent=self )
self.opStackProbabilities.Images.connect( self.opProbChannelsImageCache.Output )
self.opStackProbabilities.AxisFlag.setValue('c')
# connect outputs
self.LabelImages.connect(self.opLabelsToImage.Output)
self.Predictions.connect(self.opPredict.Predictions)
self.Probabilities.connect(self.opPredict.Probabilities)
self.CachedProbabilities.connect(self.opPredict.CachedProbabilities)
self.PredictionImages.connect(self.opPredictionImageCache.Output)
self.UncachedPredictionImages.connect(self.opPredictionsToImage.Output)
self.PredictionProbabilityChannels.connect(self.opProbChannelsImageCache.Output)
self.ProbabilityChannelImage.connect( self.opStackProbabilities.Output )
self.BadObjects.connect(self.opPredict.BadObjects)
self.BadObjectImages.connect(self.opBadObjectsToImage.Output)
self.Warnings.connect(self.opBadObjectsToWarningMessage.WarningMessage)
self.Classifier.connect(self.classifier_cache.Output)
self.SegmentationImagesOut.connect(self.SegmentationImages)
# Not directly connected. Must always use setValue() to update.
# See _updateNumClasses()
# self.NumLabels.connect( self.opMaxLabel.Output )
self.Eraser.setValue(100)
self.DeleteLabel.setValue(-1)
self._labelBBoxes = []
self._ambiguousLabels = []
self._needLabelTransfer = False
def handleNewInputImage(multislot, index, *args):
def handleInputReady(slot):
self.setupCaches(multislot.index(slot))
multislot[index].notifyReady(handleInputReady)
self.SegmentationImages.notifyInserted(handleNewInputImage)
self._predict_enabled = False
def setupCaches(self, imageIndex):
"""Setup the label input and caches to correct dimensions"""
numImages=len(self.SegmentationImages)
cctype = self.SegmentationImages[imageIndex].meta.dtype
if not issubclass(cctype, numpy.integer):
msg = "Connected Components image should be of integer type.\n"\
"Ask your workflow developer to change the input applet accordingly.\n"
raise DatasetConstraintError("Object Classification", msg)
self.LabelInputs.resize(numImages)
self.LabelInputs[imageIndex].meta.shape = (1,)
self.LabelInputs[imageIndex].meta.dtype = object
self.LabelInputs[imageIndex].meta.mapping_dtype = numpy.uint8
self.LabelInputs[imageIndex].meta.axistags = None
self._resetLabelInputs(imageIndex)
def _resetLabelInputs(self, imageIndex, roi=None):
labels = dict()
for t in range(self.SegmentationImages[imageIndex].meta.shape[0]):
#initialize, because volumina needs to reshape to use it as a datasink
labels[t] = numpy.zeros((2,))
self.LabelInputs[imageIndex].setValue(labels)
if imageIndex in range(len(self._ambiguousLabels)):
self._ambiguousLabels[imageIndex] = None
self._labelBBoxes[imageIndex] = dict()
else:
self._ambiguousLabels.insert(imageIndex, None)
self._labelBBoxes.insert(imageIndex, dict())
def removeLabel(self, label):
#remove this label from the inputs
for islot, label_slot in enumerate(self.LabelInputs):
if not label_slot.ready() or islot>= len(self.RawImages) or \
not self.RawImages[islot].ready():
continue
cur_labels = label_slot.value
nTimes = self.RawImages[islot].meta.shape[0]
nLabels = len(self.LabelNames.value)+1 #+1 because we already took out the name in labelingGui
for t in range(nTimes):
label_values = cur_labels[t]
label_values[label_values==label+1] = 0
for nextLabel in range(label, nLabels):
label_values[label_values==nextLabel+1]=nextLabel
self.LabelInputs.setDirty([])
def setupOutputs(self):
self.Warnings.meta.shape = (1,)
axisOrder = [ tag.key for tag in self.RawImages[0].meta.axistags ]
blockDimsX = { 't' : (1,1),
'z' : (128,256),
'y' : (128,256),
'x' : (1,1),
'c' : (100, 100) }
blockDimsY = { 't' : (1,1),
'z' : (128,256),
'y' : (1,1),
'x' : (128,256),
'c' : (100,100) }
blockDimsZ = { 't' : (1,1),
'z' : (1,1),
'y' : (128,256),
'x' : (128,256),
'c' : (100,100) }
innerBlockShapeX = tuple( blockDimsX[k][0] for k in axisOrder )
outerBlockShapeX = tuple( blockDimsX[k][1] for k in axisOrder )
innerBlockShapeY = tuple( blockDimsY[k][0] for k in axisOrder )
outerBlockShapeY = tuple( blockDimsY[k][1] for k in axisOrder )
innerBlockShapeZ = tuple( blockDimsZ[k][0] for k in axisOrder )
outerBlockShapeZ = tuple( blockDimsZ[k][1] for k in axisOrder )
self.opPredictionImageCache.innerBlockShape.setValue( (innerBlockShapeX, innerBlockShapeY, innerBlockShapeZ) )
self.opPredictionImageCache.outerBlockShape.setValue( (outerBlockShapeX, outerBlockShapeY, outerBlockShapeZ) )
self.opProbChannelsImageCache.innerBlockShape.setValue( (innerBlockShapeX, innerBlockShapeY, innerBlockShapeZ) )
self.opProbChannelsImageCache.outerBlockShape.setValue( (outerBlockShapeX, outerBlockShapeY, outerBlockShapeZ) )
def setInSlot(self, slot, subindex, roi, value):
pass
def propagateDirty(self, slot, subindex, roi):
if slot==self.SegmentationImages and len(self.LabelInputs)>0:
self._ambiguousLabels[subindex[0]] = self.LabelInputs[subindex[0]].value
self._needLabelTransfer = True
def assignObjectLabel(self, imageIndex, coordinate, assignedLabel):
"""
Update the assigned label of the object located at the given coordinate.
Does nothing if no object resides at the given coordinate.
"""
segmentationShape = self.SegmentationImagesOut[imageIndex].meta.shape
assert len(coordinate) == len( segmentationShape ), "Coordinate: {} has the wrong length for this image, which is of shape: {}".format( coordinate, segmentationShape )
slicing = tuple(slice(i, i+1) for i in coordinate)
arr = self.SegmentationImagesOut[imageIndex][slicing].wait()
objIndex = arr.flat[0]
if objIndex == 0: # background; FIXME: do not hardcode
return
timeCoord = coordinate[0]
labelslot = self.LabelInputs[imageIndex]
labelsdict = labelslot.value
labels = labelsdict[timeCoord]
nobjects = len(labels)
if objIndex >= nobjects:
newLabels = numpy.zeros((objIndex + 1),)
newLabels[:nobjects] = labels[:]
labels = newLabels
labels[objIndex] = assignedLabel
labelsdict[timeCoord] = labels
labelslot.setValue(labelsdict)
labelslot.setDirty([(timeCoord, objIndex)])
#Fill the cache of label bounding boxes, if it was empty
if len(self._labelBBoxes[imageIndex].keys())==0:
#it's the first label for this image
feats = self.ObjectFeatures[imageIndex]([timeCoord]).wait()
#the bboxes should be the same for all channels
mins = feats[timeCoord][default_features_key]["Coord<Minimum>"]
maxs = feats[timeCoord][default_features_key]["Coord<Maximum>"]
bboxes = dict()
bboxes["Coord<Minimum>"] = mins
bboxes["Coord<Maximum>"] = maxs
self._labelBBoxes[imageIndex][timeCoord]=bboxes
def triggerTransferLabels(self, imageIndex):
if not self._needLabelTransfer:
return None
if not self.SegmentationImages[imageIndex].ready():
return None
if len(self._labelBBoxes[imageIndex].keys())==0:
#we either don't have any labels or we just read the project from file
#nothing to transfer
self._needLabelTransfer = False
return None
if not self.EnableLabelTransfer:
self._resetLabelInputs(imageIndex)
self._needLabelTransfer = False
return None
labels = dict()
for timeCoord in range(self.SegmentationImages[imageIndex].meta.shape[0]):
#we have to get new object features to get bounding boxes
logger.info("Transferring labels to the new segmentation. This might take a while...")
new_feats = self.ObjectFeatures[imageIndex]([timeCoord]).wait()
coords = dict()
coords["Coord<Minimum>"] = new_feats[timeCoord][default_features_key]["Coord<Minimum>"]
coords["Coord<Maximum>"] = new_feats[timeCoord][default_features_key]["Coord<Maximum>"]
#FIXME: pass axistags
new_labels, old_labels_lost, new_labels_lost = self.transferLabels(
self._ambiguousLabels[imageIndex][timeCoord],
self._labelBBoxes[imageIndex][timeCoord],
coords
)
labels[timeCoord] = new_labels
self._labelBBoxes[imageIndex][timeCoord]=coords
self._ambiguousLabels[imageIndex][timeCoord]=numpy.zeros((2,)) #initialize ambig. labels as normal labels
self.LabelInputs[imageIndex].setValue(labels)
self._needLabelTransfer = False
return new_labels, old_labels_lost, new_labels_lost
@staticmethod
def transferLabels(old_labels, old_bboxes, new_bboxes, axistags = None):
#transfer labels from old segmentation to new segmentation
mins_old = old_bboxes["Coord<Minimum>"]
maxs_old = old_bboxes["Coord<Maximum>"]
mins_new = new_bboxes["Coord<Minimum>"]
maxs_new = new_bboxes["Coord<Maximum>"]
nobj_old = mins_old.shape[0]
nobj_new = mins_new.shape[0]
if axistags is None:
axistags = "xyz"
data2D = False
if mins_old.shape[1]==2:
data2D = True
class bbox():
def __init__(self, minmaxs, axistags):
self.xmin = minmaxs[0][axistags.index('x')]
self.ymin = minmaxs[0][axistags.index('y')]
if not data2D:
self.zmin = minmaxs[0][axistags.index('z')]
else:
self.zmin = 0
self.xmax = minmaxs[1][axistags.index('x')]
self.ymax = minmaxs[1][axistags.index('y')]
if not data2D:
self.zmax = minmaxs[1][axistags.index('z')]
else:
self.zmax = 0
self.rad_x = 0.5*(self.xmax - self.xmin)
self.cent_x = self.xmin+self.rad_x
self.rad_y = 0.5*(self.ymax-self.ymin)
self.cent_y = self.ymin+self.rad_y
self.rad_z = 0.5*(self.zmax-self.zmin)
self.cent_z = self.zmin+self.rad_z
@staticmethod
def overlap(bbox_tuple):
this = bbox_tuple[0]
that = bbox_tuple[1]
over_x = this.rad_x+that.rad_x - (abs(this.cent_x-that.cent_x))
over_y = this.rad_y+that.rad_y - (abs(this.cent_y-that.cent_y))
over_z = this.rad_z+that.rad_z - (abs(this.cent_z-that.cent_z))
if not data2D:
if over_x>0 and over_y>0 and over_z>0:
return over_x*over_y*over_z
else:
if over_x>0 and over_y>0:
return over_x*over_y
return 0
nonzeros = numpy.nonzero(old_labels)[0]
bboxes_old = [bbox(x, axistags) for x in zip(mins_old[nonzeros], maxs_old[nonzeros])]
bboxes_new = [bbox(x, axistags) for x in zip(mins_new, maxs_new)]
#remove background
#FIXME: assuming background is 0 again
bboxes_new = bboxes_new[1:]
double_for_loop = itertools.product(bboxes_old, bboxes_new)
overlaps = map(bbox.overlap, double_for_loop)
overlaps = numpy.asarray(overlaps)
overlaps = overlaps.reshape((len(bboxes_old), len(bboxes_new)))
new_labels = numpy.zeros((nobj_new,), dtype=numpy.uint32)
old_labels_lost = dict()
old_labels_lost["full"]=[]
old_labels_lost["partial"]=[]
new_labels_lost = dict()
new_labels_lost["conflict"]=[]
for iobj in range(overlaps.shape[0]):
#take the object with maximum overlap
overlapsum = numpy.sum(overlaps[iobj, :])
if overlapsum==0:
old_labels_lost["full"].append((bboxes_old[iobj].cent_x, bboxes_old[iobj].cent_y, bboxes_old[iobj].cent_z))
continue
newindex = numpy.argmax(overlaps[iobj, :])
if overlapsum-overlaps[iobj,newindex]>0:
#this object overlaps with more than one new object
old_labels_lost["partial"].append((bboxes_old[iobj].cent_x, bboxes_old[iobj].cent_y, bboxes_old[iobj].cent_z))
overlaps[iobj, :] = 0
overlaps[iobj, newindex] = 1 #doesn't matter what number>0
for iobj in range(overlaps.shape[1]):
labels = numpy.where(overlaps[:, iobj]>0)[0]
if labels.shape[0]==1:
new_labels[iobj+1]=old_labels[nonzeros[labels[0]]] #iobj+1 because of the background
elif labels.shape[0]>1:
new_labels_lost["conflict"].append((bboxes_new[iobj].cent_x, bboxes_new[iobj].cent_y, bboxes_new[iobj].cent_z))
new_labels[0]=0 #FIXME: hardcoded background value again
return new_labels, old_labels_lost, new_labels_lost
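# Worked example of the overlap score above (numbers are made up): a 2D old
# object with bounding box (10,10)-(20,20) gives rad_x = rad_y = 5 and centre
# (15,15); a new object with box (15,15)-(30,30) gives radius 7.5 and centre
# (22.5,22.5); over_x = over_y = 5 + 7.5 - 7.5 = 5, so the score is 25.
# An old label is transferred only when exactly one old labelled object scores
# positively against a new object; conflicting transfers are collected in
# new_labels_lost["conflict"], and labels whose object vanished end up in
# old_labels_lost["full"] or old_labels_lost["partial"].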
def exportLabelInfo(self, file_path):
"""
For all images with labels, export object bounding boxes and label classes as JSON.
"""
import json
import collections
from functools import partial
logger.info("Exporting label information as json to: {}".format( file_path ))
json_data_all_lanes = collections.OrderedDict()
for lane_index, (label_slot, object_feature_slot) in enumerate(zip(self.LabelInputs, self.ObjectFeatures)):
logger.info("Processing image #{}".format( lane_index ))
json_data_this_lane = collections.OrderedDict()
labels_timewise = label_slot.value
for t in sorted(labels_timewise.keys()):
labels = labels_timewise[t]
if not any(labels):
continue
object_features_timewise = object_feature_slot([t]).wait()
object_features = object_features_timewise[t]
min_coords = object_features["Default features"]["Coord<Minimum>"]
max_coords = object_features["Default features"]["Coord<Maximum>"]
# Don't bother saving info for objects that aren't labeled
min_coords = min_coords[labels.nonzero()]
max_coords = max_coords[labels.nonzero()]
labels = labels[labels.nonzero()]
json_data_this_time = collections.OrderedDict()
bounding_boxes = collections.OrderedDict()
# Convert from numpy array to list (for json)
bounding_boxes["Coord<Minimum>"] = map( partial(map, int), min_coords )
bounding_boxes["Coord<Maximum>"] = map( partial(map, int), max_coords )
json_data_this_time["bounding_boxes"] = bounding_boxes
json_data_this_time["labels"] = map(int, labels)
json_data_this_lane[int(t)] = json_data_this_time
json_data_all_lanes[lane_index] = json_data_this_lane
with open(file_path, 'w') as f:
json.dump(json_data_all_lanes, f)
logger.info("Label export FINISHED.")
def importLabelInfo(self, file_path):
"""
Read labels and bounding boxes from a JSON file.
For all image lanes in the JSON file, replace all labels in that image.
For image lanes NOT listed in the JSON file, keep the existing labels.
"""
import json
logger.info("Reading label information from json: {}".format( file_path ))
with open(file_path, 'r') as f:
json_data_all_lanes = json.load(f)
max_label = 0
new_labels_all_lanes = {}
for lane_index_str in sorted(json_data_all_lanes.keys(), key=int):
lane_index = int(lane_index_str)
logger.info("Processing image #{}".format( lane_index ))
json_data_this_lane = json_data_all_lanes[lane_index_str]
new_labels_this_lane = {}
for time_str in sorted(json_data_this_lane.keys(), key=int):
time = int(time_str)
old_features_timewise = self.ObjectFeatures[lane_index]([time]).wait()
old_features = old_features_timewise[time]
current_bboxes = {}
current_bboxes["Coord<Minimum>"] = old_features["Default features"]["Coord<Minimum>"]
current_bboxes["Coord<Maximum>"] = old_features["Default features"]["Coord<Maximum>"]
json_data_this_time = json_data_this_lane[time_str]
saved_labels = numpy.array( json_data_this_time["labels"] )
max_label = max( max_label, saved_labels.max() )
saved_bboxes = {}
saved_bboxes["Coord<Minimum>"] = numpy.array( json_data_this_time["bounding_boxes"]["Coord<Minimum>"] )
saved_bboxes["Coord<Maximum>"] = numpy.array( json_data_this_time["bounding_boxes"]["Coord<Maximum>"] )
# Calculate new labels
newlabels, oldlost, newlost = OpObjectClassification.transferLabels(saved_labels, saved_bboxes, current_bboxes, None)
new_labels_this_lane[time] = newlabels
logger.info("Lane {}, time {} new labels: {}".format( lane_index, time, list(newlabels) ))
logger.info("Lane {}, time {} lost OLD: {}".format( lane_index, time, oldlost ))
logger.info("Lane {}, time {} lost NEW: {}".format( lane_index, time, newlost ))
# Apply new labels
new_labels_all_lanes[lane_index] = new_labels_this_lane
# If we have a new max label, add label classes as needed.
label_names = self.LabelNames.value
if len(self.LabelNames.value) < max_label:
new_label_names = list(label_names)
for class_index in range( len(label_names)+1, int(max_label)+1 ):
new_label_names.append( "Label {}".format( class_index ) )
self.LabelNames.setValue( new_label_names )
for lane_index, new_labels_timewise in sorted(new_labels_all_lanes.items()):
for t in range( self.SegmentationImages[lane_index].meta.getTaggedShape()['t'] ):
if t not in new_labels_timewise:
# No replacement labels. Copy old labels.
new_labels_timewise[t] = self.LabelInputs[lane_index].value[t]
logger.info("Applying new labels to lane {}".format( lane_index ))
self.LabelInputs[lane_index].setValue(new_labels_timewise)
logger.info("Label import FINISHED")
def createExportTable(self, lane, roi):
numLanes = len(self.SegmentationImages)
assert lane < numLanes, \
"Can't export features for lane {} (only {} lanes exist)"\
.format( lane, numLanes )
return self.opPredict[lane].createExportTable(roi)
def addLane(self, laneIndex):
numLanes = len(self.SegmentationImages)
assert numLanes == laneIndex, "Image lanes must be appended."
for slot in self.inputs.values():
if slot.level > 0 and len(slot) == laneIndex:
slot.resize(numLanes + 1)
def removeLane(self, laneIndex, finalLength):
for slot in self.inputs.values():
if slot.level > 0 and len(slot) == finalLength + 1:
slot.removeSlot(laneIndex, finalLength)
try:
self._ambiguousLabels.pop(laneIndex)
self._labelBBoxes.pop(laneIndex)
except:
#FIXME: sometimes this pop is called for no reason and makes the project unusable. We should fix the underlying issue.
pass
def getLane(self, laneIndex):
return OperatorSubView(self, laneIndex)
def _atleast_nd(a, ndim):
"""Like numpy.atleast_1d and friends, but supports arbitrary ndim,
always puts extra dimensions last, and resizes.
"""
if ndim < a.ndim:
return
nnew = ndim - a.ndim
newshape = tuple(list(a.shape) + [1] * nnew)
a.resize(newshape)
def _concatenate(arrays, axis):
"""wrapper to numpy.concatenate that resizes arrays first."""
arrays = list(a for a in arrays if 0 not in a.shape)
if len(arrays) == 0:
return numpy.array([])
maxd = max(max(a.ndim for a in arrays), 2)
for a in arrays:
_atleast_nd(a, maxd)
return numpy.concatenate(arrays, axis=axis)
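# Small example of the two helpers above: empty arrays are dropped, lower-rank
# inputs are padded with trailing singleton axes, and the rest is plain
# concatenation, so (shapes only)
#
#     _concatenate([numpy.zeros((3,)), numpy.zeros((2,))], axis=0).shape == (5, 1)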
def make_feature_array(feats, selected, labels=None):
featlist = []
labellist = []
row_names = []
col_names = []
for t in sorted(feats.keys()):
featsMatrix_tmp = []
index = None
if labels is not None:
labellist_tmp = []
lab = labels[t].squeeze()
index = numpy.nonzero(lab)
labellist_tmp.append(lab[index])
timestep_col_names = []
for plugin in sorted(feats[t].keys()):
if plugin == default_features_key or plugin not in selected:
continue
for featname in sorted(feats[t][plugin].keys()):
if featname not in selected[plugin]:
continue
value = feats[t][plugin][featname]
ft = numpy.asarray(value.squeeze())
if index is not None:
ft = ft[index]
featsMatrix_tmp.append(ft)
timestep_col_names.extend([(plugin, featname)] * value.shape[1])
if not col_names:
col_names = timestep_col_names
elif col_names != timestep_col_names:
raise Exception('different time slices did not have same features.')
#FIXME: we can do it all with just arrays
featsMatrix_tmp_combined = _concatenate(featsMatrix_tmp, axis=1)
featlist.append(featsMatrix_tmp_combined)
if index is not None:
row_names.extend(list((t, obj) for obj in index[0]))
if labels is not None:
labellist_tmp_combined = _concatenate(labellist_tmp, axis=1)
labellist.append(labellist_tmp_combined)
featMatrix = _concatenate(featlist, axis=0)
if labels is not None:
labelsMatrix = _concatenate(labellist, axis=0)
assert labelsMatrix.shape[0] == featMatrix.shape[0]
return featMatrix, row_names, col_names, labelsMatrix
return featMatrix, row_names, col_names
def replace_missing(a):
rows, cols = numpy.where(numpy.isnan(a) + numpy.isinf(a))
idx = (rows, cols)
rows = list(set(rows.flat))
cols = list(set(cols.flat))
a[idx] = MISSING_VALUE
return rows, cols
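# Example: for a = numpy.array([[1., numpy.nan], [numpy.inf, 4.]]) the call
# replace_missing(a) overwrites both offending entries with MISSING_VALUE (0)
# in place and returns ([0, 1], [0, 1]), the row and column indices that
# contained NaN/inf, later used to flag bad objects and bad features.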
class OpObjectTrain(Operator):
"""Trains a random forest on all labeled objects."""
name = "TrainRandomForestObjects"
description = "Train a random forest on multiple images"
category = "Learning"
Labels = InputSlot(level=1, stype=Opaque, rtype=List)
LabelsCount = InputSlot(stype='int')
Features = InputSlot(level=1, rtype=List, stype=Opaque)
SelectedFeatures = InputSlot(rtype=List, stype=Opaque)
FixClassifier = InputSlot(stype="bool")
ForestCount = InputSlot(stype="int", value=1)
Classifier = OutputSlot()
BadObjects = OutputSlot(stype=Opaque)
def __init__(self, *args, **kwargs):
super(OpObjectTrain, self).__init__(*args, **kwargs)
self._tree_count = 100
self.FixClassifier.setValue(False)
def setupOutputs(self):
if self.FixClassifier.value == False:
self.Classifier.meta.dtype = object
self.Classifier.meta.shape = (1,)
self.Classifier.meta.axistags = None
self.BadObjects.meta.shape = (1,)
self.BadObjects.meta.dtype = object
self.BadObjects.meta.axistags = None
def execute(self, slot, subindex, roi, result):
featList = []
all_col_names = []
labelsList = []
# get the number of ALL labels
numLabels=0
if self.LabelsCount.ready():
numLabels = self.LabelsCount[:].wait()
numLabels = int(numLabels[0])
# will be available at slot self.Warnings
all_bad_objects = defaultdict(lambda: defaultdict(list))
all_bad_feats = set()
selected = self.SelectedFeatures([]).wait()
if len(selected)==0:
# no features - no predictions
self.Classifier.setValue(None)
return
for i in range(len(self.Labels)):
# this loop is by image, not time!
# TODO: we should be able to use self.Labels[i].value,
# but the current implementation of Slot.value() does not
# do the right thing.
labels_image = self.Labels[i]([]).wait()
labels_image_filtered = {}
nztimes = []
for timestep, labels_time in labels_image.iteritems():
nz = numpy.nonzero(labels_time)
if len(nz[0])==0:
continue
else:
nztimes.append(timestep)
labels_image_filtered[timestep] = labels_time
if len(nztimes)==0:
continue
# compute the features if there are nonzero labels in this image
# and only for the time steps, which have labels
feats = self.Features[i](nztimes).wait()
featstmp, row_names, col_names, labelstmp = make_feature_array(feats, selected, labels_image_filtered)
if labelstmp.size == 0 or featstmp.size == 0:
continue
rows, cols = replace_missing(featstmp)
featList.append(featstmp)
all_col_names.append(tuple(col_names))
labelsList.append(labelstmp)
for idx in rows:
t, obj = row_names[idx]
all_bad_objects[i][t].append(obj)
for c in cols:
all_bad_feats.add(col_names[c])
if len(labelsList)==0:
#no labels, return here
self.Classifier.setValue(None)
return
self._warnBadObjects(all_bad_objects, all_bad_feats)
if not len(set(all_col_names)) == 1:
raise Exception('different time slices did not have same features.')
featMatrix = _concatenate(featList, axis=0)
labelsMatrix = _concatenate(labelsList, axis=0)
logger.info("training on matrix of shape {}".format(featMatrix.shape))
if featMatrix.size == 0 or labelsMatrix.size == 0:
result[:] = None
return
allLabels=map(long, range(1,numLabels+1))
classifier_factory = ParallelVigraRfLazyflowClassifierFactory( self._tree_count, self.ForestCount.value, labels=allLabels )
classifier = classifier_factory.create_and_train( featMatrix.astype(numpy.float32), numpy.asarray(labelsMatrix, dtype=numpy.uint32) )
avg_oob = numpy.mean(classifier.oobs)
logger.info("training finished, average out-of-bag error: {}".format(avg_oob))
result[0] = classifier
return result
def propagateDirty(self, slot, subindex, roi):
if slot is not self.FixClassifier and \
self.inputs["FixClassifier"].value == False:
slcs = (slice(0, self.ForestCount.value, None),)
self.outputs["Classifier"].setDirty(slcs)
def _warnBadObjects(self, bad_objects, bad_feats):
if len(bad_feats) > 0 or\
any([len(bad_objects[i]) > 0 for i in bad_objects.keys()]):
self.BadObjects.setValue({'objects': bad_objects,
'feats': bad_feats})
class OpObjectPredict(Operator):
"""Predicts object labels in a single image.
Performs prediction on all objects in a time slice at once, and
caches the result.
"""
# WARNING: right now we predict and cache a whole time slice. We
# expect this to be fast because there are relatively few objects
# compared to the number of pixels in pixel classification. If
# this should be too slow, we should instead cache at the object
# level, and only predict for objects visible in the roi.
name = "OpObjectPredict"
Features = InputSlot(rtype=List, stype=Opaque)
SelectedFeatures = InputSlot(rtype=List, stype=Opaque)
Classifier = InputSlot()
LabelsCount = InputSlot(stype='integer')
InputProbabilities = InputSlot(stype=Opaque, rtype=List, optional=True)
Predictions = OutputSlot(stype=Opaque, rtype=List)
Probabilities = OutputSlot(stype=Opaque, rtype=List)
CachedProbabilities = OutputSlot(stype=Opaque, rtype=List)
ProbabilityChannels = OutputSlot(stype=Opaque, rtype=List, level=1)
BadObjects = OutputSlot(stype=Opaque, rtype=List)
#SegmentationThreshold = 0.5
def setupOutputs(self):
self.Predictions.meta.shape = self.Features.meta.shape
self.Predictions.meta.dtype = object
self.Predictions.meta.axistags = None
self.Predictions.meta.mapping_dtype = numpy.uint8
self.Probabilities.meta.shape = self.Features.meta.shape
self.Probabilities.meta.dtype = object
self.Probabilities.meta.mapping_dtype = numpy.float32
self.Probabilities.meta.axistags = None
self.BadObjects.meta.shape = self.Features.meta.shape
self.BadObjects.meta.dtype = object
self.BadObjects.meta.mapping_dtype = numpy.uint8
self.BadObjects.meta.axistags = None
if self.LabelsCount.ready():
nlabels = self.LabelsCount[:].wait()
nlabels = int(nlabels[0])
self.ProbabilityChannels.resize(nlabels)
for oslot in self.ProbabilityChannels:
oslot.meta.shape = self.Features.meta.shape
oslot.meta.dtype = object
oslot.meta.axistags = None
oslot.meta.mapping_dtype = numpy.float32
self.lock = RequestLock()
self.prob_cache = dict()
self.bad_objects = dict()
def execute(self, slot, subindex, roi, result):
assert slot in [self.Predictions,
self.Probabilities,
self.CachedProbabilities,
self.ProbabilityChannels,
self.BadObjects]
times = roi._l
if len(times) == 0:
# we assume that 0-length requests are requesting everything
times = range(self.Predictions.meta.shape[0])
if slot is self.CachedProbabilities:
return {t: self.prob_cache[t] for t in times if t in self.prob_cache}
classifier = self.Classifier.value
if classifier is None:
# this happens if there was no data to train with
return dict((t, numpy.array([])) for t in times)
feats = {}
prob_predictions = {}
selected = self.SelectedFeatures([]).wait()
# FIXME: self.prob_cache is shared, so we need to block.
# However, this makes prediction single-threaded.
with self.lock:
for t in times:
if t in self.prob_cache:
continue
tmpfeats = self.Features([t]).wait()
ftmatrix, _, col_names = make_feature_array(tmpfeats, selected)
rows, cols = replace_missing(ftmatrix)
self.bad_objects[t] = numpy.zeros((ftmatrix.shape[0],))
self.bad_objects[t][rows] = 1
feats[t] = ftmatrix
prob_predictions[t] = 0
def predict_forest(_t):
# Note: We can't use RandomForest.predictLabels() here because we're training in parallel,
# and we have to average the PROBABILITIES from all forests.
# Averaging the label predictions from each forest is NOT equivalent.
# For details please see wikipedia:
# http://en.wikipedia.org/wiki/Electoral_College_%28United_States%29#Irrelevancy_of_national_popular_vote
# (^-^)
prob_predictions[_t] = classifier.predict_probabilities(feats[_t].astype(numpy.float32))
# predict the data with all the forests in parallel
pool = RequestPool()
for t in times:
if t in self.prob_cache:
continue
req = Request( partial(predict_forest, t) )
pool.add(req)
pool.wait()
pool.clean()
for t in times:
if t not in self.prob_cache:
# prob_predictions is a dict-of-arrays, indexed as follows:
# prob_predictions[t][object_index, class_index]
self.prob_cache[t] = prob_predictions[t]
self.prob_cache[t][0] = 0 # Background probability is always zero
if slot == self.Probabilities:
return { t : self.prob_cache[t] for t in times }
elif slot == self.Predictions:
# FIXME: Support SegmentationThreshold again...
labels = dict()
for t in times:
prob_sum = numpy.sum(self.prob_cache[t], axis=1)
labels[t] = 1 + numpy.argmax(self.prob_cache[t], axis=1)
labels[t][0] = 0 # Background gets the zero label
return labels
elif slot == self.ProbabilityChannels:
try:
prob_single_channel = {t: self.prob_cache[t][:, subindex[0]]
for t in times}
except:
# no probabilities available for this class; return zeros
prob_single_channel = {t: numpy.zeros((self.prob_cache[t].shape[0], 1))
for t in times}
return prob_single_channel
elif slot == self.BadObjects:
return { t : self.bad_objects[t] for t in times }
else:
assert False, "Unknown input slot"
def propagateDirty(self, slot, subindex, roi):
self.prob_cache = {}
if slot is self.InputProbabilities:
self.prob_cache = self.InputProbabilities([]).wait()
self.Predictions.setDirty(())
self.Probabilities.setDirty(())
self.ProbabilityChannels.setDirty(())
def createExportTable(self, roi):
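        """Build a structured table that joins the object feature table with the predicted class and the per-class probabilities, for export."""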
if not self.Predictions.ready() or not self.Features.ready():
return None
features = self.Features(roi).wait()
feature_table = OpObjectExtraction.createExportTable(features)
predictions = self.Predictions(roi).wait()
probs = self.Probabilities(roi).wait()
nobjs = []
for t, preds in predictions.iteritems():
nobjs.append(preds.shape[0])
nobjs_total = sum(nobjs)
if nobjs_total==0:
logger.info("Prediction not run yet, won't be exported")
return feature_table
else:
assert nobjs_total==feature_table.shape[0]
def fill_column(slot_value, column, name, channel=None):
start = 0
finish = start
for t, values in slot_value.iteritems():
#FIXME: remove the first object, it's always background
finish = start + nobjs[t]
if channel is None:
column[name][start:finish] = values[:]
else:
column[name][start:finish] = values[:, channel]
start = finish
pred_column = numpy.zeros(nobjs_total, {'names': ['Prediction'], 'formats': [numpy.dtype(numpy.uint8)]})
fill_column(predictions, pred_column, "Prediction")
joint_table = rfn.merge_arrays((feature_table, pred_column), flatten = True, usemask = False)
nchannels = probs[0].shape[-1]
columns = [feature_table, pred_column]
for ich in range(nchannels):
prob_column = numpy.zeros(nobjs_total, {'names': ['Probability of class %d'%ich], \
'formats': [numpy.dtype(numpy.float32)]})
fill_column(probs, prob_column, 'Probability of class %d'%ich, ich)
columns.append(prob_column)
joint_table = rfn.merge_arrays(columns, flatten = True, usemask = False)
return joint_table
class OpRelabelSegmentation(Operator):
"""Takes a segmentation image and a mapping and returns the
mapped image.
For instance, map prediction labels onto objects.
"""
name = "OpToImage"
Image = InputSlot()
ObjectMap = InputSlot(stype=Opaque, rtype=List)
Features = InputSlot(rtype=List, stype=Opaque) #this is needed to limit dirty propagation to the object bbox
Output = OutputSlot()
loggingName = __name__ + ".OpRelabelSegmentation"
logger = logging.getLogger(loggingName)
def setupOutputs(self):
self.Output.meta.assignFrom(self.Image.meta)
self.Output.meta.dtype = self.ObjectMap.meta.mapping_dtype
def execute(self, slot, subindex, roi, result):
tStart = time.time()
tIMG = time.time()
img = self.Image(roi.start, roi.stop).wait()
tIMG = 1000.0*(time.time()-tIMG)
for t in range(roi.start[0], roi.stop[0]):
tMAP = time.time()
map_ = self.ObjectMap([t]).wait()
tmap = map_[t]
# FIXME: necessary because predictions are returned
# enclosed in a list.
if isinstance(tmap, list):
tmap = tmap[0]
tmap = tmap.squeeze()
if tmap.ndim==0:
# no objects, nothing to paint
result[t-roi.start[0]][:] = 0
return result
tMAP = 1000.0*(time.time()-tMAP)
            #FIXME: This should be cached (and reset when the input becomes dirty)
tMAX = time.time()
idx = img.max()
if len(tmap) <= idx:
newTmap = numpy.zeros((idx + 1,)) # And maybe this should be cached, too?
newTmap[:len(tmap)] = tmap[:]
tmap = newTmap
tMAX = 1000.0*(time.time()-tMAX)
#do the work thing
tWORK = time.time()
result[t-roi.start[0]] = tmap[img[t-roi.start[0]]]
tWORK = 1000.0*(time.time()-tWORK)
if self.logger.getEffectiveLevel() >= logging.DEBUG:
tStart = 1000.0*(time.time()-tStart)
self.logger.debug("took %f msec. (img: %f, wait ObjectMap: %f, do work: %f, max: %f)" % (tStart, tIMG, tMAP, tWORK, tMAX))
return result
def propagateDirty(self, slot, subindex, roi):
if slot is self.Image:
self.Output.setDirty(roi)
elif slot is self.ObjectMap or slot is self.Features:
# this is hacky. the gui's onClick() function calls
# setDirty with a (time, object) pair, while elsewhere we
# call setDirty with ().
if len(roi._l) == 0:
self.Output.setDirty(slice(None))
elif isinstance(roi._l[0], int):
for t in roi._l:
self.Output.setDirty(slice(t))
else:
assert len(roi._l[0]) == 2
# for each dirty object, only set its bounding box dirty
ts = list(set(t for t, _ in roi._l))
feats = self.Features(ts).wait()
for t, obj in roi._l:
min_coords = feats[t][default_features_key]['Coord<Minimum>'][obj].astype(numpy.uint32)
max_coords = feats[t][default_features_key]['Coord<Maximum>'][obj].astype(numpy.uint32)
slcs = list(slice(*args) for args in zip(min_coords, max_coords))
slcs = [slice(t, t+1),] + slcs + [slice(None),]
self.Output.setDirty(slcs)
class OpMultiRelabelSegmentation(Operator):
"""Takes a segmentation image and multiple mappings and returns the
mapped images.
For instance, map prediction probabilities for different classes
onto objects.
"""
name = "OpToImageMulti"
Image = InputSlot()
ObjectMaps = InputSlot(stype=Opaque, rtype=List, level=1)
Features = InputSlot(rtype=List, stype=Opaque) #this is needed to limit dirty propagation to the object bbox
Output = OutputSlot(level=1)
def __init__(self, *args, **kwargs):
super(OpMultiRelabelSegmentation, self).__init__(*args, **kwargs)
self._innerOperators = []
def setupOutputs(self):
nmaps = len(self.ObjectMaps)
for islot in self.ObjectMaps:
op = OpRelabelSegmentation(parent=self)
op.Image.connect(self.Image)
op.ObjectMap.connect(islot)
op.Features.connect(self.Features)
self._innerOperators.append(op)
self.Output.resize(nmaps)
for i, oslot in enumerate(self.Output):
oslot.connect(self._innerOperators[i].Output)
def propagateDirty(self, slot, subindex, roi):
pass
class OpMaxLabel(Operator):
"""Finds the maximum label value in the input labels.
Special operator for object classification labels, expects
inputs to be in a dictionary
"""
name = "OpMaxLabel"
Inputs = InputSlot(level=1,rtype=List, stype=Opaque)
Output = OutputSlot()
def __init__(self, *args, **kwargs):
super(OpMaxLabel, self).__init__(*args, **kwargs)
self.Output.meta.shape = (1,)
self.Output.meta.dtype = object
self._output = 0 #internal cache
def setupOutputs(self):
self.updateOutput()
self.Output.setValue(self._output)
def execute(self, slot, subindex, roi, result):
result[0] = self._output
return result
def propagateDirty(self, inputSlot, subindex, roi):
self.updateOutput()
self.Output.setValue(self._output)
def updateOutput(self):
# Return the max value of all our inputs
maxValue = None
for i, inputSubSlot in enumerate(self.Inputs):
subSlotLabelDict = self.Inputs[i][:].wait()
for v in subSlotLabelDict.itervalues():
subSlotMax = numpy.max(v)
if maxValue is None:
maxValue = subSlotMax
else:
maxValue = max(maxValue, subSlotMax)
self._output = int(maxValue)
class OpBadObjectsToWarningMessage(Operator):
"""Parses an input dictionary of bad objects and bad features, and
sets an informative warning message to its output slot.
"""
name = "OpBadObjectsToWarningMessage"
_blockSep = "\n\n"
_itemSep = "\n"
_objectSep = ", "
_itemIndent = " "
# the input slot
# format: BadObjects.value = {
# 'objects':
# {img_key: {time_key: [obj_index, obj_index_2, ...]}},
# 'feats':
# set()
# }
BadObjects = InputSlot(stype=Opaque)
# the output slot
# format: WarningMessage.value =
# {'title': a, 'text': b, 'info': c, 'details': d} if message available, the keys 'info' and 'details' might be omitted
# {} otherwise
WarningMessage = OutputSlot(stype=Opaque)
def setupOutputs(self):
pass
def propagateDirty(self, slot, subindex, roi):
try:
d = self.BadObjects[:].wait()
except AssertionError as E:
if "has no value" in str(E):
# since we are in propagateDirty, the input got reset or we got disconnected, either case
# means we don't have to issue warnings any more
return
# don't know what this is about, raise again
raise
warn = {}
warn['title'] = 'Warning'
warn['text'] = 'Encountered bad objects/features while training.'
warn['details'] = self._formatMessage(d)
if len(warn['details']) == 0:
return
self.WarningMessage.setValue(warn)
def execute(self, slot, subindex, roi, result):
pass
def _formatMessage(self, d):
a = []
try:
keys = d.keys()
# a) objects
if 'objects' in keys:
keys.remove('objects')
s = self._formatObjects(d['objects'])
if len(s) > 0:
a.append(s)
# b) features
if 'feats' in keys:
keys.remove('feats')
s = self._formatFeatures(d['feats'])
if len(s)>0:
a.append(s)
if len(keys)>0:
logger.warning("Encountered unknown bad object keywords: {}".format(keys))
except AttributeError:
raise TypeError("Expected input to be a dictionary, got {}".format(type(d)))
return self._blockSep.join(a)
def _formatFeatures(self, f):
try:
a = self._itemSep.join(map(str, sorted(f)))
except TypeError:
raise TypeError("Expected bad features to be a set, got {}".format(type(f)))
if len(a)>0:
a = "The following features had bad values:" + self._itemSep + a
return a
def _formatObjects(self, obj):
a = []
indent = 1
try:
# loop image indices
for img in obj.keys():
imtext = self._itemIndent*indent + "at image index {}".format(img)
indent += 1
# just show time slice if more than 1 time slice exists (avoid confusion/obfuscation)
needTime = len(obj[img].keys())>1
b = []
# loop time values
for t in obj[img].keys():
# object numbers
c = self._objectSep.join(map(str,obj[img][t]))
if len(c)>0:
c = self._itemIndent*indent + "Objects " + c
if needTime:
c = self._itemIndent*indent + "at time {}".format(t) + self._itemSep + self._itemIndent + c
b.append(c)
indent -= 1
if len(b)>0:
a.append(self._itemSep.join([imtext] + b))
except AttributeError:
raise TypeError("bad objects dictionary has wrong format.")
if len(a)>0:
return self._itemSep.join(["The following objects had bad features:"] +a)
else:
return ""
| gpl-3.0 | -8,452,644,986,475,700,000 | 40.772152 | 178 | 0.599865 | false |
direvus/ansible | lib/ansible/modules/cloud/amazon/ecs_cluster.py | 25 | 7340 | #!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ecs_cluster
short_description: create or terminate ecs clusters
notes:
- When deleting a cluster, the information returned is the state of the cluster prior to deletion.
- It will also wait for a cluster to have instances registered to it.
description:
- Creates or terminates ecs clusters.
version_added: "2.0"
author: Mark Chance (@Java1Guy)
requirements: [ boto3 ]
options:
state:
description:
- The desired state of the cluster
required: true
choices: ['present', 'absent', 'has_instances']
name:
description:
- The cluster name
required: true
delay:
description:
- Number of seconds to wait
required: false
repeat:
description:
- The number of times to wait for the cluster to have an instance
required: false
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Cluster creation
- ecs_cluster:
name: default
state: present
# Cluster deletion
- ecs_cluster:
name: default
state: absent
- name: Wait for register
ecs_cluster:
name: "{{ new_cluster }}"
state: has_instances
delay: 10
repeat: 10
register: task_output
'''
RETURN = '''
activeServicesCount:
description: how many services are active in this cluster
returned: 0 if a new cluster
type: int
clusterArn:
description: the ARN of the cluster just created
type: string
returned: 0 if a new cluster
sample: arn:aws:ecs:us-west-2:172139249013:cluster/test-cluster-mfshcdok
clusterName:
description: name of the cluster just created (should match the input argument)
type: string
returned: always
sample: test-cluster-mfshcdok
pendingTasksCount:
description: how many tasks are waiting to run in this cluster
returned: 0 if a new cluster
type: int
registeredContainerInstancesCount:
description: how many container instances are available in this cluster
returned: 0 if a new cluster
type: int
runningTasksCount:
description: how many tasks are running in this cluster
returned: 0 if a new cluster
type: int
status:
description: the status of the new cluster
returned: always
type: string
sample: ACTIVE
'''
import time
try:
import boto3
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info
class EcsClusterManager:
"""Handles ECS Clusters"""
def __init__(self, module):
self.module = module
# self.ecs = boto3.client('ecs')
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
self.ecs = boto3_conn(module, conn_type='client', resource='ecs',
region=region, endpoint=ec2_url, **aws_connect_kwargs)
def find_in_array(self, array_of_clusters, cluster_name, field_name='clusterArn'):
for c in array_of_clusters:
if c[field_name].endswith(cluster_name):
return c
return None
def describe_cluster(self, cluster_name):
response = self.ecs.describe_clusters(clusters=[
cluster_name
])
if len(response['failures']) > 0:
c = self.find_in_array(response['failures'], cluster_name, 'arn')
if c and c['reason'] == 'MISSING':
return None
# fall thru and look through found ones
if len(response['clusters']) > 0:
c = self.find_in_array(response['clusters'], cluster_name)
if c:
return c
raise Exception("Unknown problem describing cluster %s." % cluster_name)
def create_cluster(self, clusterName='default'):
response = self.ecs.create_cluster(clusterName=clusterName)
return response['cluster']
def delete_cluster(self, clusterName):
return self.ecs.delete_cluster(cluster=clusterName)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
state=dict(required=True, choices=['present', 'absent', 'has_instances']),
name=dict(required=True, type='str'),
delay=dict(required=False, type='int', default=10),
repeat=dict(required=False, type='int', default=10)
))
required_together = [['state', 'name']]
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, required_together=required_together)
if not HAS_BOTO3:
module.fail_json(msg='boto3 is required.')
cluster_mgr = EcsClusterManager(module)
try:
existing = cluster_mgr.describe_cluster(module.params['name'])
except Exception as e:
module.fail_json(msg="Exception describing cluster '" + module.params['name'] + "': " + str(e))
results = dict(changed=False)
if module.params['state'] == 'present':
if existing and 'status' in existing and existing['status'] == "ACTIVE":
results['cluster'] = existing
else:
if not module.check_mode:
# doesn't exist. create it.
results['cluster'] = cluster_mgr.create_cluster(module.params['name'])
results['changed'] = True
# delete the cluster
elif module.params['state'] == 'absent':
if not existing:
pass
else:
# it exists, so we should delete it and mark changed.
# return info about the cluster deleted
results['cluster'] = existing
if 'status' in existing and existing['status'] == "INACTIVE":
results['changed'] = False
else:
if not module.check_mode:
cluster_mgr.delete_cluster(module.params['name'])
results['changed'] = True
elif module.params['state'] == 'has_instances':
if not existing:
module.fail_json(msg="Cluster '" + module.params['name'] + " not found.")
return
# it exists, so we should delete it and mark changed.
# return info about the cluster deleted
delay = module.params['delay']
repeat = module.params['repeat']
time.sleep(delay)
count = 0
for i in range(repeat):
existing = cluster_mgr.describe_cluster(module.params['name'])
count = existing['registeredContainerInstancesCount']
if count > 0:
results['changed'] = True
break
time.sleep(delay)
if count == 0 and i is repeat - 1:
module.fail_json(msg="Cluster instance count still zero after " + str(repeat) + " tries of " + str(delay) + " seconds each.")
return
module.exit_json(**results)
if __name__ == '__main__':
main()
| gpl-3.0 | 2,366,873,363,764,501,000 | 31.334802 | 137 | 0.624251 | false |
dbjohnson/spanner | test/test_countdown.py | 1 | 1191 | import unittest
import sys
from cStringIO import StringIO
from spanner import countdown
include = True
class CountdownTestCase(unittest.TestCase):
def setUp(self):
# redirect stdout
self.old_stdout = sys.stdout
self.trapped_stdout = sys.stdout = StringIO()
def tearDown(self):
# reset stdout
sys.stdout = self.old_stdout
def test_normal_function(self):
iterable = range(100)
timer = countdown.timer_for_iterable(iterable)
self.assertTrue(timer.total_iterations == len(iterable))
for i in iterable:
timer.tick()
output = self.trapped_stdout.getvalue()
self.assertTrue('%d loops completed' % len(iterable) in output)
self.assertTrue(timer.iterations_elapsed == len(iterable))
def test_overrun(self):
iterable = range(100)
timer = countdown.timer_for_iterable(iterable)
self.assertTrue(timer.total_iterations == len(iterable))
for i in iterable:
timer.tick()
timer.tick()
output = self.trapped_stdout.getvalue()
sys.stdout = self.old_stdout
self.assertTrue('Timer overrun!' in output)
| mit | -4,540,423,269,309,845,500 | 28.04878 | 71 | 0.641478 | false |
nsmoooose/csp | csp/data/ui/scripts/windows/quitresume.py | 1 | 2828 | #!/usr/bin/python
# Combat Simulator Project
# Copyright (C) 2002-2005 The Combat Simulator Project
# http://csp.sourceforge.net
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
"""
Combat Simulator Project : Quit and resume window script
"""
import csp.cspsim
from csp.data.ui.scripts.utils import SlotManager
from csp.data.ui.scripts.windows.options import Options
class QuitResume(csp.cspsim.Window, SlotManager):
def __init__(self, cspsim):
csp.cspsim.Window.__init__(self)
SlotManager.__init__(self)
# Install the move window event handler.
self.moveEventHandler = csp.cspsim.ControlMoveEventHandler(self)
self.cspsim = cspsim
# Load the user interface for this window.
serializer = csp.cspsim.Serialization()
serializer.load(self, 'quit_resume.xml')
resumeButton = self.getById('resume')
if resumeButton != None:
self.connectToClickSignal(resumeButton, self.resume_Click)
endSimButton = self.getById('end_simulation')
if endSimButton != None:
self.connectToClickSignal(endSimButton, self.end_simulation_Click)
if self.cspsim.isPaused() == False:
self.cspsim.togglePause()
def resume_Click(self):
if self.cspsim.isPaused():
self.cspsim.togglePause()
topMenu = self.getWindowManager().getById('topMenuWindow')
if topMenu != None:
topMenu.close()
self.close()
def end_simulation_Click(self):
# Start by changing the screen. This makes it possible for us to
# display the main menu and the desktop.
self.getWindowManager().closeAll()
self.cspsim.displayMenuScreen()
# End the simulation by unloading everything.
self.cspsim.unloadSimulation()
# Use the UserInterfaceStartup class to return to the main menu
# and the desktop.
from csp.data.ui.scripts.startup import UserInterfaceStartup
startup = UserInterfaceStartup(self.cspsim) # TODO: the UserInterfaceStartup object created in sim.py still exist !!!
startup.run()
| gpl-2.0 | 9,025,672,013,542,257,000 | 37.216216 | 125 | 0.682107 | false |
ferchaure/bci | Parsers/beta_fpga.py | 1 | 7945 | # -*- coding: utf-8 -*-
"""
Created on Mon Nov 30 17:28:58 2015
@author: fernando chaure
"""
from configuration import GENERAL_CONFIG as CONFIG
from multiprocess_config import *
import numpy as np
from configuration import CONFIG_PARSER
from os import path
COMM = {}
for x in CONFIG_PARSER['FORMAT_CONFIG']:
if CONFIG_PARSER['FORMAT_CONFIG'][x].isdigit():
COMM[x] = int(CONFIG_PARSER['FORMAT_CONFIG'][x])
else:
COMM[x] = CONFIG_PARSER['FORMAT_CONFIG'][x]
if not CONFIG['ONLINE_MODE']:
if CONFIG_PARSER.has_key("DATA_INFO"):
INFO = {}
for x in CONFIG_PARSER['DATA_INFO']:
if CONFIG_PARSER['DATA_INFO'][x].isdigit():
INFO[x] = int(CONFIG_PARSER['DATA_INFO'][x])
else:
INFO[x] = CONFIG_PARSER['DATA_INFO'][x]
from configuration import FILE_CONFIG
if path.exists(path.join(CONFIG_PARSER['FOLDER'],CONFIG_PARSER['FORMAT_CONFIG']['ch_order'])): #check if the string is a valid file name
CH_ORD = np.loadtxt(path.join(CONFIG_PARSER['FOLDER'],CONFIG_PARSER['FORMAT_CONFIG']['ch_order']),np.int)-1
else: #else should be a list of ints
CH_ORD = np.fromstring(CONFIG_PARSER['FORMAT_CONFIG']['ch_order'],np.int,sep=' ')-1
class Parser():
"""Une secciones de datos raw y crea la matriz que se pasara al siguiente proceso"""
def __init__(self,send_warnings,logging,data_in):
        # define what the frame start looks like as a function of the number of channels
        dicc_aux = {1:'\x23''\xFF', 2:'\x46''\xFF'} # auxiliary lookup
self.FFplus = np.fromstring(dicc_aux[COMM['ampcount']], np.int16)
self.logging = logging
        self.counter = -1  # frame counter
        self.frames_parsed = 0  # position within the block being built
self.data = data_in()
        self.c_t = 0  # position within the block being read
self.sinc = 0
self.first_read = True
self.send_warnings = send_warnings
        self.data_raw = np.ndarray(COMM['l_frame'] * CONFIG['PAQ_USB'] * 2, np.int16)  # twice as large as the one that will normally be used
if not CONFIG['ONLINE_MODE']:
self.open_file = None
self.file_reading = FILE_CONFIG['FORMAT_FILE']
self.unread_frames = 0
self.n_file = 0
def get_raw(self,extra_data):
new_pack_data = (CONFIG['PAQ_USB'] + extra_data) * COMM['l_frame']
return self.data_raw[:new_pack_data]
def online_update(self, data):
"""Recibe datos, los parsea. Si llena una trama fija retorna 0,
si faltan mas datos retorna cuantos, si sobran retorna -1"""
max_c_t = data.size / COMM['l_frame']
        # if we are just starting with this data, synchronize first
if self.c_t == 0 :
self.sinc = 0
while self.sinc < COMM['l_frame']:
if (data[self.sinc] == self.FFplus) and not( reduce(lambda x,y: x^y, data[self.sinc:self.sinc+ COMM['l_frame']])):
                    # parse:
self.data.channels[:, self.frames_parsed] = (data[COMM['channels_pos'] + self.sinc:self.sinc+COMM['channels_pos'] + CONFIG['#CHANNELS']]-2**15)[CH_ORD]
self.frames_parsed += 1
counter_old = self.counter
self.counter = (data[COMM['counter_pos'] + self.sinc])
if np.int16(counter_old+1) != self.counter:
self.logging.error(Errors_Messages[COUNTER_ERROR])
try:
self.send_warnings.put_nowait([COUNTER_ERROR,1])
except Queue_Full:
self.logging.error(Errors_Messages[SLOW_GRAPHICS_SIGNAL])
                    # end of the first parse
break
self.sinc += 1
if(self.sinc >0 ):
max_c_t = max_c_t-1
if (self.sinc == COMM['l_frame']):
self.logging.error(Errors_Messages[CANT_SYNCHRONIZE])
try:
self.send_warnings.put_nowait([CANT_SYNCHRONIZE,CONFIG['PAQ_USB']])
except Queue_Full:
self.logging.error(Errors_Messages[SLOW_GRAPHICS_SIGNAL])
self.sinc =0
                self.c_t = 0  # the whole packet is considered analyzed and corrupt; next time we start from scratch
return CONFIG['PAQ_USB'] - self.frames_parsed
else:
self.c_t+=1
        # loop until the parsed frames are filled or the whole raw data block has been parsed
while self.frames_parsed < CONFIG['PAQ_USB'] and self.c_t < max_c_t:
init_trama = self.c_t*COMM['l_frame']+self.sinc
            if(data[init_trama] != self.FFplus): # out of sync
                # sinc = synchronize(c_t*40+sinc)  # this changes c_t and sinc
                # print "desynchronization detected"
self.logging.error(Errors_Messages[DATA_NONSYNCHRONIZED])
try:
self.send_warnings.put_nowait([DATA_NONSYNCHRONIZED,CONFIG['PAQ_USB']-self.c_t])
except Queue_Full:
self.logging.error(Errors_Messages[SLOW_GRAPHICS_SIGNAL])
                self.c_t = max_c_t  # the whole packet is considered analyzed and corrupt; next time we start from scratch
break
            # parse:
self.data.channels[:, self.frames_parsed] = (data[init_trama + COMM['channels_pos']:init_trama + COMM['channels_pos'] + CONFIG['#CHANNELS']]-2**15)[CH_ORD]
            # note: the rest of the information carried in the frame should also be parsed and stored here
self.frames_parsed += 1
counter_old = self.counter
self.counter = data[init_trama+COMM['counter_pos']]
            # compare counters and warn
if np.int16(counter_old + 1) != self.counter:
                # record the discontinuity!!!
                # print "data loss according to the counter"
self.logging.error(Errors_Messages[COUNTER_ERROR])
try:
self.send_warnings.put_nowait([COUNTER_ERROR,1])
except Queue_Full:
self.logging.error(Errors_Messages[SLOW_GRAPHICS_SIGNAL])
            # end of parsing
self.c_t += 1
if self.frames_parsed == CONFIG['PAQ_USB']:
self.frames_parsed = 0
            # return how much of the raw block remains to be parsed
if self.c_t == max_c_t:
self.c_t = 0
return 0
else:
                # there is data left over, update can be called again
return -1
else:
            # return how many frames are still needed to finish the channel block
self.c_t = 0
return CONFIG['PAQ_USB'] - self.frames_parsed
def offline_update(self):
if (self.unread_frames == 0) and (self.n_file < INFO['files']):
self.n_file = self.n_file+1
self.open_file = open(FILE_CONFIG['FORMAT_FILE'][:-1]+str(self.n_file))
if self.n_file == INFO['files']:
self.unread_frames = INFO['samples4lastfile']
else:
self.unread_frames = INFO['samples4file']
elif (self.unread_frames == 0) and (self.n_file == INFO['files']):
return 1
if self.unread_frames >= CONFIG['PAQ_USB']:
self.new_data = np.fromfile(self.open_file,np.int16,CONFIG['PAQ_USB']*COMM['l_frame'])
self.unread_frames = self.unread_frames - CONFIG['PAQ_USB']
else:
self.new_data = np.fromfile(self.open_file,np.int16,self.unread_frames*COMM['l_frame'])
self.unread_frames = 0
if self.unread_frames == 0:
self.open_file.close()
return self.online_update(self.new_data)
| mit | 3,970,772,683,720,532,000 | 44.66092 | 171 | 0.555318 | false |
pynag/pynag | tests/test_utils.py | 1 | 18547 | from __future__ import absolute_import
import os
import sys
# Make sure we import from working tree
pynagbase = os.path.dirname(os.path.realpath(__file__ + "/.."))
sys.path.insert(0, pynagbase)
import unittest2 as unittest
from mock import patch
import shutil
import tempfile
import pynag.Utils as utils
import pynag.Model
from pynag.Utils import PynagError
from tests import tests_dir
import pynag.Utils.misc
class testUtils(unittest.TestCase):
def setUp(self):
# Utils should work fine with just about any data, but lets use
# testdata01
os.chdir(tests_dir)
os.chdir('dataset01')
pynag.Model.config = None
pynag.Model.cfg_file = './nagios/nagios.cfg'
pynag.Model.ObjectDefinition.objects.get_all()
self.tmp_dir = tempfile.mkdtemp() # Will be deleted after test runs
os.environ['LANG'] = 'en_US@UTF8'
def tearDown(self):
shutil.rmtree(self.tmp_dir, ignore_errors=True)
def testCompareFilterWithGrep(self):
""" test pynag.Utils.grep() by comparing it with pynag.Model.Service.objects.filter()
# TODO: Currently pynag.Model.Service.objects.filter() has some bugs, so some tests here fail.
"""
self._compare_search_expressions(use='generic-service')
self._compare_search_expressions(register=1, use='generic-service')
self._compare_search_expressions(host_name__exists=True)
self._compare_search_expressions(host_name__exists=False)
self._compare_search_expressions(host_name__notcontains='l')
self._compare_search_expressions(host_name__notcontains='this value cannot possibly exist')
self._compare_search_expressions(host_name__startswith='l')
self._compare_search_expressions(host_name__endswith='m')
self._compare_search_expressions(host_name__isnot='examplehost for testing purposes')
def testGrep(self):
""" Test cases based on gradecke's testing """
host = pynag.Model.string_to_class['host']()
host['use'] = "generic-host"
host['name'] = "ABC"
host['_code'] = "ABC"
host['_function'] = "Server,Production"
host2 = pynag.Model.string_to_class['host']()
host2['use'] = "generic-host"
host2['name'] = "XYZ"
host2['_code'] = "XYZ"
host2['_function'] = "Switch,Production"
hosts = host, host2
result = pynag.Utils.grep(hosts, **{'_code__contains': 'ABC'})
self.assertEqual(1, len(result))
result = pynag.Utils.grep(hosts, **{'_code__contains': 'BC'})
self.assertEqual(1, len(result))
# Check that contains does not match nonexisting values
result = pynag.Utils.grep(hosts, **{'_code__contains': ''})
self.assertEqual(2, len(result))
result = pynag.Utils.grep(hosts, **{'nonexistant__contains': ''})
self.assertEqual(0, len(result))
result = pynag.Utils.grep(hosts, **{'_code__notcontains': 'ABC'})
self.assertEqual(1, len(result))
result = pynag.Utils.grep(hosts, **{'_code__notcontains': 'BC'})
self.assertEqual(1, len(result))
result = pynag.Utils.grep(hosts, **{'_code__startswith': 'ABC'})
self.assertEqual(1, len(result))
result = pynag.Utils.grep(hosts, **{'_code__startswith': 'AB'})
self.assertEqual(1, len(result))
result = pynag.Utils.grep(hosts, **{'_code__notstartswith': 'AB'})
self.assertEqual(1, len(result))
result = pynag.Utils.grep(hosts, **{'_code__endswith': 'ABC'})
self.assertEqual(1, len(result))
result = pynag.Utils.grep(hosts, **{'_code__endswith': 'BC'})
self.assertEqual(1, len(result))
result = pynag.Utils.grep(hosts, **{'_code__notendswith': 'YZ'})
self.assertEqual(1, len(result))
result = pynag.Utils.grep(hosts, **{'_code__exists': True})
self.assertEqual(2, len(result))
result = pynag.Utils.grep(hosts, **{'_code__exists': False})
self.assertEqual(0, len(result))
result = pynag.Utils.grep(hosts, **{'_function__has_field': 'Server'})
self.assertEqual(1, len(result))
result = pynag.Utils.grep(
hosts, **{'_function__has_field': 'Production'})
self.assertEqual(2, len(result))
result = pynag.Utils.grep(hosts, **{'name__notcontains': 'A'})
self.assertEqual(1, len(result))
result = pynag.Utils.grep(hosts, **{'name__regex': 'A.C'})
self.assertEqual(1, len(result))
self.assertEqual('ABC', result[0].name)
result = pynag.Utils.grep(hosts, **{'name__in': ['ABC', 'BCD']})
self.assertEqual(1, len(result))
self.assertEqual('ABC', result[0].name)
result = pynag.Utils.grep(hosts, **{'name__notin': ['ABC', 'BCD']})
self.assertEqual(1, len(result))
self.assertEqual('XYZ', result[0].name)
result = pynag.Utils.grep(hosts, **{'search': 'Switch'})
self.assertEqual(1, len(result))
self.assertEqual('XYZ', result[0].name)
def _compare_search_expressions(self, **expression):
# print "Testing search expression %s" % expression
all_services = pynag.Model.Service.objects.all
result1 = pynag.Model.Service.objects.filter(**expression)
result2 = pynag.Utils.grep(all_services, **expression)
self.assertEqual(
result1, result2, msg="Search output from pynag.Utils.grep() does not match pynag.Model.Service.objects.filter() when using parameters %s\nFilter: %s\nGrep: %s" %
(expression, result1, result2))
return len(result1)
def test_run_command_file_not_found(self):
command = '/bin/doesnotexist'
expected_msg = '\* Could not run command \(return code= %s\)\n' % 127
expected_msg += '\* Error was:\n.*: %s: (not found|No such file or directory)\n' % command
expected_msg += '\* Command was:\n%s\n' % command
expected_msg += '\* Output was:\n\n'
expected_msg += 'Check if y/our path is correct: %s' % os.getenv(
'PATH')
self.assertRaisesRegexp(
utils.PynagError, expected_msg, utils.runCommand, command, raise_error_on_fail=True)
def test_gitrepo_init_empty(self):
from getpass import getuser
from platform import node
emptyish = [None, '', ' ', '\n ']
for x in emptyish:
repo = utils.GitRepo(
directory=self.tmp_dir,
auto_init=True,
author_name=x,
author_email=x
)
self.assertEquals(repo.author_name, 'Pynag User')
expected_email = '%s@%s' % (getuser(), node())
self.assertEquals(repo.author_email, expected_email)
def test_gitrepo_init_with_author(self):
tempfile.mkstemp(dir=self.tmp_dir)
author_name = 'Git Owner'
author_email = '[email protected]'
repo = utils.GitRepo(
directory=self.tmp_dir,
auto_init=True,
author_name=author_name,
author_email=author_email
)
self.assertEquals(repo.author_name, author_name)
self.assertEquals(repo.author_email, author_email)
self.assertEquals(len(repo.log()), 1)
self.assertEquals(repo.log()[0]['author_name'], author_name)
self.assertEquals(repo.log()[0]['author_email'], author_email)
def test_gitrepo_init_with_files(self):
tempfile.mkstemp(dir=self.tmp_dir)
# If pynag defaults will fail, correctly, adjust for test
author_email = None
from getpass import getuser
from platform import node
nodename = node()
if nodename.endswith('.(none)'):
            nodename = nodename[:-7] + '.example.com'
author_email = '%s@%s' % (getuser(), nodename)
repo = utils.GitRepo(
directory=self.tmp_dir,
auto_init=True,
author_name=None,
author_email=author_email
)
# Check that there is an initial commit
expected_email = '%s@%s' % (getuser(), nodename)
self.assertEquals(len(repo.log()), 1)
self.assertEquals(repo.log()[0]['comment'], 'Initial Commit')
self.assertEquals(repo.log()[0]['author_name'], 'Pynag User')
self.assertEquals(repo.log()[0]['author_email'], expected_email)
# Test kwargs functionality
self.assertEquals(
repo.log(author_email=expected_email)[0]['author_email'], expected_email)
self.assertEquals(
repo.log(comment__contains='Initial')[0]['comment'], 'Initial Commit')
self.assertEquals(len(repo.log(comment__contains='nothing')), 0)
# Test show method
initial_hash = repo.log()[0]['hash']
initial_hash_valid_commits = repo.get_valid_commits()[0]
self.assertEquals(initial_hash, initial_hash_valid_commits)
gitrunpatcher = patch('pynag.Utils.GitRepo._run_command')
validcommitspatcher = patch('pynag.Utils.GitRepo.get_valid_commits')
gitrunpatch = gitrunpatcher.start()
validcommitspatch = validcommitspatcher.start()
validcommitspatch.return_value = [initial_hash]
repo.show(initial_hash)
gitrunpatch.assert_called_once_with('git show %s' % initial_hash)
gitrunpatcher.stop()
validcommitspatcher.stop()
self.assertRaisesRegexp(
PynagError, '%s is not a valid commit id' % initial_hash)
# Add file
tempfile.mkstemp(dir=self.tmp_dir)
self.assertEquals(len(repo.get_uncommited_files()), 1)
self.assertEquals(repo.is_up_to_date(), False)
# Commit file
repo.commit(filelist=repo.get_uncommited_files()[0]['filename'])
self.assertEquals(repo.is_up_to_date(), True)
self.assertEquals(len(repo.get_uncommited_files()), 0)
self.assertEquals(len(repo.get_valid_commits()), 2)
log_entry = repo.log()[0]
self.assertEquals(log_entry['comment'], 'commited by pynag')
def test_gitrepo_deprecated_methods(self):
"""
Delete this class as deprecated methods are removed.
"""
repo = utils.GitRepo(directory=self.tmp_dir, auto_init=True)
testfilename = 'testfile.name.txt'
add_method_patcher = patch('pynag.Utils.GitRepo.add')
add_method_patch = add_method_patcher.start()
repo._git_add(testfilename)
add_method_patch.assert_called_once_with(testfilename)
add_method_patcher.stop()
commit_method_mocker = patch('pynag.Utils.GitRepo.commit')
commit_method_mock = commit_method_mocker.start()
repo._git_commit(filename=testfilename, message='test')
commit_method_mock.assert_called_once_with(
message='test', filelist=[testfilename])
commit_method_mock.reset_mock()
repo._git_commit(
filename=None, message='test', filelist=[testfilename])
commit_method_mock.assert_called_once_with(
message='test', filelist=[testfilename])
commit_method_mock.reset_mock()
repo._git_commit(
filename=testfilename, message='test', filelist=[testfilename])
commit_method_mock.assert_called_once_with(
message='test', filelist=[testfilename, testfilename])
commit_method_mocker.stop()
def test_gitrepo_diff(self):
""" Test git diff works as expected """
# Create repo and write one test commit
git = utils.GitRepo(directory=self.tmp_dir, auto_init=True)
tmp_filename = "%s/%s" % (self.tmp_dir, 'testfile.txt')
open(tmp_filename, 'w').write('test data\n')
git.commit()
# First try diff with no changes made:
diff = git.diff()
self.assertEquals(diff, '')
# Now append to our file and see the difference:
extra_data = 'extra data\n'
open(tmp_filename, 'a').write(extra_data)
# Call diff with no params, check if extra_data is in the diff
diff = git.diff()
self.assertTrue(diff.find(extra_data) > 0)
# Call diff with filename as parameter, check if extra_data is in the
# diff
diff = git.diff(commit_id_or_filename=tmp_filename)
self.assertTrue(diff.find(extra_data) > 0)
# Call commit again and confirm there is no diff
git.commit()
diff = git.diff()
self.assertEquals(diff, '')
# Call a diff against first commit, see if we find our changes in the
# commit.
all_commits = git.get_valid_commits()
first_commit = all_commits.pop()
diff = git.diff(commit_id_or_filename=first_commit)
self.assertTrue(diff.find(extra_data) > 0)
# Revert latest change, and make sure diff is gone.
last_commit = all_commits.pop(0)
git.revert(last_commit)
diff = git.diff(commit_id_or_filename=first_commit)
self.assertTrue(diff.find(extra_data) == -1)
# At last try to diff against an invalid commit id
try:
git.diff('invalid commit id')
self.assertTrue(
False, "we wanted exception when calling diff on invalid commit id")
except PynagError:
pass
def test_send_nsca(self):
""" test pynag.Utils.send_nsca
By its very nature, send_nsca binary itself does not allow for much testing,
however we can still test if the function is working as expected
"""
# Run send_nsca normally for a smoke test (we don't know much about what send_nsca will do with out packet)
# This test will only fail if there are unhandled tracebacks in the
# code somewhere
try:
pynag.Utils.send_nsca(code=0, message="test", nscahost="localhost")
except OSError as e:
# We don't care about the result if we have error because send_nsca
# is not installed
if e.errno != 2:
raise e
result = pynag.Utils.send_nsca(
code=0, message="match", nscahost="match", hostname="test", service=None, nscabin="/bin/grep", nscaconf="-")
self.assertEqual(0, result[0])
self.assertEqual('(standard input):1\n', result[1])
result = pynag.Utils.send_nsca(
code=0, message="match", nscahost="nomatch", hostname="test", service=None, nscabin="/bin/grep", nscaconf="-")
self.assertEqual(1, result[0])
self.assertEqual('(standard input):0\n', result[1])
class testFakeNagiosEnvironment(unittest.TestCase):
def setUp(self):
self.environment = pynag.Utils.misc.FakeNagiosEnvironment()
self.environment.create_minimal_environment()
def tearDown(self):
self.environment.terminate()
def testMinimal(self):
""" Minimal Test of our FakeNagiosEnvironment """
nagios = pynag.Utils.misc.FakeNagiosEnvironment()
nagios.create_minimal_environment()
nagios.config.parse()
self.assertTrue(os.path.isfile(nagios.config.cfg_file))
self.assertTrue(os.path.isdir(nagios.objects_dir))
def testModelUpdates(self):
""" Test backup and restores of Model global variables """
nagios = self.environment
original_config = pynag.Model.config
original_cfg_file = pynag.Model.cfg_file
original_dir = pynag.Model.pynag_directory
# Update model, and check if updates succeeded
nagios.update_model()
self.assertEqual(pynag.Model.config, nagios.config)
self.assertEqual(pynag.Model.cfg_file, nagios.config.cfg_file)
self.assertEqual(pynag.Model.pynag_directory, nagios.objects_dir)
# See if we can restore our model
nagios.restore_model()
self.assertEqual(pynag.Model.config, original_config)
self.assertEqual(pynag.Model.cfg_file, original_cfg_file)
self.assertEqual(pynag.Model.pynag_directory, original_dir)
def testStartStop(self):
""" Try to start and stop our nagios environment """
self.environment.start()
pid = open(os.path.join(self.environment.tempdir, "nagios.pid")).read()
pid = int(pid)
try:
os.kill(pid, 0)
except OSError:
self.assertTrue(False, "Did not find a running process with process_id=%s" % pid)
self.environment.stop()
try:
os.kill(pid, 0)
self.assertTrue(False, "Seems like process with process_id=%s is still running" % pid)
except OSError:
pass
def testOpenDecorator(self):
""" Makes sure the fake nagios environment cannot go outside its directory """
# Try to open a regular file
self.environment.config.open(self.environment.config.cfg_file).close()
self.assertTrue(True, "Successfully opened nagios.cfg")
try:
self.environment.config.open("/etc/passwd").close()
self.assertTrue(False, "I was able to open a file outside my tempdir!")
except PynagError:
pass
def testUpdateModel_NoRestore(self):
self.environment.update_model()
def testLivestatus(self):
host_name = "localhost"
self.environment.update_model()
pynag.Model.Host(host_name=host_name, use="generic-host").save()
self.environment.guess_livestatus_path()
self.environment.configure_livestatus()
self.environment.start()
livestatus = self.environment.get_livestatus()
hosts = livestatus.get_hosts(name=host_name)
self.assertTrue(hosts, "Could not find a host called %s" % (host_name))
def testImports(self):
""" Test FakeNagiosEnvironment.import_config() """
host1 = "host1"
host2 = "host2"
tempdir = tempfile.mkdtemp()
tempfile1 = tempfile.mktemp(suffix='.cfg')
tempfile2 = os.path.join(tempdir, 'file2.cfg')
with open(tempfile1, 'w') as f:
f.write("define host {\nname host1\n}")
with open(tempfile2, 'w') as f:
f.write("define host {\nname host2\n}")
self.environment.import_config(tempdir)
self.environment.import_config(tempfile1)
self.environment.update_model()
host1 = pynag.Model.Host.objects.filter(name=host1)
host2 = pynag.Model.Host.objects.filter(name=host2)
self.assertTrue(host1)
self.assertTrue(host2)
if __name__ == "__main__":
unittest.main()
# vim: sts=4 expandtab autoindent
| gpl-2.0 | -4,237,721,732,372,206,000 | 38.715203 | 174 | 0.618429 | false |
IveWong/jasmine | setup.py | 56 | 1974 | from setuptools import setup, find_packages, os
import json
with open('package.json') as packageFile:
version = json.load(packageFile)['version']
setup(
name="jasmine-core",
version=version,
url="http://jasmine.github.io",
author="Pivotal Labs",
author_email="[email protected]",
description=('Jasmine is a Behavior Driven Development testing framework for JavaScript. It does not rely on '+
'browsers, DOM, or any JavaScript framework. Thus it\'s suited for websites, '+
'Node.js (http://nodejs.org) projects, or anywhere that JavaScript can run.'),
license='MIT',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Software Development :: Build Tools',
'Topic :: Software Development :: Quality Assurance',
'Topic :: Software Development :: Testing',
],
packages=['jasmine_core', 'jasmine_core.images'],
package_dir={'jasmine_core': 'lib/jasmine-core', 'jasmine_core.images': 'images'},
package_data={'jasmine_core': ['*.js', '*.css'], 'jasmine_core.images': ['*.png']},
include_package_data=True,
install_requires=['glob2>=0.4.1', 'ordereddict==1.1']
)
| mit | -2,955,940,814,518,186,500 | 41 | 115 | 0.618541 | false |
semonte/intellij-community | plugins/hg4idea/testData/bin/mercurial/repo.py | 88 | 1379 | # repo.py - repository base classes for mercurial
#
# Copyright 2005, 2006 Matt Mackall <[email protected]>
# Copyright 2006 Vadim Gelfer <[email protected]>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from i18n import _
import error
class repository(object):
def capable(self, name):
'''tell whether repo supports named capability.
return False if not supported.
if boolean capability, return True.
if string capability, return string.'''
if name in self.capabilities:
return True
name_eq = name + '='
for cap in self.capabilities:
if cap.startswith(name_eq):
return cap[len(name_eq):]
return False
def requirecap(self, name, purpose):
'''raise an exception if the given capability is not present'''
if not self.capable(name):
raise error.CapabilityError(
_('cannot %s; remote repository does not '
'support the %r capability') % (purpose, name))
def local(self):
return False
def cancopy(self):
return self.local()
def rjoin(self, path):
url = self.url()
if url.endswith('/'):
return url + path
else:
return url + '/' + path
| apache-2.0 | -534,216,216,974,158,300 | 30.340909 | 73 | 0.607687 | false |
angelapper/edx-platform | common/djangoapps/track/tests/test_shim.py | 24 | 10533 | """Ensure emitted events contain the fields legacy processors expect to find."""
from collections import namedtuple
import ddt
from django.test.utils import override_settings
from mock import sentinel
from openedx.core.lib.tests.assertions.events import assert_events_equal
from . import FROZEN_TIME, EventTrackingTestCase
from .. import transformers
from ..shim import PrefixedEventProcessor
LEGACY_SHIM_PROCESSOR = [
{
'ENGINE': 'track.shim.LegacyFieldMappingProcessor'
}
]
GOOGLE_ANALYTICS_PROCESSOR = [
{
'ENGINE': 'track.shim.GoogleAnalyticsProcessor'
}
]
@override_settings(
EVENT_TRACKING_PROCESSORS=LEGACY_SHIM_PROCESSOR,
)
class LegacyFieldMappingProcessorTestCase(EventTrackingTestCase):
"""Ensure emitted events contain the fields legacy processors expect to find."""
def test_event_field_mapping(self):
data = {sentinel.key: sentinel.value}
context = {
'accept_language': sentinel.accept_language,
'referer': sentinel.referer,
'username': sentinel.username,
'session': sentinel.session,
'ip': sentinel.ip,
'host': sentinel.host,
'agent': sentinel.agent,
'path': sentinel.path,
'user_id': sentinel.user_id,
'course_id': sentinel.course_id,
'org_id': sentinel.org_id,
'client_id': sentinel.client_id,
}
with self.tracker.context('test', context):
self.tracker.emit(sentinel.name, data)
emitted_event = self.get_event()
expected_event = {
'accept_language': sentinel.accept_language,
'referer': sentinel.referer,
'event_type': sentinel.name,
'name': sentinel.name,
'context': {
'user_id': sentinel.user_id,
'course_id': sentinel.course_id,
'org_id': sentinel.org_id,
'path': sentinel.path,
},
'event': data,
'username': sentinel.username,
'event_source': 'server',
'time': FROZEN_TIME,
'agent': sentinel.agent,
'host': sentinel.host,
'ip': sentinel.ip,
'page': None,
'session': sentinel.session,
}
assert_events_equal(expected_event, emitted_event)
def test_missing_fields(self):
self.tracker.emit(sentinel.name)
emitted_event = self.get_event()
expected_event = {
'accept_language': '',
'referer': '',
'event_type': sentinel.name,
'name': sentinel.name,
'context': {},
'event': {},
'username': '',
'event_source': 'server',
'time': FROZEN_TIME,
'agent': '',
'host': '',
'ip': '',
'page': None,
'session': '',
}
assert_events_equal(expected_event, emitted_event)
@override_settings(
EVENT_TRACKING_PROCESSORS=GOOGLE_ANALYTICS_PROCESSOR,
)
class GoogleAnalyticsProcessorTestCase(EventTrackingTestCase):
"""Ensure emitted events contain the fields necessary for Google Analytics."""
def test_event_fields(self):
""" Test that course_id is added as the label if present, and nonInteraction is set. """
data = {sentinel.key: sentinel.value}
context = {
'path': sentinel.path,
'user_id': sentinel.user_id,
'course_id': sentinel.course_id,
'org_id': sentinel.org_id,
'client_id': sentinel.client_id,
}
with self.tracker.context('test', context):
self.tracker.emit(sentinel.name, data)
emitted_event = self.get_event()
expected_event = {
'context': context,
'data': data,
'label': sentinel.course_id,
'name': sentinel.name,
'nonInteraction': 1,
'timestamp': FROZEN_TIME,
}
assert_events_equal(expected_event, emitted_event)
def test_no_course_id(self):
""" Test that a label is not added if course_id is not specified, but nonInteraction is still set. """
data = {sentinel.key: sentinel.value}
context = {
'path': sentinel.path,
'user_id': sentinel.user_id,
'client_id': sentinel.client_id,
}
with self.tracker.context('test', context):
self.tracker.emit(sentinel.name, data)
emitted_event = self.get_event()
expected_event = {
'context': context,
'data': data,
'name': sentinel.name,
'nonInteraction': 1,
'timestamp': FROZEN_TIME,
}
assert_events_equal(expected_event, emitted_event)
@override_settings(
EVENT_TRACKING_BACKENDS={
'0': {
'ENGINE': 'eventtracking.backends.routing.RoutingBackend',
'OPTIONS': {
'backends': {
'first': {'ENGINE': 'track.tests.InMemoryBackend'}
},
'processors': [
{
'ENGINE': 'track.shim.GoogleAnalyticsProcessor'
}
]
}
},
'1': {
'ENGINE': 'eventtracking.backends.routing.RoutingBackend',
'OPTIONS': {
'backends': {
'second': {
'ENGINE': 'track.tests.InMemoryBackend'
}
}
}
}
}
)
class MultipleShimGoogleAnalyticsProcessorTestCase(EventTrackingTestCase):
"""Ensure changes don't impact other backends"""
def test_multiple_backends(self):
data = {
sentinel.key: sentinel.value,
}
context = {
'path': sentinel.path,
'user_id': sentinel.user_id,
'course_id': sentinel.course_id,
'org_id': sentinel.org_id,
'client_id': sentinel.client_id,
}
with self.tracker.context('test', context):
self.tracker.emit(sentinel.name, data)
segment_emitted_event = self.tracker.backends['0'].backends['first'].events[0]
log_emitted_event = self.tracker.backends['1'].backends['second'].events[0]
expected_event = {
'context': context,
'data': data,
'label': sentinel.course_id,
'name': sentinel.name,
'nonInteraction': 1,
'timestamp': FROZEN_TIME,
}
assert_events_equal(expected_event, segment_emitted_event)
expected_event = {
'context': context,
'data': data,
'name': sentinel.name,
'timestamp': FROZEN_TIME,
}
assert_events_equal(expected_event, log_emitted_event)
SequenceDDT = namedtuple('SequenceDDT', ['action', 'tab_count', 'current_tab', 'legacy_event_type'])
@ddt.ddt
class EventTransformerRegistryTestCase(EventTrackingTestCase):
"""
Test the behavior of the event registry
"""
def setUp(self):
super(EventTransformerRegistryTestCase, self).setUp()
self.registry = transformers.EventTransformerRegistry()
@ddt.data(
('edx.ui.lms.sequence.next_selected', transformers.NextSelectedEventTransformer),
('edx.ui.lms.sequence.previous_selected', transformers.PreviousSelectedEventTransformer),
('edx.ui.lms.sequence.tab_selected', transformers.SequenceTabSelectedEventTransformer),
('edx.video.foo.bar', transformers.VideoEventTransformer),
)
@ddt.unpack
def test_event_registry_dispatch(self, event_name, expected_transformer):
event = {'name': event_name}
transformer = self.registry.create_transformer(event)
self.assertIsInstance(transformer, expected_transformer)
@ddt.data(
'edx.ui.lms.sequence.next_selected.what',
'edx',
'unregistered_event',
)
def test_dispatch_to_nonexistent_events(self, event_name):
event = {'name': event_name}
with self.assertRaises(KeyError):
self.registry.create_transformer(event)
@ddt.ddt
class PrefixedEventProcessorTestCase(EventTrackingTestCase):
"""
Test PrefixedEventProcessor
"""
@ddt.data(
SequenceDDT(action=u'next', tab_count=5, current_tab=3, legacy_event_type=u'seq_next'),
SequenceDDT(action=u'next', tab_count=5, current_tab=5, legacy_event_type=None),
SequenceDDT(action=u'previous', tab_count=5, current_tab=3, legacy_event_type=u'seq_prev'),
SequenceDDT(action=u'previous', tab_count=5, current_tab=1, legacy_event_type=None),
)
def test_sequence_linear_navigation(self, sequence_ddt):
event_name = u'edx.ui.lms.sequence.{}_selected'.format(sequence_ddt.action)
event = {
u'name': event_name,
u'event': {
u'current_tab': sequence_ddt.current_tab,
u'tab_count': sequence_ddt.tab_count,
u'id': u'ABCDEFG',
}
}
process_event_shim = PrefixedEventProcessor()
result = process_event_shim(event)
# Legacy fields get added when needed
if sequence_ddt.action == u'next':
offset = 1
else:
offset = -1
if sequence_ddt.legacy_event_type:
self.assertEqual(result[u'event_type'], sequence_ddt.legacy_event_type)
self.assertEqual(result[u'event'][u'old'], sequence_ddt.current_tab)
self.assertEqual(result[u'event'][u'new'], sequence_ddt.current_tab + offset)
else:
self.assertNotIn(u'event_type', result)
self.assertNotIn(u'old', result[u'event'])
self.assertNotIn(u'new', result[u'event'])
def test_sequence_tab_navigation(self):
event_name = u'edx.ui.lms.sequence.tab_selected'
event = {
u'name': event_name,
u'event': {
u'current_tab': 2,
u'target_tab': 5,
u'tab_count': 9,
u'id': u'block-v1:abc',
u'widget_placement': u'top',
}
}
process_event_shim = PrefixedEventProcessor()
result = process_event_shim(event)
self.assertEqual(result[u'event_type'], u'seq_goto')
self.assertEqual(result[u'event'][u'old'], 2)
self.assertEqual(result[u'event'][u'new'], 5)
| agpl-3.0 | 7,864,666,722,493,166,000 | 31.915625 | 110 | 0.565461 | false |
m-urban/beets | test/test_mpdstats.py | 24 | 1572 | # This file is part of beets.
# Copyright 2015
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
from __future__ import (division, absolute_import, print_function,
unicode_literals)
from mock import Mock
from test._common import unittest
from test.helper import TestHelper
from beets.library import Item
from beetsplug.mpdstats import MPDStats
class MPDStatsTest(unittest.TestCase, TestHelper):
def setUp(self):
self.setup_beets()
self.load_plugins('mpdstats')
def tearDown(self):
self.teardown_beets()
self.unload_plugins()
def test_update_rating(self):
item = Item(title='title', path='', id=1)
item.add(self.lib)
log = Mock()
mpdstats = MPDStats(self.lib, log)
self.assertFalse(mpdstats.update_rating(item, True))
self.assertFalse(mpdstats.update_rating(None, True))
def suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == b'__main__':
unittest.main(defaultTest='suite')
| mit | -2,219,937,740,855,212,800 | 29.823529 | 71 | 0.702926 | false |
nysan/yocto-autobuilder | lib/python2.6/site-packages/SQLAlchemy-0.7.1-py2.6-linux-x86_64.egg/sqlalchemy/dialects/postgresql/base.py | 8 | 53160 | # postgresql/base.py
# Copyright (C) 2005-2011 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Support for the PostgreSQL database.
For information on connecting using specific drivers, see the documentation
section regarding that driver.
Sequences/SERIAL
----------------
PostgreSQL supports sequences, and SQLAlchemy uses these as the default means
of creating new primary key values for integer-based primary key columns. When
creating tables, SQLAlchemy will issue the ``SERIAL`` datatype for
integer-based primary key columns, which generates a sequence and server side
default corresponding to the column.
To specify a specific named sequence to be used for primary key generation,
use the :func:`~sqlalchemy.schema.Sequence` construct::
Table('sometable', metadata,
Column('id', Integer, Sequence('some_id_seq'), primary_key=True)
)
When SQLAlchemy issues a single INSERT statement, to fulfill the contract of
having the "last insert identifier" available, a RETURNING clause is added to
the INSERT statement which specifies the primary key columns should be
returned after the statement completes. The RETURNING functionality only takes
place if Postgresql 8.2 or later is in use. As a fallback approach, the
sequence, whether specified explicitly or implicitly via ``SERIAL``, is
executed independently beforehand, the returned value to be used in the
subsequent insert. Note that when an
:func:`~sqlalchemy.sql.expression.insert()` construct is executed using
"executemany" semantics, the "last inserted identifier" functionality does not
apply; no RETURNING clause is emitted nor is the sequence pre-executed in this
case.
To disable the default use of RETURNING, specify the flag
``implicit_returning=False`` to :func:`.create_engine`.
Transaction Isolation Level
---------------------------
:func:`.create_engine` accepts an ``isolation_level`` parameter which results
in the command ``SET SESSION CHARACTERISTICS AS TRANSACTION ISOLATION LEVEL
<level>`` being invoked for every new connection. Valid values for this
parameter are ``READ_COMMITTED``, ``READ_UNCOMMITTED``, ``REPEATABLE_READ``,
and ``SERIALIZABLE``. Note that the psycopg2 dialect does *not* use this
technique and uses psycopg2-specific APIs (see that dialect for details).
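A minimal sketch (the connection URL shown here is only a placeholder)::
    from sqlalchemy import create_engine
    engine = create_engine(
        "postgresql://scott:tiger@localhost/test",
        isolation_level="READ_COMMITTED"
    )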
INSERT/UPDATE...RETURNING
-------------------------
The dialect supports PG 8.2's ``INSERT..RETURNING``, ``UPDATE..RETURNING`` and
``DELETE..RETURNING`` syntaxes. ``INSERT..RETURNING`` is used by default
for single-row INSERT statements in order to fetch newly generated
primary key identifiers. To specify an explicit ``RETURNING`` clause,
use the :meth:`._UpdateBase.returning` method on a per-statement basis::
# INSERT..RETURNING
result = table.insert().returning(table.c.col1, table.c.col2).\\
values(name='foo')
print result.fetchall()
# UPDATE..RETURNING
result = table.update().returning(table.c.col1, table.c.col2).\\
where(table.c.name=='foo').values(name='bar')
print result.fetchall()
# DELETE..RETURNING
result = table.delete().returning(table.c.col1, table.c.col2).\\
where(table.c.name=='foo')
print result.fetchall()
Indexes
-------
PostgreSQL supports partial indexes. To create them pass a postgresql_where
option to the Index constructor::
Index('my_index', my_table.c.id, postgresql_where=tbl.c.value > 10)
"""
import re
from sqlalchemy import sql, schema, exc, util
from sqlalchemy.engine import default, reflection
from sqlalchemy.sql import compiler, expression, util as sql_util
from sqlalchemy import types as sqltypes
try:
from uuid import UUID as _python_UUID
except ImportError:
_python_UUID = None
from sqlalchemy.types import INTEGER, BIGINT, SMALLINT, VARCHAR, \
CHAR, TEXT, FLOAT, NUMERIC, \
DATE, BOOLEAN, REAL
RESERVED_WORDS = set(
["all", "analyse", "analyze", "and", "any", "array", "as", "asc",
"asymmetric", "both", "case", "cast", "check", "collate", "column",
"constraint", "create", "current_catalog", "current_date",
"current_role", "current_time", "current_timestamp", "current_user",
"default", "deferrable", "desc", "distinct", "do", "else", "end",
"except", "false", "fetch", "for", "foreign", "from", "grant", "group",
"having", "in", "initially", "intersect", "into", "leading", "limit",
"localtime", "localtimestamp", "new", "not", "null", "off", "offset",
"old", "on", "only", "or", "order", "placing", "primary", "references",
"returning", "select", "session_user", "some", "symmetric", "table",
"then", "to", "trailing", "true", "union", "unique", "user", "using",
"variadic", "when", "where", "window", "with", "authorization",
"between", "binary", "cross", "current_schema", "freeze", "full",
"ilike", "inner", "is", "isnull", "join", "left", "like", "natural",
"notnull", "outer", "over", "overlaps", "right", "similar", "verbose"
])
_DECIMAL_TYPES = (1231, 1700)
_FLOAT_TYPES = (700, 701, 1021, 1022)
_INT_TYPES = (20, 21, 23, 26, 1005, 1007, 1016)
class BYTEA(sqltypes.LargeBinary):
__visit_name__ = 'BYTEA'
class DOUBLE_PRECISION(sqltypes.Float):
__visit_name__ = 'DOUBLE_PRECISION'
class INET(sqltypes.TypeEngine):
__visit_name__ = "INET"
PGInet = INET
class CIDR(sqltypes.TypeEngine):
__visit_name__ = "CIDR"
PGCidr = CIDR
class MACADDR(sqltypes.TypeEngine):
__visit_name__ = "MACADDR"
PGMacAddr = MACADDR
class TIMESTAMP(sqltypes.TIMESTAMP):
def __init__(self, timezone=False, precision=None):
super(TIMESTAMP, self).__init__(timezone=timezone)
self.precision = precision
class TIME(sqltypes.TIME):
def __init__(self, timezone=False, precision=None):
super(TIME, self).__init__(timezone=timezone)
self.precision = precision
class INTERVAL(sqltypes.TypeEngine):
"""Postgresql INTERVAL type.
The INTERVAL type may not be supported on all DBAPIs.
It is known to work on psycopg2 and not pg8000 or zxjdbc.
"""
__visit_name__ = 'INTERVAL'
def __init__(self, precision=None):
self.precision = precision
@classmethod
def _adapt_from_generic_interval(cls, interval):
return INTERVAL(precision=interval.second_precision)
@property
def _type_affinity(self):
return sqltypes.Interval
PGInterval = INTERVAL
class BIT(sqltypes.TypeEngine):
__visit_name__ = 'BIT'
def __init__(self, length=None, varying=False):
if not varying:
# BIT without VARYING defaults to length 1
self.length = length or 1
else:
# but BIT VARYING can be unlimited-length, so no default
self.length = length
self.varying = varying
PGBit = BIT
class UUID(sqltypes.TypeEngine):
"""Postgresql UUID type.
Represents the UUID column type, interpreting
data either as natively returned by the DBAPI
or as Python uuid objects.
The UUID type may not be supported on all DBAPIs.
It is known to work on psycopg2 and not pg8000.
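    A small sketch of typical use (table and column names are illustrative)::
        from sqlalchemy import Table, Column, MetaData
        from sqlalchemy.dialects.postgresql import UUID
        items = Table('items', MetaData(),
                      Column('id', UUID(as_uuid=True), primary_key=True))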
"""
__visit_name__ = 'UUID'
def __init__(self, as_uuid=False):
"""Construct a UUID type.
:param as_uuid=False: if True, values will be interpreted
as Python uuid objects, converting to/from string via the
DBAPI.
"""
if as_uuid and _python_UUID is None:
raise NotImplementedError(
"This version of Python does not support the native UUID type."
)
self.as_uuid = as_uuid
def bind_processor(self, dialect):
if self.as_uuid:
def process(value):
if value is not None:
value = str(value)
return value
return process
else:
return None
def result_processor(self, dialect, coltype):
if self.as_uuid:
def process(value):
if value is not None:
value = _python_UUID(value)
return value
return process
else:
return None
PGUuid = UUID
class ARRAY(sqltypes.MutableType, sqltypes.Concatenable, sqltypes.TypeEngine):
"""Postgresql ARRAY type.
Represents values as Python lists.
The ARRAY type may not be supported on all DBAPIs.
It is known to work on psycopg2 and not pg8000.
"""
__visit_name__ = 'ARRAY'
def __init__(self, item_type, mutable=False, as_tuple=False):
"""Construct an ARRAY.
E.g.::
Column('myarray', ARRAY(Integer))
Arguments are:
:param item_type: The data type of items of this array. Note that
dimensionality is irrelevant here, so multi-dimensional arrays like
``INTEGER[][]``, are constructed as ``ARRAY(Integer)``, not as
          ``ARRAY(ARRAY(Integer))`` or such. The type mapping figures this
          out on the fly.
:param mutable=False: Specify whether lists passed to this
class should be considered mutable - this enables
"mutable types" mode in the ORM. Be sure to read the
notes for :class:`.MutableType` regarding ORM
performance implications (default changed from ``True`` in
0.7.0).
.. note:: This functionality is now superseded by the
``sqlalchemy.ext.mutable`` extension described in
:ref:`mutable_toplevel`.
:param as_tuple=False: Specify whether return results
should be converted to tuples from lists. DBAPIs such
as psycopg2 return lists by default. When tuples are
returned, the results are hashable. This flag can only
be set to ``True`` when ``mutable`` is set to
``False``. (new in 0.6.5)
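        A brief sketch of the tuple mode (assuming the usual ``Column`` and
        ``Integer`` imports; the column name is illustrative)::
          Column('scores', ARRAY(Integer, as_tuple=True))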
"""
if isinstance(item_type, ARRAY):
raise ValueError("Do not nest ARRAY types; ARRAY(basetype) "
"handles multi-dimensional arrays of basetype")
if isinstance(item_type, type):
item_type = item_type()
self.item_type = item_type
self.mutable = mutable
if mutable and as_tuple:
raise exc.ArgumentError(
"mutable must be set to False if as_tuple is True."
)
self.as_tuple = as_tuple
def copy_value(self, value):
if value is None:
return None
elif self.mutable:
return list(value)
else:
return value
def compare_values(self, x, y):
return x == y
def is_mutable(self):
return self.mutable
def bind_processor(self, dialect):
item_proc = self.item_type.dialect_impl(dialect).bind_processor(dialect)
if item_proc:
def convert_item(item):
if isinstance(item, (list, tuple)):
return [convert_item(child) for child in item]
else:
return item_proc(item)
else:
def convert_item(item):
if isinstance(item, (list, tuple)):
return [convert_item(child) for child in item]
else:
return item
def process(value):
if value is None:
return value
return [convert_item(item) for item in value]
return process
def result_processor(self, dialect, coltype):
item_proc = self.item_type.dialect_impl(dialect).result_processor(dialect, coltype)
if item_proc:
def convert_item(item):
if isinstance(item, list):
r = [convert_item(child) for child in item]
if self.as_tuple:
r = tuple(r)
return r
else:
return item_proc(item)
else:
def convert_item(item):
if isinstance(item, list):
r = [convert_item(child) for child in item]
if self.as_tuple:
r = tuple(r)
return r
else:
return item
def process(value):
if value is None:
return value
r = [convert_item(item) for item in value]
if self.as_tuple:
r = tuple(r)
return r
return process
PGArray = ARRAY
class ENUM(sqltypes.Enum):
def create(self, bind=None, checkfirst=True):
if not bind.dialect.supports_native_enum:
return
if not checkfirst or \
not bind.dialect.has_type(bind, self.name, schema=self.schema):
bind.execute(CreateEnumType(self))
def drop(self, bind=None, checkfirst=True):
if not bind.dialect.supports_native_enum:
return
if not checkfirst or \
bind.dialect.has_type(bind, self.name, schema=self.schema):
bind.execute(DropEnumType(self))
def _on_table_create(self, event, target, bind, **kw):
self.create(bind=bind, checkfirst=True)
def _on_metadata_create(self, event, target, bind, **kw):
if self.metadata is not None:
self.create(bind=bind, checkfirst=True)
def _on_metadata_drop(self, event, target, bind, **kw):
self.drop(bind=bind, checkfirst=True)
colspecs = {
sqltypes.Interval:INTERVAL,
sqltypes.Enum:ENUM,
}
ischema_names = {
'integer' : INTEGER,
'bigint' : BIGINT,
'smallint' : SMALLINT,
'character varying' : VARCHAR,
'character' : CHAR,
'"char"' : sqltypes.String,
'name' : sqltypes.String,
'text' : TEXT,
'numeric' : NUMERIC,
'float' : FLOAT,
'real' : REAL,
'inet': INET,
'cidr': CIDR,
'uuid': UUID,
'bit': BIT,
'bit varying': BIT,
'macaddr': MACADDR,
'double precision' : DOUBLE_PRECISION,
'timestamp' : TIMESTAMP,
'timestamp with time zone' : TIMESTAMP,
'timestamp without time zone' : TIMESTAMP,
'time with time zone' : TIME,
'time without time zone' : TIME,
'date' : DATE,
'time': TIME,
'bytea' : BYTEA,
'boolean' : BOOLEAN,
'interval':INTERVAL,
'interval year to month':INTERVAL,
'interval day to second':INTERVAL,
}
class PGCompiler(compiler.SQLCompiler):
def visit_match_op(self, binary, **kw):
return "%s @@ to_tsquery(%s)" % (
self.process(binary.left),
self.process(binary.right))
def visit_ilike_op(self, binary, **kw):
escape = binary.modifiers.get("escape", None)
return '%s ILIKE %s' % \
(self.process(binary.left), self.process(binary.right)) \
+ (escape and
(' ESCAPE ' + self.render_literal_value(escape, None))
or '')
def visit_notilike_op(self, binary, **kw):
escape = binary.modifiers.get("escape", None)
return '%s NOT ILIKE %s' % \
(self.process(binary.left), self.process(binary.right)) \
+ (escape and
(' ESCAPE ' + self.render_literal_value(escape, None))
or '')
def render_literal_value(self, value, type_):
value = super(PGCompiler, self).render_literal_value(value, type_)
# TODO: need to inspect "standard_conforming_strings"
if self.dialect._backslash_escapes:
value = value.replace('\\', '\\\\')
return value
def visit_sequence(self, seq):
return "nextval('%s')" % self.preparer.format_sequence(seq)
def limit_clause(self, select):
text = ""
if select._limit is not None:
text += " \n LIMIT " + self.process(sql.literal(select._limit))
if select._offset is not None:
if select._limit is None:
text += " \n LIMIT ALL"
text += " OFFSET " + self.process(sql.literal(select._offset))
return text
def get_select_precolumns(self, select):
if select._distinct is not False:
if select._distinct is True:
return "DISTINCT "
elif isinstance(select._distinct, (list, tuple)):
return "DISTINCT ON (" + ', '.join(
[self.process(col) for col in select._distinct]
)+ ") "
else:
return "DISTINCT ON (" + self.process(select._distinct) + ") "
else:
return ""
def for_update_clause(self, select):
if select.for_update == 'nowait':
return " FOR UPDATE NOWAIT"
else:
return super(PGCompiler, self).for_update_clause(select)
def returning_clause(self, stmt, returning_cols):
columns = [
self.process(
self.label_select_column(None, c, asfrom=False),
within_columns_clause=True,
result_map=self.result_map)
for c in expression._select_iterables(returning_cols)
]
return 'RETURNING ' + ', '.join(columns)
def visit_extract(self, extract, **kwargs):
field = self.extract_map.get(extract.field, extract.field)
if extract.expr.type:
affinity = extract.expr.type._type_affinity
else:
affinity = None
casts = {
sqltypes.Date:'date',
sqltypes.DateTime:'timestamp',
sqltypes.Interval:'interval', sqltypes.Time:'time'
}
cast = casts.get(affinity, None)
if isinstance(extract.expr, sql.ColumnElement) and cast is not None:
expr = extract.expr.op('::')(sql.literal_column(cast))
else:
expr = extract.expr
return "EXTRACT(%s FROM %s)" % (
field, self.process(expr))
class PGDDLCompiler(compiler.DDLCompiler):
def get_column_specification(self, column, **kwargs):
colspec = self.preparer.format_column(column)
impl_type = column.type.dialect_impl(self.dialect)
if column.primary_key and \
column is column.table._autoincrement_column and \
not isinstance(impl_type, sqltypes.SmallInteger) and \
(
column.default is None or
(
isinstance(column.default, schema.Sequence) and
column.default.optional
)
):
if isinstance(impl_type, sqltypes.BigInteger):
colspec += " BIGSERIAL"
else:
colspec += " SERIAL"
else:
colspec += " " + self.dialect.type_compiler.process(column.type)
default = self.get_column_default_string(column)
if default is not None:
colspec += " DEFAULT " + default
if not column.nullable:
colspec += " NOT NULL"
return colspec
def visit_create_enum_type(self, create):
type_ = create.element
return "CREATE TYPE %s AS ENUM (%s)" % (
self.preparer.format_type(type_),
",".join("'%s'" % e for e in type_.enums)
)
def visit_drop_enum_type(self, drop):
type_ = drop.element
return "DROP TYPE %s" % (
self.preparer.format_type(type_)
)
def visit_create_index(self, create):
preparer = self.preparer
index = create.element
text = "CREATE "
if index.unique:
text += "UNIQUE "
text += "INDEX %s ON %s (%s)" \
% (preparer.quote(
self._index_identifier(index.name), index.quote),
preparer.format_table(index.table),
', '.join([preparer.format_column(c)
for c in index.columns]))
if "postgres_where" in index.kwargs:
whereclause = index.kwargs['postgres_where']
util.warn_deprecated(
"The 'postgres_where' argument has been renamed "
"to 'postgresql_where'.")
elif 'postgresql_where' in index.kwargs:
whereclause = index.kwargs['postgresql_where']
else:
whereclause = None
if whereclause is not None:
whereclause = sql_util.expression_as_ddl(whereclause)
where_compiled = self.sql_compiler.process(whereclause)
text += " WHERE " + where_compiled
return text
class PGTypeCompiler(compiler.GenericTypeCompiler):
def visit_INET(self, type_):
return "INET"
def visit_CIDR(self, type_):
return "CIDR"
def visit_MACADDR(self, type_):
return "MACADDR"
def visit_FLOAT(self, type_):
if not type_.precision:
return "FLOAT"
else:
return "FLOAT(%(precision)s)" % {'precision': type_.precision}
def visit_DOUBLE_PRECISION(self, type_):
return "DOUBLE PRECISION"
def visit_BIGINT(self, type_):
return "BIGINT"
def visit_datetime(self, type_):
return self.visit_TIMESTAMP(type_)
def visit_enum(self, type_):
if not type_.native_enum or not self.dialect.supports_native_enum:
return super(PGTypeCompiler, self).visit_enum(type_)
else:
return self.visit_ENUM(type_)
def visit_ENUM(self, type_):
return self.dialect.identifier_preparer.format_type(type_)
def visit_TIMESTAMP(self, type_):
return "TIMESTAMP%s %s" % (
getattr(type_, 'precision', None) and "(%d)" %
type_.precision or "",
(type_.timezone and "WITH" or "WITHOUT") + " TIME ZONE"
)
def visit_TIME(self, type_):
return "TIME%s %s" % (
getattr(type_, 'precision', None) and "(%d)" %
type_.precision or "",
(type_.timezone and "WITH" or "WITHOUT") + " TIME ZONE"
)
def visit_INTERVAL(self, type_):
if type_.precision is not None:
return "INTERVAL(%d)" % type_.precision
else:
return "INTERVAL"
def visit_BIT(self, type_):
if type_.varying:
compiled = "BIT VARYING"
if type_.length is not None:
compiled += "(%d)" % type_.length
else:
compiled = "BIT(%d)" % type_.length
return compiled
def visit_UUID(self, type_):
return "UUID"
def visit_large_binary(self, type_):
return self.visit_BYTEA(type_)
def visit_BYTEA(self, type_):
return "BYTEA"
def visit_ARRAY(self, type_):
return self.process(type_.item_type) + '[]'
class PGIdentifierPreparer(compiler.IdentifierPreparer):
reserved_words = RESERVED_WORDS
def _unquote_identifier(self, value):
if value[0] == self.initial_quote:
value = value[1:-1].\
replace(self.escape_to_quote, self.escape_quote)
return value
def format_type(self, type_, use_schema=True):
if not type_.name:
raise exc.ArgumentError("Postgresql ENUM type requires a name.")
name = self.quote(type_.name, type_.quote)
if not self.omit_schema and use_schema and type_.schema is not None:
name = self.quote_schema(type_.schema, type_.quote) + "." + name
return name
class PGInspector(reflection.Inspector):
def __init__(self, conn):
reflection.Inspector.__init__(self, conn)
def get_table_oid(self, table_name, schema=None):
"""Return the oid from `table_name` and `schema`."""
return self.dialect.get_table_oid(self.bind, table_name, schema,
info_cache=self.info_cache)
class CreateEnumType(schema._CreateDropBase):
__visit_name__ = "create_enum_type"
class DropEnumType(schema._CreateDropBase):
__visit_name__ = "drop_enum_type"
class PGExecutionContext(default.DefaultExecutionContext):
def fire_sequence(self, seq, type_):
return self._execute_scalar(("select nextval('%s')" % \
self.dialect.identifier_preparer.format_sequence(seq)), type_)
def get_insert_default(self, column):
if column.primary_key and column is column.table._autoincrement_column:
if column.server_default and column.server_default.has_argument:
# pre-execute passive defaults on primary key columns
return self._execute_scalar("select %s" %
column.server_default.arg, column.type)
elif (column.default is None or
(column.default.is_sequence and
column.default.optional)):
# execute the sequence associated with a SERIAL primary
                # key column. For non-primary-key SERIAL, the ID is just
                # generated server side.
try:
seq_name = column._postgresql_seq_name
except AttributeError:
tab = column.table.name
col = column.name
tab = tab[0:29 + max(0, (29 - len(col)))]
col = col[0:29 + max(0, (29 - len(tab)))]
column._postgresql_seq_name = seq_name = "%s_%s_seq" % (tab, col)
sch = column.table.schema
if sch is not None:
exc = "select nextval('\"%s\".\"%s\"')" % \
(sch, seq_name)
else:
exc = "select nextval('\"%s\"')" % \
(seq_name, )
return self._execute_scalar(exc, column.type)
return super(PGExecutionContext, self).get_insert_default(column)
class PGDialect(default.DefaultDialect):
name = 'postgresql'
supports_alter = True
max_identifier_length = 63
supports_sane_rowcount = True
supports_native_enum = True
supports_native_boolean = True
supports_sequences = True
sequences_optional = True
preexecute_autoincrement_sequences = True
postfetch_lastrowid = False
supports_default_values = True
supports_empty_insert = False
default_paramstyle = 'pyformat'
ischema_names = ischema_names
colspecs = colspecs
statement_compiler = PGCompiler
ddl_compiler = PGDDLCompiler
type_compiler = PGTypeCompiler
preparer = PGIdentifierPreparer
execution_ctx_cls = PGExecutionContext
inspector = PGInspector
isolation_level = None
# TODO: need to inspect "standard_conforming_strings"
_backslash_escapes = True
def __init__(self, isolation_level=None, **kwargs):
default.DefaultDialect.__init__(self, **kwargs)
self.isolation_level = isolation_level
def initialize(self, connection):
super(PGDialect, self).initialize(connection)
self.implicit_returning = self.server_version_info > (8, 2) and \
self.__dict__.get('implicit_returning', True)
self.supports_native_enum = self.server_version_info >= (8, 3)
if not self.supports_native_enum:
self.colspecs = self.colspecs.copy()
# pop base Enum type
self.colspecs.pop(sqltypes.Enum, None)
# psycopg2, others may have placed ENUM here as well
self.colspecs.pop(ENUM, None)
def on_connect(self):
if self.isolation_level is not None:
def connect(conn):
self.set_isolation_level(conn, self.isolation_level)
return connect
else:
return None
_isolation_lookup = set(['SERIALIZABLE',
'READ UNCOMMITTED', 'READ COMMITTED', 'REPEATABLE READ'])
def set_isolation_level(self, connection, level):
level = level.replace('_', ' ')
if level not in self._isolation_lookup:
raise exc.ArgumentError(
"Invalid value '%s' for isolation_level. "
"Valid isolation levels for %s are %s" %
(level, self.name, ", ".join(self._isolation_lookup))
)
cursor = connection.cursor()
cursor.execute(
"SET SESSION CHARACTERISTICS AS TRANSACTION "
"ISOLATION LEVEL %s" % level)
cursor.execute("COMMIT")
cursor.close()
def get_isolation_level(self, connection):
cursor = connection.cursor()
cursor.execute('show transaction isolation level')
val = cursor.fetchone()[0]
cursor.close()
return val.upper()
def do_begin_twophase(self, connection, xid):
self.do_begin(connection.connection)
def do_prepare_twophase(self, connection, xid):
connection.execute("PREPARE TRANSACTION '%s'" % xid)
def do_rollback_twophase(self, connection, xid,
is_prepared=True, recover=False):
if is_prepared:
if recover:
#FIXME: ugly hack to get out of transaction
                # context when committing recoverable transactions.
                # Must find a way to make the dbapi not
# open a transaction.
connection.execute("ROLLBACK")
connection.execute("ROLLBACK PREPARED '%s'" % xid)
connection.execute("BEGIN")
self.do_rollback(connection.connection)
else:
self.do_rollback(connection.connection)
def do_commit_twophase(self, connection, xid,
is_prepared=True, recover=False):
if is_prepared:
if recover:
connection.execute("ROLLBACK")
connection.execute("COMMIT PREPARED '%s'" % xid)
connection.execute("BEGIN")
self.do_rollback(connection.connection)
else:
self.do_commit(connection.connection)
def do_recover_twophase(self, connection):
resultset = connection.execute(
sql.text("SELECT gid FROM pg_prepared_xacts"))
return [row[0] for row in resultset]
def _get_default_schema_name(self, connection):
return connection.scalar("select current_schema()")
def has_table(self, connection, table_name, schema=None):
# seems like case gets folded in pg_class...
if schema is None:
cursor = connection.execute(
sql.text(
"select relname from pg_class c join pg_namespace n on "
"n.oid=c.relnamespace where n.nspname=current_schema() and "
"lower(relname)=:name",
bindparams=[
sql.bindparam('name', unicode(table_name.lower()),
type_=sqltypes.Unicode)]
)
)
else:
cursor = connection.execute(
sql.text(
"select relname from pg_class c join pg_namespace n on "
"n.oid=c.relnamespace where n.nspname=:schema and "
"lower(relname)=:name",
bindparams=[
sql.bindparam('name',
unicode(table_name.lower()), type_=sqltypes.Unicode),
sql.bindparam('schema',
unicode(schema), type_=sqltypes.Unicode)]
)
)
return bool(cursor.first())
def has_sequence(self, connection, sequence_name, schema=None):
if schema is None:
cursor = connection.execute(
sql.text(
"SELECT relname FROM pg_class c join pg_namespace n on "
"n.oid=c.relnamespace where relkind='S' and "
"n.nspname=current_schema() "
"and lower(relname)=:name",
bindparams=[
sql.bindparam('name', unicode(sequence_name.lower()),
type_=sqltypes.Unicode)
]
)
)
else:
cursor = connection.execute(
sql.text(
"SELECT relname FROM pg_class c join pg_namespace n on "
"n.oid=c.relnamespace where relkind='S' and "
"n.nspname=:schema and lower(relname)=:name",
bindparams=[
sql.bindparam('name', unicode(sequence_name.lower()),
type_=sqltypes.Unicode),
sql.bindparam('schema',
unicode(schema), type_=sqltypes.Unicode)
]
)
)
return bool(cursor.first())
def has_type(self, connection, type_name, schema=None):
bindparams = [
sql.bindparam('typname',
unicode(type_name), type_=sqltypes.Unicode),
sql.bindparam('nspname',
unicode(schema), type_=sqltypes.Unicode),
]
if schema is not None:
query = """
SELECT EXISTS (
SELECT * FROM pg_catalog.pg_type t, pg_catalog.pg_namespace n
WHERE t.typnamespace = n.oid
AND t.typname = :typname
AND n.nspname = :nspname
)
"""
else:
query = """
SELECT EXISTS (
SELECT * FROM pg_catalog.pg_type t
WHERE t.typname = :typname
AND pg_type_is_visible(t.oid)
)
"""
cursor = connection.execute(sql.text(query, bindparams=bindparams))
return bool(cursor.scalar())
def _get_server_version_info(self, connection):
v = connection.execute("select version()").scalar()
m = re.match('PostgreSQL (\d+)\.(\d+)(?:\.(\d+))?(?:devel)?', v)
if not m:
raise AssertionError(
"Could not determine version from string '%s'" % v)
return tuple([int(x) for x in m.group(1, 2, 3) if x is not None])
@reflection.cache
def get_table_oid(self, connection, table_name, schema=None, **kw):
"""Fetch the oid for schema.table_name.
Several reflection methods require the table oid. The idea for using
this method is that it can be fetched one time and cached for
subsequent calls.
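        A rough sketch of access through the inspector (the engine and the
        table name are placeholders)::
            from sqlalchemy.engine import reflection
            insp = reflection.Inspector.from_engine(engine)
            oid = insp.get_table_oid('sometable')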
"""
table_oid = None
if schema is not None:
schema_where_clause = "n.nspname = :schema"
else:
schema_where_clause = "pg_catalog.pg_table_is_visible(c.oid)"
query = """
SELECT c.oid
FROM pg_catalog.pg_class c
LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
WHERE (%s)
AND c.relname = :table_name AND c.relkind in ('r','v')
""" % schema_where_clause
# Since we're binding to unicode, table_name and schema_name must be
# unicode.
table_name = unicode(table_name)
if schema is not None:
schema = unicode(schema)
s = sql.text(query, bindparams=[
sql.bindparam('table_name', type_=sqltypes.Unicode),
sql.bindparam('schema', type_=sqltypes.Unicode)
],
typemap={'oid':sqltypes.Integer}
)
c = connection.execute(s, table_name=table_name, schema=schema)
table_oid = c.scalar()
if table_oid is None:
raise exc.NoSuchTableError(table_name)
return table_oid
@reflection.cache
def get_schema_names(self, connection, **kw):
s = """
SELECT nspname
FROM pg_namespace
ORDER BY nspname
"""
rp = connection.execute(s)
# what about system tables?
# Py3K
#schema_names = [row[0] for row in rp \
# if not row[0].startswith('pg_')]
# Py2K
schema_names = [row[0].decode(self.encoding) for row in rp \
if not row[0].startswith('pg_')]
# end Py2K
return schema_names
@reflection.cache
def get_table_names(self, connection, schema=None, **kw):
if schema is not None:
current_schema = schema
else:
current_schema = self.default_schema_name
result = connection.execute(
sql.text(u"SELECT relname FROM pg_class c "
"WHERE relkind = 'r' "
"AND '%s' = (select nspname from pg_namespace n "
"where n.oid = c.relnamespace) " %
current_schema,
typemap = {'relname':sqltypes.Unicode}
)
)
return [row[0] for row in result]
@reflection.cache
def get_view_names(self, connection, schema=None, **kw):
if schema is not None:
current_schema = schema
else:
current_schema = self.default_schema_name
s = """
SELECT relname
FROM pg_class c
WHERE relkind = 'v'
AND '%(schema)s' = (select nspname from pg_namespace n
where n.oid = c.relnamespace)
""" % dict(schema=current_schema)
# Py3K
#view_names = [row[0] for row in connection.execute(s)]
# Py2K
view_names = [row[0].decode(self.encoding)
for row in connection.execute(s)]
# end Py2K
return view_names
@reflection.cache
def get_view_definition(self, connection, view_name, schema=None, **kw):
if schema is not None:
current_schema = schema
else:
current_schema = self.default_schema_name
s = """
SELECT definition FROM pg_views
WHERE schemaname = :schema
AND viewname = :view_name
"""
rp = connection.execute(sql.text(s),
view_name=view_name, schema=current_schema)
if rp:
# Py3K
#view_def = rp.scalar()
# Py2K
view_def = rp.scalar().decode(self.encoding)
# end Py2K
return view_def
@reflection.cache
def get_columns(self, connection, table_name, schema=None, **kw):
table_oid = self.get_table_oid(connection, table_name, schema,
info_cache=kw.get('info_cache'))
SQL_COLS = """
SELECT a.attname,
pg_catalog.format_type(a.atttypid, a.atttypmod),
(SELECT substring(pg_catalog.pg_get_expr(d.adbin, d.adrelid)
for 128)
FROM pg_catalog.pg_attrdef d
WHERE d.adrelid = a.attrelid AND d.adnum = a.attnum
AND a.atthasdef)
AS DEFAULT,
a.attnotnull, a.attnum, a.attrelid as table_oid
FROM pg_catalog.pg_attribute a
WHERE a.attrelid = :table_oid
AND a.attnum > 0 AND NOT a.attisdropped
ORDER BY a.attnum
"""
s = sql.text(SQL_COLS,
bindparams=[sql.bindparam('table_oid', type_=sqltypes.Integer)],
typemap={'attname':sqltypes.Unicode, 'default':sqltypes.Unicode}
)
c = connection.execute(s, table_oid=table_oid)
rows = c.fetchall()
domains = self._load_domains(connection)
enums = self._load_enums(connection)
# format columns
columns = []
for name, format_type, default, notnull, attnum, table_oid in rows:
## strip (5) from character varying(5), timestamp(5)
# with time zone, etc
attype = re.sub(r'\([\d,]+\)', '', format_type)
# strip '[]' from integer[], etc.
attype = re.sub(r'\[\]', '', attype)
nullable = not notnull
is_array = format_type.endswith('[]')
charlen = re.search('\(([\d,]+)\)', format_type)
if charlen:
charlen = charlen.group(1)
kwargs = {}
args = None
if attype == 'numeric':
if charlen:
prec, scale = charlen.split(',')
args = (int(prec), int(scale))
else:
args = ()
elif attype == 'double precision':
args = (53, )
elif attype == 'integer':
args = ()
elif attype in ('timestamp with time zone',
'time with time zone'):
kwargs['timezone'] = True
if charlen:
kwargs['precision'] = int(charlen)
args = ()
elif attype in ('timestamp without time zone',
'time without time zone', 'time'):
kwargs['timezone'] = False
if charlen:
kwargs['precision'] = int(charlen)
args = ()
elif attype == 'bit varying':
kwargs['varying'] = True
if charlen:
args = (int(charlen),)
else:
args = ()
elif attype in ('interval','interval year to month',
'interval day to second'):
if charlen:
kwargs['precision'] = int(charlen)
args = ()
elif charlen:
args = (int(charlen),)
else:
args = ()
while True:
if attype in self.ischema_names:
coltype = self.ischema_names[attype]
break
elif attype in enums:
enum = enums[attype]
coltype = ENUM
if "." in attype:
kwargs['schema'], kwargs['name'] = attype.split('.')
else:
kwargs['name'] = attype
args = tuple(enum['labels'])
break
elif attype in domains:
domain = domains[attype]
attype = domain['attype']
# A table can't override whether the domain is nullable.
nullable = domain['nullable']
if domain['default'] and not default:
# It can, however, override the default
# value, but can't set it to null.
default = domain['default']
continue
else:
coltype = None
break
if coltype:
coltype = coltype(*args, **kwargs)
if is_array:
coltype = ARRAY(coltype)
else:
util.warn("Did not recognize type '%s' of column '%s'" %
(attype, name))
coltype = sqltypes.NULLTYPE
# adjust the default value
autoincrement = False
if default is not None:
match = re.search(r"""(nextval\(')([^']+)('.*$)""", default)
if match is not None:
autoincrement = True
# the default is related to a Sequence
sch = schema
if '.' not in match.group(2) and sch is not None:
# unconditionally quote the schema name. this could
# later be enhanced to obey quoting rules /
# "quote schema"
default = match.group(1) + \
('"%s"' % sch) + '.' + \
match.group(2) + match.group(3)
column_info = dict(name=name, type=coltype, nullable=nullable,
default=default, autoincrement=autoincrement)
columns.append(column_info)
return columns
@reflection.cache
def get_primary_keys(self, connection, table_name, schema=None, **kw):
table_oid = self.get_table_oid(connection, table_name, schema,
info_cache=kw.get('info_cache'))
PK_SQL = """
SELECT attname FROM pg_attribute
WHERE attrelid = (
SELECT indexrelid FROM pg_index i
WHERE i.indrelid = :table_oid
AND i.indisprimary = 't')
ORDER BY attnum
"""
t = sql.text(PK_SQL, typemap={'attname':sqltypes.Unicode})
c = connection.execute(t, table_oid=table_oid)
primary_keys = [r[0] for r in c.fetchall()]
return primary_keys
@reflection.cache
def get_pk_constraint(self, connection, table_name, schema=None, **kw):
cols = self.get_primary_keys(connection, table_name,
schema=schema, **kw)
table_oid = self.get_table_oid(connection, table_name, schema,
info_cache=kw.get('info_cache'))
PK_CONS_SQL = """
SELECT conname
FROM pg_catalog.pg_constraint r
WHERE r.conrelid = :table_oid AND r.contype = 'p'
ORDER BY 1
"""
t = sql.text(PK_CONS_SQL, typemap={'conname':sqltypes.Unicode})
c = connection.execute(t, table_oid=table_oid)
name = c.scalar()
return {
'constrained_columns':cols,
'name':name
}
@reflection.cache
def get_foreign_keys(self, connection, table_name, schema=None, **kw):
preparer = self.identifier_preparer
table_oid = self.get_table_oid(connection, table_name, schema,
info_cache=kw.get('info_cache'))
FK_SQL = """
SELECT conname, pg_catalog.pg_get_constraintdef(oid, true) as condef
FROM pg_catalog.pg_constraint r
WHERE r.conrelid = :table AND r.contype = 'f'
ORDER BY 1
"""
t = sql.text(FK_SQL, typemap={
'conname':sqltypes.Unicode,
'condef':sqltypes.Unicode})
c = connection.execute(t, table=table_oid)
fkeys = []
for conname, condef in c.fetchall():
m = re.search('FOREIGN KEY \((.*?)\) REFERENCES '
'(?:(.*?)\.)?(.*?)\((.*?)\)', condef).groups()
constrained_columns, referred_schema, \
referred_table, referred_columns = m
constrained_columns = [preparer._unquote_identifier(x)
for x in re.split(r'\s*,\s*', constrained_columns)]
if referred_schema:
referred_schema =\
preparer._unquote_identifier(referred_schema)
elif schema is not None and schema == self.default_schema_name:
                # no schema (i.e. it's the default schema), and the table we're
                # reflecting has the default schema explicitly set, so use that,
                # i.e. try to follow the user's conventions
referred_schema = schema
referred_table = preparer._unquote_identifier(referred_table)
referred_columns = [preparer._unquote_identifier(x)
                for x in re.split(r'\s*,\s*', referred_columns)]
fkey_d = {
'name' : conname,
'constrained_columns' : constrained_columns,
'referred_schema' : referred_schema,
'referred_table' : referred_table,
'referred_columns' : referred_columns
}
fkeys.append(fkey_d)
return fkeys
@reflection.cache
def get_indexes(self, connection, table_name, schema, **kw):
table_oid = self.get_table_oid(connection, table_name, schema,
info_cache=kw.get('info_cache'))
IDX_SQL = """
SELECT
i.relname as relname,
ix.indisunique, ix.indexprs, ix.indpred,
a.attname
FROM
pg_class t
join pg_index ix on t.oid = ix.indrelid
join pg_class i on i.oid=ix.indexrelid
left outer join
pg_attribute a
on t.oid=a.attrelid and a.attnum=ANY(ix.indkey)
WHERE
t.relkind = 'r'
and t.oid = :table_oid
and ix.indisprimary = 'f'
ORDER BY
t.relname,
i.relname
"""
t = sql.text(IDX_SQL, typemap={'attname':sqltypes.Unicode})
c = connection.execute(t, table_oid=table_oid)
index_names = {}
indexes = []
sv_idx_name = None
for row in c.fetchall():
idx_name, unique, expr, prd, col = row
if expr:
if idx_name != sv_idx_name:
util.warn(
"Skipped unsupported reflection of "
"expression-based index %s"
% idx_name)
sv_idx_name = idx_name
continue
if prd and not idx_name == sv_idx_name:
util.warn(
"Predicate of partial index %s ignored during reflection"
% idx_name)
sv_idx_name = idx_name
if idx_name in index_names:
index_d = index_names[idx_name]
else:
index_d = {'column_names':[]}
indexes.append(index_d)
index_names[idx_name] = index_d
index_d['name'] = idx_name
if col is not None:
index_d['column_names'].append(col)
index_d['unique'] = unique
return indexes
def _load_enums(self, connection):
if not self.supports_native_enum:
return {}
## Load data types for enums:
SQL_ENUMS = """
SELECT t.typname as "name",
-- no enum defaults in 8.4 at least
-- t.typdefault as "default",
pg_catalog.pg_type_is_visible(t.oid) as "visible",
n.nspname as "schema",
e.enumlabel as "label"
FROM pg_catalog.pg_type t
LEFT JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace
LEFT JOIN pg_catalog.pg_constraint r ON t.oid = r.contypid
LEFT JOIN pg_catalog.pg_enum e ON t.oid = e.enumtypid
WHERE t.typtype = 'e'
ORDER BY "name", e.oid -- e.oid gives us label order
"""
s = sql.text(SQL_ENUMS, typemap={
'attname':sqltypes.Unicode,
'label':sqltypes.Unicode})
c = connection.execute(s)
enums = {}
for enum in c.fetchall():
if enum['visible']:
# 'visible' just means whether or not the enum is in a
                # schema that's on the search path -- or not overridden by
                # a schema with higher precedence. If it's not visible,
# it will be prefixed with the schema-name when it's used.
name = enum['name']
else:
name = "%s.%s" % (enum['schema'], enum['name'])
if name in enums:
enums[name]['labels'].append(enum['label'])
else:
enums[name] = {
'labels': [enum['label']],
}
return enums
def _load_domains(self, connection):
## Load data types for domains:
SQL_DOMAINS = """
SELECT t.typname as "name",
pg_catalog.format_type(t.typbasetype, t.typtypmod) as "attype",
not t.typnotnull as "nullable",
t.typdefault as "default",
pg_catalog.pg_type_is_visible(t.oid) as "visible",
n.nspname as "schema"
FROM pg_catalog.pg_type t
LEFT JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace
LEFT JOIN pg_catalog.pg_constraint r ON t.oid = r.contypid
WHERE t.typtype = 'd'
"""
s = sql.text(SQL_DOMAINS, typemap={'attname':sqltypes.Unicode})
c = connection.execute(s)
domains = {}
for domain in c.fetchall():
## strip (30) from character varying(30)
attype = re.search('([^\(]+)', domain['attype']).group(1)
if domain['visible']:
# 'visible' just means whether or not the domain is in a
                # schema that's on the search path -- or not overridden by
                # a schema with higher precedence. If it's not visible,
# it will be prefixed with the schema-name when it's used.
name = domain['name']
else:
name = "%s.%s" % (domain['schema'], domain['name'])
domains[name] = {
'attype':attype,
'nullable': domain['nullable'],
'default': domain['default']
}
return domains
| gpl-2.0 | 6,457,700,991,169,302,000 | 35.687371 | 91 | 0.543435 | false |
shssoichiro/servo | tests/wpt/css-tests/tools/pytest/_pytest/cacheprovider.py | 188 | 8939 | """
merged implementation of the cache provider
the name cache was not chosen to ensure pluggy automatically
ignores the external pytest-cache
"""
import py
import pytest
import json
from os.path import sep as _sep, altsep as _altsep
class Cache(object):
def __init__(self, config):
self.config = config
self._cachedir = config.rootdir.join(".cache")
self.trace = config.trace.root.get("cache")
if config.getvalue("cacheclear"):
self.trace("clearing cachedir")
if self._cachedir.check():
self._cachedir.remove()
self._cachedir.mkdir()
def makedir(self, name):
""" return a directory path object with the given name. If the
directory does not yet exist, it will be created. You can use it
        to manage files, e.g. to store/retrieve database
dumps across test sessions.
:param name: must be a string not containing a ``/`` separator.
Make sure the name contains your plugin or application
identifiers to prevent clashes with other cache users.
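        A hypothetical plugin hook might use it like this::
            def pytest_configure(config):
                dump_dir = config.cache.makedir("myplugin-dumps")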
"""
if _sep in name or _altsep is not None and _altsep in name:
raise ValueError("name is not allowed to contain path separators")
return self._cachedir.ensure_dir("d", name)
def _getvaluepath(self, key):
return self._cachedir.join('v', *key.split('/'))
def get(self, key, default):
""" return cached value for the given key. If no value
was yet cached or the value cannot be read, the specified
default is returned.
:param key: must be a ``/`` separated value. Usually the first
name is the name of your plugin or your application.
:param default: must be provided in case of a cache-miss or
invalid cache values.
"""
path = self._getvaluepath(key)
if path.check():
try:
with path.open("r") as f:
return json.load(f)
except ValueError:
self.trace("cache-invalid at %s" % (path,))
return default
def set(self, key, value):
""" save value for the given key.
:param key: must be a ``/`` separated value. Usually the first
name is the name of your plugin or your application.
:param value: must be of any combination of basic
python types, including nested types
               like e.g. lists of dictionaries.
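        A sketch of a call with a nested, JSON-serializable value (the key
        and contents are illustrative)::
            config.cache.set("myplugin/results", {"failed": ["test_a"], "count": 2})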
"""
path = self._getvaluepath(key)
try:
path.dirpath().ensure_dir()
except (py.error.EEXIST, py.error.EACCES):
self.config.warn(
code='I9', message='could not create cache path %s' % (path,)
)
return
try:
f = path.open('w')
except py.error.ENOTDIR:
self.config.warn(
code='I9', message='cache could not write path %s' % (path,))
else:
with f:
self.trace("cache-write %s: %r" % (key, value,))
json.dump(value, f, indent=2, sort_keys=True)
class LFPlugin:
""" Plugin which implements the --lf (run last-failing) option """
def __init__(self, config):
self.config = config
active_keys = 'lf', 'failedfirst'
self.active = any(config.getvalue(key) for key in active_keys)
if self.active:
self.lastfailed = config.cache.get("cache/lastfailed", {})
else:
self.lastfailed = {}
def pytest_report_header(self):
if self.active:
if not self.lastfailed:
mode = "run all (no recorded failures)"
else:
mode = "rerun last %d failures%s" % (
len(self.lastfailed),
" first" if self.config.getvalue("failedfirst") else "")
return "run-last-failure: %s" % mode
def pytest_runtest_logreport(self, report):
if report.failed and "xfail" not in report.keywords:
self.lastfailed[report.nodeid] = True
elif not report.failed:
if report.when == "call":
self.lastfailed.pop(report.nodeid, None)
def pytest_collectreport(self, report):
passed = report.outcome in ('passed', 'skipped')
if passed:
if report.nodeid in self.lastfailed:
self.lastfailed.pop(report.nodeid)
self.lastfailed.update(
(item.nodeid, True)
for item in report.result)
else:
self.lastfailed[report.nodeid] = True
def pytest_collection_modifyitems(self, session, config, items):
if self.active and self.lastfailed:
previously_failed = []
previously_passed = []
for item in items:
if item.nodeid in self.lastfailed:
previously_failed.append(item)
else:
previously_passed.append(item)
if not previously_failed and previously_passed:
# running a subset of all tests with recorded failures outside
# of the set of tests currently executing
pass
elif self.config.getvalue("failedfirst"):
items[:] = previously_failed + previously_passed
else:
items[:] = previously_failed
config.hook.pytest_deselected(items=previously_passed)
def pytest_sessionfinish(self, session):
config = self.config
if config.getvalue("cacheshow") or hasattr(config, "slaveinput"):
return
prev_failed = config.cache.get("cache/lastfailed", None) is not None
if (session.testscollected and prev_failed) or self.lastfailed:
config.cache.set("cache/lastfailed", self.lastfailed)
def pytest_addoption(parser):
group = parser.getgroup("general")
group.addoption(
'--lf', '--last-failed', action='store_true', dest="lf",
help="rerun only the tests that failed "
"at the last run (or all if none failed)")
group.addoption(
'--ff', '--failed-first', action='store_true', dest="failedfirst",
help="run all tests but run the last failures first. "
"This may re-order tests and thus lead to "
"repeated fixture setup/teardown")
group.addoption(
'--cache-show', action='store_true', dest="cacheshow",
help="show cache contents, don't perform collection or tests")
group.addoption(
'--cache-clear', action='store_true', dest="cacheclear",
help="remove all cache contents at start of test run.")
def pytest_cmdline_main(config):
if config.option.cacheshow:
from _pytest.main import wrap_session
return wrap_session(config, cacheshow)
@pytest.hookimpl(tryfirst=True)
def pytest_configure(config):
config.cache = Cache(config)
config.pluginmanager.register(LFPlugin(config), "lfplugin")
@pytest.fixture
def cache(request):
"""
Return a cache object that can persist state between testing sessions.
cache.get(key, default)
cache.set(key, value)
Keys must be a ``/`` separated value, where the first part is usually the
name of your plugin or application to avoid clashes with other cache users.
Values can be any object handled by the json stdlib module.
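    A minimal sketch of a test using the fixture (the key is illustrative)::
        def test_remembers_value(cache):
            value = cache.get("example/value", 0)
            cache.set("example/value", value + 1)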
"""
return request.config.cache
def pytest_report_header(config):
if config.option.verbose:
relpath = py.path.local().bestrelpath(config.cache._cachedir)
return "cachedir: %s" % relpath
def cacheshow(config, session):
from pprint import pprint
tw = py.io.TerminalWriter()
tw.line("cachedir: " + str(config.cache._cachedir))
if not config.cache._cachedir.check():
tw.line("cache is empty")
return 0
dummy = object()
basedir = config.cache._cachedir
vdir = basedir.join("v")
tw.sep("-", "cache values")
for valpath in vdir.visit(lambda x: x.isfile()):
key = valpath.relto(vdir).replace(valpath.sep, "/")
val = config.cache.get(key, dummy)
if val is dummy:
tw.line("%s contains unreadable content, "
"will be ignored" % key)
else:
tw.line("%s contains:" % key)
stream = py.io.TextIO()
pprint(val, stream=stream)
for line in stream.getvalue().splitlines():
tw.line(" " + line)
ddir = basedir.join("d")
if ddir.isdir() and ddir.listdir():
tw.sep("-", "cache directories")
for p in basedir.join("d").visit():
#if p.check(dir=1):
# print("%s/" % p.relto(basedir))
if p.isfile():
key = p.relto(basedir)
tw.line("%s is a file of length %d" % (
key, p.size()))
return 0
| mpl-2.0 | -7,212,911,681,018,630,000 | 35.485714 | 79 | 0.583175 | false |
krafczyk/spack | var/spack/repos/builtin/packages/font-bh-type1/package.py | 5 | 2082 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class FontBhType1(Package):
"""X.org bh-type1 font."""
homepage = "http://cgit.freedesktop.org/xorg/font/bh-type1"
url = "https://www.x.org/archive/individual/font/font-bh-type1-1.0.3.tar.gz"
version('1.0.3', '62d4e8f782a6a0658784072a5df5ac98')
depends_on('font-util')
depends_on('fontconfig', type='build')
depends_on('mkfontdir', type='build')
depends_on('mkfontscale', type='build')
depends_on('pkgconfig', type='build')
depends_on('util-macros', type='build')
def install(self, spec, prefix):
configure('--prefix={0}'.format(prefix))
make('install')
# `make install` copies the files to the font-util installation.
# Create a fake directory to convince Spack that we actually
# installed something.
mkdir(prefix.lib)
| lgpl-2.1 | 2,262,413,580,171,185,200 | 39.038462 | 85 | 0.660423 | false |
ddayguerrero/blogme | flask/lib/python3.4/site-packages/werkzeug/contrib/atom.py | 259 | 15588 | # -*- coding: utf-8 -*-
"""
werkzeug.contrib.atom
~~~~~~~~~~~~~~~~~~~~~
This module provides a class called :class:`AtomFeed` which can be
used to generate feeds in the Atom syndication format (see :rfc:`4287`).
Example::
def atom_feed(request):
feed = AtomFeed("My Blog", feed_url=request.url,
url=request.host_url,
subtitle="My example blog for a feed test.")
for post in Post.query.limit(10).all():
feed.add(post.title, post.body, content_type='html',
author=post.author, url=post.url, id=post.uid,
updated=post.last_update, published=post.pub_date)
return feed.get_response()
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from datetime import datetime
from werkzeug.utils import escape
from werkzeug.wrappers import BaseResponse
from werkzeug._compat import implements_to_string, string_types
XHTML_NAMESPACE = 'http://www.w3.org/1999/xhtml'
def _make_text_block(name, content, content_type=None):
"""Helper function for the builder that creates an XML text block."""
if content_type == 'xhtml':
return u'<%s type="xhtml"><div xmlns="%s">%s</div></%s>\n' % \
(name, XHTML_NAMESPACE, content, name)
if not content_type:
return u'<%s>%s</%s>\n' % (name, escape(content), name)
return u'<%s type="%s">%s</%s>\n' % (name, content_type,
escape(content), name)
def format_iso8601(obj):
"""Format a datetime object for iso8601"""
iso8601 = obj.isoformat()
if obj.tzinfo:
return iso8601
return iso8601 + 'Z'
@implements_to_string
class AtomFeed(object):
"""A helper class that creates Atom feeds.
:param title: the title of the feed. Required.
:param title_type: the type attribute for the title element. One of
``'html'``, ``'text'`` or ``'xhtml'``.
:param url: the url for the feed (not the url *of* the feed)
:param id: a globally unique id for the feed. Must be an URI. If
not present the `feed_url` is used, but one of both is
required.
    :param updated: the time the feed was last modified. Must
be a :class:`datetime.datetime` object. If not
present the latest entry's `updated` is used.
Treated as UTC if naive datetime.
:param feed_url: the URL to the feed. Should be the URL that was
requested.
:param author: the author of the feed. Must be either a string (the
name) or a dict with name (required) and uri or
email (both optional). Can be a list of (may be
mixed, too) strings and dicts, too, if there are
multiple authors. Required if not every entry has an
author element.
:param icon: an icon for the feed.
:param logo: a logo for the feed.
:param rights: copyright information for the feed.
:param rights_type: the type attribute for the rights element. One of
``'html'``, ``'text'`` or ``'xhtml'``. Default is
``'text'``.
:param subtitle: a short description of the feed.
:param subtitle_type: the type attribute for the subtitle element.
One of ``'text'``, ``'html'``, ``'text'``
or ``'xhtml'``. Default is ``'text'``.
:param links: additional links. Must be a list of dictionaries with
href (required) and rel, type, hreflang, title, length
(all optional)
:param generator: the software that generated this feed. This must be
a tuple in the form ``(name, url, version)``. If
you don't want to specify one of them, set the item
to `None`.
:param entries: a list with the entries for the feed. Entries can also
be added later with :meth:`add`.
For more information on the elements see
http://www.atomenabled.org/developers/syndication/
Everywhere where a list is demanded, any iterable can be used.
"""
default_generator = ('Werkzeug', None, None)
def __init__(self, title=None, entries=None, **kwargs):
self.title = title
self.title_type = kwargs.get('title_type', 'text')
self.url = kwargs.get('url')
self.feed_url = kwargs.get('feed_url', self.url)
self.id = kwargs.get('id', self.feed_url)
self.updated = kwargs.get('updated')
self.author = kwargs.get('author', ())
self.icon = kwargs.get('icon')
self.logo = kwargs.get('logo')
self.rights = kwargs.get('rights')
self.rights_type = kwargs.get('rights_type')
self.subtitle = kwargs.get('subtitle')
self.subtitle_type = kwargs.get('subtitle_type', 'text')
self.generator = kwargs.get('generator')
if self.generator is None:
self.generator = self.default_generator
self.links = kwargs.get('links', [])
self.entries = entries and list(entries) or []
if not hasattr(self.author, '__iter__') \
or isinstance(self.author, string_types + (dict,)):
self.author = [self.author]
for i, author in enumerate(self.author):
if not isinstance(author, dict):
self.author[i] = {'name': author}
if not self.title:
raise ValueError('title is required')
if not self.id:
raise ValueError('id is required')
for author in self.author:
if 'name' not in author:
raise TypeError('author must contain at least a name')
def add(self, *args, **kwargs):
"""Add a new entry to the feed. This function can either be called
with a :class:`FeedEntry` or some keyword and positional arguments
that are forwarded to the :class:`FeedEntry` constructor.
"""
if len(args) == 1 and not kwargs and isinstance(args[0], FeedEntry):
self.entries.append(args[0])
else:
kwargs['feed_url'] = self.feed_url
self.entries.append(FeedEntry(*args, **kwargs))
def __repr__(self):
return '<%s %r (%d entries)>' % (
self.__class__.__name__,
self.title,
len(self.entries)
)
def generate(self):
"""Return a generator that yields pieces of XML."""
# atom demands either an author element in every entry or a global one
if not self.author:
if False in map(lambda e: bool(e.author), self.entries):
self.author = ({'name': 'Unknown author'},)
if not self.updated:
dates = sorted([entry.updated for entry in self.entries])
self.updated = dates and dates[-1] or datetime.utcnow()
yield u'<?xml version="1.0" encoding="utf-8"?>\n'
yield u'<feed xmlns="http://www.w3.org/2005/Atom">\n'
yield ' ' + _make_text_block('title', self.title, self.title_type)
yield u' <id>%s</id>\n' % escape(self.id)
yield u' <updated>%s</updated>\n' % format_iso8601(self.updated)
if self.url:
yield u' <link href="%s" />\n' % escape(self.url)
if self.feed_url:
yield u' <link href="%s" rel="self" />\n' % \
escape(self.feed_url)
for link in self.links:
yield u' <link %s/>\n' % ''.join('%s="%s" ' %
(k, escape(link[k])) for k in link)
for author in self.author:
yield u' <author>\n'
yield u' <name>%s</name>\n' % escape(author['name'])
if 'uri' in author:
yield u' <uri>%s</uri>\n' % escape(author['uri'])
if 'email' in author:
yield ' <email>%s</email>\n' % escape(author['email'])
yield ' </author>\n'
if self.subtitle:
yield ' ' + _make_text_block('subtitle', self.subtitle,
self.subtitle_type)
if self.icon:
yield u' <icon>%s</icon>\n' % escape(self.icon)
if self.logo:
yield u' <logo>%s</logo>\n' % escape(self.logo)
if self.rights:
yield ' ' + _make_text_block('rights', self.rights,
self.rights_type)
generator_name, generator_url, generator_version = self.generator
if generator_name or generator_url or generator_version:
tmp = [u' <generator']
if generator_url:
tmp.append(u' uri="%s"' % escape(generator_url))
if generator_version:
tmp.append(u' version="%s"' % escape(generator_version))
tmp.append(u'>%s</generator>\n' % escape(generator_name))
yield u''.join(tmp)
for entry in self.entries:
for line in entry.generate():
yield u' ' + line
yield u'</feed>\n'
def to_string(self):
"""Convert the feed into a string."""
return u''.join(self.generate())
def get_response(self):
"""Return a response object for the feed."""
return BaseResponse(self.to_string(), mimetype='application/atom+xml')
def __call__(self, environ, start_response):
"""Use the class as WSGI response object."""
return self.get_response()(environ, start_response)
def __str__(self):
return self.to_string()
@implements_to_string
class FeedEntry(object):
"""Represents a single entry in a feed.
:param title: the title of the entry. Required.
:param title_type: the type attribute for the title element. One of
``'html'``, ``'text'`` or ``'xhtml'``.
:param content: the content of the entry.
:param content_type: the type attribute for the content element. One
of ``'html'``, ``'text'`` or ``'xhtml'``.
:param summary: a summary of the entry's content.
:param summary_type: the type attribute for the summary element. One
of ``'html'``, ``'text'`` or ``'xhtml'``.
:param url: the url for the entry.
:param id: a globally unique id for the entry. Must be an URI. If
not present the URL is used, but one of both is required.
    :param updated: the time the entry was last modified. Must
be a :class:`datetime.datetime` object. Treated as
UTC if naive datetime. Required.
:param author: the author of the entry. Must be either a string (the
name) or a dict with name (required) and uri or
                   email (both optional). Can also be a list of strings
                   and dicts (mixed) if there are multiple authors.
                   Required if the feed does not have an
author element.
:param published: the time the entry was initially published. Must
be a :class:`datetime.datetime` object. Treated as
UTC if naive datetime.
:param rights: copyright information for the entry.
:param rights_type: the type attribute for the rights element. One of
``'html'``, ``'text'`` or ``'xhtml'``. Default is
``'text'``.
:param links: additional links. Must be a list of dictionaries with
href (required) and rel, type, hreflang, title, length
(all optional)
:param categories: categories for the entry. Must be a list of dictionaries
with term (required), scheme and label (all optional)
:param xml_base: The xml base (url) for this feed item. If not provided
it will default to the item url.
For more information on the elements see
http://www.atomenabled.org/developers/syndication/
Everywhere where a list is demanded, any iterable can be used.
"""
def __init__(self, title=None, content=None, feed_url=None, **kwargs):
self.title = title
self.title_type = kwargs.get('title_type', 'text')
self.content = content
self.content_type = kwargs.get('content_type', 'html')
self.url = kwargs.get('url')
self.id = kwargs.get('id', self.url)
self.updated = kwargs.get('updated')
self.summary = kwargs.get('summary')
self.summary_type = kwargs.get('summary_type', 'html')
self.author = kwargs.get('author', ())
self.published = kwargs.get('published')
self.rights = kwargs.get('rights')
self.links = kwargs.get('links', [])
self.categories = kwargs.get('categories', [])
self.xml_base = kwargs.get('xml_base', feed_url)
if not hasattr(self.author, '__iter__') \
or isinstance(self.author, string_types + (dict,)):
self.author = [self.author]
for i, author in enumerate(self.author):
if not isinstance(author, dict):
self.author[i] = {'name': author}
if not self.title:
raise ValueError('title is required')
if not self.id:
raise ValueError('id is required')
if not self.updated:
raise ValueError('updated is required')
def __repr__(self):
return '<%s %r>' % (
self.__class__.__name__,
self.title
)
def generate(self):
"""Yields pieces of ATOM XML."""
base = ''
if self.xml_base:
base = ' xml:base="%s"' % escape(self.xml_base)
yield u'<entry%s>\n' % base
yield u' ' + _make_text_block('title', self.title, self.title_type)
yield u' <id>%s</id>\n' % escape(self.id)
yield u' <updated>%s</updated>\n' % format_iso8601(self.updated)
if self.published:
yield u' <published>%s</published>\n' % \
format_iso8601(self.published)
if self.url:
yield u' <link href="%s" />\n' % escape(self.url)
for author in self.author:
yield u' <author>\n'
yield u' <name>%s</name>\n' % escape(author['name'])
if 'uri' in author:
yield u' <uri>%s</uri>\n' % escape(author['uri'])
if 'email' in author:
yield u' <email>%s</email>\n' % escape(author['email'])
yield u' </author>\n'
for link in self.links:
yield u' <link %s/>\n' % ''.join('%s="%s" ' %
(k, escape(link[k])) for k in link)
for category in self.categories:
yield u' <category %s/>\n' % ''.join('%s="%s" ' %
(k, escape(category[k])) for k in category)
if self.summary:
yield u' ' + _make_text_block('summary', self.summary,
self.summary_type)
if self.content:
yield u' ' + _make_text_block('content', self.content,
self.content_type)
yield u'</entry>\n'
def to_string(self):
"""Convert the feed item into a unicode object."""
return u''.join(self.generate())
def __str__(self):
return self.to_string()
| mit | 4,130,128,834,609,485,000 | 42.909859 | 93 | 0.549397 | false |
alexbredo/site-packages | mail.py | 1 | 2015 | # Copyright (c) 2014 Alexander Bredo
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the
# following conditions are met:
#
# 1. Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import smtplib
from email.mime.text import MIMEText
from bredo.logger import *
class Mail(object):
def __init__(self, smtpserver, sender):
self.log = Logger('kippo-alert.log')
self.smtpserver = smtpserver
self.sender = sender
def send(self, recipient, subject, message):
try:
s = smtplib.SMTP(self.smtpserver)
msg = MIMEText(message)
msg['From'] = self.sender
msg['To'] = recipient
msg['Subject'] = subject
s.sendmail(self.sender, recipient, msg.as_string())
s.quit()
			self.log.info('Mail has been successfully sent.')
		except Exception, e:
			self.log.error('Error occurred while sending Mail: ' + str(e))
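# Illustrative usage sketch (added for clarity; not part of the original
# module). The SMTP host and mail addresses below are placeholders.
def _example_send_alert():
	mailer = Mail('smtp.example.org', 'alerts@example.org')
	mailer.send('admin@example.org', 'Kippo alert', 'Suspicious login observed.')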
| bsd-2-clause | 7,450,823,667,318,306,000 | 37.018868 | 63 | 0.744913 | false |
matmutant/sl4a | python/src/Lib/lib-tk/FixTk.py | 49 | 2844 | import sys, os
# Delay import _tkinter until we have set TCL_LIBRARY,
# so that Tcl_FindExecutable has a chance to locate its
# encoding directory.
# Unfortunately, we cannot know the TCL_LIBRARY directory
# if we don't know the tcl version, which we cannot find out
# without import Tcl. Fortunately, Tcl will itself look in
# <TCL_LIBRARY>\..\tcl<TCL_VERSION>, so anything close to
# the real Tcl library will do.
# Expand symbolic links on Vista
try:
import ctypes
ctypes.windll.kernel32.GetFinalPathNameByHandleW
except (ImportError, AttributeError):
def convert_path(s):
return s
else:
def convert_path(s):
if isinstance(s, str):
s = s.decode("mbcs")
hdir = ctypes.windll.kernel32.\
CreateFileW(s, 0x80, # FILE_READ_ATTRIBUTES
1, # FILE_SHARE_READ
None, 3, # OPEN_EXISTING
0x02000000, # FILE_FLAG_BACKUP_SEMANTICS
None)
if hdir == -1:
# Cannot open directory, give up
return s
buf = ctypes.create_unicode_buffer(u"", 32768)
res = ctypes.windll.kernel32.\
GetFinalPathNameByHandleW(hdir, buf, len(buf),
0) # VOLUME_NAME_DOS
ctypes.windll.kernel32.CloseHandle(hdir)
if res == 0:
# Conversion failed (e.g. network location)
return s
s = buf[:res]
# Ignore leading \\?\
if s.startswith(u"\\\\?\\"):
s = s[4:]
return s
prefix = os.path.join(sys.prefix,"tcl")
if not os.path.exists(prefix):
# devdir/../tcltk/lib
prefix = os.path.join(sys.prefix, os.path.pardir, "tcltk", "lib")
prefix = os.path.abspath(prefix)
# if this does not exist, no further search is needed
if os.path.exists(prefix):
prefix = convert_path(prefix)
if not os.environ.has_key("TCL_LIBRARY"):
for name in os.listdir(prefix):
if name.startswith("tcl"):
tcldir = os.path.join(prefix,name)
if os.path.isdir(tcldir):
os.environ["TCL_LIBRARY"] = tcldir
# Compute TK_LIBRARY, knowing that it has the same version
# as Tcl
import _tkinter
ver = str(_tkinter.TCL_VERSION)
if not os.environ.has_key("TK_LIBRARY"):
v = os.path.join(prefix, 'tk'+ver)
if os.path.exists(os.path.join(v, "tclIndex")):
os.environ['TK_LIBRARY'] = v
# We don't know the Tix version, so we must search the entire
# directory
if not os.environ.has_key("TIX_LIBRARY"):
for name in os.listdir(prefix):
if name.startswith("tix"):
tixdir = os.path.join(prefix,name)
if os.path.isdir(tixdir):
os.environ["TIX_LIBRARY"] = tixdir
| apache-2.0 | -2,717,771,428,278,003,700 | 36.421053 | 69 | 0.580169 | false |
txm/potato | django/conf/locale/en_GB/formats.py | 234 | 2048 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'N j, Y' # 'Oct. 25, 2006'
TIME_FORMAT = 'P' # '2:30 pm'
DATETIME_FORMAT = 'N j, Y, P' # 'Oct. 25, 2006, 2:30 pm'
YEAR_MONTH_FORMAT = 'F Y' # 'October 2006'
MONTH_DAY_FORMAT = 'F j' # 'October 25'
SHORT_DATE_FORMAT = 'd/m/Y' # '25/10/2006'
SHORT_DATETIME_FORMAT = 'd/m/Y P' # '25/10/2006 2:30 pm'
FIRST_DAY_OF_WEEK = 0 # Sunday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
'%d/%m/%Y', '%d/%m/%y', # '25/10/2006', '25/10/06'
'%Y-%m-%d', # '2006-10-25'
# '%b %d %Y', '%b %d, %Y', # 'Oct 25 2006', 'Oct 25, 2006'
# '%d %b %Y', '%d %b, %Y', # '25 Oct 2006', '25 Oct, 2006'
# '%B %d %Y', '%B %d, %Y', # 'October 25 2006', 'October 25, 2006'
# '%d %B %Y', '%d %B, %Y', # '25 October 2006', '25 October, 2006'
)
TIME_INPUT_FORMATS = (
'%H:%M:%S', # '14:30:59'
'%H:%M', # '14:30'
)
DATETIME_INPUT_FORMATS = (
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%d/%m/%Y %H:%M:%S', # '25/10/2006 14:30:59'
'%d/%m/%Y %H:%M', # '25/10/2006 14:30'
'%d/%m/%Y', # '25/10/2006'
'%d/%m/%y %H:%M:%S', # '25/10/06 14:30:59'
'%d/%m/%y %H:%M', # '25/10/06 14:30'
'%d/%m/%y', # '25/10/06'
)
DECIMAL_SEPARATOR = '.'
THOUSAND_SEPARATOR = ','
NUMBER_GROUPING = 3
| bsd-3-clause | -446,027,609,695,267,260 | 45.545455 | 79 | 0.434082 | false |
prarthitm/edxplatform | pavelib/servers.py | 15 | 10450 | """
Run and manage servers for local development.
"""
from __future__ import print_function
import argparse
import sys
from paver.easy import call_task, cmdopts, consume_args, needs, sh, task
from .assets import collect_assets
from .utils.cmd import django_cmd
from .utils.process import run_process, run_multi_processes
from .utils.timer import timed
DEFAULT_PORT = {"lms": 8000, "studio": 8001}
DEFAULT_SETTINGS = 'devstack'
OPTIMIZED_SETTINGS = "devstack_optimized"
OPTIMIZED_ASSETS_SETTINGS = "test_static_optimized"
ASSET_SETTINGS_HELP = (
"Settings file used for updating assets. Defaults to the value of the settings variable if not provided."
)
def run_server(
system, fast=False, settings=None, asset_settings=None, port=None, contracts=False
):
"""Start the server for LMS or Studio.
Args:
system (str): The system to be run (lms or studio).
fast (bool): If true, then start the server immediately without updating assets (defaults to False).
settings (str): The Django settings module to use; if not provided, use the default.
asset_settings (str) The settings to use when generating assets. If not provided, assets are not generated.
port (str): The port number to run the server on. If not provided, uses the default port for the system.
contracts (bool) If true then PyContracts is enabled (defaults to False).
"""
if system not in ['lms', 'studio']:
print("System must be either lms or studio", file=sys.stderr)
exit(1)
if not settings:
settings = DEFAULT_SETTINGS
if not fast and asset_settings:
args = [system, '--settings={}'.format(asset_settings), '--watch']
# The default settings use DEBUG mode for running the server which means that
# the optimized assets are ignored, so we skip collectstatic in that case
# to save time.
if settings == DEFAULT_SETTINGS:
args.append('--skip-collect')
call_task('pavelib.assets.update_assets', args=args)
if port is None:
port = DEFAULT_PORT[system]
args = [settings, 'runserver', '--traceback', '--pythonpath=.', '0.0.0.0:{}'.format(port)]
if contracts:
args.append("--contracts")
run_process(django_cmd(system, *args))
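# Illustrative sketch (added for clarity; not part of the original file):
# starting Studio on a non-default port with explicit settings, roughly what
# `paver studio --fast --port=8003` ends up doing. Settings names are examples.
def _example_run_studio():
    run_server('studio', fast=True, settings='devstack', port=8003)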
@task
@needs('pavelib.prereqs.install_prereqs')
@cmdopts([
("settings=", "s", "Django settings"),
("asset-settings=", "a", ASSET_SETTINGS_HELP),
("port=", "p", "Port"),
("fast", "f", "Skip updating assets"),
])
def lms(options):
"""
Run the LMS server.
"""
settings = getattr(options, 'settings', DEFAULT_SETTINGS)
asset_settings = getattr(options, 'asset-settings', settings)
port = getattr(options, 'port', None)
fast = getattr(options, 'fast', False)
run_server(
'lms',
fast=fast,
settings=settings,
asset_settings=asset_settings,
port=port,
)
@task
@needs('pavelib.prereqs.install_prereqs')
@cmdopts([
("settings=", "s", "Django settings"),
("asset-settings=", "a", ASSET_SETTINGS_HELP),
("port=", "p", "Port"),
("fast", "f", "Skip updating assets"),
])
def studio(options):
"""
Run the Studio server.
"""
settings = getattr(options, 'settings', DEFAULT_SETTINGS)
asset_settings = getattr(options, 'asset-settings', settings)
port = getattr(options, 'port', None)
fast = getattr(options, 'fast', False)
run_server(
'studio',
fast=fast,
settings=settings,
asset_settings=asset_settings,
port=port,
)
@task
@needs('pavelib.prereqs.install_prereqs')
@consume_args
def devstack(args):
"""
Start the devstack lms or studio server
"""
parser = argparse.ArgumentParser(prog='paver devstack')
parser.add_argument('system', type=str, nargs=1, help="lms or studio")
parser.add_argument('--fast', action='store_true', default=False, help="Skip updating assets")
parser.add_argument('--optimized', action='store_true', default=False, help="Run with optimized assets")
parser.add_argument('--settings', type=str, default=DEFAULT_SETTINGS, help="Settings file")
parser.add_argument('--asset-settings', type=str, default=None, help=ASSET_SETTINGS_HELP)
parser.add_argument(
'--no-contracts',
action='store_true',
default=False,
help="Disable contracts. By default, they're enabled in devstack."
)
args = parser.parse_args(args)
settings = args.settings
asset_settings = args.asset_settings if args.asset_settings else settings
if args.optimized:
settings = OPTIMIZED_SETTINGS
asset_settings = OPTIMIZED_ASSETS_SETTINGS
sh(django_cmd('cms', settings, 'reindex_course', '--setup'))
run_server(
args.system[0],
fast=args.fast,
settings=settings,
asset_settings=asset_settings,
contracts=not args.no_contracts,
)
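# Example invocations for the task above (illustrative):
#   paver devstack lms --fast
#   paver devstack studio --optimized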
@task
@needs('pavelib.prereqs.install_prereqs')
@cmdopts([
("settings=", "s", "Django settings"),
])
def celery(options):
"""
Runs Celery workers.
"""
settings = getattr(options, 'settings', 'dev_with_worker')
run_process(django_cmd('lms', settings, 'celery', 'worker', '--beat', '--loglevel=INFO', '--pythonpath=.'))
@task
@needs('pavelib.prereqs.install_prereqs')
@cmdopts([
("settings=", "s", "Django settings for both LMS and Studio"),
("asset-settings=", "a", "Django settings for updating assets for both LMS and Studio (defaults to settings)"),
("worker-settings=", "w", "Celery worker Django settings"),
("fast", "f", "Skip updating assets"),
("optimized", "o", "Run with optimized assets"),
("settings-lms=", "l", "Set LMS only, overriding the value from --settings (if provided)"),
("asset-settings-lms=", None, "Set LMS only, overriding the value from --asset-settings (if provided)"),
("settings-cms=", "c", "Set Studio only, overriding the value from --settings (if provided)"),
("asset-settings-cms=", None, "Set Studio only, overriding the value from --asset-settings (if provided)"),
("asset_settings=", None, "deprecated in favor of asset-settings"),
("asset_settings_cms=", None, "deprecated in favor of asset-settings-cms"),
("asset_settings_lms=", None, "deprecated in favor of asset-settings-lms"),
("settings_cms=", None, "deprecated in favor of settings-cms"),
("settings_lms=", None, "deprecated in favor of settings-lms"),
("worker_settings=", None, "deprecated in favor of worker-settings"),
])
def run_all_servers(options):
"""
Runs Celery workers, Studio, and LMS.
"""
settings = getattr(options, 'settings', DEFAULT_SETTINGS)
asset_settings = getattr(options, 'asset_settings', settings)
worker_settings = getattr(options, 'worker_settings', 'dev_with_worker')
fast = getattr(options, 'fast', False)
optimized = getattr(options, 'optimized', False)
if optimized:
settings = OPTIMIZED_SETTINGS
asset_settings = OPTIMIZED_ASSETS_SETTINGS
settings_lms = getattr(options, 'settings_lms', settings)
settings_cms = getattr(options, 'settings_cms', settings)
asset_settings_lms = getattr(options, 'asset_settings_lms', asset_settings)
asset_settings_cms = getattr(options, 'asset_settings_cms', asset_settings)
if not fast:
# First update assets for both LMS and Studio but don't collect static yet
args = [
'lms', 'studio',
'--settings={}'.format(asset_settings),
'--skip-collect'
]
call_task('pavelib.assets.update_assets', args=args)
# Now collect static for each system separately with the appropriate settings.
# Note that the default settings use DEBUG mode for running the server which
# means that the optimized assets are ignored, so we skip collectstatic in that
# case to save time.
if settings != DEFAULT_SETTINGS:
collect_assets(['lms'], asset_settings_lms)
collect_assets(['studio'], asset_settings_cms)
# Install an asset watcher to regenerate files that change
call_task('pavelib.assets.watch_assets', options={'background': True})
# Start up LMS, CMS and Celery
lms_port = DEFAULT_PORT['lms']
cms_port = DEFAULT_PORT['studio']
lms_runserver_args = ["0.0.0.0:{}".format(lms_port)]
cms_runserver_args = ["0.0.0.0:{}".format(cms_port)]
run_multi_processes([
django_cmd(
'lms', settings_lms, 'runserver', '--traceback', '--pythonpath=.', *lms_runserver_args
),
django_cmd(
'studio', settings_cms, 'runserver', '--traceback', '--pythonpath=.', *cms_runserver_args
),
django_cmd(
'lms', worker_settings, 'celery', 'worker', '--beat', '--loglevel=INFO', '--pythonpath=.'
)
])
@task
@needs('pavelib.prereqs.install_prereqs')
@cmdopts([
("settings=", "s", "Django settings"),
("fake-initial", None, "Fake the initial migrations"),
])
@timed
def update_db(options):
"""
Migrates the lms and cms across all databases
"""
settings = getattr(options, 'settings', DEFAULT_SETTINGS)
fake = "--fake-initial" if getattr(options, 'fake_initial', False) else ""
for system in ('lms', 'cms'):
# pylint: disable=line-too-long
sh("NO_EDXAPP_SUDO=1 EDX_PLATFORM_SETTINGS_OVERRIDE={settings} /edx/bin/edxapp-migrate-{system} --traceback --pythonpath=. {fake}".format(
settings=settings,
system=system,
fake=fake))
@task
@needs('pavelib.prereqs.install_prereqs')
@consume_args
@timed
def check_settings(args):
"""
Checks settings files.
"""
parser = argparse.ArgumentParser(prog='paver check_settings')
parser.add_argument('system', type=str, nargs=1, help="lms or studio")
parser.add_argument('settings', type=str, nargs=1, help='Django settings')
args = parser.parse_args(args)
system = args.system[0]
settings = args.settings[0]
try:
import_cmd = "echo 'import {system}.envs.{settings}'".format(system=system, settings=settings)
django_shell_cmd = django_cmd(system, settings, 'shell', '--plain', '--pythonpath=.')
sh("{import_cmd} | {shell_cmd}".format(import_cmd=import_cmd, shell_cmd=django_shell_cmd))
except: # pylint: disable=bare-except
print("Failed to import settings", file=sys.stderr)
| agpl-3.0 | -8,044,404,277,697,591,000 | 35.666667 | 146 | 0.645933 | false |
louyihua/edx-platform | common/djangoapps/contentserver/admin.py | 27 | 1455 | """
Django admin page for CourseAssetCacheTtlConfig, which allows you to configure the TTL
that gets used when sending cachability headers back with request course assets.
"""
from django.contrib import admin
from config_models.admin import ConfigurationModelAdmin
from .models import CourseAssetCacheTtlConfig, CdnUserAgentsConfig
class CourseAssetCacheTtlConfigAdmin(ConfigurationModelAdmin):
"""
Basic configuration for cache TTL.
"""
list_display = [
'cache_ttl'
]
def get_list_display(self, request):
"""
Restore default list_display behavior.
ConfigurationModelAdmin overrides this, but in a way that doesn't
respect the ordering. This lets us customize it the usual Django admin
way.
"""
return self.list_display
class CdnUserAgentsConfigAdmin(ConfigurationModelAdmin):
"""
Basic configuration for CDN user agent whitelist.
"""
list_display = [
'cdn_user_agents'
]
def get_list_display(self, request):
"""
Restore default list_display behavior.
ConfigurationModelAdmin overrides this, but in a way that doesn't
respect the ordering. This lets us customize it the usual Django admin
way.
"""
return self.list_display
admin.site.register(CourseAssetCacheTtlConfig, CourseAssetCacheTtlConfigAdmin)
admin.site.register(CdnUserAgentsConfig, CdnUserAgentsConfigAdmin)
| agpl-3.0 | -863,985,471,842,398,600 | 28.693878 | 86 | 0.709278 | false |
ioef/tlslite-ng | tlslite/utils/asn1parser.py | 206 | 1191 | # Author: Trevor Perrin
# Patch from Google adding getChildBytes()
#
# See the LICENSE file for legal information regarding use of this file.
"""Class for parsing ASN.1"""
from .compat import *
from .codec import *
#Takes a byte array which has a DER TLV field at its head
class ASN1Parser(object):
def __init__(self, bytes):
p = Parser(bytes)
p.get(1) #skip Type
#Get Length
self.length = self._getASN1Length(p)
#Get Value
self.value = p.getFixBytes(self.length)
#Assuming this is a sequence...
def getChild(self, which):
return ASN1Parser(self.getChildBytes(which))
def getChildBytes(self, which):
p = Parser(self.value)
for x in range(which+1):
markIndex = p.index
p.get(1) #skip Type
length = self._getASN1Length(p)
p.getFixBytes(length)
return p.bytes[markIndex : p.index]
#Decode the ASN.1 DER length field
def _getASN1Length(self, p):
firstLength = p.get(1)
if firstLength<=127:
return firstLength
else:
lengthLength = firstLength & 0x7F
return p.get(lengthLength)
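# Illustrative usage sketch (added for clarity; not part of the original
# module): parsing a tiny DER blob, SEQUENCE { INTEGER 5 }, i.e. the bytes
# 30 03 02 01 05.
def _example_parse_der():
    seq = ASN1Parser(bytearray([0x30, 0x03, 0x02, 0x01, 0x05]))
    first_child = seq.getChild(0)   # the INTEGER 5 TLV
    return first_child.value        # -> bytearray([0x05])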
| lgpl-2.1 | 7,525,213,748,959,431,000 | 27.357143 | 72 | 0.611251 | false |
gemmaan/moviesenal | Hasil/Lib/site-packages/pip/_vendor/ipaddress.py | 339 | 80176 | # Copyright 2007 Google Inc.
# Licensed to PSF under a Contributor Agreement.
"""A fast, lightweight IPv4/IPv6 manipulation library in Python.
This library is used to create/poke/manipulate IPv4 and IPv6 addresses
and networks.
"""
from __future__ import unicode_literals
import itertools
import struct
__version__ = '1.0.17'
# Compatibility functions
_compat_int_types = (int,)
try:
_compat_int_types = (int, long)
except NameError:
pass
try:
_compat_str = unicode
except NameError:
_compat_str = str
assert bytes != str
if b'\0'[0] == 0: # Python 3 semantics
def _compat_bytes_to_byte_vals(byt):
return byt
else:
def _compat_bytes_to_byte_vals(byt):
return [struct.unpack(b'!B', b)[0] for b in byt]
try:
_compat_int_from_byte_vals = int.from_bytes
except AttributeError:
def _compat_int_from_byte_vals(bytvals, endianess):
assert endianess == 'big'
res = 0
for bv in bytvals:
assert isinstance(bv, _compat_int_types)
res = (res << 8) + bv
return res
def _compat_to_bytes(intval, length, endianess):
assert isinstance(intval, _compat_int_types)
assert endianess == 'big'
if length == 4:
if intval < 0 or intval >= 2 ** 32:
raise struct.error("integer out of range for 'I' format code")
return struct.pack(b'!I', intval)
elif length == 16:
if intval < 0 or intval >= 2 ** 128:
raise struct.error("integer out of range for 'QQ' format code")
return struct.pack(b'!QQ', intval >> 64, intval & 0xffffffffffffffff)
else:
raise NotImplementedError()
if hasattr(int, 'bit_length'):
# Not int.bit_length , since that won't work in 2.7 where long exists
def _compat_bit_length(i):
return i.bit_length()
else:
def _compat_bit_length(i):
for res in itertools.count():
if i >> res == 0:
return res
def _compat_range(start, end, step=1):
assert step > 0
i = start
while i < end:
yield i
i += step
class _TotalOrderingMixin(object):
__slots__ = ()
# Helper that derives the other comparison operations from
# __lt__ and __eq__
# We avoid functools.total_ordering because it doesn't handle
# NotImplemented correctly yet (http://bugs.python.org/issue10042)
def __eq__(self, other):
raise NotImplementedError
def __ne__(self, other):
equal = self.__eq__(other)
if equal is NotImplemented:
return NotImplemented
return not equal
def __lt__(self, other):
raise NotImplementedError
def __le__(self, other):
less = self.__lt__(other)
if less is NotImplemented or not less:
return self.__eq__(other)
return less
def __gt__(self, other):
less = self.__lt__(other)
if less is NotImplemented:
return NotImplemented
equal = self.__eq__(other)
if equal is NotImplemented:
return NotImplemented
return not (less or equal)
def __ge__(self, other):
less = self.__lt__(other)
if less is NotImplemented:
return NotImplemented
return not less
IPV4LENGTH = 32
IPV6LENGTH = 128
class AddressValueError(ValueError):
"""A Value Error related to the address."""
class NetmaskValueError(ValueError):
"""A Value Error related to the netmask."""
def ip_address(address):
"""Take an IP string/int and return an object of the correct type.
Args:
address: A string or integer, the IP address. Either IPv4 or
IPv6 addresses may be supplied; integers less than 2**32 will
be considered to be IPv4 by default.
Returns:
An IPv4Address or IPv6Address object.
Raises:
ValueError: if the *address* passed isn't either a v4 or a v6
address
"""
try:
return IPv4Address(address)
except (AddressValueError, NetmaskValueError):
pass
try:
return IPv6Address(address)
except (AddressValueError, NetmaskValueError):
pass
if isinstance(address, bytes):
raise AddressValueError(
'%r does not appear to be an IPv4 or IPv6 address. '
'Did you pass in a bytes (str in Python 2) instead of'
' a unicode object?' % address)
raise ValueError('%r does not appear to be an IPv4 or IPv6 address' %
address)
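# Illustrative usage sketch (added for clarity; not part of the original
# module). The literal addresses are examples only.
def _example_ip_address_usage():
    addr4 = ip_address('192.0.2.1')        # -> IPv4Address('192.0.2.1')
    addr6 = ip_address('2001:db8::1')      # -> IPv6Address('2001:db8::1')
    from_int = ip_address(3221225985)      # ints < 2**32 are treated as IPv4
    return addr4, addr6, from_int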
def ip_network(address, strict=True):
"""Take an IP string/int and return an object of the correct type.
Args:
address: A string or integer, the IP network. Either IPv4 or
IPv6 networks may be supplied; integers less than 2**32 will
be considered to be IPv4 by default.
Returns:
An IPv4Network or IPv6Network object.
Raises:
ValueError: if the string passed isn't either a v4 or a v6
address. Or if the network has host bits set.
"""
try:
return IPv4Network(address, strict)
except (AddressValueError, NetmaskValueError):
pass
try:
return IPv6Network(address, strict)
except (AddressValueError, NetmaskValueError):
pass
if isinstance(address, bytes):
raise AddressValueError(
'%r does not appear to be an IPv4 or IPv6 network. '
'Did you pass in a bytes (str in Python 2) instead of'
' a unicode object?' % address)
raise ValueError('%r does not appear to be an IPv4 or IPv6 network' %
address)
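# Illustrative usage sketch (added for clarity; not part of the original module).
def _example_ip_network_usage():
    net = ip_network('192.0.2.0/24')
    # strict=True (the default) rejects host bits being set; with strict=False
    # they are masked off, so the next call also yields 192.0.2.0/24.
    loose = ip_network('192.0.2.1/24', strict=False)
    return net == loose   # True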
def ip_interface(address):
"""Take an IP string/int and return an object of the correct type.
Args:
address: A string or integer, the IP address. Either IPv4 or
IPv6 addresses may be supplied; integers less than 2**32 will
be considered to be IPv4 by default.
Returns:
An IPv4Interface or IPv6Interface object.
Raises:
ValueError: if the string passed isn't either a v4 or a v6
address.
Notes:
The IPv?Interface classes describe an Address on a particular
Network, so they're basically a combination of both the Address
and Network classes.
"""
try:
return IPv4Interface(address)
except (AddressValueError, NetmaskValueError):
pass
try:
return IPv6Interface(address)
except (AddressValueError, NetmaskValueError):
pass
raise ValueError('%r does not appear to be an IPv4 or IPv6 interface' %
address)
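# Illustrative usage sketch (added for clarity; not part of the original module).
def _example_ip_interface_usage():
    iface = ip_interface('192.0.2.5/24')
    # iface.ip is IPv4Address('192.0.2.5'); iface.network is
    # IPv4Network('192.0.2.0/24').
    return iface.ip, iface.network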
def v4_int_to_packed(address):
"""Represent an address as 4 packed bytes in network (big-endian) order.
Args:
address: An integer representation of an IPv4 IP address.
Returns:
The integer address packed as 4 bytes in network (big-endian) order.
Raises:
ValueError: If the integer is negative or too large to be an
IPv4 IP address.
"""
try:
return _compat_to_bytes(address, 4, 'big')
except (struct.error, OverflowError):
raise ValueError("Address negative or too large for IPv4")
def v6_int_to_packed(address):
"""Represent an address as 16 packed bytes in network (big-endian) order.
Args:
address: An integer representation of an IPv6 IP address.
Returns:
The integer address packed as 16 bytes in network (big-endian) order.
"""
try:
return _compat_to_bytes(address, 16, 'big')
except (struct.error, OverflowError):
raise ValueError("Address negative or too large for IPv6")
def _split_optional_netmask(address):
"""Helper to split the netmask and raise AddressValueError if needed"""
addr = _compat_str(address).split('/')
if len(addr) > 2:
raise AddressValueError("Only one '/' permitted in %r" % address)
return addr
def _find_address_range(addresses):
"""Find a sequence of sorted deduplicated IPv#Address.
Args:
addresses: a list of IPv#Address objects.
Yields:
A tuple containing the first and last IP addresses in the sequence.
"""
it = iter(addresses)
first = last = next(it)
for ip in it:
if ip._ip != last._ip + 1:
yield first, last
first = ip
last = ip
yield first, last
def _count_righthand_zero_bits(number, bits):
"""Count the number of zero bits on the right hand side.
Args:
number: an integer.
bits: maximum number of bits to count.
Returns:
The number of zero bits on the right hand side of the number.
"""
if number == 0:
return bits
return min(bits, _compat_bit_length(~number & (number - 1)))
def summarize_address_range(first, last):
"""Summarize a network range given the first and last IP addresses.
Example:
>>> list(summarize_address_range(IPv4Address('192.0.2.0'),
... IPv4Address('192.0.2.130')))
... #doctest: +NORMALIZE_WHITESPACE
[IPv4Network('192.0.2.0/25'), IPv4Network('192.0.2.128/31'),
IPv4Network('192.0.2.130/32')]
Args:
first: the first IPv4Address or IPv6Address in the range.
last: the last IPv4Address or IPv6Address in the range.
Returns:
An iterator of the summarized IPv(4|6) network objects.
Raise:
TypeError:
If the first and last objects are not IP addresses.
If the first and last objects are not the same version.
ValueError:
If the last object is not greater than the first.
If the version of the first address is not 4 or 6.
"""
if (not (isinstance(first, _BaseAddress) and
isinstance(last, _BaseAddress))):
raise TypeError('first and last must be IP addresses, not networks')
if first.version != last.version:
raise TypeError("%s and %s are not of the same version" % (
first, last))
if first > last:
raise ValueError('last IP address must be greater than first')
if first.version == 4:
ip = IPv4Network
elif first.version == 6:
ip = IPv6Network
else:
raise ValueError('unknown IP version')
ip_bits = first._max_prefixlen
first_int = first._ip
last_int = last._ip
while first_int <= last_int:
nbits = min(_count_righthand_zero_bits(first_int, ip_bits),
_compat_bit_length(last_int - first_int + 1) - 1)
net = ip((first_int, ip_bits - nbits))
yield net
first_int += 1 << nbits
if first_int - 1 == ip._ALL_ONES:
break
def _collapse_addresses_internal(addresses):
"""Loops through the addresses, collapsing concurrent netblocks.
Example:
ip1 = IPv4Network('192.0.2.0/26')
ip2 = IPv4Network('192.0.2.64/26')
ip3 = IPv4Network('192.0.2.128/26')
ip4 = IPv4Network('192.0.2.192/26')
_collapse_addresses_internal([ip1, ip2, ip3, ip4]) ->
[IPv4Network('192.0.2.0/24')]
This shouldn't be called directly; it is called via
collapse_addresses([]).
Args:
addresses: A list of IPv4Network's or IPv6Network's
Returns:
A list of IPv4Network's or IPv6Network's depending on what we were
passed.
"""
# First merge
to_merge = list(addresses)
subnets = {}
while to_merge:
net = to_merge.pop()
supernet = net.supernet()
existing = subnets.get(supernet)
if existing is None:
subnets[supernet] = net
elif existing != net:
# Merge consecutive subnets
del subnets[supernet]
to_merge.append(supernet)
# Then iterate over resulting networks, skipping subsumed subnets
last = None
for net in sorted(subnets.values()):
if last is not None:
# Since they are sorted,
# last.network_address <= net.network_address is a given.
if last.broadcast_address >= net.broadcast_address:
continue
yield net
last = net
def collapse_addresses(addresses):
"""Collapse a list of IP objects.
Example:
collapse_addresses([IPv4Network('192.0.2.0/25'),
IPv4Network('192.0.2.128/25')]) ->
[IPv4Network('192.0.2.0/24')]
Args:
addresses: An iterator of IPv4Network or IPv6Network objects.
Returns:
An iterator of the collapsed IPv(4|6)Network objects.
Raises:
TypeError: If passed a list of mixed version objects.
"""
addrs = []
ips = []
nets = []
# split IP addresses and networks
for ip in addresses:
if isinstance(ip, _BaseAddress):
if ips and ips[-1]._version != ip._version:
raise TypeError("%s and %s are not of the same version" % (
ip, ips[-1]))
ips.append(ip)
elif ip._prefixlen == ip._max_prefixlen:
if ips and ips[-1]._version != ip._version:
raise TypeError("%s and %s are not of the same version" % (
ip, ips[-1]))
try:
ips.append(ip.ip)
except AttributeError:
ips.append(ip.network_address)
else:
if nets and nets[-1]._version != ip._version:
raise TypeError("%s and %s are not of the same version" % (
ip, nets[-1]))
nets.append(ip)
# sort and dedup
ips = sorted(set(ips))
# find consecutive address ranges in the sorted sequence and summarize them
if ips:
for first, last in _find_address_range(ips):
addrs.extend(summarize_address_range(first, last))
return _collapse_addresses_internal(addrs + nets)
def get_mixed_type_key(obj):
"""Return a key suitable for sorting between networks and addresses.
Address and Network objects are not sortable by default; they're
fundamentally different so the expression
IPv4Address('192.0.2.0') <= IPv4Network('192.0.2.0/24')
doesn't make any sense. There are some times however, where you may wish
to have ipaddress sort these for you anyway. If you need to do this, you
can use this function as the key= argument to sorted().
Args:
obj: either a Network or Address object.
Returns:
appropriate key.
"""
if isinstance(obj, _BaseNetwork):
return obj._get_networks_key()
elif isinstance(obj, _BaseAddress):
return obj._get_address_key()
return NotImplemented
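# Illustrative usage sketch (added for clarity; not part of the original
# module): sorting a mixed list of network and address objects.
def _example_mixed_sort():
    mixed = [ip_network('192.0.2.0/24'), ip_address('192.0.2.5')]
    return sorted(mixed, key=get_mixed_type_key)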
class _IPAddressBase(_TotalOrderingMixin):
"""The mother class."""
__slots__ = ()
@property
def exploded(self):
"""Return the longhand version of the IP address as a string."""
return self._explode_shorthand_ip_string()
@property
def compressed(self):
"""Return the shorthand version of the IP address as a string."""
return _compat_str(self)
@property
def reverse_pointer(self):
"""The name of the reverse DNS pointer for the IP address, e.g.:
>>> ipaddress.ip_address("127.0.0.1").reverse_pointer
'1.0.0.127.in-addr.arpa'
>>> ipaddress.ip_address("2001:db8::1").reverse_pointer
'1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa'
"""
return self._reverse_pointer()
@property
def version(self):
msg = '%200s has no version specified' % (type(self),)
raise NotImplementedError(msg)
def _check_int_address(self, address):
if address < 0:
msg = "%d (< 0) is not permitted as an IPv%d address"
raise AddressValueError(msg % (address, self._version))
if address > self._ALL_ONES:
msg = "%d (>= 2**%d) is not permitted as an IPv%d address"
raise AddressValueError(msg % (address, self._max_prefixlen,
self._version))
def _check_packed_address(self, address, expected_len):
address_len = len(address)
if address_len != expected_len:
msg = (
'%r (len %d != %d) is not permitted as an IPv%d address. '
'Did you pass in a bytes (str in Python 2) instead of'
' a unicode object?'
)
raise AddressValueError(msg % (address, address_len,
expected_len, self._version))
@classmethod
def _ip_int_from_prefix(cls, prefixlen):
"""Turn the prefix length into a bitwise netmask
Args:
prefixlen: An integer, the prefix length.
Returns:
An integer.
"""
return cls._ALL_ONES ^ (cls._ALL_ONES >> prefixlen)
@classmethod
def _prefix_from_ip_int(cls, ip_int):
"""Return prefix length from the bitwise netmask.
Args:
ip_int: An integer, the netmask in expanded bitwise format
Returns:
An integer, the prefix length.
Raises:
ValueError: If the input intermingles zeroes & ones
"""
trailing_zeroes = _count_righthand_zero_bits(ip_int,
cls._max_prefixlen)
prefixlen = cls._max_prefixlen - trailing_zeroes
leading_ones = ip_int >> trailing_zeroes
all_ones = (1 << prefixlen) - 1
if leading_ones != all_ones:
byteslen = cls._max_prefixlen // 8
details = _compat_to_bytes(ip_int, byteslen, 'big')
msg = 'Netmask pattern %r mixes zeroes & ones'
raise ValueError(msg % details)
return prefixlen
@classmethod
def _report_invalid_netmask(cls, netmask_str):
msg = '%r is not a valid netmask' % netmask_str
raise NetmaskValueError(msg)
@classmethod
def _prefix_from_prefix_string(cls, prefixlen_str):
"""Return prefix length from a numeric string
Args:
prefixlen_str: The string to be converted
Returns:
An integer, the prefix length.
Raises:
NetmaskValueError: If the input is not a valid netmask
"""
# int allows a leading +/- as well as surrounding whitespace,
# so we ensure that isn't the case
if not _BaseV4._DECIMAL_DIGITS.issuperset(prefixlen_str):
cls._report_invalid_netmask(prefixlen_str)
try:
prefixlen = int(prefixlen_str)
except ValueError:
cls._report_invalid_netmask(prefixlen_str)
if not (0 <= prefixlen <= cls._max_prefixlen):
cls._report_invalid_netmask(prefixlen_str)
return prefixlen
@classmethod
def _prefix_from_ip_string(cls, ip_str):
"""Turn a netmask/hostmask string into a prefix length
Args:
ip_str: The netmask/hostmask to be converted
Returns:
An integer, the prefix length.
Raises:
NetmaskValueError: If the input is not a valid netmask/hostmask
"""
# Parse the netmask/hostmask like an IP address.
try:
ip_int = cls._ip_int_from_string(ip_str)
except AddressValueError:
cls._report_invalid_netmask(ip_str)
# Try matching a netmask (this would be /1*0*/ as a bitwise regexp).
# Note that the two ambiguous cases (all-ones and all-zeroes) are
# treated as netmasks.
try:
return cls._prefix_from_ip_int(ip_int)
except ValueError:
pass
# Invert the bits, and try matching a /0+1+/ hostmask instead.
ip_int ^= cls._ALL_ONES
try:
return cls._prefix_from_ip_int(ip_int)
except ValueError:
cls._report_invalid_netmask(ip_str)
def __reduce__(self):
return self.__class__, (_compat_str(self),)
class _BaseAddress(_IPAddressBase):
"""A generic IP object.
This IP class contains the version independent methods which are
used by single IP addresses.
"""
__slots__ = ()
def __int__(self):
return self._ip
def __eq__(self, other):
try:
return (self._ip == other._ip and
self._version == other._version)
except AttributeError:
return NotImplemented
def __lt__(self, other):
if not isinstance(other, _IPAddressBase):
return NotImplemented
if not isinstance(other, _BaseAddress):
raise TypeError('%s and %s are not of the same type' % (
self, other))
if self._version != other._version:
raise TypeError('%s and %s are not of the same version' % (
self, other))
if self._ip != other._ip:
return self._ip < other._ip
return False
# Shorthand for Integer addition and subtraction. This is not
# meant to ever support addition/subtraction of addresses.
def __add__(self, other):
if not isinstance(other, _compat_int_types):
return NotImplemented
return self.__class__(int(self) + other)
def __sub__(self, other):
if not isinstance(other, _compat_int_types):
return NotImplemented
return self.__class__(int(self) - other)
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, _compat_str(self))
def __str__(self):
return _compat_str(self._string_from_ip_int(self._ip))
def __hash__(self):
return hash(hex(int(self._ip)))
def _get_address_key(self):
return (self._version, self)
def __reduce__(self):
return self.__class__, (self._ip,)
class _BaseNetwork(_IPAddressBase):
"""A generic IP network object.
This IP class contains the version independent methods which are
used by networks.
"""
def __init__(self, address):
self._cache = {}
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, _compat_str(self))
def __str__(self):
return '%s/%d' % (self.network_address, self.prefixlen)
def hosts(self):
"""Generate Iterator over usable hosts in a network.
This is like __iter__ except it doesn't return the network
or broadcast addresses.
"""
network = int(self.network_address)
broadcast = int(self.broadcast_address)
for x in _compat_range(network + 1, broadcast):
yield self._address_class(x)
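    # Illustrative example (added for clarity): for a small IPv4 network,
    #   >>> list(ip_network('192.0.2.0/30').hosts())
    #   [IPv4Address('192.0.2.1'), IPv4Address('192.0.2.2')]
    # i.e. the network (.0) and broadcast (.3) addresses are skipped.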
def __iter__(self):
network = int(self.network_address)
broadcast = int(self.broadcast_address)
for x in _compat_range(network, broadcast + 1):
yield self._address_class(x)
def __getitem__(self, n):
network = int(self.network_address)
broadcast = int(self.broadcast_address)
if n >= 0:
if network + n > broadcast:
raise IndexError('address out of range')
return self._address_class(network + n)
else:
n += 1
if broadcast + n < network:
raise IndexError('address out of range')
return self._address_class(broadcast + n)
def __lt__(self, other):
if not isinstance(other, _IPAddressBase):
return NotImplemented
if not isinstance(other, _BaseNetwork):
raise TypeError('%s and %s are not of the same type' % (
self, other))
if self._version != other._version:
raise TypeError('%s and %s are not of the same version' % (
self, other))
if self.network_address != other.network_address:
return self.network_address < other.network_address
if self.netmask != other.netmask:
return self.netmask < other.netmask
return False
def __eq__(self, other):
try:
return (self._version == other._version and
self.network_address == other.network_address and
int(self.netmask) == int(other.netmask))
except AttributeError:
return NotImplemented
def __hash__(self):
return hash(int(self.network_address) ^ int(self.netmask))
def __contains__(self, other):
# always false if one is v4 and the other is v6.
if self._version != other._version:
return False
# dealing with another network.
if isinstance(other, _BaseNetwork):
return False
# dealing with another address
else:
# address
return (int(self.network_address) <= int(other._ip) <=
int(self.broadcast_address))
def overlaps(self, other):
"""Tell if self is partly contained in other."""
return self.network_address in other or (
self.broadcast_address in other or (
other.network_address in self or (
other.broadcast_address in self)))
@property
def broadcast_address(self):
x = self._cache.get('broadcast_address')
if x is None:
x = self._address_class(int(self.network_address) |
int(self.hostmask))
self._cache['broadcast_address'] = x
return x
@property
def hostmask(self):
x = self._cache.get('hostmask')
if x is None:
x = self._address_class(int(self.netmask) ^ self._ALL_ONES)
self._cache['hostmask'] = x
return x
@property
def with_prefixlen(self):
return '%s/%d' % (self.network_address, self._prefixlen)
@property
def with_netmask(self):
return '%s/%s' % (self.network_address, self.netmask)
@property
def with_hostmask(self):
return '%s/%s' % (self.network_address, self.hostmask)
@property
def num_addresses(self):
"""Number of hosts in the current subnet."""
return int(self.broadcast_address) - int(self.network_address) + 1
@property
def _address_class(self):
# Returning bare address objects (rather than interfaces) allows for
# more consistent behaviour across the network address, broadcast
# address and individual host addresses.
msg = '%200s has no associated address class' % (type(self),)
raise NotImplementedError(msg)
@property
def prefixlen(self):
return self._prefixlen
def address_exclude(self, other):
"""Remove an address from a larger block.
For example:
addr1 = ip_network('192.0.2.0/28')
addr2 = ip_network('192.0.2.1/32')
list(addr1.address_exclude(addr2)) =
[IPv4Network('192.0.2.0/32'), IPv4Network('192.0.2.2/31'),
IPv4Network('192.0.2.4/30'), IPv4Network('192.0.2.8/29')]
or IPv6:
addr1 = ip_network('2001:db8::1/32')
addr2 = ip_network('2001:db8::1/128')
list(addr1.address_exclude(addr2)) =
[ip_network('2001:db8::1/128'),
ip_network('2001:db8::2/127'),
ip_network('2001:db8::4/126'),
ip_network('2001:db8::8/125'),
...
ip_network('2001:db8:8000::/33')]
Args:
other: An IPv4Network or IPv6Network object of the same type.
Returns:
An iterator of the IPv(4|6)Network objects which is self
minus other.
Raises:
TypeError: If self and other are of differing address
versions, or if other is not a network object.
ValueError: If other is not completely contained by self.
"""
if not self._version == other._version:
raise TypeError("%s and %s are not of the same version" % (
self, other))
if not isinstance(other, _BaseNetwork):
raise TypeError("%s is not a network object" % other)
if not other.subnet_of(self):
raise ValueError('%s not contained in %s' % (other, self))
if other == self:
return
# Make sure we're comparing the network of other.
other = other.__class__('%s/%s' % (other.network_address,
other.prefixlen))
s1, s2 = self.subnets()
while s1 != other and s2 != other:
if other.subnet_of(s1):
yield s2
s1, s2 = s1.subnets()
elif other.subnet_of(s2):
yield s1
s1, s2 = s2.subnets()
else:
# If we got here, there's a bug somewhere.
raise AssertionError('Error performing exclusion: '
's1: %s s2: %s other: %s' %
(s1, s2, other))
if s1 == other:
yield s2
elif s2 == other:
yield s1
else:
# If we got here, there's a bug somewhere.
raise AssertionError('Error performing exclusion: '
's1: %s s2: %s other: %s' %
(s1, s2, other))
def compare_networks(self, other):
"""Compare two IP objects.
This is only concerned about the comparison of the integer
representation of the network addresses. This means that the
host bits aren't considered at all in this method. If you want
to compare host bits, you can easily enough do a
'HostA._ip < HostB._ip'
Args:
other: An IP object.
Returns:
If the IP versions of self and other are the same, returns:
-1 if self < other:
eg: IPv4Network('192.0.2.0/25') < IPv4Network('192.0.2.128/25')
IPv6Network('2001:db8::1000/124') <
IPv6Network('2001:db8::2000/124')
0 if self == other
eg: IPv4Network('192.0.2.0/24') == IPv4Network('192.0.2.0/24')
IPv6Network('2001:db8::1000/124') ==
IPv6Network('2001:db8::1000/124')
1 if self > other
eg: IPv4Network('192.0.2.128/25') > IPv4Network('192.0.2.0/25')
IPv6Network('2001:db8::2000/124') >
IPv6Network('2001:db8::1000/124')
Raises:
TypeError if the IP versions are different.
"""
# does this need to raise a ValueError?
if self._version != other._version:
raise TypeError('%s and %s are not of the same type' % (
self, other))
# self._version == other._version below here:
if self.network_address < other.network_address:
return -1
if self.network_address > other.network_address:
return 1
# self.network_address == other.network_address below here:
if self.netmask < other.netmask:
return -1
if self.netmask > other.netmask:
return 1
return 0
def _get_networks_key(self):
"""Network-only key function.
Returns an object that identifies this address' network and
netmask. This function is a suitable "key" argument for sorted()
and list.sort().
"""
return (self._version, self.network_address, self.netmask)
def subnets(self, prefixlen_diff=1, new_prefix=None):
"""The subnets which join to make the current subnet.
In the case that self contains only one IP
(self._prefixlen == 32 for IPv4 or self._prefixlen == 128
for IPv6), yield an iterator with just ourself.
Args:
prefixlen_diff: An integer, the amount the prefix length
should be increased by. This should not be set if
new_prefix is also set.
new_prefix: The desired new prefix length. This must be a
larger number (smaller prefix) than the existing prefix.
This should not be set if prefixlen_diff is also set.
Returns:
An iterator of IPv(4|6) objects.
Raises:
ValueError: The prefixlen_diff is too small or too large.
OR
prefixlen_diff and new_prefix are both set or new_prefix
is a smaller number than the current prefix (smaller
number means a larger network)
"""
if self._prefixlen == self._max_prefixlen:
yield self
return
if new_prefix is not None:
if new_prefix < self._prefixlen:
raise ValueError('new prefix must be longer')
if prefixlen_diff != 1:
raise ValueError('cannot set prefixlen_diff and new_prefix')
prefixlen_diff = new_prefix - self._prefixlen
if prefixlen_diff < 0:
raise ValueError('prefix length diff must be > 0')
new_prefixlen = self._prefixlen + prefixlen_diff
if new_prefixlen > self._max_prefixlen:
raise ValueError(
'prefix length diff %d is invalid for netblock %s' % (
new_prefixlen, self))
start = int(self.network_address)
end = int(self.broadcast_address) + 1
step = (int(self.hostmask) + 1) >> prefixlen_diff
for new_addr in _compat_range(start, end, step):
current = self.__class__((new_addr, new_prefixlen))
yield current
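    # Illustrative example (added for clarity):
    #   >>> list(ip_network('192.0.2.0/24').subnets(prefixlen_diff=2))
    #   [IPv4Network('192.0.2.0/26'), IPv4Network('192.0.2.64/26'),
    #    IPv4Network('192.0.2.128/26'), IPv4Network('192.0.2.192/26')]
    #   >>> list(ip_network('192.0.2.0/24').subnets(new_prefix=25))
    #   [IPv4Network('192.0.2.0/25'), IPv4Network('192.0.2.128/25')]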
def supernet(self, prefixlen_diff=1, new_prefix=None):
"""The supernet containing the current network.
Args:
prefixlen_diff: An integer, the amount the prefix length of
the network should be decreased by. For example, given a
/24 network and a prefixlen_diff of 3, a supernet with a
/21 netmask is returned.
Returns:
An IPv4 network object.
Raises:
ValueError: If self.prefixlen - prefixlen_diff < 0. I.e., you have
a negative prefix length.
OR
If prefixlen_diff and new_prefix are both set or new_prefix is a
larger number than the current prefix (larger number means a
smaller network)
"""
if self._prefixlen == 0:
return self
if new_prefix is not None:
if new_prefix > self._prefixlen:
raise ValueError('new prefix must be shorter')
if prefixlen_diff != 1:
raise ValueError('cannot set prefixlen_diff and new_prefix')
prefixlen_diff = self._prefixlen - new_prefix
new_prefixlen = self.prefixlen - prefixlen_diff
if new_prefixlen < 0:
raise ValueError(
'current prefixlen is %d, cannot have a prefixlen_diff of %d' %
(self.prefixlen, prefixlen_diff))
return self.__class__((
int(self.network_address) & (int(self.netmask) << prefixlen_diff),
new_prefixlen
))
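    # Illustrative example (added for clarity):
    #   >>> ip_network('192.0.2.0/24').supernet(prefixlen_diff=3)
    #   IPv4Network('192.0.0.0/21')
    #   >>> ip_network('192.0.2.0/24').supernet(new_prefix=20)
    #   IPv4Network('192.0.0.0/20')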
@property
def is_multicast(self):
"""Test if the address is reserved for multicast use.
Returns:
A boolean, True if the address is a multicast address.
See RFC 2373 2.7 for details.
"""
return (self.network_address.is_multicast and
self.broadcast_address.is_multicast)
def subnet_of(self, other):
# always false if one is v4 and the other is v6.
if self._version != other._version:
return False
# dealing with another network.
if (hasattr(other, 'network_address') and
hasattr(other, 'broadcast_address')):
return (other.network_address <= self.network_address and
other.broadcast_address >= self.broadcast_address)
# dealing with another address
else:
raise TypeError('Unable to test subnet containment with element '
'of type %s' % type(other))
def supernet_of(self, other):
# always false if one is v4 and the other is v6.
if self._version != other._version:
return False
# dealing with another network.
if (hasattr(other, 'network_address') and
hasattr(other, 'broadcast_address')):
return (other.network_address >= self.network_address and
other.broadcast_address <= self.broadcast_address)
# dealing with another address
else:
raise TypeError('Unable to test subnet containment with element '
'of type %s' % type(other))
@property
def is_reserved(self):
"""Test if the address is otherwise IETF reserved.
Returns:
A boolean, True if the address is within one of the
reserved IPv6 Network ranges.
"""
return (self.network_address.is_reserved and
self.broadcast_address.is_reserved)
@property
def is_link_local(self):
"""Test if the address is reserved for link-local.
Returns:
A boolean, True if the address is reserved per RFC 4291.
"""
return (self.network_address.is_link_local and
self.broadcast_address.is_link_local)
@property
def is_private(self):
"""Test if this address is allocated for private networks.
Returns:
A boolean, True if the address is reserved per
iana-ipv4-special-registry or iana-ipv6-special-registry.
"""
return (self.network_address.is_private and
self.broadcast_address.is_private)
@property
def is_global(self):
"""Test if this address is allocated for public networks.
Returns:
A boolean, True if the address is not reserved per
iana-ipv4-special-registry or iana-ipv6-special-registry.
"""
return not self.is_private
@property
def is_unspecified(self):
"""Test if the address is unspecified.
Returns:
A boolean, True if this is the unspecified address as defined in
RFC 2373 2.5.2.
"""
return (self.network_address.is_unspecified and
self.broadcast_address.is_unspecified)
@property
def is_loopback(self):
"""Test if the address is a loopback address.
Returns:
A boolean, True if the address is a loopback address as defined in
RFC 2373 2.5.3.
"""
return (self.network_address.is_loopback and
self.broadcast_address.is_loopback)
class _BaseV4(object):
"""Base IPv4 object.
The following methods are used by IPv4 objects in both single IP
addresses and networks.
"""
__slots__ = ()
_version = 4
# Equivalent to 255.255.255.255 or 32 bits of 1's.
_ALL_ONES = (2 ** IPV4LENGTH) - 1
_DECIMAL_DIGITS = frozenset('0123456789')
# the valid octets for host and netmasks. only useful for IPv4.
_valid_mask_octets = frozenset([255, 254, 252, 248, 240, 224, 192, 128, 0])
_max_prefixlen = IPV4LENGTH
# There are only a handful of valid v4 netmasks, so we cache them all
# when constructed (see _make_netmask()).
_netmask_cache = {}
def _explode_shorthand_ip_string(self):
return _compat_str(self)
@classmethod
def _make_netmask(cls, arg):
"""Make a (netmask, prefix_len) tuple from the given argument.
Argument can be:
- an integer (the prefix length)
- a string representing the prefix length (e.g. "24")
- a string representing the prefix netmask (e.g. "255.255.255.0")
"""
if arg not in cls._netmask_cache:
if isinstance(arg, _compat_int_types):
prefixlen = arg
else:
try:
# Check for a netmask in prefix length form
prefixlen = cls._prefix_from_prefix_string(arg)
except NetmaskValueError:
# Check for a netmask or hostmask in dotted-quad form.
# This may raise NetmaskValueError.
prefixlen = cls._prefix_from_ip_string(arg)
netmask = IPv4Address(cls._ip_int_from_prefix(prefixlen))
cls._netmask_cache[arg] = netmask, prefixlen
return cls._netmask_cache[arg]
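    # Illustrative example (added for clarity): this is what allows a network
    # to be built from any accepted netmask spelling, e.g.
    #   >>> ip_network('192.0.2.0/24') == ip_network('192.0.2.0/255.255.255.0')
    #   True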
@classmethod
def _ip_int_from_string(cls, ip_str):
"""Turn the given IP string into an integer for comparison.
Args:
ip_str: A string, the IP ip_str.
Returns:
The IP ip_str as an integer.
Raises:
AddressValueError: if ip_str isn't a valid IPv4 Address.
"""
if not ip_str:
raise AddressValueError('Address cannot be empty')
octets = ip_str.split('.')
if len(octets) != 4:
raise AddressValueError("Expected 4 octets in %r" % ip_str)
try:
return _compat_int_from_byte_vals(
map(cls._parse_octet, octets), 'big')
except ValueError as exc:
raise AddressValueError("%s in %r" % (exc, ip_str))
@classmethod
def _parse_octet(cls, octet_str):
"""Convert a decimal octet into an integer.
Args:
octet_str: A string, the number to parse.
Returns:
The octet as an integer.
Raises:
ValueError: if the octet isn't strictly a decimal from [0..255].
"""
if not octet_str:
raise ValueError("Empty octet not permitted")
# Whitelist the characters, since int() allows a lot of bizarre stuff.
if not cls._DECIMAL_DIGITS.issuperset(octet_str):
msg = "Only decimal digits permitted in %r"
raise ValueError(msg % octet_str)
# We do the length check second, since the invalid character error
# is likely to be more informative for the user
if len(octet_str) > 3:
msg = "At most 3 characters permitted in %r"
raise ValueError(msg % octet_str)
# Convert to integer (we know digits are legal)
octet_int = int(octet_str, 10)
# Any octets that look like they *might* be written in octal,
# and which don't look exactly the same in both octal and
# decimal are rejected as ambiguous
if octet_int > 7 and octet_str[0] == '0':
msg = "Ambiguous (octal/decimal) value in %r not permitted"
raise ValueError(msg % octet_str)
if octet_int > 255:
raise ValueError("Octet %d (> 255) not permitted" % octet_int)
return octet_int
@classmethod
def _string_from_ip_int(cls, ip_int):
"""Turns a 32-bit integer into dotted decimal notation.
Args:
ip_int: An integer, the IP address.
Returns:
The IP address as a string in dotted decimal notation.
"""
return '.'.join(_compat_str(struct.unpack(b'!B', b)[0]
if isinstance(b, bytes)
else b)
for b in _compat_to_bytes(ip_int, 4, 'big'))
def _is_hostmask(self, ip_str):
"""Test if the IP string is a hostmask (rather than a netmask).
Args:
ip_str: A string, the potential hostmask.
Returns:
A boolean, True if the IP string is a hostmask.
"""
bits = ip_str.split('.')
try:
parts = [x for x in map(int, bits) if x in self._valid_mask_octets]
except ValueError:
return False
if len(parts) != len(bits):
return False
if parts[0] < parts[-1]:
return True
return False
def _reverse_pointer(self):
"""Return the reverse DNS pointer name for the IPv4 address.
This implements the method described in RFC1035 3.5.
"""
reverse_octets = _compat_str(self).split('.')[::-1]
return '.'.join(reverse_octets) + '.in-addr.arpa'
@property
def max_prefixlen(self):
return self._max_prefixlen
@property
def version(self):
return self._version
class IPv4Address(_BaseV4, _BaseAddress):
"""Represent and manipulate single IPv4 Addresses."""
__slots__ = ('_ip', '__weakref__')
def __init__(self, address):
"""
Args:
address: A string or integer representing the IP
Additionally, an integer can be passed, so
IPv4Address('192.0.2.1') == IPv4Address(3221225985).
or, more generally
IPv4Address(int(IPv4Address('192.0.2.1'))) ==
IPv4Address('192.0.2.1')
Raises:
AddressValueError: If ipaddress isn't a valid IPv4 address.
"""
# Efficient constructor from integer.
if isinstance(address, _compat_int_types):
self._check_int_address(address)
self._ip = address
return
# Constructing from a packed address
if isinstance(address, bytes):
self._check_packed_address(address, 4)
bvs = _compat_bytes_to_byte_vals(address)
self._ip = _compat_int_from_byte_vals(bvs, 'big')
return
# Assume input argument to be string or any object representation
# which converts into a formatted IP string.
addr_str = _compat_str(address)
if '/' in addr_str:
raise AddressValueError("Unexpected '/' in %r" % address)
self._ip = self._ip_int_from_string(addr_str)
@property
def packed(self):
"""The binary representation of this address."""
return v4_int_to_packed(self._ip)
@property
def is_reserved(self):
"""Test if the address is otherwise IETF reserved.
Returns:
A boolean, True if the address is within the
reserved IPv4 Network range.
"""
return self in self._constants._reserved_network
@property
def is_private(self):
"""Test if this address is allocated for private networks.
Returns:
A boolean, True if the address is reserved per
iana-ipv4-special-registry.
"""
return any(self in net for net in self._constants._private_networks)
@property
def is_global(self):
return (
self not in self._constants._public_network and
not self.is_private)
@property
def is_multicast(self):
"""Test if the address is reserved for multicast use.
Returns:
A boolean, True if the address is multicast.
See RFC 3171 for details.
"""
return self in self._constants._multicast_network
@property
def is_unspecified(self):
"""Test if the address is unspecified.
Returns:
A boolean, True if this is the unspecified address as defined in
RFC 5735 3.
"""
return self == self._constants._unspecified_address
@property
def is_loopback(self):
"""Test if the address is a loopback address.
Returns:
A boolean, True if the address is a loopback per RFC 3330.
"""
return self in self._constants._loopback_network
@property
def is_link_local(self):
"""Test if the address is reserved for link-local.
Returns:
A boolean, True if the address is link-local per RFC 3927.
"""
return self in self._constants._linklocal_network
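# Minimal sketch of the classification properties defined above, using
# addresses from the well-known reserved ranges:
#
#   >>> IPv4Address('127.0.0.1').is_loopback
#   True
#   >>> IPv4Address('10.0.0.1').is_private
#   True
#   >>> IPv4Address('224.0.0.251').is_multicast
#   True
#   >>> IPv4Address('8.8.8.8').is_global
#   True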
class IPv4Interface(IPv4Address):
def __init__(self, address):
if isinstance(address, (bytes, _compat_int_types)):
IPv4Address.__init__(self, address)
self.network = IPv4Network(self._ip)
self._prefixlen = self._max_prefixlen
return
if isinstance(address, tuple):
IPv4Address.__init__(self, address[0])
if len(address) > 1:
self._prefixlen = int(address[1])
else:
self._prefixlen = self._max_prefixlen
self.network = IPv4Network(address, strict=False)
self.netmask = self.network.netmask
self.hostmask = self.network.hostmask
return
addr = _split_optional_netmask(address)
IPv4Address.__init__(self, addr[0])
self.network = IPv4Network(address, strict=False)
self._prefixlen = self.network._prefixlen
self.netmask = self.network.netmask
self.hostmask = self.network.hostmask
def __str__(self):
return '%s/%d' % (self._string_from_ip_int(self._ip),
self.network.prefixlen)
def __eq__(self, other):
address_equal = IPv4Address.__eq__(self, other)
if not address_equal or address_equal is NotImplemented:
return address_equal
try:
return self.network == other.network
except AttributeError:
# An interface with an associated network is NOT the
# same as an unassociated address. That's why the hash
# takes the extra info into account.
return False
def __lt__(self, other):
address_less = IPv4Address.__lt__(self, other)
if address_less is NotImplemented:
return NotImplemented
try:
return self.network < other.network
except AttributeError:
# We *do* allow addresses and interfaces to be sorted. The
# unassociated address is considered less than all interfaces.
return False
def __hash__(self):
return self._ip ^ self._prefixlen ^ int(self.network.network_address)
__reduce__ = _IPAddressBase.__reduce__
@property
def ip(self):
return IPv4Address(self._ip)
@property
def with_prefixlen(self):
return '%s/%s' % (self._string_from_ip_int(self._ip),
self._prefixlen)
@property
def with_netmask(self):
return '%s/%s' % (self._string_from_ip_int(self._ip),
self.netmask)
@property
def with_hostmask(self):
return '%s/%s' % (self._string_from_ip_int(self._ip),
self.hostmask)
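# Rough sketch of how an interface ties an address to its network; the
# constructor passes strict=False, so host bits in the address are fine:
#
#   >>> iface = IPv4Interface('192.0.2.5/24')
#   >>> iface.ip, iface.network
#   (IPv4Address('192.0.2.5'), IPv4Network('192.0.2.0/24'))
#   >>> iface.with_netmask
#   '192.0.2.5/255.255.255.0'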
class IPv4Network(_BaseV4, _BaseNetwork):
"""This class represents and manipulates 32-bit IPv4 network + addresses..
Attributes: [examples for IPv4Network('192.0.2.0/27')]
.network_address: IPv4Address('192.0.2.0')
.hostmask: IPv4Address('0.0.0.31')
        .broadcast_address: IPv4Address('192.0.2.31')
.netmask: IPv4Address('255.255.255.224')
.prefixlen: 27
"""
# Class to use when creating address objects
_address_class = IPv4Address
def __init__(self, address, strict=True):
"""Instantiate a new IPv4 network object.
Args:
address: A string or integer representing the IP [& network].
'192.0.2.0/24'
'192.0.2.0/255.255.255.0'
'192.0.0.2/0.0.0.255'
are all functionally the same in IPv4. Similarly,
'192.0.2.1'
'192.0.2.1/255.255.255.255'
'192.0.2.1/32'
are also functionally equivalent. That is to say, failing to
provide a subnetmask will create an object with a mask of /32.
If the mask (portion after the / in the argument) is given in
dotted quad form, it is treated as a netmask if it starts with a
non-zero field (e.g. /255.0.0.0 == /8) and as a hostmask if it
starts with a zero field (e.g. 0.255.255.255 == /8), with the
single exception of an all-zero mask which is treated as a
netmask == /0. If no mask is given, a default of /32 is used.
Additionally, an integer can be passed, so
IPv4Network('192.0.2.1') == IPv4Network(3221225985)
or, more generally
IPv4Interface(int(IPv4Interface('192.0.2.1'))) ==
IPv4Interface('192.0.2.1')
Raises:
AddressValueError: If ipaddress isn't a valid IPv4 address.
NetmaskValueError: If the netmask isn't valid for
an IPv4 address.
ValueError: If strict is True and a network address is not
supplied.
"""
_BaseNetwork.__init__(self, address)
# Constructing from a packed address or integer
if isinstance(address, (_compat_int_types, bytes)):
self.network_address = IPv4Address(address)
self.netmask, self._prefixlen = self._make_netmask(
self._max_prefixlen)
# fixme: address/network test here.
return
if isinstance(address, tuple):
if len(address) > 1:
arg = address[1]
else:
# We weren't given an address[1]
arg = self._max_prefixlen
self.network_address = IPv4Address(address[0])
self.netmask, self._prefixlen = self._make_netmask(arg)
packed = int(self.network_address)
if packed & int(self.netmask) != packed:
if strict:
raise ValueError('%s has host bits set' % self)
else:
self.network_address = IPv4Address(packed &
int(self.netmask))
return
# Assume input argument to be string or any object representation
# which converts into a formatted IP prefix string.
addr = _split_optional_netmask(address)
self.network_address = IPv4Address(self._ip_int_from_string(addr[0]))
if len(addr) == 2:
arg = addr[1]
else:
arg = self._max_prefixlen
self.netmask, self._prefixlen = self._make_netmask(arg)
if strict:
if (IPv4Address(int(self.network_address) & int(self.netmask)) !=
self.network_address):
raise ValueError('%s has host bits set' % self)
self.network_address = IPv4Address(int(self.network_address) &
int(self.netmask))
if self._prefixlen == (self._max_prefixlen - 1):
self.hosts = self.__iter__
@property
def is_global(self):
"""Test if this address is allocated for public networks.
Returns:
A boolean, True if the address is not reserved per
iana-ipv4-special-registry.
"""
return (not (self.network_address in IPv4Network('100.64.0.0/10') and
self.broadcast_address in IPv4Network('100.64.0.0/10')) and
not self.is_private)
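# Sketch of typical IPv4Network behaviour given the constructor rules above:
#
#   >>> net = IPv4Network('192.0.2.0/28')
#   >>> net.netmask, net.num_addresses
#   (IPv4Address('255.255.255.240'), 16)
#   >>> IPv4Address('192.0.2.9') in net
#   True
#   >>> IPv4Network('192.0.2.1/28')
#   Traceback (most recent call last):
#     ...
#   ValueError: 192.0.2.1/28 has host bits set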
class _IPv4Constants(object):
_linklocal_network = IPv4Network('169.254.0.0/16')
_loopback_network = IPv4Network('127.0.0.0/8')
_multicast_network = IPv4Network('224.0.0.0/4')
_public_network = IPv4Network('100.64.0.0/10')
_private_networks = [
IPv4Network('0.0.0.0/8'),
IPv4Network('10.0.0.0/8'),
IPv4Network('127.0.0.0/8'),
IPv4Network('169.254.0.0/16'),
IPv4Network('172.16.0.0/12'),
IPv4Network('192.0.0.0/29'),
IPv4Network('192.0.0.170/31'),
IPv4Network('192.0.2.0/24'),
IPv4Network('192.168.0.0/16'),
IPv4Network('198.18.0.0/15'),
IPv4Network('198.51.100.0/24'),
IPv4Network('203.0.113.0/24'),
IPv4Network('240.0.0.0/4'),
IPv4Network('255.255.255.255/32'),
]
_reserved_network = IPv4Network('240.0.0.0/4')
_unspecified_address = IPv4Address('0.0.0.0')
IPv4Address._constants = _IPv4Constants
class _BaseV6(object):
"""Base IPv6 object.
The following methods are used by IPv6 objects in both single IP
addresses and networks.
"""
__slots__ = ()
_version = 6
_ALL_ONES = (2 ** IPV6LENGTH) - 1
_HEXTET_COUNT = 8
_HEX_DIGITS = frozenset('0123456789ABCDEFabcdef')
_max_prefixlen = IPV6LENGTH
# There are only a bunch of valid v6 netmasks, so we cache them all
# when constructed (see _make_netmask()).
_netmask_cache = {}
@classmethod
def _make_netmask(cls, arg):
"""Make a (netmask, prefix_len) tuple from the given argument.
Argument can be:
- an integer (the prefix length)
- a string representing the prefix length (e.g. "24")
- a string representing the prefix netmask (e.g. "255.255.255.0")
"""
if arg not in cls._netmask_cache:
if isinstance(arg, _compat_int_types):
prefixlen = arg
else:
prefixlen = cls._prefix_from_prefix_string(arg)
netmask = IPv6Address(cls._ip_int_from_prefix(prefixlen))
cls._netmask_cache[arg] = netmask, prefixlen
return cls._netmask_cache[arg]
@classmethod
def _ip_int_from_string(cls, ip_str):
"""Turn an IPv6 ip_str into an integer.
Args:
ip_str: A string, the IPv6 ip_str.
Returns:
An int, the IPv6 address
Raises:
AddressValueError: if ip_str isn't a valid IPv6 Address.
"""
if not ip_str:
raise AddressValueError('Address cannot be empty')
parts = ip_str.split(':')
# An IPv6 address needs at least 2 colons (3 parts).
_min_parts = 3
if len(parts) < _min_parts:
msg = "At least %d parts expected in %r" % (_min_parts, ip_str)
raise AddressValueError(msg)
# If the address has an IPv4-style suffix, convert it to hexadecimal.
if '.' in parts[-1]:
try:
ipv4_int = IPv4Address(parts.pop())._ip
except AddressValueError as exc:
raise AddressValueError("%s in %r" % (exc, ip_str))
parts.append('%x' % ((ipv4_int >> 16) & 0xFFFF))
parts.append('%x' % (ipv4_int & 0xFFFF))
# An IPv6 address can't have more than 8 colons (9 parts).
# The extra colon comes from using the "::" notation for a single
# leading or trailing zero part.
_max_parts = cls._HEXTET_COUNT + 1
if len(parts) > _max_parts:
msg = "At most %d colons permitted in %r" % (
_max_parts - 1, ip_str)
raise AddressValueError(msg)
# Disregarding the endpoints, find '::' with nothing in between.
# This indicates that a run of zeroes has been skipped.
skip_index = None
for i in _compat_range(1, len(parts) - 1):
if not parts[i]:
if skip_index is not None:
# Can't have more than one '::'
msg = "At most one '::' permitted in %r" % ip_str
raise AddressValueError(msg)
skip_index = i
# parts_hi is the number of parts to copy from above/before the '::'
# parts_lo is the number of parts to copy from below/after the '::'
if skip_index is not None:
# If we found a '::', then check if it also covers the endpoints.
parts_hi = skip_index
parts_lo = len(parts) - skip_index - 1
if not parts[0]:
parts_hi -= 1
if parts_hi:
msg = "Leading ':' only permitted as part of '::' in %r"
raise AddressValueError(msg % ip_str) # ^: requires ^::
if not parts[-1]:
parts_lo -= 1
if parts_lo:
msg = "Trailing ':' only permitted as part of '::' in %r"
raise AddressValueError(msg % ip_str) # :$ requires ::$
parts_skipped = cls._HEXTET_COUNT - (parts_hi + parts_lo)
if parts_skipped < 1:
msg = "Expected at most %d other parts with '::' in %r"
raise AddressValueError(msg % (cls._HEXTET_COUNT - 1, ip_str))
else:
# Otherwise, allocate the entire address to parts_hi. The
# endpoints could still be empty, but _parse_hextet() will check
# for that.
if len(parts) != cls._HEXTET_COUNT:
msg = "Exactly %d parts expected without '::' in %r"
raise AddressValueError(msg % (cls._HEXTET_COUNT, ip_str))
if not parts[0]:
msg = "Leading ':' only permitted as part of '::' in %r"
raise AddressValueError(msg % ip_str) # ^: requires ^::
if not parts[-1]:
msg = "Trailing ':' only permitted as part of '::' in %r"
raise AddressValueError(msg % ip_str) # :$ requires ::$
parts_hi = len(parts)
parts_lo = 0
parts_skipped = 0
try:
# Now, parse the hextets into a 128-bit integer.
ip_int = 0
for i in range(parts_hi):
ip_int <<= 16
ip_int |= cls._parse_hextet(parts[i])
ip_int <<= 16 * parts_skipped
for i in range(-parts_lo, 0):
ip_int <<= 16
ip_int |= cls._parse_hextet(parts[i])
return ip_int
except ValueError as exc:
raise AddressValueError("%s in %r" % (exc, ip_str))
@classmethod
def _parse_hextet(cls, hextet_str):
"""Convert an IPv6 hextet string into an integer.
Args:
hextet_str: A string, the number to parse.
Returns:
The hextet as an integer.
Raises:
ValueError: if the input isn't strictly a hex number from
[0..FFFF].
"""
# Whitelist the characters, since int() allows a lot of bizarre stuff.
if not cls._HEX_DIGITS.issuperset(hextet_str):
raise ValueError("Only hex digits permitted in %r" % hextet_str)
# We do the length check second, since the invalid character error
# is likely to be more informative for the user
if len(hextet_str) > 4:
msg = "At most 4 characters permitted in %r"
raise ValueError(msg % hextet_str)
# Length check means we can skip checking the integer value
return int(hextet_str, 16)
@classmethod
def _compress_hextets(cls, hextets):
"""Compresses a list of hextets.
Compresses a list of strings, replacing the longest continuous
sequence of "0" in the list with "" and adding empty strings at
the beginning or at the end of the string such that subsequently
calling ":".join(hextets) will produce the compressed version of
the IPv6 address.
Args:
hextets: A list of strings, the hextets to compress.
Returns:
A list of strings.
"""
best_doublecolon_start = -1
best_doublecolon_len = 0
doublecolon_start = -1
doublecolon_len = 0
for index, hextet in enumerate(hextets):
if hextet == '0':
doublecolon_len += 1
if doublecolon_start == -1:
# Start of a sequence of zeros.
doublecolon_start = index
if doublecolon_len > best_doublecolon_len:
# This is the longest sequence of zeros so far.
best_doublecolon_len = doublecolon_len
best_doublecolon_start = doublecolon_start
else:
doublecolon_len = 0
doublecolon_start = -1
if best_doublecolon_len > 1:
best_doublecolon_end = (best_doublecolon_start +
best_doublecolon_len)
# For zeros at the end of the address.
if best_doublecolon_end == len(hextets):
hextets += ['']
hextets[best_doublecolon_start:best_doublecolon_end] = ['']
# For zeros at the beginning of the address.
if best_doublecolon_start == 0:
hextets = [''] + hextets
return hextets
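    # Worked example of the compression above: the hextet list
    # ['2001', 'db8', '0', '0', '0', '0', '0', '1'] becomes
    # ['2001', 'db8', '', '1'], so ':'.join() yields '2001:db8::1'.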
@classmethod
def _string_from_ip_int(cls, ip_int=None):
"""Turns a 128-bit integer into hexadecimal notation.
Args:
ip_int: An integer, the IP address.
Returns:
A string, the hexadecimal representation of the address.
Raises:
ValueError: The address is bigger than 128 bits of all ones.
"""
if ip_int is None:
ip_int = int(cls._ip)
if ip_int > cls._ALL_ONES:
raise ValueError('IPv6 address is too large')
hex_str = '%032x' % ip_int
hextets = ['%x' % int(hex_str[x:x + 4], 16) for x in range(0, 32, 4)]
hextets = cls._compress_hextets(hextets)
return ':'.join(hextets)
def _explode_shorthand_ip_string(self):
"""Expand a shortened IPv6 address.
Args:
ip_str: A string, the IPv6 address.
Returns:
A string, the expanded IPv6 address.
"""
if isinstance(self, IPv6Network):
ip_str = _compat_str(self.network_address)
elif isinstance(self, IPv6Interface):
ip_str = _compat_str(self.ip)
else:
ip_str = _compat_str(self)
ip_int = self._ip_int_from_string(ip_str)
hex_str = '%032x' % ip_int
parts = [hex_str[x:x + 4] for x in range(0, 32, 4)]
if isinstance(self, (_BaseNetwork, IPv6Interface)):
return '%s/%d' % (':'.join(parts), self._prefixlen)
return ':'.join(parts)
def _reverse_pointer(self):
"""Return the reverse DNS pointer name for the IPv6 address.
This implements the method described in RFC3596 2.5.
"""
reverse_chars = self.exploded[::-1].replace(':', '')
return '.'.join(reverse_chars) + '.ip6.arpa'
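    # e.g. the pointer name for IPv6Address('2001:db8::1') starts with
    # '1.0.0.0.' and ends with '.8.b.d.0.1.0.0.2.ip6.arpa': every hex
    # nibble of the exploded address, reversed and dot-separated.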
@property
def max_prefixlen(self):
return self._max_prefixlen
@property
def version(self):
return self._version
class IPv6Address(_BaseV6, _BaseAddress):
"""Represent and manipulate single IPv6 Addresses."""
__slots__ = ('_ip', '__weakref__')
def __init__(self, address):
"""Instantiate a new IPv6 address object.
Args:
address: A string or integer representing the IP
Additionally, an integer can be passed, so
IPv6Address('2001:db8::') ==
IPv6Address(42540766411282592856903984951653826560)
or, more generally
IPv6Address(int(IPv6Address('2001:db8::'))) ==
IPv6Address('2001:db8::')
Raises:
AddressValueError: If address isn't a valid IPv6 address.
"""
# Efficient constructor from integer.
if isinstance(address, _compat_int_types):
self._check_int_address(address)
self._ip = address
return
# Constructing from a packed address
if isinstance(address, bytes):
self._check_packed_address(address, 16)
bvs = _compat_bytes_to_byte_vals(address)
self._ip = _compat_int_from_byte_vals(bvs, 'big')
return
# Assume input argument to be string or any object representation
# which converts into a formatted IP string.
addr_str = _compat_str(address)
if '/' in addr_str:
raise AddressValueError("Unexpected '/' in %r" % address)
self._ip = self._ip_int_from_string(addr_str)
@property
def packed(self):
"""The binary representation of this address."""
return v6_int_to_packed(self._ip)
@property
def is_multicast(self):
"""Test if the address is reserved for multicast use.
Returns:
A boolean, True if the address is a multicast address.
See RFC 2373 2.7 for details.
"""
return self in self._constants._multicast_network
@property
def is_reserved(self):
"""Test if the address is otherwise IETF reserved.
Returns:
A boolean, True if the address is within one of the
reserved IPv6 Network ranges.
"""
return any(self in x for x in self._constants._reserved_networks)
@property
def is_link_local(self):
"""Test if the address is reserved for link-local.
Returns:
A boolean, True if the address is reserved per RFC 4291.
"""
return self in self._constants._linklocal_network
@property
def is_site_local(self):
"""Test if the address is reserved for site-local.
Note that the site-local address space has been deprecated by RFC 3879.
Use is_private to test if this address is in the space of unique local
addresses as defined by RFC 4193.
Returns:
A boolean, True if the address is reserved per RFC 3513 2.5.6.
"""
return self in self._constants._sitelocal_network
@property
def is_private(self):
"""Test if this address is allocated for private networks.
Returns:
A boolean, True if the address is reserved per
iana-ipv6-special-registry.
"""
return any(self in net for net in self._constants._private_networks)
@property
def is_global(self):
"""Test if this address is allocated for public networks.
Returns:
A boolean, true if the address is not reserved per
iana-ipv6-special-registry.
"""
return not self.is_private
@property
def is_unspecified(self):
"""Test if the address is unspecified.
Returns:
A boolean, True if this is the unspecified address as defined in
RFC 2373 2.5.2.
"""
return self._ip == 0
@property
def is_loopback(self):
"""Test if the address is a loopback address.
Returns:
A boolean, True if the address is a loopback address as defined in
RFC 2373 2.5.3.
"""
return self._ip == 1
@property
def ipv4_mapped(self):
"""Return the IPv4 mapped address.
Returns:
If the IPv6 address is a v4 mapped address, return the
IPv4 mapped address. Return None otherwise.
"""
if (self._ip >> 32) != 0xFFFF:
return None
return IPv4Address(self._ip & 0xFFFFFFFF)
@property
def teredo(self):
"""Tuple of embedded teredo IPs.
Returns:
Tuple of the (server, client) IPs or None if the address
doesn't appear to be a teredo address (doesn't start with
2001::/32)
"""
if (self._ip >> 96) != 0x20010000:
return None
return (IPv4Address((self._ip >> 64) & 0xFFFFFFFF),
IPv4Address(~self._ip & 0xFFFFFFFF))
@property
def sixtofour(self):
"""Return the IPv4 6to4 embedded address.
Returns:
The IPv4 6to4-embedded address if present or None if the
address doesn't appear to contain a 6to4 embedded address.
"""
if (self._ip >> 112) != 0x2002:
return None
return IPv4Address((self._ip >> 80) & 0xFFFFFFFF)
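# Small sketch of the embedded-IPv4 helpers above:
#
#   >>> IPv6Address('::ffff:192.0.2.1').ipv4_mapped
#   IPv4Address('192.0.2.1')
#   >>> IPv6Address('2002:c000:204::1').sixtofour
#   IPv4Address('192.0.2.4')
#   >>> IPv6Address('2001:db8::1').sixtofour is None
#   True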
class IPv6Interface(IPv6Address):
def __init__(self, address):
if isinstance(address, (bytes, _compat_int_types)):
IPv6Address.__init__(self, address)
self.network = IPv6Network(self._ip)
self._prefixlen = self._max_prefixlen
return
if isinstance(address, tuple):
IPv6Address.__init__(self, address[0])
if len(address) > 1:
self._prefixlen = int(address[1])
else:
self._prefixlen = self._max_prefixlen
self.network = IPv6Network(address, strict=False)
self.netmask = self.network.netmask
self.hostmask = self.network.hostmask
return
addr = _split_optional_netmask(address)
IPv6Address.__init__(self, addr[0])
self.network = IPv6Network(address, strict=False)
self.netmask = self.network.netmask
self._prefixlen = self.network._prefixlen
self.hostmask = self.network.hostmask
def __str__(self):
return '%s/%d' % (self._string_from_ip_int(self._ip),
self.network.prefixlen)
def __eq__(self, other):
address_equal = IPv6Address.__eq__(self, other)
if not address_equal or address_equal is NotImplemented:
return address_equal
try:
return self.network == other.network
except AttributeError:
# An interface with an associated network is NOT the
# same as an unassociated address. That's why the hash
# takes the extra info into account.
return False
def __lt__(self, other):
address_less = IPv6Address.__lt__(self, other)
if address_less is NotImplemented:
return NotImplemented
try:
return self.network < other.network
except AttributeError:
# We *do* allow addresses and interfaces to be sorted. The
# unassociated address is considered less than all interfaces.
return False
def __hash__(self):
return self._ip ^ self._prefixlen ^ int(self.network.network_address)
__reduce__ = _IPAddressBase.__reduce__
@property
def ip(self):
return IPv6Address(self._ip)
@property
def with_prefixlen(self):
return '%s/%s' % (self._string_from_ip_int(self._ip),
self._prefixlen)
@property
def with_netmask(self):
return '%s/%s' % (self._string_from_ip_int(self._ip),
self.netmask)
@property
def with_hostmask(self):
return '%s/%s' % (self._string_from_ip_int(self._ip),
self.hostmask)
@property
def is_unspecified(self):
return self._ip == 0 and self.network.is_unspecified
@property
def is_loopback(self):
return self._ip == 1 and self.network.is_loopback
class IPv6Network(_BaseV6, _BaseNetwork):
"""This class represents and manipulates 128-bit IPv6 networks.
Attributes: [examples for IPv6('2001:db8::1000/124')]
.network_address: IPv6Address('2001:db8::1000')
.hostmask: IPv6Address('::f')
.broadcast_address: IPv6Address('2001:db8::100f')
.netmask: IPv6Address('ffff:ffff:ffff:ffff:ffff:ffff:ffff:fff0')
.prefixlen: 124
"""
# Class to use when creating address objects
_address_class = IPv6Address
def __init__(self, address, strict=True):
"""Instantiate a new IPv6 Network object.
Args:
address: A string or integer representing the IPv6 network or the
IP and prefix/netmask.
'2001:db8::/128'
'2001:db8:0000:0000:0000:0000:0000:0000/128'
'2001:db8::'
are all functionally the same in IPv6. That is to say,
failing to provide a subnetmask will create an object with
a mask of /128.
Additionally, an integer can be passed, so
IPv6Network('2001:db8::') ==
IPv6Network(42540766411282592856903984951653826560)
or, more generally
IPv6Network(int(IPv6Network('2001:db8::'))) ==
IPv6Network('2001:db8::')
strict: A boolean. If true, ensure that we have been passed
              a true network address, e.g. 2001:db8::1000/124, and not an
              IP address on a network, e.g. 2001:db8::1/124.
Raises:
AddressValueError: If address isn't a valid IPv6 address.
NetmaskValueError: If the netmask isn't valid for
an IPv6 address.
ValueError: If strict was True and a network address was not
supplied.
"""
_BaseNetwork.__init__(self, address)
# Efficient constructor from integer or packed address
if isinstance(address, (bytes, _compat_int_types)):
self.network_address = IPv6Address(address)
self.netmask, self._prefixlen = self._make_netmask(
self._max_prefixlen)
return
if isinstance(address, tuple):
if len(address) > 1:
arg = address[1]
else:
arg = self._max_prefixlen
self.netmask, self._prefixlen = self._make_netmask(arg)
self.network_address = IPv6Address(address[0])
packed = int(self.network_address)
if packed & int(self.netmask) != packed:
if strict:
raise ValueError('%s has host bits set' % self)
else:
self.network_address = IPv6Address(packed &
int(self.netmask))
return
# Assume input argument to be string or any object representation
# which converts into a formatted IP prefix string.
addr = _split_optional_netmask(address)
self.network_address = IPv6Address(self._ip_int_from_string(addr[0]))
if len(addr) == 2:
arg = addr[1]
else:
arg = self._max_prefixlen
self.netmask, self._prefixlen = self._make_netmask(arg)
if strict:
if (IPv6Address(int(self.network_address) & int(self.netmask)) !=
self.network_address):
raise ValueError('%s has host bits set' % self)
self.network_address = IPv6Address(int(self.network_address) &
int(self.netmask))
if self._prefixlen == (self._max_prefixlen - 1):
self.hosts = self.__iter__
def hosts(self):
"""Generate Iterator over usable hosts in a network.
This is like __iter__ except it doesn't return the
Subnet-Router anycast address.
"""
network = int(self.network_address)
broadcast = int(self.broadcast_address)
for x in _compat_range(network + 1, broadcast + 1):
yield self._address_class(x)
@property
def is_site_local(self):
"""Test if the address is reserved for site-local.
Note that the site-local address space has been deprecated by RFC 3879.
Use is_private to test if this address is in the space of unique local
addresses as defined by RFC 4193.
Returns:
A boolean, True if the address is reserved per RFC 3513 2.5.6.
"""
return (self.network_address.is_site_local and
self.broadcast_address.is_site_local)
class _IPv6Constants(object):
_linklocal_network = IPv6Network('fe80::/10')
_multicast_network = IPv6Network('ff00::/8')
_private_networks = [
IPv6Network('::1/128'),
IPv6Network('::/128'),
IPv6Network('::ffff:0:0/96'),
IPv6Network('100::/64'),
IPv6Network('2001::/23'),
IPv6Network('2001:2::/48'),
IPv6Network('2001:db8::/32'),
IPv6Network('2001:10::/28'),
IPv6Network('fc00::/7'),
IPv6Network('fe80::/10'),
]
_reserved_networks = [
IPv6Network('::/8'), IPv6Network('100::/8'),
IPv6Network('200::/7'), IPv6Network('400::/6'),
IPv6Network('800::/5'), IPv6Network('1000::/4'),
IPv6Network('4000::/3'), IPv6Network('6000::/3'),
IPv6Network('8000::/3'), IPv6Network('A000::/3'),
IPv6Network('C000::/3'), IPv6Network('E000::/4'),
IPv6Network('F000::/5'), IPv6Network('F800::/6'),
IPv6Network('FE00::/9'),
]
_sitelocal_network = IPv6Network('fec0::/10')
IPv6Address._constants = _IPv6Constants
| mit | -5,227,305,051,057,775,000 | 32.062268 | 86 | 0.568387 | false |
bubichain/blockchain | src/3rd/src/jsoncpp/scons-2.1.0/engine/SCons/Script/__init__.py | 21 | 14158 | """SCons.Script
This file implements the main() function used by the scons script.
Architecturally, this *is* the scons script, and will likely only be
called from the external "scons" wrapper. Consequently, anything here
should not be, or be considered, part of the build engine. If it's
something that we expect other software to want to use, it should go in
some other module. If it's specific to the "scons" script invocation,
it goes here.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Script/__init__.py 5357 2011/09/09 21:31:03 bdeegan"
import time
start_time = time.time()
import collections
import os
import sys
# Special chicken-and-egg handling of the "--debug=memoizer" flag:
#
# SCons.Memoize contains a metaclass implementation that affects how
# the other classes are instantiated. The Memoizer may add shim methods
# to classes that have methods that cache computed values in order to
# count and report the hits and misses.
#
# If we wait to enable the Memoization until after we've parsed the
# command line options normally, it will be too late, because the Memoizer
# will have already analyzed the classes that it's Memoizing and decided
# to not add the shims. So we use a special-case, up-front check for
# the "--debug=memoizer" flag and enable Memoizer before we import any
# of the other modules that use it.
_args = sys.argv + os.environ.get('SCONSFLAGS', '').split()
if "--debug=memoizer" in _args:
import SCons.Memoize
import SCons.Warnings
try:
SCons.Memoize.EnableMemoization()
except SCons.Warnings.Warning:
# Some warning was thrown. Arrange for it to be displayed
# or not after warnings are configured.
import Main
exc_type, exc_value, tb = sys.exc_info()
Main.delayed_warnings.append((exc_type, exc_value))
del _args
import SCons.Action
import SCons.Builder
import SCons.Environment
import SCons.Node.FS
import SCons.Options
import SCons.Platform
import SCons.Scanner
import SCons.SConf
import SCons.Subst
import SCons.Tool
import SCons.Util
import SCons.Variables
import SCons.Defaults
import Main
main = Main.main
# The following are global class definitions and variables that used to
# live directly in this module back before 0.96.90, when it contained
# a lot of code. Some SConscript files in widely-distributed packages
# (Blender is the specific example) actually reached into SCons.Script
# directly to use some of these. Rather than break those SConscript
# files, we're going to propagate these names into the SCons.Script
# namespace here.
#
# Some of these are commented out because it's *really* unlikely anyone
# used them, but we're going to leave the comment here to try to make
# it obvious what to do if the situation arises.
BuildTask = Main.BuildTask
CleanTask = Main.CleanTask
QuestionTask = Main.QuestionTask
#PrintHelp = Main.PrintHelp
#SConscriptSettableOptions = Main.SConscriptSettableOptions
AddOption = Main.AddOption
GetOption = Main.GetOption
SetOption = Main.SetOption
Progress = Main.Progress
GetBuildFailures = Main.GetBuildFailures
#keep_going_on_error = Main.keep_going_on_error
#print_dtree = Main.print_dtree
#print_explanations = Main.print_explanations
#print_includes = Main.print_includes
#print_objects = Main.print_objects
#print_time = Main.print_time
#print_tree = Main.print_tree
#memory_stats = Main.memory_stats
#ignore_errors = Main.ignore_errors
#sconscript_time = Main.sconscript_time
#command_time = Main.command_time
#exit_status = Main.exit_status
#profiling = Main.profiling
#repositories = Main.repositories
#
import SConscript
_SConscript = SConscript
call_stack = _SConscript.call_stack
#
Action = SCons.Action.Action
AddMethod = SCons.Util.AddMethod
AllowSubstExceptions = SCons.Subst.SetAllowableExceptions
Builder = SCons.Builder.Builder
Configure = _SConscript.Configure
Environment = SCons.Environment.Environment
#OptParser = SCons.SConsOptions.OptParser
FindPathDirs = SCons.Scanner.FindPathDirs
Platform = SCons.Platform.Platform
Return = _SConscript.Return
Scanner = SCons.Scanner.Base
Tool = SCons.Tool.Tool
WhereIs = SCons.Util.WhereIs
#
BoolVariable = SCons.Variables.BoolVariable
EnumVariable = SCons.Variables.EnumVariable
ListVariable = SCons.Variables.ListVariable
PackageVariable = SCons.Variables.PackageVariable
PathVariable = SCons.Variables.PathVariable
# Deprecated names that will go away some day.
BoolOption = SCons.Options.BoolOption
EnumOption = SCons.Options.EnumOption
ListOption = SCons.Options.ListOption
PackageOption = SCons.Options.PackageOption
PathOption = SCons.Options.PathOption
# Action factories.
Chmod = SCons.Defaults.Chmod
Copy = SCons.Defaults.Copy
Delete = SCons.Defaults.Delete
Mkdir = SCons.Defaults.Mkdir
Move = SCons.Defaults.Move
Touch = SCons.Defaults.Touch
# Pre-made, public scanners.
CScanner = SCons.Tool.CScanner
DScanner = SCons.Tool.DScanner
DirScanner = SCons.Defaults.DirScanner
ProgramScanner = SCons.Tool.ProgramScanner
SourceFileScanner = SCons.Tool.SourceFileScanner
# Functions we might still convert to Environment methods.
CScan = SCons.Defaults.CScan
DefaultEnvironment = SCons.Defaults.DefaultEnvironment
# Other variables we provide.
class TargetList(collections.UserList):
def _do_nothing(self, *args, **kw):
pass
def _add_Default(self, list):
self.extend(list)
def _clear(self):
del self[:]
ARGUMENTS = {}
ARGLIST = []
BUILD_TARGETS = TargetList()
COMMAND_LINE_TARGETS = []
DEFAULT_TARGETS = []
# BUILD_TARGETS can be modified in the SConscript files. If so, we
# want to treat the modified BUILD_TARGETS list as if they specified
# targets on the command line. To do that, though, we need to know if
# BUILD_TARGETS was modified through "official" APIs or by hand. We do
# this by updating two lists in parallel, the documented BUILD_TARGETS
# list, above, and this internal _build_plus_default targets list which
# should only have "official" API changes. Then Script/Main.py can
# compare these two afterwards to figure out if the user added their
# own targets to BUILD_TARGETS.
_build_plus_default = TargetList()
def _Add_Arguments(alist):
for arg in alist:
a, b = arg.split('=', 1)
ARGUMENTS[a] = b
ARGLIST.append((a, b))
def _Add_Targets(tlist):
if tlist:
COMMAND_LINE_TARGETS.extend(tlist)
BUILD_TARGETS.extend(tlist)
BUILD_TARGETS._add_Default = BUILD_TARGETS._do_nothing
BUILD_TARGETS._clear = BUILD_TARGETS._do_nothing
_build_plus_default.extend(tlist)
_build_plus_default._add_Default = _build_plus_default._do_nothing
_build_plus_default._clear = _build_plus_default._do_nothing
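# Rough illustration (the command-line split itself happens in the option
# parsing in Main.py, so treat the calling side as an assumption): for
# "scons CC=clang debug=1 build/prog", Main.py ends up calling
# _Add_Arguments(['CC=clang', 'debug=1']) and _Add_Targets(['build/prog']),
# leaving ARGUMENTS == {'CC': 'clang', 'debug': '1'},
# ARGLIST == [('CC', 'clang'), ('debug', '1')] and
# COMMAND_LINE_TARGETS == ['build/prog'].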
def _Set_Default_Targets_Has_Been_Called(d, fs):
return DEFAULT_TARGETS
def _Set_Default_Targets_Has_Not_Been_Called(d, fs):
if d is None:
d = [fs.Dir('.')]
return d
_Get_Default_Targets = _Set_Default_Targets_Has_Not_Been_Called
def _Set_Default_Targets(env, tlist):
global DEFAULT_TARGETS
global _Get_Default_Targets
_Get_Default_Targets = _Set_Default_Targets_Has_Been_Called
for t in tlist:
if t is None:
# Delete the elements from the list in-place, don't
# reassign an empty list to DEFAULT_TARGETS, so that the
# variables will still point to the same object we point to.
del DEFAULT_TARGETS[:]
BUILD_TARGETS._clear()
_build_plus_default._clear()
elif isinstance(t, SCons.Node.Node):
DEFAULT_TARGETS.append(t)
BUILD_TARGETS._add_Default([t])
_build_plus_default._add_Default([t])
else:
nodes = env.arg2nodes(t, env.fs.Entry)
DEFAULT_TARGETS.extend(nodes)
BUILD_TARGETS._add_Default(nodes)
_build_plus_default._add_Default(nodes)
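# In practice an SConscript call like Default('prog') funnels into
# _Set_Default_Targets() and extends DEFAULT_TARGETS (plus the two target
# lists), while Default(None) empties them in place; the lists are mutated
# rather than rebound so external references stay valid.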
#
help_text = None
def HelpFunction(text):
global help_text
if SCons.Script.help_text is None:
SCons.Script.help_text = text
else:
help_text = help_text + text
#
# Will be non-zero if we are reading an SConscript file.
sconscript_reading = 0
#
def Variables(files=[], args=ARGUMENTS):
return SCons.Variables.Variables(files, args)
def Options(files=[], args=ARGUMENTS):
return SCons.Options.Options(files, args)
# The list of global functions to add to the SConscript name space
# that end up calling corresponding methods or Builders in the
# DefaultEnvironment().
GlobalDefaultEnvironmentFunctions = [
# Methods from the SConsEnvironment class, above.
'Default',
'EnsurePythonVersion',
'EnsureSConsVersion',
'Exit',
'Export',
'GetLaunchDir',
'Help',
'Import',
#'SConscript', is handled separately, below.
'SConscriptChdir',
# Methods from the Environment.Base class.
'AddPostAction',
'AddPreAction',
'Alias',
'AlwaysBuild',
'BuildDir',
'CacheDir',
'Clean',
#The Command() method is handled separately, below.
'Decider',
'Depends',
'Dir',
'NoClean',
'NoCache',
'Entry',
'Execute',
'File',
'FindFile',
'FindInstalledFiles',
'FindSourceFiles',
'Flatten',
'GetBuildPath',
'Glob',
'Ignore',
'Install',
'InstallAs',
'Literal',
'Local',
'ParseDepends',
'Precious',
'Repository',
'Requires',
'SConsignFile',
'SideEffect',
'SourceCode',
'SourceSignatures',
'Split',
'Tag',
'TargetSignatures',
'Value',
'VariantDir',
]
GlobalDefaultBuilders = [
# Supported builders.
'CFile',
'CXXFile',
'DVI',
'Jar',
'Java',
'JavaH',
'Library',
'M4',
'MSVSProject',
'Object',
'PCH',
'PDF',
'PostScript',
'Program',
'RES',
'RMIC',
'SharedLibrary',
'SharedObject',
'StaticLibrary',
'StaticObject',
'Tar',
'TypeLibrary',
'Zip',
'Package',
]
for name in GlobalDefaultEnvironmentFunctions + GlobalDefaultBuilders:
exec "%s = _SConscript.DefaultEnvironmentCall(%s)" % (name, repr(name))
del name
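# After the loop above, module-level calls such as Program('hello.c') or
# Alias('install') behave like DefaultEnvironment().Program(...) and
# DefaultEnvironment().Alias(...): each name is bound to a
# DefaultEnvironmentCall proxy rather than to a plain function.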
# There are a handful of variables that used to live in the
# Script/SConscript.py module that some SConscript files out there were
# accessing directly as SCons.Script.SConscript.*. The problem is that
# "SConscript" in this namespace is no longer a module, it's a global
# function call--or more precisely, an object that implements a global
# function call through the default Environment. Nevertheless, we can
# maintain backwards compatibility for SConscripts that were reaching in
# this way by hanging some attributes off the "SConscript" object here.
SConscript = _SConscript.DefaultEnvironmentCall('SConscript')
# Make SConscript look enough like the module it used to be so
# that pychecker doesn't barf.
SConscript.__name__ = 'SConscript'
SConscript.Arguments = ARGUMENTS
SConscript.ArgList = ARGLIST
SConscript.BuildTargets = BUILD_TARGETS
SConscript.CommandLineTargets = COMMAND_LINE_TARGETS
SConscript.DefaultTargets = DEFAULT_TARGETS
# The global Command() function must be handled differently than the
# global functions for other construction environment methods because
# we want people to be able to use Actions that must expand $TARGET
# and $SOURCE later, when (and if) the Action is invoked to build
# the target(s). We do this with the subst=1 argument, which creates
# a DefaultEnvironmentCall instance that wraps up a normal default
# construction environment that performs variable substitution, not a
# proxy that doesn't.
#
# There's a flaw here, though, because any other $-variables on a command
# line will *also* be expanded, each to a null string, but that should
# only be a problem in the unusual case where someone was passing a '$'
# on a command line and *expected* the $ to get through to the shell
# because they were calling Command() and not env.Command()... This is
# unlikely enough that we're going to leave this as is and cross that
# bridge if someone actually comes to it.
Command = _SConscript.DefaultEnvironmentCall('Command', subst=1)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| apache-2.0 | 1,617,008,442,936,210,700 | 33.364078 | 101 | 0.681876 | false |
abligh/xen | tools/python/xen/xend/XendTask.py | 48 | 7801 | #===========================================================================
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#============================================================================
# Copyright (C) 2007 XenSource Ltd
#============================================================================
from xen.xend.XendAPIConstants import XEN_API_TASK_STATUS_TYPE
from xen.xend.XendLogging import log
import thread
import threading
class XendTask(threading.Thread):
"""Represents a Asynchronous Task used by Xen API.
Basically proxies the callable object in a thread and returns the
results via self.{type,result,error_info}.
@cvar task_progress: Thread local storage for progress tracking.
It is a dict indexed by thread_id. Note that the
thread_id may be reused when the previous
thread with the thread_id ends.
@cvar task_progress_lock: lock on thread access to task_progress
"""
# progress stack:
# thread_id : [(start_task, end_task),
# (start_sub_task, end_sub_task)..]
    # example : (0, 100), (50, 100), (50, 100) ...
    # That would mean that the task is 75% complete,
    # as it is 50% of the last 50% of the task.
task_progress = {}
task_progress_lock = threading.Lock()
def __init__(self, uuid, func, args, func_name, return_type, label, desc,
session):
"""
@param uuid: UUID of the task
@type uuid: string
@param func: Method to call (from XendAPI)
@type func: callable object
@param args: arguments to pass to function
@type args: list or tuple
@param label: name label of the task.
@type label: string
@param desc: name description of the task.
@type desc: string
@param func_name: function name, eg ('VM.start')
@type desc: string
"""
threading.Thread.__init__(self)
self.status_lock = threading.Lock()
self.status = XEN_API_TASK_STATUS_TYPE[0]
self.progress = 0
self.type = return_type
self.uuid = uuid
self.result = None
self.error_info = []
self.name_label = label or func.__name__
self.name_description = desc
self.thread_id = 0
self.func_name = func_name
self.func = func
self.args = args
self.session = session
def set_status(self, new_status):
self.status_lock.acquire()
try:
self.status = new_status
finally:
self.status_lock.release()
def get_status(self):
self.status_lock.acquire()
try:
return self.status
finally:
self.status_lock.release()
def run(self):
"""Runs the method and stores the result for later access.
Is invoked by threading.Thread.start().
"""
self.thread_id = thread.get_ident()
self.task_progress_lock.acquire()
try:
self.task_progress[self.thread_id] = {}
self.progress = 0
finally:
self.task_progress_lock.release()
try:
result = self.func(*self.args)
if result['Status'] == 'Success':
self.result = result['Value']
self.set_status(XEN_API_TASK_STATUS_TYPE[1])
else:
self.error_info = result['ErrorDescription']
self.set_status(XEN_API_TASK_STATUS_TYPE[2])
except Exception, e:
log.exception('Error running Async Task')
self.error_info = ['INTERNAL ERROR', str(e)]
self.set_status(XEN_API_TASK_STATUS_TYPE[2])
self.task_progress_lock.acquire()
try:
del self.task_progress[self.thread_id]
self.progress = 100
finally:
self.task_progress_lock.release()
def get_record(self):
"""Returns a Xen API compatible record."""
return {
'uuid': self.uuid,
'name_label': self.name_label,
'name_description': self.name_description,
'status': self.status,
'progress': self.get_progress(),
'type': self.type,
'result': self.result,
'error_info': self.error_info,
'allowed_operations': {},
'session': self.session,
}
def get_progress(self):
""" Checks the thread local progress storage. """
if self.status != XEN_API_TASK_STATUS_TYPE[0]:
return 100
self.task_progress_lock.acquire()
try:
# Pop each progress range in the stack and map it on to
# the next progress range until we find out cumulative
# progress based on the (start, end) range of each level
start = 0
prog_stack = self.task_progress.get(self.thread_id, [])[:]
if len(prog_stack) > 0:
start, stop = prog_stack.pop()
while prog_stack:
new_start, new_stop = prog_stack.pop()
start = new_start + ((new_stop - new_start)/100.0 * start)
# only update progress if it increases, this will prevent
# progress from going backwards when tasks are popped off
# the stack
if start > self.progress:
self.progress = int(start)
finally:
self.task_progress_lock.release()
return self.progress
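    # Worked example of the mapping above, assuming the progress stack is
    # [(0, 100), (50, 100), (50, 100)]: the innermost (50, 100) is popped
    # first, giving start = 50; folding in the next (50, 100) gives
    # 50 + (100 - 50) / 100.0 * 50 = 75; folding in (0, 100) leaves 75,
    # so get_progress() reports 75.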
def log_progress(cls, progress_min, progress_max,
func, *args, **kwds):
""" Callable function wrapper that logs the progress of the
function to thread local storage for task progress calculation.
This is a class method so other parts of Xend will update
the task progress by calling:
        XendTask.log_progress(progress_min, progress_max,
                              func, *args, **kwds)
        The progress is stored in thread local storage and the result
        of func(*args, **kwds) is returned to the caller.
"""
thread_id = thread.get_ident()
retval = None
# Log the start of the method
cls.task_progress_lock.acquire()
try:
if type(cls.task_progress.get(thread_id)) != list:
cls.task_progress[thread_id] = []
cls.task_progress[thread_id].append((progress_min,
progress_max))
finally:
cls.task_progress_lock.release()
# Execute the method
retval = func(*args, **kwds)
# Log the end of the method by popping the progress range
# off the stack.
cls.task_progress_lock.acquire()
try:
cls.task_progress[thread_id].pop()
finally:
cls.task_progress_lock.release()
return retval
log_progress = classmethod(log_progress)
| gpl-2.0 | 4,239,047,939,605,134,000 | 33.825893 | 78 | 0.548391 | false |
tcheehow/MissionPlanner | Lib/distutils/command/bdist_dumb.py | 50 | 5259 | """distutils.command.bdist_dumb
Implements the Distutils 'bdist_dumb' command (create a "dumb" built
distribution -- i.e., just an archive to be unpacked under $prefix or
$exec_prefix)."""
__revision__ = "$Id$"
import os
from sysconfig import get_python_version
from distutils.util import get_platform
from distutils.core import Command
from distutils.dir_util import remove_tree, ensure_relative
from distutils.errors import DistutilsPlatformError
from distutils import log
class bdist_dumb (Command):
description = 'create a "dumb" built distribution'
user_options = [('bdist-dir=', 'd',
"temporary directory for creating the distribution"),
('plat-name=', 'p',
"platform name to embed in generated filenames "
"(default: %s)" % get_platform()),
('format=', 'f',
"archive format to create (tar, ztar, gztar, zip)"),
('keep-temp', 'k',
"keep the pseudo-installation tree around after " +
"creating the distribution archive"),
('dist-dir=', 'd',
"directory to put final built distributions in"),
('skip-build', None,
"skip rebuilding everything (for testing/debugging)"),
('relative', None,
"build the archive using relative paths"
"(default: false)"),
('owner=', 'u',
"Owner name used when creating a tar file"
" [default: current user]"),
('group=', 'g',
"Group name used when creating a tar file"
" [default: current group]"),
]
boolean_options = ['keep-temp', 'skip-build', 'relative']
default_format = { 'posix': 'gztar',
'nt': 'zip',
'os2': 'zip' }
def initialize_options (self):
self.bdist_dir = None
self.plat_name = None
self.format = None
self.keep_temp = 0
self.dist_dir = None
self.skip_build = 0
self.relative = 0
self.owner = None
self.group = None
def finalize_options(self):
if self.bdist_dir is None:
bdist_base = self.get_finalized_command('bdist').bdist_base
self.bdist_dir = os.path.join(bdist_base, 'dumb')
if self.format is None:
try:
self.format = self.default_format[os.name]
except KeyError:
raise DistutilsPlatformError, \
("don't know how to create dumb built distributions " +
"on platform %s") % os.name
self.set_undefined_options('bdist',
('dist_dir', 'dist_dir'),
('plat_name', 'plat_name'))
def run(self):
if not self.skip_build:
self.run_command('build')
install = self.reinitialize_command('install', reinit_subcommands=1)
install.root = self.bdist_dir
install.skip_build = self.skip_build
install.warn_dir = 0
log.info("installing to %s" % self.bdist_dir)
self.run_command('install')
# And make an archive relative to the root of the
# pseudo-installation tree.
archive_basename = "%s.%s" % (self.distribution.get_fullname(),
self.plat_name)
# OS/2 objects to any ":" characters in a filename (such as when
# a timestamp is used in a version) so change them to hyphens.
if os.name == "os2":
archive_basename = archive_basename.replace(":", "-")
pseudoinstall_root = os.path.join(self.dist_dir, archive_basename)
if not self.relative:
archive_root = self.bdist_dir
else:
if (self.distribution.has_ext_modules() and
(install.install_base != install.install_platbase)):
raise DistutilsPlatformError, \
("can't make a dumb built distribution where "
"base and platbase are different (%s, %s)"
% (repr(install.install_base),
repr(install.install_platbase)))
else:
archive_root = os.path.join(self.bdist_dir,
ensure_relative(install.install_base))
# Make the archive
filename = self.make_archive(pseudoinstall_root,
self.format, root_dir=archive_root,
owner=self.owner, group=self.group)
if self.distribution.has_ext_modules():
pyversion = get_python_version()
else:
pyversion = 'any'
self.distribution.dist_files.append(('bdist_dumb', pyversion,
filename))
if not self.keep_temp:
remove_tree(self.bdist_dir, dry_run=self.dry_run)
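    # For a distribution named "foo" at version 1.0 built with the default
    # gztar format on a linux-x86_64 box, the steps above would produce
    # something like dist/foo-1.0.linux-x86_64.tar.gz (platform string and
    # extension are illustrative; they come from plat_name and the format).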
| gpl-3.0 | 5,214,269,814,029,726,000 | 37.840909 | 77 | 0.508652 | false |
pypingou/mock | docs/conf.py | 7 | 6297 | # -*- coding: utf-8 -*-
#
# Mock documentation build configuration file, created by
# sphinx-quickstart on Mon Nov 17 18:12:00 2008.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed automatically).
#
# All configuration values have a default value; values that are commented out
# serve to show the default value.
import sys, os
sys.path.insert(0, os.path.abspath('..'))
# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.append(os.path.abspath('some/directory'))
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.doctest']
doctest_global_setup = """
import os
import sys
import mock
from mock import * # yeah, I know :-/
import unittest2
import __main__
if os.getcwd() not in sys.path:
sys.path.append(os.getcwd())
# keep a reference to __main__
sys.modules['__main'] = __main__
class ProxyModule(object):
def __init__(self):
self.__dict__ = globals()
sys.modules['__main__'] = ProxyModule()
"""
doctest_global_cleanup = """
sys.modules['__main__'] = sys.modules['__main']
"""
html_theme = 'nature'
html_theme_options = {}
# Add any paths that contain templates here, relative to this directory.
#templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.txt'
# The master toctree document.
master_doc = 'index'
# General substitutions.
project = u'Mock'
copyright = u'2007-2015, Michael Foord & the mock team'
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents. Supplied by pbr.
#
# The short X.Y version.
version = None
# The full version, including alpha/beta/rc tags.
release = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used: (Set from pbr)
today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directories, that shouldn't be searched
# for source files.
exclude_trees = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'friendly'
# Options for HTML output
# -----------------------
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
#html_style = 'adctheme.css'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_use_modindex = False
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, the reST sources are included in the HTML build as _sources/<name>.
#html_copy_source = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'Mockdoc'
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
latex_font_size = '12pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
latex_documents = [
('index', 'Mock.tex', u'Mock Documentation',
u'Michael Foord', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_use_modindex = False
| bsd-2-clause | 96,406,130,217,954,750 | 29.274038 | 82 | 0.711768 | false |
wtmmac/airflow | airflow/operators/hive_stats_operator.py | 38 | 6082 | from builtins import str
from builtins import zip
from collections import OrderedDict
import json
import logging
from airflow.utils import AirflowException
from airflow.hooks import PrestoHook, HiveMetastoreHook, MySqlHook
from airflow.models import BaseOperator
from airflow.utils import apply_defaults
class HiveStatsCollectionOperator(BaseOperator):
"""
Gathers partition statistics using a dynamically generated Presto
query, inserts the stats into a MySql table with this format. Stats
overwrite themselves if you rerun the same date/partition.
``
CREATE TABLE hive_stats (
ds VARCHAR(16),
table_name VARCHAR(500),
metric VARCHAR(200),
value BIGINT
);
``
:param table: the source table, in the format ``database.table_name``
:type table: str
:param partition: the source partition
:type partition: dict of {col:value}
:param extra_exprs: dict of expression to run against the table where
keys are metric names and values are Presto compatible expressions
:type extra_exprs: dict
:param col_blacklist: list of columns to blacklist, consider
blacklisting blobs, large json columns, ...
:type col_blacklist: list
:param assignment_func: a function that receives a column name and
        a type, and returns a dict of metric names and Presto expressions.
If None is returned, the global defaults are applied. If an
empty dictionary is returned, no stats are computed for that
column.
:type assignment_func: function
"""
template_fields = ('table', 'partition', 'ds', 'dttm')
ui_color = '#aff7a6'
@apply_defaults
def __init__(
self,
table,
partition,
extra_exprs=None,
col_blacklist=None,
assignment_func=None,
metastore_conn_id='metastore_default',
presto_conn_id='presto_default',
mysql_conn_id='airflow_db',
*args, **kwargs):
super(HiveStatsCollectionOperator, self).__init__(*args, **kwargs)
self.table = table
self.partition = partition
self.extra_exprs = extra_exprs or {}
self.col_blacklist = col_blacklist or {}
self.metastore_conn_id = metastore_conn_id
self.presto_conn_id = presto_conn_id
self.mysql_conn_id = mysql_conn_id
self.assignment_func = assignment_func
self.ds = '{{ ds }}'
self.dttm = '{{ execution_date.isoformat() }}'
def get_default_exprs(self, col, col_type):
if col in self.col_blacklist:
return {}
d = {}
d[(col, 'non_null')] = "COUNT({col})"
        if col_type in ['double', 'int', 'bigint', 'float']:
d[(col, 'sum')] = 'SUM({col})'
d[(col, 'min')] = 'MIN({col})'
d[(col, 'max')] = 'MAX({col})'
d[(col, 'avg')] = 'AVG({col})'
elif col_type == 'boolean':
d[(col, 'true')] = 'SUM(CASE WHEN {col} THEN 1 ELSE 0 END)'
d[(col, 'false')] = 'SUM(CASE WHEN NOT {col} THEN 1 ELSE 0 END)'
elif col_type in ['string']:
d[(col, 'len')] = 'SUM(CAST(LENGTH({col}) AS BIGINT))'
d[(col, 'approx_distinct')] = 'APPROX_DISTINCT({col})'
return {k: v.format(col=col) for k, v in d.items()}
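    # A minimal sketch of a custom ``assignment_func`` honoring the contract in
    # the class docstring (the function name and the ``_json`` suffix check are
    # hypothetical, not part of Airflow):
    #
    #   def sample_assignment_func(col, col_type):
    #       if col.endswith('_json'):
    #           return {}      # empty dict -> no stats computed for this column
    #       if col_type == 'string':
    #           return {(col, 'approx_distinct'): 'APPROX_DISTINCT({})'.format(col)}
    #       return None        # None -> fall back to the defaults above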
def execute(self, context=None):
metastore = HiveMetastoreHook(metastore_conn_id=self.metastore_conn_id)
table = metastore.get_table(table_name=self.table)
field_types = {col.name: col.type for col in table.sd.cols}
exprs = {
('', 'count'): 'COUNT(*)'
}
for col, col_type in list(field_types.items()):
d = {}
if self.assignment_func:
d = self.assignment_func(col, col_type)
if d is None:
d = self.get_default_exprs(col, col_type)
else:
d = self.get_default_exprs(col, col_type)
exprs.update(d)
exprs.update(self.extra_exprs)
exprs = OrderedDict(exprs)
exprs_str = ",\n ".join([
v + " AS " + k[0] + '__' + k[1]
for k, v in exprs.items()])
where_clause = [
"{0} = '{1}'".format(k, v) for k, v in self.partition.items()]
where_clause = " AND\n ".join(where_clause)
sql = """
SELECT
{exprs_str}
FROM {self.table}
WHERE
{where_clause};
""".format(**locals())
hook = PrestoHook(presto_conn_id=self.presto_conn_id)
logging.info('Executing SQL check: ' + sql)
row = hook.get_first(hql=sql)
logging.info("Record: " + str(row))
if not row:
raise AirflowException("The query returned None")
part_json = json.dumps(self.partition, sort_keys=True)
logging.info("Deleting rows from previous runs if they exist")
mysql = MySqlHook(self.mysql_conn_id)
sql = """
SELECT 1 FROM hive_stats
WHERE
table_name='{self.table}' AND
partition_repr='{part_json}' AND
dttm='{self.dttm}'
LIMIT 1;
""".format(**locals())
if mysql.get_records(sql):
sql = """
DELETE FROM hive_stats
WHERE
table_name='{self.table}' AND
partition_repr='{part_json}' AND
dttm='{self.dttm}';
""".format(**locals())
mysql.run(sql)
logging.info("Pivoting and loading cells into the Airflow db")
rows = [
(self.ds, self.dttm, self.table, part_json) +
(r[0][0], r[0][1], r[1])
for r in zip(exprs, row)]
mysql.insert_rows(
table='hive_stats',
rows=rows,
target_fields=[
'ds',
'dttm',
'table_name',
'partition_repr',
'col',
'metric',
'value',
]
)
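# A minimal usage sketch; the DAG object, task id and table/partition values
# below are assumed for illustration, only the operator arguments are real:
#
#   stats = HiveStatsCollectionOperator(
#       task_id='collect_stats',
#       table='default.my_table',
#       partition={'ds': '{{ ds }}'},
#       col_blacklist=['raw_payload'],
#       dag=dag)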
| apache-2.0 | -445,765,531,088,700,800 | 34.156069 | 79 | 0.546695 | false |
chugunovyar/factoryForBuild | env/lib/python2.7/site-packages/flower/events.py | 16 | 4189 | from __future__ import absolute_import
from __future__ import with_statement
import time
import shelve
import logging
import threading
import collections
from functools import partial
import celery
from tornado.ioloop import PeriodicCallback
from tornado.ioloop import IOLoop
from celery.events import EventReceiver
from celery.events.state import State
from . import api
try:
from collections import Counter
except ImportError:
from .utils.backports.collections import Counter
logger = logging.getLogger(__name__)
class EventsState(State):
# EventsState object is created and accessed only from ioloop thread
def __init__(self, *args, **kwargs):
super(EventsState, self).__init__(*args, **kwargs)
self.counter = collections.defaultdict(Counter)
def event(self, event):
worker_name = event['hostname']
event_type = event['type']
self.counter[worker_name][event_type] += 1
# Send event to api subscribers (via websockets)
classname = api.events.getClassName(event_type)
cls = getattr(api.events, classname, None)
if cls:
cls.send_message(event)
# Save the event
super(EventsState, self).event(event)
class Events(threading.Thread):
events_enable_interval = 5000
def __init__(self, capp, db=None, persistent=False,
enable_events=True, io_loop=None, **kwargs):
threading.Thread.__init__(self)
self.daemon = True
self.io_loop = io_loop or IOLoop.instance()
self.capp = capp
self.db = db
self.persistent = persistent
self.enable_events = enable_events
self.state = None
if self.persistent and celery.__version__ < '3.0.15':
logger.warning('Persistent mode is available with '
'Celery 3.0.15 and later')
self.persistent = False
if self.persistent:
logger.debug("Loading state from '%s'...", self.db)
state = shelve.open(self.db)
if state:
self.state = state['events']
state.close()
if not self.state:
self.state = EventsState(**kwargs)
self.timer = PeriodicCallback(self.on_enable_events,
self.events_enable_interval)
def start(self):
threading.Thread.start(self)
# Celery versions prior to 2 don't support enable_events
if self.enable_events and celery.VERSION[0] > 2:
self.timer.start()
def stop(self):
if self.persistent:
logger.debug("Saving state to '%s'...", self.db)
state = shelve.open(self.db)
state['events'] = self.state
state.close()
def run(self):
try_interval = 1
while True:
try:
try_interval *= 2
with self.capp.connection() as conn:
recv = EventReceiver(conn,
handlers={"*": self.on_event},
app=self.capp)
try_interval = 1
recv.capture(limit=None, timeout=None, wakeup=True)
except (KeyboardInterrupt, SystemExit):
try:
import _thread as thread
except ImportError:
import thread
thread.interrupt_main()
except Exception as e:
logger.error("Failed to capture events: '%s', "
"trying again in %s seconds.",
e, try_interval)
logger.debug(e, exc_info=True)
time.sleep(try_interval)
def on_enable_events(self):
# Periodically enable events for workers
# launched after flower
try:
self.capp.control.enable_events()
except Exception as e:
logger.debug("Failed to enable events: '%s'", e)
def on_event(self, event):
# Call EventsState.event in ioloop thread to avoid synchronization
self.io_loop.add_callback(partial(self.state.event, event))
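# A minimal wiring sketch (the Celery app and database file name are assumed):
#
#   from celery import Celery
#   events = Events(Celery(), db='flower.db', persistent=True,
#                   io_loop=IOLoop.instance())
#   events.start()
#
# Note that run() retries capturing events with an exponential back-off:
# try_interval doubles after every failure and resets to 1 on success.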
| gpl-3.0 | 6,311,138,550,801,763,000 | 29.576642 | 74 | 0.571258 | false |
bbenko/shinkicker | django/core/management/commands/diffsettings.py | 411 | 1296 | from django.core.management.base import NoArgsCommand
def module_to_dict(module, omittable=lambda k: k.startswith('_')):
"Converts a module namespace to a Python dictionary. Used by get_settings_diff."
return dict([(k, repr(v)) for k, v in module.__dict__.items() if not omittable(k)])
class Command(NoArgsCommand):
help = """Displays differences between the current settings.py and Django's
default settings. Settings that don't appear in the defaults are
followed by "###"."""
requires_model_validation = False
def handle_noargs(self, **options):
# Inspired by Postfix's "postconf -n".
from django.conf import settings, global_settings
# Because settings are imported lazily, we need to explicitly load them.
settings._setup()
user_settings = module_to_dict(settings._wrapped)
default_settings = module_to_dict(global_settings)
output = []
keys = user_settings.keys()
keys.sort()
for key in keys:
if key not in default_settings:
output.append("%s = %s ###" % (key, user_settings[key]))
elif user_settings[key] != default_settings[key]:
output.append("%s = %s" % (key, user_settings[key]))
return '\n'.join(output)
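# Typical invocation and output (setting names and values are illustrative):
#
#   $ python manage.py diffsettings
#   DEBUG = True
#   MY_CUSTOM_SETTING = 42  ###
#
# Settings marked with "###" do not exist in Django's global defaults.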
| bsd-3-clause | 2,969,228,023,142,232,600 | 39.5 | 87 | 0.635031 | false |
RackSec/ansible | lib/ansible/modules/network/lenovo/cnos_vlag.py | 59 | 13338 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Lenovo, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Module to send VLAG commands to Lenovo Switches
# Lenovo Networking
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cnos_vlag
author: "Dave Kasberg (@dkasberg)"
short_description: Manage VLAG resources and attributes on devices running Lenovo CNOS
description:
- This module allows you to work with virtual Link Aggregation Groups
(vLAG) related configurations. The operators used are overloaded to ensure
control over switch vLAG configurations. Apart from the regular device
connection related attributes, there are four vLAG arguments which are
overloaded variables that will perform further configurations. They are
vlagArg1, vlagArg2, vlagArg3, and vlagArg4. For more details on how to use
these arguments, see [Overloaded Variables].
This module uses SSH to manage network device configuration.
The results of the operation will be placed in a directory named 'results'
     that must be created by the user in the local directory from which the playbook is run.
     For more information about this module from Lenovo and customizing its usage for your
use cases, please visit U(http://systemx.lenovofiles.com/help/index.jsp?topic=%2Fcom.lenovo.switchmgt.ansible.doc%2Fcnos_vlag.html)
version_added: "2.3"
extends_documentation_fragment: cnos
options:
vlagArg1:
description:
            - This is an overloaded vlag first argument. Usage of this argument can be found in the User Guide referenced above.
required: Yes
default: Null
choices: [enable, auto-recovery,config-consistency,isl,mac-address-table,peer-gateway,priority,startup-delay,tier-id,vrrp,instance,hlthchk]
vlagArg2:
description:
            - This is an overloaded vlag second argument. Usage of this argument can be found in the User Guide referenced above.
required: No
default: Null
choices: [Interval in seconds,disable or strict,Port Aggregation Number,VLAG priority,Delay time in seconds,VLAG tier-id value,
VLAG instance number,keepalive-attempts,keepalive-interval,retry-interval,peer-ip]
vlagArg3:
description:
            - This is an overloaded vlag third argument. Usage of this argument can be found in the User Guide referenced above.
required: No
default: Null
choices: [enable or port-aggregation,Number of keepalive attempts,Interval in seconds,Interval in seconds,VLAG health check peer IP4 address]
vlagArg4:
description:
            - This is an overloaded vlag fourth argument. Usage of this argument can be found in the User Guide referenced above.
required: No
default: Null
choices: [Port Aggregation Number,default or management]
'''
EXAMPLES = '''
Tasks : The following are examples of using the module cnos_vlag. These are written in the main.yml file of the tasks directory.
---
- name: Test Vlag - enable
cnos_vlag:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username']}}"
password: "{{ hostvars[inventory_hostname]['password']}}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType']}}"
outputfile: "./results/cnos_vlag_{{ inventory_hostname }}_output.txt"
vlagArg1: "enable"
- name: Test Vlag - autorecovery
cnos_vlag:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username']}}"
password: "{{ hostvars[inventory_hostname]['password']}}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType']}}"
outputfile: "./results/cnos_vlag_{{ inventory_hostname }}_output.txt"
vlagArg1: "auto-recovery"
vlagArg2: 266
- name: Test Vlag - config-consistency
cnos_vlag:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username']}}"
password: "{{ hostvars[inventory_hostname]['password']}}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType']}}"
outputfile: "./results/cnos_vlag_{{ inventory_hostname }}_output.txt"
vlagArg1: "config-consistency"
vlagArg2: "strict"
- name: Test Vlag - isl
cnos_vlag:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username']}}"
password: "{{ hostvars[inventory_hostname]['password']}}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType']}}"
outputfile: "./results/cnos_vlag_{{ inventory_hostname }}_output.txt"
vlagArg1: "isl"
vlagArg2: 23
- name: Test Vlag - mac-address-table
cnos_vlag:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username']}}"
password: "{{ hostvars[inventory_hostname]['password']}}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType']}}"
outputfile: "./results/cnos_vlag_{{ inventory_hostname }}_output.txt"
vlagArg1: "mac-address-table"
- name: Test Vlag - peer-gateway
cnos_vlag:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username']}}"
password: "{{ hostvars[inventory_hostname]['password']}}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType']}}"
outputfile: "./results/cnos_vlag_{{ inventory_hostname }}_output.txt"
vlagArg1: "peer-gateway"
- name: Test Vlag - priority
cnos_vlag:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username']}}"
password: "{{ hostvars[inventory_hostname]['password']}}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType']}}"
outputfile: "./results/cnos_vlag_{{ inventory_hostname }}_output.txt"
vlagArg1: "priority"
vlagArg2: 1313
- name: Test Vlag - startup-delay
cnos_vlag:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username']}}"
password: "{{ hostvars[inventory_hostname]['password']}}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType']}}"
outputfile: "./results/cnos_vlag_{{ inventory_hostname }}_output.txt"
vlagArg1: "startup-delay"
vlagArg2: 323
- name: Test Vlag - tier-id
cnos_vlag:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username']}}"
password: "{{ hostvars[inventory_hostname]['password']}}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType']}}"
outputfile: "./results/cnos_vlag_{{ inventory_hostname }}_output.txt"
vlagArg1: "tier-id"
vlagArg2: 313
- name: Test Vlag - vrrp
cnos_vlag:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username']}}"
password: "{{ hostvars[inventory_hostname]['password']}}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType']}}"
outputfile: "./results/cnos_vlag_{{ inventory_hostname }}_output.txt"
vlagArg1: "vrrp"
- name: Test Vlag - instance
cnos_vlag:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username']}}"
password: "{{ hostvars[inventory_hostname]['password']}}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType']}}"
outputfile: "./results/cnos_vlag_{{ inventory_hostname }}_output.txt"
vlagArg1: "instance"
vlagArg2: 33
vlagArg3: 333
- name: Test Vlag - instance2
cnos_vlag:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username']}}"
password: "{{ hostvars[inventory_hostname]['password']}}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType']}}"
outputfile: "./results/cnos_vlag_{{ inventory_hostname }}_output.txt"
vlagArg1: "instance"
vlagArg2: "33"
- name: Test Vlag - keepalive-attempts
cnos_vlag:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username']}}"
password: "{{ hostvars[inventory_hostname]['password']}}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType']}}"
outputfile: "./results/cnos_vlag_{{ inventory_hostname }}_output.txt"
vlagArg1: "hlthchk"
vlagArg2: "keepalive-attempts"
vlagArg3: 13
- name: Test Vlag - keepalive-interval
cnos_vlag:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username']}}"
password: "{{ hostvars[inventory_hostname]['password']}}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType']}}"
outputfile: "./results/cnos_vlag_{{ inventory_hostname }}_output.txt"
vlagArg1: "hlthchk"
vlagArg2: "keepalive-interval"
vlagArg3: 131
- name: Test Vlag - retry-interval
cnos_vlag:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username']}}"
password: "{{ hostvars[inventory_hostname]['password']}}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType']}}"
outputfile: "./results/cnos_vlag_{{ inventory_hostname }}_output.txt"
vlagArg1: "hlthchk"
vlagArg2: "retry-interval"
vlagArg3: 133
- name: Test Vlag - peer ip
cnos_vlag:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username']}}"
password: "{{ hostvars[inventory_hostname]['password']}}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType']}}"
outputfile: "./results/cnos_vlag_{{ inventory_hostname }}_output.txt"
vlagArg1: "hlthchk"
vlagArg2: "peer-ip"
vlagArg3: "1.2.3.4"
'''
RETURN = '''
msg:
description: Success or failure message
returned: always
type: string
sample: "vLAG configurations accomplished"
'''
import sys
import paramiko
import time
import argparse
import socket
import array
import json
import re
try:
from ansible.module_utils import cnos
HAS_LIB = True
except:
HAS_LIB = False
from ansible.module_utils.basic import AnsibleModule
from collections import defaultdict
def main():
#
# Define parameters for vlag creation entry
#
module = AnsibleModule(
argument_spec=dict(
outputfile=dict(required=True),
host=dict(required=True),
username=dict(required=True),
password=dict(required=True, no_log=True),
enablePassword=dict(required=False, no_log=True),
deviceType=dict(required=True),
vlagArg1=dict(required=True),
vlagArg2=dict(required=False),
vlagArg3=dict(required=False),
vlagArg4=dict(required=False),),
supports_check_mode=False)
username = module.params['username']
password = module.params['password']
enablePassword = module.params['enablePassword']
outputfile = module.params['outputfile']
hostIP = module.params['host']
deviceType = module.params['deviceType']
vlagArg1 = module.params['vlagArg1']
vlagArg2 = module.params['vlagArg2']
vlagArg3 = module.params['vlagArg3']
vlagArg4 = module.params['vlagArg4']
output = ""
# Create instance of SSHClient object
remote_conn_pre = paramiko.SSHClient()
# Automatically add untrusted hosts (make sure okay for security policy in
# your environment)
remote_conn_pre.set_missing_host_key_policy(paramiko.AutoAddPolicy())
# initiate SSH connection with the switch
remote_conn_pre.connect(hostIP, username=username, password=password)
time.sleep(2)
# Use invoke_shell to establish an 'interactive session'
remote_conn = remote_conn_pre.invoke_shell()
time.sleep(2)
# Enable and enter configure terminal then send command
output = output + cnos.waitForDeviceResponse("\n", ">", 2, remote_conn)
output = output + \
cnos.enterEnableModeForDevice(enablePassword, 3, remote_conn)
# Make terminal length = 0
output = output + \
cnos.waitForDeviceResponse("terminal length 0\n", "#", 2, remote_conn)
# Go to config mode
output = output + \
cnos.waitForDeviceResponse(
"configure d\n", "(config)#", 2, remote_conn)
# Send the CLi command
output = output + cnos.vlagConfig(
remote_conn, deviceType, "(config)#", 2, vlagArg1, vlagArg2, vlagArg3,
vlagArg4)
# Save it into the file
file = open(outputfile, "a")
file.write(output)
file.close()
# need to add logic to check when changes occur or not
errorMsg = cnos.checkOutputForError(output)
if(errorMsg is None):
module.exit_json(changed=True, msg="vlag configurations accomplished")
else:
module.fail_json(msg=errorMsg)
if __name__ == '__main__':
main()
| gpl-3.0 | 9,027,011,899,004,421,000 | 37.108571 | 145 | 0.670715 | false |
jbbskinny/sympy | sympy/series/kauers.py | 94 | 1807 | from __future__ import print_function, division
def finite_diff(expression, variable, increment=1):
"""
Takes as input a polynomial expression and the variable used to construct
    it and returns the difference between the function's value when the input is
    incremented by 1 and the original function value. If you want an increment
    other than one, supply it as a third argument.
Examples
========
>>> from sympy.abc import x, y, z, k, n
>>> from sympy.series.kauers import finite_diff
>>> from sympy import Sum
>>> finite_diff(x**2, x)
2*x + 1
>>> finite_diff(y**3 + 2*y**2 + 3*y + 4, y)
3*y**2 + 7*y + 6
>>> finite_diff(x**2 + 3*x + 8, x, 2)
4*x + 10
>>> finite_diff(z**3 + 8*z, z, 3)
9*z**2 + 27*z + 51
"""
expression = expression.expand()
expression2 = expression.subs(variable, variable + increment)
expression2 = expression2.expand()
return expression2 - expression
def finite_diff_kauers(sum):
"""
Takes as input a Sum instance and returns the difference between the sum
with the upper index incremented by 1 and the original sum. For example,
if S(n) is a sum, then finite_diff_kauers will return S(n + 1) - S(n).
Examples
========
>>> from sympy.series.kauers import finite_diff_kauers
>>> from sympy import Sum
>>> from sympy.abc import x, y, m, n, k
>>> finite_diff_kauers(Sum(k, (k, 1, n)))
n + 1
>>> finite_diff_kauers(Sum(1/k, (k, 1, n)))
1/(n + 1)
>>> finite_diff_kauers(Sum((x*y**2), (x, 1, n), (y, 1, m)))
(m + 1)**2*(n + 1)
>>> finite_diff_kauers(Sum((x*y), (x, 1, m), (y, 1, n)))
(m + 1)*(n + 1)
"""
function = sum.function
for l in sum.limits:
function = function.subs(l[0], l[- 1] + 1)
return function
| bsd-3-clause | -1,509,883,331,748,980,500 | 31.854545 | 78 | 0.589928 | false |
smscannell/crazyflie-clients-python | lib/cfclient/utils/input/inputinterfaces/__init__.py | 5 | 3515 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# || ____ _ __
# +------+ / __ )(_) /_______________ _____ ___
# | 0xBC | / __ / / __/ ___/ ___/ __ `/_ / / _ \
# +------+ / /_/ / / /_/ /__/ / / /_/ / / /_/ __/
# || || /_____/_/\__/\___/_/ \__,_/ /___/\___/
#
# Copyright (C) 2014 Bitcraze AB
#
# Crazyflie Nano Quadcopter Client
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
"""
Find all the available input interfaces and try to initialize them.
"""
import os
import glob
import logging
from ..inputreaderinterface import InputReaderInterface
__author__ = 'Bitcraze AB'
__all__ = ['InputInterface']
logger = logging.getLogger(__name__)
found_interfaces = [os.path.splitext(os.path.basename(f))[0] for f in
glob.glob(os.path.dirname(__file__) + "/[A-Za-z]*.py")]
if len(found_interfaces) == 0:
found_interfaces = [os.path.splitext(os.path.basename(f))[0] for
f in glob.glob(os.path.dirname(__file__) +
"/[A-Za-z]*.pyc")]
logger.info("Found interfaces: {}".format(found_interfaces))
initialized_interfaces = []
available_interfaces = []
for interface in found_interfaces:
try:
module = __import__(interface, globals(), locals(), [interface], 1)
main_name = getattr(module, "MODULE_MAIN")
initialized_interfaces.append(getattr(module, main_name)())
logger.info("Successfully initialized [{}]".format(interface))
except Exception as e:
logger.info("Could not initialize [{}]: {}".format(interface, e))
def devices():
# Todo: Support rescanning and adding/removing devices
if len(available_interfaces) == 0:
for reader in initialized_interfaces:
devs = reader.devices()
for dev in devs:
available_interfaces.append(InputInterface(
dev["name"], dev["id"], reader))
return available_interfaces
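# A minimal consumer sketch, assuming at least one interface was discovered:
#
#   for dev in devices():
#       dev.open()
#       data = dev.read()
#       dev.close()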
class InputInterface(InputReaderInterface):
def __init__(self, dev_name, dev_id, dev_reader):
super(InputInterface, self).__init__(dev_name, dev_id, dev_reader)
# These devices cannot be mapped and configured
self.supports_mapping = False
# Ask the reader if it wants to limit
# roll/pitch/yaw/thrust for all devices
self.limit_rp = dev_reader.limit_rp
self.limit_thrust = dev_reader.limit_thrust
self.limit_yaw = dev_reader.limit_yaw
def open(self):
self._reader.open(self.id)
def close(self):
self._reader.close(self.id)
def read(self, include_raw=False):
mydata = self._reader.read(self.id)
# Merge interface returned data into InputReader Data Item
for key in list(mydata.keys()):
self.data.set(key, mydata[key])
return self.data
| gpl-2.0 | -1,196,066,169,088,873,500 | 33.460784 | 75 | 0.607966 | false |
RubikonAlpha/RIOT | dist/tools/pyterm/pytermcontroller/pytermcontroller.py | 100 | 4769 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
# Copyright (C) 2014 Philipp Rosenkranz <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
import sys, signal, threading
from twisted.internet import reactor
from twisted.internet.protocol import Protocol, Factory
class PubProtocol(Protocol):
def __init__(self, factory):
self.factory = factory
def connectionMade(self):
print("new connection made")
    def connectionLost(self, reason):
        # forget the transport of the client that disconnected
        self.factory.clients = {key: value for key, value in self.factory.clients.items()
                                if value is not self.transport}
def dataReceived(self, data):
if data.startswith("hostname: "):
remoteHostname = data.split()[1].strip()
print("dataReceived added: " + remoteHostname)
self.factory.clients[remoteHostname] = self.transport
else:
print("received some useless data...")
class PubFactory(Factory):
def __init__(self):
self.clients = dict()
def buildProtocol(self, addr):
return PubProtocol(self)
class ExperimentRunner():
def __init__(self, experiment, testbed):
self.publisher = PubFactory()
self.port = int(testbed.serverPort)
self.experiment = experiment(self.publisher, self)
self.testbed = testbed
def run(self):
signal.signal(signal.SIGINT, self.handle_sigint)
self.experiment.run()
reactor.listenTCP(self.port, self.publisher)
# clean logfiles and start nodes but don't flash nodes
self.testbed.initClean()
reactor.run()
def stop(self):
self.testbed.stop()
reactor.callFromThread(self.safeReactorStop)
def safeReactorStop(self):
if reactor.running:
try:
reactor.stop()
except:
print("tried to shutdown reactor twice!")
def handle_sigint(self, signal, frame):
self.experiment.stop()
self.testbed.stop()
self.stop() # shutdown if experiment didn't already
class Experiment():
def __init__(self, factory, runner):
self.factory = factory
self.runner = runner
self.hostid = dict()
self.sumDelay = 0
def run(self):
print("Running preHook")
self.preHook()
print("Running experiment")
self.start()
def start(self):
raise NotImplementedError("Inherit from Experiment and implement start")
def stop(self):
print("Running postHook")
self.postHook()
self.runner.stop()
def preHook(self):
pass
def postHook(self):
pass
def readHostFile(self, path):
id = 1
with open(path) as f:
for line in f:
self.hostid[line.strip()] = id
id += 1
def sendToHost(self, host=None, cmd=""):
if host in self.factory.clients:
self.factory.clients[host].write(cmd + "\n")
else:
print("sendToHost: no such host known: " + host + " !")
def sendToAll(self, cmd=""):
for host, transport in self.factory.clients.items():
self.sendToHost(host, cmd)
def pauseInSeconds(self, seconds=0):
from time import time, sleep
start = time()
while (time() - start < seconds):
sleep(seconds - (time() - start))
def callLater(self, absoluteDelay = 0.0, function = None):
reactor.callLater(absoluteDelay, function)
def waitAndCall(self, relativeDelay = 0.0, function = None):
self.sumDelay += relativeDelay
self.callLater(self.sumDelay, function)
def clientIterator(self):
return self.factory.clients.items()
def connectionByHostname(self, host=None):
if host in self.factory.clients:
return self.factory.clients[host]
@staticmethod
def sendToConnection(connection, line):
connection.write(line + "\n")
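# A minimal sketch of a concrete experiment; the class name and the command
# sent are made up, the hooks and helpers used are the ones defined above:
#
#   class PingExperiment(Experiment):
#       def start(self):
#           self.waitAndCall(1.0, lambda: self.sendToAll("help"))
#           self.waitAndCall(5.0, self.stop)
#
#   ExperimentRunner(PingExperiment, testbed).run()
#
# where testbed is assumed to provide serverPort, initClean() and stop().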
| lgpl-2.1 | -1,344,511,049,823,811,000 | 29.375796 | 89 | 0.634515 | false |
dmillington/ansible-modules-core | files/template.py | 11 | 3875 | # this is a virtual module that is entirely implemented server side
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: template
version_added: historical
short_description: Templates a file out to a remote server.
description:
- Templates are processed by the Jinja2 templating language
(U(http://jinja.pocoo.org/docs/)) - documentation on the template
formatting can be found in the Template Designer Documentation
(U(http://jinja.pocoo.org/docs/templates/)).
- "Six additional variables can be used in templates: C(ansible_managed)
(configurable via the C(defaults) section of C(ansible.cfg)) contains a string
which can be used to describe the template name, host, modification time of the
template file and the owner uid, C(template_host) contains the node name of
the template's machine, C(template_uid) the owner, C(template_path) the
absolute path of the template, C(template_fullpath) is the absolute path of the
template, and C(template_run_date) is the date that the template was rendered. Note that including
a string that uses a date in the template will result in the template being marked 'changed'
each time."
options:
src:
description:
- Path of a Jinja2 formatted template on the Ansible controller. This can be a relative or absolute path.
required: true
dest:
description:
- Location to render the template to on the remote machine.
required: true
backup:
description:
- Create a backup file including the timestamp information so you can get
the original file back if you somehow clobbered it incorrectly.
required: false
choices: [ "yes", "no" ]
default: "no"
force:
description:
- the default is C(yes), which will replace the remote file when contents
are different than the source. If C(no), the file will only be transferred
if the destination does not exist.
required: false
choices: [ "yes", "no" ]
default: "yes"
notes:
- "Since Ansible version 0.9, templates are loaded with C(trim_blocks=True)."
- "Also, you can override jinja2 settings by adding a special header to template file.
i.e. C(#jinja2:variable_start_string:'[%' , variable_end_string:'%]', trim_blocks: False)
which changes the variable interpolation markers to [% var %] instead of {{ var }}.
This is the best way to prevent evaluation of things that look like, but should not be Jinja2.
raw/endraw in Jinja2 will not work as you expect because templates in Ansible are recursively evaluated."
author:
- Ansible Core Team
- Michael DeHaan
extends_documentation_fragment:
- files
- validate
'''
EXAMPLES = '''
# Example from Ansible Playbooks
- template:
src: /mytemplates/foo.j2
dest: /etc/file.conf
owner: bin
group: wheel
mode: 0644
# The same example, but using symbolic modes equivalent to 0644
- template:
src: /mytemplates/foo.j2
dest: /etc/file.conf
owner: bin
group: wheel
mode: "u=rw,g=r,o=r"
# Copy a new "sudoers" file into place, after passing validation with visudo
- template:
src: /mine/sudoers
dest: /etc/sudoers
validate: 'visudo -cf %s'
'''
| gpl-3.0 | -1,277,305,428,280,286,000 | 37.75 | 111 | 0.707355 | false |
spasovski/zamboni | apps/versions/views.py | 3 | 4674 | import posixpath
from django import http
from django.core.exceptions import PermissionDenied
from django.shortcuts import get_object_or_404, redirect
import caching.base as caching
import commonware.log
import jingo
from mobility.decorators import mobile_template
import amo
from amo.urlresolvers import reverse
from amo.utils import HttpResponseSendFile, urlparams
from access import acl
from addons.decorators import addon_view_factory
from addons.models import Addon
from files.models import File
from versions.models import Version
# The version detail page redirects to the version within pagination, so we
# need to enforce the number of versions per page.
PER_PAGE = 30
addon_view = addon_view_factory(Addon.objects.valid)
log = commonware.log.getLogger('z.versions')
@addon_view
@mobile_template('versions/{mobile/}version_list.html')
def version_list(request, addon, template):
qs = (addon.versions.filter(files__status__in=amo.VALID_STATUSES)
.distinct().order_by('-created'))
versions = amo.utils.paginate(request, qs, PER_PAGE)
versions.object_list = list(versions.object_list)
Version.transformer(versions.object_list)
return jingo.render(request, template,
{'addon': addon, 'versions': versions})
@addon_view
def version_detail(request, addon, version_num):
qs = (addon.versions.filter(files__status__in=amo.VALID_STATUSES)
.distinct().order_by('-created'))
# Use cached_with since values_list won't be cached.
f = lambda: _find_version_page(qs, addon, version_num)
return caching.cached_with(qs, f, 'vd:%s:%s' % (addon.id, version_num))
def _find_version_page(qs, addon, version_num):
ids = list(qs.values_list('version', flat=True))
url = reverse('addons.versions', args=[addon.slug])
if version_num in ids:
page = 1 + ids.index(version_num) / PER_PAGE
to = urlparams(url, 'version-%s' % version_num, page=page)
return http.HttpResponseRedirect(to)
else:
raise http.Http404()
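# In _find_version_page above, with PER_PAGE = 30 and Python 2 integer
# division, versions at indexes 0-29 land on page 1, index 45 gives
# page = 1 + 45 / 30 = 2, and so on.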
@addon_view
def update_info(request, addon, version_num):
qs = addon.versions.filter(version=version_num,
files__status__in=amo.VALID_STATUSES)
if not qs:
raise http.Http404()
serve_xhtml = ('application/xhtml+xml' in
request.META.get('HTTP_ACCEPT', '').lower())
return jingo.render(request, 'versions/update_info.html',
{'version': qs[0], 'serve_xhtml': serve_xhtml},
content_type='application/xhtml+xml')
def update_info_redirect(request, version_id):
version = get_object_or_404(Version, pk=version_id)
return redirect(reverse('addons.versions.update_info',
args=(version.addon.id, version.version)),
permanent=True)
# Should accept junk at the end for filename goodness.
def download_file(request, file_id, type=None):
file = get_object_or_404(File.objects, pk=file_id)
addon = get_object_or_404(Addon.objects, pk=file.version.addon_id)
if addon.is_premium():
raise PermissionDenied
if addon.is_disabled or file.status == amo.STATUS_DISABLED:
if (acl.check_addon_ownership(request, addon, viewer=True,
ignore_disabled=True) or
acl.check_reviewer(request)):
return HttpResponseSendFile(request, file.guarded_file_path,
content_type='application/xp-install')
else:
raise http.Http404()
attachment = (type == 'attachment' or not request.APP.browser)
loc = file.get_mirror(addon, attachment=attachment)
response = http.HttpResponseRedirect(loc)
response['X-Target-Digest'] = file.hash
return response
guard = lambda: Addon.objects.filter(_current_version__isnull=False)
@addon_view_factory(guard)
def download_latest(request, addon, type='xpi', platform=None):
platforms = [amo.PLATFORM_ALL.id]
if platform is not None and int(platform) in amo.PLATFORMS:
platforms.append(int(platform))
files = File.objects.filter(platform__in=platforms,
version=addon._current_version_id)
try:
# If there's a file matching our platform, it'll float to the end.
file = sorted(files, key=lambda f: f.platform_id == platforms[-1])[-1]
except IndexError:
raise http.Http404()
args = [file.id, type] if type else [file.id]
url = posixpath.join(reverse('downloads.file', args=args), file.filename)
if request.GET:
url += '?' + request.GET.urlencode()
return http.HttpResponseRedirect(url)
| bsd-3-clause | -6,525,775,521,087,783,000 | 36.095238 | 78 | 0.665811 | false |
AbsentMoniker/ECE463Honors | web2py/gluon/contrib/feedparser.py | 22 | 165632 | """Universal feed parser
Handles RSS 0.9x, RSS 1.0, RSS 2.0, CDF, Atom 0.3, and Atom 1.0 feeds
Visit https://code.google.com/p/feedparser/ for the latest version
Visit http://packages.python.org/feedparser/ for the latest documentation
Required: Python 2.4 or later
Recommended: iconv_codec <http://cjkpython.i18n.org/>
"""
__version__ = "5.1.2"
__license__ = """
Copyright (c) 2010-2012 Kurt McKee <[email protected]>
Copyright (c) 2002-2008 Mark Pilgrim
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE."""
__author__ = "Mark Pilgrim <http://diveintomark.org/>"
__contributors__ = ["Jason Diamond <http://injektilo.org/>",
"John Beimler <http://john.beimler.org/>",
"Fazal Majid <http://www.majid.info/mylos/weblog/>",
"Aaron Swartz <http://aaronsw.com/>",
"Kevin Marks <http://epeus.blogspot.com/>",
"Sam Ruby <http://intertwingly.net/>",
"Ade Oshineye <http://blog.oshineye.com/>",
"Martin Pool <http://sourcefrog.net/>",
"Kurt McKee <http://kurtmckee.org/>"]
# HTTP "User-Agent" header to send to servers when downloading feeds.
# If you are embedding feedparser in a larger application, you should
# change this to your application name and URL.
USER_AGENT = "UniversalFeedParser/%s +https://code.google.com/p/feedparser/" % __version__
# HTTP "Accept" header to send to servers when downloading feeds. If you don't
# want to send an Accept header, set this to None.
ACCEPT_HEADER = "application/atom+xml,application/rdf+xml,application/rss+xml,application/x-netcdf,application/xml;q=0.9,text/xml;q=0.2,*/*;q=0.1"
# List of preferred XML parsers, by SAX driver name. These will be tried first,
# but if they're not installed, Python will keep searching through its own list
# of pre-installed parsers until it finds one that supports everything we need.
PREFERRED_XML_PARSERS = ["drv_libxml2"]
# If you want feedparser to automatically run HTML markup through HTML Tidy, set
# this to 1. Requires mxTidy <http://www.egenix.com/files/python/mxTidy.html>
# or utidylib <http://utidylib.berlios.de/>.
TIDY_MARKUP = 0
# List of Python interfaces for HTML Tidy, in order of preference. Only useful
# if TIDY_MARKUP = 1
PREFERRED_TIDY_INTERFACES = ["uTidy", "mxTidy"]
# If you want feedparser to automatically resolve all relative URIs, set this
# to 1.
RESOLVE_RELATIVE_URIS = 1
# If you want feedparser to automatically sanitize all potentially unsafe
# HTML content, set this to 1.
SANITIZE_HTML = 1
# If you want feedparser to automatically parse microformat content embedded
# in entry contents, set this to 1
PARSE_MICROFORMATS = 1
# ---------- Python 3 modules (make it work if possible) ----------
try:
import rfc822
except ImportError:
from email import _parseaddr as rfc822
try:
# Python 3.1 introduces bytes.maketrans and simultaneously
# deprecates string.maketrans; use bytes.maketrans if possible
_maketrans = bytes.maketrans
except (NameError, AttributeError):
import string
_maketrans = string.maketrans
# base64 support for Atom feeds that contain embedded binary data
try:
import base64, binascii
except ImportError:
base64 = binascii = None
else:
# Python 3.1 deprecates decodestring in favor of decodebytes
_base64decode = getattr(base64, 'decodebytes', base64.decodestring)
# _s2bytes: convert a UTF-8 str to bytes if the interpreter is Python 3
# _l2bytes: convert a list of ints to bytes if the interpreter is Python 3
try:
if bytes is str:
# In Python 2.5 and below, bytes doesn't exist (NameError)
# In Python 2.6 and above, bytes and str are the same type
raise NameError
except NameError:
# Python 2
def _s2bytes(s):
return s
def _l2bytes(l):
return ''.join(map(chr, l))
else:
# Python 3
def _s2bytes(s):
return bytes(s, 'utf8')
def _l2bytes(l):
return bytes(l)
# If you want feedparser to allow all URL schemes, set this to ()
# List culled from Python's urlparse documentation at:
# http://docs.python.org/library/urlparse.html
# as well as from "URI scheme" at Wikipedia:
# https://secure.wikimedia.org/wikipedia/en/wiki/URI_scheme
# Many more will likely need to be added!
ACCEPTABLE_URI_SCHEMES = (
'file', 'ftp', 'gopher', 'h323', 'hdl', 'http', 'https', 'imap', 'magnet',
'mailto', 'mms', 'news', 'nntp', 'prospero', 'rsync', 'rtsp', 'rtspu',
'sftp', 'shttp', 'sip', 'sips', 'snews', 'svn', 'svn+ssh', 'telnet',
'wais',
# Additional common-but-unofficial schemes
'aim', 'callto', 'cvs', 'facetime', 'feed', 'git', 'gtalk', 'irc', 'ircs',
'irc6', 'itms', 'mms', 'msnim', 'skype', 'ssh', 'smb', 'svn', 'ymsg',
)
#ACCEPTABLE_URI_SCHEMES = ()
# ---------- required modules (should come with any Python distribution) ----------
import cgi
import codecs
import copy
import datetime
import re
import struct
import time
import types
import urllib
import urllib2
import urlparse
import warnings
from htmlentitydefs import name2codepoint, codepoint2name, entitydefs
try:
from io import BytesIO as _StringIO
except ImportError:
try:
from cStringIO import StringIO as _StringIO
except ImportError:
from StringIO import StringIO as _StringIO
# ---------- optional modules (feedparser will work without these, but with reduced functionality) ----------
# gzip is included with most Python distributions, but may not be available if you compiled your own
try:
import gzip
except ImportError:
gzip = None
try:
import zlib
except ImportError:
zlib = None
# If a real XML parser is available, feedparser will attempt to use it. feedparser has
# been tested with the built-in SAX parser and libxml2. On platforms where the
# Python distribution does not come with an XML parser (such as Mac OS X 10.2 and some
# versions of FreeBSD), feedparser will quietly fall back on regex-based parsing.
try:
import xml.sax
from xml.sax.saxutils import escape as _xmlescape
except ImportError:
_XML_AVAILABLE = 0
def _xmlescape(data,entities={}):
data = data.replace('&', '&')
data = data.replace('>', '>')
data = data.replace('<', '<')
for char, entity in entities:
data = data.replace(char, entity)
return data
else:
try:
xml.sax.make_parser(PREFERRED_XML_PARSERS) # test for valid parsers
except xml.sax.SAXReaderNotAvailable:
_XML_AVAILABLE = 0
else:
_XML_AVAILABLE = 1
# sgmllib is not available by default in Python 3; if the end user doesn't have
# it available then we'll lose illformed XML parsing, content santizing, and
# microformat support (at least while feedparser depends on BeautifulSoup).
try:
import sgmllib
except ImportError:
# This is probably Python 3, which doesn't include sgmllib anymore
_SGML_AVAILABLE = 0
# Mock sgmllib enough to allow subclassing later on
class sgmllib(object):
class SGMLParser(object):
def goahead(self, i):
pass
def parse_starttag(self, i):
pass
else:
_SGML_AVAILABLE = 1
# sgmllib defines a number of module-level regular expressions that are
# insufficient for the XML parsing feedparser needs. Rather than modify
# the variables directly in sgmllib, they're defined here using the same
# names, and the compiled code objects of several sgmllib.SGMLParser
# methods are copied into _BaseHTMLProcessor so that they execute in
# feedparser's scope instead of sgmllib's scope.
charref = re.compile('&#(\d+|[xX][0-9a-fA-F]+);')
tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*')
attrfind = re.compile(
r'\s*([a-zA-Z_][-:.a-zA-Z_0-9]*)[$]?(\s*=\s*'
r'(\'[^\']*\'|"[^"]*"|[][\-a-zA-Z0-9./,:;+*%?!&$\(\)_#=~\'"@]*))?'
)
# Unfortunately, these must be copied over to prevent NameError exceptions
entityref = sgmllib.entityref
incomplete = sgmllib.incomplete
interesting = sgmllib.interesting
shorttag = sgmllib.shorttag
shorttagopen = sgmllib.shorttagopen
starttagopen = sgmllib.starttagopen
class _EndBracketRegEx:
def __init__(self):
# Overriding the built-in sgmllib.endbracket regex allows the
# parser to find angle brackets embedded in element attributes.
self.endbracket = re.compile('''([^'"<>]|"[^"]*"(?=>|/|\s|\w+=)|'[^']*'(?=>|/|\s|\w+=))*(?=[<>])|.*?(?=[<>])''')
def search(self, target, index=0):
match = self.endbracket.match(target, index)
if match is not None:
# Returning a new object in the calling thread's context
                # resolves a thread-safety issue.
return EndBracketMatch(match)
return None
class EndBracketMatch:
def __init__(self, match):
self.match = match
def start(self, n):
return self.match.end(n)
endbracket = _EndBracketRegEx()
# iconv_codec provides support for more character encodings.
# It's available from http://cjkpython.i18n.org/
try:
import iconv_codec
except ImportError:
pass
# chardet library auto-detects character encodings
# Download from http://chardet.feedparser.org/
try:
import chardet
except ImportError:
chardet = None
# BeautifulSoup is used to extract microformat content from HTML
# feedparser is tested using BeautifulSoup 3.2.0
# http://www.crummy.com/software/BeautifulSoup/
try:
import BeautifulSoup
except ImportError:
BeautifulSoup = None
PARSE_MICROFORMATS = False
# ---------- don't touch these ----------
class ThingsNobodyCaresAboutButMe(Exception): pass
class CharacterEncodingOverride(ThingsNobodyCaresAboutButMe): pass
class CharacterEncodingUnknown(ThingsNobodyCaresAboutButMe): pass
class NonXMLContentType(ThingsNobodyCaresAboutButMe): pass
class UndeclaredNamespace(Exception): pass
SUPPORTED_VERSIONS = {'': u'unknown',
'rss090': u'RSS 0.90',
'rss091n': u'RSS 0.91 (Netscape)',
'rss091u': u'RSS 0.91 (Userland)',
'rss092': u'RSS 0.92',
'rss093': u'RSS 0.93',
'rss094': u'RSS 0.94',
'rss20': u'RSS 2.0',
'rss10': u'RSS 1.0',
'rss': u'RSS (unknown version)',
'atom01': u'Atom 0.1',
'atom02': u'Atom 0.2',
'atom03': u'Atom 0.3',
'atom10': u'Atom 1.0',
'atom': u'Atom (unknown version)',
'cdf': u'CDF',
}
class FeedParserDict(dict):
keymap = {'channel': 'feed',
'items': 'entries',
'guid': 'id',
'date': 'updated',
'date_parsed': 'updated_parsed',
'description': ['summary', 'subtitle'],
'description_detail': ['summary_detail', 'subtitle_detail'],
'url': ['href'],
'modified': 'updated',
'modified_parsed': 'updated_parsed',
'issued': 'published',
'issued_parsed': 'published_parsed',
'copyright': 'rights',
'copyright_detail': 'rights_detail',
'tagline': 'subtitle',
'tagline_detail': 'subtitle_detail'}
def __getitem__(self, key):
if key == 'category':
try:
return dict.__getitem__(self, 'tags')[0]['term']
except IndexError:
raise KeyError, "object doesn't have key 'category'"
elif key == 'enclosures':
norel = lambda link: FeedParserDict([(name,value) for (name,value) in link.items() if name!='rel'])
return [norel(link) for link in dict.__getitem__(self, 'links') if link['rel']==u'enclosure']
elif key == 'license':
for link in dict.__getitem__(self, 'links'):
if link['rel']==u'license' and 'href' in link:
return link['href']
elif key == 'updated':
# Temporarily help developers out by keeping the old
# broken behavior that was reported in issue 310.
# This fix was proposed in issue 328.
if not dict.__contains__(self, 'updated') and \
dict.__contains__(self, 'published'):
warnings.warn("To avoid breaking existing software while "
"fixing issue 310, a temporary mapping has been created "
"from `updated` to `published` if `updated` doesn't "
"exist. This fallback will be removed in a future version "
"of feedparser.", DeprecationWarning)
return dict.__getitem__(self, 'published')
return dict.__getitem__(self, 'updated')
elif key == 'updated_parsed':
if not dict.__contains__(self, 'updated_parsed') and \
dict.__contains__(self, 'published_parsed'):
warnings.warn("To avoid breaking existing software while "
"fixing issue 310, a temporary mapping has been created "
"from `updated_parsed` to `published_parsed` if "
"`updated_parsed` doesn't exist. This fallback will be "
"removed in a future version of feedparser.",
DeprecationWarning)
return dict.__getitem__(self, 'published_parsed')
return dict.__getitem__(self, 'updated_parsed')
else:
realkey = self.keymap.get(key, key)
if isinstance(realkey, list):
for k in realkey:
if dict.__contains__(self, k):
return dict.__getitem__(self, k)
elif dict.__contains__(self, realkey):
return dict.__getitem__(self, realkey)
return dict.__getitem__(self, key)
def __contains__(self, key):
if key in ('updated', 'updated_parsed'):
# Temporarily help developers out by keeping the old
# broken behavior that was reported in issue 310.
# This fix was proposed in issue 328.
return dict.__contains__(self, key)
try:
self.__getitem__(key)
except KeyError:
return False
else:
return True
has_key = __contains__
def get(self, key, default=None):
try:
return self.__getitem__(key)
except KeyError:
return default
def __setitem__(self, key, value):
key = self.keymap.get(key, key)
if isinstance(key, list):
key = key[0]
return dict.__setitem__(self, key, value)
def setdefault(self, key, value):
if key not in self:
self[key] = value
return value
return self[key]
def __getattr__(self, key):
# __getattribute__() is called first; this will be called
# only if an attribute was not already found
try:
return self.__getitem__(key)
except KeyError:
raise AttributeError, "object has no attribute '%s'" % key
def __hash__(self):
return id(self)
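# A short illustration of the key aliasing above (values are made up):
#
#   d = FeedParserDict()
#   d['channel'] = {'title': u'Example'}  # stored under the normalized key 'feed'
#   d.feed is d['channel']                # True, both resolve to 'feed'
#   d['url'] = u'http://example.org/'     # stored as 'href'
#   d.href                                # -> u'http://example.org/'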
_cp1252 = {
128: unichr(8364), # euro sign
130: unichr(8218), # single low-9 quotation mark
131: unichr( 402), # latin small letter f with hook
132: unichr(8222), # double low-9 quotation mark
133: unichr(8230), # horizontal ellipsis
134: unichr(8224), # dagger
135: unichr(8225), # double dagger
136: unichr( 710), # modifier letter circumflex accent
137: unichr(8240), # per mille sign
138: unichr( 352), # latin capital letter s with caron
139: unichr(8249), # single left-pointing angle quotation mark
140: unichr( 338), # latin capital ligature oe
142: unichr( 381), # latin capital letter z with caron
145: unichr(8216), # left single quotation mark
146: unichr(8217), # right single quotation mark
147: unichr(8220), # left double quotation mark
148: unichr(8221), # right double quotation mark
149: unichr(8226), # bullet
150: unichr(8211), # en dash
151: unichr(8212), # em dash
152: unichr( 732), # small tilde
153: unichr(8482), # trade mark sign
154: unichr( 353), # latin small letter s with caron
155: unichr(8250), # single right-pointing angle quotation mark
156: unichr( 339), # latin small ligature oe
158: unichr( 382), # latin small letter z with caron
159: unichr( 376), # latin capital letter y with diaeresis
}
_urifixer = re.compile('^([A-Za-z][A-Za-z0-9+-.]*://)(/*)(.*?)')
def _urljoin(base, uri):
uri = _urifixer.sub(r'\1\3', uri)
#try:
if not isinstance(uri, unicode):
uri = uri.decode('utf-8', 'ignore')
uri = urlparse.urljoin(base, uri)
if not isinstance(uri, unicode):
return uri.decode('utf-8', 'ignore')
return uri
#except:
# uri = urlparse.urlunparse([urllib.quote(part) for part in urlparse.urlparse(uri)])
# return urlparse.urljoin(base, uri)
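# For example, _urljoin(u'http://example.org/feeds/', u'../logo.png') resolves
# to u'http://example.org/logo.png', while already-absolute URIs pass through.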
class _FeedParserMixin:
namespaces = {
'': '',
'http://backend.userland.com/rss': '',
'http://blogs.law.harvard.edu/tech/rss': '',
'http://purl.org/rss/1.0/': '',
'http://my.netscape.com/rdf/simple/0.9/': '',
'http://example.com/newformat#': '',
'http://example.com/necho': '',
'http://purl.org/echo/': '',
'uri/of/echo/namespace#': '',
'http://purl.org/pie/': '',
'http://purl.org/atom/ns#': '',
'http://www.w3.org/2005/Atom': '',
'http://purl.org/rss/1.0/modules/rss091#': '',
'http://webns.net/mvcb/': 'admin',
'http://purl.org/rss/1.0/modules/aggregation/': 'ag',
'http://purl.org/rss/1.0/modules/annotate/': 'annotate',
'http://media.tangent.org/rss/1.0/': 'audio',
'http://backend.userland.com/blogChannelModule': 'blogChannel',
'http://web.resource.org/cc/': 'cc',
'http://backend.userland.com/creativeCommonsRssModule': 'creativeCommons',
'http://purl.org/rss/1.0/modules/company': 'co',
'http://purl.org/rss/1.0/modules/content/': 'content',
'http://my.theinfo.org/changed/1.0/rss/': 'cp',
'http://purl.org/dc/elements/1.1/': 'dc',
'http://purl.org/dc/terms/': 'dcterms',
'http://purl.org/rss/1.0/modules/email/': 'email',
'http://purl.org/rss/1.0/modules/event/': 'ev',
'http://rssnamespace.org/feedburner/ext/1.0': 'feedburner',
'http://freshmeat.net/rss/fm/': 'fm',
'http://xmlns.com/foaf/0.1/': 'foaf',
'http://www.w3.org/2003/01/geo/wgs84_pos#': 'geo',
'http://postneo.com/icbm/': 'icbm',
'http://purl.org/rss/1.0/modules/image/': 'image',
'http://www.itunes.com/DTDs/PodCast-1.0.dtd': 'itunes',
'http://example.com/DTDs/PodCast-1.0.dtd': 'itunes',
'http://purl.org/rss/1.0/modules/link/': 'l',
'http://search.yahoo.com/mrss': 'media',
# Version 1.1.2 of the Media RSS spec added the trailing slash on the namespace
'http://search.yahoo.com/mrss/': 'media',
'http://madskills.com/public/xml/rss/module/pingback/': 'pingback',
'http://prismstandard.org/namespaces/1.2/basic/': 'prism',
'http://www.w3.org/1999/02/22-rdf-syntax-ns#': 'rdf',
'http://www.w3.org/2000/01/rdf-schema#': 'rdfs',
'http://purl.org/rss/1.0/modules/reference/': 'ref',
'http://purl.org/rss/1.0/modules/richequiv/': 'reqv',
'http://purl.org/rss/1.0/modules/search/': 'search',
'http://purl.org/rss/1.0/modules/slash/': 'slash',
'http://schemas.xmlsoap.org/soap/envelope/': 'soap',
'http://purl.org/rss/1.0/modules/servicestatus/': 'ss',
'http://hacks.benhammersley.com/rss/streaming/': 'str',
'http://purl.org/rss/1.0/modules/subscription/': 'sub',
'http://purl.org/rss/1.0/modules/syndication/': 'sy',
'http://schemas.pocketsoap.com/rss/myDescModule/': 'szf',
'http://purl.org/rss/1.0/modules/taxonomy/': 'taxo',
'http://purl.org/rss/1.0/modules/threading/': 'thr',
'http://purl.org/rss/1.0/modules/textinput/': 'ti',
'http://madskills.com/public/xml/rss/module/trackback/': 'trackback',
'http://wellformedweb.org/commentAPI/': 'wfw',
'http://purl.org/rss/1.0/modules/wiki/': 'wiki',
'http://www.w3.org/1999/xhtml': 'xhtml',
'http://www.w3.org/1999/xlink': 'xlink',
'http://www.w3.org/XML/1998/namespace': 'xml',
}
_matchnamespaces = {}
can_be_relative_uri = set(['link', 'id', 'wfw_comment', 'wfw_commentrss', 'docs', 'url', 'href', 'comments', 'icon', 'logo'])
can_contain_relative_uris = set(['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description'])
can_contain_dangerous_markup = set(['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description'])
html_types = [u'text/html', u'application/xhtml+xml']
def __init__(self, baseuri=None, baselang=None, encoding=u'utf-8'):
if not self._matchnamespaces:
for k, v in self.namespaces.items():
self._matchnamespaces[k.lower()] = v
self.feeddata = FeedParserDict() # feed-level data
self.encoding = encoding # character encoding
self.entries = [] # list of entry-level data
self.version = u'' # feed type/version, see SUPPORTED_VERSIONS
self.namespacesInUse = {} # dictionary of namespaces defined by the feed
# the following are used internally to track state;
# this is really out of control and should be refactored
self.infeed = 0
self.inentry = 0
self.incontent = 0
self.intextinput = 0
self.inimage = 0
self.inauthor = 0
self.incontributor = 0
self.inpublisher = 0
self.insource = 0
self.sourcedata = FeedParserDict()
self.contentparams = FeedParserDict()
self._summaryKey = None
self.namespacemap = {}
self.elementstack = []
self.basestack = []
self.langstack = []
self.baseuri = baseuri or u''
self.lang = baselang or None
self.svgOK = 0
self.title_depth = -1
self.depth = 0
if baselang:
self.feeddata['language'] = baselang.replace('_','-')
# A map of the following form:
# {
# object_that_value_is_set_on: {
# property_name: depth_of_node_property_was_extracted_from,
# other_property: depth_of_node_property_was_extracted_from,
# },
# }
self.property_depth_map = {}
def _normalize_attributes(self, kv):
k = kv[0].lower()
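        # lowercase the value only for 'rel' and 'type' attributes (old-style and/or idiom),
        # e.g. ('TYPE', 'TEXT/HTML') -> ('type', 'text/html'); other attribute values keep their case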
v = k in ('rel', 'type') and kv[1].lower() or kv[1]
# the sgml parser doesn't handle entities in attributes, nor
# does it pass the attribute values through as unicode, while
# strict xml parsers do -- account for this difference
if isinstance(self, _LooseFeedParser):
            v = v.replace('&amp;', '&')
if not isinstance(v, unicode):
v = v.decode('utf-8')
return (k, v)
def unknown_starttag(self, tag, attrs):
# increment depth counter
self.depth += 1
# normalize attrs
attrs = map(self._normalize_attributes, attrs)
# track xml:base and xml:lang
attrsD = dict(attrs)
baseuri = attrsD.get('xml:base', attrsD.get('base')) or self.baseuri
if not isinstance(baseuri, unicode):
baseuri = baseuri.decode(self.encoding, 'ignore')
# ensure that self.baseuri is always an absolute URI that
        # uses a whitelisted URI scheme (e.g. not `javascript:`)
if self.baseuri:
self.baseuri = _makeSafeAbsoluteURI(self.baseuri, baseuri) or self.baseuri
else:
self.baseuri = _urljoin(self.baseuri, baseuri)
lang = attrsD.get('xml:lang', attrsD.get('lang'))
if lang == '':
# xml:lang could be explicitly set to '', we need to capture that
lang = None
elif lang is None:
# if no xml:lang is specified, use parent lang
lang = self.lang
if lang:
if tag in ('feed', 'rss', 'rdf:RDF'):
self.feeddata['language'] = lang.replace('_','-')
self.lang = lang
self.basestack.append(self.baseuri)
self.langstack.append(lang)
# track namespaces
for prefix, uri in attrs:
if prefix.startswith('xmlns:'):
self.trackNamespace(prefix[6:], uri)
elif prefix == 'xmlns':
self.trackNamespace(None, uri)
# track inline content
if self.incontent and not self.contentparams.get('type', u'xml').endswith(u'xml'):
if tag in ('xhtml:div', 'div'):
return # typepad does this 10/2007
# element declared itself as escaped markup, but it isn't really
self.contentparams['type'] = u'application/xhtml+xml'
if self.incontent and self.contentparams.get('type') == u'application/xhtml+xml':
            if tag.find(':') != -1:
prefix, tag = tag.split(':', 1)
namespace = self.namespacesInUse.get(prefix, '')
if tag=='math' and namespace=='http://www.w3.org/1998/Math/MathML':
attrs.append(('xmlns',namespace))
if tag=='svg' and namespace=='http://www.w3.org/2000/svg':
attrs.append(('xmlns',namespace))
if tag == 'svg':
self.svgOK += 1
return self.handle_data('<%s%s>' % (tag, self.strattrs(attrs)), escape=0)
# match namespaces
        if tag.find(':') != -1:
prefix, suffix = tag.split(':', 1)
else:
prefix, suffix = '', tag
prefix = self.namespacemap.get(prefix, prefix)
if prefix:
prefix = prefix + '_'
# special hack for better tracking of empty textinput/image elements in illformed feeds
if (not prefix) and tag not in ('title', 'link', 'description', 'name'):
self.intextinput = 0
if (not prefix) and tag not in ('title', 'link', 'description', 'url', 'href', 'width', 'height'):
self.inimage = 0
# call special handler (if defined) or default handler
methodname = '_start_' + prefix + suffix
try:
method = getattr(self, methodname)
return method(attrsD)
except AttributeError:
# Since there's no handler or something has gone wrong we explicitly add the element and its attributes
unknown_tag = prefix + suffix
if len(attrsD) == 0:
                # No attributes so merge it into the enclosing dictionary
return self.push(unknown_tag, 1)
else:
# Has attributes so create it in its own dictionary
context = self._getContext()
context[unknown_tag] = attrsD
def unknown_endtag(self, tag):
# match namespaces
        if tag.find(':') != -1:
prefix, suffix = tag.split(':', 1)
else:
prefix, suffix = '', tag
prefix = self.namespacemap.get(prefix, prefix)
if prefix:
prefix = prefix + '_'
if suffix == 'svg' and self.svgOK:
self.svgOK -= 1
# call special handler (if defined) or default handler
methodname = '_end_' + prefix + suffix
try:
if self.svgOK:
raise AttributeError()
method = getattr(self, methodname)
method()
except AttributeError:
self.pop(prefix + suffix)
# track inline content
if self.incontent and not self.contentparams.get('type', u'xml').endswith(u'xml'):
# element declared itself as escaped markup, but it isn't really
if tag in ('xhtml:div', 'div'):
return # typepad does this 10/2007
self.contentparams['type'] = u'application/xhtml+xml'
if self.incontent and self.contentparams.get('type') == u'application/xhtml+xml':
tag = tag.split(':')[-1]
self.handle_data('</%s>' % tag, escape=0)
# track xml:base and xml:lang going out of scope
if self.basestack:
self.basestack.pop()
if self.basestack and self.basestack[-1]:
self.baseuri = self.basestack[-1]
if self.langstack:
self.langstack.pop()
if self.langstack: # and (self.langstack[-1] is not None):
self.lang = self.langstack[-1]
self.depth -= 1
def handle_charref(self, ref):
        # called for each character reference, e.g. for '&#160;', ref will be '160'
if not self.elementstack:
return
ref = ref.lower()
if ref in ('34', '38', '39', '60', '62', 'x22', 'x26', 'x27', 'x3c', 'x3e'):
text = '&#%s;' % ref
else:
if ref[0] == 'x':
c = int(ref[1:], 16)
else:
c = int(ref)
text = unichr(c).encode('utf-8')
self.elementstack[-1][2].append(text)
def handle_entityref(self, ref):
        # called for each entity reference, e.g. for '&copy;', ref will be 'copy'
if not self.elementstack:
return
if ref in ('lt', 'gt', 'quot', 'amp', 'apos'):
text = '&%s;' % ref
elif ref in self.entities:
text = self.entities[ref]
if text.startswith('&#') and text.endswith(';'):
return self.handle_entityref(text)
else:
try:
name2codepoint[ref]
except KeyError:
text = '&%s;' % ref
else:
text = unichr(name2codepoint[ref]).encode('utf-8')
self.elementstack[-1][2].append(text)
def handle_data(self, text, escape=1):
# called for each block of plain text, i.e. outside of any tag and
# not containing any character or entity references
if not self.elementstack:
return
if escape and self.contentparams.get('type') == u'application/xhtml+xml':
text = _xmlescape(text)
self.elementstack[-1][2].append(text)
def handle_comment(self, text):
# called for each comment, e.g. <!-- insert message here -->
pass
def handle_pi(self, text):
# called for each processing instruction, e.g. <?instruction>
pass
def handle_decl(self, text):
pass
def parse_declaration(self, i):
# override internal declaration handler to handle CDATA blocks
if self.rawdata[i:i+9] == '<![CDATA[':
k = self.rawdata.find(']]>', i)
if k == -1:
# CDATA block began but didn't finish
k = len(self.rawdata)
return k
self.handle_data(_xmlescape(self.rawdata[i+9:k]), 0)
return k+3
else:
k = self.rawdata.find('>', i)
if k >= 0:
return k+1
else:
# We have an incomplete CDATA block.
return k
def mapContentType(self, contentType):
contentType = contentType.lower()
if contentType == 'text' or contentType == 'plain':
contentType = u'text/plain'
elif contentType == 'html':
contentType = u'text/html'
elif contentType == 'xhtml':
contentType = u'application/xhtml+xml'
return contentType
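    # Register a namespace declared by the feed and, as a side effect, sniff the
    # feed version from well-known namespace URIs when it is not yet known.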
def trackNamespace(self, prefix, uri):
loweruri = uri.lower()
if not self.version:
if (prefix, loweruri) == (None, 'http://my.netscape.com/rdf/simple/0.9/'):
self.version = u'rss090'
elif loweruri == 'http://purl.org/rss/1.0/':
self.version = u'rss10'
elif loweruri == 'http://www.w3.org/2005/atom':
self.version = u'atom10'
        if loweruri.find(u'backend.userland.com/rss') != -1:
# match any backend.userland.com namespace
uri = u'http://backend.userland.com/rss'
loweruri = uri
if loweruri in self._matchnamespaces:
self.namespacemap[prefix] = self._matchnamespaces[loweruri]
self.namespacesInUse[self._matchnamespaces[loweruri]] = uri
else:
self.namespacesInUse[prefix or ''] = uri
def resolveURI(self, uri):
return _urljoin(self.baseuri or u'', uri)
def decodeEntities(self, element, data):
return data
def strattrs(self, attrs):
return ''.join([' %s="%s"' % (t[0],_xmlescape(t[1],{'"':'"'})) for t in attrs])
def push(self, element, expectingText):
self.elementstack.append([element, expectingText, []])
def pop(self, element, stripWhitespace=1):
if not self.elementstack:
return
if self.elementstack[-1][0] != element:
return
element, expectingText, pieces = self.elementstack.pop()
if self.version == u'atom10' and self.contentparams.get('type', u'text') == u'application/xhtml+xml':
# remove enclosing child element, but only if it is a <div> and
# only if all the remaining content is nested underneath it.
# This means that the divs would be retained in the following:
# <div>foo</div><div>bar</div>
while pieces and len(pieces)>1 and not pieces[-1].strip():
del pieces[-1]
while pieces and len(pieces)>1 and not pieces[0].strip():
del pieces[0]
if pieces and (pieces[0] == '<div>' or pieces[0].startswith('<div ')) and pieces[-1]=='</div>':
depth = 0
for piece in pieces[:-1]:
if piece.startswith('</'):
depth -= 1
if depth == 0:
break
elif piece.startswith('<') and not piece.endswith('/>'):
depth += 1
else:
pieces = pieces[1:-1]
# Ensure each piece is a str for Python 3
for (i, v) in enumerate(pieces):
if not isinstance(v, unicode):
pieces[i] = v.decode('utf-8')
output = u''.join(pieces)
if stripWhitespace:
output = output.strip()
if not expectingText:
return output
# decode base64 content
if base64 and self.contentparams.get('base64', 0):
try:
output = _base64decode(output)
except binascii.Error:
pass
except binascii.Incomplete:
pass
except TypeError:
# In Python 3, base64 takes and outputs bytes, not str
# This may not be the most correct way to accomplish this
output = _base64decode(output.encode('utf-8')).decode('utf-8')
# resolve relative URIs
if (element in self.can_be_relative_uri) and output:
output = self.resolveURI(output)
# decode entities within embedded markup
if not self.contentparams.get('base64', 0):
output = self.decodeEntities(element, output)
# some feed formats require consumers to guess
# whether the content is html or plain text
if not self.version.startswith(u'atom') and self.contentparams.get('type') == u'text/plain':
if self.lookslikehtml(output):
self.contentparams['type'] = u'text/html'
# remove temporary cruft from contentparams
try:
del self.contentparams['mode']
except KeyError:
pass
try:
del self.contentparams['base64']
except KeyError:
pass
is_htmlish = self.mapContentType(self.contentparams.get('type', u'text/html')) in self.html_types
# resolve relative URIs within embedded markup
if is_htmlish and RESOLVE_RELATIVE_URIS:
if element in self.can_contain_relative_uris:
output = _resolveRelativeURIs(output, self.baseuri, self.encoding, self.contentparams.get('type', u'text/html'))
# parse microformats
# (must do this before sanitizing because some microformats
# rely on elements that we sanitize)
if PARSE_MICROFORMATS and is_htmlish and element in ['content', 'description', 'summary']:
mfresults = _parseMicroformats(output, self.baseuri, self.encoding)
if mfresults:
for tag in mfresults.get('tags', []):
self._addTag(tag['term'], tag['scheme'], tag['label'])
for enclosure in mfresults.get('enclosures', []):
self._start_enclosure(enclosure)
for xfn in mfresults.get('xfn', []):
self._addXFN(xfn['relationships'], xfn['href'], xfn['name'])
vcard = mfresults.get('vcard')
if vcard:
self._getContext()['vcard'] = vcard
# sanitize embedded markup
if is_htmlish and SANITIZE_HTML:
if element in self.can_contain_dangerous_markup:
output = _sanitizeHTML(output, self.encoding, self.contentparams.get('type', u'text/html'))
if self.encoding and not isinstance(output, unicode):
output = output.decode(self.encoding, 'ignore')
# address common error where people take data that is already
# utf-8, presume that it is iso-8859-1, and re-encode it.
if self.encoding in (u'utf-8', u'utf-8_INVALID_PYTHON_3') and isinstance(output, unicode):
try:
output = output.encode('iso-8859-1').decode('utf-8')
except (UnicodeEncodeError, UnicodeDecodeError):
pass
# map win-1252 extensions to the proper code points
if isinstance(output, unicode):
output = output.translate(_cp1252)
# categories/tags/keywords/whatever are handled in _end_category
if element == 'category':
return output
if element == 'title' and -1 < self.title_depth <= self.depth:
return output
# store output in appropriate place(s)
if self.inentry and not self.insource:
if element == 'content':
self.entries[-1].setdefault(element, [])
contentparams = copy.deepcopy(self.contentparams)
contentparams['value'] = output
self.entries[-1][element].append(contentparams)
elif element == 'link':
if not self.inimage:
# query variables in urls in link elements are improperly
# converted from `?a=1&b=2` to `?a=1&b;=2` as if they're
# unhandled character references. fix this special case.
output = re.sub("&([A-Za-z0-9_]+);", "&\g<1>", output)
self.entries[-1][element] = output
if output:
self.entries[-1]['links'][-1]['href'] = output
else:
if element == 'description':
element = 'summary'
old_value_depth = self.property_depth_map.setdefault(self.entries[-1], {}).get(element)
if old_value_depth is None or self.depth <= old_value_depth:
self.property_depth_map[self.entries[-1]][element] = self.depth
self.entries[-1][element] = output
if self.incontent:
contentparams = copy.deepcopy(self.contentparams)
contentparams['value'] = output
self.entries[-1][element + '_detail'] = contentparams
elif (self.infeed or self.insource):# and (not self.intextinput) and (not self.inimage):
context = self._getContext()
if element == 'description':
element = 'subtitle'
context[element] = output
if element == 'link':
# fix query variables; see above for the explanation
output = re.sub("&([A-Za-z0-9_]+);", "&\g<1>", output)
context[element] = output
context['links'][-1]['href'] = output
elif self.incontent:
contentparams = copy.deepcopy(self.contentparams)
contentparams['value'] = output
context[element + '_detail'] = contentparams
return output
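    # pushContent/popContent bracket elements whose character data is the payload
    # (content, summary, title, subtitle, ...) and record its type, language, and xml:base.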
def pushContent(self, tag, attrsD, defaultContentType, expectingText):
self.incontent += 1
if self.lang:
self.lang=self.lang.replace('_','-')
self.contentparams = FeedParserDict({
'type': self.mapContentType(attrsD.get('type', defaultContentType)),
'language': self.lang,
'base': self.baseuri})
self.contentparams['base64'] = self._isBase64(attrsD, self.contentparams)
self.push(tag, expectingText)
def popContent(self, tag):
value = self.pop(tag)
self.incontent -= 1
self.contentparams.clear()
return value
# a number of elements in a number of RSS variants are nominally plain
# text, but this is routinely ignored. This is an attempt to detect
# the most common cases. As false positives often result in silent
# data loss, this function errs on the conservative side.
@staticmethod
def lookslikehtml(s):
# must have a close tag or an entity reference to qualify
if not (re.search(r'</(\w+)>',s) or re.search("&#?\w+;",s)):
return
# all tags must be in a restricted subset of valid HTML tags
if filter(lambda t: t.lower() not in _HTMLSanitizer.acceptable_elements,
re.findall(r'</?(\w+)',s)):
return
# all entities must have been defined as valid HTML entities
if filter(lambda e: e not in entitydefs.keys(), re.findall(r'&(\w+);', s)):
return
return 1
def _mapToStandardPrefix(self, name):
colonpos = name.find(':')
        if colonpos != -1:
prefix = name[:colonpos]
suffix = name[colonpos+1:]
prefix = self.namespacemap.get(prefix, prefix)
name = prefix + ':' + suffix
return name
def _getAttribute(self, attrsD, name):
return attrsD.get(self._mapToStandardPrefix(name))
def _isBase64(self, attrsD, contentparams):
if attrsD.get('mode', '') == 'base64':
return 1
if self.contentparams['type'].startswith(u'text/'):
return 0
if self.contentparams['type'].endswith(u'+xml'):
return 0
if self.contentparams['type'].endswith(u'/xml'):
return 0
return 1
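    # Different feed formats use 'url', 'uri', or 'href' for the same thing;
    # collapse whichever is present into a single 'href' attribute,
    # e.g. {'url': 'http://example.org/a.mp3'} becomes {'href': 'http://example.org/a.mp3'}.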
def _itsAnHrefDamnIt(self, attrsD):
href = attrsD.get('url', attrsD.get('uri', attrsD.get('href', None)))
if href:
try:
del attrsD['url']
except KeyError:
pass
try:
del attrsD['uri']
except KeyError:
pass
attrsD['href'] = href
return attrsD
def _save(self, key, value, overwrite=False):
context = self._getContext()
if overwrite:
context[key] = value
else:
context.setdefault(key, value)
def _start_rss(self, attrsD):
versionmap = {'0.91': u'rss091u',
'0.92': u'rss092',
'0.93': u'rss093',
'0.94': u'rss094'}
        # If we're here, then this is an RSS feed.
        # If we don't have a version, or have a version that starts with something
        # other than RSS, then there's been a mistake. Correct it.
if not self.version or not self.version.startswith(u'rss'):
attr_version = attrsD.get('version', '')
version = versionmap.get(attr_version)
if version:
self.version = version
elif attr_version.startswith('2.'):
self.version = u'rss20'
else:
self.version = u'rss'
def _start_channel(self, attrsD):
self.infeed = 1
self._cdf_common(attrsD)
def _cdf_common(self, attrsD):
if 'lastmod' in attrsD:
self._start_modified({})
self.elementstack[-1][-1] = attrsD['lastmod']
self._end_modified()
if 'href' in attrsD:
self._start_link({})
self.elementstack[-1][-1] = attrsD['href']
self._end_link()
def _start_feed(self, attrsD):
self.infeed = 1
versionmap = {'0.1': u'atom01',
'0.2': u'atom02',
'0.3': u'atom03'}
if not self.version:
attr_version = attrsD.get('version')
version = versionmap.get(attr_version)
if version:
self.version = version
else:
self.version = u'atom'
def _end_channel(self):
self.infeed = 0
_end_feed = _end_channel
def _start_image(self, attrsD):
context = self._getContext()
if not self.inentry:
context.setdefault('image', FeedParserDict())
self.inimage = 1
self.title_depth = -1
self.push('image', 0)
def _end_image(self):
self.pop('image')
self.inimage = 0
def _start_textinput(self, attrsD):
context = self._getContext()
context.setdefault('textinput', FeedParserDict())
self.intextinput = 1
self.title_depth = -1
self.push('textinput', 0)
_start_textInput = _start_textinput
def _end_textinput(self):
self.pop('textinput')
self.intextinput = 0
_end_textInput = _end_textinput
def _start_author(self, attrsD):
self.inauthor = 1
self.push('author', 1)
# Append a new FeedParserDict when expecting an author
context = self._getContext()
context.setdefault('authors', [])
context['authors'].append(FeedParserDict())
_start_managingeditor = _start_author
_start_dc_author = _start_author
_start_dc_creator = _start_author
_start_itunes_author = _start_author
def _end_author(self):
self.pop('author')
self.inauthor = 0
self._sync_author_detail()
_end_managingeditor = _end_author
_end_dc_author = _end_author
_end_dc_creator = _end_author
_end_itunes_author = _end_author
def _start_itunes_owner(self, attrsD):
self.inpublisher = 1
self.push('publisher', 0)
def _end_itunes_owner(self):
self.pop('publisher')
self.inpublisher = 0
self._sync_author_detail('publisher')
def _start_contributor(self, attrsD):
self.incontributor = 1
context = self._getContext()
context.setdefault('contributors', [])
context['contributors'].append(FeedParserDict())
self.push('contributor', 0)
def _end_contributor(self):
self.pop('contributor')
self.incontributor = 0
def _start_dc_contributor(self, attrsD):
self.incontributor = 1
context = self._getContext()
context.setdefault('contributors', [])
context['contributors'].append(FeedParserDict())
self.push('name', 0)
def _end_dc_contributor(self):
self._end_name()
self.incontributor = 0
def _start_name(self, attrsD):
self.push('name', 0)
_start_itunes_name = _start_name
def _end_name(self):
value = self.pop('name')
if self.inpublisher:
self._save_author('name', value, 'publisher')
elif self.inauthor:
self._save_author('name', value)
elif self.incontributor:
self._save_contributor('name', value)
elif self.intextinput:
context = self._getContext()
context['name'] = value
_end_itunes_name = _end_name
def _start_width(self, attrsD):
self.push('width', 0)
def _end_width(self):
value = self.pop('width')
try:
value = int(value)
except ValueError:
value = 0
if self.inimage:
context = self._getContext()
context['width'] = value
def _start_height(self, attrsD):
self.push('height', 0)
def _end_height(self):
value = self.pop('height')
try:
value = int(value)
except ValueError:
value = 0
if self.inimage:
context = self._getContext()
context['height'] = value
def _start_url(self, attrsD):
self.push('href', 1)
_start_homepage = _start_url
_start_uri = _start_url
def _end_url(self):
value = self.pop('href')
if self.inauthor:
self._save_author('href', value)
elif self.incontributor:
self._save_contributor('href', value)
_end_homepage = _end_url
_end_uri = _end_url
def _start_email(self, attrsD):
self.push('email', 0)
_start_itunes_email = _start_email
def _end_email(self):
value = self.pop('email')
if self.inpublisher:
self._save_author('email', value, 'publisher')
elif self.inauthor:
self._save_author('email', value)
elif self.incontributor:
self._save_contributor('email', value)
_end_itunes_email = _end_email
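    # Pick the dictionary that values should currently be written into:
    # the source, image, or textinput sub-dict, the current entry, or the feed itself.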
def _getContext(self):
if self.insource:
context = self.sourcedata
elif self.inimage and 'image' in self.feeddata:
context = self.feeddata['image']
elif self.intextinput:
context = self.feeddata['textinput']
elif self.inentry:
context = self.entries[-1]
else:
context = self.feeddata
return context
def _save_author(self, key, value, prefix='author'):
context = self._getContext()
context.setdefault(prefix + '_detail', FeedParserDict())
context[prefix + '_detail'][key] = value
self._sync_author_detail()
context.setdefault('authors', [FeedParserDict()])
context['authors'][-1][key] = value
def _save_contributor(self, key, value):
context = self._getContext()
context.setdefault('contributors', [FeedParserDict()])
context['contributors'][-1][key] = value
def _sync_author_detail(self, key='author'):
context = self._getContext()
detail = context.get('%s_detail' % key)
if detail:
name = detail.get('name')
email = detail.get('email')
if name and email:
context[key] = u'%s (%s)' % (name, email)
elif name:
context[key] = name
elif email:
context[key] = email
else:
author, email = context.get(key), None
if not author:
return
emailmatch = re.search(ur'''(([a-zA-Z0-9\_\-\.\+]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?))(\?subject=\S+)?''', author)
if emailmatch:
email = emailmatch.group(0)
# probably a better way to do the following, but it passes all the tests
author = author.replace(email, u'')
author = author.replace(u'()', u'')
author = author.replace(u'<>', u'')
                author = author.replace(u'&lt;&gt;', u'')
author = author.strip()
if author and (author[0] == u'('):
author = author[1:]
if author and (author[-1] == u')'):
author = author[:-1]
author = author.strip()
if author or email:
context.setdefault('%s_detail' % key, FeedParserDict())
if author:
context['%s_detail' % key]['name'] = author
if email:
context['%s_detail' % key]['email'] = email
def _start_subtitle(self, attrsD):
self.pushContent('subtitle', attrsD, u'text/plain', 1)
_start_tagline = _start_subtitle
_start_itunes_subtitle = _start_subtitle
def _end_subtitle(self):
self.popContent('subtitle')
_end_tagline = _end_subtitle
_end_itunes_subtitle = _end_subtitle
def _start_rights(self, attrsD):
self.pushContent('rights', attrsD, u'text/plain', 1)
_start_dc_rights = _start_rights
_start_copyright = _start_rights
def _end_rights(self):
self.popContent('rights')
_end_dc_rights = _end_rights
_end_copyright = _end_rights
def _start_item(self, attrsD):
self.entries.append(FeedParserDict())
self.push('item', 0)
self.inentry = 1
self.guidislink = 0
self.title_depth = -1
id = self._getAttribute(attrsD, 'rdf:about')
if id:
context = self._getContext()
context['id'] = id
self._cdf_common(attrsD)
_start_entry = _start_item
def _end_item(self):
self.pop('item')
self.inentry = 0
_end_entry = _end_item
def _start_dc_language(self, attrsD):
self.push('language', 1)
_start_language = _start_dc_language
def _end_dc_language(self):
self.lang = self.pop('language')
_end_language = _end_dc_language
def _start_dc_publisher(self, attrsD):
self.push('publisher', 1)
_start_webmaster = _start_dc_publisher
def _end_dc_publisher(self):
self.pop('publisher')
self._sync_author_detail('publisher')
_end_webmaster = _end_dc_publisher
def _start_published(self, attrsD):
self.push('published', 1)
_start_dcterms_issued = _start_published
_start_issued = _start_published
_start_pubdate = _start_published
def _end_published(self):
value = self.pop('published')
self._save('published_parsed', _parse_date(value), overwrite=True)
_end_dcterms_issued = _end_published
_end_issued = _end_published
_end_pubdate = _end_published
def _start_updated(self, attrsD):
self.push('updated', 1)
_start_modified = _start_updated
_start_dcterms_modified = _start_updated
_start_dc_date = _start_updated
_start_lastbuilddate = _start_updated
def _end_updated(self):
value = self.pop('updated')
parsed_value = _parse_date(value)
self._save('updated_parsed', parsed_value, overwrite=True)
_end_modified = _end_updated
_end_dcterms_modified = _end_updated
_end_dc_date = _end_updated
_end_lastbuilddate = _end_updated
def _start_created(self, attrsD):
self.push('created', 1)
_start_dcterms_created = _start_created
def _end_created(self):
value = self.pop('created')
self._save('created_parsed', _parse_date(value), overwrite=True)
_end_dcterms_created = _end_created
def _start_expirationdate(self, attrsD):
self.push('expired', 1)
def _end_expirationdate(self):
self._save('expired_parsed', _parse_date(self.pop('expired')), overwrite=True)
def _start_cc_license(self, attrsD):
context = self._getContext()
value = self._getAttribute(attrsD, 'rdf:resource')
attrsD = FeedParserDict()
attrsD['rel'] = u'license'
if value:
attrsD['href']=value
context.setdefault('links', []).append(attrsD)
def _start_creativecommons_license(self, attrsD):
self.push('license', 1)
_start_creativeCommons_license = _start_creativecommons_license
def _end_creativecommons_license(self):
value = self.pop('license')
context = self._getContext()
attrsD = FeedParserDict()
attrsD['rel'] = u'license'
if value:
attrsD['href'] = value
context.setdefault('links', []).append(attrsD)
del context['license']
_end_creativeCommons_license = _end_creativecommons_license
def _addXFN(self, relationships, href, name):
context = self._getContext()
xfn = context.setdefault('xfn', [])
value = FeedParserDict({'relationships': relationships, 'href': href, 'name': name})
if value not in xfn:
xfn.append(value)
def _addTag(self, term, scheme, label):
context = self._getContext()
tags = context.setdefault('tags', [])
if (not term) and (not scheme) and (not label):
return
value = FeedParserDict({'term': term, 'scheme': scheme, 'label': label})
if value not in tags:
tags.append(value)
def _start_category(self, attrsD):
term = attrsD.get('term')
scheme = attrsD.get('scheme', attrsD.get('domain'))
label = attrsD.get('label')
self._addTag(term, scheme, label)
self.push('category', 1)
_start_dc_subject = _start_category
_start_keywords = _start_category
def _start_media_category(self, attrsD):
attrsD.setdefault('scheme', u'http://search.yahoo.com/mrss/category_schema')
self._start_category(attrsD)
def _end_itunes_keywords(self):
for term in self.pop('itunes_keywords').split(','):
if term.strip():
self._addTag(term.strip(), u'http://www.itunes.com/', None)
def _start_itunes_category(self, attrsD):
self._addTag(attrsD.get('text'), u'http://www.itunes.com/', None)
self.push('category', 1)
def _end_category(self):
value = self.pop('category')
if not value:
return
context = self._getContext()
tags = context['tags']
if value and len(tags) and not tags[-1]['term']:
tags[-1]['term'] = value
else:
self._addTag(value, None, None)
_end_dc_subject = _end_category
_end_keywords = _end_category
_end_itunes_category = _end_category
_end_media_category = _end_category
def _start_cloud(self, attrsD):
self._getContext()['cloud'] = FeedParserDict(attrsD)
def _start_link(self, attrsD):
attrsD.setdefault('rel', u'alternate')
if attrsD['rel'] == u'self':
attrsD.setdefault('type', u'application/atom+xml')
else:
attrsD.setdefault('type', u'text/html')
context = self._getContext()
attrsD = self._itsAnHrefDamnIt(attrsD)
if 'href' in attrsD:
attrsD['href'] = self.resolveURI(attrsD['href'])
expectingText = self.infeed or self.inentry or self.insource
context.setdefault('links', [])
if not (self.inentry and self.inimage):
context['links'].append(FeedParserDict(attrsD))
if 'href' in attrsD:
expectingText = 0
if (attrsD.get('rel') == u'alternate') and (self.mapContentType(attrsD.get('type')) in self.html_types):
context['link'] = attrsD['href']
else:
self.push('link', expectingText)
def _end_link(self):
value = self.pop('link')
def _start_guid(self, attrsD):
self.guidislink = (attrsD.get('ispermalink', 'true') == 'true')
self.push('id', 1)
_start_id = _start_guid
def _end_guid(self):
value = self.pop('id')
self._save('guidislink', self.guidislink and 'link' not in self._getContext())
if self.guidislink:
# guid acts as link, but only if 'ispermalink' is not present or is 'true',
# and only if the item doesn't already have a link element
self._save('link', value)
_end_id = _end_guid
def _start_title(self, attrsD):
if self.svgOK:
return self.unknown_starttag('title', attrsD.items())
self.pushContent('title', attrsD, u'text/plain', self.infeed or self.inentry or self.insource)
_start_dc_title = _start_title
_start_media_title = _start_title
def _end_title(self):
if self.svgOK:
return
value = self.popContent('title')
if not value:
return
self.title_depth = self.depth
_end_dc_title = _end_title
def _end_media_title(self):
title_depth = self.title_depth
self._end_title()
self.title_depth = title_depth
def _start_description(self, attrsD):
context = self._getContext()
if 'summary' in context:
self._summaryKey = 'content'
self._start_content(attrsD)
else:
self.pushContent('description', attrsD, u'text/html', self.infeed or self.inentry or self.insource)
_start_dc_description = _start_description
def _start_abstract(self, attrsD):
self.pushContent('description', attrsD, u'text/plain', self.infeed or self.inentry or self.insource)
def _end_description(self):
if self._summaryKey == 'content':
self._end_content()
else:
value = self.popContent('description')
self._summaryKey = None
_end_abstract = _end_description
_end_dc_description = _end_description
def _start_info(self, attrsD):
self.pushContent('info', attrsD, u'text/plain', 1)
_start_feedburner_browserfriendly = _start_info
def _end_info(self):
self.popContent('info')
_end_feedburner_browserfriendly = _end_info
def _start_generator(self, attrsD):
if attrsD:
attrsD = self._itsAnHrefDamnIt(attrsD)
if 'href' in attrsD:
attrsD['href'] = self.resolveURI(attrsD['href'])
self._getContext()['generator_detail'] = FeedParserDict(attrsD)
self.push('generator', 1)
def _end_generator(self):
value = self.pop('generator')
context = self._getContext()
if 'generator_detail' in context:
context['generator_detail']['name'] = value
def _start_admin_generatoragent(self, attrsD):
self.push('generator', 1)
value = self._getAttribute(attrsD, 'rdf:resource')
if value:
self.elementstack[-1][2].append(value)
self.pop('generator')
self._getContext()['generator_detail'] = FeedParserDict({'href': value})
def _start_admin_errorreportsto(self, attrsD):
self.push('errorreportsto', 1)
value = self._getAttribute(attrsD, 'rdf:resource')
if value:
self.elementstack[-1][2].append(value)
self.pop('errorreportsto')
def _start_summary(self, attrsD):
context = self._getContext()
if 'summary' in context:
self._summaryKey = 'content'
self._start_content(attrsD)
else:
self._summaryKey = 'summary'
self.pushContent(self._summaryKey, attrsD, u'text/plain', 1)
_start_itunes_summary = _start_summary
def _end_summary(self):
if self._summaryKey == 'content':
self._end_content()
else:
self.popContent(self._summaryKey or 'summary')
self._summaryKey = None
_end_itunes_summary = _end_summary
def _start_enclosure(self, attrsD):
attrsD = self._itsAnHrefDamnIt(attrsD)
context = self._getContext()
attrsD['rel'] = u'enclosure'
context.setdefault('links', []).append(FeedParserDict(attrsD))
def _start_source(self, attrsD):
if 'url' in attrsD:
# This means that we're processing a source element from an RSS 2.0 feed
self.sourcedata['href'] = attrsD[u'url']
self.push('source', 1)
self.insource = 1
self.title_depth = -1
def _end_source(self):
self.insource = 0
value = self.pop('source')
if value:
self.sourcedata['title'] = value
self._getContext()['source'] = copy.deepcopy(self.sourcedata)
self.sourcedata.clear()
def _start_content(self, attrsD):
self.pushContent('content', attrsD, u'text/plain', 1)
src = attrsD.get('src')
if src:
self.contentparams['src'] = src
self.push('content', 1)
def _start_body(self, attrsD):
self.pushContent('content', attrsD, u'application/xhtml+xml', 1)
_start_xhtml_body = _start_body
def _start_content_encoded(self, attrsD):
self.pushContent('content', attrsD, u'text/html', 1)
_start_fullitem = _start_content_encoded
def _end_content(self):
copyToSummary = self.mapContentType(self.contentparams.get('type')) in ([u'text/plain'] + self.html_types)
value = self.popContent('content')
if copyToSummary:
self._save('summary', value)
_end_body = _end_content
_end_xhtml_body = _end_content
_end_content_encoded = _end_content
_end_fullitem = _end_content
def _start_itunes_image(self, attrsD):
self.push('itunes_image', 0)
if attrsD.get('href'):
self._getContext()['image'] = FeedParserDict({'href': attrsD.get('href')})
elif attrsD.get('url'):
self._getContext()['image'] = FeedParserDict({'href': attrsD.get('url')})
_start_itunes_link = _start_itunes_image
def _end_itunes_block(self):
value = self.pop('itunes_block', 0)
self._getContext()['itunes_block'] = (value == 'yes') and 1 or 0
def _end_itunes_explicit(self):
value = self.pop('itunes_explicit', 0)
# Convert 'yes' -> True, 'clean' to False, and any other value to None
# False and None both evaluate as False, so the difference can be ignored
# by applications that only need to know if the content is explicit.
self._getContext()['itunes_explicit'] = (None, False, True)[(value == 'yes' and 2) or value == 'clean' or 0]
def _start_media_content(self, attrsD):
context = self._getContext()
context.setdefault('media_content', [])
context['media_content'].append(attrsD)
def _start_media_thumbnail(self, attrsD):
context = self._getContext()
context.setdefault('media_thumbnail', [])
self.push('url', 1) # new
context['media_thumbnail'].append(attrsD)
def _end_media_thumbnail(self):
url = self.pop('url')
context = self._getContext()
        if url is not None and url.strip():
if 'url' not in context['media_thumbnail'][-1]:
context['media_thumbnail'][-1]['url'] = url
def _start_media_player(self, attrsD):
self.push('media_player', 0)
self._getContext()['media_player'] = FeedParserDict(attrsD)
def _end_media_player(self):
value = self.pop('media_player')
context = self._getContext()
context['media_player']['content'] = value
def _start_newlocation(self, attrsD):
self.push('newlocation', 1)
def _end_newlocation(self):
url = self.pop('newlocation')
context = self._getContext()
# don't set newlocation if the context isn't right
if context is not self.feeddata:
return
context['newlocation'] = _makeSafeAbsoluteURI(self.baseuri, url.strip())
if _XML_AVAILABLE:
class _StrictFeedParser(_FeedParserMixin, xml.sax.handler.ContentHandler):
def __init__(self, baseuri, baselang, encoding):
xml.sax.handler.ContentHandler.__init__(self)
_FeedParserMixin.__init__(self, baseuri, baselang, encoding)
self.bozo = 0
self.exc = None
self.decls = {}
def startPrefixMapping(self, prefix, uri):
if not uri:
return
# Jython uses '' instead of None; standardize on None
prefix = prefix or None
self.trackNamespace(prefix, uri)
if prefix and uri == 'http://www.w3.org/1999/xlink':
self.decls['xmlns:' + prefix] = uri
def startElementNS(self, name, qname, attrs):
namespace, localname = name
lowernamespace = str(namespace or '').lower()
            if lowernamespace.find(u'backend.userland.com/rss') != -1:
# match any backend.userland.com namespace
namespace = u'http://backend.userland.com/rss'
lowernamespace = namespace
if qname and qname.find(':') > 0:
givenprefix = qname.split(':')[0]
else:
givenprefix = None
prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
if givenprefix and (prefix == None or (prefix == '' and lowernamespace == '')) and givenprefix not in self.namespacesInUse:
raise UndeclaredNamespace, "'%s' is not associated with a namespace" % givenprefix
localname = str(localname).lower()
# qname implementation is horribly broken in Python 2.1 (it
# doesn't report any), and slightly broken in Python 2.2 (it
# doesn't report the xml: namespace). So we match up namespaces
# with a known list first, and then possibly override them with
# the qnames the SAX parser gives us (if indeed it gives us any
# at all). Thanks to MatejC for helping me test this and
# tirelessly telling me that it didn't work yet.
attrsD, self.decls = self.decls, {}
if localname=='math' and namespace=='http://www.w3.org/1998/Math/MathML':
attrsD['xmlns']=namespace
if localname=='svg' and namespace=='http://www.w3.org/2000/svg':
attrsD['xmlns']=namespace
if prefix:
localname = prefix.lower() + ':' + localname
elif namespace and not qname: #Expat
for name,value in self.namespacesInUse.items():
if name and value == namespace:
localname = name + ':' + localname
break
for (namespace, attrlocalname), attrvalue in attrs.items():
lowernamespace = (namespace or '').lower()
prefix = self._matchnamespaces.get(lowernamespace, '')
if prefix:
attrlocalname = prefix + ':' + attrlocalname
attrsD[str(attrlocalname).lower()] = attrvalue
for qname in attrs.getQNames():
attrsD[str(qname).lower()] = attrs.getValueByQName(qname)
self.unknown_starttag(localname, attrsD.items())
def characters(self, text):
self.handle_data(text)
def endElementNS(self, name, qname):
namespace, localname = name
lowernamespace = str(namespace or '').lower()
if qname and qname.find(':') > 0:
givenprefix = qname.split(':')[0]
else:
givenprefix = ''
prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
if prefix:
localname = prefix + ':' + localname
elif namespace and not qname: #Expat
for name,value in self.namespacesInUse.items():
if name and value == namespace:
localname = name + ':' + localname
break
localname = str(localname).lower()
self.unknown_endtag(localname)
def error(self, exc):
self.bozo = 1
self.exc = exc
# drv_libxml2 calls warning() in some cases
warning = error
def fatalError(self, exc):
self.error(exc)
raise exc
class _BaseHTMLProcessor(sgmllib.SGMLParser):
special = re.compile('''[<>'"]''')
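    # matches a bare '&' that does not begin a character or entity reference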
bare_ampersand = re.compile("&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)")
elements_no_end_tag = set([
'area', 'base', 'basefont', 'br', 'col', 'command', 'embed', 'frame',
'hr', 'img', 'input', 'isindex', 'keygen', 'link', 'meta', 'param',
'source', 'track', 'wbr'
])
def __init__(self, encoding, _type):
self.encoding = encoding
self._type = _type
sgmllib.SGMLParser.__init__(self)
def reset(self):
self.pieces = []
sgmllib.SGMLParser.reset(self)
def _shorttag_replace(self, match):
tag = match.group(1)
if tag in self.elements_no_end_tag:
return '<' + tag + ' />'
else:
return '<' + tag + '></' + tag + '>'
# By declaring these methods and overriding their compiled code
# with the code from sgmllib, the original code will execute in
# feedparser's scope instead of sgmllib's. This means that the
# `tagfind` and `charref` regular expressions will be found as
# they're declared above, not as they're declared in sgmllib.
def goahead(self, i):
pass
goahead.func_code = sgmllib.SGMLParser.goahead.func_code
def __parse_starttag(self, i):
pass
__parse_starttag.func_code = sgmllib.SGMLParser.parse_starttag.func_code
def parse_starttag(self,i):
j = self.__parse_starttag(i)
if self._type == 'application/xhtml+xml':
if j>2 and self.rawdata[j-2:j]=='/>':
self.unknown_endtag(self.lasttag)
return j
def feed(self, data):
        data = re.compile(r'<!((?!DOCTYPE|--|\[))', re.IGNORECASE).sub(r'&lt;!\1', data)
        data = re.sub(r'<([^<>\s]+?)\s*/>', self._shorttag_replace, data)
        data = data.replace('&#39;', "'")
        data = data.replace('&#34;', '"')
try:
bytes
if bytes is str:
raise NameError
self.encoding = self.encoding + u'_INVALID_PYTHON_3'
except NameError:
if self.encoding and isinstance(data, unicode):
data = data.encode(self.encoding)
sgmllib.SGMLParser.feed(self, data)
sgmllib.SGMLParser.close(self)
def normalize_attrs(self, attrs):
if not attrs:
return attrs
# utility method to be called by descendants
attrs = dict([(k.lower(), v) for k, v in attrs]).items()
attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs]
attrs.sort()
return attrs
def unknown_starttag(self, tag, attrs):
# called for each start tag
# attrs is a list of (attr, value) tuples
# e.g. for <pre class='screen'>, tag='pre', attrs=[('class', 'screen')]
uattrs = []
strattrs=''
if attrs:
for key, value in attrs:
                value=value.replace('&gt;','>').replace('&lt;','<').replace('&quot;','"')
                value = self.bare_ampersand.sub("&amp;", value)
# thanks to Kevin Marks for this breathtaking hack to deal with (valid) high-bit attribute values in UTF-8 feeds
if not isinstance(value, unicode):
value = value.decode(self.encoding, 'ignore')
try:
# Currently, in Python 3 the key is already a str, and cannot be decoded again
uattrs.append((unicode(key, self.encoding), value))
except TypeError:
uattrs.append((key, value))
strattrs = u''.join([u' %s="%s"' % (key, value) for key, value in uattrs])
if self.encoding:
try:
strattrs = strattrs.encode(self.encoding)
except (UnicodeEncodeError, LookupError):
pass
if tag in self.elements_no_end_tag:
self.pieces.append('<%s%s />' % (tag, strattrs))
else:
self.pieces.append('<%s%s>' % (tag, strattrs))
def unknown_endtag(self, tag):
# called for each end tag, e.g. for </pre>, tag will be 'pre'
# Reconstruct the original end tag.
if tag not in self.elements_no_end_tag:
self.pieces.append("</%s>" % tag)
def handle_charref(self, ref):
        # called for each character reference, e.g. for '&#160;', ref will be '160'
# Reconstruct the original character reference.
if ref.startswith('x'):
value = int(ref[1:], 16)
else:
value = int(ref)
if value in _cp1252:
self.pieces.append('&#%s;' % hex(ord(_cp1252[value]))[1:])
else:
self.pieces.append('&#%s;' % ref)
def handle_entityref(self, ref):
        # called for each entity reference, e.g. for '&copy;', ref will be 'copy'
# Reconstruct the original entity reference.
if ref in name2codepoint or ref == 'apos':
self.pieces.append('&%s;' % ref)
else:
self.pieces.append('&%s' % ref)
def handle_data(self, text):
# called for each block of plain text, i.e. outside of any tag and
# not containing any character or entity references
# Store the original text verbatim.
self.pieces.append(text)
def handle_comment(self, text):
# called for each HTML comment, e.g. <!-- insert Javascript code here -->
# Reconstruct the original comment.
self.pieces.append('<!--%s-->' % text)
def handle_pi(self, text):
# called for each processing instruction, e.g. <?instruction>
# Reconstruct original processing instruction.
self.pieces.append('<?%s>' % text)
def handle_decl(self, text):
# called for the DOCTYPE, if present, e.g.
# <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
# "http://www.w3.org/TR/html4/loose.dtd">
# Reconstruct original DOCTYPE
self.pieces.append('<!%s>' % text)
_new_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9:]*\s*').match
def _scan_name(self, i, declstartpos):
rawdata = self.rawdata
n = len(rawdata)
if i == n:
return None, -1
m = self._new_declname_match(rawdata, i)
if m:
s = m.group()
name = s.strip()
if (i + len(s)) == n:
return None, -1 # end of buffer
return name.lower(), m.end()
else:
self.handle_data(rawdata)
# self.updatepos(declstartpos, i)
return None, -1
def convert_charref(self, name):
return '&#%s;' % name
def convert_entityref(self, name):
return '&%s;' % name
def output(self):
'''Return processed HTML as a single string'''
return ''.join([str(p) for p in self.pieces])
def parse_declaration(self, i):
try:
return sgmllib.SGMLParser.parse_declaration(self, i)
except sgmllib.SGMLParseError:
# escape the doctype declaration and continue parsing
            self.handle_data('&lt;')
return i+1
class _LooseFeedParser(_FeedParserMixin, _BaseHTMLProcessor):
def __init__(self, baseuri, baselang, encoding, entities):
sgmllib.SGMLParser.__init__(self)
_FeedParserMixin.__init__(self, baseuri, baselang, encoding)
_BaseHTMLProcessor.__init__(self, encoding, 'application/xhtml+xml')
self.entities=entities
def decodeEntities(self, element, data):
        data = data.replace('&#60;', '&lt;')
        data = data.replace('&#x3c;', '&lt;')
        data = data.replace('&#x3C;', '&lt;')
        data = data.replace('&#62;', '&gt;')
        data = data.replace('&#x3e;', '&gt;')
        data = data.replace('&#x3E;', '&gt;')
        data = data.replace('&#38;', '&amp;')
        data = data.replace('&#x26;', '&amp;')
        data = data.replace('&#34;', '&quot;')
        data = data.replace('&#x22;', '&quot;')
        data = data.replace('&#39;', '&apos;')
        data = data.replace('&#x27;', '&apos;')
        if not self.contentparams.get('type', u'xml').endswith(u'xml'):
            data = data.replace('&lt;', '<')
            data = data.replace('&gt;', '>')
            data = data.replace('&amp;', '&')
            data = data.replace('&quot;', '"')
            data = data.replace('&apos;', "'")
return data
def strattrs(self, attrs):
return ''.join([' %s="%s"' % (n,v.replace('"','"')) for n,v in attrs])
class _MicroformatsParser:
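    # property-type constants understood by getPropertyValue()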
STRING = 1
DATE = 2
URI = 3
NODE = 4
EMAIL = 5
known_xfn_relationships = set(['contact', 'acquaintance', 'friend', 'met', 'co-worker', 'coworker', 'colleague', 'co-resident', 'coresident', 'neighbor', 'child', 'parent', 'sibling', 'brother', 'sister', 'spouse', 'wife', 'husband', 'kin', 'relative', 'muse', 'crush', 'date', 'sweetheart', 'me'])
known_binary_extensions = set(['zip','rar','exe','gz','tar','tgz','tbz2','bz2','z','7z','dmg','img','sit','sitx','hqx','deb','rpm','bz2','jar','rar','iso','bin','msi','mp2','mp3','ogg','ogm','mp4','m4v','m4a','avi','wma','wmv'])
def __init__(self, data, baseuri, encoding):
self.document = BeautifulSoup.BeautifulSoup(data)
self.baseuri = baseuri
self.encoding = encoding
if isinstance(data, unicode):
data = data.encode(encoding)
self.tags = []
self.enclosures = []
self.xfn = []
self.vcard = None
def vcardEscape(self, s):
if isinstance(s, basestring):
s = s.replace(',', '\\,').replace(';', '\\;').replace('\n', '\\n')
return s
def vcardFold(self, s):
s = re.sub(';+$', '', s)
sFolded = ''
iMax = 75
sPrefix = ''
while len(s) > iMax:
sFolded += sPrefix + s[:iMax] + '\n'
s = s[iMax:]
sPrefix = ' '
iMax = 74
sFolded += sPrefix + s
return sFolded
def normalize(self, s):
return re.sub(r'\s+', ' ', s).strip()
def unique(self, aList):
results = []
for element in aList:
if element not in results:
results.append(element)
return results
def toISO8601(self, dt):
return time.strftime('%Y-%m-%dT%H:%M:%SZ', dt)
def getPropertyValue(self, elmRoot, sProperty, iPropertyType=4, bAllowMultiple=0, bAutoEscape=0):
all = lambda x: 1
sProperty = sProperty.lower()
bFound = 0
bNormalize = 1
propertyMatch = {'class': re.compile(r'\b%s\b' % sProperty)}
if bAllowMultiple and (iPropertyType != self.NODE):
snapResults = []
containers = elmRoot(['ul', 'ol'], propertyMatch)
for container in containers:
snapResults.extend(container('li'))
bFound = (len(snapResults) != 0)
if not bFound:
snapResults = elmRoot(all, propertyMatch)
bFound = (len(snapResults) != 0)
if (not bFound) and (sProperty == 'value'):
snapResults = elmRoot('pre')
bFound = (len(snapResults) != 0)
bNormalize = not bFound
if not bFound:
snapResults = [elmRoot]
bFound = (len(snapResults) != 0)
arFilter = []
if sProperty == 'vcard':
snapFilter = elmRoot(all, propertyMatch)
for node in snapFilter:
if node.findParent(all, propertyMatch):
arFilter.append(node)
arResults = []
for node in snapResults:
if node not in arFilter:
arResults.append(node)
bFound = (len(arResults) != 0)
if not bFound:
if bAllowMultiple:
return []
elif iPropertyType == self.STRING:
return ''
elif iPropertyType == self.DATE:
return None
elif iPropertyType == self.URI:
return ''
elif iPropertyType == self.NODE:
return None
else:
return None
arValues = []
for elmResult in arResults:
sValue = None
if iPropertyType == self.NODE:
if bAllowMultiple:
arValues.append(elmResult)
continue
else:
return elmResult
sNodeName = elmResult.name.lower()
if (iPropertyType == self.EMAIL) and (sNodeName == 'a'):
sValue = (elmResult.get('href') or '').split('mailto:').pop().split('?')[0]
if sValue:
sValue = bNormalize and self.normalize(sValue) or sValue.strip()
if (not sValue) and (sNodeName == 'abbr'):
sValue = elmResult.get('title')
if sValue:
sValue = bNormalize and self.normalize(sValue) or sValue.strip()
if (not sValue) and (iPropertyType == self.URI):
if sNodeName == 'a':
sValue = elmResult.get('href')
elif sNodeName == 'img':
sValue = elmResult.get('src')
elif sNodeName == 'object':
sValue = elmResult.get('data')
if sValue:
sValue = bNormalize and self.normalize(sValue) or sValue.strip()
if (not sValue) and (sNodeName == 'img'):
sValue = elmResult.get('alt')
if sValue:
sValue = bNormalize and self.normalize(sValue) or sValue.strip()
if not sValue:
sValue = elmResult.renderContents()
sValue = re.sub(r'<\S[^>]*>', '', sValue)
sValue = sValue.replace('\r\n', '\n')
sValue = sValue.replace('\r', '\n')
if sValue:
sValue = bNormalize and self.normalize(sValue) or sValue.strip()
if not sValue:
continue
if iPropertyType == self.DATE:
sValue = _parse_date_iso8601(sValue)
if bAllowMultiple:
arValues.append(bAutoEscape and self.vcardEscape(sValue) or sValue)
else:
return bAutoEscape and self.vcardEscape(sValue) or sValue
return arValues
def findVCards(self, elmRoot, bAgentParsing=0):
sVCards = ''
if not bAgentParsing:
arCards = self.getPropertyValue(elmRoot, 'vcard', bAllowMultiple=1)
else:
arCards = [elmRoot]
for elmCard in arCards:
arLines = []
def processSingleString(sProperty):
sValue = self.getPropertyValue(elmCard, sProperty, self.STRING, bAutoEscape=1).decode(self.encoding)
if sValue:
arLines.append(self.vcardFold(sProperty.upper() + ':' + sValue))
return sValue or u''
def processSingleURI(sProperty):
sValue = self.getPropertyValue(elmCard, sProperty, self.URI)
if sValue:
sContentType = ''
sEncoding = ''
sValueKey = ''
if sValue.startswith('data:'):
sEncoding = ';ENCODING=b'
sContentType = sValue.split(';')[0].split('/').pop()
sValue = sValue.split(',', 1).pop()
else:
elmValue = self.getPropertyValue(elmCard, sProperty)
if elmValue:
if sProperty != 'url':
sValueKey = ';VALUE=uri'
sContentType = elmValue.get('type', '').strip().split('/').pop().strip()
sContentType = sContentType.upper()
if sContentType == 'OCTET-STREAM':
sContentType = ''
if sContentType:
sContentType = ';TYPE=' + sContentType.upper()
arLines.append(self.vcardFold(sProperty.upper() + sEncoding + sContentType + sValueKey + ':' + sValue))
def processTypeValue(sProperty, arDefaultType, arForceType=None):
arResults = self.getPropertyValue(elmCard, sProperty, bAllowMultiple=1)
for elmResult in arResults:
arType = self.getPropertyValue(elmResult, 'type', self.STRING, 1, 1)
if arForceType:
arType = self.unique(arForceType + arType)
if not arType:
arType = arDefaultType
sValue = self.getPropertyValue(elmResult, 'value', self.EMAIL, 0)
if sValue:
arLines.append(self.vcardFold(sProperty.upper() + ';TYPE=' + ','.join(arType) + ':' + sValue))
# AGENT
# must do this before all other properties because it is destructive
# (removes nested class="vcard" nodes so they don't interfere with
# this vcard's other properties)
arAgent = self.getPropertyValue(elmCard, 'agent', bAllowMultiple=1)
for elmAgent in arAgent:
if re.compile(r'\bvcard\b').search(elmAgent.get('class')):
sAgentValue = self.findVCards(elmAgent, 1) + '\n'
sAgentValue = sAgentValue.replace('\n', '\\n')
sAgentValue = sAgentValue.replace(';', '\\;')
if sAgentValue:
arLines.append(self.vcardFold('AGENT:' + sAgentValue))
# Completely remove the agent element from the parse tree
elmAgent.extract()
else:
                    sAgentValue = self.getPropertyValue(elmAgent, 'value', self.URI, bAutoEscape=1)
if sAgentValue:
arLines.append(self.vcardFold('AGENT;VALUE=uri:' + sAgentValue))
# FN (full name)
sFN = processSingleString('fn')
# N (name)
elmName = self.getPropertyValue(elmCard, 'n')
if elmName:
sFamilyName = self.getPropertyValue(elmName, 'family-name', self.STRING, bAutoEscape=1)
sGivenName = self.getPropertyValue(elmName, 'given-name', self.STRING, bAutoEscape=1)
arAdditionalNames = self.getPropertyValue(elmName, 'additional-name', self.STRING, 1, 1) + self.getPropertyValue(elmName, 'additional-names', self.STRING, 1, 1)
arHonorificPrefixes = self.getPropertyValue(elmName, 'honorific-prefix', self.STRING, 1, 1) + self.getPropertyValue(elmName, 'honorific-prefixes', self.STRING, 1, 1)
arHonorificSuffixes = self.getPropertyValue(elmName, 'honorific-suffix', self.STRING, 1, 1) + self.getPropertyValue(elmName, 'honorific-suffixes', self.STRING, 1, 1)
arLines.append(self.vcardFold('N:' + sFamilyName + ';' +
sGivenName + ';' +
','.join(arAdditionalNames) + ';' +
','.join(arHonorificPrefixes) + ';' +
','.join(arHonorificSuffixes)))
elif sFN:
# implied "N" optimization
# http://microformats.org/wiki/hcard#Implied_.22N.22_Optimization
arNames = self.normalize(sFN).split()
if len(arNames) == 2:
bFamilyNameFirst = (arNames[0].endswith(',') or
len(arNames[1]) == 1 or
((len(arNames[1]) == 2) and (arNames[1].endswith('.'))))
if bFamilyNameFirst:
arLines.append(self.vcardFold('N:' + arNames[0] + ';' + arNames[1]))
else:
arLines.append(self.vcardFold('N:' + arNames[1] + ';' + arNames[0]))
# SORT-STRING
sSortString = self.getPropertyValue(elmCard, 'sort-string', self.STRING, bAutoEscape=1)
if sSortString:
arLines.append(self.vcardFold('SORT-STRING:' + sSortString))
# NICKNAME
arNickname = self.getPropertyValue(elmCard, 'nickname', self.STRING, 1, 1)
if arNickname:
arLines.append(self.vcardFold('NICKNAME:' + ','.join(arNickname)))
# PHOTO
processSingleURI('photo')
# BDAY
dtBday = self.getPropertyValue(elmCard, 'bday', self.DATE)
if dtBday:
arLines.append(self.vcardFold('BDAY:' + self.toISO8601(dtBday)))
# ADR (address)
arAdr = self.getPropertyValue(elmCard, 'adr', bAllowMultiple=1)
for elmAdr in arAdr:
arType = self.getPropertyValue(elmAdr, 'type', self.STRING, 1, 1)
if not arType:
arType = ['intl','postal','parcel','work'] # default adr types, see RFC 2426 section 3.2.1
sPostOfficeBox = self.getPropertyValue(elmAdr, 'post-office-box', self.STRING, 0, 1)
sExtendedAddress = self.getPropertyValue(elmAdr, 'extended-address', self.STRING, 0, 1)
sStreetAddress = self.getPropertyValue(elmAdr, 'street-address', self.STRING, 0, 1)
sLocality = self.getPropertyValue(elmAdr, 'locality', self.STRING, 0, 1)
sRegion = self.getPropertyValue(elmAdr, 'region', self.STRING, 0, 1)
sPostalCode = self.getPropertyValue(elmAdr, 'postal-code', self.STRING, 0, 1)
sCountryName = self.getPropertyValue(elmAdr, 'country-name', self.STRING, 0, 1)
arLines.append(self.vcardFold('ADR;TYPE=' + ','.join(arType) + ':' +
sPostOfficeBox + ';' +
sExtendedAddress + ';' +
sStreetAddress + ';' +
sLocality + ';' +
sRegion + ';' +
sPostalCode + ';' +
sCountryName))
# LABEL
processTypeValue('label', ['intl','postal','parcel','work'])
# TEL (phone number)
processTypeValue('tel', ['voice'])
# EMAIL
processTypeValue('email', ['internet'], ['internet'])
# MAILER
processSingleString('mailer')
# TZ (timezone)
processSingleString('tz')
# GEO (geographical information)
elmGeo = self.getPropertyValue(elmCard, 'geo')
if elmGeo:
sLatitude = self.getPropertyValue(elmGeo, 'latitude', self.STRING, 0, 1)
sLongitude = self.getPropertyValue(elmGeo, 'longitude', self.STRING, 0, 1)
arLines.append(self.vcardFold('GEO:' + sLatitude + ';' + sLongitude))
# TITLE
processSingleString('title')
# ROLE
processSingleString('role')
# LOGO
processSingleURI('logo')
# ORG (organization)
elmOrg = self.getPropertyValue(elmCard, 'org')
if elmOrg:
sOrganizationName = self.getPropertyValue(elmOrg, 'organization-name', self.STRING, 0, 1)
if not sOrganizationName:
# implied "organization-name" optimization
# http://microformats.org/wiki/hcard#Implied_.22organization-name.22_Optimization
sOrganizationName = self.getPropertyValue(elmCard, 'org', self.STRING, 0, 1)
if sOrganizationName:
arLines.append(self.vcardFold('ORG:' + sOrganizationName))
else:
arOrganizationUnit = self.getPropertyValue(elmOrg, 'organization-unit', self.STRING, 1, 1)
arLines.append(self.vcardFold('ORG:' + sOrganizationName + ';' + ';'.join(arOrganizationUnit)))
# CATEGORY
arCategory = self.getPropertyValue(elmCard, 'category', self.STRING, 1, 1) + self.getPropertyValue(elmCard, 'categories', self.STRING, 1, 1)
if arCategory:
arLines.append(self.vcardFold('CATEGORIES:' + ','.join(arCategory)))
# NOTE
processSingleString('note')
# REV
processSingleString('rev')
# SOUND
processSingleURI('sound')
# UID
processSingleString('uid')
# URL
processSingleURI('url')
# CLASS
processSingleString('class')
# KEY
processSingleURI('key')
if arLines:
arLines = [u'BEGIN:vCard',u'VERSION:3.0'] + arLines + [u'END:vCard']
# XXX - this is super ugly; properly fix this with issue 148
for i, s in enumerate(arLines):
if not isinstance(s, unicode):
arLines[i] = s.decode('utf-8', 'ignore')
sVCards += u'\n'.join(arLines) + u'\n'
return sVCards.strip()
def isProbablyDownloadable(self, elm):
attrsD = elm.attrMap
if 'href' not in attrsD:
return 0
linktype = attrsD.get('type', '').strip()
if linktype.startswith('audio/') or \
linktype.startswith('video/') or \
(linktype.startswith('application/') and not linktype.endswith('xml')):
return 1
path = urlparse.urlparse(attrsD['href'])[2]
if path.find('.') == -1:
return 0
fileext = path.split('.').pop().lower()
return fileext in self.known_binary_extensions
def findTags(self):
all = lambda x: 1
for elm in self.document(all, {'rel': re.compile(r'\btag\b')}):
href = elm.get('href')
if not href:
continue
urlscheme, domain, path, params, query, fragment = \
urlparse.urlparse(_urljoin(self.baseuri, href))
segments = path.split('/')
tag = segments.pop()
if not tag:
if segments:
tag = segments.pop()
else:
# there are no tags
continue
tagscheme = urlparse.urlunparse((urlscheme, domain, '/'.join(segments), '', '', ''))
if not tagscheme.endswith('/'):
tagscheme += '/'
self.tags.append(FeedParserDict({"term": tag, "scheme": tagscheme, "label": elm.string or ''}))
def findEnclosures(self):
all = lambda x: 1
enclosure_match = re.compile(r'\benclosure\b')
for elm in self.document(all, {'href': re.compile(r'.+')}):
if not enclosure_match.search(elm.get('rel', u'')) and not self.isProbablyDownloadable(elm):
continue
if elm.attrMap not in self.enclosures:
self.enclosures.append(elm.attrMap)
if elm.string and not elm.get('title'):
self.enclosures[-1]['title'] = elm.string
def findXFN(self):
all = lambda x: 1
for elm in self.document(all, {'rel': re.compile('.+'), 'href': re.compile('.+')}):
rels = elm.get('rel', u'').split()
xfn_rels = [r for r in rels if r in self.known_xfn_relationships]
if xfn_rels:
self.xfn.append({"relationships": xfn_rels, "href": elm.get('href', ''), "name": elm.string})
def _parseMicroformats(htmlSource, baseURI, encoding):
if not BeautifulSoup:
return
try:
p = _MicroformatsParser(htmlSource, baseURI, encoding)
except UnicodeEncodeError:
# sgmllib throws this exception when performing lookups of tags
# with non-ASCII characters in them.
return
p.vcard = p.findVCards(p.document)
p.findTags()
p.findEnclosures()
p.findXFN()
return {"tags": p.tags, "enclosures": p.enclosures, "xfn": p.xfn, "vcard": p.vcard}
class _RelativeURIResolver(_BaseHTMLProcessor):
relative_uris = set([('a', 'href'),
('applet', 'codebase'),
('area', 'href'),
('blockquote', 'cite'),
('body', 'background'),
('del', 'cite'),
('form', 'action'),
('frame', 'longdesc'),
('frame', 'src'),
('iframe', 'longdesc'),
('iframe', 'src'),
('head', 'profile'),
('img', 'longdesc'),
('img', 'src'),
('img', 'usemap'),
('input', 'src'),
('input', 'usemap'),
('ins', 'cite'),
('link', 'href'),
('object', 'classid'),
('object', 'codebase'),
('object', 'data'),
('object', 'usemap'),
('q', 'cite'),
('script', 'src')])
def __init__(self, baseuri, encoding, _type):
_BaseHTMLProcessor.__init__(self, encoding, _type)
self.baseuri = baseuri
def resolveURI(self, uri):
return _makeSafeAbsoluteURI(self.baseuri, uri.strip())
def unknown_starttag(self, tag, attrs):
attrs = self.normalize_attrs(attrs)
attrs = [(key, ((tag, key) in self.relative_uris) and self.resolveURI(value) or value) for key, value in attrs]
_BaseHTMLProcessor.unknown_starttag(self, tag, attrs)
def _resolveRelativeURIs(htmlSource, baseURI, encoding, _type):
if not _SGML_AVAILABLE:
return htmlSource
p = _RelativeURIResolver(baseURI, encoding, _type)
p.feed(htmlSource)
return p.output()
def _makeSafeAbsoluteURI(base, rel=None):
# bail if ACCEPTABLE_URI_SCHEMES is empty
if not ACCEPTABLE_URI_SCHEMES:
try:
return _urljoin(base, rel or u'')
except ValueError:
return u''
if not base:
return rel or u''
if not rel:
try:
scheme = urlparse.urlparse(base)[0]
except ValueError:
return u''
if not scheme or scheme in ACCEPTABLE_URI_SCHEMES:
return base
return u''
try:
uri = _urljoin(base, rel)
except ValueError:
return u''
if uri.strip().split(':', 1)[0] not in ACCEPTABLE_URI_SCHEMES:
return u''
return uri
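# Illustrative sketch (not part of the original module): how the safety check
# behaves, assuming the default ACCEPTABLE_URI_SCHEMES (which does not include
# 'javascript'). The URLs are hypothetical.
def _example_make_safe_absolute_uri():
    # Relative references are resolved against the base.
    ok = _makeSafeAbsoluteURI('http://example.com/feeds/', 'atom.xml')
    # -> 'http://example.com/feeds/atom.xml'
    # URIs with unacceptable schemes collapse to an empty string.
    blocked = _makeSafeAbsoluteURI('http://example.com/', 'javascript:alert(1)')
    # -> u''
    return ok, blocked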
class _HTMLSanitizer(_BaseHTMLProcessor):
acceptable_elements = set(['a', 'abbr', 'acronym', 'address', 'area',
'article', 'aside', 'audio', 'b', 'big', 'blockquote', 'br', 'button',
'canvas', 'caption', 'center', 'cite', 'code', 'col', 'colgroup',
'command', 'datagrid', 'datalist', 'dd', 'del', 'details', 'dfn',
'dialog', 'dir', 'div', 'dl', 'dt', 'em', 'event-source', 'fieldset',
'figcaption', 'figure', 'footer', 'font', 'form', 'header', 'h1',
'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'input', 'ins',
'keygen', 'kbd', 'label', 'legend', 'li', 'm', 'map', 'menu', 'meter',
'multicol', 'nav', 'nextid', 'ol', 'output', 'optgroup', 'option',
'p', 'pre', 'progress', 'q', 's', 'samp', 'section', 'select',
'small', 'sound', 'source', 'spacer', 'span', 'strike', 'strong',
'sub', 'sup', 'table', 'tbody', 'td', 'textarea', 'time', 'tfoot',
'th', 'thead', 'tr', 'tt', 'u', 'ul', 'var', 'video', 'noscript'])
acceptable_attributes = set(['abbr', 'accept', 'accept-charset', 'accesskey',
'action', 'align', 'alt', 'autocomplete', 'autofocus', 'axis',
'background', 'balance', 'bgcolor', 'bgproperties', 'border',
'bordercolor', 'bordercolordark', 'bordercolorlight', 'bottompadding',
'cellpadding', 'cellspacing', 'ch', 'challenge', 'char', 'charoff',
'choff', 'charset', 'checked', 'cite', 'class', 'clear', 'color', 'cols',
'colspan', 'compact', 'contenteditable', 'controls', 'coords', 'data',
'datafld', 'datapagesize', 'datasrc', 'datetime', 'default', 'delay',
'dir', 'disabled', 'draggable', 'dynsrc', 'enctype', 'end', 'face', 'for',
'form', 'frame', 'galleryimg', 'gutter', 'headers', 'height', 'hidefocus',
'hidden', 'high', 'href', 'hreflang', 'hspace', 'icon', 'id', 'inputmode',
'ismap', 'keytype', 'label', 'leftspacing', 'lang', 'list', 'longdesc',
'loop', 'loopcount', 'loopend', 'loopstart', 'low', 'lowsrc', 'max',
'maxlength', 'media', 'method', 'min', 'multiple', 'name', 'nohref',
'noshade', 'nowrap', 'open', 'optimum', 'pattern', 'ping', 'point-size',
'prompt', 'pqg', 'radiogroup', 'readonly', 'rel', 'repeat-max',
'repeat-min', 'replace', 'required', 'rev', 'rightspacing', 'rows',
'rowspan', 'rules', 'scope', 'selected', 'shape', 'size', 'span', 'src',
'start', 'step', 'summary', 'suppress', 'tabindex', 'target', 'template',
'title', 'toppadding', 'type', 'unselectable', 'usemap', 'urn', 'valign',
'value', 'variable', 'volume', 'vspace', 'vrml', 'width', 'wrap',
'xml:lang'])
unacceptable_elements_with_end_tag = set(['script', 'applet', 'style'])
acceptable_css_properties = set(['azimuth', 'background-color',
'border-bottom-color', 'border-collapse', 'border-color',
'border-left-color', 'border-right-color', 'border-top-color', 'clear',
'color', 'cursor', 'direction', 'display', 'elevation', 'float', 'font',
'font-family', 'font-size', 'font-style', 'font-variant', 'font-weight',
'height', 'letter-spacing', 'line-height', 'overflow', 'pause',
'pause-after', 'pause-before', 'pitch', 'pitch-range', 'richness',
'speak', 'speak-header', 'speak-numeral', 'speak-punctuation',
'speech-rate', 'stress', 'text-align', 'text-decoration', 'text-indent',
'unicode-bidi', 'vertical-align', 'voice-family', 'volume',
'white-space', 'width'])
# survey of common keywords found in feeds
acceptable_css_keywords = set(['auto', 'aqua', 'black', 'block', 'blue',
'bold', 'both', 'bottom', 'brown', 'center', 'collapse', 'dashed',
'dotted', 'fuchsia', 'gray', 'green', '!important', 'italic', 'left',
'lime', 'maroon', 'medium', 'none', 'navy', 'normal', 'nowrap', 'olive',
'pointer', 'purple', 'red', 'right', 'solid', 'silver', 'teal', 'top',
'transparent', 'underline', 'white', 'yellow'])
valid_css_values = re.compile('^(#[0-9a-f]+|rgb\(\d+%?,\d*%?,?\d*%?\)?|' +
'\d{0,2}\.?\d{0,2}(cm|em|ex|in|mm|pc|pt|px|%|,|\))?)$')
mathml_elements = set(['annotation', 'annotation-xml', 'maction', 'math',
'merror', 'mfenced', 'mfrac', 'mi', 'mmultiscripts', 'mn', 'mo', 'mover', 'mpadded',
'mphantom', 'mprescripts', 'mroot', 'mrow', 'mspace', 'msqrt', 'mstyle',
'msub', 'msubsup', 'msup', 'mtable', 'mtd', 'mtext', 'mtr', 'munder',
'munderover', 'none', 'semantics'])
mathml_attributes = set(['actiontype', 'align', 'columnalign', 'columnalign',
'columnalign', 'close', 'columnlines', 'columnspacing', 'columnspan', 'depth',
'display', 'displaystyle', 'encoding', 'equalcolumns', 'equalrows',
'fence', 'fontstyle', 'fontweight', 'frame', 'height', 'linethickness',
'lspace', 'mathbackground', 'mathcolor', 'mathvariant', 'mathvariant',
'maxsize', 'minsize', 'open', 'other', 'rowalign', 'rowalign', 'rowalign',
'rowlines', 'rowspacing', 'rowspan', 'rspace', 'scriptlevel', 'selection',
'separator', 'separators', 'stretchy', 'width', 'width', 'xlink:href',
'xlink:show', 'xlink:type', 'xmlns', 'xmlns:xlink'])
# svgtiny - foreignObject + linearGradient + radialGradient + stop
svg_elements = set(['a', 'animate', 'animateColor', 'animateMotion',
'animateTransform', 'circle', 'defs', 'desc', 'ellipse', 'foreignObject',
'font-face', 'font-face-name', 'font-face-src', 'g', 'glyph', 'hkern',
'linearGradient', 'line', 'marker', 'metadata', 'missing-glyph', 'mpath',
'path', 'polygon', 'polyline', 'radialGradient', 'rect', 'set', 'stop',
'svg', 'switch', 'text', 'title', 'tspan', 'use'])
# svgtiny + class + opacity + offset + xmlns + xmlns:xlink
svg_attributes = set(['accent-height', 'accumulate', 'additive', 'alphabetic',
'arabic-form', 'ascent', 'attributeName', 'attributeType',
'baseProfile', 'bbox', 'begin', 'by', 'calcMode', 'cap-height',
'class', 'color', 'color-rendering', 'content', 'cx', 'cy', 'd', 'dx',
'dy', 'descent', 'display', 'dur', 'end', 'fill', 'fill-opacity',
'fill-rule', 'font-family', 'font-size', 'font-stretch', 'font-style',
'font-variant', 'font-weight', 'from', 'fx', 'fy', 'g1', 'g2',
'glyph-name', 'gradientUnits', 'hanging', 'height', 'horiz-adv-x',
'horiz-origin-x', 'id', 'ideographic', 'k', 'keyPoints', 'keySplines',
'keyTimes', 'lang', 'mathematical', 'marker-end', 'marker-mid',
'marker-start', 'markerHeight', 'markerUnits', 'markerWidth', 'max',
'min', 'name', 'offset', 'opacity', 'orient', 'origin',
'overline-position', 'overline-thickness', 'panose-1', 'path',
'pathLength', 'points', 'preserveAspectRatio', 'r', 'refX', 'refY',
'repeatCount', 'repeatDur', 'requiredExtensions', 'requiredFeatures',
'restart', 'rotate', 'rx', 'ry', 'slope', 'stemh', 'stemv',
'stop-color', 'stop-opacity', 'strikethrough-position',
'strikethrough-thickness', 'stroke', 'stroke-dasharray',
'stroke-dashoffset', 'stroke-linecap', 'stroke-linejoin',
'stroke-miterlimit', 'stroke-opacity', 'stroke-width', 'systemLanguage',
'target', 'text-anchor', 'to', 'transform', 'type', 'u1', 'u2',
'underline-position', 'underline-thickness', 'unicode', 'unicode-range',
'units-per-em', 'values', 'version', 'viewBox', 'visibility', 'width',
'widths', 'x', 'x-height', 'x1', 'x2', 'xlink:actuate', 'xlink:arcrole',
'xlink:href', 'xlink:role', 'xlink:show', 'xlink:title', 'xlink:type',
'xml:base', 'xml:lang', 'xml:space', 'xmlns', 'xmlns:xlink', 'y', 'y1',
'y2', 'zoomAndPan'])
svg_attr_map = None
svg_elem_map = None
acceptable_svg_properties = set([ 'fill', 'fill-opacity', 'fill-rule',
'stroke', 'stroke-width', 'stroke-linecap', 'stroke-linejoin',
'stroke-opacity'])
def reset(self):
_BaseHTMLProcessor.reset(self)
self.unacceptablestack = 0
self.mathmlOK = 0
self.svgOK = 0
def unknown_starttag(self, tag, attrs):
acceptable_attributes = self.acceptable_attributes
keymap = {}
if not tag in self.acceptable_elements or self.svgOK:
if tag in self.unacceptable_elements_with_end_tag:
self.unacceptablestack += 1
# add implicit namespaces to html5 inline svg/mathml
if self._type.endswith('html'):
if not dict(attrs).get('xmlns'):
if tag=='svg':
attrs.append( ('xmlns','http://www.w3.org/2000/svg') )
if tag=='math':
attrs.append( ('xmlns','http://www.w3.org/1998/Math/MathML') )
# not otherwise acceptable, perhaps it is MathML or SVG?
if tag=='math' and ('xmlns','http://www.w3.org/1998/Math/MathML') in attrs:
self.mathmlOK += 1
if tag=='svg' and ('xmlns','http://www.w3.org/2000/svg') in attrs:
self.svgOK += 1
# chose acceptable attributes based on tag class, else bail
if self.mathmlOK and tag in self.mathml_elements:
acceptable_attributes = self.mathml_attributes
elif self.svgOK and tag in self.svg_elements:
# for most vocabularies, lowercasing is a good idea. Many
# svg elements, however, are camel case
if not self.svg_attr_map:
lower=[attr.lower() for attr in self.svg_attributes]
mix=[a for a in self.svg_attributes if a not in lower]
self.svg_attributes = lower
self.svg_attr_map = dict([(a.lower(),a) for a in mix])
lower=[attr.lower() for attr in self.svg_elements]
mix=[a for a in self.svg_elements if a not in lower]
self.svg_elements = lower
self.svg_elem_map = dict([(a.lower(),a) for a in mix])
acceptable_attributes = self.svg_attributes
tag = self.svg_elem_map.get(tag,tag)
keymap = self.svg_attr_map
elif not tag in self.acceptable_elements:
return
# declare xlink namespace, if needed
if self.mathmlOK or self.svgOK:
if filter(lambda (n,v): n.startswith('xlink:'),attrs):
if not ('xmlns:xlink','http://www.w3.org/1999/xlink') in attrs:
attrs.append(('xmlns:xlink','http://www.w3.org/1999/xlink'))
clean_attrs = []
for key, value in self.normalize_attrs(attrs):
if key in acceptable_attributes:
key=keymap.get(key,key)
# make sure the uri uses an acceptable uri scheme
if key == u'href':
value = _makeSafeAbsoluteURI(value)
clean_attrs.append((key,value))
elif key=='style':
clean_value = self.sanitize_style(value)
if clean_value:
clean_attrs.append((key,clean_value))
_BaseHTMLProcessor.unknown_starttag(self, tag, clean_attrs)
def unknown_endtag(self, tag):
if not tag in self.acceptable_elements:
if tag in self.unacceptable_elements_with_end_tag:
self.unacceptablestack -= 1
if self.mathmlOK and tag in self.mathml_elements:
if tag == 'math' and self.mathmlOK:
self.mathmlOK -= 1
elif self.svgOK and tag in self.svg_elements:
tag = self.svg_elem_map.get(tag,tag)
if tag == 'svg' and self.svgOK:
self.svgOK -= 1
else:
return
_BaseHTMLProcessor.unknown_endtag(self, tag)
def handle_pi(self, text):
pass
def handle_decl(self, text):
pass
def handle_data(self, text):
if not self.unacceptablestack:
_BaseHTMLProcessor.handle_data(self, text)
def sanitize_style(self, style):
# disallow urls
style=re.compile('url\s*\(\s*[^\s)]+?\s*\)\s*').sub(' ',style)
# gauntlet
if not re.match("""^([:,;#%.\sa-zA-Z0-9!]|\w-\w|'[\s\w]+'|"[\s\w]+"|\([\d,\s]+\))*$""", style):
return ''
# This replaced a regexp that used re.match and was prone to pathological back-tracking.
if re.sub("\s*[-\w]+\s*:\s*[^:;]*;?", '', style).strip():
return ''
clean = []
for prop,value in re.findall("([-\w]+)\s*:\s*([^:;]*)",style):
if not value:
continue
if prop.lower() in self.acceptable_css_properties:
clean.append(prop + ': ' + value + ';')
elif prop.split('-')[0].lower() in ['background','border','margin','padding']:
for keyword in value.split():
if not keyword in self.acceptable_css_keywords and \
not self.valid_css_values.match(keyword):
break
else:
clean.append(prop + ': ' + value + ';')
elif self.svgOK and prop.lower() in self.acceptable_svg_properties:
clean.append(prop + ': ' + value + ';')
return ' '.join(clean)
def parse_comment(self, i, report=1):
ret = _BaseHTMLProcessor.parse_comment(self, i, report)
if ret >= 0:
return ret
# if ret == -1, this may be a malicious attempt to circumvent
# sanitization, or a page-destroying unclosed comment
match = re.compile(r'--[^>]*>').search(self.rawdata, i+4)
if match:
return match.end()
# unclosed comment; deliberately fail to handle_data()
return len(self.rawdata)
def _sanitizeHTML(htmlSource, encoding, _type):
if not _SGML_AVAILABLE:
return htmlSource
p = _HTMLSanitizer(encoding, _type)
    htmlSource = htmlSource.replace('<![CDATA[', '&lt;![CDATA[')
p.feed(htmlSource)
data = p.output()
if TIDY_MARKUP:
# loop through list of preferred Tidy interfaces looking for one that's installed,
# then set up a common _tidy function to wrap the interface-specific API.
_tidy = None
for tidy_interface in PREFERRED_TIDY_INTERFACES:
try:
if tidy_interface == "uTidy":
from tidy import parseString as _utidy
def _tidy(data, **kwargs):
return str(_utidy(data, **kwargs))
break
elif tidy_interface == "mxTidy":
from mx.Tidy import Tidy as _mxtidy
def _tidy(data, **kwargs):
nerrors, nwarnings, data, errordata = _mxtidy.tidy(data, **kwargs)
return data
break
except:
pass
if _tidy:
utf8 = isinstance(data, unicode)
if utf8:
data = data.encode('utf-8')
data = _tidy(data, output_xhtml=1, numeric_entities=1, wrap=0, char_encoding="utf8")
if utf8:
data = unicode(data, 'utf-8')
if data.count('<body'):
data = data.split('<body', 1)[1]
if data.count('>'):
data = data.split('>', 1)[1]
if data.count('</body'):
data = data.split('</body', 1)[0]
data = data.strip().replace('\r\n', '\n')
return data
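# Illustrative sketch (not part of the original module): what the sanitizer
# strips. Exact whitespace in the output may differ; this assumes sgmllib is
# available and TIDY_MARKUP is disabled (the defaults).
def _example_sanitize_html():
    dirty = u'<p onclick="evil()">hi<script>payload()</script></p>'
    return _sanitizeHTML(dirty, 'utf-8', 'text/html')
    # The 'onclick' attribute and the whole <script> element are dropped,
    # leaving roughly u'<p>hi</p>'.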
class _FeedURLHandler(urllib2.HTTPDigestAuthHandler, urllib2.HTTPRedirectHandler, urllib2.HTTPDefaultErrorHandler):
def http_error_default(self, req, fp, code, msg, headers):
# The default implementation just raises HTTPError.
# Forget that.
fp.status = code
return fp
def http_error_301(self, req, fp, code, msg, hdrs):
result = urllib2.HTTPRedirectHandler.http_error_301(self, req, fp,
code, msg, hdrs)
result.status = code
result.newurl = result.geturl()
return result
# The default implementations in urllib2.HTTPRedirectHandler
# are identical, so hardcoding a http_error_301 call above
# won't affect anything
http_error_300 = http_error_301
http_error_302 = http_error_301
http_error_303 = http_error_301
http_error_307 = http_error_301
def http_error_401(self, req, fp, code, msg, headers):
# Check if
# - server requires digest auth, AND
# - we tried (unsuccessfully) with basic auth, AND
# If all conditions hold, parse authentication information
# out of the Authorization header we sent the first time
# (for the username and password) and the WWW-Authenticate
# header the server sent back (for the realm) and retry
# the request with the appropriate digest auth headers instead.
# This evil genius hack has been brought to you by Aaron Swartz.
host = urlparse.urlparse(req.get_full_url())[1]
if base64 is None or 'Authorization' not in req.headers \
or 'WWW-Authenticate' not in headers:
return self.http_error_default(req, fp, code, msg, headers)
auth = _base64decode(req.headers['Authorization'].split(' ')[1])
user, passw = auth.split(':')
realm = re.findall('realm="([^"]*)"', headers['WWW-Authenticate'])[0]
self.add_password(realm, host, user, passw)
retry = self.http_error_auth_reqed('www-authenticate', host, req, headers)
self.reset_retry_count()
return retry
def _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers, request_headers):
"""URL, filename, or string --> stream
This function lets you define parsers that take any input source
(URL, pathname to local or network file, or actual data as a string)
and deal with it in a uniform manner. Returned object is guaranteed
to have all the basic stdio read methods (read, readline, readlines).
Just .close() the object when you're done with it.
If the etag argument is supplied, it will be used as the value of an
If-None-Match request header.
If the modified argument is supplied, it can be a tuple of 9 integers
(as returned by gmtime() in the standard Python time module) or a date
string in any format supported by feedparser. Regardless, it MUST
be in GMT (Greenwich Mean Time). It will be reformatted into an
RFC 1123-compliant date and used as the value of an If-Modified-Since
request header.
If the agent argument is supplied, it will be used as the value of a
User-Agent request header.
If the referrer argument is supplied, it will be used as the value of a
Referer[sic] request header.
If handlers is supplied, it is a list of handlers used to build a
urllib2 opener.
if request_headers is supplied it is a dictionary of HTTP request headers
that will override the values generated by FeedParser.
"""
if hasattr(url_file_stream_or_string, 'read'):
return url_file_stream_or_string
if isinstance(url_file_stream_or_string, basestring) \
and urlparse.urlparse(url_file_stream_or_string)[0] in ('http', 'https', 'ftp', 'file', 'feed'):
# Deal with the feed URI scheme
if url_file_stream_or_string.startswith('feed:http'):
url_file_stream_or_string = url_file_stream_or_string[5:]
elif url_file_stream_or_string.startswith('feed:'):
url_file_stream_or_string = 'http:' + url_file_stream_or_string[5:]
if not agent:
agent = USER_AGENT
# test for inline user:password for basic auth
auth = None
if base64:
urltype, rest = urllib.splittype(url_file_stream_or_string)
realhost, rest = urllib.splithost(rest)
if realhost:
user_passwd, realhost = urllib.splituser(realhost)
if user_passwd:
url_file_stream_or_string = '%s://%s%s' % (urltype, realhost, rest)
auth = base64.standard_b64encode(user_passwd).strip()
# iri support
if isinstance(url_file_stream_or_string, unicode):
url_file_stream_or_string = _convert_to_idn(url_file_stream_or_string)
# try to open with urllib2 (to use optional headers)
request = _build_urllib2_request(url_file_stream_or_string, agent, etag, modified, referrer, auth, request_headers)
opener = urllib2.build_opener(*tuple(handlers + [_FeedURLHandler()]))
opener.addheaders = [] # RMK - must clear so we only send our custom User-Agent
try:
return opener.open(request)
finally:
opener.close() # JohnD
# try to open with native open function (if url_file_stream_or_string is a filename)
try:
return open(url_file_stream_or_string, 'rb')
except (IOError, UnicodeEncodeError, TypeError):
# if url_file_stream_or_string is a unicode object that
# cannot be converted to the encoding returned by
# sys.getfilesystemencoding(), a UnicodeEncodeError
# will be thrown
# If url_file_stream_or_string is a string that contains NULL
# (such as an XML document encoded in UTF-32), TypeError will
# be thrown.
pass
# treat url_file_stream_or_string as string
if isinstance(url_file_stream_or_string, unicode):
return _StringIO(url_file_stream_or_string.encode('utf-8'))
return _StringIO(url_file_stream_or_string)
def _convert_to_idn(url):
"""Convert a URL to IDN notation"""
# this function should only be called with a unicode string
# strategy: if the host cannot be encoded in ascii, then
# it'll be necessary to encode it in idn form
parts = list(urlparse.urlsplit(url))
try:
parts[1].encode('ascii')
except UnicodeEncodeError:
# the url needs to be converted to idn notation
host = parts[1].rsplit(':', 1)
newhost = []
port = u''
if len(host) == 2:
port = host.pop()
for h in host[0].split('.'):
newhost.append(h.encode('idna').decode('utf-8'))
parts[1] = '.'.join(newhost)
if port:
parts[1] += ':' + port
return urlparse.urlunsplit(parts)
else:
return url
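# Illustrative sketch (not part of the original module): hosts that cannot be
# encoded as ASCII are rewritten into their IDNA ('xn--...') form, while ASCII
# hosts pass through untouched. The URLs are hypothetical.
def _example_convert_to_idn():
    plain = _convert_to_idn(u'http://example.com/feed')    # unchanged
    idn = _convert_to_idn(u'http://\u4f8b\u3048.jp/feed')  # host becomes an 'xn--...' label
    return plain, idn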
def _build_urllib2_request(url, agent, etag, modified, referrer, auth, request_headers):
request = urllib2.Request(url)
request.add_header('User-Agent', agent)
if etag:
request.add_header('If-None-Match', etag)
if isinstance(modified, basestring):
modified = _parse_date(modified)
elif isinstance(modified, datetime.datetime):
modified = modified.utctimetuple()
if modified:
# format into an RFC 1123-compliant timestamp. We can't use
# time.strftime() since the %a and %b directives can be affected
# by the current locale, but RFC 2616 states that dates must be
# in English.
short_weekdays = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
request.add_header('If-Modified-Since', '%s, %02d %s %04d %02d:%02d:%02d GMT' % (short_weekdays[modified[6]], modified[2], months[modified[1] - 1], modified[0], modified[3], modified[4], modified[5]))
if referrer:
request.add_header('Referer', referrer)
if gzip and zlib:
request.add_header('Accept-encoding', 'gzip, deflate')
elif gzip:
request.add_header('Accept-encoding', 'gzip')
elif zlib:
request.add_header('Accept-encoding', 'deflate')
else:
request.add_header('Accept-encoding', '')
if auth:
request.add_header('Authorization', 'Basic %s' % auth)
if ACCEPT_HEADER:
request.add_header('Accept', ACCEPT_HEADER)
# use this for whatever -- cookies, special headers, etc
# [('Cookie','Something'),('x-special-header','Another Value')]
for header_name, header_value in request_headers.items():
request.add_header(header_name, header_value)
request.add_header('A-IM', 'feed') # RFC 3229 support
return request
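# Illustrative sketch (not part of the original module): building a request
# for a conditional GET. The URL, ETag and timestamp are hypothetical; the
# request is only constructed here, never sent.
def _example_build_conditional_request():
    req = _build_urllib2_request('http://example.org/feed.xml', USER_AGENT,
                                 '"abc123"', 'Sat, 01 Jan 2011 00:00:00 GMT',
                                 None, None, {})
    # req carries If-None-Match, If-Modified-Since (reformatted per RFC 1123),
    # Accept-encoding, Accept and A-IM headers.
    return req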
_date_handlers = []
def registerDateHandler(func):
'''Register a date handler function (takes string, returns 9-tuple date in GMT)'''
_date_handlers.insert(0, func)
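# Illustrative sketch (not part of the original module): a custom handler is a
# callable that takes the raw string and returns a 9-tuple in GMT (or None).
# Handlers are inserted at the front of the list, so the most recently
# registered one runs first. The 'epoch' format below is hypothetical.
def _example_epoch_date_handler(dateString):
    if dateString.strip().lower() == 'epoch':
        return time.gmtime(0)
    return None
# registerDateHandler(_example_epoch_date_handler)  # uncomment to enable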
# ISO-8601 date parsing routines written by Fazal Majid.
# The ISO 8601 standard is very convoluted and irregular - a full ISO 8601
# parser is beyond the scope of feedparser and would be a worthwhile addition
# to the Python library.
# A single regular expression cannot parse ISO 8601 date formats into groups
# as the standard is highly irregular (for instance is 030104 2003-01-04 or
# 0301-04-01), so we use templates instead.
# Please note the order in templates is significant because we need a
# greedy match.
_iso8601_tmpl = ['YYYY-?MM-?DD', 'YYYY-0MM?-?DD', 'YYYY-MM', 'YYYY-?OOO',
'YY-?MM-?DD', 'YY-?OOO', 'YYYY',
'-YY-?MM', '-OOO', '-YY',
'--MM-?DD', '--MM',
'---DD',
'CC', '']
_iso8601_re = [
tmpl.replace(
'YYYY', r'(?P<year>\d{4})').replace(
'YY', r'(?P<year>\d\d)').replace(
'MM', r'(?P<month>[01]\d)').replace(
'DD', r'(?P<day>[0123]\d)').replace(
'OOO', r'(?P<ordinal>[0123]\d\d)').replace(
'CC', r'(?P<century>\d\d$)')
+ r'(T?(?P<hour>\d{2}):(?P<minute>\d{2})'
+ r'(:(?P<second>\d{2}))?'
+ r'(\.(?P<fracsecond>\d+))?'
+ r'(?P<tz>[+-](?P<tzhour>\d{2})(:(?P<tzmin>\d{2}))?|Z)?)?'
for tmpl in _iso8601_tmpl]
try:
del tmpl
except NameError:
pass
_iso8601_matches = [re.compile(regex).match for regex in _iso8601_re]
try:
del regex
except NameError:
pass
def _parse_date_iso8601(dateString):
'''Parse a variety of ISO-8601-compatible formats like 20040105'''
m = None
for _iso8601_match in _iso8601_matches:
m = _iso8601_match(dateString)
if m:
break
if not m:
return
if m.span() == (0, 0):
return
params = m.groupdict()
ordinal = params.get('ordinal', 0)
if ordinal:
ordinal = int(ordinal)
else:
ordinal = 0
year = params.get('year', '--')
if not year or year == '--':
year = time.gmtime()[0]
elif len(year) == 2:
# ISO 8601 assumes current century, i.e. 93 -> 2093, NOT 1993
year = 100 * int(time.gmtime()[0] / 100) + int(year)
else:
year = int(year)
month = params.get('month', '-')
if not month or month == '-':
# ordinals are NOT normalized by mktime, we simulate them
# by setting month=1, day=ordinal
if ordinal:
month = 1
else:
month = time.gmtime()[1]
month = int(month)
day = params.get('day', 0)
if not day:
# see above
if ordinal:
day = ordinal
elif params.get('century', 0) or \
params.get('year', 0) or params.get('month', 0):
day = 1
else:
day = time.gmtime()[2]
else:
day = int(day)
# special case of the century - is the first year of the 21st century
# 2000 or 2001 ? The debate goes on...
if 'century' in params:
year = (int(params['century']) - 1) * 100 + 1
# in ISO 8601 most fields are optional
for field in ['hour', 'minute', 'second', 'tzhour', 'tzmin']:
if not params.get(field, None):
params[field] = 0
hour = int(params.get('hour', 0))
minute = int(params.get('minute', 0))
second = int(float(params.get('second', 0)))
# weekday is normalized by mktime(), we can ignore it
weekday = 0
daylight_savings_flag = -1
tm = [year, month, day, hour, minute, second, weekday,
ordinal, daylight_savings_flag]
# ISO 8601 time zone adjustments
tz = params.get('tz')
if tz and tz != 'Z':
if tz[0] == '-':
tm[3] += int(params.get('tzhour', 0))
tm[4] += int(params.get('tzmin', 0))
elif tz[0] == '+':
tm[3] -= int(params.get('tzhour', 0))
tm[4] -= int(params.get('tzmin', 0))
else:
return None
# Python's time.mktime() is a wrapper around the ANSI C mktime(3c)
# which is guaranteed to normalize d/m/y/h/m/s.
# Many implementations have bugs, but we'll pretend they don't.
return time.localtime(time.mktime(tuple(tm)))
registerDateHandler(_parse_date_iso8601)
# 8-bit date handling routines written by ytrewq1.
_korean_year = u'\ub144' # b3e2 in euc-kr
_korean_month = u'\uc6d4' # bff9 in euc-kr
_korean_day = u'\uc77c' # c0cf in euc-kr
_korean_am = u'\uc624\uc804' # bfc0 c0fc in euc-kr
_korean_pm = u'\uc624\ud6c4' # bfc0 c8c4 in euc-kr
_korean_onblog_date_re = \
re.compile('(\d{4})%s\s+(\d{2})%s\s+(\d{2})%s\s+(\d{2}):(\d{2}):(\d{2})' % \
(_korean_year, _korean_month, _korean_day))
_korean_nate_date_re = \
re.compile(u'(\d{4})-(\d{2})-(\d{2})\s+(%s|%s)\s+(\d{,2}):(\d{,2}):(\d{,2})' % \
(_korean_am, _korean_pm))
def _parse_date_onblog(dateString):
'''Parse a string according to the OnBlog 8-bit date format'''
m = _korean_onblog_date_re.match(dateString)
if not m:
return
w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
{'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\
'hour': m.group(4), 'minute': m.group(5), 'second': m.group(6),\
'zonediff': '+09:00'}
return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_onblog)
def _parse_date_nate(dateString):
'''Parse a string according to the Nate 8-bit date format'''
m = _korean_nate_date_re.match(dateString)
if not m:
return
hour = int(m.group(5))
ampm = m.group(4)
if (ampm == _korean_pm):
hour += 12
hour = str(hour)
if len(hour) == 1:
hour = '0' + hour
w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
{'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\
'hour': hour, 'minute': m.group(6), 'second': m.group(7),\
'zonediff': '+09:00'}
return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_nate)
# Unicode strings for Greek date strings
_greek_months = \
{ \
u'\u0399\u03b1\u03bd': u'Jan', # c9e1ed in iso-8859-7
u'\u03a6\u03b5\u03b2': u'Feb', # d6e5e2 in iso-8859-7
u'\u039c\u03ac\u03ce': u'Mar', # ccdcfe in iso-8859-7
u'\u039c\u03b1\u03ce': u'Mar', # cce1fe in iso-8859-7
u'\u0391\u03c0\u03c1': u'Apr', # c1f0f1 in iso-8859-7
u'\u039c\u03ac\u03b9': u'May', # ccdce9 in iso-8859-7
u'\u039c\u03b1\u03ca': u'May', # cce1fa in iso-8859-7
u'\u039c\u03b1\u03b9': u'May', # cce1e9 in iso-8859-7
u'\u0399\u03bf\u03cd\u03bd': u'Jun', # c9effded in iso-8859-7
u'\u0399\u03bf\u03bd': u'Jun', # c9efed in iso-8859-7
u'\u0399\u03bf\u03cd\u03bb': u'Jul', # c9effdeb in iso-8859-7
u'\u0399\u03bf\u03bb': u'Jul', # c9f9eb in iso-8859-7
u'\u0391\u03cd\u03b3': u'Aug', # c1fde3 in iso-8859-7
u'\u0391\u03c5\u03b3': u'Aug', # c1f5e3 in iso-8859-7
u'\u03a3\u03b5\u03c0': u'Sep', # d3e5f0 in iso-8859-7
u'\u039f\u03ba\u03c4': u'Oct', # cfeaf4 in iso-8859-7
u'\u039d\u03bf\u03ad': u'Nov', # cdefdd in iso-8859-7
u'\u039d\u03bf\u03b5': u'Nov', # cdefe5 in iso-8859-7
u'\u0394\u03b5\u03ba': u'Dec', # c4e5ea in iso-8859-7
}
_greek_wdays = \
{ \
u'\u039a\u03c5\u03c1': u'Sun', # caf5f1 in iso-8859-7
u'\u0394\u03b5\u03c5': u'Mon', # c4e5f5 in iso-8859-7
u'\u03a4\u03c1\u03b9': u'Tue', # d4f1e9 in iso-8859-7
u'\u03a4\u03b5\u03c4': u'Wed', # d4e5f4 in iso-8859-7
u'\u03a0\u03b5\u03bc': u'Thu', # d0e5ec in iso-8859-7
u'\u03a0\u03b1\u03c1': u'Fri', # d0e1f1 in iso-8859-7
u'\u03a3\u03b1\u03b2': u'Sat', # d3e1e2 in iso-8859-7
}
_greek_date_format_re = \
re.compile(u'([^,]+),\s+(\d{2})\s+([^\s]+)\s+(\d{4})\s+(\d{2}):(\d{2}):(\d{2})\s+([^\s]+)')
def _parse_date_greek(dateString):
'''Parse a string according to a Greek 8-bit date format.'''
m = _greek_date_format_re.match(dateString)
if not m:
return
wday = _greek_wdays[m.group(1)]
month = _greek_months[m.group(3)]
rfc822date = '%(wday)s, %(day)s %(month)s %(year)s %(hour)s:%(minute)s:%(second)s %(zonediff)s' % \
{'wday': wday, 'day': m.group(2), 'month': month, 'year': m.group(4),\
'hour': m.group(5), 'minute': m.group(6), 'second': m.group(7),\
'zonediff': m.group(8)}
return _parse_date_rfc822(rfc822date)
registerDateHandler(_parse_date_greek)
# Unicode strings for Hungarian date strings
_hungarian_months = \
{ \
u'janu\u00e1r': u'01', # e1 in iso-8859-2
u'febru\u00e1ri': u'02', # e1 in iso-8859-2
u'm\u00e1rcius': u'03', # e1 in iso-8859-2
u'\u00e1prilis': u'04', # e1 in iso-8859-2
u'm\u00e1ujus': u'05', # e1 in iso-8859-2
u'j\u00fanius': u'06', # fa in iso-8859-2
u'j\u00falius': u'07', # fa in iso-8859-2
u'augusztus': u'08',
u'szeptember': u'09',
u'okt\u00f3ber': u'10', # f3 in iso-8859-2
u'november': u'11',
u'december': u'12',
}
_hungarian_date_format_re = \
re.compile(u'(\d{4})-([^-]+)-(\d{,2})T(\d{,2}):(\d{2})((\+|-)(\d{,2}:\d{2}))')
def _parse_date_hungarian(dateString):
'''Parse a string according to a Hungarian 8-bit date format.'''
m = _hungarian_date_format_re.match(dateString)
if not m or m.group(2) not in _hungarian_months:
return None
month = _hungarian_months[m.group(2)]
day = m.group(3)
if len(day) == 1:
day = '0' + day
hour = m.group(4)
if len(hour) == 1:
hour = '0' + hour
w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s%(zonediff)s' % \
{'year': m.group(1), 'month': month, 'day': day,\
'hour': hour, 'minute': m.group(5),\
'zonediff': m.group(6)}
return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_hungarian)
# W3DTF-style date parsing adapted from PyXML xml.utils.iso8601, written by
# Drake and licensed under the Python license. Removed all range checking
# for month, day, hour, minute, and second, since mktime will normalize
# these later
# Modified to also support MSSQL-style datetimes as defined at:
# http://msdn.microsoft.com/en-us/library/ms186724.aspx
# (which basically means allowing a space as a date/time/timezone separator)
def _parse_date_w3dtf(dateString):
def __extract_date(m):
year = int(m.group('year'))
if year < 100:
year = 100 * int(time.gmtime()[0] / 100) + int(year)
if year < 1000:
return 0, 0, 0
julian = m.group('julian')
if julian:
julian = int(julian)
month = julian / 30 + 1
day = julian % 30 + 1
jday = None
while jday != julian:
t = time.mktime((year, month, day, 0, 0, 0, 0, 0, 0))
jday = time.gmtime(t)[-2]
diff = abs(jday - julian)
if jday > julian:
if diff < day:
day = day - diff
else:
month = month - 1
day = 31
elif jday < julian:
if day + diff < 28:
day = day + diff
else:
month = month + 1
return year, month, day
month = m.group('month')
day = 1
if month is None:
month = 1
else:
month = int(month)
day = m.group('day')
if day:
day = int(day)
else:
day = 1
return year, month, day
def __extract_time(m):
if not m:
return 0, 0, 0
hours = m.group('hours')
if not hours:
return 0, 0, 0
hours = int(hours)
minutes = int(m.group('minutes'))
seconds = m.group('seconds')
if seconds:
seconds = int(seconds)
else:
seconds = 0
return hours, minutes, seconds
def __extract_tzd(m):
'''Return the Time Zone Designator as an offset in seconds from UTC.'''
if not m:
return 0
tzd = m.group('tzd')
if not tzd:
return 0
if tzd == 'Z':
return 0
hours = int(m.group('tzdhours'))
minutes = m.group('tzdminutes')
if minutes:
minutes = int(minutes)
else:
minutes = 0
offset = (hours*60 + minutes) * 60
if tzd[0] == '+':
return -offset
return offset
__date_re = ('(?P<year>\d\d\d\d)'
'(?:(?P<dsep>-|)'
'(?:(?P<month>\d\d)(?:(?P=dsep)(?P<day>\d\d))?'
'|(?P<julian>\d\d\d)))?')
__tzd_re = ' ?(?P<tzd>[-+](?P<tzdhours>\d\d)(?::?(?P<tzdminutes>\d\d))|Z)?'
__time_re = ('(?P<hours>\d\d)(?P<tsep>:|)(?P<minutes>\d\d)'
'(?:(?P=tsep)(?P<seconds>\d\d)(?:[.,]\d+)?)?'
+ __tzd_re)
__datetime_re = '%s(?:[T ]%s)?' % (__date_re, __time_re)
__datetime_rx = re.compile(__datetime_re)
m = __datetime_rx.match(dateString)
if (m is None) or (m.group() != dateString):
return
gmt = __extract_date(m) + __extract_time(m) + (0, 0, 0)
if gmt[0] == 0:
return
return time.gmtime(time.mktime(gmt) + __extract_tzd(m) - time.timezone)
registerDateHandler(_parse_date_w3dtf)
# Define the strings used by the RFC822 datetime parser
_rfc822_months = ['jan', 'feb', 'mar', 'apr', 'may', 'jun',
'jul', 'aug', 'sep', 'oct', 'nov', 'dec']
_rfc822_daynames = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun']
# Only the first three letters of the month name matter
_rfc822_month = "(?P<month>%s)(?:[a-z]*,?)" % ('|'.join(_rfc822_months))
# The year may be 2 or 4 digits; capture the century if it exists
_rfc822_year = "(?P<year>(?:\d{2})?\d{2})"
_rfc822_day = "(?P<day> *\d{1,2})"
_rfc822_date = "%s %s %s" % (_rfc822_day, _rfc822_month, _rfc822_year)
_rfc822_hour = "(?P<hour>\d{2}):(?P<minute>\d{2})(?::(?P<second>\d{2}))?"
_rfc822_tz = "(?P<tz>ut|gmt(?:[+-]\d{2}:\d{2})?|[aecmp][sd]?t|[zamny]|[+-]\d{4})"
_rfc822_tznames = {
'ut': 0, 'gmt': 0, 'z': 0,
'adt': -3, 'ast': -4, 'at': -4,
'edt': -4, 'est': -5, 'et': -5,
'cdt': -5, 'cst': -6, 'ct': -6,
'mdt': -6, 'mst': -7, 'mt': -7,
'pdt': -7, 'pst': -8, 'pt': -8,
'a': -1, 'n': 1,
'm': -12, 'y': 12,
}
# The timezone may be prefixed by 'Etc/'
_rfc822_time = "%s (?:etc/)?%s" % (_rfc822_hour, _rfc822_tz)
_rfc822_dayname = "(?P<dayname>%s)" % ('|'.join(_rfc822_daynames))
_rfc822_match = re.compile(
"(?:%s, )?%s(?: %s)?" % (_rfc822_dayname, _rfc822_date, _rfc822_time)
).match
def _parse_date_rfc822(dt):
"""Parse RFC 822 dates and times, with one minor
difference: years may be 4DIGIT or 2DIGIT.
http://tools.ietf.org/html/rfc822#section-5"""
try:
m = _rfc822_match(dt.lower()).groupdict(0)
except AttributeError:
return None
# Calculate a date and timestamp
for k in ('year', 'day', 'hour', 'minute', 'second'):
m[k] = int(m[k])
m['month'] = _rfc822_months.index(m['month']) + 1
# If the year is 2 digits, assume everything in the 90's is the 1990's
if m['year'] < 100:
m['year'] += (1900, 2000)[m['year'] < 90]
stamp = datetime.datetime(*[m[i] for i in
('year', 'month', 'day', 'hour', 'minute', 'second')])
# Use the timezone information to calculate the difference between
# the given date and timestamp and Universal Coordinated Time
tzhour = 0
tzmin = 0
if m['tz'] and m['tz'].startswith('gmt'):
# Handle GMT and GMT+hh:mm timezone syntax (the trailing
# timezone info will be handled by the next `if` block)
m['tz'] = ''.join(m['tz'][3:].split(':')) or 'gmt'
if not m['tz']:
pass
elif m['tz'].startswith('+'):
tzhour = int(m['tz'][1:3])
tzmin = int(m['tz'][3:])
elif m['tz'].startswith('-'):
tzhour = int(m['tz'][1:3]) * -1
tzmin = int(m['tz'][3:]) * -1
else:
tzhour = _rfc822_tznames[m['tz']]
delta = datetime.timedelta(0, 0, 0, 0, tzmin, tzhour)
# Return the date and timestamp in UTC
return (stamp - delta).utctimetuple()
registerDateHandler(_parse_date_rfc822)
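# Illustrative sketch (not part of the original module): the RFC 822 handler
# yields a 9-tuple in UTC; two-digit years below 90 are mapped into 20xx.
def _example_parse_rfc822():
    return _parse_date_rfc822('Thu, 01 Jan 2004 19:48:21 GMT')
    # -> (2004, 1, 1, 19, 48, 21, 3, 1, 0)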
def _parse_date_asctime(dt):
"""Parse asctime-style dates"""
dayname, month, day, remainder = dt.split(None, 3)
# Convert month and day into zero-padded integers
month = '%02i ' % (_rfc822_months.index(month.lower()) + 1)
day = '%02i ' % (int(day),)
dt = month + day + remainder
return time.strptime(dt, '%m %d %H:%M:%S %Y')[:-1] + (0, )
registerDateHandler(_parse_date_asctime)
def _parse_date_perforce(aDateString):
"""parse a date in yyyy/mm/dd hh:mm:ss TTT format"""
# Fri, 2006/09/15 08:19:53 EDT
_my_date_pattern = re.compile( \
r'(\w{,3}), (\d{,4})/(\d{,2})/(\d{2}) (\d{,2}):(\d{2}):(\d{2}) (\w{,3})')
m = _my_date_pattern.search(aDateString)
if m is None:
return None
dow, year, month, day, hour, minute, second, tz = m.groups()
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
dateString = "%s, %s %s %s %s:%s:%s %s" % (dow, day, months[int(month) - 1], year, hour, minute, second, tz)
tm = rfc822.parsedate_tz(dateString)
if tm:
return time.gmtime(rfc822.mktime_tz(tm))
registerDateHandler(_parse_date_perforce)
def _parse_date(dateString):
'''Parses a variety of date formats into a 9-tuple in GMT'''
if not dateString:
return None
for handler in _date_handlers:
try:
date9tuple = handler(dateString)
except (KeyError, OverflowError, ValueError):
continue
if not date9tuple:
continue
if len(date9tuple) != 9:
continue
return date9tuple
return None
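# Illustrative sketch (not part of the original module): _parse_date simply
# tries every registered handler in turn and returns the first 9-tuple.
def _example_parse_date_dispatch():
    a = _parse_date('Thu, 01 Jan 2004 19:48:21 GMT')  # handled by the RFC 822 parser
    b = _parse_date('this is not a date')             # no handler matches -> None
    return a, b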
# Each marker represents some of the characters of the opening XML
# processing instruction ('<?xm') in the specified encoding.
EBCDIC_MARKER = _l2bytes([0x4C, 0x6F, 0xA7, 0x94])
UTF16BE_MARKER = _l2bytes([0x00, 0x3C, 0x00, 0x3F])
UTF16LE_MARKER = _l2bytes([0x3C, 0x00, 0x3F, 0x00])
UTF32BE_MARKER = _l2bytes([0x00, 0x00, 0x00, 0x3C])
UTF32LE_MARKER = _l2bytes([0x3C, 0x00, 0x00, 0x00])
ZERO_BYTES = _l2bytes([0x00, 0x00])
# Match the opening XML declaration.
# Example: <?xml version="1.0" encoding="utf-8"?>
RE_XML_DECLARATION = re.compile('^<\?xml[^>]*?>')
# Capture the value of the XML processing instruction's encoding attribute.
# Example: <?xml version="1.0" encoding="utf-8"?>
RE_XML_PI_ENCODING = re.compile(_s2bytes('^<\?.*encoding=[\'"](.*?)[\'"].*\?>'))
def convert_to_utf8(http_headers, data):
'''Detect and convert the character encoding to UTF-8.
http_headers is a dictionary
data is a raw string (not Unicode)'''
# This is so much trickier than it sounds, it's not even funny.
# According to RFC 3023 ('XML Media Types'), if the HTTP Content-Type
# is application/xml, application/*+xml,
# application/xml-external-parsed-entity, or application/xml-dtd,
# the encoding given in the charset parameter of the HTTP Content-Type
# takes precedence over the encoding given in the XML prefix within the
# document, and defaults to 'utf-8' if neither are specified. But, if
# the HTTP Content-Type is text/xml, text/*+xml, or
# text/xml-external-parsed-entity, the encoding given in the XML prefix
# within the document is ALWAYS IGNORED and only the encoding given in
# the charset parameter of the HTTP Content-Type header should be
# respected, and it defaults to 'us-ascii' if not specified.
# Furthermore, discussion on the atom-syntax mailing list with the
# author of RFC 3023 leads me to the conclusion that any document
# served with a Content-Type of text/* and no charset parameter
# must be treated as us-ascii. (We now do this.) And also that it
# must always be flagged as non-well-formed. (We now do this too.)
# If Content-Type is unspecified (input was local file or non-HTTP source)
# or unrecognized (server just got it totally wrong), then go by the
# encoding given in the XML prefix of the document and default to
# 'iso-8859-1' as per the HTTP specification (RFC 2616).
# Then, assuming we didn't find a character encoding in the HTTP headers
# (and the HTTP Content-type allowed us to look in the body), we need
# to sniff the first few bytes of the XML data and try to determine
# whether the encoding is ASCII-compatible. Section F of the XML
# specification shows the way here:
# http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info
# If the sniffed encoding is not ASCII-compatible, we need to make it
# ASCII compatible so that we can sniff further into the XML declaration
# to find the encoding attribute, which will tell us the true encoding.
# Of course, none of this guarantees that we will be able to parse the
# feed in the declared character encoding (assuming it was declared
# correctly, which many are not). iconv_codec can help a lot;
# you should definitely install it if you can.
# http://cjkpython.i18n.org/
bom_encoding = u''
xml_encoding = u''
rfc3023_encoding = u''
# Look at the first few bytes of the document to guess what
# its encoding may be. We only need to decode enough of the
# document that we can use an ASCII-compatible regular
# expression to search for an XML encoding declaration.
# The heuristic follows the XML specification, section F:
# http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info
# Check for BOMs first.
if data[:4] == codecs.BOM_UTF32_BE:
bom_encoding = u'utf-32be'
data = data[4:]
elif data[:4] == codecs.BOM_UTF32_LE:
bom_encoding = u'utf-32le'
data = data[4:]
elif data[:2] == codecs.BOM_UTF16_BE and data[2:4] != ZERO_BYTES:
bom_encoding = u'utf-16be'
data = data[2:]
elif data[:2] == codecs.BOM_UTF16_LE and data[2:4] != ZERO_BYTES:
bom_encoding = u'utf-16le'
data = data[2:]
elif data[:3] == codecs.BOM_UTF8:
bom_encoding = u'utf-8'
data = data[3:]
# Check for the characters '<?xm' in several encodings.
elif data[:4] == EBCDIC_MARKER:
bom_encoding = u'cp037'
elif data[:4] == UTF16BE_MARKER:
bom_encoding = u'utf-16be'
elif data[:4] == UTF16LE_MARKER:
bom_encoding = u'utf-16le'
elif data[:4] == UTF32BE_MARKER:
bom_encoding = u'utf-32be'
elif data[:4] == UTF32LE_MARKER:
bom_encoding = u'utf-32le'
tempdata = data
try:
if bom_encoding:
tempdata = data.decode(bom_encoding).encode('utf-8')
except (UnicodeDecodeError, LookupError):
# feedparser recognizes UTF-32 encodings that aren't
# available in Python 2.4 and 2.5, so it's possible to
# encounter a LookupError during decoding.
xml_encoding_match = None
else:
xml_encoding_match = RE_XML_PI_ENCODING.match(tempdata)
if xml_encoding_match:
xml_encoding = xml_encoding_match.groups()[0].decode('utf-8').lower()
# Normalize the xml_encoding if necessary.
if bom_encoding and (xml_encoding in (
u'u16', u'utf-16', u'utf16', u'utf_16',
u'u32', u'utf-32', u'utf32', u'utf_32',
u'iso-10646-ucs-2', u'iso-10646-ucs-4',
u'csucs4', u'csunicode', u'ucs-2', u'ucs-4'
)):
xml_encoding = bom_encoding
# Find the HTTP Content-Type and, hopefully, a character
# encoding provided by the server. The Content-Type is used
# to choose the "correct" encoding among the BOM encoding,
# XML declaration encoding, and HTTP encoding, following the
# heuristic defined in RFC 3023.
http_content_type = http_headers.get('content-type') or ''
http_content_type, params = cgi.parse_header(http_content_type)
http_encoding = params.get('charset', '').replace("'", "")
if not isinstance(http_encoding, unicode):
http_encoding = http_encoding.decode('utf-8', 'ignore')
acceptable_content_type = 0
application_content_types = (u'application/xml', u'application/xml-dtd',
u'application/xml-external-parsed-entity')
text_content_types = (u'text/xml', u'text/xml-external-parsed-entity')
if (http_content_type in application_content_types) or \
(http_content_type.startswith(u'application/') and
http_content_type.endswith(u'+xml')):
acceptable_content_type = 1
rfc3023_encoding = http_encoding or xml_encoding or u'utf-8'
elif (http_content_type in text_content_types) or \
(http_content_type.startswith(u'text/') and
http_content_type.endswith(u'+xml')):
acceptable_content_type = 1
rfc3023_encoding = http_encoding or u'us-ascii'
elif http_content_type.startswith(u'text/'):
rfc3023_encoding = http_encoding or u'us-ascii'
elif http_headers and 'content-type' not in http_headers:
rfc3023_encoding = xml_encoding or u'iso-8859-1'
else:
rfc3023_encoding = xml_encoding or u'utf-8'
# gb18030 is a superset of gb2312, so always replace gb2312
# with gb18030 for greater compatibility.
if rfc3023_encoding.lower() == u'gb2312':
rfc3023_encoding = u'gb18030'
if xml_encoding.lower() == u'gb2312':
xml_encoding = u'gb18030'
# there are four encodings to keep track of:
# - http_encoding is the encoding declared in the Content-Type HTTP header
# - xml_encoding is the encoding declared in the <?xml declaration
# - bom_encoding is the encoding sniffed from the first 4 bytes of the XML data
# - rfc3023_encoding is the actual encoding, as per RFC 3023 and a variety of other conflicting specifications
error = None
if http_headers and (not acceptable_content_type):
if 'content-type' in http_headers:
msg = '%s is not an XML media type' % http_headers['content-type']
else:
msg = 'no Content-type specified'
error = NonXMLContentType(msg)
# determine character encoding
known_encoding = 0
chardet_encoding = None
tried_encodings = []
if chardet:
chardet_encoding = unicode(chardet.detect(data)['encoding'] or '', 'ascii', 'ignore')
# try: HTTP encoding, declared XML encoding, encoding sniffed from BOM
for proposed_encoding in (rfc3023_encoding, xml_encoding, bom_encoding,
chardet_encoding, u'utf-8', u'windows-1252', u'iso-8859-2'):
if not proposed_encoding:
continue
if proposed_encoding in tried_encodings:
continue
tried_encodings.append(proposed_encoding)
try:
data = data.decode(proposed_encoding)
except (UnicodeDecodeError, LookupError):
pass
else:
known_encoding = 1
# Update the encoding in the opening XML processing instruction.
new_declaration = '''<?xml version='1.0' encoding='utf-8'?>'''
if RE_XML_DECLARATION.search(data):
data = RE_XML_DECLARATION.sub(new_declaration, data)
else:
data = new_declaration + u'\n' + data
data = data.encode('utf-8')
break
# if still no luck, give up
if not known_encoding:
error = CharacterEncodingUnknown(
'document encoding unknown, I tried ' +
'%s, %s, utf-8, windows-1252, and iso-8859-2 but nothing worked' %
(rfc3023_encoding, xml_encoding))
rfc3023_encoding = u''
elif proposed_encoding != rfc3023_encoding:
error = CharacterEncodingOverride(
'document declared as %s, but parsed as %s' %
(rfc3023_encoding, proposed_encoding))
rfc3023_encoding = proposed_encoding
return data, rfc3023_encoding, error
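# Illustrative sketch (not part of the original module): typical use of the
# encoding negotiation. The headers and document are hypothetical.
def _example_convert_to_utf8():
    headers = {'content-type': 'application/xml; charset=iso-8859-1'}
    raw = '<?xml version="1.0"?><feed></feed>'
    data, encoding, error = convert_to_utf8(headers, raw)
    # For application/xml the HTTP charset wins, so encoding is u'iso-8859-1';
    # data comes back re-encoded as UTF-8 with a rewritten XML declaration,
    # and error is None because the declared and detected encodings agree.
    return data, encoding, error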
# Match XML entity declarations.
# Example: <!ENTITY copyright "(C)">
RE_ENTITY_PATTERN = re.compile(_s2bytes(r'^\s*<!ENTITY([^>]*?)>'), re.MULTILINE)
# Match XML DOCTYPE declarations.
# Example: <!DOCTYPE feed [ ]>
RE_DOCTYPE_PATTERN = re.compile(_s2bytes(r'^\s*<!DOCTYPE([^>]*?)>'), re.MULTILINE)
# Match safe entity declarations.
# This will allow hexadecimal character references through,
# as well as text, but not arbitrary nested entities.
# Example: cubed "&#179;"
# Example: copyright "(C)"
# Forbidden: explode1 "&explode2;&explode2;"
RE_SAFE_ENTITY_PATTERN = re.compile(_s2bytes('\s+(\w+)\s+"(&#\w+;|[^&"]*)"'))
def replace_doctype(data):
'''Strips and replaces the DOCTYPE, returns (rss_version, stripped_data)
rss_version may be 'rss091n' or None
stripped_data is the same XML document with a replaced DOCTYPE
'''
# Divide the document into two groups by finding the location
# of the first element that doesn't begin with '<?' or '<!'.
start = re.search(_s2bytes('<\w'), data)
start = start and start.start() or -1
head, data = data[:start+1], data[start+1:]
# Save and then remove all of the ENTITY declarations.
entity_results = RE_ENTITY_PATTERN.findall(head)
head = RE_ENTITY_PATTERN.sub(_s2bytes(''), head)
# Find the DOCTYPE declaration and check the feed type.
doctype_results = RE_DOCTYPE_PATTERN.findall(head)
doctype = doctype_results and doctype_results[0] or _s2bytes('')
if _s2bytes('netscape') in doctype.lower():
version = u'rss091n'
else:
version = None
# Re-insert the safe ENTITY declarations if a DOCTYPE was found.
replacement = _s2bytes('')
if len(doctype_results) == 1 and entity_results:
match_safe_entities = lambda e: RE_SAFE_ENTITY_PATTERN.match(e)
safe_entities = filter(match_safe_entities, entity_results)
if safe_entities:
replacement = _s2bytes('<!DOCTYPE feed [\n<!ENTITY') \
+ _s2bytes('>\n<!ENTITY ').join(safe_entities) \
+ _s2bytes('>\n]>')
data = RE_DOCTYPE_PATTERN.sub(replacement, head) + data
# Precompute the safe entities for the loose parser.
safe_entities = dict((k.decode('utf-8'), v.decode('utf-8'))
for k, v in RE_SAFE_ENTITY_PATTERN.findall(replacement))
return version, data, safe_entities
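# Illustrative sketch (not part of the original module): detecting the
# Netscape flavour of RSS 0.91 from the DOCTYPE. The document is hypothetical.
def _example_replace_doctype():
    doc = ('<!DOCTYPE rss SYSTEM '
           '"http://my.netscape.com/publish/formats/rss-0.91.dtd">'
           '<rss version="0.91"></rss>')
    version, data, entities = replace_doctype(doc)
    # version == u'rss091n', the DOCTYPE is stripped from data, and entities
    # is empty because no safe ENTITY declarations were found.
    return version, data, entities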
def parse(url_file_stream_or_string, etag=None, modified=None, agent=None, referrer=None, handlers=None, request_headers=None, response_headers=None):
'''Parse a feed from a URL, file, stream, or string.
request_headers, if given, is a dict from http header name to value to add
to the request; this overrides internally generated values.
'''
if handlers is None:
handlers = []
if request_headers is None:
request_headers = {}
if response_headers is None:
response_headers = {}
result = FeedParserDict()
result['feed'] = FeedParserDict()
result['entries'] = []
result['bozo'] = 0
if not isinstance(handlers, list):
handlers = [handlers]
try:
f = _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers, request_headers)
data = f.read()
except Exception, e:
result['bozo'] = 1
result['bozo_exception'] = e
data = None
f = None
if hasattr(f, 'headers'):
result['headers'] = dict(f.headers)
# overwrite existing headers using response_headers
if 'headers' in result:
result['headers'].update(response_headers)
elif response_headers:
result['headers'] = copy.deepcopy(response_headers)
# lowercase all of the HTTP headers for comparisons per RFC 2616
if 'headers' in result:
http_headers = dict((k.lower(), v) for k, v in result['headers'].items())
else:
http_headers = {}
# if feed is gzip-compressed, decompress it
if f and data and http_headers:
if gzip and 'gzip' in http_headers.get('content-encoding', ''):
try:
data = gzip.GzipFile(fileobj=_StringIO(data)).read()
except (IOError, struct.error), e:
# IOError can occur if the gzip header is bad.
# struct.error can occur if the data is damaged.
result['bozo'] = 1
result['bozo_exception'] = e
if isinstance(e, struct.error):
# A gzip header was found but the data is corrupt.
# Ideally, we should re-request the feed without the
# 'Accept-encoding: gzip' header, but we don't.
data = None
elif zlib and 'deflate' in http_headers.get('content-encoding', ''):
try:
data = zlib.decompress(data)
except zlib.error, e:
try:
# The data may have no headers and no checksum.
data = zlib.decompress(data, -15)
except zlib.error, e:
result['bozo'] = 1
result['bozo_exception'] = e
# save HTTP headers
if http_headers:
if 'etag' in http_headers:
etag = http_headers.get('etag', u'')
if not isinstance(etag, unicode):
etag = etag.decode('utf-8', 'ignore')
if etag:
result['etag'] = etag
if 'last-modified' in http_headers:
modified = http_headers.get('last-modified', u'')
if modified:
result['modified'] = modified
result['modified_parsed'] = _parse_date(modified)
if hasattr(f, 'url'):
if not isinstance(f.url, unicode):
result['href'] = f.url.decode('utf-8', 'ignore')
else:
result['href'] = f.url
result['status'] = 200
if hasattr(f, 'status'):
result['status'] = f.status
if hasattr(f, 'close'):
f.close()
if data is None:
return result
# Stop processing if the server sent HTTP 304 Not Modified.
if getattr(f, 'code', 0) == 304:
result['version'] = u''
result['debug_message'] = 'The feed has not changed since you last checked, ' + \
'so the server sent no data. This is a feature, not a bug!'
return result
data, result['encoding'], error = convert_to_utf8(http_headers, data)
use_strict_parser = result['encoding'] and True or False
if error is not None:
result['bozo'] = 1
result['bozo_exception'] = error
result['version'], data, entities = replace_doctype(data)
# Ensure that baseuri is an absolute URI using an acceptable URI scheme.
contentloc = http_headers.get('content-location', u'')
href = result.get('href', u'')
baseuri = _makeSafeAbsoluteURI(href, contentloc) or _makeSafeAbsoluteURI(contentloc) or href
baselang = http_headers.get('content-language', None)
if not isinstance(baselang, unicode) and baselang is not None:
baselang = baselang.decode('utf-8', 'ignore')
if not _XML_AVAILABLE:
use_strict_parser = 0
if use_strict_parser:
# initialize the SAX parser
feedparser = _StrictFeedParser(baseuri, baselang, 'utf-8')
saxparser = xml.sax.make_parser(PREFERRED_XML_PARSERS)
saxparser.setFeature(xml.sax.handler.feature_namespaces, 1)
try:
# disable downloading external doctype references, if possible
saxparser.setFeature(xml.sax.handler.feature_external_ges, 0)
except xml.sax.SAXNotSupportedException:
pass
saxparser.setContentHandler(feedparser)
saxparser.setErrorHandler(feedparser)
source = xml.sax.xmlreader.InputSource()
source.setByteStream(_StringIO(data))
try:
saxparser.parse(source)
except xml.sax.SAXException, e:
result['bozo'] = 1
result['bozo_exception'] = feedparser.exc or e
use_strict_parser = 0
if not use_strict_parser and _SGML_AVAILABLE:
feedparser = _LooseFeedParser(baseuri, baselang, 'utf-8', entities)
feedparser.feed(data.decode('utf-8', 'replace'))
result['feed'] = feedparser.feeddata
result['entries'] = feedparser.entries
result['version'] = result['version'] or feedparser.version
result['namespaces'] = feedparser.namespacesInUse
return result
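# Illustrative sketch (not part of the original module): parse() accepts raw
# strings as well as URLs and file objects. The minimal feed below is
# hypothetical and is parsed entirely in memory.
def _example_parse_string():
    d = parse('<rss version="2.0"><channel><title>demo</title></channel></rss>')
    # d['version'] identifies the detected format, d['feed'] and d['entries']
    # hold the parsed data, and d['bozo'] flags well-formedness problems.
    return d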
| gpl-2.0 | 768,292,287,514,934,000 | 40.543015 | 302 | 0.566455 | false |
eadgarchen/tensorflow | tensorflow/python/kernel_tests/numerics_test.py | 45 | 5057 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.numerics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import numerics
from tensorflow.python.platform import test
class VerifyTensorAllFiniteTest(test.TestCase):
def testVerifyTensorAllFiniteSucceeds(self):
x_shape = [5, 4]
x = np.random.random_sample(x_shape).astype(np.float32)
with self.test_session(use_gpu=True):
t = constant_op.constant(x, shape=x_shape, dtype=dtypes.float32)
t_verified = numerics.verify_tensor_all_finite(t,
"Input is not a number.")
self.assertAllClose(x, t_verified.eval())
def testVerifyTensorAllFiniteFails(self):
x_shape = [5, 4]
x = np.random.random_sample(x_shape).astype(np.float32)
my_msg = "Input is not a number."
# Test NaN.
x[0] = np.nan
with self.test_session(use_gpu=True):
with self.assertRaisesOpError(my_msg):
t = constant_op.constant(x, shape=x_shape, dtype=dtypes.float32)
t_verified = numerics.verify_tensor_all_finite(t, my_msg)
t_verified.eval()
# Test Inf.
x[0] = np.inf
with self.test_session(use_gpu=True):
with self.assertRaisesOpError(my_msg):
t = constant_op.constant(x, shape=x_shape, dtype=dtypes.float32)
t_verified = numerics.verify_tensor_all_finite(t, my_msg)
t_verified.eval()
class NumericsTest(test.TestCase):
def testInf(self):
with self.test_session(graph=ops.Graph()):
t1 = constant_op.constant(1.0)
t2 = constant_op.constant(0.0)
a = math_ops.div(t1, t2)
check = numerics.add_check_numerics_ops()
a = control_flow_ops.with_dependencies([check], a)
with self.assertRaisesOpError("Inf"):
a.eval()
def testNaN(self):
with self.test_session(graph=ops.Graph()):
t1 = constant_op.constant(0.0)
t2 = constant_op.constant(0.0)
a = math_ops.div(t1, t2)
check = numerics.add_check_numerics_ops()
a = control_flow_ops.with_dependencies([check], a)
with self.assertRaisesOpError("NaN"):
a.eval()
def testBoth(self):
with self.test_session(graph=ops.Graph()):
t1 = constant_op.constant([1.0, 0.0])
t2 = constant_op.constant([0.0, 0.0])
a = math_ops.div(t1, t2)
check = numerics.add_check_numerics_ops()
a = control_flow_ops.with_dependencies([check], a)
with self.assertRaisesOpError("Inf and NaN"):
a.eval()
def testPassThrough(self):
with self.test_session(graph=ops.Graph()):
t1 = constant_op.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3])
checked = array_ops.check_numerics(t1, message="pass through test")
value = checked.eval()
self.assertAllEqual(np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]), value)
self.assertEqual([2, 3], checked.get_shape())
def testControlFlowCond(self):
predicate = array_ops.placeholder(dtypes.bool, shape=[])
_ = control_flow_ops.cond(predicate,
lambda: constant_op.constant([37.]),
lambda: constant_op.constant([42.]))
with self.assertRaisesRegexp(
ValueError,
r"`tf\.add_check_numerics_ops\(\) is not compatible with "
r"TensorFlow control flow operations such as `tf\.cond\(\)` "
r"or `tf.while_loop\(\)`\."):
numerics.add_check_numerics_ops()
def testControlFlowWhile(self):
predicate = array_ops.placeholder(dtypes.bool, shape=[])
_ = control_flow_ops.while_loop(lambda _: predicate,
lambda _: constant_op.constant([37.]),
[constant_op.constant([42.])])
with self.assertRaisesRegexp(
ValueError,
r"`tf\.add_check_numerics_ops\(\) is not compatible with "
r"TensorFlow control flow operations such as `tf\.cond\(\)` "
r"or `tf.while_loop\(\)`\."):
numerics.add_check_numerics_ops()
if __name__ == "__main__":
test.main()
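# --- Editor's note: illustrative, non-test sketch of the op exercised above, added
# for context. It assumes TF1-style graph execution and reuses the module aliases
# imported in this file; the tensor values are examples only.
#
#     x = constant_op.constant([1.0, float('nan')])
#     checked = array_ops.check_numerics(x, message="x is not finite")
#     # evaluating `checked` raises InvalidArgumentError because x contains NaN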
| apache-2.0 | -1,354,743,083,804,603,600 | 37.310606 | 80 | 0.637334 | false |
alexmilesyounger/ds_basics | src/numpy_utils.py | 2 | 3188 | # coding: utf-8
# numpy_utils for Intro to Data Science with Python
# Author: Kat Chuang
# Created: Nov 2014
# --------------------------------------
import numpy
## Stage 2 begin
fieldNames = ['', 'id', 'priceLabel', 'name','brandId', 'brandName', 'imageLink',
'desc', 'vendor', 'patterned', 'material']
dataTypes = [('myint', 'i'), ('myid', 'i'), ('price', 'f8'), ('name', 'a200'),
('brandId', '<i8'), ('brandName', 'a200'), ('imageUrl', '|S500'),
('description', '|S900'), ('vendor', '|S100'), ('pattern', '|S50'), ('material', '|S50'), ]
def load_data(filename):
my_csv = numpy.genfromtxt(filename, delimiter='\t', skip_header=1,
names=fieldNames, invalid_raise=False,
dtype=dataTypes)
return my_csv
#2.a count
def size(my_csv):
print("Length (numpy): {}".format(my_csv.size))
#2.b sum
def calculate_numpy_sum(my_field):
field_in_float = [float(item) for item in my_field]
total = numpy.sum(field_in_float)
return total
#2.c mean
def find_numpy_average(my_field):
field_in_float = [float(item) for item in my_field]
total = calculate_numpy_sum(field_in_float)
size = len(my_field)
average = total / size
return average
#2.d max, min
def numpy_max(my_field_in_float):
return numpy.amax(my_field_in_float)
def numpy_min(my_field_in_float):
return numpy.amin(my_field_in_float)
## Stage 2 end
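# --- Editor's note: minimal usage sketch for the Stage 2 helpers above (illustration
# only; the file name 'ties.tsv' and the 'price' field are assumptions based on the
# fieldNames/dataTypes definitions, not part of the original module).
#
#     my_csv = load_data('ties.tsv')
#     size(my_csv)
#     prices = my_csv['price']
#     print("sum: {}".format(calculate_numpy_sum(prices)))
#     print("mean: {}".format(find_numpy_average(prices)))
#     print("max: {} min: {}".format(numpy_max(prices), numpy_min(prices)))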
# --------------------------------------
## Stage 3 begin
from my_utils import filter_col_by_string, filter_col_by_float
## Stage 3 end
# --------------------------------------
## Stage 4 begin
from my_utils import write_to_file, write_brand_and_price_to_file
## Stage 4 end
# --------------------------------------
## Stage 5 begin
import matplotlib.pyplot as plt
plt.style.use('ggplot')
def plot_all_bars(prices_in_float, exported_figure_filename):
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
prices = list(map(int, prices_in_float))
X = numpy.arange(len(prices))
width = 0.25
ax.bar(X+width, prices, width)
ax.set_xlim([0, 5055])
fig.savefig(exported_figure_filename)
def create_chart_for_embed(sample, title):
prices = sorted(map(int, sample))
x_axis_ticks = list( range(len(sample)) )
plt.plot(x_axis_ticks, prices, 'g', label='price points', linewidth=2)
def export_chart(sample, title):
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
prices = sorted(map(int, sample))
x_axis_ticks = list( range(len(sample)) )
ax.plot(x_axis_ticks, prices, 'g', label='price points', linewidth=2)
ax.set_title(title)
ax.set_xlabel(title)
ax.set_ylabel('Number of Ties')
if len(prices) > 20:
ax.set_xlim([0, round(len(prices), -1)])
else:
ax.set_xlim([0, len(prices)])
fig.savefig('_charts/' + title + '.png')
def prices_of_list(sampleData):
temp_list = []
for row in sampleData[1:]:
priceCol = float(row[2])
temp_list.append(priceCol)
return temp_list
## Stage 5 end
# --------------------------------------
## Stage 6 begin
## Stage 6 end
# --------------------------------------
| mit | 2,955,030,263,153,201,700 | 24.709677 | 107 | 0.568381 | false |
sameetb-cuelogic/edx-platform-test | common/djangoapps/student/management/commands/add_to_group.py | 182 | 1968 | from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth.models import User, Group
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--list',
action='store_true',
dest='list',
default=False,
help='List available groups'),
make_option('--create',
action='store_true',
dest='create',
default=False,
help='Create the group if it does not exist'),
make_option('--remove',
action='store_true',
dest='remove',
default=False,
help='Remove the user from the group instead of adding it'),
)
args = '<user|email> <group>'
help = 'Add a user to a group'
def print_groups(self):
print 'Groups available:'
for group in Group.objects.all().distinct():
print ' ', group.name
def handle(self, *args, **options):
if options['list']:
self.print_groups()
return
if len(args) != 2:
raise CommandError('Usage is add_to_group {0}'.format(self.args))
name_or_email, group_name = args
if '@' in name_or_email:
user = User.objects.get(email=name_or_email)
else:
user = User.objects.get(username=name_or_email)
try:
group = Group.objects.get(name=group_name)
except Group.DoesNotExist:
if options['create']:
group = Group(name=group_name)
group.save()
else:
raise CommandError('Group {} does not exist'.format(group_name))
if options['remove']:
user.groups.remove(group)
else:
user.groups.add(group)
print 'Success!'
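# --- Editor's note: example invocations (illustrative only; the user and group names
# below are made up, and the command is assumed to be run via manage.py as usual for
# Django management commands):
#
#     python manage.py add_to_group --list
#     python manage.py add_to_group [email protected] beta-testers --create
#     python manage.py add_to_group some_username beta-testers --remove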
| agpl-3.0 | 3,640,178,042,292,162,000 | 30.238095 | 80 | 0.519309 | false |
flavour/tldrmp | modules/tests/inv/create_item.py | 25 | 2382 | # -*- coding: utf-8 -*-
""" Sahana Eden Automated Tests - INV005 Create Item
@copyright: 2011-2012 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
from tests.web2unittest import SeleniumUnitTest
class CreateItem(SeleniumUnitTest):
def test_inv005_create_item(self):
"""
@case: INV005
@description: Create an Item
@TestDoc: https://docs.google.com/spreadsheet/ccc?key=0AmB3hMcgB-3idG1XNGhhRG9QWF81dUlKLXpJaFlCMFE
@Test Wiki: http://eden.sahanafoundation.org/wiki/DeveloperGuidelines/Testing
"""
print "\n"
# Login, if not-already done so
self.login(account="admin", nexturl="asset/item/create")
self.browser.find_element_by_id("supply_item_um").clear()
self.create("supply_item",
[( "name",
"Soup" ),
( "um",
"litre" ),
( "item_category_id",
"Standard > Food"),
( "model",
"Tomato" ),
( "year",
"2012" ),
( "comments",
"This is a Test Item" )]
)
| mit | -2,625,177,039,953,814,000 | 38.065574 | 110 | 0.600336 | false |
XiaodunServerGroup/ddyedx | cms/envs/common.py | 1 | 18114 | # -*- coding: utf-8 -*-
"""
This is the common settings file, intended to set sane defaults. If you have a
piece of configuration that's dependent on a set of feature flags being set,
then create a function that returns the calculated value based on the value of
FEATURES[...]. Modules that extend this one can change the feature
configuration in an environment specific config file and re-calculate those
values.
We should make a method that calls all these config methods so that you just
make one call at the end of your site-specific dev file to reset all the
dependent variables (like INSTALLED_APPS) for you.
Longer TODO:
1. Right now our treatment of static content in general and in particular
course-specific static content is haphazard.
2. We should have a more disciplined approach to feature flagging, even if it
just means that we stick them in a dict called FEATURES.
3. We need to handle configuration for multiple courses. This could be as
multiple sites, but we do need a way to map their data assets.
"""
# We intentionally define lots of variables that aren't used, and
# want to import all variables from base settings files
# pylint: disable=W0401, W0611, W0614
import sys
import json
import lms.envs.common
from lms.envs.common import (
USE_TZ, TECH_SUPPORT_EMAIL, PLATFORM_NAME, BUGS_EMAIL, DOC_STORE_CONFIG, ALL_LANGUAGES
)
from path import path
from lms.lib.xblock.mixin import LmsBlockMixin
from cms.lib.xblock.mixin import CmsBlockMixin
from xmodule.modulestore.inheritance import InheritanceMixin
from xmodule.x_module import XModuleMixin, prefer_xmodules
from dealer.git import git
############################ FEATURE CONFIGURATION #############################
FEATURES = {
'USE_DJANGO_PIPELINE': True,
'GITHUB_PUSH': False,
'ENABLE_DISCUSSION_SERVICE': False,
'AUTH_USE_CERTIFICATES': False,
# email address for studio staff (eg to request course creation)
'STUDIO_REQUEST_EMAIL': '',
'STUDIO_NPS_SURVEY': True,
# Segment.io - must explicitly turn it on for production
'SEGMENT_IO': False,
# Enable URL that shows information about the status of various services
'ENABLE_SERVICE_STATUS': False,
# Don't autoplay videos for course authors
'AUTOPLAY_VIDEOS': False,
# If set to True, new Studio users won't be able to author courses unless
# edX has explicitly added them to the course creator group.
'ENABLE_CREATOR_GROUP': False,
# whether to use password policy enforcement or not
'ENFORCE_PASSWORD_POLICY': False,
# If set to True, Studio won't restrict the set of advanced components
# to just those pre-approved by edX
'ALLOW_ALL_ADVANCED_COMPONENTS': False,
# Turn off account locking if failed login attempts exceeds a limit
'ENABLE_MAX_FAILED_LOGIN_ATTEMPTS': False,
# Allow editing of short description in course settings in cms
'EDITABLE_SHORT_DESCRIPTION': True,
# Hide any Personally Identifiable Information from application logs
'SQUELCH_PII_IN_LOGS': False,
# Toggles embargo functionality
'EMBARGO': False,
# Turn on/off Microsites feature
'USE_MICROSITES': False,
}
ENABLE_JASMINE = False
########### course fields #############
# COURSE_EXTEND_FIELDS = lms.envs.common.COURSE_EXTEND_FIELDS
############################# SET PATH INFORMATION #############################
PROJECT_ROOT = path(__file__).abspath().dirname().dirname() # /edx-platform/cms
REPO_ROOT = PROJECT_ROOT.dirname()
COMMON_ROOT = REPO_ROOT / "common"
LMS_ROOT = REPO_ROOT / "lms"
ENV_ROOT = REPO_ROOT.dirname() # virtualenv dir /edx-platform is in
GITHUB_REPO_ROOT = ENV_ROOT / "data"
sys.path.append(REPO_ROOT)
sys.path.append(PROJECT_ROOT / 'djangoapps')
sys.path.append(COMMON_ROOT / 'djangoapps')
sys.path.append(COMMON_ROOT / 'lib')
# For geolocation ip database
GEOIP_PATH = REPO_ROOT / "common/static/data/geoip/GeoIP.dat"
############################# WEB CONFIGURATION #############################
# This is where we stick our compiled template files.
from tempdir import mkdtemp_clean
MAKO_MODULE_DIR = mkdtemp_clean('mako')
MAKO_TEMPLATES = {}
MAKO_TEMPLATES['main'] = [
PROJECT_ROOT / 'templates',
COMMON_ROOT / 'templates',
COMMON_ROOT / 'djangoapps' / 'pipeline_mako' / 'templates',
COMMON_ROOT / 'djangoapps' / 'pipeline_js' / 'templates',
]
for namespace, template_dirs in lms.envs.common.MAKO_TEMPLATES.iteritems():
MAKO_TEMPLATES['lms.' + namespace] = template_dirs
TEMPLATE_DIRS = MAKO_TEMPLATES['main']
EDX_ROOT_URL = ''
LOGIN_REDIRECT_URL = EDX_ROOT_URL + '/signin'
LOGIN_URL = EDX_ROOT_URL + '/signin'
TEMPLATE_CONTEXT_PROCESSORS = (
'django.core.context_processors.request',
'django.core.context_processors.static',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.i18n',
'django.contrib.auth.context_processors.auth', # this is required for admin
'django.core.context_processors.csrf',
'dealer.contrib.django.staff.context_processor', # access git revision
'contentstore.context_processors.doc_url',
)
# use the ratelimit backend to prevent brute force attacks
AUTHENTICATION_BACKENDS = (
'ratelimitbackend.backends.RateLimitModelBackend',
)
LMS_BASE = None
#################### CAPA External Code Evaluation #############################
XQUEUE_INTERFACE = {
'url': 'http://localhost:8888',
'django_auth': {'username': 'local',
'password': 'local'},
'basic_auth': None,
}
################################# Middleware ###################################
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'staticfiles.finders.FileSystemFinder',
'staticfiles.finders.AppDirectoriesFinder',
'pipeline.finders.PipelineFinder',
)
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
MIDDLEWARE_CLASSES = (
'request_cache.middleware.RequestCache',
'django.middleware.cache.UpdateCacheMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'method_override.middleware.MethodOverrideMiddleware',
# Instead of AuthenticationMiddleware, we use a cache-backed version
'cache_toolbox.middleware.CacheBackedAuthenticationMiddleware',
'student.middleware.UserStandingMiddleware',
'contentserver.middleware.StaticContentServer',
'crum.CurrentRequestUserMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'track.middleware.TrackMiddleware',
# Allows us to dark-launch particular languages
'dark_lang.middleware.DarkLangMiddleware',
'embargo.middleware.EmbargoMiddleware',
# Detects user-requested locale from 'accept-language' header in http request
'django.middleware.locale.LocaleMiddleware',
'django.middleware.transaction.TransactionMiddleware',
# needs to run after locale middleware (or anything that modifies the request context)
'edxmako.middleware.MakoMiddleware',
# catches any uncaught RateLimitExceptions and returns a 403 instead of a 500
'ratelimitbackend.middleware.RateLimitMiddleware',
# for expiring inactive sessions
'session_inactivity_timeout.middleware.SessionInactivityTimeout',
# use Django built in clickjacking protection
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
# Clickjacking protection can be enabled by setting this to 'DENY'
X_FRAME_OPTIONS = 'ALLOW'
############# XBlock Configuration ##########
# This should be moved into an XBlock Runtime/Application object
# once the responsibility of XBlock creation is moved out of modulestore - cpennington
XBLOCK_MIXINS = (LmsBlockMixin, CmsBlockMixin, InheritanceMixin, XModuleMixin)
# Allow any XBlock in Studio
# You should also enable the ALLOW_ALL_ADVANCED_COMPONENTS feature flag, so that
# xblocks can be added via advanced settings
XBLOCK_SELECT_FUNCTION = prefer_xmodules
############################ SIGNAL HANDLERS ################################
# This is imported to register the exception signal handling that logs exceptions
import monitoring.exceptions # noqa
############################ DJANGO_BUILTINS ################################
# Change DEBUG/TEMPLATE_DEBUG in your environment settings files, not here
DEBUG = False
TEMPLATE_DEBUG = False
# Site info
SITE_ID = 1
SITE_NAME = "0.0.0.0:8001"
HTTPS = 'on'
ROOT_URLCONF = 'cms.urls'
IGNORABLE_404_ENDS = ('favicon.ico',)
# Email
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.126.com'
EMAIL_PORT = 25
EMAIL_USE_TLS = False
EMAIL_HOST_USER = 'xiaodunxin'
EMAIL_HOST_PASSWORD = '123456qr'
DEFAULT_FROM_EMAIL = '[email protected]'
DEFAULT_FEEDBACK_EMAIL = '[email protected]'
SERVER_EMAIL = '[email protected]'
ADMINS = ()
MANAGERS = ADMINS
# Static content
STATIC_URL = '/static/' + git.revision + "/"
ADMIN_MEDIA_PREFIX = '/static/admin/'
STATIC_ROOT = ENV_ROOT / "staticfiles" / git.revision
STATICFILES_DIRS = [
COMMON_ROOT / "static",
PROJECT_ROOT / "static",
LMS_ROOT / "static",
# This is how you would use the textbook images locally
# ("book", ENV_ROOT / "book_images"),
]
# Locale/Internationalization
TIME_ZONE = 'Asia/Shanghai' # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
LANGUAGE_CODE = 'zh-cn' # http://www.i18nguy.com/unicode/language-identifiers.html
SITE_NAME = 'mooc.diandiyun.com:18010'
LANGUAGES = lms.envs.common.LANGUAGES
USE_I18N = True
USE_L10N = True
# Localization strings (e.g. django.po) are under this directory
LOCALE_PATHS = (REPO_ROOT + '/conf/locale',) # edx-platform/conf/locale/
# Messages
MESSAGE_STORAGE = 'django.contrib.messages.storage.session.SessionStorage'
# If this is true, random scores will be generated for the purpose of debugging the profile graphs
GENERATE_PROFILE_SCORES = False
############################### Pipeline #######################################
STATICFILES_STORAGE = 'pipeline.storage.PipelineCachedStorage'
from rooted_paths import rooted_glob
PIPELINE_CSS = {
'style-vendor': {
'source_filenames': [
'css/vendor/normalize.css',
'css/vendor/font-awesome.css',
'css/vendor/html5-input-polyfills/number-polyfill.css',
'js/vendor/CodeMirror/codemirror.css',
'css/vendor/ui-lightness/jquery-ui-1.8.22.custom.css',
'css/vendor/jquery.qtip.min.css',
'js/vendor/markitup/skins/simple/style.css',
'js/vendor/markitup/sets/wiki/style.css',
],
'output_filename': 'css/cms-style-vendor.css',
},
'style-app': {
'source_filenames': [
'sass/style-app.css',
],
'output_filename': 'css/cms-style-app.css',
},
'style-app-extend1': {
'source_filenames': [
'sass/style-app-extend1.css',
],
'output_filename': 'css/cms-style-app-extend1.css',
},
'style-xmodule': {
'source_filenames': [
'sass/style-xmodule.css',
],
'output_filename': 'css/cms-style-xmodule.css',
},
'style-calendar-vendor': {
'source_filenames': [
'css/vendor/fullcalendar/fullcalendar.css',
'css/vendor/fullcalendar/fullcalendar_s.css',
'css/vendor/fullcalendar/fullcalendar.print.css',
],
'output_filename': 'css/lms-style-fullcalendar-vendor.css',
}
}
fullcalendar_vendor_js = [
'js/vendor/fullcalendar/moment.min.js',
'js/vendor/fullcalendar/fullcalendar.min.js',
'js/vendor/fullcalendar/jquery-ui.custom.min.js',
'js/vendor/fullcalendar/lang-all.js',
]
# test_order: Determines the position of this chunk of javascript on
# the jasmine test page
PIPELINE_JS = {
'module-js': {
'source_filenames': (
rooted_glob(COMMON_ROOT / 'static/', 'xmodule/descriptors/js/*.js') +
rooted_glob(COMMON_ROOT / 'static/', 'xmodule/modules/js/*.js') +
rooted_glob(COMMON_ROOT / 'static/', 'coffee/src/discussion/*.js')
),
'output_filename': 'js/cms-modules.js',
'test_order': 1
},
'calendar_vendor': {
'source_filenames': fullcalendar_vendor_js,
'output_filename': 'js/lms-fullcalendar_vendor.js',
'test_order': 0,
},
}
PIPELINE_COMPILERS = (
'pipeline.compilers.coffee.CoffeeScriptCompiler',
)
PIPELINE_CSS_COMPRESSOR = None
PIPELINE_JS_COMPRESSOR = None
STATICFILES_IGNORE_PATTERNS = (
"*.py",
"*.pyc"
# it would be nice if we could do, for example, "**/*.scss",
# but these strings get passed down to the `fnmatch` module,
# which doesn't support that. :(
# http://docs.python.org/2/library/fnmatch.html
"sass/*.scss",
"sass/*/*.scss",
"sass/*/*/*.scss",
"sass/*/*/*/*.scss",
"coffee/*.coffee",
"coffee/*/*.coffee",
"coffee/*/*/*.coffee",
"coffee/*/*/*/*.coffee",
# Symlinks used by js-test-tool
"xmodule_js",
"common_static",
)
PIPELINE_YUI_BINARY = 'yui-compressor'
################################# CELERY ######################################
# Message configuration
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_MESSAGE_COMPRESSION = 'gzip'
# Results configuration
CELERY_IGNORE_RESULT = False
CELERY_STORE_ERRORS_EVEN_IF_IGNORED = True
# Events configuration
CELERY_TRACK_STARTED = True
CELERY_SEND_EVENTS = True
CELERY_SEND_TASK_SENT_EVENT = True
# Exchange configuration
CELERY_DEFAULT_EXCHANGE = 'edx.core'
CELERY_DEFAULT_EXCHANGE_TYPE = 'direct'
# Queues configuration
HIGH_PRIORITY_QUEUE = 'edx.core.high'
DEFAULT_PRIORITY_QUEUE = 'edx.core.default'
LOW_PRIORITY_QUEUE = 'edx.core.low'
CELERY_QUEUE_HA_POLICY = 'all'
CELERY_CREATE_MISSING_QUEUES = True
CELERY_DEFAULT_QUEUE = DEFAULT_PRIORITY_QUEUE
CELERY_DEFAULT_ROUTING_KEY = DEFAULT_PRIORITY_QUEUE
CELERY_QUEUES = {
HIGH_PRIORITY_QUEUE: {},
LOW_PRIORITY_QUEUE: {},
DEFAULT_PRIORITY_QUEUE: {}
}
############################## Video ##########################################
# URL to test YouTube availability
YOUTUBE_TEST_URL = 'https://gdata.youtube.com/feeds/api/videos/'
############################ APPS #####################################
INSTALLED_APPS = (
# Standard apps
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'djcelery',
'south',
'method_override',
# Database-backed configuration
'config_models',
# Monitor the status of services
'service_status',
# Testing
'django_nose',
# For CMS
'contentstore',
'course_creators',
'student', # misleading name due to sharing with lms
'course_groups', # not used in cms (yet), but tests run
# Tracking
'track',
'eventtracking.django',
# Monitoring
'datadog',
# For asset pipelining
'edxmako',
'pipeline',
'staticfiles',
'static_replace',
# comment common
'django_comment_common',
# for course creator table
'django.contrib.admin',
# XBlocks containing migrations
'mentoring',
# for managing course modes
'course_modes',
# Dark-launching languages
'dark_lang',
# Student identity reverification
'reverification',
# User preferences
'user_api',
'django_openid_auth',
'embargo',
)
################# EDX MARKETING SITE ##################################
EDXMKTG_COOKIE_NAME = 'edxloggedin'
MKTG_URLS = {}
MKTG_URL_LINK_MAP = {
}
COURSES_WITH_UNSAFE_CODE = []
############################## EVENT TRACKING #################################
TRACK_MAX_EVENT = 10000
TRACKING_BACKENDS = {
'logger': {
'ENGINE': 'track.backends.logger.LoggerBackend',
'OPTIONS': {
'name': 'tracking'
}
}
}
#### PASSWORD POLICY SETTINGS #####
PASSWORD_MIN_LENGTH = None
PASSWORD_MAX_LENGTH = None
PASSWORD_COMPLEXITY = {}
PASSWORD_DICTIONARY_EDIT_DISTANCE_THRESHOLD = None
PASSWORD_DICTIONARY = []
# We're already logging events, and we don't want to capture user
# names/passwords. Heartbeat events are likely not interesting.
TRACKING_IGNORE_URL_PATTERNS = [r'^/event', r'^/login', r'^/heartbeat']
TRACKING_ENABLED = True
# Current youtube api for requesting transcripts.
# for example: http://video.google.com/timedtext?lang=en&v=j_jEn79vS3g.
YOUTUBE_API = {
'url': "http://video.google.com/timedtext",
'params': {'lang': 'en', 'v': 'set_youtube_id_of_11_symbols_here'}
}
##### ACCOUNT LOCKOUT DEFAULT PARAMETERS #####
MAX_FAILED_LOGIN_ATTEMPTS_ALLOWED = 5
MAX_FAILED_LOGIN_ATTEMPTS_LOCKOUT_PERIOD_SECS = 15 * 60
### JSdraw (only installed in some instances)
try:
import edx_jsdraw
except ImportError:
pass
else:
INSTALLED_APPS += ('edx_jsdraw',)
############## SSO KEY ################
SSO_KEY = "SSOFOUNDER"
############## user auth ##############
PASSWORD_HASHERS = (
'django.contrib.auth.hashers.UnsaltedMD5PasswordHasher',
'django.contrib.auth.hashers.PBKDF2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
'django.contrib.auth.hashers.BCryptPasswordHasher',
'django.contrib.auth.hashers.SHA1PasswordHasher',
'django.contrib.auth.hashers.MD5PasswordHasher',
'django.contrib.auth.hashers.UnsaltedSHA1PasswordHasher',
'django.contrib.auth.hashers.CryptPasswordHasher'
)
############## BUSINESS SYSTEM #################
XIAODUN_BACK_HOST = 'http://busi.xiaodun.cn/app'
############## video mettings ##################
VEDIO_MEETING_DOMAIN = "http://passport.guoshi.com/mp"
############## wenjuan domain ##################
WENJUAN_DOMAIN = "http://apitest.wenjuan.com:8000"
####### wenjuan secret_key #######
WENJUAN_SECKEY = "9d15a674a6e621058f1ea9171413b7c0"
| agpl-3.0 | -3,742,054,748,391,241,700 | 28.891089 | 98 | 0.659932 | false |
HonzaKral/django | tests/generic_inline_admin/tests.py | 15 | 22206 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
from django.contrib import admin
from django.contrib.admin.sites import AdminSite
from django.contrib.auth.models import User
from django.contrib.contenttypes.admin import GenericTabularInline
from django.contrib.contenttypes.forms import generic_inlineformset_factory
from django.core.urlresolvers import reverse
from django.forms.formsets import DEFAULT_MAX_NUM
from django.forms.models import ModelForm
from django.test import (
RequestFactory, SimpleTestCase, TestCase, override_settings,
)
from .admin import MediaInline, MediaPermanentInline, site as admin_site
from .models import Category, Episode, EpisodePermanent, Media
class TestDataMixin(object):
@classmethod
def setUpTestData(cls):
# password = "secret"
User.objects.create(
pk=100, username='super', first_name='Super', last_name='User', email='[email protected]',
password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158', is_active=True, is_superuser=True,
is_staff=True, last_login=datetime.datetime(2007, 5, 30, 13, 20, 10),
date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
# Set DEBUG to True to ensure {% include %} will raise exceptions.
# That is how inlines are rendered and #9498 will bubble up if it is an issue.
@override_settings(
DEBUG=True,
PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF="generic_inline_admin.urls",
)
class GenericAdminViewTest(TestDataMixin, TestCase):
def setUp(self):
self.client.login(username='super', password='secret')
# Can't load content via a fixture (since the GenericForeignKey
# relies on content type IDs, which will vary depending on what
# other tests have been run), thus we do it here.
e = Episode.objects.create(name='This Week in Django')
self.episode_pk = e.pk
m = Media(content_object=e, url='http://example.com/podcast.mp3')
m.save()
self.mp3_media_pk = m.pk
m = Media(content_object=e, url='http://example.com/logo.png')
m.save()
self.png_media_pk = m.pk
def test_basic_add_GET(self):
"""
A smoke test to ensure GET on the add_view works.
"""
response = self.client.get(reverse('admin:generic_inline_admin_episode_add'))
self.assertEqual(response.status_code, 200)
def test_basic_edit_GET(self):
"""
A smoke test to ensure GET on the change_view works.
"""
response = self.client.get(
reverse('admin:generic_inline_admin_episode_change', args=(self.episode_pk,))
)
self.assertEqual(response.status_code, 200)
def test_basic_add_POST(self):
"""
A smoke test to ensure POST on add_view works.
"""
post_data = {
"name": "This Week in Django",
# inline data
"generic_inline_admin-media-content_type-object_id-TOTAL_FORMS": "1",
"generic_inline_admin-media-content_type-object_id-INITIAL_FORMS": "0",
"generic_inline_admin-media-content_type-object_id-MAX_NUM_FORMS": "0",
}
response = self.client.post(reverse('admin:generic_inline_admin_episode_add'), post_data)
self.assertEqual(response.status_code, 302) # redirect somewhere
def test_basic_edit_POST(self):
"""
A smoke test to ensure POST on edit_view works.
"""
post_data = {
"name": "This Week in Django",
# inline data
"generic_inline_admin-media-content_type-object_id-TOTAL_FORMS": "3",
"generic_inline_admin-media-content_type-object_id-INITIAL_FORMS": "2",
"generic_inline_admin-media-content_type-object_id-MAX_NUM_FORMS": "0",
"generic_inline_admin-media-content_type-object_id-0-id": "%d" % self.mp3_media_pk,
"generic_inline_admin-media-content_type-object_id-0-url": "http://example.com/podcast.mp3",
"generic_inline_admin-media-content_type-object_id-1-id": "%d" % self.png_media_pk,
"generic_inline_admin-media-content_type-object_id-1-url": "http://example.com/logo.png",
"generic_inline_admin-media-content_type-object_id-2-id": "",
"generic_inline_admin-media-content_type-object_id-2-url": "",
}
url = reverse('admin:generic_inline_admin_episode_change', args=(self.episode_pk,))
response = self.client.post(url, post_data)
self.assertEqual(response.status_code, 302) # redirect somewhere
def test_generic_inline_formset(self):
EpisodeMediaFormSet = generic_inlineformset_factory(Media, can_delete=False, exclude=['description', 'keywords'], extra=3)
e = Episode.objects.get(name='This Week in Django')
# Works with no queryset
formset = EpisodeMediaFormSet(instance=e)
self.assertEqual(len(formset.forms), 5)
self.assertHTMLEqual(formset.forms[0].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-0-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-0-url" type="url" name="generic_inline_admin-media-content_type-object_id-0-url" value="http://example.com/podcast.mp3" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-0-id" value="%s" id="id_generic_inline_admin-media-content_type-object_id-0-id" /></p>' % self.mp3_media_pk)
self.assertHTMLEqual(formset.forms[1].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-1-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-1-url" type="url" name="generic_inline_admin-media-content_type-object_id-1-url" value="http://example.com/logo.png" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-1-id" value="%s" id="id_generic_inline_admin-media-content_type-object_id-1-id" /></p>' % self.png_media_pk)
self.assertHTMLEqual(formset.forms[2].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-2-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-2-url" type="url" name="generic_inline_admin-media-content_type-object_id-2-url" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-2-id" id="id_generic_inline_admin-media-content_type-object_id-2-id" /></p>')
# A queryset can be used to alter display ordering
formset = EpisodeMediaFormSet(instance=e, queryset=Media.objects.order_by('url'))
self.assertEqual(len(formset.forms), 5)
self.assertHTMLEqual(formset.forms[0].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-0-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-0-url" type="url" name="generic_inline_admin-media-content_type-object_id-0-url" value="http://example.com/logo.png" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-0-id" value="%s" id="id_generic_inline_admin-media-content_type-object_id-0-id" /></p>' % self.png_media_pk)
self.assertHTMLEqual(formset.forms[1].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-1-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-1-url" type="url" name="generic_inline_admin-media-content_type-object_id-1-url" value="http://example.com/podcast.mp3" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-1-id" value="%s" id="id_generic_inline_admin-media-content_type-object_id-1-id" /></p>' % self.mp3_media_pk)
self.assertHTMLEqual(formset.forms[2].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-2-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-2-url" type="url" name="generic_inline_admin-media-content_type-object_id-2-url" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-2-id" id="id_generic_inline_admin-media-content_type-object_id-2-id" /></p>')
# Works with a queryset that omits items
formset = EpisodeMediaFormSet(instance=e, queryset=Media.objects.filter(url__endswith=".png"))
self.assertEqual(len(formset.forms), 4)
self.assertHTMLEqual(formset.forms[0].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-0-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-0-url" type="url" name="generic_inline_admin-media-content_type-object_id-0-url" value="http://example.com/logo.png" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-0-id" value="%s" id="id_generic_inline_admin-media-content_type-object_id-0-id" /></p>' % self.png_media_pk)
self.assertHTMLEqual(formset.forms[1].as_p(), '<p><label for="id_generic_inline_admin-media-content_type-object_id-1-url">Url:</label> <input id="id_generic_inline_admin-media-content_type-object_id-1-url" type="url" name="generic_inline_admin-media-content_type-object_id-1-url" maxlength="200" /><input type="hidden" name="generic_inline_admin-media-content_type-object_id-1-id" id="id_generic_inline_admin-media-content_type-object_id-1-id" /></p>')
def test_generic_inline_formset_factory(self):
# Regression test for #10522.
inline_formset = generic_inlineformset_factory(Media,
exclude=('url',))
# Regression test for #12340.
e = Episode.objects.get(name='This Week in Django')
formset = inline_formset(instance=e)
self.assertTrue(formset.get_queryset().ordered)
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF="generic_inline_admin.urls")
class GenericInlineAdminParametersTest(TestDataMixin, TestCase):
def setUp(self):
self.client.login(username='super', password='secret')
self.factory = RequestFactory()
def _create_object(self, model):
"""
Create a model with an attached Media object via GFK. We can't
load content via a fixture (since the GenericForeignKey relies on
content type IDs, which will vary depending on what other tests
have been run), thus we do it here.
"""
e = model.objects.create(name='This Week in Django')
Media.objects.create(content_object=e, url='http://example.com/podcast.mp3')
return e
def test_no_param(self):
"""
With one initial form, extra (default) at 3, there should be 4 forms.
"""
e = self._create_object(Episode)
response = self.client.get(reverse('admin:generic_inline_admin_episode_change', args=(e.pk,)))
formset = response.context['inline_admin_formsets'][0].formset
self.assertEqual(formset.total_form_count(), 4)
self.assertEqual(formset.initial_form_count(), 1)
def test_extra_param(self):
"""
With extra=0, there should be one form.
"""
class ExtraInline(GenericTabularInline):
model = Media
extra = 0
modeladmin = admin.ModelAdmin(Episode, admin_site)
modeladmin.inlines = [ExtraInline]
e = self._create_object(Episode)
request = self.factory.get(reverse('admin:generic_inline_admin_episode_change', args=(e.pk,)))
request.user = User(username='super', is_superuser=True)
response = modeladmin.changeform_view(request, object_id=str(e.pk))
formset = response.context_data['inline_admin_formsets'][0].formset
self.assertEqual(formset.total_form_count(), 1)
self.assertEqual(formset.initial_form_count(), 1)
    def test_max_num_param(self):
"""
With extra=5 and max_num=2, there should be only 2 forms.
"""
class MaxNumInline(GenericTabularInline):
model = Media
extra = 5
max_num = 2
modeladmin = admin.ModelAdmin(Episode, admin_site)
modeladmin.inlines = [MaxNumInline]
e = self._create_object(Episode)
request = self.factory.get(reverse('admin:generic_inline_admin_episode_change', args=(e.pk,)))
request.user = User(username='super', is_superuser=True)
response = modeladmin.changeform_view(request, object_id=str(e.pk))
formset = response.context_data['inline_admin_formsets'][0].formset
self.assertEqual(formset.total_form_count(), 2)
self.assertEqual(formset.initial_form_count(), 1)
def test_min_num_param(self):
"""
With extra=3 and min_num=2, there should be five forms.
"""
class MinNumInline(GenericTabularInline):
model = Media
extra = 3
min_num = 2
modeladmin = admin.ModelAdmin(Episode, admin_site)
modeladmin.inlines = [MinNumInline]
e = self._create_object(Episode)
request = self.factory.get(reverse('admin:generic_inline_admin_episode_change', args=(e.pk,)))
request.user = User(username='super', is_superuser=True)
response = modeladmin.changeform_view(request, object_id=str(e.pk))
formset = response.context_data['inline_admin_formsets'][0].formset
self.assertEqual(formset.total_form_count(), 5)
self.assertEqual(formset.initial_form_count(), 1)
def test_get_extra(self):
class GetExtraInline(GenericTabularInline):
model = Media
extra = 4
def get_extra(self, request, obj):
return 2
modeladmin = admin.ModelAdmin(Episode, admin_site)
modeladmin.inlines = [GetExtraInline]
e = self._create_object(Episode)
request = self.factory.get(reverse('admin:generic_inline_admin_episode_change', args=(e.pk,)))
request.user = User(username='super', is_superuser=True)
response = modeladmin.changeform_view(request, object_id=str(e.pk))
formset = response.context_data['inline_admin_formsets'][0].formset
self.assertEqual(formset.extra, 2)
def test_get_min_num(self):
class GetMinNumInline(GenericTabularInline):
model = Media
min_num = 5
def get_min_num(self, request, obj):
return 2
modeladmin = admin.ModelAdmin(Episode, admin_site)
modeladmin.inlines = [GetMinNumInline]
e = self._create_object(Episode)
request = self.factory.get(reverse('admin:generic_inline_admin_episode_change', args=(e.pk,)))
request.user = User(username='super', is_superuser=True)
response = modeladmin.changeform_view(request, object_id=str(e.pk))
formset = response.context_data['inline_admin_formsets'][0].formset
self.assertEqual(formset.min_num, 2)
def test_get_max_num(self):
class GetMaxNumInline(GenericTabularInline):
model = Media
extra = 5
def get_max_num(self, request, obj):
return 2
modeladmin = admin.ModelAdmin(Episode, admin_site)
modeladmin.inlines = [GetMaxNumInline]
e = self._create_object(Episode)
request = self.factory.get(reverse('admin:generic_inline_admin_episode_change', args=(e.pk,)))
request.user = User(username='super', is_superuser=True)
response = modeladmin.changeform_view(request, object_id=str(e.pk))
formset = response.context_data['inline_admin_formsets'][0].formset
self.assertEqual(formset.max_num, 2)
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF="generic_inline_admin.urls")
class GenericInlineAdminWithUniqueTogetherTest(TestDataMixin, TestCase):
def setUp(self):
self.client.login(username='super', password='secret')
def test_add(self):
category_id = Category.objects.create(name='male').pk
post_data = {
"name": "John Doe",
# inline data
"generic_inline_admin-phonenumber-content_type-object_id-TOTAL_FORMS": "1",
"generic_inline_admin-phonenumber-content_type-object_id-INITIAL_FORMS": "0",
"generic_inline_admin-phonenumber-content_type-object_id-MAX_NUM_FORMS": "0",
"generic_inline_admin-phonenumber-content_type-object_id-0-id": "",
"generic_inline_admin-phonenumber-content_type-object_id-0-phone_number": "555-555-5555",
"generic_inline_admin-phonenumber-content_type-object_id-0-category": "%s" % category_id,
}
response = self.client.get(reverse('admin:generic_inline_admin_contact_add'))
self.assertEqual(response.status_code, 200)
response = self.client.post(reverse('admin:generic_inline_admin_contact_add'), post_data)
self.assertEqual(response.status_code, 302) # redirect somewhere
@override_settings(ROOT_URLCONF="generic_inline_admin.urls")
class NoInlineDeletionTest(SimpleTestCase):
def test_no_deletion(self):
inline = MediaPermanentInline(EpisodePermanent, admin_site)
fake_request = object()
formset = inline.get_formset(fake_request)
self.assertFalse(formset.can_delete)
class MockRequest(object):
pass
class MockSuperUser(object):
def has_perm(self, perm):
return True
request = MockRequest()
request.user = MockSuperUser()
@override_settings(ROOT_URLCONF="generic_inline_admin.urls")
class GenericInlineModelAdminTest(SimpleTestCase):
def setUp(self):
self.site = AdminSite()
def test_get_formset_kwargs(self):
media_inline = MediaInline(Media, AdminSite())
# Create a formset with default arguments
formset = media_inline.get_formset(request)
self.assertEqual(formset.max_num, DEFAULT_MAX_NUM)
self.assertEqual(formset.can_order, False)
# Create a formset with custom keyword arguments
formset = media_inline.get_formset(request, max_num=100, can_order=True)
self.assertEqual(formset.max_num, 100)
self.assertEqual(formset.can_order, True)
def test_custom_form_meta_exclude_with_readonly(self):
"""
Ensure that the custom ModelForm's `Meta.exclude` is respected when
used in conjunction with `GenericInlineModelAdmin.readonly_fields`
and when no `ModelAdmin.exclude` is defined.
"""
class MediaForm(ModelForm):
class Meta:
model = Media
exclude = ['url']
class MediaInline(GenericTabularInline):
readonly_fields = ['description']
form = MediaForm
model = Media
class EpisodeAdmin(admin.ModelAdmin):
inlines = [
MediaInline
]
ma = EpisodeAdmin(Episode, self.site)
self.assertEqual(
list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields),
['keywords', 'id', 'DELETE'])
def test_custom_form_meta_exclude(self):
"""
Ensure that the custom ModelForm's `Meta.exclude` is respected by
`GenericInlineModelAdmin.get_formset`, and overridden if
`ModelAdmin.exclude` or `GenericInlineModelAdmin.exclude` are defined.
Refs #15907.
"""
# First with `GenericInlineModelAdmin` -----------------
class MediaForm(ModelForm):
class Meta:
model = Media
exclude = ['url']
class MediaInline(GenericTabularInline):
exclude = ['description']
form = MediaForm
model = Media
class EpisodeAdmin(admin.ModelAdmin):
inlines = [
MediaInline
]
ma = EpisodeAdmin(Episode, self.site)
self.assertEqual(
list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields),
['url', 'keywords', 'id', 'DELETE'])
# Then, only with `ModelForm` -----------------
class MediaInline(GenericTabularInline):
form = MediaForm
model = Media
class EpisodeAdmin(admin.ModelAdmin):
inlines = [
MediaInline
]
ma = EpisodeAdmin(Episode, self.site)
self.assertEqual(
list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields),
['description', 'keywords', 'id', 'DELETE'])
def test_get_fieldsets(self):
# Test that get_fieldsets is called when figuring out form fields.
# Refs #18681.
class MediaForm(ModelForm):
class Meta:
model = Media
fields = '__all__'
class MediaInline(GenericTabularInline):
form = MediaForm
model = Media
can_delete = False
def get_fieldsets(self, request, obj=None):
return [(None, {'fields': ['url', 'description']})]
ma = MediaInline(Media, self.site)
form = ma.get_formset(None).form
self.assertEqual(form._meta.fields, ['url', 'description'])
def test_get_formsets_with_inlines_returns_tuples(self):
"""
Ensure that get_formsets_with_inlines() returns the correct tuples.
"""
class MediaForm(ModelForm):
class Meta:
model = Media
exclude = ['url']
class MediaInline(GenericTabularInline):
form = MediaForm
model = Media
class AlternateInline(GenericTabularInline):
form = MediaForm
model = Media
class EpisodeAdmin(admin.ModelAdmin):
inlines = [
AlternateInline, MediaInline
]
ma = EpisodeAdmin(Episode, self.site)
inlines = ma.get_inline_instances(request)
for (formset, inline), other_inline in zip(ma.get_formsets_with_inlines(request), inlines):
self.assertIsInstance(formset, other_inline.get_formset(request).__class__)
| bsd-3-clause | -8,932,824,797,791,606,000 | 45.749474 | 530 | 0.648383 | false |
insta-code1/Instafit-ecommerce-Django | Scripts/enhancer.py | 1 | 1522 | #!d:\e-commers\scripts\python.exe
#
# The Python Imaging Library
# $Id$
#
# this demo script creates four windows containing an image and a slider.
# drag the slider to modify the image.
#
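# usage (assumed, based on the sys.argv[1] read below):
#
#     python enhancer.py <image-file>
#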
try:
from tkinter import Tk, Toplevel, Frame, Label, Scale, HORIZONTAL
except ImportError:
from Tkinter import Tk, Toplevel, Frame, Label, Scale, HORIZONTAL
from PIL import Image, ImageTk, ImageEnhance
import sys
#
# enhancer widget
class Enhance(Frame):
def __init__(self, master, image, name, enhancer, lo, hi):
Frame.__init__(self, master)
# set up the image
self.tkim = ImageTk.PhotoImage(image.mode, image.size)
self.enhancer = enhancer(image)
self.update("1.0") # normalize
# image window
Label(self, image=self.tkim).pack()
# scale
s = Scale(self, label=name, orient=HORIZONTAL,
from_=lo, to=hi, resolution=0.01,
command=self.update)
s.set(self.value)
s.pack()
def update(self, value):
        self.value = float(value)  # the Scale callback passes the value as a string
self.tkim.paste(self.enhancer.enhance(self.value))
#
# main
root = Tk()
im = Image.open(sys.argv[1])
im.thumbnail((200, 200))
Enhance(root, im, "Color", ImageEnhance.Color, 0.0, 4.0).pack()
Enhance(Toplevel(), im, "Sharpness", ImageEnhance.Sharpness, -2.0, 2.0).pack()
Enhance(Toplevel(), im, "Brightness", ImageEnhance.Brightness, -1.0, 3.0).pack()
Enhance(Toplevel(), im, "Contrast", ImageEnhance.Contrast, -1.0, 3.0).pack()
root.mainloop()
| mit | -4,311,511,396,518,780,000 | 24.79661 | 80 | 0.641261 | false |
thesuperzapper/tensorflow | tensorflow/contrib/learn/python/learn/learn_io/data_feeder_test.py | 71 | 12923 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `DataFeeder`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
# pylint: disable=wildcard-import
from tensorflow.contrib.learn.python.learn.learn_io import *
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
# pylint: enable=wildcard-import
class DataFeederTest(test.TestCase):
# pylint: disable=undefined-variable
"""Tests for `DataFeeder`."""
def _wrap_dict(self, data, prepend=''):
return {prepend + '1': data, prepend + '2': data}
def _assert_raises(self, input_data):
with self.assertRaisesRegexp(TypeError, 'annot convert'):
data_feeder.DataFeeder(input_data, None, n_classes=0, batch_size=1)
def test_input_uint32(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.uint32)
self._assert_raises(data)
self._assert_raises(self._wrap_dict(data))
def test_input_uint64(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.uint64)
self._assert_raises(data)
self._assert_raises(self._wrap_dict(data))
def _assert_dtype(self, expected_np_dtype, expected_tf_dtype, input_data):
feeder = data_feeder.DataFeeder(input_data, None, n_classes=0, batch_size=1)
if isinstance(input_data, dict):
for k, v in list(feeder.input_dtype.items()):
self.assertEqual(expected_np_dtype, v)
else:
self.assertEqual(expected_np_dtype, feeder.input_dtype)
with ops.Graph().as_default() as g, self.test_session(g):
inp, _ = feeder.input_builder()
if isinstance(inp, dict):
for k, v in list(inp.items()):
self.assertEqual(expected_tf_dtype, v.dtype)
else:
self.assertEqual(expected_tf_dtype, inp.dtype)
def test_input_int8(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.int8)
self._assert_dtype(np.int8, dtypes.int8, data)
self._assert_dtype(np.int8, dtypes.int8, self._wrap_dict(data))
def test_input_int16(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.int16)
self._assert_dtype(np.int16, dtypes.int16, data)
self._assert_dtype(np.int16, dtypes.int16, self._wrap_dict(data))
def test_input_int32(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.int32)
self._assert_dtype(np.int32, dtypes.int32, data)
self._assert_dtype(np.int32, dtypes.int32, self._wrap_dict(data))
def test_input_int64(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.int64)
self._assert_dtype(np.int64, dtypes.int64, data)
self._assert_dtype(np.int64, dtypes.int64, self._wrap_dict(data))
def test_input_uint8(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.uint8)
self._assert_dtype(np.uint8, dtypes.uint8, data)
self._assert_dtype(np.uint8, dtypes.uint8, self._wrap_dict(data))
def test_input_uint16(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.uint16)
self._assert_dtype(np.uint16, dtypes.uint16, data)
self._assert_dtype(np.uint16, dtypes.uint16, self._wrap_dict(data))
def test_input_float16(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.float16)
self._assert_dtype(np.float16, dtypes.float16, data)
self._assert_dtype(np.float16, dtypes.float16, self._wrap_dict(data))
def test_input_float32(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.float32)
self._assert_dtype(np.float32, dtypes.float32, data)
self._assert_dtype(np.float32, dtypes.float32, self._wrap_dict(data))
def test_input_float64(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.float64)
self._assert_dtype(np.float64, dtypes.float64, data)
self._assert_dtype(np.float64, dtypes.float64, self._wrap_dict(data))
def test_input_bool(self):
data = np.array([[False for _ in xrange(2)] for _ in xrange(2)])
self._assert_dtype(np.bool, dtypes.bool, data)
self._assert_dtype(np.bool, dtypes.bool, self._wrap_dict(data))
def test_input_string(self):
input_data = np.array([['str%d' % i for i in xrange(2)] for _ in xrange(2)])
self._assert_dtype(input_data.dtype, dtypes.string, input_data)
self._assert_dtype(input_data.dtype, dtypes.string,
self._wrap_dict(input_data))
def _assertAllClose(self, src, dest, src_key_of=None, src_prop=None):
def func(x):
val = getattr(x, src_prop) if src_prop else x
return val if src_key_of is None else src_key_of[val]
if isinstance(src, dict):
for k in list(src.keys()):
self.assertAllClose(func(src[k]), dest)
else:
self.assertAllClose(func(src), dest)
def test_unsupervised(self):
def func(feeder):
with self.test_session():
inp, _ = feeder.input_builder()
feed_dict_fn = feeder.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self._assertAllClose(inp, [[1, 2]], feed_dict, 'name')
data = np.matrix([[1, 2], [2, 3], [3, 4]])
func(data_feeder.DataFeeder(data, None, n_classes=0, batch_size=1))
func(
data_feeder.DataFeeder(
self._wrap_dict(data), None, n_classes=0, batch_size=1))
def test_data_feeder_regression(self):
def func(df):
inp, out = df.input_builder()
feed_dict_fn = df.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self._assertAllClose(inp, [[3, 4], [1, 2]], feed_dict, 'name')
self._assertAllClose(out, [2, 1], feed_dict, 'name')
x = np.matrix([[1, 2], [3, 4]])
y = np.array([1, 2])
func(data_feeder.DataFeeder(x, y, n_classes=0, batch_size=3))
func(
data_feeder.DataFeeder(
self._wrap_dict(x, 'in'),
self._wrap_dict(y, 'out'),
n_classes=self._wrap_dict(0, 'out'),
batch_size=3))
def test_epoch(self):
def func(feeder):
with self.test_session():
feeder.input_builder()
epoch = feeder.make_epoch_variable()
feed_dict_fn = feeder.get_feed_dict_fn()
# First input
feed_dict = feed_dict_fn()
self.assertAllClose(feed_dict[epoch.name], [0])
# Second input
feed_dict = feed_dict_fn()
self.assertAllClose(feed_dict[epoch.name], [0])
# Third input
feed_dict = feed_dict_fn()
self.assertAllClose(feed_dict[epoch.name], [0])
# Back to the first input again, so new epoch.
feed_dict = feed_dict_fn()
self.assertAllClose(feed_dict[epoch.name], [1])
data = np.matrix([[1, 2], [2, 3], [3, 4]])
labels = np.array([0, 0, 1])
func(data_feeder.DataFeeder(data, labels, n_classes=0, batch_size=1))
func(
data_feeder.DataFeeder(
self._wrap_dict(data, 'in'),
self._wrap_dict(labels, 'out'),
n_classes=self._wrap_dict(0, 'out'),
batch_size=1))
def test_data_feeder_multioutput_regression(self):
def func(df):
inp, out = df.input_builder()
feed_dict_fn = df.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self._assertAllClose(inp, [[3, 4], [1, 2]], feed_dict, 'name')
self._assertAllClose(out, [[3, 4], [1, 2]], feed_dict, 'name')
x = np.matrix([[1, 2], [3, 4]])
y = np.array([[1, 2], [3, 4]])
func(data_feeder.DataFeeder(x, y, n_classes=0, batch_size=2))
func(
data_feeder.DataFeeder(
self._wrap_dict(x, 'in'),
self._wrap_dict(y, 'out'),
n_classes=self._wrap_dict(0, 'out'),
batch_size=2))
def test_data_feeder_multioutput_classification(self):
def func(df):
inp, out = df.input_builder()
feed_dict_fn = df.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self._assertAllClose(inp, [[3, 4], [1, 2]], feed_dict, 'name')
self._assertAllClose(
out, [[[0, 0, 1, 0, 0], [0, 0, 0, 1, 0], [0, 0, 0, 0, 1]],
[[1, 0, 0, 0, 0], [0, 1, 0, 0, 0], [0, 0, 1, 0, 0]]], feed_dict,
'name')
x = np.matrix([[1, 2], [3, 4]])
y = np.array([[0, 1, 2], [2, 3, 4]])
func(data_feeder.DataFeeder(x, y, n_classes=5, batch_size=2))
func(
data_feeder.DataFeeder(
self._wrap_dict(x, 'in'),
self._wrap_dict(y, 'out'),
n_classes=self._wrap_dict(5, 'out'),
batch_size=2))
def test_streaming_data_feeder(self):
def func(df):
inp, out = df.input_builder()
feed_dict_fn = df.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self._assertAllClose(inp, [[[1, 2]], [[3, 4]]], feed_dict, 'name')
self._assertAllClose(out, [[[1], [2]], [[2], [2]]], feed_dict, 'name')
def x_iter(wrap_dict=False):
yield np.array([[1, 2]]) if not wrap_dict else self._wrap_dict(
np.array([[1, 2]]), 'in')
yield np.array([[3, 4]]) if not wrap_dict else self._wrap_dict(
np.array([[3, 4]]), 'in')
def y_iter(wrap_dict=False):
yield np.array([[1], [2]]) if not wrap_dict else self._wrap_dict(
np.array([[1], [2]]), 'out')
yield np.array([[2], [2]]) if not wrap_dict else self._wrap_dict(
np.array([[2], [2]]), 'out')
func(
data_feeder.StreamingDataFeeder(
x_iter(), y_iter(), n_classes=0, batch_size=2))
func(
data_feeder.StreamingDataFeeder(
x_iter(True),
y_iter(True),
n_classes=self._wrap_dict(0, 'out'),
batch_size=2))
# Test non-full batches.
func(
data_feeder.StreamingDataFeeder(
x_iter(), y_iter(), n_classes=0, batch_size=10))
func(
data_feeder.StreamingDataFeeder(
x_iter(True),
y_iter(True),
n_classes=self._wrap_dict(0, 'out'),
batch_size=10))
def test_dask_data_feeder(self):
if HAS_PANDAS and HAS_DASK:
x = pd.DataFrame(
dict(
a=np.array([.1, .3, .4, .6, .2, .1, .6]),
b=np.array([.7, .8, .1, .2, .5, .3, .9])))
x = dd.from_pandas(x, npartitions=2)
y = pd.DataFrame(dict(labels=np.array([1, 0, 2, 1, 0, 1, 2])))
y = dd.from_pandas(y, npartitions=2)
# TODO(ipolosukhin): Remove or restore this.
# x = extract_dask_data(x)
# y = extract_dask_labels(y)
df = data_feeder.DaskDataFeeder(x, y, n_classes=2, batch_size=2)
inp, out = df.input_builder()
feed_dict_fn = df.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self.assertAllClose(feed_dict[inp.name], [[0.40000001, 0.1],
[0.60000002, 0.2]])
self.assertAllClose(feed_dict[out.name], [[0., 0., 1.], [0., 1., 0.]])
def test_hdf5_data_feeder(self):
def func(df):
inp, out = df.input_builder()
feed_dict_fn = df.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self._assertAllClose(inp, [[3, 4], [1, 2]], feed_dict, 'name')
      self._assertAllClose(out, [2, 1], feed_dict, 'name')
try:
import h5py # pylint: disable=g-import-not-at-top
x = np.matrix([[1, 2], [3, 4]])
y = np.array([1, 2])
h5f = h5py.File('test_hdf5.h5', 'w')
h5f.create_dataset('x', data=x)
h5f.create_dataset('y', data=y)
h5f.close()
h5f = h5py.File('test_hdf5.h5', 'r')
x = h5f['x']
y = h5f['y']
func(data_feeder.DataFeeder(x, y, n_classes=0, batch_size=3))
func(
data_feeder.DataFeeder(
self._wrap_dict(x, 'in'),
self._wrap_dict(y, 'out'),
n_classes=self._wrap_dict(0, 'out'),
batch_size=3))
except ImportError:
print("Skipped test for hdf5 since it's not installed.")
class SetupPredictDataFeederTest(DataFeederTest):
"""Tests for `DataFeeder.setup_predict_data_feeder`."""
def test_iterable_data(self):
# pylint: disable=undefined-variable
def func(df):
self._assertAllClose(six.next(df), [[1, 2], [3, 4]])
self._assertAllClose(six.next(df), [[5, 6]])
data = [[1, 2], [3, 4], [5, 6]]
x = iter(data)
x_dict = iter([self._wrap_dict(v) for v in iter(data)])
func(data_feeder.setup_predict_data_feeder(x, batch_size=2))
func(data_feeder.setup_predict_data_feeder(x_dict, batch_size=2))
if __name__ == '__main__':
test.main()
| apache-2.0 | -3,217,039,612,851,633,000 | 35.609065 | 80 | 0.589337 | false |
creyesp/RF_Estimation | Clustering/clustering/gmm.py | 2 | 6063 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# gmm.py
#
# Copyright 2014 Carlos "casep" Sepulveda <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
# Performs Gaussian Mixture Model (GMM) clustering using scikit-learn
import sys, os
sys.path.append(os.path.join(os.path.dirname(__file__), '../..','LIB'))
import rfestimationLib as rfe
import argparse #argument parsing
import numpy as np
import scipy.ndimage
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
from sklearn import mixture
from sklearn import metrics
clustersColours = ['#fcfa00', '#ff0000', '#820c2c', '#ff006f', '#af00ff','#0200ff','#008dff','#00e8ff','#0c820e','#28ea04','#ea8404','#c8628f','#6283ff','#5b6756','#0c8248','k','#820cff','#932c11','#002c11','#829ca7']
def main():
	parser = argparse.ArgumentParser(prog='gmm.py',
		description='Performs Gaussian Mixture Model clustering using scikit-learn',
		formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--sourceFolder',
help='Source folder',
type=str, required=True)
parser.add_argument('--outputFolder',
help='Output folder',
type=str, required=True)
parser.add_argument('--clustersNumber',
help='Number of clusters',
type=int, default='5', choices=[3,4,5,6,7,8,9,10,11,12,13,14,15], required=False)
parser.add_argument('--framesNumber',
help='Number of frames used in STA analysis',
type=int, default='20', required=False)
parser.add_argument('--blockSize',
help='Size of each block in micrometres',
type=int, default='50', required=False)
args = parser.parse_args()
#Source folder of the files with the timestamps
sourceFolder = rfe.fixPath(args.sourceFolder)
if not os.path.exists(sourceFolder):
print ''
print 'Source folder does not exists ' + sourceFolder
sys.exit()
#Output folder for the graphics
outputFolder = rfe.fixPath(args.outputFolder)
if not os.path.exists(outputFolder):
try:
os.makedirs(outputFolder)
except:
print ''
print 'Unable to create folder ' + outputFolder
sys.exit()
#Clusters number for the kmeans algorithm
clustersNumber = args.clustersNumber
#Frames used in STA analysis
framesNumber = args.framesNumber
#Size of each block in micrometres
blockSize = args.blockSize
#dataCluster stores the data to be used for the clustering process
#the size is equal to the number of frames, aka, the time component
#plus 7 as we are incorporating the 2 dimensions of the ellipse,
#2 dimensions of the ellipse on micrometres,
#x position, y position and angle
dataCluster = np.zeros((1,framesNumber+7))
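	# Sketch of the per-unit row appended in the loop below (layout inferred
	# from the concatenation order in this script, not from an external spec):
	#   columns 0 .. framesNumber-1 : gaussian-filtered temporal STA profile
	#   framesNumber + 0            : A radius of the RF ellipse (micrometres)
	#   framesNumber + 1            : B radius of the RF ellipse (micrometres)
	#   framesNumber + 2            : A radius of the RF ellipse (fit units)
	#   framesNumber + 3            : B radius of the RF ellipse (fit units)
	#   framesNumber + 4            : angle of the RF ellipse
	#   framesNumber + 5            : X coordinate of the RF ellipse
	#   framesNumber + 6            : Y coordinate of the RF ellipse
	# Only columns 0 .. framesNumber+1 are used as clustering features further
	# down (data = dataCluster[:,0:framesNumber+2]).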
units=[]
dato=np.zeros((1,1))
for unitFile in os.listdir(sourceFolder):
if os.path.isdir(sourceFolder+unitFile):
unitName = unitFile.rsplit('_', 1)[0]
dataUnit, coordinates = rfe.loadSTACurve(sourceFolder,unitFile,unitName)
xSize = dataUnit.shape[0]
ySize = dataUnit.shape[1]
fitResult = rfe.loadFitMatrix(sourceFolder,unitFile)
#should we use the not-gaussian-fitted data for clustering?
dataUnitGauss = scipy.ndimage.gaussian_filter(dataUnit[coordinates[0][0],[coordinates[1][0]],:],2)
#A radius of the RF ellipse, adjusted to micrometres
dato[0] = blockSize * fitResult[0][2]
dataUnitCompleta = np.concatenate((dataUnitGauss,dato),1)
#B radius of the RF ellipse, adjusted to micrometres
dato[0] = blockSize * fitResult[0][3]
dataUnitCompleta = np.concatenate((dataUnitCompleta,dato),1)
#A radius of the RF ellipse
dato[0] = fitResult[0][2]
dataUnitCompleta = np.concatenate((dataUnitCompleta,dato),1)
#B radius of the RF ellipse
dato[0] = fitResult[0][3]
dataUnitCompleta = np.concatenate((dataUnitCompleta,dato),1)
#angle of the RF ellipse
dato[0] = fitResult[0][1]
dataUnitCompleta = np.concatenate((dataUnitCompleta,dato),1)
#X coordinate of the RF ellipse
dato[0] = fitResult[0][4]
dataUnitCompleta = np.concatenate((dataUnitCompleta,dato),1)
#Y coordinate of the RF ellipse
dato[0] = fitResult[0][5]
dataUnitCompleta = np.concatenate((dataUnitCompleta,dato),1)
dataCluster = np.append(dataCluster,dataUnitCompleta, axis=0)
units.append(unitName)
# remove the first row of zeroes
dataCluster = dataCluster[1:,:]
data = dataCluster[:,0:framesNumber+2]
gmix = mixture.GMM(n_components=clustersNumber, covariance_type='full')
gmix.fit(data)
labels = gmix.predict(data)
fit = metrics.silhouette_score(data, labels, metric='euclidean')
rfe.graficaCluster(labels, dataCluster[:,0:framesNumber-1], outputFolder+'gmm.png', clustersColours, fit)
# generate graphics of all ellipses
for clusterId in range(clustersNumber):
dataGrilla = np.zeros((1,framesNumber+7))
for unitId in range(dataCluster.shape[0]):
if labels[unitId] == clusterId:
datos=np.zeros((1,framesNumber+7))
datos[0]=dataCluster[unitId,:]
dataGrilla = np.append(dataGrilla,datos, axis=0)
## remove the first row of zeroes
dataGrilla = dataGrilla[1:,:]
rfe.graficaGrilla(dataGrilla, outputFolder+'Grilla_'+str(clusterId)+'.png', clustersColours[clusterId], framesNumber, xSize, ySize)
rfe.graficaCluster(labels, dataGrilla[:,0:framesNumber-1], outputFolder+'cluster_'+str(clusterId)+'.png', clustersColours[clusterId])
rfe.guardaClustersIDs(outputFolder, units, labels, outputFolder+'clusterings.csv')
return 0
if __name__ == '__main__':
main()
| gpl-2.0 | 2,732,410,993,761,357,300 | 37.132075 | 217 | 0.729177 | false |
txamqp/txamqp | src/examples/simple/txconsumer.py | 1 | 2216 | from twisted.internet.defer import inlineCallbacks
from twisted.internet import reactor
from twisted.internet.protocol import ClientCreator
from twisted.python import log
from txamqp.protocol import AMQClient
from txamqp.client import TwistedDelegate
import txamqp.spec
@inlineCallbacks
def gotConnection(conn, username, password):
print("Connected to broker.")
yield conn.authenticate(username, password)
print("Authenticated. Ready to receive messages")
chan = yield conn.channel(1)
yield chan.channel_open()
yield chan.queue_declare(queue="chatrooms", durable=True, exclusive=False, auto_delete=False)
yield chan.exchange_declare(exchange="chatservice", type="direct", durable=True, auto_delete=False)
yield chan.queue_bind(queue="chatrooms", exchange="chatservice", routing_key="txamqp_chatroom")
yield chan.basic_consume(queue='chatrooms', no_ack=True, consumer_tag="testtag")
queue = yield conn.queue("testtag")
while True:
msg = yield queue.get()
print('Received: {0} from channel #{1}'.format(msg.content.body, chan.id))
if msg.content.body == "STOP":
break
yield chan.basic_cancel("testtag")
yield chan.channel_close()
chan0 = yield conn.channel(0)
yield chan0.connection_close()
reactor.stop()
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(
description="Example consumer",
usage="%(prog)s localhost 5672 / guest guest ../../specs/standard/amqp0-8.stripped.xml"
)
parser.add_argument('host')
parser.add_argument('port', type=int)
parser.add_argument('vhost')
parser.add_argument('username')
parser.add_argument('password')
parser.add_argument('spec_path')
args = parser.parse_args()
spec = txamqp.spec.load(args.spec_path)
delegate = TwistedDelegate()
d = ClientCreator(reactor, AMQClient, delegate=delegate, vhost=args.vhost,
spec=spec).connectTCP(args.host, args.port)
d.addCallback(gotConnection, args.username, args.password)
def whoops(err):
if reactor.running:
log.err(err)
reactor.stop()
d.addErrback(whoops)
reactor.run()
| apache-2.0 | -6,316,527,555,034,640,000 | 28.157895 | 103 | 0.689531 | false |
platformio/platformio-core | platformio/debug/exception.py | 2 | 1138 | # Copyright (c) 2014-present PlatformIO <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from platformio.exception import PlatformioException, UserSideException
class DebugError(PlatformioException):
pass
class DebugSupportError(DebugError, UserSideException):
MESSAGE = (
"Currently, PlatformIO does not support debugging for `{0}`.\n"
"Please request support at https://github.com/platformio/"
"platformio-core/issues \nor visit -> https://docs.platformio.org"
"/page/plus/debugging.html"
)
class DebugInvalidOptionsError(DebugError, UserSideException):
pass
| apache-2.0 | -490,257,605,046,786,700 | 33.484848 | 74 | 0.746924 | false |
Alkemic/yaCBV | yacbv/mixins.py | 1 | 1041 | from django.shortcuts import render_to_response
from django.template.context import RequestContext
from django.http.response import HttpResponseNotAllowed
class HttpMethodRestrictMixin(object):
allowed_methods = [
'options', 'get', 'head', 'post',
'put', 'delete', 'trace', 'connect',
]
def dispatch(self, request, *args, **kwargs):
method_name = request.method.lower()
if method_name not in self.allowed_methods:
return HttpResponseNotAllowed(self.allowed_methods)
        return super(HttpMethodRestrictMixin, self).dispatch(request, *args, **kwargs)
class TemplateMixin(object):
"""In this case, method must return dict, that is used to populate data
in template"""
template_name = None
def dispatch(self, request, *args, **kwargs):
context = super(TemplateMixin, self).dispatch(request, *args, **kwargs)
return render_to_response(
self.template_name,
context,
context_instance=RequestContext(request),
)
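# A minimal usage sketch (commented out: ``View``, ``Article`` and the template
# path are assumptions for illustration, not part of this module):
#
# class ArticleDetail(HttpMethodRestrictMixin, TemplateMixin, View):
#     allowed_methods = ['get']
#     template_name = 'articles/detail.html'
#
#     def get(self, request, pk):
#         # TemplateMixin renders this dict as the template context.
#         return {'article': Article.objects.get(pk=pk)}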
| mit | -3,343,326,777,970,065,400 | 31.53125 | 79 | 0.664745 | false |
slipstream/SlipStreamClient | client/src/external/chardet/mbcssm.py | 289 | 25481 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .enums import MachineState
# BIG5
BIG5_CLS = (
    1,1,1,1,1,1,1,1, # 00 - 07 #allow 0x00 as a legal value
1,1,1,1,1,1,0,0, # 08 - 0f
1,1,1,1,1,1,1,1, # 10 - 17
1,1,1,0,1,1,1,1, # 18 - 1f
1,1,1,1,1,1,1,1, # 20 - 27
1,1,1,1,1,1,1,1, # 28 - 2f
1,1,1,1,1,1,1,1, # 30 - 37
1,1,1,1,1,1,1,1, # 38 - 3f
2,2,2,2,2,2,2,2, # 40 - 47
2,2,2,2,2,2,2,2, # 48 - 4f
2,2,2,2,2,2,2,2, # 50 - 57
2,2,2,2,2,2,2,2, # 58 - 5f
2,2,2,2,2,2,2,2, # 60 - 67
2,2,2,2,2,2,2,2, # 68 - 6f
2,2,2,2,2,2,2,2, # 70 - 77
2,2,2,2,2,2,2,1, # 78 - 7f
4,4,4,4,4,4,4,4, # 80 - 87
4,4,4,4,4,4,4,4, # 88 - 8f
4,4,4,4,4,4,4,4, # 90 - 97
4,4,4,4,4,4,4,4, # 98 - 9f
4,3,3,3,3,3,3,3, # a0 - a7
3,3,3,3,3,3,3,3, # a8 - af
3,3,3,3,3,3,3,3, # b0 - b7
3,3,3,3,3,3,3,3, # b8 - bf
3,3,3,3,3,3,3,3, # c0 - c7
3,3,3,3,3,3,3,3, # c8 - cf
3,3,3,3,3,3,3,3, # d0 - d7
3,3,3,3,3,3,3,3, # d8 - df
3,3,3,3,3,3,3,3, # e0 - e7
3,3,3,3,3,3,3,3, # e8 - ef
3,3,3,3,3,3,3,3, # f0 - f7
3,3,3,3,3,3,3,0 # f8 - ff
)
BIG5_ST = (
MachineState.ERROR,MachineState.START,MachineState.START, 3,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#00-07
MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,#08-0f
MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START#10-17
)
BIG5_CHAR_LEN_TABLE = (0, 1, 1, 2, 0)
BIG5_SM_MODEL = {'class_table': BIG5_CLS,
'class_factor': 5,
'state_table': BIG5_ST,
'char_len_table': BIG5_CHAR_LEN_TABLE,
'name': 'Big5'}
# CP949
CP949_CLS = (
1,1,1,1,1,1,1,1, 1,1,1,1,1,1,0,0, # 00 - 0f
1,1,1,1,1,1,1,1, 1,1,1,0,1,1,1,1, # 10 - 1f
1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, # 20 - 2f
1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, # 30 - 3f
1,4,4,4,4,4,4,4, 4,4,4,4,4,4,4,4, # 40 - 4f
4,4,5,5,5,5,5,5, 5,5,5,1,1,1,1,1, # 50 - 5f
1,5,5,5,5,5,5,5, 5,5,5,5,5,5,5,5, # 60 - 6f
5,5,5,5,5,5,5,5, 5,5,5,1,1,1,1,1, # 70 - 7f
0,6,6,6,6,6,6,6, 6,6,6,6,6,6,6,6, # 80 - 8f
6,6,6,6,6,6,6,6, 6,6,6,6,6,6,6,6, # 90 - 9f
6,7,7,7,7,7,7,7, 7,7,7,7,7,8,8,8, # a0 - af
7,7,7,7,7,7,7,7, 7,7,7,7,7,7,7,7, # b0 - bf
7,7,7,7,7,7,9,2, 2,3,2,2,2,2,2,2, # c0 - cf
2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2, # d0 - df
2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2, # e0 - ef
2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,0, # f0 - ff
)
CP949_ST = (
#cls= 0 1 2 3 4 5 6 7 8 9 # previous state =
MachineState.ERROR,MachineState.START, 3,MachineState.ERROR,MachineState.START,MachineState.START, 4, 5,MachineState.ERROR, 6, # MachineState.START
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, # MachineState.ERROR
MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME, # MachineState.ITS_ME
MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START, # 3
MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START, # 4
MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START, # 5
MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START, # 6
)
CP949_CHAR_LEN_TABLE = (0, 1, 2, 0, 1, 1, 2, 2, 0, 2)
CP949_SM_MODEL = {'class_table': CP949_CLS,
'class_factor': 10,
'state_table': CP949_ST,
'char_len_table': CP949_CHAR_LEN_TABLE,
'name': 'CP949'}
# EUC-JP
EUCJP_CLS = (
4,4,4,4,4,4,4,4, # 00 - 07
4,4,4,4,4,4,5,5, # 08 - 0f
4,4,4,4,4,4,4,4, # 10 - 17
4,4,4,5,4,4,4,4, # 18 - 1f
4,4,4,4,4,4,4,4, # 20 - 27
4,4,4,4,4,4,4,4, # 28 - 2f
4,4,4,4,4,4,4,4, # 30 - 37
4,4,4,4,4,4,4,4, # 38 - 3f
4,4,4,4,4,4,4,4, # 40 - 47
4,4,4,4,4,4,4,4, # 48 - 4f
4,4,4,4,4,4,4,4, # 50 - 57
4,4,4,4,4,4,4,4, # 58 - 5f
4,4,4,4,4,4,4,4, # 60 - 67
4,4,4,4,4,4,4,4, # 68 - 6f
4,4,4,4,4,4,4,4, # 70 - 77
4,4,4,4,4,4,4,4, # 78 - 7f
5,5,5,5,5,5,5,5, # 80 - 87
5,5,5,5,5,5,1,3, # 88 - 8f
5,5,5,5,5,5,5,5, # 90 - 97
5,5,5,5,5,5,5,5, # 98 - 9f
5,2,2,2,2,2,2,2, # a0 - a7
2,2,2,2,2,2,2,2, # a8 - af
2,2,2,2,2,2,2,2, # b0 - b7
2,2,2,2,2,2,2,2, # b8 - bf
2,2,2,2,2,2,2,2, # c0 - c7
2,2,2,2,2,2,2,2, # c8 - cf
2,2,2,2,2,2,2,2, # d0 - d7
2,2,2,2,2,2,2,2, # d8 - df
0,0,0,0,0,0,0,0, # e0 - e7
0,0,0,0,0,0,0,0, # e8 - ef
0,0,0,0,0,0,0,0, # f0 - f7
0,0,0,0,0,0,0,5 # f8 - ff
)
EUCJP_ST = (
3, 4, 3, 5,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#00-07
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,#08-0f
MachineState.ITS_ME,MachineState.ITS_ME,MachineState.START,MachineState.ERROR,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#10-17
MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 3,MachineState.ERROR,#18-1f
3,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START#20-27
)
EUCJP_CHAR_LEN_TABLE = (2, 2, 2, 3, 1, 0)
EUCJP_SM_MODEL = {'class_table': EUCJP_CLS,
'class_factor': 6,
'state_table': EUCJP_ST,
'char_len_table': EUCJP_CHAR_LEN_TABLE,
'name': 'EUC-JP'}
# EUC-KR
EUCKR_CLS = (
1,1,1,1,1,1,1,1, # 00 - 07
1,1,1,1,1,1,0,0, # 08 - 0f
1,1,1,1,1,1,1,1, # 10 - 17
1,1,1,0,1,1,1,1, # 18 - 1f
1,1,1,1,1,1,1,1, # 20 - 27
1,1,1,1,1,1,1,1, # 28 - 2f
1,1,1,1,1,1,1,1, # 30 - 37
1,1,1,1,1,1,1,1, # 38 - 3f
1,1,1,1,1,1,1,1, # 40 - 47
1,1,1,1,1,1,1,1, # 48 - 4f
1,1,1,1,1,1,1,1, # 50 - 57
1,1,1,1,1,1,1,1, # 58 - 5f
1,1,1,1,1,1,1,1, # 60 - 67
1,1,1,1,1,1,1,1, # 68 - 6f
1,1,1,1,1,1,1,1, # 70 - 77
1,1,1,1,1,1,1,1, # 78 - 7f
0,0,0,0,0,0,0,0, # 80 - 87
0,0,0,0,0,0,0,0, # 88 - 8f
0,0,0,0,0,0,0,0, # 90 - 97
0,0,0,0,0,0,0,0, # 98 - 9f
0,2,2,2,2,2,2,2, # a0 - a7
2,2,2,2,2,3,3,3, # a8 - af
2,2,2,2,2,2,2,2, # b0 - b7
2,2,2,2,2,2,2,2, # b8 - bf
2,2,2,2,2,2,2,2, # c0 - c7
2,3,2,2,2,2,2,2, # c8 - cf
2,2,2,2,2,2,2,2, # d0 - d7
2,2,2,2,2,2,2,2, # d8 - df
2,2,2,2,2,2,2,2, # e0 - e7
2,2,2,2,2,2,2,2, # e8 - ef
2,2,2,2,2,2,2,2, # f0 - f7
2,2,2,2,2,2,2,0 # f8 - ff
)
EUCKR_ST = (
MachineState.ERROR,MachineState.START, 3,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#00-07
MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START #08-0f
)
EUCKR_CHAR_LEN_TABLE = (0, 1, 2, 0)
EUCKR_SM_MODEL = {'class_table': EUCKR_CLS,
'class_factor': 4,
'state_table': EUCKR_ST,
'char_len_table': EUCKR_CHAR_LEN_TABLE,
'name': 'EUC-KR'}
# EUC-TW
EUCTW_CLS = (
2,2,2,2,2,2,2,2, # 00 - 07
2,2,2,2,2,2,0,0, # 08 - 0f
2,2,2,2,2,2,2,2, # 10 - 17
2,2,2,0,2,2,2,2, # 18 - 1f
2,2,2,2,2,2,2,2, # 20 - 27
2,2,2,2,2,2,2,2, # 28 - 2f
2,2,2,2,2,2,2,2, # 30 - 37
2,2,2,2,2,2,2,2, # 38 - 3f
2,2,2,2,2,2,2,2, # 40 - 47
2,2,2,2,2,2,2,2, # 48 - 4f
2,2,2,2,2,2,2,2, # 50 - 57
2,2,2,2,2,2,2,2, # 58 - 5f
2,2,2,2,2,2,2,2, # 60 - 67
2,2,2,2,2,2,2,2, # 68 - 6f
2,2,2,2,2,2,2,2, # 70 - 77
2,2,2,2,2,2,2,2, # 78 - 7f
0,0,0,0,0,0,0,0, # 80 - 87
0,0,0,0,0,0,6,0, # 88 - 8f
0,0,0,0,0,0,0,0, # 90 - 97
0,0,0,0,0,0,0,0, # 98 - 9f
0,3,4,4,4,4,4,4, # a0 - a7
5,5,1,1,1,1,1,1, # a8 - af
1,1,1,1,1,1,1,1, # b0 - b7
1,1,1,1,1,1,1,1, # b8 - bf
1,1,3,1,3,3,3,3, # c0 - c7
3,3,3,3,3,3,3,3, # c8 - cf
3,3,3,3,3,3,3,3, # d0 - d7
3,3,3,3,3,3,3,3, # d8 - df
3,3,3,3,3,3,3,3, # e0 - e7
3,3,3,3,3,3,3,3, # e8 - ef
3,3,3,3,3,3,3,3, # f0 - f7
3,3,3,3,3,3,3,0 # f8 - ff
)
EUCTW_ST = (
MachineState.ERROR,MachineState.ERROR,MachineState.START, 3, 3, 3, 4,MachineState.ERROR,#00-07
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,#08-0f
MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.START,MachineState.ERROR,#10-17
MachineState.START,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#18-1f
5,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.ERROR,MachineState.START,MachineState.START,#20-27
MachineState.START,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START #28-2f
)
EUCTW_CHAR_LEN_TABLE = (0, 0, 1, 2, 2, 2, 3)
EUCTW_SM_MODEL = {'class_table': EUCTW_CLS,
'class_factor': 7,
'state_table': EUCTW_ST,
'char_len_table': EUCTW_CHAR_LEN_TABLE,
'name': 'x-euc-tw'}
# GB2312
GB2312_CLS = (
1,1,1,1,1,1,1,1, # 00 - 07
1,1,1,1,1,1,0,0, # 08 - 0f
1,1,1,1,1,1,1,1, # 10 - 17
1,1,1,0,1,1,1,1, # 18 - 1f
1,1,1,1,1,1,1,1, # 20 - 27
1,1,1,1,1,1,1,1, # 28 - 2f
3,3,3,3,3,3,3,3, # 30 - 37
3,3,1,1,1,1,1,1, # 38 - 3f
2,2,2,2,2,2,2,2, # 40 - 47
2,2,2,2,2,2,2,2, # 48 - 4f
2,2,2,2,2,2,2,2, # 50 - 57
2,2,2,2,2,2,2,2, # 58 - 5f
2,2,2,2,2,2,2,2, # 60 - 67
2,2,2,2,2,2,2,2, # 68 - 6f
2,2,2,2,2,2,2,2, # 70 - 77
2,2,2,2,2,2,2,4, # 78 - 7f
5,6,6,6,6,6,6,6, # 80 - 87
6,6,6,6,6,6,6,6, # 88 - 8f
6,6,6,6,6,6,6,6, # 90 - 97
6,6,6,6,6,6,6,6, # 98 - 9f
6,6,6,6,6,6,6,6, # a0 - a7
6,6,6,6,6,6,6,6, # a8 - af
6,6,6,6,6,6,6,6, # b0 - b7
6,6,6,6,6,6,6,6, # b8 - bf
6,6,6,6,6,6,6,6, # c0 - c7
6,6,6,6,6,6,6,6, # c8 - cf
6,6,6,6,6,6,6,6, # d0 - d7
6,6,6,6,6,6,6,6, # d8 - df
6,6,6,6,6,6,6,6, # e0 - e7
6,6,6,6,6,6,6,6, # e8 - ef
6,6,6,6,6,6,6,6, # f0 - f7
6,6,6,6,6,6,6,0 # f8 - ff
)
GB2312_ST = (
MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START, 3,MachineState.ERROR,#00-07
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,#08-0f
MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.START,#10-17
4,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#18-1f
MachineState.ERROR,MachineState.ERROR, 5,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,#20-27
MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START #28-2f
)
# To be accurate, the length of class 6 can be either 2 or 4.
# But it is not necessary to discriminate between the two since
# it is used for frequency analysis only, and we are validating
# each code range there as well. So it is safe to set it to be
# 2 here.
GB2312_CHAR_LEN_TABLE = (0, 1, 1, 1, 1, 1, 2)
GB2312_SM_MODEL = {'class_table': GB2312_CLS,
'class_factor': 7,
'state_table': GB2312_ST,
'char_len_table': GB2312_CHAR_LEN_TABLE,
'name': 'GB2312'}
# Shift_JIS
SJIS_CLS = (
1,1,1,1,1,1,1,1, # 00 - 07
1,1,1,1,1,1,0,0, # 08 - 0f
1,1,1,1,1,1,1,1, # 10 - 17
1,1,1,0,1,1,1,1, # 18 - 1f
1,1,1,1,1,1,1,1, # 20 - 27
1,1,1,1,1,1,1,1, # 28 - 2f
1,1,1,1,1,1,1,1, # 30 - 37
1,1,1,1,1,1,1,1, # 38 - 3f
2,2,2,2,2,2,2,2, # 40 - 47
2,2,2,2,2,2,2,2, # 48 - 4f
2,2,2,2,2,2,2,2, # 50 - 57
2,2,2,2,2,2,2,2, # 58 - 5f
2,2,2,2,2,2,2,2, # 60 - 67
2,2,2,2,2,2,2,2, # 68 - 6f
2,2,2,2,2,2,2,2, # 70 - 77
2,2,2,2,2,2,2,1, # 78 - 7f
3,3,3,3,3,2,2,3, # 80 - 87
3,3,3,3,3,3,3,3, # 88 - 8f
3,3,3,3,3,3,3,3, # 90 - 97
3,3,3,3,3,3,3,3, # 98 - 9f
    #0xa0 is illegal in sjis encoding, but some pages do
    #contain such bytes. We need to be more error-forgiving.
2,2,2,2,2,2,2,2, # a0 - a7
2,2,2,2,2,2,2,2, # a8 - af
2,2,2,2,2,2,2,2, # b0 - b7
2,2,2,2,2,2,2,2, # b8 - bf
2,2,2,2,2,2,2,2, # c0 - c7
2,2,2,2,2,2,2,2, # c8 - cf
2,2,2,2,2,2,2,2, # d0 - d7
2,2,2,2,2,2,2,2, # d8 - df
3,3,3,3,3,3,3,3, # e0 - e7
3,3,3,3,3,4,4,4, # e8 - ef
3,3,3,3,3,3,3,3, # f0 - f7
3,3,3,3,3,0,0,0) # f8 - ff
SJIS_ST = (
MachineState.ERROR,MachineState.START,MachineState.START, 3,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#00-07
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,#08-0f
MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START #10-17
)
SJIS_CHAR_LEN_TABLE = (0, 1, 1, 2, 0, 0)
SJIS_SM_MODEL = {'class_table': SJIS_CLS,
'class_factor': 6,
'state_table': SJIS_ST,
'char_len_table': SJIS_CHAR_LEN_TABLE,
'name': 'Shift_JIS'}
# UCS2-BE
UCS2BE_CLS = (
0,0,0,0,0,0,0,0, # 00 - 07
0,0,1,0,0,2,0,0, # 08 - 0f
0,0,0,0,0,0,0,0, # 10 - 17
0,0,0,3,0,0,0,0, # 18 - 1f
0,0,0,0,0,0,0,0, # 20 - 27
0,3,3,3,3,3,0,0, # 28 - 2f
0,0,0,0,0,0,0,0, # 30 - 37
0,0,0,0,0,0,0,0, # 38 - 3f
0,0,0,0,0,0,0,0, # 40 - 47
0,0,0,0,0,0,0,0, # 48 - 4f
0,0,0,0,0,0,0,0, # 50 - 57
0,0,0,0,0,0,0,0, # 58 - 5f
0,0,0,0,0,0,0,0, # 60 - 67
0,0,0,0,0,0,0,0, # 68 - 6f
0,0,0,0,0,0,0,0, # 70 - 77
0,0,0,0,0,0,0,0, # 78 - 7f
0,0,0,0,0,0,0,0, # 80 - 87
0,0,0,0,0,0,0,0, # 88 - 8f
0,0,0,0,0,0,0,0, # 90 - 97
0,0,0,0,0,0,0,0, # 98 - 9f
0,0,0,0,0,0,0,0, # a0 - a7
0,0,0,0,0,0,0,0, # a8 - af
0,0,0,0,0,0,0,0, # b0 - b7
0,0,0,0,0,0,0,0, # b8 - bf
0,0,0,0,0,0,0,0, # c0 - c7
0,0,0,0,0,0,0,0, # c8 - cf
0,0,0,0,0,0,0,0, # d0 - d7
0,0,0,0,0,0,0,0, # d8 - df
0,0,0,0,0,0,0,0, # e0 - e7
0,0,0,0,0,0,0,0, # e8 - ef
0,0,0,0,0,0,0,0, # f0 - f7
0,0,0,0,0,0,4,5 # f8 - ff
)
UCS2BE_ST = (
5, 7, 7,MachineState.ERROR, 4, 3,MachineState.ERROR,MachineState.ERROR,#00-07
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,#08-0f
MachineState.ITS_ME,MachineState.ITS_ME, 6, 6, 6, 6,MachineState.ERROR,MachineState.ERROR,#10-17
6, 6, 6, 6, 6,MachineState.ITS_ME, 6, 6,#18-1f
6, 6, 6, 6, 5, 7, 7,MachineState.ERROR,#20-27
5, 8, 6, 6,MachineState.ERROR, 6, 6, 6,#28-2f
6, 6, 6, 6,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START #30-37
)
UCS2BE_CHAR_LEN_TABLE = (2, 2, 2, 0, 2, 2)
UCS2BE_SM_MODEL = {'class_table': UCS2BE_CLS,
'class_factor': 6,
'state_table': UCS2BE_ST,
'char_len_table': UCS2BE_CHAR_LEN_TABLE,
'name': 'UTF-16BE'}
# UCS2-LE
UCS2LE_CLS = (
0,0,0,0,0,0,0,0, # 00 - 07
0,0,1,0,0,2,0,0, # 08 - 0f
0,0,0,0,0,0,0,0, # 10 - 17
0,0,0,3,0,0,0,0, # 18 - 1f
0,0,0,0,0,0,0,0, # 20 - 27
0,3,3,3,3,3,0,0, # 28 - 2f
0,0,0,0,0,0,0,0, # 30 - 37
0,0,0,0,0,0,0,0, # 38 - 3f
0,0,0,0,0,0,0,0, # 40 - 47
0,0,0,0,0,0,0,0, # 48 - 4f
0,0,0,0,0,0,0,0, # 50 - 57
0,0,0,0,0,0,0,0, # 58 - 5f
0,0,0,0,0,0,0,0, # 60 - 67
0,0,0,0,0,0,0,0, # 68 - 6f
0,0,0,0,0,0,0,0, # 70 - 77
0,0,0,0,0,0,0,0, # 78 - 7f
0,0,0,0,0,0,0,0, # 80 - 87
0,0,0,0,0,0,0,0, # 88 - 8f
0,0,0,0,0,0,0,0, # 90 - 97
0,0,0,0,0,0,0,0, # 98 - 9f
0,0,0,0,0,0,0,0, # a0 - a7
0,0,0,0,0,0,0,0, # a8 - af
0,0,0,0,0,0,0,0, # b0 - b7
0,0,0,0,0,0,0,0, # b8 - bf
0,0,0,0,0,0,0,0, # c0 - c7
0,0,0,0,0,0,0,0, # c8 - cf
0,0,0,0,0,0,0,0, # d0 - d7
0,0,0,0,0,0,0,0, # d8 - df
0,0,0,0,0,0,0,0, # e0 - e7
0,0,0,0,0,0,0,0, # e8 - ef
0,0,0,0,0,0,0,0, # f0 - f7
0,0,0,0,0,0,4,5 # f8 - ff
)
UCS2LE_ST = (
6, 6, 7, 6, 4, 3,MachineState.ERROR,MachineState.ERROR,#00-07
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,#08-0f
MachineState.ITS_ME,MachineState.ITS_ME, 5, 5, 5,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,#10-17
5, 5, 5,MachineState.ERROR, 5,MachineState.ERROR, 6, 6,#18-1f
7, 6, 8, 8, 5, 5, 5,MachineState.ERROR,#20-27
5, 5, 5,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 5, 5,#28-2f
5, 5, 5,MachineState.ERROR, 5,MachineState.ERROR,MachineState.START,MachineState.START #30-37
)
UCS2LE_CHAR_LEN_TABLE = (2, 2, 2, 2, 2, 2)
UCS2LE_SM_MODEL = {'class_table': UCS2LE_CLS,
'class_factor': 6,
'state_table': UCS2LE_ST,
'char_len_table': UCS2LE_CHAR_LEN_TABLE,
'name': 'UTF-16LE'}
# UTF-8
UTF8_CLS = (
1,1,1,1,1,1,1,1, # 00 - 07 #allow 0x00 as a legal value
1,1,1,1,1,1,0,0, # 08 - 0f
1,1,1,1,1,1,1,1, # 10 - 17
1,1,1,0,1,1,1,1, # 18 - 1f
1,1,1,1,1,1,1,1, # 20 - 27
1,1,1,1,1,1,1,1, # 28 - 2f
1,1,1,1,1,1,1,1, # 30 - 37
1,1,1,1,1,1,1,1, # 38 - 3f
1,1,1,1,1,1,1,1, # 40 - 47
1,1,1,1,1,1,1,1, # 48 - 4f
1,1,1,1,1,1,1,1, # 50 - 57
1,1,1,1,1,1,1,1, # 58 - 5f
1,1,1,1,1,1,1,1, # 60 - 67
1,1,1,1,1,1,1,1, # 68 - 6f
1,1,1,1,1,1,1,1, # 70 - 77
1,1,1,1,1,1,1,1, # 78 - 7f
2,2,2,2,3,3,3,3, # 80 - 87
4,4,4,4,4,4,4,4, # 88 - 8f
4,4,4,4,4,4,4,4, # 90 - 97
4,4,4,4,4,4,4,4, # 98 - 9f
5,5,5,5,5,5,5,5, # a0 - a7
5,5,5,5,5,5,5,5, # a8 - af
5,5,5,5,5,5,5,5, # b0 - b7
5,5,5,5,5,5,5,5, # b8 - bf
0,0,6,6,6,6,6,6, # c0 - c7
6,6,6,6,6,6,6,6, # c8 - cf
6,6,6,6,6,6,6,6, # d0 - d7
6,6,6,6,6,6,6,6, # d8 - df
7,8,8,8,8,8,8,8, # e0 - e7
8,8,8,8,8,9,8,8, # e8 - ef
10,11,11,11,11,11,11,11, # f0 - f7
12,13,13,13,14,15,0,0 # f8 - ff
)
UTF8_ST = (
MachineState.ERROR,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 12, 10,#00-07
9, 11, 8, 7, 6, 5, 4, 3,#08-0f
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#10-17
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#18-1f
MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,#20-27
MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,#28-2f
MachineState.ERROR,MachineState.ERROR, 5, 5, 5, 5,MachineState.ERROR,MachineState.ERROR,#30-37
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#38-3f
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 5, 5, 5,MachineState.ERROR,MachineState.ERROR,#40-47
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#48-4f
MachineState.ERROR,MachineState.ERROR, 7, 7, 7, 7,MachineState.ERROR,MachineState.ERROR,#50-57
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#58-5f
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 7, 7,MachineState.ERROR,MachineState.ERROR,#60-67
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#68-6f
MachineState.ERROR,MachineState.ERROR, 9, 9, 9, 9,MachineState.ERROR,MachineState.ERROR,#70-77
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#78-7f
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 9,MachineState.ERROR,MachineState.ERROR,#80-87
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#88-8f
MachineState.ERROR,MachineState.ERROR, 12, 12, 12, 12,MachineState.ERROR,MachineState.ERROR,#90-97
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#98-9f
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 12,MachineState.ERROR,MachineState.ERROR,#a0-a7
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#a8-af
MachineState.ERROR,MachineState.ERROR, 12, 12, 12,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#b0-b7
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#b8-bf
MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,#c0-c7
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR #c8-cf
)
UTF8_CHAR_LEN_TABLE = (0, 1, 0, 0, 0, 0, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6)
UTF8_SM_MODEL = {'class_table': UTF8_CLS,
'class_factor': 16,
'state_table': UTF8_ST,
'char_len_table': UTF8_CHAR_LEN_TABLE,
'name': 'UTF-8'}
| apache-2.0 | -6,302,124,043,890,133,000 | 43.547203 | 226 | 0.581296 | false |
bbenko/shinkicker | django/contrib/localflavor/in_/forms.py | 309 | 1741 | """
India-specific Form helpers.
"""
from django.core.validators import EMPTY_VALUES
from django.forms import ValidationError
from django.forms.fields import Field, RegexField, Select
from django.utils.encoding import smart_unicode
from django.utils.translation import gettext
import re
class INZipCodeField(RegexField):
default_error_messages = {
        'invalid': gettext(u'Enter a zip code in the format XXXXXX.'),
}
def __init__(self, *args, **kwargs):
super(INZipCodeField, self).__init__(r'^\d{6}$',
max_length=None, min_length=None, *args, **kwargs)
class INStateField(Field):
"""
    A form field that validates its input is an Indian state name or
abbreviation. It normalizes the input to the standard two-letter vehicle
registration abbreviation for the given state or union territory
"""
default_error_messages = {
        'invalid': u'Enter an Indian state or territory.',
}
def clean(self, value):
from in_states import STATES_NORMALIZED
super(INStateField, self).clean(value)
if value in EMPTY_VALUES:
return u''
try:
value = value.strip().lower()
except AttributeError:
pass
else:
try:
return smart_unicode(STATES_NORMALIZED[value.strip().lower()])
except KeyError:
pass
raise ValidationError(self.error_messages['invalid'])
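# A usage sketch for INStateField (illustrative; the accepted spellings and the
# resulting two-letter codes are defined by in_states.STATES_NORMALIZED, so the
# sample values below are assumptions):
#
#   field = INStateField()
#   field.clean(u' Tamil Nadu ')   # -> u'TN' if that key is in STATES_NORMALIZED
#   field.clean(u'not-a-state')    # raises ValidationError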
class INStateSelect(Select):
"""
A Select widget that uses a list of Indian states/territories as its
choices.
"""
def __init__(self, attrs=None):
from in_states import STATE_CHOICES
super(INStateSelect, self).__init__(attrs, choices=STATE_CHOICES)
| bsd-3-clause | 5,026,612,602,488,794,000 | 30.089286 | 78 | 0.646755 | false |
michaelwu/servo | components/script/dom/bindings/codegen/Configuration.py | 6 | 13290 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
from WebIDL import IDLInterface
autogenerated_comment = "/* THIS FILE IS AUTOGENERATED - DO NOT EDIT */\n"
class Configuration:
"""
Represents global configuration state based on IDL parse data and
the configuration file.
"""
def __init__(self, filename, parseData):
# Read the configuration file.
glbl = {}
execfile(filename, glbl)
config = glbl['DOMInterfaces']
# Build descriptors for all the interfaces we have in the parse data.
# This allows callers to specify a subset of interfaces by filtering
# |parseData|.
self.descriptors = []
self.interfaces = {}
self.maxProtoChainLength = 0;
for thing in parseData:
# Some toplevel things are sadly types, and those have an
# isInterface that doesn't mean the same thing as IDLObject's
# isInterface()...
if not isinstance(thing, IDLInterface):
continue
iface = thing
self.interfaces[iface.identifier.name] = iface
if iface.identifier.name not in config:
# Completely skip consequential interfaces with no descriptor
# if they have no interface object because chances are we
# don't need to do anything interesting with them.
if iface.isConsequential() and not iface.hasInterfaceObject():
continue
entry = {}
else:
entry = config[iface.identifier.name]
if not isinstance(entry, list):
assert isinstance(entry, dict)
entry = [entry]
self.descriptors.extend([Descriptor(self, iface, x) for x in entry])
# Mark the descriptors for which only a single nativeType implements
# an interface.
for descriptor in self.descriptors:
            interfaceName = descriptor.interface.identifier.name
            otherDescriptors = [d for d in self.descriptors
                                if d.interface.identifier.name == interfaceName]
descriptor.uniqueImplementation = len(otherDescriptors) == 1
self.enums = [e for e in parseData if e.isEnum()]
self.dictionaries = [d for d in parseData if d.isDictionary()]
self.callbacks = [c for c in parseData if
c.isCallback() and not c.isInterface()]
# Keep the descriptor list sorted for determinism.
self.descriptors.sort(lambda x,y: cmp(x.name, y.name))
def getInterface(self, ifname):
return self.interfaces[ifname]
def getDescriptors(self, **filters):
"""Gets the descriptors that match the given filters."""
curr = self.descriptors
for key, val in filters.iteritems():
if key == 'webIDLFile':
getter = lambda x: x.interface.filename()
elif key == 'hasInterfaceObject':
getter = lambda x: x.interface.hasInterfaceObject()
elif key == 'isCallback':
getter = lambda x: x.interface.isCallback()
elif key == 'isJSImplemented':
getter = lambda x: x.interface.isJSImplemented()
else:
getter = lambda x: getattr(x, key)
curr = filter(lambda x: getter(x) == val, curr)
return curr
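    # A usage sketch for the filter mechanism above (the file name is
    # hypothetical; unrecognised keywords fall through to a plain getattr on
    # each descriptor, so attributes such as ``concrete`` or ``register`` set
    # in Descriptor.__init__ can be used directly):
    #
    #   config.getDescriptors(webIDLFile="Window.webidl", register=True)
    #   config.getDescriptors(isCallback=False, concrete=True)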
def getEnums(self, webIDLFile):
return filter(lambda e: e.filename() == webIDLFile, self.enums)
@staticmethod
def _filterForFile(items, webIDLFile=""):
"""Gets the items that match the given filters."""
if not webIDLFile:
return items
return filter(lambda x: x.filename() == webIDLFile, items)
def getDictionaries(self, webIDLFile=""):
return self._filterForFile(self.dictionaries, webIDLFile=webIDLFile)
def getCallbacks(self, webIDLFile=""):
return self._filterForFile(self.callbacks, webIDLFile=webIDLFile)
def getDescriptor(self, interfaceName):
"""
Gets the appropriate descriptor for the given interface name.
"""
iface = self.getInterface(interfaceName)
descriptors = self.getDescriptors(interface=iface)
# We should have exactly one result.
        if len(descriptors) != 1:
            raise NoSuchDescriptorError("For " + interfaceName + " found " +
                                        str(len(descriptors)) + " matches")
return descriptors[0]
def getDescriptorProvider(self):
"""
Gets a descriptor provider that can provide descriptors as needed.
"""
return DescriptorProvider(self)
class NoSuchDescriptorError(TypeError):
def __init__(self, str):
TypeError.__init__(self, str)
class DescriptorProvider:
"""
A way of getting descriptors for interface names
"""
def __init__(self, config):
self.config = config
def getDescriptor(self, interfaceName):
"""
Gets the appropriate descriptor for the given interface name given the
context of the current descriptor.
"""
return self.config.getDescriptor(interfaceName)
class Descriptor(DescriptorProvider):
"""
Represents a single descriptor for an interface. See Bindings.conf.
"""
def __init__(self, config, interface, desc):
DescriptorProvider.__init__(self, config)
self.interface = interface
# Read the desc, and fill in the relevant defaults.
ifaceName = self.interface.identifier.name
# Callback types do not use JS smart pointers, so we should not use the
# built-in rooting mechanisms for them.
if self.interface.isCallback():
self.needsRooting = False
ty = "%sBinding::%s" % (ifaceName, ifaceName)
self.returnType = ty
self.argumentType = "???"
self.memberType = "???"
self.nativeType = ty
else:
self.needsRooting = True
self.returnType = "Temporary<%s>" % ifaceName
self.argumentType = "JSRef<%s>" % ifaceName
self.memberType = "Root<'a, 'b, %s>" % ifaceName
self.nativeType = "JS<%s>" % ifaceName
self.concreteType = ifaceName
self.register = desc.get('register', True)
self.outerObjectHook = desc.get('outerObjectHook', 'None')
# If we're concrete, we need to crawl our ancestor interfaces and mark
# them as having a concrete descendant.
self.concrete = desc.get('concrete', True)
if self.concrete:
self.proxy = False
operations = {
'IndexedGetter': None,
'IndexedSetter': None,
'IndexedCreator': None,
'IndexedDeleter': None,
'NamedGetter': None,
'NamedSetter': None,
'NamedCreator': None,
'NamedDeleter': None,
'Stringifier': None
}
iface = self.interface
while iface:
for m in iface.members:
if not m.isMethod():
continue
def addOperation(operation, m):
if not operations[operation]:
operations[operation] = m
def addIndexedOrNamedOperation(operation, m):
self.proxy = True
if m.isIndexed():
operation = 'Indexed' + operation
else:
assert m.isNamed()
operation = 'Named' + operation
addOperation(operation, m)
if m.isStringifier():
addOperation('Stringifier', m)
else:
if m.isGetter():
addIndexedOrNamedOperation('Getter', m)
if m.isSetter():
addIndexedOrNamedOperation('Setter', m)
if m.isCreator():
addIndexedOrNamedOperation('Creator', m)
if m.isDeleter():
addIndexedOrNamedOperation('Deleter', m)
iface.setUserData('hasConcreteDescendant', True)
iface = iface.parent
if self.proxy:
self.operations = operations
iface = self.interface
while iface:
iface.setUserData('hasProxyDescendant', True)
iface = iface.parent
self.name = interface.identifier.name
# self.extendedAttributes is a dict of dicts, keyed on
# all/getterOnly/setterOnly and then on member name. Values are an
# array of extended attributes.
self.extendedAttributes = { 'all': {}, 'getterOnly': {}, 'setterOnly': {} }
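        # Shape sketch of the populated structure (member and attribute names
        # here are hypothetical; real values come from the per-interface
        # configuration entries fed to addExtendedAttribute below):
        #
        #   {'all':        {'item':   ['SomeAttribute']},
        #    'getterOnly': {'length': ['OtherAttribute']},
        #    'setterOnly': {}}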
def addExtendedAttribute(attribute, config):
def add(key, members, attribute):
for member in members:
self.extendedAttributes[key].setdefault(member, []).append(attribute)
if isinstance(config, dict):
for key in ['all', 'getterOnly', 'setterOnly']:
add(key, config.get(key, []), attribute)
elif isinstance(config, list):
add('all', config, attribute)
else:
assert isinstance(config, str)
if config == '*':
iface = self.interface
while iface:
add('all', map(lambda m: m.name, iface.members), attribute)
iface = iface.parent
else:
add('all', [config], attribute)
# Build the prototype chain.
self.prototypeChain = []
parent = interface
while parent:
self.prototypeChain.insert(0, parent.identifier.name)
parent = parent.parent
config.maxProtoChainLength = max(config.maxProtoChainLength,
len(self.prototypeChain))
def getExtendedAttributes(self, member, getter=False, setter=False):
def maybeAppendInfallibleToAttrs(attrs, throws):
if throws is None:
attrs.append("infallible")
elif throws is True:
pass
else:
raise TypeError("Unknown value for 'Throws'")
name = member.identifier.name
if member.isMethod():
attrs = self.extendedAttributes['all'].get(name, [])
throws = member.getExtendedAttribute("Throws")
maybeAppendInfallibleToAttrs(attrs, throws)
return attrs
assert member.isAttr()
assert bool(getter) != bool(setter)
key = 'getterOnly' if getter else 'setterOnly'
attrs = self.extendedAttributes['all'].get(name, []) + self.extendedAttributes[key].get(name, [])
throws = member.getExtendedAttribute("Throws")
if throws is None:
throwsAttr = "GetterThrows" if getter else "SetterThrows"
throws = member.getExtendedAttribute(throwsAttr)
maybeAppendInfallibleToAttrs(attrs, throws)
return attrs
def isGlobal(self):
"""
Returns true if this is the primary interface for a global object
of some sort.
"""
return (self.interface.getExtendedAttribute("Global") or
self.interface.getExtendedAttribute("PrimaryGlobal"))
# Some utility methods
def getTypesFromDescriptor(descriptor):
"""
Get all argument and return types for all members of the descriptor
"""
members = [m for m in descriptor.interface.members]
if descriptor.interface.ctor():
members.append(descriptor.interface.ctor())
members.extend(descriptor.interface.namedConstructors)
signatures = [s for m in members if m.isMethod() for s in m.signatures()]
types = []
for s in signatures:
assert len(s) == 2
(returnType, arguments) = s
types.append(returnType)
types.extend(a.type for a in arguments)
types.extend(a.type for a in members if a.isAttr())
return types
def getFlatTypes(types):
retval = set()
for type in types:
type = type.unroll()
if type.isUnion():
retval |= set(type.flatMemberTypes)
else:
retval.add(type)
return retval
def getTypesFromDictionary(dictionary):
"""
Get all member types for this dictionary
"""
types = []
curDict = dictionary
while curDict:
types.extend([m.type for m in curDict.members])
curDict = curDict.parent
return types
def getTypesFromCallback(callback):
"""
Get the types this callback depends on: its return type and the
types of its arguments.
"""
sig = callback.signatures()[0]
types = [sig[0]] # Return type
types.extend(arg.type for arg in sig[1]) # Arguments
return types
| mpl-2.0 | 5,403,474,975,054,670,000 | 37.746356 | 105 | 0.572009 | false |
Turlough/keyczar | cpp/src/tools/swtoolkit/site_scons/site_tools/collada_dom.py | 12 | 1830 | #!/usr/bin/python2.4
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Collada DOM 1.3.0 tool for SCons."""
import sys
def generate(env):
# NOTE: SCons requires the use of this name, which fails gpylint.
"""SCons entry point for this tool."""
env.Append(CCFLAGS=[
'-I$COLLADA_DIR/include',
'-I$COLLADA_DIR/include/1.4',
])
| apache-2.0 | -2,211,243,350,085,568,800 | 40.590909 | 72 | 0.75847 | false |
wakatime/wakatime | wakatime/packages/py26/pygments/lexers/configs.py | 25 | 28266 | # -*- coding: utf-8 -*-
"""
pygments.lexers.configs
~~~~~~~~~~~~~~~~~~~~~~~
Lexers for configuration file formats.
:copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, default, words, bygroups, include, using
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Whitespace, Literal
from pygments.lexers.shell import BashLexer
__all__ = ['IniLexer', 'RegeditLexer', 'PropertiesLexer', 'KconfigLexer',
'Cfengine3Lexer', 'ApacheConfLexer', 'SquidConfLexer',
'NginxConfLexer', 'LighttpdConfLexer', 'DockerLexer',
'TerraformLexer', 'TermcapLexer', 'TerminfoLexer',
'PkgConfigLexer', 'PacmanConfLexer']
class IniLexer(RegexLexer):
"""
Lexer for configuration files in INI style.
"""
name = 'INI'
aliases = ['ini', 'cfg', 'dosini']
filenames = ['*.ini', '*.cfg', '*.inf']
mimetypes = ['text/x-ini', 'text/inf']
tokens = {
'root': [
(r'\s+', Text),
(r'[;#].*', Comment.Single),
(r'\[.*?\]$', Keyword),
(r'(.*?)([ \t]*)(=)([ \t]*)(.*(?:\n[ \t].+)*)',
bygroups(Name.Attribute, Text, Operator, Text, String)),
# standalone option, supported by some INI parsers
(r'(.+?)$', Name.Attribute),
],
}
def analyse_text(text):
npos = text.find('\n')
if npos < 3:
return False
return text[0] == '[' and text[npos-1] == ']'
class RegeditLexer(RegexLexer):
"""
Lexer for `Windows Registry
<http://en.wikipedia.org/wiki/Windows_Registry#.REG_files>`_ files produced
by regedit.
.. versionadded:: 1.6
"""
name = 'reg'
aliases = ['registry']
filenames = ['*.reg']
mimetypes = ['text/x-windows-registry']
tokens = {
'root': [
(r'Windows Registry Editor.*', Text),
(r'\s+', Text),
(r'[;#].*', Comment.Single),
(r'(\[)(-?)(HKEY_[A-Z_]+)(.*?\])$',
bygroups(Keyword, Operator, Name.Builtin, Keyword)),
# String keys, which obey somewhat normal escaping
(r'("(?:\\"|\\\\|[^"])+")([ \t]*)(=)([ \t]*)',
bygroups(Name.Attribute, Text, Operator, Text),
'value'),
# Bare keys (includes @)
(r'(.*?)([ \t]*)(=)([ \t]*)',
bygroups(Name.Attribute, Text, Operator, Text),
'value'),
],
'value': [
(r'-', Operator, '#pop'), # delete value
(r'(dword|hex(?:\([0-9a-fA-F]\))?)(:)([0-9a-fA-F,]+)',
bygroups(Name.Variable, Punctuation, Number), '#pop'),
# As far as I know, .reg files do not support line continuation.
(r'.+', String, '#pop'),
default('#pop'),
]
}
def analyse_text(text):
return text.startswith('Windows Registry Editor')
class PropertiesLexer(RegexLexer):
"""
Lexer for configuration files in Java's properties format.
Note: trailing whitespace counts as part of the value as per spec
.. versionadded:: 1.4
"""
name = 'Properties'
aliases = ['properties', 'jproperties']
filenames = ['*.properties']
mimetypes = ['text/x-java-properties']
tokens = {
'root': [
(r'^(\w+)([ \t])(\w+\s*)$', bygroups(Name.Attribute, Text, String)),
(r'^\w+(\\[ \t]\w*)*$', Name.Attribute),
(r'(^ *)([#!].*)', bygroups(Text, Comment)),
# More controversial comments
(r'(^ *)((?:;|//).*)', bygroups(Text, Comment)),
(r'(.*?)([ \t]*)([=:])([ \t]*)(.*(?:(?<=\\)\n.*)*)',
bygroups(Name.Attribute, Text, Operator, Text, String)),
(r'\s', Text),
],
}
def _rx_indent(level):
# Kconfig *always* interprets a tab as 8 spaces, so this is the default.
# Edit this if you are in an environment where KconfigLexer gets expanded
# input (tabs expanded to spaces) and the expansion tab width is != 8,
# e.g. in connection with Trac (trac.ini, [mimeviewer], tab_width).
# Value range here is 2 <= {tab_width} <= 8.
tab_width = 8
# Regex matching a given indentation {level}, assuming that indentation is
# a multiple of {tab_width}. In other cases there might be problems.
if tab_width == 2:
space_repeat = '+'
else:
space_repeat = '{1,%d}' % (tab_width - 1)
if level == 1:
level_repeat = ''
else:
level_repeat = '{%s}' % level
return r'(?:\t| %s\t| {%s})%s.*\n' % (space_repeat, tab_width, level_repeat)
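# With the default tab_width of 8, the helper expands to, for example:
#   _rx_indent(1) == r'(?:\t| {1,7}\t| {8}).*\n'
#   _rx_indent(2) == r'(?:\t| {1,7}\t| {8}){2}.*\n'
# i.e. one "tab stop" per indentation level, where a tab stop is a tab, one to
# seven spaces followed by a tab, or exactly eight spaces.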
class KconfigLexer(RegexLexer):
"""
For Linux-style Kconfig files.
.. versionadded:: 1.6
"""
name = 'Kconfig'
aliases = ['kconfig', 'menuconfig', 'linux-config', 'kernel-config']
# Adjust this if new kconfig file names appear in your environment
filenames = ['Kconfig', '*Config.in*', 'external.in*',
'standard-modules.in']
mimetypes = ['text/x-kconfig']
# No re.MULTILINE, indentation-aware help text needs line-by-line handling
flags = 0
def call_indent(level):
# If indentation >= {level} is detected, enter state 'indent{level}'
return (_rx_indent(level), String.Doc, 'indent%s' % level)
def do_indent(level):
# Print paragraphs of indentation level >= {level} as String.Doc,
# ignoring blank lines. Then return to 'root' state.
return [
(_rx_indent(level), String.Doc),
(r'\s*\n', Text),
default('#pop:2')
]
tokens = {
'root': [
(r'\s+', Text),
(r'#.*?\n', Comment.Single),
(words((
'mainmenu', 'config', 'menuconfig', 'choice', 'endchoice',
'comment', 'menu', 'endmenu', 'visible if', 'if', 'endif',
'source', 'prompt', 'select', 'depends on', 'default',
'range', 'option'), suffix=r'\b'),
Keyword),
(r'(---help---|help)[\t ]*\n', Keyword, 'help'),
(r'(bool|tristate|string|hex|int|defconfig_list|modules|env)\b',
Name.Builtin),
(r'[!=&|]', Operator),
(r'[()]', Punctuation),
(r'[0-9]+', Number.Integer),
(r"'(''|[^'])*'", String.Single),
(r'"(""|[^"])*"', String.Double),
(r'\S+', Text),
],
# Help text is indented, multi-line and ends when a lower indentation
# level is detected.
'help': [
# Skip blank lines after help token, if any
(r'\s*\n', Text),
# Determine the first help line's indentation level heuristically(!).
# Attention: this is not perfect, but works for 99% of "normal"
# indentation schemes up to a max. indentation level of 7.
call_indent(7),
call_indent(6),
call_indent(5),
call_indent(4),
call_indent(3),
call_indent(2),
call_indent(1),
default('#pop'), # for incomplete help sections without text
],
# Handle text for indentation levels 7 to 1
'indent7': do_indent(7),
'indent6': do_indent(6),
'indent5': do_indent(5),
'indent4': do_indent(4),
'indent3': do_indent(3),
'indent2': do_indent(2),
'indent1': do_indent(1),
}
class Cfengine3Lexer(RegexLexer):
"""
Lexer for `CFEngine3 <http://cfengine.org>`_ policy files.
.. versionadded:: 1.5
"""
name = 'CFEngine3'
aliases = ['cfengine3', 'cf3']
filenames = ['*.cf']
mimetypes = []
tokens = {
'root': [
(r'#.*?\n', Comment),
(r'(body)(\s+)(\S+)(\s+)(control)',
bygroups(Keyword, Text, Keyword, Text, Keyword)),
(r'(body|bundle)(\s+)(\S+)(\s+)(\w+)(\()',
bygroups(Keyword, Text, Keyword, Text, Name.Function, Punctuation),
'arglist'),
(r'(body|bundle)(\s+)(\S+)(\s+)(\w+)',
bygroups(Keyword, Text, Keyword, Text, Name.Function)),
(r'(")([^"]+)(")(\s+)(string|slist|int|real)(\s*)(=>)(\s*)',
bygroups(Punctuation, Name.Variable, Punctuation,
Text, Keyword.Type, Text, Operator, Text)),
(r'(\S+)(\s*)(=>)(\s*)',
bygroups(Keyword.Reserved, Text, Operator, Text)),
(r'"', String, 'string'),
(r'(\w+)(\()', bygroups(Name.Function, Punctuation)),
(r'([\w.!&|()]+)(::)', bygroups(Name.Class, Punctuation)),
(r'(\w+)(:)', bygroups(Keyword.Declaration, Punctuation)),
(r'@[{(][^)}]+[})]', Name.Variable),
(r'[(){},;]', Punctuation),
(r'=>', Operator),
(r'->', Operator),
(r'\d+\.\d+', Number.Float),
(r'\d+', Number.Integer),
(r'\w+', Name.Function),
(r'\s+', Text),
],
'string': [
(r'\$[{(]', String.Interpol, 'interpol'),
(r'\\.', String.Escape),
(r'"', String, '#pop'),
(r'\n', String),
(r'.', String),
],
'interpol': [
(r'\$[{(]', String.Interpol, '#push'),
(r'[})]', String.Interpol, '#pop'),
(r'[^${()}]+', String.Interpol),
],
'arglist': [
(r'\)', Punctuation, '#pop'),
(r',', Punctuation),
(r'\w+', Name.Variable),
(r'\s+', Text),
],
}
class ApacheConfLexer(RegexLexer):
"""
Lexer for configuration files following the Apache config file
format.
.. versionadded:: 0.6
"""
name = 'ApacheConf'
aliases = ['apacheconf', 'aconf', 'apache']
filenames = ['.htaccess', 'apache.conf', 'apache2.conf']
mimetypes = ['text/x-apacheconf']
flags = re.MULTILINE | re.IGNORECASE
tokens = {
'root': [
(r'\s+', Text),
(r'(#.*?)$', Comment),
(r'(<[^\s>]+)(?:(\s+)(.*?))?(>)',
bygroups(Name.Tag, Text, String, Name.Tag)),
(r'([a-z]\w*)(\s+)',
bygroups(Name.Builtin, Text), 'value'),
(r'\.+', Text),
],
'value': [
(r'\\\n', Text),
(r'$', Text, '#pop'),
(r'\\', Text),
(r'[^\S\n]+', Text),
(r'\d+\.\d+\.\d+\.\d+(?:/\d+)?', Number),
(r'\d+', Number),
(r'/([a-z0-9][\w./-]+)', String.Other),
(r'(on|off|none|any|all|double|email|dns|min|minimal|'
r'os|productonly|full|emerg|alert|crit|error|warn|'
r'notice|info|debug|registry|script|inetd|standalone|'
r'user|group)\b', Keyword),
(r'"([^"\\]*(?:\\.[^"\\]*)*)"', String.Double),
(r'[^\s"\\]+', Text)
],
}
class SquidConfLexer(RegexLexer):
"""
Lexer for `squid <http://www.squid-cache.org/>`_ configuration files.
.. versionadded:: 0.9
"""
name = 'SquidConf'
aliases = ['squidconf', 'squid.conf', 'squid']
filenames = ['squid.conf']
mimetypes = ['text/x-squidconf']
flags = re.IGNORECASE
keywords = (
"access_log", "acl", "always_direct", "announce_host",
"announce_period", "announce_port", "announce_to", "anonymize_headers",
"append_domain", "as_whois_server", "auth_param_basic",
"authenticate_children", "authenticate_program", "authenticate_ttl",
"broken_posts", "buffered_logs", "cache_access_log", "cache_announce",
"cache_dir", "cache_dns_program", "cache_effective_group",
"cache_effective_user", "cache_host", "cache_host_acl",
"cache_host_domain", "cache_log", "cache_mem", "cache_mem_high",
"cache_mem_low", "cache_mgr", "cachemgr_passwd", "cache_peer",
"cache_peer_access", "cahce_replacement_policy", "cache_stoplist",
"cache_stoplist_pattern", "cache_store_log", "cache_swap",
"cache_swap_high", "cache_swap_log", "cache_swap_low", "client_db",
"client_lifetime", "client_netmask", "connect_timeout", "coredump_dir",
"dead_peer_timeout", "debug_options", "delay_access", "delay_class",
"delay_initial_bucket_level", "delay_parameters", "delay_pools",
"deny_info", "dns_children", "dns_defnames", "dns_nameservers",
"dns_testnames", "emulate_httpd_log", "err_html_text",
"fake_user_agent", "firewall_ip", "forwarded_for", "forward_snmpd_port",
"fqdncache_size", "ftpget_options", "ftpget_program", "ftp_list_width",
"ftp_passive", "ftp_user", "half_closed_clients", "header_access",
"header_replace", "hierarchy_stoplist", "high_response_time_warning",
"high_page_fault_warning", "hosts_file", "htcp_port", "http_access",
"http_anonymizer", "httpd_accel", "httpd_accel_host",
"httpd_accel_port", "httpd_accel_uses_host_header",
"httpd_accel_with_proxy", "http_port", "http_reply_access",
"icp_access", "icp_hit_stale", "icp_port", "icp_query_timeout",
"ident_lookup", "ident_lookup_access", "ident_timeout",
"incoming_http_average", "incoming_icp_average", "inside_firewall",
"ipcache_high", "ipcache_low", "ipcache_size", "local_domain",
"local_ip", "logfile_rotate", "log_fqdn", "log_icp_queries",
"log_mime_hdrs", "maximum_object_size", "maximum_single_addr_tries",
"mcast_groups", "mcast_icp_query_timeout", "mcast_miss_addr",
"mcast_miss_encode_key", "mcast_miss_port", "memory_pools",
"memory_pools_limit", "memory_replacement_policy", "mime_table",
"min_http_poll_cnt", "min_icp_poll_cnt", "minimum_direct_hops",
"minimum_object_size", "minimum_retry_timeout", "miss_access",
"negative_dns_ttl", "negative_ttl", "neighbor_timeout",
"neighbor_type_domain", "netdb_high", "netdb_low", "netdb_ping_period",
"netdb_ping_rate", "never_direct", "no_cache", "passthrough_proxy",
"pconn_timeout", "pid_filename", "pinger_program", "positive_dns_ttl",
"prefer_direct", "proxy_auth", "proxy_auth_realm", "query_icmp",
"quick_abort", "quick_abort_max", "quick_abort_min",
"quick_abort_pct", "range_offset_limit", "read_timeout",
"redirect_children", "redirect_program",
"redirect_rewrites_host_header", "reference_age",
"refresh_pattern", "reload_into_ims", "request_body_max_size",
"request_size", "request_timeout", "shutdown_lifetime",
"single_parent_bypass", "siteselect_timeout", "snmp_access",
"snmp_incoming_address", "snmp_port", "source_ping", "ssl_proxy",
"store_avg_object_size", "store_objects_per_bucket",
"strip_query_terms", "swap_level1_dirs", "swap_level2_dirs",
"tcp_incoming_address", "tcp_outgoing_address", "tcp_recv_bufsize",
"test_reachability", "udp_hit_obj", "udp_hit_obj_size",
"udp_incoming_address", "udp_outgoing_address", "unique_hostname",
"unlinkd_program", "uri_whitespace", "useragent_log",
"visible_hostname", "wais_relay", "wais_relay_host", "wais_relay_port",
)
opts = (
"proxy-only", "weight", "ttl", "no-query", "default", "round-robin",
"multicast-responder", "on", "off", "all", "deny", "allow", "via",
"parent", "no-digest", "heap", "lru", "realm", "children", "q1", "q2",
"credentialsttl", "none", "disable", "offline_toggle", "diskd",
)
actions = (
"shutdown", "info", "parameter", "server_list", "client_list",
r'squid.conf',
)
actions_stats = (
"objects", "vm_objects", "utilization", "ipcache", "fqdncache", "dns",
"redirector", "io", "reply_headers", "filedescriptors", "netdb",
)
actions_log = ("status", "enable", "disable", "clear")
acls = (
"url_regex", "urlpath_regex", "referer_regex", "port", "proto",
"req_mime_type", "rep_mime_type", "method", "browser", "user", "src",
"dst", "time", "dstdomain", "ident", "snmp_community",
)
ip_re = (
r'(?:(?:(?:[3-9]\d?|2(?:5[0-5]|[0-4]?\d)?|1\d{0,2}|0x0*[0-9a-f]{1,2}|'
r'0+[1-3]?[0-7]{0,2})(?:\.(?:[3-9]\d?|2(?:5[0-5]|[0-4]?\d)?|1\d{0,2}|'
r'0x0*[0-9a-f]{1,2}|0+[1-3]?[0-7]{0,2})){3})|(?!.*::.*::)(?:(?!:)|'
r':(?=:))(?:[0-9a-f]{0,4}(?:(?<=::)|(?<!::):)){6}(?:[0-9a-f]{0,4}'
r'(?:(?<=::)|(?<!::):)[0-9a-f]{0,4}(?:(?<=::)|(?<!:)|(?<=:)(?<!::):)|'
r'(?:25[0-4]|2[0-4]\d|1\d\d|[1-9]?\d)(?:\.(?:25[0-4]|2[0-4]\d|1\d\d|'
r'[1-9]?\d)){3}))'
)
tokens = {
'root': [
(r'\s+', Whitespace),
(r'#', Comment, 'comment'),
(words(keywords, prefix=r'\b', suffix=r'\b'), Keyword),
(words(opts, prefix=r'\b', suffix=r'\b'), Name.Constant),
# Actions
(words(actions, prefix=r'\b', suffix=r'\b'), String),
(words(actions_stats, prefix=r'stats/', suffix=r'\b'), String),
(words(actions_log, prefix=r'log/', suffix=r'='), String),
(words(acls, prefix=r'\b', suffix=r'\b'), Keyword),
(ip_re + r'(?:/(?:' + ip_re + r'|\b\d+\b))?', Number.Float),
(r'(?:\b\d+\b(?:-\b\d+|%)?)', Number),
(r'\S+', Text),
],
'comment': [
(r'\s*TAG:.*', String.Escape, '#pop'),
(r'.+', Comment, '#pop'),
default('#pop'),
],
}
class NginxConfLexer(RegexLexer):
"""
Lexer for `Nginx <http://nginx.net/>`_ configuration files.
.. versionadded:: 0.11
"""
name = 'Nginx configuration file'
aliases = ['nginx']
filenames = ['nginx.conf']
mimetypes = ['text/x-nginx-conf']
tokens = {
'root': [
(r'(include)(\s+)([^\s;]+)', bygroups(Keyword, Text, Name)),
(r'[^\s;#]+', Keyword, 'stmt'),
include('base'),
],
'block': [
(r'\}', Punctuation, '#pop:2'),
(r'[^\s;#]+', Keyword.Namespace, 'stmt'),
include('base'),
],
'stmt': [
(r'\{', Punctuation, 'block'),
(r';', Punctuation, '#pop'),
include('base'),
],
'base': [
(r'#.*\n', Comment.Single),
(r'on|off', Name.Constant),
(r'\$[^\s;#()]+', Name.Variable),
(r'([a-z0-9.-]+)(:)([0-9]+)',
bygroups(Name, Punctuation, Number.Integer)),
(r'[a-z-]+/[a-z-+]+', String), # mimetype
# (r'[a-zA-Z._-]+', Keyword),
(r'[0-9]+[km]?\b', Number.Integer),
(r'(~)(\s*)([^\s{]+)', bygroups(Punctuation, Text, String.Regex)),
(r'[:=~]', Punctuation),
(r'[^\s;#{}$]+', String), # catch all
(r'/[^\s;#]*', Name), # pathname
(r'\s+', Text),
(r'[$;]', Text), # leftover characters
],
}
class LighttpdConfLexer(RegexLexer):
"""
Lexer for `Lighttpd <http://lighttpd.net/>`_ configuration files.
.. versionadded:: 0.11
"""
name = 'Lighttpd configuration file'
aliases = ['lighty', 'lighttpd']
filenames = []
mimetypes = ['text/x-lighttpd-conf']
tokens = {
'root': [
(r'#.*\n', Comment.Single),
(r'/\S*', Name), # pathname
(r'[a-zA-Z._-]+', Keyword),
(r'\d+\.\d+\.\d+\.\d+(?:/\d+)?', Number),
(r'[0-9]+', Number),
(r'=>|=~|\+=|==|=|\+', Operator),
(r'\$[A-Z]+', Name.Builtin),
(r'[(){}\[\],]', Punctuation),
(r'"([^"\\]*(?:\\.[^"\\]*)*)"', String.Double),
(r'\s+', Text),
],
}
class DockerLexer(RegexLexer):
"""
Lexer for `Docker <http://docker.io>`_ configuration files.
.. versionadded:: 2.0
"""
name = 'Docker'
aliases = ['docker', 'dockerfile']
filenames = ['Dockerfile', '*.docker']
mimetypes = ['text/x-dockerfile-config']
_keywords = (r'(?:FROM|MAINTAINER|CMD|EXPOSE|ENV|ADD|ENTRYPOINT|'
r'VOLUME|WORKDIR)')
flags = re.IGNORECASE | re.MULTILINE
tokens = {
'root': [
(r'^(ONBUILD)(\s+)(%s)\b' % (_keywords,),
bygroups(Name.Keyword, Whitespace, Keyword)),
(r'^(%s)\b(.*)' % (_keywords,), bygroups(Keyword, String)),
(r'#.*', Comment),
(r'RUN', Keyword), # Rest of line falls through
(r'(.*\\\n)*.+', using(BashLexer)),
],
}
class TerraformLexer(RegexLexer):
"""
    Lexer for `terraform .tf files <https://www.terraform.io/>`_.
.. versionadded:: 2.1
"""
name = 'Terraform'
aliases = ['terraform', 'tf']
filenames = ['*.tf']
mimetypes = ['application/x-tf', 'application/x-terraform']
tokens = {
'root': [
include('string'),
include('punctuation'),
include('curly'),
include('basic'),
include('whitespace'),
(r'[0-9]+', Number),
],
'basic': [
(words(('true', 'false'), prefix=r'\b', suffix=r'\b'), Keyword.Type),
(r'\s*/\*', Comment.Multiline, 'comment'),
(r'\s*#.*\n', Comment.Single),
(r'(.*?)(\s*)(=)', bygroups(Name.Attribute, Text, Operator)),
(words(('variable', 'resource', 'provider', 'provisioner', 'module'),
prefix=r'\b', suffix=r'\b'), Keyword.Reserved, 'function'),
(words(('ingress', 'egress', 'listener', 'default', 'connection'),
prefix=r'\b', suffix=r'\b'), Keyword.Declaration),
            (r'\$\{', String.Interpol, 'var_builtin'),
],
'function': [
(r'(\s+)(".*")(\s+)', bygroups(Text, String, Text)),
include('punctuation'),
include('curly'),
],
'var_builtin': [
(r'\$\{', String.Interpol, '#push'),
(words(('concat', 'file', 'join', 'lookup', 'element'),
prefix=r'\b', suffix=r'\b'), Name.Builtin),
include('string'),
include('punctuation'),
(r'\s+', Text),
(r'\}', String.Interpol, '#pop'),
],
'string': [
(r'(".*")', bygroups(String.Double)),
],
'punctuation': [
(r'[\[\](),.]', Punctuation),
],
        # Keep this separate from punctuation - we sometimes want to use different
        # tokens for { }
'curly': [
(r'\{', Text.Punctuation),
(r'\}', Text.Punctuation),
],
'comment': [
(r'[^*/]', Comment.Multiline),
(r'/\*', Comment.Multiline, '#push'),
(r'\*/', Comment.Multiline, '#pop'),
(r'[*/]', Comment.Multiline)
],
'whitespace': [
(r'\n', Text),
(r'\s+', Text),
(r'\\\n', Text),
],
}
class TermcapLexer(RegexLexer):
"""
Lexer for termcap database source.
This is very simple and minimal.
.. versionadded:: 2.1
"""
name = 'Termcap'
aliases = ['termcap']
filenames = ['termcap', 'termcap.src']
mimetypes = []
# NOTE:
# * multiline with trailing backslash
# * separator is ':'
# * to embed colon as data, we must use \072
    # * space after separator is not allowed (maybe)
tokens = {
'root': [
(r'^#.*$', Comment),
(r'^[^\s#:|]+', Name.Tag, 'names'),
],
'names': [
(r'\n', Text, '#pop'),
(r':', Punctuation, 'defs'),
(r'\|', Punctuation),
(r'[^:|]+', Name.Attribute),
],
'defs': [
(r'\\\n[ \t]*', Text),
(r'\n[ \t]*', Text, '#pop:2'),
(r'(#)([0-9]+)', bygroups(Operator, Number)),
(r'=', Operator, 'data'),
(r':', Punctuation),
(r'[^\s:=#]+', Name.Class),
],
'data': [
(r'\\072', Literal),
(r':', Punctuation, '#pop'),
(r'[^:\\]+', Literal), # for performance
(r'.', Literal),
],
}
class TerminfoLexer(RegexLexer):
"""
Lexer for terminfo database source.
This is very simple and minimal.
.. versionadded:: 2.1
"""
name = 'Terminfo'
aliases = ['terminfo']
filenames = ['terminfo', 'terminfo.src']
mimetypes = []
# NOTE:
# * multiline with leading whitespace
# * separator is ','
# * to embed comma as data, we can use \,
# * space after separator is allowed
tokens = {
'root': [
(r'^#.*$', Comment),
(r'^[^\s#,|]+', Name.Tag, 'names'),
],
'names': [
(r'\n', Text, '#pop'),
(r'(,)([ \t]*)', bygroups(Punctuation, Text), 'defs'),
(r'\|', Punctuation),
(r'[^,|]+', Name.Attribute),
],
'defs': [
(r'\n[ \t]+', Text),
(r'\n', Text, '#pop:2'),
(r'(#)([0-9]+)', bygroups(Operator, Number)),
(r'=', Operator, 'data'),
(r'(,)([ \t]*)', bygroups(Punctuation, Text)),
(r'[^\s,=#]+', Name.Class),
],
'data': [
(r'\\[,\\]', Literal),
(r'(,)([ \t]*)', bygroups(Punctuation, Text), '#pop'),
(r'[^\\,]+', Literal), # for performance
(r'.', Literal),
],
}
class PkgConfigLexer(RegexLexer):
"""
Lexer for `pkg-config
<http://www.freedesktop.org/wiki/Software/pkg-config/>`_
(see also `manual page <http://linux.die.net/man/1/pkg-config>`_).
.. versionadded:: 2.1
"""
name = 'PkgConfig'
aliases = ['pkgconfig']
filenames = ['*.pc']
mimetypes = []
tokens = {
'root': [
(r'#.*$', Comment.Single),
# variable definitions
(r'^(\w+)(=)', bygroups(Name.Attribute, Operator)),
# keyword lines
(r'^([\w.]+)(:)',
bygroups(Name.Tag, Punctuation), 'spvalue'),
# variable references
include('interp'),
# fallback
(r'[^${}#=:\n.]+', Text),
(r'.', Text),
],
'interp': [
# you can escape literal "$" as "$$"
(r'\$\$', Text),
# variable references
(r'\$\{', String.Interpol, 'curly'),
],
'curly': [
(r'\}', String.Interpol, '#pop'),
(r'\w+', Name.Attribute),
],
'spvalue': [
include('interp'),
(r'#.*$', Comment.Single, '#pop'),
(r'\n', Text, '#pop'),
# fallback
(r'[^${}#\n]+', Text),
(r'.', Text),
],
}
class PacmanConfLexer(RegexLexer):
"""
Lexer for `pacman.conf
<https://www.archlinux.org/pacman/pacman.conf.5.html>`_.
    Actually, IniLexer works almost fine for this format,
    but it yields error tokens, because pacman.conf allows
    options without an assignment, like:
UseSyslog
Color
TotalDownload
CheckSpace
VerbosePkgLists
These are flags to switch on.
.. versionadded:: 2.1
"""
name = 'PacmanConf'
aliases = ['pacmanconf']
filenames = ['pacman.conf']
mimetypes = []
tokens = {
'root': [
# comment
(r'#.*$', Comment.Single),
# section header
(r'^\s*\[.*?\]\s*$', Keyword),
# variable definitions
# (Leading space is allowed...)
(r'(\w+)(\s*)(=)',
bygroups(Name.Attribute, Text, Operator)),
# flags to on
(r'^(\s*)(\w+)(\s*)$',
bygroups(Text, Name.Attribute, Text)),
# built-in special values
(words((
'$repo', # repository
'$arch', # architecture
'%o', # outfile
'%u', # url
), suffix=r'\b'),
Name.Variable),
# fallback
(r'.', Text),
],
}
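# A minimal usage sketch (not part of the original module): the bare flag
# options described in the docstring above come out as Name.Attribute tokens
# instead of the error tokens IniLexer would produce.
def _example_tokenize_pacmanconf():
    sample = '[options]\nUseSyslog\nColor\nHoldPkg = pacman glibc\n'
    return list(PacmanConfLexer().get_tokens(sample))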
| bsd-3-clause | 5,063,656,968,978,506,000 | 32.932773 | 82 | 0.477889 | false |
JCBarahona/edX | common/djangoapps/course_modes/migrations/0007_add_description.py | 114 | 2270 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'CourseMode.description'
db.add_column('course_modes_coursemode', 'description',
self.gf('django.db.models.fields.TextField')(null=True, blank=True),
keep_default=False)
# Changing field 'CourseMode.course_id'
db.alter_column('course_modes_coursemode', 'course_id', self.gf('xmodule_django.models.CourseKeyField')(max_length=255))
def backwards(self, orm):
# Deleting field 'CourseMode.description'
db.delete_column('course_modes_coursemode', 'description')
# Changing field 'CourseMode.course_id'
db.alter_column('course_modes_coursemode', 'course_id', self.gf('django.db.models.fields.CharField')(max_length=255))
models = {
'course_modes.coursemode': {
'Meta': {'unique_together': "(('course_id', 'mode_slug', 'currency'),)", 'object_name': 'CourseMode'},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '8'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'expiration_date': ('django.db.models.fields.DateField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'expiration_datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'min_price': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'mode_display_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'mode_slug': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'suggested_prices': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'default': "''", 'max_length': '255', 'blank': 'True'})
}
}
complete_apps = ['course_modes']
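# Usage note (not part of the original migration): with South installed this
# schema change is applied with the migrate management command, e.g.
# `python manage.py migrate course_modes 0007`; the backwards() method above
# is what runs when migrating back to an earlier state.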
| agpl-3.0 | 5,266,291,405,919,106,000 | 50.590909 | 147 | 0.598678 | false |
c0hen/django-venv | lib/python3.4/site-packages/django/contrib/gis/db/backends/spatialite/models.py | 510 | 2946 | """
The GeometryColumns and SpatialRefSys models for the SpatiaLite backend.
"""
from django.contrib.gis.db.backends.base.models import SpatialRefSysMixin
from django.contrib.gis.db.backends.spatialite.base import DatabaseWrapper
from django.db import connection, models
from django.db.backends.signals import connection_created
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class SpatialiteGeometryColumns(models.Model):
"""
The 'geometry_columns' table from SpatiaLite.
"""
f_table_name = models.CharField(max_length=256)
f_geometry_column = models.CharField(max_length=256)
coord_dimension = models.IntegerField()
srid = models.IntegerField(primary_key=True)
spatial_index_enabled = models.IntegerField()
class Meta:
app_label = 'gis'
db_table = 'geometry_columns'
managed = False
@classmethod
def table_name_col(cls):
"""
Returns the name of the metadata column used to store the feature table
name.
"""
return 'f_table_name'
@classmethod
def geom_col_name(cls):
"""
Returns the name of the metadata column used to store the feature
geometry column.
"""
return 'f_geometry_column'
def __str__(self):
return "%s.%s - %dD %s field (SRID: %d)" % \
(self.f_table_name, self.f_geometry_column,
self.coord_dimension, self.type, self.srid)
class SpatialiteSpatialRefSys(models.Model, SpatialRefSysMixin):
"""
The 'spatial_ref_sys' table from SpatiaLite.
"""
srid = models.IntegerField(primary_key=True)
auth_name = models.CharField(max_length=256)
auth_srid = models.IntegerField()
ref_sys_name = models.CharField(max_length=256)
proj4text = models.CharField(max_length=2048)
@property
def wkt(self):
if hasattr(self, 'srtext'):
return self.srtext
from django.contrib.gis.gdal import SpatialReference
return SpatialReference(self.proj4text).wkt
class Meta:
app_label = 'gis'
db_table = 'spatial_ref_sys'
managed = False
def add_spatial_version_related_fields(sender, **kwargs):
"""
Adds fields after establishing a database connection to prevent database
operations at compile time.
"""
if connection_created.disconnect(add_spatial_version_related_fields, sender=DatabaseWrapper):
spatial_version = connection.ops.spatial_version[0]
if spatial_version >= 4:
SpatialiteSpatialRefSys.add_to_class('srtext', models.CharField(max_length=2048))
SpatialiteGeometryColumns.add_to_class('type', models.IntegerField(db_column='geometry_type'))
else:
SpatialiteGeometryColumns.add_to_class('type', models.CharField(max_length=30))
connection_created.connect(add_spatial_version_related_fields, sender=DatabaseWrapper)
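# A minimal usage sketch (not part of the original module): reading the WKT of
# a spatial reference system through the model above. Assumes a SpatiaLite
# database whose spatial_ref_sys table is populated; SRID 4326 is illustrative.
def _example_srs_wkt(srid=4326):
    return SpatialiteSpatialRefSys.objects.get(srid=srid).wkt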
| gpl-3.0 | -7,353,927,209,493,205,000 | 34.071429 | 106 | 0.679566 | false |
smily77/Arduino | arduino-core/src/processing/app/i18n/python/requests/packages/urllib3/connectionpool.py | 184 | 20547 | # urllib3/connectionpool.py
# Copyright 2008-2012 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import logging
import socket
import errno
from socket import error as SocketError, timeout as SocketTimeout
from .util import resolve_cert_reqs, resolve_ssl_version
try: # Python 3
from http.client import HTTPConnection, HTTPException
from http.client import HTTP_PORT, HTTPS_PORT
except ImportError:
from httplib import HTTPConnection, HTTPException
from httplib import HTTP_PORT, HTTPS_PORT
try: # Python 3
from queue import LifoQueue, Empty, Full
except ImportError:
from Queue import LifoQueue, Empty, Full
try: # Compiled with SSL?
HTTPSConnection = object
BaseSSLError = None
ssl = None
try: # Python 3
from http.client import HTTPSConnection
except ImportError:
from httplib import HTTPSConnection
import ssl
BaseSSLError = ssl.SSLError
except (ImportError, AttributeError): # Platform-specific: No SSL.
pass
from .request import RequestMethods
from .response import HTTPResponse
from .util import get_host, is_connection_dropped, ssl_wrap_socket
from .exceptions import (
ClosedPoolError,
EmptyPoolError,
HostChangedError,
MaxRetryError,
SSLError,
TimeoutError,
)
from .packages.ssl_match_hostname import match_hostname, CertificateError
from .packages import six
xrange = six.moves.xrange
log = logging.getLogger(__name__)
_Default = object()
port_by_scheme = {
'http': HTTP_PORT,
'https': HTTPS_PORT,
}
## Connection objects (extension of httplib)
class VerifiedHTTPSConnection(HTTPSConnection):
"""
Based on httplib.HTTPSConnection but wraps the socket with
SSL certification.
"""
cert_reqs = None
ca_certs = None
ssl_version = None
def set_cert(self, key_file=None, cert_file=None,
cert_reqs=None, ca_certs=None):
self.key_file = key_file
self.cert_file = cert_file
self.cert_reqs = cert_reqs
self.ca_certs = ca_certs
def connect(self):
# Add certificate verification
sock = socket.create_connection((self.host, self.port), self.timeout)
resolved_cert_reqs = resolve_cert_reqs(self.cert_reqs)
resolved_ssl_version = resolve_ssl_version(self.ssl_version)
# Wrap socket using verification with the root certs in
# trusted_root_certs
self.sock = ssl_wrap_socket(sock, self.key_file, self.cert_file,
cert_reqs=resolved_cert_reqs,
ca_certs=self.ca_certs,
server_hostname=self.host,
ssl_version=resolved_ssl_version)
if resolved_cert_reqs != ssl.CERT_NONE:
match_hostname(self.sock.getpeercert(), self.host)
## Pool objects
class ConnectionPool(object):
"""
Base class for all connection pools, such as
:class:`.HTTPConnectionPool` and :class:`.HTTPSConnectionPool`.
"""
scheme = None
QueueCls = LifoQueue
def __init__(self, host, port=None):
self.host = host
self.port = port
def __str__(self):
return '%s(host=%r, port=%r)' % (type(self).__name__,
self.host, self.port)
class HTTPConnectionPool(ConnectionPool, RequestMethods):
"""
Thread-safe connection pool for one host.
:param host:
Host used for this HTTP Connection (e.g. "localhost"), passed into
:class:`httplib.HTTPConnection`.
:param port:
Port used for this HTTP Connection (None is equivalent to 80), passed
into :class:`httplib.HTTPConnection`.
:param strict:
Causes BadStatusLine to be raised if the status line can't be parsed
as a valid HTTP/1.0 or 1.1 status line, passed into
:class:`httplib.HTTPConnection`.
:param timeout:
Socket timeout for each individual connection, can be a float. None
disables timeout.
:param maxsize:
Number of connections to save that can be reused. More than 1 is useful
in multithreaded situations. If ``block`` is set to false, more
connections will be created but they will not be saved once they've
been used.
:param block:
If set to True, no more than ``maxsize`` connections will be used at
a time. When no free connections are available, the call will block
until a connection has been released. This is a useful side effect for
particular multithreaded situations where one does not want to use more
than maxsize connections per host to prevent flooding.
:param headers:
Headers to include with all requests, unless other headers are given
explicitly.
"""
scheme = 'http'
def __init__(self, host, port=None, strict=False, timeout=None, maxsize=1,
block=False, headers=None):
ConnectionPool.__init__(self, host, port)
RequestMethods.__init__(self, headers)
self.strict = strict
self.timeout = timeout
self.pool = self.QueueCls(maxsize)
self.block = block
# Fill the queue up so that doing get() on it will block properly
for _ in xrange(maxsize):
self.pool.put(None)
# These are mostly for testing and debugging purposes.
self.num_connections = 0
self.num_requests = 0
def _new_conn(self):
"""
Return a fresh :class:`httplib.HTTPConnection`.
"""
self.num_connections += 1
log.info("Starting new HTTP connection (%d): %s" %
(self.num_connections, self.host))
return HTTPConnection(host=self.host,
port=self.port,
strict=self.strict)
def _get_conn(self, timeout=None):
"""
Get a connection. Will return a pooled connection if one is available.
If no connections are available and :prop:`.block` is ``False``, then a
fresh connection is returned.
:param timeout:
Seconds to wait before giving up and raising
:class:`urllib3.exceptions.EmptyPoolError` if the pool is empty and
:prop:`.block` is ``True``.
"""
conn = None
try:
conn = self.pool.get(block=self.block, timeout=timeout)
except AttributeError: # self.pool is None
raise ClosedPoolError(self, "Pool is closed.")
except Empty:
if self.block:
raise EmptyPoolError(self,
"Pool reached maximum size and no more "
"connections are allowed.")
pass # Oh well, we'll create a new connection then
# If this is a persistent connection, check if it got disconnected
if conn and is_connection_dropped(conn):
log.info("Resetting dropped connection: %s" % self.host)
conn.close()
return conn or self._new_conn()
def _put_conn(self, conn):
"""
Put a connection back into the pool.
:param conn:
Connection object for the current host and port as returned by
:meth:`._new_conn` or :meth:`._get_conn`.
If the pool is already full, the connection is closed and discarded
because we exceeded maxsize. If connections are discarded frequently,
then maxsize should be increased.
If the pool is closed, then the connection will be closed and discarded.
"""
try:
self.pool.put(conn, block=False)
return # Everything is dandy, done.
except AttributeError:
# self.pool is None.
pass
except Full:
# This should never happen if self.block == True
log.warning("HttpConnectionPool is full, discarding connection: %s"
% self.host)
# Connection never got put back into the pool, close it.
conn.close()
def _make_request(self, conn, method, url, timeout=_Default,
**httplib_request_kw):
"""
Perform a request on a given httplib connection object taken from our
pool.
"""
self.num_requests += 1
if timeout is _Default:
timeout = self.timeout
conn.timeout = timeout # This only does anything in Py26+
conn.request(method, url, **httplib_request_kw)
# Set timeout
sock = getattr(conn, 'sock', False) # AppEngine doesn't have sock attr.
if sock:
sock.settimeout(timeout)
try: # Python 2.7+, use buffering of HTTP responses
httplib_response = conn.getresponse(buffering=True)
except TypeError: # Python 2.6 and older
httplib_response = conn.getresponse()
# AppEngine doesn't have a version attr.
http_version = getattr(conn, '_http_vsn_str', 'HTTP/?')
log.debug("\"%s %s %s\" %s %s" % (method, url, http_version,
httplib_response.status,
httplib_response.length))
return httplib_response
def close(self):
"""
Close all pooled connections and disable the pool.
"""
# Disable access to the pool
old_pool, self.pool = self.pool, None
try:
while True:
conn = old_pool.get(block=False)
if conn:
conn.close()
except Empty:
pass # Done.
def is_same_host(self, url):
"""
Check if the given ``url`` is a member of the same host as this
connection pool.
"""
if url.startswith('/'):
return True
# TODO: Add optional support for socket.gethostbyname checking.
scheme, host, port = get_host(url)
if self.port and not port:
# Use explicit default port for comparison when none is given.
port = port_by_scheme.get(scheme)
return (scheme, host, port) == (self.scheme, self.host, self.port)
def urlopen(self, method, url, body=None, headers=None, retries=3,
redirect=True, assert_same_host=True, timeout=_Default,
pool_timeout=None, release_conn=None, **response_kw):
"""
Get a connection from the pool and perform an HTTP request. This is the
lowest level call for making a request, so you'll need to specify all
the raw details.
.. note::
More commonly, it's appropriate to use a convenience method provided
by :class:`.RequestMethods`, such as :meth:`request`.
.. note::
`release_conn` will only behave as expected if
`preload_content=False` because we want to make
`preload_content=False` the default behaviour someday soon without
breaking backwards compatibility.
:param method:
HTTP request method (such as GET, POST, PUT, etc.)
:param body:
Data to send in the request body (useful for creating
POST requests, see HTTPConnectionPool.post_url for
more convenience).
:param headers:
Dictionary of custom headers to send, such as User-Agent,
If-None-Match, etc. If None, pool headers are used. If provided,
these headers completely replace any pool-specific headers.
:param retries:
Number of retries to allow before raising a MaxRetryError exception.
:param redirect:
If True, automatically handle redirects (status codes 301, 302,
303, 307). Each redirect counts as a retry.
:param assert_same_host:
If ``True``, will make sure that the host of the pool requests is
consistent else will raise HostChangedError. When False, you can
use the pool on an HTTP proxy and request foreign hosts.
:param timeout:
If specified, overrides the default timeout for this one request.
:param pool_timeout:
If set and the pool is set to block=True, then this method will
block for ``pool_timeout`` seconds and raise EmptyPoolError if no
connection is available within the time period.
:param release_conn:
If False, then the urlopen call will not release the connection
back into the pool once a response is received (but will release if
you read the entire contents of the response such as when
`preload_content=True`). This is useful if you're not preloading
the response's content immediately. You will need to call
``r.release_conn()`` on the response ``r`` to return the connection
back into the pool. If None, it takes the value of
``response_kw.get('preload_content', True)``.
:param \**response_kw:
Additional parameters are passed to
:meth:`urllib3.response.HTTPResponse.from_httplib`
"""
if headers is None:
headers = self.headers
if retries < 0:
raise MaxRetryError(self, url)
if timeout is _Default:
timeout = self.timeout
if release_conn is None:
release_conn = response_kw.get('preload_content', True)
# Check host
if assert_same_host and not self.is_same_host(url):
host = "%s://%s" % (self.scheme, self.host)
if self.port:
host = "%s:%d" % (host, self.port)
raise HostChangedError(self, url, retries - 1)
conn = None
try:
# Request a connection from the queue
conn = self._get_conn(timeout=pool_timeout)
# Make the request on the httplib connection object
httplib_response = self._make_request(conn, method, url,
timeout=timeout,
body=body, headers=headers)
# If we're going to release the connection in ``finally:``, then
# the request doesn't need to know about the connection. Otherwise
# it will also try to release it and we'll have a double-release
# mess.
response_conn = not release_conn and conn
# Import httplib's response into our own wrapper object
response = HTTPResponse.from_httplib(httplib_response,
pool=self,
connection=response_conn,
**response_kw)
# else:
# The connection will be put back into the pool when
# ``response.release_conn()`` is called (implicitly by
# ``response.read()``)
except Empty as e:
# Timed out by queue
raise TimeoutError(self, "Request timed out. (pool_timeout=%s)" %
pool_timeout)
except SocketTimeout as e:
# Timed out by socket
raise TimeoutError(self, "Request timed out. (timeout=%s)" %
timeout)
except BaseSSLError as e:
# SSL certificate error
raise SSLError(e)
except CertificateError as e:
# Name mismatch
raise SSLError(e)
except (HTTPException, SocketError) as e:
# Connection broken, discard. It will be replaced next _get_conn().
conn = None
# This is necessary so we can access e below
err = e
if retries == 0:
raise MaxRetryError(self, url, e)
finally:
if release_conn:
# Put the connection back to be reused. If the connection is
# expired then it will be None, which will get replaced with a
# fresh connection during _get_conn.
self._put_conn(conn)
if not conn:
# Try again
log.warn("Retrying (%d attempts remain) after connection "
"broken by '%r': %s" % (retries, err, url))
return self.urlopen(method, url, body, headers, retries - 1,
redirect, assert_same_host,
timeout=timeout, pool_timeout=pool_timeout,
release_conn=release_conn, **response_kw)
# Handle redirect?
redirect_location = redirect and response.get_redirect_location()
if redirect_location:
if response.status == 303:
method = 'GET'
log.info("Redirecting %s -> %s" % (url, redirect_location))
return self.urlopen(method, redirect_location, body, headers,
retries - 1, redirect, assert_same_host,
timeout=timeout, pool_timeout=pool_timeout,
release_conn=release_conn, **response_kw)
return response
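# A minimal usage sketch (not part of the original module): one pool instance
# is reused for requests to a single host via the urlopen() API documented
# above. The host name is illustrative only.
def _example_http_pool(host='example.com'):
    pool = HTTPConnectionPool(host, maxsize=2, block=True)
    response = pool.urlopen('GET', '/')
    return response.status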
class HTTPSConnectionPool(HTTPConnectionPool):
"""
Same as :class:`.HTTPConnectionPool`, but HTTPS.
When Python is compiled with the :mod:`ssl` module, then
:class:`.VerifiedHTTPSConnection` is used, which *can* verify certificates,
instead of :class:`httplib.HTTPSConnection`.
The ``key_file``, ``cert_file``, ``cert_reqs``, ``ca_certs``, and ``ssl_version``
are only used if :mod:`ssl` is available and are fed into
:meth:`urllib3.util.ssl_wrap_socket` to upgrade the connection socket into an SSL socket.
"""
scheme = 'https'
def __init__(self, host, port=None,
strict=False, timeout=None, maxsize=1,
block=False, headers=None,
key_file=None, cert_file=None,
cert_reqs=None, ca_certs=None, ssl_version=None):
HTTPConnectionPool.__init__(self, host, port,
strict, timeout, maxsize,
block, headers)
self.key_file = key_file
self.cert_file = cert_file
self.cert_reqs = cert_reqs
self.ca_certs = ca_certs
self.ssl_version = ssl_version
def _new_conn(self):
"""
Return a fresh :class:`httplib.HTTPSConnection`.
"""
self.num_connections += 1
log.info("Starting new HTTPS connection (%d): %s"
% (self.num_connections, self.host))
if not ssl: # Platform-specific: Python compiled without +ssl
if not HTTPSConnection or HTTPSConnection is object:
raise SSLError("Can't connect to HTTPS URL because the SSL "
"module is not available.")
return HTTPSConnection(host=self.host,
port=self.port,
strict=self.strict)
connection = VerifiedHTTPSConnection(host=self.host,
port=self.port,
strict=self.strict)
connection.set_cert(key_file=self.key_file, cert_file=self.cert_file,
cert_reqs=self.cert_reqs, ca_certs=self.ca_certs)
connection.ssl_version = self.ssl_version
return connection
def connection_from_url(url, **kw):
"""
Given a url, return an :class:`.ConnectionPool` instance of its host.
This is a shortcut for not having to parse out the scheme, host, and port
of the url before creating an :class:`.ConnectionPool` instance.
:param url:
Absolute URL string that must include the scheme. Port is optional.
:param \**kw:
Passes additional parameters to the constructor of the appropriate
:class:`.ConnectionPool`. Useful for specifying things like
timeout, maxsize, headers, etc.
Example: ::
>>> conn = connection_from_url('http://google.com/')
>>> r = conn.request('GET', '/')
"""
scheme, host, port = get_host(url)
if scheme == 'https':
return HTTPSConnectionPool(host, port=port, **kw)
else:
return HTTPConnectionPool(host, port=port, **kw)
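# A minimal usage sketch (not part of the original module): a verifying HTTPS
# pool, which upgrades sockets through VerifiedHTTPSConnection when the ssl
# module is available. The host and CA bundle path are placeholders.
def _example_verified_https_pool(host='example.com',
                                 ca_bundle='/etc/ssl/certs/ca-certificates.crt'):
    pool = HTTPSConnectionPool(host, port=443,
                               cert_reqs='CERT_REQUIRED',
                               ca_certs=ca_bundle)
    # request() comes from the RequestMethods mixin.
    return pool.request('GET', '/').status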
| lgpl-2.1 | 804,300,333,193,412,700 | 34.487047 | 93 | 0.583881 | false |
timothycrosley/isort | isort/stdlibs/py39.py | 3 | 3295 | """
File contains the standard library of Python 3.9.
DO NOT EDIT. If the standard library changes, a new list should be created
using the mkstdlibs.py script.
"""
stdlib = {
"_thread",
"abc",
"aifc",
"argparse",
"array",
"ast",
"asynchat",
"asyncio",
"asyncore",
"atexit",
"audioop",
"base64",
"bdb",
"binascii",
"binhex",
"bisect",
"builtins",
"bz2",
"cProfile",
"calendar",
"cgi",
"cgitb",
"chunk",
"cmath",
"cmd",
"code",
"codecs",
"codeop",
"collections",
"colorsys",
"compileall",
"concurrent",
"configparser",
"contextlib",
"contextvars",
"copy",
"copyreg",
"crypt",
"csv",
"ctypes",
"curses",
"dataclasses",
"datetime",
"dbm",
"decimal",
"difflib",
"dis",
"distutils",
"doctest",
"email",
"encodings",
"ensurepip",
"enum",
"errno",
"faulthandler",
"fcntl",
"filecmp",
"fileinput",
"fnmatch",
"formatter",
"fractions",
"ftplib",
"functools",
"gc",
"getopt",
"getpass",
"gettext",
"glob",
"graphlib",
"grp",
"gzip",
"hashlib",
"heapq",
"hmac",
"html",
"http",
"imaplib",
"imghdr",
"imp",
"importlib",
"inspect",
"io",
"ipaddress",
"itertools",
"json",
"keyword",
"lib2to3",
"linecache",
"locale",
"logging",
"lzma",
"mailbox",
"mailcap",
"marshal",
"math",
"mimetypes",
"mmap",
"modulefinder",
"msilib",
"msvcrt",
"multiprocessing",
"netrc",
"nis",
"nntplib",
"ntpath",
"numbers",
"operator",
"optparse",
"os",
"ossaudiodev",
"parser",
"pathlib",
"pdb",
"pickle",
"pickletools",
"pipes",
"pkgutil",
"platform",
"plistlib",
"poplib",
"posix",
"posixpath",
"pprint",
"profile",
"pstats",
"pty",
"pwd",
"py_compile",
"pyclbr",
"pydoc",
"queue",
"quopri",
"random",
"re",
"readline",
"reprlib",
"resource",
"rlcompleter",
"runpy",
"sched",
"secrets",
"select",
"selectors",
"shelve",
"shlex",
"shutil",
"signal",
"site",
"smtpd",
"smtplib",
"sndhdr",
"socket",
"socketserver",
"spwd",
"sqlite3",
"sre",
"sre_compile",
"sre_constants",
"sre_parse",
"ssl",
"stat",
"statistics",
"string",
"stringprep",
"struct",
"subprocess",
"sunau",
"symbol",
"symtable",
"sys",
"sysconfig",
"syslog",
"tabnanny",
"tarfile",
"telnetlib",
"tempfile",
"termios",
"test",
"textwrap",
"threading",
"time",
"timeit",
"tkinter",
"token",
"tokenize",
"trace",
"traceback",
"tracemalloc",
"tty",
"turtle",
"turtledemo",
"types",
"typing",
"unicodedata",
"unittest",
"urllib",
"uu",
"uuid",
"venv",
"warnings",
"wave",
"weakref",
"webbrowser",
"winreg",
"winsound",
"wsgiref",
"xdrlib",
"xml",
"xmlrpc",
"zipapp",
"zipfile",
"zipimport",
"zlib",
"zoneinfo",
}
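# A minimal usage sketch (not part of the generated file): classifying an
# import by checking its top-level package name against the set above.
def _example_is_stdlib(module_name='os.path'):
    return module_name.split('.')[0] in stdlib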
| mit | 5,877,927,733,263,382,000 | 13.775785 | 74 | 0.464036 | false |
olemis/brython | www/src/Lib/encodings/cp861.py | 37 | 35331 | """ Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP861.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp861',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
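# A minimal usage sketch (not part of the generated file): a round trip through
# the Codec class defined above; the Icelandic sample exercises code points
# specific to CP861.
def _example_roundtrip(text='Þórður'):
    encoded, _ = Codec().encode(text)
    decoded, _ = Codec().decode(encoded)
    return decoded == text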
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x0084: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
0x0085: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE
0x0086: 0x00e5, # LATIN SMALL LETTER A WITH RING ABOVE
0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
0x0088: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x0089: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
0x008a: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE
0x008b: 0x00d0, # LATIN CAPITAL LETTER ETH
0x008c: 0x00f0, # LATIN SMALL LETTER ETH
0x008d: 0x00de, # LATIN CAPITAL LETTER THORN
0x008e: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x008f: 0x00c5, # LATIN CAPITAL LETTER A WITH RING ABOVE
0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
0x0091: 0x00e6, # LATIN SMALL LIGATURE AE
0x0092: 0x00c6, # LATIN CAPITAL LIGATURE AE
0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x0094: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
0x0095: 0x00fe, # LATIN SMALL LETTER THORN
0x0096: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x0097: 0x00dd, # LATIN CAPITAL LETTER Y WITH ACUTE
0x0098: 0x00fd, # LATIN SMALL LETTER Y WITH ACUTE
0x0099: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x009b: 0x00f8, # LATIN SMALL LETTER O WITH STROKE
0x009c: 0x00a3, # POUND SIGN
0x009d: 0x00d8, # LATIN CAPITAL LETTER O WITH STROKE
0x009e: 0x20a7, # PESETA SIGN
0x009f: 0x0192, # LATIN SMALL LETTER F WITH HOOK
0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
0x00a4: 0x00c1, # LATIN CAPITAL LETTER A WITH ACUTE
0x00a5: 0x00cd, # LATIN CAPITAL LETTER I WITH ACUTE
0x00a6: 0x00d3, # LATIN CAPITAL LETTER O WITH ACUTE
0x00a7: 0x00da, # LATIN CAPITAL LETTER U WITH ACUTE
0x00a8: 0x00bf, # INVERTED QUESTION MARK
0x00a9: 0x2310, # REVERSED NOT SIGN
0x00aa: 0x00ac, # NOT SIGN
0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER
0x00ad: 0x00a1, # INVERTED EXCLAMATION MARK
0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x258c, # LEFT HALF BLOCK
0x00de: 0x2590, # RIGHT HALF BLOCK
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x03b1, # GREEK SMALL LETTER ALPHA
0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S
0x00e2: 0x0393, # GREEK CAPITAL LETTER GAMMA
0x00e3: 0x03c0, # GREEK SMALL LETTER PI
0x00e4: 0x03a3, # GREEK CAPITAL LETTER SIGMA
0x00e5: 0x03c3, # GREEK SMALL LETTER SIGMA
0x00e6: 0x00b5, # MICRO SIGN
0x00e7: 0x03c4, # GREEK SMALL LETTER TAU
0x00e8: 0x03a6, # GREEK CAPITAL LETTER PHI
0x00e9: 0x0398, # GREEK CAPITAL LETTER THETA
0x00ea: 0x03a9, # GREEK CAPITAL LETTER OMEGA
0x00eb: 0x03b4, # GREEK SMALL LETTER DELTA
0x00ec: 0x221e, # INFINITY
0x00ed: 0x03c6, # GREEK SMALL LETTER PHI
0x00ee: 0x03b5, # GREEK SMALL LETTER EPSILON
0x00ef: 0x2229, # INTERSECTION
0x00f0: 0x2261, # IDENTICAL TO
0x00f1: 0x00b1, # PLUS-MINUS SIGN
0x00f2: 0x2265, # GREATER-THAN OR EQUAL TO
0x00f3: 0x2264, # LESS-THAN OR EQUAL TO
0x00f4: 0x2320, # TOP HALF INTEGRAL
0x00f5: 0x2321, # BOTTOM HALF INTEGRAL
0x00f6: 0x00f7, # DIVISION SIGN
0x00f7: 0x2248, # ALMOST EQUAL TO
0x00f8: 0x00b0, # DEGREE SIGN
0x00f9: 0x2219, # BULLET OPERATOR
0x00fa: 0x00b7, # MIDDLE DOT
0x00fb: 0x221a, # SQUARE ROOT
0x00fc: 0x207f, # SUPERSCRIPT LATIN SMALL LETTER N
0x00fd: 0x00b2, # SUPERSCRIPT TWO
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Decoding Table
decoding_table = (
'\x00' # 0x0000 -> NULL
'\x01' # 0x0001 -> START OF HEADING
'\x02' # 0x0002 -> START OF TEXT
'\x03' # 0x0003 -> END OF TEXT
'\x04' # 0x0004 -> END OF TRANSMISSION
'\x05' # 0x0005 -> ENQUIRY
'\x06' # 0x0006 -> ACKNOWLEDGE
'\x07' # 0x0007 -> BELL
'\x08' # 0x0008 -> BACKSPACE
'\t' # 0x0009 -> HORIZONTAL TABULATION
'\n' # 0x000a -> LINE FEED
'\x0b' # 0x000b -> VERTICAL TABULATION
'\x0c' # 0x000c -> FORM FEED
'\r' # 0x000d -> CARRIAGE RETURN
'\x0e' # 0x000e -> SHIFT OUT
'\x0f' # 0x000f -> SHIFT IN
'\x10' # 0x0010 -> DATA LINK ESCAPE
'\x11' # 0x0011 -> DEVICE CONTROL ONE
'\x12' # 0x0012 -> DEVICE CONTROL TWO
'\x13' # 0x0013 -> DEVICE CONTROL THREE
'\x14' # 0x0014 -> DEVICE CONTROL FOUR
'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x0016 -> SYNCHRONOUS IDLE
'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
'\x18' # 0x0018 -> CANCEL
'\x19' # 0x0019 -> END OF MEDIUM
'\x1a' # 0x001a -> SUBSTITUTE
'\x1b' # 0x001b -> ESCAPE
'\x1c' # 0x001c -> FILE SEPARATOR
'\x1d' # 0x001d -> GROUP SEPARATOR
'\x1e' # 0x001e -> RECORD SEPARATOR
'\x1f' # 0x001f -> UNIT SEPARATOR
' ' # 0x0020 -> SPACE
'!' # 0x0021 -> EXCLAMATION MARK
'"' # 0x0022 -> QUOTATION MARK
'#' # 0x0023 -> NUMBER SIGN
'$' # 0x0024 -> DOLLAR SIGN
'%' # 0x0025 -> PERCENT SIGN
'&' # 0x0026 -> AMPERSAND
"'" # 0x0027 -> APOSTROPHE
'(' # 0x0028 -> LEFT PARENTHESIS
')' # 0x0029 -> RIGHT PARENTHESIS
'*' # 0x002a -> ASTERISK
'+' # 0x002b -> PLUS SIGN
',' # 0x002c -> COMMA
'-' # 0x002d -> HYPHEN-MINUS
'.' # 0x002e -> FULL STOP
'/' # 0x002f -> SOLIDUS
'0' # 0x0030 -> DIGIT ZERO
'1' # 0x0031 -> DIGIT ONE
'2' # 0x0032 -> DIGIT TWO
'3' # 0x0033 -> DIGIT THREE
'4' # 0x0034 -> DIGIT FOUR
'5' # 0x0035 -> DIGIT FIVE
'6' # 0x0036 -> DIGIT SIX
'7' # 0x0037 -> DIGIT SEVEN
'8' # 0x0038 -> DIGIT EIGHT
'9' # 0x0039 -> DIGIT NINE
':' # 0x003a -> COLON
';' # 0x003b -> SEMICOLON
'<' # 0x003c -> LESS-THAN SIGN
'=' # 0x003d -> EQUALS SIGN
'>' # 0x003e -> GREATER-THAN SIGN
'?' # 0x003f -> QUESTION MARK
'@' # 0x0040 -> COMMERCIAL AT
'A' # 0x0041 -> LATIN CAPITAL LETTER A
'B' # 0x0042 -> LATIN CAPITAL LETTER B
'C' # 0x0043 -> LATIN CAPITAL LETTER C
'D' # 0x0044 -> LATIN CAPITAL LETTER D
'E' # 0x0045 -> LATIN CAPITAL LETTER E
'F' # 0x0046 -> LATIN CAPITAL LETTER F
'G' # 0x0047 -> LATIN CAPITAL LETTER G
'H' # 0x0048 -> LATIN CAPITAL LETTER H
'I' # 0x0049 -> LATIN CAPITAL LETTER I
'J' # 0x004a -> LATIN CAPITAL LETTER J
'K' # 0x004b -> LATIN CAPITAL LETTER K
'L' # 0x004c -> LATIN CAPITAL LETTER L
'M' # 0x004d -> LATIN CAPITAL LETTER M
'N' # 0x004e -> LATIN CAPITAL LETTER N
'O' # 0x004f -> LATIN CAPITAL LETTER O
'P' # 0x0050 -> LATIN CAPITAL LETTER P
'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
'R' # 0x0052 -> LATIN CAPITAL LETTER R
'S' # 0x0053 -> LATIN CAPITAL LETTER S
'T' # 0x0054 -> LATIN CAPITAL LETTER T
'U' # 0x0055 -> LATIN CAPITAL LETTER U
'V' # 0x0056 -> LATIN CAPITAL LETTER V
'W' # 0x0057 -> LATIN CAPITAL LETTER W
'X' # 0x0058 -> LATIN CAPITAL LETTER X
'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
'Z' # 0x005a -> LATIN CAPITAL LETTER Z
'[' # 0x005b -> LEFT SQUARE BRACKET
'\\' # 0x005c -> REVERSE SOLIDUS
']' # 0x005d -> RIGHT SQUARE BRACKET
'^' # 0x005e -> CIRCUMFLEX ACCENT
'_' # 0x005f -> LOW LINE
'`' # 0x0060 -> GRAVE ACCENT
'a' # 0x0061 -> LATIN SMALL LETTER A
'b' # 0x0062 -> LATIN SMALL LETTER B
'c' # 0x0063 -> LATIN SMALL LETTER C
'd' # 0x0064 -> LATIN SMALL LETTER D
'e' # 0x0065 -> LATIN SMALL LETTER E
'f' # 0x0066 -> LATIN SMALL LETTER F
'g' # 0x0067 -> LATIN SMALL LETTER G
'h' # 0x0068 -> LATIN SMALL LETTER H
'i' # 0x0069 -> LATIN SMALL LETTER I
'j' # 0x006a -> LATIN SMALL LETTER J
'k' # 0x006b -> LATIN SMALL LETTER K
'l' # 0x006c -> LATIN SMALL LETTER L
'm' # 0x006d -> LATIN SMALL LETTER M
'n' # 0x006e -> LATIN SMALL LETTER N
'o' # 0x006f -> LATIN SMALL LETTER O
'p' # 0x0070 -> LATIN SMALL LETTER P
'q' # 0x0071 -> LATIN SMALL LETTER Q
'r' # 0x0072 -> LATIN SMALL LETTER R
's' # 0x0073 -> LATIN SMALL LETTER S
't' # 0x0074 -> LATIN SMALL LETTER T
'u' # 0x0075 -> LATIN SMALL LETTER U
'v' # 0x0076 -> LATIN SMALL LETTER V
'w' # 0x0077 -> LATIN SMALL LETTER W
'x' # 0x0078 -> LATIN SMALL LETTER X
'y' # 0x0079 -> LATIN SMALL LETTER Y
'z' # 0x007a -> LATIN SMALL LETTER Z
'{' # 0x007b -> LEFT CURLY BRACKET
'|' # 0x007c -> VERTICAL LINE
'}' # 0x007d -> RIGHT CURLY BRACKET
'~' # 0x007e -> TILDE
'\x7f' # 0x007f -> DELETE
'\xc7' # 0x0080 -> LATIN CAPITAL LETTER C WITH CEDILLA
'\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS
'\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE
'\xe2' # 0x0083 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
'\xe4' # 0x0084 -> LATIN SMALL LETTER A WITH DIAERESIS
'\xe0' # 0x0085 -> LATIN SMALL LETTER A WITH GRAVE
'\xe5' # 0x0086 -> LATIN SMALL LETTER A WITH RING ABOVE
'\xe7' # 0x0087 -> LATIN SMALL LETTER C WITH CEDILLA
'\xea' # 0x0088 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
'\xeb' # 0x0089 -> LATIN SMALL LETTER E WITH DIAERESIS
'\xe8' # 0x008a -> LATIN SMALL LETTER E WITH GRAVE
'\xd0' # 0x008b -> LATIN CAPITAL LETTER ETH
'\xf0' # 0x008c -> LATIN SMALL LETTER ETH
'\xde' # 0x008d -> LATIN CAPITAL LETTER THORN
'\xc4' # 0x008e -> LATIN CAPITAL LETTER A WITH DIAERESIS
'\xc5' # 0x008f -> LATIN CAPITAL LETTER A WITH RING ABOVE
'\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE
'\xe6' # 0x0091 -> LATIN SMALL LIGATURE AE
'\xc6' # 0x0092 -> LATIN CAPITAL LIGATURE AE
'\xf4' # 0x0093 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
'\xf6' # 0x0094 -> LATIN SMALL LETTER O WITH DIAERESIS
'\xfe' # 0x0095 -> LATIN SMALL LETTER THORN
'\xfb' # 0x0096 -> LATIN SMALL LETTER U WITH CIRCUMFLEX
'\xdd' # 0x0097 -> LATIN CAPITAL LETTER Y WITH ACUTE
'\xfd' # 0x0098 -> LATIN SMALL LETTER Y WITH ACUTE
'\xd6' # 0x0099 -> LATIN CAPITAL LETTER O WITH DIAERESIS
'\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\xf8' # 0x009b -> LATIN SMALL LETTER O WITH STROKE
'\xa3' # 0x009c -> POUND SIGN
'\xd8' # 0x009d -> LATIN CAPITAL LETTER O WITH STROKE
'\u20a7' # 0x009e -> PESETA SIGN
'\u0192' # 0x009f -> LATIN SMALL LETTER F WITH HOOK
'\xe1' # 0x00a0 -> LATIN SMALL LETTER A WITH ACUTE
'\xed' # 0x00a1 -> LATIN SMALL LETTER I WITH ACUTE
'\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
'\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE
'\xc1' # 0x00a4 -> LATIN CAPITAL LETTER A WITH ACUTE
'\xcd' # 0x00a5 -> LATIN CAPITAL LETTER I WITH ACUTE
'\xd3' # 0x00a6 -> LATIN CAPITAL LETTER O WITH ACUTE
'\xda' # 0x00a7 -> LATIN CAPITAL LETTER U WITH ACUTE
'\xbf' # 0x00a8 -> INVERTED QUESTION MARK
'\u2310' # 0x00a9 -> REVERSED NOT SIGN
'\xac' # 0x00aa -> NOT SIGN
'\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF
'\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER
'\xa1' # 0x00ad -> INVERTED EXCLAMATION MARK
'\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\u2591' # 0x00b0 -> LIGHT SHADE
'\u2592' # 0x00b1 -> MEDIUM SHADE
'\u2593' # 0x00b2 -> DARK SHADE
'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
'\u2561' # 0x00b5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
'\u2562' # 0x00b6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
'\u2556' # 0x00b7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
'\u2555' # 0x00b8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
'\u255c' # 0x00bd -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
'\u255b' # 0x00be -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
'\u255e' # 0x00c6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
'\u255f' # 0x00c7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
'\u2567' # 0x00cf -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
'\u2568' # 0x00d0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
'\u2564' # 0x00d1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
'\u2565' # 0x00d2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
'\u2559' # 0x00d3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
'\u2558' # 0x00d4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
'\u2552' # 0x00d5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
'\u2553' # 0x00d6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
'\u256b' # 0x00d7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
'\u256a' # 0x00d8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
'\u2588' # 0x00db -> FULL BLOCK
'\u2584' # 0x00dc -> LOWER HALF BLOCK
'\u258c' # 0x00dd -> LEFT HALF BLOCK
'\u2590' # 0x00de -> RIGHT HALF BLOCK
'\u2580' # 0x00df -> UPPER HALF BLOCK
'\u03b1' # 0x00e0 -> GREEK SMALL LETTER ALPHA
'\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S
'\u0393' # 0x00e2 -> GREEK CAPITAL LETTER GAMMA
'\u03c0' # 0x00e3 -> GREEK SMALL LETTER PI
'\u03a3' # 0x00e4 -> GREEK CAPITAL LETTER SIGMA
'\u03c3' # 0x00e5 -> GREEK SMALL LETTER SIGMA
'\xb5' # 0x00e6 -> MICRO SIGN
'\u03c4' # 0x00e7 -> GREEK SMALL LETTER TAU
'\u03a6' # 0x00e8 -> GREEK CAPITAL LETTER PHI
'\u0398' # 0x00e9 -> GREEK CAPITAL LETTER THETA
'\u03a9' # 0x00ea -> GREEK CAPITAL LETTER OMEGA
'\u03b4' # 0x00eb -> GREEK SMALL LETTER DELTA
'\u221e' # 0x00ec -> INFINITY
'\u03c6' # 0x00ed -> GREEK SMALL LETTER PHI
'\u03b5' # 0x00ee -> GREEK SMALL LETTER EPSILON
'\u2229' # 0x00ef -> INTERSECTION
'\u2261' # 0x00f0 -> IDENTICAL TO
'\xb1' # 0x00f1 -> PLUS-MINUS SIGN
'\u2265' # 0x00f2 -> GREATER-THAN OR EQUAL TO
'\u2264' # 0x00f3 -> LESS-THAN OR EQUAL TO
'\u2320' # 0x00f4 -> TOP HALF INTEGRAL
'\u2321' # 0x00f5 -> BOTTOM HALF INTEGRAL
'\xf7' # 0x00f6 -> DIVISION SIGN
'\u2248' # 0x00f7 -> ALMOST EQUAL TO
'\xb0' # 0x00f8 -> DEGREE SIGN
'\u2219' # 0x00f9 -> BULLET OPERATOR
'\xb7' # 0x00fa -> MIDDLE DOT
'\u221a' # 0x00fb -> SQUARE ROOT
'\u207f' # 0x00fc -> SUPERSCRIPT LATIN SMALL LETTER N
'\xb2' # 0x00fd -> SUPERSCRIPT TWO
'\u25a0' # 0x00fe -> BLACK SQUARE
'\xa0' # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
encoding_map = {
0x0000: 0x0000, # NULL
0x0001: 0x0001, # START OF HEADING
0x0002: 0x0002, # START OF TEXT
0x0003: 0x0003, # END OF TEXT
0x0004: 0x0004, # END OF TRANSMISSION
0x0005: 0x0005, # ENQUIRY
0x0006: 0x0006, # ACKNOWLEDGE
0x0007: 0x0007, # BELL
0x0008: 0x0008, # BACKSPACE
0x0009: 0x0009, # HORIZONTAL TABULATION
0x000a: 0x000a, # LINE FEED
0x000b: 0x000b, # VERTICAL TABULATION
0x000c: 0x000c, # FORM FEED
0x000d: 0x000d, # CARRIAGE RETURN
0x000e: 0x000e, # SHIFT OUT
0x000f: 0x000f, # SHIFT IN
0x0010: 0x0010, # DATA LINK ESCAPE
0x0011: 0x0011, # DEVICE CONTROL ONE
0x0012: 0x0012, # DEVICE CONTROL TWO
0x0013: 0x0013, # DEVICE CONTROL THREE
0x0014: 0x0014, # DEVICE CONTROL FOUR
0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
0x0016: 0x0016, # SYNCHRONOUS IDLE
0x0017: 0x0017, # END OF TRANSMISSION BLOCK
0x0018: 0x0018, # CANCEL
0x0019: 0x0019, # END OF MEDIUM
0x001a: 0x001a, # SUBSTITUTE
0x001b: 0x001b, # ESCAPE
0x001c: 0x001c, # FILE SEPARATOR
0x001d: 0x001d, # GROUP SEPARATOR
0x001e: 0x001e, # RECORD SEPARATOR
0x001f: 0x001f, # UNIT SEPARATOR
0x0020: 0x0020, # SPACE
0x0021: 0x0021, # EXCLAMATION MARK
0x0022: 0x0022, # QUOTATION MARK
0x0023: 0x0023, # NUMBER SIGN
0x0024: 0x0024, # DOLLAR SIGN
0x0025: 0x0025, # PERCENT SIGN
0x0026: 0x0026, # AMPERSAND
0x0027: 0x0027, # APOSTROPHE
0x0028: 0x0028, # LEFT PARENTHESIS
0x0029: 0x0029, # RIGHT PARENTHESIS
0x002a: 0x002a, # ASTERISK
0x002b: 0x002b, # PLUS SIGN
0x002c: 0x002c, # COMMA
0x002d: 0x002d, # HYPHEN-MINUS
0x002e: 0x002e, # FULL STOP
0x002f: 0x002f, # SOLIDUS
0x0030: 0x0030, # DIGIT ZERO
0x0031: 0x0031, # DIGIT ONE
0x0032: 0x0032, # DIGIT TWO
0x0033: 0x0033, # DIGIT THREE
0x0034: 0x0034, # DIGIT FOUR
0x0035: 0x0035, # DIGIT FIVE
0x0036: 0x0036, # DIGIT SIX
0x0037: 0x0037, # DIGIT SEVEN
0x0038: 0x0038, # DIGIT EIGHT
0x0039: 0x0039, # DIGIT NINE
0x003a: 0x003a, # COLON
0x003b: 0x003b, # SEMICOLON
0x003c: 0x003c, # LESS-THAN SIGN
0x003d: 0x003d, # EQUALS SIGN
0x003e: 0x003e, # GREATER-THAN SIGN
0x003f: 0x003f, # QUESTION MARK
0x0040: 0x0040, # COMMERCIAL AT
0x0041: 0x0041, # LATIN CAPITAL LETTER A
0x0042: 0x0042, # LATIN CAPITAL LETTER B
0x0043: 0x0043, # LATIN CAPITAL LETTER C
0x0044: 0x0044, # LATIN CAPITAL LETTER D
0x0045: 0x0045, # LATIN CAPITAL LETTER E
0x0046: 0x0046, # LATIN CAPITAL LETTER F
0x0047: 0x0047, # LATIN CAPITAL LETTER G
0x0048: 0x0048, # LATIN CAPITAL LETTER H
0x0049: 0x0049, # LATIN CAPITAL LETTER I
0x004a: 0x004a, # LATIN CAPITAL LETTER J
0x004b: 0x004b, # LATIN CAPITAL LETTER K
0x004c: 0x004c, # LATIN CAPITAL LETTER L
0x004d: 0x004d, # LATIN CAPITAL LETTER M
0x004e: 0x004e, # LATIN CAPITAL LETTER N
0x004f: 0x004f, # LATIN CAPITAL LETTER O
0x0050: 0x0050, # LATIN CAPITAL LETTER P
0x0051: 0x0051, # LATIN CAPITAL LETTER Q
0x0052: 0x0052, # LATIN CAPITAL LETTER R
0x0053: 0x0053, # LATIN CAPITAL LETTER S
0x0054: 0x0054, # LATIN CAPITAL LETTER T
0x0055: 0x0055, # LATIN CAPITAL LETTER U
0x0056: 0x0056, # LATIN CAPITAL LETTER V
0x0057: 0x0057, # LATIN CAPITAL LETTER W
0x0058: 0x0058, # LATIN CAPITAL LETTER X
0x0059: 0x0059, # LATIN CAPITAL LETTER Y
0x005a: 0x005a, # LATIN CAPITAL LETTER Z
0x005b: 0x005b, # LEFT SQUARE BRACKET
0x005c: 0x005c, # REVERSE SOLIDUS
0x005d: 0x005d, # RIGHT SQUARE BRACKET
0x005e: 0x005e, # CIRCUMFLEX ACCENT
0x005f: 0x005f, # LOW LINE
0x0060: 0x0060, # GRAVE ACCENT
0x0061: 0x0061, # LATIN SMALL LETTER A
0x0062: 0x0062, # LATIN SMALL LETTER B
0x0063: 0x0063, # LATIN SMALL LETTER C
0x0064: 0x0064, # LATIN SMALL LETTER D
0x0065: 0x0065, # LATIN SMALL LETTER E
0x0066: 0x0066, # LATIN SMALL LETTER F
0x0067: 0x0067, # LATIN SMALL LETTER G
0x0068: 0x0068, # LATIN SMALL LETTER H
0x0069: 0x0069, # LATIN SMALL LETTER I
0x006a: 0x006a, # LATIN SMALL LETTER J
0x006b: 0x006b, # LATIN SMALL LETTER K
0x006c: 0x006c, # LATIN SMALL LETTER L
0x006d: 0x006d, # LATIN SMALL LETTER M
0x006e: 0x006e, # LATIN SMALL LETTER N
0x006f: 0x006f, # LATIN SMALL LETTER O
0x0070: 0x0070, # LATIN SMALL LETTER P
0x0071: 0x0071, # LATIN SMALL LETTER Q
0x0072: 0x0072, # LATIN SMALL LETTER R
0x0073: 0x0073, # LATIN SMALL LETTER S
0x0074: 0x0074, # LATIN SMALL LETTER T
0x0075: 0x0075, # LATIN SMALL LETTER U
0x0076: 0x0076, # LATIN SMALL LETTER V
0x0077: 0x0077, # LATIN SMALL LETTER W
0x0078: 0x0078, # LATIN SMALL LETTER X
0x0079: 0x0079, # LATIN SMALL LETTER Y
0x007a: 0x007a, # LATIN SMALL LETTER Z
0x007b: 0x007b, # LEFT CURLY BRACKET
0x007c: 0x007c, # VERTICAL LINE
0x007d: 0x007d, # RIGHT CURLY BRACKET
0x007e: 0x007e, # TILDE
0x007f: 0x007f, # DELETE
0x00a0: 0x00ff, # NO-BREAK SPACE
0x00a1: 0x00ad, # INVERTED EXCLAMATION MARK
0x00a3: 0x009c, # POUND SIGN
0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00ac: 0x00aa, # NOT SIGN
0x00b0: 0x00f8, # DEGREE SIGN
0x00b1: 0x00f1, # PLUS-MINUS SIGN
0x00b2: 0x00fd, # SUPERSCRIPT TWO
0x00b5: 0x00e6, # MICRO SIGN
0x00b7: 0x00fa, # MIDDLE DOT
0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00bc: 0x00ac, # VULGAR FRACTION ONE QUARTER
0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF
0x00bf: 0x00a8, # INVERTED QUESTION MARK
0x00c1: 0x00a4, # LATIN CAPITAL LETTER A WITH ACUTE
0x00c4: 0x008e, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x00c5: 0x008f, # LATIN CAPITAL LETTER A WITH RING ABOVE
0x00c6: 0x0092, # LATIN CAPITAL LIGATURE AE
0x00c7: 0x0080, # LATIN CAPITAL LETTER C WITH CEDILLA
0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE
0x00cd: 0x00a5, # LATIN CAPITAL LETTER I WITH ACUTE
0x00d0: 0x008b, # LATIN CAPITAL LETTER ETH
0x00d3: 0x00a6, # LATIN CAPITAL LETTER O WITH ACUTE
0x00d6: 0x0099, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x00d8: 0x009d, # LATIN CAPITAL LETTER O WITH STROKE
0x00da: 0x00a7, # LATIN CAPITAL LETTER U WITH ACUTE
0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x00dd: 0x0097, # LATIN CAPITAL LETTER Y WITH ACUTE
0x00de: 0x008d, # LATIN CAPITAL LETTER THORN
0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S
0x00e0: 0x0085, # LATIN SMALL LETTER A WITH GRAVE
0x00e1: 0x00a0, # LATIN SMALL LETTER A WITH ACUTE
0x00e2: 0x0083, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x00e4: 0x0084, # LATIN SMALL LETTER A WITH DIAERESIS
0x00e5: 0x0086, # LATIN SMALL LETTER A WITH RING ABOVE
0x00e6: 0x0091, # LATIN SMALL LIGATURE AE
0x00e7: 0x0087, # LATIN SMALL LETTER C WITH CEDILLA
0x00e8: 0x008a, # LATIN SMALL LETTER E WITH GRAVE
0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE
0x00ea: 0x0088, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x00eb: 0x0089, # LATIN SMALL LETTER E WITH DIAERESIS
0x00ed: 0x00a1, # LATIN SMALL LETTER I WITH ACUTE
0x00f0: 0x008c, # LATIN SMALL LETTER ETH
0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE
0x00f4: 0x0093, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x00f6: 0x0094, # LATIN SMALL LETTER O WITH DIAERESIS
0x00f7: 0x00f6, # DIVISION SIGN
0x00f8: 0x009b, # LATIN SMALL LETTER O WITH STROKE
0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE
0x00fb: 0x0096, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS
0x00fd: 0x0098, # LATIN SMALL LETTER Y WITH ACUTE
0x00fe: 0x0095, # LATIN SMALL LETTER THORN
0x0192: 0x009f, # LATIN SMALL LETTER F WITH HOOK
0x0393: 0x00e2, # GREEK CAPITAL LETTER GAMMA
0x0398: 0x00e9, # GREEK CAPITAL LETTER THETA
0x03a3: 0x00e4, # GREEK CAPITAL LETTER SIGMA
0x03a6: 0x00e8, # GREEK CAPITAL LETTER PHI
0x03a9: 0x00ea, # GREEK CAPITAL LETTER OMEGA
0x03b1: 0x00e0, # GREEK SMALL LETTER ALPHA
0x03b4: 0x00eb, # GREEK SMALL LETTER DELTA
0x03b5: 0x00ee, # GREEK SMALL LETTER EPSILON
0x03c0: 0x00e3, # GREEK SMALL LETTER PI
0x03c3: 0x00e5, # GREEK SMALL LETTER SIGMA
0x03c4: 0x00e7, # GREEK SMALL LETTER TAU
0x03c6: 0x00ed, # GREEK SMALL LETTER PHI
0x207f: 0x00fc, # SUPERSCRIPT LATIN SMALL LETTER N
0x20a7: 0x009e, # PESETA SIGN
0x2219: 0x00f9, # BULLET OPERATOR
0x221a: 0x00fb, # SQUARE ROOT
0x221e: 0x00ec, # INFINITY
0x2229: 0x00ef, # INTERSECTION
0x2248: 0x00f7, # ALMOST EQUAL TO
0x2261: 0x00f0, # IDENTICAL TO
0x2264: 0x00f3, # LESS-THAN OR EQUAL TO
0x2265: 0x00f2, # GREATER-THAN OR EQUAL TO
0x2310: 0x00a9, # REVERSED NOT SIGN
0x2320: 0x00f4, # TOP HALF INTEGRAL
0x2321: 0x00f5, # BOTTOM HALF INTEGRAL
0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
0x2552: 0x00d5, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x2553: 0x00d6, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x2555: 0x00b8, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x2556: 0x00b7, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x2558: 0x00d4, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x2559: 0x00d3, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x255b: 0x00be, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x255c: 0x00bd, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
0x255e: 0x00c6, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x255f: 0x00c7, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x2561: 0x00b5, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x2562: 0x00b6, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x2564: 0x00d1, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x2565: 0x00d2, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x2567: 0x00cf, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x2568: 0x00d0, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x256a: 0x00d8, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x256b: 0x00d7, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x2580: 0x00df, # UPPER HALF BLOCK
0x2584: 0x00dc, # LOWER HALF BLOCK
0x2588: 0x00db, # FULL BLOCK
0x258c: 0x00dd, # LEFT HALF BLOCK
0x2590: 0x00de, # RIGHT HALF BLOCK
0x2591: 0x00b0, # LIGHT SHADE
0x2592: 0x00b1, # MEDIUM SHADE
0x2593: 0x00b2, # DARK SHADE
0x25a0: 0x00fe, # BLACK SQUARE
}
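# A minimal usage sketch (hedged): generated charmap codec modules of this
# layout are normally consumed through codecs.charmap_decode/charmap_encode.
# It assumes the character table that closes above is bound to the
# conventional decoding_table name and that the Codec class / getregentry()
# are defined earlier in this module.
#
#     import codecs
#     text, _ = codecs.charmap_decode(b'\x9b\xe1', 'strict', decoding_table)
#     data, _ = codecs.charmap_encode(text, 'strict', encoding_map)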
| bsd-3-clause | -6,628,472,447,995,054,000 | 48.617479 | 97 | 0.592737 | false |
entwanne/Rodolphe | rodolphe/main/views/tag.py | 2 | 1805 | from django.shortcuts import render_to_response
from django.db.models import Q
from django.template import RequestContext
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.utils.translation import ugettext as _
from main.models import Post, Tag
from main.forms import PostForm
from utils.search import get_search
from utils.tags import TagsSet
import re
from collections import defaultdict
from urllib.parse import urlencode
def index(request):
indexed_tags = defaultdict(list)
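    # Group tag names by their upper-cased first letter, then build an
    # alphabetically sorted list of (letter, sorted_tag_names) pairs for the
    # template to render as an index.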
for tag in Tag.objects.all():
first_letter = tag.name[0].upper()
indexed_tags[first_letter].append(tag.name)
sorted_tags = []
for letter, tags in sorted(indexed_tags.items()):
sorted_tags.append((letter, sorted(tags)))
context = RequestContext(request, {
'indexed_tags': sorted_tags
})
return render_to_response('tags.html', context)
def search(request, pattern):
tags = TagsSet.from_string(pattern)
q, search = get_search(request)
regex = r'(\s|\A)\.{}(\W|\Z)'
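    # A tag appears in post content as ".<name>": it must be preceded by
    # whitespace or the start of the text and followed by a non-word character
    # or the end, so ".foo" matches while "bar.foo" does not.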
for tag in tags:
q &= Q(content__iregex=regex.format(re.escape(tag)))
for tag in tags.iter_exclude():
q &= ~Q(content__iregex=regex.format(re.escape(tag)))
paginator = Paginator(Post.objects.filter(q, active=True)
.order_by('-created_at'), 10)
page_id = request.GET.get('page')
try:
posts = paginator.page(page_id)
except PageNotAnInteger:
posts = paginator.page(1)
except EmptyPage:
posts = paginator.page(paginator.num_pages)
context = RequestContext(request, {
'page': posts,
'form': PostForm(),
'title': '{} - {}'.format(_('tag'), pattern),
'search': search
})
return render_to_response('index.html', context)
| bsd-2-clause | -4,771,190,324,839,397,000 | 32.425926 | 72 | 0.660942 | false |
KyoungRan/Django_React_ex | Django_React_Workshop-mbrochh/django/myvenv/lib/python3.4/site-packages/pip/_vendor/progress/helpers.py | 521 | 2854 | # Copyright (c) 2012 Giorgos Verigakis <[email protected]>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from __future__ import print_function
HIDE_CURSOR = '\x1b[?25l'
SHOW_CURSOR = '\x1b[?25h'
class WriteMixin(object):
hide_cursor = False
def __init__(self, message=None, **kwargs):
super(WriteMixin, self).__init__(**kwargs)
self._width = 0
if message:
self.message = message
if self.file.isatty():
if self.hide_cursor:
print(HIDE_CURSOR, end='', file=self.file)
print(self.message, end='', file=self.file)
self.file.flush()
def write(self, s):
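        # Rewrite the current line in place: back up over what was written
        # before with backspaces, then left-justify the new text to the widest
        # width seen so far so shorter strings erase any leftover characters.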
if self.file.isatty():
b = '\b' * self._width
c = s.ljust(self._width)
print(b + c, end='', file=self.file)
self._width = max(self._width, len(s))
self.file.flush()
def finish(self):
if self.file.isatty() and self.hide_cursor:
print(SHOW_CURSOR, end='', file=self.file)
class WritelnMixin(object):
hide_cursor = False
def __init__(self, message=None, **kwargs):
super(WritelnMixin, self).__init__(**kwargs)
if message:
self.message = message
if self.file.isatty() and self.hide_cursor:
print(HIDE_CURSOR, end='', file=self.file)
def clearln(self):
if self.file.isatty():
print('\r\x1b[K', end='', file=self.file)
def writeln(self, line):
if self.file.isatty():
self.clearln()
print(line, end='', file=self.file)
self.file.flush()
def finish(self):
if self.file.isatty():
print(file=self.file)
if self.hide_cursor:
print(SHOW_CURSOR, end='', file=self.file)
from signal import signal, SIGINT
from sys import exit
class SigIntMixin(object):
"""Registers a signal handler that calls finish on SIGINT"""
def __init__(self, *args, **kwargs):
super(SigIntMixin, self).__init__(*args, **kwargs)
signal(SIGINT, self._sigint_handler)
def _sigint_handler(self, signum, frame):
self.finish()
exit(0)
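# A hedged sketch of how these mixins are typically combined in the `progress`
# package (the Infinite base class and its `index` counter are assumed from
# progress/__init__.py and are not defined in this file):
#
#     class Counter(WriteMixin, Infinite):
#         message = ''
#         hide_cursor = True
#         def update(self):
#             self.write(str(self.index))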
| mit | -7,695,649,357,062,729,000 | 30.362637 | 74 | 0.616328 | false |
BigRefT/bionic_cms | vendor/plugins/bionic-fckeditor/public/javascripts/fckeditor/editor/filemanager/connectors/py/zope.py | 89 | 5685 | #!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2009 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
Connector for Python and Zope.
This code was not tested at all.
It just was ported from pre 2.5 release, so for further reference see
\editor\filemanager\browser\default\connectors\py\connector.py in previous
releases.
"""
from fckutil import *
from connector import *
import config as Config
class FCKeditorConnectorZope(FCKeditorConnector):
"""
Zope versiof FCKeditorConnector
"""
# Allow access (Zope)
__allow_access_to_unprotected_subobjects__ = 1
def __init__(self, context=None):
"""
Constructor
"""
FCKeditorConnector.__init__(self, environ=None) # call superclass constructor
# Instance Attributes
self.context = context
self.request = FCKeditorRequest(context)
def getZopeRootContext(self):
if self.zopeRootContext is None:
self.zopeRootContext = self.context.getPhysicalRoot()
return self.zopeRootContext
def getZopeUploadContext(self):
if self.zopeUploadContext is None:
folderNames = self.userFilesFolder.split("/")
c = self.getZopeRootContext()
for folderName in folderNames:
if (folderName <> ""):
c = c[folderName]
self.zopeUploadContext = c
return self.zopeUploadContext
def setHeader(self, key, value):
self.context.REQUEST.RESPONSE.setHeader(key, value)
def getFolders(self, resourceType, currentFolder):
# Open the folders node
s = ""
s += """<Folders>"""
zopeFolder = self.findZopeFolder(resourceType, currentFolder)
for (name, o) in zopeFolder.objectItems(["Folder"]):
s += """<Folder name="%s" />""" % (
convertToXmlAttribute(name)
)
# Close the folders node
s += """</Folders>"""
return s
def getZopeFoldersAndFiles(self, resourceType, currentFolder):
		folders = self.getFolders(resourceType, currentFolder)
files = self.getZopeFiles(resourceType, currentFolder)
s = folders + files
return s
def getZopeFiles(self, resourceType, currentFolder):
# Open the files node
s = ""
s += """<Files>"""
zopeFolder = self.findZopeFolder(resourceType, currentFolder)
for (name, o) in zopeFolder.objectItems(["File","Image"]):
s += """<File name="%s" size="%s" />""" % (
convertToXmlAttribute(name),
((o.get_size() / 1024) + 1)
)
# Close the files node
s += """</Files>"""
return s
def findZopeFolder(self, resourceType, folderName):
# returns the context of the resource / folder
zopeFolder = self.getZopeUploadContext()
folderName = self.removeFromStart(folderName, "/")
folderName = self.removeFromEnd(folderName, "/")
if (resourceType <> ""):
try:
zopeFolder = zopeFolder[resourceType]
except:
zopeFolder.manage_addProduct["OFSP"].manage_addFolder(id=resourceType, title=resourceType)
zopeFolder = zopeFolder[resourceType]
if (folderName <> ""):
folderNames = folderName.split("/")
for folderName in folderNames:
zopeFolder = zopeFolder[folderName]
return zopeFolder
def createFolder(self, resourceType, currentFolder):
# Find out where we are
zopeFolder = self.findZopeFolder(resourceType, currentFolder)
errorNo = 0
errorMsg = ""
if self.request.has_key("NewFolderName"):
newFolder = self.request.get("NewFolderName", None)
zopeFolder.manage_addProduct["OFSP"].manage_addFolder(id=newFolder, title=newFolder)
else:
errorNo = 102
return self.sendErrorNode ( errorNo, errorMsg )
def uploadFile(self, resourceType, currentFolder, count=None):
zopeFolder = self.findZopeFolder(resourceType, currentFolder)
file = self.request.get("NewFile", None)
fileName = self.getFileName(file.filename)
fileNameOnly = self.removeExtension(fileName)
fileExtension = self.getExtension(fileName).lower()
if (count):
nid = "%s.%s.%s" % (fileNameOnly, count, fileExtension)
else:
nid = fileName
title = nid
try:
zopeFolder.manage_addProduct['OFSP'].manage_addFile(
id=nid,
title=title,
file=file.read()
)
except:
if (count):
count += 1
else:
count = 1
			return self.uploadFile(resourceType, currentFolder, count)
return self.sendUploadResults( 0 )
class FCKeditorRequest(object):
"A wrapper around the request object"
def __init__(self, context=None):
r = context.REQUEST
self.request = r
def has_key(self, key):
return self.request.has_key(key)
def get(self, key, default=None):
return self.request.get(key, default)
"""
Running from zope, you will need to modify this connector.
If you have uploaded the FCKeditor into Zope (like me), you need to
move this connector out of Zope, and replace the "connector" with an
alias as below. The key to it is to pass the Zope context in, as
we then have a like to the Zope context.
## Script (Python) "connector.py"
##bind container=container
##bind context=context
##bind namespace=
##bind script=script
##bind subpath=traverse_subpath
##parameters=*args, **kws
##title=ALIAS
##
import Products.zope as connector
return connector.FCKeditorConnectorZope(context=context).doResponse()
"""
| mit | 7,320,153,503,641,974,000 | 28.239362 | 94 | 0.692348 | false |
Simran-B/arangodb | 3rdParty/V8-4.3.61/third_party/python_26/Lib/test/test_textwrap.py | 55 | 23220 | #
# Test suite for the textwrap module.
#
# Original tests written by Greg Ward <[email protected]>.
# Converted to PyUnit by Peter Hansen <[email protected]>.
# Currently maintained by Greg Ward.
#
# $Id: test_textwrap.py 67896 2008-12-21 17:01:26Z benjamin.peterson $
#
import unittest
from test import test_support
from textwrap import TextWrapper, wrap, fill, dedent
class BaseTestCase(unittest.TestCase):
'''Parent class with utility methods for textwrap tests.'''
def show(self, textin):
if isinstance(textin, list):
result = []
for i in range(len(textin)):
result.append(" %d: %r" % (i, textin[i]))
result = '\n'.join(result)
elif isinstance(textin, basestring):
result = " %s\n" % repr(textin)
return result
def check(self, result, expect):
self.assertEquals(result, expect,
'expected:\n%s\nbut got:\n%s' % (
self.show(expect), self.show(result)))
def check_wrap(self, text, width, expect, **kwargs):
result = wrap(text, width, **kwargs)
self.check(result, expect)
def check_split(self, text, expect):
result = self.wrapper._split(text)
self.assertEquals(result, expect,
"\nexpected %r\n"
"but got %r" % (expect, result))
class WrapTestCase(BaseTestCase):
def setUp(self):
self.wrapper = TextWrapper(width=45)
def test_simple(self):
# Simple case: just words, spaces, and a bit of punctuation
text = "Hello there, how are you this fine day? I'm glad to hear it!"
self.check_wrap(text, 12,
["Hello there,",
"how are you",
"this fine",
"day? I'm",
"glad to hear",
"it!"])
self.check_wrap(text, 42,
["Hello there, how are you this fine day?",
"I'm glad to hear it!"])
self.check_wrap(text, 80, [text])
def test_whitespace(self):
# Whitespace munging and end-of-sentence detection
text = """\
This is a paragraph that already has
line breaks. But some of its lines are much longer than the others,
so it needs to be wrapped.
Some lines are \ttabbed too.
What a mess!
"""
expect = ["This is a paragraph that already has line",
"breaks. But some of its lines are much",
"longer than the others, so it needs to be",
"wrapped. Some lines are tabbed too. What a",
"mess!"]
wrapper = TextWrapper(45, fix_sentence_endings=True)
result = wrapper.wrap(text)
self.check(result, expect)
result = wrapper.fill(text)
self.check(result, '\n'.join(expect))
def test_fix_sentence_endings(self):
wrapper = TextWrapper(60, fix_sentence_endings=True)
# SF #847346: ensure that fix_sentence_endings=True does the
# right thing even on input short enough that it doesn't need to
# be wrapped.
text = "A short line. Note the single space."
expect = ["A short line. Note the single space."]
self.check(wrapper.wrap(text), expect)
# Test some of the hairy end cases that _fix_sentence_endings()
# is supposed to handle (the easy stuff is tested in
# test_whitespace() above).
text = "Well, Doctor? What do you think?"
expect = ["Well, Doctor? What do you think?"]
self.check(wrapper.wrap(text), expect)
text = "Well, Doctor?\nWhat do you think?"
self.check(wrapper.wrap(text), expect)
text = 'I say, chaps! Anyone for "tennis?"\nHmmph!'
expect = ['I say, chaps! Anyone for "tennis?" Hmmph!']
self.check(wrapper.wrap(text), expect)
wrapper.width = 20
expect = ['I say, chaps!', 'Anyone for "tennis?"', 'Hmmph!']
self.check(wrapper.wrap(text), expect)
text = 'And she said, "Go to hell!"\nCan you believe that?'
expect = ['And she said, "Go to',
'hell!" Can you',
'believe that?']
self.check(wrapper.wrap(text), expect)
wrapper.width = 60
expect = ['And she said, "Go to hell!" Can you believe that?']
self.check(wrapper.wrap(text), expect)
text = 'File stdio.h is nice.'
expect = ['File stdio.h is nice.']
self.check(wrapper.wrap(text), expect)
def test_wrap_short(self):
# Wrapping to make short lines longer
text = "This is a\nshort paragraph."
self.check_wrap(text, 20, ["This is a short",
"paragraph."])
self.check_wrap(text, 40, ["This is a short paragraph."])
def test_wrap_short_1line(self):
# Test endcases
text = "This is a short line."
self.check_wrap(text, 30, ["This is a short line."])
self.check_wrap(text, 30, ["(1) This is a short line."],
initial_indent="(1) ")
def test_hyphenated(self):
# Test breaking hyphenated words
text = ("this-is-a-useful-feature-for-"
"reformatting-posts-from-tim-peters'ly")
self.check_wrap(text, 40,
["this-is-a-useful-feature-for-",
"reformatting-posts-from-tim-peters'ly"])
self.check_wrap(text, 41,
["this-is-a-useful-feature-for-",
"reformatting-posts-from-tim-peters'ly"])
self.check_wrap(text, 42,
["this-is-a-useful-feature-for-reformatting-",
"posts-from-tim-peters'ly"])
def test_hyphenated_numbers(self):
# Test that hyphenated numbers (eg. dates) are not broken like words.
text = ("Python 1.0.0 was released on 1994-01-26. Python 1.0.1 was\n"
"released on 1994-02-15.")
self.check_wrap(text, 35, ['Python 1.0.0 was released on',
'1994-01-26. Python 1.0.1 was',
'released on 1994-02-15.'])
self.check_wrap(text, 40, ['Python 1.0.0 was released on 1994-01-26.',
'Python 1.0.1 was released on 1994-02-15.'])
text = "I do all my shopping at 7-11."
self.check_wrap(text, 25, ["I do all my shopping at",
"7-11."])
self.check_wrap(text, 27, ["I do all my shopping at",
"7-11."])
self.check_wrap(text, 29, ["I do all my shopping at 7-11."])
def test_em_dash(self):
# Test text with em-dashes
text = "Em-dashes should be written -- thus."
self.check_wrap(text, 25,
["Em-dashes should be",
"written -- thus."])
# Probe the boundaries of the properly written em-dash,
# ie. " -- ".
self.check_wrap(text, 29,
["Em-dashes should be written",
"-- thus."])
expect = ["Em-dashes should be written --",
"thus."]
self.check_wrap(text, 30, expect)
self.check_wrap(text, 35, expect)
self.check_wrap(text, 36,
["Em-dashes should be written -- thus."])
# The improperly written em-dash is handled too, because
# it's adjacent to non-whitespace on both sides.
text = "You can also do--this or even---this."
expect = ["You can also do",
"--this or even",
"---this."]
self.check_wrap(text, 15, expect)
self.check_wrap(text, 16, expect)
expect = ["You can also do--",
"this or even---",
"this."]
self.check_wrap(text, 17, expect)
self.check_wrap(text, 19, expect)
expect = ["You can also do--this or even",
"---this."]
self.check_wrap(text, 29, expect)
self.check_wrap(text, 31, expect)
expect = ["You can also do--this or even---",
"this."]
self.check_wrap(text, 32, expect)
self.check_wrap(text, 35, expect)
# All of the above behaviour could be deduced by probing the
# _split() method.
text = "Here's an -- em-dash and--here's another---and another!"
expect = ["Here's", " ", "an", " ", "--", " ", "em-", "dash", " ",
"and", "--", "here's", " ", "another", "---",
"and", " ", "another!"]
self.check_split(text, expect)
text = "and then--bam!--he was gone"
expect = ["and", " ", "then", "--", "bam!", "--",
"he", " ", "was", " ", "gone"]
self.check_split(text, expect)
def test_unix_options (self):
# Test that Unix-style command-line options are wrapped correctly.
# Both Optik (OptionParser) and Docutils rely on this behaviour!
text = "You should use the -n option, or --dry-run in its long form."
self.check_wrap(text, 20,
["You should use the",
"-n option, or --dry-",
"run in its long",
"form."])
self.check_wrap(text, 21,
["You should use the -n",
"option, or --dry-run",
"in its long form."])
expect = ["You should use the -n option, or",
"--dry-run in its long form."]
self.check_wrap(text, 32, expect)
self.check_wrap(text, 34, expect)
self.check_wrap(text, 35, expect)
self.check_wrap(text, 38, expect)
expect = ["You should use the -n option, or --dry-",
"run in its long form."]
self.check_wrap(text, 39, expect)
self.check_wrap(text, 41, expect)
expect = ["You should use the -n option, or --dry-run",
"in its long form."]
self.check_wrap(text, 42, expect)
# Again, all of the above can be deduced from _split().
text = "the -n option, or --dry-run or --dryrun"
expect = ["the", " ", "-n", " ", "option,", " ", "or", " ",
"--dry-", "run", " ", "or", " ", "--dryrun"]
self.check_split(text, expect)
def test_funky_hyphens (self):
# Screwy edge cases cooked up by David Goodger. All reported
# in SF bug #596434.
self.check_split("what the--hey!", ["what", " ", "the", "--", "hey!"])
self.check_split("what the--", ["what", " ", "the--"])
self.check_split("what the--.", ["what", " ", "the--."])
self.check_split("--text--.", ["--text--."])
# When I first read bug #596434, this is what I thought David
# was talking about. I was wrong; these have always worked
# fine. The real problem is tested in test_funky_parens()
# below...
self.check_split("--option", ["--option"])
self.check_split("--option-opt", ["--option-", "opt"])
self.check_split("foo --option-opt bar",
["foo", " ", "--option-", "opt", " ", "bar"])
def test_punct_hyphens(self):
# Oh bother, SF #965425 found another problem with hyphens --
# hyphenated words in single quotes weren't handled correctly.
# In fact, the bug is that *any* punctuation around a hyphenated
# word was handled incorrectly, except for a leading "--", which
# was special-cased for Optik and Docutils. So test a variety
# of styles of punctuation around a hyphenated word.
# (Actually this is based on an Optik bug report, #813077).
self.check_split("the 'wibble-wobble' widget",
['the', ' ', "'wibble-", "wobble'", ' ', 'widget'])
self.check_split('the "wibble-wobble" widget',
['the', ' ', '"wibble-', 'wobble"', ' ', 'widget'])
self.check_split("the (wibble-wobble) widget",
['the', ' ', "(wibble-", "wobble)", ' ', 'widget'])
self.check_split("the ['wibble-wobble'] widget",
['the', ' ', "['wibble-", "wobble']", ' ', 'widget'])
def test_funky_parens (self):
# Second part of SF bug #596434: long option strings inside
# parentheses.
self.check_split("foo (--option) bar",
["foo", " ", "(--option)", " ", "bar"])
# Related stuff -- make sure parens work in simpler contexts.
self.check_split("foo (bar) baz",
["foo", " ", "(bar)", " ", "baz"])
self.check_split("blah (ding dong), wubba",
["blah", " ", "(ding", " ", "dong),",
" ", "wubba"])
def test_initial_whitespace(self):
# SF bug #622849 reported inconsistent handling of leading
# whitespace; let's test that a bit, shall we?
text = " This is a sentence with leading whitespace."
self.check_wrap(text, 50,
[" This is a sentence with leading whitespace."])
self.check_wrap(text, 30,
[" This is a sentence with", "leading whitespace."])
def test_no_drop_whitespace(self):
# SF patch #1581073
text = " This is a sentence with much whitespace."
self.check_wrap(text, 10,
[" This is a", " ", "sentence ",
"with ", "much white", "space."],
drop_whitespace=False)
if test_support.have_unicode:
def test_unicode(self):
# *Very* simple test of wrapping Unicode strings. I'm sure
# there's more to it than this, but let's at least make
# sure textwrap doesn't crash on Unicode input!
text = u"Hello there, how are you today?"
self.check_wrap(text, 50, [u"Hello there, how are you today?"])
self.check_wrap(text, 20, [u"Hello there, how are", "you today?"])
olines = self.wrapper.wrap(text)
assert isinstance(olines, list) and isinstance(olines[0], unicode)
otext = self.wrapper.fill(text)
assert isinstance(otext, unicode)
def test_no_split_at_umlaut(self):
text = u"Die Empf\xe4nger-Auswahl"
self.check_wrap(text, 13, [u"Die", u"Empf\xe4nger-", u"Auswahl"])
def test_umlaut_followed_by_dash(self):
text = u"aa \xe4\xe4-\xe4\xe4"
self.check_wrap(text, 7, [u"aa \xe4\xe4-", u"\xe4\xe4"])
def test_split(self):
# Ensure that the standard _split() method works as advertised
# in the comments
text = "Hello there -- you goof-ball, use the -b option!"
result = self.wrapper._split(text)
self.check(result,
["Hello", " ", "there", " ", "--", " ", "you", " ", "goof-",
"ball,", " ", "use", " ", "the", " ", "-b", " ", "option!"])
def test_break_on_hyphens(self):
# Ensure that the break_on_hyphens attributes work
text = "yaba daba-doo"
self.check_wrap(text, 10, ["yaba daba-", "doo"],
break_on_hyphens=True)
self.check_wrap(text, 10, ["yaba", "daba-doo"],
break_on_hyphens=False)
def test_bad_width(self):
# Ensure that width <= 0 is caught.
text = "Whatever, it doesn't matter."
self.assertRaises(ValueError, wrap, text, 0)
self.assertRaises(ValueError, wrap, text, -1)
class LongWordTestCase (BaseTestCase):
def setUp(self):
self.wrapper = TextWrapper()
self.text = '''\
Did you say "supercalifragilisticexpialidocious?"
How *do* you spell that odd word, anyways?
'''
def test_break_long(self):
# Wrap text with long words and lots of punctuation
self.check_wrap(self.text, 30,
['Did you say "supercalifragilis',
'ticexpialidocious?" How *do*',
'you spell that odd word,',
'anyways?'])
self.check_wrap(self.text, 50,
['Did you say "supercalifragilisticexpialidocious?"',
'How *do* you spell that odd word, anyways?'])
# SF bug 797650. Prevent an infinite loop by making sure that at
# least one character gets split off on every pass.
self.check_wrap('-'*10+'hello', 10,
['----------',
' h',
' e',
' l',
' l',
' o'],
subsequent_indent = ' '*15)
# bug 1146. Prevent a long word to be wrongly wrapped when the
# preceding word is exactly one character shorter than the width
self.check_wrap(self.text, 12,
['Did you say ',
'"supercalifr',
'agilisticexp',
'ialidocious?',
'" How *do*',
'you spell',
'that odd',
'word,',
'anyways?'])
def test_nobreak_long(self):
# Test with break_long_words disabled
self.wrapper.break_long_words = 0
self.wrapper.width = 30
expect = ['Did you say',
'"supercalifragilisticexpialidocious?"',
'How *do* you spell that odd',
'word, anyways?'
]
result = self.wrapper.wrap(self.text)
self.check(result, expect)
# Same thing with kwargs passed to standalone wrap() function.
result = wrap(self.text, width=30, break_long_words=0)
self.check(result, expect)
class IndentTestCases(BaseTestCase):
# called before each test method
def setUp(self):
self.text = '''\
This paragraph will be filled, first without any indentation,
and then with some (including a hanging indent).'''
def test_fill(self):
# Test the fill() method
expect = '''\
This paragraph will be filled, first
without any indentation, and then with
some (including a hanging indent).'''
result = fill(self.text, 40)
self.check(result, expect)
def test_initial_indent(self):
# Test initial_indent parameter
expect = [" This paragraph will be filled,",
"first without any indentation, and then",
"with some (including a hanging indent)."]
result = wrap(self.text, 40, initial_indent=" ")
self.check(result, expect)
expect = "\n".join(expect)
result = fill(self.text, 40, initial_indent=" ")
self.check(result, expect)
def test_subsequent_indent(self):
# Test subsequent_indent parameter
expect = '''\
* This paragraph will be filled, first
without any indentation, and then
with some (including a hanging
indent).'''
result = fill(self.text, 40,
initial_indent=" * ", subsequent_indent=" ")
self.check(result, expect)
# Despite the similar names, DedentTestCase is *not* the inverse
# of IndentTestCase!
class DedentTestCase(unittest.TestCase):
def assertUnchanged(self, text):
"""assert that dedent() has no effect on 'text'"""
self.assertEquals(text, dedent(text))
def test_dedent_nomargin(self):
# No lines indented.
text = "Hello there.\nHow are you?\nOh good, I'm glad."
self.assertUnchanged(text)
# Similar, with a blank line.
text = "Hello there.\n\nBoo!"
self.assertUnchanged(text)
# Some lines indented, but overall margin is still zero.
text = "Hello there.\n This is indented."
self.assertUnchanged(text)
# Again, add a blank line.
text = "Hello there.\n\n Boo!\n"
self.assertUnchanged(text)
def test_dedent_even(self):
# All lines indented by two spaces.
text = " Hello there.\n How are ya?\n Oh good."
expect = "Hello there.\nHow are ya?\nOh good."
self.assertEquals(expect, dedent(text))
# Same, with blank lines.
text = " Hello there.\n\n How are ya?\n Oh good.\n"
expect = "Hello there.\n\nHow are ya?\nOh good.\n"
self.assertEquals(expect, dedent(text))
# Now indent one of the blank lines.
text = " Hello there.\n \n How are ya?\n Oh good.\n"
expect = "Hello there.\n\nHow are ya?\nOh good.\n"
self.assertEquals(expect, dedent(text))
def test_dedent_uneven(self):
# Lines indented unevenly.
text = '''\
def foo():
while 1:
return foo
'''
expect = '''\
def foo():
while 1:
return foo
'''
self.assertEquals(expect, dedent(text))
# Uneven indentation with a blank line.
text = " Foo\n Bar\n\n Baz\n"
expect = "Foo\n Bar\n\n Baz\n"
self.assertEquals(expect, dedent(text))
# Uneven indentation with a whitespace-only line.
text = " Foo\n Bar\n \n Baz\n"
expect = "Foo\n Bar\n\n Baz\n"
self.assertEquals(expect, dedent(text))
# dedent() should not mangle internal tabs
def test_dedent_preserve_internal_tabs(self):
text = " hello\tthere\n how are\tyou?"
expect = "hello\tthere\nhow are\tyou?"
self.assertEquals(expect, dedent(text))
# make sure that it preserves tabs when it's not making any
# changes at all
self.assertEquals(expect, dedent(expect))
# dedent() should not mangle tabs in the margin (i.e.
# tabs and spaces both count as margin, but are *not*
# considered equivalent)
def test_dedent_preserve_margin_tabs(self):
text = " hello there\n\thow are you?"
self.assertUnchanged(text)
# same effect even if we have 8 spaces
text = " hello there\n\thow are you?"
self.assertUnchanged(text)
# dedent() only removes whitespace that can be uniformly removed!
text = "\thello there\n\thow are you?"
expect = "hello there\nhow are you?"
self.assertEquals(expect, dedent(text))
text = " \thello there\n \thow are you?"
self.assertEquals(expect, dedent(text))
text = " \t hello there\n \t how are you?"
self.assertEquals(expect, dedent(text))
text = " \thello there\n \t how are you?"
expect = "hello there\n how are you?"
self.assertEquals(expect, dedent(text))
def test_main():
test_support.run_unittest(WrapTestCase,
LongWordTestCase,
IndentTestCases,
DedentTestCase)
if __name__ == '__main__':
test_main()
| apache-2.0 | -1,212,507,766,355,925,500 | 37.128079 | 79 | 0.528079 | false |
alexpilotti/stackalytics-1 | stackalytics/openstack/common/importutils.py | 19 | 2186 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Import related utilities and helper functions.
"""
import sys
import traceback
def import_class(import_str):
"""Returns a class from a string including module and class."""
mod_str, _sep, class_str = import_str.rpartition('.')
try:
__import__(mod_str)
return getattr(sys.modules[mod_str], class_str)
except (ValueError, AttributeError):
raise ImportError('Class %s cannot be found (%s)' %
(class_str,
traceback.format_exception(*sys.exc_info())))
def import_object(import_str, *args, **kwargs):
"""Import a class and return an instance of it."""
return import_class(import_str)(*args, **kwargs)
def import_object_ns(name_space, import_str, *args, **kwargs):
"""Tries to import object from default namespace.
Imports a class and return an instance of it, first by trying
to find the class in a default namespace, then failing back to
a full path if not found in the default namespace.
"""
import_value = "%s.%s" % (name_space, import_str)
try:
return import_class(import_value)(*args, **kwargs)
except ImportError:
return import_class(import_str)(*args, **kwargs)
def import_module(import_str):
"""Import a module."""
__import__(import_str)
return sys.modules[import_str]
def try_import(import_str, default=None):
"""Try to import a module and if it fails return default."""
try:
return import_module(import_str)
except ImportError:
return default
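# Illustrative usage with hypothetical module paths (not part of the original
# module):
#
#     Driver = import_class('mypkg.drivers.FooDriver')
#     driver = import_object('mypkg.drivers.FooDriver', option=1)
#     etree = try_import('lxml.etree')  # None if lxml is not installed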
| apache-2.0 | 2,317,949,527,008,469,000 | 31.147059 | 75 | 0.68344 | false |
thepiper/standoff | venv/lib/python2.7/site-packages/pip/_vendor/requests/utils.py | 618 | 21334 | # -*- coding: utf-8 -*-
"""
requests.utils
~~~~~~~~~~~~~~
This module provides utility functions that are used within Requests
that are also useful for external consumption.
"""
import cgi
import codecs
import collections
import io
import os
import platform
import re
import sys
import socket
import struct
import warnings
from . import __version__
from . import certs
from .compat import parse_http_list as _parse_list_header
from .compat import (quote, urlparse, bytes, str, OrderedDict, unquote, is_py2,
builtin_str, getproxies, proxy_bypass, urlunparse,
basestring)
from .cookies import RequestsCookieJar, cookiejar_from_dict
from .structures import CaseInsensitiveDict
from .exceptions import InvalidURL
_hush_pyflakes = (RequestsCookieJar,)
NETRC_FILES = ('.netrc', '_netrc')
DEFAULT_CA_BUNDLE_PATH = certs.where()
def dict_to_sequence(d):
"""Returns an internal sequence dictionary update."""
if hasattr(d, 'items'):
d = d.items()
return d
def super_len(o):
if hasattr(o, '__len__'):
return len(o)
if hasattr(o, 'len'):
return o.len
if hasattr(o, 'fileno'):
try:
fileno = o.fileno()
except io.UnsupportedOperation:
pass
else:
return os.fstat(fileno).st_size
if hasattr(o, 'getvalue'):
# e.g. BytesIO, cStringIO.StringIO
return len(o.getvalue())
def get_netrc_auth(url):
"""Returns the Requests tuple auth for a given url from netrc."""
try:
from netrc import netrc, NetrcParseError
netrc_path = None
for f in NETRC_FILES:
try:
loc = os.path.expanduser('~/{0}'.format(f))
except KeyError:
# os.path.expanduser can fail when $HOME is undefined and
# getpwuid fails. See http://bugs.python.org/issue20164 &
# https://github.com/kennethreitz/requests/issues/1846
return
if os.path.exists(loc):
netrc_path = loc
break
# Abort early if there isn't one.
if netrc_path is None:
return
ri = urlparse(url)
# Strip port numbers from netloc
host = ri.netloc.split(':')[0]
try:
_netrc = netrc(netrc_path).authenticators(host)
if _netrc:
# Return with login / password
login_i = (0 if _netrc[0] else 1)
return (_netrc[login_i], _netrc[2])
except (NetrcParseError, IOError):
# If there was a parsing error or a permissions issue reading the file,
# we'll just skip netrc auth
pass
# AppEngine hackiness.
except (ImportError, AttributeError):
pass
def guess_filename(obj):
"""Tries to guess the filename of the given object."""
name = getattr(obj, 'name', None)
if (name and isinstance(name, basestring) and name[0] != '<' and
name[-1] != '>'):
return os.path.basename(name)
def from_key_val_list(value):
"""Take an object and test to see if it can be represented as a
dictionary. Unless it can not be represented as such, return an
OrderedDict, e.g.,
::
>>> from_key_val_list([('key', 'val')])
OrderedDict([('key', 'val')])
>>> from_key_val_list('string')
ValueError: need more than 1 value to unpack
>>> from_key_val_list({'key': 'val'})
OrderedDict([('key', 'val')])
"""
if value is None:
return None
if isinstance(value, (str, bytes, bool, int)):
raise ValueError('cannot encode objects that are not 2-tuples')
return OrderedDict(value)
def to_key_val_list(value):
"""Take an object and test to see if it can be represented as a
dictionary. If it can be, return a list of tuples, e.g.,
::
>>> to_key_val_list([('key', 'val')])
[('key', 'val')]
>>> to_key_val_list({'key': 'val'})
[('key', 'val')]
>>> to_key_val_list('string')
ValueError: cannot encode objects that are not 2-tuples.
"""
if value is None:
return None
if isinstance(value, (str, bytes, bool, int)):
raise ValueError('cannot encode objects that are not 2-tuples')
if isinstance(value, collections.Mapping):
value = value.items()
return list(value)
# From mitsuhiko/werkzeug (used with permission).
def parse_list_header(value):
"""Parse lists as described by RFC 2068 Section 2.
In particular, parse comma-separated lists where the elements of
the list may include quoted-strings. A quoted-string could
contain a comma. A non-quoted string could have quotes in the
middle. Quotes are removed automatically after parsing.
It basically works like :func:`parse_set_header` just that items
may appear multiple times and case sensitivity is preserved.
The return value is a standard :class:`list`:
>>> parse_list_header('token, "quoted value"')
['token', 'quoted value']
To create a header from the :class:`list` again, use the
:func:`dump_header` function.
:param value: a string with a list header.
:return: :class:`list`
"""
result = []
for item in _parse_list_header(value):
if item[:1] == item[-1:] == '"':
item = unquote_header_value(item[1:-1])
result.append(item)
return result
# From mitsuhiko/werkzeug (used with permission).
def parse_dict_header(value):
"""Parse lists of key, value pairs as described by RFC 2068 Section 2 and
convert them into a python dict:
>>> d = parse_dict_header('foo="is a fish", bar="as well"')
>>> type(d) is dict
True
>>> sorted(d.items())
[('bar', 'as well'), ('foo', 'is a fish')]
If there is no value for a key it will be `None`:
>>> parse_dict_header('key_without_value')
{'key_without_value': None}
To create a header from the :class:`dict` again, use the
:func:`dump_header` function.
:param value: a string with a dict header.
:return: :class:`dict`
"""
result = {}
for item in _parse_list_header(value):
if '=' not in item:
result[item] = None
continue
name, value = item.split('=', 1)
if value[:1] == value[-1:] == '"':
value = unquote_header_value(value[1:-1])
result[name] = value
return result
# From mitsuhiko/werkzeug (used with permission).
def unquote_header_value(value, is_filename=False):
r"""Unquotes a header value. (Reversal of :func:`quote_header_value`).
This does not use the real unquoting but what browsers are actually
using for quoting.
:param value: the header value to unquote.
"""
if value and value[0] == value[-1] == '"':
# this is not the real unquoting, but fixing this so that the
# RFC is met will result in bugs with internet explorer and
# probably some other browsers as well. IE for example is
# uploading files with "C:\foo\bar.txt" as filename
value = value[1:-1]
# if this is a filename and the starting characters look like
# a UNC path, then just return the value without quotes. Using the
# replace sequence below on a UNC path has the effect of turning
# the leading double slash into a single slash and then
# _fix_ie_filename() doesn't work correctly. See #458.
if not is_filename or value[:2] != '\\\\':
return value.replace('\\\\', '\\').replace('\\"', '"')
return value
def dict_from_cookiejar(cj):
"""Returns a key/value dictionary from a CookieJar.
:param cj: CookieJar object to extract cookies from.
"""
cookie_dict = {}
for cookie in cj:
cookie_dict[cookie.name] = cookie.value
return cookie_dict
def add_dict_to_cookiejar(cj, cookie_dict):
"""Returns a CookieJar from a key/value dictionary.
:param cj: CookieJar to insert cookies into.
:param cookie_dict: Dict of key/values to insert into CookieJar.
"""
cj2 = cookiejar_from_dict(cookie_dict)
cj.update(cj2)
return cj
def get_encodings_from_content(content):
"""Returns encodings from given content string.
:param content: bytestring to extract encodings from.
"""
warnings.warn((
'In requests 3.0, get_encodings_from_content will be removed. For '
'more information, please see the discussion on issue #2266. (This'
' warning should only appear once.)'),
DeprecationWarning)
charset_re = re.compile(r'<meta.*?charset=["\']*(.+?)["\'>]', flags=re.I)
pragma_re = re.compile(r'<meta.*?content=["\']*;?charset=(.+?)["\'>]', flags=re.I)
xml_re = re.compile(r'^<\?xml.*?encoding=["\']*(.+?)["\'>]')
return (charset_re.findall(content) +
pragma_re.findall(content) +
xml_re.findall(content))
def get_encoding_from_headers(headers):
"""Returns encodings from given HTTP Header Dict.
:param headers: dictionary to extract encoding from.
"""
content_type = headers.get('content-type')
if not content_type:
return None
content_type, params = cgi.parse_header(content_type)
if 'charset' in params:
return params['charset'].strip("'\"")
if 'text' in content_type:
return 'ISO-8859-1'
def stream_decode_response_unicode(iterator, r):
"""Stream decodes a iterator."""
if r.encoding is None:
for item in iterator:
yield item
return
decoder = codecs.getincrementaldecoder(r.encoding)(errors='replace')
for chunk in iterator:
rv = decoder.decode(chunk)
if rv:
yield rv
rv = decoder.decode(b'', final=True)
if rv:
yield rv
def iter_slices(string, slice_length):
"""Iterate over slices of a string."""
pos = 0
while pos < len(string):
yield string[pos:pos + slice_length]
pos += slice_length
def get_unicode_from_response(r):
"""Returns the requested content back in unicode.
:param r: Response object to get unicode content from.
Tried:
1. charset from content-type
2. fall back and replace all unicode characters
"""
warnings.warn((
'In requests 3.0, get_unicode_from_response will be removed. For '
'more information, please see the discussion on issue #2266. (This'
' warning should only appear once.)'),
DeprecationWarning)
tried_encodings = []
# Try charset from content-type
encoding = get_encoding_from_headers(r.headers)
if encoding:
try:
return str(r.content, encoding)
except UnicodeError:
tried_encodings.append(encoding)
# Fall back:
try:
return str(r.content, encoding, errors='replace')
except TypeError:
return r.content
# The unreserved URI characters (RFC 3986)
UNRESERVED_SET = frozenset(
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
+ "0123456789-._~")
def unquote_unreserved(uri):
"""Un-escape any percent-escape sequences in a URI that are unreserved
characters. This leaves all reserved, illegal and non-ASCII bytes encoded.
"""
parts = uri.split('%')
for i in range(1, len(parts)):
h = parts[i][0:2]
if len(h) == 2 and h.isalnum():
try:
c = chr(int(h, 16))
except ValueError:
raise InvalidURL("Invalid percent-escape sequence: '%s'" % h)
if c in UNRESERVED_SET:
parts[i] = c + parts[i][2:]
else:
parts[i] = '%' + parts[i]
else:
parts[i] = '%' + parts[i]
return ''.join(parts)
def requote_uri(uri):
"""Re-quote the given URI.
This function passes the given URI through an unquote/quote cycle to
ensure that it is fully and consistently quoted.
"""
safe_with_percent = "!#$%&'()*+,/:;=?@[]~"
safe_without_percent = "!#$&'()*+,/:;=?@[]~"
try:
# Unquote only the unreserved characters
# Then quote only illegal characters (do not quote reserved,
# unreserved, or '%')
return quote(unquote_unreserved(uri), safe=safe_with_percent)
except InvalidURL:
# We couldn't unquote the given URI, so let's try quoting it, but
# there may be unquoted '%'s in the URI. We need to make sure they're
# properly quoted so they do not cause issues elsewhere.
return quote(uri, safe=safe_without_percent)
def address_in_network(ip, net):
"""
This function allows you to check if on IP belongs to a network subnet
Example: returns True if ip = 192.168.1.1 and net = 192.168.1.0/24
returns False if ip = 192.168.1.1 and net = 192.168.100.0/24
"""
ipaddr = struct.unpack('=L', socket.inet_aton(ip))[0]
netaddr, bits = net.split('/')
netmask = struct.unpack('=L', socket.inet_aton(dotted_netmask(int(bits))))[0]
network = struct.unpack('=L', socket.inet_aton(netaddr))[0] & netmask
return (ipaddr & netmask) == (network & netmask)
def dotted_netmask(mask):
"""
Converts mask from /xx format to xxx.xxx.xxx.xxx
Example: if mask is 24 function returns 255.255.255.0
"""
bits = 0xffffffff ^ (1 << 32 - mask) - 1
return socket.inet_ntoa(struct.pack('>I', bits))
def is_ipv4_address(string_ip):
try:
socket.inet_aton(string_ip)
except socket.error:
return False
return True
def is_valid_cidr(string_network):
"""Very simple check of the cidr format in no_proxy variable"""
if string_network.count('/') == 1:
try:
mask = int(string_network.split('/')[1])
except ValueError:
return False
if mask < 1 or mask > 32:
return False
try:
socket.inet_aton(string_network.split('/')[0])
except socket.error:
return False
else:
return False
return True
def should_bypass_proxies(url):
"""
Returns whether we should bypass proxies or not.
"""
get_proxy = lambda k: os.environ.get(k) or os.environ.get(k.upper())
# First check whether no_proxy is defined. If it is, check that the URL
# we're getting isn't in the no_proxy list.
no_proxy = get_proxy('no_proxy')
netloc = urlparse(url).netloc
if no_proxy:
# We need to check whether we match here. We need to see if we match
# the end of the netloc, both with and without the port.
no_proxy = no_proxy.replace(' ', '').split(',')
ip = netloc.split(':')[0]
if is_ipv4_address(ip):
for proxy_ip in no_proxy:
if is_valid_cidr(proxy_ip):
if address_in_network(ip, proxy_ip):
return True
else:
for host in no_proxy:
if netloc.endswith(host) or netloc.split(':')[0].endswith(host):
# The URL does match something in no_proxy, so we don't want
# to apply the proxies on this URL.
return True
# If the system proxy settings indicate that this URL should be bypassed,
# don't proxy.
# The proxy_bypass function is incredibly buggy on OS X in early versions
# of Python 2.6, so allow this call to fail. Only catch the specific
# exceptions we've seen, though: this call failing in other ways can reveal
# legitimate problems.
try:
bypass = proxy_bypass(netloc)
except (TypeError, socket.gaierror):
bypass = False
if bypass:
return True
return False
def get_environ_proxies(url):
"""Return a dict of environment proxies."""
if should_bypass_proxies(url):
return {}
else:
return getproxies()
def default_user_agent(name="python-requests"):
"""Return a string representing the default user agent."""
_implementation = platform.python_implementation()
if _implementation == 'CPython':
_implementation_version = platform.python_version()
elif _implementation == 'PyPy':
_implementation_version = '%s.%s.%s' % (sys.pypy_version_info.major,
sys.pypy_version_info.minor,
sys.pypy_version_info.micro)
if sys.pypy_version_info.releaselevel != 'final':
_implementation_version = ''.join([_implementation_version, sys.pypy_version_info.releaselevel])
elif _implementation == 'Jython':
_implementation_version = platform.python_version() # Complete Guess
elif _implementation == 'IronPython':
_implementation_version = platform.python_version() # Complete Guess
else:
_implementation_version = 'Unknown'
try:
p_system = platform.system()
p_release = platform.release()
except IOError:
p_system = 'Unknown'
p_release = 'Unknown'
return " ".join(['%s/%s' % (name, __version__),
'%s/%s' % (_implementation, _implementation_version),
'%s/%s' % (p_system, p_release)])
def default_headers():
return CaseInsensitiveDict({
'User-Agent': default_user_agent(),
'Accept-Encoding': ', '.join(('gzip', 'deflate')),
'Accept': '*/*',
'Connection': 'keep-alive',
})
def parse_header_links(value):
"""Return a dict of parsed link headers proxies.
i.e. Link: <http:/.../front.jpeg>; rel=front; type="image/jpeg",<http://.../back.jpeg>; rel=back;type="image/jpeg"
"""
links = []
replace_chars = " '\""
for val in re.split(", *<", value):
try:
url, params = val.split(";", 1)
except ValueError:
url, params = val, ''
link = {}
link["url"] = url.strip("<> '\"")
for param in params.split(";"):
try:
key, value = param.split("=")
except ValueError:
break
link[key.strip(replace_chars)] = value.strip(replace_chars)
links.append(link)
return links
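# For example (illustrative): parse_header_links('<http://example.com/p2>; rel="next"')
# returns [{'url': 'http://example.com/p2', 'rel': 'next'}].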
# Null bytes; no need to recreate these on each call to guess_json_utf
_null = '\x00'.encode('ascii') # encoding to ASCII for Python 3
_null2 = _null * 2
_null3 = _null * 3
def guess_json_utf(data):
# JSON always starts with two ASCII characters, so detection is as
# easy as counting the nulls and from their location and count
# determine the encoding. Also detect a BOM, if present.
sample = data[:4]
    if sample in (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE):
return 'utf-32' # BOM included
if sample[:3] == codecs.BOM_UTF8:
return 'utf-8-sig' # BOM included, MS style (discouraged)
if sample[:2] in (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE):
return 'utf-16' # BOM included
nullcount = sample.count(_null)
if nullcount == 0:
return 'utf-8'
if nullcount == 2:
if sample[::2] == _null2: # 1st and 3rd are null
return 'utf-16-be'
if sample[1::2] == _null2: # 2nd and 4th are null
return 'utf-16-le'
# Did not detect 2 valid UTF-16 ascii-range characters
if nullcount == 3:
if sample[:3] == _null3:
return 'utf-32-be'
if sample[1:] == _null3:
return 'utf-32-le'
# Did not detect a valid UTF-32 ascii-range character
return None
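# Illustrative outcomes (not part of the original module):
#     b'{"a": 1}'                         -> 'utf-8'     (no null bytes)
#     '{"a": 1}'.encode('utf-16-le')      -> nulls in 2nd/4th bytes -> 'utf-16-le'
#     '{"a": 1}'.encode('utf-32-be')      -> three leading nulls    -> 'utf-32-be'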
def prepend_scheme_if_needed(url, new_scheme):
'''Given a URL that may or may not have a scheme, prepend the given scheme.
Does not replace a present scheme with the one provided as an argument.'''
scheme, netloc, path, params, query, fragment = urlparse(url, new_scheme)
# urlparse is a finicky beast, and sometimes decides that there isn't a
# netloc present. Assume that it's being over-cautious, and switch netloc
# and path if urlparse decided there was no netloc.
if not netloc:
netloc, path = path, netloc
return urlunparse((scheme, netloc, path, params, query, fragment))
def get_auth_from_url(url):
"""Given a url with authentication components, extract them into a tuple of
username,password."""
parsed = urlparse(url)
try:
auth = (unquote(parsed.username), unquote(parsed.password))
except (AttributeError, TypeError):
auth = ('', '')
return auth
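# Illustrative examples (not part of the original source):
#   get_auth_from_url('http://user:s3cret@example.com/path') -> ('user', 's3cret')
#   get_auth_from_url('http://example.com/path')              -> ('', '')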
def to_native_string(string, encoding='ascii'):
"""
Given a string object, regardless of type, returns a representation of that
string in the native string type, encoding and decoding where necessary.
This assumes ASCII unless told otherwise.
"""
out = None
if isinstance(string, builtin_str):
out = string
else:
if is_py2:
out = string.encode(encoding)
else:
out = string.decode(encoding)
return out
def urldefragauth(url):
"""
Given a url remove the fragment and the authentication part
"""
scheme, netloc, path, params, query, fragment = urlparse(url)
# see func:`prepend_scheme_if_needed`
if not netloc:
netloc, path = path, netloc
netloc = netloc.rsplit('@', 1)[-1]
return urlunparse((scheme, netloc, path, params, query, ''))
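# Illustrative example (not part of the original source):
#   urldefragauth('http://user:pass@example.com/path#section') -> 'http://example.com/path'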
| gpl-3.0 | 6,211,973,571,984,631,000 | 29.175389 | 118 | 0.602091 | false |
mathLab/RBniCS | rbnics/problems/elliptic/elliptic_coercive_compliant_rb_reduced_problem.py | 1 | 1383 | # Copyright (C) 2015-2021 by the RBniCS authors
#
# This file is part of RBniCS.
#
# SPDX-License-Identifier: LGPL-3.0-or-later
from math import sqrt
from numpy import isclose
from rbnics.problems.elliptic.elliptic_coercive_compliant_problem import EllipticCoerciveCompliantProblem
from rbnics.problems.elliptic.elliptic_coercive_compliant_reduced_problem import EllipticCoerciveCompliantReducedProblem
from rbnics.problems.elliptic.elliptic_coercive_rb_reduced_problem import EllipticCoerciveRBReducedProblem
from rbnics.reduction_methods.elliptic import EllipticRBReduction
from rbnics.utils.decorators import ReducedProblemFor
EllipticCoerciveCompliantRBReducedProblem_Base = EllipticCoerciveCompliantReducedProblem(
EllipticCoerciveRBReducedProblem)
@ReducedProblemFor(EllipticCoerciveCompliantProblem, EllipticRBReduction)
class EllipticCoerciveCompliantRBReducedProblem(EllipticCoerciveCompliantRBReducedProblem_Base):
# Return an error bound for the current solution
def estimate_error(self):
eps2 = self.get_residual_norm_squared()
beta = self.truth_problem.get_stability_factor_lower_bound()
assert eps2 >= 0. or isclose(eps2, 0.)
assert beta >= 0.
return sqrt(abs(eps2) / beta)
# Return an error bound for the current compliant output
def estimate_error_output(self):
return self.estimate_error()**2
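    # Note added for illustration (not part of the original file): for compliant outputs
    # the output error bound is the square of the energy-norm error bound, e.g. a squared
    # residual norm of 1e-8 with a stability factor lower bound of 0.5 gives
    # estimate_error() ~ sqrt(2e-8) ~ 1.4e-4 and estimate_error_output() ~ 2e-8.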
| lgpl-3.0 | 9,077,032,819,618,299,000 | 43.612903 | 120 | 0.799711 | false |
Buggaarde/youtube-dl | youtube_dl/extractor/livestream.py | 107 | 10261 | from __future__ import unicode_literals
import re
import json
import itertools
from .common import InfoExtractor
from ..compat import (
compat_str,
compat_urllib_parse_urlparse,
compat_urlparse,
)
from ..utils import (
ExtractorError,
find_xpath_attr,
int_or_none,
orderedSet,
xpath_with_ns,
)
class LivestreamIE(InfoExtractor):
IE_NAME = 'livestream'
_VALID_URL = r'https?://(?:new\.)?livestream\.com/.*?/(?P<event_name>.*?)(/videos/(?P<id>[0-9]+)(?:/player)?)?/?(?:$|[?#])'
_TESTS = [{
'url': 'http://new.livestream.com/CoheedandCambria/WebsterHall/videos/4719370',
'md5': '53274c76ba7754fb0e8d072716f2292b',
'info_dict': {
'id': '4719370',
'ext': 'mp4',
'title': 'Live from Webster Hall NYC',
'upload_date': '20121012',
'like_count': int,
'view_count': int,
'thumbnail': 're:^http://.*\.jpg$'
}
}, {
'url': 'http://new.livestream.com/tedx/cityenglish',
'info_dict': {
'title': 'TEDCity2.0 (English)',
'id': '2245590',
},
'playlist_mincount': 4,
}, {
'url': 'http://new.livestream.com/chess24/tatasteelchess',
'info_dict': {
'title': 'Tata Steel Chess',
'id': '3705884',
},
'playlist_mincount': 60,
}, {
'url': 'https://new.livestream.com/accounts/362/events/3557232/videos/67864563/player?autoPlay=false&height=360&mute=false&width=640',
'only_matching': True,
}, {
'url': 'http://livestream.com/bsww/concacafbeachsoccercampeonato2015',
'only_matching': True,
}]
def _parse_smil(self, video_id, smil_url):
formats = []
_SWITCH_XPATH = (
'.//{http://www.w3.org/2001/SMIL20/Language}body/'
'{http://www.w3.org/2001/SMIL20/Language}switch')
smil_doc = self._download_xml(
smil_url, video_id,
note='Downloading SMIL information',
errnote='Unable to download SMIL information',
fatal=False)
if smil_doc is False: # Download failed
return formats
title_node = find_xpath_attr(
smil_doc, './/{http://www.w3.org/2001/SMIL20/Language}meta',
'name', 'title')
if title_node is None:
self.report_warning('Cannot find SMIL id')
switch_node = smil_doc.find(_SWITCH_XPATH)
else:
title_id = title_node.attrib['content']
switch_node = find_xpath_attr(
smil_doc, _SWITCH_XPATH, 'id', title_id)
if switch_node is None:
raise ExtractorError('Cannot find switch node')
video_nodes = switch_node.findall(
'{http://www.w3.org/2001/SMIL20/Language}video')
for vn in video_nodes:
tbr = int_or_none(vn.attrib.get('system-bitrate'))
furl = (
'http://livestream-f.akamaihd.net/%s?v=3.0.3&fp=WIN%%2014,0,0,145' %
(vn.attrib['src']))
if 'clipBegin' in vn.attrib:
furl += '&ssek=' + vn.attrib['clipBegin']
formats.append({
'url': furl,
'format_id': 'smil_%d' % tbr,
'ext': 'flv',
'tbr': tbr,
'preference': -1000,
})
return formats
def _extract_video_info(self, video_data):
video_id = compat_str(video_data['id'])
FORMAT_KEYS = (
('sd', 'progressive_url'),
('hd', 'progressive_url_hd'),
)
formats = [{
'format_id': format_id,
'url': video_data[key],
'quality': i + 1,
} for i, (format_id, key) in enumerate(FORMAT_KEYS)
if video_data.get(key)]
smil_url = video_data.get('smil_url')
if smil_url:
formats.extend(self._parse_smil(video_id, smil_url))
self._sort_formats(formats)
return {
'id': video_id,
'formats': formats,
'title': video_data['caption'],
'thumbnail': video_data.get('thumbnail_url'),
'upload_date': video_data['updated_at'].replace('-', '')[:8],
'like_count': video_data.get('likes', {}).get('total'),
'view_count': video_data.get('views'),
}
def _extract_event(self, info):
event_id = compat_str(info['id'])
account = compat_str(info['owner_account_id'])
root_url = (
'https://new.livestream.com/api/accounts/{account}/events/{event}/'
'feed.json'.format(account=account, event=event_id))
def _extract_videos():
last_video = None
for i in itertools.count(1):
if last_video is None:
info_url = root_url
else:
info_url = '{root}?&id={id}&newer=-1&type=video'.format(
root=root_url, id=last_video)
videos_info = self._download_json(info_url, event_id, 'Downloading page {0}'.format(i))['data']
videos_info = [v['data'] for v in videos_info if v['type'] == 'video']
if not videos_info:
break
for v in videos_info:
yield self._extract_video_info(v)
last_video = videos_info[-1]['id']
return self.playlist_result(_extract_videos(), event_id, info['full_name'])
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
event_name = mobj.group('event_name')
webpage = self._download_webpage(url, video_id or event_name)
og_video = self._og_search_video_url(
webpage, 'player url', fatal=False, default=None)
if og_video is not None:
query_str = compat_urllib_parse_urlparse(og_video).query
query = compat_urlparse.parse_qs(query_str)
if 'play_url' in query:
api_url = query['play_url'][0].replace('.smil', '')
info = json.loads(self._download_webpage(
api_url, video_id, 'Downloading video info'))
return self._extract_video_info(info)
config_json = self._search_regex(
r'window.config = ({.*?});', webpage, 'window config')
info = json.loads(config_json)['event']
def is_relevant(vdata, vid):
result = vdata['type'] == 'video'
if video_id is not None:
result = result and compat_str(vdata['data']['id']) == vid
return result
if video_id is None:
# This is an event page:
return self._extract_event(info)
else:
videos = [self._extract_video_info(video_data['data'])
for video_data in info['feed']['data']
if is_relevant(video_data, video_id)]
if not videos:
raise ExtractorError('Cannot find video %s' % video_id)
return videos[0]
# The original version of Livestream uses a different system
class LivestreamOriginalIE(InfoExtractor):
IE_NAME = 'livestream:original'
_VALID_URL = r'''(?x)https?://original\.livestream\.com/
(?P<user>[^/]+)/(?P<type>video|folder)
(?:\?.*?Id=|/)(?P<id>.*?)(&|$)
'''
_TESTS = [{
'url': 'http://original.livestream.com/dealbook/video?clipId=pla_8aa4a3f1-ba15-46a4-893b-902210e138fb',
'info_dict': {
'id': 'pla_8aa4a3f1-ba15-46a4-893b-902210e138fb',
'ext': 'mp4',
'title': 'Spark 1 (BitCoin) with Cameron Winklevoss & Tyler Winklevoss of Winklevoss Capital',
},
}, {
'url': 'https://original.livestream.com/newplay/folder?dirId=a07bf706-d0e4-4e75-a747-b021d84f2fd3',
'info_dict': {
'id': 'a07bf706-d0e4-4e75-a747-b021d84f2fd3',
},
'playlist_mincount': 4,
}]
def _extract_video(self, user, video_id):
api_url = 'http://x{0}x.api.channel.livestream.com/2.0/clipdetails?extendedInfo=true&id={1}'.format(user, video_id)
info = self._download_xml(api_url, video_id)
# this url is used on mobile devices
stream_url = 'http://x{0}x.api.channel.livestream.com/3.0/getstream.json?id={1}'.format(user, video_id)
stream_info = self._download_json(stream_url, video_id)
item = info.find('channel').find('item')
ns = {'media': 'http://search.yahoo.com/mrss'}
thumbnail_url = item.find(xpath_with_ns('media:thumbnail', ns)).attrib['url']
return {
'id': video_id,
'title': item.find('title').text,
'url': stream_info['progressiveUrl'],
'thumbnail': thumbnail_url,
}
def _extract_folder(self, url, folder_id):
webpage = self._download_webpage(url, folder_id)
paths = orderedSet(re.findall(
r'''(?x)(?:
<li\s+class="folder">\s*<a\s+href="|
<a\s+href="(?=https?://livestre\.am/)
)([^"]+)"''', webpage))
return {
'_type': 'playlist',
'id': folder_id,
'entries': [{
'_type': 'url',
'url': compat_urlparse.urljoin(url, p),
} for p in paths],
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
id = mobj.group('id')
user = mobj.group('user')
url_type = mobj.group('type')
if url_type == 'folder':
return self._extract_folder(url, id)
else:
return self._extract_video(user, id)
# The server doesn't support HEAD request, the generic extractor can't detect
# the redirection
class LivestreamShortenerIE(InfoExtractor):
IE_NAME = 'livestream:shortener'
IE_DESC = False # Do not list
_VALID_URL = r'https?://livestre\.am/(?P<id>.+)'
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
id = mobj.group('id')
webpage = self._download_webpage(url, id)
return {
'_type': 'url',
'url': self._og_search_url(webpage),
}
| unlicense | -961,867,645,565,188,400 | 36.043321 | 142 | 0.528603 | false |
ActionAdam/osmc | package/mediacenter-skin-osmc/files/usr/share/kodi/addons/script.module.unidecode/lib/unidecode/x0a4.py | 252 | 4437 | data = (
'qiet', # 0x00
'qiex', # 0x01
'qie', # 0x02
'qiep', # 0x03
'quot', # 0x04
'quox', # 0x05
'quo', # 0x06
'quop', # 0x07
'qot', # 0x08
'qox', # 0x09
'qo', # 0x0a
'qop', # 0x0b
'qut', # 0x0c
'qux', # 0x0d
'qu', # 0x0e
'qup', # 0x0f
'qurx', # 0x10
'qur', # 0x11
'qyt', # 0x12
'qyx', # 0x13
'qy', # 0x14
'qyp', # 0x15
'qyrx', # 0x16
'qyr', # 0x17
'jjit', # 0x18
'jjix', # 0x19
'jji', # 0x1a
'jjip', # 0x1b
'jjiet', # 0x1c
'jjiex', # 0x1d
'jjie', # 0x1e
'jjiep', # 0x1f
'jjuox', # 0x20
'jjuo', # 0x21
'jjuop', # 0x22
'jjot', # 0x23
'jjox', # 0x24
'jjo', # 0x25
'jjop', # 0x26
'jjut', # 0x27
'jjux', # 0x28
'jju', # 0x29
'jjup', # 0x2a
'jjurx', # 0x2b
'jjur', # 0x2c
'jjyt', # 0x2d
'jjyx', # 0x2e
'jjy', # 0x2f
'jjyp', # 0x30
'njit', # 0x31
'njix', # 0x32
'nji', # 0x33
'njip', # 0x34
'njiet', # 0x35
'njiex', # 0x36
'njie', # 0x37
'njiep', # 0x38
'njuox', # 0x39
'njuo', # 0x3a
'njot', # 0x3b
'njox', # 0x3c
'njo', # 0x3d
'njop', # 0x3e
'njux', # 0x3f
'nju', # 0x40
'njup', # 0x41
'njurx', # 0x42
'njur', # 0x43
'njyt', # 0x44
'njyx', # 0x45
'njy', # 0x46
'njyp', # 0x47
'njyrx', # 0x48
'njyr', # 0x49
'nyit', # 0x4a
'nyix', # 0x4b
'nyi', # 0x4c
'nyip', # 0x4d
'nyiet', # 0x4e
'nyiex', # 0x4f
'nyie', # 0x50
'nyiep', # 0x51
'nyuox', # 0x52
'nyuo', # 0x53
'nyuop', # 0x54
'nyot', # 0x55
'nyox', # 0x56
'nyo', # 0x57
'nyop', # 0x58
'nyut', # 0x59
'nyux', # 0x5a
'nyu', # 0x5b
'nyup', # 0x5c
'xit', # 0x5d
'xix', # 0x5e
'xi', # 0x5f
'xip', # 0x60
'xiet', # 0x61
'xiex', # 0x62
'xie', # 0x63
'xiep', # 0x64
'xuox', # 0x65
'xuo', # 0x66
'xot', # 0x67
'xox', # 0x68
'xo', # 0x69
'xop', # 0x6a
'xyt', # 0x6b
'xyx', # 0x6c
'xy', # 0x6d
'xyp', # 0x6e
'xyrx', # 0x6f
'xyr', # 0x70
'yit', # 0x71
'yix', # 0x72
'yi', # 0x73
'yip', # 0x74
'yiet', # 0x75
'yiex', # 0x76
'yie', # 0x77
'yiep', # 0x78
'yuot', # 0x79
'yuox', # 0x7a
'yuo', # 0x7b
'yuop', # 0x7c
'yot', # 0x7d
'yox', # 0x7e
'yo', # 0x7f
'yop', # 0x80
'yut', # 0x81
'yux', # 0x82
'yu', # 0x83
'yup', # 0x84
'yurx', # 0x85
'yur', # 0x86
'yyt', # 0x87
'yyx', # 0x88
'yy', # 0x89
'yyp', # 0x8a
'yyrx', # 0x8b
'yyr', # 0x8c
'[?]', # 0x8d
'[?]', # 0x8e
'[?]', # 0x8f
'Qot', # 0x90
'Li', # 0x91
'Kit', # 0x92
'Nyip', # 0x93
'Cyp', # 0x94
'Ssi', # 0x95
'Ggop', # 0x96
'Gep', # 0x97
'Mi', # 0x98
'Hxit', # 0x99
'Lyr', # 0x9a
'Bbut', # 0x9b
'Mop', # 0x9c
'Yo', # 0x9d
'Put', # 0x9e
'Hxuo', # 0x9f
'Tat', # 0xa0
'Ga', # 0xa1
'[?]', # 0xa2
'[?]', # 0xa3
'Ddur', # 0xa4
'Bur', # 0xa5
'Gguo', # 0xa6
'Nyop', # 0xa7
'Tu', # 0xa8
'Op', # 0xa9
'Jjut', # 0xaa
'Zot', # 0xab
'Pyt', # 0xac
'Hmo', # 0xad
'Yit', # 0xae
'Vur', # 0xaf
'Shy', # 0xb0
'Vep', # 0xb1
'Za', # 0xb2
'Jo', # 0xb3
'[?]', # 0xb4
'Jjy', # 0xb5
'Got', # 0xb6
'Jjie', # 0xb7
'Wo', # 0xb8
'Du', # 0xb9
'Shur', # 0xba
'Lie', # 0xbb
'Cy', # 0xbc
'Cuop', # 0xbd
'Cip', # 0xbe
'Hxop', # 0xbf
'Shat', # 0xc0
'[?]', # 0xc1
'Shop', # 0xc2
'Che', # 0xc3
'Zziet', # 0xc4
'[?]', # 0xc5
'Ke', # 0xc6
'[?]', # 0xc7
'[?]', # 0xc8
'[?]', # 0xc9
'[?]', # 0xca
'[?]', # 0xcb
'[?]', # 0xcc
'[?]', # 0xcd
'[?]', # 0xce
'[?]', # 0xcf
'[?]', # 0xd0
'[?]', # 0xd1
'[?]', # 0xd2
'[?]', # 0xd3
'[?]', # 0xd4
'[?]', # 0xd5
'[?]', # 0xd6
'[?]', # 0xd7
'[?]', # 0xd8
'[?]', # 0xd9
'[?]', # 0xda
'[?]', # 0xdb
'[?]', # 0xdc
'[?]', # 0xdd
'[?]', # 0xde
'[?]', # 0xdf
'[?]', # 0xe0
'[?]', # 0xe1
'[?]', # 0xe2
'[?]', # 0xe3
'[?]', # 0xe4
'[?]', # 0xe5
'[?]', # 0xe6
'[?]', # 0xe7
'[?]', # 0xe8
'[?]', # 0xe9
'[?]', # 0xea
'[?]', # 0xeb
'[?]', # 0xec
'[?]', # 0xed
'[?]', # 0xee
'[?]', # 0xef
'[?]', # 0xf0
'[?]', # 0xf1
'[?]', # 0xf2
'[?]', # 0xf3
'[?]', # 0xf4
'[?]', # 0xf5
'[?]', # 0xf6
'[?]', # 0xf7
'[?]', # 0xf8
'[?]', # 0xf9
'[?]', # 0xfa
'[?]', # 0xfb
'[?]', # 0xfc
'[?]', # 0xfd
'[?]', # 0xfe
)
| gpl-2.0 | 6,609,237,519,416,604,000 | 16.264591 | 18 | 0.380437 | false |
fibbo/DIRAC | DataManagementSystem/scripts/dirac-dms-remove-catalog-replicas.py | 2 | 1490 | #!/usr/bin/env python
########################################################################
# $Header: $
########################################################################
__RCSID__ = "$Id$"
from DIRAC import exit as DIRACExit
from DIRAC.Core.Base import Script
Script.setUsageMessage( """
Remove the given file replica or a list of file replicas from the File Catalog
This script should be used with great care as it may leave dark data in the storage!
Use dirac-dms-remove-replicas instead
Usage:
%s <LFN | fileContainingLFNs> <SE>
""" % Script.scriptName )
Script.parseCommandLine()
from DIRAC.DataManagementSystem.Client.DataManager import DataManager
dm = DataManager()
import os, sys
args = Script.getPositionalArgs()
if len( args ) < 2:
Script.showHelp()
DIRACExit( -1 )
else:
inputFileName = args[0]
storageElementName = args[1]
if os.path.exists( inputFileName ):
inputFile = open( inputFileName, 'r' )
string = inputFile.read()
lfns = [ lfn.strip() for lfn in string.splitlines() ]
inputFile.close()
else:
lfns = [inputFileName]
res = dm.removeReplicaFromCatalog( storageElementName, lfns )
if not res['OK']:
print res['Message']
DIRACExit(0)
for lfn in sorted( res['Value']['Failed'] ):
message = res['Value']['Failed'][lfn]
print 'Failed to remove %s replica of %s: %s' % ( storageElementName, lfn, message )
print 'Successfully removed %d catalog replicas at %s' % ( len( res['Value']['Successful'] ), storageElementName )
| gpl-3.0 | -6,068,450,115,769,496,000 | 30.702128 | 113 | 0.639597 | false |
indiereign/shaka-player | build/checkversion.py | 3 | 2606 | #!/usr/bin/env python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Checks that all the versions match."""
import logging
import os
import re
import shakaBuildHelpers
def player_version():
"""Gets the version of the library from player.js."""
path = os.path.join(shakaBuildHelpers.get_source_base(), 'lib', 'player.js')
with open(path, 'r') as f:
match = re.search(r'goog\.define\(\'GIT_VERSION\', \'(.*)\'\)', f.read())
return match.group(1) if match else ''
def changelog_version():
"""Gets the version of the library from the CHANGELOG."""
path = os.path.join(shakaBuildHelpers.get_source_base(), 'CHANGELOG.md')
with open(path, 'r') as f:
match = re.search(r'## (.*) \(', f.read())
return match.group(1) if match else ''
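# Illustrative example (not part of the original file): assuming the changelog entries
# look like '## 2.0.0 (2016-07-07)', changelog_version() returns '2.0.0'.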
def check_version(_):
"""Checks that all the versions in the library match."""
changelog = changelog_version()
player = player_version()
git = shakaBuildHelpers.git_version()
npm = shakaBuildHelpers.npm_version()
print 'git version:', git
print 'npm version:', npm
print 'player version:', player
print 'changelog version:', changelog
ret = 0
if 'dirty' in git:
logging.error('Git version is dirty.')
ret = 1
elif 'unknown' in git:
logging.error('Git version is not a tag.')
ret = 1
elif not re.match(r'^v[0-9]+\.[0-9]+\.[0-9]+(?:-[a-z0-9]+)?$', git):
logging.error('Git version is a malformed release version.')
logging.error('It should be a \'v\', followed by three numbers')
logging.error('separated by dots, optionally followed by a hyphen')
logging.error('and a pre-release identifier. See http://semver.org/')
ret = 1
if 'v' + npm != git:
logging.error('NPM version does not match git version.')
ret = 1
if player != git + '-debug':
logging.error('Player version does not match git version.')
ret = 1
if 'v' + changelog != git:
logging.error('Changelog version does not match git version.')
ret = 1
return ret
if __name__ == '__main__':
shakaBuildHelpers.run_main(check_version)
| apache-2.0 | 6,620,711,093,445,984,000 | 30.780488 | 78 | 0.672678 | false |
batermj/algorithm-challenger | code-analysis/programming_anguage/python/source_codes/Python3.5.9/Python-3.5.9/Lib/test/test_fractions.py | 1 | 26461 | """Tests for Lib/fractions.py."""
from decimal import Decimal
from test.support import requires_IEEE_754
import math
import numbers
import operator
import fractions
import sys
import unittest
import warnings
from copy import copy, deepcopy
from pickle import dumps, loads
F = fractions.Fraction
gcd = fractions.gcd
class DummyFloat(object):
"""Dummy float class for testing comparisons with Fractions"""
def __init__(self, value):
if not isinstance(value, float):
raise TypeError("DummyFloat can only be initialized from float")
self.value = value
def _richcmp(self, other, op):
if isinstance(other, numbers.Rational):
return op(F.from_float(self.value), other)
elif isinstance(other, DummyFloat):
return op(self.value, other.value)
else:
return NotImplemented
def __eq__(self, other): return self._richcmp(other, operator.eq)
def __le__(self, other): return self._richcmp(other, operator.le)
def __lt__(self, other): return self._richcmp(other, operator.lt)
def __ge__(self, other): return self._richcmp(other, operator.ge)
def __gt__(self, other): return self._richcmp(other, operator.gt)
# shouldn't be calling __float__ at all when doing comparisons
def __float__(self):
assert False, "__float__ should not be invoked for comparisons"
# same goes for subtraction
def __sub__(self, other):
assert False, "__sub__ should not be invoked for comparisons"
__rsub__ = __sub__
class DummyRational(object):
"""Test comparison of Fraction with a naive rational implementation."""
def __init__(self, num, den):
g = math.gcd(num, den)
self.num = num // g
self.den = den // g
def __eq__(self, other):
if isinstance(other, fractions.Fraction):
return (self.num == other._numerator and
self.den == other._denominator)
else:
return NotImplemented
def __lt__(self, other):
return(self.num * other._denominator < self.den * other._numerator)
def __gt__(self, other):
return(self.num * other._denominator > self.den * other._numerator)
def __le__(self, other):
return(self.num * other._denominator <= self.den * other._numerator)
def __ge__(self, other):
return(self.num * other._denominator >= self.den * other._numerator)
# this class is for testing comparisons; conversion to float
# should never be used for a comparison, since it loses accuracy
def __float__(self):
assert False, "__float__ should not be invoked"
class DummyFraction(fractions.Fraction):
"""Dummy Fraction subclass for copy and deepcopy testing."""
class GcdTest(unittest.TestCase):
def testMisc(self):
# fractions.gcd() is deprecated
with self.assertWarnsRegex(DeprecationWarning, r'fractions\.gcd'):
gcd(1, 1)
with warnings.catch_warnings():
warnings.filterwarnings('ignore', r'fractions\.gcd',
DeprecationWarning)
self.assertEqual(0, gcd(0, 0))
self.assertEqual(1, gcd(1, 0))
self.assertEqual(-1, gcd(-1, 0))
self.assertEqual(1, gcd(0, 1))
self.assertEqual(-1, gcd(0, -1))
self.assertEqual(1, gcd(7, 1))
self.assertEqual(-1, gcd(7, -1))
self.assertEqual(1, gcd(-23, 15))
self.assertEqual(12, gcd(120, 84))
self.assertEqual(-12, gcd(84, -120))
self.assertEqual(gcd(120.0, 84), 12.0)
self.assertEqual(gcd(120, 84.0), 12.0)
self.assertEqual(gcd(F(120), F(84)), F(12))
self.assertEqual(gcd(F(120, 77), F(84, 55)), F(12, 385))
def _components(r):
return (r.numerator, r.denominator)
class FractionTest(unittest.TestCase):
def assertTypedEquals(self, expected, actual):
"""Asserts that both the types and values are the same."""
self.assertEqual(type(expected), type(actual))
self.assertEqual(expected, actual)
def assertRaisesMessage(self, exc_type, message,
callable, *args, **kwargs):
"""Asserts that callable(*args, **kwargs) raises exc_type(message)."""
try:
callable(*args, **kwargs)
except exc_type as e:
self.assertEqual(message, str(e))
else:
self.fail("%s not raised" % exc_type.__name__)
def testInit(self):
self.assertEqual((0, 1), _components(F()))
self.assertEqual((7, 1), _components(F(7)))
self.assertEqual((7, 3), _components(F(F(7, 3))))
self.assertEqual((-1, 1), _components(F(-1, 1)))
self.assertEqual((-1, 1), _components(F(1, -1)))
self.assertEqual((1, 1), _components(F(-2, -2)))
self.assertEqual((1, 2), _components(F(5, 10)))
self.assertEqual((7, 15), _components(F(7, 15)))
self.assertEqual((10**23, 1), _components(F(10**23)))
self.assertEqual((3, 77), _components(F(F(3, 7), 11)))
self.assertEqual((-9, 5), _components(F(2, F(-10, 9))))
self.assertEqual((2486, 2485), _components(F(F(22, 7), F(355, 113))))
self.assertRaisesMessage(ZeroDivisionError, "Fraction(12, 0)",
F, 12, 0)
self.assertRaises(TypeError, F, 1.5 + 3j)
self.assertRaises(TypeError, F, "3/2", 3)
self.assertRaises(TypeError, F, 3, 0j)
self.assertRaises(TypeError, F, 3, 1j)
@requires_IEEE_754
def testInitFromFloat(self):
self.assertEqual((5, 2), _components(F(2.5)))
self.assertEqual((0, 1), _components(F(-0.0)))
self.assertEqual((3602879701896397, 36028797018963968),
_components(F(0.1)))
# bug 16469: error types should be consistent with float -> int
self.assertRaises(ValueError, F, float('nan'))
self.assertRaises(OverflowError, F, float('inf'))
self.assertRaises(OverflowError, F, float('-inf'))
def testInitFromDecimal(self):
self.assertEqual((11, 10),
_components(F(Decimal('1.1'))))
self.assertEqual((7, 200),
_components(F(Decimal('3.5e-2'))))
self.assertEqual((0, 1),
_components(F(Decimal('.000e20'))))
# bug 16469: error types should be consistent with decimal -> int
self.assertRaises(ValueError, F, Decimal('nan'))
self.assertRaises(ValueError, F, Decimal('snan'))
self.assertRaises(OverflowError, F, Decimal('inf'))
self.assertRaises(OverflowError, F, Decimal('-inf'))
def testFromString(self):
self.assertEqual((5, 1), _components(F("5")))
self.assertEqual((3, 2), _components(F("3/2")))
self.assertEqual((3, 2), _components(F(" \n +3/2")))
self.assertEqual((-3, 2), _components(F("-3/2 ")))
self.assertEqual((13, 2), _components(F(" 013/02 \n ")))
self.assertEqual((16, 5), _components(F(" 3.2 ")))
self.assertEqual((-16, 5), _components(F(" -3.2 ")))
self.assertEqual((-3, 1), _components(F(" -3. ")))
self.assertEqual((3, 5), _components(F(" .6 ")))
self.assertEqual((1, 3125), _components(F("32.e-5")))
self.assertEqual((1000000, 1), _components(F("1E+06")))
self.assertEqual((-12300, 1), _components(F("-1.23e4")))
self.assertEqual((0, 1), _components(F(" .0e+0\t")))
self.assertEqual((0, 1), _components(F("-0.000e0")))
self.assertRaisesMessage(
ZeroDivisionError, "Fraction(3, 0)",
F, "3/0")
self.assertRaisesMessage(
ValueError, "Invalid literal for Fraction: '3/'",
F, "3/")
self.assertRaisesMessage(
ValueError, "Invalid literal for Fraction: '/2'",
F, "/2")
self.assertRaisesMessage(
ValueError, "Invalid literal for Fraction: '3 /2'",
F, "3 /2")
self.assertRaisesMessage(
# Denominators don't need a sign.
ValueError, "Invalid literal for Fraction: '3/+2'",
F, "3/+2")
self.assertRaisesMessage(
# Imitate float's parsing.
ValueError, "Invalid literal for Fraction: '+ 3/2'",
F, "+ 3/2")
self.assertRaisesMessage(
# Avoid treating '.' as a regex special character.
ValueError, "Invalid literal for Fraction: '3a2'",
F, "3a2")
self.assertRaisesMessage(
# Don't accept combinations of decimals and rationals.
ValueError, "Invalid literal for Fraction: '3/7.2'",
F, "3/7.2")
self.assertRaisesMessage(
# Don't accept combinations of decimals and rationals.
ValueError, "Invalid literal for Fraction: '3.2/7'",
F, "3.2/7")
self.assertRaisesMessage(
# Allow 3. and .3, but not .
ValueError, "Invalid literal for Fraction: '.'",
F, ".")
def testImmutable(self):
r = F(7, 3)
r.__init__(2, 15)
self.assertEqual((7, 3), _components(r))
self.assertRaises(AttributeError, setattr, r, 'numerator', 12)
self.assertRaises(AttributeError, setattr, r, 'denominator', 6)
self.assertEqual((7, 3), _components(r))
# But if you _really_ need to:
r._numerator = 4
r._denominator = 2
self.assertEqual((4, 2), _components(r))
# Which breaks some important operations:
self.assertNotEqual(F(4, 2), r)
def testFromFloat(self):
self.assertRaises(TypeError, F.from_float, 3+4j)
self.assertEqual((10, 1), _components(F.from_float(10)))
bigint = 1234567890123456789
self.assertEqual((bigint, 1), _components(F.from_float(bigint)))
self.assertEqual((0, 1), _components(F.from_float(-0.0)))
self.assertEqual((10, 1), _components(F.from_float(10.0)))
self.assertEqual((-5, 2), _components(F.from_float(-2.5)))
self.assertEqual((99999999999999991611392, 1),
_components(F.from_float(1e23)))
self.assertEqual(float(10**23), float(F.from_float(1e23)))
self.assertEqual((3602879701896397, 1125899906842624),
_components(F.from_float(3.2)))
self.assertEqual(3.2, float(F.from_float(3.2)))
inf = 1e1000
nan = inf - inf
# bug 16469: error types should be consistent with float -> int
self.assertRaisesMessage(
OverflowError, "Cannot convert inf to Fraction.",
F.from_float, inf)
self.assertRaisesMessage(
OverflowError, "Cannot convert -inf to Fraction.",
F.from_float, -inf)
self.assertRaisesMessage(
ValueError, "Cannot convert nan to Fraction.",
F.from_float, nan)
def testFromDecimal(self):
self.assertRaises(TypeError, F.from_decimal, 3+4j)
self.assertEqual(F(10, 1), F.from_decimal(10))
self.assertEqual(F(0), F.from_decimal(Decimal("-0")))
self.assertEqual(F(5, 10), F.from_decimal(Decimal("0.5")))
self.assertEqual(F(5, 1000), F.from_decimal(Decimal("5e-3")))
self.assertEqual(F(5000), F.from_decimal(Decimal("5e3")))
self.assertEqual(1 - F(1, 10**30),
F.from_decimal(Decimal("0." + "9" * 30)))
# bug 16469: error types should be consistent with decimal -> int
self.assertRaisesMessage(
OverflowError, "Cannot convert Infinity to Fraction.",
F.from_decimal, Decimal("inf"))
self.assertRaisesMessage(
OverflowError, "Cannot convert -Infinity to Fraction.",
F.from_decimal, Decimal("-inf"))
self.assertRaisesMessage(
ValueError, "Cannot convert NaN to Fraction.",
F.from_decimal, Decimal("nan"))
self.assertRaisesMessage(
ValueError, "Cannot convert sNaN to Fraction.",
F.from_decimal, Decimal("snan"))
def testLimitDenominator(self):
rpi = F('3.1415926535897932')
self.assertEqual(rpi.limit_denominator(10000), F(355, 113))
self.assertEqual(-rpi.limit_denominator(10000), F(-355, 113))
self.assertEqual(rpi.limit_denominator(113), F(355, 113))
self.assertEqual(rpi.limit_denominator(112), F(333, 106))
self.assertEqual(F(201, 200).limit_denominator(100), F(1))
self.assertEqual(F(201, 200).limit_denominator(101), F(102, 101))
self.assertEqual(F(0).limit_denominator(10000), F(0))
for i in (0, -1):
self.assertRaisesMessage(
ValueError, "max_denominator should be at least 1",
F(1).limit_denominator, i)
def testConversions(self):
self.assertTypedEquals(-1, math.trunc(F(-11, 10)))
self.assertTypedEquals(1, math.trunc(F(11, 10)))
self.assertTypedEquals(-2, math.floor(F(-11, 10)))
self.assertTypedEquals(-1, math.ceil(F(-11, 10)))
self.assertTypedEquals(-1, math.ceil(F(-10, 10)))
self.assertTypedEquals(-1, int(F(-11, 10)))
self.assertTypedEquals(0, round(F(-1, 10)))
self.assertTypedEquals(0, round(F(-5, 10)))
self.assertTypedEquals(-2, round(F(-15, 10)))
self.assertTypedEquals(-1, round(F(-7, 10)))
self.assertEqual(False, bool(F(0, 1)))
self.assertEqual(True, bool(F(3, 2)))
self.assertTypedEquals(0.1, float(F(1, 10)))
# Check that __float__ isn't implemented by converting the
# numerator and denominator to float before dividing.
self.assertRaises(OverflowError, float, int('2'*400+'7'))
self.assertAlmostEqual(2.0/3,
float(F(int('2'*400+'7'), int('3'*400+'1'))))
self.assertTypedEquals(0.1+0j, complex(F(1,10)))
def testRound(self):
self.assertTypedEquals(F(-200), round(F(-150), -2))
self.assertTypedEquals(F(-200), round(F(-250), -2))
self.assertTypedEquals(F(30), round(F(26), -1))
self.assertTypedEquals(F(-2, 10), round(F(-15, 100), 1))
self.assertTypedEquals(F(-2, 10), round(F(-25, 100), 1))
def testArithmetic(self):
self.assertEqual(F(1, 2), F(1, 10) + F(2, 5))
self.assertEqual(F(-3, 10), F(1, 10) - F(2, 5))
self.assertEqual(F(1, 25), F(1, 10) * F(2, 5))
self.assertEqual(F(1, 4), F(1, 10) / F(2, 5))
self.assertTypedEquals(2, F(9, 10) // F(2, 5))
self.assertTypedEquals(10**23, F(10**23, 1) // F(1))
self.assertEqual(F(2, 3), F(-7, 3) % F(3, 2))
self.assertEqual(F(8, 27), F(2, 3) ** F(3))
self.assertEqual(F(27, 8), F(2, 3) ** F(-3))
self.assertTypedEquals(2.0, F(4) ** F(1, 2))
self.assertEqual(F(1, 1), +F(1, 1))
z = pow(F(-1), F(1, 2))
self.assertAlmostEqual(z.real, 0)
self.assertEqual(z.imag, 1)
# Regression test for #27539.
p = F(-1, 2) ** 0
self.assertEqual(p, F(1, 1))
self.assertEqual(p.numerator, 1)
self.assertEqual(p.denominator, 1)
p = F(-1, 2) ** -1
self.assertEqual(p, F(-2, 1))
self.assertEqual(p.numerator, -2)
self.assertEqual(p.denominator, 1)
p = F(-1, 2) ** -2
self.assertEqual(p, F(4, 1))
self.assertEqual(p.numerator, 4)
self.assertEqual(p.denominator, 1)
def testMixedArithmetic(self):
self.assertTypedEquals(F(11, 10), F(1, 10) + 1)
self.assertTypedEquals(1.1, F(1, 10) + 1.0)
self.assertTypedEquals(1.1 + 0j, F(1, 10) + (1.0 + 0j))
self.assertTypedEquals(F(11, 10), 1 + F(1, 10))
self.assertTypedEquals(1.1, 1.0 + F(1, 10))
self.assertTypedEquals(1.1 + 0j, (1.0 + 0j) + F(1, 10))
self.assertTypedEquals(F(-9, 10), F(1, 10) - 1)
self.assertTypedEquals(-0.9, F(1, 10) - 1.0)
self.assertTypedEquals(-0.9 + 0j, F(1, 10) - (1.0 + 0j))
self.assertTypedEquals(F(9, 10), 1 - F(1, 10))
self.assertTypedEquals(0.9, 1.0 - F(1, 10))
self.assertTypedEquals(0.9 + 0j, (1.0 + 0j) - F(1, 10))
self.assertTypedEquals(F(1, 10), F(1, 10) * 1)
self.assertTypedEquals(0.1, F(1, 10) * 1.0)
self.assertTypedEquals(0.1 + 0j, F(1, 10) * (1.0 + 0j))
self.assertTypedEquals(F(1, 10), 1 * F(1, 10))
self.assertTypedEquals(0.1, 1.0 * F(1, 10))
self.assertTypedEquals(0.1 + 0j, (1.0 + 0j) * F(1, 10))
self.assertTypedEquals(F(1, 10), F(1, 10) / 1)
self.assertTypedEquals(0.1, F(1, 10) / 1.0)
self.assertTypedEquals(0.1 + 0j, F(1, 10) / (1.0 + 0j))
self.assertTypedEquals(F(10, 1), 1 / F(1, 10))
self.assertTypedEquals(10.0, 1.0 / F(1, 10))
self.assertTypedEquals(10.0 + 0j, (1.0 + 0j) / F(1, 10))
self.assertTypedEquals(0, F(1, 10) // 1)
self.assertTypedEquals(0, F(1, 10) // 1.0)
self.assertTypedEquals(10, 1 // F(1, 10))
self.assertTypedEquals(10**23, 10**22 // F(1, 10))
self.assertTypedEquals(10, 1.0 // F(1, 10))
self.assertTypedEquals(F(1, 10), F(1, 10) % 1)
self.assertTypedEquals(0.1, F(1, 10) % 1.0)
self.assertTypedEquals(F(0, 1), 1 % F(1, 10))
self.assertTypedEquals(0.0, 1.0 % F(1, 10))
# No need for divmod since we don't override it.
# ** has more interesting conversion rules.
self.assertTypedEquals(F(100, 1), F(1, 10) ** -2)
self.assertTypedEquals(F(100, 1), F(10, 1) ** 2)
self.assertTypedEquals(0.1, F(1, 10) ** 1.0)
self.assertTypedEquals(0.1 + 0j, F(1, 10) ** (1.0 + 0j))
self.assertTypedEquals(4 , 2 ** F(2, 1))
z = pow(-1, F(1, 2))
self.assertAlmostEqual(0, z.real)
self.assertEqual(1, z.imag)
self.assertTypedEquals(F(1, 4) , 2 ** F(-2, 1))
self.assertTypedEquals(2.0 , 4 ** F(1, 2))
self.assertTypedEquals(0.25, 2.0 ** F(-2, 1))
self.assertTypedEquals(1.0 + 0j, (1.0 + 0j) ** F(1, 10))
self.assertRaises(ZeroDivisionError, operator.pow,
F(0, 1), -2)
def testMixingWithDecimal(self):
# Decimal refuses mixed arithmetic (but not mixed comparisons)
self.assertRaises(TypeError, operator.add,
F(3,11), Decimal('3.1415926'))
self.assertRaises(TypeError, operator.add,
Decimal('3.1415926'), F(3,11))
def testComparisons(self):
self.assertTrue(F(1, 2) < F(2, 3))
self.assertFalse(F(1, 2) < F(1, 2))
self.assertTrue(F(1, 2) <= F(2, 3))
self.assertTrue(F(1, 2) <= F(1, 2))
self.assertFalse(F(2, 3) <= F(1, 2))
self.assertTrue(F(1, 2) == F(1, 2))
self.assertFalse(F(1, 2) == F(1, 3))
self.assertFalse(F(1, 2) != F(1, 2))
self.assertTrue(F(1, 2) != F(1, 3))
def testComparisonsDummyRational(self):
self.assertTrue(F(1, 2) == DummyRational(1, 2))
self.assertTrue(DummyRational(1, 2) == F(1, 2))
self.assertFalse(F(1, 2) == DummyRational(3, 4))
self.assertFalse(DummyRational(3, 4) == F(1, 2))
self.assertTrue(F(1, 2) < DummyRational(3, 4))
self.assertFalse(F(1, 2) < DummyRational(1, 2))
self.assertFalse(F(1, 2) < DummyRational(1, 7))
self.assertFalse(F(1, 2) > DummyRational(3, 4))
self.assertFalse(F(1, 2) > DummyRational(1, 2))
self.assertTrue(F(1, 2) > DummyRational(1, 7))
self.assertTrue(F(1, 2) <= DummyRational(3, 4))
self.assertTrue(F(1, 2) <= DummyRational(1, 2))
self.assertFalse(F(1, 2) <= DummyRational(1, 7))
self.assertFalse(F(1, 2) >= DummyRational(3, 4))
self.assertTrue(F(1, 2) >= DummyRational(1, 2))
self.assertTrue(F(1, 2) >= DummyRational(1, 7))
self.assertTrue(DummyRational(1, 2) < F(3, 4))
self.assertFalse(DummyRational(1, 2) < F(1, 2))
self.assertFalse(DummyRational(1, 2) < F(1, 7))
self.assertFalse(DummyRational(1, 2) > F(3, 4))
self.assertFalse(DummyRational(1, 2) > F(1, 2))
self.assertTrue(DummyRational(1, 2) > F(1, 7))
self.assertTrue(DummyRational(1, 2) <= F(3, 4))
self.assertTrue(DummyRational(1, 2) <= F(1, 2))
self.assertFalse(DummyRational(1, 2) <= F(1, 7))
self.assertFalse(DummyRational(1, 2) >= F(3, 4))
self.assertTrue(DummyRational(1, 2) >= F(1, 2))
self.assertTrue(DummyRational(1, 2) >= F(1, 7))
def testComparisonsDummyFloat(self):
x = DummyFloat(1./3.)
y = F(1, 3)
self.assertTrue(x != y)
self.assertTrue(x < y or x > y)
self.assertFalse(x == y)
self.assertFalse(x <= y and x >= y)
self.assertTrue(y != x)
self.assertTrue(y < x or y > x)
self.assertFalse(y == x)
self.assertFalse(y <= x and y >= x)
def testMixedLess(self):
self.assertTrue(2 < F(5, 2))
self.assertFalse(2 < F(4, 2))
self.assertTrue(F(5, 2) < 3)
self.assertFalse(F(4, 2) < 2)
self.assertTrue(F(1, 2) < 0.6)
self.assertFalse(F(1, 2) < 0.4)
self.assertTrue(0.4 < F(1, 2))
self.assertFalse(0.5 < F(1, 2))
self.assertFalse(float('inf') < F(1, 2))
self.assertTrue(float('-inf') < F(0, 10))
self.assertFalse(float('nan') < F(-3, 7))
self.assertTrue(F(1, 2) < float('inf'))
self.assertFalse(F(17, 12) < float('-inf'))
self.assertFalse(F(144, -89) < float('nan'))
def testMixedLessEqual(self):
self.assertTrue(0.5 <= F(1, 2))
self.assertFalse(0.6 <= F(1, 2))
self.assertTrue(F(1, 2) <= 0.5)
self.assertFalse(F(1, 2) <= 0.4)
self.assertTrue(2 <= F(4, 2))
self.assertFalse(2 <= F(3, 2))
self.assertTrue(F(4, 2) <= 2)
self.assertFalse(F(5, 2) <= 2)
self.assertFalse(float('inf') <= F(1, 2))
self.assertTrue(float('-inf') <= F(0, 10))
self.assertFalse(float('nan') <= F(-3, 7))
self.assertTrue(F(1, 2) <= float('inf'))
self.assertFalse(F(17, 12) <= float('-inf'))
self.assertFalse(F(144, -89) <= float('nan'))
def testBigFloatComparisons(self):
# Because 10**23 can't be represented exactly as a float:
self.assertFalse(F(10**23) == float(10**23))
# The first test demonstrates why these are important.
self.assertFalse(1e23 < float(F(math.trunc(1e23) + 1)))
self.assertTrue(1e23 < F(math.trunc(1e23) + 1))
self.assertFalse(1e23 <= F(math.trunc(1e23) - 1))
self.assertTrue(1e23 > F(math.trunc(1e23) - 1))
self.assertFalse(1e23 >= F(math.trunc(1e23) + 1))
def testBigComplexComparisons(self):
self.assertFalse(F(10**23) == complex(10**23))
self.assertRaises(TypeError, operator.gt, F(10**23), complex(10**23))
self.assertRaises(TypeError, operator.le, F(10**23), complex(10**23))
x = F(3, 8)
z = complex(0.375, 0.0)
w = complex(0.375, 0.2)
self.assertTrue(x == z)
self.assertFalse(x != z)
self.assertFalse(x == w)
self.assertTrue(x != w)
for op in operator.lt, operator.le, operator.gt, operator.ge:
self.assertRaises(TypeError, op, x, z)
self.assertRaises(TypeError, op, z, x)
self.assertRaises(TypeError, op, x, w)
self.assertRaises(TypeError, op, w, x)
def testMixedEqual(self):
self.assertTrue(0.5 == F(1, 2))
self.assertFalse(0.6 == F(1, 2))
self.assertTrue(F(1, 2) == 0.5)
self.assertFalse(F(1, 2) == 0.4)
self.assertTrue(2 == F(4, 2))
self.assertFalse(2 == F(3, 2))
self.assertTrue(F(4, 2) == 2)
self.assertFalse(F(5, 2) == 2)
self.assertFalse(F(5, 2) == float('nan'))
self.assertFalse(float('nan') == F(3, 7))
self.assertFalse(F(5, 2) == float('inf'))
self.assertFalse(float('-inf') == F(2, 5))
def testStringification(self):
self.assertEqual("Fraction(7, 3)", repr(F(7, 3)))
self.assertEqual("Fraction(6283185307, 2000000000)",
repr(F('3.1415926535')))
self.assertEqual("Fraction(-1, 100000000000000000000)",
repr(F(1, -10**20)))
self.assertEqual("7/3", str(F(7, 3)))
self.assertEqual("7", str(F(7, 1)))
def testHash(self):
hmod = sys.hash_info.modulus
hinf = sys.hash_info.inf
self.assertEqual(hash(2.5), hash(F(5, 2)))
self.assertEqual(hash(10**50), hash(F(10**50)))
self.assertNotEqual(hash(float(10**23)), hash(F(10**23)))
self.assertEqual(hinf, hash(F(1, hmod)))
# Check that __hash__ produces the same value as hash(), for
# consistency with int and Decimal. (See issue #10356.)
self.assertEqual(hash(F(-1)), F(-1).__hash__())
def testApproximatePi(self):
# Algorithm borrowed from
# http://docs.python.org/lib/decimal-recipes.html
three = F(3)
lasts, t, s, n, na, d, da = 0, three, 3, 1, 0, 0, 24
while abs(s - lasts) > F(1, 10**9):
lasts = s
n, na = n+na, na+8
d, da = d+da, da+32
t = (t * n) / d
s += t
self.assertAlmostEqual(math.pi, s)
def testApproximateCos1(self):
# Algorithm borrowed from
# http://docs.python.org/lib/decimal-recipes.html
x = F(1)
i, lasts, s, fact, num, sign = 0, 0, F(1), 1, 1, 1
while abs(s - lasts) > F(1, 10**9):
lasts = s
i += 2
fact *= i * (i-1)
num *= x * x
sign *= -1
s += num / fact * sign
self.assertAlmostEqual(math.cos(1), s)
def test_copy_deepcopy_pickle(self):
r = F(13, 7)
dr = DummyFraction(13, 7)
self.assertEqual(r, loads(dumps(r)))
self.assertEqual(id(r), id(copy(r)))
self.assertEqual(id(r), id(deepcopy(r)))
self.assertNotEqual(id(dr), id(copy(dr)))
self.assertNotEqual(id(dr), id(deepcopy(dr)))
self.assertTypedEquals(dr, copy(dr))
self.assertTypedEquals(dr, deepcopy(dr))
def test_slots(self):
# Issue 4998
r = F(13, 7)
self.assertRaises(AttributeError, setattr, r, 'a', 10)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -7,246,032,613,650,884,000 | 40.736593 | 78 | 0.563962 | false |
poornimakshirsagar/sos | sos/plugins/cobbler.py | 12 | 1483 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
from sos.plugins import Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin
class Cobbler(Plugin):
plugin_name = "cobbler"
class RedHatCobbler(Cobbler, RedHatPlugin):
"""Cobbler installation server
"""
packages = ('cobbler',)
profiles = ('cluster', 'sysmgmt')
def setup(self):
self.add_copy_spec([
"/etc/cobbler",
"/var/log/cobbler",
"/var/lib/rhn/kickstarts",
"/var/lib/cobbler"
])
class DebianCobbler(Cobbler, DebianPlugin, UbuntuPlugin):
packages = ('cobbler',)
def setup(self):
self.add_copy_spec([
"/etc/cobbler",
"/var/log/cobbler",
"/var/lib/cobbler"
])
self.add_forbidden_path("/var/lib/cobbler/isos")
# vim: set et ts=4 sw=4 :
| gpl-2.0 | 1,689,091,794,866,927,600 | 28.66 | 72 | 0.662171 | false |
saquiba2/numpy2 | numpy/distutils/command/build_ext.py | 149 | 22493 | """ Modified version of build_ext that handles fortran source files.
"""
from __future__ import division, absolute_import, print_function
import os
import sys
from glob import glob
from distutils.dep_util import newer_group
from distutils.command.build_ext import build_ext as old_build_ext
from distutils.errors import DistutilsFileError, DistutilsSetupError,\
DistutilsError
from distutils.file_util import copy_file
from numpy.distutils import log
from numpy.distutils.exec_command import exec_command
from numpy.distutils.system_info import combine_paths
from numpy.distutils.misc_util import filter_sources, has_f_sources, \
has_cxx_sources, get_ext_source_files, \
get_numpy_include_dirs, is_sequence, get_build_architecture, \
msvc_version
from numpy.distutils.command.config_compiler import show_fortran_compilers
try:
set
except NameError:
from sets import Set as set
class build_ext (old_build_ext):
description = "build C/C++/F extensions (compile/link to build directory)"
user_options = old_build_ext.user_options + [
('fcompiler=', None,
"specify the Fortran compiler type"),
('parallel=', 'j',
"number of parallel jobs"),
]
help_options = old_build_ext.help_options + [
('help-fcompiler', None, "list available Fortran compilers",
show_fortran_compilers),
]
def initialize_options(self):
old_build_ext.initialize_options(self)
self.fcompiler = None
self.parallel = None
def finalize_options(self):
if self.parallel:
try:
self.parallel = int(self.parallel)
except ValueError:
raise ValueError("--parallel/-j argument must be an integer")
# Ensure that self.include_dirs and self.distribution.include_dirs
# refer to the same list object. finalize_options will modify
# self.include_dirs, but self.distribution.include_dirs is used
# during the actual build.
# self.include_dirs is None unless paths are specified with
# --include-dirs.
# The include paths will be passed to the compiler in the order:
# numpy paths, --include-dirs paths, Python include path.
if isinstance(self.include_dirs, str):
self.include_dirs = self.include_dirs.split(os.pathsep)
incl_dirs = self.include_dirs or []
if self.distribution.include_dirs is None:
self.distribution.include_dirs = []
self.include_dirs = self.distribution.include_dirs
self.include_dirs.extend(incl_dirs)
old_build_ext.finalize_options(self)
self.set_undefined_options('build', ('parallel', 'parallel'))
def run(self):
if not self.extensions:
return
# Make sure that extension sources are complete.
self.run_command('build_src')
if self.distribution.has_c_libraries():
if self.inplace:
if self.distribution.have_run.get('build_clib'):
log.warn('build_clib already run, it is too late to ' \
'ensure in-place build of build_clib')
build_clib = self.distribution.get_command_obj('build_clib')
else:
build_clib = self.distribution.get_command_obj('build_clib')
build_clib.inplace = 1
build_clib.ensure_finalized()
build_clib.run()
self.distribution.have_run['build_clib'] = 1
else:
self.run_command('build_clib')
build_clib = self.get_finalized_command('build_clib')
self.library_dirs.append(build_clib.build_clib)
else:
build_clib = None
# Not including C libraries to the list of
# extension libraries automatically to prevent
# bogus linking commands. Extensions must
# explicitly specify the C libraries that they use.
from distutils.ccompiler import new_compiler
from numpy.distutils.fcompiler import new_fcompiler
compiler_type = self.compiler
# Initialize C compiler:
self.compiler = new_compiler(compiler=compiler_type,
verbose=self.verbose,
dry_run=self.dry_run,
force=self.force)
self.compiler.customize(self.distribution)
self.compiler.customize_cmd(self)
self.compiler.show_customization()
# Create mapping of libraries built by build_clib:
clibs = {}
if build_clib is not None:
for libname, build_info in build_clib.libraries or []:
if libname in clibs and clibs[libname] != build_info:
log.warn('library %r defined more than once,'\
' overwriting build_info\n%s... \nwith\n%s...' \
% (libname, repr(clibs[libname])[:300], repr(build_info)[:300]))
clibs[libname] = build_info
# .. and distribution libraries:
for libname, build_info in self.distribution.libraries or []:
if libname in clibs:
# build_clib libraries have a precedence before distribution ones
continue
clibs[libname] = build_info
# Determine if C++/Fortran 77/Fortran 90 compilers are needed.
# Update extension libraries, library_dirs, and macros.
all_languages = set()
for ext in self.extensions:
ext_languages = set()
c_libs = []
c_lib_dirs = []
macros = []
for libname in ext.libraries:
if libname in clibs:
binfo = clibs[libname]
c_libs += binfo.get('libraries', [])
c_lib_dirs += binfo.get('library_dirs', [])
for m in binfo.get('macros', []):
if m not in macros:
macros.append(m)
for l in clibs.get(libname, {}).get('source_languages', []):
ext_languages.add(l)
if c_libs:
new_c_libs = ext.libraries + c_libs
log.info('updating extension %r libraries from %r to %r'
% (ext.name, ext.libraries, new_c_libs))
ext.libraries = new_c_libs
ext.library_dirs = ext.library_dirs + c_lib_dirs
if macros:
log.info('extending extension %r defined_macros with %r'
% (ext.name, macros))
ext.define_macros = ext.define_macros + macros
# determine extension languages
if has_f_sources(ext.sources):
ext_languages.add('f77')
if has_cxx_sources(ext.sources):
ext_languages.add('c++')
l = ext.language or self.compiler.detect_language(ext.sources)
if l:
ext_languages.add(l)
# reset language attribute for choosing proper linker
if 'c++' in ext_languages:
ext_language = 'c++'
elif 'f90' in ext_languages:
ext_language = 'f90'
elif 'f77' in ext_languages:
ext_language = 'f77'
else:
ext_language = 'c' # default
if l and l != ext_language and ext.language:
log.warn('resetting extension %r language from %r to %r.' %
(ext.name, l, ext_language))
ext.language = ext_language
# global language
all_languages.update(ext_languages)
need_f90_compiler = 'f90' in all_languages
need_f77_compiler = 'f77' in all_languages
need_cxx_compiler = 'c++' in all_languages
# Initialize C++ compiler:
if need_cxx_compiler:
self._cxx_compiler = new_compiler(compiler=compiler_type,
verbose=self.verbose,
dry_run=self.dry_run,
force=self.force)
compiler = self._cxx_compiler
compiler.customize(self.distribution, need_cxx=need_cxx_compiler)
compiler.customize_cmd(self)
compiler.show_customization()
self._cxx_compiler = compiler.cxx_compiler()
else:
self._cxx_compiler = None
# Initialize Fortran 77 compiler:
if need_f77_compiler:
ctype = self.fcompiler
self._f77_compiler = new_fcompiler(compiler=self.fcompiler,
verbose=self.verbose,
dry_run=self.dry_run,
force=self.force,
requiref90=False,
c_compiler=self.compiler)
fcompiler = self._f77_compiler
if fcompiler:
ctype = fcompiler.compiler_type
fcompiler.customize(self.distribution)
if fcompiler and fcompiler.get_version():
fcompiler.customize_cmd(self)
fcompiler.show_customization()
else:
self.warn('f77_compiler=%s is not available.' %
(ctype))
self._f77_compiler = None
else:
self._f77_compiler = None
# Initialize Fortran 90 compiler:
if need_f90_compiler:
ctype = self.fcompiler
self._f90_compiler = new_fcompiler(compiler=self.fcompiler,
verbose=self.verbose,
dry_run=self.dry_run,
force=self.force,
requiref90=True,
c_compiler = self.compiler)
fcompiler = self._f90_compiler
if fcompiler:
ctype = fcompiler.compiler_type
fcompiler.customize(self.distribution)
if fcompiler and fcompiler.get_version():
fcompiler.customize_cmd(self)
fcompiler.show_customization()
else:
self.warn('f90_compiler=%s is not available.' %
(ctype))
self._f90_compiler = None
else:
self._f90_compiler = None
# Build extensions
self.build_extensions()
def swig_sources(self, sources):
        # Do nothing. Swig sources have been handled in build_src command.
return sources
def build_extension(self, ext):
sources = ext.sources
if sources is None or not is_sequence(sources):
raise DistutilsSetupError(
("in 'ext_modules' option (extension '%s'), " +
"'sources' must be present and must be " +
"a list of source filenames") % ext.name)
sources = list(sources)
if not sources:
return
fullname = self.get_ext_fullname(ext.name)
if self.inplace:
modpath = fullname.split('.')
package = '.'.join(modpath[0:-1])
base = modpath[-1]
build_py = self.get_finalized_command('build_py')
package_dir = build_py.get_package_dir(package)
ext_filename = os.path.join(package_dir,
self.get_ext_filename(base))
else:
ext_filename = os.path.join(self.build_lib,
self.get_ext_filename(fullname))
depends = sources + ext.depends
if not (self.force or newer_group(depends, ext_filename, 'newer')):
log.debug("skipping '%s' extension (up-to-date)", ext.name)
return
else:
log.info("building '%s' extension", ext.name)
extra_args = ext.extra_compile_args or []
macros = ext.define_macros[:]
for undef in ext.undef_macros:
macros.append((undef,))
c_sources, cxx_sources, f_sources, fmodule_sources = \
filter_sources(ext.sources)
if self.compiler.compiler_type=='msvc':
if cxx_sources:
# Needed to compile kiva.agg._agg extension.
extra_args.append('/Zm1000')
# this hack works around the msvc compiler attributes
# problem, msvc uses its own convention :(
c_sources += cxx_sources
cxx_sources = []
# Set Fortran/C++ compilers for compilation and linking.
if ext.language=='f90':
fcompiler = self._f90_compiler
elif ext.language=='f77':
fcompiler = self._f77_compiler
else: # in case ext.language is c++, for instance
fcompiler = self._f90_compiler or self._f77_compiler
if fcompiler is not None:
fcompiler.extra_f77_compile_args = (ext.extra_f77_compile_args or []) if hasattr(ext, 'extra_f77_compile_args') else []
fcompiler.extra_f90_compile_args = (ext.extra_f90_compile_args or []) if hasattr(ext, 'extra_f90_compile_args') else []
cxx_compiler = self._cxx_compiler
# check for the availability of required compilers
if cxx_sources and cxx_compiler is None:
            raise DistutilsError("extension %r has C++ sources " \
"but no C++ compiler found" % (ext.name))
if (f_sources or fmodule_sources) and fcompiler is None:
raise DistutilsError("extension %r has Fortran sources " \
"but no Fortran compiler found" % (ext.name))
if ext.language in ['f77', 'f90'] and fcompiler is None:
self.warn("extension %r has Fortran libraries " \
"but no Fortran linker found, using default linker" % (ext.name))
if ext.language=='c++' and cxx_compiler is None:
self.warn("extension %r has C++ libraries " \
"but no C++ linker found, using default linker" % (ext.name))
kws = {'depends':ext.depends}
output_dir = self.build_temp
include_dirs = ext.include_dirs + get_numpy_include_dirs()
c_objects = []
if c_sources:
log.info("compiling C sources")
c_objects = self.compiler.compile(c_sources,
output_dir=output_dir,
macros=macros,
include_dirs=include_dirs,
debug=self.debug,
extra_postargs=extra_args,
**kws)
if cxx_sources:
log.info("compiling C++ sources")
c_objects += cxx_compiler.compile(cxx_sources,
output_dir=output_dir,
macros=macros,
include_dirs=include_dirs,
debug=self.debug,
extra_postargs=extra_args,
**kws)
extra_postargs = []
f_objects = []
if fmodule_sources:
log.info("compiling Fortran 90 module sources")
module_dirs = ext.module_dirs[:]
module_build_dir = os.path.join(
self.build_temp, os.path.dirname(
self.get_ext_filename(fullname)))
self.mkpath(module_build_dir)
if fcompiler.module_dir_switch is None:
existing_modules = glob('*.mod')
extra_postargs += fcompiler.module_options(
module_dirs, module_build_dir)
f_objects += fcompiler.compile(fmodule_sources,
output_dir=self.build_temp,
macros=macros,
include_dirs=include_dirs,
debug=self.debug,
extra_postargs=extra_postargs,
depends=ext.depends)
if fcompiler.module_dir_switch is None:
for f in glob('*.mod'):
if f in existing_modules:
continue
t = os.path.join(module_build_dir, f)
if os.path.abspath(f)==os.path.abspath(t):
continue
if os.path.isfile(t):
os.remove(t)
try:
self.move_file(f, module_build_dir)
except DistutilsFileError:
log.warn('failed to move %r to %r' %
(f, module_build_dir))
if f_sources:
log.info("compiling Fortran sources")
f_objects += fcompiler.compile(f_sources,
output_dir=self.build_temp,
macros=macros,
include_dirs=include_dirs,
debug=self.debug,
extra_postargs=extra_postargs,
depends=ext.depends)
objects = c_objects + f_objects
if ext.extra_objects:
objects.extend(ext.extra_objects)
extra_args = ext.extra_link_args or []
libraries = self.get_libraries(ext)[:]
library_dirs = ext.library_dirs[:]
linker = self.compiler.link_shared_object
# Always use system linker when using MSVC compiler.
if self.compiler.compiler_type in ('msvc', 'intelw', 'intelemw'):
# expand libraries with fcompiler libraries as we are
# not using fcompiler linker
self._libs_with_msvc_and_fortran(fcompiler, libraries, library_dirs)
elif ext.language in ['f77', 'f90'] and fcompiler is not None:
linker = fcompiler.link_shared_object
if ext.language=='c++' and cxx_compiler is not None:
linker = cxx_compiler.link_shared_object
linker(objects, ext_filename,
libraries=libraries,
library_dirs=library_dirs,
runtime_library_dirs=ext.runtime_library_dirs,
extra_postargs=extra_args,
export_symbols=self.get_export_symbols(ext),
debug=self.debug,
build_temp=self.build_temp,
target_lang=ext.language)
def _add_dummy_mingwex_sym(self, c_sources):
build_src = self.get_finalized_command("build_src").build_src
build_clib = self.get_finalized_command("build_clib").build_clib
objects = self.compiler.compile([os.path.join(build_src,
"gfortran_vs2003_hack.c")],
output_dir=self.build_temp)
self.compiler.create_static_lib(objects, "_gfortran_workaround", output_dir=build_clib, debug=self.debug)
def _libs_with_msvc_and_fortran(self, fcompiler, c_libraries,
c_library_dirs):
if fcompiler is None: return
for libname in c_libraries:
if libname.startswith('msvc'): continue
fileexists = False
for libdir in c_library_dirs or []:
libfile = os.path.join(libdir, '%s.lib' % (libname))
if os.path.isfile(libfile):
fileexists = True
break
if fileexists: continue
# make g77-compiled static libs available to MSVC
fileexists = False
for libdir in c_library_dirs:
libfile = os.path.join(libdir, 'lib%s.a' % (libname))
if os.path.isfile(libfile):
# copy libname.a file to name.lib so that MSVC linker
# can find it
libfile2 = os.path.join(self.build_temp, libname + '.lib')
copy_file(libfile, libfile2)
if self.build_temp not in c_library_dirs:
c_library_dirs.append(self.build_temp)
fileexists = True
break
if fileexists: continue
log.warn('could not find library %r in directories %s'
% (libname, c_library_dirs))
# Always use system linker when using MSVC compiler.
f_lib_dirs = []
for dir in fcompiler.library_dirs:
# correct path when compiling in Cygwin but with normal Win
# Python
if dir.startswith('/usr/lib'):
s, o = exec_command(['cygpath', '-w', dir], use_tee=False)
if not s:
dir = o
f_lib_dirs.append(dir)
c_library_dirs.extend(f_lib_dirs)
# make g77-compiled static libs available to MSVC
for lib in fcompiler.libraries:
if not lib.startswith('msvc'):
c_libraries.append(lib)
p = combine_paths(f_lib_dirs, 'lib' + lib + '.a')
if p:
dst_name = os.path.join(self.build_temp, lib + '.lib')
if not os.path.isfile(dst_name):
copy_file(p[0], dst_name)
if self.build_temp not in c_library_dirs:
c_library_dirs.append(self.build_temp)
def get_source_files (self):
self.check_extensions_list(self.extensions)
filenames = []
for ext in self.extensions:
filenames.extend(get_ext_source_files(ext))
return filenames
def get_outputs (self):
self.check_extensions_list(self.extensions)
outputs = []
for ext in self.extensions:
if not ext.sources:
continue
fullname = self.get_ext_fullname(ext.name)
outputs.append(os.path.join(self.build_lib,
self.get_ext_filename(fullname)))
return outputs
| bsd-3-clause | -5,282,162,224,618,680,000 | 42.090038 | 131 | 0.522207 | false |
snphbaum/scikit-gpuppy | skgpuppy/MLE.py | 1 | 5756 | # Copyright (C) 2015 Philipp Baumgaertel
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the BSD license. See the LICENSE.txt file for details.
import numpy as np
from scipy.optimize import fmin
from scipy.misc import derivative
from scipy.stats import norm
from .Utilities import integrate
from numpy import Inf
from math import fabs, log
class MLE(object):
def __init__(self, density, theta0, support=None, dims=None, fisher_matrix=None):
"""
A class for numerical maximum likelihood estimation
:param density: lambda x,theta with x and theta being vectors
:param theta0: the initial parameters of the density
:param dims: Number of dimensions of x
:param support: The support of the density
:param fisher_matrix: Fisher Information Matrix for the density (contains functions of theta)
.. note:: Either support or dims has to be supplied (support is recommended for estimating the fisher information)
"""
assert(dims is not None or support is not None)
self.theta0 = theta0
self.fisher_min = None
if support is not None:
self.support = support #TODO: Support should be functions of theta
else:
self.support = [(-Inf,Inf) for i in range(dims)]
self.density = density
self.fisher_matrix = fisher_matrix
def _get_nll_func(self, observations):
"""
Build the negative log-likelihood function for the given observations.
:return: the negative log-likelihood function
"""
def nll_func(theta):
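# The densities handled here assume strictly positive parameters, so any
# non-positive component is penalized with a very large value instead of
# evaluating (and possibly failing inside) the density.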
for p in theta:
if p <= 0:
return 1.0e+20
sum = 0.0
for x in observations:
sum -= np.log(self.density(x, theta))
return sum
return nll_func
def mle(self, observations):
"""
:param observations: vector of x vectors
:return: theta (estimated using maximum likelihood estimation)
"""
theta_start = self.theta0
func = self._get_nll_func(observations)
theta_min = fmin(func, theta_start)#,xtol=1e-6,ftol=1e-6)
return theta_min
def get_fisher_function(self, order=1):
"""
Calculate the fisher information matrix
:param order: using derivates of this order (1 or 2)
:return: function (w.r.t. theta) calculating the fisher information matrix
.. note:: If the fisher information matrix was provided to the constructor, then this is used instead of the numerical methods.
"""
assert(order == 1 or order == 2)
def fisher_matrix_function(theta,i,j = None):
if j is None:
j = i
return self.fisher_matrix[i][j](theta)
def fisher(theta, i, j = None):
"""
Fisher information using the first order derivative
:param theta: the theta of the density
:param i: The ith component of the diagonal of the fisher information matrix will be returned (if j is None)
:param j: The i,j th component of the fisher information matrix will be returned
"""
#Bring it in a form that we can derive
fh = lambda ti, t0, tn, x: np.log(self.density(x, list(t0) + [ti] + list(tn)))
# The derivative
f_d_theta_i = lambda x: derivative(fh, theta[i], dx=1e-5, n=1, args=(theta[0:i], theta[i + 1:], x))
if j is not None:
f_d_theta_j = lambda x: derivative(fh, theta[j], dx=1e-5, n=1, args=(theta[0:j], theta[j + 1:], x))
f = lambda x: np.float128(0) if fabs(self.density(x, theta)) < 1e-5 else f_d_theta_i(x) * f_d_theta_j(x) * self.density(x, theta)
else:
# The function to integrate
f = lambda x: np.float128(0) if fabs(self.density(x, theta)) < 1e-5 else f_d_theta_i(x) ** 2 * self.density(x, theta)
#First order
result = integrate(f, self.support)
return result
def fisher_2nd(theta,i, j = None):
"""
Fisher information using the second order derivative
:param theta: the theta of the density
:param i: The ith component of the diagonal of the fisher information matrix will be returned (if j is None)
:param j: The i,j th component of the fisher information matrix will be returned
"""
# The second order derivate version
fh = lambda ti, t0, tn, x: np.log(self.density(x, list(t0) + [ti] + list(tn)))
if j is not None:
raise NotImplementedError()
else:
f_dd_theta_i = lambda x : derivative(fh, theta[i], dx = 1e-5, n=2, args=(theta[0:i],theta[i+1:],x))
f2 = lambda x: 0 if fabs(self.density(x,theta)) < 1e-5 else f_dd_theta_i(x) * self.density(x,theta)
result = -integrate(f2,self.support)
return result
if self.fisher_matrix is not None:
return fisher_matrix_function
if order == 1:
return fisher
elif order == 2:
return fisher_2nd
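# Illustrative use of the returned callable (not from the original source;
# mle_instance is a hypothetical MLE object):
#   fisher = mle_instance.get_fisher_function(order=1)
#   info_00 = fisher(theta, 0)     # diagonal entry I[0][0]
#   info_01 = fisher(theta, 0, 1)  # off-diagonal entry I[0][1]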
def sigma(self, theta, observations=None, n=1):
"""
Estimate the quality of the MLE.
:param theta: The parameters theta of the density
:param observations: A list of observation vectors
:param n: Number of observations
:return: The variances corresponding to the maximum likelihood estimates of theta (quality of the estimation) as 1-d array (i.e. diagonal of the cov matrix)
.. note:: Either the observations vector or n has to be provided.
"""
l2d = []
if observations is not None:
n = 1
func = self._get_nll_func(observations)
for i in range(len(theta)):
#Bring it in a form that we can derive
f = lambda ti, t0, tn: func(list(t0) + [ti] + list(tn))
l2d.append(derivative(f, theta[i], dx=1e-5, n=2, args=(theta[0:i], theta[i + 1:])))
else:
#Fisher Information
for i in range(len(theta)):
fisher = self.get_fisher_function()
result = fisher(theta, i)
l2d.append(result)
return 1.0 / np.sqrt(np.array(l2d) * n)
def mle_ci(self, observations, alpha=0.05):
"""
Confidence interval for the maximum likelihood estimates (95% by default, i.e. alpha=0.05).
:return: lower bound, upper bound
"""
theta = np.array(self.mle(observations))
sigma = self.sigma(theta, observations)
return theta - norm.ppf(1-alpha/2) * sigma, theta + norm.ppf(1-alpha/2) * sigma
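# Illustrative usage sketch (not part of the original module): fit the location
# and scale of a 1-D normal density with the MLE helper defined above. The
# density, starting values and sample size below are made-up assumptions for
# demonstration purposes only.
if __name__ == '__main__':
    _density = lambda x, theta: norm.pdf(x[0], loc=theta[0], scale=theta[1])
    _samples = np.random.normal(loc=2.0, scale=0.5, size=200)
    _observations = [[s] for s in _samples]
    _estimator = MLE(_density, theta0=[1.0, 1.0], dims=1)
    print(_estimator.mle(_observations))     # point estimates for (loc, scale)
    print(_estimator.mle_ci(_observations))  # approximate 95% confidence bounds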
| bsd-3-clause | -5,072,560,813,698,661,000 | 28.823834 | 158 | 0.680681 | false |
jusdng/odoo | openerp/addons/base/tests/test_acl.py | 338 | 6323 | import unittest2
from lxml import etree
import openerp
from openerp.tools.misc import mute_logger
from openerp.tests import common
# test group that demo user should not have
GROUP_TECHNICAL_FEATURES = 'base.group_no_one'
class TestACL(common.TransactionCase):
def setUp(self):
super(TestACL, self).setUp()
self.res_currency = self.registry('res.currency')
self.res_partner = self.registry('res.partner')
self.res_users = self.registry('res.users')
_, self.demo_uid = self.registry('ir.model.data').get_object_reference(self.cr, self.uid, 'base', 'user_demo')
self.tech_group = self.registry('ir.model.data').get_object(self.cr, self.uid,
*(GROUP_TECHNICAL_FEATURES.split('.')))
def _set_field_groups(self, model, field_name, groups):
field = model._fields[field_name]
column = model._columns[field_name]
old_groups = field.groups
old_prefetch = column._prefetch
field.groups = groups
column.groups = groups
column._prefetch = False
@self.addCleanup
def cleanup():
field.groups = old_groups
column.groups = old_groups
column._prefetch = old_prefetch
def test_field_visibility_restriction(self):
"""Check that model-level ``groups`` parameter effectively restricts access to that
field for users who do not belong to one of the explicitly allowed groups"""
# Verify the test environment first
original_fields = self.res_currency.fields_get(self.cr, self.demo_uid, [])
form_view = self.res_currency.fields_view_get(self.cr, self.demo_uid, False, 'form')
view_arch = etree.fromstring(form_view.get('arch'))
has_tech_feat = self.res_users.has_group(self.cr, self.demo_uid, GROUP_TECHNICAL_FEATURES)
self.assertFalse(has_tech_feat, "`demo` user should not belong to the restricted group before the test")
self.assertTrue('accuracy' in original_fields, "'accuracy' field must be properly visible before the test")
self.assertNotEquals(view_arch.xpath("//field[@name='accuracy']"), [],
"Field 'accuracy' must be found in view definition before the test")
# restrict access to the field and check it's gone
self._set_field_groups(self.res_currency, 'accuracy', GROUP_TECHNICAL_FEATURES)
fields = self.res_currency.fields_get(self.cr, self.demo_uid, [])
form_view = self.res_currency.fields_view_get(self.cr, self.demo_uid, False, 'form')
view_arch = etree.fromstring(form_view.get('arch'))
self.assertFalse('accuracy' in fields, "'accuracy' field should be gone")
self.assertEquals(view_arch.xpath("//field[@name='accuracy']"), [],
"Field 'accuracy' must not be found in view definition")
# Make demo user a member of the restricted group and check that the field is back
self.tech_group.write({'users': [(4, self.demo_uid)]})
has_tech_feat = self.res_users.has_group(self.cr, self.demo_uid, GROUP_TECHNICAL_FEATURES)
fields = self.res_currency.fields_get(self.cr, self.demo_uid, [])
form_view = self.res_currency.fields_view_get(self.cr, self.demo_uid, False, 'form')
view_arch = etree.fromstring(form_view.get('arch'))
#import pprint; pprint.pprint(fields); pprint.pprint(form_view)
self.assertTrue(has_tech_feat, "`demo` user should now belong to the restricted group")
self.assertTrue('accuracy' in fields, "'accuracy' field must be properly visible again")
self.assertNotEquals(view_arch.xpath("//field[@name='accuracy']"), [],
"Field 'accuracy' must be found in view definition again")
#cleanup
self.tech_group.write({'users': [(3, self.demo_uid)]})
@mute_logger('openerp.models')
def test_field_crud_restriction(self):
"Read/Write RPC access to restricted field should be forbidden"
# Verify the test environment first
has_tech_feat = self.res_users.has_group(self.cr, self.demo_uid, GROUP_TECHNICAL_FEATURES)
self.assertFalse(has_tech_feat, "`demo` user should not belong to the restricted group")
self.assert_(self.res_partner.read(self.cr, self.demo_uid, [1], ['bank_ids']))
self.assert_(self.res_partner.write(self.cr, self.demo_uid, [1], {'bank_ids': []}))
# Now restrict access to the field and check it's forbidden
self._set_field_groups(self.res_partner, 'bank_ids', GROUP_TECHNICAL_FEATURES)
with self.assertRaises(openerp.osv.orm.except_orm):
self.res_partner.read(self.cr, self.demo_uid, [1], ['bank_ids'])
with self.assertRaises(openerp.osv.orm.except_orm):
self.res_partner.write(self.cr, self.demo_uid, [1], {'bank_ids': []})
# Add the restricted group, and check that it works again
self.tech_group.write({'users': [(4, self.demo_uid)]})
has_tech_feat = self.res_users.has_group(self.cr, self.demo_uid, GROUP_TECHNICAL_FEATURES)
self.assertTrue(has_tech_feat, "`demo` user should now belong to the restricted group")
self.assert_(self.res_partner.read(self.cr, self.demo_uid, [1], ['bank_ids']))
self.assert_(self.res_partner.write(self.cr, self.demo_uid, [1], {'bank_ids': []}))
#cleanup
self.tech_group.write({'users': [(3, self.demo_uid)]})
@mute_logger('openerp.models')
def test_fields_browse_restriction(self):
"""Test access to records having restricted fields"""
self._set_field_groups(self.res_partner, 'email', GROUP_TECHNICAL_FEATURES)
pid = self.res_partner.search(self.cr, self.demo_uid, [], limit=1)[0]
part = self.res_partner.browse(self.cr, self.demo_uid, pid)
# accessing fields must no raise exceptions...
part.name
# ... except if they are restricted
with self.assertRaises(openerp.osv.orm.except_orm) as cm:
with mute_logger('openerp.models'):
part.email
self.assertEqual(cm.exception.args[0], 'AccessError')
if __name__ == '__main__':
unittest2.main()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 3,459,967,571,127,203,000 | 50.406504 | 118 | 0.642417 | false |
lushfuture/Phys-Comp | five-test/node_modules/johnny-five/junk/firmata-latest/node_modules/serialport/node_modules/node-gyp/legacy/tools/gyp/pylib/gyp/xcodeproj_file.py | 42 | 116836 | # Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Xcode project file generator.
This module is both an Xcode project file generator and a documentation of the
Xcode project file format. Knowledge of the project file format was gained
based on extensive experience with Xcode, and by making changes to projects in
Xcode.app and observing the resultant changes in the associated project files.
XCODE PROJECT FILES
The generator targets the file format as written by Xcode 3.2 (specifically,
3.2.6), but past experience has taught that the format has not changed
significantly in the past several years, and future versions of Xcode are able
to read older project files.
Xcode project files are "bundled": the project "file" from an end-user's
perspective is actually a directory with an ".xcodeproj" extension. The
project file from this module's perspective is actually a file inside this
directory, always named "project.pbxproj". This file contains a complete
description of the project and is all that is needed to use the xcodeproj.
Other files contained in the xcodeproj directory are simply used to store
per-user settings, such as the state of various UI elements in the Xcode
application.
The project.pbxproj file is a property list, stored in a format almost
identical to the NeXTstep property list format. The file is able to carry
Unicode data, and is encoded in UTF-8. The root element in the property list
is a dictionary that contains several properties of minimal interest, and two
properties of immense interest. The most important property is a dictionary
named "objects". The entire structure of the project is represented by the
children of this property. The objects dictionary is keyed by unique 96-bit
values represented by 24 uppercase hexadecimal characters. Each value in the
objects dictionary is itself a dictionary, describing an individual object.
Each object in the dictionary is a member of a class, which is identified by
the "isa" property of each object. A variety of classes are represented in a
project file. Objects can refer to other objects by ID, using the 24-character
hexadecimal object key. A project's objects form a tree, with a root object
of class PBXProject at the root. As an example, the PBXProject object serves
as parent to an XCConfigurationList object defining the build configurations
used in the project, a PBXGroup object serving as a container for all files
referenced in the project, and a list of target objects, each of which defines
a target in the project. There are several different types of target object,
such as PBXNativeTarget and PBXAggregateTarget. In this module, this
relationship is expressed by having each target type derive from an abstract
base named XCTarget.
The project.pbxproj file's root dictionary also contains a property, sibling to
the "objects" dictionary, named "rootObject". The value of rootObject is a
24-character object key referring to the root PBXProject object in the
objects dictionary.
In Xcode, every file used as input to a target or produced as a final product
of a target must appear somewhere in the hierarchy rooted at the PBXGroup
object referenced by the PBXProject's mainGroup property. A PBXGroup is
generally represented as a folder in the Xcode application. PBXGroups can
contain other PBXGroups as well as PBXFileReferences, which are pointers to
actual files.
Each XCTarget contains a list of build phases, represented in this module by
the abstract base XCBuildPhase. Examples of concrete XCBuildPhase derivations
are PBXSourcesBuildPhase and PBXFrameworksBuildPhase, which correspond to the
"Compile Sources" and "Link Binary With Libraries" phases displayed in the
Xcode application. Files used as input to these phases (for example, source
files in the former case and libraries and frameworks in the latter) are
represented by PBXBuildFile objects, referenced by elements of "files" lists
in XCTarget objects. Each PBXBuildFile object refers to a PBXFileReference
object as a "weak" reference: it does not "own" the PBXFileReference, which is
owned by the root object's mainGroup or a descendant group. In most cases, the
layer of indirection between an XCBuildPhase and a PBXFileReference via a
PBXBuildFile appears extraneous, but there's actually one reason for this:
file-specific compiler flags are added to the PBXBuildFile object so as to
allow a single file to be a member of multiple targets while having distinct
compiler flags for each. These flags can be modified in the Xcode application
in the "Build" tab of a File Info window.
When a project is open in the Xcode application, Xcode will rewrite it. As
such, this module is careful to adhere to the formatting used by Xcode, to
avoid insignificant changes appearing in the file when it is used in the
Xcode application. This will keep version control repositories happy, and
makes it possible to compare a project file used in Xcode to one generated by
this module to determine if any significant changes were made in the
application.
Xcode has its own way of assigning 24-character identifiers to each object,
which is not duplicated here. Because the identifier is only generated
once, when an object is created, and is then left unchanged, there is no need
to attempt to duplicate Xcode's behavior in this area. The generator is free
to select any identifier, even at random, to refer to the objects it creates,
and Xcode will retain those identifiers and use them when subsequently
rewriting the project file. However, the generator would choose new random
identifiers each time the project files are generated, leading to difficulties
comparing "used" project files to "pristine" ones produced by this module,
and causing the appearance of changes as every object identifier is changed
when updated projects are checked in to a version control repository. To
mitigate this problem, this module chooses identifiers in a more deterministic
way, by hashing a description of each object as well as its parent and ancestor
objects. This strategy should result in minimal "shift" in IDs as successive
generations of project files are produced.
THIS MODULE
This module introduces several classes, all derived from the XCObject class.
Nearly all of the "brains" are built into the XCObject class, which understands
how to create and modify objects, maintain the proper tree structure, compute
identifiers, and print objects. For the most part, classes derived from
XCObject need only provide a _schema class object, a dictionary that
expresses what properties objects of the class may contain.
Given this structure, it's possible to build a minimal project file by creating
objects of the appropriate types and making the proper connections:
config_list = XCConfigurationList()
group = PBXGroup()
project = PBXProject({'buildConfigurationList': config_list,
'mainGroup': group})
With the project object set up, it can be added to an XCProjectFile object.
XCProjectFile is a pseudo-class in the sense that it is a concrete XCObject
subclass that does not actually correspond to a class type found in a project
file. Rather, it is used to represent the project file's root dictionary.
Printing an XCProjectFile will print the entire project file, including the
full "objects" dictionary.
project_file = XCProjectFile({'rootObject': project})
project_file.ComputeIDs()
project_file.Print()
Xcode project files are always encoded in UTF-8. This module will accept
strings of either the str class or the unicode class. Strings of class str
are assumed to already be encoded in UTF-8. Obviously, if you're just using
ASCII, you won't encounter difficulties because ASCII is a UTF-8 subset.
Strings of class unicode are handled properly and encoded in UTF-8 when
a project file is output.
"""
import gyp.common
import posixpath
import re
import struct
import sys
# hashlib is supplied as of Python 2.5 as the replacement interface for sha
# and other secure hashes. In 2.6, sha is deprecated. Import hashlib if
# available, avoiding a deprecation warning under 2.6. Import sha otherwise,
# preserving 2.4 compatibility.
try:
import hashlib
_new_sha1 = hashlib.sha1
except ImportError:
import sha
_new_sha1 = sha.new
# See XCObject._EncodeString. This pattern is used to determine when a string
# can be printed unquoted. Strings that match this pattern may be printed
# unquoted. Strings that do not match must be quoted and may be further
# transformed to be properly encoded. Note that this expression matches the
# characters listed with "+", for 1 or more occurrences: if a string is empty,
# it must not match this pattern, because it needs to be encoded as "".
_unquoted = re.compile('^[A-Za-z0-9$./_]+$')
# Strings that match this pattern are quoted regardless of what _unquoted says.
# Oddly, Xcode will quote any string with a run of three or more underscores.
_quoted = re.compile('___')
# This pattern should match any character that needs to be escaped by
# XCObject._EncodeString. See that function.
_escaped = re.compile('[\\\\"]|[^ -~]')
# Used by SourceTreeAndPathFromPath
_path_leading_variable = re.compile('^\$\((.*?)\)(/(.*))?$')
def SourceTreeAndPathFromPath(input_path):
"""Given input_path, returns a tuple with sourceTree and path values.
Examples:
input_path (source_tree, output_path)
'$(VAR)/path' ('VAR', 'path')
'$(VAR)' ('VAR', None)
'path' (None, 'path')
"""
source_group_match = _path_leading_variable.match(input_path)
if source_group_match:
source_tree = source_group_match.group(1)
output_path = source_group_match.group(3) # This may be None.
else:
source_tree = None
output_path = input_path
return (source_tree, output_path)
def ConvertVariablesToShellSyntax(input_string):
return re.sub('\$\((.*?)\)', '${\\1}', input_string)
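# Illustrative example (not from the original source):
#   ConvertVariablesToShellSyntax('$(BUILT_PRODUCTS_DIR)/lib') -> '${BUILT_PRODUCTS_DIR}/lib'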
class XCObject(object):
"""The abstract base of all class types used in Xcode project files.
Class variables:
_schema: A dictionary defining the properties of this class. The keys to
_schema are string property keys as used in project files. Values
are a list of four or five elements:
[ is_list, property_type, is_strong, is_required, default ]
is_list: True if the property described is a list, as opposed
to a single element.
property_type: The type to use as the value of the property,
or if is_list is True, the type to use for each
element of the value's list. property_type must
be an XCObject subclass, or one of the built-in
types str, int, or dict.
is_strong: If property_type is an XCObject subclass, is_strong
is True to assert that this class "owns," or serves
as parent, to the property value (or, if is_list is
True, values). is_strong must be False if
property_type is not an XCObject subclass.
is_required: True if the property is required for the class.
Note that is_required being True does not preclude
an empty string ("", in the case of property_type
str) or list ([], in the case of is_list True) from
being set for the property.
default: Optional. If is_required is True, default may be set
to provide a default value for objects that do not supply
their own value. If is_required is True and default
is not provided, users of the class must supply their own
value for the property.
Note that although the values of the array are expressed in
boolean terms, subclasses provide values as integers to conserve
horizontal space.
_should_print_single_line: False in XCObject. Subclasses whose objects
should be written to the project file in the
alternate single-line format, such as
PBXFileReference and PBXBuildFile, should
set this to True.
_encode_transforms: Used by _EncodeString to encode unprintable characters.
The index into this list is the ordinal of the
character to transform; each value is a string
used to represent the character in the output. XCObject
provides an _encode_transforms list suitable for most
XCObject subclasses.
_alternate_encode_transforms: Provided for subclasses that wish to use
the alternate encoding rules. Xcode seems
to use these rules when printing objects in
single-line format. Subclasses that desire
this behavior should set _encode_transforms
to _alternate_encode_transforms.
_hashables: A list of XCObject subclasses that can be hashed by ComputeIDs
to construct this object's ID. Most classes that need custom
hashing behavior should do it by overriding Hashables,
but in some cases an object's parent may wish to push a
hashable value into its child, and it can do so by appending
to _hashables.
Attributes:
id: The object's identifier, a 24-character uppercase hexadecimal string.
Usually, objects being created should not set id until the entire
project file structure is built. At that point, UpdateIDs() should
be called on the root object to assign deterministic values for id to
each object in the tree.
parent: The object's parent. This is set by a parent XCObject when a child
object is added to it.
_properties: The object's property dictionary. An object's properties are
described by its class' _schema variable.
"""
_schema = {}
_should_print_single_line = False
# See _EncodeString.
_encode_transforms = []
i = 0
while i < ord(' '):
_encode_transforms.append('\\U%04x' % i)
i = i + 1
_encode_transforms[7] = '\\a'
_encode_transforms[8] = '\\b'
_encode_transforms[9] = '\\t'
_encode_transforms[10] = '\\n'
_encode_transforms[11] = '\\v'
_encode_transforms[12] = '\\f'
_encode_transforms[13] = '\\n'
_alternate_encode_transforms = list(_encode_transforms)
_alternate_encode_transforms[9] = chr(9)
_alternate_encode_transforms[10] = chr(10)
_alternate_encode_transforms[11] = chr(11)
def __init__(self, properties=None, id=None, parent=None):
self.id = id
self.parent = parent
self._properties = {}
self._hashables = []
self._SetDefaultsFromSchema()
self.UpdateProperties(properties)
def __repr__(self):
try:
name = self.Name()
except NotImplementedError:
return '<%s at 0x%x>' % (self.__class__.__name__, id(self))
return '<%s %r at 0x%x>' % (self.__class__.__name__, name, id(self))
def Copy(self):
"""Make a copy of this object.
The new object will have its own copy of lists and dicts. Any XCObject
objects owned by this object (marked "strong") will be copied in the
new object, even those found in lists. If this object has any weak
references to other XCObjects, the same references are added to the new
object without making a copy.
"""
that = self.__class__(id=self.id, parent=self.parent)
for key, value in self._properties.iteritems():
is_strong = self._schema[key][2]
if isinstance(value, XCObject):
if is_strong:
new_value = value.Copy()
new_value.parent = that
that._properties[key] = new_value
else:
that._properties[key] = value
elif isinstance(value, str) or isinstance(value, unicode) or \
isinstance(value, int):
that._properties[key] = value
elif isinstance(value, list):
if is_strong:
# If is_strong is True, each element is an XCObject, so it's safe to
# call Copy.
that._properties[key] = []
for item in value:
new_item = item.Copy()
new_item.parent = that
that._properties[key].append(new_item)
else:
that._properties[key] = value[:]
elif isinstance(value, dict):
# dicts are never strong.
if is_strong:
raise TypeError, 'Strong dict for key ' + key + ' in ' + \
self.__class__.__name__
else:
that._properties[key] = value.copy()
else:
raise TypeError, 'Unexpected type ' + value.__class__.__name__ + \
' for key ' + key + ' in ' + self.__class__.__name__
return that
def Name(self):
"""Return the name corresponding to an object.
Not all objects necessarily need to be nameable, and not all that do have
a "name" property. Override as needed.
"""
# If the schema indicates that "name" is required, try to access the
# property even if it doesn't exist. This will result in a KeyError
# being raised for the property that should be present, which seems more
# appropriate than NotImplementedError in this case.
if 'name' in self._properties or \
('name' in self._schema and self._schema['name'][3]):
return self._properties['name']
raise NotImplementedError, \
self.__class__.__name__ + ' must implement Name'
def Comment(self):
"""Return a comment string for the object.
Most objects just use their name as the comment, but PBXProject uses
different values.
The returned comment is not escaped and does not have any comment marker
strings applied to it.
"""
return self.Name()
def Hashables(self):
hashables = [self.__class__.__name__]
name = self.Name()
if name != None:
hashables.append(name)
hashables.extend(self._hashables)
return hashables
def ComputeIDs(self, recursive=True, overwrite=True, hash=None):
"""Set "id" properties deterministically.
An object's "id" property is set based on a hash of its class type and
name, as well as the class type and name of all ancestor objects. As
such, it is only advisable to call ComputeIDs once an entire project file
tree is built.
If recursive is True, recurse into all descendant objects and update their
hashes.
If overwrite is True, any existing value set in the "id" property will be
replaced.
"""
def _HashUpdate(hash, data):
"""Update hash with data's length and contents.
If the hash were updated only with the value of data, it would be
possible for clowns to induce collisions by manipulating the names of
their objects. By adding the length, it's exceedingly less likely that
ID collisions will be encountered, intentionally or not.
"""
hash.update(struct.pack('>i', len(data)))
hash.update(data)
if hash == None:
hash = _new_sha1()
hashables = self.Hashables()
assert len(hashables) > 0
for hashable in hashables:
_HashUpdate(hash, hashable)
if recursive:
for child in self.Children():
child.ComputeIDs(recursive, overwrite, hash.copy())
if overwrite or self.id == None:
# Xcode IDs are only 96 bits (24 hex characters), but a SHA-1 digest is
# 160 bits. Instead of throwing out 64 bits of the digest, xor them
# into the portion that gets used.
assert hash.digest_size % 4 == 0
digest_int_count = hash.digest_size / 4
digest_ints = struct.unpack('>' + 'I' * digest_int_count, hash.digest())
id_ints = [0, 0, 0]
for index in xrange(0, digest_int_count):
id_ints[index % 3] ^= digest_ints[index]
self.id = '%08X%08X%08X' % tuple(id_ints)
def EnsureNoIDCollisions(self):
"""Verifies that no two objects have the same ID. Checks all descendants.
"""
ids = {}
descendants = self.Descendants()
for descendant in descendants:
if descendant.id in ids:
other = ids[descendant.id]
raise KeyError, \
'Duplicate ID %s, objects "%s" and "%s" in "%s"' % \
(descendant.id, str(descendant._properties),
str(other._properties), self._properties['rootObject'].Name())
ids[descendant.id] = descendant
def Children(self):
"""Returns a list of all of this object's owned (strong) children."""
children = []
for property, attributes in self._schema.iteritems():
(is_list, property_type, is_strong) = attributes[0:3]
if is_strong and property in self._properties:
if not is_list:
children.append(self._properties[property])
else:
children.extend(self._properties[property])
return children
def Descendants(self):
"""Returns a list of all of this object's descendants, including this
object.
"""
children = self.Children()
descendants = [self]
for child in children:
descendants.extend(child.Descendants())
return descendants
def PBXProjectAncestor(self):
# The base case for recursion is defined at PBXProject.PBXProjectAncestor.
if self.parent:
return self.parent.PBXProjectAncestor()
return None
def _EncodeComment(self, comment):
"""Encodes a comment to be placed in the project file output, mimicing
Xcode behavior.
"""
# This mimics Xcode behavior by wrapping the comment in "/*" and "*/". If
# the string already contains a "*/", it is turned into "(*)/". This keeps
# the file writer from outputting something that would be treated as the
# end of a comment in the middle of something intended to be entirely a
# comment.
return '/* ' + comment.replace('*/', '(*)/') + ' */'
def _EncodeTransform(self, match):
# This function works closely with _EncodeString. It will only be called
# by re.sub with match.group(0) containing a character matched by the
# _escaped expression.
char = match.group(0)
# Backslashes (\) and quotation marks (") are always replaced with a
# backslash-escaped version of the same. Everything else gets its
# replacement from the class' _encode_transforms array.
if char == '\\':
return '\\\\'
if char == '"':
return '\\"'
return self._encode_transforms[ord(char)]
def _EncodeString(self, value):
"""Encodes a string to be placed in the project file output, mimicing
Xcode behavior.
"""
# Use quotation marks when any character outside of the range A-Z, a-z, 0-9,
# $ (dollar sign), . (period), and _ (underscore) is present. Also use
# quotation marks to represent empty strings.
#
# Escape " (double-quote) and \ (backslash) by preceding them with a
# backslash.
#
# Some characters below the printable ASCII range are encoded specially:
# 7 ^G BEL is encoded as "\a"
# 8 ^H BS is encoded as "\b"
# 11 ^K VT is encoded as "\v"
# 12 ^L NP is encoded as "\f"
# 127 ^? DEL is passed through as-is without escaping
# - In PBXFileReference and PBXBuildFile objects:
# 9 ^I HT is passed through as-is without escaping
# 10 ^J NL is passed through as-is without escaping
# 13 ^M CR is passed through as-is without escaping
# - In other objects:
# 9 ^I HT is encoded as "\t"
# 10 ^J NL is encoded as "\n"
# 13 ^M CR is encoded as "\n" rendering it indistinguishable from
# 10 ^J NL
# All other nonprintable characters within the ASCII range (0 through 127
# inclusive) are encoded as "\U001f" referring to the Unicode code point in
# hexadecimal. For example, character 14 (^N SO) is encoded as "\U000e".
# Characters above the ASCII range are passed through to the output encoded
# as UTF-8 without any escaping. These mappings are contained in the
# class' _encode_transforms list.
if _unquoted.search(value) and not _quoted.search(value):
return value
return '"' + _escaped.sub(self._EncodeTransform, value) + '"'
def _XCPrint(self, file, tabs, line):
file.write('\t' * tabs + line)
def _XCPrintableValue(self, tabs, value, flatten_list=False):
"""Returns a representation of value that may be printed in a project file,
mimicking Xcode's behavior.
_XCPrintableValue can handle str and int values, XCObjects (which are
made printable by returning their id property), and list and dict objects
composed of any of the above types. When printing a list or dict, and
_should_print_single_line is False, the tabs parameter is used to determine
how much to indent the lines corresponding to the items in the list or
dict.
If flatten_list is True, single-element lists will be transformed into
strings.
"""
printable = ''
comment = None
if self._should_print_single_line:
sep = ' '
element_tabs = ''
end_tabs = ''
else:
sep = '\n'
element_tabs = '\t' * (tabs + 1)
end_tabs = '\t' * tabs
if isinstance(value, XCObject):
printable += value.id
comment = value.Comment()
elif isinstance(value, str):
printable += self._EncodeString(value)
elif isinstance(value, unicode):
printable += self._EncodeString(value.encode('utf-8'))
elif isinstance(value, int):
printable += str(value)
elif isinstance(value, list):
if flatten_list and len(value) <= 1:
if len(value) == 0:
printable += self._EncodeString('')
else:
printable += self._EncodeString(value[0])
else:
printable = '(' + sep
for item in value:
printable += element_tabs + \
self._XCPrintableValue(tabs + 1, item, flatten_list) + \
',' + sep
printable += end_tabs + ')'
elif isinstance(value, dict):
printable = '{' + sep
for item_key, item_value in sorted(value.iteritems()):
printable += element_tabs + \
self._XCPrintableValue(tabs + 1, item_key, flatten_list) + ' = ' + \
self._XCPrintableValue(tabs + 1, item_value, flatten_list) + ';' + \
sep
printable += end_tabs + '}'
else:
raise TypeError, "Can't make " + value.__class__.__name__ + ' printable'
if comment != None:
printable += ' ' + self._EncodeComment(comment)
return printable
def _XCKVPrint(self, file, tabs, key, value):
"""Prints a key and value, members of an XCObject's _properties dictionary,
to file.
tabs is an int identifying the indentation level. If the class'
_should_print_single_line variable is True, tabs is ignored and the
key-value pair will be followed by a space instead of a newline.
"""
if self._should_print_single_line:
printable = ''
after_kv = ' '
else:
printable = '\t' * tabs
after_kv = '\n'
# Xcode usually prints remoteGlobalIDString values in PBXContainerItemProxy
# objects without comments. Sometimes it prints them with comments, but
# the majority of the time, it doesn't. To avoid unnecessary changes to
# the project file after Xcode opens it, don't write comments for
# remoteGlobalIDString. This is a sucky hack and it would certainly be
# cleaner to extend the schema to indicate whether or not a comment should
# be printed, but since this is the only case where the problem occurs and
# Xcode itself can't seem to make up its mind, the hack will suffice.
#
# Also see PBXContainerItemProxy._schema['remoteGlobalIDString'].
if key == 'remoteGlobalIDString' and isinstance(self,
PBXContainerItemProxy):
value_to_print = value.id
else:
value_to_print = value
# PBXBuildFile's settings property is represented in the output as a dict,
# but a hack here has it represented as a string. Arrange to strip off the
# quotes so that it shows up in the output as expected.
if key == 'settings' and isinstance(self, PBXBuildFile):
strip_value_quotes = True
else:
strip_value_quotes = False
# In another one-off, let's set flatten_list on buildSettings properties
# of XCBuildConfiguration objects, because that's how Xcode treats them.
if key == 'buildSettings' and isinstance(self, XCBuildConfiguration):
flatten_list = True
else:
flatten_list = False
try:
printable_key = self._XCPrintableValue(tabs, key, flatten_list)
printable_value = self._XCPrintableValue(tabs, value_to_print,
flatten_list)
if strip_value_quotes and len(printable_value) > 1 and \
printable_value[0] == '"' and printable_value[-1] == '"':
printable_value = printable_value[1:-1]
printable += printable_key + ' = ' + printable_value + ';' + after_kv
except TypeError, e:
gyp.common.ExceptionAppend(e,
'while printing key "%s"' % key)
raise
self._XCPrint(file, 0, printable)
def Print(self, file=sys.stdout):
"""Prints a reprentation of this object to file, adhering to Xcode output
formatting.
"""
self.VerifyHasRequiredProperties()
if self._should_print_single_line:
# When printing an object in a single line, Xcode doesn't put any space
# between the beginning of a dictionary (or presumably a list) and the
# first contained item, so you wind up with snippets like
# ...CDEF = {isa = PBXFileReference; fileRef = 0123...
# If it were me, I would have put a space in there after the opening
# curly, but I guess this is just another one of those inconsistencies
# between how Xcode prints PBXFileReference and PBXBuildFile objects as
# compared to other objects. Mimic Xcode's behavior here by using an
# empty string for sep.
sep = ''
end_tabs = 0
else:
sep = '\n'
end_tabs = 2
# Start the object. For example, '\t\tPBXProject = {\n'.
self._XCPrint(file, 2, self._XCPrintableValue(2, self) + ' = {' + sep)
# "isa" isn't in the _properties dictionary, it's an intrinsic property
# of the class which the object belongs to. Xcode always outputs "isa"
# as the first element of an object dictionary.
self._XCKVPrint(file, 3, 'isa', self.__class__.__name__)
# The remaining elements of an object dictionary are sorted alphabetically.
for property, value in sorted(self._properties.iteritems()):
self._XCKVPrint(file, 3, property, value)
# End the object.
self._XCPrint(file, end_tabs, '};\n')
def UpdateProperties(self, properties, do_copy=False):
"""Merge the supplied properties into the _properties dictionary.
The input properties must adhere to the class schema or a KeyError or
TypeError exception will be raised. If adding an object of an XCObject
subclass and the schema indicates a strong relationship, the object's
parent will be set to this object.
If do_copy is True, then lists, dicts, strong-owned XCObjects, and
strong-owned XCObjects in lists will be copied instead of having their
references added.
"""
if properties == None:
return
for property, value in properties.iteritems():
# Make sure the property is in the schema.
if not property in self._schema:
raise KeyError, property + ' not in ' + self.__class__.__name__
# Make sure the property conforms to the schema.
(is_list, property_type, is_strong) = self._schema[property][0:3]
if is_list:
if value.__class__ != list:
raise TypeError, \
property + ' of ' + self.__class__.__name__ + \
' must be list, not ' + value.__class__.__name__
for item in value:
if not isinstance(item, property_type) and \
not (item.__class__ == unicode and property_type == str):
# Accept unicode where str is specified. str is treated as
# UTF-8-encoded.
raise TypeError, \
'item of ' + property + ' of ' + self.__class__.__name__ + \
' must be ' + property_type.__name__ + ', not ' + \
item.__class__.__name__
elif not isinstance(value, property_type) and \
not (value.__class__ == unicode and property_type == str):
# Accept unicode where str is specified. str is treated as
# UTF-8-encoded.
raise TypeError, \
property + ' of ' + self.__class__.__name__ + ' must be ' + \
property_type.__name__ + ', not ' + value.__class__.__name__
# Checks passed, perform the assignment.
if do_copy:
if isinstance(value, XCObject):
if is_strong:
self._properties[property] = value.Copy()
else:
self._properties[property] = value
elif isinstance(value, str) or isinstance(value, unicode) or \
isinstance(value, int):
self._properties[property] = value
elif isinstance(value, list):
if is_strong:
# If is_strong is True, each element is an XCObject, so it's safe
# to call Copy.
self._properties[property] = []
for item in value:
self._properties[property].append(item.Copy())
else:
self._properties[property] = value[:]
elif isinstance(value, dict):
self._properties[property] = value.copy()
else:
raise TypeError, "Don't know how to copy a " + \
value.__class__.__name__ + ' object for ' + \
property + ' in ' + self.__class__.__name__
else:
self._properties[property] = value
# Set up the child's back-reference to this object. Don't use |value|
# any more because it may not be right if do_copy is true.
if is_strong:
if not is_list:
self._properties[property].parent = self
else:
for item in self._properties[property]:
item.parent = self
def HasProperty(self, key):
return key in self._properties
def GetProperty(self, key):
return self._properties[key]
def SetProperty(self, key, value):
self.UpdateProperties({key: value})
def DelProperty(self, key):
if key in self._properties:
del self._properties[key]
def AppendProperty(self, key, value):
# TODO(mark): Support ExtendProperty too (and make this call that)?
# Schema validation.
if not key in self._schema:
raise KeyError, key + ' not in ' + self.__class__.__name__
(is_list, property_type, is_strong) = self._schema[key][0:3]
if not is_list:
raise TypeError, key + ' of ' + self.__class__.__name__ + ' must be list'
if not isinstance(value, property_type):
raise TypeError, 'item of ' + key + ' of ' + self.__class__.__name__ + \
' must be ' + property_type.__name__ + ', not ' + \
value.__class__.__name__
# If the property doesn't exist yet, create a new empty list to receive the
# item.
if not key in self._properties:
self._properties[key] = []
# Set up the ownership link.
if is_strong:
value.parent = self
# Store the item.
self._properties[key].append(value)
def VerifyHasRequiredProperties(self):
"""Ensure that all properties identified as required by the schema are
set.
"""
# TODO(mark): A stronger verification mechanism is needed. Some
# subclasses need to perform validation beyond what the schema can enforce.
for property, attributes in self._schema.iteritems():
(is_list, property_type, is_strong, is_required) = attributes[0:4]
if is_required and not property in self._properties:
raise KeyError, self.__class__.__name__ + ' requires ' + property
def _SetDefaultsFromSchema(self):
"""Assign object default values according to the schema. This will not
overwrite properties that have already been set."""
defaults = {}
for property, attributes in self._schema.iteritems():
(is_list, property_type, is_strong, is_required) = attributes[0:4]
if is_required and len(attributes) >= 5 and \
not property in self._properties:
default = attributes[4]
defaults[property] = default
if len(defaults) > 0:
# Use do_copy=True so that each new object gets its own copy of strong
# objects, lists, and dicts.
self.UpdateProperties(defaults, do_copy=True)
class XCHierarchicalElement(XCObject):
"""Abstract base for PBXGroup and PBXFileReference. Not represented in a
project file."""
# TODO(mark): Do name and path belong here? Probably so.
# If path is set and name is not, name may have a default value. Name will
# be set to the basename of path, if the basename of path is different from
# the full value of path. If path is already just a leaf name, name will
# not be set.
_schema = XCObject._schema.copy()
_schema.update({
'comments': [0, str, 0, 0],
'fileEncoding': [0, str, 0, 0],
'includeInIndex': [0, int, 0, 0],
'indentWidth': [0, int, 0, 0],
'lineEnding': [0, int, 0, 0],
'sourceTree': [0, str, 0, 1, '<group>'],
'tabWidth': [0, int, 0, 0],
'usesTabs': [0, int, 0, 0],
'wrapsLines': [0, int, 0, 0],
})
def __init__(self, properties=None, id=None, parent=None):
# super
XCObject.__init__(self, properties, id, parent)
if 'path' in self._properties and not 'name' in self._properties:
path = self._properties['path']
name = posixpath.basename(path)
if name != '' and path != name:
self.SetProperty('name', name)
if 'path' in self._properties and \
(not 'sourceTree' in self._properties or \
self._properties['sourceTree'] == '<group>'):
# If the pathname begins with an Xcode variable like "$(SDKROOT)/", take
# the variable out and make the path be relative to that variable by
# assigning the variable name as the sourceTree.
(source_tree, path) = SourceTreeAndPathFromPath(self._properties['path'])
if source_tree != None:
self._properties['sourceTree'] = source_tree
if path != None:
self._properties['path'] = path
if source_tree != None and path == None and \
not 'name' in self._properties:
# The path was of the form "$(SDKROOT)" with no path following it.
# This object is now relative to that variable, so it has no path
# attribute of its own. It does, however, keep a name.
del self._properties['path']
self._properties['name'] = source_tree
def Name(self):
if 'name' in self._properties:
return self._properties['name']
elif 'path' in self._properties:
return self._properties['path']
else:
# This happens in the case of the root PBXGroup.
return None
def Hashables(self):
"""Custom hashables for XCHierarchicalElements.
XCHierarchicalElements are special. Generally, their hashes shouldn't
change if the paths don't change. The normal XCObject implementation of
Hashables adds a hashable for each object, which means that if
the hierarchical structure changes (possibly due to changes caused when
TakeOverOnlyChild runs and encounters slight changes in the hierarchy),
the hashes will change. For example, if a project file initially contains
a/b/f1 and groups a and b become collapsed into a single group a/b, f1 will have a single parent
a/b. If someone later adds a/f2 to the project file, a/b can no longer be
collapsed, and f1 winds up with parent b and grandparent a. That would
be sufficient to change f1's hash.
To counteract this problem, hashables for all XCHierarchicalElements except
for the main group (which has neither a name nor a path) are taken to be
just the set of path components. Because hashables are inherited from
parents, this provides assurance that a/b/f1 has the same set of hashables
whether its parent is b or a/b.
The main group is a special case. As it is permitted to have no name or
path, it is permitted to use the standard XCObject hash mechanism. This
is not considered a problem because there can be only one main group.
"""
if self == self.PBXProjectAncestor()._properties['mainGroup']:
# super
return XCObject.Hashables(self)
hashables = []
# Put the name in first, ensuring that if TakeOverOnlyChild collapses
# children into a top-level group like "Source", the name always goes
# into the list of hashables without interfering with path components.
if 'name' in self._properties:
# Make it less likely for people to manipulate hashes by following the
# pattern of always pushing an object type value onto the list first.
hashables.append(self.__class__.__name__ + '.name')
hashables.append(self._properties['name'])
# NOTE: This still has the problem that if an absolute path is encountered,
# including paths with a sourceTree, they'll still inherit their parents'
# hashables, even though the paths aren't relative to their parents. This
# is not expected to be much of a problem in practice.
path = self.PathFromSourceTreeAndPath()
if path != None:
components = path.split(posixpath.sep)
for component in components:
hashables.append(self.__class__.__name__ + '.path')
hashables.append(component)
hashables.extend(self._hashables)
return hashables
def Compare(self, other):
# Allow comparison of these types. PBXGroup has the highest sort rank;
# PBXVariantGroup is treated as equal to PBXFileReference.
valid_class_types = {
PBXFileReference: 'file',
PBXGroup: 'group',
PBXVariantGroup: 'file',
}
self_type = valid_class_types[self.__class__]
other_type = valid_class_types[other.__class__]
if self_type == other_type:
# If the two objects are of the same sort rank, compare their names.
return cmp(self.Name(), other.Name())
# Otherwise, sort groups before everything else.
if self_type == 'group':
return -1
return 1
def CompareRootGroup(self, other):
# This function should be used only to compare direct children of the
# containing PBXProject's mainGroup. These groups should appear in the
# listed order.
# TODO(mark): "Build" is used by gyp.generator.xcode, perhaps the
# generator should have a way of influencing this list rather than having
# to hardcode for the generator here.
order = ['Source', 'Intermediates', 'Projects', 'Frameworks', 'Products',
'Build']
# If the groups aren't in the listed order, do a name comparison.
# Otherwise, groups in the listed order should come before those that
# aren't.
self_name = self.Name()
other_name = other.Name()
self_in = isinstance(self, PBXGroup) and self_name in order
other_in = isinstance(other, PBXGroup) and other_name in order
if not self_in and not other_in:
return self.Compare(other)
if self_name in order and not other_name in order:
return -1
if other_name in order and not self_name in order:
return 1
# If both groups are in the listed order, go by the defined order.
self_index = order.index(self_name)
other_index = order.index(other_name)
if self_index < other_index:
return -1
if self_index > other_index:
return 1
return 0
def PathFromSourceTreeAndPath(self):
# Turn the object's sourceTree and path properties into a single flat
# string of a form comparable to the path parameter. If there's a
# sourceTree property other than "<group>", wrap it in $(...) for the
# comparison.
components = []
if self._properties['sourceTree'] != '<group>':
components.append('$(' + self._properties['sourceTree'] + ')')
if 'path' in self._properties:
components.append(self._properties['path'])
if len(components) > 0:
return posixpath.join(*components)
return None
def FullPath(self):
# Returns a full path to self relative to the project file, or relative
# to some other source tree. Start with self, and walk up the chain of
# parents prepending their paths, if any, until no more parents are
# available (project-relative path) or until a path relative to some
# source tree is found.
xche = self
path = None
while isinstance(xche, XCHierarchicalElement) and \
(path == None or \
(not path.startswith('/') and not path.startswith('$'))):
this_path = xche.PathFromSourceTreeAndPath()
if this_path != None and path != None:
path = posixpath.join(this_path, path)
elif this_path != None:
path = this_path
xche = xche.parent
return path
class PBXGroup(XCHierarchicalElement):
"""
Attributes:
_children_by_path: Maps pathnames of children of this PBXGroup to the
actual child XCHierarchicalElement objects.
_variant_children_by_name_and_path: Maps (name, path) tuples of
PBXVariantGroup children to the actual child PBXVariantGroup objects.
"""
_schema = XCHierarchicalElement._schema.copy()
_schema.update({
'children': [1, XCHierarchicalElement, 1, 1, []],
'name': [0, str, 0, 0],
'path': [0, str, 0, 0],
})
def __init__(self, properties=None, id=None, parent=None):
# super
XCHierarchicalElement.__init__(self, properties, id, parent)
self._children_by_path = {}
self._variant_children_by_name_and_path = {}
for child in self._properties.get('children', []):
self._AddChildToDicts(child)
def _AddChildToDicts(self, child):
# Sets up this PBXGroup object's dicts to reference the child properly.
child_path = child.PathFromSourceTreeAndPath()
if child_path:
if child_path in self._children_by_path:
raise ValueError, 'Found multiple children with path ' + child_path
self._children_by_path[child_path] = child
if isinstance(child, PBXVariantGroup):
child_name = child._properties.get('name', None)
key = (child_name, child_path)
if key in self._variant_children_by_name_and_path:
raise ValueError, 'Found multiple PBXVariantGroup children with ' + \
'name ' + str(child_name) + ' and path ' + \
str(child_path)
self._variant_children_by_name_and_path[key] = child
def AppendChild(self, child):
# Callers should use this instead of calling
# AppendProperty('children', child) directly because this function
# maintains the group's dicts.
self.AppendProperty('children', child)
self._AddChildToDicts(child)
def GetChildByName(self, name):
# This is not currently optimized with a dict as GetChildByPath is because
# it has few callers. Most callers probably want GetChildByPath. This
# function is only useful to get children that have names but no paths,
# which is rare. The children of the main group ("Source", "Products",
# etc.) is pretty much the only case where this likely to come up.
#
# TODO(mark): Maybe this should raise an error if more than one child is
# present with the same name.
if not 'children' in self._properties:
return None
for child in self._properties['children']:
if child.Name() == name:
return child
return None
def GetChildByPath(self, path):
if not path:
return None
if path in self._children_by_path:
return self._children_by_path[path]
return None
def GetChildByRemoteObject(self, remote_object):
# This method is a little bit esoteric. Given a remote_object, which
# should be a PBXFileReference in another project file, this method will
# return this group's PBXReferenceProxy object serving as a local proxy
# for the remote PBXFileReference.
#
# This function might benefit from a dict optimization as GetChildByPath
# for some workloads, but profiling shows that it's not currently a
# problem.
if not 'children' in self._properties:
return None
for child in self._properties['children']:
if not isinstance(child, PBXReferenceProxy):
continue
container_proxy = child._properties['remoteRef']
if container_proxy._properties['remoteGlobalIDString'] == remote_object:
return child
return None
def AddOrGetFileByPath(self, path, hierarchical):
"""Returns an existing or new file reference corresponding to path.
If hierarchical is True, this method will create or use the necessary
hierarchical group structure corresponding to path. Otherwise, it will
look in and create an item in the current group only.
If an existing matching reference is found, it is returned, otherwise, a
new one will be created, added to the correct group, and returned.
If path identifies a directory by virtue of carrying a trailing slash,
this method returns a PBXFileReference of "folder" type. If path
identifies a variant, by virtue of it identifying a file inside a directory
with an ".lproj" extension, this method returns a PBXVariantGroup
containing the variant named by path, and possibly other variants. For
all other paths, a "normal" PBXFileReference will be returned.
"""
# Adding or getting a directory? Directories end with a trailing slash.
is_dir = False
if path.endswith('/'):
is_dir = True
normpath = posixpath.normpath(path)
if is_dir:
normpath = path + '/'
else:
normpath = path
# Adding or getting a variant? Variants are files inside directories
# with an ".lproj" extension. Xcode uses variants for localization. For
# a variant path/to/Language.lproj/MainMenu.nib, put a variant group named
# MainMenu.nib inside path/to, and give it a variant named Language. In
# this example, grandparent would be set to path/to and parent_root would
# be set to Language.
variant_name = None
parent = posixpath.dirname(path)
grandparent = posixpath.dirname(parent)
parent_basename = posixpath.basename(parent)
(parent_root, parent_ext) = posixpath.splitext(parent_basename)
if parent_ext == '.lproj':
variant_name = parent_root
if grandparent == '':
grandparent = None
# Putting a directory inside a variant group is not currently supported.
assert not is_dir or variant_name == None
path_split = path.split(posixpath.sep)
if len(path_split) == 1 or \
((is_dir or variant_name != None) and len(path_split) == 2) or \
not hierarchical:
# The PBXFileReference or PBXVariantGroup will be added to or gotten from
# this PBXGroup, no recursion necessary.
if variant_name == None:
# Add or get a PBXFileReference.
file_ref = self.GetChildByPath(normpath)
if file_ref != None:
assert file_ref.__class__ == PBXFileReference
else:
file_ref = PBXFileReference({'path': path})
self.AppendChild(file_ref)
else:
# Add or get a PBXVariantGroup. The variant group name is the same
# as the basename (MainMenu.nib in the example above). grandparent
# specifies the path to the variant group itself, and path_split[-2:]
# is the path of the specific variant relative to its group.
variant_group_name = posixpath.basename(path)
variant_group_ref = self.AddOrGetVariantGroupByNameAndPath(
variant_group_name, grandparent)
variant_path = posixpath.sep.join(path_split[-2:])
variant_ref = variant_group_ref.GetChildByPath(variant_path)
if variant_ref != None:
assert variant_ref.__class__ == PBXFileReference
else:
variant_ref = PBXFileReference({'name': variant_name,
'path': variant_path})
variant_group_ref.AppendChild(variant_ref)
# The caller is interested in the variant group, not the specific
# variant file.
file_ref = variant_group_ref
return file_ref
else:
# Hierarchical recursion. Add or get a PBXGroup corresponding to the
# outermost path component, and then recurse into it, chopping off that
# path component.
next_dir = path_split[0]
group_ref = self.GetChildByPath(next_dir)
if group_ref != None:
assert group_ref.__class__ == PBXGroup
else:
group_ref = PBXGroup({'path': next_dir})
self.AppendChild(group_ref)
return group_ref.AddOrGetFileByPath(posixpath.sep.join(path_split[1:]),
hierarchical)
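# Illustrative usage sketch (hypothetical group and path names); given a
# PBXGroup instance `group`:
#   group.AddOrGetFileByPath('Source/widgets/button.cc', True)
#     -> creates or reuses a "Source" group containing a "widgets" group
#        containing a PBXFileReference for button.cc
#   group.AddOrGetFileByPath('res/en.lproj/MainMenu.nib', True)
#     -> returns a PBXVariantGroup named MainMenu.nib inside a "res" group,
#        holding a variant named "en"
#   group.AddOrGetFileByPath('Frameworks/', False)
#     -> returns a flat "folder" PBXFileReference added directly to `group`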
def AddOrGetVariantGroupByNameAndPath(self, name, path):
"""Returns an existing or new PBXVariantGroup for name and path.
If a PBXVariantGroup identified by the name and path arguments is already
present as a child of this object, it is returned. Otherwise, a new
PBXVariantGroup with the correct properties is created, added as a child,
and returned.
This method will generally be called by AddOrGetFileByPath, which knows
when to create a variant group based on the structure of the pathnames
passed to it.
"""
key = (name, path)
if key in self._variant_children_by_name_and_path:
variant_group_ref = self._variant_children_by_name_and_path[key]
assert variant_group_ref.__class__ == PBXVariantGroup
return variant_group_ref
variant_group_properties = {'name': name}
if path != None:
variant_group_properties['path'] = path
variant_group_ref = PBXVariantGroup(variant_group_properties)
self.AppendChild(variant_group_ref)
return variant_group_ref
def TakeOverOnlyChild(self, recurse=False):
"""If this PBXGroup has only one child and it's also a PBXGroup, take
it over by making all of its children this object's children.
This function will continue to take over only children when those children
are groups. If there are three PBXGroups representing a, b, and c, with
c inside b and b inside a, and a and b have no other children, this will
result in a taking over both b and c, forming a PBXGroup for a/b/c.
If recurse is True, this function will recurse into children and ask them
to collapse themselves by taking over only children as well. Assuming
an example hierarchy with files at a/b/c/d1, a/b/c/d2, and a/b/c/d3/e/f
(d1, d2, and f are files, the rest are groups), recursion will result in
a group for a/b/c containing a group for d3/e.
"""
# At this stage, check that child class types are PBXGroup exactly,
# instead of using isinstance. The only subclass of PBXGroup,
# PBXVariantGroup, should not participate in reparenting in the same way:
# reparenting by merging different object types would be wrong.
while len(self._properties['children']) == 1 and \
self._properties['children'][0].__class__ == PBXGroup:
# Loop to take over the innermost only-child group possible.
child = self._properties['children'][0]
# Assume the child's properties, including its children. Save a copy
# of this object's old properties, because they'll still be needed.
# This object retains its existing id and parent attributes.
old_properties = self._properties
self._properties = child._properties
self._children_by_path = child._children_by_path
if not 'sourceTree' in self._properties or \
self._properties['sourceTree'] == '<group>':
# The child was relative to its parent. Fix up the path. Note that
# children with a sourceTree other than "<group>" are not relative to
# their parents, so no path fix-up is needed in that case.
if 'path' in old_properties:
if 'path' in self._properties:
# Both the original parent and child have paths set.
self._properties['path'] = posixpath.join(old_properties['path'],
self._properties['path'])
else:
# Only the original parent has a path, use it.
self._properties['path'] = old_properties['path']
if 'sourceTree' in old_properties:
# The original parent had a sourceTree set, use it.
self._properties['sourceTree'] = old_properties['sourceTree']
# If the original parent had a name set, keep using it. If the original
# parent didn't have a name but the child did, let the child's name
# live on. If the name attribute seems unnecessary now, get rid of it.
if 'name' in old_properties and old_properties['name'] != None and \
old_properties['name'] != self.Name():
self._properties['name'] = old_properties['name']
if 'name' in self._properties and 'path' in self._properties and \
self._properties['name'] == self._properties['path']:
del self._properties['name']
# Notify all children of their new parent.
for child in self._properties['children']:
child.parent = self
# If asked to recurse, recurse.
if recurse:
for child in self._properties['children']:
if child.__class__ == PBXGroup:
child.TakeOverOnlyChild(recurse)
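# Collapse sketch (hypothetical group names): with groups "a" > "b" > "c"
# where a and b have no other children, a.TakeOverOnlyChild() leaves a single
# group whose path is "a/b/c"; a.TakeOverOnlyChild(recurse=True) additionally
# collapses deeper single-child chains such as "d3" > "e" into "d3/e".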
def SortGroup(self):
self._properties['children'] = \
sorted(self._properties['children'], cmp=lambda x,y: x.Compare(y))
# Recurse.
for child in self._properties['children']:
if isinstance(child, PBXGroup):
child.SortGroup()
class XCFileLikeElement(XCHierarchicalElement):
# Abstract base for objects that can be used as the fileRef property of
# PBXBuildFile.
def PathHashables(self):
# A PBXBuildFile that refers to this object will call this method to
# obtain additional hashables specific to this XCFileLikeElement. Don't
# just use this object's hashables, they're not specific and unique enough
# on their own (without access to the parent hashables.) Instead, provide
# hashables that identify this object by path by getting its hashables as
# well as the hashables of ancestor XCHierarchicalElement objects.
hashables = []
xche = self
while xche != None and isinstance(xche, XCHierarchicalElement):
xche_hashables = xche.Hashables()
for index in xrange(0, len(xche_hashables)):
hashables.insert(index, xche_hashables[index])
xche = xche.parent
return hashables
class XCContainerPortal(XCObject):
# Abstract base for objects that can be used as the containerPortal property
# of PBXContainerItemProxy.
pass
class XCRemoteObject(XCObject):
# Abstract base for objects that can be used as the remoteGlobalIDString
# property of PBXContainerItemProxy.
pass
class PBXFileReference(XCFileLikeElement, XCContainerPortal, XCRemoteObject):
_schema = XCFileLikeElement._schema.copy()
_schema.update({
'explicitFileType': [0, str, 0, 0],
'lastKnownFileType': [0, str, 0, 0],
'name': [0, str, 0, 0],
'path': [0, str, 0, 1],
})
# Weird output rules for PBXFileReference.
_should_print_single_line = True
# super
_encode_transforms = XCFileLikeElement._alternate_encode_transforms
def __init__(self, properties=None, id=None, parent=None):
# super
XCFileLikeElement.__init__(self, properties, id, parent)
if 'path' in self._properties and self._properties['path'].endswith('/'):
self._properties['path'] = self._properties['path'][:-1]
is_dir = True
else:
is_dir = False
if 'path' in self._properties and \
not 'lastKnownFileType' in self._properties and \
not 'explicitFileType' in self._properties:
# TODO(mark): This is the replacement for a replacement for a quick hack.
# It is no longer incredibly sucky, but this list needs to be extended.
extension_map = {
'a': 'archive.ar',
'app': 'wrapper.application',
'bdic': 'file',
'bundle': 'wrapper.cfbundle',
'c': 'sourcecode.c.c',
'cc': 'sourcecode.cpp.cpp',
'cpp': 'sourcecode.cpp.cpp',
'css': 'text.css',
'cxx': 'sourcecode.cpp.cpp',
'dylib': 'compiled.mach-o.dylib',
'framework': 'wrapper.framework',
'h': 'sourcecode.c.h',
'hxx': 'sourcecode.cpp.h',
'icns': 'image.icns',
'java': 'sourcecode.java',
'js': 'sourcecode.javascript',
'm': 'sourcecode.c.objc',
'mm': 'sourcecode.cpp.objcpp',
'nib': 'wrapper.nib',
'o': 'compiled.mach-o.objfile',
'pdf': 'image.pdf',
'pl': 'text.script.perl',
'plist': 'text.plist.xml',
'pm': 'text.script.perl',
'png': 'image.png',
'py': 'text.script.python',
'r': 'sourcecode.rez',
'rez': 'sourcecode.rez',
's': 'sourcecode.asm',
'strings': 'text.plist.strings',
'ttf': 'file',
'xcconfig': 'text.xcconfig',
'xib': 'file.xib',
'y': 'sourcecode.yacc',
}
if is_dir:
file_type = 'folder'
else:
basename = posixpath.basename(self._properties['path'])
(root, ext) = posixpath.splitext(basename)
# Check the map using a lowercase extension.
# TODO(mark): Maybe it should try with the original case first and fall
# back to lowercase, in case there are any instances where case
# matters. There currently aren't.
if ext != '':
ext = ext[1:].lower()
# TODO(mark): "text" is the default value, but "file" is appropriate
# for unrecognized files not containing text. Xcode seems to choose
# based on content.
file_type = extension_map.get(ext, 'text')
self._properties['lastKnownFileType'] = file_type
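# Type-inference sketch (hypothetical paths): PBXFileReference({'path':
# 'main.mm'}) picks up lastKnownFileType 'sourcecode.cpp.objcpp' from the
# extension map above, an unrecognized extension falls back to 'text', and a
# trailing slash as in {'path': 'Resources/'} yields the 'folder' type.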
class PBXVariantGroup(PBXGroup, XCFileLikeElement):
"""PBXVariantGroup is used by Xcode to represent localizations."""
# No additions to the schema relative to PBXGroup.
pass
# PBXReferenceProxy is also an XCFileLikeElement subclass. It is defined below
# because it uses PBXContainerItemProxy, defined below.
class XCBuildConfiguration(XCObject):
_schema = XCObject._schema.copy()
_schema.update({
'baseConfigurationReference': [0, PBXFileReference, 0, 0],
'buildSettings': [0, dict, 0, 1, {}],
'name': [0, str, 0, 1],
})
def HasBuildSetting(self, key):
return key in self._properties['buildSettings']
def GetBuildSetting(self, key):
return self._properties['buildSettings'][key]
def SetBuildSetting(self, key, value):
# TODO(mark): If a list, copy?
self._properties['buildSettings'][key] = value
def AppendBuildSetting(self, key, value):
if not key in self._properties['buildSettings']:
self._properties['buildSettings'][key] = []
self._properties['buildSettings'][key].append(value)
def DelBuildSetting(self, key):
if key in self._properties['buildSettings']:
del self._properties['buildSettings'][key]
def SetBaseConfiguration(self, value):
self._properties['baseConfigurationReference'] = value
class XCConfigurationList(XCObject):
# _configs is the default list of configurations.
_configs = [ XCBuildConfiguration({'name': 'Debug'}),
XCBuildConfiguration({'name': 'Release'}) ]
_schema = XCObject._schema.copy()
_schema.update({
'buildConfigurations': [1, XCBuildConfiguration, 1, 1, _configs],
'defaultConfigurationIsVisible': [0, int, 0, 1, 1],
'defaultConfigurationName': [0, str, 0, 1, 'Release'],
})
def Name(self):
return 'Build configuration list for ' + \
self.parent.__class__.__name__ + ' "' + self.parent.Name() + '"'
def ConfigurationNamed(self, name):
"""Convenience accessor to obtain an XCBuildConfiguration by name."""
for configuration in self._properties['buildConfigurations']:
if configuration._properties['name'] == name:
return configuration
raise KeyError, name
def DefaultConfiguration(self):
"""Convenience accessor to obtain the default XCBuildConfiguration."""
return self.ConfigurationNamed(self._properties['defaultConfigurationName'])
def HasBuildSetting(self, key):
"""Determines the state of a build setting in all XCBuildConfiguration
child objects.
If all child objects have key in their build settings, and the value is the
same in all child objects, returns 1.
If no child objects have the key in their build settings, returns 0.
If some, but not all, child objects have the key in their build settings,
or if any children have different values for the key, returns -1.
"""
has = None
value = None
for configuration in self._properties['buildConfigurations']:
configuration_has = configuration.HasBuildSetting(key)
if has == None:
has = configuration_has
elif has != configuration_has:
return -1
if configuration_has:
configuration_value = configuration.GetBuildSetting(key)
if value == None:
value = configuration_value
elif value != configuration_value:
return -1
if not has:
return 0
return 1
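# The result is effectively tri-state. Hypothetical caller, with a setting
# name chosen only for illustration:
#   state = config_list.HasBuildSetting('GCC_ENABLE_OBJC_GC')
#   #  1 -> present with the same value in every configuration
#   #  0 -> absent from every configuration
#   # -1 -> present in only some configurations, or with differing values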
def GetBuildSetting(self, key):
"""Gets the build setting for key.
All child XCConfiguration objects must have the same value set for the
setting, or a ValueError will be raised.
"""
# TODO(mark): This is wrong for build settings that are lists. The list
# contents should be compared (and a list copy returned?)
value = None
for configuration in self._properties['buildConfigurations']:
configuration_value = configuration.GetBuildSetting(key)
if value == None:
value = configuration_value
else:
if value != configuration_value:
raise ValueError, 'Variant values for ' + key
return value
def SetBuildSetting(self, key, value):
"""Sets the build setting for key to value in all child
XCBuildConfiguration objects.
"""
for configuration in self._properties['buildConfigurations']:
configuration.SetBuildSetting(key, value)
def AppendBuildSetting(self, key, value):
"""Appends value to the build setting for key, which is treated as a list,
in all child XCBuildConfiguration objects.
"""
for configuration in self._properties['buildConfigurations']:
configuration.AppendBuildSetting(key, value)
def DelBuildSetting(self, key):
"""Deletes the build setting key from all child XCBuildConfiguration
objects.
"""
for configuration in self._properties['buildConfigurations']:
configuration.DelBuildSetting(key)
def SetBaseConfiguration(self, value):
"""Sets the build configuration in all child XCBuildConfiguration objects.
"""
for configuration in self._properties['buildConfigurations']:
configuration.SetBaseConfiguration(value)
class PBXBuildFile(XCObject):
_schema = XCObject._schema.copy()
_schema.update({
'fileRef': [0, XCFileLikeElement, 0, 1],
'settings': [0, str, 0, 0], # hack, it's a dict
})
# Weird output rules for PBXBuildFile.
_should_print_single_line = True
_encode_transforms = XCObject._alternate_encode_transforms
def Name(self):
# Example: "main.cc in Sources"
return self._properties['fileRef'].Name() + ' in ' + self.parent.Name()
def Hashables(self):
# super
hashables = XCObject.Hashables(self)
# It is not sufficient to just rely on Name() to get the
# XCFileLikeElement's name, because that is not a complete pathname.
# PathHashables returns hashables unique enough that no two
# PBXBuildFiles should wind up with the same set of hashables, unless
# someone adds the same file multiple times to the same target. That
# would be considered invalid anyway.
hashables.extend(self._properties['fileRef'].PathHashables())
return hashables
class XCBuildPhase(XCObject):
"""Abstract base for build phase classes. Not represented in a project
file.
Attributes:
_files_by_path: A dict mapping each path of a child in the files list by
path (keys) to the corresponding PBXBuildFile children (values).
_files_by_xcfilelikeelement: A dict mapping each XCFileLikeElement (keys)
to the corresponding PBXBuildFile children (values).
"""
# TODO(mark): Some build phase types, like PBXShellScriptBuildPhase, don't
# actually have a "files" list. XCBuildPhase should not have "files" but
# another abstract subclass of it should provide this, and concrete build
# phase types that do have "files" lists should be derived from that new
# abstract subclass. XCBuildPhase should only provide buildActionMask and
# runOnlyForDeploymentPostprocessing, and not files or the various
# file-related methods and attributes.
_schema = XCObject._schema.copy()
_schema.update({
'buildActionMask': [0, int, 0, 1, 0x7fffffff],
'files': [1, PBXBuildFile, 1, 1, []],
'runOnlyForDeploymentPostprocessing': [0, int, 0, 1, 0],
})
def __init__(self, properties=None, id=None, parent=None):
# super
XCObject.__init__(self, properties, id, parent)
self._files_by_path = {}
self._files_by_xcfilelikeelement = {}
for pbxbuildfile in self._properties.get('files', []):
self._AddBuildFileToDicts(pbxbuildfile)
def FileGroup(self, path):
# Subclasses must override this by returning a two-element tuple. The
# first item in the tuple should be the PBXGroup to which "path" should be
# added, either as a child or deeper descendant. The second item should
# be a boolean indicating whether files should be added into hierarchical
# groups or one single flat group.
raise NotImplementedError, \
self.__class__.__name__ + ' must implement FileGroup'
def _AddPathToDict(self, pbxbuildfile, path):
"""Adds path to the dict tracking paths belonging to this build phase.
If the path is already a member of this build phase, raises an exception.
"""
if path in self._files_by_path:
raise ValueError, 'Found multiple build files with path ' + path
self._files_by_path[path] = pbxbuildfile
def _AddBuildFileToDicts(self, pbxbuildfile, path=None):
"""Maintains the _files_by_path and _files_by_xcfilelikeelement dicts.
If path is specified, then it is the path that is being added to the
phase, and pbxbuildfile must contain either a PBXFileReference directly
referencing that path, or it must contain a PBXVariantGroup that itself
contains a PBXFileReference referencing the path.
If path is not specified, either the PBXFileReference's path or the paths
of all children of the PBXVariantGroup are taken as being added to the
phase.
If the path is already present in the phase, raises an exception.
If the PBXFileReference or PBXVariantGroup referenced by pbxbuildfile
are already present in the phase, referenced by a different PBXBuildFile
object, raises an exception. This does not raise an exception when
a PBXFileReference or PBXVariantGroup reappear and are referenced by the
same PBXBuildFile that has already introduced them, because in the case
of PBXVariantGroup objects, they may correspond to multiple paths that are
not all added simultaneously. When this situation occurs, the path needs
to be added to _files_by_path, but nothing needs to change in
_files_by_xcfilelikeelement, and the caller should have avoided adding
the PBXBuildFile if it is already present in the list of children.
"""
xcfilelikeelement = pbxbuildfile._properties['fileRef']
paths = []
if path != None:
# It's best when the caller provides the path.
if isinstance(xcfilelikeelement, PBXVariantGroup):
paths.append(path)
else:
# If the caller didn't provide a path, there can be either multiple
# paths (PBXVariantGroup) or one.
if isinstance(xcfilelikeelement, PBXVariantGroup):
for variant in xcfilelikeelement._properties['children']:
paths.append(variant.FullPath())
else:
paths.append(xcfilelikeelement.FullPath())
# Add the paths first, because if something's going to raise, the
# messages provided by _AddPathToDict are more useful owing to its
# having access to a real pathname and not just an object's Name().
for a_path in paths:
self._AddPathToDict(pbxbuildfile, a_path)
# If another PBXBuildFile references this XCFileLikeElement, there's a
# problem.
if xcfilelikeelement in self._files_by_xcfilelikeelement and \
self._files_by_xcfilelikeelement[xcfilelikeelement] != pbxbuildfile:
raise ValueError, 'Found multiple build files for ' + \
xcfilelikeelement.Name()
self._files_by_xcfilelikeelement[xcfilelikeelement] = pbxbuildfile
def AppendBuildFile(self, pbxbuildfile, path=None):
# Callers should use this instead of calling
# AppendProperty('files', pbxbuildfile) directly because this function
# maintains the object's dicts. Better yet, callers can just call AddFile
# with a pathname and not worry about building their own PBXBuildFile
# objects.
self.AppendProperty('files', pbxbuildfile)
self._AddBuildFileToDicts(pbxbuildfile, path)
def AddFile(self, path, settings=None):
(file_group, hierarchical) = self.FileGroup(path)
file_ref = file_group.AddOrGetFileByPath(path, hierarchical)
if file_ref in self._files_by_xcfilelikeelement and \
isinstance(file_ref, PBXVariantGroup):
# There's already a PBXBuildFile in this phase corresponding to the
# PBXVariantGroup. path just provides a new variant that belongs to
# the group. Add the path to the dict.
pbxbuildfile = self._files_by_xcfilelikeelement[file_ref]
self._AddBuildFileToDicts(pbxbuildfile, path)
else:
# Add a new PBXBuildFile to get file_ref into the phase.
if settings is None:
pbxbuildfile = PBXBuildFile({'fileRef': file_ref})
else:
pbxbuildfile = PBXBuildFile({'fileRef': file_ref, 'settings': settings})
self.AppendBuildFile(pbxbuildfile, path)
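# Hypothetical usage, assuming `phase` already belongs to a target inside a
# PBXProject so FileGroup can find the root groups: adding two localizations
# of one file reuses a single PBXVariantGroup and PBXBuildFile.
#   phase.AddFile('res/en.lproj/Localizable.strings')
#   phase.AddFile('res/fr.lproj/Localizable.strings')
# The second call takes the "already present" branch above and only records
# the new path in _files_by_path.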
class PBXHeadersBuildPhase(XCBuildPhase):
# No additions to the schema relative to XCBuildPhase.
def Name(self):
return 'Headers'
def FileGroup(self, path):
return self.PBXProjectAncestor().RootGroupForPath(path)
class PBXResourcesBuildPhase(XCBuildPhase):
# No additions to the schema relative to XCBuildPhase.
def Name(self):
return 'Resources'
def FileGroup(self, path):
return self.PBXProjectAncestor().RootGroupForPath(path)
class PBXSourcesBuildPhase(XCBuildPhase):
# No additions to the schema relative to XCBuildPhase.
def Name(self):
return 'Sources'
def FileGroup(self, path):
return self.PBXProjectAncestor().RootGroupForPath(path)
class PBXFrameworksBuildPhase(XCBuildPhase):
# No additions to the schema relative to XCBuildPhase.
def Name(self):
return 'Frameworks'
def FileGroup(self, path):
(root, ext) = posixpath.splitext(path)
if ext != '':
ext = ext[1:].lower()
if ext == 'o':
# .o files are added to Xcode Frameworks phases, but conceptually aren't
# frameworks, they're more like sources or intermediates. Redirect them
# to show up in one of those other groups.
return self.PBXProjectAncestor().RootGroupForPath(path)
else:
return (self.PBXProjectAncestor().FrameworksGroup(), False)
class PBXShellScriptBuildPhase(XCBuildPhase):
_schema = XCBuildPhase._schema.copy()
_schema.update({
'inputPaths': [1, str, 0, 1, []],
'name': [0, str, 0, 0],
'outputPaths': [1, str, 0, 1, []],
'shellPath': [0, str, 0, 1, '/bin/sh'],
'shellScript': [0, str, 0, 1],
'showEnvVarsInLog': [0, int, 0, 0],
})
def Name(self):
if 'name' in self._properties:
return self._properties['name']
return 'ShellScript'
class PBXCopyFilesBuildPhase(XCBuildPhase):
_schema = XCBuildPhase._schema.copy()
_schema.update({
'dstPath': [0, str, 0, 1],
'dstSubfolderSpec': [0, int, 0, 1],
'name': [0, str, 0, 0],
})
# path_tree_re matches "$(DIR)/path" or just "$(DIR)". Match group 1 is
# "DIR", match group 3 is "path" or None.
path_tree_re = re.compile('^\\$\\((.*)\\)(/(.*)|)$')
# path_tree_to_subfolder maps names of Xcode variables to the associated
# dstSubfolderSpec property value used in a PBXCopyFilesBuildPhase object.
path_tree_to_subfolder = {
'BUILT_PRODUCTS_DIR': 16, # Products Directory
# Other types that can be chosen via the Xcode UI.
# TODO(mark): Map Xcode variable names to these.
# : 1, # Wrapper
# : 6, # Executables: 6
# : 7, # Resources
# : 15, # Java Resources
# : 10, # Frameworks
# : 11, # Shared Frameworks
# : 12, # Shared Support
# : 13, # PlugIns
}
def Name(self):
if 'name' in self._properties:
return self._properties['name']
return 'CopyFiles'
def FileGroup(self, path):
return self.PBXProjectAncestor().RootGroupForPath(path)
def SetDestination(self, path):
"""Set the dstSubfolderSpec and dstPath properties from path.
path may be specified in the same notation used for XCHierarchicalElements,
specifically, "$(DIR)/path".
"""
path_tree_match = self.path_tree_re.search(path)
if path_tree_match:
# Everything else needs to be relative to an Xcode variable.
path_tree = path_tree_match.group(1)
relative_path = path_tree_match.group(3)
if path_tree in self.path_tree_to_subfolder:
subfolder = self.path_tree_to_subfolder[path_tree]
if relative_path == None:
relative_path = ''
else:
# The path starts with an unrecognized Xcode variable
# name like $(SRCROOT). Xcode will still handle this
# as an "absolute path" that starts with the variable.
subfolder = 0
relative_path = path
elif path.startswith('/'):
# Special case. Absolute paths are in dstSubfolderSpec 0.
subfolder = 0
relative_path = path[1:]
else:
raise ValueError, 'Can\'t use path %s in a %s' % \
(path, self.__class__.__name__)
self._properties['dstPath'] = relative_path
self._properties['dstSubfolderSpec'] = subfolder
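# Hypothetical destinations and the properties they produce:
#   SetDestination('$(BUILT_PRODUCTS_DIR)/PlugIns')
#     -> dstSubfolderSpec 16, dstPath 'PlugIns'
#   SetDestination('/usr/local/lib')
#     -> dstSubfolderSpec 0, dstPath 'usr/local/lib'
#   SetDestination('$(SRCROOT)/staging')
#     -> dstSubfolderSpec 0, dstPath '$(SRCROOT)/staging' (unrecognized
#        variable, treated as an "absolute" path that starts with it)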
class PBXBuildRule(XCObject):
_schema = XCObject._schema.copy()
_schema.update({
'compilerSpec': [0, str, 0, 1],
'filePatterns': [0, str, 0, 0],
'fileType': [0, str, 0, 1],
'isEditable': [0, int, 0, 1, 1],
'outputFiles': [1, str, 0, 1, []],
'script': [0, str, 0, 0],
})
def Name(self):
# Not very inspired, but it's what Xcode uses.
return self.__class__.__name__
def Hashables(self):
# super
hashables = XCObject.Hashables(self)
# Use the hashables of the weak objects that this object refers to.
hashables.append(self._properties['fileType'])
if 'filePatterns' in self._properties:
hashables.append(self._properties['filePatterns'])
return hashables
class PBXContainerItemProxy(XCObject):
# When referencing an item in this project file, containerPortal is the
# PBXProject root object of this project file. When referencing an item in
# another project file, containerPortal is a PBXFileReference identifying
# the other project file.
#
# When serving as a proxy to an XCTarget (in this project file or another),
# proxyType is 1. When serving as a proxy to a PBXFileReference (in another
# project file), proxyType is 2. Type 2 is used for references to the
# products of the other project file's targets.
#
# Xcode is weird about remoteGlobalIDString. Usually, it's printed without
# a comment, indicating that it's tracked internally simply as a string, but
# sometimes it's printed with a comment (usually when the object is initially
# created), indicating that it's tracked as a project file object at least
# sometimes. This module always tracks it as an object, but contains a hack
# to prevent it from printing the comment in the project file output. See
# _XCKVPrint.
_schema = XCObject._schema.copy()
_schema.update({
'containerPortal': [0, XCContainerPortal, 0, 1],
'proxyType': [0, int, 0, 1],
'remoteGlobalIDString': [0, XCRemoteObject, 0, 1],
'remoteInfo': [0, str, 0, 1],
})
def __repr__(self):
props = self._properties
name = '%s.gyp:%s' % (props['containerPortal'].Name(), props['remoteInfo'])
return '<%s %r at 0x%x>' % (self.__class__.__name__, name, id(self))
def Name(self):
# Admittedly not the best name, but it's what Xcode uses.
return self.__class__.__name__
def Hashables(self):
# super
hashables = XCObject.Hashables(self)
# Use the hashables of the weak objects that this object refers to.
hashables.extend(self._properties['containerPortal'].Hashables())
hashables.extend(self._properties['remoteGlobalIDString'].Hashables())
return hashables
class PBXTargetDependency(XCObject):
# The "target" property accepts an XCTarget object, and obviously not
# NoneType. But XCTarget is defined below, so it can't be put into the
# schema yet. The definition of PBXTargetDependency can't be moved below
# XCTarget because XCTarget's own schema references PBXTargetDependency.
# Python doesn't deal well with this circular relationship, and doesn't have
# a real way to do forward declarations. To work around, the type of
# the "target" property is reset below, after XCTarget is defined.
#
# At least one of "name" and "target" is required.
_schema = XCObject._schema.copy()
_schema.update({
'name': [0, str, 0, 0],
'target': [0, None.__class__, 0, 0],
'targetProxy': [0, PBXContainerItemProxy, 1, 1],
})
def __repr__(self):
name = self._properties.get('name') or self._properties['target'].Name()
return '<%s %r at 0x%x>' % (self.__class__.__name__, name, id(self))
def Name(self):
# Admittedly not the best name, but it's what Xcode uses.
return self.__class__.__name__
def Hashables(self):
# super
hashables = XCObject.Hashables(self)
# Use the hashables of the weak objects that this object refers to.
hashables.extend(self._properties['targetProxy'].Hashables())
return hashables
class PBXReferenceProxy(XCFileLikeElement):
_schema = XCFileLikeElement._schema.copy()
_schema.update({
'fileType': [0, str, 0, 1],
'path': [0, str, 0, 1],
'remoteRef': [0, PBXContainerItemProxy, 1, 1],
})
class XCTarget(XCRemoteObject):
# An XCTarget is really just an XCObject, the XCRemoteObject thing is just
# to allow PBXProject to be used in the remoteGlobalIDString property of
# PBXContainerItemProxy.
#
# Setting a "name" property at instantiation may also affect "productName",
# which may in turn affect the "PRODUCT_NAME" build setting in children of
# "buildConfigurationList". See __init__ below.
_schema = XCRemoteObject._schema.copy()
_schema.update({
'buildConfigurationList': [0, XCConfigurationList, 1, 1,
XCConfigurationList()],
'buildPhases': [1, XCBuildPhase, 1, 1, []],
'dependencies': [1, PBXTargetDependency, 1, 1, []],
'name': [0, str, 0, 1],
'productName': [0, str, 0, 1],
})
def __init__(self, properties=None, id=None, parent=None,
force_outdir=None, force_prefix=None, force_extension=None):
# super
XCRemoteObject.__init__(self, properties, id, parent)
# Set up additional defaults not expressed in the schema. If a "name"
# property was supplied, set "productName" if it is not present. Also set
# the "PRODUCT_NAME" build setting in each configuration, but only if
# the setting is not present in any build configuration.
if 'name' in self._properties:
if not 'productName' in self._properties:
self.SetProperty('productName', self._properties['name'])
if 'productName' in self._properties:
if 'buildConfigurationList' in self._properties:
configs = self._properties['buildConfigurationList']
if configs.HasBuildSetting('PRODUCT_NAME') == 0:
configs.SetBuildSetting('PRODUCT_NAME',
self._properties['productName'])
def AddDependency(self, other):
pbxproject = self.PBXProjectAncestor()
other_pbxproject = other.PBXProjectAncestor()
if pbxproject == other_pbxproject:
# The easy case. Add a dependency to another target in the same
# project file.
container = PBXContainerItemProxy({'containerPortal': pbxproject,
'proxyType': 1,
'remoteGlobalIDString': other,
'remoteInfo': other.Name()})
dependency = PBXTargetDependency({'target': other,
'targetProxy': container})
self.AppendProperty('dependencies', dependency)
else:
# The hard case. Add a dependency to a target in a different project
# file. Actually, this case isn't really so hard.
other_project_ref = \
pbxproject.AddOrGetProjectReference(other_pbxproject)[1]
container = PBXContainerItemProxy({
'containerPortal': other_project_ref,
'proxyType': 1,
'remoteGlobalIDString': other,
'remoteInfo': other.Name(),
})
dependency = PBXTargetDependency({'name': other.Name(),
'targetProxy': container})
self.AppendProperty('dependencies', dependency)
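# Hypothetical wiring: with app_target and lib_target owned by the same
# PBXProject, app_target.AddDependency(lib_target) creates a proxy whose
# containerPortal is that PBXProject; if lib_target lives in a different
# project file, the containerPortal becomes the PBXFileReference to the other
# .xcodeproj obtained from AddOrGetProjectReference.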
# Proxy all of these through to the build configuration list.
def ConfigurationNamed(self, name):
return self._properties['buildConfigurationList'].ConfigurationNamed(name)
def DefaultConfiguration(self):
return self._properties['buildConfigurationList'].DefaultConfiguration()
def HasBuildSetting(self, key):
return self._properties['buildConfigurationList'].HasBuildSetting(key)
def GetBuildSetting(self, key):
return self._properties['buildConfigurationList'].GetBuildSetting(key)
def SetBuildSetting(self, key, value):
return self._properties['buildConfigurationList'].SetBuildSetting(key, \
value)
def AppendBuildSetting(self, key, value):
return self._properties['buildConfigurationList'].AppendBuildSetting(key, \
value)
def DelBuildSetting(self, key):
return self._properties['buildConfigurationList'].DelBuildSetting(key)
# Redefine the type of the "target" property. See PBXTargetDependency._schema
# above.
PBXTargetDependency._schema['target'][1] = XCTarget
class PBXNativeTarget(XCTarget):
# buildPhases is overridden in the schema to be able to set defaults.
#
# NOTE: Contrary to most objects, it is advisable to set parent when
# constructing PBXNativeTarget. A parent of an XCTarget must be a PBXProject
# object. A parent reference is required for a PBXNativeTarget during
# construction to be able to set up the target defaults for productReference,
# because a PBXBuildFile object must be created for the target and it must
# be added to the PBXProject's mainGroup hierarchy.
_schema = XCTarget._schema.copy()
_schema.update({
'buildPhases': [1, XCBuildPhase, 1, 1,
[PBXSourcesBuildPhase(), PBXFrameworksBuildPhase()]],
'buildRules': [1, PBXBuildRule, 1, 1, []],
'productReference': [0, PBXFileReference, 0, 1],
'productType': [0, str, 0, 1],
})
# Mapping from Xcode product-types to settings. The settings are:
# filetype : used for explicitFileType in the project file
# prefix : the prefix for the file name
# suffix : the suffix for the file name
_product_filetypes = {
'com.apple.product-type.application': ['wrapper.application',
'', '.app'],
'com.apple.product-type.bundle': ['wrapper.cfbundle',
'', '.bundle'],
'com.apple.product-type.framework': ['wrapper.framework',
'', '.framework'],
'com.apple.product-type.library.dynamic': ['compiled.mach-o.dylib',
'lib', '.dylib'],
'com.apple.product-type.library.static': ['archive.ar',
'lib', '.a'],
'com.apple.product-type.tool': ['compiled.mach-o.executable',
'', ''],
'com.googlecode.gyp.xcode.bundle': ['compiled.mach-o.dylib',
'', '.so'],
}
def __init__(self, properties=None, id=None, parent=None,
force_outdir=None, force_prefix=None, force_extension=None):
# super
XCTarget.__init__(self, properties, id, parent)
if 'productName' in self._properties and \
'productType' in self._properties and \
not 'productReference' in self._properties and \
self._properties['productType'] in self._product_filetypes:
products_group = None
pbxproject = self.PBXProjectAncestor()
if pbxproject != None:
products_group = pbxproject.ProductsGroup()
if products_group != None:
(filetype, prefix, suffix) = \
self._product_filetypes[self._properties['productType']]
# Xcode does not have a distinct type for loadable modules that are
# pure BSD targets (not in a bundle wrapper). GYP allows such modules
# to be specified by setting a target type to loadable_module without
# having mac_bundle set. These are mapped to the pseudo-product type
# com.googlecode.gyp.xcode.bundle.
#
# By picking up this special type and converting it to a dynamic
# library (com.apple.product-type.library.dynamic) with fix-ups,
# single-file loadable modules can be produced.
#
# MACH_O_TYPE is changed to mh_bundle to produce the proper file type
# (as opposed to mh_dylib). In order for linking to succeed,
# DYLIB_CURRENT_VERSION and DYLIB_COMPATIBILITY_VERSION must be
# cleared. They are meaningless for type mh_bundle.
#
# Finally, the .so extension is forcibly applied over the default
# (.dylib), unless another forced extension is already selected.
# .dylib is plainly wrong, and .bundle is used by loadable_modules in
# bundle wrappers (com.apple.product-type.bundle). .so seems an odd
# choice because it's used as the extension on many other systems that
# don't distinguish between linkable shared libraries and non-linkable
# loadable modules, but there's precedent: Python loadable modules on
# Mac OS X use an .so extension.
if self._properties['productType'] == 'com.googlecode.gyp.xcode.bundle':
self._properties['productType'] = \
'com.apple.product-type.library.dynamic'
self.SetBuildSetting('MACH_O_TYPE', 'mh_bundle')
self.SetBuildSetting('DYLIB_CURRENT_VERSION', '')
self.SetBuildSetting('DYLIB_COMPATIBILITY_VERSION', '')
if force_extension == None:
force_extension = suffix[1:]
if force_extension is not None:
# If it's a wrapper (bundle), set WRAPPER_EXTENSION.
if filetype.startswith('wrapper.'):
self.SetBuildSetting('WRAPPER_EXTENSION', force_extension)
else:
# Extension override.
suffix = '.' + force_extension
self.SetBuildSetting('EXECUTABLE_EXTENSION', force_extension)
if filetype.startswith('compiled.mach-o.executable'):
product_name = self._properties['productName']
product_name += suffix
suffix = ''
self.SetProperty('productName', product_name)
self.SetBuildSetting('PRODUCT_NAME', product_name)
# Xcode handles most prefixes based on the target type, however there
# are exceptions. If a "BSD Dynamic Library" target is added in the
# Xcode UI, Xcode sets EXECUTABLE_PREFIX. This check duplicates that
# behavior.
if force_prefix is not None:
prefix = force_prefix
if filetype.startswith('wrapper.'):
self.SetBuildSetting('WRAPPER_PREFIX', prefix)
else:
self.SetBuildSetting('EXECUTABLE_PREFIX', prefix)
if force_outdir is not None:
self.SetBuildSetting('TARGET_BUILD_DIR', force_outdir)
# TODO(tvl): Remove the below hack.
# http://code.google.com/p/gyp/issues/detail?id=122
# Some targets include the prefix in the target_name. These targets
# really should just add a product_name setting that doesn't include
# the prefix. For example:
# target_name = 'libevent', product_name = 'event'
# This check cleans up for them.
product_name = self._properties['productName']
prefix_len = len(prefix)
if prefix_len and (product_name[:prefix_len] == prefix):
product_name = product_name[prefix_len:]
self.SetProperty('productName', product_name)
self.SetBuildSetting('PRODUCT_NAME', product_name)
ref_props = {
'explicitFileType': filetype,
'includeInIndex': 0,
'path': prefix + product_name + suffix,
'sourceTree': 'BUILT_PRODUCTS_DIR',
}
file_ref = PBXFileReference(ref_props)
products_group.AppendChild(file_ref)
self.SetProperty('productReference', file_ref)
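# Construction sketch (hypothetical names; parent must be a PBXProject for
# the product reference to be created):
#   target = PBXNativeTarget(
#       {'name': 'event',
#        'productType': 'com.apple.product-type.library.static'},
#       parent=project)
# ends up with a productReference whose path is 'libevent.a', appended to the
# project's Products group.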
def GetBuildPhaseByType(self, type):
if not 'buildPhases' in self._properties:
return None
the_phase = None
for phase in self._properties['buildPhases']:
if isinstance(phase, type):
# Some phases may be present in multiples in a well-formed project file,
# but phases like PBXSourcesBuildPhase may only be present singly, and
# this function is only meant to fetch those singly-present phases. Loop
# over the entire list of phases and assert if more than one of the
# desired type is found.
assert the_phase == None
the_phase = phase
return the_phase
def HeadersPhase(self):
headers_phase = self.GetBuildPhaseByType(PBXHeadersBuildPhase)
if headers_phase == None:
headers_phase = PBXHeadersBuildPhase()
# The headers phase should come before the resources, sources, and
# frameworks phases, if any.
insert_at = len(self._properties['buildPhases'])
for index in xrange(0, len(self._properties['buildPhases'])):
phase = self._properties['buildPhases'][index]
if isinstance(phase, PBXResourcesBuildPhase) or \
isinstance(phase, PBXSourcesBuildPhase) or \
isinstance(phase, PBXFrameworksBuildPhase):
insert_at = index
break
self._properties['buildPhases'].insert(insert_at, headers_phase)
headers_phase.parent = self
return headers_phase
def ResourcesPhase(self):
resources_phase = self.GetBuildPhaseByType(PBXResourcesBuildPhase)
if resources_phase == None:
resources_phase = PBXResourcesBuildPhase()
# The resources phase should come before the sources and frameworks
# phases, if any.
insert_at = len(self._properties['buildPhases'])
for index in xrange(0, len(self._properties['buildPhases'])):
phase = self._properties['buildPhases'][index]
if isinstance(phase, PBXSourcesBuildPhase) or \
isinstance(phase, PBXFrameworksBuildPhase):
insert_at = index
break
self._properties['buildPhases'].insert(insert_at, resources_phase)
resources_phase.parent = self
return resources_phase
def SourcesPhase(self):
sources_phase = self.GetBuildPhaseByType(PBXSourcesBuildPhase)
if sources_phase == None:
sources_phase = PBXSourcesBuildPhase()
self.AppendProperty('buildPhases', sources_phase)
return sources_phase
def FrameworksPhase(self):
frameworks_phase = self.GetBuildPhaseByType(PBXFrameworksBuildPhase)
if frameworks_phase == None:
frameworks_phase = PBXFrameworksBuildPhase()
self.AppendProperty('buildPhases', frameworks_phase)
return frameworks_phase
def AddDependency(self, other):
# super
XCTarget.AddDependency(self, other)
static_library_type = 'com.apple.product-type.library.static'
shared_library_type = 'com.apple.product-type.library.dynamic'
framework_type = 'com.apple.product-type.framework'
if isinstance(other, PBXNativeTarget) and \
'productType' in self._properties and \
self._properties['productType'] != static_library_type and \
'productType' in other._properties and \
(other._properties['productType'] == static_library_type or \
((other._properties['productType'] == shared_library_type or \
other._properties['productType'] == framework_type) and \
((not other.HasBuildSetting('MACH_O_TYPE')) or
other.GetBuildSetting('MACH_O_TYPE') != 'mh_bundle'))):
file_ref = other.GetProperty('productReference')
pbxproject = self.PBXProjectAncestor()
other_pbxproject = other.PBXProjectAncestor()
if pbxproject != other_pbxproject:
other_project_product_group = \
pbxproject.AddOrGetProjectReference(other_pbxproject)[0]
file_ref = other_project_product_group.GetChildByRemoteObject(file_ref)
self.FrameworksPhase().AppendProperty('files',
PBXBuildFile({'fileRef': file_ref}))
class PBXAggregateTarget(XCTarget):
pass
class PBXProject(XCContainerPortal):
# A PBXProject is really just an XCObject, the XCContainerPortal thing is
# just to allow PBXProject to be used in the containerPortal property of
# PBXContainerItemProxy.
"""
Attributes:
path: "sample.xcodeproj". TODO(mark) Document me!
_other_pbxprojects: A dictionary, keyed by other PBXProject objects. Each
value is a reference to the dict in the
projectReferences list associated with the keyed
PBXProject.
"""
_schema = XCContainerPortal._schema.copy()
_schema.update({
'attributes': [0, dict, 0, 0],
'buildConfigurationList': [0, XCConfigurationList, 1, 1,
XCConfigurationList()],
'compatibilityVersion': [0, str, 0, 1, 'Xcode 3.2'],
'hasScannedForEncodings': [0, int, 0, 1, 1],
'mainGroup': [0, PBXGroup, 1, 1, PBXGroup()],
'projectDirPath': [0, str, 0, 1, ''],
'projectReferences': [1, dict, 0, 0],
'projectRoot': [0, str, 0, 1, ''],
'targets': [1, XCTarget, 1, 1, []],
})
def __init__(self, properties=None, id=None, parent=None, path=None):
self.path = path
self._other_pbxprojects = {}
# super
return XCContainerPortal.__init__(self, properties, id, parent)
def Name(self):
name = self.path
if name[-10:] == '.xcodeproj':
name = name[:-10]
return posixpath.basename(name)
def Path(self):
return self.path
def Comment(self):
return 'Project object'
def Children(self):
# super
children = XCContainerPortal.Children(self)
# Add children that the schema doesn't know about. Maybe there's a more
# elegant way around this, but this is the only case where we need to own
# objects in a dictionary (that is itself in a list), and three lines for
# a one-off isn't that big a deal.
if 'projectReferences' in self._properties:
for reference in self._properties['projectReferences']:
children.append(reference['ProductGroup'])
return children
def PBXProjectAncestor(self):
return self
def _GroupByName(self, name):
if not 'mainGroup' in self._properties:
self.SetProperty('mainGroup', PBXGroup())
main_group = self._properties['mainGroup']
group = main_group.GetChildByName(name)
if group == None:
group = PBXGroup({'name': name})
main_group.AppendChild(group)
return group
# SourceGroup and ProductsGroup are created by default in Xcode's own
# templates.
def SourceGroup(self):
return self._GroupByName('Source')
def ProductsGroup(self):
return self._GroupByName('Products')
# IntermediatesGroup is used to collect source-like files that are generated
# by rules or script phases and are placed in intermediate directories such
# as DerivedSources.
def IntermediatesGroup(self):
return self._GroupByName('Intermediates')
# FrameworksGroup and ProjectsGroup are top-level groups used to collect
# frameworks and projects.
def FrameworksGroup(self):
return self._GroupByName('Frameworks')
def ProjectsGroup(self):
return self._GroupByName('Projects')
def RootGroupForPath(self, path):
"""Returns a PBXGroup child of this object to which path should be added.
This method is intended to choose between SourceGroup and
IntermediatesGroup on the basis of whether path is present in a source
directory or an intermediates directory. For the purposes of this
determination, any path located within a derived file directory such as
PROJECT_DERIVED_FILE_DIR is treated as being in an intermediates
directory.
The returned value is a two-element tuple. The first element is the
PBXGroup, and the second element specifies whether that group should be
organized hierarchically (True) or as a single flat list (False).
"""
# TODO(mark): make this a class variable and bind to self on call?
# Also, this list is nowhere near exhaustive.
# INTERMEDIATE_DIR and SHARED_INTERMEDIATE_DIR are used by
# gyp.generator.xcode. There should probably be some way for that module
# to push the names in, rather than having to hard-code them here.
source_tree_groups = {
'DERIVED_FILE_DIR': (self.IntermediatesGroup, True),
'INTERMEDIATE_DIR': (self.IntermediatesGroup, True),
'PROJECT_DERIVED_FILE_DIR': (self.IntermediatesGroup, True),
'SHARED_INTERMEDIATE_DIR': (self.IntermediatesGroup, True),
}
(source_tree, path) = SourceTreeAndPathFromPath(path)
if source_tree != None and source_tree in source_tree_groups:
(group_func, hierarchical) = source_tree_groups[source_tree]
group = group_func()
return (group, hierarchical)
# TODO(mark): make additional choices based on file extension.
return (self.SourceGroup(), True)
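# Routing sketch (hypothetical paths): '$(SHARED_INTERMEDIATE_DIR)/gen/foo.cc'
# is placed under the hierarchical Intermediates group, while a plain relative
# path such as 'src/foo.cc' falls through to the hierarchical Source group.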
def AddOrGetFileInRootGroup(self, path):
"""Returns a PBXFileReference corresponding to path in the correct group
according to RootGroupForPath's heuristics.
If an existing PBXFileReference for path exists, it will be returned.
Otherwise, one will be created and returned.
"""
(group, hierarchical) = self.RootGroupForPath(path)
return group.AddOrGetFileByPath(path, hierarchical)
def RootGroupsTakeOverOnlyChildren(self, recurse=False):
"""Calls TakeOverOnlyChild for all groups in the main group."""
for group in self._properties['mainGroup']._properties['children']:
if isinstance(group, PBXGroup):
group.TakeOverOnlyChild(recurse)
def SortGroups(self):
# Sort the children of the mainGroup (like "Source" and "Products")
# according to their defined order.
self._properties['mainGroup']._properties['children'] = \
sorted(self._properties['mainGroup']._properties['children'],
cmp=lambda x,y: x.CompareRootGroup(y))
# Sort everything else by putting group before files, and going
# alphabetically by name within sections of groups and files. SortGroup
# is recursive.
for group in self._properties['mainGroup']._properties['children']:
if not isinstance(group, PBXGroup):
continue
if group.Name() == 'Products':
# The Products group is a special case. Instead of sorting
# alphabetically, sort things in the order of the targets that
# produce the products. To do this, just build up a new list of
# products based on the targets.
products = []
for target in self._properties['targets']:
if not isinstance(target, PBXNativeTarget):
continue
product = target._properties['productReference']
# Make sure that the product is already in the products group.
assert product in group._properties['children']
products.append(product)
# Make sure that this process doesn't miss anything that was already
# in the products group.
assert len(products) == len(group._properties['children'])
group._properties['children'] = products
else:
group.SortGroup()
def AddOrGetProjectReference(self, other_pbxproject):
"""Add a reference to another project file (via PBXProject object) to this
one.
Returns [ProductGroup, ProjectRef]. ProductGroup is a PBXGroup object in
this project file that contains a PBXReferenceProxy object for each
product of each PBXNativeTarget in the other project file. ProjectRef is
a PBXFileReference to the other project file.
If this project file already references the other project file, the
existing ProductGroup and ProjectRef are returned. The ProductGroup will
still be updated if necessary.
"""
if not 'projectReferences' in self._properties:
self._properties['projectReferences'] = []
product_group = None
project_ref = None
if not other_pbxproject in self._other_pbxprojects:
# This project file isn't yet linked to the other one. Establish the
# link.
product_group = PBXGroup({'name': 'Products'})
# ProductGroup is strong.
product_group.parent = self
# There's nothing unique about this PBXGroup, and if left alone, it will
# wind up with the same set of hashables as all other PBXGroup objects
# owned by the projectReferences list. Add the hashables of the
# remote PBXProject that it's related to.
product_group._hashables.extend(other_pbxproject.Hashables())
# The other project reports its path as relative to the same directory
# that this project's path is relative to. The other project's path
# is not necessarily already relative to this project. Figure out the
# pathname that this project needs to use to refer to the other one.
this_path = posixpath.dirname(self.Path())
projectDirPath = self.GetProperty('projectDirPath')
if projectDirPath:
if posixpath.isabs(projectDirPath[0]):
this_path = projectDirPath
else:
this_path = posixpath.join(this_path, projectDirPath)
other_path = gyp.common.RelativePath(other_pbxproject.Path(), this_path)
# ProjectRef is weak (it's owned by the mainGroup hierarchy).
project_ref = PBXFileReference({
'lastKnownFileType': 'wrapper.pb-project',
'path': other_path,
'sourceTree': 'SOURCE_ROOT',
})
self.ProjectsGroup().AppendChild(project_ref)
ref_dict = {'ProductGroup': product_group, 'ProjectRef': project_ref}
self._other_pbxprojects[other_pbxproject] = ref_dict
self.AppendProperty('projectReferences', ref_dict)
# Xcode seems to sort this list case-insensitively
self._properties['projectReferences'] = \
sorted(self._properties['projectReferences'], cmp=lambda x,y:
cmp(x['ProjectRef'].Name().lower(),
y['ProjectRef'].Name().lower()))
else:
# The link already exists. Pull out the relevant data.
project_ref_dict = self._other_pbxprojects[other_pbxproject]
product_group = project_ref_dict['ProductGroup']
project_ref = project_ref_dict['ProjectRef']
self._SetUpProductReferences(other_pbxproject, product_group, project_ref)
return [product_group, project_ref]
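# Hypothetical cross-project reference:
#   product_group, project_ref = project.AddOrGetProjectReference(other)
# On first use this creates a strong "Products" PBXGroup mirroring the other
# project's native-target products and a weak PBXFileReference to the other
# .xcodeproj under this project's Projects group; later calls return the same
# pair, refreshed by _SetUpProductReferences.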
def _SetUpProductReferences(self, other_pbxproject, product_group,
project_ref):
# TODO(mark): This only adds references to products in other_pbxproject
# when they don't exist in this pbxproject. Perhaps it should also
# remove references from this pbxproject that are no longer present in
# other_pbxproject. Perhaps it should update various properties if they
# change.
for target in other_pbxproject._properties['targets']:
if not isinstance(target, PBXNativeTarget):
continue
other_fileref = target._properties['productReference']
if product_group.GetChildByRemoteObject(other_fileref) == None:
# Xcode sets remoteInfo to the name of the target and not the name
# of its product, despite this proxy being a reference to the product.
container_item = PBXContainerItemProxy({
'containerPortal': project_ref,
'proxyType': 2,
'remoteGlobalIDString': other_fileref,
'remoteInfo': target.Name()
})
# TODO(mark): Does sourceTree get copied straight over from the other
# project? Can the other project ever have lastKnownFileType here
# instead of explicitFileType? (Use it if so?) Can path ever be
# unset? (I don't think so.) Can other_fileref have name set, and
# does it impact the PBXReferenceProxy if so? These are the questions
# that perhaps will be answered one day.
reference_proxy = PBXReferenceProxy({
'fileType': other_fileref._properties['explicitFileType'],
'path': other_fileref._properties['path'],
'sourceTree': other_fileref._properties['sourceTree'],
'remoteRef': container_item,
})
product_group.AppendChild(reference_proxy)
def SortRemoteProductReferences(self):
# For each remote project file, sort the associated ProductGroup in the
# same order that the targets are sorted in the remote project file. This
# is the sort order used by Xcode.
def CompareProducts(x, y, remote_products):
# x and y are PBXReferenceProxy objects. Go through their associated
# PBXContainerItemProxy to get the remote PBXFileReference, which will be
# present in the remote_products list.
x_remote = x._properties['remoteRef']._properties['remoteGlobalIDString']
y_remote = y._properties['remoteRef']._properties['remoteGlobalIDString']
x_index = remote_products.index(x_remote)
y_index = remote_products.index(y_remote)
# Use the order of each remote PBXFileReference in remote_products to
# determine the sort order.
return cmp(x_index, y_index)
for other_pbxproject, ref_dict in self._other_pbxprojects.iteritems():
# Build up a list of products in the remote project file, ordered the
# same as the targets that produce them.
remote_products = []
for target in other_pbxproject._properties['targets']:
if not isinstance(target, PBXNativeTarget):
continue
remote_products.append(target._properties['productReference'])
# Sort the PBXReferenceProxy children according to the list of remote
# products.
product_group = ref_dict['ProductGroup']
product_group._properties['children'] = sorted(
product_group._properties['children'],
cmp=lambda x, y: CompareProducts(x, y, remote_products))
class XCProjectFile(XCObject):
_schema = XCObject._schema.copy()
_schema.update({
'archiveVersion': [0, int, 0, 1, 1],
'classes': [0, dict, 0, 1, {}],
'objectVersion': [0, int, 0, 1, 45],
'rootObject': [0, PBXProject, 1, 1],
})
def SetXcodeVersion(self, version):
version_to_object_version = {
'2.4': 45,
'3.0': 45,
'3.1': 45,
'3.2': 46,
}
if not version in version_to_object_version:
supported_str = ', '.join(sorted(version_to_object_version.keys()))
raise Exception(
'Unsupported Xcode version %s (supported: %s)' %
( version, supported_str ) )
compatibility_version = 'Xcode %s' % version
self._properties['rootObject'].SetProperty('compatibilityVersion',
compatibility_version)
self.SetProperty('objectVersion', version_to_object_version[version]);
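# Hypothetical call: SetXcodeVersion('3.2') stores compatibilityVersion
# 'Xcode 3.2' on the root PBXProject and objectVersion 46 here; a version
# string outside the table raises an Exception listing the supported values.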
def ComputeIDs(self, recursive=True, overwrite=True, hash=None):
# Although XCProjectFile is implemented here as an XCObject, it's not a
# proper object in the Xcode sense, and it certainly doesn't have its own
# ID. Pass through an attempt to update IDs to the real root object.
if recursive:
self._properties['rootObject'].ComputeIDs(recursive, overwrite, hash)
def Print(self, file=sys.stdout):
self.VerifyHasRequiredProperties()
# Add the special "objects" property, which will be caught and handled
# separately during printing. This structure allows a fairly standard
# loop do the normal printing.
self._properties['objects'] = {}
self._XCPrint(file, 0, '// !$*UTF8*$!\n')
if self._should_print_single_line:
self._XCPrint(file, 0, '{ ')
else:
self._XCPrint(file, 0, '{\n')
for property, value in sorted(self._properties.iteritems(),
cmp=lambda x, y: cmp(x, y)):
if property == 'objects':
self._PrintObjects(file)
else:
self._XCKVPrint(file, 1, property, value)
self._XCPrint(file, 0, '}\n')
del self._properties['objects']
def _PrintObjects(self, file):
if self._should_print_single_line:
self._XCPrint(file, 0, 'objects = {')
else:
self._XCPrint(file, 1, 'objects = {\n')
objects_by_class = {}
for object in self.Descendants():
if object == self:
continue
class_name = object.__class__.__name__
if not class_name in objects_by_class:
objects_by_class[class_name] = []
objects_by_class[class_name].append(object)
for class_name in sorted(objects_by_class):
self._XCPrint(file, 0, '\n')
self._XCPrint(file, 0, '/* Begin ' + class_name + ' section */\n')
for object in sorted(objects_by_class[class_name],
cmp=lambda x, y: cmp(x.id, y.id)):
object.Print(file)
self._XCPrint(file, 0, '/* End ' + class_name + ' section */\n')
if self._should_print_single_line:
self._XCPrint(file, 0, '}; ')
else:
self._XCPrint(file, 1, '};\n')
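# End-to-end sketch of emitting a project file with these classes; the file
# names are illustrative only:
#   project = PBXProject(path='sample.xcodeproj')
#   project_file = XCProjectFile({'rootObject': project})
#   project.AddOrGetFileInRootGroup('src/main.cc')
#   project_file.ComputeIDs()
#   project_file.Print(open('sample.xcodeproj/project.pbxproj', 'w'))
# ComputeIDs should run before Print so every object carries a stable
# 24-character identifier.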
| mit | -4,960,518,596,211,695,000 | 40.168428 | 80 | 0.654678 | false |
alexryndin/ambari | ambari-server/src/main/resources/stacks/ADH/1.0/services/HDFS/package/scripts/journalnode.py | 2 | 6746 | """
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management import *
from resource_management.libraries.functions import conf_select
from resource_management.libraries.functions.format import format
from resource_management.libraries.functions.security_commons import build_expectations, \
cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
FILE_TYPE_XML
from utils import service
from hdfs import hdfs
import journalnode_upgrade
from ambari_commons.os_family_impl import OsFamilyImpl
from ambari_commons import OSConst
class JournalNode(Script):
def install(self, env):
import params
self.install_packages(env)
env.set_params(params)
@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
class JournalNodeDefault(JournalNode):
def get_stack_to_component(self):
return {"HDP": "hadoop-hdfs-journalnode"}
def pre_upgrade_restart(self, env, upgrade_type=None):
Logger.info("Executing Stack Upgrade pre-restart")
import params
env.set_params(params)
def start(self, env, upgrade_type=None):
import params
env.set_params(params)
self.configure(env)
service(
action="start", name="journalnode", user=params.hdfs_user,
create_pid_dir=True,
create_log_dir=True
)
def post_upgrade_restart(self, env, upgrade_type=None):
if upgrade_type == "nonrolling":
return
Logger.info("Executing Stack Upgrade post-restart")
import params
env.set_params(params)
journalnode_upgrade.post_upgrade_check()
def stop(self, env, upgrade_type=None):
import params
env.set_params(params)
service(
action="stop", name="journalnode", user=params.hdfs_user,
create_pid_dir=True,
create_log_dir=True
)
def configure(self, env):
import params
Directory(params.jn_edits_dir,
create_parents=True,
cd_access="a",
owner=params.hdfs_user,
group=params.user_group
)
env.set_params(params)
hdfs()
pass
def status(self, env):
import status_params
env.set_params(status_params)
check_process_status(status_params.journalnode_pid_file)
def security_status(self, env):
import status_params
env.set_params(status_params)
props_value_check = {"hadoop.security.authentication": "kerberos",
"hadoop.security.authorization": "true"}
props_empty_check = ["hadoop.security.auth_to_local"]
props_read_check = None
core_site_expectations = build_expectations('core-site', props_value_check, props_empty_check,
props_read_check)
props_value_check = None
props_empty_check = ['dfs.journalnode.keytab.file',
'dfs.journalnode.kerberos.principal']
props_read_check = ['dfs.journalnode.keytab.file']
hdfs_site_expectations = build_expectations('hdfs-site', props_value_check, props_empty_check,
props_read_check)
hdfs_expectations = {}
hdfs_expectations.update(hdfs_site_expectations)
hdfs_expectations.update(core_site_expectations)
security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
{'core-site.xml': FILE_TYPE_XML})
if 'core-site' in security_params and 'hadoop.security.authentication' in security_params['core-site'] and \
security_params['core-site']['hadoop.security.authentication'].lower() == 'kerberos':
result_issues = validate_security_config_properties(security_params, hdfs_expectations)
if not result_issues: # If all validations passed successfully
try:
# Double check the dict before calling execute
if ('hdfs-site' not in security_params or
'dfs.journalnode.kerberos.keytab.file' not in security_params['hdfs-site'] or
'dfs.journalnode.kerberos.principal' not in security_params['hdfs-site']):
self.put_structured_out({"securityState": "UNSECURED"})
self.put_structured_out(
{"securityIssuesFound": "Keytab file or principal are not set property."})
return
cached_kinit_executor(status_params.kinit_path_local,
status_params.hdfs_user,
security_params['hdfs-site']['dfs.journalnode.kerberos.keytab.file'],
security_params['hdfs-site']['dfs.journalnode.kerberos.principal'],
status_params.hostname,
status_params.tmp_dir)
self.put_structured_out({"securityState": "SECURED_KERBEROS"})
except Exception as e:
self.put_structured_out({"securityState": "ERROR"})
self.put_structured_out({"securityStateErrorInfo": str(e)})
else:
issues = []
for cf in result_issues:
issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
self.put_structured_out({"securityState": "UNSECURED"})
else:
self.put_structured_out({"securityState": "UNSECURED"})
@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
class JournalNodeWindows(JournalNode):
def install(self, env):
import install_params
self.install_packages(env)
def start(self, env):
import params
self.configure(env)
Service(params.journalnode_win_service_name, action="start")
def stop(self, env):
import params
Service(params.journalnode_win_service_name, action="stop")
def configure(self, env):
import params
env.set_params(params)
hdfs("journalnode")
pass
def status(self, env):
import status_params
env.set_params(status_params)
check_windows_service_status(status_params.journalnode_win_service_name)
if __name__ == "__main__":
JournalNode().execute()
| apache-2.0 | 9,085,197,930,481,679,000 | 35.863388 | 114 | 0.669286 | false |
zackslash/scrapy | scrapy/shell.py | 44 | 6963 | """Scrapy Shell
See documentation in docs/topics/shell.rst
"""
from __future__ import print_function
import os
import signal
import warnings
from twisted.internet import reactor, threads, defer
from twisted.python import threadable
from w3lib.url import any_to_uri
from scrapy.crawler import Crawler
from scrapy.exceptions import IgnoreRequest, ScrapyDeprecationWarning
from scrapy.http import Request, Response
from scrapy.item import BaseItem
from scrapy.settings import Settings
from scrapy.spiders import Spider
from scrapy.utils.console import start_python_console
from scrapy.utils.misc import load_object
from scrapy.utils.response import open_in_browser
from scrapy.utils.conf import get_config
from scrapy.utils.console import DEFAULT_PYTHON_SHELLS
class Shell(object):
relevant_classes = (Crawler, Spider, Request, Response, BaseItem,
Settings)
def __init__(self, crawler, update_vars=None, code=None):
self.crawler = crawler
self.update_vars = update_vars or (lambda x: None)
self.item_class = load_object(crawler.settings['DEFAULT_ITEM_CLASS'])
self.spider = None
self.inthread = not threadable.isInIOThread()
self.code = code
self.vars = {}
def start(self, url=None, request=None, response=None, spider=None):
# disable accidental Ctrl-C key press from shutting down the engine
signal.signal(signal.SIGINT, signal.SIG_IGN)
if url:
self.fetch(url, spider)
elif request:
self.fetch(request, spider)
elif response:
request = response.request
self.populate_vars(response, request, spider)
else:
self.populate_vars()
if self.code:
print(eval(self.code, globals(), self.vars))
else:
"""
Detect interactive shell setting in scrapy.cfg
e.g.: ~/.config/scrapy.cfg or ~/.scrapy.cfg
[settings]
# shell can be one of ipython, bpython or python;
# to be used as the interactive python console, if available.
# (default is ipython, fallbacks in the order listed above)
shell = python
"""
cfg = get_config()
section, option = 'settings', 'shell'
env = os.environ.get('SCRAPY_PYTHON_SHELL')
shells = []
if env:
shells += env.strip().lower().split(',')
elif cfg.has_option(section, option):
shells += [cfg.get(section, option).strip().lower()]
else: # try all by default
shells += DEFAULT_PYTHON_SHELLS.keys()
# always add standard shell as fallback
shells += ['python']
start_python_console(self.vars, shells=shells,
banner=self.vars.pop('banner', ''))
def _schedule(self, request, spider):
spider = self._open_spider(request, spider)
d = _request_deferred(request)
d.addCallback(lambda x: (x, spider))
self.crawler.engine.crawl(request, spider)
return d
def _open_spider(self, request, spider):
if self.spider:
return self.spider
if spider is None:
spider = self.crawler.spider or self.crawler._create_spider()
self.crawler.spider = spider
self.crawler.engine.open_spider(spider, close_if_idle=False)
self.spider = spider
return spider
def fetch(self, request_or_url, spider=None):
if isinstance(request_or_url, Request):
request = request_or_url
url = request.url
else:
url = any_to_uri(request_or_url)
request = Request(url, dont_filter=True)
request.meta['handle_httpstatus_all'] = True
response = None
try:
response, spider = threads.blockingCallFromThread(
reactor, self._schedule, request, spider)
except IgnoreRequest:
pass
self.populate_vars(response, request, spider)
def populate_vars(self, response=None, request=None, spider=None):
self.vars['crawler'] = self.crawler
self.vars['item'] = self.item_class()
self.vars['settings'] = self.crawler.settings
self.vars['spider'] = spider
self.vars['request'] = request
self.vars['response'] = response
self.vars['sel'] = _SelectorProxy(response)
if self.inthread:
self.vars['fetch'] = self.fetch
self.vars['view'] = open_in_browser
self.vars['shelp'] = self.print_help
self.update_vars(self.vars)
if not self.code:
self.vars['banner'] = self.get_help()
def print_help(self):
print(self.get_help())
def get_help(self):
b = []
b.append("Available Scrapy objects:")
for k, v in sorted(self.vars.items()):
if self._is_relevant(v):
b.append(" %-10s %s" % (k, v))
b.append("Useful shortcuts:")
b.append(" shelp() Shell help (print this help)")
if self.inthread:
b.append(" fetch(req_or_url) Fetch request (or URL) and "
"update local objects")
b.append(" view(response) View response in a browser")
return "\n".join("[s] %s" % l for l in b)
def _is_relevant(self, value):
return isinstance(value, self.relevant_classes)
def inspect_response(response, spider):
"""Open a shell to inspect the given response"""
Shell(spider.crawler).start(response=response)
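# Illustrative usage (editor's addition, not part of the upstream module): a spider
# callback can drop into this shell to debug the response it just received, e.g.
#
#     from scrapy.shell import inspect_response
#
#     def parse(self, response):
#         inspect_response(response, self)
#
# The second argument is the running spider instance ("self" inside a callback).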
def _request_deferred(request):
"""Wrap a request inside a Deferred.
    This function is harmful; do not use it unless you know what you are doing.
This returns a Deferred whose first pair of callbacks are the request
callback and errback. The Deferred also triggers when the request
callback/errback is executed (ie. when the request is downloaded)
WARNING: Do not call request.replace() until after the deferred is called.
"""
request_callback = request.callback
request_errback = request.errback
def _restore_callbacks(result):
request.callback = request_callback
request.errback = request_errback
return result
d = defer.Deferred()
d.addBoth(_restore_callbacks)
if request.callback:
d.addCallbacks(request.callback, request.errback)
request.callback, request.errback = d.callback, d.errback
return d
class _SelectorProxy(object):
def __init__(self, response):
self._proxiedresponse = response
def __getattr__(self, name):
warnings.warn('"sel" shortcut is deprecated. Use "response.xpath()", '
'"response.css()" or "response.selector" instead',
category=ScrapyDeprecationWarning, stacklevel=2)
return getattr(self._proxiedresponse.selector, name)
| bsd-3-clause | -5,611,503,319,833,055,000 | 34.345178 | 78 | 0.61597 | false |
andante20/volatility | volatility/plugins/gui/vtypes/win2003.py | 58 | 2168 | # Volatility
# Copyright (C) 2007-2013 Volatility Foundation
# Copyright (C) 2010,2011,2012 Michael Hale Ligh <[email protected]>
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
import volatility.obj as obj
class Win2003x86GuiVTypes(obj.ProfileModification):
"""Apply the overlays for Windows 2003 x86 (builds on Windows XP x86)"""
before = ["XP2003x86BaseVTypes"]
conditions = {'os': lambda x: x == 'windows',
'memory_model': lambda x: x == '32bit',
'major': lambda x: x == 5,
'minor': lambda x: x == 2}
def modification(self, profile):
profile.merge_overlay({
'tagWINDOWSTATION' : [ 0x54, {
'spwndClipOwner' : [ 0x18, ['pointer', ['tagWND']]],
'pGlobalAtomTable' : [ 0x3C, ['pointer', ['void']]],
}],
'tagTHREADINFO' : [ None, {
'PtiLink' : [ 0xB0, ['_LIST_ENTRY']],
'fsHooks' : [ 0x9C, ['unsigned long']],
'aphkStart' : [ 0xF8, ['array', 16, ['pointer', ['tagHOOK']]]],
}],
'tagDESKTOP' : [ None, {
'hsectionDesktop' : [ 0x3c, ['pointer', ['void']]],
'pheapDesktop' : [ 0x40, ['pointer', ['tagWIN32HEAP']]],
'ulHeapSize' : [ 0x44, ['unsigned long']],
'PtiList' : [ 0x60, ['_LIST_ENTRY']],
}],
'tagSERVERINFO' : [ None, {
'cHandleEntries' : [ 4, ['unsigned long']],
'cbHandleTable' : [ 0x1b8, ['unsigned long']],
}],
})
| gpl-2.0 | 4,136,559,547,899,335,700 | 37.714286 | 76 | 0.58072 | false |
OpenPymeMx/OCB | addons/base_report_designer/plugin/openerp_report_designer/bin/script/SendToServer.py | 90 | 10565 | #########################################################################
#
# Copyright (c) 2003-2004 Danny Brewer [email protected]
# Copyright (C) 2004-2010 OpenERP SA (<http://openerp.com>).
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# See: http://www.gnu.org/licenses/lgpl.html
#
#############################################################################
import uno
import string
import unohelper
import random
import xmlrpclib
import base64, tempfile
from com.sun.star.task import XJobExecutor
import os
import sys
if __name__<>'package':
from lib.gui import *
from lib.error import *
from lib.functions import *
from lib.logreport import *
from lib.tools import *
from LoginTest import *
from lib.rpc import *
database="report"
uid = 3
class SendtoServer(unohelper.Base, XJobExecutor):
Kind = {
'PDF' : 'pdf',
'OpenOffice': 'sxw',
'HTML' : 'html'
}
def __init__(self, ctx):
self.ctx = ctx
self.module = "openerp_report"
self.version = "0.1"
LoginTest()
self.logobj=Logger()
if not loginstatus and __name__=="package":
exit(1)
global passwd
self.password = passwd
global url
self.sock=RPCSession(url)
desktop=getDesktop()
oDoc2 = desktop.getCurrentComponent()
docinfo=oDoc2.getDocumentInfo()
self.ids = self.sock.execute(database, uid, self.password, 'ir.module.module', 'search', [('name','=','base_report_designer'),('state', '=', 'installed')])
if not len(self.ids):
ErrorDialog("Please install base_report_designer module.", "", "Module Uninstalled Error!")
exit(1)
report_name = ""
name=""
if docinfo.getUserFieldValue(2)<>"" :
try:
fields=['name','report_name']
self.res_other = self.sock.execute(database, uid, self.password, 'ir.actions.report.xml', 'read', [int(docinfo.getUserFieldValue(2))],fields)
name = self.res_other[0]['name']
report_name = self.res_other[0]['report_name']
except:
import traceback,sys
info = reduce(lambda x, y: x+y, traceback.format_exception(sys.exc_type, sys.exc_value, sys.exc_traceback))
                self.logobj.log_write('ServerParameter', LOG_ERROR, info)
elif docinfo.getUserFieldValue(3) <> "":
name = ""
result = "rnd"
for i in range(5):
result =result + random.choice('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890')
report_name = docinfo.getUserFieldValue(3) + "." + result
else:
ErrorDialog("Please select appropriate module...","Note: use OpenERP Report -> Open a new Report", "Module selection ERROR");
exit(1)
self.win = DBModalDialog(60, 50, 180, 100, "Send To Server")
self.win.addFixedText("lblName",10 , 9, 40, 15, "Report Name :")
self.win.addEdit("txtName", -5, 5, 123, 15,name)
self.win.addFixedText("lblReportName", 2, 30, 50, 15, "Technical Name :")
self.win.addEdit("txtReportName", -5, 25, 123, 15,report_name)
self.win.addCheckBox("chkHeader", 51, 45, 70 ,15, "Corporate Header")
self.win.setCheckBoxState("chkHeader", True)
self.win.addFixedText("lblResourceType", 2 , 60, 50, 15, "Select Rpt. Type :")
self.win.addComboListBox("lstResourceType", -5, 58, 123, 15,True,itemListenerProc=self.lstbox_selected)
self.lstResourceType = self.win.getControl( "lstResourceType" )
self.txtReportName=self.win.getControl( "txtReportName" )
self.txtReportName.Enable=False
for kind in self.Kind.keys():
self.lstResourceType.addItem( kind, self.lstResourceType.getItemCount() )
self.win.addButton( "btnSend", -5, -5, 80, 15, "Send Report to Server", actionListenerProc = self.btnOk_clicked)
self.win.addButton( "btnCancel", -5 - 80 -5, -5, 40, 15, "Cancel", actionListenerProc = self.btnCancel_clicked)
self.win.doModalDialog("lstResourceType", self.Kind.keys()[0])
def lstbox_selected(self, oItemEvent):
pass
def btnCancel_clicked(self, oActionEvent):
self.win.endExecute()
def btnOk_clicked(self, oActionEvent):
if self.win.getEditText("txtName") <> "" and self.win.getEditText("txtReportName") <> "":
desktop=getDesktop()
oDoc2 = desktop.getCurrentComponent()
docinfo=oDoc2.getDocumentInfo()
self.getInverseFieldsRecord(1)
fp_name = tempfile.mktemp('.'+"sxw")
if not oDoc2.hasLocation():
oDoc2.storeAsURL("file://"+fp_name,Array(makePropertyValue("MediaType","application/vnd.sun.xml.writer"),))
if docinfo.getUserFieldValue(2)=="":
name=self.win.getEditText("txtName"),
name_id={}
try:
name_id = self.sock.execute(database, uid, self.password, 'ir.actions.report.xml' , 'search',[('name','=',name)])
if not name_id:
id=self.getID()
docinfo.setUserFieldValue(2,id)
rec = {
'name': self.win.getEditText("txtReportName"),
'key': 'action',
'model': docinfo.getUserFieldValue(3),
'value': 'ir.actions.report.xml,'+str(id),
'key2': 'client_print_multi',
'object': True,
'user_id': uid
}
res = self.sock.execute(database, uid, self.password, 'ir.values' , 'create',rec )
else :
ErrorDialog("This name is already used for another report.\nPlease try with another name.", "", "Error!")
self.logobj.log_write('SendToServer',LOG_WARNING, ': report name already used DB %s' % (database))
self.win.endExecute()
except Exception,e:
import traceback,sys
info = reduce(lambda x, y: x+y, traceback.format_exception(sys.exc_type, sys.exc_value, sys.exc_traceback))
self.logobj.log_write('ServerParameter', LOG_ERROR, info)
else:
id = docinfo.getUserFieldValue(2)
vId = self.sock.execute(database, uid, self.password, 'ir.values' , 'search', [('value','=','ir.actions.report.xml,'+str(id))])
rec = { 'name': self.win.getEditText("txtReportName") }
res = self.sock.execute(database, uid, self.password, 'ir.values' , 'write',vId,rec)
oDoc2.store()
data = read_data_from_file( get_absolute_file_path( oDoc2.getURL()[7:] ) )
self.getInverseFieldsRecord(0)
#sock = xmlrpclib.ServerProxy(docinfo.getUserFieldValue(0) +'/xmlrpc/object')
file_type = oDoc2.getURL()[7:].split(".")[-1]
params = {
'name': self.win.getEditText("txtName"),
'model': docinfo.getUserFieldValue(3),
'report_name': self.win.getEditText("txtReportName"),
'header': (self.win.getCheckBoxState("chkHeader") <> 0),
'report_type': self.Kind[self.win.getListBoxSelectedItem("lstResourceType")],
}
if self.win.getListBoxSelectedItem("lstResourceType")=='OpenOffice':
params['report_type']=file_type
self.sock.execute(database, uid, self.password, 'ir.actions.report.xml', 'write', int(docinfo.getUserFieldValue(2)), params)
# Call upload_report as the *last* step, as it will call register_all() and cause the report service
# to be loaded - which requires all the data to be correct in the database
self.sock.execute(database, uid, self.password, 'ir.actions.report.xml', 'upload_report', int(docinfo.getUserFieldValue(2)),base64.encodestring(data),file_type,{})
self.logobj.log_write('SendToServer',LOG_INFO, ':Report %s successfully send using %s'%(params['name'],database))
self.win.endExecute()
else:
ErrorDialog("Either report name or technical name is empty.\nPlease specify an appropriate name.", "", "Error!")
self.logobj.log_write('SendToServer',LOG_WARNING, ': either report name or technical name is empty.')
self.win.endExecute()
def getID(self):
desktop=getDesktop()
doc = desktop.getCurrentComponent()
docinfo=doc.getDocumentInfo()
params = {
'name': self.win.getEditText("txtName"),
'model': docinfo.getUserFieldValue(3),
'report_name': self.win.getEditText('txtReportName')
}
id=self.sock.execute(database, uid, self.password, 'ir.actions.report.xml' ,'create', params)
return id
def getInverseFieldsRecord(self, nVal):
desktop=getDesktop()
doc = desktop.getCurrentComponent()
count=0
oParEnum = doc.getTextFields().createEnumeration()
while oParEnum.hasMoreElements():
oPar = oParEnum.nextElement()
if oPar.supportsService("com.sun.star.text.TextField.DropDown"):
oPar.SelectedItem = oPar.Items[nVal]
if nVal==0:
oPar.update()
if __name__<>"package" and __name__=="__main__":
SendtoServer(None)
elif __name__=="package":
g_ImplementationHelper.addImplementation( SendtoServer, "org.openoffice.openerp.report.sendtoserver", ("com.sun.star.task.Job",),)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -5,215,002,769,034,946,000 | 46.376682 | 175 | 0.587601 | false |
lupyuen/RaspberryPiImage | home/pi/GrovePi/Software/Python/others/temboo/Library/Amazon/CloudDrive/Children/AddChild.py | 5 | 5572 | # -*- coding: utf-8 -*-
###############################################################################
#
# AddChild
# Moves a specified folder into a parent folder.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class AddChild(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the AddChild Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(AddChild, self).__init__(temboo_session, '/Library/Amazon/CloudDrive/Children/AddChild')
def new_input_set(self):
return AddChildInputSet()
def _make_result_set(self, result, path):
return AddChildResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return AddChildChoreographyExecution(session, exec_id, path)
class AddChildInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the AddChild
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_AccessToken(self, value):
"""
Set the value of the AccessToken input for this Choreo. ((optional, string) A valid access token retrieved during the OAuth process. This is required unless you provide the ClientID, ClientSecret, and RefreshToken to generate a new access token.)
"""
super(AddChildInputSet, self)._set_input('AccessToken', value)
def set_ChildID(self, value):
"""
Set the value of the ChildID input for this Choreo. ((required, string) The ID of the folder that is being moved within a parent folder.)
"""
super(AddChildInputSet, self)._set_input('ChildID', value)
def set_ClientID(self, value):
"""
Set the value of the ClientID input for this Choreo. ((conditional, string) The Client ID provided by Amazon. Required unless providing a valid AccessToken.)
"""
super(AddChildInputSet, self)._set_input('ClientID', value)
def set_ClientSecret(self, value):
"""
Set the value of the ClientSecret input for this Choreo. ((conditional, string) The Client Secret provided by Amazon. Required unless providing a valid AccessToken.)
"""
super(AddChildInputSet, self)._set_input('ClientSecret', value)
def set_HandleRequestThrottling(self, value):
"""
Set the value of the HandleRequestThrottling input for this Choreo. ((optional, boolean) Whether or not to perform a retry sequence if a throttling error occurs. Set to true to enable this feature. The request will be retried up-to five times when enabled.)
"""
super(AddChildInputSet, self)._set_input('HandleRequestThrottling', value)
def set_MetaDataURL(self, value):
"""
Set the value of the MetaDataURL input for this Choreo. ((optional, string) The appropriate metadataUrl for your account. When not provided, the Choreo will lookup the URL using the Account.GetEndpoint Choreo.)
"""
super(AddChildInputSet, self)._set_input('MetaDataURL', value)
def set_ParentID(self, value):
"""
Set the value of the ParentID input for this Choreo. ((required, string) The ID of the parent folder that will contain the child folder that's being moved.)
"""
super(AddChildInputSet, self)._set_input('ParentID', value)
def set_RefreshToken(self, value):
"""
Set the value of the RefreshToken input for this Choreo. ((conditional, string) An OAuth refresh token used to generate a new access token when the original token is expired. Required unless providing a valid AccessToken.)
"""
super(AddChildInputSet, self)._set_input('RefreshToken', value)
class AddChildResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the AddChild Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from Amazon.)
"""
return self._output.get('Response', None)
def get_NewAccessToken(self):
"""
Retrieve the value for the "NewAccessToken" output from this Choreo execution. ((string) Contains a new AccessToken when the RefreshToken is provided.)
"""
return self._output.get('NewAccessToken', None)
class AddChildChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return AddChildResultSet(response, path)
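# --- Illustrative usage sketch (editor's addition, not generated by Temboo) ---
# Typical generated-choreo call sequence; the execute_with_results() helper follows the
# standard Temboo Python SDK and should be checked against the SDK version in use.
# The session object and all credential values are placeholders supplied by the caller.
def _example_add_child(session, child_id, parent_id, client_id, client_secret, refresh_token):
    """Move the folder child_id under parent_id using a pre-built TembooSession."""
    choreo = AddChild(session)
    inputs = choreo.new_input_set()
    inputs.set_ChildID(child_id)
    inputs.set_ParentID(parent_id)
    inputs.set_ClientID(client_id)
    inputs.set_ClientSecret(client_secret)
    inputs.set_RefreshToken(refresh_token)
    results = choreo.execute_with_results(inputs)
    return results.get_Response()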
| apache-2.0 | -2,735,268,273,463,755,000 | 44.672131 | 265 | 0.680546 | false |
jepler/linuxcnc-mirror | configs/sim/axis/orphans/pysubs/task.py | 5 | 1340 | # This is a component of LinuxCNC
# Copyright 2011, 2013, 2014 Dewey Garrett <[email protected]>,
# Michael Haberler <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
import sys
import hal
import emccanon
import interpreter
try:
import emctask
import customtask
except ImportError:
pass
try:
import cPickle as pickle
except ImportError:
import pickle
def starttask():
global pytask
import emc
ini = emc.ini(emctask.ini_filename())
t = ini.find("PYTHON", "PYTHON_TASK")
if int(t) if t else 0:
pytask = customtask.CustomTask()
if 'emctask' in sys.builtin_module_names:
starttask()
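# Editor's note (illustrative): starttask() only builds the Python task when the INI
# enables it. The section/key names come from the ini.find() call above and the value
# is read as an integer flag, e.g.
#
#   [PYTHON]
#   PYTHON_TASK = 1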
| lgpl-2.1 | -6,668,584,115,566,551,000 | 29.454545 | 81 | 0.720149 | false |
TeemuAhola/sailelfcloud | src/qml/python/elfcloudclient.py | 1 | 9585 | '''
Created on Sep 17, 2016
@author: Teemu Ahola [[email protected]]
'''
import os
import elfcloud
import worker
import binascii
import logger
APIKEY = 'swrqwb95d98ou8d'
VALULT_TYPES = [elfcloud.utils.VAULT_TYPE_DEFAULT, 'com.ahola.sailelfcloud']
DEFAULT_REQUEST_SIZE_BYTES = 256 * 1024 # Size of one request when sending or fetching
client = None
class ClientException(Exception):
def __init__(self, id=0, msg="unknown"):
self.__id = id
self.__msg = msg
@property
def id(self):
return self.__id
@property
def msg(self):
return self.__msg
class NotConnected(ClientException):
def __init__(self):
ClientException.__init__(self, 0, "not connected")
class AuthenticationFailure(ClientException):
pass
def handle_exception(func):
from functools import wraps
@wraps(func)
def exception_handler(*args, **kwargs):
try:
return func(*args, **kwargs)
except elfcloud.exceptions.ECAuthException as e:
raise AuthenticationFailure(e.id, e.message) from e
except elfcloud.exceptions.ECException as e:
raise ClientException(e.id, e.message) from e
except elfcloud.exceptions.ClientException as e:
raise ClientException(0, e.message) from e
except NotConnected:
raise
except Exception as e:
raise ClientException(0, str(e)) from e
return exception_handler
def check_connected(func):
from functools import wraps
@wraps(func)
def _check_connection(*args, **kwargs):
if not isConnected():
raise NotConnected()
return func(*args, **kwargs)
return _check_connection
@handle_exception
def setRequestSize(sizeInBytes):
client.set_request_size(sizeInBytes)
@handle_exception
def connect(username, password):
global client
try:
client = elfcloud.Client(username=username, auth_data=password,
apikey=APIKEY,
server_url=elfcloud.utils.SERVER_DEFAULT)
client.auth()
logger.info("elfCLOUD client connected")
setRequestSize(DEFAULT_REQUEST_SIZE_BYTES)
except elfcloud.exceptions.ECAuthException: # this we will handle by ourselves
client = None
raise
@handle_exception
def isConnected():
return client != None
@handle_exception
def disconnect():
global client
if client:
client.terminate()
client = None
logger.info("elfCLOUD client disconnected")
@handle_exception
def setEncryption(key, iv):
client.encryption_mode = elfcloud.utils.ENC_AES256
client.set_encryption_key(binascii.unhexlify(key))
client.set_iv(binascii.unhexlify(iv))
@handle_exception
def clearEncryption():
client.encryption_mode = elfcloud.utils.ENC_NONE
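# --- Illustrative usage sketch (editor's addition, not part of the original module) ---
# Credentials and the hex-encoded key/IV are placeholders; setEncryption() expects hex
# strings because it unhexlifies them before handing them to the elfcloud client.
def _example_connect_with_encryption(username, password, key_hex, iv_hex):
    connect(username, password)        # raises AuthenticationFailure on bad credentials
    setEncryption(key_hex, iv_hex)     # enable AES-256 for subsequent transfers
    try:
        pass                           # store/fetch data items here
    finally:
        clearEncryption()
        disconnect()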
SUBSCRIPTION_FIELD_MAP = {'id':'Id', 'status':'Status', 'start_date':'Start date',
'end_date':'End date', 'storage_quota': 'Quota',
'subscription_type':'Subscription type', 'renewal_type':'Renewal type'}
@handle_exception
@check_connected
def getSubscriptionInfo():
info = client.get_subscription_info()
subscr = info['current_subscription']
return {to_: str(subscr[from_]) for from_,to_ in SUBSCRIPTION_FIELD_MAP.items()}
WHOAMI_FIELD_MAP = {'name':'Name', 'lang':'Language', 'lastname':'Last name', 'firstname':'First name', 'id':'Id',
'email':'E-Mail', 'organization_unit':'Organization unit', 'eula_accepted':'EULA accepted'}
@handle_exception
@check_connected
def getWhoAmI():
info = client.whoami()
user = info['user']
return {to_: str(user[from_]) for from_,to_ in WHOAMI_FIELD_MAP.items()}
@handle_exception
@check_connected
def upload(parentId, remotename, filename, chunkCb=None, cancelCb=None, offset=None):
fileSize = os.path.getsize(filename)
class _FileObj(object):
def __init__(self, fileobj, offset_):
self.fileobj = fileobj
self.totalReadSize = 0 if not offset_ else offset_
self.fileobj.seek(self.totalReadSize)
def read(self, size):
if callable(cancelCb) and cancelCb(self.totalReadSize):
return None
data = self.fileobj.read(size)
self.totalReadSize += len(data)
if len(data) and callable(chunkCb):
chunkCb(fileSize, self.totalReadSize)
return data
with open(filename, "rb") as fileobj:
fo = _FileObj(fileobj, offset)
if offset:
client.store_data(int(parentId), remotename, fo, method="append")
else:
client.store_data(int(parentId), remotename, fo)
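# --- Illustrative usage sketch (editor's addition) ---
# Shows the chunkCb/cancelCb contract used by upload(): chunkCb receives the total file
# size and the bytes sent so far; cancelCb returning True aborts the transfer. The
# callback names and the parent id are placeholders.
def _example_upload_with_progress(parent_id, local_path):
    def on_chunk(total_bytes, sent_bytes):
        logger.info("sent %d of %d bytes" % (sent_bytes, total_bytes))
    def should_cancel(sent_bytes):
        return False  # return True to abort the transfer
    upload(parent_id, os.path.basename(local_path), local_path,
           chunkCb=on_chunk, cancelCb=should_cancel)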
@handle_exception
@check_connected
def listVaults():
vaultList = []
vaults = client.list_vaults()
for vault in vaults:
vaultList.append({'name': vault.name,
'id': vault.id,
'size': vault.size,
'type': 'vault',
'vaultType': vault.vault_type,
'permissions': vault.permissions,
'modified': vault.modified_date,
'accessed': vault.last_accessed_date,
'ownerFirstName': vault.owner['firstname'],
'ownerLastName': vault.owner['lastname']})
return vaultList
@handle_exception
@check_connected
def listContent(parentId):
contentList = []
clusters, dataitems = client.list_contents(int(parentId))
for cluster in clusters:
contentList.append({'name': cluster.name,
'id' : cluster.id,
'descendants': cluster.descendants,
'parentId': cluster.parent_id,
'modified': cluster.modified_date,
'accessed': cluster.last_accessed_date,
'permissions': cluster.permissions,
'type': 'cluster'})
for dataitem in dataitems:
contentList.append({'name': dataitem.name,
'id' : 0,
'parentId': dataitem.parent_id,
'type': 'dataitem',
'tags': dataitem.meta.get('TGS', ""),
'encryption': dataitem.meta.get('ENC', "NONE"),
'contentHash':dataitem.meta.get('CHA', ""),
'keyHash': dataitem.meta.get('KHA', "")})
return contentList
@handle_exception
@check_connected
def getDataItemInfo(parentId, name):
dataitem = client.get_dataitem(parentId, name)
return {'id': dataitem.dataitem_id,
'name': dataitem.name,
'size': dataitem.size,
'description': dataitem.description if dataitem.description else '',
'tags': dataitem.tags if dataitem.tags else [],
'accessed': dataitem.last_accessed_date if dataitem.last_accessed_date else '',
'contentHash': dataitem.content_hash if dataitem.content_hash else '',
'encryption': dataitem.__dict__.get('meta').get('ENC', "NONE"),
'keyHash': dataitem.key_hash if dataitem.key_hash else ''}
@handle_exception
@check_connected
def updateDataItem(parentId, name, description=None, tags=None):
client.update_dataitem(parentId, name, description, tags)
@handle_exception
@check_connected
def download(parentId, name, outputPath, key=None, chunkCb=None, cancelCb=None):
"""If cancelCb returns True, download is stopped."""
data = client.fetch_data(parentId, name)['data']
    dataLength = data.fileobj.getheader('Content-Length')  # Fragile way to get the total size: the Content-Length
                                                            # header may be missing, and there is no clean way yet to
                                                            # pass this information up to the caller.
dataFetched = 0
with open(outputPath, mode='wb') as outputFile:
for chunk in data:
outputFile.write(chunk)
dataFetched += len(chunk)
if len(chunk) and callable(chunkCb): chunkCb(dataLength, dataFetched)
if callable(cancelCb) and cancelCb(): break
@handle_exception
@check_connected
def removeDataItem(parentId, name):
client.remove_dataitem(parentId, name)
@handle_exception
@check_connected
def renameDataItem(parentId, oldName, newName):
client.rename_dataitem(parentId, oldName, newName)
@handle_exception
@check_connected
def addVault(name):
return client.add_vault(name, VALULT_TYPES[0]).id
@handle_exception
@check_connected
def removeVault(vaultId):
client.remove_vault(vaultId)
@handle_exception
@check_connected
def renameVault(vaultId, newName):
client.rename_vault(vaultId, newName)
@handle_exception
@check_connected
def addCluster(parentId, name):
return client.add_cluster(name, parentId).id
@handle_exception
@check_connected
def removeCluster(clusterId):
client.remove_cluster(clusterId)
@handle_exception
@check_connected
def renameCluster(clusterId, newName):
client.rename_cluster(clusterId, newName)
@handle_exception
@check_connected
def setProperty(name, data):
client.set_property(name, data)
@handle_exception
@check_connected
def getProperty(name):
return client.get_property(name)
| gpl-3.0 | 8,277,677,444,831,336,000 | 31.938144 | 132 | 0.610746 | false |
CopeX/odoo | addons/hw_scale/__openerp__.py | 220 | 1699 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
    'name': 'Weighing Scale Hardware Driver',
'version': '1.0',
'category': 'Hardware Drivers',
'sequence': 6,
    'summary': 'Hardware Driver for Weighing Scales',
'website': 'https://www.odoo.com/page/point-of-sale',
'description': """
Weighing Scale Hardware Driver
==============================
This module allows the point of sale to connect to a scale using a USB HSM Serial Scale Interface,
such as the Mettler Toledo Ariva.
""",
'author': 'OpenERP SA',
'depends': ['hw_proxy'],
'external_dependencies': {'python': ['serial']},
'test': [
],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 3,756,798,229,618,466,000 | 35.148936 | 98 | 0.60565 | false |
hrayr-artunyan/shuup | shuup/xtheme/migrations/0001_initial.py | 9 | 1682 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import enumfields.fields
import shuup.core.fields
import shuup.xtheme.models
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='SavedViewConfig',
fields=[
('id', models.AutoField(serialize=False, verbose_name='ID', auto_created=True, primary_key=True)),
('theme_identifier', models.CharField(db_index=True, max_length=64, verbose_name='theme identifier')),
('view_name', models.CharField(db_index=True, max_length=64, verbose_name='view name')),
('created_on', models.DateTimeField(verbose_name='created on', auto_now_add=True)),
('status', enumfields.fields.EnumIntegerField(db_index=True, verbose_name='status', enum=shuup.xtheme.models.SavedViewConfigStatus)),
('_data', shuup.core.fields.TaggedJSONField(db_column='data', default=dict, verbose_name='internal data')),
],
),
migrations.CreateModel(
name='ThemeSettings',
fields=[
('id', models.AutoField(serialize=False, verbose_name='ID', auto_created=True, primary_key=True)),
('theme_identifier', models.CharField(db_index=True, max_length=64, verbose_name='theme identifier', unique=True)),
('active', models.BooleanField(verbose_name='active', db_index=True, default=False)),
('data', shuup.core.fields.TaggedJSONField(db_column='data', default=dict, verbose_name='data')),
],
),
]
| agpl-3.0 | -3,279,893,957,301,471,700 | 45.722222 | 149 | 0.623068 | false |
youprofit/servo | components/script/dom/bindings/codegen/Configuration.py | 4 | 14097 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from WebIDL import IDLInterface
class Configuration:
"""
Represents global configuration state based on IDL parse data and
the configuration file.
"""
def __init__(self, filename, parseData):
# Read the configuration file.
glbl = {}
execfile(filename, glbl)
config = glbl['DOMInterfaces']
# Build descriptors for all the interfaces we have in the parse data.
# This allows callers to specify a subset of interfaces by filtering
# |parseData|.
self.descriptors = []
self.interfaces = {}
self.maxProtoChainLength = 0
for thing in parseData:
# Some toplevel things are sadly types, and those have an
# isInterface that doesn't mean the same thing as IDLObject's
# isInterface()...
if not isinstance(thing, IDLInterface):
continue
iface = thing
self.interfaces[iface.identifier.name] = iface
if iface.identifier.name not in config:
# Completely skip consequential interfaces with no descriptor
# if they have no interface object because chances are we
# don't need to do anything interesting with them.
if iface.isConsequential() and not iface.hasInterfaceObject():
continue
entry = {}
else:
entry = config[iface.identifier.name]
if not isinstance(entry, list):
assert isinstance(entry, dict)
entry = [entry]
self.descriptors.extend(
[Descriptor(self, iface, x) for x in entry])
# Mark the descriptors for which only a single nativeType implements
# an interface.
for descriptor in self.descriptors:
            interfaceName = descriptor.interface.identifier.name
            otherDescriptors = [d for d in self.descriptors
                                if d.interface.identifier.name == interfaceName]
descriptor.uniqueImplementation = len(otherDescriptors) == 1
self.enums = [e for e in parseData if e.isEnum()]
self.dictionaries = [d for d in parseData if d.isDictionary()]
self.callbacks = [c for c in parseData if
c.isCallback() and not c.isInterface()]
# Keep the descriptor list sorted for determinism.
self.descriptors.sort(lambda x, y: cmp(x.name, y.name))
def getInterface(self, ifname):
return self.interfaces[ifname]
def getDescriptors(self, **filters):
"""Gets the descriptors that match the given filters."""
curr = self.descriptors
for key, val in filters.iteritems():
if key == 'webIDLFile':
getter = lambda x: x.interface.filename()
elif key == 'hasInterfaceObject':
getter = lambda x: x.interface.hasInterfaceObject()
elif key == 'isCallback':
getter = lambda x: x.interface.isCallback()
elif key == 'isJSImplemented':
getter = lambda x: x.interface.isJSImplemented()
else:
getter = lambda x: getattr(x, key)
curr = filter(lambda x: getter(x) == val, curr)
return curr
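    # Illustrative (editor's note): callers typically filter with keyword arguments, e.g.
    #   config.getDescriptors(webIDLFile="Window.webidl", hasInterfaceObject=True)
    # where the special-cased keys above are handled explicitly and any other keyword is
    # compared against the corresponding Descriptor attribute. The file name is a placeholder.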
def getEnums(self, webIDLFile):
return filter(lambda e: e.filename() == webIDLFile, self.enums)
@staticmethod
def _filterForFile(items, webIDLFile=""):
"""Gets the items that match the given filters."""
if not webIDLFile:
return items
return filter(lambda x: x.filename() == webIDLFile, items)
def getDictionaries(self, webIDLFile=""):
return self._filterForFile(self.dictionaries, webIDLFile=webIDLFile)
def getCallbacks(self, webIDLFile=""):
return self._filterForFile(self.callbacks, webIDLFile=webIDLFile)
def getDescriptor(self, interfaceName):
"""
Gets the appropriate descriptor for the given interface name.
"""
iface = self.getInterface(interfaceName)
descriptors = self.getDescriptors(interface=iface)
# We should have exactly one result.
if len(descriptors) != 1:
raise NoSuchDescriptorError("For " + interfaceName + " found " +
str(len(descriptors)) + " matches")
return descriptors[0]
def getDescriptorProvider(self):
"""
Gets a descriptor provider that can provide descriptors as needed.
"""
return DescriptorProvider(self)
class NoSuchDescriptorError(TypeError):
def __init__(self, str):
TypeError.__init__(self, str)
class DescriptorProvider:
"""
A way of getting descriptors for interface names
"""
def __init__(self, config):
self.config = config
def getDescriptor(self, interfaceName):
"""
Gets the appropriate descriptor for the given interface name given the
context of the current descriptor.
"""
return self.config.getDescriptor(interfaceName)
class Descriptor(DescriptorProvider):
"""
Represents a single descriptor for an interface. See Bindings.conf.
"""
def __init__(self, config, interface, desc):
DescriptorProvider.__init__(self, config)
self.interface = interface
# Read the desc, and fill in the relevant defaults.
ifaceName = self.interface.identifier.name
# Callback types do not use JS smart pointers, so we should not use the
# built-in rooting mechanisms for them.
if self.interface.isCallback():
self.needsRooting = False
ty = "%sBinding::%s" % (ifaceName, ifaceName)
self.returnType = "Rc<%s>" % ty
self.argumentType = "???"
self.memberType = "???"
self.nativeType = ty
else:
self.needsRooting = True
self.returnType = "Root<%s>" % ifaceName
self.argumentType = "&%s" % ifaceName
self.memberType = "Root<%s>" % ifaceName
self.nativeType = "Root<%s>" % ifaceName
self.concreteType = ifaceName
self.register = desc.get('register', True)
self.outerObjectHook = desc.get('outerObjectHook', 'None')
self.proxy = False
# If we're concrete, we need to crawl our ancestor interfaces and mark
# them as having a concrete descendant.
self.concrete = (not self.interface.isCallback() and
desc.get('concrete', True))
self.operations = {
'IndexedGetter': None,
'IndexedSetter': None,
'IndexedCreator': None,
'IndexedDeleter': None,
'NamedGetter': None,
'NamedSetter': None,
'NamedCreator': None,
'NamedDeleter': None,
'Stringifier': None,
}
def addOperation(operation, m):
if not self.operations[operation]:
self.operations[operation] = m
# Since stringifiers go on the prototype, we only need to worry
# about our own stringifier, not those of our ancestor interfaces.
for m in self.interface.members:
if m.isMethod() and m.isStringifier():
addOperation('Stringifier', m)
if self.concrete:
iface = self.interface
while iface:
for m in iface.members:
if not m.isMethod():
continue
def addIndexedOrNamedOperation(operation, m):
self.proxy = True
if m.isIndexed():
operation = 'Indexed' + operation
else:
assert m.isNamed()
operation = 'Named' + operation
addOperation(operation, m)
if m.isGetter():
addIndexedOrNamedOperation('Getter', m)
if m.isSetter():
addIndexedOrNamedOperation('Setter', m)
if m.isCreator():
addIndexedOrNamedOperation('Creator', m)
if m.isDeleter():
addIndexedOrNamedOperation('Deleter', m)
iface.setUserData('hasConcreteDescendant', True)
iface = iface.parent
if self.proxy:
iface = self.interface
while iface:
iface.setUserData('hasProxyDescendant', True)
iface = iface.parent
self.name = interface.identifier.name
# self.extendedAttributes is a dict of dicts, keyed on
# all/getterOnly/setterOnly and then on member name. Values are an
# array of extended attributes.
self.extendedAttributes = {'all': {}, 'getterOnly': {}, 'setterOnly': {}}
def addExtendedAttribute(attribute, config):
def add(key, members, attribute):
for member in members:
self.extendedAttributes[key].setdefault(member, []).append(attribute)
if isinstance(config, dict):
for key in ['all', 'getterOnly', 'setterOnly']:
add(key, config.get(key, []), attribute)
elif isinstance(config, list):
add('all', config, attribute)
else:
assert isinstance(config, str)
if config == '*':
iface = self.interface
while iface:
add('all', map(lambda m: m.name, iface.members), attribute)
iface = iface.parent
else:
add('all', [config], attribute)
self._binaryNames = desc.get('binaryNames', {})
self._binaryNames.setdefault('__legacycaller', 'LegacyCall')
self._binaryNames.setdefault('__stringifier', 'Stringifier')
self._internalNames = desc.get('internalNames', {})
for member in self.interface.members:
if not member.isAttr() and not member.isMethod():
continue
binaryName = member.getExtendedAttribute("BinaryName")
if binaryName:
assert isinstance(binaryName, list)
assert len(binaryName) == 1
self._binaryNames.setdefault(member.identifier.name,
binaryName[0])
self._internalNames.setdefault(member.identifier.name,
member.identifier.name.replace('-', '_'))
# Build the prototype chain.
self.prototypeChain = []
parent = interface
while parent:
self.prototypeChain.insert(0, parent.identifier.name)
parent = parent.parent
config.maxProtoChainLength = max(config.maxProtoChainLength,
len(self.prototypeChain))
def binaryNameFor(self, name):
return self._binaryNames.get(name, name)
def internalNameFor(self, name):
return self._internalNames.get(name, name)
def getExtendedAttributes(self, member, getter=False, setter=False):
def maybeAppendInfallibleToAttrs(attrs, throws):
if throws is None:
attrs.append("infallible")
elif throws is True:
pass
else:
raise TypeError("Unknown value for 'Throws'")
name = member.identifier.name
if member.isMethod():
attrs = self.extendedAttributes['all'].get(name, [])
throws = member.getExtendedAttribute("Throws")
maybeAppendInfallibleToAttrs(attrs, throws)
return attrs
assert member.isAttr()
assert bool(getter) != bool(setter)
key = 'getterOnly' if getter else 'setterOnly'
attrs = self.extendedAttributes['all'].get(name, []) + self.extendedAttributes[key].get(name, [])
throws = member.getExtendedAttribute("Throws")
if throws is None:
throwsAttr = "GetterThrows" if getter else "SetterThrows"
throws = member.getExtendedAttribute(throwsAttr)
maybeAppendInfallibleToAttrs(attrs, throws)
return attrs
def isGlobal(self):
"""
Returns true if this is the primary interface for a global object
of some sort.
"""
return (self.interface.getExtendedAttribute("Global") or
self.interface.getExtendedAttribute("PrimaryGlobal"))
# Some utility methods
def getTypesFromDescriptor(descriptor):
"""
Get all argument and return types for all members of the descriptor
"""
members = [m for m in descriptor.interface.members]
if descriptor.interface.ctor():
members.append(descriptor.interface.ctor())
members.extend(descriptor.interface.namedConstructors)
signatures = [s for m in members if m.isMethod() for s in m.signatures()]
types = []
for s in signatures:
assert len(s) == 2
(returnType, arguments) = s
types.append(returnType)
types.extend(a.type for a in arguments)
types.extend(a.type for a in members if a.isAttr())
return types
def getTypesFromDictionary(dictionary):
"""
Get all member types for this dictionary
"""
types = []
curDict = dictionary
while curDict:
types.extend([m.type for m in curDict.members])
curDict = curDict.parent
return types
def getTypesFromCallback(callback):
"""
Get the types this callback depends on: its return type and the
types of its arguments.
"""
sig = callback.signatures()[0]
types = [sig[0]] # Return type
types.extend(arg.type for arg in sig[1]) # Arguments
return types
| mpl-2.0 | 3,562,373,444,955,077,600 | 36.895161 | 105 | 0.578988 | false |
analyseuc3m/ANALYSE-v1 | openedx/core/djangoapps/credit/tests/test_partition.py | 76 | 7030 | # -*- coding: utf-8 -*-
"""
Tests for In-Course Reverification Access Control Partition scheme
"""
import ddt
import unittest
from django.conf import settings
from lms.djangoapps.verify_student.models import (
VerificationCheckpoint,
VerificationStatus,
SkippedReverification,
)
from openedx.core.djangoapps.credit.partition_schemes import VerificationPartitionScheme
from student.models import CourseEnrollment
from student.tests.factories import UserFactory
from xmodule.partitions.partitions import UserPartition, Group
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
@ddt.ddt
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
class ReverificationPartitionTest(ModuleStoreTestCase):
"""Tests for the Reverification Partition Scheme. """
SUBMITTED = "submitted"
APPROVED = "approved"
DENIED = "denied"
def setUp(self):
super(ReverificationPartitionTest, self).setUp()
# creating course, checkpoint location and user partition mock object.
self.course = CourseFactory.create()
self.checkpoint_location = u'i4x://{org}/{course}/edx-reverification-block/first_uuid'.format(
org=self.course.id.org, course=self.course.id.course
)
scheme = UserPartition.get_scheme("verification")
self.user_partition = UserPartition(
id=0,
name=u"Verification Checkpoint",
description=u"Verification Checkpoint",
scheme=scheme,
parameters={"location": self.checkpoint_location},
groups=[
Group(scheme.ALLOW, "Allow access to content"),
Group(scheme.DENY, "Deny access to content"),
]
)
self.first_checkpoint = VerificationCheckpoint.objects.create(
course_id=self.course.id,
checkpoint_location=self.checkpoint_location
)
def create_user_and_enroll(self, enrollment_type):
"""Create and enroll users with provided enrollment type."""
user = UserFactory.create()
CourseEnrollment.objects.create(
user=user,
course_id=self.course.id,
mode=enrollment_type,
is_active=True
)
return user
def add_verification_status(self, user, status):
"""Adding the verification status for a user."""
VerificationStatus.add_status_from_checkpoints(
checkpoints=[self.first_checkpoint],
user=user,
status=status
)
@ddt.data(
("verified", SUBMITTED, VerificationPartitionScheme.ALLOW),
("verified", APPROVED, VerificationPartitionScheme.ALLOW),
("verified", DENIED, VerificationPartitionScheme.ALLOW),
("verified", None, VerificationPartitionScheme.DENY),
("honor", None, VerificationPartitionScheme.ALLOW),
)
@ddt.unpack
def test_get_group_for_user(self, enrollment_type, verification_status, expected_group):
# creating user and enroll them.
user = self.create_user_and_enroll(enrollment_type)
if verification_status:
self.add_verification_status(user, verification_status)
self._assert_group_assignment(user, expected_group)
def test_get_group_for_user_with_skipped(self):
# Check that a user is in verified allow group if that user has skipped
# any ICRV block.
user = self.create_user_and_enroll('verified')
SkippedReverification.add_skipped_reverification_attempt(
checkpoint=self.first_checkpoint,
user_id=user.id,
course_id=self.course.id
)
self._assert_group_assignment(user, VerificationPartitionScheme.ALLOW)
def test_cache_with_skipped_icrv(self):
# Check that a user is in verified allow group if that user has skipped
# any ICRV block.
user = self.create_user_and_enroll('verified')
SkippedReverification.add_skipped_reverification_attempt(
checkpoint=self.first_checkpoint,
user_id=user.id,
course_id=self.course.id
)
# this will warm the cache.
with self.assertNumQueries(3):
self._assert_group_assignment(user, VerificationPartitionScheme.ALLOW)
# no db queries this time.
with self.assertNumQueries(0):
self._assert_group_assignment(user, VerificationPartitionScheme.ALLOW)
def test_cache_with_submitted_status(self):
# Check that a user is in verified allow group if that user has approved status at
# any ICRV block.
user = self.create_user_and_enroll('verified')
self.add_verification_status(user, VerificationStatus.APPROVED_STATUS)
# this will warm the cache.
with self.assertNumQueries(4):
self._assert_group_assignment(user, VerificationPartitionScheme.ALLOW)
# no db queries this time.
with self.assertNumQueries(0):
self._assert_group_assignment(user, VerificationPartitionScheme.ALLOW)
def test_cache_with_denied_status(self):
# Check that a user is in verified allow group if that user has denied at
# any ICRV block.
user = self.create_user_and_enroll('verified')
self.add_verification_status(user, VerificationStatus.DENIED_STATUS)
# this will warm the cache.
with self.assertNumQueries(4):
self._assert_group_assignment(user, VerificationPartitionScheme.ALLOW)
# no db queries this time.
with self.assertNumQueries(0):
self._assert_group_assignment(user, VerificationPartitionScheme.ALLOW)
def test_cache_with_honor(self):
        # Check that a user enrolled in honor mode is always in the allow group.
user = self.create_user_and_enroll('honor')
# this will warm the cache.
with self.assertNumQueries(3):
self._assert_group_assignment(user, VerificationPartitionScheme.ALLOW)
# no db queries this time.
with self.assertNumQueries(0):
self._assert_group_assignment(user, VerificationPartitionScheme.ALLOW)
def test_cache_with_verified_deny_group(self):
        # Check that a verified user who has not performed any verification action is in the deny group.
user = self.create_user_and_enroll('verified')
# this will warm the cache.
with self.assertNumQueries(3):
self._assert_group_assignment(user, VerificationPartitionScheme.DENY)
# no db queries this time.
with self.assertNumQueries(0):
self._assert_group_assignment(user, VerificationPartitionScheme.DENY)
def _assert_group_assignment(self, user, expected_group_id):
"""Check that the user was assigned to a group. """
actual_group = VerificationPartitionScheme.get_group_for_user(self.course.id, user, self.user_partition)
self.assertEqual(actual_group.id, expected_group_id)
| agpl-3.0 | 4,481,977,371,273,984,500 | 37.839779 | 112 | 0.668137 | false |
tarballs-are-good/sympy | sympy/integrals/tests/test_rationaltools.py | 3 | 2614 | from sympy import symbols, S, I, atan, log, Poly
from sympy.integrals.rationaltools import ratint, \
ratint_ratpart, ratint_logpart, log_to_atan, log_to_real
from sympy.abc import a, x, t
half = S(1)/2
def test_ratint():
assert ratint(S(0), x) == 0
assert ratint(S(7), x) == 7*x
assert ratint(x, x) == x**2/2
assert ratint(2*x, x) == x**2
assert ratint(8*x**7+2*x+1, x) == x**8+x**2+x
f = S(1)
g = x + 1
assert ratint(f / g, x) == log(x + 1)
assert ratint((f,g), x) == log(x + 1)
f = x**3 - x
g = x - 1
assert ratint(f/g, x) == x**3/3 + x**2/2
f = x
g = (x - a)*(x + a)
assert ratint(f/g, x) == log(x**2 - a**2)/2
f = S(1)
g = x**2 + 1
assert ratint(f/g, x, real=None) == atan(x)
assert ratint(f/g, x, real=True) == atan(x)
assert ratint(f/g, x, real=False) == I*log(x + I)/2 - I*log(x - I)/2
f = S(36)
g = x**5-2*x**4-2*x**3+4*x**2+x-2
assert ratint(f/g, x) == \
-4*log(1 + x) + 4*log(-2 + x) - (6 + 12*x)/(1 - x**2)
f = x**4-3*x**2+6
g = x**6-5*x**4+5*x**2+4
assert ratint(f/g, x) == \
atan(x) + atan(x**3) + atan(x/2 - 3*x**S(3)/2 + S(1)/2*x**5)
f = x**7-24*x**4-4*x**2+8*x-8
g = x**8+6*x**6+12*x**4+8*x**2
assert ratint(f/g, x) == \
(4 + 6*x + 8*x**2 + 3*x**3)/(4*x + 4*x**3 + x**5) + log(x)
assert ratint((x**3*f)/(x*g), x) == \
-(12 - 16*x + 6*x**2 - 14*x**3)/(4 + 4*x**2 + x**4) - \
5*2**(S(1)/2)*atan(x*2**(S(1)/2)/2) + S(1)/2*x**2 - 3*log(2 + x**2)
f = x**5-x**4+4*x**3+x**2-x+5
g = x**4-2*x**3+5*x**2-4*x+4
assert ratint(f/g, x) == \
x + S(1)/2*x**2 + S(1)/2*log(2-x+x**2) + (9-4*x)/(14-7*x+7*x**2) + \
13*7**(S(1)/2)*atan(-S(1)/7*7**(S(1)/2) + 2*x*7**(S(1)/2)/7)/49
assert ratint(1/(x**2+x+1), x) == \
2*3**(S(1)/2)*atan(3**(S(1)/2)/3 + 2*x*3**(S(1)/2)/3)/3
assert ratint(1/(x**3+1), x) == \
-log(1 - x + x**2)/6 + log(1 + x)/3 + 3**(S(1)/2)*atan(-3**(S(1)/2)/3 + 2*x*3**(S(1)/2)/3)/3
assert ratint(1/(x**2+x+1), x, real=False) == \
-I*3**half*log(half + x - half*I*3**half)/3 + \
I*3**half*log(half + x + half*I*3**half)/3
assert ratint(1/(x**3+1), x, real=False) == log(1 + x)/3 - \
(S(1)/6 - I*3**half/6)*log(-half + x + I*3**half/2) - \
(S(1)/6 + I*3**half/6)*log(-half + x - I*3**half/2)
def test_ratint_logpart():
assert ratint_logpart(x, x**2-9, x, t) == \
[(Poly(x**2 - 9, x), Poly(-2*t + 1, t))]
assert ratint_logpart(x**2, x**3-5, x, t) == \
[(Poly(x**3 - 5, x), Poly(-3*t + 1, t))]
| bsd-3-clause | -5,423,839,229,775,236,000 | 27.725275 | 100 | 0.435731 | false |
leppa/home-assistant | tests/components/vacuum/common.py | 24 | 6031 | """Collection of helper methods.
All containing methods are legacy helpers that should not be used by new
components. Instead call the service directly.
"""
from homeassistant.components.vacuum import (
ATTR_FAN_SPEED,
ATTR_PARAMS,
DOMAIN,
SERVICE_CLEAN_SPOT,
SERVICE_LOCATE,
SERVICE_PAUSE,
SERVICE_RETURN_TO_BASE,
SERVICE_SEND_COMMAND,
SERVICE_SET_FAN_SPEED,
SERVICE_START,
SERVICE_START_PAUSE,
SERVICE_STOP,
)
from homeassistant.const import (
ATTR_COMMAND,
ATTR_ENTITY_ID,
ENTITY_MATCH_ALL,
SERVICE_TOGGLE,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
)
from homeassistant.loader import bind_hass
@bind_hass
def turn_on(hass, entity_id=ENTITY_MATCH_ALL):
"""Turn all or specified vacuum on."""
hass.add_job(async_turn_on, hass, entity_id)
async def async_turn_on(hass, entity_id=ENTITY_MATCH_ALL):
"""Turn all or specified vacuum on."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else None
await hass.services.async_call(DOMAIN, SERVICE_TURN_ON, data, blocking=True)
@bind_hass
def turn_off(hass, entity_id=ENTITY_MATCH_ALL):
"""Turn all or specified vacuum off."""
hass.add_job(async_turn_off, hass, entity_id)
async def async_turn_off(hass, entity_id=ENTITY_MATCH_ALL):
"""Turn all or specified vacuum off."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else None
await hass.services.async_call(DOMAIN, SERVICE_TURN_OFF, data, blocking=True)
@bind_hass
def toggle(hass, entity_id=ENTITY_MATCH_ALL):
"""Toggle all or specified vacuum."""
hass.add_job(async_toggle, hass, entity_id)
async def async_toggle(hass, entity_id=ENTITY_MATCH_ALL):
"""Toggle all or specified vacuum."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else None
await hass.services.async_call(DOMAIN, SERVICE_TOGGLE, data, blocking=True)
@bind_hass
def locate(hass, entity_id=ENTITY_MATCH_ALL):
"""Locate all or specified vacuum."""
hass.add_job(async_locate, hass, entity_id)
async def async_locate(hass, entity_id=ENTITY_MATCH_ALL):
"""Locate all or specified vacuum."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else None
await hass.services.async_call(DOMAIN, SERVICE_LOCATE, data, blocking=True)
@bind_hass
def clean_spot(hass, entity_id=ENTITY_MATCH_ALL):
"""Tell all or specified vacuum to perform a spot clean-up."""
hass.add_job(async_clean_spot, hass, entity_id)
async def async_clean_spot(hass, entity_id=ENTITY_MATCH_ALL):
"""Tell all or specified vacuum to perform a spot clean-up."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else None
await hass.services.async_call(DOMAIN, SERVICE_CLEAN_SPOT, data, blocking=True)
@bind_hass
def return_to_base(hass, entity_id=ENTITY_MATCH_ALL):
"""Tell all or specified vacuum to return to base."""
hass.add_job(async_return_to_base, hass, entity_id)
async def async_return_to_base(hass, entity_id=ENTITY_MATCH_ALL):
"""Tell all or specified vacuum to return to base."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else None
await hass.services.async_call(DOMAIN, SERVICE_RETURN_TO_BASE, data, blocking=True)
@bind_hass
def start_pause(hass, entity_id=ENTITY_MATCH_ALL):
"""Tell all or specified vacuum to start or pause the current task."""
hass.add_job(async_start_pause, hass, entity_id)
async def async_start_pause(hass, entity_id=ENTITY_MATCH_ALL):
"""Tell all or specified vacuum to start or pause the current task."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else None
await hass.services.async_call(DOMAIN, SERVICE_START_PAUSE, data, blocking=True)
@bind_hass
def start(hass, entity_id=ENTITY_MATCH_ALL):
"""Tell all or specified vacuum to start or resume the current task."""
hass.add_job(async_start, hass, entity_id)
async def async_start(hass, entity_id=ENTITY_MATCH_ALL):
"""Tell all or specified vacuum to start or resume the current task."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else None
await hass.services.async_call(DOMAIN, SERVICE_START, data, blocking=True)
@bind_hass
def pause(hass, entity_id=ENTITY_MATCH_ALL):
"""Tell all or the specified vacuum to pause the current task."""
hass.add_job(async_pause, hass, entity_id)
async def async_pause(hass, entity_id=ENTITY_MATCH_ALL):
"""Tell all or the specified vacuum to pause the current task."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else None
await hass.services.async_call(DOMAIN, SERVICE_PAUSE, data, blocking=True)
@bind_hass
def stop(hass, entity_id=ENTITY_MATCH_ALL):
"""Stop all or specified vacuum."""
hass.add_job(async_stop, hass, entity_id)
async def async_stop(hass, entity_id=ENTITY_MATCH_ALL):
"""Stop all or specified vacuum."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else None
await hass.services.async_call(DOMAIN, SERVICE_STOP, data, blocking=True)
@bind_hass
def set_fan_speed(hass, fan_speed, entity_id=ENTITY_MATCH_ALL):
"""Set fan speed for all or specified vacuum."""
hass.add_job(async_set_fan_speed, hass, fan_speed, entity_id)
async def async_set_fan_speed(hass, fan_speed, entity_id=ENTITY_MATCH_ALL):
"""Set fan speed for all or specified vacuum."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}
data[ATTR_FAN_SPEED] = fan_speed
await hass.services.async_call(DOMAIN, SERVICE_SET_FAN_SPEED, data, blocking=True)
@bind_hass
def send_command(hass, command, params=None, entity_id=ENTITY_MATCH_ALL):
"""Send command to all or specified vacuum."""
hass.add_job(async_send_command, hass, command, params, entity_id)
async def async_send_command(hass, command, params=None, entity_id=ENTITY_MATCH_ALL):
"""Send command to all or specified vacuum."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}
data[ATTR_COMMAND] = command
if params is not None:
data[ATTR_PARAMS] = params
await hass.services.async_call(DOMAIN, SERVICE_SEND_COMMAND, data, blocking=True)
| apache-2.0 | 3,158,031,097,565,718,500 | 33.267045 | 87 | 0.704195 | false |
aksh1/wagtail-cookiecutter-foundation | {{cookiecutter.repo_name}}/pages/migrations/0001_initial.py | 5 | 12936 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.db.models.deletion
import modelcluster.fields
import wagtail.wagtailcore.fields
import wagtail.wagtailcore.blocks
class Migration(migrations.Migration):
dependencies = [
('wagtailimages', '0006_add_verbose_names'),
('wagtaildocs', '0003_add_verbose_names'),
('wagtailcore', '0001_squashed_0016_change_page_url_path_to_text_field'),
]
operations = [
migrations.CreateModel(
name='ContentBlock',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('link_external', models.URLField(verbose_name=b'External link', blank=True)),
('title', models.CharField(max_length=255)),
('body', wagtail.wagtailcore.fields.RichTextField()),
('summary', wagtail.wagtailcore.fields.RichTextField(blank=True)),
('slug', models.SlugField()),
('link_document', models.ForeignKey(related_name='+', blank=True, to='wagtaildocs.Document', null=True)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='FaqsPage',
fields=[
('page_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('body', wagtail.wagtailcore.fields.StreamField([(b'faq_question', wagtail.wagtailcore.blocks.CharBlock(classname=b'full title')), (b'faq_answer', wagtail.wagtailcore.blocks.RichTextBlock())])),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='HomePage',
fields=[
('page_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('title_text', wagtail.wagtailcore.fields.RichTextField(null=True, blank=True)),
('body', wagtail.wagtailcore.fields.RichTextField(null=True, blank=True)),
],
options={
'verbose_name': 'Homepage',
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='HomePageCarouselItem',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('sort_order', models.IntegerField(null=True, editable=False, blank=True)),
('link_external', models.URLField(verbose_name=b'External link', blank=True)),
('embed_url', models.URLField(verbose_name=b'Embed URL', blank=True)),
('caption', wagtail.wagtailcore.fields.RichTextField(blank=True)),
('image', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.SET_NULL, blank=True, to='wagtailimages.Image', null=True)),
('link_document', models.ForeignKey(related_name='+', blank=True, to='wagtaildocs.Document', null=True)),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
),
migrations.CreateModel(
name='HomePageContentItem',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('sort_order', models.IntegerField(null=True, editable=False, blank=True)),
('link_external', models.URLField(verbose_name=b'External link', blank=True)),
('title', models.CharField(max_length=100)),
('content', wagtail.wagtailcore.fields.RichTextField(null=True, blank=True)),
('summary', wagtail.wagtailcore.fields.RichTextField(blank=True)),
('slug', models.SlugField()),
('image', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.SET_NULL, blank=True, to='wagtailimages.Image', null=True)),
('link_document', models.ForeignKey(related_name='+', blank=True, to='wagtaildocs.Document', null=True)),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
),
migrations.CreateModel(
name='HomePageRelatedLink',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('sort_order', models.IntegerField(null=True, editable=False, blank=True)),
('link_external', models.URLField(verbose_name=b'External link', blank=True)),
('title', models.CharField(help_text=b'Link title', max_length=255)),
('link_document', models.ForeignKey(related_name='+', blank=True, to='wagtaildocs.Document', null=True)),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
),
migrations.CreateModel(
name='StandardIndexPage',
fields=[
('page_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('subtitle', models.CharField(max_length=255, blank=True)),
('intro', wagtail.wagtailcore.fields.RichTextField(blank=True)),
('feed_image', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.SET_NULL, blank=True, to='wagtailimages.Image', help_text=b'An optional image to represent the page', null=True)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='StandardIndexPageRelatedLink',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('sort_order', models.IntegerField(null=True, editable=False, blank=True)),
('link_external', models.URLField(verbose_name=b'External link', blank=True)),
('title', models.CharField(help_text=b'Link title', max_length=255)),
('link_document', models.ForeignKey(related_name='+', blank=True, to='wagtaildocs.Document', null=True)),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
),
migrations.CreateModel(
name='StandardPage',
fields=[
('page_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('subtitle', models.CharField(max_length=255, blank=True)),
('intro', wagtail.wagtailcore.fields.RichTextField(blank=True)),
('body', wagtail.wagtailcore.fields.RichTextField(blank=True)),
('feed_image', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.SET_NULL, blank=True, to='wagtailimages.Image', null=True)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='StandardPageCarouselItem',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('sort_order', models.IntegerField(null=True, editable=False, blank=True)),
('link_external', models.URLField(verbose_name=b'External link', blank=True)),
('embed_url', models.URLField(verbose_name=b'Embed URL', blank=True)),
('caption', wagtail.wagtailcore.fields.RichTextField(blank=True)),
('image', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.SET_NULL, blank=True, to='wagtailimages.Image', null=True)),
('link_document', models.ForeignKey(related_name='+', blank=True, to='wagtaildocs.Document', null=True)),
('link_page', models.ForeignKey(related_name='+', blank=True, to='wagtailcore.Page', null=True)),
('page', modelcluster.fields.ParentalKey(related_name='carousel_items', to='pages.StandardPage')),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
),
migrations.CreateModel(
name='StandardPageRelatedLink',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('sort_order', models.IntegerField(null=True, editable=False, blank=True)),
('link_external', models.URLField(verbose_name=b'External link', blank=True)),
('title', models.CharField(help_text=b'Link title', max_length=255)),
('link_document', models.ForeignKey(related_name='+', blank=True, to='wagtaildocs.Document', null=True)),
('link_page', models.ForeignKey(related_name='+', blank=True, to='wagtailcore.Page', null=True)),
('page', modelcluster.fields.ParentalKey(related_name='related_links', to='pages.StandardPage')),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
),
migrations.CreateModel(
name='Testimonial',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('link_external', models.URLField(verbose_name=b'External link', blank=True)),
('name', models.CharField(max_length=150)),
('text', models.CharField(max_length=255)),
('link_document', models.ForeignKey(related_name='+', blank=True, to='wagtaildocs.Document', null=True)),
('link_page', models.ForeignKey(related_name='+', blank=True, to='wagtailcore.Page', null=True)),
('page', models.ForeignKey(related_name='testimonials', blank=True, to='wagtailcore.Page', null=True)),
('photo', models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, blank=True, to='wagtailimages.Image', null=True)),
],
options={
'abstract': False,
},
),
migrations.AddField(
model_name='standardindexpagerelatedlink',
name='link_page',
field=models.ForeignKey(related_name='+', blank=True, to='wagtailcore.Page', null=True),
),
migrations.AddField(
model_name='standardindexpagerelatedlink',
name='page',
field=modelcluster.fields.ParentalKey(related_name='related_links', to='pages.StandardIndexPage'),
),
migrations.AddField(
model_name='homepagerelatedlink',
name='link_page',
field=models.ForeignKey(related_name='+', blank=True, to='wagtailcore.Page', null=True),
),
migrations.AddField(
model_name='homepagerelatedlink',
name='page',
field=modelcluster.fields.ParentalKey(related_name='related_links', to='pages.HomePage'),
),
migrations.AddField(
model_name='homepagecontentitem',
name='link_page',
field=models.ForeignKey(related_name='+', blank=True, to='wagtailcore.Page', null=True),
),
migrations.AddField(
model_name='homepagecontentitem',
name='page',
field=modelcluster.fields.ParentalKey(related_name='content_items', to='pages.HomePage'),
),
migrations.AddField(
model_name='homepagecarouselitem',
name='link_page',
field=models.ForeignKey(related_name='+', blank=True, to='wagtailcore.Page', null=True),
),
migrations.AddField(
model_name='homepagecarouselitem',
name='page',
field=modelcluster.fields.ParentalKey(related_name='carousel_items', to='pages.HomePage'),
),
migrations.AddField(
model_name='contentblock',
name='link_page',
field=models.ForeignKey(related_name='+', blank=True, to='wagtailcore.Page', null=True),
),
migrations.AddField(
model_name='contentblock',
name='page',
field=models.ForeignKey(related_name='contentblocks', blank=True, to='wagtailcore.Page', null=True),
),
]
| mit | -7,302,656,029,798,066,000 | 51.37247 | 217 | 0.571892 | false |
willdonetang/taisi360 | tools/parse/parse.py | 1 | 2303 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from langconv import *
from bs4 import BeautifulSoup
import re
import os
import json
# Input is Unicode, output is Unicode
def big2simple(line):
    # Convert Traditional Chinese to Simplified Chinese
line = Converter('zh-hans').convert(line)
return line
def save_img_url(img_url,num):
img_url_file = open("img_url.txt","a")
line = num+"\t"+img_url+"\n"
img_url_file.write(line.encode("utf-8"))
img_url_file.close()
# path is the source file path; num is the source file number
def parse_html(path,num):
in_file = open(path,"r")
if not in_file:
print path,"open error"
return None
soup = BeautifulSoup(open(path,"r"))
result = {}
result['id'] = num
#keyword
keyword = None
node = soup.find("meta",attrs={"name":"keywords"})
if node:
result['keyword'] = big2simple(node['content'])
#title
title = None
node = soup.find("h1",attrs={"class":"entry-title"})
if node:
result['title'] = big2simple(node.get_text())
#category
category = None
node = soup.find("span",attrs={"class":"entry-cat"})
if node:
node = node.find('a')
if node:
result['category'] = big2simple(node.get_text())
    # Article body
content = None
node = soup.find("div",class_="entry-content")
if node:
for i in node.find_all("script"):
i.decompose()
for i in node.find_all("iframe"):
i.decompose()
tmp = node.find("a",class_="twitter-share-button")
if tmp:
tmp.decompose()
content = big2simple(unicode(node))
result['content'] = content
    # Images in the article body
img_list = node.find_all("img")
result['img_list'] = []
for img in img_list:
img_url = img['src']
result['img_list'].append(img_url)
save_img_url(img_url,num)
return result
def save_result(file_path,ret):
file_out = open(file_path,"a")
ret = json.dumps(ret)
file_out.write(ret)
file_out.write("\n")
file_out.close()
def load_files_list(path):
file_list = []
for i in open(path):
line = i.strip()
if not line:
continue
file_list.append(line)
print "load_files_list ok, count=%d" % len(file_list)
return file_list
if __name__=="__main__":
dir_path = r'C:\Users\Einstein\Desktop\www.fuzokuu.com'
file_num_list = load_files_list("files.txt")
for file_num in file_num_list:
path = os.path.join(dir_path,file_num)
print path
ret = parse_html(path,file_num)
save_result("ret.txt",ret)
| apache-2.0 | -1,982,061,156,409,271,300 | 20.747573 | 56 | 0.65297 | false |
pabloborrego93/edx-platform | lms/djangoapps/staticbook/views.py | 11 | 6395 | """
Views for serving static textbooks.
"""
from django.contrib.auth.decorators import login_required
from django.http import Http404
from edxmako.shortcuts import render_to_response
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from xmodule.annotator_token import retrieve_token
from courseware.access import has_access
from courseware.courses import get_course_with_access
from notes.utils import notes_enabled_for_course
from static_replace import replace_static_urls
@login_required
def index(request, course_id, book_index, page=None):
"""
Serve static image-based textbooks.
"""
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
course = get_course_with_access(request.user, 'load', course_key)
staff_access = bool(has_access(request.user, 'staff', course))
book_index = int(book_index)
if book_index < 0 or book_index >= len(course.textbooks):
raise Http404("Invalid book index value: {0}".format(book_index))
textbook = course.textbooks[book_index]
table_of_contents = textbook.table_of_contents
if page is None:
page = textbook.start_page
return render_to_response(
'staticbook.html',
{
'book_index': book_index, 'page': int(page),
'course': course,
'book_url': textbook.book_url,
'table_of_contents': table_of_contents,
'start_page': textbook.start_page,
'end_page': textbook.end_page,
'staff_access': staff_access,
},
)
def remap_static_url(original_url, course):
"""Remap a URL in the ways the course requires."""
# Ick: this should be possible without having to quote and unquote the URL...
input_url = "'" + original_url + "'"
output_url = replace_static_urls(
input_url,
getattr(course, 'data_dir', None),
course_id=course.id,
static_asset_path=course.static_asset_path
)
# strip off the quotes again...
return output_url[1:-1]
@login_required
def pdf_index(request, course_id, book_index, chapter=None, page=None):
"""
Display a PDF textbook.
course_id: course for which to display text. The course should have
"pdf_textbooks" property defined.
book index: zero-based index of which PDF textbook to display.
chapter: (optional) one-based index into the chapter array of textbook PDFs to display.
Defaults to first chapter. Specifying this assumes that there are separate PDFs for
each chapter in a textbook.
page: (optional) one-based page number to display within the PDF. Defaults to first page.
"""
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
course = get_course_with_access(request.user, 'load', course_key)
staff_access = bool(has_access(request.user, 'staff', course))
book_index = int(book_index)
if book_index < 0 or book_index >= len(course.pdf_textbooks):
raise Http404("Invalid book index value: {0}".format(book_index))
textbook = course.pdf_textbooks[book_index]
viewer_params = '&file='
current_url = ''
if 'url' in textbook:
textbook['url'] = remap_static_url(textbook['url'], course)
viewer_params += textbook['url']
current_url = textbook['url']
# then remap all the chapter URLs as well, if they are provided.
current_chapter = None
if 'chapters' in textbook:
for entry in textbook['chapters']:
entry['url'] = remap_static_url(entry['url'], course)
if chapter is not None and int(chapter) <= (len(textbook['chapters'])):
current_chapter = textbook['chapters'][int(chapter) - 1]
else:
current_chapter = textbook['chapters'][0]
viewer_params += current_chapter['url']
current_url = current_chapter['url']
viewer_params += '#zoom=page-fit&disableRange=true'
if page is not None:
viewer_params += '&page={}'.format(page)
if request.GET.get('viewer', '') == 'true':
template = 'pdf_viewer.html'
else:
template = 'static_pdfbook.html'
return render_to_response(
template,
{
'book_index': book_index,
'course': course,
'textbook': textbook,
'chapter': chapter,
'page': page,
'viewer_params': viewer_params,
'current_chapter': current_chapter,
'staff_access': staff_access,
'current_url': current_url,
},
)
@login_required
def html_index(request, course_id, book_index, chapter=None):
"""
Display an HTML textbook.
course_id: course for which to display text. The course should have
"html_textbooks" property defined.
book index: zero-based index of which HTML textbook to display.
chapter: (optional) one-based index into the chapter array of textbook HTML files to display.
Defaults to first chapter. Specifying this assumes that there are separate HTML files for
each chapter in a textbook.
"""
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
course = get_course_with_access(request.user, 'load', course_key)
staff_access = bool(has_access(request.user, 'staff', course))
notes_enabled = notes_enabled_for_course(course)
book_index = int(book_index)
if book_index < 0 or book_index >= len(course.html_textbooks):
raise Http404("Invalid book index value: {0}".format(book_index))
textbook = course.html_textbooks[book_index]
if 'url' in textbook:
textbook['url'] = remap_static_url(textbook['url'], course)
# then remap all the chapter URLs as well, if they are provided.
if 'chapters' in textbook:
for entry in textbook['chapters']:
entry['url'] = remap_static_url(entry['url'], course)
student = request.user
return render_to_response(
'static_htmlbook.html',
{
'book_index': book_index,
'course': course,
'textbook': textbook,
'chapter': chapter,
'student': student,
'staff_access': staff_access,
'notes_enabled': notes_enabled,
'storage': course.annotation_storage_url,
'token': retrieve_token(student.email, course.annotation_token_secret),
},
)
| agpl-3.0 | -4,165,255,807,668,865,000 | 34.726257 | 98 | 0.641439 | false |
econchick/heroku-buildpack-python | vendor/pip-1.2.1/contrib/packager/template.py | 12 | 1308 | #! /usr/bin/env python
sources = """
@SOURCES@"""
import os
import sys
import base64
import zlib
import tempfile
import shutil
def unpack(sources):
temp_dir = tempfile.mkdtemp('-scratchdir', 'unpacker-')
for package, content in sources.items():
filepath = package.split(".")
dirpath = os.sep.join(filepath[:-1])
packagedir = os.path.join(temp_dir, dirpath)
if not os.path.isdir(packagedir):
os.makedirs(packagedir)
mod = open(os.path.join(packagedir, "%s.py" % filepath[-1]), 'wb')
try:
mod.write(content.encode("ascii"))
finally:
mod.close()
return temp_dir
if __name__ == "__main__":
if sys.version_info >= (3, 0):
exec("def do_exec(co, loc): exec(co, loc)\n")
import pickle
sources = sources.encode("ascii") # ensure bytes
sources = pickle.loads(zlib.decompress(base64.decodebytes(sources)))
else:
import cPickle as pickle
exec("def do_exec(co, loc): exec co in loc\n")
sources = pickle.loads(zlib.decompress(base64.decodestring(sources)))
try:
temp_dir = unpack(sources)
sys.path.insert(0, temp_dir)
entry = """@ENTRY@"""
do_exec(entry, locals())
finally:
shutil.rmtree(temp_dir)
| mit | -7,528,958,298,495,622,000 | 26.25 | 77 | 0.590979 | false |
nzavagli/UnrealPy | UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/Python-2.7.10/Mac/Modules/cf/cfscan.py | 34 | 5533 | # Scan an Apple header file, generating a Python file of generator calls.
import sys
from bgenlocations import TOOLBOXDIR, BGENDIR
sys.path.append(BGENDIR)
from scantools import Scanner_OSX
LONG = "CoreFoundation"
SHORT = "cf"
OBJECTS = ("CFTypeRef",
"CFArrayRef", "CFMutableArrayRef",
"CFDataRef", "CFMutableDataRef",
"CFDictionaryRef", "CFMutableDictionaryRef",
"CFStringRef", "CFMutableStringRef",
"CFURLRef",
## "CFPropertyListRef",
)
# ADD object typenames here
def main():
input = [
"CFBase.h",
"CFArray.h",
## "CFBag.h",
## "CFBundle.h",
## "CFCharacterSet.h",
"CFData.h",
## "CFDate.h",
"CFDictionary.h",
## "CFNumber.h",
## "CFPlugIn.h",
"CFPreferences.h",
"CFPropertyList.h",
## "CFSet.h",
"CFString.h",
## "CFStringEncodingExt.h",
## "CFTimeZone.h",
"CFURL.h",
]
output = SHORT + "gen.py"
defsoutput = TOOLBOXDIR + LONG + ".py"
scanner = MyScanner(input, output, defsoutput)
scanner.scan()
scanner.gentypetest(SHORT+"typetest.py")
scanner.close()
print "=== Testing definitions output code ==="
execfile(defsoutput, {}, {})
print "=== Done scanning and generating, now importing the generated code... ==="
exec "import " + SHORT + "support"
print "=== Done. It's up to you to compile it now! ==="
class MyScanner(Scanner_OSX):
def destination(self, type, name, arglist):
classname = "Function"
listname = "functions"
if arglist and name[:13] != 'CFPreferences':
t, n, m = arglist[0]
if t in OBJECTS and m == "InMode":
classname = "Method"
listname = t + "_methods"
# Special case for the silly first AllocatorRef argument
if t == 'CFAllocatorRef' and m == 'InMode' and len(arglist) > 1:
t, n, m = arglist[1]
if t in OBJECTS and m == "InMode":
classname = "MethodSkipArg1"
listname = t + "_methods"
return classname, listname
def writeinitialdefs(self):
self.defsfile.write("def FOUR_CHAR_CODE(x): return x\n")
def makeblacklistnames(self):
return [
# Memory allocator functions
"CFAllocatorGetDefault",
"CFAllocatorSetDefault",
"CFAllocatorAllocate",
"CFAllocatorReallocate",
"CFAllocatorDeallocate",
"CFGetAllocator",
# Array functions we skip for now.
"CFArrayGetValueAtIndex",
# Data pointer functions. Skip for now.
"CFDataGetBytePtr",
"CFDataGetMutableBytePtr",
"CFDataGetBytes", # XXXX Should support this one
# String functions
"CFStringGetPascalString", # Use the C-string methods.
"CFStringGetPascalStringPtr", # TBD automatically
"CFStringGetCStringPtr",
"CFStringGetCharactersPtr",
"CFStringGetCString",
"CFStringGetCharacters",
"CFURLCreateStringWithFileSystemPath", # Gone in later releases
"CFStringCreateMutableWithExternalCharactersNoCopy", # Not a clue...
"CFStringSetExternalCharactersNoCopy",
"CFStringGetCharacterAtIndex", # No format for single unichars yet.
"kCFStringEncodingInvalidId", # incompatible constant declaration
"CFPropertyListCreateFromXMLData", # Manually generated
]
def makegreylist(self):
return []
def makeblacklisttypes(self):
return [
"CFComparatorFunction", # Callback function pointer
"CFAllocatorContext", # Not interested in providing our own allocator
"void_ptr_ptr", # Tricky. This is the initializer for arrays...
"void_ptr", # Ditto for various array lookup methods
"CFArrayApplierFunction", # Callback function pointer
"CFDictionaryApplierFunction", # Callback function pointer
"va_list", # For printf-to-a-cfstring. Use Python.
"const_CFStringEncoding_ptr", # To be done, I guess
]
def makerepairinstructions(self):
return [
# Buffers in CF seem to be passed as UInt8 * normally.
([("UInt8_ptr", "*", "InMode"), ("CFIndex", "*", "InMode")],
[("UcharInBuffer", "*", "*")]),
([("UniChar_ptr", "*", "InMode"), ("CFIndex", "*", "InMode")],
[("UnicodeInBuffer", "*", "*")]),
# Some functions return a const char *. Don't worry, we won't modify it.
([("const_char_ptr", "*", "ReturnMode")],
[("return_stringptr", "*", "*")]),
# base URLs are optional (pass None for NULL)
([("CFURLRef", "baseURL", "InMode")],
[("OptionalCFURLRef", "*", "*")]),
# We handle CFPropertyListRef objects as plain CFTypeRef
([("CFPropertyListRef", "*", "*")],
[("CFTypeRef", "*", "*")]),
]
if __name__ == "__main__":
main()
| mit | 5,106,745,876,747,666,000 | 38.241135 | 88 | 0.525935 | false |
jmesteve/medical | openerp/addons/point_of_sale/wizard/pos_session_opening.py | 46 | 5175 |
from openerp import netsvc
from openerp.osv import osv, fields
from openerp.tools.translate import _
from openerp.addons.point_of_sale.point_of_sale import pos_session
class pos_session_opening(osv.osv_memory):
_name = 'pos.session.opening'
_columns = {
'pos_config_id' : fields.many2one('pos.config', 'Point of Sale', required=True),
'pos_session_id' : fields.many2one('pos.session', 'PoS Session'),
'pos_state' : fields.related('pos_session_id', 'state',
type='selection',
selection=pos_session.POS_SESSION_STATE,
string='Session Status', readonly=True),
'pos_state_str' : fields.char('Status', 32, readonly=True),
'show_config' : fields.boolean('Show Config', readonly=True),
'pos_session_name' : fields.related('pos_session_id', 'name',
type='char', size=64, readonly=True),
'pos_session_username' : fields.related('pos_session_id', 'user_id', 'name',
type='char', size=64, readonly=True)
}
def open_ui(self, cr, uid, ids, context=None):
context = context or {}
data = self.browse(cr, uid, ids[0], context=context)
context['active_id'] = data.pos_session_id.id
return {
'type' : 'ir.actions.client',
'name' : _('Start Point Of Sale'),
'tag' : 'pos.ui',
'context' : context
}
def open_existing_session_cb_close(self, cr, uid, ids, context=None):
wf_service = netsvc.LocalService("workflow")
wizard = self.browse(cr, uid, ids[0], context=context)
wf_service.trg_validate(uid, 'pos.session', wizard.pos_session_id.id, 'cashbox_control', cr)
return self.open_session_cb(cr, uid, ids, context)
def open_session_cb(self, cr, uid, ids, context=None):
assert len(ids) == 1, "you can open only one session at a time"
proxy = self.pool.get('pos.session')
wizard = self.browse(cr, uid, ids[0], context=context)
if not wizard.pos_session_id:
values = {
'user_id' : uid,
'config_id' : wizard.pos_config_id.id,
}
session_id = proxy.create(cr, uid, values, context=context)
s = proxy.browse(cr, uid, session_id, context=context)
if s.state=='opened':
return self.open_ui(cr, uid, ids, context=context)
return self._open_session(session_id)
return self._open_session(wizard.pos_session_id.id)
def open_existing_session_cb(self, cr, uid, ids, context=None):
assert len(ids) == 1
wizard = self.browse(cr, uid, ids[0], context=context)
return self._open_session(wizard.pos_session_id.id)
def _open_session(self, session_id):
return {
'name': _('Session'),
'view_type': 'form',
'view_mode': 'form,tree',
'res_model': 'pos.session',
'res_id': session_id,
'view_id': False,
'type': 'ir.actions.act_window',
}
def on_change_config(self, cr, uid, ids, config_id, context=None):
result = {
'pos_session_id': False,
'pos_state': False,
'pos_state_str' : '',
'pos_session_username' : False,
'pos_session_name' : False,
}
if not config_id:
return {'value' : result}
proxy = self.pool.get('pos.session')
session_ids = proxy.search(cr, uid, [
('state', '!=', 'closed'),
('config_id', '=', config_id),
('user_id', '=', uid),
], context=context)
if session_ids:
session = proxy.browse(cr, uid, session_ids[0], context=context)
result['pos_state'] = str(session.state)
result['pos_state_str'] = dict(pos_session.POS_SESSION_STATE).get(session.state, '')
result['pos_session_id'] = session.id
result['pos_session_name'] = session.name
result['pos_session_username'] = session.user_id.name
return {'value' : result}
def default_get(self, cr, uid, fieldnames, context=None):
so = self.pool.get('pos.session')
session_ids = so.search(cr, uid, [('state','<>','closed'), ('user_id','=',uid)], context=context)
if session_ids:
result = so.browse(cr, uid, session_ids[0], context=context).config_id.id
else:
current_user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
result = current_user.pos_config and current_user.pos_config.id or False
if not result:
r = self.pool.get('pos.config').search(cr, uid, [], context=context)
result = r and r[0] or False
count = self.pool.get('pos.config').search_count(cr, uid, [('state', '=', 'active')], context=context)
show_config = bool(count > 1)
return {
'pos_config_id' : result,
'show_config' : show_config,
}
pos_session_opening()
| agpl-3.0 | -3,633,765,165,747,156,500 | 42.125 | 110 | 0.547826 | false |
terbolous/CouchPotatoServer | libs/suds/transport/__init__.py | 209 | 3895 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( [email protected] )
"""
Contains transport interface (classes).
"""
class TransportError(Exception):
def __init__(self, reason, httpcode, fp=None):
Exception.__init__(self, reason)
self.httpcode = httpcode
self.fp = fp
class Request:
"""
A transport request
@ivar url: The url for the request.
@type url: str
@ivar message: The message to be sent in a POST request.
@type message: str
@ivar headers: The http headers to be used for the request.
@type headers: dict
"""
def __init__(self, url, message=None):
"""
@param url: The url for the request.
@type url: str
@param message: The (optional) message to be send in the request.
@type message: str
"""
self.url = url
self.headers = {}
self.message = message
def __str__(self):
s = []
s.append('URL:%s' % self.url)
s.append('HEADERS: %s' % self.headers)
s.append('MESSAGE:')
s.append(self.message)
return '\n'.join(s)
class Reply:
"""
A transport reply
@ivar code: The http code returned.
@type code: int
@ivar message: The message to be sent in a POST request.
@type message: str
@ivar headers: The http headers to be used for the request.
@type headers: dict
"""
def __init__(self, code, headers, message):
"""
@param code: The http code returned.
@type code: int
@param headers: The http returned headers.
@type headers: dict
@param message: The (optional) reply message received.
@type message: str
"""
self.code = code
self.headers = headers
self.message = message
def __str__(self):
s = []
s.append('CODE: %s' % self.code)
s.append('HEADERS: %s' % self.headers)
s.append('MESSAGE:')
s.append(self.message)
return '\n'.join(s)
class Transport:
"""
The transport I{interface}.
"""
def __init__(self):
"""
Constructor.
"""
from suds.transport.options import Options
self.options = Options()
del Options
def open(self, request):
"""
Open the url in the specified request.
@param request: A transport request.
@type request: L{Request}
@return: An input stream.
@rtype: stream
@raise TransportError: On all transport errors.
"""
raise Exception('not-implemented')
def send(self, request):
"""
Send soap message. Implementations are expected to handle:
- proxies
- I{http} headers
- cookies
- sending message
- brokering exceptions into L{TransportError}
@param request: A transport request.
@type request: L{Request}
@return: The reply
@rtype: L{Reply}
@raise TransportError: On all transport errors.
"""
raise Exception('not-implemented')
| gpl-3.0 | 5,351,110,417,002,252,000 | 28.961538 | 76 | 0.597433 | false |
nicememory/pie | pyglet/pyglet/extlibs/future/py2_3/future/backports/urllib/response.py | 82 | 3180 | """Response classes used by urllib.
The base class, addbase, defines a minimal file-like interface,
including read() and readline(). The typical response object is an
addinfourl instance, which defines an info() method that returns
headers and a geturl() method that returns the url.
"""
from __future__ import absolute_import, division, unicode_literals
from future.builtins import object
class addbase(object):
"""Base class for addinfo and addclosehook."""
# XXX Add a method to expose the timeout on the underlying socket?
def __init__(self, fp):
# TODO(jhylton): Is there a better way to delegate using io?
self.fp = fp
self.read = self.fp.read
self.readline = self.fp.readline
# TODO(jhylton): Make sure an object with readlines() is also iterable
if hasattr(self.fp, "readlines"):
self.readlines = self.fp.readlines
if hasattr(self.fp, "fileno"):
self.fileno = self.fp.fileno
else:
self.fileno = lambda: None
def __iter__(self):
# Assigning `__iter__` to the instance doesn't work as intended
# because the iter builtin does something like `cls.__iter__(obj)`
# and thus fails to find the _bound_ method `obj.__iter__`.
# Returning just `self.fp` works for built-in file objects but
# might not work for general file-like objects.
return iter(self.fp)
def __repr__(self):
return '<%s at %r whose fp = %r>' % (self.__class__.__name__,
id(self), self.fp)
def close(self):
if self.fp:
self.fp.close()
self.fp = None
self.read = None
self.readline = None
self.readlines = None
self.fileno = None
self.__iter__ = None
self.__next__ = None
def __enter__(self):
if self.fp is None:
raise ValueError("I/O operation on closed file")
return self
def __exit__(self, type, value, traceback):
self.close()
class addclosehook(addbase):
"""Class to add a close hook to an open file."""
def __init__(self, fp, closehook, *hookargs):
addbase.__init__(self, fp)
self.closehook = closehook
self.hookargs = hookargs
def close(self):
if self.closehook:
self.closehook(*self.hookargs)
self.closehook = None
self.hookargs = None
addbase.close(self)
class addinfo(addbase):
"""class to add an info() method to an open file."""
def __init__(self, fp, headers):
addbase.__init__(self, fp)
self.headers = headers
def info(self):
return self.headers
class addinfourl(addbase):
"""class to add info() and geturl() methods to an open file."""
def __init__(self, fp, headers, url, code=None):
addbase.__init__(self, fp)
self.headers = headers
self.url = url
self.code = code
def info(self):
return self.headers
def getcode(self):
return self.code
def geturl(self):
return self.url
del absolute_import, division, unicode_literals, object
| apache-2.0 | 875,265,067,722,785,800 | 29.873786 | 78 | 0.59434 | false |
zaxliu/scipy | doc/source/tutorial/examples/normdiscr_plot2.py | 84 | 1642 | import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
npoints = 20 # number of integer support points of the distribution minus 1
npointsh = npoints / 2
npointsf = float(npoints)
nbound = 4 #bounds for the truncated normal
normbound = (1 + 1 / npointsf) * nbound #actual bounds of truncated normal
grid = np.arange(-npointsh, npointsh+2,1) #integer grid
gridlimitsnorm = (grid - 0.5) / npointsh * nbound #bin limits for the truncnorm
gridlimits = grid - 0.5
grid = grid[:-1]
probs = np.diff(stats.truncnorm.cdf(gridlimitsnorm, -normbound, normbound))
gridint = grid
normdiscrete = stats.rv_discrete(
values=(gridint, np.round(probs, decimals=7)),
name='normdiscrete')
n_sample = 500
np.random.seed(87655678) #fix the seed for replicability
rvs = normdiscrete.rvs(size=n_sample)
rvsnd = rvs
f,l = np.histogram(rvs,bins=gridlimits)
sfreq = np.vstack([gridint,f,probs*n_sample]).T
fs = sfreq[:,1] / float(n_sample)
ft = sfreq[:,2] / float(n_sample)
fs = sfreq[:,1].cumsum() / float(n_sample)
ft = sfreq[:,2].cumsum() / float(n_sample)
nd_std = np.sqrt(normdiscrete.stats(moments='v'))
ind = gridint # the x locations for the groups
width = 0.35 # the width of the bars
plt.figure()
plt.subplot(111)
rects1 = plt.bar(ind, ft, width, color='b')
rects2 = plt.bar(ind+width, fs, width, color='r')
normline = plt.plot(ind+width/2.0, stats.norm.cdf(ind+0.5,scale=nd_std),
color='b')
plt.ylabel('cdf')
plt.title('Cumulative Frequency and CDF of normdiscrete')
plt.xticks(ind+width, ind)
plt.legend((rects1[0], rects2[0]), ('true', 'sample'))
plt.show()
| bsd-3-clause | 4,621,654,733,094,236,000 | 33.208333 | 79 | 0.683922 | false |