code
stringlengths 501
5.19M
| package
stringlengths 2
81
| path
stringlengths 9
304
| filename
stringlengths 4
145
|
---|---|---|---|
# Generic reduction helpers shared by many grammar rules below.
# Each reduction function receives (list_of_subreductions, parse_context)
# and returns the semantic value for the rule.

def elt0(list, context):
    """return first member of reduction"""
    return list[0]

def elt1(list, context):
    """return second member"""
    return list[1]

def elt2(list, context):
    """Return third member of reduction."""
    return list[2]

def returnNone(list, context):
    """Reduction for empty/optional rules: no semantic value."""
    return None

def stat1(list, context):
    """return list of len 1 of statements"""
    return list

#def statn(list, context):
#    """return a list of statement reductions"""
#    [stat, semi, statlist] = list
#    statlist.insert(0, stat)
#    return statlist
def thingcommalist(l, c):
    """Reduction for "thing , list": prepend the thing onto the list, in place."""
    head, _comma, tail = l
    tail.insert(0, head)
    return tail

def listcommathing(l, c):
    """Reduction for "list , thing": append the thing onto the list, in place."""
    seq, _comma, item = l
    seq.append(item)
    return seq
# Bind grammar rule names to the generic helpers above.  BindRules (at the
# bottom of this module) looks these names up in the module namespace.
statn = thingcommalist
selstat = elt0
insstat = elt0
createtablestat = elt0
droptablestat = elt0
delstat = elt0
updatestat = elt0
createindexstat = elt0
dropindexstat = elt0
createviewstat = elt0
dropviewstat = elt0
# drop view statement stuff
def dropview(l, c):
    """Reduction for DROP VIEW name."""
    [drop, view, name] = l
    from sqlsem import DropView
    return DropView(name)

# create view statement stuff
def createview(l, c):
    """Reduction for CREATE VIEW name (cols) AS selection."""
    [create, view, name, namelist, as_, selection] = l
    from sqlsem import CreateView
    return CreateView(name, namelist, selection)

optnamelist0 = returnNone
optnamelistn = elt1

# drop index statement stuff
def dropindex(l, c):
    """Reduction for DROP INDEX name."""
    [drop, index, name] = l
    from sqlsem import DropIndex
    return DropIndex(name)

# create index statement stuff
def createindex(l, c):
    """Reduction for CREATE INDEX name ON table ( columns )."""
    [create, index, name, on, table, op, namelist, cp] = l
    from sqlsem import CreateIndex
    return CreateIndex(name, table, namelist)

def createuniqueindex(l, c):
    """Reduction for CREATE UNIQUE INDEX name ON table ( columns )."""
    [create, unique, index, name, on, table, op, namelist, cp] = l
    from sqlsem import CreateIndex
    return CreateIndex(name, table, namelist, unique=1)

names1 = stat1
namesn = listcommathing

# update statement stuff
def update(l, c):
    """Reduction for UPDATE name SET assignments [WHERE condition]."""
    [upd, name, set, assns, condition] = l
    from sqlsem import UpdateOp
    return UpdateOp(name, assns, condition)

def assn(l, c):
    """Reduction for a single "col = expression" assignment pair."""
    [col, eq, exp] = l
    return (col, exp)

def assn1(l, c):
    """First assignment: start a TupleCollector holding the bindings."""
    [ (col, exp) ] = l
    from sqlsem import TupleCollector
    result = TupleCollector()
    result.addbinding(col, exp)
    return result

def assnn(l, c):
    """Subsequent assignment: add the binding to the running collector."""
    [ result, comma, (col, exp) ] = l
    result.addbinding(col, exp)
    return result

# delete statement stuff
def deletefrom(l, c):
    """Reduction for DELETE FROM name [WHERE condition]."""
    [delete, fromkw, name, where] = l
    from sqlsem import DeleteOp
    return DeleteOp(name, where)

# drop table stuff
def droptable(l, c):
    """Reduction for DROP TABLE name."""
    [drop, table, name] = l
    from sqlsem import DropTable
    return DropTable(name)

# create table statement stuff
def createtable(list, context):
    """Reduction for CREATE TABLE name ( column elements )."""
    [create, table, name, p1, colelts, p2] = list
    from sqlsem import CreateTable
    return CreateTable(name, colelts)

colelts1 = stat1
coleltsn = listcommathing
#def coleltsn(list, c):
#    [c1, cc, ce] = list
#    c1.append(ce)
#    return c1
coleltid = elt0
coleltconstraint = elt0

def coldef(l, c):
    """Reduction for a column definition: name, type, default, constraints."""
    [colid, datatype, default, constraints] = l
    from sqlsem import ColumnDef
    return ColumnDef(colid, datatype, default, constraints)

optdef0 = returnNone
optcolconstr0 = returnNone
# NOTE: deliberately rebinds the builtin names "integer"/"float" at module
# scope -- these are SQL grammar rule names, looked up by BindRules.
stringtype = exnumtype = appnumtype = integer = float = varchar = elt0
varcharn = elt0

# insert statement stuff
def insert1(l, c):
    """Reduction for INSERT INTO name (optional cols) values-or-subquery."""
    [insert, into, name, optcolids, insert_spec] = l
    from sqlsem import InsertOp
    return InsertOp(name, optcolids, insert_spec)

optcolids0 = returnNone
optcolids1 = elt1
colids1 = stat1
colidsn = listcommathing

def insert_values(l, c):
    """INSERT ... VALUES ( literals ): wrap the literal list (l[2])."""
    from sqlsem import InsertValues
    return InsertValues(l[2])

def insert_query(l, c):
    """INSERT ... subselect: wrap the subquery (l[0])."""
    from sqlsem import InsertSubSelect
    return InsertSubSelect(l[0])

litlist1 = stat1
litlistn = listcommathing
sliteral0 = elt0
def sliteralp(l, c):
    """Reduction for "+ literal": unary plus."""
    _plus, val = l
    return +val

def sliterald(l, c):
    """Reduction for "literal - literal": difference of the two values."""
    left, _minus, right = l
    return left - right

def sliterals(l, c):
    """Reduction for "literal + literal": sum of the two values."""
    left, _plus, right = l
    return left + right

def sliteralm(l, c):
    """Reduction for "- literal": arithmetic negation."""
    _minus, val = l
    return -val
# select statement stuff
def selectx(list, context):
    """Top-level select: attach ordering and dynamic-parameter count."""
    [sub, optorder_by] = list
    #sub.union_select = optunion
    sub.order_by = optorder_by
    # number of dynamic parameters in this parse.
    sub.ndynamic = context.ndynamic()
    return sub

psubselect = elt1

def subselect(list, context):
    """Build the sqlsem.Selector for a (sub)select clause."""
    [select, alldistinct, selectlist, fromkw, trlist,
     optwhere, optgroup, opthaving, optunion] = list
    from sqlsem import Selector
    sel = Selector(
        alldistinct,
        selectlist,
        trlist,
        optwhere,
        optgroup,
        opthaving,
        # store # of dynamic parameters seen in this parse.
        ndynamic = context.ndynamic()
        )
    sel.union_select = optunion
    return sel

def ad0(list, context):
    """Absent ALL/DISTINCT marker defaults to ALL."""
    return "ALL"

adall = ad0

def addistinct(list, context):
    """Explicit DISTINCT marker."""
    return "DISTINCT"

def where0(list, context):
    """Missing WHERE clause: an always-true predicate."""
    from sqlsem import BTPredicate
    return BTPredicate() # true

where1 = elt1
group0 = returnNone
group1 = elt2
colnames1 = stat1
colnamesn = listcommathing
having0 = returnNone
having1 = elt1
union0 = returnNone

def union1(l, c):
    """Reduction for UNION [ALL|DISTINCT] selection."""
    [union, alldistinct, selection] = l
    from sqlsem import Union
    return Union(alldistinct, selection)

def except1(l, c):
    """Reduction for EXCEPT selection (implicitly DISTINCT)."""
    [union, selection] = l
    alldistinct = "DISTINCT"
    from sqlsem import Except
    return Except(alldistinct, selection)

def intersect1(l, c):
    """Reduction for INTERSECT selection (implicitly DISTINCT)."""
    [union, selection] = l
    alldistinct = "DISTINCT"
    from sqlsem import Intersect
    return Intersect(alldistinct, selection)

order0 = returnNone
order1 = elt2
#orderby = elt2
sortspec1 = stat1
sortspecn = listcommathing

def sortint(l, c):
    """ORDER BY a 1-based positive column position (rejects other values)."""
    from sqlsem import PositionedSort
    [num, ord] = l
    from types import IntType
    if type(num)!=IntType or num<=0:
        raise ValueError, `num`+': col position not positive int'
    return PositionedSort(num, ord)

def sortcol(l, c):
    """ORDER BY a named column."""
    from sqlsem import NamedSort
    [name, ord] = l
    return NamedSort(name, ord)

def optord0(l, c):
    """Absent sort order defaults to ascending."""
    return "ASC"

optordasc = optord0

def optorddesc(l, c):
    """Explicit DESC sort order."""
    return "DESC"

## table reference list returns list of (name, name) or (name, alias)
def trl1(l, c):
    """Single table reference with no alias: [(name, name)]."""
    (name,) = l
    return [(name, name)]

def trln(l,c):
    """Unaliased table ref followed by ", rest": prepend (name, name)."""
    name, _comma, rest = l
    rest.insert(0, (name, name))
    return rest

def trl1a(l,c):
    """Single "name alias" table reference: [(name, alias)]."""
    name, alias = l
    return [(name, alias)]

def trlna(l,c):
    """"name alias , rest": prepend (name, alias) onto the rest."""
    name, alias, _comma, rest = l
    rest.insert(0, (name, alias))
    return rest

def trl1as(l,c):
    """Single "name AS alias" table reference: [(name, alias)]."""
    name, _as, alias = l
    return [(name, alias)]

def trlnas(l,c):
    """"name AS alias , rest": prepend (name, alias) onto the rest."""
    name, _as, alias, _comma, rest = l
    rest.insert(0, (name, alias))
    return rest
tablename1 = elt0
columnid1 = elt0

def columnname1(list, context):
    """Unqualified column name: delegate as (no table, column)."""
    [ci] = list
    return columnname2([None, None, ci], context)

def columnname2(list, context):
    """Qualified column name "table . col" -> BoundAttribute."""
    [table, ignore, col] = list
    from sqlsem import BoundAttribute
    return BoundAttribute(table, col)

def dynamic(list, context):
    """Dynamic parameter marker: allocate the next parameter index."""
    from sqlsem import BoundAttribute
    # return a new dynamic parameter
    int = context.param()
    return BoundAttribute(0, int)

# expression stuff
def literal(list, context):
    """Literal value -> Constant node."""
    [lit] = list
    from sqlsem import Constant
    return Constant(lit)

def stringstring(l, c):
    """two strings in sequence = apostrophe"""
    [l1, l2] = l
    from sqlsem import Constant
    value = "%s'%s" % (l1.value0, l2)
    return Constant(value)

numlit = literal
stringlit = literal
primarylit = elt0
primary1 = elt0
factor1 = elt0
term1 = elt0
exp1 = elt0

# Arithmetic reductions delegate to the expression nodes' operator
# overloads (sqlsem defines __add__ etc. on expression objects).
def expplus(list, context):
    [exp, plus, term] = list
    return exp + term

def expminus(list, context):
    [exp, minus, term] = list
    return exp - term

def termtimes(list, context):
    [exp, times, term] = list
    return exp * term

def termdiv(list, context):
    [exp, div, term] = list
    return exp / term

plusfactor = elt1

def minusfactor(list, context):
    """Unary minus on a factor."""
    [minus, factor] = list
    return -factor

primaryexp = elt1
primaryset = elt0

def countstar(l, c):
    """COUNT(*) aggregate."""
    from sqlsem import Count
    return Count("*")

def distinctset(l, c):
    """agg ( DISTINCT exp ) -- distinct aggregate."""
    [agg, p1, distinct, exp, p2] = l
    return set(agg, exp, 1)

distinctcount = distinctset

def allset(l, c):
    """agg ( exp ) -- non-distinct aggregate."""
    [agg, p1, exp, p2] = l
    return set(agg, exp, 0)

allcount = allset

def set(agg, exp, distinct):
    """Dispatch an aggregate name to its sqlsem node.

    NOTE: shadows the builtin "set" at module scope (predates the builtin).
    """
    import sqlsem
    if agg=="AVG":
        return sqlsem.Average(exp, distinct)
    if agg=="COUNT":
        return sqlsem.Count(exp, distinct)
    if agg=="MAX":
        return sqlsem.Maximum(exp, distinct)
    if agg=="MIN":
        return sqlsem.Minimum(exp, distinct)
    if agg=="SUM":
        return sqlsem.Sum(exp, distinct)
    if agg=="MEDIAN":
        return sqlsem.Median(exp, distinct)
    raise NameError, `agg`+": unknown aggregate"

average = count = maximum = minimum = summation = median = elt0
def predicateeq(list, context):
    """e1 = e2"""
    [e1, eq, e2] = list
    return e1.equate(e2)

def predicatene(list, context):
    """e1 <> e2 (negated equality; ~ is predicate negation)."""
    [e1, lt, gt, e2] = list
    return ~(e1.equate(e2))

def predicatelt(list, context):
    """e1 < e2"""
    [e1, lt, e2] = list
    return e1.lt(e2)

def predicategt(list, context):
    """e1 > e2, expressed as e2 < e1."""
    [e1, lt, e2] = list
    return e2.lt(e1)

def predicatele(list, context):
    """e1 <= e2"""
    [e1, lt, eq, e2] = list
    return e1.le(e2)

def predicatege(list, context):
    """e1 >= e2, expressed as e2 <= e1."""
    [e1, lt, eq, e2] = list
    return e2.le(e1)

def predbetween(list, context):
    """e1 BETWEEN e2 AND e3"""
    [e1, between, e2, andkw, e3] = list
    from sqlsem import BetweenPredicate
    return BetweenPredicate(e1, e2, e3)

def prednotbetween(list, context):
    """e1 NOT BETWEEN e2 AND e3"""
    [e1, notkw, between, e2, andkw, e3] = list
    from sqlsem import BetweenPredicate
    return ~BetweenPredicate(e1, e2, e3)

predicate1 = elt0
bps = elt1
bp1 = elt0

# exists predicate stuff
predexists = elt0

def exists(l, c):
    """EXISTS ( subquery )"""
    [ex, paren1, subquery, paren2] = l
    from sqlsem import ExistsPred
    return ExistsPred(subquery)

def notbf(list, context):
    """NOT boolean-factor."""
    [ notst, thing ] = list
    return ~thing

# quantified predicates
# ALL is implemented as the negation of the opposite ANY quantifier.
nnall = elt0
nnany = elt0

def predqeq(list, context):
    """exp = ANY/ALL ( subquery )"""
    [exp, eq, allany, p1, subq, p2] = list
    from sqlsem import QuantEQ, QuantNE
    if allany=="ANY":
        return QuantEQ(exp, subq)
    else:
        return ~QuantNE(exp, subq)

def predqne(list, context):
    """exp <> ANY/ALL ( subquery )"""
    [exp, lt, gt, allany, p1, subq, p2] = list
    from sqlsem import QuantEQ, QuantNE
    if allany=="ANY":
        return QuantNE(exp, subq)
    else:
        return ~QuantEQ(exp, subq)

def predqlt(list, context):
    """exp < ANY/ALL ( subquery )"""
    [exp, lt, allany, p1, subq, p2] = list
    from sqlsem import QuantLT, QuantGE
    if allany=="ANY":
        return QuantLT(exp, subq)
    else:
        return ~QuantGE(exp, subq)

def predqgt(list, context):
    """exp > ANY/ALL ( subquery )"""
    [exp, gt, allany, p1, subq, p2] = list
    from sqlsem import QuantGT, QuantLE
    if allany=="ANY":
        return QuantGT(exp, subq)
    else:
        return ~QuantLE(exp, subq)

def predqle(list, context):
    """exp <= ANY/ALL ( subquery )"""
    [exp, less, eq, allany, p1, subq, p2] = list
    from sqlsem import QuantGT, QuantLE
    if allany=="ANY":
        return QuantLE(exp, subq)
    else:
        return ~QuantGT(exp, subq)

def predqge(list, context):
    """exp >= ANY/ALL ( subquery )"""
    [exp, gt, eq, allany, p1, subq, p2] = list
    from sqlsem import QuantGE, QuantLT
    if allany=="ANY":
        return QuantGE(exp, subq)
    else:
        return ~QuantLT(exp, subq)

# subquery expression
def subqexpr(list, context):
    """( subquery ) used as an expression value."""
    [p1, subq, p2] = list
    from sqlsem import SubQueryExpression
    return SubQueryExpression(subq)

def predin(list, context):
    """exp IN ( subquery )"""
    [exp, inkw, p1, subq, p2] = list
    from sqlsem import InPredicate
    return InPredicate(exp, subq)

def prednotin(list, context):
    """exp NOT IN ( subquery )"""
    [exp, notkw, inkw, p1, subq, p2] = list
    from sqlsem import InPredicate
    return ~InPredicate(exp, subq)

def predinlits(list, context):
    """exp IN ( literal list )"""
    [exp, inkw, p1, lits, p2] = list
    from sqlsem import InLits
    return InLits(exp, lits)

def prednotinlits(list, context):
    """exp NOT IN ( literal list )"""
    [exp, notkw, inkw, p1, lits, p2] = list
    from sqlsem import InLits
    return ~InLits(exp, lits)

bf1 = elt0

def booln(list, context):
    """e1 AND e2 (via the predicate nodes' & overload)."""
    [ e1, andst, e2 ] = list
    return e1&e2

bool1 = elt0

def searchn(list, context):
    """e1 OR e2 (via the predicate nodes' | overload)."""
    [ e1, orst, e2 ] = list
    return e1 | e2

search1 = elt0
colalias = elt0

# select list stuff
def selectstar(l,c):
    """SELECT * -- the star marker."""
    return "*"

selectsome = elt0
select1 = elt0

# selectsub returns (expression, asname)
def select1(list, context):
    # NOTE: this def intentionally overrides the "select1 = elt0" alias above.
    """First select-list item: start a TupleCollector of (name, exp) bindings."""
    [ (exp, name) ] = list
    from sqlsem import TupleCollector
    result = TupleCollector()
    result.addbinding(name, exp)
    return result

def selectn(list, context):
    """Subsequent select-list item: add its binding to the collector."""
    [ selectsubs, comma, select_sublist ] = list
    (exp, name) = select_sublist
    selectsubs.addbinding(name, exp)
    return selectsubs

def selectit(list, context):
    """Select item without an alias."""
    [exp] = list
    return (exp, None) # no binding!

def selectname(list, context):
    """Select item with an AS alias."""
    [exp, as_, alias] = list
    return (exp, alias)

colalias = elt0  # (redundant rebinding of the alias above; harmless)
#### do the bindings.
# note: all reduction function defs must precede this assign
VARS = vars()

class punter:
    """Fallback reduction for unbound rules: print a notice, pass list through."""
    def __init__(self, name):
        self.name = name
    def __call__(self, list, context):
        print "punt:", self.name, list
        return list

class tracer:
    """Debugging wrapper: print the reduction name and args, then delegate."""
    def __init__(self, name, fn):
        self.name = name
        self.fn = fn
    def __call__(self, list, context):
        print self.name, list
        return self.fn(list, context)
def BindRules(sqlg):
for name in sqlg.RuleNameToIndex.keys():
if VARS.has_key(name):
#print "binding", name
sqlg.Bind(name, VARS[name]) # nondebug
#sqlg.Bind(name, tracer(name, VARS[name]) ) # debug
else:
print "unbound", name
sqlg.Bind(name, punter(name))
return sqlg | zope.rdb | /zope.rdb-3.5.0.tar.gz/zope.rdb-3.5.0/src/zope/rdb/gadfly/sqlbind.py | sqlbind.py |
import gfsocket
def main():
    """Command line entry: gfclient <action> <port> <admin_password> [machine].

    Valid actions are shutdown, restart, checkpoint; on any failure the
    module docstring (usage) is printed via the finally clause.
    """
    import sys
    try:
        done=0
        argv = sys.argv
        [action, port, admin_password] = argv[1:4]
        from string import atoi
        port = atoi(port)
        if len(argv)>4:
            machine = argv[4]
        else:
            machine = None
        print action, port, admin_password, machine
        if action not in ["shutdown", "restart", "checkpoint"]:
            print "bad action", action
            print
            return
        dosimple(action, port, admin_password, machine)
        done=1
    finally:
        # done stays 0 if anything above raised: show usage
        if not done:
            print __doc__
def dosimple(action, port, pw, machine=None):
    """Connect with the admin policy and invoke the named action method."""
    import socket
    if machine is None:
        machine = socket.gethostname()
    conn = gfclient("admin", port, pw, machine)
    action = getattr(conn, action)
    print action()

# Protocol action name constants, copied from gfserve.
# copied from gfserve
# shut down the server (admin policy only)
# arguments = ()
# shutdown the server with no checkpoint
SHUTDOWN = "SHUTDOWN"
# restart the server (admin only)
# arguments = ()
# restart the server (recover)
# no checkpoint
RESTART = "RESTART"
# checkpoint the server (admin only)
# arguments = ()
# checkpoint the server
CHECKPOINT = "CHECKPOINT"
# exec prepared statement
# arguments = (prepared_name_string, dyn=None)
# execute the prepared statement with dynamic args.
# autocommit.
EXECUTE_PREPARED = "EXECUTE_PREPARED"
# exec any statement (only if not disabled)
# arguments = (statement_string, dyn=None)
# execute the statement with dynamic args.
# autocommit.
EXECUTE_STATEMENT = "EXECUTE_STATEMENT"
class gfclient:
    """Client connection to a gadfly server under a given access policy."""
    # closed flag: nonzero once close() has been called
    closed = 0
    def __init__(self, policy, port, password, machine=None):
        """Record policy/port/password; machine defaults to this host."""
        import socket
        self.policy = policy
        self.port = port
        self.password = password
        if machine is None:
            machine = socket.gethostname()
        self.machine = machine
    def open_connection(self):
        """Open a fresh TCP connection to the server."""
        import socket
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        #print type(sock), sock
        sock.connect((self.machine, self.port))
        return sock
    def send_action(self, action, arguments, socket):
        """Send a password-certified action request over the socket."""
        gfsocket.send_certified_action(
            self.policy, action, arguments, self.password, socket)
    def checkpoint(self):
        """only valid for admin policy: force a server checkpoint"""
        return self.simple_action(CHECKPOINT)
    def simple_action(self, action, args=()):
        """Send one action over a fresh connection and return the reply."""
        sock = self.open_connection()
        self.send_action(action, args, sock)
        data = gfsocket.recv_data(sock)
        data = gfsocket.interpret_response(data)
        return data
    def restart(self):
        """only valid for admin policy: force a server restart"""
        return self.simple_action(RESTART)
    def shutdown(self):
        """only valid for admin policy: shut down the server"""
        return self.simple_action(SHUTDOWN)
    def close(self):
        """Mark the connection closed (no socket state is held here)."""
        self.closed = 1
    def commit(self):
        # right now all actions autocommit
        pass
    # cannot rollback, autocommit on success
    rollback = commit
    def cursor(self):
        """return a cursor to this policy"""
        if self.closed:
            raise ValueError, "connection is closed"
        return gfClientCursor(self)
class gfClientCursor:
    """DBAPI-style cursor over a gfclient connection (all actions autocommit)."""
    statement = None
    results = None
    description = None
    def __init__(self, connection):
        self.connection = connection
    # should add fetchone fetchmany
    def fetchall(self):
        """Return the rows from the most recent execute (None if none)."""
        return self.results
    def execute(self, statement=None, params=None):
        """Run an arbitrary SQL statement with optional dynamic params."""
        reply = self.connection.simple_action(
            EXECUTE_STATEMENT, (statement, params))
        (self.description, self.results) = reply
    def execute_prepared(self, name, params=None):
        """Run a server-side prepared statement by name."""
        reply = self.connection.simple_action(EXECUTE_PREPARED, (name, params))
        if reply is None:
            self.description = self.results = None
        else:
            (self.description, self.results) = reply
    def setoutputsizes(self, *args):
        pass # not implemented
    def setinputsizes(self):
        pass # not implemented
# fix: stray dataset-separator text ("| zope.rdb | ... |") was fused onto
# the main() call line by the extraction; removed.
if __name__=="__main__":
    main()
idlgramstring = """
specification ::
## 1
@R r1a :: specification >> definition speclist
@R r1b :: speclist >> specification
@R r1c :: speclist >>
## 2 punct ;
@R r2a :: definition >> type_dcl ;
@R r2b :: definition >> const_dcl ;
@R r2c :: definition >> except_dcl ;
@R r2d :: definition >> interface_nt ;
@R r2e :: definition >> module_nt ;
## 3 identifier=term, module=kw puncts {}
@R r3 :: module_nt >> module identifier { specification }
## 4
@R r4a :: interface_nt >> interface_dcl
@R r4b :: interface_nt >> forward_dcl
## 5
@R r5 :: interface_dcl >> interface_header { interface_body }
## 6 interface=kw
@R r6 :: forward_dcl >> interface identifier
## 7 puncts []
@R r7 :: interface_header >> interface identifier [ inheritance_spec ]
## 8
@R r8a :: interface_body >>
@R r8b :: interface_body >> export interface_body
## 9
@R r9a :: export >> type_dcl
@R r9b :: export >> const_dcl
@R r9c :: export >> except_dcl
@R r9d :: export >> attr_dcl
@R r9e :: export >> op_dcl
## 10 punct ,:
@R r10a :: inheritance_spec >> : scoped_name_list
@R r10b :: scoped_name_list >> scoped_name
@R r10c :: scoped_name_list >> scoped_name_list , scoped_name
## 11
@R r11a :: scoped_name >> identifier
@R r11b :: scoped_name >> colon_colon identifier
@R r11d :: scoped_name >> scoped_name coloncolon identifier
## 12 const=kw punct =
@R r12 :: const_dcl >> const const_type identifier = const_expr
## 13
@R r13a :: const_type >> integer_type
@R r13b :: const_type >> char_type
@R r13c :: const_type >> boolean_type
@R r13d :: const_type >> floating_type
@R r13e :: const_type >> string_type
@R r13f :: const_type >> scoped_name
## 14
@R r14 :: const_expr >> or_expr
##15 punct |
@R r15a :: or_expr >> xor_expr
@R r15b :: or_expr >> or_expr | xor_expr
##16 punct ^
@R r16a :: xor_expr >> and_expr
@R r16b :: xor_expr >> xor_expr ^ and_expr
##17 punct &
@R r17a :: and_expr >> shift_expr
@R r17b :: and_expr >> and_expr & shift_expr
##18 punct > <
@R r18a :: shift_expr >> add_expr
@R r18b :: shift_expr >> shift_expr > > add_expr
@R r18c :: shift_expr >> shift_expr < < add_expr
##19 punct +-
@R r19a :: add_expr >> mult_expr
@R r19b :: add_expr >> add_expr + mult_expr
@R r19c :: add_expr >> add_expr - mult_expr
##20 punct */%
@R r20a :: mult_expr >> unary_expr
@R r20b :: mult_expr >> mult_expr * unary_expr
@R r20c :: mult_expr >> mult_expr / unary_expr
@R r20d :: mult_expr >> mult_expr % unary_expr
##21
@R r21a :: unary_expr >> unary_operator primary_expr
@R r21b :: unary_expr >> primary_expr
##22
@R r22a :: unary_operator >> -
@R r22b :: unary_operator >> +
@R r22c :: unary_operator >> ~
##23 punct ()
@R r23a :: primary_expr >> scoped_name
@R r23b :: primary_expr >> literal
@R r23c :: primary_expr >> ( const_expr )
##24 terms = *_literal (?) except boolean
@R r24a :: literal >> integer_literal
@R r24b :: literal >> string_literal
@R r24c :: literal >> character_literal
@R r24d :: literal >> floating_pt_literal
@R r24e :: literal >> boolean_literal
##25 kw TRUE FALSE
@R r25a :: boolean_literal >> TRUE
@R r25b :: boolean_literal >> FALSE
##26
@R r26 :: positive_int_literal >> const_expr
##27 kw typedef
@R r27a :: type_dcl >> typedef type_declarator
@R r27b :: type_dcl >> struct_type
@R r27c :: type_dcl >> union_type
@R r27d :: type_dcl >> enum_type
##28
@R r28 :: type_declarator >> type_spec declarators
##29
@R r29a :: type_spec >> simple_type_spec
@R r29b :: type_spec >> constr_type_spec
##30
@R r30a :: simple_type_spec >> base_type_spec
@R r30b :: simple_type_spec >> template_type_spec
@R r30c :: simple_type_spec >> scoped_name
##31
@R r31a :: base_type_spec >> floating_pt_type
@R r31b :: base_type_spec >> integer_type
@R r31c :: base_type_spec >> char_type
@R r31d :: base_type_spec >> boolean_type
@R r31e :: base_type_spec >> octet_type
@R r31f :: base_type_spec >> any_type
## 32
@R r32a :: template_type_spec >> sequence_type
@R r32b :: template_type_spec >> string_type
##33
@R r33a :: constr_type_spec >> struct_type
@R r33b :: constr_type_spec >> union_type
@R r33c :: constr_type_spec >> enum_type
##34
@R r34a :: declarators >> declarator
@R r34b :: declarators >> declarators , declarator
##35
@R r35a :: declarator >> simple_declarator
@R r35b :: declarator >> complex_declarator
##36
@R r36 :: simple_declarator >> identifier
##37
@R r37 :: complex_declarator >> array_declarator
##38 kw float double
@R r38a :: floating_pt_type >> float
@R r38b :: floating_pt_type >> double
##39
@R r39a :: integer_type >> signed_int
@R r39b :: integer_type >> unsigned_int
##40
@R r40 :: signed_int >> signed_long_int
@R r40 :: signed_int >> signed_short_int
##41 kw long
@R r41 :: signed_long_int >> long
##42 kw short
@R r42 :: signed_short_int >> short
##43
@R r43 :: unsigned_int >> unsigned_long_int
@R r43 :: unsigned_int >> unsigned_short_int
##44 kw unsigned
@R r44 :: unsigned_long_int >> unsigned long
##45
@R r45 :: unsigned_short_int >> unsigned short
##46 kw char
@R r46 :: char_type >> char
##47 kw boolean
@R r47 :: boolean_type >> boolean
##48 kw octet
@R r48 :: octet_type >> octet
##49 kw any
@R r49 :: any_type >> any
##50 kw struct
@R r50 :: struct_type >> struct identifier { member_list }
##51
@R r51a :: member_list >> member
@R r51b :: member_list >> member_list member
##52
@R r52 :: member >> type_spec declarators ;
##53 kw union switch
@R r53 :: union_type >>
union identifier switch ( switch_type_spec ) { switch_body }
##54
@R r54a :: switch_type_spec >> integer_type
@R r54b :: switch_type_spec >> char_type
@R r54c :: switch_type_spec >> boolean_type
@R r54d :: switch_type_spec >> enum_type
@R r54e :: switch_type_spec >> scoped_name
##55
@R r55a :: switch_body >> case_nt
@R r55b :: switch_body >> switch_body case_nt
##56
@R r56a :: case_nt >> case_labels element_spec ;
@R r56b :: case_labels >> case_label
@R r56c :: case_labels >> case_labels case_label
##57 kw default case
@R r57a :: case_label >> case const_expr :
@R r57b :: case_label >> default :
##58
@R r58 :: element_spec >> type_spec declarator
##59 kw enum
@R r59a :: enum_type >> enum identifier { enumerators }
@R r59b :: enumerators >> enumerator
@R r59c :: enumerators >> enumerators , enumerator
##60
@R r60 :: enumerator >> identifier
##61 kw sequence
@R r61 :: sequence_type >> sequence < simple_type_spec , positive_int_const >
##62 kw string
@R r62a :: string_type >> string < positive_int_const >
@R r62b :: string_type >> string
##63
@R r63a :: array_declarator >> identifier fixed_array_sizes
@R r63b :: fixed_array_sizes >> fixed_array_size
@R r63c :: fixed_array_sizes >> fixed_array_sizes fixed_array_size
##64
@R r64 :: fixed_array_size >> [ positive_int_const ]
##65 kw attribute readonly
@R r65a :: attr_dcl >> maybe_readonly attribute param_type_spec simple_declarators
@R r65b :: maybe_readonly >> readonly
@R r65c :: maybe_readonly >>
@R r65d :: simple_declarators >> simple_declarator
@R r65e :: simple_declarators >> simple_declarators , simple_declarator
##66 kw exception
@R r66a :: except_dcl >> exception identifier { members }
@R r66b :: members >>
@R r66c :: members >> member_list
##67
@R r67a :: op_dcl >>
maybe_op_attribute op_type_spec identifier parameter_dcls
maybe_raises_expr maybe_context_expr
@R r67b :: maybe_op_attribute >>
@R r67c :: maybe_op_attribute >> op_attribute
@R r67d :: maybe_raises_expr >>
@R r67e :: maybe_raises_expr >> raises_expr
@R r67f :: maybe_context_expr >>
@R r67g :: maybe_context_expr >> context_expr
##68 kw oneway
@R r68a :: op_attribute >> oneway
##69 kw void
@R r69a :: op_type_spec >> param_type_spec
@R r69b :: op_type_spec >> void
##70
@R r70a :: parameter_dcls >> ( parameterlist )
@R r70b :: parameter_dcls >> ( )
@R r70c :: parameterlist >> param_dcl
@R r70d :: parameterlist >> parameterlist , param_dcl
##71
@R r71 :: param_dcl >> param_attribute param_type_spec simple_declarator
##72 kw in out inout
@R r72 :: param_attribute >> in
@R r72 :: param_attribute >> out
@R r72 :: param_attribute >> inout
##73 kw raises
@R r73 :: raises_expr >> raises ( scoped_name_list )
##74 kw context
@R r74 :: context_expr >> context ( string_literal_list )
@R r74b :: string_literal_list >> string_literal
@R r74c :: string_literal_list >> string_literal_list , string_literal
@R r75 :: param_type_spec >> base_type_spec
@R r75 :: param_type_spec >> string_type
@R r75 :: param_type_spec >> scoped_name
"""
nonterms = """
colon_colon
param_attribute
unsigned_long_int unsigned_short_int param_dcl
parameterlist string_literal_list
members maybe_op_attribute maybe_raises_expr maybe_context_expr
op_type_spec parameter_dcls op_attribute raises_expr context_expr
maybe_readonly param_type_spec simple_declarators simple_declarator
fixed_array_sizes fixed_array_size
element_spec enumerator enumerators
switch_type_spec switch_body case_nt case_labels case_label
member_list member
signed_int unsigned_int signed_long_int signed_short_int
simple_declarator complex_declarator array_declarator
declarator
sequence_type string_type
floating_pt_type integer_type char_type boolean_type
octet_type any_type
base_type_spec template_type_spec
simple_type_spec constr_type_spec
type_spec declarators
type_declarator struct_type union_type enum_type
literal boolean_literal positive_int_literal
mult_expr unary_expr unary_operator primary_expr
or_expr xor_expr and_expr shift_expr add_expr
integer_type char_type boolean_type floating_type string_type
const_type const_expr
scoped_name_list scoped_name
attr_dcl op_dcl
inheritance_spec export
interface_header interface_body
interface_dcl forward_dcl
type_dcl const_dcl except_dcl interface_nt module_nt
specification definition speclist
"""
keywords = """
exception oneway void in out inout raises context
interface module const TRUE FALSE typedef float double long
unsigned short char boolean octet any struct union switch
enum string attribute readonly default case sequence ::
"""
# NOTE: FOR NECESSARY HACKERY REASONS :: IS A KEYWORD!
punctuations = ";{}()[],:|^&<>+-*/%~="

# dummy regexen
# (placeholder patterns -- this module only builds/compiles the grammar,
# it does not lex real IDL input)
identifierre = "identifier"
integer_literalre = "123"
positive_int_constre = "999"
string_literalre = "'string'"
character_literalre= "'c'"
floating_pt_literalre = "1.23"

# dummy interp fun for all terminals
def echo (str):
    """Identity interpretation function for terminal values."""
    return str

def DeclareTerminals(Grammar):
    """Register the dummy terminal regexes, each interpreted by echo."""
    Grammar.Addterm("identifier", identifierre, echo)
    Grammar.Addterm("integer_literal", integer_literalre, echo)
    Grammar.Addterm("string_literal", string_literalre, echo)
    Grammar.Addterm("character_literal", character_literalre, echo)
    Grammar.Addterm("floating_pt_literal", floating_pt_literalre, echo)
    Grammar.Addterm("positive_int_const", positive_int_constre, echo)
## we need to override LexDictionary to recognize :: as a SINGLE punctuation.
## (not possible using standard kjParsing, requires a special override)
import kjParser
class myLexDictionary(kjParser.LexDictionary):
    """LexDictionary subclass that recognizes "::" as a single token.

    Standard kjParsing punctuations are single characters, so the scope
    operator "::" is registered under the keyword spelling "coloncolon"
    and matched specially in Token().
    """
    def __init__(self):
        kjParser.LexDictionary.__init__(self)
        # same (token, canonical) pair reachable via both spellings
        map = ((kjParser.KEYFLAG, "coloncolon"), "coloncolon")
        self.keywordmap["::"] = map
        self.keywordmap["coloncolon"] = map
    def Token(self, String, StartPosition):
        """Return (token, length) at StartPosition, handling "::" first."""
        if String[StartPosition:StartPosition+2] == "::":
            tok = self.keywordmap["::"]
            return (tok, 2)
        # default: delegate to the base class.
        # BUG FIX: this previously read kjParseBuild.LexDictionary.Token,
        # but kjParseBuild is never imported at module scope (only inside
        # GrammarBuild), so every non-"::" token raised NameError.  The
        # base class lives in kjParser.
        return kjParser.LexDictionary.Token(self, String, StartPosition)
# default bind all rules
def GrammarBuild():
    """Construct and compile the IDL grammar; returns the grammar object."""
    import kjParseBuild
    idl = kjParseBuild.NullCGrammar()
    # install the "::"-aware lexical dictionary before declaring tokens
    idl.LexD = myLexDictionary()
    #idl.SetCaseSensitivity(0) # grammar is not case sensitive for keywords
    DeclareTerminals(idl)
    idl.Keywords(keywords)
    idl.punct(punctuations)
    idl.Nonterms(nonterms)
    #idl.comments([LISPCOMMENTREGEX])
    idl.Declarerules(idlgramstring)
    print "now compiling"
    idl.Compile()
    return idl
if __name__=="__main__": GrammarBuild() | zope.rdb | /zope.rdb-3.5.0.tar.gz/zope.rdb-3.5.0/src/zope/rdb/gadfly/idl.py | idl.py |
# BUGS:
# Lexical error handling is not nice
# Parse error handling is not nice
#
# Lex analysis may be slow for big grammars
# Setting case sensitivity for keywords MUST happen BEFORE
# declaration of keywords.
import kjSet
import string
import re
import string
# set this flag for regression testing at each load
RUNTESTS = 0
# set this flag to enable warning for default reductions
WARNONDEFAULTS = 0

# some local constants
TERMFLAG = -1 # FLAG FOR TERMINAL
NOMATCHFLAG = -2 # FLAG FOR NO MATCH IN FSM
MOVETOFLAG = -3 # FLAG FOR "SHIFT" IN SN FSM
REDUCEFLAG = -4 # FLAG FOR REDUCTION IN FSM
TRANSFLAG = -5 # FLAG FOR TRANSIENT STATE IN FSM
KEYFLAG = -6 # FLAG FOR KEYWORD
NONTERMFLAG = -7 # FLAG FOR NONTERMINAL
TERMFLAG = -8 # FLAG FOR TERMINAL
# NOTE(review): TERMFLAG is assigned twice (-1 above, -8 here); the -8
# value wins at runtime.  Looks like a leftover -- confirm before
# relying on either value elsewhere.
EOFFLAG = "*" # FLAG for End of file

# set this string to the Module name (filename)
# used for dumping reconstructable objects
THISMODULE = "kjParser"

# regular expression for matching whitespace
WHITERE = "["+string.whitespace+"]+"
WHITEREGEX = re.compile(WHITERE)

# local errors (old-style Python 2 string exceptions)
LexTokenError = "LexTokenError" # may happen on bad string
UnkTermError = "UnkTermError" # ditto
BadPunctError= "BadPunctError" # if try to make whitespace a punct
ParseInitError = "ParseInitError" # shouldn't happen?
#EOFError # may happen on bad string
FlowError = "FlowError" # shouldn't happen!!! (bug)
#SyntaxError # may happen on bad string
#TypeError
ReductError = "ReductError" # shouldn't happen?
NondetError = "NondetError" # shouldn't happen?

# the end of file is interpreted in the lexical stream as
# a terminal...
# this should be appended to the lexical stream:
ENDOFFILETOKEN = (TERMFLAG, EOFFLAG)
# in FSM use the following terminal to indicate eof
ENDOFFILETERM = (ENDOFFILETOKEN, EOFFLAG)
# Utility function for match conversion from regex to re
def RMATCH(re, key, start=0):
    """Match compiled pattern `re` against `key` at offset `start`.

    Returns the length of the match, or -1 when there is no match
    (mimicking the old regex-module interface on top of re).
    """
    m = re.match(key, start)
    if m is None:
        return -1
    return m.end() - m.start()
# utility function for error diagnostics
def DumpStringWindow(Str, Pos, Offset=15):
    """Return a two-line diagnostic showing Str around position Pos.

    The window extends Offset characters to each side of Pos, clipped to
    the string bounds, with a "*" marking the error position.

    Modernized: backquote repr and string.join (both removed in Python 3)
    replaced by the equivalent repr() and str.join, which behave
    identically under Python 2 as well.
    """
    L = []
    L.append("near ::")
    start = Pos-Offset
    end = Pos+Offset
    if start<0: start = 0
    if end>len(Str): end = len(Str)
    L.append(repr(Str[start:Pos])+"*"+repr(Str[Pos:end]))
    return "\n".join(L)
# lexical dictionary class
# this data structure is used by lexical parser below.
#
# basic operations:
# LD.punctuation(string)
# registers a string as a punctuation
# EG: LD.punctuation(":")
# Punctuations are treated as a special kind of keyword
# that is recognized even when not surrounded by whitespace.
# IE, "xend" will not be recognized as "x end", but "x;" will be
# recognized as "x ;" if "end" is a regular keyword but
# ";" is a punctuation. Only single character punctuations
# are supported (now), ie, ":=" must be recognized as
# ":" "=" above the lexical level.
#
# LD.comment(compiled_reg_expression)
# registers a comment pattern
# EG LD.comment(regex.compile("--.*\n"))
# asks to recognize ansi/sql comments like "-- correct?\n"
#
# LD.keyword(keyword_string, canonicalstring)
# specifies a keyword string that should map to the canonicalstring
# when translated to the lexical stream.
# EG: LD.keyword("begin","BEGIN"); LD.keyword("BEGIN","BEGIN")
# will recognize upcase or downcase begins, not mixed case.
# (automatic upcasing is allowed below at parser level).
#
# LD[compiled_reg_expression] = (TerminalFlag, Function) # assignment!
# specifies a regular expression that should be associated
# with the lexical terminal marker TerminalFlag
# EG: LD[regex.compile("[0-9]+")] = ("integer",string.atoi)
# the Function should be a function on one string argument
# that interprets the matching string as a value. if None is
# given, just the string itself will be used as the
# interpretation. (a better choice above would be a function
# which "tries" atoi first and uses atol on overflow).
# NOTE: ambiguity among regular expressions will be decided
# arbitrarily (fix?).
#
# LD[string] # retrieval!
# returns ((KEYFLAG, Keywordstring), Keywordstring)
# if the (entire) string matches a keyword or a
# punctuation Keywordstring.
# otherwise returns ((TERMFLAG, Terminalname), value)
# if the (entire) string matches the regular expression for
# a terminal flaged by Terminalname; value is the interpreted
# value. TerminalFlag better be something other than
# KEYFLAG!
# otherwise raises an error!
# comments not filtered here!
#
# the following additional functions are used for autodocumentation
# in declaring rules, etcetera.
# begin = LD.keyword("begin")
# sets variable "begin" to (KEYFLAG, "BEGIN") if
# "begin" maps to keyword "BEGIN" in LD
# integer = LD.terminal("integer")
# sets variable integer to ("integer", Function)
# if "integer" is a registered terminal Function is
# its associated interpretation function.
#
class LexDictionary:
    """Lexical dictionary mapping strings to token representations.

    Recognizes keywords/punctuations (via an internal KeywordDict),
    terminals (via registered regular expressions with interpretation
    functions), and comments.  See the protocol description in the
    comment block above this class.
    """
    def __init__(self):
        # commentpatterns is simply a list of compiled regular expressions
        # that represent comments
        self.commentpatterns = []
        # commentstrings is used for debugging/dumping/reconstruction etc.
        self.commentstrings = []
        # punctuationlist is a string of punctuations
        self.punctuationlist = ""
        # keywordmap is a dictionary mapping recognized keyword strings
        # and punctuations to their constant representations.
        self.keywordmap = KeywordDict()
        # regexprlist is a list of triples (regex,Flag,function) mapping
        # regular expressions to their flag and interpreter function.
        self.regexprlist = []
    def Dump(self):
        # debugging aid: print the dictionary contents
        print "comments = ", self.commentstrings
        print "punctuations = ", self.punctuationlist
        print "keywordmap ="
        self.keywordmap.Dump()
        print "regexprlist =", self.regexprlist
    def __getitem__(self,key):
        """Map the (entire) string key to its token: try keywords first,
        then the registered terminal regular expressions."""
        # try to match string to a keyword
        try:
            return self.keywordmap[key]
        except KeyError:
            # try to match a regular expression
            found = 0 # so far not found
            length = len(key)
            for triple in self.regexprlist:
                (regexpr, Flag, Function) = triple
                index = RMATCH(regexpr,key)
                # only accept a match covering the whole string
                if index == length:
                    found = 1
                    # use the function to interpret the string, if given
                    if Function != None:
                        value = Function(key)
                    else:
                        value = key
                    # NONLOCAL RETURN
                    return (Flag, value)
            #endfor
            raise LexTokenError, "no match for string: " + `key`
    #enddef __getitem__
    # LD.keyword("this") will make a new keyword "this" if not found
    #
    def keyword(self,str):
        """Return the token for keyword str, registering it if new."""
        # upcase the string, if needed
        if self.keywordmap.caseInsensitive:
            str = string.upper(str)
        if not self.keywordmap.has_key(str):
            # redundancy to avoid excess construction during parsing
            token = (KEYFLAG,str)
            self.keywordmap[str] = (token,str)
        else:
            (token, str2) = self.keywordmap[str]
        return token
    # LD.terminal("this") will just look for "this"
    # LD.terminal("this", RE, F) will register a new terminal
    #   RE must be a compiled regular expression or string reg ex
    #   F must be an interpretation function
    # (NOTE: the parameter named "string" shadows the string module
    #  inside this method.)
    def terminal(self, string, RegExpr=None, Function=None):
        """Look up terminal string (registering it first when RegExpr
        and Function are both given); return its flag token."""
        if RegExpr != None and Function != None:
            if type(RegExpr) == type(""):
                RegExpr = re.compile(RegExpr)
            self[ RegExpr ] = ( string, Function)
        for triple in self.regexprlist:
            (regexpr,token,Function) = triple
            if token[1] == string:
                # nonlocal exit
                return token
        #endfor
        # error if no exit by now
        raise UnkTermError, "no such terminal"
    def __setitem__(self,key,value):
        """Register key: a keyword string, or a compiled regular
        expression mapped to (TerminalName, Function)."""
        if type(key) == type(''):
            # if it's a string it must be a keyword
            if self.keywordmap.caseInsensitive:
                value = string.upper(value)
                key = string.upper(key)
            self.keywordmap[key] = ( (KEYFLAG, value), value)
        else:
            # otherwise it better be a compiled regular expression (not
            #verified)
            (Name, Function) = value
            Flag = (TERMFLAG, Name)
            regexpr = key
            self.regexprlist = self.regexprlist + \
               [ (regexpr, Flag, Function) ]
    # register a regular expression as a comment
    def comment(self, string):
        # regexpr better be a uncompiled string regular expression! (not verified)
        regexpr = re.compile(string)
        self.commentpatterns = self.commentpatterns + [ regexpr ]
        self.commentstrings = self.commentstrings + [ string ]
    # register a string as a punctuation
    def punctuation(self,Instring):
        """Register single-character Instring as punctuation and return
        its keyword token."""
        if type(Instring) != type("") or len(Instring)!=1:
            raise BadPunctError, "punctuation must be string of length 1"
        if Instring in string.whitespace:
            raise BadPunctError, "punctuation may not be whitespace"
        self.punctuationlist = self.punctuationlist + Instring
        return self.keyword(Instring)
    # testing and altering case sensitivity behavior
    def isCaseSensitive(self):
        return not self.keywordmap.caseInsensitive
    # setting case sensitivity MUST happen before keyword
    # declarations!
    def SetCaseSensitivity(self, Boolean):
        self.keywordmap.caseInsensitive = not Boolean
    # function to do same as __getitem__ above but looking _inside_ a string
    # instead of at the whole string
    # returns (token,skip)
    # where token is one of
    #  ((KEYFLAG,name),name) or ((TERMFLAG,termname),value)
    # and skip is the length of substring of string that matches the token
    def Token(self, String, StartPosition):
        """Return (token, skip) for the token found at StartPosition,
        skipping any leading whitespace and comments."""
        finished = 0 # dummy, exit should be nonlocal
        totalOffset = 0
        while not finished:
            # flag EOF if past end of string?
            if len(String) <= StartPosition:
                return (ENDOFFILETERM, 0)
            # skip whitespace
            whitespacefound = 0
            skip = RMATCH(WHITEREGEX,String, StartPosition)
            if skip > 0:
                StartPosition = StartPosition + skip
                totalOffset = totalOffset + skip
                whitespacefound = 1
            # try to find comment, keyword, term in that order:
            # looking for comment
            commentfound = 0
            for commentexpr in self.commentpatterns:
                offset = RMATCH(commentexpr,String,StartPosition)
                if offset != -1:
                    # zero-length comments would stall the scan loop
                    if offset<1:
                        info = DumpStringWindow(String,StartPosition)
                        raise LexTokenError, "zero length comment "+info
                    commentfound = 1
                    StartPosition = StartPosition + offset
                    totalOffset = totalOffset + offset
            # looking for a keyword
            keypair = self.keywordmap.hasPrefix(String,StartPosition,
                          self.punctuationlist)
            if keypair != 0:
                return ( keypair[0], keypair[1] + totalOffset)
            # looking for terminal
            for (regexpr, Flag, Function) in self.regexprlist:
                offset = RMATCH(regexpr,String,StartPosition)
                if offset != -1:
                    matchstring = String[StartPosition : offset+StartPosition]
                    if Function != None:
                        value = Function(matchstring)
                    else:
                        value = matchstring
                    return ((Flag, value) , offset + totalOffset)
            # nothing matched: only loop again if we consumed
            # whitespace or a comment this pass, otherwise fail
            if not (commentfound or whitespacefound):
                info = DumpStringWindow(String,StartPosition)
                raise LexTokenError, "Lexical parse failure "+info
        #endwhile
    #enddef
#endclass LexDictionary
# alternate, experimental implementation
class lexdictionary:
def __init__(self):
self.skip = ""
self.commentstrings = []
self.punctuationlist = ""
self.keywordmap = KeywordDict()
self.termlist = [] # list of (term, regex, flag, interpret_fn)
self.uncompiled = 1 # only compile after full initialization.
self.laststring= self.lastindex= self.lastresult = None
def Dump(self, *k):
raise "sorry", "not implemented"
__getitem__ = Dump
def keyword(self, str):
kwm = self.keywordmap
if kwm.caseInsensitive:
str = string.upper(str)
try:
(token, str2) = kwm[str]
except:
token = (KEYFLAG, str)
self.keywordmap[str] = (token,str)
return token
def terminal(self, str, regexstr=None, Function=None):
if regexstr is not None:
flag = (TERMFLAG, str)
self.termlist.append( (str, regexstr, flag, Function) )
return flag
else:
for (s,fl,fn) in self.termlist:
if fl[1]==str:
return fl
else:
raise UnkTermError, "no such terminal"
__setitem__ = Dump
def comment(self, str):
self.commentstrings.append(str)
def punctuation(self, Instring):
if type(Instring) != type("") or len(Instring)!=1:
raise BadPunctError, "punctuation must be string of length 1"
if Instring in string.whitespace:
raise BadPunctError, "punctuation may not be whitespace"
self.punctuationlist = self.punctuationlist + Instring
return self.keyword(Instring)
def SetCaseSensitivity(self, Boolean):
self.keywordmap.caseInsensitive = not Boolean
def Token(self, String, StartPosition):
# shortcut for reductions.
if self.laststring is String and self.lastindex == StartPosition:
#print "lastresult", self.lastresult
return self.lastresult
self.lastindex = StartPosition
self.laststring = String
#print `String[StartPosition: StartPosition+60]`
if self.uncompiled:
self.compile()
self.uncompiled = None
finished = 0
totalOffset = 0
skipprog = self.skipprog
keypairfn = self.keywordmap.hasPrefix
punctlist = self.punctuationlist
termregex = self.termregex
while not finished:
if len(String) <= StartPosition:
result = self.lastresult = (ENDOFFILETERM, 0)
return result
# skip ws and comments
#skip = skipprog.match(String, StartPosition)
skip = RMATCH(skipprog, String, StartPosition)
if skip>0:
if skip==0:
info = DumpStringWindow(String, StartPosition)
raise LexTokenError, \
"zero length whitespace or comment "+info
StartPosition = StartPosition + skip
totalOffset = totalOffset + skip
continue
# look for keyword
keypair = keypairfn(String, StartPosition, punctlist)
if keypair!=0:
#print "keyword", keypair
result = self.lastresult = (keypair[0], keypair[1]+totalOffset)
return result
# look for terminal
#print "Termregex: %s --> %s <-- start=%s" % (termregex.pattern, String, StartPosition)
offset = termregex.match(String, StartPosition)
if offset is not None:
g = offset.group
for (term, regex, flag, fn) in self.termlist:
test = g(term)
if test:
#print "terminal", test
if fn is not None:
value = fn(test)
else:
value = test
result = self.lastresult = (
(flag, value), offset.end() - offset.start() + totalOffset)
return result
# error if we get here
info = DumpStringWindow(String, StartPosition)
raise LexTokenError, "Lexical token not found "+info
def isCaseSensitive(self):
return not self.keywordmap.caseInsensitive
def compile(self):
from string import joinfields, whitespace
import re
skipregexen = self.commentstrings + [WHITERE]
skipregex = "(" + joinfields(skipregexen, ")|(") + ")"
#print skipregex; import sys; sys.exit(1)
self.skipprog = re.compile(skipregex)
termregexen = []
termnames = []
for (term, rgex, flag, fn) in self.termlist:
fragment = "(?P<%s>%s)" % (term, rgex)
termregexen.append(fragment)
termnames.append(term)
termregex = joinfields(termregexen, "|")
self.termregex = re.compile(termregex)
self.termnames = termnames
# replace the original implementation with the experimental one above
LexDictionary = lexdictionary ##### test!
# a utility class: dictionary of prefixes
# should be generalized to allow upcasing of keyword matches
class KeywordDict:
    """Dictionary of keywords/punctuations, bucketed by first character.

    Supports whole-string lookup (__getitem__, has_key) and prefix
    recognition inside a larger string (hasPrefix).  When
    caseInsensitive is true, keys are upcased on storage and lookup.
    """
    def __init__(self, caseInsensitive = 0):
        # FirstcharDict: first char -> list of (KEY, value) pairs
        # sharing that first character (speeds up prefix scans)
        self.FirstcharDict = {}
        # KeyDict: full key string -> value
        self.KeyDict = {}
        self.caseInsensitive = caseInsensitive
    def Dump(self):
        # debugging aid: print both internal dictionaries
        if self.caseInsensitive:
            print " case insensitive"
        else:
            print " case sensitive"
        keys = self.KeyDict.keys()
        print " keyDict has ", len(keys), " elts"
        for key in keys:
            print "  ", key," maps to ",self.KeyDict[key]
        firstchars = self.FirstcharDict.keys()
        print " firstcharDict has ", len(firstchars), " elts"
        for char in firstchars:
            print "  ", char," maps to ",self.FirstcharDict[char]
    # set item assumes value has correct case already, if case sensitive
    def __setitem__(self, key, value):
        if len(key)<1:
            raise LexTokenError, "Keyword of length 0"
        if self.caseInsensitive:
            KEY = string.upper(key)
        else:
            KEY = key
        firstchar = KEY[0:1]
        if self.FirstcharDict.has_key(firstchar):
            self.FirstcharDict[firstchar] = \
              self.FirstcharDict[firstchar] + [(KEY, value)]
        else:
            self.FirstcharDict[firstchar] = [(KEY, value)]
        self.KeyDict[KEY] = value
    # if String has a registered keyword at start position
    # return its canonical representation and offset, else 0
    # keywords that are not punctuations should be
    # recognized only if followed
    # by a punctuation or whitespace char
    #
    def hasPrefix(self,String,StartPosition,punctuationlist):
        """Return (value, offset) if a registered keyword starts at
        StartPosition in String, else 0."""
        First = String[StartPosition:StartPosition+1]
        fcd = self.FirstcharDict
        caseins = self.caseInsensitive
        if caseins:
            First = string.upper(First)
        # only candidates sharing the first character need be tested
        if fcd.has_key(First):
            Keylist = fcd[First]
        else:
            return 0
        for (key,value) in Keylist:
            offset = len(key)
            EndPosition = StartPosition+offset
            match = String[StartPosition : EndPosition]
            if caseins:
                match = string.upper(match)
            if key == match:
                if len(key)==1 and key in punctuationlist:
                    # punctuations are recognized regardless of nextchar
                    return (value,offset)
                else:
                    # nonpuncts must have punct or whitespace following
                    #(uses punct as single char convention)
                    if EndPosition == len(String):
                        return (value, offset)
                    else:
                        nextchar = String[EndPosition]
                        if nextchar in string.whitespace\
                         or nextchar in punctuationlist:
                            return (value, offset)
        return 0 # if no exit inside for loop, fail
    def __getitem__(self,key):
        if self.caseInsensitive:
            key = string.upper(key)
        return self.KeyDict[key]
    def has_key(self,key):
        if self.caseInsensitive:
            key = string.upper(key)
        return self.KeyDict.has_key(key)
#endclass KeywordDict:
# LexStringWalker walks through a string looking for
# substrings recognized by a lexical dictionary
#
# ERROR REPORTING NEEDS IMPROVEMENT
class LexStringWalker:
    """Token stream over a string, driven by a lexical dictionary.

    Implements the stream protocol used by ParserObj:
      getmember() returns the current token,
      next() advances to the following token,
      more() is false once EOF has been delivered.
    """
    def __init__(self, String, LexDict):
        # Position: start of the current token;
        # NextPosition: position just after it (set by getmember)
        self.Position = 0
        self.NextPosition = 0
        self.String = String
        self.LexDict = LexDict
        self.PastEOF = 0
        self.Done = 0
    def DUMP(self):
        # diagnostic: a window of the string around the current position
        return DumpStringWindow(self.String,self.Position)
    #reset not defined
    def more(self):
        return not self.PastEOF
    def getmember(self):
        """Return the current token (computing NextPosition as a side
        effect); flags PastEOF when the EOF token is seen."""
        (Token,skip) = self.LexDict.Token(self.String, self.Position)
        self.NextPosition = self.Position + skip
        if Token == ENDOFFILETERM:
            self.PastEOF = 1
        return Token
    def next(self):
        """Advance to the next token; error if already past end of file
        or if the walker fails to advance."""
        if self.Done:
            data = self.DUMP()
            raise LexTokenError, "no next past end of file "+data
        elif self.PastEOF:
            self.Done=1
        elif self.NextPosition > self.Position:
            self.Position = self.NextPosition
        else:
            # getmember has not yet been called at this position; call
            # it to compute NextPosition before advancing
            dummy = self.getmember()
            if self.NextPosition <= self.Position:
                data = self.DUMP()
                raise LexTokenError, "Lexical walker not advancing "+data
            self.Position = self.NextPosition
#endclass LexStringWalker
# the parse class:
# Based loosely on Aho+Ullman, Principles of Compiler Design, Ch.6.
# except that they don't describe how to handle boundary
# conditions, I made them up myself.
#
# Note: This could be implemented using just functions; it's implemented
# as a class to facilitate diagnostics and debugging in case of
# failures of various sorts.
#
# a parse accepts
# a rule list
#
# a lexically analysed stream with methods
# stream.getmember() returns the current token on the stream
# stream.next() moves on to next token
# stream.more() returns false if current token is the last token
#
# and a FSM (finite state machine) with methods
# FSM.root_nonTerminal
# the nonterminal at which to start parsing
# FSM.initial_state
# the initial state to start at
# FSM.successful_final_state
# the final state to go to upon successful parse
# FSM.map(Current_State,Current_Token)
# returns either
# (TERMFLAG, 0)
# if Current_State is terminal (final or reduction).
# (NOMATCHFLAG, 0)
# if Current_State is nonterminal, but the Current_Token
# and Next_Token do not lead to a valid state in the FSM
# (MOVETOFLAG, Next_State)
# if Current_State is nonterminal and Current_Token,
# Next_token map to Next_State from Current_State.
# (REDUCEFLAG, Rulenum)
# if Current_State indicates a reduction at Current_Token
# for rule Rule number Rule
#
# and a Stack with methods (replaced with dictionary)
# (init: {-1:0} )
# Stack.Top() returns top of stack (no pop)
# ( Stack[Stack[-1]] )
# Stack.Push(Object)
# ( Stack[-1]=Stack[-1]+1; Stack[Stack[-1]]=Object )
# Stack.MakeEmpty()
# ( Stack[-1]=0 )
# Stack.IsEmpty()
# ( Stack[-1] == 0 )
# Stack.Pop()
# ( Stack[-1] = Stack[-1]-1 )
# stack contents created by Parser will be of form (State,Value)
# where Value was inserted at FSM state State.
# Value of form either (KEYFLAG, Name)
# (NontermName, reductionvalue)
# or (TerminalName, value)
#
# and an optional parameter Evaluate which if 0 indicates that
# rules should be evaluated, otherwise indicates that rules
# should just be reduced and the reduction structure should
# be used as the result of the rule
#
# rule objects must support methods
# Rule.reduce(Stack)
# pops off the elements corresponding to the body of the Rule
# from the stack and returns (NewStack,Red) where NewStack is
# the stack minus the body and Red is the result of evaluating the
# reduction function on this instance of the rule.
# Rule.Nonterm
# the nonterminal at the head of the rule
class ParserObj:
# Evaluate determines whether rules should be evaluated
# after reductions. Context is an argument passed to the
# list reduction function
#
def __init__(self, Rulelist, Stream, FSM, Stack, \
Evaluate=1, \
Context=None):
self.Rules = Rulelist
self.LexStream = Stream
self.FSM = FSM
self.Stack = Stack
self.Context = Context
# start with empty stack, initial_state, no nonterminal
#self.Stack[-1] = 0# self.Stack.MakeEmpty()
self.Stack[:] = []
self.State = FSM.initial_state
self.currentNonterm = None
self.Evaluate = Evaluate
# DoOneReduction accepts tokens from the stream and pushes
# them onto the stack until a reduction state is reached.
#
# Resolve the reduction
#
def DoOneReduction(self):
current=self.State
FSM=self.FSM
Stack = self.Stack
Context = self.Context
Stream = self.LexStream
# the internal FSM.StateTokenMap dictionary is used directly here.
STMap = FSM.StateTokenMap
#if FSM.final_state(current):
# raise ParseInitError, 'trying to reduce starting at final state'
tokenVal = Stream.getmember()
#print "tokenVal", tokenVal
token = tokenVal[0]
# push the token and traverse FSM until terminal state is reached
#(flag, nextThing) = FSM.map(current, token)
key = (current, token)
try:
(flag, nextThing) = STMap[key][0]
except KeyError:
flag = NOMATCHFLAG
while flag == MOVETOFLAG:
nextState = nextThing
#print current, " shift ", token,
# no sanity check, possible infinite loop
# push current token and next state
ThingToPush = (nextState, tokenVal)
#print "pushing ", ThingToPush
#Stack[-1]=Stack[-1]+1; Stack[Stack[-1]]=ThingToPush
Stack.append(ThingToPush)
#Stack.Push( ThingToPush )
# move to next token, next state
Stream.next()
# error if end of stream
if not Stream.more(): # optimized Stream.PastEOF (?)
data = Stream.DUMP()
raise EOFError, 'end of stream during parse '+data
current = nextState
tokenVal = Stream.getmember()
token = tokenVal[0]
#MAP = FSM.map(current,token)
key = (current, token)
try:
(flag, nextThing) = STMap[key][0]
except KeyError:
flag = NOMATCHFLAG
# at end of while loop we should be at a reduction state
if flag == REDUCEFLAG:
rulenum = nextThing
#print current, " reduce ", token, self.Rules[rulenum]
# normal case
# perform reduction
rule = self.Rules[rulenum]
Nonterm = rule.Nonterm
self.currentNonterm = Nonterm
(Stack, reduct) = rule.reduce( Stack , Context )
# self.Stack = Stack #not needed, unless stack rep changes
GotoState = self.GotoState(rule)
# push the Gotostate and result of rule reduction on stack
ThingToPush = (GotoState, (Nonterm, reduct) )
# push the result of the reduction and exit normally
#print "pushing ", ThingToPush
#Stack[-1]=Stack[-1]+1; Stack[Stack[-1]]=ThingToPush
Stack.append(ThingToPush)
#Stack.Push(ThingToPush)
self.State=GotoState
return 1 # normal successful completion
# some error cases
elif flag == NOMATCHFLAG:
self.ParseError(current,tokenVal, "nomatch1")
#elif FSM.final_state(current):
# raise BadFinalError, 'unexpected final state reached in reduction'
else:
data = Stream.DUMP()
s = """
flag = %s
map = %s """ % (flag, FSM.map(current,token))
data = data + s
raise FlowError, 'unexpected else '+data
#enddef DoOneReduction
# compute the state to goto after a reduction is performed
# on a rule.
# Algorithm: determine the state at beginning of reduction
# and the next state indicated by the head nonterminal of the rule.
# special case: empty stack and root nonterminal > success.
#
def GotoState(self, rule):
FSM = self.FSM
Stack = self.Stack
Head = rule.Nonterm
if len(Stack)==0: #Stack[-1]==0: #Stack.IsEmpty():
BeforeState = FSM.initial_state
else:
BeforeState = Stack[-1][0] #Stack[Stack[-1]][0] #Stack.Top()[0]
# is this right? if the stack is empty and the Head
# is the root nonterm, then goto is final state
if len(Stack)==0 and Head == FSM.root_nonTerminal:#Stack.isEmpty()
Result = FSM.successful_final_state
else:
# consider eliminating the call to .map here? (efficiency)
(flag, Result) = FSM.map(BeforeState, Head)
if flag != MOVETOFLAG:
#FSM.DUMP()
self.ParseError(BeforeState, Head, "notmoveto")
return Result
def ParseError( self, State, Token, *rest):
# make this parse error nicer (add diagnostic methods?)
L = [""]
L.append("*******************************")
L.append("current state = "+`State`)
L.append("expects: ")
expects = ""
for (flag,name) in self.FSM.Expects(State):
if flag in (TERMFLAG, KEYFLAG):
expects = expects + `name`+ ", "
L.append(expects)
L.append(`rest`)
L.append("current token = " + `Token`)
#print "Stack =",
#self.StackDump(5)
#print
from string import join
data = self.LexStream.DUMP() + join(L, "\n")
raise SyntaxError, 'unexpected token sequence.' + data
def StackDump(self, N):
Stack = self.Stack
Topkey = len(Stack)
if Topkey>N:
Start = Topkey - N
else:
Start = 1
for i in range(Start,Topkey+1):
print " :: ", Stack[i],
# execute parsing until done:
def GO(self):
while self.State != self.FSM.successful_final_state:
#self.FSM.final_state(self.State):
self.DoOneReduction()
# should I check that stack has only one elt here?
# return result of last reduction
return self.Stack[-1][1] #self.Stack.Top()[1]
#endclass ParserObj
# function for declaring a variable to represent a nonterminal:
# eg Program = nonterminal("program")
# included for convenient autodocumentation
#
def nonterminal(string):
    """Declare a variable representing a nonterminal.

    e.g.  Program = nonterminal("program")
    Returns the marker pair (NONTERMFLAG, string); included for
    convenient autodocumentation of grammar declarations.
    """
    marker = (NONTERMFLAG, string)
    return marker
# declaring a terminal WITHOUT INSTALLING IT IN A LexDict
def termrep(string):
    """Declare a terminal representation WITHOUT installing it in a
    LexDict; returns the marker pair (TERMFLAG, string)."""
    marker = (TERMFLAG, string)
    return marker
# the rule class
# a rule is defined by a goal nonterminal marker of form
# (NONTERMFLAG, Name)
# and a list defining the body which must contain elts of form
# (KEYFLAG, Name) or (NONTERMFLAG, Name) of (TERMFLAG, Name)
# and a reduction function which takes a list of the same size
# as the BodyList (consisting of the results of the evaluations of
# the previous reductions)
# and returns an interpretation for the body
# the following function is used as a default reduction function
# for rules
def DefaultReductFun( RuleResultsList, Context ):
    """Default rule reduction: return the results list unchanged,
    optionally warning that no specific reduction was bound."""
    if WARNONDEFAULTS:
        print "warn: default reduction."
        print "  ", RuleResultsList
    return RuleResultsList
class ParseRule:
    """One grammar rule: a goal nonterminal, a body list of token
    markers, and a reduction function applied by reduce().

    See the comment block above this class for the forms the goal and
    body elements must take.
    """
    def __init__(self, goalNonTerm, BodyList, \
                 ReductFunction = DefaultReductFun):
        #print BodyList
        # check some of the arguments (very limited!)
        if len(goalNonTerm) != 2 or goalNonTerm[0] != NONTERMFLAG:
            raise TypeError, "goal of rule must be nonterminal"
        for m in BodyList:
            #print m
            if len(m) != 2:
                raise TypeError, "invalid body form for rule"
        self.Nonterm = goalNonTerm
        self.Body = BodyList
        self.ReductFun = ReductFunction
    # for dumping/reconstruction: LOSES THE INTERPRETATION FUNCTION!
    def __repr__(self):
        return THISMODULE + ".ParseRule" + `self.components()`
    # marshal-able components of a rule
    def components(self):
        return (self.Nonterm, self.Body)
    # rule.reduce(Stack) pops off the stack elements corresponding
    # to the body of the rule and prepares the appropriate reduction
    # object for evaluation (or not) at higher levels
    #
    def reduce(self, Stack, Context=None):
        """Pop this rule's body off Stack and evaluate the reduction.

        Returns (Stack, reduct) where reduct is the value produced by
        the rule's reduction function on the popped body values.
        Raises ReductError if a stack entry's token name does not match
        the corresponding rule body entry.
        """
        Blength = len(self.Body)
        # pop off previous results from stack corresponding to body
        BodyResults = [None] * Blength
        for i in range(1,Blength+1):
            Bindex = Blength - i # stack contents pop off in reverse order
            # get and destructure the rule body entry
            RuleEntry = self.Body[Bindex]
            ( REkind , REname ) = RuleEntry
            # get and destructure the stack entry
            PoppedValue = Stack[-i] #Stack.Top()
            SETokVal = PoppedValue[1]
            SEvalue = SETokVal[1]
            SEname = SETokVal[0][1]
            # the names from rule and stack must match (?)
            if SEname != REname:
                print SEname, REname
                print self
                raise ReductError, " token names don't match"
            # store the values for the reduction
            BodyResults[Bindex] = SEvalue
        #endfor
        # discard the consumed body entries in one slice deletion
        del Stack[len(Stack)-Blength:]
        # evaluate the reduction, in context
        reduct = self.ReductFun(BodyResults, Context)
        if WARNONDEFAULTS and self.ReductFun is DefaultReductFun:
            # should check whether name is defined before this...
            # (self.Name is only set via Grammar.AddNameDict)
            print " default used on ", self.Name
        return (Stack, reduct)
    #enddef ParseRule.reduce
#endclass ParseRule
# for debugging: look through a rule list
# and print names of rules that have default binding
#
def PrintDefaultBindings(rulelist):
    """Debugging aid: print names of rules in rulelist that still use
    the default reduction function.

    NOTE(review): assumes each such rule has a .Name attribute (set by
    Grammar.AddNameDict); unnamed rules would raise AttributeError.
    """
    for r in rulelist:
        if r.ReductFun is DefaultReductFun:
            print r.Name
# the FSM class
#
class FSMachine:
    """Finite state machine driving the parser.

    States live in self.States (index -> [kind, ...]); transitions and
    reductions live in self.StateTokenMap, keyed by (state, token).
    See the comment block above ParserObj for the map() protocol.
    """
    def __init__(self, rootNonTerm):
        # start and success state conventions
        startState=1
        successState=0
        self.root_nonTerminal = rootNonTerm
        self.initial_state = startState
        self.successful_final_state = successState
        # the list of states of the FSM, implemented as a dictionary
        #  entries are identified by their index
        #  content is
        #   a list whose first elt is either TRANSFLAG, or TERMFLAG
        #   other list elts may be added by other layers (parse generator)
        #   indicating the kind of the state.
        self.States = {}
        # allocate start and success states
        self.States[startState]=[TRANSFLAG]
        self.States[successState]=[TERMFLAG]
        # the most recently allocated state
        self.maxState= startState
        # the map of current token+state number to next state
        #with entries of form (tokenname,state):nextstate_sequence
        #
        self.StateTokenMap = {}
    #enddef FSM()
    # ForbiddenMark is for filtering out maps to an error state
    # (the mutable {} default is deliberate: it is a mark that normal
    #  state marks will not equal, and it is never mutated)
    def DUMP(self, DumpMapData=1, DumpStateData=1, ForbiddenMark={}):
        print "root nonterminal is ", self.root_nonTerminal
        print "start at ", self.initial_state
        print "end at ", self.successful_final_state
        print "number of states: ", self.maxState
        if DumpStateData:
            print
            for State in range(0,self.maxState+1):
                Data = self.States[State]
                print State, ": ", Data
        if DumpMapData:
            print
            for key in self.StateTokenMap.keys():
                map = self.StateTokenMap[key]
                if map[0][0] == MOVETOFLAG:
                    ToStateData = self.States[map[0][1]]
                    if len(ToStateData) < 2:
                        Mark = None
                    else:
                        Mark = ToStateData[1]
                    if Mark != ForbiddenMark:
                        print key, " > ", map, " = ", ToStateData
                else:
                    print key, " > reduction to rule number ", map[0][1]
    # what tokens does a state expect?
    def Expects(self, State):
        keys = self.StateTokenMap.keys()
        Tokens = kjSet.NewSet( [] )
        for (state1,token) in keys:
            if State == state1:
                kjSet.addMember(token,Tokens)
        return kjSet.get_elts(Tokens)
    # "allocate" a new state of specified kind
    #  kind must be one of TRANSFLAG, TERMFLAG or REDUCEFLAG
    #  returns the number of the new state
    # (AdditionalInfo's mutable [] default is safe: it is only read,
    #  via concatenation, never mutated)
    def NewState(self, kind, AdditionalInfo = []):
        if not kind in (TRANSFLAG,TERMFLAG,REDUCEFLAG):
            raise TypeError, "unknown state kind"
        available = self.maxState+1
        self.States[available] = [kind] + AdditionalInfo
        self.maxState = available
        return available
    # Install a reduction transition in the FSM:
    # a reduction is represented by mapping to a rule index
    # no nondeterminism is allowed.
    def SetReduction(self, fromState, TokenRep, Rulenum):
        key = (fromState, TokenRep)
        if not self.StateTokenMap.has_key(key):
            self.StateTokenMap[ key ] = ((REDUCEFLAG, Rulenum),)
        else:
            raise ReductError, "attempt to set ambiguous reduction"
    # Install a "shift" or "goto" transition in the FSM:
    # supports nondeterminism by storing a sequence of possible transitions
    #
    def SetMap(self, fromState, TokenRep, toState):
        key = (fromState, TokenRep)
        if self.StateTokenMap.has_key(key):
            Old = self.StateTokenMap[key]
            if Old[0][0] != MOVETOFLAG:
                # if the old value was not an integer, not a "normal state":
                # complain:
                raise NondetError, \
                  "attempt to make inappropriate transition ambiguous"
            self.StateTokenMap[ key ] = Old + ((MOVETOFLAG,toState),)
        else:
            self.StateTokenMap[ key ] = ((MOVETOFLAG,toState),)
    # Find the action indicated by fsm on
    #  (current_state, current_token) input.
    #
    # note: in the event of nondeterministic choice this chooses
    #  the first possibility listed.
    # ParseObj.DoOneReduction() currently uses the internal structure
    #  of StateTokenMap directly, rather than using this function.
    #
    def map(self, current_state, current_token):
        StateEntry = self.States[current_state][0]
        if StateEntry == TERMFLAG:
            return (TERMFLAG, 0)
        elif StateEntry == TRANSFLAG:
            # try to find a transition for this token and state
            key = (current_state, current_token)
            try:
                TMap = self.StateTokenMap[key]
                return TMap[0]
            except KeyError:
                return (NOMATCHFLAG, 0)
        else:
            raise FlowError, "unexpected else (2)"
    #enddef map
#endclass FSMachine
# the grammar class:
# a grammar consists of
# - a LexDict lexical dictionary;
# - a deterministic FSMachine;
# - a Rulelist
# and optionally a dictionary that maps Rulenames
# to Rulelist indices (used for dumping and externally)
#
class Grammar:
    """A grammar: a LexDict lexical dictionary, a deterministic
    FSMachine and a rule list; optionally a dictionary mapping rule
    names to rule-list indices (used for dumping and rebinding)."""
    def __init__(self, LexD, DFA, RuleL, RuleNameDict = None):
        # for auto initialization set LexD,DFA,RuleL to None
        if LexD == None and DFA == None and RuleL == None:
            self.LexD = LexDictionary()
            # use a dummy root nonterminal -- must fix elsewhere!
            self.DFA = FSMachine("ERROR")
            self.RuleL = []
        else:
            self.LexD = LexD
            self.DFA = DFA
            self.RuleL = RuleL
        if RuleNameDict != None:
            self.AddNameDict(RuleNameDict)
        self.CleanUp()
    #enddef __init__
    # look for default bindings
    def PrintDefaults(self):
        print "Default bindings on:"
        PrintDefaultBindings(self.RuleL)
    # setting case sensitivity: must happen before keyword installation
    # in LexD.
    def SetCaseSensitivity( self, Boolean ):
        self.LexD.SetCaseSensitivity( Boolean )
    # this may be silly, but to save some space in construction
    # a token dictionary may be used that facilitates sharing of
    # token representations.  This method either initializes
    # the dictionary or disposes of it if it exists
    def CleanUp(self):
        self.IndexToToken = {}
        # this dictionary is used by automatically
        # generated grammars to determine whether
        # a string represents a nonterminal
        self.NonTermDict = {}
        # similarly for terminals
        self.TermDict = {}
        # this string may be used to keep a printable
        # representation of the rules of the grammar
        # (usually in automatic grammar generation)
        self.RuleString = ""
    # to associate a token to an integer use
    #   self.IndexToToken[int] = tokenrep
    # this method associates rules to names using a
    # RuleNameDict dictionary which maps names to rule indices.
    # after invocation
    #   self.RuleNameToIndex[ name ] gives the index
    #     in self.RuleL for the rule associated with name, and
    #   self.RuleL[index].Name gives the name associated
    #     with the rule self.RuleL[index]
    #
    def AddNameDict(self, RuleNameDict):
        self.RuleNameToIndex = RuleNameDict
        # add a Name attribute to the rules of the rule list
        for ruleName in RuleNameDict.keys():
            index = RuleNameDict[ ruleName ]
            self.RuleL[ index ].Name = ruleName
    # parse a string using the grammar, return result and context
    def DoParse( self, String, Context = None, DoReductions = 1 ):
        """Parse String; return (final reduction value, Context)."""
        # construct the ParserObj
        Stream = LexStringWalker( String, self.LexD )
        Stack = []
        ParseOb = ParserObj( self.RuleL, Stream, self.DFA, Stack, \
                             DoReductions, Context )
        # do the parse
        ParseResult = ParseOb.GO()
        # return final result of reduction and the context
        return (ParseResult[1], Context)
    #enddef DoParse
    # parse a string using the grammar, but only return
    # the result of the last reduction, without the context
    def DoParse1( self, String, Context=None, DoReductions=1 ):
        return self.DoParse(String, Context, DoReductions)[0]
    # if the Name dictionary has been initialized
    # this method will (re)bind a reduction function to
    # a rule associated with Rulename
    #
    def Bind( self, Rulename, NewFunction ):
        ruleindex = self.RuleNameToIndex[ Rulename ]
        rule = self.RuleL[ ruleindex ]
        rule.ReductFun = NewFunction
    #enddef Bind
    # bind a terminal to a regular expression and interp function
    # in the lexical dictionary (convenience)
    def Addterm( self, termname, regexpstr, funct ):
        self.TermDict[ termname ] =\
          self.LexD.terminal( termname, regexpstr, funct )
#endclass Grammar
# function to create a "null grammar"
def NullGrammar():
    """Create an empty "null" grammar: auto-initialized members plus a
    fresh (empty) rule-name dictionary."""
    emptyNameDict = {}
    return Grammar(None, None, None, emptyNameDict)
# unmarshalling a marshalled grammar created by
# buildmodule.CGrammar.MarshalDump(Tofile)
# tightly coupled with buildmodule code...
# file should be open and "pointing to" the marshalled rep.
#
# warning: doesn't bind semantics!
#
def UnMarshalGram(file):
    """Reconstruct a Grammar from the marshalled representation in the
    open file (as written by buildmodule.CGrammar.MarshalDump).

    warning: does not bind rule semantics (reduction functions)!
    """
    gram = NullGrammar()
    loader = UnMarshaller(file, gram)
    loader.MakeLex()
    loader.MakeRules()
    loader.MakeTransitions()
    loader.Cleanup()
    return loader.Gram
# unmarshalling object for unmarshalling grammar from a file
#
class UnMarshaller:
    """Rebuilds a Grammar from the marshalled list written by
    buildmodule.CGrammar.MarshalDump (tightly coupled to that code).

    Usage: construct, then call MakeLex(), MakeRules(),
    MakeTransitions(), Cleanup() in that order (see UnMarshalGram).
    """
    def __init__(self, file, Grammar):
        import marshal
        self.Gram = Grammar
        BigList = marshal.load(file)
        if type(BigList) != type([]):
            raise FlowError, "bad type for unmarshalled list"
        if len(BigList) != 9:
            raise FlowError, "unmarshalled list of wrong size"
        # the nine components of the marshalled representation:
        self.tokens = BigList[0]
        self.punct = BigList[1]
        self.comments = BigList[2]
        self.RuleTups = BigList[3]
        self.MaxStates = BigList[4]
        self.reducts = BigList[5]
        self.moveTos = BigList[6]
        self.Root = BigList[7]
        self.CaseSensitivity = BigList[8]
        # must happen before any keywords are installed in MakeLex
        Grammar.SetCaseSensitivity( self.CaseSensitivity )
    def MakeLex(self):
        """Install punctuations, comments and keywords into the
        Grammar's lexical dictionary."""
        Grammar=self.Gram
        LexD = Grammar.LexD
        # punctuations
        LexD.punctuationlist = self.punct
        # comments
        for commentregex in self.comments:
            LexD.comment(commentregex)
        # keywords, terminals, nonterms
        # rewrite the tokens list for sharing and extra safety
        LexTokens = {}
        tokens = self.tokens
        for tokenindex in range(len(tokens)):
            (kind,name) = tokens[tokenindex]
            if kind == KEYFLAG:
                tokens[tokenindex] = LexD.keyword(name)
            elif not kind in [TERMFLAG, NONTERMFLAG]:
                raise FlowError, "unknown token type"
        # not needed
        self.tokens = tokens
    def MakeRules(self):
        """Reconstruct ParseRule objects and the name->index mapping."""
        Grammar = self.Gram
        Grammar.DFA.root_nonTerminal = self.Root
        # NOTE: requires Grammar.RuleNameToIndex to exist (set when the
        # Grammar was built with a RuleNameDict, as NullGrammar does)
        NameIndex = Grammar.RuleNameToIndex
        RuleTuples = self.RuleTups
        nRules = len(RuleTuples)
        RuleList = [None] * nRules
        for index in range(nRules):
            (Name, Components) = RuleTuples[index]
            rule = apply(ParseRule, Components)
            rule.Name = Name
            RuleList[index] = rule
            NameIndex[Name] = index
        Grammar.RuleL = RuleList
    def MakeTransitions(self):
        """Install reductions and shift/goto transitions into the DFA."""
        Grammar = self.Gram
        DFA = Grammar.DFA
        StateTokenMap = DFA.StateTokenMap
        tokens = self.tokens
        # record the state number
        DFA.maxState = self.MaxStates
        # this is historical, unfortunately...  CLEAN IT UP SOMEDAY!
        # THE DFA.States DICT IS NOT NEEDED (?) (here)
        for state in range(1, self.MaxStates+1):
            DFA.States[state] = [TRANSFLAG]
        # record the reductions
        for (fromState, TokenIndex, rulenum) in self.reducts:
            DFA.SetReduction(fromState, tokens[TokenIndex], rulenum)
        # record the transitions
        for (fromState, TokenIndex, ToState) in self.moveTos:
            DFA.SetMap(fromState, tokens[TokenIndex], ToState)
    def Cleanup(self):
        # reinitialize the Grammar's temporary/shared structures
        Grammar = self.Gram
        Grammar.CleanUp()
################# FOLLOWING CODE IS FOR REGRESSION TESTING ONLY
################# DELETE IT IF YOU WANT/NEED
#### All tests for this module deleted, since
#### ParseBuild module tests are sufficient.
### needs more thorough testing!
#import sys # for debug
def kjtabletest(x):
    """Return x.is_kjtable (true for kjGraph/kjDict/kjSet), or 0 if x
    has no such attribute.

    The previous bare `except:` also swallowed unrelated errors such as
    KeyboardInterrupt; only a missing attribute should mean "not a table".
    """
    try:
        return x.is_kjtable
    except AttributeError:
        return 0
unhashable = "unhashable key error"
class kjGraph:
    """Multi-valued table (pure-python kjbuckets kjGraph): each key maps
    to a list of associated values.

    Attributes:
      key_to_list -- dict mapping key -> list of values
      dirty       -- set to 1 by lossy/mutating operations
      hashed      -- cached hash; once set the table is immutable
    """
    is_kjtable = 1
    def __init__(self, *args):
        """Init from nothing, an int (ignored), a sequence of (key, value)
        pairs, or another kjTable."""
        #print "kjGraph.__init__", args
        key_to_list = self.key_to_list = {}
        self.dirty = 0
        self.hashed = None
        #print args
        if args:
            if len(args)>1:
                raise ValueError, "only 1 or 0 argument supported"
            from types import IntType, ListType, TupleType
            arg = args[0]
            targ = type(arg)
            test = key_to_list.has_key
            if type(arg) is IntType:
                return # ignore int initializer (presize not implemented)
            elif type(arg) is ListType or type(arg) is TupleType:
                for (x,y) in arg:
                    if test(x):
                        key_to_list[x].append(y)
                    else:
                        key_to_list[x] = [y]
                return
            aclass = arg.__class__
            if aclass is kjGraph:
                aktl = arg.key_to_list
                for k in aktl.keys():
                    key_to_list[k] = aktl[k][:]
                return
            if aclass is kjDict or aclass is kjSet:
                adict = arg.dict
                for k in adict.keys():
                    key_to_list[k] = [ adict[k] ]
                return
            raise ValueError, "arg for kjGraph must be tuple, list, or kjTable"
    def __repr__(self):
        return "%s(%s)" % (self.__class__.__name__, self.items())
    def _setitems(self, thing):
        """Insert all (key, value) pairs from a pair sequence or kjTable,
        ignoring duplicates."""
        #print "kjGraph._setitem", thing
        #print "setitems", thing
        if self.hashed is not None:
            raise ValueError, "table has been hashed, it is immutable"
        try:
            for (k,v) in thing:
                #print k,v, "going"
                #inlined __setitem__
                try:
                    klist = self.key_to_list[k]
                    #print "klist gotten"
                except KeyError:
                    try:
                        klist = self.key_to_list[k] = []
                    except TypeError:
                        raise unhashable
                if v not in klist:
                    klist.append(v)
        except (TypeError, KeyError):
            # not a pair sequence: maybe another kjTable
            #print sys.exc_type, sys.exc_value
            if kjtabletest(thing):
                self._setitems(thing._pairs())
                self.dirty = thing.dirty
            else: raise ValueError, "cannot setitems with %s" % type(thing)
        except unhashable:
            raise TypeError, "unhashable type"
    def __setitem__(self, item, value):
        """Add the arc item->value (no-op if already present)."""
        ktl = self.key_to_list
        if ktl.has_key(item):
            l = ktl[item]
            if value not in l:
                l.append(value)
        else:
            ktl[item] = [value]
    def __getitem__(self, item):
        # returns an arbitrary (first-listed) value for item
        return self.key_to_list[item][0]
    def __delitem__(self, item):
        # drops ALL arcs from item; lossy, so mark dirty
        self.dirty = 1
        del self.key_to_list[item]
    def choose_key(self):
        """Return some key of the table (arbitrary)."""
        return self.key_to_list.keys()[0]
    def _pairs(self, justtot=0):
        """Return all (key, value) pairs; with justtot true, just the count."""
        myitems = self.key_to_list.items()
        tot = 0
        for (k, v) in myitems:
            tot = tot + len(v)
        if justtot: return tot
        else:
            result = [None]*tot
            i = 0
            for (k,v) in myitems:
                for x in v:
                    result[i] = (k,x)
                    i = i+1
        return result
    def __len__(self):
        # number of arcs, not number of keys
        v = self.key_to_list.values()
        lv = map(len, v)
        from operator import add
        return reduce(add, lv, 0)
    def items(self):
        return self._pairs()
    def values(self):
        """All values, with repetitions, grouped by key."""
        v = self.key_to_list.values()
        from operator import add
        tot = reduce(add, map(len, v), 0)
        result = [None] * tot
        count = 0
        for l in v:
            next = count + len(l)
            result[count:next] = l
            count = next
        return result
    def keys(self):
        return self.key_to_list.keys()
    def member(self, k, v):
        """Test whether arc k->v is in the graph."""
        ktl = self.key_to_list
        if ktl.has_key(k):
            return v in ktl[k]
        return 0
    _member = member # because member redefined for kjSet
    def add(self, k, v):
        """Add arc k->v (same as __setitem__)."""
        ktl = self.key_to_list
        if ktl.has_key(k):
            l = ktl[k]
            if v not in l:
                l.append(v)
        else:
            ktl[k] = [v]
    def delete_arc(self, k, v):
        """Remove arc k->v; KeyError if absent.  Marks the table dirty."""
        self.dirty = 1
        if self.hashed is not None:
            raise ValueError, "table has been hashed, it is immutable"
        try:
            l = self.key_to_list[k]
            i = l.index(v)
            del l[i]
            if not l:
                del self.key_to_list[k]
        except:
            raise KeyError, "not in table"# % (k,v)
    def has_key(self, k):
        return self.key_to_list.has_key(k)
    def subset(self, other):
        """Test whether every arc of self is present in other."""
        oc = other.__class__
        if oc is kjGraph:
            oktl = other.key_to_list
            sktl = self.key_to_list
            otest = oktl.has_key
            for k in sktl.keys():
                if otest(k):
                    l = sktl[k]
                    ol = oktl[k]
                    for x in l:
                        if x not in ol:
                            return 0
                else:
                    return 0
            return 1
        elif oc is kjSet or oc is kjDict:
            # other is single valued: each key list must be [other[k]]
            sktl = self.key_to_list
            odict = other.dict
            otest = odict.has_key
            for k in sktl.keys():
                if otest(k):
                    l = sktl[k]
                    ov = odict[k]
                    for x in l:
                        if ov!=x: return 0
                else:
                    return 0
            return 1
    def neighbors(self, k):
        """List of values reachable one arc from k ([] if none)."""
        try:
            return self.key_to_list[k][:]
        except:
            return []
    def reachable(self, k):
        """kjSet of all nodes reachable from k by one or more arcs."""
        try:
            horizon = self.key_to_list[k]
        except:
            return kjSet()
        else:
            # NOTE(review): this branch returns a plain list, not a kjSet,
            # unlike the other returns -- confirm callers tolerate that.
            if not horizon: return []
            d = {}
            for x in horizon: d[x] = 1
            done = 0
            while horizon:
                newhorizon = []
                for n in horizon:
                    for n2 in self.neighbors(n):
                        if not d.has_key(n2):
                            newhorizon.append(n2)
                            d[n2] = 1
                horizon = newhorizon
            return kjSet(d.keys())
    # NOTE(review): duplicate definition -- identical items() appears above.
    def items(self):
        return self._pairs()
    # ????
    def ident(self):
        """Return self as a kjDict (propagating dirtiness)."""
        result = kjDict(self)
        result.dirty = self.dirty or result.dirty
        return result
    def tclosure(self):
        """Transitive closure of the graph (kjGraph only).

        Uses the Python-2 raise-an-instance trick to reject kjSet/kjDict.
        """
        # quick and dirty
        try:
            raise self
        except (kjSet, kjDict):
            raise ValueError, "tclosure only defined on graphs"
        except kjGraph:
            pass
        except:
            raise ValueError, "tclosure only defined on graphs"
        result = kjGraph(self)
        result.dirty = self.dirty
        addit = result.add
        while 1:
            #print result
            more = result*result
            if more.subset(result):
                return result
            for (x,y) in more.items():
                addit(x,y)
    def Clean(self):
        """Return self if not dirty, else None."""
        if self.dirty: return None
        return self
    def Wash(self):
        self.dirty = 0
    def Soil(self):
        self.dirty = 1
    def remap(self, X):
        """Return kjDict(X o self), or None if the result was lossy."""
        # really only should be defined for kjdict, but whatever
        return kjDict(X*self).Clean()
    def dump(self, seq):
        """Map each key in seq to its value; a bare value for len 1,
        otherwise a tuple of values."""
        result = map(None, seq)
        for i in range(len(result)):
            result[i] = self[result[i]]
        if len(seq) == 1:
            return result[0]
        return tuple(result)
    def __hash__(self): # should test better
        """in conformance with kjbuckets, permit unhashable keys"""
        if self.hashed is not None:
            return self.hashed
        items = self._pairs()
        for i in xrange(len(items)):
            (a,b) = items[i]
            try:
                b = hash(b)
            except:
                # unhashable values get a fixed stand-in hash
                b = 1877777
            items[i] = hash(a)^~b
        items.sort()
        result = self.hashed = hash(tuple(items))
        return result
    def __cmp__(self, other):
        """Order by arc count first, then by sorted pair lists."""
        #print "kjGraph.__cmp__"
        ls = len(self)
        lo = len(other)
        test = cmp(ls, lo)
        if test:
            return test
        si = self._pairs()
        oi = other._pairs()
        si.sort()
        oi.sort()
        return cmp(si, oi)
    def __nonzero__(self):
        if self.key_to_list: return 1
        return 0
    def __add__(self, other):
        """Union of arcs; result is a kjGraph."""
        result = kjGraph(self)
        rktl = result.key_to_list
        rtest = rktl.has_key
        result.dirty = self.dirty or other.dirty
        oc = other.__class__
        if oc is kjGraph:
            oktl = other.key_to_list
            for k in oktl.keys():
                l = oktl[k]
                if rtest(k):
                    rl = rktl[k]
                    for x in l:
                        if x not in rl:
                            rl.append(x)
                else:
                    rktl[k] = l[:]
        elif oc is kjSet or oc is kjDict:
            odict = other.dict
            for k in odict.keys():
                ov = odict[k]
                if rtest(k):
                    rl = rktl[k]
                    if ov not in rl:
                        rl.append(ov)
                else:
                    rktl[k] = [ov]
        else:
            raise ValueError, "kjGraph adds only with kjTable"
        return result
    __or__ = __add__
    def __sub__(self, other):
        """Arc difference; result is a kjGraph."""
        result = kjGraph()
        rktl = result.key_to_list
        sktl = self.key_to_list
        oc = other.__class__
        if oc is kjGraph:
            oktl = other.key_to_list
            otest = oktl.has_key
            for k in sktl.keys():
                l = sktl[k][:]
                if otest(k):
                    ol = oktl[k]
                    for x in ol:
                        if x in l:
                            l.remove(x)
                    if l:
                        rktl[k] = l
                else:
                    rktl[k] = l
        elif oc is kjSet or oc is kjDict:
            odict = other.dict
            otest = odict.has_key
            for k in sktl.keys():
                l = sktl[k][:]
                if otest(k):
                    ov = odict[k]
                    if ov in l:
                        l.remove(ov)
                if l:
                    rktl[k] = l
        else:
            raise ValueError, "kjGraph diffs only with kjTable"
        return result
    def __mul__(self, other):
        """Composition: (k -> z) when k -> v in self and v -> z in other."""
        result = kjGraph()
        rktl = result.key_to_list
        sktl = self.key_to_list
        oc = other.__class__
        if oc is kjGraph:
            oktl = other.key_to_list
            otest = other.has_key
            for sk in sktl.keys():
                sklist = []
                for sv in sktl[sk]:
                    if otest(sv):
                        sklist[0:0] = oktl[sv]
                if sklist:
                    rktl[sk] = sklist
        elif oc is kjSet or oc is kjDict:
            odict = other.dict
            otest = odict.has_key
            for sk in sktl.keys():
                sklist=[]
                for sv in sktl[sk]:
                    if otest(sv):
                        sklist.append(odict[sv])
                if sklist:
                    rktl[sk] = sklist
        else:
            raise ValueError, "kjGraph composes only with kjTable"
        return result
    def __invert__(self):
        """Reversed graph: arc v->k for every arc k->v."""
        result = self.__class__()
        pairs = self._pairs()
        for i in xrange(len(pairs)):
            (k,v) = pairs[i]
            pairs[i] = (v,k)
        result._setitems(pairs)
        result.dirty = self.dirty or result.dirty
        return result
    def __and__(self, other):
        """Arc intersection; result class follows the less general operand."""
        sktl = self.key_to_list
        oc = other.__class__
        if oc is kjGraph:
            result = kjGraph()
            rktl = result.key_to_list
            oktl = other.key_to_list
            otest = oktl.has_key
            for k in self.keys():
                if otest(k):
                    l = sktl[k]
                    ol = oktl[k]
                    rl = []
                    for x in l:
                        if x in ol:
                            rl.append(x)
                    if rl:
                        rktl[k] = rl
        elif oc is kjSet or oc is kjDict:
            result = oc() # less general!
            rdict = result.dict
            odict = other.dict
            stest = sktl.has_key
            for k in odict.keys():
                if stest(k):
                    v = odict[k]
                    l = sktl[k]
                    if v in l:
                        rdict[k] = v
        else:
            raise ValueError, "kjGraph intersects only with kjTable"
        result.dirty = self.dirty or other.dirty
        return result
    def __coerce__(self, other):
        return (self, other) # ?is this sufficient?
class kjDict(kjGraph):
    """Single-valued table: each key maps to exactly one value.

    Stored in `dict`; `dirty` is set whenever information is lost
    (e.g. a key rebound to a different value).
    """
    def __init__(self, *args):
        """Init from nothing, an int (ignored), a pair sequence, or a kjTable."""
        #print "kjDict.__init__", args
        self.hashed = None
        dict = self.dict = {}
        self.dirty = 0
        if not args: return
        if len(args)==1:
            from types import TupleType, ListType, IntType
            arg0 = args[0]
            targ0 = type(arg0)
            if targ0 is IntType: return
            if targ0 is ListType or targ0 is TupleType:
                otest = dict.has_key
                for (a,b) in arg0:
                    if otest(a):
                        # rebinding a key loses information
                        if dict[a]!=b:
                            self.dirty = 1
                    dict[a] = b
                return
            argc = arg0.__class__
            if argc is kjGraph:
                ktl = arg0.key_to_list
                for k in ktl.keys():
                    l = ktl[k]
                    if len(l)>1: self.dirty=1
                    for v in l:
                        dict[k] = v
                return
            if argc is kjSet or argc is kjDict:
                adict = arg0.dict
                for (k,v) in adict.items():
                    dict[k]=v
                return
        raise ValueError, "kjDict initializes only from list, tuple, kjTable, or int"
    def _setitems(self, thing):
        """Insert pairs from a sequence or kjTable; marks dirty on conflicts."""
        #print "kjDict._setitem", thing
        if self.hashed is not None:
            raise KeyError, "table hashed, cannot modify"
        dict = self.dict
        try:
            for (k,v) in thing:
                if dict.has_key(k) and dict[k]!=v:
                    self.dirty = 1
                dict[k] = v
        except:
            self._setitems(thing._pairs()) # maybe too tricky!
    def dump(self, dumper):
        """Map the keys in dumper to their values: bare value for a
        single key, else a tuple of values."""
        ld = len(dumper)
        if ld==1:
            return self.dict[dumper[0]]
        else:
            sdict = self.dict
            result = [None] * ld
            for i in xrange(ld):
                result[i] = sdict[ dumper[i] ]
            return tuple(result)
    def __setitem__(self, item, value):
        if self.hashed is not None:
            raise ValueError, "table has been hashed, it is immutable"
        d = self.dict
        if d.has_key(item):
            if d[item]!=value:
                self.dirty = 1
        self.dict[item]=value
    def __getitem__(self, item):
        return self.dict[item]
    def __delitem__(self, item):
        if self.hashed is not None:
            raise ValueError, "table has been hashed, it is immutable"
        self.dirty = 1
        del self.dict[item]
    def choose_key(self):
        """Return some key of the table (arbitrary)."""
        return self.dict.keys()[0]
    def __len__(self):
        return len(self.dict)
    def _pairs(self, justtot=0):
        if justtot: return len(self.dict)
        return self.dict.items()
    def values(self):
        return self.dict.values()
    def keys(self):
        return self.dict.keys()
    def items(self):
        return self.dict.items()
    def remap(self, X):
        """Return the composition X o self as a kjDict, or None if the
        composition is ambiguous/lossy."""
        if X.__class__ is kjGraph:
            if self.dirty or X.dirty: return None
            result = kjDict()
            resultd = result.dict
            selfd = self.dict
            inself = selfd.has_key
            inresult = resultd.has_key
            ktl = X.key_to_list
            for k in ktl.keys():
                for v in ktl[k]:
                    if inself(v):
                        map = selfd[v]
                        if inresult(k):
                            # conflicting images: remap undefined
                            if resultd[k]!=map:
                                return None
                        else:
                            resultd[k]=map
            return result
        else:
            return (kjDict(X*self)).Clean()
    def __cmp__(s,o):
        from types import InstanceType
        if type(o) is not InstanceType:
            return -1
        oc = o.__class__
        if oc is kjDict or oc is kjSet:
            return cmp(s.dict, o.dict)
        return kjGraph.__cmp__(s, o)
    def __hash__(s):
        h = s.hashed
        if h is not None: return h
        return kjGraph.__hash__(s)
    def __add__(s,o):
        """Union; conflicting bindings keep s's value and mark dirty."""
        oc = o.__class__
        if oc is kjDict or oc is kjSet:
            result = kjDict()
            result.dirty = s.dirty or o.dirty
            rdict = result.dict
            rtest = result.has_key
            sdict = s.dict
            for k in sdict.keys():
                rdict[k] = sdict[k]
            odict = o.dict
            for k in odict.keys():
                if rtest(k):
                    if rdict[k]!=odict[k]:
                        result.dirty=1
                else:
                    rdict[k] = odict[k]
            return result
        if oc is kjGraph:
            return kjGraph.__add__(o,s)
        else:
            raise ValueError, "kjDict unions only with kjTable"
    __or__ = __add__
    def __and__(s,o):
        """Intersection of exact (key, value) bindings."""
        oc = o.__class__
        if oc is kjDict or oc is kjSet:
            result = oc()
            result.dirty = s.dirty or o.dirty
            rdict = result.dict
            odict = o.dict
            sdict = s.dict
            stest = sdict.has_key
            for k in odict.keys():
                v = odict[k]
                if stest(k) and sdict[k]==v:
                    rdict[k] = v
            return result
        elif oc is kjGraph:
            return kjGraph.__and__(o,s)
    def __sub__(s,o):
        """Bindings of s not present (with the same value) in o."""
        oc = o.__class__
        result = kjDict()
        result.dirty = s.dirty or o.dirty
        sdict = s.dict
        rdict = result.dict
        if oc is kjDict:
            odict = o.dict
            otest = odict.has_key
            for k in sdict.keys():
                v = sdict[k]
                if otest(k):
                    if odict[k]!=v:
                        rdict[k] = v
                else:
                    rdict[k] = v
            return result
        if oc is kjGraph:
            oktl = o.key_to_list
            otest = oktl.has_key
            for k in sdict.keys():
                v = sdict[k]
                if otest(k):
                    if v not in oktl[k]:
                        rdict[k] = v
                else:
                    rdict[k] = v
            return result
        raise ValueError, "kjDict only diffs with kjGraph, kjDict"
    def __mul__(s,o):
        """Composition: k -> o[s[k]] where defined."""
        oc = o.__class__
        sdict = s.dict
        if oc is kjDict or oc is kjSet:
            result = kjDict()
            result.dirty = s.dirty or o.dirty
            rdict = result.dict
            odict = o.dict
            otest = odict.has_key
            for k in sdict.keys():
                kv = sdict[k]
                if otest(kv):
                    rdict[k] = odict[kv]
            return result
        elif oc is kjGraph:
            return kjGraph(s) * o
        else:
            raise ValueError, "kjDict only composes with kjTable"
    def member(self, k, v):
        """Test whether the binding k->v is in the table."""
        d = self.dict
        try:
            return d[k] == v
        except:
            return 0
    _member = member
    def delete_arc(self, k, v):
        """Remove binding k->v; KeyError if k is absent or bound elsewhere."""
        if self.dict[k] == v:
            del self.dict[k]
        else:
            raise KeyError, "pair not in table"
    def has_key(self, k):
        return self.dict.has_key(k)
    def neighbors(self, k):
        """[value of k] or [] when k is absent."""
        try:
            return [ self.dict[k] ]
        except: return []
    def reachable(self, k):
        """kjSet of nodes reachable from k by following bindings (cycle safe)."""
        result = {}
        d = self.dict
        try:
            while 1:
                next = d[k]
                if result.has_key(next): break
                result[next] = 1
                k = next
        except KeyError:
            pass
        return kjSet(result.keys())
    def __invert__(self):
        """Inverted kjDict; dirty if two keys shared a value."""
        result = kjDict()
        dr = result.dict
        drtest = dr.has_key
        ds = self.dict
        for (a,b) in ds.items():
            if drtest(b):
                result.dirty=1
            dr[b]=a
        result.dirty = self.dirty or result.dirty
        return result
    def __nonzero__(self):
        if self.dict: return 1
        return 0
    def subset(s, o):
        """Test that every binding of s appears in o."""
        oc = o.__class__
        sdict = s.dict
        if oc is kjDict or oc is kjSet:
            odict = o.dict
            otest = odict.has_key
            for k in sdict.keys():
                v = sdict[k]
                if otest(k):
                    if odict[k]!=v:
                        return 0
                else:
                    return 0
        elif oc is kjGraph:
            oktl = o.key_to_list
            otest = oktl.has_key
            for k in sdict.keys():
                v = sdict[k]
                if otest(k):
                    if v not in oktl[k]:
                        return 0
                else:
                    return 0
        else:
            raise ValueError, "kjDict subset test only for kjTable"
        return 1
    def add(s, k, v):
        """Bind k->v in place; marks dirty if k was bound to another value."""
        if s.hashed is not None:
            raise ValueError, "table has been hashed, immutable"
        sdict = s.dict
        if sdict.has_key(k):
            if sdict[k]!=v:
                s.dirty = 1
        sdict[k] = v
class kjSet(kjDict):
    """Set of elements, represented as a kjDict where each element
    maps to itself."""
    def __init__(self, *args):
        """Init from nothing, an int (ignored), a sequence of elements,
        or a kjTable (whose keys become the elements)."""
        #print "kjSet.__init__", args
        # usual cases first
        dict = self.dict = {}
        self.hashed = None
        self.dirty = 0
        largs = len(args)
        if largs<1: return
        if largs>1:
            raise ValueError, "at most one argument supported"
        from types import IntType, TupleType, ListType
        arg0 = args[0]
        targ0 = type(arg0)
        if targ0 is IntType: return
        if targ0 is TupleType or targ0 is ListType:
            for x in arg0:
                dict[x] = x
            return
        argc = arg0.__class__
        if argc is kjDict or argc is kjSet:
            stuff = arg0.dict.keys()
        elif argc is kjGraph:
            stuff = arg0.key_to_list.keys()
        else:
            raise ValueError, "kjSet from kjTable, int, list, tuple only"
        for x in stuff:
            dict[x] = x
    def __add__(s,o):
        """Set union (falls back to kjDict/kjGraph union for mixed operands)."""
        oc = o.__class__
        if oc is kjSet:
            result = kjSet()
            result.dirty = s.dirty or o.dirty
            rdict = result.dict
            for x in s.dict.keys():
                rdict[x]=x
            for x in o.dict.keys():
                rdict[x]=x
            return result
        elif oc is kjDict:
            return kjDict.__add__(o,s)
        elif oc is kjGraph:
            return kjGraph.__add__(o,s)
    __or__ = __add__
    def __sub__(s,o):
        """Set difference (falls back to kjDict difference for mixed operands)."""
        if o.__class__ is kjSet:
            result = kjSet()
            result.dirty = s.dirty or o.dirty
            rdict = result.dict
            otest = o.dict.has_key
            for x in s.dict.keys():
                if not otest(x):
                    rdict[x] = x
            return result
        else:
            return kjDict.__sub__(s,o)
    def __and__(s,o):
        """Set intersection."""
        oc = o.__class__
        if oc is kjSet or oc is kjDict:
            result = kjSet()
            result.dirty = s.dirty or o.dirty
            rdict = result.dict
            odict = o.dict
            otest = odict.has_key
            for x in s.dict.keys():
                if otest(x) and odict[x]==x:
                    rdict[x] = x
            return result
        elif oc is kjGraph:
            return kjGraph.__and__(o,s)
        raise ValueError, "kjSet only intersects with kjTable"
    # illegal methods
    values = keys = remap = None
    def __repr__(self):
        return "kjSet(%s)" % self.items()
    def _setelts(self, items):
        """Insert elements from a plain sequence or the keys of a kjTable."""
        #print "kjSet.setelts", items
        try:
            items = items._pairs()
        except:
            # plain sequence of elements
            items = list(items)
            for i in xrange(len(items)):
                items[i] = (items[i], items[i])
            self._setitems(items)
        else:
            # kjTable: keep only the keys
            items = list(items)
            for i in xrange(len(items)):
                items[i] = (items[i][0], items[i][0])
            self._setitems(items)
        # hack!
        #D = self.dict
        #for x in D.keys():
        # D[x] = x
    def _pairs(self, justtot=0):
        if justtot: return kjDict._pairs(self, justtot=1)
        pairs = kjDict.keys(self)
        for i in xrange(len(pairs)):
            pairs[i] = (pairs[i], pairs[i])
        return pairs
    member = kjDict.has_key
    items = kjDict.keys
    #def neighbors(self, x):
    # raise ValueError, "operation on kjSet undefined"
    #reachable = neighbors
    def __getitem__(self, item):
        # membership lookup: 1 if present, KeyError otherwise
        test = self.dict.has_key(item)
        if test: return 1
        raise KeyError, "item not in set"
    def __setitem__(self, item, ignore):
        d = self.dict
        if self.hashed:
            raise ValueError, "table hashed, immutable"
        d[item] = item
    def add(self, elt):
        """Insert elt into the set."""
        if self.hashed:
            raise ValueError, "table hashed, immutable"
        self.dict[elt] = elt
    def __mul__(s,o):
        # set*set is intersection; otherwise compose as a kjDict
        oc = o.__class__
        if oc is kjSet:
            return s.__and__(o)
        else:
            return kjDict.__mul__(s, o)
def more_general(t1, t2):
    """Return the more general class of the two tables
    (kjGraph > kjDict > kjSet).

    Uses the Python-2 trick of raising an instance and dispatching on
    its class via the except clauses.
    """
    try:
        raise t1
    except kjSet:
        try:
            raise t2
        except (kjGraph, kjDict, kjSet):
            return t2.__class__
    except kjDict:
        try:
            raise t2
        except kjSet:
            return t1.__class__
        except (kjDict, kjGraph):
            return t2.__class__
    except kjGraph:
        return t1.__class__
    except:
        raise ValueError, "cannot coerce, not kjtable"
def less_general(t1,t2):
    """Return the less general class of the two tables
    (kjSet < kjDict < kjGraph); same raise-dispatch trick as more_general.
    """
    try:
        raise t1
    except kjSet:
        return t1.__class__
    except kjDict:
        try:
            raise t2
        except kjSet:
            return t2.__class__
        except (kjDict, kjGraph):
            return t1.__class__
    except kjGraph:
        return t2.__class__
    except:
        raise ValueError, "cannot coerce, not kjtable"
def kjUndump(t1, t2):
    """Inverse of dump(): build a kjDict binding the names in t1 to the
    value(s) in t2 (t2 is a bare value when len(t1) == 1, otherwise a
    parallel sequence of values)."""
    result = kjDict()
    rdict = result.dict
    count = len(t1)
    if count == 1:
        # single attribute: t2 is the value itself
        rdict[t1[0]] = t2
    else:
        # tightly bound to implementation
        for position in range(count):
            rdict[t1[position]] = t2[position]
    return result
def test():
    """Ad-hoc regression exercise of kjSet, kjDict and kjGraph
    (Python 2 only: uses print statements, cmp and string exceptions)."""
    global S, D, G
    G = kjGraph()
    r3 = range(3)
    r = map(None, r3, r3)
    for i in range(3):
        G[i] = i+1
    D = kjDict(G)
    D[9]=0
    G[0]=10
    S = kjSet(G)
    S[-1] = 5
    print "%s.remap(%s) = %s" % (D, G, D.remap(G))
    print "init test"
    for X in (S, D, G, r, tuple(r), 1):
        print "ARG", X
        for C in (kjGraph, kjSet, kjDict):
            print "AS", C
            T = C(X)
            T2 = C()
            print X, T, T2
    ALL = (S, D, G)
    for X in ALL:
        print "X=", X
        print "key", X.choose_key()
        print "len", len(X)
        print "items", X.items()
        print X, "Clean before", X.Clean()
        del X[2]
        print X, "Clean after", X.Clean()
        if not X.subset(X):
            raise "trivial subset fails", X
        if not X==X:
            raise "trivial cmp fails", X
        if not X:
            raise "nonzero fails", X
        if X is S:
            if not S.member(0):
                raise "huh 1?"
            if S.member(123):
                raise "huh 2?", S
            S.add(999)
            del S[1]
            if not S.has_key(999):
                raise "huh 3?", S
        else:
            print "values", X.values()
            print "keys", X.keys()
            print X, "inverted", ~X
            if not X.member(0,1):
                raise "member test fails (0,1)", X
            print "adding to", X
            X.add(999,888)
            print "added", X
            X.delete_arc(999,888)
            print "deleted", X
            if X.member(999,888):
                raise "member test fails (999,888)", X
            if X.has_key(999):
                raise "has_key fails 999", X
            if not X.has_key(0):
                raise "has_key fails 0", X
        for Y in ALL:
            print "Y", Y
            if (X!=S and Y!=S):
                print "diff", X, Y
                print "%s-%s=%s" % (X,Y,X-Y)
            elif X==S:
                D = kjSet(Y)
                print "diff", X, D
                print "%s-%s=%s" % (X,D,X-D)
            print "%s+%s=%s" % (X,Y,X+Y)
            print "%s&%s=%s" % (X,Y,X&Y)
            print "%s*%s=%s" % (X,Y,X*Y)
            x,y = cmp(X,Y), cmp(Y,X)
            if x!=-y: raise "bad cmp!", (X, Y)
            print "cmp(X,Y), -cmp(Y,X)", x,-y
            print "X.subset(Y)", X.subset(Y)
def NewSet(Sequence):
    """Build a set-as-dict: each element of Sequence maps to 1."""
    return dict.fromkeys(Sequence, 1)
def Empty(Set):
    """Return 1 when the set-dict has no members, 0 otherwise."""
    if Set == {}:
        return 1
    return 0
def get_elts(Set):
    """Return the members of the set-dict as a list."""
    return list(Set.keys())
def member(Elt,Set):
    """Return true when Elt is a member of the set-dict.

    Uses the `in` operator instead of the deprecated (and Py3-removed)
    dict.has_key.
    """
    return Elt in Set
# in place mutators:
# returns if no change otherwise 1
def addMember(Elt,Set):
    """Insert Elt into the set-dict in place.

    Returns 1 if the set changed, 0 if Elt was already present.
    (`in` replaces the deprecated dict.has_key.)
    """
    if Elt in Set:
        return 0
    Set[Elt] = 1
    return 1
def Augment(Set, OtherSet):
    """In-place union: add OtherSet's members to Set.

    Returns 1 if Set changed, else 0.
    (`in` replaces the deprecated dict.has_key.)
    """
    change = 0
    for Elt in OtherSet:
        if Elt not in Set:
            Set[Elt] = 1
            change = 1
    return change
def Mask(Set, OtherSet):
    """In-place difference: remove OtherSet's members from Set.

    Returns 1 if Set changed, else 0.  Iterates a snapshot of OtherSet's
    keys so that Mask(S, S) stays safe while deleting.
    """
    change = 0
    for Elt in list(OtherSet):
        if Elt in Set:
            del Set[Elt]
            change = 1
    return change
# side effect free functions
def Intersection(Set1, Set2):
    """Return a new set-dict of the members in both Set1 and Set2.

    Side-effect free.  (`in` replaces the deprecated dict.has_key.)
    """
    Result = {}
    for Elt in Set1:
        if Elt in Set2:
            Result[Elt] = 1
    return Result
def Difference(Set1, Set2):
    """Return a new set-dict of the members of Set1 not in Set2.

    Side-effect free.  (`in` replaces the deprecated dict.has_key.)
    """
    Result = {}
    for Elt in Set1:
        if Elt not in Set2:
            Result[Elt] = 1
    return Result
def Union(Set1,Set2):
    """Return a new set-dict holding every member of Set1 and Set2."""
    Result = {}
    for operand in (Set1, Set2):
        Augment(Result, operand)
    return Result
def Subset(Set1,Set2):
    """Return 1 if every member of Set1 is in Set2, else 0.

    The original short-circuited through a `Result` variable and carried
    a duplicated, unreachable `return Result`; this keeps the same
    early-exit behavior without the dead code.
    (`in` replaces the deprecated dict.has_key.)
    """
    for Elt in Set1:
        if Elt not in Set2:
            return 0
    return 1
def Same(Set1,Set2):
    """Return 1 when both set-dicts have exactly the same members."""
    if Subset(Set1, Set2) and Subset(Set2, Set1):
        return 1
    return 0
# directed graphs as Dictionaries of Sets
# also only works for immutable nodes
def NewDG(pairlist):
    """Build a directed graph (dict of set-dicts) from (source, dest) pairs.

    Only works for immutable nodes.
    """
    Result = {}
    for (origin, target) in pairlist:
        AddArc(Result, origin, target)
    return Result
def GetPairs(Graph):
    """Return the list of all (source, dest) arcs in Graph.

    The original rebuilt `result` with repeated list concatenation
    (quadratic); appending pairs directly is linear and produces the
    same pairs in the same order.
    """
    result = []
    for S in Graph.keys():
        for D in Graph[S].keys():
            result.append((S, D))
    return result
def AddArc(Graph, Source, Dest):
    """Add the arc Source->Dest to Graph in place.

    Returns 1 if the graph changed, 0 if the arc already existed.
    The original tested membership with member() and then re-tested it
    inside addMember(); a single `in` test suffices, and `{Dest: 1}` is
    exactly NewSet([Dest]).
    """
    if Source in Graph:
        Adjacent = Graph[Source]
        if Dest in Adjacent:
            return 0
        Adjacent[Dest] = 1
        return 1
    Graph[Source] = {Dest: 1}
    return 1
def Neighbors(Graph,Source):
    """Return the list of destinations one arc away from Source
    ([] when Source has no outgoing arcs).

    (`in` replaces the deprecated dict.has_key; the adjacency set's keys
    are exactly what get_elts returned.)
    """
    if Source in Graph:
        return list(Graph[Source].keys())
    return []
def HasArc(Graph, Source, Dest):
    """Return 1 when arc Source->Dest exists in Graph, else 0.

    (`in` replaces the deprecated dict.has_key / member() pair.)
    """
    if Source in Graph and Dest in Graph[Source]:
        return 1
    return 0
def Sources(Graph):
    """Return the nodes that have outgoing arcs."""
    return [node for node in Graph]
# when G1, G2 and G3 are different graphs this results in
# G1 = G1 U ( G2 o G3 )
# If G1 is identical to one of G2,G3 the result is somewhat
# nondeterministic (depends on dictionary implementation).
# However, guaranteed that AddComposition(G,G,G) returns
# G1 U (G1 o G1) <= G <= TC(G1)
# where G1 is G's original value and TC(G1) is its transitive closure
# hence this function can be used for brute force transitive closure
#
def AddComposition(G1, G2, G3):
    """In place: G1 = G1 U (G2 o G3); return 1 if G1 changed, else 0.

    See the header comment above: when G1 is one of G2/G3 the exact
    result is nondeterministic but bounded between G1 U (G1 o G1) and
    the transitive closure of G1.
    """
    change = 0
    for G2Source in Sources(G2):
        for Middle in Neighbors(G2,G2Source):
            for G3Dest in Neighbors(G3, Middle):
                if not HasArc(G1, G2Source, G3Dest):
                    change = 1
                    AddArc(G1, G2Source, G3Dest)
    return change
# in place transitive closure of a graph
def TransClose(Graph):
    """In-place transitive closure of Graph (brute force, via repeated
    AddComposition).  Returns 1 if the graph changed at all, else 0.

    The original's `if not somechange: somechange = change` inside the
    loop was unreachable: the loop only runs when the first composition
    reported a change, so somechange was already true.  Removed.
    """
    change = AddComposition(Graph, Graph, Graph)
    somechange = change
    while change:
        change = AddComposition(Graph, Graph, Graph)
    return somechange
########### SQueue stuff
#
# A GrabBag should be used to hold objects temporarily for future
# use. You can put things in and take them out, with autodelete
# that's all!
# make a new baggy with nothing in it
# BG[0] is insert cursor BG[1] is delete cursor, others are elts
#
# grab-bag bookkeeping slots: B[NEW] is the insert cursor, B[OLD] the
# delete cursor; element slots start at index START.
OLD = 1
NEW = 0
START = 2
def NewBG():
    """Return a fresh grab-bag: a list whose insert/delete cursors both
    point at START (empty)."""
    B = [None]*8 #default size
    B[OLD] = START
    B[NEW] = START
    return B
def BGempty(B):
    """True iff the grab-bag holds no elements."""
    # other ops must maintain this: old == new iff empty
    return B[OLD] == B[NEW]
# may return new, larger structure
# must be used with assignment... B = BGadd(e,B)
def BGadd(elt, B):
    """Insert elt into grab-bag B; may return a new, larger list, so it
    must be used with assignment: B = BGadd(e, B).

    Elements are stored boxed as 1-tuples so that None can be stored.
    """
    cursor = B[NEW]
    oldlen = len(B)
    # look for an available position
    while B[cursor] != None:
        cursor = cursor+1
        if cursor >= oldlen: cursor = START
        if cursor == B[NEW]: #back to beginning
            break
    # resize if wrapped
    if B[cursor] != None:
        B = B + [None] * oldlen
        cursor = oldlen
        B[OLD] = START
    if B[cursor] != None:
        raise IndexError, "can't insert?"
    # add the elt
    B[cursor] = (elt,)
    B[NEW] = cursor
    # B nonempty so OLD and NEW should differ.
    if B[OLD] == cursor:
        B[NEW] = cursor + 1
        # NOTE(review): cursor+1 <= len(B) holds for every valid index,
        # so B[NEW] is always reset to START here; `>=` (wrap on
        # overflow) was probably intended.  Confirm before changing.
        if B[NEW]<=len(B): B[NEW] = START
    return B
def BGgetdel(B):
    """Remove and return some element of grab-bag B.

    Raises IndexError when the bag is empty.  Maintains the invariant
    that B[OLD] == B[NEW] iff the bag is empty.
    """
    # find something to delete:
    cursor = B[OLD]
    blen = len(B)
    while B[cursor]==None:
        cursor = cursor+1
        if cursor>=blen: cursor = START
        if cursor == B[OLD]: break # wrapped
    if B[cursor] == None:
        raise IndexError, "delete from empty grabbag(?)"
    # test to see if bag is empty (position cursor2 at nonempty slot)
    cursor2 = cursor+1
    if cursor2>=blen: cursor2 = START
    while B[cursor2]==None:
        cursor2 = cursor2+1
        if cursor2>=blen: cursor2 = START
        # since B[cursor] not yet deleted while will terminate
    # get and delete the elt
    (result,) = B[cursor]
    B[cursor] = None
    # cursor == cursor2 iff bag is empty
    B[OLD] = cursor2
    if B[NEW] == cursor2: B[NEW] = cursor
    return result
def BGtest(n):
    """Smoke test: interleave n*n insertions with deletions and print
    progress; returns the final bag."""
    B = NewBG()
    rn = range(n)
    rn2 = range(n-2)
    for i in rn:
        for j in rn:
            B = BGadd( (i,j), B)
            B = BGadd( (j,i), B)
            x = BGgetdel(B)
        for j in rn2:
            y = BGgetdel(B)
        print (i, x, y)
    return B
from select import select
# responses
# status tags marshalled as the first element of every server reply
SUCCESS = "SUCCESS"
EXCEPTION = "EXCEPTION"
def reply_exception(exception, info, socket):
    """send an exception back to the client"""
    # any error is invisible to client
    from gfserve import ServerError
    import sys
    try:
        reply( (EXCEPTION, (exception, info)), socket)
    except:
        # best effort: if the reply itself fails, just drop the connection
        #info = "%s %s" % (sys.exc_type, sys.exc_value)
        socket.close()
        #raise ServerError, "reply_exception failed: "+`info`
def reply_success(data, socket):
    """report success with data back to client"""
    # wraps data with the SUCCESS tag understood by interpret_response
    reply( (SUCCESS, data), socket)
def reply(data, socket):
    """Marshal data, ship it to the client as one length-marked packet,
    then close the socket."""
    from marshal import dumps
    payload = dumps(data)
    send_packet(socket, payload)
    socket.close()
def send_packet(socket, data):
    """blast out a length marked packet"""
    # wire format: decimal length + "\n", then the raw payload bytes
    send_len(data, socket)
    socket.send(data)
def send_len(data, socket):
    """Send the length of data as a newline-terminated decimal string.

    This is the prefix Packet_Reader parses to learn how many payload
    bytes follow.  The original built the string with Python-2-only
    backquote repr; "%d" produces the identical text.
    """
    socket.send("%d\n" % len(data))
def send_certified_action(actor_name, action, arguments, password, socket):
    """Marshal and send (actor_name, certificate, marshalled (action, arguments)).

    The certificate is a password-keyed digest of the marshalled action;
    the server checks it with certify().
    """
    from marshal import dumps
    marshaldata = dumps( (action, arguments) )
    cert = certificate(marshaldata, password)
    #print actor_name, cert, marshaldata
    marshaldata = dumps( (actor_name, cert, marshaldata) )
    send_packet(socket, marshaldata)
def unpack_certified_data(data):
    """Unmarshal a certified packet into (actor_name, certificate,
    marshaldata); the triple-unpacking doubles as a sanity check on the
    packet's shape."""
    from marshal import loads
    triple = (actor_name, cert, marshaldata) = loads(data)
    return triple
def recv_data(socket, timeout=10):
    """receive data or time out"""
    # polls the socket with select() and feeds a Packet_Reader until a
    # complete length-marked packet has arrived or `timeout` seconds pass
    from time import time
    endtime = time() + timeout
    reader = Packet_Reader(socket)
    done = 0
    while not done:
        timeout = endtime - time()
        if timeout<0:
            raise IOError, "socket time out (1)"
        (readable, dummy, error) = select([socket], [], [socket], timeout)
        if error:
            raise IOError, "socket in error state"
        if not readable:
            raise IOError, "socket time out (2)"
        reader.poll()
        done = (reader.mode==READY)
    return reader.data
def interpret_response(data):
    """interpret response data, raise exception if needed"""
    # data is a marshalled (indicator, payload) pair from reply()
    from marshal import loads
    (indicator, data) = loads(data)
    if indicator==SUCCESS:
        return data
    elif indicator==EXCEPTION:
        # ???
        # raises the EXCEPTION tag as a Python-2 "string exception"
        raise EXCEPTION, data
    else:
        raise ValueError, "unknown indicator: "+`indicator`
# packet reader modes
LEN = "LEN"        # reading the decimal length prefix
DATA = "DATA"      # reading payload bytes
READY = "READY"    # complete packet available in .data
ERROR = "ERROR"    # socket failed; reader unusable
# NOTE(review): 4028 looks like a typo for 4096, but is harmless
BLOCK_SIZE = 4028
LEN_LIMIT = BLOCK_SIZE * 10
class Packet_Reader:
    """nonblocking pseudo-packet reader."""
    # packets come in as decimal_len\ndata
    # (note: cr! not crlf)
    # kick too large requests if set
    limit_len = LEN_LIMIT
    def __init__(self, socket):
        # state machine: mode goes LEN -> DATA -> READY (or ERROR)
        self.socket = socket
        self.length = None
        self.length_remaining = None
        self.len_list = []
        self.data_list = []
        self.received = ""
        self.data = None
        self.mode = LEN
    def __len__(self):
        """Declared packet length; error until the prefix has been read."""
        if self.mode is LEN:
            raise ValueError, "still reading length"
        return self.length
    def get_data(self):
        """The complete payload; error until mode is READY."""
        if self.mode is not READY:
            raise ValueError, "still reading"
        return self.data
    def poll(self):
        """Advance the reader one step if the socket is readable."""
        mode = self.mode
        if mode is READY:
            raise ValueError, "data is ready"
        if mode is ERROR:
            raise ValueError, "socket error previously detected"
        socket = self.socket
        (readable, dummy, error) = select([socket], [], [socket], 0)
        if error:
            self.socket.close()
            self.mode = ERROR
            raise ValueError, "socket is in error state"
        if readable:
            if mode is LEN:
                self.read_len()
                # note: do not fall thru automatically
            elif mode is DATA:
                self.read_data()
    def read_len(self):
        """assume socket is readable now, read length"""
        socket = self.socket
        received = self.received
        len_list = self.len_list
        if not received:
            # 10 bytes at a time until len is read.
            received = socket.recv(10)
        while received:
            # consume, test one char
            input = received[0]
            received = received[1:]
            if input == "\n":
                # done reading length
                from string import join, atoi
                try:
                    length = self.length = atoi(join(len_list, ""))
                except:
                    self.mode = ERROR
                    socket.close()
                    raise ValueError, "bad len string? "+`len_list`
                self.received = received
                self.length_remaining = length
                self.mode = DATA
                limit_len = self.limit_len
                if limit_len and length>limit_len:
                    raise ValueError, "Length too big: "+`(length, limit_len)`
                return
            if len(len_list)>10:
                self.mode = ERROR
                socket.close()
                raise ValueError, "len_list too long: "+`len_list`
            len_list.append(input)
            if not received:
                (readable, dummy, error) = select(\
                   [socket], [], [socket], 0)
                if error:
                    self.mode = ERROR
                    socket.close()
                    raise ValueError, "socket in error state"
                if readable:
                    received = socket.recv(10)
        # remember extra data received.
        self.received = received
    def read_data(self):
        # assume socket is readable
        socket = self.socket
        received = self.received
        length_remaining = self.length_remaining
        data_list = self.data_list
        if received:
            # leftover bytes from the length read count toward the payload
            data_list.append(received)
            self.received = ""
            length_remaining = length_remaining - len(received)
        recv_len = max(length_remaining, BLOCK_SIZE)
        received = socket.recv(recv_len)
        if received:
            data_list.append(received)
            length_remaining = length_remaining - len(received)
        if length_remaining<1:
            # full payload collected: assemble and mark ready
            self.mode = READY
            from string import join
            self.data = join(data_list, "")
        self.length_remaining = length_remaining
def certificate(String, password):
    """generate a certificate for a string, using a password

    The certificate is the MD5 digest of password + String.  Uses
    hashlib (available since Python 2.5) instead of the deprecated and
    since-removed `md5` module; the digest is identical.
    """
    from hashlib import md5
    if not String:
        raise ValueError("cannot generate certificate for empty string")
    taggedstring = password + String
    return md5(taggedstring).digest()
def certify(String, cert, password):
    """Check that cert is the valid certificate for String under password."""
    expected = certificate(String, password)
    return expected == cert
import sqlsem
# ordering of ddef storage is important so, eg, index defs
# follow table defs.
class Ordered_DDF:
    """mixin for DDF statement sorting, subclass defines s.cmp(o)

    Orders data-definition statements for archiving so that, e.g.,
    index definitions follow the table definitions they depend on
    (ordering driven by the module-level ddf_order list).
    """
    def __cmp__(self, other):
        # compare first by position in ddf_order, then by subclass cmp()
        try:
            #print "comparing", self.name, other.name
            try:
                sc = self.__class__
                oc = other.__class__
                #print sc, oc
            except:
                # other has no class at all: sort self first
                #print "punting 1", -1
                return -1
            if sc in ddf_order and oc in ddf_order:
                test = cmp(ddf_order.index(sc), ddf_order.index(oc))
                #print "ddforder", test
                if test: return test
                return self.cmp(other)
            else:
                test = cmp(sc, oc)
                #print "punting 2", test
                return test
        except:
            # deliberately never raise out of a comparison
            #import sys
            #print "exception!"
            #print sys.exc_type, sys.exc_value
            return -1
    def __coerce__(self, other):
        # Python 2 coercion hook: allow cmp() against arbitrary objects
        return (self, other)
    def cmp(self, other):
        """redefine if no name field"""
        return cmp(self.name, other.name)
CTFMT = """\
CREATE TABLE %s (
%s
)"""
class CreateTable(Ordered_DDF):
    """create table operation"""
    def __init__(self, name, colelts):
        # name: relation name; colelts: list of ColumnDef elements
        self.name = name
        self.colelts = colelts
        self.indb = None # db in which to create
    def initargs(self):
        # constructor args for serialization; colelts travel via marshaldata
        return (self.name, [])
    def marshaldata(self):
        """Serialize the column definitions for archiving."""
        from sqlsem import serialize
        return map(serialize, self.colelts)
    def demarshal(self, args):
        """Restore column definitions from serialized form."""
        from sqlsem import deserialize
        self.colelts = map(deserialize, args)
    def __repr__(self):
        from string import join
        elts = list(self.colelts)
        elts = map(repr, elts)
        return CTFMT % (self.name, join(elts, ",\n   "))
    def relbind(self, db):
        """check that table doesn't already exist"""
        if db.has_relation(self.name):
            raise NameError, "cannot create %s, exists" % (self.name,)
        self.indb = db
        return self
    def eval(self, dyn=None):
        "create the relation now"
        # datatypes currently happily ignored :)
        db = self.indb
        if db is None:
            raise ValueError, "unbound or executed"
        self.indb = None
        name = self.name
        # re-check: db may have changed since relbind
        if db.has_relation(self.name):
            raise NameError, "relation %s exists, cannot create" % (self.name,)
        db.touched = 1
        attnames = []
        for x in self.colelts:
            attnames.append(x.colid)
        from gfdb0 import Relation0
        r = Relation0(attnames)
        # must store if new (unset for reloads)
        r.touched = 1
        db[name] = r
        db.add_datadef(name, self)
        log = db.log
        if log is not None:
            log.log(self)
viewfmt = """\
CREATE VIEW %s (%s) AS
%s"""
class CreateView(sqlsem.SimpleRecursive, Ordered_DDF):
    """CREATE VIEW name (namelist) AS selection"""
    # note: no check for cross-references on drops!
    def __init__(self, name, namelist, selection):
        # namelist may be None (optional column-name list in the grammar)
        self.name = name
        self.namelist = namelist
        self.selection = selection
        self.indb = None
    def __repr__(self):
        return viewfmt % (self.name, self.namelist, self.selection)
    def initargs(self):
        return (self.name, self.namelist, self.selection)
    def relbind(self, db):
        """Reserve the name; the selection itself is bound lazily."""
        self.indb = db
        name = self.name
        if db.has_datadef(name):
            raise NameError, "(view) datadef %s exists" % name
        # don't bind the selection yet
        return self
    def eval(self, dyn=None):
        "create the view"
        db = self.indb
        name = self.name
        if db is None:
            raise ValueError, "create view %s unbound or executed" % name
        self.indb = None
        if db.has_relation(name):
            raise ValueError, "create view %s, name exists" % name
        db.touched = 1
        from gfdb0 import View
        v = View(self.name, self.namelist, self.selection, db)
        db[name] = v
        db.add_datadef(name, self)
        log = db.log
        if log is not None:
            log.log(self)
CREATEINDEXFMT = """\
CREATE %sINDEX %s ON %s (
%s
)"""
class CreateIndex(sqlsem.SimpleRecursive, Ordered_DDF):
    """create index operation"""
    def __init__(self, name, tablename, atts, unique=0):
        # atts: list of attribute names to index on tablename
        self.name = name
        self.tablename=tablename
        self.atts = atts
        self.indb = None
        self.target = None
        self.unique = unique
    def initargs(self):
        return (self.name, self.tablename, self.atts, self.unique)
    def __cmp__(self, other):
        # indices must archive after all the tables they may reference
        oc = other.__class__
        if oc is CreateTable:
            return 1 # after all create tables
        sc = self.__class__
        if oc is not sc:
            return cmp(sc, oc)
        else:
            return cmp(self.name, other.name)
    def __coerce__(self, other):
        # Python 2 coercion hook: allow cmp() against arbitrary objects
        return (self, other)
    def __repr__(self):
        from string import join
        un = ""
        if self.unique: un="UNIQUE "
        innards = join(self.atts, ",\n   ")
        return CREATEINDEXFMT % (un, self.name, self.tablename, innards)
    def relbind(self, db):
        """Reserve the index name and locate the target relation."""
        name = self.name
        self.indb = db
        if db.has_datadef(name):
            raise NameError, `name`+": data def exists"
        try:
            self.target = db.get_for_update(self.tablename) #db[self.tablename]
        except:
            # NOTE(review): broad except assumed to mean "no such name"
            raise NameError, `self.tablename`+": no such relation"
        return self
    def eval(self, dyn=None):
        """Build the index, attach it to the relation, and register it."""
        from gfdb0 import Index
        db = self.indb
        if db is None:
            raise ValueError, "create index unbound or executed"
        self.indb = None
        rel = self.target
        if rel is None:
            raise ValueError, "create index not bound to relation"
        db.touched = 1
        self.the_index = the_index = Index(self.name, self.atts, unique=self.unique)
        rel.add_index(the_index)
        name = self.name
        db.add_datadef(name, self)
        db.add_index(name, the_index)
        log = db.log
        if log is not None:
            log.log(self)
class DropIndex(sqlsem.SimpleRecursive):
    """DROP INDEX name"""
    def __init__(self, name):
        self.name = name
        self.indb = None
    def initargs(self):
        return (self.name,)
    def __repr__(self):
        return "DROP INDEX %s" % (self.name,)
    def relbind(self, db):
        """Verify the named index definition exists."""
        self.indb = db
        if not db.has_datadef(self.name):
            raise NameError, `self.name`+": no such index"
        return self
    def eval(self, dyn=None):
        """Detach the index from its relation and drop its definitions."""
        db = self.indb
        self.indb=None
        if db is None:
            raise ValueError, "drop index executed or unbound"
        db.touched = 1
        indexname = self.name
        # recover the live Index object via the stored CreateIndex datadef
        createindex = db.datadefs[indexname]
        index = createindex.the_index
        relname = createindex.tablename
        rel = db[relname]
        rel.drop_index(index)
        db.drop_datadef(indexname)
        db.drop_index(indexname)
        log = db.log
        if log is not None:
            log.log(self)
class DropTable(sqlsem.SimpleRecursive):
    """DROP TABLE name (DropView subclasses this for views)"""
    def __init__(self, name):
        self.name = name
        self.indb = None
    def initargs(self):
        return (self.name,)
    def __repr__(self):
        return "DROP TABLE %s" % (self.name,)
    def relbind(self, db):
        """Verify the relation exists and is of the right kind."""
        self.indb = db
        name = self.name
        if not db.has_relation(name):
            raise NameError, `self.name` + ": cannot delete, no such table/view"
        self.check_kind(name, db)
        return self
    def check_kind(self, name, db):
        # overridden by DropView with the inverse test
        if db[name].is_view:
            raise ValueError, "%s is VIEW, can't DROP TABLE" % name
    def eval(self, dyn):
        db = self.indb
        if db is None:
            raise ValueError, "unbound or executed"
        db.touched = 1
        self.indb = None
        # re-validate in case the db changed since binding
        self.relbind(db)
        name = self.name
        rel = db[name]
        rel.drop_indices(db)
        db.drop_datadef(name)
        del db[name]
        log = db.log
        if log is not None:
            log.log(self)
class DropView(DropTable):
    """DROP VIEW name"""
    def __repr__(self):
        return "DROP VIEW %s" % self.name
    def check_kind(self, name, db):
        # inverse of DropTable's check: target must be a view
        if not db[name].is_view:
            raise ValueError, "%s is TABLE, can't DROP VIEW" % name
COLDEFFMT = "%s %s %s %s"
class ColumnDef(sqlsem.SimpleRecursive):
    """one column declaration in CREATE TABLE.

    datatype/defaults/constraints are carried for reproduction only;
    CreateTable.eval notes that datatypes are currently ignored.
    """
    def __init__(self, colid, datatype, defaults, constraints):
        self.colid = colid
        self.datatype = datatype
        self.defaults = defaults
        self.constraints = constraints
    def initargs(self):
        return (self.colid, self.datatype, self.defaults, self.constraints)
    def __repr__(self):
        defaults = self.defaults
        if defaults is None: defaults=""
        constraints = self.constraints
        if constraints is None: constraints = ""
        return COLDEFFMT % (self.colid, self.datatype, defaults, constraints)
def evalcond(cond, eqs, target, dyn, rassns, translate, invtrans):
    """factored out shared op between Update and Delete.

    Evaluates condition `cond` over the tuples of `target`, using an
    index when the known-attribute assignments (rassns) permit, and
    merging at most one dynamic parameter binding (dyn).

    Returns (test, rtups, seqnums, tuples): the boolean evaluation of
    cond per buffered tuple, the tuple index range, the sequence
    numbers, and the raw tuples.  May return None early if bindings
    are inconsistent (no tuples can match).
    """
    if dyn:
        #print "dyn", dyn
        from sqlsem import dynamic_binding
        dynbind = dynamic_binding(len(dyn), dyn)
        if len(dynbind)>1:
            raise ValueError, "only one dynamic binding allowed for UPDATE"
        dynbind1 = dynbind = dynbind[0]
        if eqs is not None:
            # propagate attribute equalities through the binding
            dynbind1 = dynbind.remap(eqs)
        if dynbind1 is None:
            # inconsistent
            return
        dynbind = dynbind1 + dynbind
        if rassns is not None:
            rassns = rassns + invtrans * dynbind
            if rassns.Clean() is None:
                # inconsistent
                return
        else:
            rassns = invtrans * dynbind
        #print "dynbind", dynbind
        #print "rassn", rassns
    else:
        dynbind = None
    # get tuple set, try to use an index
    index = None
    if rassns is not None:
        known = rassns.keys()
        index = target.choose_index(known)
    if index is None:
        # full scan
        (tuples, seqnums) = target.rows(1)
    else:
        #print "using index", index.name
        (tuples, seqnums) = index.matches(rassns)
    ltuples = len(tuples)
    buffer = [0] * ltuples
    rtups = range(ltuples)
    for i in rtups:
        tup = tuples[i]
        #print tup
        # rename attributes into the condition's namespace
        ttup = translate * tup
        if dynbind:
            ttup = (ttup + dynbind).Clean()
        if ttup is not None:
            buffer[i] = ttup
    #print "buffer", buffer
    #print "cond", cond
    #for x in buffer:
        #print "before", x
    test = cond(buffer)
    #print "test", test
    return (test, rtups, seqnums, tuples)
UPDFMT = """\
UPDATE %s
SET %s
WHERE %s"""
# optimize to use indices and single call to "cond"
class UpdateOp(sqlsem.SimpleRecursive):
    """UPDATE name SET assns WHERE condition"""
    def __init__(self, name, assns, condition):
        self.name = name
        self.assns = assns
        self.condition = condition
    def initargs(self):
        return (self.name, self.assns, self.condition)
    def __repr__(self):
        return UPDFMT % (self.name, self.assns, self.condition)
    def relbind(self, db):
        """Bind assignments and condition; precompute attribute maps."""
        self.indb = db
        name = self.name
        target = self.target = db.get_for_update(name)
        (attb, relb, amb, ambatts) = db.bindings( [ (name, name) ] )
        assns = self.assns = self.assns.relbind(attb, db)
        cond = self.condition = self.condition.relbind(attb, db)
        constraints = cond.constraints
        if constraints is not None:
            eqs = self.eqs = constraints.eqs
            cassns = constraints.assns
        else:
            cassns = eqs = self.eqs = None
        #print constraints, eqs
        # check that atts of assns are atts of target
        #print dir(assns)
        resultatts = assns.attorder
        from sqlsem import kjbuckets
        kjSet = kjbuckets.kjSet
        kjGraph = kjbuckets.kjGraph
        resultatts = kjSet(resultatts)
        allatts = kjSet(target.attribute_names)
        # attributes not assigned must be preserved from the old tuple
        self.preserved = allatts - resultatts
        huh = resultatts - allatts
        if huh:
            raise NameError, "%s lacks %s attributes" % (name, huh.items())
        # compute projection
        assnsatts = kjGraph(assns.domain().items()).neighbors(name)
        condatts = kjGraph(cond.domain().items()).neighbors(name)
        condatts = condatts+assnsatts
        #print "condatts", condatts
        # translate maps (relation, att) -> att for condition evaluation
        translate = kjbuckets.kjDict()
        for att in condatts:
            translate[ (name, att) ] = att
        self.translate = translate
        invtrans= self.invtrans = ~translate
        if cassns is not None:
            self.rassns = invtrans * cassns
        else:
            self.rassns = None
        #print "cassns,rassns", cassns, self.rassns
        #print translate
        # compute domain of self.assns
        # (do nothing with it, should add sanity check!)
        assns_domain = self.assns.domain()
        return self
    def eval(self, dyn=None):
        """Evaluate the condition once over candidate rows, then reset
        the matching tuples with the new attribute values."""
        indb = self.indb
        name = self.name
        cond = self.condition
        cond.uncache()
        assns = self.assns
        assns.uncache()
        translate = self.translate
        preserved = self.preserved
        target = self.target
        rassns = self.rassns
        eqs = self.eqs
        invtrans = self.invtrans
        #print "assns", assns, assns.__class__
        #print "cond", cond
        #print "eqs", eqs
        #print "target", target
        #print "dyn", dyn
        #print "rassns", rassns
        #print "translate", translate
        #print "invtrans", invtrans
        (test, rtups, seqnums, tuples) = evalcond(
           cond, eqs, target, dyn, rassns, translate, invtrans)
        # shortcut
        if not test: return
        self.indb.touched = 1
        tt = type
        from types import IntType
        #print test
        (tps, attorder) = assns.map(test)
        count = 0
        newseqs = list(rtups)
        newtups = list(rtups)
        for i in rtups:
            new = tps[i]
            # an int entry means "condition false for this row"
            if tt(new) is not IntType and new is not None:
                seqnum = seqnums[i]
                old = tuples[i]
                if preserved:
                    # carry over unassigned attributes from the old tuple
                    new = new + preserved*old
                newtups[count] = new
                newseqs[count] = seqnum
                count = count + 1
        if count:
            newseqs = newseqs[:count]
            newtups = newtups[:count]
            target.reset_tuples(newtups, newseqs)
            log = indb.log
            if log is not None and not log.is_scratch:
                from sqlsem import Reset_Tuples
                op = Reset_Tuples(self.name)
                op.set_data(newtups, newseqs, target)
                log.log(op)
class DeleteOp(sqlsem.SimpleRecursive):
    """DELETE FROM name WHERE condition"""
    def __init__(self, name, where):
        self.name = name
        self.condition = where
    def initargs(self):
        return (self.name, self.condition)
    def __repr__(self):
        return "DELETE FROM %s WHERE %s" % (self.name, self.condition)
    def relbind(self, db):
        """Bind the condition; precompute attribute translation maps."""
        self.indb = db
        name = self.name
        target = self.target = db.get_for_update(name)
        (attb, relb, amb, ambatts) = db.bindings( [ (name, name) ] )
        cond = self.condition = self.condition.relbind(attb, db)
        # compute domain of cond
        # do nothing with it (should add sanity check)
        cond_domain = cond.domain()
        constraints = cond.constraints
        if constraints is not None:
            cassns = constraints.assns
            self.eqs = constraints.eqs
        else:
            self.eqs = cassns = None
        # compute projection/rename
        from sqlsem import kjbuckets
        condatts = kjbuckets.kjGraph(cond.domain().items()).neighbors(name)
        # translate maps (relation, att) -> att for condition evaluation
        translate = kjbuckets.kjDict()
        for att in condatts:
            translate[(name, att)] = att
        self.translate = translate
        invtrans = self.invtrans = ~translate
        if cassns is not None:
            self.rassns = invtrans * cassns
        else:
            self.rassns = None
        return self
    def eval(self, dyn=None):
        # note, very similar to update case...
        indb = self.indb
        name = self.name
        target = self.target
        tuples = target.tuples
        eqs = self.eqs
        rassns = self.rassns
        cond = self.condition
        cond.uncache()
        translate = self.translate
        invtrans = self.invtrans
        (test, rtups, seqnums, tuples) = evalcond(
            cond, eqs, target, dyn, rassns, translate, invtrans)
        # shortcut
        if not test: return
        indb.touched = 1
        tt = type
        from types import IntType
        count = 0
        newseqs = list(rtups)
        #print "rtups", rtups
        for i in rtups:
            new = test[i]
            # an int entry means "condition false for this row"
            if tt(new) is not IntType and new is not None:
                seqnum = seqnums[i]
                newseqs[count] = seqnum
                count = count + 1
        #print "newseqs", newseqs
        #print "count", count
        if count:
            newseqs = newseqs[:count]
            target.erase_tuples(newseqs)
            log = indb.log
            if log is not None and not log.is_scratch:
                from sqlsem import Erase_Tuples
                op = Erase_Tuples(self.name)
                op.set_data(newseqs, target)
                log.log(op)
INSFMT = """\
INSERT INTO %s
%s
%s"""
class InsertOp(sqlsem.SimpleRecursive):
    """INSERT INTO name [optcolids] insertspec (VALUES ... or subselect)"""
    def __init__(self, name, optcolids, insertspec):
        self.name = name
        self.optcolids = optcolids
        self.insertspec = insertspec
        self.target = None # target relation
        self.collector = None # name map for attribute translation
    def initargs(self):
        return (self.name, self.optcolids, self.insertspec)
    def __repr__(self):
        return INSFMT % (self.name, self.optcolids, self.insertspec)
    def relbind(self, db):
        """Check column lists against the target and bind the spec."""
        self.indb = db
        name = self.name
        # determine target relation
        target = self.target = db.get_for_update(name)
        targetatts = target.attributes()
        from sqlsem import kjbuckets
        kjSet = kjbuckets.kjSet
        targetset = kjSet(targetatts)
        # check or set colid bindings
        colids = self.optcolids
        if colids is None:
            # no explicit column list: use all target attributes
            colids = self.optcolids = target.attributes()
        colset = kjSet(colids)
        ### for now all attributes must be in colset
        cdiff = colset-targetset
        if cdiff:
            raise NameError, "%s: no such attributes in %s" % (cdiff.items(), name)
        cdiff = targetset-colset
        ### temporary!!!
        if cdiff:
            raise NameError, "%s: not set in insert on %s" % (cdiff.items(), name)
        # bind the insertspec
        insertspec = self.insertspec
        self.insertspec = insertspec = insertspec.relbind(db)
        # create a collector for result
        from sqlsem import TupleCollector
        collector = self.collector = TupleCollector()
        # get ordered list of expressions to eval on bound attributes of insertspec
        resultexps = insertspec.resultexps()
        if len(resultexps)!=len(colset):
            raise ValueError, "result and colset of differing length %s:%s" % (colset,resultexps)
        # pair each column with its source expression, in order
        pairs = map(None, colids, resultexps)
        for (col,exp) in pairs:
            collector.addbinding(col, exp)
        return self
    def eval(self, dyn=None):
        """Evaluate the spec's bindings and add the resulting tuples."""
        resultbts = self.insertspec.eval(dyn)
        #print "resultbts", resultbts
        # shortcut
        if not resultbts: return
        indb = self.indb
        indb.touched = 1
        (resulttups, resultatts) = self.collector.map(resultbts)
        #print "resulttups", resulttups
        if resulttups:
            target = self.target
            target.add_tuples(resulttups)
            #target.regenerate_indices()
            log = indb.log
            if log is not None and not log.is_scratch:
                from sqlsem import Add_Tuples
                op = Add_Tuples(self.name)
                op.set_data(resulttups, target)
                log.log(op)
# placeholder binding so an INSERT ... VALUES with no dynamic parameters
# still yields one (trivial) binding dictionary
Insert_dummy_arg = [ ( (1,1), 1 ) ]
class InsertValues(sqlsem.SimpleRecursive):
    """VALUES ( expr, ... ) clause of an INSERT"""
    def __init__(self, List):
        self.list = List
    def initargs(self):
        return (self.list,)
    def __repr__(self):
        return "VALUES " +` tuple(self.list) `
    def resultexps(self):
        # the value expressions themselves are the result expressions
        return self.list
    def relbind(self, db):
        l = self.list
        bindings = {}
        for i in xrange(len(self.list)):
            li = l[i]
            l[i] = li.relbind(bindings, db)
            # do nothing with domain, for now
            li_domain = li.domain()
        return self
    def eval(self, dyn=None):
        """Return the dynamic parameter bindings (or a dummy binding)."""
        if dyn:
            from sqlsem import dynamic_binding
            dynbt = dynamic_binding(len(dyn), dyn)
        else:
            # dummy value to prevent triviality
            from sqlsem import kjbuckets
            dynbt = [kjbuckets.kjDict(Insert_dummy_arg)]
        #print "bindings", dynbt.assns
        return dynbt # ??
class InsertSubSelect(sqlsem.SimpleRecursive):
    """INSERT ... (subselect) clause: rows come from a subquery"""
    def __init__(self, subsel):
        self.subsel = subsel
    def initargs(self):
        return (self.subsel,)
    def __repr__(self):
        return "[subsel] %s" % (self.subsel,)
    def resultexps(self):
        # get list of result bindings
        subsel = self.subsel
        atts = self.subsel.attributes()
        # bind each as "result.name"
        exps = []
        from sqlsem import BoundAttribute
        for a in atts:
            exps.append( BoundAttribute("result", a) )
        return exps # temp
    def relbind(self, db):
        subsel = self.subsel
        self.subsel = subsel.relbind(db)
        # do nothing with domain for now
        #subsel_domain = subsel.domain()
        return self
    def eval(self, dyn=None):
        """Run the subselect; re-key each row under the "result" relation."""
        subsel = self.subsel
        subsel.uncache()
        rel = subsel.eval(dyn)
        tups = rel.rows()
        from sqlsem import BoundTuple ### temp
        from sqlsem import kjbuckets
        kjDict = kjbuckets.kjDict
        for i in xrange(len(tups)):
            tupsi = tups[i]
            new = kjDict()
            for k in tupsi.keys():
                new[ ("result", k) ] = tupsi[k]
            tups[i] = new
        return tups
# ordering for archiving datadefs
# (consumed by Ordered_DDF.__cmp__: tables first, then indices, then views)
ddf_order = [CreateTable, CreateIndex, CreateView]
pyg = context = None
#import pygram
from pygram import newlineresult
# reduction rules:
# only need to consider
# expressions, assignments, def, class, global, import, from, for
#
# expressions return a list of unqualified names, not known set
# qualified names are automatically put in context as refs
#
# assignments set left names, ref right names
#
# def sets new name for function and args,
# refs other names
#
# class adds new name for class
# refs other names
#
# global forces global interpretation for name
#
# import adds FIRST names
# from sets names
# for sets names
#
# related rules
# ASSIGNMENT REQUIRES SPECIAL TREATMENT
#@R assn1 :: assn >> testlist = testlist
def assn1(list, context):
    """Simple assignment: rhs refs first, lhs names become sets."""
    [t1, e, t2] = list
    return assn(t1, t2)
#@R assnn :: assn >> testlist = assn
def assnn(list, context):
    """Chained assignment a = b = ...: fold into the inner assn marks."""
    [t1, e, a1] = list
    return assn(t1, a1)
# @R assn1c :: assn >> testlist , = testlist
def assn1c(list, context):
    """Tuple-target assignment with a trailing comma on the lhs."""
    [t1, c, e, t2] = list
    return assn(t1, t2)
# @R assn1c2 :: assn >> testlist , = testlist ,
def assn1c2(list, context):
    # drop the trailing rhs comma token and reuse assn1c
    del list[-1]
    return assn1c(list, context)
# @R assnnc :: assn >> testlist , = assn
def assnnc(list, context):
    return assn1c(list, context)
def assn(left, right):
    """Combine assignment sides: keep rhs marks, then mark each plain
    "ref" on the lhs as a "set" (other marks pass through unchanged)."""
    out = right
    for item in left:
        (ln, ri, op, name) = item
        if op == "ref":
            out.append((ln, ri, "set", name))
        else:
            # e.g. qualified refs stay as-is
            out.append(item)
    return out
#@R except2 :: except_clause >> except test , test
def except2(list, context):
    """except E, v: names in the second test are assignment targets."""
    [kw, refs, comma, targets] = list
    out = refs
    out.extend([(ln, ri, "set", name) for (ln, ri, op, name) in targets])
    return out
#@R smassn :: small_stmt >> assn
# ignored
#@R rfrom :: import_stmt >> from dotted_name import name_list
#@R rfromc :: import_stmt >> from dotted_name import name_list ,
def rfrom(list, context):
    """from dotted_name import name_list: only the imported names bind."""
    from_kw, dotted, import_kw, names = list
    # the dotted module path itself introduces no local names
    return names
def rfromc(list, context):
    """import_stmt >> from dotted_name import name_list ,

    Strips the trailing comma token and delegates to rfrom.
    Bug fix: the original forgot to forward `context`, although
    rfrom requires two arguments (TypeError at reduction time).
    """
    return rfrom(list[:-1], context)
def mark(kind, thing, context):
    """Tag `thing` with (lineno, -index, kind) using the lexer position.

    The negated index makes the rhs of an assignment sort before the
    lhs when marks are later sorted.
    """
    L = context.LexD
    lineno = L.lineno
    # are we reducing on a newline?
    if L.lastresult==newlineresult:
        lineno = lineno-1
    return (lineno, -L.realindex, kind, thing)
#@R dn1 :: dotted_name >> NAME
def dn1(list, context):
    """dotted_name >> NAME: imported module name becomes a local set."""
    #print "dn1", list
    #L = context.LexD
    return [ mark("set", list[0], context) ]
    #return [ (L.lineno, -L.realindex, "set", list[0]) ]
# handles import case, make name set local
#@R nlistn :: name_list >> name_list , NAME
def nlistn(list, context):
    """name_list >> name_list , NAME: append one more bound name."""
    #print "nlistn", list
    [nl, c, n] = list
    #L = context.LexD
    #nl.append( (L.lineno, -L.realindex, "set", n) )
    nl.append( mark("set", n, context) )
    return nl
#@R nlist1 :: name_list >> NAME
def nlist1(list, context):
    """name_list >> NAME: start a name list with one bound name."""
    #print "nlist1", list
    #L = context.LexD
    #return [ (L.lineno, -L.realindex, "set", list[0]) ]
    return [ mark("set", list[0], context) ]
# ignore lhs in calls with keywords.
#@R namearg :: argument >> test = test
def namearg(list, context):
    """Keyword argument: only the rhs value expression is referenced."""
    lhs, eq, rhs = list
    return rhs
# handles from case, make names set local
#@R global1 :: global_stmt >> global NAME
def global1(list, context):
    """global_stmt >> global NAME: mark the name explicitly global."""
    #print "global1", list
    #L = context.LexD
    #return [ (L.lineno, -L.realindex, "global", list[1]) ]
    return [ mark("global", list[1], context) ]
#@R globaln :: global_stmt >> global_stmt , NAME
# handles global, make names global (not set or reffed)
def globaln(list, context):
    """global_stmt >> global_stmt , NAME: add one more global name."""
    #print "globaln", list
    [g, c, n] = list
    #L = context.LexD
    #g.append( (L.lineno, -L.realindex, "global", n) )
    g.append( mark("global", n, context) )
    return g
#@R for1 :: for_stmt >>
#for exprlist in testlist :
#   suite
def for1(list, context):
    """for loop: loop targets are assigned; testlist+suite are refs."""
    #print "for1", list
    [f, e, i, t, c, s] = list
    refs = t + s
    return assn(e, refs)
#@R for2 :: for_stmt >>
#for exprlist in testlist :
#   suite
#else :
#   suite
def for2(list,context):
    """for/else loop: same as for1 with the else-suite refs included."""
    #print "for2", list
    [f, e, i, t, c1, s1, el, c2, s2] = list
    refs = t + s1 + s2
    return assn(e, refs)
###
#@R class1 :: classdef >> class NAME : suite
def class1(list, context):
    """classdef without base classes."""
    [c, n, cl, s] = list
    return Class(n, [], s, context)
#@R class2 :: classdef >> class NAME ( testlist ) : suite
def class2(list, context):
    """classdef with a base-class list."""
    [c, n, opn, t, cls, cl, s] = list
    return Class(n, t, s, context)
def Class(name, testlist, suite, context):
    """Common classdef handling: the class body is its own scope.

    unused_ok=1 because methods defined in a class body commonly go
    unreferenced within it; unresolved globals are deferred to the
    module level.
    """
    globals = analyse_scope(name, suite, context, unused_ok=1)
    context.defer_globals(globals)
    result = testlist
    L = context.LexD
    # try to correct lineno
    lineno = L.lineno
    realindex = L.realindex
    for (ln, ri, op, n) in testlist+suite:
        lineno = min(lineno, ln)
    result.append((lineno, -realindex, "set", name))
    #result.append( mark("set", name, context) )
    # supress complaints about unreffed classes
    result.append((lineno+1, -realindex, "qref", name))
    #result.append( mark("qref", name, context) )
    return result
# vararsglist requires special treatment.
# return (innerscope, outerscope) pair of lists
# @R params1 :: parameters >> ( varargslist )
def params1(l, c):
    return l[1]
params1c = params1
#@R params2 :: varargslist >>
def params2(l, c):
    # no parameters at all: empty inner and outer scopes
    return ([], [])
#@R params3 :: varargslist >> arg
def params3(l, c):
    return l[0]
#@R params4 :: varargslist >> varargslist , arg
def params4(l, c):
    """Prepend one more arg's (inner, outer) marks onto the pair."""
    #print "params4", l
    [v, c, a] = l
    v[0][0:0] = a[0]
    v[1][0:0] = a[1]
    return v
#@R argd :: arg >> NAME = test
def argd(l, c):
    """Defaulted parameter: NAME is set inside; default expr refs outside."""
    [n, e, t] = l
    #L = c.LexD
    #return ([(L.lineno, -L.realindex, "set", n)], t)
    return ([ mark("set", n, c) ], t)
#@R arg2 :: arg >> fpdef
def arg2(l, c):
    return l[0]
#@R arg3 :: arg >> * NAME
def arg3(l, c):
    # drop the "*" token, then treat like a plain name
    del l[0]
    return fpdef1(l, c)
#@R arg4 :: arg >> ** NAME
def arg4(l, c):
    #del l[0]
    return arg3(l, c)
#@R fpdef1 :: fpdef >> NAME
def fpdef1(l, c):
    """Plain parameter name: set in the inner scope, nothing outer."""
    [n] = l
    #LexD = c.LexD
    return ([ mark("set", n, c) ], [])
#@R fpdef2 :: fpdef >> ( fplist )
def fpdef2(l, c):
    return l[1]
## @R fpdef2c :: fpdef >> ( fplist , )
#fpdef2c = fpdef2
##31
#@R fplist1 :: fplist >> fpdef
def fplist1(l, c):
    #print l
    return l[0]
#@R fplistn :: fplist >> fplist , fpdef
fplistn = params4
#@R rdef :: funcdef >> def NAME parameters : suite
def rdef(list, context):
    """Function definition: analyse the body as a new scope.

    Parameter inner-scope marks join the suite; unresolved globals are
    deferred; outer-scope marks (default exprs) plus the function name
    itself flow out to the enclosing scope.
    """
    #print "rdef", list
    [ddef, name, parameters, c, suite] = list
    (l, g) = parameters
    globals = analyse_scope(name, l + suite, context)
    # for embedded function defs global internal refs must be deferred.
    context.defer_globals(globals)
    result = g
    L = context.LexD
    # try to steal a lineno from other declarations:
    lineno = L.lineno
    index = L.realindex
    for (ln, ri, op, n) in l+g+suite:
        lineno = min(lineno, ln)
    if name is not None:
        # name is None for lambdas (see testlambda1)
        result.append((lineno, -index, "set", name))
        # Note: this is to prevent complaints about unreffed functions
        result.append((lineno+1, -index, "qref", name))
    return result
#@R testlambda1 :: test >> lambda varargslist : test
def testlambda1(list, context):
    """Lambda: reduce like an anonymous def (name None suppresses marks)."""
    [l, v, c, t] = list
    return rdef(["def", None, v, ":", t], context)
def analyse_scope(sname, var_accesses, context, unused_ok=0):
    """Analyse one scope's (lineno, index, op, name) marks.

    Reports use-before-assign, set-of-global-in-local-context, and
    (unless unused_ok) names defined but never referenced.  Returns
    the marks that remain unresolved and must escape to the enclosing
    scope.
    """
    # sort by (lineno, -index): textual order, rhs before lhs on a line
    var_accesses.sort()
    result = []
    globals = {}
    locals = {}
    # scan for globals
    for x in var_accesses:
        (ln, ri, op, name) = x
        if op == "global":
            globals[name] = ln
            #result.append(x) (ignore global sets in local context)
    # scan for locals
    for (ln, ri, op, name) in var_accesses:
        if op == "set" and not locals.has_key(name):
            if globals.has_key(name):
                context.complain(
   "Warning: set of global %s in local context %s" % (`name`, `sname`))
                result.append( (ln, ri, op, name) )
                pass # ignore global set in local context
            else:
                locals[name] = [ln, 0] # line assigned, #refs
    # scan for use before assign, etc.
    for x in var_accesses:
        (ln, ri, op, name) = x
        if locals.has_key(name):
            if op in ["ref", "qref"]:
                set = locals[name]
                set[1] = set[1] + 1
                assnln = set[0]
                if (ln <= assnln):
                    context.complain(
                     "(%s) local %s ref at %s before assign at %s" % (
                        sname, `name`, ln, `assnln`))
        elif op not in ("global", "set"):
            # ignore global sets in local context.
            # unresolved reference: escapes to the enclosing scope
            result.append(x)
    # scan for no use
    if not unused_ok:
        for (name, set) in locals.items():
            [where, count] = set
            if count<1:
                context.complain(
                  "(%s) %s defined before %s not used" % (sname, `name`, where))
    return result
### note, need to make special case for qualified names
#@R powera :: power >> atom trailerlist
def powera(list, context):
    """power >> atom trailerlist.

    A non-empty trailerlist (attribute access, call, subscript) turns
    the atom into a qualified reference, so its mark becomes "qref".
    """
    [atom, (trailer_marks, has_trailers)] = list
    if atom and has_trailers:
        # atom is a qualified name
        (ln, ri, op, n) = atom[0]
        head = [(ln, ri, "qref", n)]
    else:
        head = atom
    return head + trailer_marks
#@R trailerlist0 :: trailerlist >>
def trailerlist0(list, context):
    # second element 0 flags "no trailers" for powera
    return ([], 0) # empty trailerlist
#@R trailerlistn :: trailerlist >> trailer trailerlist
def trailerlistn(list, context):
    """Any name inside a trailer is at most a qualified reference."""
    #print "trailerlistn", list
    result = list[0] + list[1][0]
    for i in xrange(len(result)):
        (a, b, op, d) = result[i]
        result[i] = (a, b, "qref", d)
    return (result, 1)
# make name+parameters set local reduce suite...
def default_reduction(list, context):
    """Fallback reduction: concatenate all sub-lists of marks.

    Non-list members (tokens) are dropped.  Note the ``raise "oops"``
    is a Python 2 string exception guarding against nested lists.
    """
    # append all lists
    from types import ListType
    #print "defred", list
    #return
    result = []
    for x in list:
        if type(x)==ListType:
            if result == []:
                if len(x)>0 and type(x[0])==ListType:
                    raise "oops", x
                result = x
            else:
                for y in x:
                    result.append(y)
    return result
def aname(list, context):
    """Terminal NAME: record an unqualified reference at lexer position."""
    #print "aname", list, context
    L = context.LexD
    # note -L.realindex makes rhs of assignment seem before lhs in sort.
    return [ (L.lineno, -L.realindex, "ref", list[0]) ]
# the highest level reduction!
# all1 :: all >> file_input DEDENT
def all1(list, context):
    """Root reduction: hand all collected marks to the context."""
    stuff = list[0]
    context.when_done(stuff)
# first test
def BindRules(pyg):
    """Attach reduction functions to the Python grammar.

    Every rule gets default_reduction first; the rules with special
    scope semantics are then rebound individually.
    """
    for name in pyg.RuleNameToIndex.keys():
        pyg.Bind(name, default_reduction)
    pyg.Bind("all1", all1)
    pyg.Bind("testlambda1", testlambda1)
    pyg.Bind("except2", except2)
    pyg.Bind("namearg", namearg)
    pyg.Bind("rfrom", rfrom)
    pyg.Bind("rfromc", rfromc)
    pyg.Bind("class1", class1)
    pyg.Bind("class2", class2)
    pyg.Bind("aname", aname)
    pyg.Bind("assn1", assn1)
    pyg.Bind("assnn", assnn)
    pyg.Bind("assn1c", assn1c)
    pyg.Bind("assn1c2", assn1c2)
    pyg.Bind("assnnc", assnnc)
    pyg.Bind("dn1", dn1)
    pyg.Bind("nlistn", nlistn)
    pyg.Bind("nlist1", nlist1)
    pyg.Bind("global1", global1)
    pyg.Bind("globaln", globaln)
    pyg.Bind("for1", for1)
    pyg.Bind("for2", for2)
    pyg.Bind("powera", powera)
    pyg.Bind("trailerlist0", trailerlist0)
    pyg.Bind("trailerlistn", trailerlistn)
    pyg.Bind("params1", params1)
    pyg.Bind("params1c", params1c)
    pyg.Bind("params2", params2)
    pyg.Bind("params3", params3)
    pyg.Bind("params4", params4)
    pyg.Bind("argd", argd)
    pyg.Bind("arg2", arg2)
    pyg.Bind("arg3", arg3)
    pyg.Bind("arg4", arg4)
    pyg.Bind("fpdef1", fpdef1)
    pyg.Bind("fpdef2", fpdef2)
#    pyg.Bind("fpdef2c", fpdef2c)
    pyg.Bind("fplist1" , fplist1 )
    pyg.Bind("fplistn" , fplistn)
    pyg.Bind("rdef" , rdef)
#    pyg.Bind( , )
class globalContext:
    """Module-level lint context: collects deferred marks and complains."""
    def __init__(self, lexd):
        self.deferred = []
        self.LexD = lexd
    def complain(self, str):
        # report a lint finding (simple print sink)
        print str
    def defer_globals(self, globals):
        """Queue unresolved marks escaping from a nested scope."""
        self.deferred[0:0] = globals
    def when_done(self, list):
        """Final pass: resolve everything against module + builtins."""
        stuff = list + self.deferred + self.patch_globals()
        globals = analyse_scope("<module global>", stuff, self)
        seen = {}
        for (ln, ri, op, name) in globals:
            if not seen.has_key(name) and op!="set":
                seen[name] = name
                self.complain(
                   "%s: (%s) %s not defined in module?" % (ln, op, `name`))
        self.deferred = [] # reset state.
    def patch_globals(self):
        # patch in global names
        import __builtin__
        names = dir(__builtin__)
        list = names[:]
        list2 = names[:]
        for i in xrange(len(list)):
            # builtins "set" before line 0 and "qref" once so they never
            # trigger unused/undefined complaints
            list[i] = (-2, -900, "set", names[i])
            list2[i] = (-1, -900, "qref", names[i])
        return list + list2
teststring = """
class x(y,z):
'''
a doc string
blah
'''
def test(this, that):
w = that+this+x, n
x = 1
return w
"""
def go():
    """Command-line entry: lint sys.argv[1], or the built-in teststring."""
    import sys
    try:
        file = sys.argv[1]
    except IndexError:
        print "required input file missing, defaulting to test string"
        data = teststring
    else:
        data = open(file).read()
    print "setup"
    (pyg, context) = setup()
    print "now parsing"
    lint(data, pyg, context)
def setup():
    """Load the marshalled Python grammar and build a fresh context."""
    global pyg, context
    import pygram
    pyg = pygram.unMarshalpygram()
    BindRules(pyg)
    context = globalContext(pyg.LexD)
    return (pyg, context)
def lint(data, pygin=None, contextin=None):
    """Parse (and thereby lint) source text; defaults to module state."""
    if pygin is None: pygin = pyg
    if contextin is None: contextin = context
    pygin.DoParse1(data, contextin)
def lintdir(directory_name):
    """lint all files recursively in directory"""
    from find import find
    print "\n\nrecursively linting %s\n\n" % directory_name
    (pyg, context) = setup()
    python_files = find("*.py", directory_name)
    for x in python_files:
        print "\n\n [ %s ]\n\n" % x
        lint( open(x).read(), pyg, context )
        # form feed between files
        print "\014"
if __name__=="__main__": go() | zope.rdb | /zope.rdb-3.5.0.tar.gz/zope.rdb-3.5.0/src/zope/rdb/gadfly/kjpylint.py | kjpylint.py |
import string
### The string representation for the grammar.
### Since this is used only by GrammarBuild()
### it could be put in a separate file with GrammarBuild()
### to save space/load time after Grammar compilation.
###
GRAMMARSTRING ="""
Value :: ## indicates Value is the root nonterminal for the grammar
@R SetqRule :: Value >> ( setq var Value )
@R ListRule :: Value >> ( ListTail
@R TailFull :: ListTail >> Value ListTail
@R TailEmpty :: ListTail >> )
@R Varrule :: Value >> var
@R Intrule :: Value >> int
@R Strrule :: Value >> str
"""
### the name of the file in which to create the compiled
### grammar declarations
COMPILEDFILENAME = "TESTLispG2.py"
### declare comment form(s) as regular expressions
LISPCOMMENTREGEX = ";.*"
### declare regular expression string constants for terminals
#integer terminal:::::::
INTREGEX = "["+string.digits+"]+"
#string terminal::::::::
STRREGEX = '"[^\n"]*"'
#var terminal::::::::
VARREGEX = "["+string.letters+"]["+string.letters+string.digits+"]*"
### declare interpretation functions for terminals
# int interpretation function: translates string to int:
# Could use string.atoi without the extra level of indirection
# but for demo purposes here it is.
#
def intInterp( str ):
    """Translate a matched digit string to a Python int.

    int() replaces the deprecated string.atoi with identical base-10
    semantics, and works on both Python 2 and 3.
    """
    return int(str)
# interpretation function for strings strips off the surrounding quotes.
def stripQuotes( str ):
    """Strip the surrounding double quotes from a matched string literal.

    Bug fix: the original evaluated the tuple ``TypeError, "..."`` and
    discarded it instead of raising; now the error is actually raised.
    """
    if len(str)<2:
        raise TypeError("string too short?")
    return str[1:len(str)-1]
# interpretation function for vars just returns the recognized string
def echo(string):
    """Identity interpretation: return the recognized text unchanged."""
    return string
# This function declares the nonterminals both in the
# "grammar generation phase" and in loading the compiled
# grammar after generation
#
def DeclareTerminals(Grammar):
    """Register the terminal regexes with their interpretation functions.

    Used both in the grammar-generation phase and when loading the
    compiled grammar afterwards.
    """
    Grammar.Addterm("int", INTREGEX, intInterp)
    Grammar.Addterm("str", STRREGEX, stripQuotes)
    Grammar.Addterm("var", VARREGEX, echo)
### declare the rule reduction interpretation functions.
# EchoValue() serves for Intrule and Strrule, since
# we just want to echo the value returned by the
# respective terminal interpretation functions.
#
# Parser delivers list of form [ interpreted_value ]
def EchoValue( list, Context ):
    """Echo the single interpreted terminal value.

    Parser delivers list of form [ interpreted_value ].  The
    parenthesized raise form is valid on Python 2 and 3 alike.
    """
    if len(list)!=1:
        raise TypeError("this shouldn't happen! (1)")
    return list[0]
# for Varrule interpreter must try to look up the value
# in the Context dictionary
#
# Parser delivers list of form [ var_name ]
def VarValue(reduction, Context):
    """Reduction for Varrule: look the variable name up in Context.

    Expects ``reduction`` of the form [var_name].  Replaces the
    Python-2-only ``Context.has_key`` with the ``in`` operator and the
    comma-raise forms with call-style raises (valid in Python 2 and 3).
    Raises NameError when the variable is not bound in Context.
    """
    if len(reduction) != 1:
        raise TypeError("Huh? (2)")
    varName = reduction[0]
    if varName in Context:
        return Context[varName]
    raise NameError("no such lisp variable in context " + varName)
# for an empty tail, return the empty list
#
# Parser delivers list of form [")"]
def NilTail(reduction, Context):
    """Reduction for TailEmpty: an empty list tail yields the empty list.

    Expects ``reduction`` of the form [")"].  Bug fix: the original
    *returned* ``TypeError, "Bad reduction?"`` (a tuple) on malformed
    input instead of raising it; now actually raise.
    """
    if len(reduction) != 1 or reduction[0] != ")":
        raise TypeError("Bad reduction?")
    return []
# For a full tail, add the new element to the front of the list
#
# Parser delivers list of form [Value, TailValue]
def AddToList(reduction, Context):
    """Reduction for TailFull: prepend the new element to the tail list.

    Expects ``reduction`` of the form [Value, TailValue].  Bug fix: the
    original *returned* a (TypeError, message) tuple on malformed input
    instead of raising; now actually raise.
    """
    if len(reduction) != 2:
        raise TypeError("Bad reduction?")
    return [reduction[0]] + reduction[1]
# For a list, simply return the list determined by the tail
#
# Parser delivers list of form ["(", TailValue ]
def MakeList(reduction, Context):
    """Reduction for ListRule: a list's value is the value built by its tail.

    Expects ``reduction`` of the form ["(", TailValue].  Uses the
    call-style raise (valid in Python 2 and 3) instead of the
    Python-2-only comma form.
    """
    if len(reduction) != 2 or reduction[0] != "(":
        raise TypeError("Bad reduction? (3)")
    return reduction[1]
# For a setq, declare a new variable in the Context dictionary
#
# Parser delivers list of form # ["(", "setq", varName, Value, ")"]
def DoSetq(reduction, Context):
    """Reduction for SetqRule: bind a variable in Context, return the value.

    Expects ``reduction`` of the form ["(", "setq", varName, Value, ")"].
    Changes from the original: call-style raises and a parenthesized
    print (both valid in Python 2 and 3), and isinstance() for the name
    check (backward compatible: accepts str and its subclasses).
    """
    if (len(reduction) != 5
            or reduction[0] != "("
            or reduction[1] != "setq"
            or reduction[4] != ")"):
        print(reduction)
        raise TypeError("Bad reduction? (4)")
    varName = reduction[2]
    if not isinstance(varName, str):
        raise TypeError("Bad var name? (5)")
    value = reduction[3]
    # add or set the variable in the Context dictionary
    Context[varName] = value
    return value
# This function Binds the named rules of the Grammar string to their
# interpretation functions in a Grammar.
#
def BindRules(Grammar):
    """Attach each rule's semantic (reduction) function to its named rule."""
    bindings = (
        ("Intrule", EchoValue),
        ("Strrule", EchoValue),
        ("Varrule", VarValue),
        ("TailEmpty", NilTail),
        ("TailFull", AddToList),
        ("ListRule", MakeList),
        ("SetqRule", DoSetq),
    )
    for rulename, fn in bindings:
        Grammar.Bind(rulename, fn)
# This function generates the grammar and dumps it to a file.
# Since it will be used only once (after debugging),
# it probably should be put in another file save memory/load-time.
#
# the result returned is a Grammar Object that can be used
# for testing/debugging purposes.
#
# (maybe this should be made into a generic function?)
def GrammarBuild():
    """Generate the LISP grammar, dump it to COMPILEDFILENAME, and
    return the rule-bound Grammar object for testing/debugging."""
    import kjParseBuild
    # initialize a Null compilable grammar to define
    LispG = kjParseBuild.NullCGrammar()
    # declare terminals for the grammar
    DeclareTerminals(LispG)
    # declare the keywords for the grammar
    # defun is not used, included here for demo purposes only
    LispG.Keywords("setq defun")
    # Declare punctuations
    # dot is not used here
    LispG.punct("().")
    # Declare Nonterms
    LispG.Nonterms("Value ListTail")
    # Declare comment forms
    LispG.comments([LISPCOMMENTREGEX])
    # Declare rules
    LispG.Declarerules(GRAMMARSTRING)
    # Compile the grammar
    LispG.Compile()
    # Write the grammar to a file except for
    # the function bindings (which must be rebound)
    outfile = open(COMPILEDFILENAME, "w")
    LispG.Reconstruct("LispG",outfile,"GRAMMAR")
    outfile.close()
    # for debugging purposes only, bind the rules
    # in the generated grammar
    BindRules(LispG)
    # return the generated Grammar
    return LispG
# this function initializes the compiled grammar from
# generated file.
def LoadLispG():
    """Load the generated grammar module and rebind the terminal
    interpreters and rule reduction functions onto it."""
    import TESTLispG2
    # make sure we have most recent version (during debugging)
    # NOTE(review): reload is a builtin in Python 2 only
    reload(TESTLispG2)
    # evaluate the grammar function from generated file
    LispG = TESTLispG2.GRAMMAR()
    # bind the semantics functions
    DeclareTerminals(LispG)
    BindRules(LispG)
    return LispG
########## test grammar generation
# do generation
Dummy = GrammarBuild()
# load the grammar from the file as LispG
LispG = LoadLispG()
# declare an initial context, and do some tests.
Context = { "x":3 }
# each test parses one LISP form and returns its interpreted value;
# setq forms also mutate Context as a side effect
test1 = LispG.DoParse1( "()", Context)
test2 = LispG.DoParse1( "(123)", Context)
test3 = LispG.DoParse1( "(x)", Context)
test4 = LispG.DoParse1( '" a string "', Context)
test5 = LispG.DoParse1( "(setq y (1 2 3) )", Context )
test6 = LispG.DoParse1( '(setq x ("a string" "another" 0))', Context )
test7str = """
; this is a lisp comment
(setq abc (("a" x)
("b" (setq d 12))
("c" y) ) ; another lisp comment
)
"""
test7 = LispG.DoParse1( test7str, Context)
INSTALLDIR = "."
## simple relational algebra using only the equality predicate
## note: string values cannot contain ;
## statement sequencing using ; handled at higher level
# grammar for the toy relational-algebra language, written in
# kjParseBuild rule notation (@R rulename :: nonterm >> production)
relalg_rules = """
statement ::
@R statementassn :: statement >> assignment
@R statementexpr :: statement >> rexpr
@R assignment1 :: assignment >> name = rexpr
@R assignmentn :: assignment >> name = assignment
@R union :: rexpr >> rexpr U rterm
@R rterm :: rexpr >> rterm
@R minus :: rexpr >> rexpr - rterm
@R intersect :: rterm >> rterm intersect rfactor
@R join :: rterm >> rterm join rfactor
@R rfactor :: rterm >> rfactor
@R projection :: rfactor >> projection [ names ] rfactor
@R names0 :: names >>
@R namesn :: names >> names1
@R names11 :: names1 >> name
@R names1n :: names1 >> names1 name
@R selection :: rfactor >> selection ( condition ) rfactor
@R conditionor :: condition >> condition | boolfactor
@R condfactor :: condition >> boolfactor
@R factorand :: boolfactor >> boolfactor & boolprimary
@R factorprime :: boolfactor >> boolprimary
@R notprimary :: boolprimary >> ~ boolprimary
@R primarycondition :: boolprimary >> ( condition )
@R primaryeq :: boolprimary >> expression = expression
@R expname :: expression >> name
@R expvalue :: expression >> value
@R rename :: rfactor >> rename [ names ] to [ names ] rfactor
@R named :: rfactor >> name
@R factorexpr :: rfactor >> ( rexpr )
@R relationval :: rfactor >> [ names ] ( rows )
@R rows0 :: rows >>
@R rowsn :: rows >> somerows
@R somerows1 :: somerows >> row
@R somerowsn :: somerows >> somerows , row
@R emptyrow :: row >> NIL
@R row1 :: row >> value
@R rown :: row >> row value
@R valuenum :: value >> number
@R valuestr :: value >> string
"""
# reserved words of the language
keywords = """
selection intersect rename projection to NIL U join
"""
# single-character punctuation tokens
puncts = """=^~|,-[]()&"""
# nonterminal symbols used in the rules above
nonterms = """
statement assignment rexpr rterm value rfactor
names names1 condition boolfactor boolprimary
expression rows somerows row
"""
# prefer the C kjbuckets extension; fall back to the pure-Python version
try:
    from kjbuckets import *
except ImportError:
    from kjbuckets0 import *
class relation:
    """In-memory relation: a tuple of column names plus a kjSet of rows,
    where each row is a kjDict mapping column name -> value."""
    def __init__(self, names, rows):
        #print "relation init", names, rows
        names = self.names = tuple(names)
        nameset = self.nameset = kjSet(names)
        # every row must carry exactly the declared column names
        for r in rows:
            if nameset != kjSet(r.keys()):
                raise ValueError, \
                  "bad names: "+`(names, r.items())`
        self.rows = kjSet(rows)
    def __repr__(self):
        """Render as an aligned text table: header, a separator line of
        '=', then one line per row (or an --<empty>-- marker)."""
        from string import join
        names = self.names
        rows = self.rows.items()
        if not rows:
            nns = join(names)
            replist = [nns, "="*len(nns), " --<empty>--"]
            return join(replist, "\n")
        #print names, rows
        nnames = len(names)
        if nnames==1:
            replist = [names[0]]
        else:
            replist = [names]
        for r in rows:
            # NOTE(review): ``elt`` is unused; dump() is called twice here
            elt = r.dump(names)
            replist.append(r.dump(names))
        #print replist
        if nnames==1:
            replist = maxrep(replist)
        else:
            # transpose, pad each column to its max width, transpose back
            # (map(None, ...) / apply are Python-2-only idioms)
            transpose = apply(map, tuple([None] + replist))
            adjusted = map(maxrep, transpose)
            replist = apply(map, tuple([None] + adjusted))
            replist = map(join, replist)
        replist.insert(1, "=" * len(replist[0]))
        #print replist
        return join(replist, "\n")
def maxrep(items):
    """Return string forms of ``items``, right-padded with spaces to the
    width of the longest entry (used to align printed relation columns).

    Rewritten with str.ljust and comprehensions instead of manual
    padding arithmetic, and the parameter renamed so it no longer
    shadows the builtin ``list``.
    """
    strings = [str(item) for item in items]
    width = max(len(s) for s in strings)
    return [s.ljust(width) for s in strings]
# context is a simple dictionary of named relations
def elt0(l, c):
    # reduction helper: the semantic value is the first element of the
    # reduction list; the context argument is ignored
    return l[0]
statementassn = elt0
def statementexpr(l, c):
    """Reduction for a bare expression statement: pretty-print the
    resulting relation, indenting every line by one space.
    (Python 2 print statements; returns None deliberately.)"""
    from string import split, join
    print
    print " --- expression result ---"
    print
    data = str(l[0])
    print " "+ join(split(data, "\n"), "\n ")
def assignment1(l, c):
    """Reduction for ``name = rexpr``: store the relation value under
    ``name`` in the context dictionary and pass the value through."""
    name, _eq, value = l
    c[name] = value
    return value
assignmentn = assignment1
def check_compat(v1, v2):
    """Verify two relations are union-compatible (identical column name
    tuples); return (names, rows1, rows2)."""
    names1, names2 = v1.names, v2.names
    if names1 != names2:
        raise ValueError, \
          "operands not union compatible "+`(names1, names2)`
    return names1, v1.rows, v2.rows
def union(l, c):
    # kjSet addition is set union
    [v1, U, v2] = l
    names1, r1, r2 = check_compat(v1, v2)
    return relation(names1, (r1+r2).items())
rterm = elt0
def minus(l, c):
    # kjSet subtraction is set difference
    [v1, m, v2] = l
    names1, r1, r2 = check_compat(v1, v2)
    return relation(names1, (r1-r2).items())
def intersect(l, c):
    # kjSet & is set intersection
    [v1, i, v2] = l
    names1, r1, r2 = check_compat(v1, v2)
    return relation(names1, (r1&r2).items())
def join(l, c):
    """Natural join: combine rows that agree on all shared column names;
    with no shared columns this degenerates to a cross product."""
    [v1, j, v2] = l
    n1, n2 = v1.names, v2.names
    r1, r2 = v1.rows.items(), v2.rows.items()
    n1s, n2s = kjSet(n1), kjSet(n2)
    common = tuple((n1s&n2s).items())
    result = kjSet()
    if common:
        # simple hashjoin
        G = kjGraph()
        for a in r1:
            # index left rows by their values on the common columns
            G[a.dump(common)] = a
        for b in r2:
            for a in G.neighbors(b.dump(common)):
                result[a+b] = 1
    else:
        for a in r1:
            for b in r2:
                result[a+b] = 1
    return relation( (n1s+n2s).items(), result.items() )
rfactor = elt0
def projection(l, c):
    """Reduction for ``projection [ names ] rfactor``: keep only the
    listed columns (kjSet * row restricts each row dict); duplicate
    projected rows collapse because result is a kjSet."""
    [p, b1, names, b2, val] = l
    proj = kjSet(names)
    result = kjSet()
    for row in val.rows.items():
        result[ proj * row ] = 1
    return relation( names, result.items())
def emptylist(l, c):
    # reduction helper for empty productions: value is the empty list
    return []
names0 = emptylist
namesn = elt0
def names11(l, c):
    # a single name becomes a one-element name list
    return l
def names1n(l, c):
    """Reduction for ``names1 >> names1 name``: append the new name to
    the accumulated list and return the same list object."""
    accumulated, name = l
    accumulated.append(name)
    return accumulated
def selection(l, c):
    """Reduction for ``selection ( condition ) rfactor``: apply the
    compiled condition object as a filter over the relation value."""
    condition, value = l[2], l[4]
    return condition.filter(value)
## conditions are not optimized at all!
class conditionor:
    """Condition node for ``condition | boolfactor``: filter with each
    branch and union the resulting row sets."""
    def __init__(self, l, c):
        [self.c1, op, self.c2] = l
    def filter(self, val):
        v1 = self.c1.filter(val)
        v2 = self.c2.filter(val)
        return relation(v1.names, (v1.rows+v2.rows).items())
condfactor = elt0
class factorand(conditionor):
    # same node shape as conditionor, but intersect the row sets
    def filter(self, val):
        v1 = self.c1.filter(val)
        v2 = self.c2.filter(val)
        return relation(v1.names, (v1.rows&v2.rows).items())
factorprime = elt0
class notprimary:
    # negation: the rows of val NOT selected by the inner condition
    def __init__(self, l, c):
        [n, self.c1] = l
    def filter(self, val):
        v1 = self.c1.filter(val)
        return relation(v1.names, (val.rows-v1.rows).items())
def elt1(l, c):
    # reduction helper: the semantic value is the second element of the
    # reduction list; the context argument is ignored
    return l[1]
primarycondition = elt1
class primaryeq:
    """Equality predicate ``expression = expression``: keep the rows for
    which both expressions evaluate to equal values."""
    def __init__(self, l, c):
        [self.e1, eq, self.e2] = l
    def filter(self, val):
        rows = val.rows.items()
        e1v = self.e1.value(rows)
        e2v = self.e2.value(rows)
        result = kjSet()
        # map(None, ...) zips rows with both value sequences (Python 2)
        for (r, v1, v2) in map(None, rows, e1v, e2v):
            if v1==v2:
                result[r] = 1
        return relation(val.names, result.items())
class expname:
    """Expression node for a column name: its per-row value is the value
    stored in the row under that name."""
    def __init__(self, l, c):
        self.name = l[0]
    def value(self, rows):
        name = self.name
        r = list(rows)
        for i in xrange(len(r)):
            r[i] = r[i][name]
        return r
class expvalue(expname):
    # constant expression: the same literal value for every row
    def value(self, rows):
        return [self.name] * len(rows)
def rename(l, c):
    """Reduction for ``rename [ names ] to [ names2 ] rfactor``: rename
    the columns listed in ``names`` to the corresponding entries of
    ``names2``, keeping all other columns unchanged.

    Bug fix: the length-mismatch error message referenced an undefined
    variable ``names1``, so the error path raised NameError instead of
    the intended ValueError.  Also uses call-style raises and repr() in
    place of the Python-2-only comma-raise and backtick forms.
    """
    [ren, b1, names, b2, to, b3, names2, b4, val] = l
    if len(names) != len(names2):
        raise ValueError("names lengths must match" + repr((names, names2)))
    # map each new name back to the old name it replaces (Py2 map(None,...))
    remap = kjDict(map(None, names2, names))
    oldnames = kjSet(val.names)
    addnames = kjSet(names2)
    remnames = kjSet(names)
    keepnames = oldnames - remnames
    remap = remap + keepnames
    if not remnames.subset(oldnames):
        #print remnames, oldnames
        raise ValueError("old names not present" + repr((names, val.names)))
    newnames = keepnames + addnames
    rows = val.rows.items()
    for i in range(len(rows)):
        rows[i] = remap * rows[i]
    return relation(newnames.items(), rows)
def named(l, c):
    """Reduction for a bare relation name: look the relation up in the
    context dictionary of named relations."""
    (relation_name,) = l
    return c[relation_name]
def relationval(l, c):
    """Reduction for a literal relation ``[ names ] ( rows )``: build a
    relation from the name list and the parsed row values."""
    [b1, names, b2, p1, rows, p2] = l
    names = tuple(names)
    ln = len(names)
    for i in xrange(len(rows)):
        this = rows[i]
        lt = len(this)
        if lt!=ln:
            raise ValueError, "names, vals don't match"+`(names,this)`
        # kjUndump pairs a name tuple with a bare value (1 column) or a
        # value tuple (several columns) to build the row kjDict
        if len(this)==1:
            this = this[0]
        else:
            this = tuple(this)
        rows[i] = kjUndump(names, this)
    return relation(names, rows)
rows0 = emptylist
rowsn = elt0
def somerows1(l, c):
    # a single row becomes a one-element list of rows
    #print "somerows1", l
    return l
def somerowsn(l, c):
    """Reduction for ``somerows >> somerows , row``: append the new row
    to the accumulated row list and return the same list.

    The original unpacked the comma token into a local named ``c``,
    shadowing the context parameter; renamed to avoid the shadow.
    """
    #print "somerowsn", l
    [accumulated, _comma, row] = l
    accumulated.append(row)
    return accumulated
emptyrow = emptylist
row1 = somerows1
def factorexpr(l, c):
    # parenthesized expression: the value is the inner rexpr's value
    return l[1]
def rown(l, c):
    """Reduction for ``row >> row value``: extend the partially built
    row with one more value and return it."""
    partial_row, value = l
    partial_row.append(value)
    return partial_row
valuenum = valuestr = elt0
## snarfed from sqlbind
# note: all reduction function defs must precede this assign
# snapshot of the module namespace, used by BindRules to look up the
# reduction function for each grammar rule by name
VARS = vars()
class punter:
    """Fallback reduction used for rules with no bound function: print a
    diagnostic and echo the raw reduction list."""
    def __init__(self, name):
        self.name = name
    def __call__(self, list, context):
        print "punt:", self.name, list
        return list
class tracer:
    """Debugging wrapper for a reduction function: print the arguments
    and result around each call, passing the result through."""
    def __init__(self, name, fn):
        self.name = name
        self.fn = fn
    def __call__(self, list, context):
        print "tracing", self.name, list
        test = self.fn(list, context)
        print self.name, "returns", test
        return test
def BindRules(sqlg):
    """Bind every named rule of the grammar to the module-level function
    of the same name (via the VARS snapshot); rules with no matching
    function get a diagnostic punter.  Returns the grammar."""
    for name in sqlg.RuleNameToIndex.keys():
        if VARS.has_key(name):
            #print "binding", name
            sqlg.Bind(name, VARS[name]) # nondebug
            #sqlg.Bind(name, tracer(name, VARS[name]) ) # debug
        else:
            print "unbound", name
            sqlg.Bind(name, punter(name))
    return sqlg
## snarfed from sqlgen
# file the compiled grammar is marshalled to, under INSTALLDIR
MARSHALFILE = "relalg.mar"
import string
# NOTE(review): string.letters is Python-2-only (removed in Python 3)
alphanum = string.letters+string.digits + "_"
# regex for user-defined names: letter or _ then alphanumerics
userdefre = "[%s][%s]*" % (string.letters +"_", alphanum)
RACOMMENTREGEX = "COMMENT .*"
def userdeffn(text):
    """Terminal interpreter for user-defined names: return the matched
    text unchanged.  Renamed the parameter from ``str`` so it no longer
    shadows the builtin."""
    return text
# regex for single-quoted string literals (no embedded quote/newline)
charstre = "'[^\n']*'"
def charstfn(text):
    """Terminal interpreter for string literals: strip the surrounding
    single quotes.  Renamed the parameter from ``str`` so it no longer
    shadows the builtin."""
    return text[1:-1]
numlitre = "[%s][%s\.]*" % (string.digits, alphanum) # not really...
def numlitfn(str):
    """Note: this is "safe" because regex
    filters out dangerous things."""
    # NOTE(review): eval() on matched text -- acceptable only because the
    # numeric-literal regex restricts the input; do not loosen the regex.
    return eval(str)
def DeclareTerminals(Grammar):
    """Register the terminal symbols (name, string, number) with their
    regexes and interpretation functions on the given grammar object."""
    for term, regex, interp in (
            ("name", userdefre, userdeffn),
            ("string", charstre, charstfn),
            ("number", numlitre, numlitfn)):
        Grammar.Addterm(term, regex, interp)
def Buildrelalg(filename=MARSHALFILE):
    """Compile the relational-algebra grammar and marshal it to
    INSTALLDIR/filename; return the compiled grammar object."""
    import kjParseBuild
    SQLG = kjParseBuild.NullCGrammar()
    #SQLG.SetCaseSensitivity(0)
    DeclareTerminals(SQLG)
    SQLG.Keywords(keywords)
    SQLG.punct(puncts)
    SQLG.Nonterms(nonterms)
    # should add comments
    SQLG.comments([RACOMMENTREGEX])
    SQLG.Declarerules(relalg_rules)
    print "working..."
    SQLG.Compile()
    filename = INSTALLDIR+"/"+filename
    print "dumping to", filename
    outfile = open(filename, "wb")
    SQLG.MarshalDump(outfile)
    outfile.close()
    return SQLG
def reloadrelalg(filename=MARSHALFILE):
    """Load the marshalled grammar from INSTALLDIR/filename and rebind
    the terminal interpreters and rule reduction functions."""
    import kjParser
    filename = INSTALLDIR+"/"+filename
    infile = open(filename, "rb")
    SQLG = kjParser.UnMarshalGram(infile)
    infile.close()
    DeclareTerminals(SQLG)
    BindRules(SQLG)
    return SQLG
def runfile(f):
    """Read ;-separated relational-algebra commands from the open file
    object ``f``, echo each one indented, and execute it in a shared
    context of named relations."""
    from string import split, join
    ragram = reloadrelalg()
    context = {}
    #f = open(filename, "r")
    data = f.read()
    #f.close()
    # NOTE(review): split is re-imported here; the import above already
    # brought it in
    from string import split, strip
    commands = split(data, ";")
    for c in commands:
        if not strip(c): continue
        print " COMMAND:"
        data = str(c)
        pdata = " "+join(split(c, "\n"), "\n ")
        print pdata
        # DoParse1 executes the statement via the bound reductions
        test = ragram.DoParse1(c, context)
        print
# c:\python\python relalg.py ratest.txt
# usage: "relalg.py make" regenerates the grammar; with no arguments,
# commands are read from stdin
if __name__=="__main__":
    try:
        done = 0
        import sys
        argv = sys.argv
        if len(argv)>1:
            command = argv[1]
            if command=="make":
                print "building relational algebra grammar"
                Buildrelalg()
                done = 1
        else:
            runfile(sys.stdin)
            done = 1
    finally:
        # on any failure path, show usage from the module docstring
        if not done:
            print __doc__
## someday add subquery precedence to allow more general selects.
sqlrules = """
statement_list ::
@R stat1 :: statement_list >> statement
@R statn :: statement_list >> statement ; statement_list
@R dropindexstat :: statement >> drop_index_statement
@R createindexstat :: statement >> create_index_statement
@R selstat :: statement >> select_statement
@R insstat :: statement >> insert_statement
@R createtablestat :: statement >> create_table_statement
@R droptablestat :: statement >> drop_table_statement
@R delstat :: statement >> delete_statement_searched
@R updatestat :: statement >> update_statement_searched
@R createviewstat :: statement >> create_view_statement
@R dropviewstat :: statement >> drop_view_statement
## drop view statement
@R dropview :: drop_view_statement >> DROP VIEW user_defined_name
## create view statement
@R createview :: create_view_statement >>
CREATE VIEW user_defined_name optnamelist AS select_statement
@R optnamelist0 :: optnamelist >>
@R optnamelistn :: optnamelist >> ( namelist )
## drop index statement
@R dropindex :: drop_index_statement >> DROP INDEX user_defined_name
## create index statement
@R createindex :: create_index_statement >>
CREATE INDEX user_defined_name
ON user_defined_name
( namelist )
@R createuniqueindex :: create_index_statement >>
CREATE UNIQUE INDEX user_defined_name
ON user_defined_name
( namelist )
@R names1 :: namelist >> user_defined_name
@R namesn :: namelist >> namelist , user_defined_name
## update statement
@R update :: update_statement_searched >>
UPDATE user_defined_name
SET assns
optwhere
@R assn1 :: assns >> assn
@R assnn :: assns >> assns , assn
@R assn :: assn >> column_identifier = expression
#####
## delete statement
@R deletefrom :: delete_statement_searched >> DELETE FROM user_defined_name optwhere
## drop table
@R droptable :: drop_table_statement >> DROP TABLE user_defined_name
## create table statement ( restricted )
@R createtable :: create_table_statement >>
CREATE TABLE user_defined_name ( colelts )
@R colelts1 :: colelts >> colelt
@R coleltsn :: colelts >> colelts , colelt
@R coleltid :: colelt >> column_definition
@R coleltconstraint :: colelt >> column_constraint_definition
## column constraints deferred
@R coldef :: column_definition >>
column_identifier data_type optdefault optcolconstraints
## optdefault deferred
@R optdef0 :: optdefault >>
## optcolconstraint deferred
@R optcolconstr0 :: optcolconstraints >>
@R stringtype :: data_type >> character_string_type
@R exnumtype :: data_type >> exact_numeric_type
@R appnumtype :: data_type >> approximate_numeric_type
@R integer :: exact_numeric_type >> INTEGER
@R float :: approximate_numeric_type >> FLOAT
@R varchar :: character_string_type >> VARCHAR
@R varcharn :: character_string_type >> VARCHAR ( numeric_literal )
## insert statement
@R insert1 :: insert_statement >>
INSERT INTO table_name optcolids insert_spec
@R optcolids0 :: optcolids >>
@R optcolids1 :: optcolids >> ( colids )
@R colids1 :: colids >> column_identifier
@R colidsn :: colids >> colids , column_identifier
@R insert_values :: insert_spec >> VALUES ( litlist )
@R insert_query :: insert_spec >> sub_query
@R litlist1 :: litlist >> sliteral
@R litlistn :: litlist >> litlist , sliteral
@R sliteral0 :: sliteral >> literal
@R sliteralp :: sliteral >> + literal
## hack to permit complexes
@R sliterals :: sliteral >> sliteral + literal
@R sliterald :: sliteral >> sliteral - literal
@R sliteralm :: sliteral >> - literal
## select statement
@R subselect :: sub_query >>
SELECT alldistinct select_list
FROM table_reference_list
optwhere optgroup opthaving optunion
## @R psubselect :: sub_query >> ( sub_query )
@R selectx :: select_statement >>
sub_query
optorder_by
@R ad0 :: alldistinct >>
@R adall :: alldistinct >> ALL
@R addistinct :: alldistinct >> DISTINCT
@R where0 :: optwhere >>
@R where1 :: optwhere >> WHERE search_condition
@R group0 :: optgroup >>
@R group1 :: optgroup >> GROUP BY colnamelist
@R colnames1 :: colnamelist >> column_name
@R colnamesn :: colnamelist >> colnamelist , column_name
@R having0 :: opthaving >>
@R having1 :: opthaving >> HAVING search_condition
@R union0 :: optunion >>
@R union1 :: optunion >> UNION alldistinct sub_query
@R except1 :: optunion >> EXCEPT sub_query
@R intersect1 :: optunion >> INTERSECT sub_query
@R order0 :: optorder_by >>
@R order1 :: optorder_by >> ORDER BY sortspeclist
##@R orderby :: order_by_clause >> ORDER BY sortspeclist
@R sortspec1 :: sortspeclist >> sort_specification
@R sortspecn :: sortspeclist >> sortspeclist , sort_specification
## really, should be unsigned int
@R sortint :: sort_specification >> numeric_literal opt_ord
@R sortcol :: sort_specification >> column_name opt_ord
@R optord0 :: opt_ord >>
@R optordasc :: opt_ord >> ASC
@R optorddesc :: opt_ord >> DESC
## table reference list (nasty hack alert)
@R trl1 :: table_reference_list >> user_defined_name
@R trln :: table_reference_list >> user_defined_name , table_reference_list
@R trl1a :: table_reference_list >> user_defined_name user_defined_name
@R trlna :: table_reference_list >> user_defined_name user_defined_name , table_reference_list
@R trl1as :: table_reference_list >> user_defined_name AS user_defined_name
@R trlnas :: table_reference_list >> user_defined_name AS user_defined_name , table_reference_list
## select list
@R selectstar :: select_list >> *
@R selectsome :: select_list >> selectsubs
@R select1 :: selectsubs >> select_sublist
@R selectn :: selectsubs >> selectsubs , select_sublist
@R selectit :: select_sublist >> expression
@R selectname :: select_sublist >> expression AS column_alias
@R colalias :: column_alias >> user_defined_name
## search condition
@R search1 :: search_condition >> boolean_term
@R searchn :: search_condition >> boolean_term OR search_condition
@R bool1 :: boolean_term >> boolean_factor
@R booln :: boolean_term >> boolean_factor AND boolean_term
@R bf1 :: boolean_factor >> boolean_primary
@R notbf :: boolean_factor >> NOT boolean_primary
@R bp1 :: boolean_primary >> predicate
@R bps :: boolean_primary >> ( search_condition )
## predicate (simple for now!!!)
@R predicate1 :: predicate >> comparison_predicate
## comparison predicate (simple for now!!!)
@R predicateeq :: comparison_predicate >> expression = expression
@R predicatelt :: comparison_predicate >> expression < expression
@R predicategt :: comparison_predicate >> expression > expression
@R predicatele :: comparison_predicate >> expression < = expression
@R predicatege :: comparison_predicate >> expression > = expression
@R predicatene :: comparison_predicate >> expression < > expression
@R predbetween :: comparison_predicate >> expression BETWEEN expression AND expression
@R prednotbetween :: comparison_predicate >>
expression NOT BETWEEN expression AND expression
## exists predicate
@R predexists :: predicate >> exists_predicate
@R exists :: exists_predicate >> EXISTS ( sub_query )
## quantified predicate
@R predqeq :: predicate >> expression = allany ( sub_query )
@R predqne :: predicate >> expression < > allany ( sub_query )
@R predqlt :: predicate >> expression < allany ( sub_query )
@R predqgt :: predicate >> expression > allany ( sub_query )
@R predqle :: predicate >> expression < = allany ( sub_query )
@R predqge :: predicate >> expression > = allany ( sub_query )
@R nnall :: allany >> ALL
@R nnany :: allany >> ANY
## in predicate
@R predin :: predicate >> expression IN ( sub_query )
@R prednotin :: predicate >> expression NOT IN ( sub_query )
@R predinlits :: predicate >> expression IN ( litlist )
@R prednotinlits :: predicate >> expression NOT IN ( litlist )
## subquery expression
@R subqexpr :: expression >> ( sub_query )
## expression (simple for now!!!)
@R exp1 :: expression >> term
@R expplus :: expression >> expression + term
@R expminus :: expression >> expression - term
@R term1 :: term >> factor
@R termtimes :: term >> term * factor
@R termdiv :: term >> term / factor
@R factor1 :: factor >> primary
@R plusfactor :: factor >> + factor
@R minusfactor :: factor >> - factor
@R primary1 :: primary >> column_name
@R primarylit :: primary >> literal
@R primaryexp :: primary >> ( expression )
@R primaryset :: primary >> set_function_reference
@R stringlit :: literal >> character_string_literal
@R stringstring :: literal >> literal character_string_literal
@R numlit :: literal >> numeric_literal
## set functions (nasty hack!)
@R countstar :: set_function_reference >> COUNT ( * )
@R distinctcount :: set_function_reference >> COUNT ( DISTINCT expression )
@R allcount :: set_function_reference >> COUNT ( expression )
@R distinctset :: set_function_reference >> aggregate ( DISTINCT expression )
@R allset :: set_function_reference >> aggregate ( expression )
@R average :: aggregate >> AVG
##@R count :: aggregate >> COUNT
@R maximum :: aggregate >> MAX
@R minimum :: aggregate >> MIN
@R summation :: aggregate >> SUM
@R median :: aggregate >> MEDIAN
## dynamic parameter (varies quite a bit from ODBC spec)
@R dynamic :: literal >> ?
## column name
@R columnname1 :: column_name >> column_identifier
@R columnname2 :: column_name >> table_name . column_identifier
@R tablename1 :: table_name >> user_defined_name
@R columnid1 :: column_identifier >> user_defined_name
"""
nonterms = """
sliteral
exists_predicate set_function_reference aggregate
sortspeclist sort_specification opt_ord
drop_table_statement delete_statement_searched update_statement_searched
assns assn
insert_statement litlist colelt optcolconstraints optdefault
optcolids insert_spec create_table_statement
colids colelts column_constraint_definition
column_definition data_type character_string_type
exact_numeric_type approximate_numeric_type
expression term factor primary literal
comparison_predicate column_alias column_identifier table_name
boolean_term boolean_factor boolean_primary predicate
selectsubs expression alias sub_query
statement_list statement select_statement alldistinct subselect
select_list table_reference_list optwhere optgroup opthaving
order_by_clause select_sublist
optunion optorder_by search_condition colnamelist column_name
table_reference table_name create_index_statement namelist
drop_index_statement allany create_view_statement drop_view_statement
optnamelist
"""
keywords = """
INDEX ON ANY IN VIEW AS
EXCEPT INTERSECT
EXISTS AVG COUNT MAX MIN SUM MEDIAN
UPDATE DROP DELETE FROM SET
INSERT INTO VALUES CREATE TABLE INTEGER FLOAT VARCHAR
AND OR NOT
SELECT FROM WHERE HAVING GROUP BY UNION ALL DISTINCT AS ORDER
ASC DESC BETWEEN UNIQUE
"""
puncts = """.,*;=<>{}()?+-/"""
# terminals user_defined_name, character_string_literal,
# numeric_literal | zope.rdb | /zope.rdb-3.5.0.tar.gz/zope.rdb-3.5.0/src/zope/rdb/gadfly/sqlgram.py | sqlgram.py |
import socket, gadfly
from gfsocket import \
reply_exception, reply_success, Packet_Reader, certify
def main():
    """start up the server.

    Command line: gfserve port db dr pw [startup-module]; prints the
    module docstring as usage if arguments are missing or startup fails.
    """
    import sys
    try:
        done = 0
        argv = sys.argv
        nargs = len(argv)
        #print nargs, argv
        if nargs<5:
            sys.stderr.write("gfserve: not enough arguments: %s\n\n" % argv)
            sys.stderr.write(__doc__)
            return
        [port, db, dr, pw] = argv[1:5]
        print "gfserve startup port=%s db=%s, dr=%s password omitted" % (
           port, db, dr)
        from string import atoi
        port = atoi(port)
        startup = None
        if nargs>5:
            startup = argv[5]
            print "gfserve: load startup module %s" % startup
        S = Server(port, db, dr, pw, startup)
        S.init()
        print "gfserve: server initialized, setting stderr=stdout"
        sys.stderr = sys.stdout
        print "gfserve: starting the server"
        S.start()
        done = 1
    finally:
        if not done:
            print __doc__
# general error
ServerError = "ServerError"
# no such prepared name
PreparedNameError = "PreparedNameError"
# actions
# NOTE(review): these are "string exceptions" (a Python 2 idiom) -- the
# server main loop catches them with ``except SHUTDOWN:`` etc.
# shut down the server (admin policy only)
# arguments = ()
# shutdown the server with no checkpoint
SHUTDOWN = "SHUTDOWN"
# restart the server (admin only)
# arguments = ()
# restart the server (recover)
# no checkpoint
RESTART = "RESTART"
# checkpoint the server (admin only)
# arguments = ()
# checkpoint the server
CHECKPOINT = "CHECKPOINT"
# exec prepared statement
# arguments = (prepared_name_string, dyn=None)
# execute the prepared statement with dynamic args.
# autocommit.
EXECUTE_PREPARED = "EXECUTE_PREPARED"
# exec any statement (only if not disabled)
# arguments = (statement_string, dyn=None)
# execute the statement with dynamic args.
# autocommit.
EXECUTE_STATEMENT = "EXECUTE_STATEMENT"
# all supported actions
ACTIONS = [SHUTDOWN, RESTART, CHECKPOINT,
           EXECUTE_PREPARED, EXECUTE_STATEMENT]
class Server:
    """database server: listen for commands"""
    verbose = 1
    # wait X minutes on each server loop
    select_timeout = 60*5
    # do a checkpoint each X times thru server loop
    check_loop = 5
    # for now works like finger/http
    # == each command is a separate connection.
    # all sql commands constitute separate transactions
    # which are automatically committed upon success.
    # for now commands come in as
    # 1 length (marshalled int)
    # 2 (password, data) (marshalled tuple)
    # responses come back as
    # 1 length (marshalled int)
    # 2 results (marshalled value)
    def __init__(self, port, db, dr, pw, startup=None):
        # port: TCP port to listen on; db/dr: database name and directory;
        # pw: admin password; startup: optional startup module name
        self.port = port
        self.db = db
        self.dr = dr
        self.pw = pw
        self.startup = startup
        self.connection = None
        self.socket = None
        # prepared cursors dictionary.
        self.cursors = {}
        self.policies = {}
        self.admin_policy = None
    def start(self):
        """after init, listen for commands.

        Event loop: select() over the listening socket plus any pending
        connections, poll readable readers, then dispatch READY requests
        through the matching policy; checkpoint every check_loop passes.
        """
        from gfsocket import READY, ERROR, unpack_certified_data
        import sys
        verbose = self.verbose
        socket = self.socket
        connection = self.connection
        policies = self.policies
        admin_policy = self.admin_policy
        from select import select
        pending_connects = {}
        while 1:
            try:
                # main loop
                if self.check_loop<0: self.check_loop=5
                for i in xrange(self.check_loop):
                    if verbose:
                        print "main loop on", socket, connection
                    # checkpoint loop
                    sockets = [socket]
                    if pending_connects:
                        sockets = sockets + pending_connects.keys()
                    # wait for availability
                    if verbose:
                        print "server: waiting for connection(s)"
                    (readables, dummy, errors) = select(\
                       sockets, [], sockets[:], self.select_timeout)
                    if socket in errors:
                        raise ServerError, \
                          "listening socket in error state: aborting"
                    # clean up error connection sockets
                    for s in errors:
                        del pending_connects[s]
                        s.close()
                    # get a new connection, if available
                    if socket in readables:
                        readables.remove(socket)
                        (conn, addr) = socket.accept()
                        if 1 or verbose:
                            print "connect %s" % (addr,)
                        reader = Packet_Reader(conn)
                        pending_connects[conn] = reader
                    # poll readable pending connections, if possible
                    for conn in readables:
                        reader = pending_connects[conn]
                        mode = reader.mode
                        if not mode==READY:
                            if mode == ERROR:
                                # shouldn't happen
                                try:
                                    conn.close()
                                    del pending_connects[conn]
                                except: pass
                                continue
                            else:
                                try:
                                    reader.poll()
                                finally:
                                    pass # AFTER DEBUG CHANGE THIS!
                    # in blocking mode, service ready request,
                    # commit on no error
                    for conn in pending_connects.keys():
                        reader = pending_connects[conn]
                        mode = reader.mode
                        if mode == ERROR:
                            try:
                                del pending_connects[conn]
                                conn.close()
                            except: pass
                        elif mode == READY:
                            try:
                                del pending_connects[conn]
                                data = reader.data
                                (actor_name, cert, md) = \
                                   unpack_certified_data(data)
                                # find the policy for this actor
                                if not policies.has_key(actor_name):
                                    if verbose:
                                        print "no such policy: "+actor_name
                                    reply_exception(NameError,
                                      "no such policy: "+actor_name, conn)
                                    policy = None
                                else:
                                    if verbose:
                                        print "executing for", actor_name
                                    policy = policies[actor_name]
                                    policy.action(cert, md, conn)
                            # SHUTDOWN/RESTART/CHECKPOINT are string
                            # exceptions raised by privileged actions
                            except SHUTDOWN:
                                if policy is admin_policy:
                                    print \
  "shutdown on admin policy: terminating"
                                    connection.close()
                                    socket.close()
                                    # NORMAL TERMINATION:
                                    return
                            except RESTART:
                                if policy is admin_policy:
                                    print \
  "restart from admin policy: restarting connection"
                                    connection.restart()
                            except CHECKPOINT:
                                if policy is admin_policy:
                                    print \
  "checkpoint from admin policy: checkpointing now."
                                    connection.checkpoint()
                            except:
                                # report any other failure to the client
                                tb = sys.exc_traceback
                                info = "%s %s" % (sys.exc_type,
                                   str(sys.exc_value))
                                if verbose:
                                    from traceback import print_tb
                                    print_tb(tb)
                                print "error in executing action: "+info
                                reply_exception(
                                  ServerError, "exception: "+info, conn)
                    #break # stop after first request serviced!
            except:
                # except of main while 1 try statement
                tb = sys.exc_traceback
                ty = sys.exc_type
                va = sys.exc_value
                print "UNEXPECTED EXCEPTION ON MAINLOOP"
                from traceback import print_tb
                print_tb(tb)
                print "exception:", ty, va
            # NOTE(review): this rebinding is a no-op ({} when already
            # falsy); presumably intended to reset state -- confirm
            if not pending_connects:
                pending_connects = {}
            print "server: checkpointing"
            connection.checkpoint()
    def init(self):
        # open the database, load policies, then start listening
        self.getconnection()
        self.startup_load()
        # get socket last in case of failure earlier
        self.getsocket()
    HOST = ""
    BACKLOG = 5
    def getsocket(self):
        """get the listening socket"""
        verbose = self.verbose
        import socket, sys
        if verbose:
            print "initializing listener socket"
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            if verbose:
                print "trying to set REUSEADDR",\
                   sock.getsockopt(socket.SOL_SOCKET,
                      socket.SO_REUSEADDR)
            sock.setsockopt(socket.SOL_SOCKET,
               socket.SO_REUSEADDR, 1)
        except:
            # best effort: REUSEADDR is an optimization, not required
            if verbose:
                print "set of REUSEADDR failed", sys.exc_type, sys.exc_value
            pass
        sock.bind((self.HOST, self.port))
        sock.listen(self.BACKLOG)
        self.socket = sock
        return sock
    def getconnection(self):
        """get the db connection"""
        from gadfly import gadfly
        c = self.connection = gadfly(self.db, self.dr)
        # don't automatically checkpoint upon commit
        c.autocheckpoint = 0
    def startup_load(self):
        """setup the policies and load startup module"""
        admin_policy = self.get_admin_policy()
        module_name = self.startup
        if module_name:
            module = __import__(module_name)
            # startup(admin_policy, connection, Server_instance)
            test = module.startup(admin_policy, self.connection, self)
            if test is not None:
                self.policies = test
        # the admin policy is always installed, even over a startup result
        self.policies["admin"] = admin_policy
    def get_admin_policy(self):
        """return the admin policy for priviledged access."""
        p = self.admin_policy = Policy(
            "admin", self.pw, self.connection, queries=1)
        return p
class Policy:
    """security policy

    A Policy mediates all client access to the database: it certifies
    each request against its password, restricts clients to registered
    (named) statements unless general_queries is set, and dispatches
    server-level actions (shutdown/restart/checkpoint).
    """
    # when true, trace policy activity to stdout
    verbose = 0
    # allow arbitrary sql statements
    general_queries = 0
    # dictionary of named accesses as strings
    named_accesses = None
    # dictionary of prepared named accesses
    prepared_cursors = None
    def __init__(self, name, password, connection, queries=0):
        """create a policy (name, password, connection)
        name is the name of the policy
        password is the access policy (None for no password)
        connection is the database connection.
        set queries to allow general accesses (unrestricted)
        """
        if self.verbose:
            print "policy.__init__", name
        self.general_queries = queries
        self.name = name
        self.password = password
        self.connection = connection
        self.socket = None
        self.named_accesses = {}
        self.prepared_cursors = {}
    def __setitem__(self, name, value):
        # register a named access: policy[name] = sql_statement_string
        if self.verbose:
            print "policy", self.name, ":", (name, value)
        from types import StringType
        if type(name) is not StringType or type(value) is not StringType:
            raise ValueError, "cursor names and contents must be strings"
        self.named_accesses[name] = value
    def execute_named(self, name, params=None):
        """execute a named (prepared) sql statement"""
        if self.verbose:
            print "policy", self.name, "executes", name, params
        na = self.named_accesses
        pc = self.prepared_cursors
        con = self.connection
        if not na.has_key(name):
            raise PreparedNameError, "unknown access name: %s" % name
        stat = na[name]
        if pc.has_key(name):
            # get prepared query
            cursor = pc[name]
        else:
            # prepare a new cursor
            pc[name] = cursor = con.cursor()
        return self.execute(cursor, stat, params)
    def execute(self, cursor, statement, params=None):
        """execute a statement in a cursor, commit, and return
        (description, rows) for queries or None for non-queries."""
        if self.verbose:
            print "policy", self.name, "executes", statement, params
        cursor.execute(statement, params)
        # immediate commit!
        self.connection.commit()
        try:
            result = cursor.fetchall()
            description = cursor.description
            result = (description, result)
        except:
            # non-query statements have no result set
            result = None
        return result
    def execute_any_statement(self, statement, params=None):
        """execute any statement."""
        if self.verbose:
            print "policy", self.name, "executes", statement, params
        con = self.connection
        cursor = con.cursor()
        return self.execute(cursor, statement, params)
    def action(self, certificate, datastring, socket):
        """perform a database/server action after checking certificate"""
        verbose = self.verbose
        if verbose:
            print "policy", self.name, "action..."
        # make sure the certificate checks out
        if not self.certify(datastring, certificate, self.password):
            raise ServerError, "password certification failure"
        # unpack the datastring
        from marshal import loads
        test = loads(datastring)
        (action, moredata) = test
        import sys
        if action in ACTIONS:
            # dispatch to the matching policy_* method
            action = "policy_"+action
            myaction = getattr(self, action)
            try:
                data = apply(myaction, moredata+(socket,))
            # pass up server level requests as exceptions
            except SHUTDOWN, detail:
                raise SHUTDOWN, detail
            except RESTART, detail:
                raise RESTART, detail
            except CHECKPOINT, detail:
                raise CHECKPOINT, detail
            except:
                # anything else: report to the client, then re-raise
                tb = sys.exc_traceback
                exceptiondata = "%s\n%s" %(sys.exc_type,
                                           str(sys.exc_value))
                if verbose:
                    from traceback import print_tb
                    print_tb(tb)
                self.reply_exception(ServerError,
                    "unexpected exception: "+exceptiondata, socket)
                raise ServerError, exceptiondata
        else:
            raise ServerError, "unknown action: "+`action`
    def certify(self, datastring, certificate, password):
        # hook for subclassing
        return certify(datastring, certificate, password)
    def policy_SHUTDOWN(self, socket):
        # acknowledge, then signal the server mainloop via exception
        self.reply_success("attempting server shutdown", socket)
        raise SHUTDOWN, "please shut down the server"
    def policy_RESTART(self, socket):
        # acknowledge, then signal the server mainloop via exception
        self.reply_success("attempting server restart", socket)
        raise RESTART, "please restart the server"
    def policy_CHECKPOINT(self, socket):
        # acknowledge, then signal the server mainloop via exception
        self.reply_success("attempting server checkpoint", socket)
        raise CHECKPOINT, "please checkpoint the server"
    def policy_EXECUTE_PREPARED(self, name, dyn, socket):
        # run a statement previously registered via __setitem__
        try:
            result = self.execute_named(name, dyn)
            self.reply_success(result, socket)
        except PreparedNameError, detail:
            self.reply_exception(PreparedNameError,
                "no such prepared statement: "+name,
                socket)
    def policy_EXECUTE_STATEMENT(self, stat, dyn, socket):
        # arbitrary sql is only allowed when general_queries is set
        if not self.general_queries:
            self.reply_exception(ServerError,
                "general statements disallowed on this policy",
                socket)
            raise ServerError, "illegal statement attempt for: "+self.name
        result = self.execute_any_statement(stat, dyn)
        self.reply_success(result, socket)
    def reply_exception(self, exc, info, socket):
        # hook for subclassing
        reply_exception(exc, info, socket)
    def reply_success(self, data, socket):
        # hook for subclassing
        reply_success(data, socket)
# run the server when invoked as a script
if __name__=="__main__": main()
# set this variable to regenerate the grammar on each load
REGENERATEONLOAD = 1
import string
GRAMMARSTRING ="""
Value :: ## indicates Value is the root nonterminal for the grammar
@R SetqRule :: Value >> ( setq var Value )
@R ListRule :: Value >> ( ListTail
@R TailFull :: ListTail >> Value ListTail
@R TailEmpty :: ListTail >> )
@R Varrule :: Value >> var
@R Intrule :: Value >> int
@R Strrule :: Value >> str
@R PrintRule :: Value >> ( print Value )
"""
COMPILEDFILENAME = "TESTLispG.py"
MARSHALLEDFILENAME = "TESTLispG.mar"
LISPCOMMENTREGEX = ";.*"
INTREGEX = "["+string.digits+"]+"
STRREGEX = '"[^\n"]*"'
VARREGEX = "["+string.letters+"]["+string.letters+string.digits+"]*"
### declare interpretation functions and regex's for terminals
def intInterp( str ):
    """Terminal interpreter for "int" tokens: convert the matched text
    to an integer.  Uses the builtin int() instead of the deprecated
    string.atoi alias (identical behavior for decimal token strings)."""
    return int(str)
def stripQuotes( str ):
    """Terminal interpreter for string literals: drop the surrounding
    double-quote characters and return the contents."""
    return str[1:-1]
def echo(string):
    """Terminal interpreter for "var" tokens: return the matched text
    unchanged (the variable name itself)."""
    return string
def DeclareTerminals(Grammar):
    """Attach each terminal's regex and interpretation function to the
    grammar (needed both when building and when reloading)."""
    terminals = (
        ("int", INTREGEX, intInterp),
        ("str", STRREGEX, stripQuotes),
        ("var", VARREGEX, echo),
    )
    for (termName, regex, interp) in terminals:
        Grammar.Addterm(termName, regex, interp)
### declare the rule reduction interpretation functions.
def EchoValue( list, Context ):
    """Reduction for Intrule and Strrule: pass the single token's
    interpreted value straight through."""
    value = list[0]
    return value
def VarValue( list, Context ):
    """Reduction for Varrule: look the variable name up in Context."""
    varName = list[0]
    if Context.has_key(varName):
        return Context[varName]
    else:
        # unbound variable: report it by name
        raise NameError, "no such lisp variable in context "+varName
def NilTail( list, Context ):
    """Reduction for TailEmpty: a close paren ends the list, so the
    tail is empty."""
    return []
def AddToList( list, Context ):
    """Reduction for TailFull: prepend the value (list[0]) to the
    already-built rest of the list (list[1])."""
    head = list[0]
    tail = list[1]
    combined = [head]
    combined.extend(tail)
    return combined
def MakeList( list, Context ):
    """Reduction for ListRule: discard the open paren (list[0]) and
    keep the accumulated tail (list[1])."""
    tail = list[1]
    return tail
def DoSetq( list, Context):
    """Reduction for SetqRule: bind the variable name (list[2]) to the
    value (list[3]) in Context; a setq form evaluates to that value."""
    varName = list[2]
    value = list[3]
    Context[varName] = value
    return value
def DoPrint( list, Context ):
    """Reduction for PrintRule: print the value (list[2]) and return it."""
    print list[2]
    return list[2]
def BindRules(Grammar):
    """Associate every rule name from GRAMMARSTRING with its reduction
    function (needed both when building and when reloading)."""
    bindings = (
        ("Intrule", EchoValue),
        ("Strrule", EchoValue),
        ("Varrule", VarValue),
        ("TailEmpty", NilTail),
        ("TailFull", AddToList),
        ("ListRule", MakeList),
        ("SetqRule", DoSetq),
        ("PrintRule", DoPrint),
    )
    for (ruleName, reduction) in bindings:
        Grammar.Bind(ruleName, reduction)
# This function generates the grammar and dumps it to a file.
def GrammarBuild():
    """Compile the lisp grammar from GRAMMARSTRING, dump it both as
    generated python source and as a marshalled binary file, and return
    the ready-to-use grammar object."""
    import kjParseBuild
    LispG = kjParseBuild.NullCGrammar()
    LispG.SetCaseSensitivity(0) # grammar is not case sensitive for keywords
    DeclareTerminals(LispG)
    LispG.Keywords("setq print")
    LispG.punct("().")
    LispG.Nonterms("Value ListTail")
    LispG.comments([LISPCOMMENTREGEX])
    LispG.Declarerules(GRAMMARSTRING)
    LispG.Compile()
    print "dumping as python to "+COMPILEDFILENAME
    outfile = open(COMPILEDFILENAME, "w")
    LispG.Reconstruct("LispG",outfile,"GRAMMAR")
    outfile.close()
    print "dumping as binary to "+MARSHALLEDFILENAME
    outfile = open(MARSHALLEDFILENAME, "w")
    LispG.MarshalDump(outfile)
    outfile.close()
    # function bindings are not serialized; rebind for the live object
    BindRules(LispG)
    return LispG
# this function initializes the compiled grammar from the generated file.
def LoadLispG():
    """Import the generated TESTLispG module and rebuild the grammar
    object from it, reattaching terminals and rule bindings."""
    import TESTLispG
    # reload to make sure we get the most recent version!
    # (only needed when debugging the grammar).
    reload(TESTLispG)
    LispG = TESTLispG.GRAMMAR()
    DeclareTerminals(LispG)
    BindRules(LispG)
    return LispG
def unMarshalLispG():
    """Rebuild the grammar from its marshalled (binary) dump, then
    reattach terminals and rule bindings (these are not marshalled)."""
    import kjParser
    dump = open(MARSHALLEDFILENAME, "r")
    LispG = kjParser.UnMarshalGram(dump)
    dump.close()
    DeclareTerminals(LispG)
    BindRules(LispG)
    return LispG
########## test the grammar generation
if REGENERATEONLOAD:
    print "(re)generating the LispG grammar in file TESTLispG.py"
    Dummy = GrammarBuild()
    print "(re)generation done."
print "loading grammar as python"
LispG = LoadLispG()
### declare an initial context, and do some tests.
Context = { 'x':3 }
test1 = LispG.DoParse1( '()', Context)
test2 = LispG.DoParse1( '(123)', Context)
test3 = LispG.DoParse1( '(x)', Context)
test4 = LispG.DoParse1( '" a string "', Context)
test5 = LispG.DoParse1( '(setq y (1 2 3) )', Context )
test6 = LispG.DoParse1( '(SeTq x ("a string" "another" 0))', Context )
# multi-line input exercising lisp comments and nesting
test7str = """
; this is a lisp comment
(setq abc (("a" x)
("b" (setq d 12))
("c" y) ) ; another lisp comment
)
"""
test7 = LispG.DoParse1( test7str, Context)
test8 = LispG.DoParse1( '(print (1 x d))', Context)
# repeat the same tests against the unmarshalled copy of the grammar
print "unmarshalling the grammar"
LispG2 = unMarshalLispG()
### declare an initial context, and do some tests.
Context = { 'x':3 }
test1 = LispG2.DoParse1( '()', Context)
test2 = LispG2.DoParse1( '(123)', Context)
test3 = LispG2.DoParse1( '(x)', Context)
test4 = LispG2.DoParse1( '" a string "', Context)
test5 = LispG2.DoParse1( '(setq y (1 2 3) )', Context )
test6 = LispG2.DoParse1( '(SeTq x ("a string" "another" 0))', Context )
test7str = """
; this is a lisp comment
(setq abc (("a" x)
("b" (setq d 12))
("c" y) ) ; another lisp comment
)
"""
test7 = LispG2.DoParse1( test7str, Context)
test8 = LispG2.DoParse1( '(print (1 x d))', Context)
#set this to automatically rebuild the grammar.
REBUILD = 1
# file used to store the marshalled grammar
MARSHALFILE = "SQLTEST.mar"
# grammar rules for a skeletal fragment of the SQL select statement
SELECTRULES = """
## highest level for select statement (not select for update)
select-statement ::
@R selectR :: select-statement >>
SELECT
from-clause
where-clause
group-by-clause
having-clause
## generalized to allow null from clause eg: select 2+2
@R fromNull :: from-clause >>
@R fromFull :: from-clause >> FROM
@R whereNull :: where-clause >>
@R whereFull :: where-clause >> WHERE
@R groupNull :: group-by-clause >>
@R groupFull :: group-by-clause >> GROUP BY
@R havingNull :: having-clause >>
@R havingFull :: having-clause >> HAVING
@R unionNull :: union-clause >>
@R unionFull :: union-clause >> UNION
"""
# nonterminal names referenced by the rules above
SELECTNONTERMS = """
select-statement
all-distinct select-list table-reference-list
where-clause group-by-clause having-clause union-clause
maybe-order-by
search-condition column-list maybe-all order-by-clause
column-name from-clause
"""
# of these the following need resolution
# (select-list) (table-reference-list)
# (search-condition) order-by-clause (column-name)
# keywords of the select fragment
SELECTKEYWORDS = """
SELECT FROM WHERE GROUP BY HAVING UNION DISTINCT ALL AS
"""
# test generation of the grammar
def BuildSQLG():
    """Compile the SQL-fragment grammar, marshal it to MARSHALFILE,
    and return the compiled grammar object."""
    import kjParseBuild
    SQLG = kjParseBuild.NullCGrammar()
    SQLG.SetCaseSensitivity(0)
    SQLG.Keywords(SELECTKEYWORDS)
    SQLG.Nonterms(SELECTNONTERMS)
    # no comments yet
    SQLG.Declarerules(SELECTRULES)
    print "building"
    SQLG.Compile()
    print "marshaling"
    outfile = open( MARSHALFILE, "w")
    SQLG.MarshalDump(outfile)
    outfile.close()
    return SQLG
# load function
def LoadSQLG():
    """Reconstruct the SQL-fragment grammar from its marshal file."""
    import kjParser
    print "unmarshalling"
    infile = open(MARSHALFILE, "r")
    SQLG = kjParser.UnMarshalGram(infile)
    infile.close()
    return SQLG
#### for testing
if REBUILD:
    # build first so the marshal file exists, then reload it to verify
    # the round trip
    SQLG0 = BuildSQLG()
    print " rebuilt SQLG0 as compilable grammar"
    SQLG = LoadSQLG()
    print " build SQLG as reloaded grammar"
### maybe eventually implement disj-conj-eq optimizations
### note: for multithreading x.relbind(...) should ALWAYs return
### a fresh copy of structure (sometimes in-place now).
### note: binding of order by is dubious with archiving,
### should not bind IN PLACE, leave unbound elts alone!
### need to fix serialization/deserialization of btand and btor
###
# use kjbuckets builtin if available
try:
import kjbuckets
except ImportError:
import kjbuckets0
kjbuckets = kjbuckets0
Tuple = kjbuckets.kjDict
Graph = kjbuckets.kjGraph
Set = kjbuckets.kjSet
import sys, traceback
### debug
#sys.stderr = sys.stdin
# operations on simple tuples, mostly from kjbuckets
#def maketuple(thing):
# """try to make a tuple from thing.
# thing should be a dictionary or sequence of (name, value)
# or other tuple."""
# from types import DictType
# if type(thing)==DictType:
# return Tuple(thing.items() )
# else: return Tuple(thing)
def no_ints_nulls(list):
    """in place remove all ints, Nones from a list (for null handling)

    Surviving elements are compacted to the front and the tail is
    deleted; the (mutated) list is also returned for convenience.
    Uses the builtin int directly instead of the deprecated
    types.IntType alias (they are the same object in Python 2).
    """
    count = 0
    for x in list:
        # exact type check: bools and longs are deliberately kept
        if type(x) is not int and x is not None:
            list[count] = x
            count = count + 1
    # chop off the leftover tail after compaction
    del list[count:]
    return list
# stuff for bound tuples.
class HashJoiner:
    """Join one relation into a sequence of partial variable bindings.

    bt is the governing BoundTuple (equality constraints plus constant
    assignments); relname/attributes/relation identify the relation to
    join in; witness supplies already-known values, which determine the
    attributes usable to drive an index or hash lookup.
    """
    def __init__(self, bt, relname, attributes, relation, witness):
        self.relname = relname
        self.attributes = attributes
        self.relation = relation
        self.witness = witness
        self.bt = bt
        eqs = bt.eqs
        # transform: bound name (relname, att) -> plain attribute att
        transform = self.transform = kjbuckets.kjDict()
        # rbindings: identity set of this relation's bound names
        rbindings = self.rbindings = kjbuckets.kjSet()
        for a in attributes:
            b = (relname, a)
            transform[b] = a
            rbindings[b] = b
        # extend the equalities with trivial self-equalities for this
        # relation's attributes
        self.eqs = eqs = eqs + kjbuckets.kjGraph(rbindings)
        witness = witness.remap(eqs)
        # bound attributes of this relation made known by the witness
        known = kjbuckets.kjSet(witness.keys()) & rbindings
        batts = tuple(known.items())
        if not batts:
            atts = ()
        elif len(batts)==1:
            atts = ( transform[batts[0]], )
        else:
            atts = transform.dump(batts)
        # atts: plain attribute names usable for hashing/index probing;
        # batts: the corresponding bound (relname, att) keys
        self.atts = atts
        self.batts = batts
        self.transform = transform
        eqs = bt.eqs
        #eqs = (rbindings * eqs)
        self.eqs = eqs = eqs + kjbuckets.kjGraph(rbindings)
        # transformG: bound names (through equalities) -> plain attributes
        self.transformG = transformG = eqs * transform
        assns = self.assns = bt.assns
        # rassns: constant assignments restated in this relation's own
        # attribute names (None if the constraints are inconsistent)
        self.rassns = assns.remap( ~transformG )
    def relbind(self, db, atts):
        # resolve the underlying relation if it is a view (in place)
        rel = self.relation
        if rel.is_view:
            self.relation = rel.relbind(db, atts)
        return self
    def uncache(self):
        # clear cached data in a view before (re)execution
        rel = self.relation
        if rel.is_view:
            self.relation.uncache()
    def join(self, subseq):
        """extend each binding in subseq with matching rows of the
        relation; return the list of combined bindings."""
        relname = self.relname
        result = []
        assns = self.assns
        if not subseq: return result
        # apply equalities to unitary subseq (embedded subq)
        if len(subseq)==1:
            subseq0 = subseq[0]
            subseq0r = subseq0.remap(self.eqs)
            if subseq0r is None:
                return [] # inconsistent
            subseq0 = subseq0 + subseq0r + assns
            if subseq0.Clean() is None:
                return [] # inconsistent
            subseq = [subseq0]
        rassns = self.rassns
        if rassns is None:
            # constant assignments were contradictory: empty join
            return []
        relation = self.relation
        transformG = self.transformG
        transform = self.transform
        atts = self.atts
        batts = self.batts
        if not batts:
            # no shared known attributes: cross product (with the
            # constant assignments applied as a preselection)
            tuples = relation.rows()
            for t in tuples:
                if rassns:
                    t = (t + rassns).Clean()
                    if t is None:
                        # preselect fails
                        continue
                new = t.remap(transformG)
                if new is None:
                    # transform fails
                    continue
                for subst in subseq:
                    if subst:
                        add = (subst + new).Clean()
                    else:
                        add = new
                    if add is not None:
                        result.append(add)
        else:
            # hash join
            # first try to use an index
            index = relation.choose_index(atts)
            if index is not None:
                # index join: probe the index with values from each subst
                atts = index.attributes()
                invtransform = ~transform
                if len(atts)==1:
                    batts = (invtransform[atts[0]],)
                else:
                    batts = invtransform.dump(atts)
                hash_tups = 1 # (unused flag)
                tindex = index.index
                # memoize converted tuples
                tindex0 = {}
                test = tindex.has_key
                test0 = tindex0.has_key
                for i in xrange(len(subseq)):
                    subst = subseq[i]
                    its = subst.dump(batts)
                    othersubsts = []
                    if test0(its):
                        # already converted for an earlier subst
                        othersubsts = tindex0[its]
                    elif test(its):
                        tups = tindex[its]
                        for t in tups:
                            t = (t+rassns).Clean()
                            if t is None: continue
                            new = t.remap(transformG)
                            if new is None: continue
                            othersubsts.append(new)
                    # NOTE(review): misses appear to be memoized as [] too
                    tindex0[its] = othersubsts
                    for other in othersubsts:
                        add = (other + subst).Clean()
                        if add is not None:
                            result.append(add)
            # hash join
            else:
                tuples = relation.rows()
                if len(subseq)<len(tuples):
                    # hash the (smaller) binding side
                    subseqindex = {}
                    test = subseqindex.has_key
                    for i in xrange(len(subseq)):
                        subst = subseq[i]
                        its = subst.dump(batts)
                        if test(its):
                            subseqindex[its].append(subst)
                        else:
                            subseqindex[its] = [ subst ]
                    for t in tuples:
                        if rassns:
                            t = (t+rassns).Clean()
                            if t is None:
                                # preselect fails
                                continue
                        its = t.dump(atts)
                        if test(its):
                            new = t.remap(transformG)
                            if new is None:
                                # transform fails
                                continue
                            l = subseqindex[its]
                            for subst in l:
                                add = (subst + new).Clean()
                                if add is not None:
                                    result.append(add)
                else:
                    # hash the (smaller) tuple side
                    tindex = {}
                    test = tindex.has_key
                    for i in xrange(len(tuples)):
                        t = tuples[i]
                        if rassns:
                            t = (t + rassns).Clean()
                            if t is None:
                                # preselect fails
                                continue
                        new = t.remap(transformG)
                        if new is None:
                            # transform fails
                            continue
                        its = t.dump(atts)
                        if test(its):
                            tindex[its].append(new)
                        else:
                            tindex[its] = [ new ]
                    for subst in subseq:
                        its = subst.dump(batts)
                        if test(its):
                            n = tindex[its]
                            for new in n:
                                add = (subst + new).Clean()
                                if add is not None:
                                    result.append(add)
        return result
### essentially, specialized pickle for this app:
def deserialize(description):
    """simple protocol for generating a marshallable ob

    Inverse of serialize(): base types pass through, ("tuple", t) and
    ("list", l) are special-cased, and any other (name, desc) pair is
    rebuilt as the sqlsem class of that name via initargs/demarshal.
    """
    from types import TupleType
    if type(description) is not TupleType:
        return description # base type
    try:
        (name, desc) = description
    except:
        return description # base type
    if name == "tuple":
        # tuple case
        return desc
    ### other special cases here...
    if name == "list":
        # list case: map deserialize across desc
        return map(deserialize, desc)
    # all other cases are classes of sqlsem
    import sqlsem
    klass = getattr(sqlsem, name)
    (args1, args2) = desc
    # constructor arguments are themselves serialized; recurse
    args1 = tuple(map(deserialize, args1))
    ob = apply(klass, args1)
    # restore any extra marshalled state
    ob.demarshal(args2)
    return ob
def serialize(ob):
    """dual of deserialize.

    Objects supporting the initargs()/marshaldata() protocol become
    (classname, (serialized_initargs, marshaldata)); anything else is
    assumed to be a marshallable base type and returned unchanged.
    """
    from types import ListType # (unused; kept from earlier list handling)
    tt = type(ob)
    # for lists map serialize across members
    #if tt is ListType:
    #    return ("list", map(serialize, ob))
    try:
        args1 = ob.initargs()
        args1 = tuple(map(serialize, args1))
        args2 = ob.marshaldata()
        return (ob.__class__.__name__, (args1, args2))
    except:
        from types import InstanceType
        # instances are required to support the protocol; base types
        # are allowed to fall through
        if tt is InstanceType:
            raise ValueError, "couldn't serialize %s %s" % (
                tt, ob.__class__)
        # assume base type otherwise
        return ob
# invariant:
# deserialize(serialize(ob)) returns semantic copy of ob
# serialize(ob) is marshallable
# ie,
# args1 = ob.initargs() # init args
# args1d = map(serialize, args1) # serialized
# args2 = ob.marshaldata() # marshalable addl info
# # assert args1d, args2 are marshallable
# args1copy = map(deserialize, args1)
# ob2 = ob.__class__(args1copy)
# ob2 = ob2.demarshal(args2)
# # assert ob2 is semantic copy of ob
class SimpleRecursive:
    """Simple Recursive structure, only requires initargs.

    Base for objects that round-trip through serialize()/deserialize()
    using their constructor arguments alone: there is no additional
    marshallable state to save or restore.
    """
    def demarshal(self, args):
        # nothing beyond initargs to restore
        pass
    def marshaldata(self):
        # no extra marshallable state
        return ()
class BoundTuple:
    """A conjunction of constraints over (relation, attribute) pairs:
    eqs holds equalities between bound attributes, assns holds constant
    assignments.  Used to drive join planning and predicate evaluation.
    """
    clean = 1 # false if inconsistent
    closed = 0 # true if equality constraints inferred
    def __init__(self, **bindings):
        """bindings are name>simpletuple associations."""
        self.eqs = Graph()
        self.assns = Tuple()
        for (name, simpletuple) in bindings.items():
            self.bind(name, simpletuple)
    def initargs(self):
        return ()
    def marshaldata(self):
        return (self.eqs.items(), self.assns.items(), self.clean, self.closed)
    def demarshal(self, args):
        (eitems, aitems, self.clean, self.closed) = args
        self.eqs = kjbuckets.kjGraph(eitems)
        self.assns = kjbuckets.kjDict(aitems)
    def relbind(self, dict, db):
        """return bindings of self wrt dict rel>att

        A None relation name means the attribute was unqualified; it is
        resolved through dict or reported as ambiguous/unknown.
        """
        result = BoundTuple()
        e2 = result.eqs
        a2 = result.assns
        for ((a,b), (c,d)) in self.eqs.items():
            if a is None:
                try:
                    a = dict[b]
                except KeyError:
                    raise NameError, `b`+": ambiguous or unknown attribute"
            if c is None:
                try:
                    c = dict[d]
                except KeyError:
                    raise NameError, `d`+": ambiguous or unknown attribute"
            e2[(a,b)] = (c,d)
        for ((a,b), v) in self.assns.items():
            if a is None:
                try:
                    a = dict[b]
                except KeyError:
                    raise NameError, `b`+": ambiguous or unknown attribute"
            a2[(a,b)] = v
        result.closed = self.closed
        result.clean = self.clean
        return result
    def relorder(self, db, allrels):
        """based on known constraints, pick an
        ordering for materializing relations.
        db is database (ignored currently)
        allrels is names of all relations to include (list).

        Heuristic: prefer relations with a usable index, then relations
        "pinned" by a known constant, then relations related (equated)
        to an already-chosen one; everything else is appended at the end.
        """
        ### not very smart about indices yet!!!
        if len(allrels)<2:
            # doesn't matter
            return allrels
        order = []
        eqs = self.eqs
        assns = self.assns
        kjSet = kjbuckets.kjSet
        kjGraph = kjbuckets.kjGraph
        pinned = kjSet()
        has_index = kjSet()
        needed = kjSet(allrels)
        akeys = assns.keys()
        for (r,a) in akeys:
            pinned[r]=r # pinned if some value known
        known_map = kjGraph(akeys)
        for r in known_map.keys():
            rknown = known_map.neighbors(r)
            if db.has_key(r):
                rel = db[r]
                index = rel.choose_index(rknown)
                if index is not None:
                    has_index[r] = r # has an index!
        if pinned: pinned = pinned & needed
        if has_index: has_index = has_index & needed
        related = kjGraph()
        for ( (r1, a1), (r2, a2) ) in eqs.items():
            related[r1]=r2 # related if equated to other
            related[r2]=r1 # redundant if closed.
        if related: related = needed * related * needed
        chosen = kjSet()
        pr = kjSet(related) & pinned
        # choose first victim
        if has_index:
            choice = has_index.choose_key()
        elif pr:
            choice = pr.choose_key()
        elif pinned:
            choice = pinned.choose_key()
        elif related:
            choice = related.choose_key()
        else:
            return allrels[:] # give up!
        while pinned or related or has_index:
            order.append(choice)
            chosen[choice] = 1
            if pinned.has_key(choice):
                del pinned[choice]
            if related.has_key(choice):
                del related[choice]
            if has_index.has_key(choice):
                del has_index[choice]
            nexts = related * chosen
            if nexts:
                # prefer a relation related to chosen
                choice = nexts.choose_key()
            elif pinned:
                # otherwise one that is pinned
                choice = pinned.choose_key()
            elif related:
                # otherwise one that relates to something...
                choice = related.choose_key()
        others = kjSet(allrels) - chosen
        if others: order = order + others.items()
        return order
    def domain(self):
        # all (rel, att) pairs mentioned by any constraint
        kjSet = kjbuckets.kjSet
        return kjSet(self.eqs) + kjSet(self.assns)
    def __repr__(self):
        from string import join
        result = []
        for ( (name, att), value) in self.assns.items():
            result.append( "%s.%s=%s" % (name, att, `value`) )
        for ( (name, att), (name2, att2) ) in self.eqs.items():
            result.append( "%s.%s=%s.%s" % (name, att, name2, att2) )
        if self.clean:
            if not result: return "TRUE"
        else:
            result.insert(0, "FALSE")
        result.sort()
        return join(result, " & ")
    def equate(self, equalities):
        """add equalities to self, only if not closed.
        equalities should be seq of ( (name, att), (name, att) )
        """
        if self.closed: raise ValueError, "cannot add equalities! Closed!"
        e = self.eqs
        for (a, b) in equalities:
            e[a] = b
    def close(self):
        """infer equalities, if consistent.
        only recompute equality closure if not previously closed.
        return None on inconsistency.
        """
        neweqs = self.eqs
        if not self.closed:
            self.eqs = neweqs = (neweqs + ~neweqs).tclosure() # sym, trans closure
            self.closed = 1
        # add trivial equalities to self
        for x in self.assns.keys():
            if not neweqs.member(x,x):
                neweqs[x] = x
        # propagate constants along equalities; None means contradiction
        newassns = self.assns.remap(neweqs)
        if newassns is not None and self.clean:
            self.assns = newassns
            #self.clean = 1
            return self
        else:
            self.clean = 0
            return None
    def share_eqs(self):
        """make clone of self that shares equalities, closure.
        note: will share future side effects to eqs too."""
        result = BoundTuple()
        result.eqs = self.eqs
        result.closed = self.closed
        return result
    def __add__(self, other):
        """combine self with other, return closure."""
        result = self.share_eqs()
        se = self.eqs
        oe = other.eqs
        if (se is not oe) and (se != oe):
            result.eqs = se + oe
            result.closed = 0
        ra= result.assns = self.assns + other.assns
        result.clean = result.clean and (ra.Clean() is not None)
        return result.close()
    def __and__(self, other):
        """return closed constraints common to self and other."""
        result = BoundTuple()
        se = self.eqs
        oe = other.eqs
        if (se is oe) or (se == oe):
            # shared/equal graphs: keep as-is (and the closure flag)
            result.eqs = self.eqs
            result.closed = self.closed
        else:
            result.eqs = self.eqs & other.eqs
        result.assns = self.assns & other.assns
        result.clean = self.clean and other.clean
        return result.close()
    def __hash__(self):
        # note: equalities don't enter into hash computation!
        # (some may be spurious)
        self.close()
        return hash(self.assns)# ^ hash(self.eqs)
    def __cmp__(self, other):
        test = cmp(self.__class__, other.__class__)
        if test: return test
        sa = self.assns
        oa = other.assns
        test = cmp(sa, oa)
        if test: return test
        kjSet = kjbuckets.kjSet
        # NOTE(review): the next line binds the name kjGraph to
        # kjbuckets.kjSet -- looks like it should be kjbuckets.kjGraph;
        # verify before changing (behavior-affecting).
        kjGraph = kjbuckets.kjSet
        # compare equality graphs with reflexive pairs removed
        se = self.eqs
        se = kjGraph(se) - kjGraph(kjSet(se))
        oe = other.eqs
        oe = kjGraph(oe) - kjGraph(kjSet(oe))
        return cmp(se, oe)
class BoundExpression(SimpleRecursive):
    """superclass for all bound expressions.
    except where overloaded expressions are binary
    with self.left and self.right
    """
    contains_aggregate = 0 # default
    def __init__(self, left, right):
        self.left = left
        self.right = right
        # an expression is aggregate if either operand is
        self.contains_aggregate = left.contains_aggregate or right.contains_aggregate
    def initargs(self):
        return (self.left, self.right)
    def uncache(self):
        """prepare for execution, clear cached data."""
        self.left.uncache()
        self.right.uncache()
    # eventually add converters...
    def equate(self,other):
        """return predicate equating self and other.
        Overload for special cases, please!"""
        return NontrivialEqPred(self, other)
    def attribute(self):
        # expressions are identified by their printed representation
        return (None, `self`)
    def le(self, other):
        """predicate self<=other"""
        return LessEqPred(self, other)
    # these should be overridden for 2 const case someday...
    def lt(self, other):
        """predicate self<other"""
        return LessPred(self, other)
    def __coerce__(self, other):
        return (self, other)
    def __add__(self, other):
        return BoundAddition(self, other)
    def __sub__(self, other):
        return BoundSubtraction(self, other)
    def __mul__(self, other):
        return BoundMultiplication(self, other)
    def __neg__(self):
        return BoundMinus(self)
    def __div__(self, other):
        return BoundDivision(self, other)
    def relbind(self, dict, db):
        # rebuild with both operands bound against the rel>att dict
        Class = self.__class__
        return Class(self.left.relbind(dict, db), self.right.relbind(dict, db))
    def __repr__(self):
        return "(%s)%s(%s)" % (self.left, self.op, self.right)
    def domain(self):
        return self.left.domain() + self.right.domain()
    # always overload value
class BoundMinus(BoundExpression, SimpleRecursive):
    """Unary negation of a bound expression; also reused as a generic
    single-operand base by several other classes (self.thing)."""
    def __init__(self, thing):
        self.thing = thing
        self.contains_aggregate = thing.contains_aggregate
    def initargs(self):
        return (self.thing,)
    def __repr__(self):
        return "-(%s)" % (self.thing,)
    def value(self, contexts):
        from types import IntType
        tt = type
        result = self.thing.value(contexts)
        for i in xrange(len(contexts)):
            # int entries in contexts appear to be placeholders for
            # filtered rows (see no_ints_nulls) -- those are skipped
            if tt(contexts[i]) is not IntType:
                result[i] = -result[i]
        return result
    def relbind(self, dict, db):
        Class = self.__class__
        return Class(self.thing.relbind(dict,db))
    def uncache(self):
        self.thing.uncache()
    def domain(self):
        return self.thing.domain()
### stuff for aggregation
class Average(BoundMinus):
contains_aggregate = 1
def __init__(self, expr, distinct=0):
self.distinct = distinct
if expr.contains_aggregate:
raise ValueError, `expr`+": aggregate in aggregate "+self.name
self.thing = expr
name = "Average"
def __repr__(self):
distinct = ""
if self.distinct:
distinct = "distinct "
return "%s(%s%s)" % (self.name, distinct, self.thing)
def relbind(self, dict, db):
Class = self.__class__
return Class(self.thing.relbind(dict,db), self.distinct)
def value(self, contexts):
if not contexts: return [] # ???
test = contexts[0]
if not test.has_key(None):
return [self.all_value(contexts)]
else:
return self.agg_value(contexts)
def dvalues(self, values):
d = {}
for i in xrange(len(values)):
d[values[i]] = 1
return d.keys()
def all_value(self, contexts):
thing = self.thing
values = self.clean(thing.value(contexts), contexts)
if self.distinct:
values = self.dvalues(values)
return self.op(values)
def clean(self, values, contexts):
D = {}
from types import IntType
tt = type
for i in xrange(len(contexts)):
if tt(contexts[i]) is not IntType:
D[i] = values[i]
return D.values()
def agg_value(self, contexts):
from types import IntType
tt = type
result = list(contexts)
for i in xrange(len(contexts)):
context = contexts[i]
if tt(context) is not IntType:
result[i] = self.all_value( context[None] )
return result
def op(self, values):
sum = 0
for x in values:
sum = sum+x
return sum/(len(values)*1.0)
class Sum(Average):
    """Aggregate summing the values of a group (0 for an empty group)."""
    name = "Sum"
    def op(self, values):
        if not values: return 0
        # start from the first element (not 0) so any "+"-capable type
        # works, e.g. string concatenation
        total = values[0]
        for item in values[1:]:
            total = total + item
        return total
class Median(Average):
    """Aggregate returning the middle element of the sorted group."""
    name = "Median"
    def op(self, values):
        if not values:
            raise ValueError, "Median of empty set"
        values = list(values)
        values.sort()
        lvals = len(values)
        # upper median for even-sized groups (integer division)
        return values[lvals/2]
class Maximum(Average):
    """Aggregate returning the largest value of a group."""
    name = "Maximum"
    def op(self, values):
        # builtin max; raises ValueError on an empty group
        return max(values)
class Minimum(Average):
    """Aggregate returning the smallest value of a group."""
    name = "Minimum"
    def op(self, values):
        # builtin min; raises ValueError on an empty group
        return min(values)
class Count(Average):
    """SQL COUNT aggregate: COUNT(*) counts surviving rows, COUNT(expr)
    counts (optionally distinct) expression values."""
    name = "Count"
    distinct = 0
    def __init__(self, thing, distinct = 0):
        # COUNT(*) has no subexpression to bind or evaluate
        if thing=="*":
            self.thing = "*"
        else:
            Average.__init__(self, thing, distinct)
    def domain(self):
        if self.thing=="*":
            return kjbuckets.kjSet()
        return self.thing.domain()
    def all_value(self, contexts):
        if self.thing=="*" or not self.distinct:
            # count the surviving contexts (clean drops int placeholders)
            return len(self.clean(contexts, contexts))
        # distinct count of expression values
        return Average.all_value(self, contexts)
    def op(self, values):
        return len(values)
    def relbind(self, dict, db):
        if self.thing=="*":
            return self
        return Average.relbind(self, dict, db)
    def uncache(self):
        if self.thing!="*": self.thing.uncache()
def aggregate(assignments, exprlist):
    """aggregates are a assignments with special
    attribute None > list of subtuple

    Group the assignment tuples by the values of the expressions in
    exprlist; each result tuple holds the group's expression values
    plus the member subtuples stored under the None key.
    """
    lexprs = len(exprlist)
    if lexprs<1:
        raise ValueError, "aggregate on no expressions?"
    lassns = len(assignments)
    pairs = list(exprlist)
    for i in xrange(lexprs):
        expr = exprlist[i]
        attributes = [expr.attribute()]*lassns
        values = expr.value(assignments)
        # pairs[i][j] is the (attribute, value) for expr i, assignment j
        pairs[i] = map(None, attributes, values)
    if lexprs>1:
        # transpose: one tuple of (attribute, value) pairs per assignment
        newassnpairs = apply(map, (None,)+tuple(pairs))
    else:
        newassnpairs = pairs[0]
    xassns = range(lassns) # (unused)
    # group assignments by their (hashable) expression-value pairs
    dict = {}
    test = dict.has_key
    for i in xrange(lassns):
        thesepairs = newassnpairs[i]
        thissubassn = assignments[i]
        if test(thesepairs):
            dict[thesepairs].append(thissubassn)
        else:
            dict[thesepairs] = [thissubassn]
    items = dict.items()
    result = list(items)
    kjDict = kjbuckets.kjDict
    if lexprs>1:
        for i in xrange(len(items)):
            (pairs, subassns) = items[i]
            D = kjDict(pairs)
            # group members stored under the None key
            D[None] = subassns
            result[i] = D
    else:
        for i in xrange(len(items)):
            (pair, subassns) = items[i]
            result[i] = kjDict( [pair, (None, subassns)] )
    return result
### stuff for order_by
class DescExpr(BoundMinus):
    """special wrapper used only for order by descending
    for things with no -thing operation (eg, strings)"""
    def __init__(self, thing):
        self.thing = thing
        self.contains_aggregate = thing.contains_aggregate
    def value(self, contexts):
        """negate numbers, wrap strings in descOb; mixing the two in
        one column is rejected since they cannot be compared."""
        from types import IntType, StringType
        tt = type
        result = self.thing.value(contexts)
        allwrap = None
        allnowrap = None
        for i in xrange(len(contexts)):
            if tt(contexts[i]) is not IntType:
                resulti = result[i]
                # currently assume only value needing wrapping is string
                if tt(resulti) is StringType:
                    if allnowrap is not None:
                        raise ValueError, "(%s, %s) cannot order desc" % (allnowrap, resulti)
                    allwrap = resulti
                    result[i] = descOb(resulti)
                else:
                    if allwrap is not None:
                        raise ValueError, "(%s, %s) cannot order desc" % (allwrap, resulti)
                    allnowrap = resulti
                    result[i] = -resulti
        return result
    def __repr__(self):
        return "DescExpr(%s)" % (self.thing,)
    def orderbind(self, order):
        """order is list of (att, expr)."""
        Class = self.__class__
        return Class(self.thing.orderbind(order))
class SimpleColumn(SimpleRecursive):
    """A bare column name applied directly to a list of simple tuples
    (used for ORDER BY once the select list has been materialized)."""
    contains_aggregate = 0
    def __init__(self, name):
        self.name = name
    def initargs(self):
        return (self.name,)
    def relbind(self, dict, db):
        # the name is already final; nothing to resolve
        return self
    def orderbind(self, whatever):
        # the name is already final; nothing to resolve
        return self
    def value(self, simpletuples):
        """Extract self.name from each tuple; integer entries mark
        eliminated rows and yield None."""
        from types import IntType
        name = self.name
        result = list(simpletuples)
        for index in xrange(len(result)):
            entry = result[index]
            if type(entry) is IntType:
                result[index] = None # eliminated row placeholder
            else:
                result[index] = entry[name]
        return result
    def __repr__(self):
        return "<SimpleColumn %s>" % (self.name,)
class NumberedColumn(BoundMinus):
    """order by column number (1-based select-list position)"""
    contains_aggregate = 0
    def __init__(self, num):
        self.thing = num
    def __repr__(self):
        return "<NumberedColumn %s>" % (self.thing,)
    def relbind(self, dict, db):
        # only validates that the position is an integer; no names to bind
        from types import IntType
        if type(self.thing)!=IntType:
            raise ValueError, `self.thing`+": not a numbered column"
        return self
    def orderbind(self, order):
        # order is a list of (att, expr); positions are 1-based,
        # hence the -1
        return SimpleColumn( order[self.thing-1][0] )
class OrderExpr(BoundMinus):
    """order by expression: matched against the select list by attribute."""
    def orderbind(self, order):
        """Find the select-list entry whose expression attribute matches
        ours and order by that output column.  The for/else raises only
        when the loop completes without returning (no match found)."""
        expratt = self.thing.attribute()
        for (att, exp) in order:
            if exp.attribute()==expratt:
                return SimpleColumn(att)
        else:
            raise NameError, `self`+": invalid ordering specification"
    def __repr__(self):
        return "<order expression %s>" % (self.thing,)
class descOb:
    """Sort-inverting wrapper used only for ORDER BY ... DESC.

    Should only ever be compared with other descOb instances, and
    should wrap only values that cannot easily be order-inverted
    directly (e.g. strings).
    """
    def __init__(self, ob):
        self.ob = ob
    def __cmp__(self, other):
        # invert the natural ordering of the wrapped values
        return -cmp(self.ob, other.ob)
    def __coerce__(self, other):
        # suppress numeric coercion; compare the objects as given
        return (self, other)
    def __hash__(self):
        return hash(self.ob)
    def __repr__(self):
        return "descOb(%s)" % (self.ob,)
def PositionedSort(num, ord):
    """Build an ORDER BY term for a select-list column position;
    ord is "DESC" for descending, anything else for ascending."""
    column = NumberedColumn(num)
    if ord == "DESC":
        return DescExpr(column)
    return column
def NamedSort(name, ord):
    """Build an ORDER BY term for a named expression;
    ord is "DESC" for descending, anything else for ascending."""
    expression = OrderExpr(name)
    if ord == "DESC":
        return DescExpr(expression)
    return expression
def relbind_sequence(order_list, dict, db):
    """Relation-bind every ordering term, returning a new list."""
    return [term.relbind(dict, db) for term in order_list]
def orderbind_sequence(order_list, order):
    """Order-bind every ordering term, returning a new list."""
    return [term.orderbind(order) for term in order_list]
def order_tuples(order_list, tuples):
    """Sort tuples by the (already order-bound) terms in order_list.

    Builds a sort key per tuple from the term values, buckets the
    tuples by key in a kjGraph, then walks the sorted keys emitting
    each bucket's tuples.  Returns a new sorted list.
    """
    lorder_list = len(order_list)
    ltuples = len(tuples)
    if lorder_list<1:
        raise ValueError, "order on empty list?"
    # order_map[i] = values of term i over all tuples
    order_map = list(order_list)
    for i in xrange(lorder_list):
        order_map[i] = order_list[i].value(tuples)
    # transpose into one composite sort key per tuple
    if len(order_map)>1:
        order_vector = apply(map, (None,)+tuple(order_map) )
    else:
        order_vector = order_map[0]
    #G = kjbuckets.kjGraph()
    # pair each tuple with its original index so duplicates survive
    pairs = map(None, range(ltuples), tuples)
    ppairs = map(None, order_vector, pairs)
    G = kjbuckets.kjGraph(ppairs)
    #for i in xrange(ltuples):
    #    G[ order_vector[i] ] = (i, tuples[i])
    Gkeys = G.keys()
    Gkeys.sort()
    result = list(tuples)
    index = 0
    for x in Gkeys:
        #print x
        for (i, y) in G.neighbors(x):
            #print "  ", y
            result[index]=y
            index = index+1
    # sanity check: the graph traversal must reproduce every tuple
    if index!=ltuples:
        raise ValueError, \
        "TUPLE LOST IN ORDERING COMPUTATION! (%s,%s)" % (ltuples, index)
    return result
class BoundAddition(BoundExpression):
    """promised addition."""
    op = "+"
    def value(self, contexts):
        """Elementwise left+right over the bindings; integer entries
        mark eliminated rows and are passed through untouched."""
        from types import IntType
        lefts = self.left.value(contexts)
        rights = self.right.value(contexts)
        for index, context in enumerate(contexts):
            if type(context) is not IntType:
                lefts[index] = lefts[index] + rights[index]
        return lefts
class BoundSubtraction(BoundExpression):
    """promised subtraction."""
    op = "-"
    def value(self, contexts):
        """Elementwise left-right over the bindings; integer entries
        mark eliminated rows and are passed through untouched."""
        from types import IntType
        lefts = self.left.value(contexts)
        rights = self.right.value(contexts)
        for index, context in enumerate(contexts):
            if type(context) is not IntType:
                lefts[index] = lefts[index] - rights[index]
        return lefts
class BoundMultiplication(BoundExpression):
    """promised multiplication."""
    op = "*"
    def value(self, contexts):
        """Elementwise left*right over the bindings; integer entries
        mark eliminated rows and are passed through untouched."""
        from types import IntType
        lefts = self.left.value(contexts)
        rights = self.right.value(contexts)
        for index, context in enumerate(contexts):
            if type(context) is not IntType:
                lefts[index] = lefts[index] * rights[index]
        return lefts
class BoundDivision(BoundExpression):
    """promised division."""
    op = "/"
    def value(self, contexts):
        """Elementwise left/right over the bindings; integer entries
        mark eliminated rows and are passed through untouched."""
        from types import IntType
        lefts = self.left.value(contexts)
        rights = self.right.value(contexts)
        for index, context in enumerate(contexts):
            if type(context) is not IntType:
                lefts[index] = lefts[index] / rights[index]
        return lefts
class BoundAttribute(BoundExpression):
    """bound attribute: initialize with relname=None if
    implicit."""
    contains_aggregate = 0
    def __init__(self, rel, name):
        self.rel = rel
        self.name = name
    def initargs(self):
        return (self.rel, self.name)
    def relbind(self, dict, db):
        """Resolve an implicit (rel=None) attribute to its relation
        using dict; already-bound attributes pass through unchanged."""
        if self.rel is not None: return self
        name = self.name
        try:
            rel = dict[name]
        except KeyError:
            raise NameError, `name` + ": unknown or ambiguous"
        return BoundAttribute(rel, name)
    def uncache(self):
        # attributes hold no cached state
        pass
    def __repr__(self):
        return "%s.%s" % (self.rel, self.name)
    def attribute(self):
        """return (relname, attribute) for self."""
        return (self.rel, self.name)
    def domain(self):
        return kjbuckets.kjSet([ (self.rel, self.name) ])
    def value(self, contexts):
        """return value of self in context (bound tuple);
        integer entries mark eliminated rows and pass through."""
        #print "value of ", self, "in", contexts
        from types import IntType
        tt = type
        result = list(contexts)
        ra = (self.rel, self.name)
        for i in xrange(len(result)):
            if tt(result[i]) is not IntType:
                result[i] = contexts[i][ra]
        return result
    def equate(self, other):
        """Build the cheapest predicate expressing self == other:
        attribute=attribute and attribute=constant become simple
        BoundTuple constraints; anything else is a general predicate."""
        oc = other.__class__
        if oc==BoundAttribute:
            result = BoundTuple()
            result.equate([(self.attribute(), other.attribute())])
            return BTPredicate(result)
        elif oc==Constant:
            result = BoundTuple()
            result.assns[ self.attribute() ] = other.value([1])[0]
            return BTPredicate(result)
        else:
            return NontrivialEqPred(self, other)
class Constant(BoundExpression):
    """A literal value in an expression; folds arithmetic on two
    constants at construction time."""
    contains_aggregate = 0
    def __init__(self, value):
        self.value0 = value
    def __hash__(self):
        return hash(self.value0)
    def initargs(self):
        return (self.value0,)
    def domain(self):
        # a constant mentions no attributes
        return kjbuckets.kjSet()
    def __add__(self, other):
        if other.__class__==Constant:
            return Constant(self.value0 + other.value0)
        return BoundAddition(self, other)
    def __sub__(self, other):
        if other.__class__==Constant:
            return Constant(self.value0 - other.value0)
        return BoundSubtraction(self, other)
    def __mul__(self, other):
        if other.__class__==Constant:
            return Constant(self.value0 * other.value0)
        return BoundMultiplication(self, other)
    def __neg__(self):
        return Constant(-self.value0)
    def __div__(self, other):
        if other.__class__==Constant:
            return Constant(self.value0 / other.value0)
        return BoundDivision(self, other)
    def relbind(self, dict, db):
        # nothing to bind
        return self
    def uncache(self):
        pass
    def value(self, contexts):
        """return the constant value associated with self,
        replicated once per context entry."""
        return [self.value0] * len(contexts)
    def equate(self,other):
        # constant=constant folds to uniformly true/false;
        # otherwise let the other side build the predicate
        if other.__class__==Constant:
            if other.value0 == self.value0:
                return BTPredicate() #true
            else:
                return ~BTPredicate() #false
        else:
            return other.equate(self)
    def attribute(self):
        """invent a unique (rel, att) pair to identify a constant"""
        return ('unbound', `self`)
    def __repr__(self):
        # includes id(self) so distinct Constant instances never
        # collide in attribute()
        return "<const %s at %s>" % (`self.value0`, id(self))
class TupleCollector:
    """Translate a sequence of assignments to simple tuples.
    (for implementing the select list of a SELECT).

    Maintains three parallel sequences: order (att, expr) pairs,
    attorder (output names) and exporder (expressions).
    """
    contains_aggregate = 0
    contains_nonaggregate = 0
    def __init__(self):
        self.final = None
        self.order = []
        self.attorder = []
        self.exporder = []
    def initargs(self):
        return ()
    def marshaldata(self):
        # serialize the expressions; names and flags pass through
        exps = map(serialize, self.exporder)
        return (self.attorder, exps,
                self.contains_aggregate, self.contains_nonaggregate)
    def demarshal(self, args):
        # inverse of marshaldata: rebuild order from the parallel lists
        (self.attorder, exps, self.contains_aggregate,
         self.contains_nonaggregate) = args
        exporder = self.exporder = map(deserialize, exps)
        self.order = map(None, self.attorder, exporder)
    def uncache(self):
        for exp in self.exporder:
            exp.uncache()
    def domain(self):
        # union of the domains of all selected expressions
        all=[]
        for e in self.exporder:
            all = all+e.domain().items()
        return kjbuckets.kjSet(all)
    def __repr__(self):
        l = []
        for (att, exp) in self.order:
            l.append( "%s as %s" % (exp, att) )
        from string import join
        return join(l, ", ")
    def addbinding(self, attribute, expression):
        """bind att>expression (attribute may be None: named later
        in relbind)."""
        self.order.append((attribute, expression) )
        self.attorder.append(attribute )
        self.exporder.append(expression)
        if expression.contains_aggregate:
            self.contains_aggregate = 1
        else:
            self.contains_nonaggregate = 1
    def map(self, assnlist):
        """remap btlist by self. return (tuplelist, attorder)"""
        # DON'T eliminate nulls
        from types import IntType
        tt = type
        # evaluate every select expression over all assignments
        values = []
        for exp in self.exporder:
            values.append(exp.value(assnlist))
        # transpose into one value tuple per assignment
        if len(values)>1:
            valtups = apply(map, (None,) + tuple(values) )
        else:
            valtups = values[0]
        kjUndump = kjbuckets.kjUndump
        undumper = tuple(self.attorder)
        for i in xrange(len(valtups)):
            test = assnlist[i]
            if tt(test) is IntType or test is None:
                valtups[i] = 0 # null/false
            else:
                tup = valtups[i]
                valtups[i] = kjUndump(undumper, tup)
        return (valtups, self.attorder)
    def relbind(self, dict, db):
        """disambiguate missing rel names if possible.
        also choose output names appropriately."""
        # CURRENTLY THIS IS AN "IN PLACE" OPERATION
        order = self.order
        attorder = self.attorder
        exporder = self.exporder
        known = {}
        for i in xrange(len(order)):
            (att, exp) = order[i]
            #print exp
            exp = exp.relbind(dict, db)
            if att is None:
                # choose a name for this column
                #print exp
                (rname, aname) = exp.attribute()
                if known.has_key(aname):
                    # name collision: qualify with relation, then
                    # a counter suffix until unique
                    both = rname+"."+aname
                    att = both
                    count = 0
                    while known.has_key(att):
                        # crank away!
                        count = count+1
                        att = both+"."+`count`
                else:
                    att = aname
            else:
                if known.has_key(att):
                    raise NameError, `att`+" ambiguous in select list"
            order[i] = (att, exp)
            exporder[i] = exp
            attorder[i] = att
            known[att] = att
        return self
class BTPredicate(SimpleRecursive):
    """superclass for bound tuple predicates.
    Eventually should be modified to use "compile" for speed
    to generate an "inlined" evaluation function.
    self(bt) returns bt with additional equality constraints
    (possible) or None if predicate fails.

    A bare BTPredicate is a conjunction of simple equality
    constraints (a BoundTuple) -- with no constraints it is
    uniformly true; with false=1 it is uniformly false.
    """
    false = 0
    constraints = None
    contains_aggregate = 0
    def __init__(self, constraints=None):
        """default interpretation: True."""
        if constraints is not None:
            self.constraints = constraints.close()
    def initargs(self):
        return (self.constraints,)
    def relbind(self, dict, db):
        c = self.constraints
        if c is None: return self
        return BTPredicate( self.constraints.relbind(dict, db) )
    def uncache(self):
        pass
    #def evaluate(self, db, relnames):
        #"""evaluate the predicate over database bindings."""
        # pretty simple strategy right now...
        ### need to do something about all/distinct...
        #c = self.constraints
        #if c is None:
        #   c = BoundTuple()
        #order = c.relorder(db, relnames)
        #if not order:
        #   raise ValueError, "attempt to evaluate over no relations: "+`relnames`
        #result = [c]
        #for r in order:
        #    result = hashjoin(result, r, db[r])
        #if self.__class__==BTPredicate:
        #   # if it's just equality conjunction, we're done
        #   return result
        #else:
        #   # apply additional constraints
        #   return self(result)
    def domain(self):
        c = self.constraints
        kjSet = kjbuckets.kjSet
        if c is None: return kjSet()
        return c.domain()
    def __repr__(self):
        if self.false: return "FALSE"
        c = self.constraints
        if c is None: c = "true"
        return "[pred](%s)" % c
    def detrivialize(self):
        """hook added to allow elimination of trivialities
        return None if completely true, or simpler form
        or self, if no simplification is possible."""
        if self.false: return self
        if not self.constraints: return None
        return self
    def negated_constraints(self):
        """equality constraints always false of satisfactory tuple."""
        return BoundTuple() # there aren't any
    def __call__(self, assignments, toplevel=0):
        """apply self to sequence of assignments
        return copy of assignments with false results
        replaced by 0! Input may have 0's!"""
        # optimization
        # if toplevel, the btpred has been evaluated during join.
        if toplevel:
            return list(assignments)
        from types import IntType
        tt = type
        lbt = len(assignments)
        if self.false: return [0] * lbt
        c = self.constraints
        if c is None or not c:
            result = assignments[:] # no constraints
        else:
            assns = c.assns
            eqs = c.eqs
            # only bother remapping if some equality is non-reflexive
            eqsinteresting = 0
            for (a,b) in eqs.items():
                if a!=b:
                    eqsinteresting = 1
            result = assignments[:]
            for i in xrange(lbt):
                this = assignments[i]
                #print "comparing", self, "to", this
                if type(this) is IntType: continue
                # merge in the constant assignments; Clean() yields
                # None on contradiction
                this = (this + assns).Clean()
                if this is None:
                    result[i] = 0
                elif eqsinteresting:
                    this = this.remap(eqs)
                    if this is None:
                        result[i] = 0
        return result
    def __and__(self, other):
        """NOTE: all subclasses must define an __and__!!!"""
        #print "BTPredicate.__and__", (self, other)
        if self.__class__==BTPredicate and other.__class__==BTPredicate:
            c = self.constraints
            o = other.constraints
            if c is None: return other
            if o is None: return self
            if self.false: return self
            if other.false: return other
            # optimization for simple constraints
            all = (c+o)
            result = BTPredicate( all ) # all constraints
            if all is None: result.false = 1
        else:
            # defer to the more specific subclass implementation
            result = other & self
        return result
    def __or__(self, other):
        if self.__class__==BTPredicate and other.__class__==BTPredicate:
            c = self.constraints
            o = other.constraints
            if c is None: return self # true dominates
            if o is None: return other
            if other.false: return self
            if self.false: return other
            if self == other: return self
        result = BTor_pred([self, other])
        return result
    def __invert__(self):
        # NOT(true) is false; NOT(simple constraints) is BTnot_pred
        if self.false:
            return BTPredicate()
        if not self.constraints:
            result = BTPredicate()
            result.false = 1
            return result
        return BTnot_pred(self)
    def __cmp__(self, other):
        test = cmp(other.__class__, self.__class__)
        if test: return test
        if self.false and other.false: return 0
        return cmp(self.constraints, other.constraints)
    def __hash__(self):
        if self.false: return 11111
        return hash(self.constraints)
class BTor_pred(BTPredicate):
    """Disjunction (OR) of member predicates.

    Nested ORs are flattened, duplicates and uniformly-false members
    are dropped, and self.constraints caches the equality constraints
    common to *all* members (may be None).
    """
    def __init__(self, members, *othermembers):
        # replace any OR in members with its members
        members = list(members) + list(othermembers)
        for m in members[:]:
            if m.__class__==BTor_pred:
                members.remove(m)
                members = members + m.members
        # eliminate duplicates, then uniformly-false members
        members = self.members = kjbuckets.kjSet(members).items()
        for m in members[:]:
            if m.false: members.remove(m)
        self.constraints = None # common constraints
        for m in members:
            if m.contains_aggregate:
                self.contains_aggregate = 1
        if members:
            # common constraints are those in all members
            constraints = members[0].constraints
            for m in members[1:]:
                mc = m.constraints
                if not constraints or not mc:
                    constraints = None
                    break
                constraints = constraints & mc
            self.constraints = constraints
    def initargs(self):
        return ((),) + tuple(self.members)
    def relbind(self, dict, db):
        ms = []
        for m in self.members:
            ms.append( m.relbind(dict, db) )
        return BTor_pred(ms)
    def uncache(self):
        for m in self.members:
            m.uncache()
    def domain(self):
        all = BTPredicate.domain(self).items()
        for x in self.members:
            all = all + x.domain().items()
        return kjbuckets.kjSet(all)
    def __repr__(self):
        c = self.constraints
        m = self.members
        mr = map(repr, m)
        from string import join
        mr.sort()
        mr = join(mr, " | ")
        if not mr: mr = "FALSE_OR"
        if c:
            mr = "[disj](%s and %s)" % (c, mr)
        return mr
    def detrivialize(self):
        """hook added to allow elimination of trivialities
        return None if completely true, or simpler form
        or self, if no simplification is possible."""
        ms = self.members
        for i in xrange(len(ms)):
            ms[i] = ms[i].detrivialize()
        # now suck out subordinate ors
        someor = None
        for m in ms:
            if m.__class__== BTor_pred:
                someor = m
                ms.remove(m)
                break
        if someor is not None:
            result = someor
            for m in ms:
                # BUG FIX: combine members with |, not +.  No
                # BTPredicate class defines __add__, so the previous
                # "result + m" raised TypeError whenever a nested OR
                # survived to this point.
                result = result | m
            return result.detrivialize()
        # uniformly false iff every member is false (None = true member)
        allfalse = 1
        for m in ms:
            if m is None: allfalse=0; break # true member
            allfalse = allfalse & m.false
        if allfalse: return ~BTPredicate() # boundary case
        ms[:] = filter(None, ms)
        if not ms: return None # all true.
        ms[:] = kjbuckets.kjSet(ms).items()
        if len(ms)==1: return ms[0] # or of 1
        return self
    def __call__(self, boundtuples, toplevel=0):
        # apply common constraints first
        lbt = len(boundtuples)
        # boundary case for or is false
        members = self.members
        if not members:
            return [0] * lbt
        current = BTPredicate.__call__(self, boundtuples, toplevel)
        # now apply first alternative
        alt1 = members[0](current)
        # questionables: entries not yet accepted by any alternative
        questionables = current[:]
        rng = xrange(len(current))
        from types import IntType
        tt = type
        for i in rng:
            if tt(alt1[i]) is not IntType:
                questionables[i]=0
        # now test other alternatives against the remaining entries
        for m in self.members[1:]:
            passm = m(questionables)
            for i in rng:
                test = passm[i]
                if tt(test) is not IntType:
                    questionables[i] = 0
                    alt1[i] = test
        return alt1
    def negated_constraints(self):
        """the negated constraints of an OR are
        the negated constraints of *all* members"""
        ms = self.members
        if not ms:
            # empty OR is uniformly false; its negation adds no
            # equality constraints
            return BoundTuple()
        # BUG FIX: start from the first member's negated constraints;
        # the previous code called .negated_constraints() on the
        # member *list*, raising AttributeError.
        result = ms[0].negated_constraints()
        for m in ms[1:]:
            if not result: return result
            mc = m.negated_constraints()
            if not mc: return mc
            result = result & mc
        return result
    def __and__(self, other):
        """push "and" down"""
        newmembers = self.members[:]
        for i in xrange(len(newmembers)):
            newmembers[i] = newmembers[i] & other
        return BTor_pred(newmembers)
    def __or__(self, other):
        """collapse two ors, otherwise just add new member"""
        if self.__class__==BTor_pred and other.__class__==BTor_pred:
            return BTor_pred(self.members+other.members)
        return BTor_pred(self.members + [other])
    def __invert__(self):
        """translate to and-not"""
        ms = self.members
        if not ms: return BTPredicate() # boundary case
        result = ~ms[0]
        for m in ms[1:]:
            result = result & ~m
        return result
    def __cmp__(self, other):
        test = cmp(self.__class__, other.__class__)
        if test: return test
        kjSet = kjbuckets.kjSet
        test = cmp(kjSet(self.members), kjSet(other.members))
        if test: return test
        return BTPredicate.__cmp__(self, other)
    def __hash__(self):
        return hash(kjbuckets.kjSet(self.members))
class BTnot_pred(BTPredicate):
    """Negation (NOT) of a predicate; self.constraints holds equality
    constraints that are always false of a satisfying tuple."""
    def __init__(self, thing):
        self.negated = thing
        self.contains_aggregate = thing.contains_aggregate
        self.constraints = thing.negated_constraints()
    def initargs(self):
        return (self.negated,)
    def relbind(self, dict, db):
        return BTnot_pred( self.negated.relbind(dict, db) )
    def uncache(self):
        self.negated.uncache()
    def domain(self):
        result = BTPredicate.domain(self) + self.negated.domain()
        #print "neg domain is", `self`, result
        return result
    def __repr__(self):
        c = self.constraints
        n = self.negated
        r = "(NOT %s)" % n
        if c: r = "[neg](%s & %s)" % (c, r)
        return r
    def detrivialize(self):
        """hook added to allow elimination of trivialities
        return None if completely true, or simpler form
        or self, if no simplification is possible."""
        # first, fix or/and/not precedence (De Morgan):
        # push the NOT inside any OR or AND before simplifying
        thing = self.negated
        if thing.__class__ == BTnot_pred:
            # double negation cancels
            return thing.negated.detrivialize()
        if thing.__class__ == BTor_pred:
            # translate to and_not
            members = thing.members[:]
            for i in xrange(len(members)):
                members[i] = ~members[i]
            result = BTand_pred(members)
            return result.detrivialize()
        if thing.__class__ == BTand_pred:
            # translate to or_not
            members = thing.members[:]
            c = thing.constraints # precondition
            if c is not None:
                members.append(BTPredicate(c))
            for i in xrange(len(members)):
                members[i] = ~members[i]
            result = BTor_pred(members)
            return result.detrivialize()
        self.negated = thing = self.negated.detrivialize()
        if thing is None: return ~BTPredicate() # uniquely false
        if thing.false: return None # uniquely true
        return self
    def __call__(self, boundtuples, toplevel=0):
        # keep exactly the entries the negated predicate rejects
        from types import IntType
        tt = type
        current = BTPredicate.__call__(self, boundtuples, toplevel)
        omit = self.negated(current)
        for i in xrange(len(current)):
            if tt(omit[i]) is not IntType:
                current[i]=0
        return current
    def negated_constraints(self):
        """the negated constraints of a NOT are the
        negated constraints of the thing negated."""
        return self.negated.constraints
    def __and__(self, other):
        """do the obvious thing."""
        return BTand_pred([self, other])
    def __or__(self, other):
        """do the obvious thing"""
        return BTor_pred([self, other])
    def __invert__(self):
        # double negation cancels
        return self.negated
    def __cmp__(self, other):
        test = cmp(self.__class__, other.__class__)
        if test: return test
        test = cmp(self.negated,other.negated)
        if test: return test
        return BTPredicate.__cmp__(self,other)
    def __hash__(self):
        return hash(self.negated)^787876^hash(self.constraints)
class BTand_pred(BTPredicate):
def __init__(self, members, precondition=None, *othermembers):
#print "BTand_pred", (members, precondition)
members = list(members) + list(othermembers)
members = self.members = kjbuckets.kjSet(members).items()
self.constraints = precondition # common constraints
if members:
# common constraints are those in any member
if precondition is not None:
constraints = precondition
else:
constraints = BoundTuple()
for i in xrange(len(members)):
m = members[i]
mc = m.constraints
if mc:
#print "constraints", constraints
constraints = constraints + mc
if constraints is None: break
if m.__class__==BTPredicate:
members[i] = None # subsumed above
members = self.members = filter(None, members)
for m in members:
if m.contains_aggregate:
self.contains_aggregate=1
### consider propagating constraints down?
self.constraints = constraints
if constraints is None: self.false = 1
def initargs(self):
#print "self.members", self.members
#print "self.constraints", self.constraints
#return (list(self.members), self.constraints)
return ((), self.constraints) + tuple(self.members)
def relbind(self, dict, db):
ms = []
for m in self.members:
ms.append( m.relbind(dict, db) )
c = self.constraints.relbind(dict, db)
return BTand_pred(ms, c)
def uncache(self):
for m in self.members:
m.uncache()
def domain(self):
all = BTPredicate.domain(self).items()
for x in self.members:
all = all + x.domain().items()
return kjbuckets.kjSet(all)
def __repr__(self):
m = self.members
c = self.constraints
r = map(repr, m)
if self.false: r.insert(0, "FALSE")
from string import join
r = join(r, " AND ")
r = "(%s)" % r
if c: r = "[conj](%s and %s)" % (c, r)
return r
def detrivialize(self):
"""hook added to allow elimination of trivialities
return None if completely true, or simpler form
or self, if no simplification is possible."""
# first apply demorgan's law to push ands down
# (exponential in worst case).
#print "detrivialize"
#print self
ms = self.members
some_or = None
c = self.constraints
for m in ms:
if m.__class__==BTor_pred:
some_or = m
ms.remove(m)
break
if some_or is not None:
result = some_or
if c is not None:
some_or = some_or & BTPredicate(c)
for m in ms:
result = result & m # preserves or/and precedence
if result.__class__!=BTor_pred:
raise "what the?"
result = result.detrivialize()
#print "or detected, returning"
#print result
return result
for i in xrange(len(ms)):
ms[i] = ms[i].detrivialize()
ms[:] = filter(None, ms)
if not ms:
#print "returning boundary case of condition"
if c is None:
return None
else:
return BTPredicate(c).detrivialize()
ms[:] = kjbuckets.kjSet(ms).items()
if len(ms)==1 and c is None:
#print "and of 1, returning"
#print ms[0]
return ms[0] # and of 1
return self
def __call__(self, boundtuples, toplevel=0):
# apply common constraints first
current = BTPredicate.__call__(self, boundtuples, toplevel)
for m in self.members:
current = m(current)
return current
def negated_constraints(self):
"""the negated constraints of an AND are
the negated constraints of *any* member"""
ms = self.members
result = BoundTuple()
for m in ms:
mc = m.negated_constraints()
if mc: result = result + mc
return result
def __and__(self, other):
"""push "and" down if other is an or"""
if other.__class__==BTor_pred:
return other & self
c = self.constraints
# merge in other and
if other.__class__==BTand_pred:
allmem = self.members+other.members
oc = other.constraints
if c is None:
c = oc
elif oc is not None:
c = c+oc
return BTand_pred(allmem, c)
return BTand_pred(self.members + [other], c)
def __or__(self, other):
"""do the obvious thing."""
return BTor_pred([self, other])
def __invert__(self):
"""translate to or-not"""
ms = self.members
if not ms: return ~BTPredicate() # boundary case
result = ~ms[0]
for m in ms[1:]:
result = result | ~m
return result
def __cmp__(self, other):
test = cmp(self.__class__, other.__class__)
if test: return test
kjSet = kjbuckets.kjSet
test = cmp(kjSet(self.members), kjSet(other.members))
if test: return test
return BTPredicate.__cmp__(self, other)
def __hash__(self):
return hash(kjbuckets.kjSet(self.members))
class NontrivialEqPred(BTPredicate):
    """Equality (or, in subclasses, another comparison) of two
    nontrivial expressions -- used when neither side reduces to a
    simple attribute/constant constraint."""
    def __init__(self, left, right):
        # maybe should use reflexivity...
        self.left = left
        self.right = right
        self.contains_aggregate = left.contains_aggregate or right.contains_aggregate
    def initargs(self):
        return (self.left, self.right)
    def __cmp__(self, other):
        test = cmp(self.__class__, other.__class__)
        if test: return test
        test = cmp(self.right, other.right)
        if test: return test
        # BUG FIX: previously compared other.left with itself (always
        # equal), so predicates differing only in the left operand
        # wrongly compared equal (and were deduplicated away).
        return cmp(self.left, other.left)
    def __hash__(self):
        # BUG FIX: was misspelled as "def hash(self, other)", leaving
        # it dead code and instances hashed by identity.
        return hash(self.left) ^ hash(self.right)
    def relbind(self, dict, db):
        Class = self.__class__
        return Class(self.left.relbind(dict,db), self.right.relbind(dict,db) )
    def uncache(self):
        self.left.uncache()
        self.right.uncache()
    def domain(self):
        return self.left.domain() + self.right.domain()
    op = "=="
    def __repr__(self):
        return "(%s)%s(%s)" % (self.left, self.op, self.right)
    def detrivialize(self):
        return self
    def __call__(self, assigns, toplevel=0):
        """Zero out each assignment whose left and right values differ;
        integer placeholders (already-eliminated rows) pass through."""
        from types import IntType
        lv = self.left.value(assigns)
        rv = self.right.value(assigns)
        result = assigns[:]
        for i in xrange(len(assigns)):
            t = assigns[i]
            if type(t) is not IntType and lv[i]!=rv[i]:
                result[i] = 0
        return result
    def negated_constraints(self):
        # no simple equality constraints can be extracted
        return None
    def __and__(self, other):
        return BTand_pred( [self, other] )
    def __or__(self, other):
        return BTor_pred( [self, other] )
    def __invert__(self):
        return BTnot_pred(self)
class BetweenPredicate(NontrivialEqPred):
    """e1 BETWEEN e2 AND e3 (inclusive range test)"""
    def __init__(self, middle, lower, upper):
        self.middle = middle
        self.lower = lower
        self.upper = upper
    def initargs(self):
        return (self.middle, self.lower, self.upper)
    def domain(self):
        return (
            self.middle.domain() + self.lower.domain() + self.upper.domain())
    def relbind(self, dict, db):
        # in-place rebinding of all three sub-expressions
        self.middle = self.middle.relbind(dict, db)
        self.lower = self.lower.relbind(dict, db)
        self.upper = self.upper.relbind(dict, db)
        return self
    def uncache(self):
        self.middle.uncache()
        self.upper.uncache()
        self.lower.uncache()
    def __repr__(self):
        return "(%s BETWEEN %s AND %s)" % (
           self.middle, self.lower, self.upper)
    def __hash__(self):
        return hash(self.middle)^~hash(self.lower)^hash(self.upper)^55
    def __cmp__(self, other):
        test = cmp(self.__class__, other.__class__)
        if test: return test
        test = cmp(self.lower, other.lower)
        if test: return test
        test = cmp(self.middle, other.middle)
        if test: return test
        return cmp(self.upper, other.upper)
    def __call__(self, assigns, toplevel=0):
        """Zero out assignments where middle falls outside
        [lower, upper]; integer placeholders pass through."""
        from types import IntType
        tt = type
        lowv = self.lower.value(assigns)
        upv = self.upper.value(assigns)
        midv = self.middle.value(assigns)
        result = assigns[:]
        for i in xrange(len(assigns)):
            t = assigns[i]
            if tt(t) is not IntType:
                midvi = midv[i]
                if lowv[i]>midvi or upv[i]<midvi:
                    result[i] = 0
        return result
class ExistsPred(NontrivialEqPred):
    """EXISTS subquery.

    When the subquery has no outer (correlated) bindings it is
    evaluated once and the boolean result cached; otherwise it is
    re-evaluated per assignment with the outer bindings supplied.
    """
    contains_aggregate = 0
    def __init__(self, subq):
        self.cached_result = None
        self.cachable = None   # set by domain() when no outer bindings
        self.subq = subq
    def initargs(self):
        return (self.subq,)
    def domain(self):
        result = self.subq.unbound()
        # if there are no outer bindings, evaluate ONCE!
        if not result:
            self.cachable = 1
        return result
    def relbind(self, dict, db):
        # note: subquery relbind takes (db, dict) in this order
        self.subq = self.subq.relbind(db, dict)
        return self
    def uncache(self):
        self.cached_result = None
        self.subq.uncache()
    def __repr__(self):
        return "\nEXISTS\n%s\n" % (self.subq,)
    def __call__(self, assigns, toplevel=0):
        ### should optimize!!!
        #print "exists"
        #print self.subq
        from types import IntType
        tt = type
        eval = self.subq.eval
        result = assigns[:]
        # shortcut: if cachable, eval only once and cache
        if self.cachable:
            test = self.cached_result
            if test is None:
                self.cached_result = test = eval()
            #print "exists cached", self.cached_result
            if test:
                return result
            else:
                return [0] * len(result)
        # correlated: evaluate the subquery per surviving assignment
        kjDict = kjbuckets.kjDict
        for i in xrange(len(assigns)):
            #print "exists uncached"
            assignsi = assigns[i]
            if tt(assignsi) is IntType: continue
            testbtup = BoundTuple()
            testbtup.assns = kjDict(assignsi)
            test = eval(outerboundtuple=testbtup).rows()
            #for x in test:
                #print "exists for", assignsi
                #print x
                #break
            if not test:
                result[i] = 0
        return result
    def __hash__(self):
        return hash(self.subq)^3333
    def __cmp__(self, other):
        test = cmp(self.__class__, other.__class__)
        if test: return test
        return cmp(self.subq, other.subq)
class QuantEQ(NontrivialEqPred):
    """Quantified equal any predicate: expr = ANY (subquery).

    Subclasses override op/compare for <> ANY, < ANY, <= ANY etc.
    When the subquery has no outer bindings its single result column
    is computed once and cached.
    """
    def __init__(self, expr, subq):
        self.expr = expr
        self.subq = subq
        self.cachable = 0       # set by domain() when no outer bindings
        self.cached_column = None
        self.att = None         # the subquery's single output attribute
    def initargs(self):
        return (self.expr, self.subq)
    def uncache(self):
        self.cached_column = None
    def domain(self):
        first = self.subq.unbound()
        if not first:
            self.cachable = 1
        more = self.expr.domain()
        return first + more
    def relbind(self, dict, db):
        # note: subquery relbind takes (db, dict) in this order
        subq = self.subq = self.subq.relbind(db, dict)
        self.expr = self.expr.relbind(dict, db)
        # test that subquery is single column and determine att
        sl = subq.select_list
        atts = sl.attorder
        if len(atts)<>1:
            raise ValueError, \
              "Quantified predicate requires unit select list: %s" % atts
        self.att = atts[0]
        return self
    fmt = "(%s %s ANY %s)"
    op = "="
    def __repr__(self):
        return self.fmt % (self.expr, self.op, self.subq)
    def __call__(self, assigns, toplevel=0):
        """Zero out assignments where self.compare fails against the
        subquery's result column (re-evaluated per row if correlated)."""
        cached_column = self.cached_column
        cachable = self.cachable
        expr = self.expr
        subq = self.subq
        att = self.att
        if cachable:
            if cached_column is None:
                subqr = subq.eval().rows()
                cc = self.cached_column = dump_single_column(subqr, att)
            #print self, "cached", self.cached_column
        exprvals = expr.value(assigns)
        kjDict = kjbuckets.kjDict
        compare = self.compare
        tt = type
        from types import IntType
        result = assigns[:]
        for i in xrange(len(assigns)):
            assignsi = assigns[i]
            if tt(assignsi) is IntType: continue
            thisval = exprvals[i]
            testbtup = BoundTuple()
            testbtup.assns = kjDict(assignsi)
            if not cachable:
                # correlated: recompute the column with outer bindings
                subqr = subq.eval(outerboundtuple=testbtup).rows()
                cc = dump_single_column(subqr, att)
                #print self, "uncached", cc, thisval
            if not compare(thisval, cc):
                #print "eliminated", assignsi
                result[i] = 0
        return result
    def compare(self, value, column):
        # "= ANY": true iff value appears in the column
        return value in column
    def __hash__(self):
        return hash(self.subq) ^ ~hash(self.expr)
    def __cmp__(self, other):
        test = cmp(self.__class__, other.__class__)
        if test: return test
        test = cmp(self.expr, other.expr)
        if test: return test
        return cmp(self.subq, other.subq)
# "expr IN (subq)" is equivalent to "expr = ANY (subq)", so IN over a
# subquery simply reuses the quantified-equality predicate.
InPredicate = QuantEQ
class InLits(NontrivialEqPred):
    """expr IN literals, support dynamic bindings.

    The literal expressions are constant per execution, so their
    values are computed once (from the first live assignment) and
    cached until uncache().
    """
    def __init__(self, expr, lits):
        self.expr = expr
        self.lits = lits
        self.cached_lits = None
    def initargs(self):
        return (self.expr, self.lits)
    def uncache(self):
        self.cached_lits = None
    def domain(self):
        # union of the literal domains (usually empty) and the
        # tested expression's domain
        d = []
        for l in self.lits:
            d0 = l.domain()
            if d0:
                d = d + d0.items()
        d0 = self.expr.domain()
        if d:
            kjSet = kjbuckets.kjSet
            return d0 + kjSet(d)
        else:
            return d0
    def relbind(self, dict, db):
        newlits = []
        for l in self.lits:
            newlits.append(l.relbind(dict, db))
        self.lits = newlits
        self.expr = self.expr.relbind(dict, db)
        return self
    fmt = "(%s IN %s)"
    def __repr__(self):
        return self.fmt % (self.expr, self.lits)
    def __call__(self, assigns, toplevel=0):
        # LITERALS ARE CONSTANT! NEED ONLY LOOK FOR ONE ASSIGN.
        tt = type
        from types import IntType
        litvals = self.cached_lits
        if litvals is None:
            # find one live assignment to evaluate the literals against
            assigns0 = []
            for asn in assigns:
                if tt(asn) is not IntType:
                    assigns0.append(asn)
                    break
            if not assigns0:
                # all false/unknown
                return assigns
            litvals = []
            for lit in self.lits:
                value = lit.value(assigns0)
                litvals.append(value[0])
            self.cached_lits = litvals
        expr = self.expr
        exprvals = expr.value(assigns)
        result = assigns[:]
        for i in xrange(len(assigns)):
            assignsi = assigns[i]
            if tt(assignsi) is IntType: continue
            thisval = exprvals[i]
            if thisval not in litvals:
                #print "eliminated", assignsi
                result[i] = 0
        return result
    def compare(self, value, column):
        # NOTE(review): apparently unused here (kept for interface
        # parity with the QuantEQ family)
        return value in column
    def __hash__(self):
        return 10 ^ hash(self.expr)
    def __cmp__(self, other):
        test = cmp(self.__class__, other.__class__)
        if test: return test
        test = cmp(self.expr, other.expr)
        if test: return test
        return cmp(self.lits, other.lits)
class QuantNE(QuantEQ):
    """Quantified "<> ANY" predicate."""
    op = "<>"
    def compare(self, value, column):
        # true when at least one column entry differs from value
        for entry in column:
            if entry != value:
                return 1
        return 0
### note: faster NOT IN using QuantNE?
class QuantLT(QuantEQ):
    """Quantified "< ANY" predicate."""
    op = "<"
    def uncache(self):
        # drop the memoized extremum as well as the inherited column cache
        self.testval = None
        self.cached = None
        self.cached_column = None
    def compare(self, value, column):
        # "< ANY" holds iff value < max(column); memoize max when allowed
        if not self.cachable:
            return value < max(column)
        if not self.cached:
            self.testval = max(column)
            self.cached = 1
        return value < self.testval
class QuantLE(QuantLT):
    """Quantified "<= ANY" predicate."""
    op = "<="
    def compare(self, value, column):
        # "<= ANY" holds iff value <= max(column); memoize max when allowed
        if not self.cachable:
            return value <= max(column)
        if not self.cached:
            self.testval = max(column)
            self.cached = 1
        return value <= self.testval
class QuantGE(QuantLT):
    """Quantified ">= ANY" predicate."""
    op = ">="
    def compare(self, value, column):
        # ">= ANY" holds iff value >= min(column); memoize min when allowed
        if not self.cachable:
            return value >= min(column)
        if not self.cached:
            self.testval = min(column)
            self.cached = 1
        return value >= self.testval
class QuantGT(QuantLT):
    """Quantified "> ANY" predicate."""
    op = ">"
    def compare(self, value, column):
        # "> ANY" holds iff value > min(column); memoize min when allowed
        if not self.cachable:
            return value > min(column)
        if not self.cached:
            self.testval = min(column)
            self.cached = 1
        return value > self.testval
def dump_single_column(assigns, att):
    """Project attribute `att` out of every row of `assigns` into a list."""
    return [row[att] for row in assigns]
class LessPred(NontrivialEqPred):
    """Binary "<" predicate over two expressions."""
    op = "<"
    def __call__(self, assigns, toplevel=0):
        # zero out every live binding where left >= right
        from types import IntType
        leftvals = self.left.value(assigns)
        rightvals = self.right.value(assigns)
        out = assigns[:]
        for index in xrange(len(assigns)):
            entry = assigns[index]
            if type(entry) is IntType:
                continue  # already eliminated
            if leftvals[index] >= rightvals[index]:
                out[index] = 0
        return out
    def __inv__(self):
        # not (a < b)  ==  (b <= a)
        return LessEqPred(self.right, self.left)
    def __hash__(self):
        return hash(self.left) ^ hash(self.right)
class LessEqPred(LessPred):
    """Binary "<=" predicate over two expressions."""
    op = "<="
    def __call__(self, assigns, toplevel=0):
        # zero out every live binding where left > right
        from types import IntType
        leftvals = self.left.value(assigns)
        rightvals = self.right.value(assigns)
        out = assigns[:]
        for index in xrange(len(assigns)):
            entry = assigns[index]
            if type(entry) is IntType:
                continue  # already eliminated
            if leftvals[index] > rightvals[index]:
                out[index] = 0
        return out
    def __inv__(self):
        # not (a <= b)  ==  (b < a)
        return LessPred(self.right, self.left)
class SubQueryExpression(BoundMinus, SimpleRecursive):
    """sub query expression (subq), must eval to single column, single value"""
    def __init__(self, subq):
        self.subq = subq
        # att: name of the single result column (set by relbind);
        # cachable/cached/cached_value: memo state for constant subqueries
        self.att = self.cachable = self.cached = self.cached_value = None
    def initargs(self):
        return (self.subq,)
    def uncache(self):
        # forget the memoized value; cachability itself is unchanged
        self.cached = self.cached_value = None
    def domain(self):
        # unbound attributes of the subquery; empty means it is a constant
        result = self.subq.unbound()
        if not result:
            self.cachable = 1
        #print "expr subq domain", result
        return result
    def relbind(self, dict, db):
        subq = self.subq = self.subq.relbind(db, dict)
        # test that subquery is single column and determine att
        sl = subq.select_list
        atts = sl.attorder
        if len(atts)<>1:
            raise ValueError, \
              "Quantified predicate requires unit select list: %s" % atts
        self.att = atts[0]
        return self
    def __repr__(self):
        return "(%s)" % self.subq
    def value(self, contexts):
        """Return the subquery's single value for each live context.

        Integer entries in `contexts` mark eliminated rows; they are
        passed through unchanged."""
        subq = self.subq
        att = self.att
        if self.cachable:
            # constant subquery: evaluate once, replicate for all contexts
            if self.cached:
                cached_value = self.cached_value
            else:
                self.cached = 1
                seval = subq.eval().rows()
                lse = len(seval)
                if lse<>1:
                    raise ValueError, \
                      "const subquery expression must return 1 result: got %s" % lse
                self.cached_value = cached_value = seval[0][att]
                #print "const subq cached", cached_value
            return [cached_value] * len(contexts)
        from types import IntType
        tt = type
        result = contexts[:]
        kjDict = kjbuckets.kjDict
        for i in xrange(len(contexts)):
            contextsi = contexts[i]
            if tt(contextsi) is not IntType:
                # re-evaluate the subquery under this row's bindings
                testbtup = BoundTuple()
                testbtup.assns = kjDict(contextsi)
                #print "subq exp", testbtup
                seval = subq.eval(outerboundtuple=testbtup).rows()
                lse = len(seval)
                if lse<>1:
                    raise ValueError, \
                      "dynamic subquery expression must return 1 result: got %s" % lse
                result[i] = seval[0][att]
                #print "nonconst subq uncached", result[i], contextsi
        return result
SELECT_TEMPLATE = """\
SELECT %s %s
FROM %s
WHERE %s
GROUP BY %s
HAVING %s %s
ORDER BY %s %s
"""
def dynamic_binding(ndynamic, dynamic):
    """create bindings from dynamic tuple for ndynamic parameters
    if a tuple is given create one
    if a list is given create many
    """
    from types import ListType, TupleType
    if not dynamic:
        # no substitutions supplied: only valid when none are expected
        if ndynamic>0:
            raise ValueError, `ndynamic`+" dynamic parameters unbound"
        return [kjbuckets.kjDict()]
    ldyn = len(dynamic)
    # undumper maps parameter slot i to the key pair (0, i) for kjUndump
    undumper = map(None, [0]*ndynamic, range(ndynamic))
    undumper = tuple(undumper)
    tdyn = type(dynamic)
    if tdyn is TupleType:
        # a single substitution tuple: its length must match exactly
        ldyn = len(dynamic)
        if len(dynamic)!=ndynamic:
            raise ValueError, "%s,%s: wrong number of dynamics" % (ldyn,ndynamic)
        dynamic = [dynamic]
    elif tdyn is not ListType:
        raise TypeError, "dynamic parameters must be list or tuple"
    else:
        # a list of substitution tuples: all must share one length
        lens = map(len, dynamic)
        ndynamic = max(lens)
        if ndynamic!=min(lens):
            raise ValueError, "dynamic parameters of inconsistent lengths"
        # rebuild the undumper for the (possibly revised) ndynamic
        undumper = map(None, [0]*ndynamic, range(ndynamic))
        undumper = tuple(undumper)
    result = list(dynamic)
    kjUndump = kjbuckets.kjUndump
    for i in xrange(len(dynamic)):
        dyn = dynamic[i]
        ldyn = len(dyn)
        #print undumper, dyn
        if ldyn==1:
            # kjUndump treats a one-element substitution specially
            dynresult = kjUndump(undumper, dyn[0])
        else:
            dynresult = kjUndump(undumper, dyn)
        result[i] = dynresult
    return result
class Selector:
    """For implementing, eg the SQL SELECT statement."""
    def __init__(self, alldistinct,
                 select_list,
                 table_reference_list,
                 where_pred,
                 group_list,
                 having_cond,
                 union_select =None,
                 order_by_spec =None,
                 ndynamic=0, # number of dyn params expected
                 ):
        self.ndynamic = ndynamic
        self.alldistinct = alldistinct
        self.select_list = select_list
        self.table_list = table_reference_list
        self.where_pred = where_pred
        self.group_list = group_list
        self.having_cond = having_cond
        self.union_select = union_select
        self.order_by = order_by_spec
        #self.union_spec = "DISTINCT" # default union mode
        self.relbindings = None # binding of relations
        self.unbound_set = None # unbound attributes
        self.rel_atts = None # graph of alias>attname bound in self
        # all_aggregate: select list is pure aggregates with no GROUP BY
        self.all_aggregate = 0
        if select_list!="*" and not group_list:
            if select_list.contains_aggregate:
                ### should restore this check somewhere else!
                #if select_list.contains_nonaggregate:
                    #raise ValueError, "aggregates/nonaggregates don't mix without grouping"
                self.all_aggregate = 1
        if where_pred and where_pred.contains_aggregate:
            raise ValueError, "aggregate in WHERE"
        # query_plan: sequence of HashJoiner ops, built by plan_query
        self.query_plan = None
    def initargs(self):
        """Constructor args for serialization; order_by and group_list
        travel separately via marshaldata/demarshal (hence the Nones)."""
        #print self.alldistinct
        #print self.select_list
        #print self.table_list
        #print self.where_pred
        #print self.having_cond
        #print self.union_select
        #print self.group_list
        #print self.order_by
        #print self.ndynamic
        # note: order by requires special handling
        return (self.alldistinct, self.select_list, self.table_list, self.where_pred,
                None, self.having_cond, self.union_select, None,
                self.ndynamic)
    def marshaldata(self):
        """Serialize the order_by and group_list components."""
        order_by = self.order_by
        if order_by:
            order_by = map(serialize, order_by)
        group_list = self.group_list
        if group_list:
            group_list = map(serialize, group_list)
        #print "marshaldata"
        #print order_by
        #print group_list
        return (order_by, group_list)
    def demarshal(self, data):
        """Restore the components serialized by marshaldata."""
        (order_by, group_list) = data
        if order_by:
            order_by = map(deserialize, order_by)
        if group_list:
            group_list = map(deserialize, group_list)
        #print "demarshal"
        #print order_by
        #print group_list
        self.order_by = order_by
        self.group_list = group_list
    def unbound(self):
        # attributes not bound inside this selection (set by check_domains)
        result = self.unbound_set
        if result is None:
            raise ValueError, "binding not available"
        return result
    def uncache(self):
        """Clear memoized state in predicates, select list and query plan."""
        wp = self.where_pred
        hc = self.having_cond
        sl = self.select_list
        if wp is not None: wp.uncache()
        if hc is not None: hc.uncache()
        sl.uncache()
        qp = self.query_plan
        if qp:
            for joiner in qp:
                joiner.uncache()
    def relbind(self, db, outerbindings=None):
        """Resolve names against db (and enclosing query bindings);
        returns a new, bound Selector with a query plan attached."""
        ad = self.alldistinct
        sl = self.select_list
        tl = self.table_list
        wp = self.where_pred
        gl = self.group_list
        hc = self.having_cond
        us = self.union_select
        ob = self.order_by
        test = db.bindings(tl)
        #print len(test)
        #for x in test:
            #print x
        (attbindings, relbindings, ambiguous, ambiguousatts) = test
        if outerbindings:
            # bind in outerbindings where unambiguous
            for (a,r) in outerbindings.items():
                if ((not attbindings.has_key(a))
                    and (not ambiguousatts.has_key(a)) ):
                    attbindings[a] = r
        # fix "*" select list
        if sl=="*":
            sl = TupleCollector()
            for (a,r) in attbindings.items():
                sl.addbinding(None, BoundAttribute(r,a))
            for (dotted, (r,a)) in ambiguous.items():
                sl.addbinding(dotted, BoundAttribute(r,a))
        sl = sl.relbind(attbindings, db)
        wp = wp.relbind(attbindings, db)
        if hc is not None: hc = hc.relbind(attbindings, db)
        if us is not None: us = us.relbind(db, attbindings)
        # bind grouping if present
        if gl:
            gl = relbind_sequence(gl, attbindings, db)
        # bind ordering list if present
        #print ob
        if ob:
            ob = relbind_sequence(ob, attbindings, db)
            ob = orderbind_sequence(ob, sl.order)
        result = Selector(ad, sl, tl, wp, gl, hc, us, ob)
        result.relbindings = relbindings
        result.ndynamic = self.ndynamic
        result.check_domains()
        result.plan_query()
        query_plan = result.query_plan
        for i in range(len(query_plan)):
            query_plan[i] = query_plan[i].relbind(db, attbindings)
        return result
    def plan_query(self):
        """generate a query plan (sequence of join operators)."""
        rel_atts = self.rel_atts # rel>attname
        where_pred = self.where_pred.detrivialize()
        #select_list = self.select_list
        # shortcut
        if where_pred is None:
            bt = BoundTuple()
        else:
            bt = self.where_pred.constraints
            if bt is None:
                bt = BoundTuple()
        eqs = kjbuckets.kjGraph(bt.eqs)
        witness = kjbuckets.kjDict()
        # set all known and unbound atts as witnessed
        for att in bt.assns.keys():
            witness[att] = 1
        #print self, "self.unbound_set", self.unbound_set
        for att in self.unbound_set.items():
            witness[att] = 1
        relbindings = self.relbindings
        allrels = relbindings.keys()
        #print relbindings
        # order the relations so constrained ones join early
        allrels = bt.relorder(relbindings, allrels)
        #print allrels
        rel_atts = self.rel_atts
        plan = []
        for rel in allrels:
            relation = relbindings[rel]
            ratts = rel_atts.neighbors(rel)
            h = HashJoiner(bt, rel, ratts, relation, witness)
            plan.append(h)
            # after this join, the relation's attributes are witnessed
            for a in ratts:
                ra = (rel, a)
                witness[ra] = 1
            witness = witness.remap(eqs)
        self.query_plan = plan
    def check_domains(self):
        """determine set of unbound names in self.
        """
        relbindings = self.relbindings
        sl = self.select_list
        wp = self.where_pred
        gl = self.group_list
        hc = self.having_cond
        us = self.union_select
        # collect every (alias, attribute) referenced by the clauses
        all = sl.domain().items()
        if wp is not None:
            all = all + wp.domain().items()
        # ignore group_list ???
        if hc is not None:
            all = all + hc.domain().items()
        kjSet = kjbuckets.kjSet
        kjGraph = kjbuckets.kjGraph
        alldomain = kjSet(all)
        rel_atts = self.rel_atts = kjGraph(all)
        # names actually provided by the bound relations
        allnames = kjSet()
        #print "relbindings", relbindings.keys()
        for name in relbindings.keys():
            rel = relbindings[name]
            for att in rel.attributes():
                allnames[ (name, att) ] = 1
        # union compatibility check
        if us is not None:
            us.check_domains()
            myatts = self.attributes()
            thoseatts = us.attributes()
            if myatts!=thoseatts:
                if len(myatts)!=len(thoseatts):
                    raise IndexError, "outer %s, inner %s: union select lists lengths differ"\
                      % (len(myatts), len(thoseatts))
                for p in map(None, myatts, thoseatts):
                    (x,y)=p
                    if x!=y:
                        raise NameError, "%s union names don't match" % (p,)
        # whatever is referenced but not provided must come from outside
        self.unbound_set = alldomain - allnames
    def attributes(self):
        # output column names, in select-list order
        return self.select_list.attorder
    def eval(self, dynamic=None, outerboundtuple=None):
        """leaves a lot to be desired.
        dynamic and outerboundtuple are mutually
        exclusive. dynamic is only pertinent to
        top levels, outerboundtuple to subqueries"""
        #print "select eval", dynamic, outerboundtuple
        from gfdb0 import Relation0
        # only uncache if outerboundtuple is None (not subquery)
        # ???
        if outerboundtuple is None:
            self.uncache()
        query_plan = self.query_plan
        where_pred = self.where_pred.detrivialize()
        select_list = self.select_list
        # shortcut
        if where_pred is not None and where_pred.false:
            return Relation0(select_list.attorder, [])
        #print "where_pred", where_pred
        if where_pred is None or where_pred.constraints is None:
            assn0 = assn1 = kjbuckets.kjDict()
        else:
            assn1 = self.where_pred.constraints.assns
            assn0 = assn1 = kjbuckets.kjDict(assn1)
        # erase stored results from possible previous evaluation
        ndynamic = self.ndynamic
        if outerboundtuple is not None:
            # subquery: fold in the enclosing query's bindings
            assn1 = assn1 + outerboundtuple.assns
        elif ndynamic:
            # top level: fold in the dynamic parameter substitution
            dyn = dynamic_binding(ndynamic, dynamic)
            if len(dyn)!=1:
                raise ValueError, "only one dynamic subst for selection allowed"
            dyn = dyn[0]
            assn1 = assn1 + dyn
            #print "dynamic", bt
        #print "assn1", assn1
        # check unbound names
        unbound_set = self.unbound_set
        #print "unbound", unbound_set
        #print unbound_set
        #print self.rel_atts
        for pair in unbound_set.items():
            if not assn1.has_key(pair):
                raise KeyError, `pair`+": unbound in selection"
        assn1 = (unbound_set * assn1) + assn0
        #print "assn1 now", assn1
        # run the join plan, threading the binding sequence through it
        substseq = [assn1]
        for h in query_plan:
            #print "***"
            #for x in substseq:
                #print x
            #print "***"
            substseq = h.join(substseq)
            if not substseq: break
            #print "***"
            #for x in substseq:
                #print x
            #print "***"
        # apply the rest of the where predicate at top level
        if substseq and where_pred is not None:
            #where_pred.uncache()
            substseq = where_pred(substseq, 1)
        # eliminate zeros/nulls
        substseq = no_ints_nulls(substseq)
        # apply grouping if present
        group_list = self.group_list
        if substseq and group_list:
            substseq = aggregate(substseq, group_list)
            having_cond = self.having_cond
            #print having_cond
            if having_cond is not None:
                #having_cond.uncache()
                substseq = no_ints_nulls(having_cond(substseq))
        elif self.all_aggregate:
            # universal group
            substseq = [kjbuckets.kjDict( [(None, substseq)] ) ]
        (tups, attorder) = select_list.map(substseq)
        # do UNION if present
        union_select = self.union_select
        if union_select is not None:
            tups = union_select.eval(tups, dynamic, outerboundtuple)
        # apply DISTINCT if appropriate
        if self.alldistinct=="DISTINCT":
            tups = kjbuckets.kjSet(tups).items()
        # apply ordering if present
        ob = self.order_by
        if ob:
            tups = order_tuples(ob, tups)
        return Relation0(attorder, tups)
    def __repr__(self):
        ndyn = ""
        if self.ndynamic:
            ndyn = "\n[%s dynamic parameters]" % self.ndynamic
        result = SELECT_TEMPLATE % (
          self.alldistinct,
          self.select_list,
          self.table_list,
          self.where_pred,
          self.group_list,
          self.having_cond,
          #union_spec,
          self.union_select,
          self.order_by,
          ndyn
          )
        return result
class Union(SimpleRecursive):
    """UNION clause: combine an outer selection's rows with self.selection."""
    def __init__(self, alldistinct, selection):
        # alldistinct: union mode string ("DISTINCT" dedupes the result)
        self.alldistinct = alldistinct
        self.selection = selection
    def initargs(self):
        return (self.alldistinct, self.selection)
    def unbound(self):
        return self.selection.unbound()
    def relbind(self, db, outer=None):
        self.selection = self.selection.relbind(db, outer)
        return self
    def check_domains(self):
        self.selection.check_domains()
    def attributes(self):
        return self.selection.attributes()
    def eval(self, assns, dyn=None, outer=None):
        # evaluate the right operand, append the outer rows, dedupe if needed
        inner = self.selection.eval(dyn, outer)
        combined = inner.rows() + assns
        if self.alldistinct == "DISTINCT":
            combined = kjbuckets.kjSet(combined).items()
        return combined
    def __repr__(self):
        return "\nUNION %s %s " % (self.alldistinct, self.selection)
class Intersect(Union):
    """INTERSECT clause: keep only rows present on both sides."""
    op = "INTERSECT"
    def eval(self, assns, dyn=None, outer=None):
        inner = self.selection.eval(dyn, outer)
        kjSet = kjbuckets.kjSet
        return (kjSet(assns) & kjSet(inner.rows())).items()
    def __repr__(self):
        return "\n%s %s" % (self.op, self.selection)
class Except(Union):
    """EXCEPT clause: remove the sub-selection's rows from the outer rows."""
    def eval(self, assns, dyn=None, outer=None):
        r = self.selection.eval(dyn, outer)
        rows = r.rows()
        kjSet = kjbuckets.kjSet
        allrows = (kjSet(assns) - kjSet(rows)).items()
        return allrows
    op = "EXCEPT"
    def __repr__(self):
        # Bug fix: Except previously inherited Union.__repr__, which prints
        # "UNION ..." and ignores self.op; mirror Intersect.__repr__ so the
        # clause displays as "EXCEPT <selection>".
        return "\n%s %s" % (self.op, self.selection)
class Parse_Context:
    """contextual information for parsing
    p.param() returns a new sequence number for external parameter.
    """
    # not serializable
    # class-level default; first param() call shadows it on the instance
    parameter_index = 0
    # no __init__ yet
    def param(self):
        allocated = self.parameter_index
        self.parameter_index = allocated + 1
        return allocated
    def ndynamic(self):
        # number of external parameters allocated so far
        return self.parameter_index
# update/delete/insert statements
# Re-export the statement implementations from sqlmod in this module's
# namespace (the parser reduction functions reference them from here).
import sqlmod
CreateTable = sqlmod.CreateTable
CreateIndex = sqlmod.CreateIndex
DropIndex = sqlmod.DropIndex
DropTable = sqlmod.DropTable
UpdateOp = sqlmod.UpdateOp
DeleteOp = sqlmod.DeleteOp
InsertOp = sqlmod.InsertOp
InsertValues = sqlmod.InsertValues
InsertSubSelect = sqlmod.InsertSubSelect
ColumnDef = sqlmod.ColumnDef
CreateView = sqlmod.CreateView
DropView = sqlmod.DropView
# update storage structures from gfdb0
import gfdb0
Add_Tuples = gfdb0.Add_Tuples
Erase_Tuples = gfdb0.Erase_Tuples
Reset_Tuples = gfdb0.Reset_Tuples
####### testing
# test helpers
#def tp(**kw):
# return maketuple(kw)
#def st(**kw):
# return BTPredicate(BoundTuple(r=kw)) | zope.rdb | /zope.rdb-3.5.0.tar.gz/zope.rdb-3.5.0/src/zope/rdb/gadfly/sqlsem.py | sqlsem.py |
# EDIT THIS: THE DIRECTORY IN WHICH TO MARSHAL THE
# GRAMMAR DATA STRUCTURES.
#
ARCHIVE = "."
# the generated grammar tables are cached (marshalled) in this file
marshalfilename = ARCHIVE + "/pygram.mar"
pyrules = """
all ::
## input terminates with "fake" dedent (forces read of all file)
@R all1 :: all >> file_input DEDENT
## 1 term newline
##@R lead_blank :: file_input >> NEWLINE file_input
@R top_stmt :: file_input >> file_input stmt
@R file_input :: file_input >> stmt
## 2
@R simple :: stmt >> simple_stmt
@R compound :: stmt >> compound_stmt
## 3 punct ; term NEWLINE
@R one_small :: simple_stmt >> small_stmt NEWLINE
@R more_small :: simple_stmt >> small_stmt ; simple_stmt
@R small_semi :: simple_stmt >> small_stmt ; NEWLINE
## 4 kw pass
@R smexpr :: small_stmt >> expr_stmt
@R smassn :: small_stmt >> assn
@R smprint :: small_stmt >> print_stmt
@R smdel :: small_stmt >> del_stmt
@R smpass :: small_stmt >> pass
@R smflow :: small_stmt >> flow_stmt
@R smimport :: small_stmt >> import_stmt
@R smglobal :: small_stmt >> global_stmt
## access ignored
@R smexec :: small_stmt >> exec_stmt
## 5
@R cmif :: compound_stmt >> if_stmt
@R cmwhile :: compound_stmt >> while_stmt
@R cmfor :: compound_stmt >> for_stmt
@R cmtry :: compound_stmt >> try_stmt
@R cmdef :: compound_stmt >> funcdef
@R cmclass :: compound_stmt >> classdef
##6
@R exprlist :: expr_stmt >> testlist
##@R assignment :: expr_stmt >> assn
@R assn1 :: assn >> testlist = testlist
@R assnn :: assn >> testlist = assn
@R assn1c :: assn >> testlist , = testlist
@R assn1c2 :: assn >> testlist , = testlist ,
@R assnnc :: assn >> testlist , = assn
##testing @R exprassn :: expr_stmt >> expr_stmt = testlist
@R exprlistc :: expr_stmt >> testlist ,
##testing @R exprassnc :: expr_stmt >> expr_stmt = testlist ,
##7 kw print
@R rprint0 :: print_stmt >> print
@R rprint :: print_stmt >> print testlist
@R rprintc :: print_stmt >> print testlist ,
##8 kw del
@R rdel :: del_stmt >> del exprlist
##9 trivially handled in #4
##10 kw raise continue break return
## eliminates 11 12 13 14
@R rbreak :: flow_stmt >> break
@R rcontinue :: flow_stmt >> continue
@R rreturn0 :: flow_stmt >> return
@R rreturn :: flow_stmt >> return testlist
@R rreturnc :: flow_stmt >> return testlist ,
@R rraise1 :: flow_stmt >> raise test
@R rraise2 :: flow_stmt >> raise test , test
@R rraise3 :: flow_stmt >> raise test , test , test
## 11 12 13 14 skipped
## 15 kw import from
@R rimport :: import_stmt >> import dotted_name_list
@R rimportc :: import_stmt >> import dotted_name_list ,
@R dnlist1 :: dotted_name_list >> dotted_name
@R dnlistn :: dotted_name_list >> dotted_name_list , dotted_name
@R rfrom :: import_stmt >> from dotted_name import name_list
@R rfroms :: import_stmt >> from dotted_name import *
@R rfromc :: import_stmt >> from dotted_name import name_list ,
@R nlistn :: name_list >> name_list , NAME
@R nlist1 :: name_list >> NAME
##16 nt NAME
@R dn1 :: dotted_name >> NAME
@R dnn :: dotted_name >> dotted_name . NAME
##17 kw global
@R global1 :: global_stmt >> global NAME
@R globaln :: global_stmt >> global_stmt , NAME
## 18 19 ignored
##20 kw exec in
@R exec1 :: exec_stmt >> exec expr
@R exec2 :: exec_stmt >> exec expr in test
@R exec3 :: exec_stmt >> exec expr in test , test
##21 kw if elif else punct :
@R ifr :: if_stmt >> if test : suite elifs
@R elifs0 :: elifs >>
@R relse :: elifs >> else : suite
@R elifsn :: elifs >> elif test : suite elifs
##22 kw while
@R while1 :: while_stmt >>
while test :
suite
@R while2 :: while_stmt >>
while test :
suite
else :
suite
##23 kw for
@R for1 :: for_stmt >>
for exprlist in testlist :
suite
@R for2 :: for_stmt >>
for exprlist in testlist :
suite
else :
suite
##24 kw try
@R tryr :: try_stmt >> try : suite excepts
@R excepts1 :: excepts >> except_clause : suite
@R excepts2 :: excepts >> except_clause : suite else : suite
@R exceptsn :: excepts >> except_clause : suite excepts
@R tryf :: try_stmt >> try : suite finally : suite
##25 kw except
@R except0 :: except_clause >> except
@R except1 :: except_clause >> except test
@R except2 :: except_clause >> except test , test
##26
@R class1 :: classdef >> class NAME : suite
@R class2 :: classdef >> class NAME ( testlist ) : suite
##27 kw def
@R rdef :: funcdef >> def NAME parameters : suite
##28, 29 punct = *
## (modified from grammar presented)
@R params1 :: parameters >> ( varargslist )
@R params1c :: parameters >> ( varargslist , )
@R params2 :: varargslist >>
## this is way too permissive: fix at semantic level
@R params3 :: varargslist >> arg
@R params4 :: varargslist >> varargslist , arg
@R argd :: arg >> NAME = test
@R arg2 :: arg >> fpdef
@R arg3 :: arg >> * NAME
@R arg4 :: arg >> ** NAME
## 30
@R fpdef1 :: fpdef >> NAME
@R fpdef2 :: fpdef >> ( fplist )
@R fpdef2c :: fpdef >> ( fplist , )
##31
@R fplist1 :: fplist >> fpdef
@R fplistn :: fplist >> fplist , fpdef
##32 t INDENT DEDENT
@R ssuite :: suite >> simple_stmt
@R csuite :: suite >> NEWLINE INDENT stmtseq DEDENT
@R stmtseq1 :: stmtseq >> stmt
@R stmtseqn :: stmtseq >> stmtseq stmt
##33 kw or cancels 53
@R testor :: test >> or_test
@R testand :: or_test >> and_test
@R testor1 :: or_test >> or_test or and_test
## @R testlambda0 :: test >> lambda : test REDUNDANT
@R testlambda1 :: test >> lambda varargslist : test
##34 kw and
@R andnot :: and_test >> not_test
@R andand :: and_test >> and_test and not_test
##35 kw not
@R notnot :: not_test >> not not_test
@R notcmp :: not_test >> comparison
##36 NOTE KWS == >= <= <> !=
@R cmpexpr :: comparison >> expr
@R cmplt :: comparison >> comparison < expr
@R cmpgt :: comparison >> comparison > expr
@R cmpeq :: comparison >> comparison == expr
@R cmpge :: comparison >> comparison >= expr
@R cmple :: comparison >> comparison <= expr
@R cmpnep :: comparison >> comparison <> expr
@R cmpne :: comparison >> comparison != expr
@R cmpin :: comparison >> comparison in expr
@R cmpnotin :: comparison >> comparison not in expr
@R cmpis :: comparison >> comparison is expr
@R cmpisnot :: comparison >> comparison is not expr
##37 kw is not punct > < ! (eliminated)
##38 p |
@R expr_xor :: expr >> xor_expr
@R expr_lor :: expr >> expr | xor_expr
##39 p ^
@R xor_and :: xor_expr >> and_expr
@R xor_xor :: xor_expr >> xor_expr ^ and_expr
##40
@R and_shift :: and_expr >> shift_expr
@R and_and :: and_expr >> and_expr & shift_expr
##41 note kw's << >x> note goofy x to avoid confusing the grammar
@R shift_arith :: shift_expr >> arith_expr
@R shift_left :: shift_expr >> shift_expr << arith_expr
@R shift_right :: shift_expr >> shift_expr >x> arith_expr
##42
@R arith_term :: arith_expr >> term
@R arith_plus :: arith_expr >> arith_expr + term
@R arith_minus :: arith_expr >> arith_expr - term
##43 p */%
@R termfactor :: term >> factor
@R termmul :: term >> term * factor
@R termdiv :: term >> term / factor
@R termmod :: term >> term % factor
## stuff for power
@R factorpower :: factor >> power
@R factorexp :: factor >> factor ** power
##44 p ~
@R powera :: power >> atom trailerlist
@R trailerlist0 :: trailerlist >>
@R trailerlistn :: trailerlist >> trailer trailerlist
@R powerp :: power >> + power
@R powerm :: power >> - power
@R poweri :: power >> ~ power
##45 t NUMBER STRING
@R nulltup :: atom >> ( )
@R parens :: atom >> ( testlist )
@R parensc :: atom >> ( testlist , )
@R nulllist :: atom >> [ ]
@R list :: atom >> [ testlist ]
@R listc :: atom >> [ testlist , ]
@R nulldict :: atom >> { }
@R dict :: atom >> { dictmaker }
@R dictc :: atom >> { dictmaker , }
@R repr :: atom >> ` testlist `
## @R reprc :: atom >> ` testlist , ` doesn't work, apparently
@R aname :: atom >> NAME
## note number to be broken out into FLOAT OCTINT HEXINT INT
@R anumber :: atom >> NUMBER
@R astring :: atom >> stringseq
@R stringseq0 :: stringseq >> STRING
@R stringseqn :: stringseq >> stringseq STRING
##46
@R nullcall :: trailer >> ( )
@R call :: trailer >> ( arglist )
@R callc :: trailer >> ( arglist , )
@R index :: trailer >> [ subscriptdots ]
@R getattr :: trailer >> . NAME
##47
@R arg1 :: arglist >> argument
@R argn :: arglist >> arglist , argument
##@R argn1 :: arglist >> arglist , NAME = test
##48 ( !!!! is this wrong in PP?)
@R posarg :: argument >> test
## here the left test should be a NAME always, but parser doesn't like it
@R namearg :: argument >> test = test
##49 this IS wrong in PP (numeric ext)
@R nodots :: subscriptdots >> subscriptseq
@R yesdots :: subscriptdots >> subscriptseq , . . . , subscriptseq
@R subscript1 :: subscriptseq >> subscript
@R subscriptn :: subscriptseq >> subscriptseq , subscript
@R subscriptt :: subscript >> test
@R subscripts0 :: subscript >> :
@R subscriptsL :: subscript >> test :
@R subscriptsR :: subscript >> : test
@R subscripts :: subscript >> test : test
##50
@R exprlist1 :: exprlist >> expr
@R exprlistn :: exprlist >> exprlist , expr
##51
@R testlist0 :: testlist >> test
@R testlistn :: testlist >> testlist , test
##52
@R dictmaker1 :: dictmaker >> test : test
@R dictmaker2 :: dictmaker >> dictmaker , test : test
"""
# whitespace-separated list of all nonterminal names used by pyrules
nonterms = """
subscriptdots subscript arg
argument arglist subscriptseq params trailerlist
factor atom trailer dictmaker stringseq power
xor_expr and_expr shift_expr arith_expr term
and_test or_test not_test comparison comp_op expr
fplist stmtseq varargslist assn
expr elifs suite excepts parameters pbasic pdefault pspecial
testlist exprlist test dotted_name_list dotted_name name_list
if_stmt while_stmt for_stmt try_stmt funcdef classdef
expr_stmt print_stmt del_stmt flow_stmt import_stmt global_stmt
small_stmt compound_stmt stmt simple_stmt exec_stmt
file_input except_clause fpdef cmp_op
all
"""
import string
# python needs special handling for the lexical stuff
# NOTE(review): string.letters/string.digits are Python-2-only attributes;
# these regexes assume the py2 `string` module.
NAMEre = "[" + string.letters + "_][" + string.letters+string.digits +"]*"
NUMBERre = "[" + string.digits + "]+" # temporary!
STRINGre = '"[^"\n]*"' # to be overridden in lexdict
#NEWLINEre = "\n" # to be overridden in lexdict
INDENTre = "#" # a fake! to be overridden
DEDENTre = "#" # a fake! to be overridden
def echo(str):
    """Terminal interpretation callback: hand back the matched text unchanged."""
    return str
def DeclareTerminals(Grammar):
    """Register the lexical terminals on Grammar with echo as interpreter."""
    declarations = (
        ("NAME", NAMEre),
        ("NUMBER", NUMBERre),
        ("STRING", STRINGre),
        #("NEWLINE", NEWLINEre),  # newline is kw!
        ("INDENT", INDENTre),
        ("DEDENT", DEDENTre),
        )
    for (termname, termregex) in declarations:
        Grammar.Addterm(termname, termregex, echo)
# not >x> is a fake!
# keyword and multi-character punctuation terminals; ">>" is spelled
# ">x>" so it cannot collide with the grammar-rule separator, and
# NEWLINE is declared as a keyword rather than a terminal.
keywords = """
and break class continue def del elif else except exec
finally for from global if import in is lambda not or pass
print raise return try while == >= <= <> != >x> << NEWLINE
**
"""
import kjParser, string, re
from kjParser import KEYFLAG, ENDOFFILETERM
# character classes used when splitting identifiers/numbers from keywords
alphanumunder = string.letters+string.digits+"_"
alpha = string.letters + "_"
# components that are part of a identifier (cannot be next to kw).
id_letters = map(None, alphanumunder)
# terminator re for names
nametermre = "[^" + alphanumunder + "]"
nameterm = re.compile(nametermre)
# terminator re for numbers (same as above but allow "." in num).
numtermre = "[^" + alphanumunder + "\.]"
numterm = re.compile(numtermre)
parseerror = "parseerror"
pycommentre = r"(#.*)"
# whitespace regex outside of brackets
# white followed by (comment\n maybe repeated)
# DON'T EAT NEWLINE!!
# NOTE(review): the "[\]\n" fragment below looks like it was meant to be
# an escaped backslash-newline line continuation (compare the commented
# alternative two lines down) -- verify against upstream pygram.py.
pywhiteoutre = r"([ \t\r\014]|[\]\n)*%s?" % pycommentre
pywhiteout = re.compile(pywhiteoutre)
# whitespace regex inside brackets
# white or newline possibly followed by comment, all maybe repeated
pywhiteinre = pywhiteoutre #"[ \t\r]*(\\\\\n)*%s?" % pycommentre
pywhitein = re.compile(pywhiteinre)
# totally blank lines (only recognize if next char is newline)
#allblankre = "\n" + pywhiteinre
#allblank = re.compile(allblankre)
# re for indentation (might accept empty string)
indentp = re.compile(r"[\t ]*")
# two char kws and puncts
char2kw = ["if", "or", "in", "is"]
punct2 = ["<>", "<<", ">>", "<=", ">=", "!=", "**", "=="]
# >two char kws as map of first 3 chars to others
char3k_data = """
and break class continue def del elif else except
finally for from global import lambda not pass print
raise return try while exec
"""
char3kw = string.split(char3k_data)
char3kwdict = {}
for x in char3kw:
    char3kwdict[x[:3]] = x
# NOTE: newline is treated same as a punctuation
# NOTE: "' ARE NOT PUNCTS
punct = "~!#%^&*()-+=|{}<>,.;:/[]{}\n`"
punctlist = map(None, punct)
# kwmap: token text -> ((flag-pair, name), length-1) lexer result entries
kwmap = {}
for x in char2kw + punct2 + char3kw + map(None, punct):
    # everything parses as length 1 to the outer world.
    kwmap[x] = (((KEYFLAG, x), x), 1)
# special hack
kwmap[">>"] = (((KEYFLAG, ">x>"), ">x>"), 1)
newlineresult = kwmap["\n"] = (((KEYFLAG, "NEWLINE"), "NEWLINE"), 1)
#finaldedent = (((TERMFLAG, "DEDENT"), ""), 1)
def RMATCH(re, key, start=0):
    """Match compiled pattern `re` against `key` at offset `start`.

    Returns the length of the match, or -1 when there is none
    (mimicking the old regex-module calling convention).
    """
    matched = re.match(key, start)
    if matched is None:
        return -1
    return matched.end() - matched.start()
class pylexdict(kjParser.LexDictionary):
    def __init__(self):
        """Set up lexer state: bracket depth, scan cursor, indent stack."""
        kjParser.LexDictionary.__init__(self)
        # need to add special map for >>
        self.brackets = 0 # count of active brackets
        self.realindex = 0 # where to start
        self.indents = [""] # stack of indents (start with a fake one)
        self.lineno = 0
        self.atdedent = 0
        ### handle multiple dedents correctly!!!
        ### translate tabs to 8 spaces...
        from kjParser import TERMFLAG
        # pre-built (flag, name) pairs reused when emitting terminals
        self.NAMEflag = (TERMFLAG, "NAME")
        self.STRINGflag = (TERMFLAG, "STRING")
        self.NEWLINEflag = (TERMFLAG, "NEWLINE")
        self.INDENTflag = (TERMFLAG, "INDENT")
        self.DEDENTflag = (TERMFLAG, "DEDENT")
        self.NUMBERflag = (TERMFLAG, "NUMBER")
    def endoffile(self, String):
        """Emit one DEDENT per outstanding indent, then the EOF terminal.

        Called repeatedly at end of input; each call pops one entry off
        self.indents until the stack is empty."""
        # pop off all indentations!
        indents = self.indents
        #lastresult = self.lastresult
        self.realindex = len(String)
        if not indents:
            # pop indents
            #print "eof after dedent"
            result = self.lastresult = (ENDOFFILETERM, 0)
        else:
            #print "eof as dedent after", self.lastresult
            del indents[-1]
            if indents:
                dedent = indents[-1]
            else:
                dedent = ""
            result = self.lastresult = ((self.DEDENTflag, dedent), 1)
        #print "returning eof", result, "after", lastresult
        return result
def Token(self, String, StartPosition):
#print "Token", (StartPosition,
# `String[self.realindex:self.realindex+20]`, self.lastresult)
# HAVE TO FAKE OUT LEXER FOR DEDENTS
# STARTPOSITION COUNTS # OF TOKEN, NOT STRING POSITION
# STRING POSITION IS MAINTAINED IN LexDict object.
lastindex = self.lastindex
lastresult = self.lastresult
if self.laststring is not String:
#print "parsing new string"
self.laststring = String
# special hack: skip lead whitespace
cursor = 0
self.lineno = 1
while 1:
test = RMATCH(pywhitein,String, cursor)
if test<0: break
next = cursor + test
#print "lead skip:", next, String[cursor:next]
if String[next]!="\n": break
#skipped = String[cursor:next]
#if "\n" in skipped:
# self.lineno = (
# self.lineno + len(string.splitfields(skipped, "\n")))
#self.lineno = self.lineno+1
cursor = next + 1
self.realindex = cursor
self.saveindex = 0
self.indents = [""] # stack of indents (start with a fake one)
# pretend we saw a newline
self.lastresult = newlineresult
if StartPosition!=0:
self.laststring = None
raise ValueError, "python lexical parsing must start at zero"
lastindex = self.lastindex
lastresult = None
elif lastindex == StartPosition:
#print "returning lastresult ", lastresult
return lastresult
elif lastindex != StartPosition-1:
raise ValueError, "python lexer can't skip tokens"
#print "parsing", StartPosition, lastresult
# do newline counting here!
delta = String[self.saveindex: self.realindex]
#print "delta", `delta`
if "\n" in delta:
#print self.lineno, self.saveindex, self.realindex, `delta`
self.lineno = self.lineno + len(
string.splitfields(delta, "\n")) - 1
realindex = self.saveindex = self.realindex
self.lastindex = StartPosition
# skip whitespace (including comments)
### needs to be improved to parse blank lines, count line numbers...
# skip all totally blank lines (don't eat last newline)
atlineend = (String[realindex:realindex+1] == "\n"
or lastresult is newlineresult
or self.atdedent)
skipnewlines = (lastresult is newlineresult or
self.atdedent or
self.brackets>0)
if atlineend: #String[realindex:realindex+1]=="\n":
#print "trying to skip blank lines", String[realindex:realindex+10]
while 1:
#if String[realindex:realindex+1]=="\n":
# start = realindex+1 # move past current newline
# self.lineno = self.lineno + 1
#else:
# start = realindex
start = realindex
if skipnewlines:
while String[start:start+1]=="\n":
start = start+1
#self.lineno = self.lineno+1
#print "matching", `String[start:start+10]`
skip = RMATCH(pywhitein,String, start)
#print "skip=", skip
if skip<0: break
rs = skip + realindex + (start-realindex)
if rs==realindex: break
#print "at", rs, `String[rs]`
if (rs<len(String) and
(String[rs] == "\n" or
(skipnewlines and String[rs-1:rs]=="\n"))):
#print "skipping blank line"
#if lastresult is newlineresult or self.brackets>0:
# rs = rs + 1
#skipped = String[start:rs]
#if "\n" in skipped:
#self.lineno = self.lineno + len(
# string.splitfields(skipped, "\n"))
self.realindex = realindex = rs
#self.lineno = self.lineno+1
else:
if skipnewlines: self.realindex = realindex = start
break
#print "after skipping blank lines", `String[realindex:realindex+20]`
skipto = realindex
skip = 0
if self.brackets>0:
while 1:
#print "skipping white in brackets", skipto
if realindex>len(String):
break
if String[skipto]=="\n":
#self.lineno = self.lineno+1
skipto = skipto + 1
self.realindex = realindex = skipto
continue
skip = RMATCH(pywhiteout,String, skipto)
nextskipto = skipto+skip
#skipped = String[skipto:nextskipto]
#if "\n" in skipped:
# self.lineno = self.lineno+len(
# string.splitfields(skipped, "\n"))
if skip>0:
skipto = nextskipto
else: break
skip = skipto - realindex
elif not atlineend:
skip = RMATCH(pywhitein,String, realindex)
if skip<=0:
skip = 0
else:
#print "skipping", skip
nextri = realindex + skip
#skipped = String[realindex:nextri]
#if "\n" in skipped:
# self.lineno = self.lineno + len(
# string.splitfields(skipped, "\n"))
realindex = self.realindex = nextri
if realindex>=len(String):
return self.endoffile(String)
# now look for a keyword, name, number, punctuation,
# INDENT, DEDENT, NEWLINE
first = String[realindex]
#if last parse was newline and not in brackets:
# look for indent/dedent
if (self.brackets<=0 and (lastresult is newlineresult or self.atdedent)
and first != "\n"):
#print "looking for dent", realindex, `String[realindex:realindex+20]`
match = RMATCH(indentp,String, realindex)
if match>=0:
dent = String[realindex: realindex+match]
#print "dent match", match, `dent`
oldindex = realindex
self.realindex = realindex = realindex+match
# replace tabs with 8 spaces
dent = string.joinfields(string.splitfields(dent, "\t"),
" ")
dents = self.indents
lastdent = dents[-1]
ldl = len(lastdent)
dl = len(dent)
#print "last", ldl, dents
if ldl<dl:
self.atdedent = 0
result = self.lastresult = ((self.INDENTflag, dent), 1)
dents.append(dent)
#print "indent ", result, dents
return result
if ldl>dl:
self.realindex = oldindex # back up, may have to see it again!
self.atdedent = 1
result = self.lastresult = ((self.DEDENTflag, dent), 1)
del dents[-1]
#print "dedent ", result, dl, dents
return result
# otherwise, indentation is same, keep looking
# might be at eof now:
if realindex>=len(String):
#print "returning eof"
return self.endoffile(String)
first = String[realindex]
self.atdedent = 0
from string import digits #, letters
if (first in punctlist and
# special case for .123 numbers (yuck!)
(first!="." or String[realindex+1] not in digits)):
# is it a 2 char punct?
first2 = String[realindex:realindex+2]
if first2 in punct2:
result = self.lastresult = kwmap[first2]
self.realindex = realindex+2
#print "2 digit punct", result
return result
# otherwise, just return normal punct
result = self.lastresult = kwmap[first]
self.realindex = self.realindex + 1
### special bookkeeping
if first=="\n":
result = newlineresult
#print "newline!"
#self.lineno = self.lineno+1
elif first in "[{(":
#print "bracket!"
self.brackets = self.brackets + 1
elif first in "]})":
#print "close bracket!"
self.brackets = self.brackets - 1
#print "1 digit punct", result
return result
if first in digits or first==".":
# parse a number...
skip = numterm.search(String, realindex)
if skip<=realindex:
raise parseerror, "number length<1 (!)"
thenumber = String[realindex:skip]
self.realindex = skip
### note don't interpret number here!!
result = self.lastresult = ((self.NUMBERflag, thenumber), 1)
#print "number", result
return result
if first in alpha:
# try keyword...
first2 = String[realindex: realindex+2]
if first2 in char2kw:
if String[realindex+2:realindex+3] not in id_letters:
# parse a 2 char kw first2
result = self.lastresult = kwmap[first2]
self.realindex = self.realindex+2
#print "keyword 2", result
return result
first3 = String[realindex: realindex+3]
if char3kwdict.has_key(first3):
the_kw = char3kwdict[first3]
the_end = realindex+len(the_kw)
if ((the_end<len(String)) and
(String[the_end] not in id_letters) and
(String[realindex:the_end]==the_kw)):
# parse the_kw
self.realindex = the_end
result = self.lastresult = kwmap[the_kw]
#print "keyword +", result
return result
#otherwise parse an identifier
#print "looking for name:", `String[realindex:realindex+10]`
skip = nameterm.search(String, realindex)
if skip<=realindex:
raise parseerror, "identifier length<1 (!)"
theid = String[realindex:skip]
self.realindex = skip
### note don't interpret number here!!
result = self.lastresult = ((self.NAMEflag, theid), 1)
#print "id", result
return result
if first in "\"'":
# check for triplequotes
first3 = first*3
if String[realindex: realindex+3] == first3:
# parse triple quotes
start = place = realindex+3
while 1:
last = string.find(String, first3, place)
if last<0:
raise parseerror, "failed to terminate triple quotes"
if String[last-1:last]=="\\" and String[last-2:last-1]!="\\":
place = last+1
else: break
the_string = String[start: last]
self.realindex = last+3
result = self.lastresult = ((self.STRINGflag, the_string), 1)
#print "3q string", result
# count the newlines!
#newlinecount = len(string.splitfields(the_string, "\n"))
#self.lineno = self.lineno+newlinecount
#print "triple quotes", result
return result
else:
# parse single quotes
sanity = start = place = realindex+1
done = 0
while 1:
sanity = min(string.find(String, "\n", sanity), len(String))
if sanity<start:
sanity=len(String)
break
if String[sanity-1]!="\\":
break
else:
#self.lineno = self.lineno+1
sanity = sanity + 1
while 1:
last = string.find(String, first, place)
if last<0 or last>sanity:
raise parseerror, "failed to terminate single quotes"
if String[last-1:last]=="\\":
# are we at the end of an odd number of backslashes? (yuck!)
bplace = last-1
while String[bplace:bplace+1]=="\\":
bplace = bplace-1
if (last-bplace)%2==1:
break # the end quote is real!
place = last+1
else: break
the_string = String[start:last]
self.realindex = last+1
result = self.lastresult = ((self.STRINGflag, the_string), 1)
#print "1q string", result
return result
#print (String[realindex-20:realindex-1], String[realindex],
# String[realindex+1:realindex+20])
raise parseerror, "invalid first: " + `first`
# use a modified lexstringwalker
class pylexstringwalker(kjParser.LexStringWalker):
    """LexStringWalker whose error DUMP shows the lexer's *string*
    position (LexDict.realindex) rather than the token index."""

    def DUMP(self):
        kjParser.DumpStringWindow(self.String, self.LexDict.realindex)
## a HORRIBLE HACK! of a hack: override the DoParse of Grammar
## to give Python line numbers.  RELIES ON GLOBAL pyg
##
def hackDoParse(String, Context=None, DoReductions=1):
    """Parse ``String`` with the global grammar ``pyg``.

    Any exception raised during parsing is re-raised with the lexer's
    current line number prepended to its value, so syntax errors can be
    reported as "near line N".  Returns (parse result, Context).
    """
    import sys, kjParser
    try:
        # construct the ParserObj
        # add a newline to front to avoid problem with leading comment
        #String = "\n%s\n" % String
        Stream = pylexstringwalker( String, pyg.LexD )
        Stack = [] # {-1:0} #Walkers.SimpleStack()
        ParseOb = kjParser.ParserObj( pyg.RuleL, Stream, pyg.DFA, Stack, \
                                      DoReductions, Context )
        # do the parse
        ParseResult = ParseOb.GO()
        # return final result of reduction and the context
        return (ParseResult[1], Context)
        #return kjParser.Grammar.DoParse(pyg, String, Context, DoReductions)
    except: ### for testing!!
        # Python 2 exception plumbing: annotate and re-raise the same
        # exception type with the line number folded into the value.
        t, v = sys.exc_type, sys.exc_value
        v = ("near line", pyg.LexD.lineno, v)
        raise t, v
# Advisory text printed whenever the grammar has to be regenerated from
# scratch; interpolates this module's name so the user can find the knob.
buildinfo = """
Please edit the ARCHIVE parameter of this module (%s)
to place the python grammar archive in a standard
directory to prevent the module from rebuilding
the python grammar over and over and over...
""" % __name__
def GrammarBuild():
    """Regenerate the Python grammar from its declarative description,
    marshal it to ``marshalfilename``, self-test it, and return it.

    Rebinds the module-global ``pyg`` as a side effect.  Slow: prints
    progress messages while compiling.
    """
    global pyg
    import kjParseBuild
    pyg = kjParseBuild.NullCGrammar()
    pyg.DoParse = hackDoParse
    # override lexical dict here
    pyg.LexD = pylexdict()
    DeclareTerminals(pyg)
    pyg.Keywords(keywords)
    pyg.punct("~!#%^&*()-+=|{}'`<>,.;:/[]{}")
    pyg.Nonterms(nonterms)
    pyg.Declarerules(pyrules)
    print buildinfo
    print "compiling... this may take a while..."
    pyg.Compile()
    print "dumping"
    outfile = open(marshalfilename, "wb")
    pyg.MarshalDump(outfile)
    outfile.close()
    print "self testing the grammar"
    test(pyg)
    print "\n\ndone with regeneration"
    return pyg
def unMarshalpygram():
    """Load the marshalled Python grammar, building it if the archive
    file is missing; rebind ``pyg`` and return the grammar.

    The lexical dictionary and DoParse hook are re-installed after
    unmarshalling because they cannot be serialized.
    """
    global pyg
    import kjParser
    print "loading"
    try:
        infile = open(marshalfilename, "rb")
    except IOError:
        # no archive yet: fall back to a full (slow) rebuild
        print marshalfilename, "not found, attempting creation"
        pyg = GrammarBuild()
    else:
        pyg = kjParser.UnMarshalGram(infile)
        infile.close()
        pyg.DoParse = hackDoParse
        # lexical override
        pyg.LexD = pylexdict()
        DeclareTerminals(pyg)
        # BindRules(pyg)
    if dotest:
        print "self testing the grammar"
        test(pyg)
    return pyg
# not used, commented
#### interpretation rules/classes
#
#def zeroth(list, Context):
# return list[0] # eg, for all1, ignore all but first
#
## file_input, stmt, simple_stmt, compound_stmt give list of statement_ob
#def append(list, Context):
# "eg, for top_stmt, conjoin two smt lists"
# return list[0] + list[1]
#
## file_input >zeroth
#
## simple, compound, one_small, small_semi: echol
#def echol(list, Context):
# return list
#
## more_small > seq_sep
#def seq_sep(list, Context):
# list[0].append(list[2])
# return list[0]
#
## smexpr, smassn, smprint, smdel, smflow, smimport, smglobal, smexec
## > zeroth
#
## cmif, cmwhile, cmfor, cmtry, cmdef, cmclass > zeroth
#
#
#def BindRules(pyg):
# for name in string.split("""
# all1 file_input cmif cmwhile cmfor cmtry cmdef cmclass
# smexpr smassn smprint smdel smflow smimport smglobal smexec
# """):
# pyg.Bind(name, zeroth)
# for name in string.split("""
# simple compound one_small small_semi
# """):
# pyg.Bind(name, echol)
# pyg.Bind("top_stmt", append)
# pyg.Bind("more_small", seq_sep)
teststring = """#
#
# a test string
#
from string import join, split
'''
import re
for a in l:
a.attr, a[x], b = c
else:
d = b
'''
class zzz:
'''
#doc string
'''
'''
global re, join
d = {}
for i in range(10): d[i] = i
'''
def test(c,s):
return "this"
while not done:
print done
break
list = [1,2,3]
# comment
return 5
n,x = 89 >> 90 + 6 / 7 % x + z << 6 + 2 ** 8
if x==5:
while y:
for i in range(6):
raise SystemError, "oops"
"""
#teststring ="""\
## comment
#if x in y: print z
#elif 1: print w
#"""
'''
teststring="""
exec "print 1"
"""
'''
def test(grammar, context=None, teststring=teststring):
    """Parse ``teststring`` with ``grammar``, print the result and the
    elapsed time, and return the parse result."""
    from time import time
    now = time()
    x = grammar.DoParse1(teststring, context)
    elapsed = time()-now
    print x
    print elapsed
    return x
# Module self-test switches: set regen to force a grammar rebuild,
# dotest to run the parser self-test after loading.
regen = 0
dotest = 0

if __name__ == "__main__" :
    if regen: GrammarBuild()
    unMarshalpygram()
import sqlgen, sqlbind

# Build the shared SQL grammar once at import time and bind the semantic
# actions to it; every connection reuses this single grammar object.
sql = sqlgen.getSQL()
sql = sqlbind.BindRules(sql)

# Pre-DBAPI-2 string exception raised for all gadfly API misuse.
error = "gadfly_error"

# Module-wide debug chatter level, copied onto connections and cursors.
verbosity = 0
class gadfly:
    """as per the DBAPI spec "gadfly" is the connection object."""

    closed = 0
    verbose = verbosity # debug!

    def __init__(self, databasename=None, directory=None,
                 forscratch=0, autocheckpoint=1, verbose=0):
        """Create a connection; if ``databasename`` is given, open it
        immediately from ``directory``.  ``forscratch`` makes an
        in-memory database with no persistent log/checkpoint."""
        verbose = self.verbose = self.verbose or verbose
        # checkpoint on each commit if set
        self.autocheckpoint = autocheckpoint
        if verbose:
            print "initializing gadfly instance", (\
               databasename, directory, forscratch, verbose)
        self.is_scratch = forscratch
        self.databasename=databasename
        self.directory = directory
        self.fs = None
        self.database = None
        # db global transaction id
        self.transid = 0
        if databasename is not None:
            self.open()

    def transaction_log(self):
        """Return a fresh Transaction_Logger tied to the current
        transaction id and the base database's log."""
        from gfdb0 import Transaction_Logger
        if self.verbose:
            print "new transaction log for", self.transid
        return Transaction_Logger(self.database.log, self.transid, self.is_scratch)

    # causes problems in 1.5?
    #def __del__(self):
    #    """clear database on deletion, just in case of circularities"""
    #    # implicit checkpoint
    #    if self.verbose:
    #        print "deleting gadfly instance", self.databasename
    #    if not self.closed:
    #        self.close()

    def checkpoint(self):
        """permanently record committed updates"""
        # note: No transactions should be active at checkpoint for this implementation!
        # implicit abort of active transactions!
        verbose = self.verbose
        if verbose:
            print "checkpointing gadfly instance", self.databasename
        db = self.database
        log = db.log
        # dump committed db to fs
        fs = self.fs
        if fs and db and not db.is_scratch:
            # flush the log
            if log:
                if verbose: print "gadfly: committing log"
                log.commit()
            elif verbose:
                print "gadfly: no log to commit"
            if verbose: print "gadfly: dumping mutated db structures"
            fs.dump(db)
        elif verbose:
            print "gadfly: no checkpoint required"
        if verbose:
            print "gadfly: new transid, reshadowing"
        self.transid = self.transid+1
        self.working_db.reshadow(db, self.transaction_log())

    def startup(self, databasename, directory, scratch=0, verbose=0):
        """Create a brand-new database on disk (or in memory when
        ``scratch``).  Fails if this connection is already bound."""
        from gfdb0 import Database0, File_Storage0
        verbose = self.verbose
        if verbose:
            print "gadfly: starting up new ", databasename
        if self.database:
            raise error, "cannot startup, database bound"
        self.databasename=databasename
        self.directory = directory
        db = self.database = Database0()
        db.is_scratch = scratch or self.is_scratch
        self.fs = File_Storage0(databasename, directory)
        self.working_db = Database0(db, self.transaction_log())
        # commit initializes database files and log structure
        self.commit()
        # for now: all transactions serialized
        #  working db shared among all transactions/cursors
        self.transid = self.transid+1
        self.working_db = Database0(db, self.transaction_log())

    def restart(self):
        """reload and rerun committed updates from log, discard uncommitted"""
        # mainly for testing recovery.
        if self.verbose:
            print "gadfly: restarting database", self.databasename
        self.database.clear()
        self.working_db.clear()
        self.working_db = None
        self.database = None
        self.open()

    def open(self):
        """(re)load existing database"""
        if self.verbose:
            print "gadfly: loading database", self.databasename
        from gfdb0 import File_Storage0, Database0
        if self.directory:
            directory = self.directory
        else:
            directory = "."
        fs = self.fs = File_Storage0(self.databasename, directory)
        db = self.database = fs.load(sql)
        self.transid = self.transid+1
        # the working db shadows the base db for the active transaction
        self.working_db = Database0(db, self.transaction_log())

    def add_remote_view(self, name, definition):
        """add a remote view to self.
           Must be redone on each reinitialization!
           Must not recursively reenter the query evaluation process for
           this database!
           "Tables" added in this manner cannot be update via SQL.
        """
        self.database[name] = definition
        self.working_db[name] = definition

    def close(self):
        """checkpoint and clear the database"""
        if self.closed: return
        if self.verbose:
            print "gadfly: closing database", self.databasename
        db = self.database
        if not db.is_scratch:
            self.checkpoint()
        if db: db.clear()
        wdb = self.working_db
        if wdb:
            wdb.clear()
        self.working_db = None
        self.closed = 1

    def commit(self):
        """commit the working database+transaction, flush log, new transid"""
        verbose = self.verbose
        autocheckpoint = self.autocheckpoint
        if self.verbose:
            print "gadfly: committing", self.transid, self.databasename
        self.transid = self.transid+1
        fs = self.fs
        db = self.database
        wdb = self.working_db
        wdblog = wdb.log
        if wdblog: wdblog.commit()
        wdb.commit()
        if fs and db and not db.is_scratch:
            if autocheckpoint:
                if verbose:
                    print "gadfly: autocheckpoint"
                # skips a transid?
                self.checkpoint()
            else:
                if verbose:
                    print "gadfly: no autocheckpoint"
                wdb.reshadow(db, self.transaction_log())
        else:
            if verbose:
                print "gadfly: scratch db, no logging, just reshadow"
            wdb.reshadow(db, self.transaction_log())

    def rollback(self):
        """discard the working db, new transid, recreate working db"""
        verbose = self.verbose
        if verbose:
            print "gadfly: rolling back", self.transid, self.databasename
        if not (self.fs or self.database):
            raise error, "unbound, cannot rollback"
        # discard updates in working database
        self.working_db.clear()
        self.transid = self.transid+1
        self.working_db.reshadow(self.database, self.transaction_log())
        #self.open()

    def cursor(self):
        """Return a new GF_Cursor over this connection."""
        if self.verbose:
            print "gadfly: new cursor", self.databasename
        db = self.database
        if db is None:
            raise error, "not bound to database"
        return GF_Cursor(self)

    def dumplog(self):
        """Debug helper: print the base database's log, if any."""
        log = self.database.log
        if log:
            log.dump()
        else:
            print "no log to dump"

    def table_names(self):
        """Return the relation names visible in the working database."""
        return self.working_db.relations()

    def DUMP_ALL(self):
        """Debug helper: print both the base and working databases."""
        print "DUMPING ALL CONNECTION DATA", self.databasename, self.directory
        print
        print "***** BASE DATA"
        print
        print self.database
        print
        print "***** WORKING DATA"
        print
        print self.working_db
class GF_Cursor:
    """DBAPI-style cursor over a gadfly connection.

    Each cursor evaluates statements against its own shadow of the
    connection's working database, so a failed statement can be
    discarded without corrupting the transaction.
    """

    verbose = verbosity

    arraysize = None
    description = None

    EVAL_DUMP = 0 # only for extreme debugging!

    def __init__(self, gadfly_instance):
        verbose = self.verbose = self.verbose or gadfly_instance.verbose
        if verbose:
            print "GF_Cursor.__init__", id(self)
        self.connection = gadfly_instance
        self.results = None
        self.resultlist = None
        self.statement = None
        # make a shadow of the shadow db! (in case of errors)
        from gfdb0 import Database0
        self.shadow_db = Database0()
        self.reshadow()
        self.connection = gadfly_instance

    def reshadow(self):
        """Re-derive this cursor's private shadow from the connection's
        current working database."""
        if self.verbose:
            print "GF_Cursor.reshadow", id(self)
        db = self.connection.working_db
        shadow = self.shadow_db
        shadow.reshadow(db, db.log)
        if self.verbose:
            print "rels", shadow.rels.keys()

    def close(self):
        """Detach from the connection (no further execution possible)."""
        if self.verbose:
            print "GF_Cursor.close", id(self)
        self.connection = None

    def reset_results(self):
        """Materialize the single pending result relation into
        ``self.resultlist`` as a list of value tuples."""
        if self.verbose:
            print "GF_Cursor.reset_results", id(self)
        rs = self.results
        if rs is None:
            raise error, "must execute first"
        if len(rs)!=1:
            raise error, "cannot retrieve multiple results"
        rel = rs[0]
        rows = rel.rows()
        atts = rel.attributes()
        tupatts = tuple(atts)
        resultlist = list(rows)
        if len(tupatts)==1:
            # single column: still return 1-tuples per DBAPI convention
            att = tupatts[0]
            for i in xrange(len(resultlist)):
                resultlist[i] = (resultlist[i][att],)
        else:
            for i in xrange(len(resultlist)):
                resultlist[i] = resultlist[i].dump(tupatts)
        self.resultlist = resultlist

    def fetchone(self):
        """Return the next result row; raise when exhausted."""
        if self.verbose:
            print "GF_Cursor.fetchone", id(self)
        r = self.resultlist
        if r is None:
            self.reset_results()
            r = self.resultlist
        if len(r)<1:
            raise error, "no more results"
        result = r[0]
        del r[0]
        return result

    def fetchmany(self, size=None):
        """Return (and consume) up to ``size`` rows; all rows if size
        is omitted."""
        if self.verbose:
            print "GF_Cursor.fetchmany", id(self)
        r = self.resultlist
        if r is None:
            self.reset_results()
            r = self.resultlist
        if size is None:
            size = len(r)
        result = r[:size]
        del r[:size]
        return result

    def fetchall(self):
        """Return all remaining rows."""
        if self.verbose:
            print "GF_Cursor.fetchall", id(self)
        return self.fetchmany()

    def execute(self, statement=None, params=None):
        """execute operations, commit results if no error"""
        success = 0
        verbose = self.verbose
        if verbose:
            print "GF_Cursor.execute", id(self)
        if statement is None and self.statement is None:
            raise error, "cannot execute, statement not bound"
        if statement!=self.statement:
            if verbose: print "GF_cursor: new statement: parsing"
            # only reparse on new statement.
            self.statement=statement
            from sqlsem import Parse_Context
            context = Parse_Context()
            cs = self.commands = sql.DoParse1(statement, context)
        else:
            if verbose: print "GF_cursor: old statment, not parsing"
            cs = self.commands
        # always rebind! (db may have changed)
        if verbose: print "GF_Cursor: binding to temp db"
        # make a new shadow of working db
        # (should optimize?)
        self.reshadow()
        # get shadow of working database
        database = self.shadow_db
        if self.EVAL_DUMP:
            print "***"
            print "*** dumping connection parameters before eval"
            print "***"
            print "*** eval scratch db..."
            print
            print database
            print
            print "*** connection data"
            print
            self.connection.DUMP_ALL()
            print "********** end of eval dump"
        for i in xrange(len(cs)):
            if verbose:
                print "GFCursor binding\n", cs[i]
                print database.rels.keys()
            cs[i] = cs[i].relbind(database)
        cs = self.commands
        self.results = results = list(cs)
        # only unshadow results on no error
        try:
            for i in xrange(len(cs)):
                results[i] = cs[i].eval(params)
            success = 1
        finally:
            #print "in finally", success
            # only on no error...
            if success:
                # commit updates in shadow of working db (not in real db)
                if verbose: print "GFCursor: successful eval, storing results in wdb"
                database.log.flush()
                # database commit does not imply transaction commit.
                database.commit()
            else:
                if verbose:
                    print \
  "GFCursor: UNSUCCESSFUL EVAL, discarding results and log entries"
                # forget everything so the next execute() reparses
                self.statement = None
                self.results = None
                self.resultlist = None
                database.log.reset()
        # handle curs.description
        self.description = None
        if len(results)==1:
            result0 = results[0]
            try:
                atts = result0.attributes()
            except:
                # non-query statement (no attributes): leave description None
                pass
            else:
                descriptions = list(atts)
                # DBAPI description rows are 7-tuples; only the name is known
                fluff = (None,) * 6
                for i in xrange(len(atts)):
                    descriptions[i] = (atts[i],) + fluff
                self.description = tuple(descriptions)
        self.resultlist = None

    def setoutputsize(self, *args):
        # not implemented
        pass

    def setinputsizes(self, *args):
        # not implemented
        pass

    def pp(self):
        """return pretty-print string rep of current results"""
        from string import join
        stuff = map(repr, self.results)
        return join(stuff, "\n\n")
import os, shutil, sys, tempfile
from optparse import OptionParser
tmpeggs = tempfile.mkdtemp()
usage = '''\
[DESIRED PYTHON FOR BUILDOUT] bootstrap.py [options]
Bootstraps a buildout-based project.
Simply run this script in a directory containing a buildout.cfg, using the
Python that you want bin/buildout to use.
Note that by using --setup-source and --download-base to point to
local resources, you can keep this script from going over the network.
'''
parser = OptionParser(usage=usage)
parser.add_option("-v", "--version", help="use a specific zc.buildout version")
parser.add_option("-t", "--accept-buildout-test-releases",
dest='accept_buildout_test_releases',
action="store_true", default=False,
help=("Normally, if you do not specify a --version, the "
"bootstrap script and buildout gets the newest "
"*final* versions of zc.buildout and its recipes and "
"extensions for you. If you use this flag, "
"bootstrap and buildout will get the newest releases "
"even if they are alphas or betas."))
parser.add_option("-c", "--config-file",
help=("Specify the path to the buildout configuration "
"file to be used."))
parser.add_option("-f", "--find-links",
help=("Specify a URL to search for buildout releases"))
options, args = parser.parse_args()
######################################################################
# load/install distribute

# If setuptools is importable but is not the "distribute" fork, force a
# (re)install of distribute and reload pkg_resources afterwards.
to_reload = False
try:
    import pkg_resources, setuptools
    if not hasattr(pkg_resources, '_distribute'):
        to_reload = True
        raise ImportError
except ImportError:
    ez = {}
    # urlopen location differs between Python 3 and Python 2
    try:
        from urllib.request import urlopen
    except ImportError:
        from urllib2 import urlopen

    # NOTE(review): executes remote code from a plain-http URL; inherent
    # to this legacy bootstrap design.
    exec(urlopen('http://python-distribute.org/distribute_setup.py').read(), ez)
    setup_args = dict(to_dir=tmpeggs, download_delay=0, no_fake=True)
    ez['use_setuptools'](**setup_args)

    if to_reload:
        # builtin reload — Python 2 only path; presumably never reached
        # on Python 3 since _distribute implies the py2-era fork
        reload(pkg_resources)
    import pkg_resources
    # This does not (always?) update the default working set.  We will
    # do it.
    for path in sys.path:
        if path not in pkg_resources.working_set.entries:
            pkg_resources.working_set.add_entry(path)
######################################################################
# Install buildout

ws = pkg_resources.working_set

# easy_install invocation that will fetch zc.buildout into tmpeggs
cmd = [sys.executable, '-c',
       'from setuptools.command.easy_install import main; main()',
       '-mZqNxd', tmpeggs]

find_links = os.environ.get(
    'bootstrap-testing-find-links',
    options.find_links or
    ('http://downloads.buildout.org/'
     if options.accept_buildout_test_releases else None)
    )
if find_links:
    cmd.extend(['-f', find_links])

distribute_path = ws.find(
    pkg_resources.Requirement.parse('distribute')).location

requirement = 'zc.buildout'
version = options.version
if version is None and not options.accept_buildout_test_releases:
    # Figure out the most recent final version of zc.buildout.
    import setuptools.package_index
    _final_parts = '*final-', '*final'

    def _final_version(parsed_version):
        # a version is "final" if it has no pre-release markers
        # (setuptools encodes those as '*'-prefixed parts)
        for part in parsed_version:
            if (part[:1] == '*') and (part not in _final_parts):
                return False
        return True
    index = setuptools.package_index.PackageIndex(
        search_path=[distribute_path])
    if find_links:
        index.add_find_links((find_links,))
    req = pkg_resources.Requirement.parse(requirement)
    if index.obtain(req) is not None:
        best = []
        bestv = None
        for dist in index[req.project_name]:
            distv = dist.parsed_version
            if _final_version(distv):
                if bestv is None or distv > bestv:
                    best = [dist]
                    bestv = distv
                elif distv == bestv:
                    best.append(dist)
        if best:
            best.sort()
            version = best[-1].version
if version:
    requirement = '=='.join((requirement, version))
cmd.append(requirement)

import subprocess
if subprocess.call(cmd, env=dict(os.environ, PYTHONPATH=distribute_path)) != 0:
    raise Exception(
        "Failed to execute command:\n%s",
        repr(cmd)[1:-1])
######################################################################
# Import and run buildout

ws.add_entry(tmpeggs)
ws.require(requirement)
import zc.buildout.buildout

# default to the "bootstrap" part when no parts were named on the
# command line (arguments containing '=' are settings, not parts)
if not [a for a in args if '=' not in a]:
    args.append('bootstrap')

# if -c was provided, we push it back into args for buildout' main function
if options.config_file is not None:
    args[0:0] = ['-c', options.config_file]

zc.buildout.buildout.main(args)
shutil.rmtree(tmpeggs)
"""ReStructured Text Renderer Classes
"""
__docformat__ = 'restructuredtext'
import docutils.core
from zope.component import adapts
from zope.interface import implementer
from zope.publisher.browser import BrowserView
from zope.publisher.interfaces.browser import IBrowserRequest
from zope.renderer.i18n import ZopeMessageFactory as _
from zope.renderer.interfaces import ISource, IHTMLRenderer
from zope.renderer import SourceFactory
class IReStructuredTextSource(ISource):
    """Marker interface for a restructured text source. Note that an
    implementation of this interface should always derive from unicode or
    behave like a unicode class."""


# Source factory for ReST text; produced objects are unicode-like
# strings marked with IReStructuredTextSource.
ReStructuredTextSourceFactory = SourceFactory(
    IReStructuredTextSource, _("ReStructured Text (ReST)"),
    _("ReStructured Text (ReST) Source"))
@implementer(IHTMLRenderer)
class ReStructuredTextToHTMLRenderer(BrowserView):
    r"""An Adapter to convert from Restructured Text to HTML.

    Examples::

      >>> from zope.publisher.browser import TestRequest
      >>> source = ReStructuredTextSourceFactory(u'''
      ... This is source.
      ...
      ... Header 3
      ... --------
      ... This is more source.
      ... ''')
      >>> renderer = ReStructuredTextToHTMLRenderer(source, TestRequest())
      >>> print(renderer.render().strip())
      <p>This is source.</p>
      <div class="section" id="header-3">
      <h3>Header 3</h3>
      <p>This is more source.</p>
      </div>
    """
    adapts(IReStructuredTextSource, IBrowserRequest)

    def render(self, settings_overrides={}):
        """See zope.app.interfaces.renderer.IHTMLRenderer

        Let's make sure that inputted unicode stays as unicode:

        >>> renderer = ReStructuredTextToHTMLRenderer(u'b\xc3h', None)
        >>> renderer.render() == u'<p>b\\xc3h</p>\\n'
        True

        >>> text = u'''
        ... =========
        ... Heading 1
        ... =========
        ...
        ... hello world
        ...
        ... Heading 2
        ... ========='''
        >>> overrides = {'initial_header_level': 2,
        ...              'doctitle_xform': 0 }
        >>> renderer = ReStructuredTextToHTMLRenderer(text, None)
        >>> print(renderer.render(overrides))
        <div class="section" id="heading-1">
        <h2>Heading 1</h2>
        <p>hello world</p>
        <div class="section" id="heading-2">
        <h3>Heading 2</h3>
        </div>
        </div>
        <BLANKLINE>
        """
        # default settings for the renderer
        # (the mutable default ``settings_overrides={}`` is only read,
        # never mutated, so it is safe here)
        overrides = {
            'halt_level': 6,
            'input_encoding': 'unicode',
            'output_encoding': 'unicode',
            'initial_header_level': 3,
        }
        # caller-supplied settings win over the defaults
        overrides.update(settings_overrides)
        parts = docutils.core.publish_parts(
            self.context,
            writer_name='html',
            settings_overrides=overrides,
        )
        # body only: skip docutils' full-document <html>/<head> wrapper
        return u''.join(
            (parts['body_pre_docinfo'], parts['docinfo'], parts['body']))
"""Vocabulary support for schema.
"""
from collections import OrderedDict
from zope.interface import directlyProvides
from zope.interface import implementer
from zope.interface import providedBy
from zope.schema.interfaces import ITitledTokenizedTerm
from zope.schema.interfaces import ITokenizedTerm
from zope.schema.interfaces import ITreeVocabulary
from zope.schema.interfaces import IVocabularyRegistry
from zope.schema.interfaces import IVocabularyTokenized
# simple vocabularies performing enumerated-like tasks
_marker = object()  # unique sentinel: distinguishes "not passed" from None
@implementer(ITokenizedTerm)
class SimpleTerm(object):
    """
    Simple tokenized term used by SimpleVocabulary.

    .. versionchanged:: 4.6.0
       Implement equality and hashing based on the value, token and title.
    """

    def __init__(self, value, token=None, title=None):
        """Create a term for *value* and *token*. If *token* is
        omitted, str(value) is used for the token, escaping any
        non-ASCII characters.

        If *title* is provided, term implements
        :class:`zope.schema.interfaces.ITitledTokenizedTerm`.
        """
        self.value = value
        self.token = self._ascii_token(value if token is None else token)
        self.title = title
        if title is not None:
            directlyProvides(self, ITitledTokenizedTerm)

    @staticmethod
    def _ascii_token(token):
        """Normalize *token* to a pure-ASCII native string."""
        if isinstance(token, bytes):
            # str(bytes) returns str(repr(bytes)), which is not what we
            # want here; decode instead, keeping the token as readable
            # as possible. self.token must be a native str (ASCIILine).
            token = token.decode('raw_unicode_escape')
        elif not isinstance(token, str):
            # Not anything we recognize as textual data: use its str().
            token = str(token)
        # Escape any remaining non-ASCII characters.
        return token.encode('ascii', 'backslashreplace').decode('ascii')

    def __eq__(self, other):
        if other is self:
            return True
        if not isinstance(other, SimpleTerm):
            return False
        return (self.value == other.value
                and self.token == other.token
                and self.title == other.title)

    def __ne__(self, other):
        return not (self == other)

    def __hash__(self):
        state = (self.value, self.token, self.title)
        return hash(state)
@implementer(IVocabularyTokenized)
class SimpleVocabulary(object):
    """
    Vocabulary backed by a static sequence of terms.

    .. versionchanged:: 4.6.0
       Implement equality and hashing based on the terms list
       and interfaces implemented by this object.
    """

    def __init__(self, terms, *interfaces, **kwargs):
        """Initialize the vocabulary given a list of terms.

        The vocabulary keeps a reference to the list of terms passed
        in; it should never be modified while the vocabulary is used.

        One or more interfaces may also be provided so that alternate
        widgets may be bound without subclassing.

        By default, ValueErrors are thrown if duplicate values or tokens
        are passed in. If you want to swallow these exceptions, pass
        in ``swallow_duplicates=True``. In this case, the values will
        override themselves.
        """
        self.by_value = {}
        self.by_token = {}
        self._terms = terms
        check_dupes = not kwargs.get('swallow_duplicates', False)
        for term in self._terms:
            if check_dupes:
                if term.value in self.by_value:
                    raise ValueError(
                        'term values must be unique: %s' % repr(term.value))
                if term.token in self.by_token:
                    raise ValueError(
                        'term tokens must be unique: %s' % repr(term.token))
            self.by_value[term.value] = term
            self.by_token[term.token] = term
        if interfaces:
            directlyProvides(self, *interfaces)

    @classmethod
    def fromItems(cls, items, *interfaces):
        """
        Construct a vocabulary from a list of (token, value) pairs or
        (token, value, title) triples. The list does not have to be
        homogeneous.

        The order of the items is preserved as the order of the terms
        in the vocabulary. Terms are created by calling the class
        method :meth:`createTerm`` with the pair or triple.

        One or more interfaces may also be provided so that alternate
        widgets may be bound without subclassing.

        .. versionchanged:: 4.6.0
           Allow passing in triples to set item titles.
        """
        # Each item is (token, value[, title]); createTerm expects
        # (value, token[, title]).
        terms = []
        for item in items:
            terms.append(cls.createTerm(item[1], item[0], *item[2:]))
        return cls(terms, *interfaces)

    @classmethod
    def fromValues(cls, values, *interfaces):
        """Construct a vocabulary from a simple list.

        Values of the list become both the tokens and values of the
        terms in the vocabulary. The order of the values is preserved
        as the order of the terms in the vocabulary. Tokens are
        created by calling the class method :meth:`createTerm()` with
        the value as the only parameter.

        One or more interfaces may also be provided so that alternate
        widgets may be bound without subclassing.
        """
        return cls([cls.createTerm(value) for value in values], *interfaces)

    @classmethod
    def createTerm(cls, *args):
        """Create a single term from data.

        Subclasses may override this with a class method that creates
        a term of the appropriate type from the arguments.
        """
        return SimpleTerm(*args)

    def __contains__(self, value):
        """See zope.schema.interfaces.IBaseVocabulary"""
        try:
            return value in self.by_value
        except TypeError:
            # unhashable values cannot be dict keys, so they are never
            # part of this vocabulary
            return False

    def getTerm(self, value):
        """See zope.schema.interfaces.IBaseVocabulary"""
        try:
            return self.by_value[value]
        except KeyError:
            raise LookupError(value)

    def getTermByToken(self, token):
        """See zope.schema.interfaces.IVocabularyTokenized"""
        try:
            return self.by_token[token]
        except KeyError:
            raise LookupError(token)

    def __iter__(self):
        """See zope.schema.interfaces.IIterableVocabulary"""
        return iter(self._terms)

    def __len__(self):
        """See zope.schema.interfaces.IIterableVocabulary"""
        return len(self.by_value)

    def __eq__(self, other):
        if self is other:
            return True
        if not isinstance(other, SimpleVocabulary):
            return False
        return (self._terms == other._terms
                and providedBy(self) == providedBy(other))

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        return hash(tuple(self._terms))
def _createTermTree(ttree, dict_):
    """ Helper method that creates a tree-like dict with ITokenizedTerm
    objects as keys from a similar tree with tuples as keys.

    See fromDict for more details.
    """
    for key in sorted(dict_):
        # key is (token, value[, title]); SimpleTerm takes
        # (value, token[, title]).
        node = SimpleTerm(key[1], key[0], *key[2:])
        subtree = TreeVocabulary.terms_factory()
        ttree[node] = subtree
        _createTermTree(subtree, dict_[key])
    return ttree
@implementer(ITreeVocabulary)
class TreeVocabulary(object):
    """ Vocabulary that relies on a tree (i.e nested) structure.
    """
    # The default implementation uses a dict to create the tree structure. This
    # can however be overridden in a subclass by any other IEnumerableMapping
    # compliant object type. Python 2.7's OrderedDict for example.
    terms_factory = OrderedDict

    def __init__(self, terms, *interfaces):
        """Initialize the vocabulary given a recursive dict (i.e a tree) with
        ITokenizedTerm objects for keys and self-similar dicts representing the
        branches for values.
        Refer to the method fromDict for more details.
        Concerning the ITokenizedTerm keys, the 'value' and 'token' attributes
        of each key (including nested ones) must be unique.
        One or more interfaces may also be provided so that alternate
        widgets may be bound without subclassing.
        """
        # Copy the tree into a fresh terms_factory mapping.
        self._terms = self.terms_factory()
        self._terms.update(terms)
        # Flat lookup indexes, populated once by _populateIndexes.
        self.path_by_value = {}
        self.term_by_value = {}
        self.term_by_token = {}
        self._populateIndexes(terms)
        if interfaces:
            directlyProvides(self, *interfaces)

    def __contains__(self, value):
        """ See zope.schema.interfaces.IBaseVocabulary
        D.__contains__(k) -> True if D has a key k, else False
        """
        try:
            return value in self.term_by_value
        except TypeError:
            # sometimes values are not hashable
            return False

    def __getitem__(self, key):
        """x.__getitem__(y) <==> x[y]
        """
        return self._terms.__getitem__(key)

    def __iter__(self):
        """See zope.schema.interfaces.IIterableVocabulary
        x.__iter__() <==> iter(x)
        """
        return self._terms.__iter__()

    def __len__(self):
        """x.__len__() <==> len(x)
        """
        return self._terms.__len__()

    def get(self, key, default=None):
        """Get a value for a key
        The default is returned if there is no value for the key.
        """
        return self._terms.get(key, default)

    def keys(self):
        """Return the keys of the mapping object.
        """
        return self._terms.keys()

    def values(self):
        """Return the values of the mapping object.
        """
        return self._terms.values()

    def items(self):
        """Return the items of the mapping object.
        """
        return self._terms.items()

    @classmethod
    def fromDict(cls, dict_, *interfaces):
        """Constructs a vocabulary from a dictionary-like object (like dict or
        OrderedDict), that has tuples for keys.
        The tuples should have either 2 or 3 values, i.e:
        (token, value, title) or (token, value). Only tuples that have
        three values will create a
        :class:`zope.schema.interfaces.ITitledTokenizedTerm`.
        For example, a dict with 2-valued tuples::
            dict_ = {
                ('exampleregions', 'Regions used in ATVocabExample'): {
                    ('aut', 'Austria'): {
                        ('tyr', 'Tyrol'): {
                            ('auss', 'Ausserfern'): {},
                        }
                    },
                    ('ger', 'Germany'): {
                        ('bav', 'Bavaria'):{}
                    },
                }
            }
        One or more interfaces may also be provided so that alternate
        widgets may be bound without subclassing.
        .. versionchanged:: 4.6.0
           Only create ``ITitledTokenizedTerm`` when a title is actually
           provided.
        """
        return cls(_createTermTree(cls.terms_factory(), dict_), *interfaces)

    def _populateIndexes(self, tree):
        """ The TreeVocabulary contains three helper indexes for quick lookups.
        They are: term_by_value, term_by_token and path_by_value
        This method recurses through the tree and populates these indexes.
        tree: The tree (a nested/recursive dictionary).
        """
        for term in tree.keys():
            value = getattr(term, 'value')
            token = getattr(term, 'token')
            if value in self.term_by_value:
                raise ValueError(
                    "Term values must be unique: '%s'" % value)
            if token in self.term_by_token:
                raise ValueError(
                    "Term tokens must be unique: '%s'" % token)
            self.term_by_value[value] = term
            self.term_by_token[token] = term
            if value not in self.path_by_value:  # pragma: no branch
                # _getPathToTreeNode searches from the root, so pass the
                # vocabulary itself (a mapping), not the current subtree.
                self.path_by_value[value] = self._getPathToTreeNode(self,
                                                                    value)
            # Recurse into this term's branch.
            self._populateIndexes(tree[term])

    def getTerm(self, value):
        """See zope.schema.interfaces.IBaseVocabulary"""
        try:
            return self.term_by_value[value]
        except KeyError:
            raise LookupError(value)

    def getTermByToken(self, token):
        """See zope.schema.interfaces.IVocabularyTokenized"""
        try:
            return self.term_by_token[token]
        except KeyError:
            raise LookupError(token)

    def _getPathToTreeNode(self, tree, node):
        """Helper method that computes the path in the tree from the root
        to the given node.
        The tree must be a recursive IEnumerableMapping object.
        """
        # Depth-first search; returns [] when *node* is not found.
        path = []
        for parent, child in tree.items():
            if node == parent.value:
                return [node]
            path = self._getPathToTreeNode(child, node)
            if path:
                path.insert(0, parent.value)
                break
        return path

    def getTermPath(self, value):
        """Returns a list of strings representing the path from the root node
        to the node with the given value in the tree.
        Returns an empty list if no node has that value.
        """
        return self.path_by_value.get(value, [])
# registry code
class VocabularyRegistryError(LookupError):
    """
    A specialized subclass of `LookupError` raised for unknown
    (unregistered) vocabularies.

    .. seealso:: `VocabularyRegistry`
    """

    def __init__(self, name):
        # The vocabulary name that could not be resolved.
        self.name = name
        # LookupError.args carries the rendered message.
        LookupError.__init__(self, str(self))

    def __str__(self):
        return "unknown vocabulary: %r" % self.name
@implementer(IVocabularyRegistry)
class VocabularyRegistry(object):
    """
    Default implementation of
    :class:`zope.schema.interfaces.IVocabularyRegistry`.

    An instance of this class is used by default by
    :func:`getVocabularyRegistry`, which in turn is used by
    :class:`~.Choice` fields.

    Named vocabularies must be manually registered with this object
    using :meth:`register`. This associates a vocabulary name with a
    :class:`zope.schema.interfaces.IVocabularyFactory`.

    An alternative to this is to use the :mod:`zope.component` registry via
    `zope.vocabularyregistry
    <https://pypi.org/project/zope.vocabularyregistry/>`_.
    """
    # Only the name -> factory mapping is stored.
    __slots__ = ('_map',)

    def __init__(self):
        self._map = {}

    def get(self, context, name):
        """See zope.schema.interfaces.IVocabularyRegistry"""
        try:
            factory = self._map[name]
        except KeyError:
            raise VocabularyRegistryError(name)
        return factory(context)

    def register(self, name, factory):
        """Register a *factory* for the vocabulary with the given *name*."""
        self._map[name] = factory
# The module-global registry; ``None`` until one is installed via
# setVocabularyRegistry (possibly lazily by getVocabularyRegistry).
_vocabularies = None


def getVocabularyRegistry():
    """Return the vocabulary registry.
    If the registry has not been created yet, an instance of
    VocabularyRegistry will be installed and used.
    """
    if _vocabularies is None:
        # Lazily install the default registry; setVocabularyRegistry
        # rebinds the module global read on the next line.
        setVocabularyRegistry(VocabularyRegistry())
    return _vocabularies
def setVocabularyRegistry(registry):
    """Install *registry* as the module-wide vocabulary registry."""
    global _vocabularies
    _vocabularies = registry
def _clear():
"""Remove the registries (for use by tests)."""
global _vocabularies
_vocabularies = None
# If zope.testing is available, make sure the global registry is reset
# between test runs.
try:
    from zope.testing.cleanup import addCleanUp
except ImportError:  # pragma: no cover
    # don't have that part of Zope
    pass
else:  # pragma: no cover
    addCleanUp(_clear)
    del addCleanUp
"""Bootstrap schema interfaces and exceptions
"""
from functools import total_ordering
import zope.interface
from zope.interface import Attribute
from zope.interface.interfaces import IInterface
from zope.schema._messageid import _
# pylint:disable=inherit-non-class,keyword-arg-before-vararg,
# pylint:disable=no-self-argument
class StopValidation(Exception):
    """Raised if the validation is completed early.
    Note that this exception should be always caught, since it is just
    a way for the validator to save time.
    """


@total_ordering
class ValidationError(zope.interface.Invalid):
    """Raised if the Validation process fails."""

    #: The field that raised the error, if known.
    field = None

    #: The value that failed validation.
    value = None

    def with_field_and_value(self, field, value):
        # Fluent helper: record the failing field/value and return self
        # so the call can be chained onto a raise.
        self.field = field
        self.value = value
        return self

    def doc(self):
        # The class docstring doubles as the human-readable message;
        # subclasses set ``__doc__`` to a translatable message id.
        return self.__class__.__doc__

    def __lt__(self, other):
        # There's no particular reason we choose to sort this way,
        # it's just the way we used to do it with __cmp__.
        # Anything without ``args`` sorts after us.
        if not hasattr(other, 'args'):
            return True
        return self.args < other.args

    def __eq__(self, other):
        # Equality is based solely on ``args``; any object exposing a
        # matching ``args`` compares equal.
        if not hasattr(other, 'args'):
            return False
        return self.args == other.args

    # XXX : This is probably inconsistent with __eq__, which is
    # a violation of the language spec.
    __hash__ = zope.interface.Invalid.__hash__  # python3

    def __repr__(self):  # pragma: no cover
        return '%s(%s)' % (
            self.__class__.__name__,
            ', '.join(repr(arg) for arg in self.args))
class RequiredMissing(ValidationError):
    # ``__doc__`` doubles as the translatable error message (see ``doc()``).
    __doc__ = _("""Required input is missing.""")


class WrongType(ValidationError):
    __doc__ = _("""Object is of wrong type.""")

    #: The type or tuple of types that was expected.
    #:
    #: .. versionadded:: 4.7.0
    expected_type = None

    def __init__(self, value=None, expected_type=None, name=None, *args):
        """
        WrongType(value, expected_type, name)

        .. versionchanged:: 4.7.0
           Added named arguments to the constructor and the `expected_type`
           field.
        """
        # NOTE: calls ValidationError.__init__ directly rather than
        # super(); NotAnInterface (below) inherits from both WrongType
        # and SchemaNotProvided, and on that MRO super() would dispatch
        # to SchemaNotProvided.__init__ with a mismatched signature.
        ValidationError.__init__(self, value, expected_type, name, *args)
        self.expected_type = expected_type
        self.value = value
class OutOfBounds(ValidationError):
    """
    A value was out of the allowed bounds.
    This is the common superclass for `OrderableOutOfBounds` and
    `LenOutOfBounds`, which in turn are the superclasses for `TooBig`
    and `TooSmall`, and `TooLong` and `TooShort`, respectively.
    .. versionadded:: 4.7.0
    """

    #: The value that was exceeded
    bound = None

    #: A constant for `violation_direction`.
    #: (A unique marker instance; only its identity matters.)
    TOO_LARGE = type('TOO_LARGE', (), {'__slots__': ()})()

    #: A constant for `violation_direction`.
    #: (A unique marker instance; only its identity matters.)
    TOO_SMALL = type('TOO_SMALL', (), {'__slots__': ()})()

    #: Whether the value was too large or
    #: not large enough. One of the values
    #: defined by the constants `TOO_LARGE`
    #: or `TOO_SMALL`
    violation_direction = None

    def __init__(self, value=None, bound=None, *args):
        """
        OutOfBounds(value, bound)
        """
        super(OutOfBounds, self).__init__(value, bound, *args)
        self.value = value
        self.bound = bound
# Concrete bounds violations; the direction of the violation is encoded
# in the class-level ``violation_direction`` attribute.
class OrderableOutOfBounds(OutOfBounds):
    """
    A value was too big or too small in comparison to another value.
    .. versionadded:: 4.7.0
    """


class TooBig(OrderableOutOfBounds):
    __doc__ = _("""Value is too big""")
    violation_direction = OutOfBounds.TOO_LARGE


class TooSmall(OrderableOutOfBounds):
    __doc__ = _("""Value is too small""")
    violation_direction = OutOfBounds.TOO_SMALL


class LenOutOfBounds(OutOfBounds):
    """
    The length of the value was out of bounds.
    .. versionadded:: 4.7.0
    """


class TooLong(LenOutOfBounds):
    __doc__ = _("""Value is too long""")
    violation_direction = OutOfBounds.TOO_LARGE


class TooShort(LenOutOfBounds):
    __doc__ = _("""Value is too short""")
    violation_direction = OutOfBounds.TOO_SMALL


class InvalidValue(ValidationError):
    __doc__ = _("""Invalid value""")


class ConstraintNotSatisfied(ValidationError):
    __doc__ = _("""Constraint not satisfied""")


class NotAContainer(ValidationError):
    __doc__ = _("""Not a container""")


class NotAnIterator(ValidationError):
    __doc__ = _("""Not an iterator""")
class WrongContainedType(ValidationError):
    __doc__ = _("""Wrong contained type""")

    #: A collection of exceptions raised when validating
    #: the *value*.
    #:
    #: .. versionadded:: 4.7.0
    errors = ()

    def __init__(self, errors=None, name=None, *args):
        """
        WrongContainedType(errors, name)

        .. versionchanged:: 4.7.0
           Added named arguments to the constructor, and the `errors` property.
        """
        super(WrongContainedType, self).__init__(errors, name, *args)
        # May be None when constructed without arguments.
        self.errors = errors
class SchemaNotCorrectlyImplemented(WrongContainedType):
    __doc__ = _("""An object failed schema or invariant validation.""")

    #: A dictionary mapping failed attribute names of the
    #: *value* to the underlying exception
    schema_errors = None

    #: A list of exceptions from validating the invariants
    #: of the schema.
    invariant_errors = ()

    def __init__(self, errors=None, name=None, schema_errors=None,
                 invariant_errors=(), *args):
        """
        SchemaNotCorrectlyImplemented(errors, name, schema_errors,
        invariant_errors)

        .. versionchanged:: 4.7.0
           Added named arguments to the constructor.
        """
        # ``errors``/``name`` go to WrongContainedType; the per-attribute
        # and invariant details are kept separately.
        super(SchemaNotCorrectlyImplemented, self).__init__(
            errors, name, *args)
        self.schema_errors = schema_errors
        self.invariant_errors = invariant_errors
class SchemaNotFullyImplemented(ValidationError):
    __doc__ = _("""Schema not fully implemented""")


class SchemaNotProvided(ValidationError):
    __doc__ = _("""Schema not provided""")

    #: The interface that the *value* was supposed to provide,
    #: but does not.
    schema = None

    def __init__(self, schema=None, value=None, *args):
        """
        SchemaNotProvided(schema, value)

        .. versionchanged:: 4.7.0
           Added named arguments to the constructor and the `schema` property.
        """
        super(SchemaNotProvided, self).__init__(schema, value, *args)
        self.schema = schema
        self.value = value
class NotAnInterface(WrongType, SchemaNotProvided):
    """
    Object is not an interface.
    This is a `WrongType` exception for backwards compatibility with
    existing ``except`` clauses, but it is raised when
    ``IInterface.providedBy`` is not true, so it's also a
    `SchemaNotProvided`. The ``expected_type`` field is filled in as
    ``IInterface``; this is not actually a `type`, and
    ``isinstance(thing, IInterface)`` is always false.
    .. versionadded:: 4.7.0
    """

    expected_type = IInterface

    def __init__(self, value, name):
        # Dispatches to WrongType.__init__, which calls
        # ValidationError.__init__ directly — SchemaNotProvided.__init__
        # is never reached on this MRO, so ``schema`` keeps its class
        # default of None.
        super(NotAnInterface, self).__init__(value, IInterface, name)
class IFromUnicode(zope.interface.Interface):
    """Parse a unicode string to a value
    We will often adapt fields to this interface to support views and
    other applications that need to convert raw data as unicode
    values.
    """

    def fromUnicode(value):
        """Convert a unicode string to a value.
        """


class IFromBytes(zope.interface.Interface):
    """
    Parse a byte string to a value.
    If the string needs to be decoded, decoding is done using UTF-8.
    .. versionadded:: 4.8.0
    """

    def fromBytes(value):
        """Convert a byte string to a value.
        """


class IContextAwareDefaultFactory(zope.interface.Interface):
    """A default factory that requires a context.
    The context is the field context. If the field is not bound, context may
    be ``None``.
    """

    def __call__(context):
        """Returns a default value for the field."""


class IBeforeObjectAssignedEvent(zope.interface.Interface):
    """An object is going to be assigned to an attribute on another object.
    Subscribers to this event can change the object on this event to change
    what object is going to be assigned. This is useful, e.g. for wrapping
    or replacing objects before they get assigned to conform to application
    policy.
    """

    object = Attribute("The object that is going to be assigned.")

    name = Attribute("The name of the attribute under which the object "
                     "will be assigned.")

    context = Attribute("The context object where the object will be "
                        "assigned to.")


class IValidatable(zope.interface.Interface):
    # Internal interface, the base for IField, but used to prevent
    # import recursion. This should *not* be implemented by anything
    # other than IField.
    def validate(value):
        """Validate that the given value is a valid field value.
        Returns nothing but raises an error if the value is invalid.
        It checks everything specific to a Field and also checks
        with the additional constraint.
        """
class NO_VALUE(object):
    def __repr__(self):  # pragma: no cover
        return '<NO_VALUE>'


# Replace the class with its only instance: NO_VALUE is a singleton
# sentinel, so identity comparisons against it are always safe.
NO_VALUE = NO_VALUE()
"""Schema interfaces and exceptions
"""
__docformat__ = "reStructuredText"
from zope.interface import Attribute
from zope.interface import Interface
from zope.interface.common.mapping import IEnumerableMapping
from zope.interface.interfaces import IInterface
from zope.schema._bootstrapfields import Bool
from zope.schema._bootstrapfields import Complex
from zope.schema._bootstrapfields import Decimal
from zope.schema._bootstrapfields import Field
from zope.schema._bootstrapfields import Int
from zope.schema._bootstrapfields import Integral
from zope.schema._bootstrapfields import Number
from zope.schema._bootstrapfields import Object
from zope.schema._bootstrapfields import Rational
from zope.schema._bootstrapfields import Real
from zope.schema._bootstrapfields import Text
from zope.schema._bootstrapfields import TextLine
# Import from _bootstrapinterfaces only because other packages will expect
# to find these interfaces here.
from zope.schema._bootstrapinterfaces import ConstraintNotSatisfied
from zope.schema._bootstrapinterfaces import IBeforeObjectAssignedEvent
from zope.schema._bootstrapinterfaces import IContextAwareDefaultFactory
from zope.schema._bootstrapinterfaces import IFromBytes
from zope.schema._bootstrapinterfaces import IFromUnicode
from zope.schema._bootstrapinterfaces import InvalidValue
from zope.schema._bootstrapinterfaces import IValidatable
from zope.schema._bootstrapinterfaces import LenOutOfBounds
from zope.schema._bootstrapinterfaces import NotAContainer
from zope.schema._bootstrapinterfaces import NotAnInterface
from zope.schema._bootstrapinterfaces import NotAnIterator
from zope.schema._bootstrapinterfaces import OrderableOutOfBounds
from zope.schema._bootstrapinterfaces import OutOfBounds
from zope.schema._bootstrapinterfaces import RequiredMissing
from zope.schema._bootstrapinterfaces import SchemaNotCorrectlyImplemented
from zope.schema._bootstrapinterfaces import SchemaNotFullyImplemented
from zope.schema._bootstrapinterfaces import SchemaNotProvided
from zope.schema._bootstrapinterfaces import StopValidation
from zope.schema._bootstrapinterfaces import TooBig
from zope.schema._bootstrapinterfaces import TooLong
from zope.schema._bootstrapinterfaces import TooShort
from zope.schema._bootstrapinterfaces import TooSmall
from zope.schema._bootstrapinterfaces import ValidationError
from zope.schema._bootstrapinterfaces import WrongContainedType
from zope.schema._bootstrapinterfaces import WrongType
from zope.schema._messageid import _
__all__ = [
# Exceptions
'ConstraintNotSatisfied',
'InvalidDottedName',
'InvalidId',
'InvalidURI',
'InvalidValue',
'LenOutOfBounds',
'NotAContainer',
'NotAnInterface',
'NotAnIterator',
'NotUnique',
'OrderableOutOfBounds',
'OutOfBounds',
'RequiredMissing',
'SchemaNotCorrectlyImplemented',
'SchemaNotFullyImplemented',
'SchemaNotProvided',
'StopValidation',
'TooBig',
'TooLong',
'TooShort',
'TooSmall',
'Unbound',
'ValidationError',
'WrongContainedType',
'WrongType',
# Interfaces
'IASCII',
'IASCIILine',
'IAbstractBag',
'IAbstractSet',
'IBaseVocabulary',
'IBeforeObjectAssignedEvent',
'IBool',
'IBytes',
'IBytesLine',
'IChoice',
'ICollection',
'IComplex',
'IContainer',
'IContextAwareDefaultFactory',
'IContextSourceBinder',
'IDate',
'IDatetime',
'IDecimal',
'IDict',
'IDottedName',
'IField',
'IFieldEvent',
'IFieldUpdatedEvent',
'IFloat',
'IFromBytes',
'IFromUnicode',
'IFrozenSet',
'IId',
'IInt',
'IIntegral',
'IInterfaceField',
'IIterable',
'IIterableSource',
'IIterableVocabulary',
'ILen',
'IList',
'IMapping',
'IMinMax',
'IMinMaxLen',
'IMutableMapping',
'IMutableSequence',
'INativeString',
'INativeStringLine',
'INumber',
'IObject',
'IOrderable',
'IPassword',
'IPythonIdentifier',
'IRational',
'IReal',
'ISequence',
'ISet',
'ISource',
'ISourceQueriables',
'ISourceText',
'ITerm',
'IText',
'ITextLine',
'ITime',
'ITimedelta',
'ITitledTokenizedTerm',
'ITokenizedTerm',
'ITreeVocabulary',
'ITuple',
'IURI',
'IUnorderedCollection',
'IVocabulary',
'IVocabularyFactory',
'IVocabularyRegistry',
'IVocabularyTokenized',
]
# Additional validation errors; as with the bootstrap errors, ``__doc__``
# doubles as the translatable message.
class NotUnique(ValidationError):
    __doc__ = _("""One or more entries of sequence are not unique.""")


class InvalidURI(ValidationError):
    __doc__ = _("""The specified URI is not valid.""")


class InvalidId(ValidationError):
    __doc__ = _("""The specified id is not valid.""")


class InvalidDottedName(ValidationError):
    __doc__ = _("""The specified dotted name is not valid.""")


# Note: a plain Exception, not a ValidationError.
class Unbound(Exception):
    __doc__ = _("""The field is not bound.""")
class IField(IValidatable):
    """Basic Schema Field Interface.
    Fields are used for Interface specifications. They at least provide
    a title, description and a default value. You can also
    specify if they are required and/or readonly.
    The Field Interface is also used for validation and specifying
    constraints.
    We want to make it possible for a IField to not only work
    on its value but also on the object this value is bound to.
    This enables a Field implementation to perform validation
    against an object which also marks a certain place.
    Note that many fields need information about the object
    containing a field. For example, when validating a value to be
    set as an object attribute, it may be necessary for the field to
    introspect the object's state. This means that the field needs to
    have access to the object when performing validation::
        bound = field.bind(object)
        bound.validate(value)
    """

    def bind(object):
        """Return a copy of this field which is bound to context.
        The copy of the Field will have the 'context' attribute set
        to 'object'. This way a Field can implement more complex
        checks involving the object's location/environment.
        Many fields don't need to be bound. Only fields that condition
        validation or properties on an object containing the field
        need to be bound.
        """

    # The attributes below are themselves described with schema fields.
    title = TextLine(
        title=_("Title"),
        description=_("A short summary or label"),
        default=u"",
        required=False,
    )

    description = Text(
        title=_("Description"),
        description=_("A description of the field"),
        default=u"",
        required=False,
    )

    required = Bool(
        title=_("Required"),
        description=(_("Tells whether a field requires its value to exist.")),
        default=True)

    readonly = Bool(
        title=_("Read Only"),
        description=_("If true, the field's value cannot be changed."),
        required=False,
        default=False)

    default = Field(
        title=_("Default Value"),
        description=_("""The field default value may be None or a legal
                        field value""")
    )

    missing_value = Field(
        title=_("Missing Value"),
        description=_("""If input for this Field is missing, and that's ok,
                          then this is the value to use""")
    )

    order = Int(
        title=_("Field Order"),
        description=_("""
        The order attribute can be used to determine the order in
        which fields in a schema were defined. If one field is created
        after another (in the same thread), its order will be
        greater.
        (Fields in separate threads could have the same order.)
        """),
        required=True,
        readonly=True,
    )

    def constraint(value):
        """Check a customized constraint on the value.
        You can implement this method with your Field to
        require a certain constraint. This relaxes the need
        to inherit/subclass a Field you to add a simple constraint.
        Returns true if the given value is within the Field's constraint.
        """

    def validate(value):
        """Validate that the given value is a valid field value.
        Returns nothing but raises an error if the value is invalid.
        It checks everything specific to a Field and also checks
        with the additional constraint.
        """

    def get(object):
        """Get the value of the field for the given object."""

    def query(object, default=None):
        """Query the value of the field for the given object.
        Return the default if the value hasn't been set.
        """

    def set(object, value):
        """Set the value of the field for the object
        Raises a type error if the field is a read-only field.
        """
class IIterable(IField):
    """Fields with a value that can be iterated over.
    The value needs to support iteration; the implementation mechanism
    is not constrained. (Either `__iter__()` or `__getitem__()` may be
    used.)
    """


class IContainer(IField):
    """Fields whose value allows an ``x in value`` check.
    The value needs to support the `in` operator, but is not
    constrained in how it does so (whether it defines `__contains__()`
    or `__getitem__()` is immaterial).
    """


class IOrderable(IField):
    """Field requiring its value to be orderable.
    The set of values needs to support a complete ordering; the
    implementation mechanism is not constrained. Either `__cmp__()` or
    'rich comparison' methods may be used.
    """


class ILen(IField):
    """A Field requiring its value to have a length.
    The value needs to have a conventional __len__ method.
    """
class IMinMax(IOrderable):
    """Field requiring its value to be between min and max.
    This implies that the value needs to support the IOrderable interface.
    """

    # Both bounds are optional; ``max`` is inclusive.
    min = Field(
        title=_("Start of the range"),
        required=False,
        default=None
    )

    max = Field(
        title=_("End of the range (including the value itself)"),
        required=False,
        default=None
    )


class IMinMaxLen(ILen):
    """Field requiring the length of its value to be within a range"""

    min_length = Int(
        title=_("Minimum length"),
        description=_("""
        Value after whitespace processing cannot have less than
        `min_length` characters (if a string type) or elements (if
        another sequence type). If `min_length` is ``None``, there is
        no minimum.
        """),
        required=False,
        min=0,  # needs to be a positive number
        default=0)

    # NOTE(review): the description below reads as exclusive ("greater
    # or equal than"), while IMinMax documents max as inclusive —
    # confirm against the validation code. (The text is a translatable
    # message id and is deliberately left unchanged here.)
    max_length = Int(
        title=_("Maximum length"),
        description=_("""
        Value after whitespace processing cannot have greater
        or equal than `max_length` characters (if a string type) or
        elements (if another sequence type). If `max_length` is
        ``None``, there is no maximum."""),
        required=False,
        min=0,  # needs to be a positive number
        default=None)
class IInterfaceField(IField):
    """Fields with a value that is an interface (implementing
    zope.interface.Interface)."""


class IBool(IField):
    """Boolean Field."""

    default = Bool(
        title=_("Default Value"),
        description=_("""The field default value may be None or a legal
                        field value""")
    )

    # Unlike IField, a boolean field is not required by default.
    required = Bool(
        title=_("Required"),
        description=(_("Tells whether a field requires its value to exist.")),
        required=False,
        default=False)
class IBytes(IMinMaxLen, IIterable, IField):
    """Field containing a byte string (like the python str).
    The value might be constrained with length limits.
    """


class IText(IMinMaxLen, IIterable, IField):
    """Field containing a unicode string."""


# for things which are of the str type on both Python 2 and 3
class INativeString(IText):
    """
    A field that always contains the native `str` type.
    .. versionchanged:: 4.9.0
       This is now a distinct type instead of an alias for either `IText`
       or `IBytes`, depending on the platform.
    """


class IASCII(INativeString):
    """Field containing a 7-bit ASCII string. No characters > DEL
    (chr(127)) are allowed
    The value might be constrained with length limits.
    """


class IBytesLine(IBytes):
    """Field containing a byte string without newlines."""


class IASCIILine(IASCII):
    """Field containing a 7-bit ASCII string without newlines."""


class ISourceText(IText):
    """Field for source text of object."""


class ITextLine(IText):
    """Field containing a unicode string without newlines."""


class INativeStringLine(ITextLine):
    """
    A field that always contains the native `str` type, without any newlines.
    .. versionchanged:: 4.9.0
       This is now a distinct type instead of an alias for either `ITextLine`
       or `IBytesLine`, depending on the platform.
    """


class IPassword(ITextLine):
    """Field containing a unicode password string without newlines."""
###
# Numbers
###
##
# Abstract numbers
##
class INumber(IMinMax, IField):
"""
Field containing a generic number: :class:`numbers.Number`.
.. seealso:: :class:`zope.schema.Number`
.. versionadded:: 4.6.0
"""
min = Number(
title=_("Start of the range"),
required=False,
default=None
)
max = Number(
title=_("End of the range (including the value itself)"),
required=False,
default=None
)
default = Number(
title=_("Default Value"),
description=_("""The field default value may be None or a legal
field value""")
)
class IComplex(INumber):
"""
Field containing a complex number: :class:`numbers.Complex`.
.. seealso:: :class:`zope.schema.Real`
.. versionadded:: 4.6.0
"""
min = Complex(
title=_("Start of the range"),
required=False,
default=None
)
max = Complex(
title=_("End of the range (including the value itself)"),
required=False,
default=None
)
default = Complex(
title=_("Default Value"),
description=_("""The field default value may be None or a legal
field value""")
)
class IReal(IComplex):
"""
Field containing a real number: :class:`numbers.IReal`.
.. seealso:: :class:`zope.schema.Real`
.. versionadded:: 4.6.0
"""
min = Real(
title=_("Start of the range"),
required=False,
default=None
)
max = Real(
title=_("End of the range (including the value itself)"),
required=False,
default=None
)
default = Real(
title=_("Default Value"),
description=_("""The field default value may be None or a legal
field value""")
)
class IRational(IReal):
"""
Field containing a rational number: :class:`numbers.IRational`.
.. seealso:: :class:`zope.schema.Rational`
.. versionadded:: 4.6.0
"""
min = Rational(
title=_("Start of the range"),
required=False,
default=None
)
max = Rational(
title=_("End of the range (including the value itself)"),
required=False,
default=None
)
default = Rational(
title=_("Default Value"),
description=_("""The field default value may be None or a legal
field value""")
)
class IIntegral(IRational):
    """
    Field containing an integral number: :class:`numbers.Integral`.

    .. seealso:: :class:`zope.schema.Integral`
    .. versionadded:: 4.6.0
    """

    min = Integral(
        title=_("Start of the range"),
        required=False,
        default=None
    )

    max = Integral(
        title=_("End of the range (including the value itself)"),
        required=False,
        default=None
    )

    default = Integral(
        title=_("Default Value"),
        description=_("""The field default value may be None or a legal
                        field value""")
    )
##
# Concrete numbers
##
class IInt(IIntegral):
    """
    Field containing exactly the native class :class:`int`.

    .. seealso:: :class:`zope.schema.Int`
    """

    min = Int(
        title=_("Start of the range"),
        required=False,
        default=None
    )

    max = Int(
        title=_("End of the range (including the value itself)"),
        required=False,
        default=None
    )

    default = Int(
        title=_("Default Value"),
        description=_("""The field default value may be None or a legal
                        field value""")
    )
class IFloat(IReal):
    """
    Field containing exactly the native class :class:`float`.

    :class:`IReal` is a more general interface, allowing all of
    floats, ints, and fractions.

    .. seealso:: :class:`zope.schema.Float`
    """
class IDecimal(INumber):
    """Field containing a :class:`decimal.Decimal`"""

    min = Decimal(
        title=_("Start of the range"),
        required=False,
        default=None
    )

    max = Decimal(
        title=_("End of the range (including the value itself)"),
        required=False,
        default=None
    )

    default = Decimal(
        title=_("Default Value"),
        description=_("""The field default value may be None or a legal
                        field value""")
    )
###
# End numbers
###
class IDatetime(IMinMax, IField):
    """Field containing a datetime."""


class IDate(IMinMax, IField):
    """Field containing a date."""


class ITimedelta(IMinMax, IField):
    """Field containing a timedelta."""


class ITime(IMinMax, IField):
    """Field containing a time."""
def _is_field(value):
    """Return whether *value* provides :class:`IField`.

    Normalized to a plain ``bool`` so callers get exactly True/False.
    """
    return bool(IField.providedBy(value))
def _fields(values):
    """Return whether every item in *values* provides :class:`IField`.

    True for an empty iterable, matching the original loop's behavior.
    """
    return all(_is_field(value) for value in values)
class IURI(INativeStringLine):
    """A field containing an absolute URI
    """
class IId(INativeStringLine):
    """A field containing a unique identifier

    A unique identifier is either an absolute URI or a dotted name.
    If it's a dotted name, it should have a module/package name as a prefix.
    """
class IDottedName(INativeStringLine):
    """Dotted name field.

    Values of DottedName fields must be Python-style dotted names.
    """

    min_dots = Int(
        title=_("Minimum number of dots"),
        required=True,
        min=0,
        default=0
    )

    max_dots = Int(
        title=_("Maximum number of dots (should not be less than min_dots)"),
        required=False,
        default=None
    )
class IPythonIdentifier(INativeStringLine):
    """
    A single Python identifier, such as a variable name.

    .. versionadded:: 4.9.0
    """
class IChoice(IField):
    """Field whose value is contained in a predefined set

    Only one, values or vocabulary, may be specified for a given choice.
    """

    vocabulary = Field(
        title=_("Vocabulary or source providing values"),
        description=_("The ISource, IContextSourceBinder or IBaseVocabulary "
                      "object that provides values for this field."),
        required=False,
        default=None
    )

    vocabularyName = TextLine(
        title=_("Vocabulary name"),
        description=_("Vocabulary name to lookup in the vocabulary registry"),
        required=False,
        default=None
    )
# Collections:
# Abstract
class ICollection(IMinMaxLen, IIterable, IContainer):
    """Abstract interface containing a collection value.

    The Value must be iterable and may have a min_length/max_length.
    """

    value_type = Object(
        IField,
        title=_("Value Type"),
        description=_("Field value items must conform to the given type, "
                      "expressed via a Field."))

    unique = Bool(
        title=_('Unique Members'),
        description=_('Specifies whether the members of the collection '
                      'must be unique.'),
        default=False)
class ISequence(ICollection):
    """Abstract interface specifying that the value is ordered"""


class IMutableSequence(ISequence):
    """
    Abstract interface specifying that the value is ordered and
    mutable.

    .. versionadded:: 4.6.0
    """


class IUnorderedCollection(ICollection):
    """Abstract interface specifying that the value cannot be ordered"""


class IAbstractSet(IUnorderedCollection):
    """An unordered collection of unique values."""

    # Redeclared to pin the inherited ICollection attribute to True.
    unique = Bool(
        description="This ICollection interface attribute must be True")


class IAbstractBag(IUnorderedCollection):
    """An unordered collection of values, with no limitations on whether
    members are unique"""

    # Redeclared to pin the inherited ICollection attribute to False.
    unique = Bool(
        description="This ICollection interface attribute must be False")
# Concrete
class ITuple(ISequence):
    """Field containing a value that implements the API of a conventional
    Python tuple."""


class IList(IMutableSequence):
    """Field containing a value that implements the API of a conventional
    Python list."""


class ISet(IAbstractSet):
    """Field containing a value that implements the API of a Python2.4+ set.
    """


class IFrozenSet(IAbstractSet):
    """Field containing a value that implements the API of a conventional
    Python 2.4+ frozenset."""
# (end Collections)
class IObject(IField):
    """
    Field containing an Object value.

    .. versionchanged:: 4.6.0
       Add the *validate_invariants* attribute.
    """

    schema = Object(
        IInterface,
        description=_("The Interface that defines the Fields comprising the "
                      "Object.")
    )

    validate_invariants = Bool(
        title=_("Validate Invariants"),
        description=_("A boolean that says whether "
                      "``schema.validateInvariants`` is called from "
                      "``self.validate()``. The default is true."),
        default=True,
    )
class IMapping(IMinMaxLen, IIterable, IContainer):
    """
    Field containing an instance of :class:`collections.Mapping`.

    The *key_type* and *value_type* fields allow specification
    of restrictions for keys and values contained in the dict.
    """

    key_type = Object(
        IField,
        description=_("Field keys must conform to the given type, expressed "
                      "via a Field.")
    )

    value_type = Object(
        IField,
        description=_("Field values must conform to the given type, expressed "
                      "via a Field.")
    )


class IMutableMapping(IMapping):
    """
    Field containing an instance of :class:`collections.MutableMapping`.
    """


class IDict(IMutableMapping):
    """Field containing a conventional dict.
    """
class ITerm(Interface):
    """Object representing a single value in a vocabulary."""

    value = Attribute(
        "value", "The value used to represent vocabulary term in a field.")


class ITokenizedTerm(ITerm):
    """Object representing a single value in a tokenized vocabulary.
    """

    # Should be a ``zope.schema.ASCIILine``, but `ASCIILine` is not a bootstrap
    # field. `ASCIILine` is a type of NativeString.
    token = Attribute(
        "token",
        """Token which can be used to represent the value on a stream.

        The value of this attribute must be a non-empty 7-bit ``str``.
        Control characters, including newline, are not allowed.
        """)


class ITitledTokenizedTerm(ITokenizedTerm):
    """A tokenized term that includes a title."""

    title = TextLine(title=_("Title"))
class ISource(Interface):
    """A set of values from which to choose

    Sources represent sets of values. They are used to specify the
    source for choice fields.

    Sources can be large (even infinite), in which case, they need to
    be queried to find out what their values are.
    """

    def __contains__(value):
        """Return whether the value is available in this source
        """
class ISourceQueriables(Interface):
    """A collection of objects for querying sources
    """

    def getQueriables():
        """Return an iterable of objects that can be queried

        The returned objects should be two-tuples with:

        - A unicode id

          The id must uniquely identify the queriable object within
          the set of queriable objects. Furthermore, in subsequent
          calls, the same id should be used for a given queriable
          object.

        - A queriable object

          This is an object for which there is a view provided for
          searching for items.
        """
class IContextSourceBinder(Interface):
    """A callable that binds a source to a context."""

    def __call__(context):
        """Return a context-bound instance that implements ISource.
        """
class IBaseVocabulary(ISource):
    """Representation of a vocabulary.

    At this most basic level, a vocabulary only need to support a test
    for containment. This can be implemented either by __contains__()
    or by sequence __getitem__() (the latter only being useful for
    vocabularies which are intrinsically ordered).
    """

    def getTerm(value):
        """Return the ITerm object for the term 'value'.

        If 'value' is not a valid term, this method raises LookupError.
        """
class IIterableSource(ISource):
    """Source which supports iteration over allowed values.

    The objects iteration provides must be values from the source.
    """

    def __iter__():
        """Return an iterator which provides the values from the source."""

    def __len__():
        """Return the number of valid values, or sys.maxint."""
# BBB vocabularies are pending deprecation, hopefully in 3.3
class IIterableVocabulary(Interface):
    """Vocabulary which supports iteration over allowed values.

    The objects iteration provides must conform to the ITerm
    interface.
    """

    def __iter__():
        """Return an iterator which provides the terms from the vocabulary."""

    def __len__():
        """Return the number of valid terms, or sys.maxint."""


class IVocabulary(IIterableVocabulary, IBaseVocabulary):
    """Vocabulary which is iterable."""


class IVocabularyTokenized(IVocabulary):
    """Vocabulary that provides support for tokenized representation.

    Terms returned from getTerm() and provided by iteration must
    conform to ITokenizedTerm.
    """

    def getTermByToken(token):
        """Return an ITokenizedTerm for the passed-in token.

        If `token` is not represented in the vocabulary, `LookupError`
        is raised.
        """
class ITreeVocabulary(IVocabularyTokenized, IEnumerableMapping):
    """A tokenized vocabulary with a tree-like structure.

    The tree is implemented as dictionary, with keys being ITokenizedTerm
    terms and the values being similar dictionaries. Leaf values are empty
    dictionaries.
    """
class IVocabularyRegistry(Interface):
    """
    Registry that provides `IBaseVocabulary` objects for specific
    fields.

    The fields of this package use the vocabulary registry that is
    returned from :func:`~.getVocabularyRegistry`. This is a hook
    function; by default it returns an instance of
    :class:`~.VocabularyRegistry`, but the function
    :func:`~.setVocabularyRegistry` can be used to change this.

    In particular, the package `zope.vocabularyregistry
    <https://pypi.org/project/zope.vocabularyregistry/>`_ can be used
    to install a vocabulary registry that uses the :mod:`zope.component`
    architecture.
    """

    def get(context, name):
        """
        Return the vocabulary named *name* for the content object
        *context*.

        When the vocabulary cannot be found, `LookupError` is raised.
        """
class IVocabularyFactory(Interface):
    """
    An object that can create `IBaseVocabulary`.

    Objects that implement this interface can be registered with the
    default :class:`~.VocabularyRegistry` provided by this package.

    Alternatively, `zope.vocabularyregistry
    <https://pypi.org/project/zope.vocabularyregistry/>`_ can be used
    to install a `IVocabularyRegistry` that looks for named utilities
    using :func:`zope.component.getUtility` which provide this
    interface.
    """

    def __call__(context):
        """The *context* provides a location that vocabulary can make use of.
        """
class IFieldEvent(Interface):
    """An event concerning a schema field."""

    field = Object(
        IField,
        description="The field that has been changed")

    object = Attribute("The object containing the field")
class IFieldUpdatedEvent(IFieldEvent):
    """
    A field has been modified

    Subscribers will get the old and the new value together with the field
    """

    old_value = Attribute("The value of the field before modification")

    # NOTE: the original line carried trailing extraction junk
    # ("| zope.schema | ... | interfaces.py") fused onto it; removed here.
    new_value = Attribute("The value of the field after modification")
"""Bootstrapping fields
"""
__docformat__ = 'restructuredtext'
import decimal
import fractions
import numbers
import sys
import threading
import unicodedata
from math import isinf
from zope.event import notify
from zope.interface import Attribute
from zope.interface import Interface
from zope.interface import Invalid
from zope.interface import implementer
from zope.interface import providedBy
from zope.interface.interface import InterfaceClass
from zope.interface.interfaces import IInterface
from zope.interface.interfaces import IMethod
from zope.schema._bootstrapinterfaces import ConstraintNotSatisfied
from zope.schema._bootstrapinterfaces import IBeforeObjectAssignedEvent
from zope.schema._bootstrapinterfaces import IContextAwareDefaultFactory
from zope.schema._bootstrapinterfaces import IFromBytes
from zope.schema._bootstrapinterfaces import IFromUnicode
from zope.schema._bootstrapinterfaces import IValidatable
from zope.schema._bootstrapinterfaces import NotAContainer
from zope.schema._bootstrapinterfaces import NotAnInterface
from zope.schema._bootstrapinterfaces import NotAnIterator
from zope.schema._bootstrapinterfaces import RequiredMissing
from zope.schema._bootstrapinterfaces import SchemaNotCorrectlyImplemented
from zope.schema._bootstrapinterfaces import SchemaNotFullyImplemented
from zope.schema._bootstrapinterfaces import SchemaNotProvided
from zope.schema._bootstrapinterfaces import StopValidation
from zope.schema._bootstrapinterfaces import TooBig
from zope.schema._bootstrapinterfaces import TooLong
from zope.schema._bootstrapinterfaces import TooShort
from zope.schema._bootstrapinterfaces import TooSmall
from zope.schema._bootstrapinterfaces import ValidationError
from zope.schema._bootstrapinterfaces import WrongType
class _NotGiven(object):
    # Sentinel type used to tell "argument omitted" apart from an
    # explicitly passed None.
    def __repr__(self):  # pragma: no cover
        return "<Not Given>"


# Replace the class by its single instance: the name is used as a
# singleton marker value from here on.
_NotGiven = _NotGiven()
class ValidatedProperty(object):
    """Data descriptor that validates a value before storing it.

    The value is checked with *check* when given, otherwise with the
    host instance's ``validate`` method.  Validation is skipped for
    ``None`` (when *allow_none* is set) and for values equal to the
    instance's ``missing_value``.
    """

    def __init__(self, name, check=None, allow_none=False):
        self._name = name
        self._check = check
        self._allow_none = allow_none

    def __set__(self, inst, value):
        skip = (
            (self._allow_none and value is None)
            or value == inst.missing_value
        )
        if not skip:
            check = self._check
            if check is None:
                inst.validate(value)
            else:
                check(inst, value)
        inst.__dict__[self._name] = value

    def __get__(self, inst, owner):
        # Class-level access returns the descriptor itself.
        if inst is None:
            return self
        return inst.__dict__[self._name]
class DefaultProperty(ValidatedProperty):
    """Descriptor for a field's ``default`` that honours ``defaultFactory``.

    When the instance has a ``defaultFactory``, the stored default is
    ignored and the factory is (re-)invoked on every read.
    """

    def __get__(self, inst, owner):
        if inst is None:
            return self
        defaultFactory = inst.__dict__.get('defaultFactory')
        # If there is no default factory, simply return the default.
        if defaultFactory is None:
            return inst.__dict__[self._name]
        # Get the default value by calling the factory. Some factories might
        # require a context to produce a value.
        if IContextAwareDefaultFactory.providedBy(defaultFactory):
            value = defaultFactory(inst.context)
        else:
            value = defaultFactory()
        # Check that the created value is valid.
        if self._check is not None:
            self._check(inst, value)
        elif value != inst.missing_value:
            inst.validate(value)
        return value
def getFields(schema):
    """Return a dictionary containing all the Fields in a schema."""
    return {
        name: schema[name]
        for name in schema
        if IValidatable.providedBy(schema[name])
    }
class _DocStringHelpers(object):
    # Namespace object to hold methods related to ReST formatting
    # docstrings

    @staticmethod
    def docstring_to_lines(docstring):
        # Similar to what sphinx.utils.docstrings.prepare_docstring
        # does. Strip leading equal whitespace, accounting for an initial line
        # that might not have any. Return a list of lines, with a trailing
        # blank line.
        lines = docstring.expandtabs().splitlines()
        # Find minimum indentation of any non-blank lines after ignored lines.
        margin = sys.maxsize
        for line in lines[1:]:
            content = len(line.lstrip())
            if content:
                indent = len(line) - content
                margin = min(margin, indent)
        # Remove indentation from first ignored lines.
        if len(lines) >= 1:
            lines[0] = lines[0].lstrip()
        # margin stays at sys.maxsize when every later line is blank;
        # in that case there is nothing to dedent.
        if margin < sys.maxsize:
            for i in range(1, len(lines)):
                lines[i] = lines[i][margin:]
        # Remove any leading blank lines.
        while lines and not lines[0]:
            lines.pop(0)
        #
        lines.append('')
        return lines

    @staticmethod
    def make_class_directive(kind):
        # Produce a ReST ``:class:`` reference for *kind*, hiding the
        # builtins module and collapsing private zope.schema modules.
        mod = kind.__module__
        if kind.__module__ in ('__builtin__', 'builtins'):
            mod = ''
        if mod in ('zope.schema._bootstrapfields', 'zope.schema._field'):
            mod = 'zope.schema'
        mod += '.' if mod else ''
        return ':class:`%s%s`' % (mod, kind.__name__)

    @classmethod
    def make_field(cls, name, value):
        # A single ReST field-list entry.
        return ":%s: %s" % (name, value)

    @classmethod
    def make_class_field(cls, name, kind):
        # *kind* may be a single type/interface or a tuple of them.
        if isinstance(kind, (type, InterfaceClass)):
            return cls.make_field(name, cls.make_class_directive(kind))
        if not isinstance(kind, tuple):  # pragma: no cover
            raise TypeError(
                "make_class_field() can't handle kind %r" % (kind,))
        return cls.make_field(
            name,
            ', '.join([cls.make_class_directive(t) for t in kind]))
class Field(Attribute):
    """Base class for all schema fields.

    A Field is a :class:`zope.interface.Attribute` extended with
    validation, a default value (possibly produced by a factory),
    read-only/required flags and a creation-order counter.
    """

    # Type restrictions, if any
    _type = None
    context = None

    # If a field has no assigned value, it will be set to missing_value.
    missing_value = None

    # This is the default value for the missing_value argument to the
    # Field constructor. A marker is helpful since we don't want to
    # overwrite missing_value if it is set differently on a Field
    # subclass and isn't specified via the constructor.
    __missing_value_marker = _NotGiven

    # Note that the "order" field has a dual existence:
    # 1. The class variable Field.order is used as a source for the
    #    monotonically increasing values used to provide...
    # 2. The instance variable self.order which provides a
    #    monotonically increasing value that tracks the creation order
    #    of Field (including Field subclass) instances.
    order = 0

    default = DefaultProperty('default')

    # These were declared as slots in zope.interface, we override them here to
    # get rid of the descriptors so they don't break .bind()
    __name__ = None
    interface = None
    _Element__tagged_values = None

    def __init__(self, title=u'', description=u'', __name__='',
                 required=True, readonly=False, constraint=None, default=None,
                 defaultFactory=None, missing_value=__missing_value_marker):
        """Pass in field values as keyword parameters.

        Generally, you want to pass either a title and description, or
        a doc string. If you pass no doc string, it will be computed
        from the title and description. If you pass a doc string that
        follows the Python coding style (title line separated from the
        body by a blank line), the title and description will be
        computed from the doc string. Unfortunately, the doc string
        must be passed as a positional argument.

        Here are some examples:

        >>> from zope.schema._bootstrapfields import Field
        >>> f = Field()
        >>> f.__doc__, str(f.title), str(f.description)
        ('', '', '')

        >>> f = Field(title=u'sample')
        >>> str(f.__doc__), str(f.title), str(f.description)
        ('sample', 'sample', '')

        >>> f = Field(title=u'sample', description=u'blah blah\\nblah')
        >>> str(f.__doc__), str(f.title), str(f.description)
        ('sample\\n\\nblah blah\\nblah', 'sample', 'blah blah\\nblah')
        """
        __doc__ = ''
        # Fix leading whitespace that occurs when using multi-line
        # strings, but don't overwrite the original, we need to
        # preserve it (it could be a MessageID).
        doc_description = '\n'.join(
            _DocStringHelpers.docstring_to_lines(description or u'')[:-1]
        )
        if title:
            if doc_description:
                __doc__ = "%s\n\n%s" % (title, doc_description)
            else:
                __doc__ = title
        elif description:
            __doc__ = doc_description
        super(Field, self).__init__(__name__, __doc__)
        self.title = title
        self.description = description
        self.required = required
        self.readonly = readonly
        if constraint is not None:
            # Per-instance override of the default constraint() method.
            self.constraint = constraint
        self.default = default
        self.defaultFactory = defaultFactory
        # Keep track of the order of field definitions
        Field.order += 1
        self.order = Field.order
        if missing_value is not self.__missing_value_marker:
            self.missing_value = missing_value

    def constraint(self, value):
        # Default constraint accepts anything; replaced per instance when
        # a ``constraint`` callable is passed to the constructor.
        return True

    def bind(self, context):
        # Return a shallow copy of this field bound to *context*.
        clone = self.__class__.__new__(self.__class__)
        clone.__dict__.update(self.__dict__)
        clone.context = context
        return clone

    def validate(self, value):
        # Raise a ValidationError subclass when *value* is unacceptable.
        if value == self.missing_value:
            if self.required:
                raise RequiredMissing(
                    self.__name__
                ).with_field_and_value(self, value)
        else:
            try:
                self._validate(value)
            except StopValidation:
                # A validator decided that no further checks are needed.
                pass

    def __get_property_names_to_compare(self):
        # Return the set of property names to compare, ignoring
        # order
        names = {}  # used as set of property names, ignoring values
        for interface in providedBy(self):
            names.update(getFields(interface))

        # order will be different always, don't compare it
        names.pop('order', None)
        return names

    def __hash__(self):
        # Equal objects should have equal hashes;
        # equal hashes does not imply equal objects.
        value = (
            (type(self), self.interface) +
            tuple(self.__get_property_names_to_compare())
        )
        return hash(value)

    def __eq__(self, other):
        # should be the same type and in the same interface (or no interface
        # at all)
        if self is other:
            return True

        if type(self) != type(other) or self.interface != other.interface:
            return False

        # should have the same properties
        names = self.__get_property_names_to_compare()
        # XXX: What about the property names of the other object? Even
        # though it's the same type, it could theoretically have
        # another interface that it `alsoProvides`.

        for name in names:
            if getattr(self, name) != getattr(other, name):
                return False
        return True

    def __ne__(self, other):
        return not self.__eq__(other)

    def _validate(self, value):
        # Type check first, then the constraint.
        if self._type is not None and not isinstance(value, self._type):
            raise WrongType(
                value, self._type, self.__name__
            ).with_field_and_value(self, value)

        try:
            constraint = self.constraint(value)
        except ValidationError as e:
            # Fill in field/value details if the constraint left them out.
            if e.field is None:
                e.field = self
            if e.value is None:
                e.value = value
            raise
        if not constraint:
            raise ConstraintNotSatisfied(
                value, self.__name__
            ).with_field_and_value(self, value)

    def get(self, object):
        # Read this field's value from *object* (may raise AttributeError).
        return getattr(object, self.__name__)

    def query(self, object, default=None):
        # Like get(), but return *default* instead of raising.
        return getattr(object, self.__name__, default)

    def set(self, object, value):
        # Store *value* on *object*; read-only fields refuse.
        if self.readonly:
            raise TypeError("Can't set values on read-only fields "
                            "(name=%s, class=%s.%s)"
                            % (self.__name__,
                               object.__class__.__module__,
                               object.__class__.__name__))
        setattr(object, self.__name__, value)

    def getExtraDocLines(self):
        """
        Return a list of ReST formatted lines that will be added
        to the docstring returned by :meth:`getDoc`.

        By default, this will include information about the various
        properties of this object, such as required and readonly status,
        required type, and so on.

        This implementation uses a field list for this.

        Subclasses may override or extend.

        .. versionadded:: 4.6.0
        """
        lines = []
        lines.append(_DocStringHelpers.make_class_field(
            'Implementation', type(self)))
        lines.append(_DocStringHelpers.make_field("Read Only", self.readonly))
        lines.append(_DocStringHelpers.make_field("Required", self.required))
        if self.defaultFactory:
            lines.append(_DocStringHelpers.make_field(
                "Default Factory", repr(self.defaultFactory)))
        else:
            lines.append(_DocStringHelpers.make_field(
                "Default Value", repr(self.default)))

        if self._type:
            lines.append(_DocStringHelpers.make_class_field(
                "Allowed Type", self._type))

        # key_type and value_type are commonly used, but don't
        # have a common superclass to add them, so we do it here.
        # Using a rubric produces decent formatting
        for name, rubric in (('key_type', 'Key Type'),
                             ('value_type', 'Value Type')):
            field = getattr(self, name, None)
            if hasattr(field, 'getDoc'):
                lines.append("")
                lines.append(".. rubric:: " + rubric)
                lines.append("")
                lines.append(field.getDoc())

        return lines

    def getDoc(self):
        # Combine the base docstring with the generated field list.
        doc = super(Field, self).getDoc()
        lines = _DocStringHelpers.docstring_to_lines(doc)
        lines += self.getExtraDocLines()
        lines.append('')

        return '\n'.join(lines)
class Container(Field):
    """Field whose value must support containment or, failing that,
    iteration."""

    def _validate(self, value):
        super(Container, self)._validate(value)

        if hasattr(value, '__contains__'):
            return
        try:
            iter(value)
        except TypeError:
            raise NotAContainer(value).with_field_and_value(self, value)
# XXX This class violates the Liskov Substitutability Principle: it
# is derived from Container, but cannot be used everywhere an instance
# of Container could be, because its '_validate' is more restrictive.
class Iterable(Container):
    """Field whose value must be iterable (not merely a container)."""

    def _validate(self, value):
        super(Iterable, self)._validate(value)

        # See if we can get an iterator for it
        try:
            iter(value)
        except TypeError:
            raise NotAnIterator(value).with_field_and_value(self, value)
class Orderable(object):
    """Values of ordered fields can be sorted.

    They can be restricted to a range of values.

    Orderable is a mixin used in combination with Field.
    """

    min = ValidatedProperty('min', allow_none=True)
    max = ValidatedProperty('max', allow_none=True)

    def __init__(self, min=None, max=None, default=None, **kw):
        # Set min and max to None so that we can validate if
        # one of the super methods invoke validation.
        self.min = None
        self.max = None

        super(Orderable, self).__init__(**kw)

        # Now really set min and max
        self.min = min
        self.max = max

        # We've taken over setting default so it can be limited by min
        # and max.
        self.default = default

    def _validate(self, value):
        super(Orderable, self)._validate(value)

        if self.min is not None and value < self.min:
            raise TooSmall(value, self.min).with_field_and_value(self, value)

        if self.max is not None and value > self.max:
            raise TooBig(value, self.max).with_field_and_value(self, value)
class MinMaxLen(object):
    """Expresses constraints on the length of a field.

    MinMaxLen is a mixin used in combination with Field.
    """

    min_length = 0
    max_length = None

    def __init__(self, min_length=0, max_length=None, **kw):
        self.min_length = min_length
        self.max_length = max_length
        super(MinMaxLen, self).__init__(**kw)

    def _validate(self, value):
        super(MinMaxLen, self)._validate(value)

        length = len(value)
        if self.min_length is not None and length < self.min_length:
            raise TooShort(value, self.min_length).with_field_and_value(
                self, value)
        if self.max_length is not None and length > self.max_length:
            raise TooLong(value, self.max_length).with_field_and_value(
                self, value)
@implementer(IFromUnicode)
class Text(MinMaxLen, Field):
    """A field containing text used for human discourse."""

    _type = str
    # Unicode normalization form applied by fromUnicode(); a false value
    # disables normalization.
    unicode_normalization = 'NFC'

    def __init__(self, *args, **kw):
        self.unicode_normalization = kw.pop(
            'unicode_normalization', self.unicode_normalization)
        super(Text, self).__init__(*args, **kw)

    def fromUnicode(self, value):
        """
        >>> from zope.schema import Text
        >>> t = Text(constraint=lambda v: 'x' in v)

        >>> t.fromUnicode(b"foo x spam")  # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ...
        zope.schema._bootstrapinterfaces.WrongType:
            ('foo x spam', <type 'unicode'>, '')
        >>> result = t.fromUnicode(u"foo x spam")
        >>> isinstance(result, bytes)
        False
        >>> str(result)
        'foo x spam'
        >>> t.fromUnicode(u"foo spam")  # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ...
        zope.schema._bootstrapinterfaces.ConstraintNotSatisfied:
            (u'foo spam', '')
        """
        if isinstance(value, str):
            if self.unicode_normalization:
                value = unicodedata.normalize(
                    self.unicode_normalization, value)
        # Non-str input is left untouched here and rejected by the
        # type check inside validate().
        self.validate(value)
        return value
class TextLine(Text):
    """A text field with no newlines."""

    def constraint(self, value):
        # Reject values containing either LF or CR characters.
        return not ('\n' in value or '\r' in value)
class Password(TextLine):
    """A text field containing a text used as a password."""

    # Marker a widget can pass to set() to mean "leave the stored
    # password unchanged".
    UNCHANGED_PASSWORD = object()

    def set(self, context, value):
        """Update the password.

        We use a special marker value that a widget can use
        to tell us that the password didn't change. This is
        needed to support edit forms that don't display the
        existing password and want to work together with
        encryption.
        """
        if value is self.UNCHANGED_PASSWORD:
            return
        super(Password, self).set(context, value)

    def validate(self, value):
        # NOTE(review): relies on self.context (set via bind()) to look up
        # the currently stored password; unbound fields fall back to
        # "no existing password".
        try:
            existing = bool(self.get(self.context))
        except AttributeError:
            existing = False
        if value is self.UNCHANGED_PASSWORD and existing:
            # Allow the UNCHANGED_PASSWORD value, if a password is set already
            return
        return super(Password, self).validate(value)
@implementer(IFromUnicode, IFromBytes)
class Bool(Field):
    """
    A field representing a Bool.

    .. versionchanged:: 4.8.0
       Implement :class:`zope.schema.interfaces.IFromBytes`
    """

    _type = bool

    def _validate(self, value):
        # Convert integers to bools so they don't get mis-flagged
        # by the type check later.
        if isinstance(value, int) and not isinstance(value, bool):
            value = bool(value)
        Field._validate(self, value)

    def set(self, object, value):
        # Same int -> bool coercion as in _validate().
        if isinstance(value, int) and not isinstance(value, bool):
            value = bool(value)
        Field.set(self, object, value)

    def fromUnicode(self, value):
        """
        >>> from zope.schema._bootstrapfields import Bool
        >>> from zope.schema.interfaces import IFromUnicode
        >>> b = Bool()
        >>> IFromUnicode.providedBy(b)
        True
        >>> b.fromUnicode('True')
        True
        >>> b.fromUnicode('')
        False
        >>> b.fromUnicode('true')
        True
        >>> b.fromUnicode('false') or b.fromUnicode('False')
        False
        >>> b.fromUnicode(u'\u2603')
        False
        """
        # On Python 2, we're relying on the implicit decoding
        # that happens during string comparisons of unicode to native
        # (byte) strings; decoding errors are silently dropped
        v = value == 'True' or value == 'true'
        self.validate(v)
        return v

    def fromBytes(self, value):
        """
        >>> from zope.schema._bootstrapfields import Bool
        >>> from zope.schema.interfaces import IFromBytes
        >>> b = Bool()
        >>> IFromBytes.providedBy(b)
        True
        >>> b.fromBytes(b'True')
        True
        >>> b.fromBytes(b'')
        False
        >>> b.fromBytes(b'true')
        True
        >>> b.fromBytes(b'false') or b.fromBytes(b'False')
        False
        >>> b.fromBytes(u'\u2603'.encode('utf-8'))
        False
        """
        return self.fromUnicode(value.decode("utf-8"))
class InvalidNumberLiteral(ValueError, ValidationError):
    """Invalid number literal."""
    # Raised by Number.fromUnicode when no registered converter accepts
    # the input string (see Number._validation_error).
@implementer(IFromUnicode, IFromBytes)
class Number(Orderable, Field):
    """
    A field representing a :class:`numbers.Number` and implementing
    :class:`zope.schema.interfaces.INumber`.

    The :meth:`fromUnicode` method will attempt to use the smallest or
    strictest possible type to represent incoming strings::

        >>> from zope.schema._bootstrapfields import Number
        >>> f = Number()
        >>> f.fromUnicode(u"1")
        1
        >>> f.fromUnicode(u"125.6")
        125.6
        >>> f.fromUnicode(u"1+0j")
        (1+0j)
        >>> f.fromUnicode(u"1/2")
        Fraction(1, 2)
        >>> f.fromUnicode(str(2**11234) + '.' + str(2**256))
        ... # doctest: +ELLIPSIS
        Decimal('590...936')
        >>> f.fromUnicode(u"not a number")  # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ...
        InvalidNumberLiteral: Invalid literal for Decimal: 'not a number'

    Similarly, :meth:`fromBytes` will do the same for incoming byte strings::

        >>> from zope.schema._bootstrapfields import Number
        >>> f = Number()
        >>> f.fromBytes(b"1")
        1
        >>> f.fromBytes(b"125.6")
        125.6
        >>> f.fromBytes(b"1+0j")
        (1+0j)
        >>> f.fromBytes(b"1/2")
        Fraction(1, 2)
        >>> f.fromBytes((str(2**11234) + '.' + str(2**256)).encode('ascii'))
        ... # doctest: +ELLIPSIS
        Decimal('590...936')
        >>> f.fromBytes(b"not a number")  # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ...
        InvalidNumberLiteral: Invalid literal for Decimal: 'not a number'

    .. versionadded:: 4.6.0
    .. versionchanged:: 4.8.0
       Implement :class:`zope.schema.interfaces.IFromBytes`
    """

    _type = numbers.Number

    # An ordered sequence of conversion routines. These should accept a
    # native string and produce an object that is an instance of `_type`, or
    # raise a ValueError. The order should be most specific/strictest
    # towards least restrictive (in other words, lowest in the numeric tower
    # towards highest). We break this rule with fractions, though: a
    # floating point number is more generally useful and expected than a
    # fraction, so we attempt to parse as a float before a fraction.
    _unicode_converters = (
        int, float, fractions.Fraction, complex, decimal.Decimal,
    )

    # The type of error we will raise if all conversions fail.
    _validation_error = InvalidNumberLiteral

    def fromUnicode(self, value):
        # Try each converter in order; the first one that both parses and
        # validates wins.
        last_exc = None
        for converter in self._unicode_converters:
            try:
                val = converter(value)
                if (converter is float
                        and isinf(val)
                        and decimal.Decimal in self._unicode_converters):
                    # Pass this on to decimal, if we're allowed
                    val = decimal.Decimal(value)
            except (ValueError, decimal.InvalidOperation) as e:
                last_exc = e
            else:
                self.validate(val)
                return val
        try:
            raise self._validation_error(*last_exc.args).with_field_and_value(
                self, value)
        finally:
            # Drop the saved exception so its traceback is not kept alive.
            last_exc = None

    def fromBytes(self, value):
        return self.fromUnicode(value.decode('utf-8'))
class Complex(Number):
    """
    A field representing a :class:`numbers.Complex` and implementing
    :class:`zope.schema.interfaces.IComplex`.

    The :meth:`fromUnicode` method is like that for :class:`Number`,
    but doesn't allow Decimals::

        >>> from zope.schema._bootstrapfields import Complex
        >>> f = Complex()
        >>> f.fromUnicode(u"1")
        1
        >>> f.fromUnicode(u"125.6")
        125.6
        >>> f.fromUnicode(u"1+0j")
        (1+0j)
        >>> f.fromUnicode(u"1/2")
        Fraction(1, 2)
        >>> f.fromUnicode(str(2**11234) + '.' + str(2**256))
        ... # doctest: +ELLIPSIS
        inf
        >>> f.fromUnicode(u"not a number") # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ...
        InvalidNumberLiteral: Invalid literal for Decimal: 'not a number'

    Similarly for :meth:`fromBytes`:

        >>> from zope.schema._bootstrapfields import Complex
        >>> f = Complex()
        >>> f.fromBytes(b"1")
        1
        >>> f.fromBytes(b"125.6")
        125.6
        >>> f.fromBytes(b"1+0j")
        (1+0j)
        >>> f.fromBytes(b"1/2")
        Fraction(1, 2)
        >>> f.fromBytes((str(2**11234) + '.' + str(2**256)).encode('ascii'))
        ... # doctest: +ELLIPSIS
        inf
        >>> f.fromBytes(b"not a number") # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ...
        InvalidNumberLiteral: Invalid literal for Decimal: 'not a number'

    .. versionadded:: 4.6.0
    """
    # Values must be in the numeric tower at numbers.Complex or below.
    _type = numbers.Complex
    # Same converters as Number minus decimal.Decimal, which is not a
    # numbers.Complex.  An overflowing float therefore stays ``inf``.
    _unicode_converters = (int, float, complex, fractions.Fraction)
class Real(Complex):
    """
    A field representing a :class:`numbers.Real` and implementing
    :class:`zope.schema.interfaces.IReal`.

    The :meth:`fromUnicode` method is like that for :class:`Complex`,
    but doesn't allow Decimals or complex numbers::

        >>> from zope.schema._bootstrapfields import Real
        >>> f = Real()
        >>> f.fromUnicode("1")
        1
        >>> f.fromUnicode("125.6")
        125.6
        >>> f.fromUnicode("1+0j") # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ...
        InvalidNumberLiteral: Invalid literal for Fraction: '1+0j'
        >>> f.fromUnicode("1/2")
        Fraction(1, 2)
        >>> f.fromUnicode(str(2**11234) + '.' + str(2**256))
        ... # doctest: +ELLIPSIS
        inf
        >>> f.fromUnicode("not a number") # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ...
        InvalidNumberLiteral: Invalid literal for Decimal: 'not a number'

    .. versionadded:: 4.6.0
    """
    # Values must be real numbers; complex is excluded.
    _type = numbers.Real
    # No ``complex`` converter, so "1+0j" now fails (last attempt is
    # Fraction, whose error message surfaces).
    _unicode_converters = (int, float, fractions.Fraction)
class Rational(Real):
    """
    A field representing a :class:`numbers.Rational` and implementing
    :class:`zope.schema.interfaces.IRational`.

    The :meth:`fromUnicode` method is like that for :class:`Real`,
    but does not allow arbitrary floating point numbers::

        >>> from zope.schema._bootstrapfields import Rational
        >>> f = Rational()
        >>> f.fromUnicode("1")
        1
        >>> f.fromUnicode("1/2")
        Fraction(1, 2)
        >>> f.fromUnicode("125.6")
        Fraction(628, 5)
        >>> f.fromUnicode("1+0j") # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ...
        InvalidNumberLiteral: Invalid literal for Fraction: '1+0j'
        >>> f.fromUnicode(str(2**11234) + '.' + str(2**256))
        ... # doctest: +ELLIPSIS
        Fraction(195..., 330...)
        >>> f.fromUnicode("not a number") # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ...
        InvalidNumberLiteral: Invalid literal for Decimal: 'not a number'

    .. versionadded:: 4.6.0
    """
    # Values must be rational numbers; arbitrary floats are excluded.
    _type = numbers.Rational
    # Decimal strings like "125.6" are parsed exactly by Fraction.
    _unicode_converters = (int, fractions.Fraction)
class InvalidIntLiteral(ValueError, ValidationError):
    """Invalid int literal.

    Raised by :class:`Integral` (and subclasses) when a string cannot
    be parsed as an integer.  Subclasses ``ValueError`` so callers that
    catch the builtin error keep working.
    """
class Integral(Rational):
    """
    A field representing a :class:`numbers.Integral` and implementing
    :class:`zope.schema.interfaces.IIntegral`.

    The :meth:`fromUnicode` method only allows integral values::

        >>> from zope.schema._bootstrapfields import Integral
        >>> f = Integral()
        >>> f.fromUnicode("125")
        125
        >>> f.fromUnicode("125.6") #doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ...
        InvalidIntLiteral: invalid literal for int(): 125.6

    Similarly for :meth:`fromBytes`:

        >>> from zope.schema._bootstrapfields import Integral
        >>> f = Integral()
        >>> f.fromBytes(b"125")
        125
        >>> f.fromBytes(b"125.6") #doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ...
        InvalidIntLiteral: invalid literal for int(): 125.6

    .. versionadded:: 4.6.0
    """
    # Only integral values are acceptable.
    _type = numbers.Integral
    # int is the only parser; anything else raises InvalidIntLiteral.
    _unicode_converters = (int,)
    _validation_error = InvalidIntLiteral
class Int(Integral):
    """A field representing a native integer type and implementing
    :class:`zope.schema.interfaces.IInt`.
    """
    # Restricts Integral from the abstract numbers.Integral to the
    # concrete builtin int.
    _type = int
    _unicode_converters = (int,)
class InvalidDecimalLiteral(ValueError, ValidationError):
    """Raised by decimal fields when a string cannot be parsed as a
    :class:`decimal.Decimal`.  Subclasses ``ValueError`` for
    compatibility with callers catching the builtin error.
    """
class Decimal(Number):
    """
    A field representing a native :class:`decimal.Decimal` and implementing
    :class:`zope.schema.interfaces.IDecimal`.

    The :meth:`fromUnicode` method only accepts values that can be parsed
    by the ``Decimal`` constructor::

        >>> from zope.schema._field import Decimal
        >>> f = Decimal()
        >>> f.fromUnicode("1")
        Decimal('1')
        >>> f.fromUnicode("125.6")
        Decimal('125.6')
        >>> f.fromUnicode("1+0j") # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ...
        InvalidDecimalLiteral: Invalid literal for Decimal(): 1+0j
        >>> f.fromUnicode("1/2") # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ...
        InvalidDecimalLiteral: Invalid literal for Decimal(): 1/2
        >>> f.fromUnicode(str(2**11234) + '.' + str(2**256))
        ... # doctest: +ELLIPSIS
        Decimal('5901...936')
        >>> f.fromUnicode("not a number") # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ...
        InvalidDecimalLiteral: could not convert string to float: not a number

    Likewise for :meth:`fromBytes`::

        >>> from zope.schema._field import Decimal
        >>> f = Decimal()
        >>> f.fromBytes(b"1")
        Decimal('1')
        >>> f.fromBytes(b"125.6")
        Decimal('125.6')
        >>> f.fromBytes(b"1+0j") # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ...
        InvalidDecimalLiteral: Invalid literal for Decimal(): 1+0j
        >>> f.fromBytes(b"1/2") # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ...
        InvalidDecimalLiteral: Invalid literal for Decimal(): 1/2
        >>> f.fromBytes((str(2**11234) + '.' + str(2**256)).encode("ascii"))
        ... # doctest: +ELLIPSIS
        Decimal('5901...936')
        >>> f.fromBytes(b"not a number") # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ...
        InvalidDecimalLiteral: could not convert string to float: not a number
    """
    # Only decimal.Decimal instances are acceptable values.
    _type = decimal.Decimal
    # Decimal is the sole parser; any other literal form is rejected.
    _unicode_converters = (decimal.Decimal,)
    _validation_error = InvalidDecimalLiteral
class _ObjectsBeingValidated(threading.local):
    """Per-thread record of the objects currently being validated.

    Used by :func:`get_schema_validation_errors` to break cycles when
    validating object graphs that refer back to themselves.
    """

    def __init__(self):
        super(_ObjectsBeingValidated, self).__init__()
        # ids (as returned by id()) of objects whose validation is in
        # progress in this thread.
        self.ids_being_validated = set()
def get_schema_validation_errors(schema, value,
                                 _validating_objects=_ObjectsBeingValidated()):
    """
    Validate that *value* conforms to the schema interface *schema*.

    All :class:`zope.schema.interfaces.IField` members of the *schema*
    are validated after being bound to *value*. (Note that we do not check for
    arbitrary :class:`zope.interface.Attribute` members being present.)

    The shared default for *_validating_objects* is intentional: it is a
    single process-wide thread-local registry, not a per-call value.

    :return: A `dict` mapping field names to `ValidationError` subclasses.
       A non-empty return value means that validation failed.
    """
    errors = {}
    # Interface can be used as schema property for Object fields that plan to
    # hold values of any type.
    # Because Interface does not include any Attribute, it is obviously not
    # worth looping on its methods and filter them all out.
    if schema is Interface:
        return errors
    # If `value` is part of a cyclic graph, we need to break the cycle to
    # avoid infinite recursion.  Collect objects being validated in a
    # thread-local set keyed by their id().  (A previous version set a
    # volatile attribute on the object, which didn't work with security
    # proxies.)
    id_value = id(value)
    ids_being_validated = _validating_objects.ids_being_validated
    if id_value in ids_being_validated:
        return errors
    ids_being_validated.add(id_value)
    # (If we have gotten here, we know that `value` provides an interface
    # other than zope.interface.Interface;
    # iow, we can rely on the fact that it is an instance
    # that supports attribute assignment.)
    try:
        for name in schema.names(all=True):
            attribute = schema[name]
            if IMethod.providedBy(attribute):
                continue  # pragma: no cover
            try:
                if IValidatable.providedBy(attribute):
                    # validate attributes that are fields
                    field_value = getattr(value, name)
                    attribute = attribute.bind(value)
                    attribute.validate(field_value)
            except ValidationError as error:
                errors[name] = error
            except AttributeError as error:
                # property for the given name is not implemented
                errors[name] = SchemaNotFullyImplemented(
                    error
                ).with_field_and_value(attribute, None)
    finally:
        # Always unregister, even if validation raised, so later
        # validations of the same object are not silently skipped.
        ids_being_validated.remove(id_value)
    return errors
def get_validation_errors(schema, value, validate_invariants=True):
    """
    Validate that *value* conforms to the schema interface *schema*.

    This includes checking for any schema validation errors (using
    `get_schema_validation_errors`). If that succeeds, and
    *validate_invariants* is true, then we proceed to check for any
    declared invariants.

    Note that this does not include a check to see if the *value*
    actually provides the given *schema*.

    :return: If there were any validation errors, either schema or
             invariant, return a two tuple (schema_error_dict,
             invariant_error_list). If there were no errors, returns a
             two-tuple where both members are empty.
    """
    schema_errors = get_schema_validation_errors(schema, value)
    invariant_errors = []
    # Invariants are checked only when every field validated cleanly:
    # a failed field is often a missing attribute, which would most
    # likely make an invariant blow up with an AttributeError.
    if validate_invariants and not schema_errors:
        try:
            schema.validateInvariants(value, invariant_errors)
        except Invalid:
            # validateInvariants raises one wrapper error around all
            # the errors it collected, in addition to appending them to
            # invariant_errors.  The list is what we report, so the
            # wrapper is deliberately swallowed.
            pass
    return (schema_errors, invariant_errors)
class Object(Field):
    """
    Implementation of :class:`zope.schema.interfaces.IObject`.
    """
    # Subclasses may set this so callers can omit the ``schema``
    # constructor argument.
    schema = None

    def __init__(self, schema=_NotGiven, **kw):
        """
        Object(schema=<Not Given>, *, validate_invariants=True, **kwargs)

        Create an `~.IObject` field. The keyword arguments are as for
        `~.Field`.

        .. versionchanged:: 4.6.0
           Add the keyword argument *validate_invariants*. When true (the
           default), the schema's ``validateInvariants`` method will be
           invoked to check the ``@invariant`` properties of the schema.
        .. versionchanged:: 4.6.0
           The *schema* argument can be omitted in a subclass
           that specifies a ``schema`` attribute.
        """
        if schema is _NotGiven:
            schema = self.schema
        if not IInterface.providedBy(schema):
            # Note that we don't provide 'self' as the 'field'
            # by calling with_field_and_value(): We're not fully constructed,
            # we don't want this instance to escape.
            raise NotAnInterface(schema, self.__name__)
        self.schema = schema
        self.validate_invariants = kw.pop('validate_invariants', True)
        super(Object, self).__init__(**kw)

    def getExtraDocLines(self):
        # Adds a "Must Provide" entry naming the schema to the
        # auto-generated field documentation.
        lines = super(Object, self).getExtraDocLines()
        lines.append(_DocStringHelpers.make_class_field(
            "Must Provide", self.schema))
        return lines

    def _validate(self, value):
        super(Object, self)._validate(value)
        # schema has to be provided by value
        if not self.schema.providedBy(value):
            raise SchemaNotProvided(self.schema, value).with_field_and_value(
                self, value)
        # check the value against schema
        schema_error_dict, invariant_errors = get_validation_errors(
            self.schema,
            value,
            self.validate_invariants
        )
        if schema_error_dict or invariant_errors:
            errors = list(schema_error_dict.values()) + invariant_errors
            exception = SchemaNotCorrectlyImplemented(
                errors,
                self.__name__,
                schema_error_dict,
                invariant_errors
            ).with_field_and_value(self, value)
            try:
                raise exception
            finally:
                # Break cycles: drop local references to the exception
                # and the error collections it now holds.
                del exception
                del invariant_errors
                del schema_error_dict
                del errors

    def set(self, object, value):
        # Announce that we're going to assign the value to the object.
        # Motivation: Widgets typically like to take care of policy-specific
        # actions, like establishing location.
        event = BeforeObjectAssignedEvent(value, self.__name__, object)
        notify(event)
        # The event subscribers are allowed to replace the object, thus we need
        # to replace our previous value.
        value = event.object
        super(Object, self).set(object, value)
@implementer(IBeforeObjectAssignedEvent)
class BeforeObjectAssignedEvent(object):
    """An object is going to be assigned to an attribute on another object.

    Notified by :meth:`Object.set` so subscribers can adjust (or even
    replace, via :attr:`object`) the value before assignment.
    """

    def __init__(self, object, name, context):
        # The value about to be assigned; subscribers may replace it.
        self.object = object
        # The attribute name being assigned to.
        self.name = name
        # The object that will receive the assignment.
        self.context = context
"""Schema Fields
"""
__docformat__ = 'restructuredtext'
import re
from collections import abc
from datetime import date
from datetime import datetime
from datetime import time
from datetime import timedelta
from zope.interface import classImplements
from zope.interface import classImplementsFirst
from zope.interface import implementedBy
from zope.interface import implementer
from zope.interface.interfaces import IInterface
from zope.schema._bootstrapfields import Bool
from zope.schema._bootstrapfields import Complex
from zope.schema._bootstrapfields import Container # API import for __init__
from zope.schema._bootstrapfields import Decimal
from zope.schema._bootstrapfields import Field
from zope.schema._bootstrapfields import Int
from zope.schema._bootstrapfields import Integral
from zope.schema._bootstrapfields import \
InvalidDecimalLiteral # noqa: reexport
from zope.schema._bootstrapfields import Iterable
from zope.schema._bootstrapfields import MinMaxLen
from zope.schema._bootstrapfields import Number
from zope.schema._bootstrapfields import Object
from zope.schema._bootstrapfields import Orderable
from zope.schema._bootstrapfields import Password
from zope.schema._bootstrapfields import Rational
from zope.schema._bootstrapfields import Real
from zope.schema._bootstrapfields import Text
from zope.schema._bootstrapfields import TextLine
from zope.schema._bootstrapfields import _NotGiven
from zope.schema.fieldproperty import FieldProperty
from zope.schema.interfaces import IASCII
from zope.schema.interfaces import IURI
from zope.schema.interfaces import ConstraintNotSatisfied
from zope.schema.interfaces import IASCIILine
from zope.schema.interfaces import IBaseVocabulary
from zope.schema.interfaces import IBool
from zope.schema.interfaces import IBytes
from zope.schema.interfaces import IBytesLine
from zope.schema.interfaces import IChoice
from zope.schema.interfaces import ICollection
from zope.schema.interfaces import IComplex
from zope.schema.interfaces import IContainer
from zope.schema.interfaces import IContextSourceBinder
from zope.schema.interfaces import IDate
from zope.schema.interfaces import IDatetime
from zope.schema.interfaces import IDecimal
from zope.schema.interfaces import IDict
from zope.schema.interfaces import IDottedName
from zope.schema.interfaces import IField
from zope.schema.interfaces import IFloat
from zope.schema.interfaces import IFromBytes
from zope.schema.interfaces import IFromUnicode
from zope.schema.interfaces import IFrozenSet
from zope.schema.interfaces import IId
from zope.schema.interfaces import IInt
from zope.schema.interfaces import IIntegral
from zope.schema.interfaces import IInterfaceField
from zope.schema.interfaces import IIterable
from zope.schema.interfaces import IList
from zope.schema.interfaces import IMapping
from zope.schema.interfaces import IMinMaxLen
from zope.schema.interfaces import IMutableMapping
from zope.schema.interfaces import IMutableSequence
from zope.schema.interfaces import INativeString
from zope.schema.interfaces import INativeStringLine
from zope.schema.interfaces import INumber
from zope.schema.interfaces import InvalidDottedName
from zope.schema.interfaces import InvalidId
from zope.schema.interfaces import InvalidURI
from zope.schema.interfaces import InvalidValue
from zope.schema.interfaces import IObject
from zope.schema.interfaces import IPassword
from zope.schema.interfaces import IPythonIdentifier
from zope.schema.interfaces import IRational
from zope.schema.interfaces import IReal
from zope.schema.interfaces import ISequence
from zope.schema.interfaces import ISet
from zope.schema.interfaces import ISource
from zope.schema.interfaces import ISourceText
from zope.schema.interfaces import IText
from zope.schema.interfaces import ITextLine
from zope.schema.interfaces import ITime
from zope.schema.interfaces import ITimedelta
from zope.schema.interfaces import ITuple
from zope.schema.interfaces import NotAnInterface
from zope.schema.interfaces import NotUnique
from zope.schema.interfaces import ValidationError
from zope.schema.interfaces import WrongContainedType
from zope.schema.interfaces import WrongType
from zope.schema.vocabulary import SimpleVocabulary
from zope.schema.vocabulary import getVocabularyRegistry
# Fix up bootstrap field types.  The bootstrap classes are defined
# before the interfaces exist, so their interface declarations and
# FieldProperty-backed attributes are attached here instead.
Field.title = FieldProperty(IField['title'])
Field.description = FieldProperty(IField['description'])
Field.required = FieldProperty(IField['required'])
Field.readonly = FieldProperty(IField['readonly'])
# Default is already taken care of
classImplements(Field, IField)

MinMaxLen.min_length = FieldProperty(IMinMaxLen['min_length'])
MinMaxLen.max_length = FieldProperty(IMinMaxLen['max_length'])

# classImplementsFirst puts the interface ahead of inherited
# declarations for each bootstrap class.
classImplementsFirst(Text, IText)
classImplementsFirst(TextLine, ITextLine)
classImplementsFirst(Password, IPassword)
classImplementsFirst(Bool, IBool)
classImplementsFirst(Iterable, IIterable)
classImplementsFirst(Container, IContainer)

classImplementsFirst(Number, INumber)
classImplementsFirst(Complex, IComplex)
classImplementsFirst(Real, IReal)
classImplementsFirst(Rational, IRational)
classImplementsFirst(Integral, IIntegral)
classImplementsFirst(Int, IInt)
classImplementsFirst(Decimal, IDecimal)

classImplementsFirst(Object, IObject)
class implementer_if_needed(object):
    """
    Class decorator like :func:`zope.interface.implementer`, except
    that it only declares the interfaces the class does not already
    implement.

    Redundantly implementing interfaces already inherited tends to
    produce problems with the C3 order.  This is used when we cannot
    statically determine if we need the interface or not, e.g. because
    we're picking different base classes under some circumstances.
    """

    def __init__(self, *ifaces):
        # Candidate interfaces; filtered at decoration time.
        self._ifaces = ifaces

    def __call__(self, cls):
        # Fix: dropped the dead ``ifaces_needed = []`` assignment that
        # was immediately overwritten by the comprehension below.
        implemented = implementedBy(cls)
        ifaces_needed = [
            iface
            for iface in self._ifaces
            if not implemented.isOrExtends(iface)
        ]
        return implementer(*ifaces_needed)(cls)
@implementer(ISourceText)
class SourceText(Text):
    # The interface supplies the user-facing documentation.
    __doc__ = ISourceText.__doc__
    # Source text is plain text (str).
    _type = str
@implementer(IBytes, IFromUnicode, IFromBytes)
class Bytes(MinMaxLen, Field):
    __doc__ = IBytes.__doc__

    _type = bytes

    def fromUnicode(self, value):
        """See IFromUnicode.

        The text is encoded as ASCII (non-ASCII input raises
        ``UnicodeEncodeError``) and handed to :meth:`fromBytes`.
        """
        encoded = value.encode('ascii')
        return self.fromBytes(encoded)

    def fromBytes(self, value):
        """Validate the raw byte string and return it unchanged."""
        self.validate(value)
        return value
@implementer_if_needed(INativeString, IFromUnicode, IFromBytes)
class NativeString(Text):
    """
    A native string is always the type `str`.

    In addition to :class:`~zope.schema.interfaces.INativeString`,
    this implements :class:`~zope.schema.interfaces.IFromUnicode` and
    :class:`~zope.schema.interfaces.IFromBytes`.

    .. versionchanged:: 4.9.0
       This is now a distinct type instead of an alias for either `Text` or
       `Bytes`, depending on the platform.
    """
    _type = str

    def fromBytes(self, value):
        """Decode *value* as UTF-8, validate the text, and return it."""
        decoded = value.decode('utf-8')
        self.validate(decoded)
        return decoded
@implementer(IASCII)
class ASCII(NativeString):
    __doc__ = IASCII.__doc__

    def _validate(self, value):
        """Reject any character outside the 7-bit ASCII range."""
        super(ASCII, self)._validate(value)
        if not value:
            # The empty string is trivially ASCII.
            return
        if any(ord(ch) > 127 for ch in value):
            raise InvalidValue().with_field_and_value(self, value)
@implementer(IBytesLine)
class BytesLine(Bytes):
    """A `Bytes` field with no newlines."""

    def constraint(self, value):
        # TODO: we should probably use a more general definition of newlines
        return value.find(b'\n') == -1
@implementer_if_needed(INativeStringLine, IFromUnicode, IFromBytes)
class NativeStringLine(TextLine):
    """
    A native string is always the type `str`; this field excludes
    newlines.

    In addition to :class:`~zope.schema.interfaces.INativeStringLine`,
    this implements :class:`~zope.schema.interfaces.IFromUnicode` and
    :class:`~zope.schema.interfaces.IFromBytes`.

    .. versionchanged:: 4.9.0
       This is now a distinct type instead of an alias for either `TextLine`
       or `BytesLine`, depending on the platform.
    """
    _type = str

    def fromBytes(self, value):
        """Decode *value* as UTF-8, validate the text, and return it."""
        decoded = value.decode('utf-8')
        self.validate(decoded)
        return decoded
@implementer(IASCIILine)
class ASCIILine(ASCII):
    __doc__ = IASCIILine.__doc__

    def constraint(self, value):
        # TODO: we should probably use a more general definition of newlines
        return value.find('\n') == -1
class InvalidFloatLiteral(ValueError, ValidationError):
    """Raised by Float fields when a string cannot be parsed as a
    float.  Subclasses ``ValueError`` for backwards compatibility.
    """
@implementer(IFloat)
class Float(Real):
    """
    A field representing a native :class:`float` and implementing
    :class:`zope.schema.interfaces.IFloat`.

    The class :class:`zope.schema.Real` is a more general version,
    accepting floats, integers, and fractions.

    The :meth:`fromUnicode` method only accepts values that can be parsed
    by the ``float`` constructor::

        >>> from zope.schema._field import Float
        >>> f = Float()
        >>> f.fromUnicode("1")
        1.0
        >>> f.fromUnicode("125.6")
        125.6
        >>> f.fromUnicode("1+0j") # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ...
        InvalidFloatLiteral: Invalid literal for float(): 1+0j
        >>> f.fromUnicode("1/2") # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ...
        InvalidFloatLiteral: invalid literal for float(): 1/2
        >>> f.fromUnicode(str(2**11234) + '.' + str(2**256))
        ... # doctest: +ELLIPSIS
        inf
        >>> f.fromUnicode("not a number") # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ...
        InvalidFloatLiteral: could not convert string to float: not a number

    Likewise for :meth:`fromBytes`::

        >>> from zope.schema._field import Float
        >>> f = Float()
        >>> f.fromBytes(b"1")
        1.0
        >>> f.fromBytes(b"125.6")
        125.6
        >>> f.fromBytes(b"1+0j") # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ...
        InvalidFloatLiteral: Invalid literal for float(): 1+0j
        >>> f.fromBytes(b"1/2") # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ...
        InvalidFloatLiteral: invalid literal for float(): 1/2
        >>> f.fromBytes((str(2**11234) + '.' + str(2**256)).encode('ascii'))
        ... # doctest: +ELLIPSIS
        inf
        >>> f.fromBytes(b"not a number") # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ...
        InvalidFloatLiteral: could not convert string to float: not a number
    """
    # Only the builtin float is acceptable.
    _type = float
    # float is the sole parser, so overflowing literals become inf
    # (no Decimal fallback here, unlike Number).
    _unicode_converters = (float,)
    _validation_error = InvalidFloatLiteral
@implementer(IDatetime)
class Datetime(Orderable, Field):
    __doc__ = IDatetime.__doc__
    # Only datetime.datetime instances are acceptable.
    _type = datetime
    # Fix: removed a redundant ``__init__`` that merely forwarded
    # ``*args, **kw`` to super(); inheriting it is identical.
@implementer(IDate)
class Date(Orderable, Field):
    __doc__ = IDate.__doc__
    _type = date

    def _validate(self, value):
        """Reject datetimes: datetime subclasses date, so the plain
        isinstance check in the base class would let them through."""
        super(Date, self)._validate(value)
        if not isinstance(value, datetime):
            return
        raise WrongType(
            value, self._type, self.__name__
        ).with_field_and_value(self, value)
@implementer(ITimedelta)
class Timedelta(Orderable, Field):
    __doc__ = ITimedelta.__doc__
    # Only datetime.timedelta instances are acceptable.
    _type = timedelta
@implementer(ITime)
class Time(Orderable, Field):
    __doc__ = ITime.__doc__
    # Only datetime.time instances are acceptable.
    _type = time
class MissingVocabularyError(ValidationError,
                             ValueError,
                             LookupError):
    """Raised when a named vocabulary cannot be found.

    Raised by :meth:`Choice._resolve_vocabulary` when the registry has
    no vocabulary under the configured name.
    """
    # Subclasses ValueError and LookupError for backwards compatibility
class InvalidVocabularyError(ValidationError,
                             ValueError,
                             TypeError):
    """Raised when the vocabulary is not an ISource."""
    # Subclasses TypeError and ValueError for backwards compatibility

    def __init__(self, vocabulary):
        message = "Invalid vocabulary %r" % (vocabulary,)
        super(InvalidVocabularyError, self).__init__(message)
@implementer(IChoice, IFromUnicode)
class Choice(Field):
    """Choice fields can have a value found in a constant or dynamic set of
    values given by the field definition.
    """

    def __init__(self, values=None, vocabulary=None, source=None, **kw):
        """Initialize object.

        Exactly one of *values*, *vocabulary* or *source* must be
        given; *source* is an alias for *vocabulary*, and *vocabulary*
        may be either a registered vocabulary name (str) or an
        IBaseVocabulary.
        """
        if vocabulary is not None:
            if (not isinstance(vocabulary, str)
                    and not IBaseVocabulary.providedBy(vocabulary)):
                raise ValueError('vocabulary must be a string or implement '
                                 'IBaseVocabulary')
            if source is not None:
                raise ValueError(
                    "You cannot specify both source and vocabulary.")
        elif source is not None:
            vocabulary = source

        if (values is None and vocabulary is None):
            raise ValueError(
                "You must specify either values or vocabulary."
            )
        if values is not None and vocabulary is not None:
            raise ValueError(
                "You cannot specify both values and vocabulary."
            )

        self.vocabulary = None
        self.vocabularyName = None
        if values is not None:
            self.vocabulary = SimpleVocabulary.fromValues(values)
        elif isinstance(vocabulary, str):
            # Resolved lazily via the vocabulary registry (see
            # _resolve_vocabulary).
            self.vocabularyName = vocabulary
        else:
            if (not ISource.providedBy(vocabulary)
                    and not IContextSourceBinder.providedBy(vocabulary)):
                raise InvalidVocabularyError(vocabulary)
            self.vocabulary = vocabulary

        # Before a default value is checked, it is validated. However, a
        # named vocabulary is usually not complete when these fields are
        # initialized. Therefore signal the validation method to ignore
        # default value checks during initialization of a Choice tied to a
        # registered vocabulary.
        self._init_field = (bool(self.vocabularyName) or
                            IContextSourceBinder.providedBy(self.vocabulary))
        super(Choice, self).__init__(**kw)
        self._init_field = False

    # ``source`` is a read-only alias for ``vocabulary``.
    source = property(lambda self: self.vocabulary)

    def _resolve_vocabulary(self, value):
        # Find the vocabulary we should use, raising
        # an exception if this isn't possible, and returning
        # an ISource otherwise.
        vocabulary = self.vocabulary
        if (IContextSourceBinder.providedBy(vocabulary)
                and self.context is not None):
            vocabulary = vocabulary(self.context)
        elif vocabulary is None and self.vocabularyName is not None:
            vr = getVocabularyRegistry()
            try:
                vocabulary = vr.get(self.context, self.vocabularyName)
            except LookupError:
                raise MissingVocabularyError(
                    "Can't validate value without vocabulary named %r" % (
                        self.vocabularyName,)
                ).with_field_and_value(self, value)
        if not ISource.providedBy(vocabulary):
            raise InvalidVocabularyError(vocabulary).with_field_and_value(
                self, value)
        return vocabulary

    def bind(self, context):
        """See zope.schema._bootstrapinterfaces.IField."""
        clone = super(Choice, self).bind(context)
        # Eagerly get registered vocabulary if needed;
        # once that's done, just return it
        vocabulary = clone.vocabulary = clone._resolve_vocabulary(None)
        clone._resolve_vocabulary = lambda value: vocabulary
        return clone

    def fromUnicode(self, value):
        """ See IFromUnicode.
        """
        self.validate(value)
        return value

    def _validate(self, value):
        # Pass all validations during initialization
        if self._init_field:
            return
        super(Choice, self)._validate(value)
        vocabulary = self._resolve_vocabulary(value)
        if value not in vocabulary:
            raise ConstraintNotSatisfied(
                value, self.__name__
            ).with_field_and_value(self, value)
# Both of these are inherited from the parent; re-declaring them
# here messes with the __sro__ of subclasses, causing them to be
# inconsistent with C3.
# @implementer(IFromUnicode, IFromBytes)
class _StrippedNativeStringLine(NativeStringLine):
    """
    A ``NativeStringLine`` that strips surrounding whitespace and only
    accepts ASCII input.

    Subclasses set ``_invalid_exc_type`` to the exception type raised
    for non-ASCII input.
    """

    _invalid_exc_type = None

    def fromUnicode(self, value):
        stripped = value.strip()
        # Although _type is str, non-ASCII input is rejected to match
        # the historical behaviour on Python 2 (the regexes used by
        # subclasses would reject it anyway).  The encode/decode round
        # trip is the ASCII check.
        try:
            encoded = stripped.encode('ascii')
        except UnicodeEncodeError:
            raise self._invalid_exc_type(value).with_field_and_value(
                self, value)
        result = encoded.decode('ascii')
        self.validate(result)
        return result

    def fromBytes(self, value):
        return self.fromUnicode(value.decode('ascii'))
_isuri = r"[a-zA-z0-9+.-]+:" # scheme
_isuri += r"\S*$" # non space (should be pickier)
_isuri = re.compile(_isuri).match
@implementer(IURI)
class URI(_StrippedNativeStringLine):
    """
    URI schema field.

    URIs can be validated from both unicode values and bytes values,
    producing a native text string in both cases::

        >>> from zope.schema import URI
        >>> field = URI()
        >>> field.fromUnicode(u' https://example.com ')
        'https://example.com'
        >>> field.fromBytes(b' https://example.com ')
        'https://example.com'

    .. versionchanged:: 4.8.0
       Implement :class:`zope.schema.interfaces.IFromBytes`
    """

    def _validate(self, value):
        super(URI, self)._validate(value)
        if not _isuri(value):
            raise InvalidURI(value).with_field_and_value(self, value)
# An identifier is a letter or underscore, followed by
# any number of letters, underscores, and digits.
# (\w also matches digits and underscores; the leading [a-zA-Z_]+
# forbids a purely numeric start.)
_identifier_pattern = r'[a-zA-Z_]+\w*'

# The whole string must match to be an identifier
_is_identifier = re.compile('^' + _identifier_pattern + '$').match

# A dotted name: identifiers joined by single dots, nothing else.
_isdotted = re.compile(
    # The start of the line, followed by an identifier,
    '^' + _identifier_pattern
    # optionally followed by .identifier any number of times
    + r"([.]" + _identifier_pattern + r")*"
    # followed by the end of the line.
    + r"$").match
@implementer(IPythonIdentifier)
class PythonIdentifier(_StrippedNativeStringLine):
    """
    This field describes a python identifier, i.e. a variable name.

    Empty strings are allowed.

    Identifiers can be validated from both unicode values and bytes values,
    producing a native text string in both cases::

        >>> from zope.schema import PythonIdentifier
        >>> field = PythonIdentifier()
        >>> field.fromUnicode(u'zope')
        'zope'
        >>> field.fromBytes(b'_zope')
        '_zope'
        >>> field.fromUnicode(u'  ')
        ''

    .. versionadded:: 4.9.0
    """

    def _validate(self, value):
        super(PythonIdentifier, self)._validate(value)
        if not value:
            # The empty string is explicitly allowed.
            return
        if _is_identifier(value) is None:
            raise InvalidValue(value).with_field_and_value(self, value)
@implementer(IDottedName)
class DottedName(_StrippedNativeStringLine):
    """Dotted name field.

    Values of DottedName fields must be Python-style dotted names.

    Dotted names can be validated from both unicode values and bytes values,
    producing a native text string in both cases::

        >>> from zope.schema import DottedName
        >>> field = DottedName()
        >>> field.fromUnicode(u'zope.schema')
        'zope.schema'
        >>> field.fromBytes(b'zope.schema')
        'zope.schema'
        >>> field.fromUnicode(u'zope._schema')
        'zope._schema'

    .. versionchanged:: 4.8.0
       Implement :class:`zope.schema.interfaces.IFromBytes`

    .. versionchanged:: 4.9.0
       Allow leading underscores in each component.
    """

    _invalid_exc_type = InvalidDottedName

    def __init__(self, *args, **kw):
        """Accepts optional ``min_dots`` / ``max_dots`` keyword bounds."""
        self.min_dots = int(kw.pop("min_dots", 0))
        if self.min_dots < 0:
            raise ValueError("min_dots cannot be less than zero")
        max_dots = kw.pop("max_dots", None)
        self.max_dots = None if max_dots is None else int(max_dots)
        if self.max_dots is not None and self.max_dots < self.min_dots:
            raise ValueError("max_dots cannot be less than min_dots")
        super(DottedName, self).__init__(*args, **kw)

    def _validate(self, value):
        """Check dotted-name syntax and the configured dot-count bounds."""
        super(DottedName, self)._validate(value)
        if not _isdotted(value):
            raise InvalidDottedName(value).with_field_and_value(self, value)
        dot_count = value.count(".")
        if dot_count < self.min_dots:
            raise InvalidDottedName(
                "too few dots; %d required" % self.min_dots, value
            ).with_field_and_value(self, value)
        if self.max_dots is not None and dot_count > self.max_dots:
            raise InvalidDottedName(
                "too many dots; no more than %d allowed" % self.max_dots, value
            ).with_field_and_value(self, value)
@implementer(IId)
class Id(_StrippedNativeStringLine):
    """Id field

    Values of id fields must be either uris or dotted names.

    .. versionchanged:: 4.8.0
       Implement :class:`zope.schema.interfaces.IFromBytes`
    """

    _invalid_exc_type = InvalidId

    def _validate(self, value):
        super(Id, self)._validate(value)
        if _isuri(value):
            return
        # A bare identifier with no dot is not an acceptable id.
        if "." in value and _isdotted(value):
            return
        raise InvalidId(value).with_field_and_value(self, value)
@implementer(IInterfaceField)
class InterfaceField(Field):
    __doc__ = IInterfaceField.__doc__

    def _validate(self, value):
        """Accept only objects that are zope.interface interfaces."""
        super(InterfaceField, self)._validate(value)
        if IInterface.providedBy(value):
            return
        raise NotAnInterface(
            value,
            self.__name__
        ).with_field_and_value(self, value)
def _validate_sequence(value_type, value, errors=None):
"""Validates a sequence value.
Returns a list of validation errors generated during the validation. If
no errors are generated, returns an empty list.
value_type is a field. value is the sequence being validated. errors is
an optional list of errors that will be prepended to the return value.
To illustrate, we'll use a text value type. All values must be unicode.
>>> field = TextLine(required=True)
To validate a sequence of various values:
>>> errors = _validate_sequence(field, (bytearray(b'foo'), u'bar', 1))
>>> errors
[WrongType(bytearray(b'foo'), <...>, ''), WrongType(1, <...>, '')]
The only valid value in the sequence is the second item. The others
generated errors.
We can use the optional errors argument to collect additional errors
for a new sequence:
>>> errors = _validate_sequence(field, (2, u'baz'), errors)
>>> errors # doctest: +NORMALIZE_WHITESPACE
[WrongType(bytearray(b'foo'), <...>, ''),
WrongType(1, <...>, ''),
WrongType(2, <...>, '')]
"""
if errors is None:
errors = []
if value_type is None:
return errors
for item in value:
try:
value_type.validate(item)
except ValidationError as error:
errors.append(error)
return errors
def _validate_uniqueness(self, value):
temp_values = []
for item in value:
if item in temp_values:
raise NotUnique(item).with_field_and_value(self, value)
temp_values.append(item)
@implementer(ICollection)
class Collection(MinMaxLen, Iterable):
    """
    A generic collection implementing
    :class:`zope.schema.interfaces.ICollection`.

    Subclasses can define the attribute ``value_type`` to be a field
    such as an :class:`Object` that will be checked for each member of
    the collection. This can then be omitted from the constructor call.

    They can also define the attribute ``_type`` to be a concrete
    class (or tuple of classes) that the collection itself will
    be checked to be an instance of. This cannot be set in the constructor.

    .. versionchanged:: 4.6.0
        Add the ability for subclasses to specify ``value_type``
        and ``unique``, and allow eliding them from the constructor.
    """

    value_type = None
    unique = False

    def __init__(self, value_type=_NotGiven, unique=_NotGiven, **kw):
        super(Collection, self).__init__(**kw)
        # Only override the class-level defaults when explicitly given,
        # so subclasses may pre-declare value_type/unique.
        if value_type is not _NotGiven:
            self.value_type = value_type
        bad_value_type = (
            self.value_type is not None
            and not IField.providedBy(self.value_type)
        )
        if bad_value_type:
            raise ValueError("'value_type' must be field instance.")
        if unique is not _NotGiven:
            self.unique = unique

    def bind(self, context):
        """See zope.schema._bootstrapinterfaces.IField."""
        bound = super(Collection, self).bind(context)
        # Rebinding value_type is necessary for choices with named
        # vocabularies, and possibly also for other fields.
        if bound.value_type is not None:
            bound.value_type = bound.value_type.bind(context)
        return bound

    def _validate(self, value):
        super(Collection, self)._validate(value)
        member_errors = _validate_sequence(self.value_type, value)
        if member_errors:
            try:
                raise WrongContainedType(
                    member_errors, self.__name__
                ).with_field_and_value(self, value)
            finally:
                # Break reference cycles
                del member_errors
        if self.unique:
            _validate_uniqueness(self, value)
#: An alternate name for :class:`.Collection`.
#:
#: .. deprecated:: 4.6.0
#: Use :class:`.Collection` instead.
# Kept so imports written before the 4.6.0 rename keep working.
AbstractCollection = Collection
@implementer(ISequence)
class Sequence(Collection):
    """
    A field whose value is an ordered sequence
    (``collections.abc.Sequence``).

    .. versionadded:: 4.6.0
    """
    _type = abc.Sequence
@implementer(ITuple)
class Tuple(Sequence):
    """A field whose value must be a builtin ``tuple``."""
    _type = tuple
@implementer(IMutableSequence)
class MutableSequence(Sequence):
    """
    A field whose value is a mutable sequence
    (``collections.abc.MutableSequence``).

    .. versionadded:: 4.6.0
    """
    _type = abc.MutableSequence
@implementer(IList)
class List(MutableSequence):
    """A field whose value must be a builtin ``list``."""
    _type = list
class _AbstractSet(Collection):
    # Set members are unique by definition, so uniqueness is forced on.
    unique = True

    def __init__(self, *args, **kwargs):
        super(_AbstractSet, self).__init__(*args, **kwargs)
        if not self.unique:
            # A caller passed unique=False, contradicting set semantics.
            raise TypeError(
                "__init__() got an unexpected keyword argument 'unique'")
@implementer(ISet)
class Set(_AbstractSet):
    """A field whose value must be a builtin ``set``."""
    _type = set
@implementer(IFrozenSet)
class FrozenSet(_AbstractSet):
    """A field representing a frozenset."""
    _type = frozenset
@implementer(IMapping)
class Mapping(MinMaxLen, Iterable):
    """
    A field whose value is a mapping (``collections.abc.Mapping``).

    .. versionadded:: 4.6.0
    """

    _type = abc.Mapping
    key_type = None
    value_type = None

    def __init__(self, key_type=None, value_type=None, **kw):
        super(Mapping, self).__init__(**kw)
        # Both constraints, when given, must themselves be fields.
        if key_type is not None and not IField.providedBy(key_type):
            raise ValueError("'key_type' must be field instance.")
        if value_type is not None and not IField.providedBy(value_type):
            raise ValueError("'value_type' must be field instance.")
        self.key_type = key_type
        self.value_type = value_type

    def _validate(self, value):
        super(Mapping, self)._validate(value)
        problems = []
        if self.value_type:
            problems = _validate_sequence(self.value_type, value.values(),
                                          problems)
        problems = _validate_sequence(self.key_type, value, problems)
        if problems:
            try:
                raise WrongContainedType(
                    problems, self.__name__
                ).with_field_and_value(self, value)
            finally:
                # Break reference cycles
                del problems

    def bind(self, object):
        """See zope.schema._bootstrapinterfaces.IField."""
        bound = super(Mapping, self).bind(object)
        # Rebinding key/value constraints is necessary for choices with
        # named vocabularies, and possibly also for other fields.
        if bound.key_type is not None:
            bound.key_type = bound.key_type.bind(object)
        if bound.value_type is not None:
            bound.value_type = bound.value_type.bind(object)
        return bound
@implementer(IMutableMapping)
class MutableMapping(Mapping):
    """
    A field whose value is a mutable mapping
    (``collections.abc.MutableMapping``).

    .. versionadded:: 4.6.0
    """
    _type = abc.MutableMapping
@implementer(IDict)
class Dict(MutableMapping):
    """A field whose value must be a builtin ``dict``."""
    _type = dict
import sys
from copy import copy
import zope.schema
from zope import event
from zope import interface
from zope.schema import interfaces
from zope.schema._bootstrapinterfaces import NO_VALUE
_marker = object()
@interface.implementer(interfaces.IFieldUpdatedEvent)
class FieldUpdatedEvent(object):
    """Event describing a field whose stored value just changed."""

    def __init__(self, obj, field, old_value, new_value):
        self.object = obj
        self.field = field
        self.old_value = old_value
        self.new_value = new_value

    # The implementation used to differ from the interfaces in that it
    # declared ``self.inst`` instead of ``self.object``. Keep ``inst``
    # as an alias for backwards compat.
    def _inst_get(self):
        return self.object

    def _inst_set(self, new_value):
        self.object = new_value

    inst = property(_inst_get, _inst_set)
class FieldProperty(object):
    """Computed attributes based on schema fields

    Field properties provide default values, data validation and error
    messages based on data found in field meta-data.

    Note that FieldProperties cannot be used with slots. They can only
    be used for attributes stored in instance dictionaries.
    """

    def __init__(self, field, name=None):
        if name is None:
            name = field.__name__
        self.__field = field
        self.__name = name

    def __get__(self, inst, klass):
        if inst is None:
            # Class-level access returns the descriptor itself.
            return self
        stored = inst.__dict__.get(self.__name, _marker)
        if stored is not _marker:
            return stored
        # Fall back to the (bound) field's default, if it has one.
        bound = self.__field.bind(inst)
        default = getattr(bound, 'default', _marker)
        if default is _marker:
            raise AttributeError(self.__name)
        return default

    def queryValue(self, inst, default):
        stored = inst.__dict__.get(self.__name, default)
        if stored is not default:
            return stored
        bound = self.__field.bind(inst)
        return getattr(bound, 'default', default)

    def __set__(self, inst, value):
        bound = self.__field.bind(inst)
        bound.validate(value)
        # Readonly fields may be initialized once but never reassigned.
        if bound.readonly and self.__name in inst.__dict__:
            raise ValueError(self.__name, 'field is readonly')
        oldvalue = self.queryValue(inst, NO_VALUE)
        inst.__dict__[self.__name] = value
        event.notify(FieldUpdatedEvent(inst, bound, oldvalue, value))

    def __getattr__(self, name):
        # Delegate unknown attributes to the underlying field.
        return getattr(self.__field, name)
def createFieldProperties(schema, omit=()):
    """For each field in `schema` create a FieldProperty on the class.

    :param schema: interface whose fields should be added to the class
        currently being defined.
    :param omit: iterable of field names to be omitted in creation.

    Usage::

        class A(object):
            zope.schema.fieldproperty.createFieldProperties(IMySchema)
    """
    # The default for ``omit`` used to be a mutable ``[]``; it is only
    # membership-tested, so an immutable empty tuple is a safe,
    # backward-compatible replacement.
    #
    # Inject the properties into the namespace of the class body that is
    # currently executing (our caller's frame).
    frame = sys._getframe(1)
    for name in zope.schema.getFieldNamesInOrder(schema):
        if name in omit:
            continue
        frame.f_locals[name] = FieldProperty(schema[name])
class FieldPropertyStoredThroughField(object):
    """
    Like ``FieldProperty``, but values are stored and retrieved through
    a renamed *copy* of the field itself (``field.set`` / ``field.query``)
    rather than directly in the instance dictionary.
    """

    def __init__(self, field, name=None):
        if name is None:
            name = field.__name__
        # Copy the field so renaming it does not mutate the original.
        self.field = copy(field)
        self.field.__name__ = "__st_%s_st" % self.field.__name__
        self.__name = name

    def setValue(self, inst, field, value):
        field.set(inst, value)

    def getValue(self, inst, field):
        # Returns the module-level ``_marker`` sentinel when unset.
        return field.query(inst, _marker)

    def queryValue(self, inst, field, default):
        return field.query(inst, default)

    def __getattr__(self, name):
        # Delegate unknown attributes to the wrapped field.
        return getattr(self.field, name)

    def __get__(self, inst, klass):
        if inst is None:
            # Class-level access returns the descriptor itself.
            return self
        field = self.field.bind(inst)
        value = self.getValue(inst, field)
        if value is _marker:
            value = getattr(field, 'default', _marker)
            if value is _marker:
                raise AttributeError(self.__name)
        return value

    def __set__(self, inst, value):
        field = self.field.bind(inst)
        field.validate(value)
        if field.readonly:
            if self.queryValue(inst, field, _marker) is _marker:
                # First assignment to a readonly field: temporarily lift
                # the readonly flag so the initial value can be stored.
                field.readonly = False
                self.setValue(inst, field, value)
                field.readonly = True
                return
            else:
                raise ValueError(self.__name, 'field is readonly')
        oldvalue = self.queryValue(inst, field, NO_VALUE)
        self.setValue(inst, field, value)
        event.notify(FieldUpdatedEvent(inst, self.field, oldvalue, value))
from zope.interface import implementedBy
from zope.interface import providedBy
from zope.interface.declarations import Declaration
from zope.interface.interface import Method
class FieldReadAccessor(Method):
    """Field read accessor
    """

    def __init__(self, field):
        self.field = field
        Method.__init__(self, '')
        self.__doc__ = 'get %s' % field.__doc__

    # A read field accessor is a method and a field.
    # A read accessor is a decorator of a field, using the given
    # field's properties to provide meta data.
    @property
    def __provides__(self):
        provided = providedBy(self.field)
        implemented = implementedBy(FieldReadAccessor)
        # Declaration.__add__ is not very smart in zope.interface 5.0.0.
        # It's very easy to produce C3 inconsistent orderings using
        # it, because it uses itself plus any new interfaces from the
        # second argument as the ``__bases__``, ignoring their
        # relative order.
        #
        # Here, we can easily work around that. We know that ``field``
        # will be some sub-class of Attribute, just as we are
        # (FieldReadAccessor <- Method <- Attribute). So there will be
        # overlap, and commonly only IMethod would be added to the end
        # of the list of bases; but since IMethod extends IAttribute,
        # having IAttribute earlier in the bases will be inconsistent.
        # The fix here is to remove those duplicates from the first
        # element so that we don't get into that situation.
        provided_list = list(provided)
        for iface in implemented:
            if iface in provided_list:
                provided_list.remove(iface)
        provided = Declaration(*provided_list)
        # pylint:disable=broad-except
        try:
            return provided + implemented
        except BaseException as e:  # pragma: no cover
            # Sadly, zope.interface catches and silently ignores
            # any exceptions raised in ``__providedBy__``,
            # which is the class descriptor that invokes ``__provides__``.
            # So, for example, if we're in strict C3 mode and fail to produce
            # a resolution order, that gets ignored and we fallback to just
            # what's implemented by the class.
            # That's not good. Do our best to propagate the exception by
            # returning it. There will be downstream errors later.
            return e

    def getSignatureString(self):
        # Read accessors take no arguments.
        return '()'

    def getSignatureInfo(self):
        return {'positional': (),
                'required': (),
                'optional': (),
                'varargs': None,
                'kwargs': None,
                }

    def get(self, object):
        # Reading means calling the accessor method on the object.
        return getattr(object, self.__name__)()

    def query(self, object, default=None):
        try:
            f = getattr(object, self.__name__)
        except AttributeError:
            return default
        else:
            return f()

    def set(self, object, value):
        if self.readonly:
            raise TypeError("Can't set values on read-only fields")
        # Writes go through the paired write accessor (see ``accessors``).
        getattr(object, self.writer.__name__)(value)

    def __getattr__(self, name):
        # Delegate unknown attributes to the wrapped field.
        return getattr(self.field, name)

    def bind(self, object):
        clone = self.__class__.__new__(self.__class__)
        clone.__dict__.update(self.__dict__)
        clone.field = self.field.bind(object)
        return clone
class FieldWriteAccessor(Method):
    """Method that stores a field's value; paired with FieldReadAccessor."""

    def __init__(self, field):
        Method.__init__(self, '')
        self.field = field
        self.__doc__ = 'set %s' % field.__doc__

    def getSignatureString(self):
        # Write accessors take exactly one positional argument.
        return '(newvalue)'

    def getSignatureInfo(self):
        signature = {
            'positional': ('newvalue',),
            'required': ('newvalue',),
            'optional': (),
            'varargs': None,
            'kwargs': None,
        }
        return signature
def accessors(field):
    """Yield a read accessor for *field* and, unless the field is
    readonly, a write accessor wired to it."""
    read = FieldReadAccessor(field)
    yield read
    if field.readonly:
        return
    write = FieldWriteAccessor(field)
    read.writer = write
    yield write
from zope.schema._bootstrapfields import get_schema_validation_errors
from zope.schema._bootstrapfields import get_validation_errors
from zope.schema._bootstrapfields import getFields
# Public schema-introspection helpers exported by this module.
__all__ = [
    'getFieldNames',
    'getFields',
    'getFieldsInOrder',
    'getFieldNamesInOrder',
    'getValidationErrors',
    'getSchemaValidationErrors',
]
def getFieldNames(schema):
    """Return a list of all the Field names in a schema."""
    return list(getFields(schema))
def getFieldsInOrder(schema, _field_key=lambda x: x[1].order):
    """Return a list of (name, field) tuples in native schema order."""
    pairs = getFields(schema).items()
    return sorted(pairs, key=_field_key)
def getFieldNamesInOrder(schema):
    """Return a list of all the Field names in a schema in schema order."""
    return [name for name, _field in getFieldsInOrder(schema)]
def getValidationErrors(schema, value):
    """
    Validate that *value* conforms to the schema interface *schema*.

    This includes checking for any schema validation errors (using
    `getSchemaValidationErrors`). If that succeeds, then we proceed to
    check for any declared invariants.

    Note that this does not include a check to see if the *value*
    actually provides the given *schema*.

    :return: A sequence of (name, `zope.interface.Invalid`) tuples,
        where *name* is None if the error was from an invariant.
        If the sequence is empty, there were no errors.
    """
    schema_error_dict, invariant_errors = get_validation_errors(
        schema,
        value,
    )
    if schema_error_dict or invariant_errors:
        result = list(schema_error_dict.items())
        result.extend((None, e) for e in invariant_errors)
        return result
    # Valid! Yay!
    return []
def getSchemaValidationErrors(schema, value):
    """
    Validate that *value* conforms to the schema interface *schema*.

    All :class:`zope.schema.interfaces.IField` members of the *schema*
    are validated after being bound to *value*. (Note that we do not check
    for arbitrary :class:`zope.interface.Attribute` members being present.)

    :return: A list of (name, `ValidationError`) tuples. A non-empty
        list indicates validation failed.
    """
    # ``dict.items()`` never returns a list on Python 3, so the previous
    # ``isinstance(items, list)`` fast path was dead code; always convert.
    return list(get_schema_validation_errors(schema, value).items())
"""Schema package constructor
"""
from zope.schema._bootstrapinterfaces import NO_VALUE
# Field APIs
from zope.schema._field import ASCII
from zope.schema._field import URI
from zope.schema._field import ASCIILine
from zope.schema._field import Bool
from zope.schema._field import Bytes
from zope.schema._field import BytesLine
from zope.schema._field import Choice
from zope.schema._field import Collection
from zope.schema._field import Complex
from zope.schema._field import Container
from zope.schema._field import Date
from zope.schema._field import Datetime
from zope.schema._field import Decimal
from zope.schema._field import Dict
from zope.schema._field import DottedName
from zope.schema._field import Field
from zope.schema._field import Float
from zope.schema._field import FrozenSet
from zope.schema._field import Id
from zope.schema._field import Int
from zope.schema._field import Integral
from zope.schema._field import InterfaceField
from zope.schema._field import Iterable
from zope.schema._field import List
from zope.schema._field import Mapping
from zope.schema._field import MinMaxLen
from zope.schema._field import MutableMapping
from zope.schema._field import MutableSequence
from zope.schema._field import NativeString
from zope.schema._field import NativeStringLine
from zope.schema._field import Number
from zope.schema._field import Object
from zope.schema._field import Orderable
from zope.schema._field import Password
from zope.schema._field import PythonIdentifier
from zope.schema._field import Rational
from zope.schema._field import Real
from zope.schema._field import Sequence
from zope.schema._field import Set
from zope.schema._field import SourceText
from zope.schema._field import Text
from zope.schema._field import TextLine
from zope.schema._field import Time
from zope.schema._field import Timedelta
from zope.schema._field import Tuple
# Schema APIs
from zope.schema._schema import getFieldNames
from zope.schema._schema import getFieldNamesInOrder
from zope.schema._schema import getFields
from zope.schema._schema import getFieldsInOrder
from zope.schema._schema import getSchemaValidationErrors
from zope.schema._schema import getValidationErrors
# Accessor API
from zope.schema.accessors import accessors
# Error API
from zope.schema.interfaces import ValidationError
# Names exported by ``zope.schema``: field classes, schema-introspection
# helpers, the accessor factory, and the base ValidationError.
__all__ = [
    'ASCII',
    'ASCIILine',
    'Bool',
    'Bytes',
    'BytesLine',
    'Choice',
    'Collection',
    'Complex',
    'Container',
    'Date',
    'Datetime',
    'Decimal',
    'Dict',
    'DottedName',
    'Field',
    'Float',
    'FrozenSet',
    'Id',
    'Int',
    'Integral',
    'InterfaceField',
    'Iterable',
    'List',
    'Mapping',
    'MutableMapping',
    'MutableSequence',
    'MinMaxLen',
    'NativeString',
    'NativeStringLine',
    'Number',
    'Object',
    'Orderable',
    'PythonIdentifier',
    'Password',
    'Rational',
    'Real',
    'Set',
    'Sequence',
    'SourceText',
    'Text',
    'TextLine',
    'Time',
    'Timedelta',
    'Tuple',
    'URI',
    'getFields',
    'getFieldsInOrder',
    'getFieldNames',
    'getFieldNamesInOrder',
    'getValidationErrors',
    'getSchemaValidationErrors',
    'accessors',
    'ValidationError',
    'NO_VALUE'
]
import os
import shutil
import sys
import tempfile
from optparse import OptionParser
# Scratch directory that receives the freshly installed eggs; it is
# removed again at the very end of the script.
tmpeggs = tempfile.mkdtemp()

usage = '''\
[DESIRED PYTHON FOR BUILDOUT] bootstrap.py [options]
Bootstraps a buildout-based project.
Simply run this script in a directory containing a buildout.cfg, using the
Python that you want bin/buildout to use.
Note that by using --find-links to point to local resources, you can keep
this script from going over the network.
'''
# Command-line interface for the bootstrap script.
parser = OptionParser(usage=usage)
parser.add_option("-v", "--version", help="use a specific zc.buildout version")
parser.add_option("-t", "--accept-buildout-test-releases",
                  dest='accept_buildout_test_releases',
                  action="store_true", default=False,
                  help=("Normally, if you do not specify a --version, the "
                        "bootstrap script and buildout gets the newest "
                        "*final* versions of zc.buildout and its recipes and "
                        "extensions for you. If you use this flag, "
                        "bootstrap and buildout will get the newest releases "
                        "even if they are alphas or betas."))
parser.add_option("-c", "--config-file",
                  help=("Specify the path to the buildout configuration "
                        "file to be used."))
parser.add_option("-f", "--find-links",
                  help=("Specify a URL to search for buildout releases"))
parser.add_option("--allow-site-packages",
                  action="store_true", default=False,
                  help=("Let bootstrap.py use existing site packages"))

options, args = parser.parse_args()
######################################################################
# load/install setuptools

try:
    if options.allow_site_packages:
        import setuptools
        import pkg_resources
    from urllib.request import urlopen
except ImportError:
    # Python 2 fallback.
    from urllib2 import urlopen

# NOTE(review): this downloads and executes remote code. That is inherent
# to bootstrap scripts, but it should only ever run against the trusted
# bootstrap.pypa.io URL.
ez = {}
exec(urlopen('https://bootstrap.pypa.io/ez_setup.py').read(), ez)

if not options.allow_site_packages:
    # ez_setup imports site, which adds site packages
    # this will remove them from the path to ensure that incompatible versions
    # of setuptools are not in the path
    import site
    # inside a virtualenv, there is no 'getsitepackages'.
    # We can't remove these reliably
    if hasattr(site, 'getsitepackages'):
        for sitepackage_path in site.getsitepackages():
            sys.path[:] = [x for x in sys.path if sitepackage_path not in x]

setup_args = dict(to_dir=tmpeggs, download_delay=0)
ez['use_setuptools'](**setup_args)
import setuptools
import pkg_resources

# This does not (always?) update the default working set. We will
# do it.
for path in sys.path:
    if path not in pkg_resources.working_set.entries:
        pkg_resources.working_set.add_entry(path)
######################################################################
# Install buildout

ws = pkg_resources.working_set

# easy_install command that installs into the scratch egg directory.
cmd = [sys.executable, '-c',
       'from setuptools.command.easy_install import main; main()',
       '-mZqNxd', tmpeggs]

# The environment variable is a testing hook; otherwise honor -f.
find_links = os.environ.get(
    'bootstrap-testing-find-links',
    options.find_links or
    ('http://downloads.buildout.org/'
     if options.accept_buildout_test_releases else None)
)
if find_links:
    cmd.extend(['-f', find_links])

setuptools_path = ws.find(
    pkg_resources.Requirement.parse('setuptools')).location

requirement = 'zc.buildout'
version = options.version
if version is None and not options.accept_buildout_test_releases:
    # Figure out the most recent final version of zc.buildout.
    import setuptools.package_index
    _final_parts = '*final-', '*final'

    def _final_version(parsed_version):
        # True when no pre-release marker ('*alpha', '*beta', ...) is
        # present in the parsed version tuple.
        for part in parsed_version:
            if (part[:1] == '*') and (part not in _final_parts):
                return False
        return True

    index = setuptools.package_index.PackageIndex(
        search_path=[setuptools_path])
    if find_links:
        index.add_find_links((find_links,))
    req = pkg_resources.Requirement.parse(requirement)
    if index.obtain(req) is not None:
        best = []
        bestv = None
        for dist in index[req.project_name]:
            distv = dist.parsed_version
            if _final_version(distv):
                if bestv is None or distv > bestv:
                    best = [dist]
                    bestv = distv
                elif distv == bestv:
                    best.append(dist)
        if best:
            best.sort()
            version = best[-1].version
if version:
    # Pin easy_install to the selected version.
    requirement = '=='.join((requirement, version))
cmd.append(requirement)
import subprocess
# Run easy_install in a child process so the freshly bootstrapped
# setuptools is picked up via PYTHONPATH.
if subprocess.call(cmd, env=dict(os.environ, PYTHONPATH=setuptools_path)) != 0:
    raise Exception(
        "Failed to execute command:\n%s" % repr(cmd)[1:-1])

######################################################################
# Import and run buildout

ws.add_entry(tmpeggs)
ws.require(requirement)
import zc.buildout.buildout

# Default to the 'bootstrap' command when none was given.
if not [a for a in args if '=' not in a]:
    args.append('bootstrap')

# if -c was provided, we push it back into args for buildout's main function
if options.config_file is not None:
    args[0:0] = ['-c', options.config_file]

zc.buildout.buildout.main(args)
shutil.rmtree(tmpeggs)
"""Security related configuration fields.
"""
__docformat__ = 'restructuredtext'
from zope.configuration.fields import GlobalObject
from zope.configuration.fields import MessageID
from zope.interface import Interface
from zope.schema import Id
from zope.schema.interfaces import IFromUnicode
from zope.security._compat import implementer_if_needed
from zope.security.interfaces import PUBLIC_PERMISSION_NAME as zope_Public
from zope.security.management import setSecurityPolicy
from zope.security.permission import checkPermission
@implementer_if_needed(IFromUnicode)
class Permission(Id):
    r"""This field describes a permission.
    """

    def fromUnicode(self, value):
        # Honor any permission re-mappings registered on the context
        # (see ``redefinePermission``).
        permission_id = super().fromUnicode(value)
        mapping = getattr(self.context, 'permission_mapping', {})
        return mapping.get(permission_id, permission_id)

    def _validate(self, value):
        super()._validate(value)
        if value == zope_Public:
            # The public permission never needs a registered utility.
            return
        self.context.action(
            discriminator=None,
            callable=checkPermission,
            args=(None, value),
            # Delay execution till end. This is an
            # optimization. We don't want to intersperse utility
            # lookup, done when checking permissions, with utility
            # definitions. Utility lookup is expensive after
            # utility definition, as extensive caches have to be
            # rebuilt.
            order=9999999,
        )
class ISecurityPolicyDirective(Interface):
    """Defines the security policy that will be used for Zope."""

    # Schema for the directive handled by ``securityPolicy`` below.
    component = GlobalObject(
        title="Component",
        description="Pointer to the object that will handle the security.",
        required=True)
def securityPolicy(_context, component):
    """Directive handler: defer installing *component* as the default
    security policy via the configuration action machinery."""
    _context.action(
        discriminator='defaultPolicy',
        callable=setSecurityPolicy,
        args=(component,),
    )
class IPermissionDirective(Interface):
    """Define a new security object."""

    # Schema for the directive handled by ``permission`` below.
    id = Id(
        title="ID",
        description="ID as which this object will be known and used.",
        required=True)

    title = MessageID(
        title="Title",
        description="Provides a title for the object.",
        required=True)

    description = MessageID(
        title="Description",
        description="Provides a description for the object.",
        required=False)
def permission(_context, id, title, description=''):
    """Directive handler: register a ``Permission`` utility under *id*."""
    # Local imports: presumably to avoid circular imports at module
    # load time -- TODO confirm.
    from zope.component.zcml import utility

    from zope.security.interfaces import IPermission
    from zope.security.permission import Permission
    permission = Permission(id, title, description)
    utility(_context, IPermission, permission, name=id)
class IRedefinePermission(Interface):
    """Define a permission to replace another permission."""

    # ``from`` is a Python keyword, hence the trailing underscore.
    from_ = Permission(
        title="Original permission",
        description="Original permission ID to redefine.",
        required=True)

    to = Permission(
        title="Substituted permission",
        description="Substituted permission ID.",
        required=True)
def redefinePermission(_context, from_, to):
    """Record on the enclosing context that permission *from_* should be
    replaced by *to* (consulted by ``Permission.fromUnicode``)."""
    target = _context.context
    # Lazily create the mapping on first use.
    if not hasattr(target, 'permission_mapping'):
        target.permission_mapping = {}
    target.permission_mapping[from_] = to
from zope.interface import Attribute
from zope.interface import Interface
from zope.interface import implementer
from zope.interface.common.interfaces import IAttributeError
from zope.interface.common.interfaces import IException
from zope.schema import NativeStringLine
from zope.schema import Text
from zope.schema import TextLine
from zope.security.i18n import ZopeMessageFactory as _
#: The name (id) of the registered :class:`IPermission` utility that signifies
#: that the protected attribute is public.
#:
#: .. versionadded:: 4.2.0
# Imported as ``zope_Public`` by ``zope.security.zcml``.
PUBLIC_PERMISSION_NAME = 'zope.Public'
class IUnauthorized(IException):
    """
    The action is not authorized.

    Some user was not allowed to access a resource.

    Implemented in :class:`Unauthorized`.
    """
@implementer(IUnauthorized)
class Unauthorized(Exception):
    """
    Raised when some user wasn't allowed to access a resource.

    This is the default implementation of :class:`IUnauthorized`.
    """
class IForbidden(IException):
    """
    A resource cannot be accessed under any circumstances

    Implemented in :class:`Forbidden`.
    """
    # Unlike IUnauthorized, this does not depend on who the user is.
@implementer(IForbidden)
class Forbidden(Exception):
    """
    Raised when a resource cannot be accessed under any circumstances.

    This is the default implementation of :class:`IForbidden`.
    """
class IForbiddenAttribute(IForbidden, IAttributeError):
    """
    An attribute is unavailable because it is forbidden (private).

    Implemented in :class:`ForbiddenAttribute`.
    """
    # Extends both IForbidden and IAttributeError so callers can catch
    # it as either kind of failure.
@implementer(IForbiddenAttribute)
class ForbiddenAttribute(Forbidden, AttributeError):
    """
    Raised when an attribute is unavailable because it is forbidden
    (private).

    This is the default implementation of :class:`IForbiddenAttribute`.
    """
class ISecurityManagement(Interface):
    """
    Public security management API.

    This is implemented by :mod:`zope.security.management`.
    """

    def getSecurityPolicy():
        """Get the system default security policy."""

    def setSecurityPolicy(aSecurityPolicy):
        """Set the system default security policy.

        This method should only be called by system startup code. It
        should never, for example, be called during a web request.
        """
class ISecurityChecking(Interface):
    """
    Public security API.
    """

    def checkPermission(permission, object, interaction=None):
        """
        Return whether security policy allows permission on object.

        :param str permission: The permission name.
        :param object: The object being accessed according to the permission.
        :keyword interaction: An :class:`IInteraction`, providing access to
            information such as authenticated principals. If it is None, the
            current interaction is used.
        """
class ISecurityProxyFactory(Interface):
    """
    A factory for creating security-proxied objects.

    See :class:`zope.security.checker.ProxyFactory` for the
    default implementation.
    """

    def __call__(object, checker=None):
        """
        Create a security proxy

        If a checker (:class:`IChecker`) is given, then use it,
        otherwise, try to figure out a checker.

        If the object is already a security proxy, then it will be
        returned.
        """
class IChecker(Interface):
    """
    Security-proxy plugin objects that implement low-level checks.

    The checker is responsible for creating proxies for
    operation return values, via the ``proxy`` method.

    There are :meth:`check_getattr` and :meth:`check_setattr` methods
    for checking getattr and setattr, and a :meth:`check` method for all
    other operations.

    The check methods will raise errors if access is not allowed.
    They return no value.

    Example (for ``__getitem__``)::

        checker.check(ob, "__getitem__")
        return checker.proxy(ob[key])

    .. seealso:: :mod:`zope.security.checker`
    """

    def check_getattr(ob, name):
        """
        Check whether attribute access is allowed.

        If a checker implements ``__setitem__``, then ``__setitem__``
        will be called rather than ``check`` to ascertain whether an
        operation is allowed. This is a hack that allows significantly
        greater performance due to the fact that low-level operator
        access is much faster than method access.

        :raises: :class:`Unauthorized`
        :raises: :class:`Forbidden`
        :return: Nothing
        """

    def check_setattr(ob, name):
        """
        Check whether attribute assignment is allowed.

        If a checker implements ``__setitem__``, then ``__setitem__``
        will be called rather than ``check`` to ascertain whether an
        operation is allowed. This is a hack that allows significantly
        greater performance due to the fact that low-level operator
        access is much faster than method access.

        :raises: :class:`Unauthorized`
        :raises: :class:`Forbidden`
        :return: Nothing
        """

    def check(ob, operation):
        """
        Check whether *operation* is allowed.

        The operation name is the Python special method name,
        e.g. "__getitem__".

        May raise Unauthorized or Forbidden. Returns no value.

        If a checker implements ``__setitem__``, then ``__setitem__``
        will be called rather than ``check`` to ascertain whether an
        operation is allowed. This is a hack that allows significantly
        greater performance due to the fact that low-level operator
        access is much faster than method access.

        :raises: :class:`Unauthorized`
        :raises: :class:`Forbidden`
        :return: Nothing
        """

    def proxy(value):
        """
        Return a security proxy for the *value*.

        If a checker implements ``__getitem__``, then ``__getitem__``
        will be called rather than ``proxy`` to proxy the value. This
        is a hack that allows significantly greater performance due to
        the fact that low-level operator access is much faster than
        method access.
        """
class INameBasedChecker(IChecker):
    """
    Security checker that uses permissions to check attribute
    access.
    """

    def permission_id(name):
        """
        Return the permission used to check attribute access on *name*.

        This permission is used by both :meth:`check` and
        :meth:`check_getattr`.
        """

    def setattr_permission_id(name):
        """
        Return the permission used to check attribute assignment on *name*.

        This permission is used by :meth:`check_setattr`.
        """
class ISecurityPolicy(Interface):
    """
    A factory to get :class:`IInteraction` objects.

    .. seealso:: :mod:`zope.security.simplepolicies`
       For default implementations.
    """

    def __call__(participation=None):
        """
        Creates and returns a new :class:`IInteraction` for a given
        request.

        If *participation* is not None, it is added to the new interaction.
        """
class IInteraction(Interface):
    """
    A representation of an interaction between some actors and the
    system.
    """
    # The participations (see IParticipation) currently taking part.
    participations = Attribute("""An iterable of participations.""")
    def add(participation):
        """Add a participation."""
    def remove(participation):
        """Remove a participation."""
    def checkPermission(permission, object):
        """Return whether security context allows permission on object.

        :param str permission: A permission name
        :param object: The object being accessed according to the permission
        :return: Whether the access is allowed or not.
        :rtype: bool
        """
class IParticipation(Interface):
    """
    A single participant in an interaction.
    """
    # Back-reference to the interaction this participation belongs to.
    interaction = Attribute("The interaction")
    principal = Attribute("The authenticated :class:`IPrincipal`")
class NoInteraction(Exception):
    """No interaction started

    Raised when a current interaction is required but none is active.
    """
class IInteractionManagement(Interface):
    """
    Interaction management API.

    Every thread has at most one active interaction at a time.

    .. seealso:: :mod:`zope.security.management`
       That module provides the default implementation.
    """
    def newInteraction(participation=None):
        """
        Start a new interaction.

        If *participation* is not None, it is added to the new interaction.

        Raises an error if the calling thread already has an interaction.
        """
    def queryInteraction():
        """
        Return the current interaction.

        Return None if there is no interaction.
        """
    def getInteraction():
        """
        Return the current interaction.

        :raise NoInteraction: if there isn't a current interaction.
        """
    def endInteraction():
        """
        End the current interaction.

        Does nothing if there is no interaction.
        """
class IPrincipal(Interface):
    """
    Principals are security artifacts that execute actions in a
    security environment.

    The most common examples of principals include user and group
    objects.

    It is likely that ``IPrincipal`` objects will have associated
    views used to list principals in management interfaces. For
    example, a system in which other meta-data are provided for
    principals might extend ``IPrincipal`` and register a view for the
    extended interface that displays the extended information.
    """
    # Immutable, unique identifier used for grants and lookups.
    id = TextLine(
        title=_("Id"),
        description=_("The unique identification of the principal."),
        required=True,
        readonly=True)
    # Human-readable name, primarily for display purposes.
    title = TextLine(
        title=_("Title"),
        description=_("The title of the principal. "
                      "This is usually used in the UI."),
        required=False)
    description = Text(
        title=_("Description"),
        description=_("A detailed description of the principal."),
        required=False)
class ISystemPrincipal(IPrincipal):
    """
    A principal that represents the system (application) itself.

    Typically a system principal is granted extra capabilities
    or excluded from certain checks. End users should *not* be able
    to act as the system principal.

    Because speed is often a factor, a single instance of a system principal
    is found at ``zope.security.management.system_user`` and can
    be tested for by identity (e.g., ``if principal is system_user:``).
    """
class IGroupAwarePrincipal(IPrincipal):
    """
    Group aware principal interface.

    Extends ``IPrincipal`` to contain direct (non-transitive) group
    information.
    """
    groups = Attribute(
        'An iterable of :class:`IGroup` objects to which the principal'
        ' directly belongs')
class IGroupClosureAwarePrincipal(IGroupAwarePrincipal):
    """
    A group-aware principal that can recursively flatten the membership
    of groups to return all the groups, direct and indirect.
    """
    allGroups = Attribute(
        "An iterable of the full closure of the principal's groups.")
class IGroup(IPrincipal):
    """
    Group of principals

    A group is itself a principal, so groups may contain groups.
    """
class IMemberGetterGroup(IGroup):
    """
    A group that can get (enumerate) its members.
    """
    def getMembers():
        """Return an iterable of the members of the group"""
class IMemberAwareGroup(IMemberGetterGroup):
    """
    A group that can both set and get its members.
    """
    def setMembers(value):
        """
        Set members of group to the principal IDs in the iterable
        *value*, replacing any current membership.
        """
class IPermission(Interface):
    """A permission object.

    Note that the ZCML ``<permission>`` directive restricts the ``id`` to
    be an identifier (a dotted name or a URI), but this interface allows
    any native string.
    """
    # The identifier under which the permission is registered and granted.
    id = NativeStringLine(
        title=_("Id"),
        description=_("Id as which this permission will be known and used."),
        readonly=True,
        required=True)
    title = TextLine(
        title=_("Title"),
        description=_("Provides a title for the permission."),
        required=True)
    description = Text(
        title=_("Description"),
        description=_("Provides a description for the permission."),
        required=False)
from zope.security.checker import Checker
from zope.security.checker import CheckerPublic
from zope.security.checker import defineChecker
from zope.security.checker import getCheckerForInstancesOf
from zope.security.interfaces import PUBLIC_PERMISSION_NAME as zope_Public
def protectName(class_, name, permission):
    """Require *permission* to read attribute *name* on *class_*.

    A checker is created and registered for *class_* on demand.
    """
    checker = getCheckerForInstancesOf(class_)
    if checker is None:
        # No checker registered for this class yet; start with an empty one.
        checker = Checker({}, {})
        defineChecker(class_, checker)
    if permission == zope_Public:
        # The public permission is represented by the CheckerPublic marker.
        permission = CheckerPublic
    # get_permissions is a plain dict, because this module created it
    # (either just above or on an earlier call), so in-place update is safe.
    checker.get_permissions[name] = permission
def protectSetAttribute(class_, name, permission):
    """Require *permission* to assign attribute *name* on *class_*.

    A checker is created and registered for *class_* on demand.
    """
    checker = getCheckerForInstancesOf(class_)
    if checker is None:
        # No checker registered for this class yet; start with an empty one.
        checker = Checker({}, {})
        defineChecker(class_, checker)
    if permission == zope_Public:
        # The public permission is represented by the CheckerPublic marker.
        permission = CheckerPublic
    # set_permissions is a real dict whenever the checker came from this
    # module.  Beware: a checker built by hand (say via NamesChecker or
    # MultiChecker) may have left set_permissions as None, since Checker
    # defaults a missing set_permissions parameter to None.  Jim says this
    # doesn't happen with the C version of the checkers because they use a
    # 'shared dummy dict'.
    checker.set_permissions[name] = permission
def protectLikeUnto(class_, like_unto):
    """Copy the get/set attribute protections of *like_unto* onto *class_*.

    Does nothing when *like_unto* has no checker registered.
    """
    unto_checker = getCheckerForInstancesOf(like_unto)
    if unto_checker is None:
        return
    checker = getCheckerForInstancesOf(class_)
    if checker is None:
        checker = Checker({}, {})
        defineChecker(class_, checker)
    # Both permission mappings are plain dicts because this module created
    # them.  Beware: a checker built by hand (say via NamesChecker or
    # MultiChecker) may have left set_permissions as None, since Checker
    # defaults a missing set_permissions parameter to None.  Jim says this
    # doesn't happen with the C version of the checkers because they use a
    # 'shared dummy dict'.
    checker.get_permissions.update(unto_checker.get_permissions)
    checker.set_permissions.update(unto_checker.set_permissions)
from zope.proxy import ProxyBase
from zope.proxy import getProxiedObject
from zope.proxy.decorator import SpecificationDecoratorBase
from zope.security.checker import CombinedChecker
from zope.security.checker import selectChecker
from zope.security.proxy import Proxy
from zope.security.proxy import getChecker
class DecoratedSecurityCheckerDescriptor:
    """Descriptor for a Decorator that provides a decorated security checker.

    On attribute access it combines the checker of the proxied
    (decorated) object with the checker selected for the decorator
    itself.
    """
    def __get__(self, inst, cls=None):
        if inst is None:
            # Accessed on the class itself: return the descriptor.
            return self
        else:
            proxied_object = getProxiedObject(inst)
            if isinstance(proxied_object, Proxy):
                # Already security-proxied: reuse its checker.
                checker = getChecker(proxied_object)
            else:
                checker = getattr(proxied_object, '__Security_checker__', None)
                if checker is None:
                    checker = selectChecker(proxied_object)
            # The checker declared for the decorator (wrapper) itself.
            wrapper_checker = selectChecker(inst)
            if wrapper_checker is None and checker is None:
                raise AttributeError("%r has no attribute %r" %
                                     (proxied_object.__class__.__name__,
                                      '__Security_checker__'))
            elif wrapper_checker is None:
                return checker
            elif checker is None:
                return wrapper_checker
            else:
                # Both exist: the wrapper's checker is consulted first,
                # falling back to the decorated object's checker.
                return CombinedChecker(wrapper_checker, checker)
    def __set__(self, inst, value):
        # Data descriptor: replacing the checker on instances is forbidden.
        raise TypeError("Can't set __Security_checker__ on a decorated object")
class SecurityCheckerDecoratorBase(ProxyBase):
    """Base class for proxy that provides additional security declarations."""
    # Combines the decorator's own checker with the proxied object's.
    __Security_checker__ = DecoratedSecurityCheckerDescriptor()
class DecoratorBase(SpecificationDecoratorBase, SecurityCheckerDecoratorBase):
    """Base class for a proxy that provides both additional interfaces and
    security declarations."""
# zope.location was made independent of security. To work together with
# security, we re-inject the DecoratedSecurityCheckerDescriptor onto the
# location proxy from here.
# This is the only sane place we found for doing it: it kicks in as soon
# as someone starts using security proxies.
# Note: this is a deliberate module-import side effect (monkey patch).
import zope.location.location  # noqa: E402 module level import not at top
zope.location.location.LocationProxy.__Security_checker__ = (
    DecoratedSecurityCheckerDescriptor())
import functools
import sys
from zope.proxy import PyProxyBase
from zope.security._compat import PURE_PYTHON
def _check_name(meth, wrap_result=True):
    """Decorate *meth* so that the proxy's checker runs first.

    The checker's ``check`` is invoked for the method's name before the
    call; unless *wrap_result* is false, the result is passed through
    ``checker.proxy`` on the way out.
    """
    name = meth.__name__
    def _wrapper(self, *args, **kw):
        wrapped = super(PyProxyBase, self).__getattribute__('_wrapped')
        checker = super(PyProxyBase, self).__getattribute__('_checker')
        checker.check(wrapped, name)
        result = meth(self, *args, **kw)
        return checker.proxy(result) if wrap_result else result
    return functools.update_wrapper(_wrapper, meth)
def _check_name_inplace(meth):
    # Decorator for augmented-assignment methods (__iadd__ and friends):
    # run the checker first, then perform the operation in place when the
    # wrapped object supports it.
    name = meth.__name__
    def _wrapper(self, *args, **kw):
        wrapped = super(PyProxyBase, self).__getattribute__('_wrapped')
        checker = super(PyProxyBase, self).__getattribute__('_checker')
        checker.check(wrapped, name)
        w_meth = getattr(wrapped, name, None)
        if w_meth is not None:
            # The proxy object cannot change; we are modifying in place.
            self._wrapped = w_meth(*args, **kw)
            return self
        # No in-place method on the wrapped object: fall back to the plain
        # binary operator ('__iadd__' -> '__add__') and proxy the new result.
        x_name = '__%s__' % name[3:-2]
        return ProxyPy(getattr(wrapped, x_name)(*args, **kw), checker)
    return functools.update_wrapper(_wrapper, meth)
def _fmt_address(obj):
# Try to replicate PyString_FromString("%p", obj), which actually uses
# the platform sprintf(buf, "%p", obj), which we cannot access from Python
# directly (and ctypes seems like overkill).
if sys.platform != 'win32':
return '0x%0x' % id(obj)
if sys.maxsize < 2**32: # pragma: no cover
return '0x%08X' % id(obj)
return '0x%016X' % id(obj) # pragma: no cover
class ProxyPy(PyProxyBase):
    """
    The pure-Python reference implementation of a security proxy.

    This should normally not be created directly, instead use the
    :func:`~.ProxyFactory`.

    You can choose to use this implementation instead of the C implementation
    by default by setting the ``PURE_PYTHON`` environment variable before
    :mod:`zope.security` is imported.
    """
    __slots__ = ('_wrapped', '_checker')

    def __new__(cls, value, checker):
        inst = super().__new__(cls)
        inst._wrapped = value
        inst._checker = checker
        return inst

    def __init__(self, value, checker):
        if checker is None:
            # A proxy without a checker would be unenforceable.
            # (Fixed message: previously read "may now be None".)
            raise ValueError('checker may not be None')
        self._wrapped = value
        self._checker = checker

    # Attribute protocol
    def __getattribute__(self, name):
        if name in ('_wrapped', '_checker'):
            # Only allow _wrapped and _checker to be accessed from inside.
            if sys._getframe(1).f_locals.get('self') is not self:
                raise AttributeError(name)
        wrapped = super().__getattribute__('_wrapped')
        if name == '_wrapped':
            return wrapped
        checker = super().__getattribute__('_checker')
        if name == '_checker':
            return checker
        # Rich-comparison/hash/bool names are deliberately unchecked, to
        # match the C implementation.
        if name not in ('__cmp__', '__hash__', '__bool__',
                        '__lt__', '__le__', '__eq__', '__ne__', '__ge__',
                        '__gt__'):
            checker.check_getattr(wrapped, name)
        if name in ('__reduce__', '__reduce_ex__'):
            # The superclass specifically denies access to __reduce__
            # and __reduce__ex__, not letting proxies be pickled. But
            # for backwards compatibility, we need to be able to
            # pickle proxies. See checker:Global for an example.
            val = getattr(wrapped, name)
        elif name == '__module__':
            # The superclass deals with descriptors found in the type
            # of this object just like the Python language spec states, letting
            # them have precedence over things found in the instance. This
            # normally makes us a better proxy implementation. However, the
            # C version of this code in _proxy doesn't take that same care and
            # instead uses the generic object attribute access methods directly
            # on the wrapped object. This is a behaviour difference; so far,
            # it's only been noticed for the __module__ attribute, which
            # checker:Global wants to override but couldn't because this
            # object's type's __module__ would get in the way. That broke
            # pickling, and checker:Global can't return anything more
            # sophisticated than a str (a tuple) because it gets proxied and
            # breaks pickling again. Our solution is to match the C version for
            # this one attribute.
            val = getattr(wrapped, name)
        else:
            val = super().__getattribute__(name)
        # Every value handed out is security-proxied in turn.
        return checker.proxy(val)

    def __getattr__(self, name):
        # We only get here if __getattribute__ has already raised an
        # AttributeError (we have to implement this because the super
        # class does). We expect that we will also raise that same
        # error, one way or another---either it will be forbidden by
        # the checker or it won't exist. However, if the underlying
        # object is playing games in *its*
        # __getattribute__/__getattr__, and we call getattr() on it,
        # (maybe there are threads involved), we might actually
        # succeed this time.
        # The C implementation *does not* do two checks; it only does
        # one check, and raises either the ForbiddenAttribute or the
        # underlying AttributeError, *without* invoking any defined
        # __getattribute__/__getattr__ more than once. So we
        # explicitly do the same. The consequence is that we lose a
        # good stack trace if the object implemented its own methods
        # but we're consistent. We would provide a better error
        # message or even subclass of AttributeError, but that's liable to
        # break (doc)tests.
        wrapped = super().__getattribute__('_wrapped')
        checker = super().__getattribute__('_checker')
        checker.check_getattr(wrapped, name)
        raise AttributeError(name)

    def __setattr__(self, name, value):
        if name in ('_wrapped', '_checker'):
            return super().__setattr__(name, value)
        wrapped = super().__getattribute__('_wrapped')
        checker = super().__getattribute__('_checker')
        checker.check_setattr(wrapped, name)
        setattr(wrapped, name, value)

    def __delattr__(self, name):
        if name in ('_wrapped', '_checker'):
            # The proxy's own slots may never be deleted.
            raise AttributeError()
        wrapped = super().__getattribute__('_wrapped')
        checker = super().__getattribute__('_checker')
        checker.check_setattr(wrapped, name)
        delattr(wrapped, name)

    def __lt__(self, other):
        # no check
        wrapped = super().__getattribute__('_wrapped')
        return wrapped < other

    def __le__(self, other):
        # no check
        wrapped = super().__getattribute__('_wrapped')
        return wrapped <= other

    def __eq__(self, other):
        # no check
        wrapped = super().__getattribute__('_wrapped')
        return wrapped == other

    def __ne__(self, other):
        # no check
        wrapped = super().__getattribute__('_wrapped')
        return wrapped != other

    def __ge__(self, other):
        # no check
        wrapped = super().__getattribute__('_wrapped')
        return wrapped >= other

    def __gt__(self, other):
        # no check
        wrapped = super().__getattribute__('_wrapped')
        return wrapped > other

    def __hash__(self):
        # no check
        wrapped = super().__getattribute__('_wrapped')
        return hash(wrapped)

    def __bool__(self):
        # no check
        wrapped = super().__getattribute__('_wrapped')
        return bool(wrapped)

    def __length_hint__(self):
        # no check
        wrapped = super().__getattribute__('_wrapped')
        try:
            hint = wrapped.__length_hint__
        except AttributeError:
            return NotImplemented
        else:
            return hint()

    def __str__(self):
        try:
            return _check_name(PyProxyBase.__str__)(self)
        # The C implementation catches almost all exceptions; the
        # exception is a TypeError that's raised when the repr returns
        # the wrong type of object.
        except TypeError:
            raise
        except:  # noqa: E722 do not use bare 'except'
            # The C implementation catches all exceptions.
            wrapped = super().__getattribute__('_wrapped')
            return '<security proxied {}.{} instance at {}>'.format(
                wrapped.__class__.__module__, wrapped.__class__.__name__,
                _fmt_address(wrapped))

    def __repr__(self):
        try:
            return _check_name(PyProxyBase.__repr__)(self)
        # The C implementation catches almost all exceptions; the
        # exception is a TypeError that's raised when the repr returns
        # the wrong type of object.
        except TypeError:
            raise
        except:  # noqa: E722 do not use bare 'except'
            wrapped = super().__getattribute__('_wrapped')
            return '<security proxied {}.{} instance at {}>'.format(
                wrapped.__class__.__module__, wrapped.__class__.__name__,
                _fmt_address(wrapped))
# Install checked wrappers for the bulk of the special methods: each one
# runs the checker first and security-proxies its result.
for name in ['__call__',
             # '__repr__',
             # '__str__',
             # '__unicode__', # Unchecked in C proxy
             '__reduce__',
             '__reduce_ex__',
             # '__lt__', # Unchecked in C proxy (rich comparison)
             # '__le__', # Unchecked in C proxy (rich comparison)
             # '__eq__', # Unchecked in C proxy (rich comparison)
             # '__ne__', # Unchecked in C proxy (rich comparison)
             # '__ge__', # Unchecked in C proxy (rich comparison)
             # '__gt__', # Unchecked in C proxy (rich comparison)
             # '__bool__', # Unchecked in C proxy (rich comparison)
             # '__hash__', # Unchecked in C proxy (rich comparison)
             # '__cmp__', # Unchecked in C proxy
             '__getitem__',
             '__setitem__',
             '__delitem__',
             '__iter__',
             '__next__',
             'next',
             '__contains__',
             '__neg__',
             '__pos__',
             '__abs__',
             '__invert__',
             '__complex__',
             '__int__',
             '__float__',
             '__index__',
             '__add__',
             '__sub__',
             '__mul__',
             '__truediv__',
             '__floordiv__',
             '__mod__',
             '__divmod__',
             '__pow__',
             '__radd__',
             '__rsub__',
             '__rmul__',
             '__rtruediv__',
             '__rfloordiv__',
             '__rmod__',
             '__rdivmod__',
             '__rpow__',
             '__lshift__',
             '__rshift__',
             '__and__',
             '__xor__',
             '__or__',
             '__rlshift__',
             '__rrshift__',
             '__rand__',
             '__rxor__',
             '__ror__',
             ]:
    meth = getattr(PyProxyBase, name)
    setattr(ProxyPy, name, _check_name(meth))
# __len__ is checked, but its int result is returned unproxied
# (wrap_result=False).
for name in (
        '__len__',
):
    meth = getattr(PyProxyBase, name)
    setattr(ProxyPy, name, _check_name(meth, False))
# Augmented-assignment operators get the in-place variant, which falls
# back to the plain binary operator when the wrapped object has no
# in-place method.
for name in ['__iadd__',
             '__isub__',
             '__imul__',
             '__itruediv__',
             '__ifloordiv__',
             '__imod__',
             '__ilshift__',
             '__irshift__',
             '__iand__',
             '__ixor__',
             '__ior__',
             '__ipow__',
             ]:
    meth = getattr(PyProxyBase, name)
    setattr(ProxyPy, name, _check_name_inplace(meth))
def getCheckerPy(proxy):
    # Bypass the proxy's attribute protection to fetch its checker.
    return super(ProxyPy, proxy).__getattribute__('_checker')
# Keep a reference to the real builtin; the module-level isinstance()
# defined below shadows the name.
_builtin_isinstance = sys.modules['builtins'].isinstance
def getObjectPy(proxy):
    # Return the object wrapped by a security proxy; anything that is not
    # a ProxyPy passes through unchanged.
    if not _builtin_isinstance(proxy, ProxyPy):
        return proxy
    return super(ProxyPy, proxy).__getattribute__('_wrapped')
# Prefer the C implementation unless PURE_PYTHON was requested or the
# extension module is unavailable.
_c_available = not PURE_PYTHON
if _c_available:  # pragma: no cover
    try:
        from zope.security._proxy import _Proxy
    except (ImportError, AttributeError):
        _c_available = False
# Default to the pure-Python implementations...
getChecker = getCheckerPy
getObject = getObjectPy
Proxy = ProxyPy
# ...and swap in the C versions when they loaded successfully.
if _c_available:  # pragma: no cover
    from zope.security._proxy import getChecker
    from zope.security._proxy import getObject
    Proxy = _Proxy
# Public alias: unwrap a security proxy (identity on non-proxies).
removeSecurityProxy = getObject
def getTestProxyItems(proxy):
    """Return a sorted sequence of checker names and permissions for testing
    """
    permissions = getChecker(proxy).get_permissions
    return sorted(permissions.items())
def isinstance(object, cls):
    """Test whether an *object* is an instance of a type.

    This works even if the object is security proxied.
    """
    # Unwrapping here is fine: the naked object is used *only* for the
    # type test and never escapes this function.
    naked = removeSecurityProxy(object)
    return _builtin_isinstance(naked, cls)
from zope.interface import moduleProvides
from zope.security._definitions import system_user
from zope.security._definitions import thread_local
from zope.security.checker import CheckerPublic
from zope.security.interfaces import IInteractionManagement
from zope.security.interfaces import ISecurityManagement
from zope.security.interfaces import NoInteraction
from zope.security.simplepolicies import ParanoidSecurityPolicy
# Public API of this module.
__all__ = [
    'system_user',
    'getSecurityPolicy',
    'setSecurityPolicy',
    'queryInteraction',
    'getInteraction',
    'ExistingInteraction',
    'newInteraction',
    'endInteraction',
    'restoreInteraction',
    'checkPermission',
]
# Module-level default policy; replaced via setSecurityPolicy().
_defaultPolicy = ParanoidSecurityPolicy
moduleProvides(
    ISecurityManagement,
    IInteractionManagement)
#
# ISecurityManagement implementation
#
def getSecurityPolicy():
    """Get the system default security policy.

    This is the factory :func:`newInteraction` uses to create
    interactions.
    """
    return _defaultPolicy
def setSecurityPolicy(aSecurityPolicy):
    """Install *aSecurityPolicy* as the system default and return the
    policy it replaces.

    This method should only be called by system startup code.
    It should never, for example, be called during a web request.
    """
    global _defaultPolicy
    previous = _defaultPolicy
    _defaultPolicy = aSecurityPolicy
    return previous
#
# IInteractionManagement implementation
#
def queryInteraction():
    """Return the calling thread's interaction, or None if there is none."""
    return getattr(thread_local, 'interaction', None)
def getInteraction():
    """Get the current interaction.

    :raise NoInteraction: if the calling thread has no interaction.
    """
    try:
        return thread_local.interaction
    except AttributeError:
        # No interaction has been started on this thread.
        raise NoInteraction
class ExistingInteraction(ValueError,
                          AssertionError,  # BBB: kept for backward compat
                          ):
    """
    The exception that :func:`newInteraction` will raise if called
    during an existing interaction.
    """
def newInteraction(*participations):
    """Start a new interaction for the calling thread.

    :raise ExistingInteraction: if the thread already has one.
    """
    if queryInteraction() is not None:
        raise ExistingInteraction("newInteraction called"
                                  " while another interaction is active.")
    # The default policy acts as the interaction factory.
    policy = getSecurityPolicy()
    thread_local.interaction = policy(*participations)
def endInteraction():
    """End the current interaction.

    The ended interaction is stashed so :func:`restoreInteraction`
    can bring it back.
    """
    try:
        thread_local.previous_interaction = thread_local.interaction
    except AttributeError:
        # There was no current interaction to end.
        # if someone does a restore later, it should be restored to not having
        # an interaction. If there was a previous interaction from a previous
        # call to endInteraction, it should be removed.
        try:
            del thread_local.previous_interaction
        except AttributeError:
            pass
    else:
        # Only drop the active interaction once it was stashed above.
        del thread_local.interaction
def restoreInteraction():
    """Reinstate the interaction stashed by :func:`endInteraction`.

    If nothing was stashed, any current interaction is discarded
    instead.
    """
    try:
        previous = thread_local.previous_interaction
    except AttributeError:
        # Nothing stashed: restore to "no interaction".
        try:
            del thread_local.interaction
        except AttributeError:
            pass
    else:
        thread_local.interaction = previous
def checkPermission(permission, object, interaction=None):
    """Return whether security policy allows permission on object.

    :param str permission: A permission name.
    :param object: The object being accessed according to the permission.
    :param interaction: An interaction, providing access to information
        such as authenticated principals.  If it is None, the current
        interaction is used.
    :return: A boolean value.  ``checkPermission`` is guaranteed to
        return ``True`` if *permission* is
        :data:`zope.security.checker.CheckerPublic` or ``None``.
    :raise NoInteraction: If there is no current interaction and no
        interaction argument was given.
    """
    # Public access never requires an interaction.
    if permission is None or permission is CheckerPublic:
        return True
    if interaction is None:
        try:
            interaction = thread_local.interaction
        except AttributeError:
            raise NoInteraction
    return interaction.checkPermission(permission, object)
def _clear():
    # Test-support hook: reset the default policy.  Registered with
    # zope.testing.cleanup below.
    global _defaultPolicy
    _defaultPolicy = ParanoidSecurityPolicy
# Register test cleanups when zope.testing is available; production
# deployments may not have it installed.
try:
    from zope.testing.cleanup import addCleanUp
except ImportError:  # pragma: no cover
    pass
else:
    addCleanUp(_clear)
    addCleanUp(endInteraction)
from zope.location import ILocation
from zope.location import LocationProxy
from zope.security.checker import ProxyFactory
from zope.security.proxy import removeSecurityProxy
def assertLocation(adapter, parent):
    """
    Assert locatable adapters.

    This function asserts that the adapter get location-proxied if it
    doesn't provide :class:`zope.location.interfaces.ILocation`
    itself. Furthermore, the returned locatable adapter get its parent
    set if its ``__parent__`` attribute is currently None.
    """
    if not ILocation.providedBy(adapter):
        # (A) non-locatable adapter: wrap it and give it a parent.
        located = LocationProxy(adapter)
        located.__parent__ = parent
        return located
    # (B) locatable but parentless adapter: adopt the given parent.
    if adapter.__parent__ is None:
        adapter.__parent__ = parent
    # (C) a locatable adapter with a parent is returned untouched.
    return adapter
class LocatingTrustedAdapterFactory:
    """
    Adapt an adapter factory to provide trusted and (locatable) adapters.

    Trusted adapters always adapt unproxied objects. If asked to
    adapt any proxied objects, it will unproxy them and then
    security-proxy the resulting adapter (S) unless the objects where not
    security-proxied before (N).

    Further locating trusted adapters provide a location for protected
    adapters only (S). If such a protected adapter itself does not
    provide ILocation it is wrapped within a location proxy and it
    parent will be set. If the adapter does provide
    :class:`zope.location.interfaces.ILocation` and its
    ``__parent__`` is None, we set the ``__parent__`` to the adapter's
    context.
    """

    def __init__(self, factory):
        self.factory = factory
        self.__name__ = factory.__name__
        self.__module__ = factory.__module__

    # hooks for subclasses
    def _customizeProtected(self, adapter, context):
        return assertLocation(adapter, context)

    def _customizeUnprotected(self, adapter, context):
        if ILocation.providedBy(adapter) and adapter.__parent__ is None:
            adapter.__parent__ = context
        return adapter

    def __call__(self, *args):
        if any(removeSecurityProxy(arg) is not arg for arg in args):
            # At least one argument was security-proxied: adapt the bare
            # objects and protect (and locate) the resulting adapter.
            naked = [removeSecurityProxy(arg) for arg in args]
            adapter = self._customizeProtected(
                self.factory(*naked), naked[0])
            return ProxyFactory(adapter)
        # Nothing was proxied: hand back an unprotected adapter.
        return self._customizeUnprotected(self.factory(*args), args[0])
class TrustedAdapterFactory(LocatingTrustedAdapterFactory):
    """
    Adapt an adapter factory to provide trusted adapters.

    Trusted adapters always adapt unproxied objects. If asked to
    adapt any proxied objects, it will unproxy them and then
    security-proxy the resulting adapter unless the objects where not
    security-proxied before.

    If the adapter does provide
    :class:`zope.location.interfaces.ILocation` and its ``__parent__``
    is None, we set the ``__parent__`` to the adapter's context.
    """
    # Unlike the base class, do not location-proxy the adapter:
    # protected adapters receive the same treatment as unprotected ones.
    def _customizeProtected(self, adapter, context):
        return self._customizeUnprotected(adapter, context)
class LocatingUntrustedAdapterFactory:
    """
    Adapt an adapter factory to provide locatable untrusted adapters

    Untrusted adapters always adapt proxied objects. If any permission
    other than :const:`zope.Public
    <zope.security.interfaces.PUBLIC_PERMISSION_NAME>` is required,
    untrusted adapters need a location in order that the local
    authentication mechanism can be invoked correctly.

    If the adapter does not provide
    :class:`zope.location.interfaces.ILocation`, we location proxy it
    and set the parent. If the adapter does provide ``ILocation`` and
    its ``__parent__`` is None, we set the ``__parent__`` to the
    adapter's context only.
    """

    def __init__(self, factory):
        self.factory = factory
        self.__name__ = factory.__name__
        self.__module__ = factory.__module__

    def __call__(self, *args):
        # Location handling is delegated entirely to assertLocation().
        return assertLocation(self.factory(*args), args[0])
"""Component architecture related 'zope' ZCML namespace directive interfaces
"""
__docformat__ = 'restructuredtext'
import zope.configuration.fields
import zope.interface
import zope.schema
from zope.configuration.fields import GlobalInterface
from zope.configuration.fields import GlobalObject
from zope.configuration.fields import PythonIdentifier
from zope.configuration.fields import Tokens
from zope.interface import Interface
import zope.security.zcml
from zope.security.i18n import ZopeMessageFactory as _
from zope.security.zcml import Permission
class IClassDirective(zope.interface.Interface):
    """Make statements about a class"""
    # The class the enclosed subdirectives apply to.
    class_ = zope.configuration.fields.GlobalObject(
        title=_("Class"),
        required=True
    )
class IImplementsSubdirective(zope.interface.Interface):
    """Declare that the class given by the content directive's class
    attribute implements a given interface
    """
    interface = zope.configuration.fields.Tokens(
        title=_("One or more interfaces"),
        required=True,
        value_type=zope.configuration.fields.GlobalInterface()
    )
class IRequireSubdirective(zope.interface.Interface):
    """Indicate that a specified list of names or the names in a
    given Interface require a given permission for access.
    """
    permission = zope.security.zcml.Permission(
        title=_("Permission"),
        description=_("""
        Specifies the permission by id that will be required to
        access or mutate the attributes and methods specified."""),
        required=False,
    )
    attributes = zope.configuration.fields.Tokens(
        title=_("Attributes and methods"),
        description=_("This is a list of attributes and methods"
                      " that can be accessed."),
        required=False,
        value_type=zope.configuration.fields.PythonIdentifier(),
    )
    set_attributes = zope.configuration.fields.Tokens(
        title=_("Attributes that can be set"),
        description=_("This is a list of attributes that can be"
                      " modified/mutated."),
        required=False,
        value_type=zope.configuration.fields.PythonIdentifier(),
    )
    interface = zope.configuration.fields.Tokens(
        title=_("Interfaces"),
        description=_("The listed interfaces' methods and attributes"
                      " can be accessed."),
        required=False,
        value_type=zope.configuration.fields.GlobalInterface(),
    )
    set_schema = zope.configuration.fields.Tokens(
        title=_("The attributes specified by the schema can be set"),
        description=_("The listed schemas' properties can be"
                      " modified/mutated."),
        required=False,
        value_type=zope.configuration.fields.GlobalInterface(),
    )
    # Mutually exclusive with all of the above attributes.
    like_class = zope.configuration.fields.GlobalObject(
        title=_("Configure like this class"),
        description=_("""
        This argument says that this content class should be configured in the
        same way the specified class' security is. If this argument is
        specified, no other argument can be used."""),
        required=False,
    )
class IAllowSubdirective(zope.interface.Interface):
    """
    Declare a part of the class to be publicly viewable (that is,
    requires the zope.Public permission). Only one of the following
    two attributes may be used.
    """
    attributes = zope.configuration.fields.Tokens(
        title=_("Attributes"),
        required=False,
        value_type=zope.configuration.fields.PythonIdentifier(),
    )
    interface = zope.configuration.fields.Tokens(
        title=_("Interface"),
        required=False,
        value_type=zope.configuration.fields.GlobalInterface(),
    )
class IFactorySubdirective(zope.interface.Interface):
    """Specify the factory used to create this content object"""
    id = zope.schema.Id(
        title=_("ID"),
        description=_("""
        the identifier for this factory in the ZMI factory
        identification scheme. If not given, defaults to the literal
        string given as the content directive's 'class' attribute."""),
        required=False,
    )
    title = zope.configuration.fields.MessageID(
        title=_("Title"),
        description=_("Text suitable for use in the 'add content' menu"
                      " of a management interface"),
        required=False,
    )
    description = zope.configuration.fields.MessageID(
        title=_("Description"),
        description=_("Longer narrative description of what this"
                      " factory does"),
        required=False,
    )
class IModule(Interface):
    """Group security declarations about a module"""
    # The module the enclosed allow/require subdirectives apply to.
    module = GlobalObject(
        title="Module",
        description="Pointer to the module object.",
        required=True)
class IAllow(Interface):
    """Allow access to selected module attributes

    Access is unconditionally allowed to any names provided directly
    in the attributes attribute or to any names defined by
    interfaces listed in the interface attribute.
    """
    attributes = Tokens(
        title="Attributes",
        description="The attributes to provide access to.",
        value_type=PythonIdentifier(),
        required=False)
    # Fixed typo in the user-visible description: "whos" -> "whose".
    interface = Tokens(
        title="Interface",
        description=("Interfaces whose names to provide access to. Access "
                     "will be provided to all of the names defined by the "
                     "interface(s). Multiple interfaces can be supplied."),
        value_type=GlobalInterface(),
        required=False)
class IRequire(Interface):
    """Require a permission to access selected module attributes

    The given permission is required to access any names provided
    directly in the attributes attribute or any names defined by
    interfaces listed in the interface attribute.
    """

    # Attribute names (Python identifiers) to protect.
    attributes = Tokens(
        title="Attributes",
        description="The attributes to require permission for.",
        value_type=PythonIdentifier(),
        required=False)

    # The ID of the permission guarding access to the listed names.
    permission = Permission(
        title="Permission ID",
        description="The ID of the permission to require.")
import abc
import datetime
import decimal
import os
import sys
import types
import weakref
import zope.interface.declarations
import zope.interface.interface
import zope.interface.interfaces
from zope.i18nmessageid import Message
from zope.interface import Interface
from zope.interface import directlyProvides
from zope.interface import implementer
from zope.interface.interfaces import IDeclaration
from zope.interface.interfaces import IInterface
from zope.security._compat import PURE_PYTHON
from zope.security._compat import implementer_if_needed
from zope.security._definitions import thread_local
from zope.security.interfaces import ForbiddenAttribute
from zope.security.interfaces import IChecker
from zope.security.interfaces import INameBasedChecker
from zope.security.interfaces import ISecurityProxyFactory
from zope.security.interfaces import Unauthorized
from zope.security.proxy import Proxy
from zope.security.proxy import getChecker
try:
    from zope.exceptions import DuplicationError
except ImportError:  # pragma: no cover
    # Minimal stand-in so this module has no hard dependency on
    # zope.exceptions.
    class DuplicationError(Exception):
        """A duplicate registration was attempted"""


# Verbosity for the watching/logging checkers below, driven by the
# ZOPE_WATCH_CHECKERS environment variable.
WATCH_CHECKERS = 0
if os.environ.get('ZOPE_WATCH_CHECKERS'):  # pragma: no cover
    try:
        WATCH_CHECKERS = int(os.environ.get('ZOPE_WATCH_CHECKERS'))
    except ValueError:
        # Any non-integer truthy value enables level 1.
        WATCH_CHECKERS = 1
def ProxyFactory(object, checker=None):
    """Factory function that creates a proxy for an object

    The proxy checker is looked up if not provided.
    """
    if isinstance(object, Proxy):
        if checker is not None and checker is not getChecker(object):
            # Someone asked us to change the checker of an existing proxy.
            #
            # Other reasonable actions would be to either keep the existing
            # proxy, or to create a new one with the given checker.  The
            # latter might be a security hole though, if untrusted code can
            # call ProxyFactory -- so we refuse outright.
            raise TypeError("Tried to use ProxyFactory to change a Proxy's"
                            " checker.")
        # Already proxied with a compatible checker: hand it back as-is.
        return object

    if checker is None:
        # Look the checker up: first on the object itself, then in the
        # global registry.
        checker = getattr(object, '__Security_checker__', None)
        if checker is None:
            checker = selectChecker(object)
            if checker is None:
                # No checker at all means the object is a "rock": it does
                # not need to be wrapped.
                return object
    return Proxy(object, checker)
directlyProvides(ProxyFactory, ISecurityProxyFactory)

# This import represents part of the API for the proxy module
from . import proxy  # noqa: E402 module level import not at top

# Make the factory reachable as zope.security.proxy.ProxyFactory.
proxy.ProxyFactory = ProxyFactory
def canWrite(obj, name):
    """Check whether the interaction may write an attribute named name on obj.

    Convenience method. Rather than using checkPermission in high level code,
    use canWrite and canAccess to avoid binding code to permissions.
    """
    proxied = ProxyFactory(obj)
    checker = getChecker(proxied)
    try:
        checker.check_setattr(proxied, name)
    except Unauthorized:
        return False
    except ForbiddenAttribute:
        # Be a bit DWIM-y here (see
        # http://www.zope.org/Collectors/Zope3-dev/506): a ForbiddenAttribute
        # on write usually indicates a programming or configuration error and
        # should propagate.  But when the attribute is at least *readable*,
        # write-forbidden represents a reasonable read-only configuration, so
        # answering False ("no, you can't write it") is more useful than
        # raising.
        try:
            checker.check_getattr(proxied, name)
        except Unauthorized:
            # Readable-but-unauthorized still means "can't write".
            pass
        # A second ForbiddenAttribute from check_getattr deliberately falls
        # through: both read and write are forbidden.
        return False
    # All other exceptions, other than Unauthorized and ForbiddenAttribute,
    # pass through uncaught, as they indicate programmer error.
    return True
def canAccess(obj, name):
    """Check whether the interaction may access an attribute named name on obj.

    Convenience method. Rather than using checkPermission in high level code,
    use canWrite and canAccess to avoid binding code to permissions.
    """
    # "Access" covers attributes and methods, including (in the current
    # checker implementation) special names like __getitem__.
    proxied = ProxyFactory(obj)
    try:
        getChecker(proxied).check_getattr(proxied, name)
    except Unauthorized:
        return False
    # ForbiddenAttribute (or anything else) is deliberately not caught: it
    # probably indicates a programming or configuration error.
    return True
@implementer(INameBasedChecker)
class CheckerPy:
    """
    The Python reference implementation of
    :class:`zope.security.interfaces.INameBasedChecker`.

    Ordinarily there will be no reason to ever explicitly use this class;
    instead use the class assigned to :class:`Checker`.
    """

    def __init__(self, get_permissions, set_permissions=None):
        """Create a checker

        A dictionary must be provided for computing permissions for
        names. The dictionary get will be called with attribute names
        and must return a permission ID, None, or the special marker,
        :const:`CheckerPublic`. If None is returned, then access to the name
        is forbidden. If :const:`CheckerPublic` is returned, then access will
        be granted without checking a permission.

        An optional setattr dictionary may be provided for checking
        set attribute access.
        """
        if not isinstance(get_permissions, dict):
            raise TypeError('get_permissions must be a dict')
        self.get_permissions = get_permissions
        if set_permissions is not None:
            if not isinstance(set_permissions, dict):
                raise TypeError('set_permissions must be a dict')
        else:
            # Normalize a missing setattr table to an empty dict so the
            # lookups below never need a None check.
            set_permissions = {}
        self.set_permissions = set_permissions

    def permission_id(self, name):
        'See INameBasedChecker'
        return self.get_permissions.get(name)

    def setattr_permission_id(self, name):
        'See INameBasedChecker'
        # Implicitly returns None when no setattr table was configured.
        if self.set_permissions:
            return self.set_permissions.get(name)

    def check_setattr(self, object, name):
        'See IChecker'
        if self.set_permissions:
            permission = self.set_permissions.get(name)
        else:
            permission = None
        if permission is not None:
            if permission is CheckerPublic:
                return  # Public
            if thread_local.interaction.checkPermission(permission, object):
                return  # allowed
            else:
                __traceback_supplement__ = (TracebackSupplement, object)
                raise Unauthorized(object, name, permission)
        # No permission configured for setting this name: forbidden.
        __traceback_supplement__ = (TracebackSupplement, object)
        raise ForbiddenAttribute(name, object)

    def check(self, object, name):
        'See IChecker'
        permission = self.get_permissions.get(name)
        if permission is not None:
            if permission is CheckerPublic:
                return  # Public
            if thread_local.interaction.checkPermission(permission, object):
                return
            else:
                __traceback_supplement__ = (TracebackSupplement, object)
                raise Unauthorized(object, name, permission)
        elif name in _available_by_default:
            # Names like __class__/__repr__ are always accessible.
            return
        # Probing for __iter__ on an object that does not have it is let
        # through, so iteration attempts fail with the ordinary error
        # instead of ForbiddenAttribute; everything else unconfigured is
        # forbidden.
        if name != '__iter__' or hasattr(object, name):
            __traceback_supplement__ = (TracebackSupplement, object)
            raise ForbiddenAttribute(name, object)

    check_getattr = check  # 'See IChecker'

    def proxy(self, value):
        'See IChecker'
        if isinstance(value, Proxy):
            return value
        checker = getattr(value, '__Security_checker__', None)
        if checker is None:
            checker = selectChecker(value)
            if checker is None:
                # No checker: the value is a "rock" and needs no proxy.
                return value
        return Proxy(value, checker)


Checker = CheckerPy  # in case no C optimizations
# Helper class for __traceback_supplement__
class TracebackSupplement:
    """Adds the class and type of the checked object to tracebacks."""

    def __init__(self, obj):
        self.obj = obj

    def getInfo(self):
        """Return a two-line description of the object's class and type."""
        lines = []
        # Both lookups are best-effort: any failure simply omits that line.
        for label, getter in (
            ("class", lambda: self.obj.__class__),
            ("type", lambda: type(self.obj)),
        ):
            try:
                cls = getter()
                if hasattr(cls, "__module__"):
                    dotted = f"{cls.__module__}.{cls.__name__}"
                else:  # pragma: no cover XXX
                    dotted = str(cls.__name__)
                lines.append(" - " + label + ": " + dotted)
            except:  # pragma: no cover # noqa: E722 do not use bare 'except'
                pass
        return "\n".join(lines)
class Global:
    """A global object that behaves like a string.

    We want this to behave as a global, meaning it's pickled
    by name, rather than value. We need to arrange that it has a suitable
    __reduce__.
    """

    def __init__(self, name, module=None):
        # Default the module to the caller's module, like a real global.
        if module is None:  # pragma: no cover XXX
            module = sys._getframe(1).f_locals['__name__']
        self.__name__ = name
        self.__module__ = module

    def __reduce__(self):
        # Pickle purely by name; the unpickler resolves the global again.
        return self.__name__

    def __repr__(self):
        return f"{self.__class__.__name__}({self.__name__},{self.__module__})"
CheckerPublic = Global('CheckerPublic')
CP_HACK_XXX = CheckerPublic

# Now we wrap it in a security proxy so that it retains its
# identity when it needs to be security proxied.
# XXX: This means that we can't directly document it with
# sphinx because issubclass() will fail.
d = {}
CheckerPublic = Proxy(CheckerPublic, Checker(d))  # XXX uses CheckerPy
# Grant access through the proxy to exactly the two names pickling needs.
d['__reduce__'] = CheckerPublic
d['__module__'] = CheckerPublic
del d

# TODO: It's a bit scary above that we can pickle a proxy if access is
# granted to __reduce__. We might want to bother to prevent this in
# general and only allow it in this specific case.
def NamesChecker(names=(), permission_id=CheckerPublic, **__kw__):
    """Return a checker that grants access to a set of names.

    A sequence of names is given as the first argument. If a second
    argument, permission_id, is given, it is the permission required
    to access the names. Additional names and permission IDs can be
    supplied as keyword arguments.
    """
    data = dict(__kw__)
    for name in names:
        # setdefault leaves any existing (keyword-supplied) entry alone;
        # a conflicting permission for the same name is an error.
        if data.setdefault(name, permission_id) is not permission_id:
            raise DuplicationError(name)
    return Checker(data)
def InterfaceChecker(interface, permission_id=CheckerPublic, **__kw__):
    """
    Create a :func:`NamesChecker` for all the names defined in the *interface*
    (a subclass of :class:`zope.interface.Interface`).
    """
    # ``all=True`` includes names inherited from base interfaces.
    all_names = interface.names(all=True)
    return NamesChecker(all_names, permission_id, **__kw__)
def MultiChecker(specs):
    """
    Create a checker from a sequence of specifications

    A specification is:

    - A two-tuple with:

      o a sequence of names or an interface

      o a permission ID

      All the names in the sequence of names or the interface are
      protected by the permission.

    - A dictionary (having an items method), with items that are
      name/permission-id pairs.
    """
    data = {}

    def _merge(pairs):
        # Fold (name, permission) pairs into ``data``, rejecting any name
        # already registered with a *different* permission.
        for name, permission_id in pairs:
            if data.get(name, permission_id) is not permission_id:
                raise DuplicationError(name)
            data[name] = permission_id

    for spec in specs:
        if isinstance(spec, tuple):
            names, permission_id = spec
            if IInterface.providedBy(names):
                names = names.names(all=True)
            _merge((name, permission_id) for name in names)
        else:
            _merge(spec.items())
    return Checker(data)
def selectCheckerPy(object):
    """Get a checker for the given object

    The appropriate checker is returned or None is returned. If the
    return value is None, then object should not be wrapped in a proxy.
    """
    # We need to be careful here. We might have a proxy, in which case
    # we can't use the type. OTOH, we might not be able to use the
    # __class__ either, since not everything has one.
    # TODO: we really need formal proxy introspection
    # if type(object) is Proxy:
    #     # Is this already a security proxy?
    #     return None
    checker = _getChecker(type(object), _defaultChecker)
    # checker = _getChecker(getattr(object, '__class__', type(object)),
    #                       _defaultChecker)
    if checker is NoProxy:
        return None
    # Registry values may be factories; keep calling until we obtain an
    # actual Checker instance (or a sentinel meaning "no proxy needed").
    while not isinstance(checker, Checker):
        checker = checker(object)
        if checker is NoProxy or checker is None:
            return None
    return checker


selectChecker = selectCheckerPy  # in case no C optimizations
def getCheckerForInstancesOf(class_):
    """Return the checker registered for *class_*, or None if none is."""
    return _checkers.get(class_)
# Only types/classes and modules may have checkers registered for them.
DEFINABLE_TYPES = (type, types.ModuleType)


def defineChecker(type_, checker):
    """Define a checker for a given type of object

    The checker can be a :class:`Checker`, or a function that, when called
    with an object, returns a :class:`Checker`.

    :raises TypeError: if *type_* is not a type, class or module.
    :raises DuplicationError: if a checker is already defined for *type_*.
    """
    if not isinstance(type_, DEFINABLE_TYPES):
        raise TypeError(
            'type_ must be a type, class or module, not a %s' % type_)
    if type_ in _checkers:
        raise DuplicationError(type_)
    _checkers[type_] = checker
def undefineChecker(type_):
    """Remove the checker registered for *type_* (KeyError if absent)."""
    del _checkers[type_]
# Sentinel registry value meaning "never proxy instances of this type".
NoProxy = object()

# _checkers is a mapping.
#
# - Keys are types
#
# - Values are
#
#   o None => rock
#   o a Checker
#   o a function returning None or a Checker
#
_checkers = {}
_defaultChecker = Checker({})
_available_by_default = []

# Get optimized versions
_c_available = not PURE_PYTHON
if _c_available:  # pragma: no cover
    try:
        import zope.security._zope_security_checker
    except (ImportError, AttributeError):
        _c_available = False
if _c_available:  # pragma: no cover
    # Replace the Python implementations (and the registry objects
    # themselves) with the C-accelerated versions.
    from zope.security._zope_security_checker import Checker
    from zope.security._zope_security_checker import NoProxy
    from zope.security._zope_security_checker import _available_by_default
    from zope.security._zope_security_checker import _checkers
    from zope.security._zope_security_checker import _defaultChecker
    from zope.security._zope_security_checker import selectChecker
    zope.interface.classImplements(Checker, INameBasedChecker)

# Bound method used by selectCheckerPy; rebound here so it tracks the
# (possibly C) registry chosen above.
_getChecker = _checkers.get
@implementer_if_needed(IChecker)
class CombinedChecker(Checker):
    """A checker that combines two other checkers in a logical-or fashion.

    The following table describes the result of a combined checker in detail.

    +--------------------+--------------------+-------------------------------------+
    | checker1           | checker2           | CombinedChecker(checker1, checker2) |
    +====================+====================+=====================================+
    | ok                 | anything           | ok (checker 2 never called)         |
    +--------------------+--------------------+-------------------------------------+
    | Unauthorized       | ok                 | ok                                  |
    +--------------------+--------------------+-------------------------------------+
    | Unauthorized       | Unauthorized       | Unauthorized                        |
    +--------------------+--------------------+-------------------------------------+
    | Unauthorized       | ForbiddenAttribute | Unauthorized                        |
    +--------------------+--------------------+-------------------------------------+
    | ForbiddenAttribute | ok                 | ok                                  |
    +--------------------+--------------------+-------------------------------------+
    | ForbiddenAttribute | Unauthorized       | Unauthorized                        |
    +--------------------+--------------------+-------------------------------------+
    | ForbiddenAttribute | ForbiddenAttribute | ForbiddenAttribute                  |
    +--------------------+--------------------+-------------------------------------+
    """  # noqa: E501 line too long

    def __init__(self, checker1, checker2):
        """Create a combined checker."""
        # This instance *is* checker1 (we inherit its permission tables);
        # checker2 is consulted only when checker1 denies access.
        Checker.__init__(self,
                         checker1.get_permissions,
                         checker1.set_permissions)
        self._checker2 = checker2

    def check(self, object, name):
        'See IChecker'
        try:
            Checker.check(self, object, name)
        except ForbiddenAttribute:
            # checker1 forbids: checker2 fully decides (its Unauthorized or
            # ForbiddenAttribute propagates as-is).
            self._checker2.check(object, name)
        except Unauthorized as unauthorized_exception:
            # checker1 says Unauthorized: give checker2 a chance to allow.
            # Only if checker2 says Forbidden do we re-raise checker1's
            # Unauthorized; checker2's own Unauthorized propagates.
            try:
                self._checker2.check(object, name)
            except ForbiddenAttribute:
                raise unauthorized_exception

    check_getattr = __setitem__ = check

    def check_setattr(self, object, name):
        'See IChecker'
        # Same or-combination strategy as check(), for attribute writes.
        try:
            Checker.check_setattr(self, object, name)
        except ForbiddenAttribute:
            self._checker2.check_setattr(object, name)
        except Unauthorized as unauthorized_exception:
            try:
                self._checker2.check_setattr(object, name)
            except ForbiddenAttribute:
                raise unauthorized_exception
class CheckerLoggingMixin:
    """
    Debugging mixin for checkers.

    Prints verbose debugging information about every performed check to
    :data:`sys.stderr`.
    """

    #: If set to 1 (the default), only displays ``Unauthorized`` and
    #: ``Forbidden`` messages. If verbosity is set to a larger number,
    #: displays all messages. Normally this is controlled via the environment
    #: variable ``ZOPE_WATCH_CHECKERS``.
    verbosity = 1

    #: Stream the messages are written to; overridable for testing.
    _file = sys.stderr

    def _log(self, msg, verbosity=1):
        """Write *msg* if our verbosity is at least *verbosity*."""
        if self.verbosity >= verbosity:
            self._file.write('%s\n' % msg)

    def check(self, object, name):
        """Delegate to the cooperating checker, logging the outcome."""
        try:
            super().check(object, name)
            if self.verbosity > 1:
                if name in _available_by_default:
                    self._log('[CHK] + Always available: %s on %r'
                              % (name, object), 2)
                else:
                    self._log(
                        '[CHK] + Granted: {} on {!r}'.format(name, object), 2)
        except Unauthorized:
            self._log(
                '[CHK] - Unauthorized: {} on {!r}'.format(name, object))
            raise
        except ForbiddenAttribute:
            self._log(
                '[CHK] - Forbidden: {} on {!r}'.format(name, object))
            raise

    def check_getattr(self, object, name):
        """Delegate to the cooperating checker's check_getattr, logging.

        Fix: previously this called ``super().check()``.  That is
        behavior-equivalent for :class:`Checker` (where ``check_getattr``
        is an alias of ``check``), but delegating to the matching method
        keeps the mixin symmetric with ``check_setattr`` and correct for
        any cooperating checker that distinguishes the two.
        """
        try:
            super().check_getattr(object, name)
            if self.verbosity > 1:
                if name in _available_by_default:
                    self._log(
                        '[CHK] + Always available getattr: %s on %r'
                        % (name, object), 2)
                else:
                    self._log(
                        '[CHK] + Granted getattr: %s on %r'
                        % (name, object), 2)
        except Unauthorized:
            self._log(
                '[CHK] - Unauthorized getattr: {} on {!r}'.format(name, object)
            )
            raise
        except ForbiddenAttribute:
            self._log(
                '[CHK] - Forbidden getattr: {} on {!r}'.format(name, object))
            raise

    __setitem__ = check_getattr

    def check_setattr(self, object, name):
        """Delegate to the cooperating checker's check_setattr, logging."""
        try:
            super().check_setattr(object, name)
            if self.verbosity > 1:
                self._log(
                    '[CHK] + Granted setattr: {} on {!r}'.format(
                        name, object), 2)
        except Unauthorized:
            self._log(
                '[CHK] - Unauthorized setattr: {} on {!r}'.format(
                    name, object))
            raise
        except ForbiddenAttribute:
            self._log(
                '[CHK] - Forbidden setattr: {} on {!r}'.format(name, object))
            raise
# We have to be careful with the order of inheritance
# here. See https://github.com/zopefoundation/zope.security/issues/8
class WatchingChecker(CheckerLoggingMixin, Checker):
    """
    A checker that will perform verbose logging. This will be set
    as the default when ``ZOPE_WATCH_CHECKERS`` is set when this
    module is imported.
    """
    verbosity = WATCH_CHECKERS


class WatchingCombinedChecker(CombinedChecker, WatchingChecker):
    """
    A checker that will perform verbose logging. This will be set
    as the default when ``ZOPE_WATCH_CHECKERS`` is set when this
    module is imported.
    """
    verbosity = WATCH_CHECKERS


if WATCH_CHECKERS:  # pragma: no cover
    # When we make these the default, we also need to be sure
    # to update the _defaultChecker's type (if it's not the C
    # extension) so that selectCheckerPy can properly recognize
    # it as a Checker.
    # See https://github.com/zopefoundation/zope.security/issues/8
    Checker = WatchingChecker
    CombinedChecker = WatchingCombinedChecker
    if not _c_available:
        _defaultChecker.__class__ = Checker
def _instanceChecker(inst):
    # Look up a checker by the instance's class, falling back to the
    # (empty, all-forbidding) default checker.
    return _checkers.get(inst.__class__, _defaultChecker)
def moduleChecker(module):
    """
    Return the :class:`zope.security.interfaces.IChecker` defined for the
    *module*, if any.

    .. seealso:: :func:`zope.security.metaconfigure.protectModule`
       To define module protections.
    """
    # Modules are registered in the same registry as types.
    return _checkers.get(module)
# Names every checker grants by default (comparison, identity, and the
# handful of dunders the publication machinery always needs).
_available_by_default[:] = [
    '__lt__', '__le__', '__eq__',
    '__gt__', '__ge__', '__ne__',
    '__hash__', '__bool__',
    '__class__', '__providedBy__', '__implements__',
    '__repr__', '__conform__',
    '__name__', '__parent__',
]

# Checker shared by callables (functions, methods, built-ins).
_callableChecker = NamesChecker(['__str__', '__name__', '__call__'])

# Checker for type objects.
_typeChecker = NamesChecker([
    '__str__', '__name__', '__module__', '__bases__', '__mro__',
    '__implemented__',
])

_namedChecker = NamesChecker(['__name__'])

# Checker for iterator objects; __length_hint__ is probed by list() etc.
_iteratorChecker = NamesChecker([
    'next', '__next__', '__iter__', '__len__',
    '__length_hint__',
])

# Checker shared by set and frozenset.
_setChecker = NamesChecker([
    '__iter__', '__len__', '__str__', '__contains__',
    'copy', 'difference', 'intersection', 'issubset',
    'issuperset', 'symmetric_difference', 'union',
    '__and__', '__or__', '__sub__', '__xor__',
    '__rand__', '__ror__', '__rsub__', '__rxor__',
    '__eq__', '__ne__', '__lt__', '__gt__',
    '__le__', '__ge__',
])
class _BasicTypes(dict):
    """Basic Types Dictionary

    Make sure that checkers are really updated, when a new type is added.
    Every mutation is mirrored into the global ``_checkers`` registry.
    """

    def __setitem__(self, name, value):
        """Register *value* as the checker for the type *name*."""
        dict.__setitem__(self, name, value)
        _checkers[name] = value

    def __delitem__(self, name):
        """Remove the type and its registry entry."""
        dict.__delitem__(self, name)
        del _checkers[name]

    def clear(self):
        # Make sure you cannot clear the values
        raise NotImplementedError

    def update(self, *args, **kwargs):
        """Update both this dict and the global checker registry.

        Generalized to accept the same arguments as :meth:`dict.update`
        (a mapping, an iterable of pairs, and/or keyword arguments);
        previously only a single mapping argument was supported.
        """
        additions = dict(*args, **kwargs)
        dict.update(self, additions)
        _checkers.update(additions)
# Immutable / value-like types that are never proxied ("rocks").
_basic_types = {
    object: NoProxy,
    int: NoProxy,
    float: NoProxy,
    complex: NoProxy,
    type(None): NoProxy,
    str: NoProxy,
    bytes: NoProxy,
    Message: NoProxy,  # Messages are immutable, so it's okay
    bool: NoProxy,
    datetime.timedelta: NoProxy,
    datetime.datetime: NoProxy,
    datetime.date: NoProxy,
    datetime.time: NoProxy,
    datetime.tzinfo: NoProxy,
    type({}.values()): NoProxy,
    type({}.keys()): NoProxy,
    type({}.items()): NoProxy,
}

try:
    import pytz
except ImportError:  # pragma: no cover
    pass
else:
    # The pytz UTC singleton is effectively immutable too.
    _basic_types[type(pytz.UTC)] = NoProxy

# Public, registry-synchronized view of the basic types.
BasicTypes = _BasicTypes(_basic_types)
del _basic_types
# Available for tests. Located here so it can be kept in sync with BasicTypes.
# Maps each basic type to a representative instance.
BasicTypes_examples = {
    object: object(),
    int: 65536,
    float: -1.4142,
    complex: -1.4142j,
    type(None): None,
    bytes: b'abc',
    bool: True,
    datetime.timedelta: datetime.timedelta(3),
    datetime.datetime: datetime.datetime(2003, 1, 1),
    datetime.date: datetime.date(2003, 1, 1),
    datetime.time: datetime.time(23, 58),
    Message: Message('message', domain='hello')
}
class _Sequence:
    # Minimal sequence stand-in used below only to obtain the generic
    # sequence-iterator and weakref types; its methods are never called.
    def __len__(self):
        raise NotImplementedError()

    def __getitem__(self, i):
        raise NotImplementedError()
# Checker shared by the zope.interface declaration objects registered below.
_Declaration_checker = InterfaceChecker(
    IDeclaration,
    _implied=CheckerPublic,
    subscribe=CheckerPublic,
    unsubscribe=CheckerPublic,
    __call__=CheckerPublic,
)


def f():  # pragma: no cover
    # Used only to obtain the generator type for _default_checkers.
    yield f
# Default checker registrations for common built-in and stdlib types,
# keyed by type. Values are Checkers, factories, or the NoProxy sentinel.
_default_checkers = {
    dict: NamesChecker(['__getitem__', '__len__', '__iter__',
                        'get', 'has_key', 'copy', '__str__', 'keys',
                        'values', 'items', 'iterkeys', 'iteritems',
                        'itervalues', '__contains__']),
    list: NamesChecker(['__getitem__', '__len__', '__iter__',
                        '__contains__', 'index', 'count', '__str__',
                        '__add__', '__radd__', ]),
    set: _setChecker,
    frozenset: _setChecker,
    # XXX: actually decimal.Decimal has more methods, which are unlisted here
    # so expect ForbiddenAttribute on such
    decimal.Decimal: NamesChecker(['__bool__', '__cmp__', '__eq__',
                                   '__ne__', '__hash__',
                                   '__str__',
                                   '__neg__', '__pos__', '__abs__',
                                   '__add__', '__radd__',
                                   '__sub__', '__rsub__',
                                   '__mul__', '__rmul__',
                                   '__truediv__', '__rtruediv__',
                                   '__divmod__', '__rdivmod__',
                                   '__mod__', '__rmod__',
                                   '__floordiv__', '__rfloordiv__',
                                   '__float__', '__int__',
                                   '__pow__', '__rpow__',
                                   'adjusted', 'as_tuple', 'compare',
                                   'max', 'min', 'normalize',
                                   'quantize', 'remainder_near',
                                   'same_quantum', 'sqrt',
                                   'to_eng_string', 'to_integral']),
    # YAGNI: () a rock
    tuple: NamesChecker(['__getitem__', '__add__', '__radd__',
                         '__contains__', '__len__', '__iter__',
                         '__str__']),
    Proxy: NoProxy,
    type(weakref.ref(_Sequence())): NamesChecker(['__call__']),
    types.FunctionType: _callableChecker,
    types.MethodType: _callableChecker,
    types.BuiltinFunctionType: _callableChecker,
    types.BuiltinMethodType: _callableChecker,
    # method-wrapper
    type(().__repr__): _callableChecker,
    type: _typeChecker,
    # Modules may carry their own registered checker; fall back to __name__.
    types.ModuleType: lambda module: _checkers.get(module, _namedChecker),
    type(iter([])): _iteratorChecker,
    type(iter(())): _iteratorChecker,
    type(iter({})): _iteratorChecker,
    type(iter(set())): _iteratorChecker,
    type(iter(_Sequence())): _iteratorChecker,
    type(f()): _iteratorChecker,
    type(Interface): InterfaceChecker(
        IInterface,
        __str__=CheckerPublic,
        _implied=CheckerPublic,
        subscribe=CheckerPublic,
        # To iterate, Python calls __len__ as a hint.
        # AttributeErrors are passed.
        __len__=CheckerPublic,
    ),
    zope.interface.interface.Method: InterfaceChecker(
        zope.interface.interfaces.IMethod),
    zope.interface.declarations.ProvidesClass: _Declaration_checker,
    zope.interface.declarations.ClassProvides: _Declaration_checker,
    zope.interface.declarations.Implements: _Declaration_checker,
    zope.interface.declarations.Declaration: _Declaration_checker,
    abc.ABCMeta: _typeChecker,
}
def _fixup_dictlike(dict_type):
    """Register iterator checkers for a dict-like type's view objects.

    Both an empty and a populated instance are probed, because some
    implementations return different view/iterator types depending on
    contents.
    """
    for candidate in (dict_type(), dict_type({1: 2})):
        for method_name in ('__iter__', 'keys', 'items', 'values'):
            view = getattr(candidate, method_name)()
            # setdefault: keep any checker that is already registered.
            _default_checkers.setdefault(type(view), _iteratorChecker)
            # PyPy3 has special types for iter({}.items()) etc.
            _default_checkers.setdefault(type(iter(view)), _iteratorChecker)
def _fixup_odict():
    """Register iterator checkers for OrderedDict and its views."""
    from collections import OrderedDict
    # The `_fixup_dictlike` is detected as undefined because it is deleted
    # later on but this function is called beforehand:
    _fixup_dictlike(OrderedDict)  # noqa: F821 undefined name '_fixup_dictlike'


_fixup_odict()
del _fixup_odict
try:
    import BTrees  # noqa: F401 'BTrees' imported but unused
except ImportError:  # pragma: no cover
    pass
else:
    # The C implementation of BTree.items() is its own iterator
    # and doesn't need any special entries to enable iteration.
    # But the Python implementation has to call __iter__ to be able
    # to do iteration. Whitelist it so that they behave the same.
    # In addition, Python 3 will attempt to call __len__ on iterators
    # for a length hint, so the C implementations also need to be
    # added to the _iteratorChecker. The same thing automatically
    # applies for .keys() and .values() since they return the same type.
    # We do this here so that all users of zope.security can benefit
    # without knowing implementation details.
    # See https://github.com/zopefoundation/zope.security/issues/20
    def _fixup_btrees():
        import BTrees._base
        _default_checkers[BTrees._base._TreeItems] = _iteratorChecker
        # Probe one BTree class per key/value family combination.
        for name in ('IF', 'II', 'IO', 'OI', 'OO'):
            for family_name in ('family32', 'family64'):
                family = getattr(BTrees, family_name)
                btree = getattr(family, name).BTree
                # The `_fixup_dictlike` is detected as undefined because it
                # is deleted later on but this function is called beforehand:
                _fixup_dictlike(btree)  # noqa: F821 undefined name

    _fixup_btrees()
    del _fixup_btrees

del _fixup_dictlike
def _fixup_zope_interface():
    """Allow iteration of zope.interface declaration objects."""
    # Make sure the provided and implementedBy objects
    # can be iterated.
    # Note that we DO NOT use the _iteratorChecker, but instead
    # we use NoProxy to be sure that the results (of iteration or not) are
    # not proxied. On Python 2, these objects are builtin and don't go
    # through the checking process at all, much like BTrees, so NoProxy is
    # necessary for compatibility. On Python 3, prior to this, iteration
    # was simply not allowed.
    from zope.interface import alsoProvides
    from zope.interface import providedBy

    class I1(Interface):
        pass

    class I2(Interface):
        pass

    @implementer(I1)
    class Obj:
        pass

    o = Obj()

    # This will be the zope.interface.implementedBy from the class
    # a zope.interface.declarations.Implements
    _default_checkers[type(providedBy(o))] = NoProxy

    alsoProvides(o, I2)
    # This will be the zope.interface.Provides from the instance
    _default_checkers[type(providedBy(o))] = NoProxy


_fixup_zope_interface()
del _fixup_zope_interface
def _fixup_itertools():
    """Register itertools' custom iterator types with the iterator checker.

    Covers itertools.groupby, the internal itertools._grouper type (not
    exposed by name, only reachable via a groupby result), and the many
    other custom iterator types in itertools.  A matching list lives in
    test_checker.py:test_itertools_checkers.
    """
    import itertools

    def _register(kind):
        # Keep any checker that is already registered for the type.
        if kind not in _default_checkers:
            _default_checkers[kind] = _iteratorChecker

    grouped = itertools.groupby([0])
    _register(type(grouped))
    # itertools._grouper: dig the type out of an actual groupby result.
    _register(type(list(grouped)[0][1]))

    def _identity(value):
        return value

    seq = (1, 2, 3)
    for factory_name, args in (
        ('count', ()),
        ('cycle', ((),)),
        ('dropwhile', (_identity, seq)),
        ('islice', (seq, 2)),
        ('permutations', (seq,)),
        ('product', (seq,)),
        ('repeat', (1, 2)),
        ('starmap', (_identity, seq)),
        ('takewhile', (_identity, seq)),
        ('tee', (seq,)),
        ('zip_longest', (seq,)),
        ('accumulate', (seq,)),
        ('compress', (seq, ())),
        ('combinations', (seq, 1)),
        ('combinations_with_replacement', (seq, 1)),
    ):
        factory = getattr(itertools, factory_name)
        produced = factory(*args)
        if factory is itertools.tee:
            # tee returns a tuple of iterators; register the element type.
            produced = produced[0]
        _register(type(produced))


_fixup_itertools()
del _fixup_itertools
def _clear():
    # Reset the checker registry to the module defaults (used by tests
    # and by zope.testing's cleanup hook below).
    _checkers.clear()
    _checkers.update(_default_checkers)
    _checkers.update(BasicTypes)


_clear()

try:
    from zope.testing.cleanup import addCleanUp
except ImportError:  # pragma: no cover
    pass
else:
    # Re-install the default checkers whenever zope.testing cleans up.
    addCleanUp(_clear)
"""Permissions
"""
__docformat__ = "reStructuredText"
import operator
from zope.component import getUtilitiesFor
from zope.component import queryUtility
from zope.interface import directlyProvides
from zope.interface import implementer
from zope.schema.interfaces import IVocabularyFactory
from zope.schema.vocabulary import SimpleTerm
from zope.schema.vocabulary import SimpleVocabulary
from zope.security.checker import CheckerPublic
from zope.security.interfaces import PUBLIC_PERMISSION_NAME as zope_Public
from zope.security.interfaces import IPermission
@implementer(IPermission)
class Permission:
    """
    Default implementation of :class:`zope.security.interfaces.IPermission`.
    """

    def __init__(self, id, title="", description=""):
        # The IPermission schema fields, stored directly on the instance.
        self.id = id
        self.title = title
        self.description = description
def checkPermission(context, permission_id):
    """
    Check whether a given permission object exists in the provided
    context as a utility.

    Raises ValueError if the permission is not defined.
    """
    if permission_id is CheckerPublic:
        # The public pseudo-permission always exists.
        return
    utility = queryUtility(IPermission, permission_id, context=context)
    if not utility:
        raise ValueError("Undefined permission ID", permission_id)
def allPermissions(context=None):
    """
    Get the IDs of all defined permission object utilities.
    """
    # zope.Public is deliberately omitted: it is not a "real" permission.
    for name, _permission in getUtilitiesFor(IPermission, context):
        if name == zope_Public:
            continue
        yield name
def PermissionsVocabulary(context=None):
    """
    A vocabulary of permission IDs.

    Term values are permissions, while term tokens are permission IDs.
    """
    return SimpleVocabulary([
        SimpleTerm(permission, name)
        for name, permission in getUtilitiesFor(IPermission, context)
    ])


directlyProvides(PermissionsVocabulary, IVocabularyFactory)
def PermissionIdsVocabulary(context=None):
    """
    A vocabulary of permission IDs.

    Term values are the permission ID strings except for
    :data:`zope.Public
    <zope.security.interfaces.PUBLIC_PERMISSION_NAME>`, which is the
    global permission :data:`zope.security.checker.CheckerPublic`.

    Term titles are the permission ID strings except for
    :data:`zope.Public
    <zope.security.interfaces.PUBLIC_PERMISSION_NAME>`, which is
    shortened to 'Public'.

    Terms are sorted by title except for 'Public', which always appears as
    the first term.
    """
    terms = []
    saw_public = False
    for name, _permission in getUtilitiesFor(IPermission, context):
        if name == zope_Public:
            saw_public = True
        else:
            terms.append(SimpleTerm(name, name, name))
    # Stable sort by title; 'Public' (if present) is prepended afterwards
    # so it always comes first.
    terms.sort(key=operator.attrgetter('title'))
    if saw_public:
        terms.insert(0, SimpleTerm(CheckerPublic, zope_Public, 'Public'))
    return SimpleVocabulary(terms)


directlyProvides(PermissionIdsVocabulary, IVocabularyFactory)
""" Register class directive.
"""
__docformat__ = 'restructuredtext'
from types import ModuleType
from zope.component.factory import Factory
from zope.component.interface import provideInterface
from zope.component.interfaces import IFactory
from zope.component.zcml import utility
from zope.configuration.exceptions import ConfigurationError
from zope.interface import classImplements
from zope.schema.interfaces import IField
from zope.security.checker import Checker
from zope.security.checker import CheckerPublic
from zope.security.checker import defineChecker
from zope.security.checker import moduleChecker
from zope.security.interfaces import PUBLIC_PERMISSION_NAME as PublicPermission
from zope.security.protectclass import protectLikeUnto
from zope.security.protectclass import protectName
from zope.security.protectclass import protectSetAttribute
def dottedName(klass):
    """Return the dotted ``module.name`` path of *klass* ('None' for None)."""
    if klass is None:
        return 'None'
    return '{}.{}'.format(klass.__module__, klass.__name__)
class ProtectionDeclarationException(Exception):
    """Raised for security-protection-specific configuration errors."""
class ClassDirective:
    """Handler for the ZCML ``<class>`` directive.

    Gathers ``implements``, ``require``, ``allow`` and ``factory``
    sub-directives for a single content class and defers each of them as
    a configuration action.
    """

    def __init__(self, _context, class_):
        # The dotted name doubles as the default factory id below.
        self.__id = dottedName(class_)  # this would barf on a module, anyway
        self.__class = class_
        if isinstance(self.__class, ModuleType):  # pragma: no cover
            raise ConfigurationError('Content class attribute must be a class')
        self.__context = _context

    def implements(self, _context, interface):
        """Declare that the class implements each of the given interfaces."""
        for interface in interface:
            # object() in the discriminator makes every declaration unique,
            # so repeated <implements> directives never conflict.
            _context.action(
                discriminator=(
                    'ContentDirective', self.__class, object()),
                callable=classImplements,
                args=(self.__class, interface),
            )
            _context.action(
                discriminator=None,
                callable=provideInterface,
                args=(interface.__module__ + '.' + interface.getName(),
                      interface)
            )

    def require(self, _context,
                permission=None, attributes=None, interface=None,
                like_class=None, set_attributes=None, set_schema=None):
        """Require a permission to access a specific aspect.

        Exactly which names are protected depends on which of the keyword
        arguments are supplied; ``like_class`` copies another class's
        declarations and may appear alone.
        """
        if like_class:
            self.__mimic(_context, like_class)
        if not (interface or attributes or set_attributes or set_schema):
            if like_class:
                # A bare like_class is a complete declaration by itself.
                return
            raise ConfigurationError("Nothing required")
        if not permission:
            raise ConfigurationError("No permission specified")
        if interface:
            for i in interface:
                if i:
                    self.__protectByInterface(i, permission)
        if attributes:
            self.__protectNames(attributes, permission)
        if set_attributes:
            self.__protectSetAttributes(set_attributes, permission)
        if set_schema:
            for s in set_schema:
                self.__protectSetSchema(s, permission)

    def __mimic(self, _context, class_):
        """Base security requirements on those of the given class"""
        _context.action(
            discriminator=('mimic', self.__class, object()),
            callable=protectLikeUnto,
            args=(self.__class, class_),
        )

    def allow(self, _context, attributes=None, interface=None):
        """Like require, but with permission_id zope.Public"""
        return self.require(_context, PublicPermission, attributes, interface)

    def __protectByInterface(self, interface, permission_id):
        "Set a permission on every name defined by an interface."
        for n, d in sorted(interface.namesAndDescriptions(1)):
            self.__protectName(n, permission_id)
        self.__context.action(
            discriminator=None,
            callable=provideInterface,
            args=(interface.__module__ + '.' + interface.getName(),
                  interface)
        )

    def __protectName(self, name, permission_id):
        "Set a permission on a particular name."
        self.__context.action(
            discriminator=('protectName', self.__class, name),
            callable=protectName,
            args=(self.__class, name, permission_id)
        )

    def __protectNames(self, names, permission_id):
        "Set a permission on a bunch of names."
        for name in names:
            self.__protectName(name, permission_id)

    def __protectSetAttributes(self, names, permission_id):
        "Require a permission for *setting* each of the given attributes."
        for name in names:
            self.__context.action(
                discriminator=('protectSetAttribute', self.__class, name),
                callable=protectSetAttribute,
                args=(self.__class, name, permission_id)
            )

    def __protectSetSchema(self, schema, permission_id):
        "Require a permission for setting each writable IField of a schema."
        _context = self.__context
        for name in sorted(schema):
            field = schema[name]
            # Read-only fields and non-field names are never settable.
            if IField.providedBy(field) and not field.readonly:
                _context.action(
                    discriminator=('protectSetAttribute', self.__class, name),
                    callable=protectSetAttribute,
                    args=(self.__class, name, permission_id)
                )
        _context.action(
            discriminator=None,
            callable=provideInterface,
            args=(schema.__module__ + '.' + schema.getName(),
                  schema)
        )

    def __call__(self):
        "Handle empty/simple declaration."
        return ()

    def factory(self, _context, id=None, title="", description=''):
        """Register a zmi factory for this class"""
        id = id or self.__id
        factoryObj = Factory(self.__class, title, description)
        # note factories are all in one pile, utilities and content,
        # so addable names must also act as if they were all in the
        # same namespace, despite the utilities/content division
        utility(_context, IFactory, factoryObj,
                permission=PublicPermission, name=id)
def protectModule(module, name, permission):
    """Arrange for *name* in *module* to require *permission*.

    A module checker is created on demand if the module has none yet.
    """
    module_checker = moduleChecker(module)
    if module_checker is None:
        module_checker = Checker({}, {})
        defineChecker(module, module_checker)
    # zope.Public is spelled CheckerPublic internally.
    if permission == PublicPermission:
        permission = CheckerPublic
    # We know a dictionary get method was used because we set it above.
    module_checker.get_permissions[name] = permission
def _names(attributes, interfaces):
seen = {}
for name in attributes:
if name not in seen:
seen[name] = 1
yield name
for interface in interfaces:
for name in interface:
if name not in seen:
seen[name] = 1
yield name
def allow(context, attributes=(), interface=()):
    """ZCML handler: make the given module names publicly accessible."""
    module = context.module
    for name in _names(attributes, interface):
        context.action(
            discriminator=(
                'http://namespaces.zope.org/zope:module', module, name),
            callable=protectModule,
            args=(module, name, PublicPermission),
        )
def require(context, permission, attributes=(), interface=()):
    """ZCML handler: protect the given module names with *permission*."""
    module = context.module
    for name in _names(attributes, interface):
        context.action(
            discriminator=(
                'http://namespaces.zope.org/zope:module', module, name),
            callable=protectModule,
            args=(module, name, permission),
        )
"""A small sandbox application.
"""
import random
import time
from zope.interface import Interface
from zope.interface import implementer
class IAgent(Interface):
    """A player/agent in the world.

    The agent represents an autonomous unit that lives in various
    homes/sandboxes and accesses services present at the sandboxes. Agents
    are imbued with a sense of wanderlust and attempt to find new homes
    after a few turns of the time generator (think turn-based games).
    """

    def action():
        """Perform the agent's action against its current home."""

    def setHome(home):
        """Move to a different home."""

    def getHome():
        """Return the place where the agent currently lives."""

    def getAuthenticationToken():
        """Return the authority by which the agent performs actions."""
class IService(Interface):
    """Marker to designate some form of functionality.

    Services are available from sandboxes; examples include the time
    service, agent discovery, and sandbox (home) discovery.
    """
class ISandbox(Interface):
    """A container for agents to live in and services to be available."""

    def getService(service_id):
        """Get the service having the provided id in this sandbox."""

    def getAgents():
        """Return a list of agents living in this sandbox."""

    def addAgent(agent):
        """Add a new agent to the sandbox."""

    def transportAgent(agent, destination):
        """Move the specified agent to the destination sandbox."""
class SandboxError(Exception):
    """Raised when a sandbox action cannot be performed."""
class Identity:
    """Mixin supplying an id attribute, an accessor, and a readable repr."""

    def __init__(self, id, *args, **kw):
        self.id = id

    def getId(self):
        """Return this object's id."""
        return self.id

    def __str__(self):
        # %s applies str() to both the class name and the id.
        return "<%s> %s" % (type(self).__name__, self.id)

    __repr__ = __str__
@implementer(IAgent)
class Agent(Identity):
    """An autonomous unit living in a sandbox and running one action."""

    def __init__(self, id, home, auth_token, action):
        """Store identity, home, credentials and the action callable."""
        self.id = id
        self.home = home
        self.auth_token = auth_token
        self._action = action

    def action(self):
        """See IAgent: run the configured action against the current home."""
        self._action(self, self.getHome())

    def setHome(self, home):
        """See IAgent."""
        self.home = home

    def getHome(self):
        """See IAgent."""
        return self.home

    def getAuthenticationToken(self):
        """See IAgent."""
        return self.auth_token
@implementer(ISandbox)
class Sandbox(Identity):
    """A home for agents, with a set of installed services.

    See ISandbox.
    """

    def __init__(self, id, service_factories):
        self.id = id
        self._services = {}
        self._agents = {}
        for factory in service_factories:
            self.addService(factory())

    def getAgentIds(self):
        """Ids of all resident agents."""
        return self._agents.keys()

    def getAgents(self):
        """All resident agents."""
        return self._agents.values()

    def getServiceIds(self):
        """Ids of all installed services."""
        return self._services.keys()

    def getService(self, sid):
        """The service registered under *sid*, or None."""
        return self._services.get(sid)

    def getHome(self):
        """A sandbox is its own home."""
        return self

    def addAgent(self, agent):
        """Register a new agent and point its home here."""
        if agent.getId() in self._agents or not IAgent.providedBy(agent):
            raise SandboxError("couldn't add agent %s" % agent)
        self._agents[agent.getId()] = agent
        agent.setHome(self)

    def addService(self, service):
        """Register a new service and point its home here."""
        if service.getId() in self._services \
                or not IService.providedBy(service):
            raise SandboxError("couldn't add service %s" % service)
        self._services[service.getId()] = service
        service.setHome(self)

    def transportAgent(self, agent, destination):
        """Move a resident agent into *destination* (another sandbox)."""
        if agent.getId() in self._agents \
                and destination is not self \
                and ISandbox.providedBy(destination):
            destination.addAgent(agent)
            del self._agents[agent.getId()]
        else:
            raise SandboxError("couldn't transport agent {} to {}".format(
                agent, destination)
            )
@implementer(IService)
class Service:
    """Base class for sandbox services; the id is the class name."""

    def getId(self):
        return type(self).__name__

    def setHome(self, home):
        self._home = home

    def getHome(self):
        # Raises AttributeError if the service was never installed.
        return self._home
class HomeDiscoveryService(Service):
    """Service returning the ids of all available agent homes."""

    def getAvailableHomes(self):
        # _homes is the module-global registry of sandboxes.
        return _homes.keys()
class AgentDiscoveryService(Service):
    """Service returning the agents available at a given home."""

    def getLocalAgents(self, home):
        return home.getAgents()
class TimeService(Service):
    """Service returning the local (wall-clock) time."""

    def getTime(self):
        return time.time()
# The standard set of services installed into every new sandbox.
default_service_factories = (
    HomeDiscoveryService,
    AgentDiscoveryService,
    TimeService
)
def action_find_homes(agent, home):
    """Agent action: list the ids of all known homes via the home service."""
    return home.getService('HomeDiscoveryService').getAvailableHomes()


def action_find_neighbors(agent, home):
    """Agent action: list the agents currently living in *home*."""
    return home.getService('AgentDiscoveryService').getLocalAgents(home)


def action_find_time(agent, home):
    """Agent action: ask *home*'s time service for the current time."""
    return home.getService('TimeService').getTime()
class TimeGenerator:
    """Represents the passage of time in the agent simulation.

    Each turn represents some discrete unit of time, during which all
    agents attempt to perform their action.  Additionally, all agents are
    checked to see if they have a desire to move, and if so are
    transported to a new random home.
    """

    def setupAgent(self, agent):
        """Hook run before each agent acts (overridden by security layer)."""
        pass

    def teardownAgent(self, agent):
        """Hook run after each agent acts (overridden by security layer)."""
        pass

    def turn(self):
        """Run one turn: every agent acts, then restless agents relocate."""
        global _homes
        for h in _homes.values():
            # Materialize the agents: transportAgent() below deletes from
            # the home's agent dict, which would raise RuntimeError if we
            # iterated the live dict view.
            agents = list(h.getAgents())
            for a in agents:
                self.setupAgent(a)
                try:
                    a.action()
                except Exception as e:
                    print('-- Exception --')
                    print('"%s" in "%s" not allow to "%s"'
                          % (a, h, a._action.__name__))
                    print(e)
                    print()
                self.teardownAgent(a)
            agents = list(filter(WanderLust, agents))
            for a in agents:
                self.setupAgent(a)
                # Pre-bind new_home so the error report below cannot hit
                # a NameError when a.getHome() itself raises.
                new_home = None
                try:
                    home = a.getHome()
                    new_home = GreenerPastures(a)
                    home.transportAgent(a, new_home)
                except Exception as e:
                    print('-- Exception --')
                    print('moving "%s" from "%s" to "%s"' %
                          (a, h, repr(new_home)))
                    print(e)
                    print()
                self.teardownAgent(a)
def WanderLust(agent):
    """Roughly 30% of the time, report that the agent is ready to move."""
    roll = int(random.random() * 100)
    return 1 if roll <= 30 else None
def GreenerPastures(agent):
    """Pick a random home other than the agent's current one."""
    global _homes
    # dict.keys() returns a view on Python 3: it has no remove() and is
    # not indexable by random.choice().  Take a real list first.
    possible_homes = list(_homes)
    possible_homes.remove(agent.getHome().getId())
    return _homes.get(random.choice(possible_homes))
# boot strap initial setup.
# global list of homes
# _homes maps home id -> Sandbox; it is consulted by the discovery
# services and by the relocation logic (GreenerPastures).
_homes = {}
all_homes = (
    Sandbox('jail', default_service_factories),
    Sandbox('origin', default_service_factories),
    Sandbox('valhalla', default_service_factories)
)
# All agents start life in 'origin'.
origin = all_homes[1]
for h in all_homes:
    _homes[h.getId()] = h
agents = [
    Agent('odin', None, 'norse legend', action_find_time),
    Agent('loki', None, 'norse legend', action_find_neighbors),
    Agent('thor', None, 'norse legend', action_find_homes),
    Agent('thucydides', None, 'greek men', action_find_time),
    Agent('archimedes', None, 'greek men', action_find_neighbors),
    Agent('prometheus', None, 'greek men', action_find_homes),
]
for a in agents:
    origin.addAgent(a)
def main():
    """Run five turns of the simulation, then print each home's agents."""
    world = TimeGenerator()
    for _turn in range(5):
        print('world turning')
        world.turn()
    for home in _homes.values():
        print(home.getId(), home.getAgentIds())


if __name__ == '__main__':
    main()
import sandbox
from zope.interface import implementer
from zope.security import checker
from zope.security import management
from zope.security import simplepolicies
from zope.security.interfaces import IParticipation
# Define all permissions that will be available
NotAllowed = 'Not Allowed'
Public = checker.CheckerPublic
TransportAgent = 'Transport Agent'
AccessServices = 'Access Services'
AccessAgents = 'Access Agents'
AccessTimeService = 'Access Time Services'
AccessAgentService = 'Access Agent Service'
AccessHomeService = 'Access Home Service'
AddAgent = 'Add Agent'
# Wildcard: grants every permission in the security database.
ALL = 'All'
def NoSetAttr(name):
    """Setattr checker: every attribute write maps to NotAllowed."""
    return NotAllowed
class SimulationSecurityDatabase:
    """Security Database

    In the database, locations are mapped to authentication tokens to
    permissions.  The 'any' key grants its permissions to every token.
    """
    # Everyone can do everything at the origin.
    origin = {
        'any': [ALL]
    }
    # NOTE(review): TransportAgent appears twice in the list below --
    # harmless for membership tests, but probably unintended.
    jail = {
        'norse legend': [TransportAgent, AccessServices, AccessAgentService,
                         AccessHomeService, TransportAgent, AccessAgents],
        'any': [AccessTimeService, AddAgent]
    }
    valhalla = {
        'norse legend': [AddAgent],
        'any': [AccessServices, AccessTimeService, AccessAgentService,
                AccessHomeService, TransportAgent, AccessAgents]
    }
class SimulationSecurityPolicy(simplepolicies.ParanoidSecurityPolicy):
    """Security policy used while the simulation runs.

    Decisions are looked up in SimulationSecurityDatabase, keyed by the
    id of the home of the object being accessed.
    """

    def checkPermission(self, permission, object):
        """See zope.security.interfaces.ISecurityPolicy"""
        db = getattr(
            SimulationSecurityDatabase, object.getHome().getId(), None)
        if db is None:
            return False
        # Permissions granted to anyone at this location win outright.
        anyone = db.get('any', ())
        if permission in anyone or ALL in anyone:
            return True
        if not self.participations:
            return False
        # Otherwise every participant's token must carry the permission.
        for participation in self.participations:
            token = participation.principal.getAuthenticationToken()
            if permission not in db.get(token, ()):
                return False
        return True
@implementer(IParticipation)
class AgentParticipation:
    """Wrap an agent as the principal of a security participation."""

    def __init__(self, agent):
        """Record *agent* as the participating principal."""
        self.principal = agent
        self.interaction = None
def PermissionMapChecker(permissions_map=None, set_permissions=None):
    """Create a checker from the given 'permission_map'.

    *permissions_map* maps a permission id to a list of method names; it
    is inverted into the method -> permission form Checker expects.
    """
    if permissions_map is None:
        permissions_map = {}
    if set_permissions is None:
        set_permissions = {}
    method_permissions = {
        method: permission
        for permission, methods in permissions_map.items()
        for method in methods
    }
    return checker.Checker(method_permissions, set_permissions)
#################################
# sandbox security settings
sandbox_security = {
    AccessServices: ['getService', 'addService', 'getServiceIds'],
    # 'getAgentsIds' was a typo for Sandbox.getAgentIds; the correctly
    # spelled name is included so the real method is actually reachable.
    # The misspelled entry is kept for backward compatibility.
    AccessAgents: ['getAgentIds', 'getAgentsIds', 'getAgents'],
    AddAgent: ['addAgent'],
    TransportAgent: ['transportAgent'],
    Public: ['getId', 'getHome']
}
sandbox_checker = PermissionMapChecker(sandbox_security)
#################################
# service security settings
# time service
tservice_security = {AccessTimeService: ['getTime']}
time_service_checker = PermissionMapChecker(tservice_security)
# home service
hservice_security = {AccessHomeService: ['getAvailableHomes']}
home_service_checker = PermissionMapChecker(hservice_security)
# agent service
aservice_security = {AccessAgentService: ['getLocalAgents']}
agent_service_checker = PermissionMapChecker(aservice_security)
def wire_security():
    """Install the simulation policy and the per-class checkers."""
    management.setSecurityPolicy(SimulationSecurityPolicy)
    for klass, klass_checker in (
            (sandbox.Sandbox, sandbox_checker),
            (sandbox.TimeService, time_service_checker),
            (sandbox.AgentDiscoveryService, agent_service_checker),
            (sandbox.HomeDiscoveryService, home_service_checker)):
        checker.defineChecker(klass, klass_checker)
def addAgent(self, agent):
    """Security-aware replacement for Sandbox.addAgent.

    The agent's home is handed out wrapped in a security proxy, so all
    subsequent access by the agent goes through the checker.
    """
    if (agent.getId() not in self._agents
            and sandbox.IAgent.providedBy(agent)):
        self._agents[agent.getId()] = agent
        home_checker = checker.selectChecker(self)
        agent.setHome(home_checker.proxy(self))
    else:
        raise sandbox.SandboxError("couldn't add agent %s" % agent)


sandbox.Sandbox.addAgent = addAgent
def setupAgent(self, agent):
    """Begin a security interaction on behalf of *agent*."""
    management.newInteraction(AgentParticipation(agent))


sandbox.TimeGenerator.setupAgent = setupAgent


def teardownAgent(self, agent):
    """End the current security interaction."""
    management.endInteraction()


sandbox.TimeGenerator.teardownAgent = teardownAgent
def GreenerPastures(agent):
    """Pick a random new home (never the current one), security-proxied."""
    import random
    _homes = sandbox._homes
    # dict.keys() returns a view on Python 3: it has no remove() and is
    # not indexable by random.choice().  Take a real list first.
    possible_homes = list(_homes)
    possible_homes.remove(agent.getHome().getId())
    new_home = _homes.get(random.choice(possible_homes))
    return checker.selectChecker(new_home).proxy(new_home)


sandbox.GreenerPastures = GreenerPastures
if __name__ == '__main__':
    # Install the security machinery before running the simulation.
    wire_security()
    sandbox.main()
from zope.interface import Interface
from zope.schema import Text
from zope.schema import TextLine
# These are the "setting" values returned by several methods defined
# in these interfaces. The implementation may move to another
# location in the future, so this should be the preferred module to
# import these from.
from zope.securitypolicy.settings import Allow # noqa: F401 unused
from zope.securitypolicy.settings import Deny # noqa: F401 imported but unused
from zope.securitypolicy.settings import Unset
class IRole(Interface):
    """A role object.

    Roles group permissions so they can be granted to principals as a
    unit; see IPrincipalRoleManager and IRolePermissionManager.
    """

    id = TextLine(
        title="Id",
        description="Id as which this role will be known and used.",
        readonly=True,
        required=True)

    title = TextLine(
        title="Title",
        description="Provides a title for the role.",
        required=True)

    description = Text(
        title="Description",
        description="Provides a description for the role.",
        required=False)
class IPrincipalRoleMap(Interface):
    """Mappings between principals and roles.

    The settings in the returned tuples are Allow, Deny or Unset.
    """

    def getPrincipalsForRole(role_id):
        """Get the principals that have been granted a role.

        Return the list of (principal id, setting) who have been assigned
        or removed from a role.

        If no principals have been assigned this role, then the empty
        list is returned.
        """

    def getRolesForPrincipal(principal_id):
        """Get the roles granted to a principal.

        Return the list of (role id, setting) assigned or removed from
        this principal.

        If no roles have been assigned to this principal, then the empty
        list is returned.
        """

    def getSetting(role_id, principal_id, default=Unset):
        """Return the setting for this principal, role combination."""

    def getPrincipalsAndRoles():
        """Get all settings.

        Return all the principal/role combinations along with the
        setting for each combination as a sequence of tuples with the
        role id, principal id, and setting, in that order.
        """
class IPrincipalRoleManager(IPrincipalRoleMap):
    """Management interface for mappings between principals and roles."""

    def assignRoleToPrincipal(role_id, principal_id):
        """Assign the role to the principal."""

    def removeRoleFromPrincipal(role_id, principal_id):
        """Remove a role from the principal.

        This records an explicit denial, as opposed to
        unsetRoleForPrincipal, which removes the setting entirely.
        """

    def unsetRoleForPrincipal(role_id, principal_id):
        """Unset the role for the principal."""
class IRolePermissionMap(Interface):
    """Mappings between roles and permissions.

    The settings in the returned tuples are Allow, Deny or Unset.
    """

    def getPermissionsForRole(role_id):
        """Get the permissions granted to a role.

        Return a sequence of (permission id, setting) tuples for the
        given role.

        If no permissions have been granted to this role, then the empty
        list is returned.
        """

    def getRolesForPermission(permission_id):
        """Get the roles that have a permission.

        Return a sequence of (role id, setting) tuples for the given
        permission.

        If no roles have been granted this permission, then the empty
        list is returned.
        """

    def getSetting(permission_id, role_id, default=Unset):
        """Return the setting for the given permission id and role id.

        If there is no setting, Unset is returned.
        """

    def getRolesAndPermissions():
        """Return a sequence of (permission_id, role_id, setting) here.

        The settings are returned as a sequence of permission, role,
        setting tuples.

        If no principal/role assertions have been made here, then the
        empty list is returned.
        """
class IRolePermissionManager(IRolePermissionMap):
    """Management interface for mappings between roles and permissions."""

    def grantPermissionToRole(permission_id, role_id):
        """Bind the permission to the role."""

    def denyPermissionToRole(permission_id, role_id):
        """Deny the permission to the role."""

    def unsetPermissionFromRole(permission_id, role_id):
        """Clear the setting of the permission to the role."""
class IPrincipalPermissionMap(Interface):
    """Mappings between principals and permissions.

    The settings in the returned tuples are Allow, Deny or Unset.
    """

    def getPrincipalsForPermission(permission_id):
        """Get the principals that have a permission.

        Return the list of (principal_id, setting) tuples that describe
        security assertions for this permission.

        If no principals have been set for this permission, then the
        empty list is returned.
        """

    def getPermissionsForPrincipal(principal_id):
        """Get the permissions granted to a principal.

        Return the list of (permission, setting) tuples that describe
        security assertions for this principal.

        If no permissions have been set for this principal, then the
        empty list is returned.
        """

    def getSetting(permission_id, principal_id, default=Unset):
        """Get the setting for a permission and principal.

        Get the setting (Allow/Deny/Unset) for a given permission and
        principal.
        """

    def getPrincipalsAndPermissions():
        """Get all principal permission settings.

        Get the principal security assertions here in the form
        of a list of three tuples containing
        (permission id, principal id, setting).
        """
class IPrincipalPermissionManager(IPrincipalPermissionMap):
    """Management interface for mappings between principals and permissions."""

    def grantPermissionToPrincipal(permission_id, principal_id):
        """Assert that the permission is allowed for the principal."""

    def denyPermissionToPrincipal(permission_id, principal_id):
        """Assert that the permission is denied to the principal."""

    def unsetPermissionForPrincipal(permission_id, principal_id):
        """Remove the permission (either denied or allowed) from the
        principal.
        """
class IGrantInfo(Interface):
    """Get grant info needed for checking access."""

    def principalPermissionGrant(principal, permission):
        """Return the principal-permission grant, if any.

        The return value is one of Allow, Deny, or Unset.
        """

    def getRolesForPermission(permission):
        """Return the role grants for the permission.

        The role grants are an iterable of (role, setting) tuples, where
        setting is either Allow or Deny.
        """

    def getRolesForPrincipal(principal):
        """Return the role grants for the principal.

        The role grants are an iterable of (role, setting) tuples, where
        setting is either Allow or Deny.
        """
class IGrantVocabulary(Interface):
    """Marker interface for registering the RadioWidget."""
"""Mappings between principals and roles, stored in an object locally.
"""
from zope.authentication.principal import checkPrincipal
from zope.interface import implementer
from zope.securitypolicy.interfaces import Allow
from zope.securitypolicy.interfaces import Deny
from zope.securitypolicy.interfaces import IPrincipalRoleManager
from zope.securitypolicy.interfaces import Unset
from zope.securitypolicy.role import checkRole
from zope.securitypolicy.securitymap import AnnotationSecurityMap
from zope.securitypolicy.securitymap import SecurityMap
@implementer(IPrincipalRoleManager)
class AnnotationPrincipalRoleManager(AnnotationSecurityMap):
    """Mappings between principals and roles, stored in annotations."""

    # the annotation key is a holdover from this module's old
    # location, but cannot change without breaking existing databases
    key = 'zope.app.security.AnnotationPrincipalRoleManager'

    def assignRoleToPrincipal(self, role_id, principal_id):
        """Grant *role_id* to *principal_id* (records an Allow cell)."""
        AnnotationSecurityMap.addCell(self, role_id, principal_id, Allow)

    def removeRoleFromPrincipal(self, role_id, principal_id):
        """Deny *role_id* to *principal_id* (records a Deny cell)."""
        AnnotationSecurityMap.addCell(self, role_id, principal_id, Deny)

    # Direct reuse of the base map's primitives: rows are roles,
    # columns are principals.
    unsetRoleForPrincipal = AnnotationSecurityMap.delCell
    getPrincipalsForRole = AnnotationSecurityMap.getRow
    getRolesForPrincipal = AnnotationSecurityMap.getCol

    def getSetting(self, role_id, principal_id, default=Unset):
        """Return Allow/Deny/*default* for the role/principal pair."""
        return AnnotationSecurityMap.queryCell(
            self, role_id, principal_id, default)

    getPrincipalsAndRoles = AnnotationSecurityMap.getAllCells
@implementer(IPrincipalRoleManager)
class PrincipalRoleManager(SecurityMap):
    """Global mappings between principals and roles.

    Rows are roles and columns are principals; each cell holds an
    Allow or Deny setting.
    """

    def assignRoleToPrincipal(self, role_id, principal_id, check=True):
        """See IPrincipalRoleManager: grant *role_id* to *principal_id*."""
        if check:
            checkPrincipal(None, principal_id)
            checkRole(None, role_id)
        self.addCell(role_id, principal_id, Allow)

    def removeRoleFromPrincipal(self, role_id, principal_id, check=True):
        """See IPrincipalRoleManager: deny *role_id* to *principal_id*."""
        if check:
            checkPrincipal(None, principal_id)
            checkRole(None, role_id)
        self.addCell(role_id, principal_id, Deny)

    def unsetRoleForPrincipal(self, role_id, principal_id):
        """See IPrincipalRoleManager: drop any grant or denial."""
        # Validity is deliberately not checked here: stale or invalid
        # ids must still be removable.
        self.delCell(role_id, principal_id)

    def getPrincipalsForRole(self, role_id):
        """See IPrincipalRoleMap."""
        return self.getRow(role_id)

    def getRolesForPrincipal(self, principal_id):
        """See IPrincipalRoleMap."""
        return self.getCol(principal_id)

    def getSetting(self, role_id, principal_id, default=Unset):
        """See IPrincipalRoleMap."""
        return self.queryCell(role_id, principal_id, default)

    def getPrincipalsAndRoles(self):
        """See IPrincipalRoleMap."""
        return self.getAllCells()
# Roles are our rows, and principals are our columns
# The single global registry used for global role grants.
principalRoleManager = PrincipalRoleManager()
# Register our cleanup with Testing.CleanUp to make writing unit tests
# simpler.
try:
    from zope.testing.cleanup import addCleanUp
except ImportError:  # pragma: no cover
    pass
else:
    addCleanUp(principalRoleManager._clear)
    del addCleanUp
import zope.interface
from zope.authentication.interfaces import IAuthentication
from zope.authentication.interfaces import PrincipalLookupError
from zope.component import getUtility
from zope.security.checker import CheckerPublic
from zope.security.interfaces import ISecurityPolicy
from zope.security.management import system_user
from zope.security.proxy import removeSecurityProxy
from zope.security.simplepolicies import ParanoidSecurityPolicy
from zope.securitypolicy.interfaces import Allow
from zope.securitypolicy.interfaces import Deny
from zope.securitypolicy.interfaces import IPrincipalPermissionMap
from zope.securitypolicy.interfaces import IPrincipalRoleMap
from zope.securitypolicy.interfaces import IRolePermissionMap
from zope.securitypolicy.interfaces import Unset
from zope.securitypolicy.principalpermission import principalPermissionManager
from zope.securitypolicy.principalrole import principalRoleManager
from zope.securitypolicy.rolepermission import rolePermissionManager
# Shortcuts to the global (root) grant registries; these answer when a
# lookup walks off the top of the location tree (parent is None).
globalPrincipalPermissionSetting = principalPermissionManager.getSetting
globalRolesForPermission = rolePermissionManager.getRolesForPermission
globalRolesForPrincipal = principalRoleManager.getRolesForPrincipal
# Map the tri-state settings onto booleans; None means "no opinion"
# (keep looking / inherit).
SettingAsBoolean = {Allow: True, Deny: False, Unset: None, None: None}
class CacheEntry:
    """Bare attribute bag used to memoize per-parent security lookups."""
@zope.interface.provider(ISecurityPolicy)
class ZopeSecurityPolicy(ParanoidSecurityPolicy):
    def __init__(self, *args, **kw):
        """Create the policy and its per-interaction decision cache."""
        ParanoidSecurityPolicy.__init__(self, *args, **kw)
        # Cache: id(parent) -> (CacheEntry, parent); see cache() below.
        self._cache = {}
    def invalidate_cache(self):
        """Drop all memoized security decisions for this policy."""
        self._cache = {}
def cache(self, parent):
cache = self._cache.get(id(parent))
if cache:
cache = cache[0]
else:
cache = CacheEntry()
self._cache[id(parent)] = cache, parent
return cache
    def cached_decision(self, parent, principal, groups, permission):
        """Return (and memoize) the access decision for one principal."""
        # Return the decision for a principal and permission
        cache = self.cache(parent)
        try:
            cache_decision = cache.decision
        except AttributeError:
            cache_decision = cache.decision = {}
        cache_decision_prin = cache_decision.get(principal)
        if not cache_decision_prin:
            cache_decision_prin = cache_decision[principal] = {}
        try:
            return cache_decision_prin[permission]
        except KeyError:
            pass
        # cache_decision_prin[permission] is the cached decision for a
        # principal and permission.
        # First: an explicit principal-permission grant/denial (direct,
        # then via groups) decides outright.
        decision = self.cached_prinper(parent, principal, groups, permission)
        if (decision is None) and groups:
            decision = self._group_based_cashed_prinper(parent, principal,
                                                        groups, permission)
        if decision is not None:
            cache_decision_prin[permission] = decision
            return decision
        # Second: fall back to role-based grants -- allowed if any role
        # held by the principal (or its groups) grants the permission.
        roles = self.cached_roles(parent, permission)
        if roles:
            prin_roles = self.cached_principal_roles(parent, principal)
            if groups:
                prin_roles = self.cached_principal_roles_w_groups(
                    parent, principal, groups, prin_roles)
            for role, setting in prin_roles.items():
                if setting and (role in roles):
                    cache_decision_prin[permission] = decision = True
                    return decision
        cache_decision_prin[permission] = decision = False
        return decision
    def cached_prinper(self, parent, principal, groups, permission):
        """Return the direct principal-permission setting at *parent*,
        walking up the location tree; None means no opinion anywhere."""
        # Compute the permission, if any, for the principal.
        cache = self.cache(parent)
        try:
            cache_prin = cache.prin
        except AttributeError:
            cache_prin = cache.prin = {}
        cache_prin_per = cache_prin.get(principal)
        if not cache_prin_per:
            cache_prin_per = cache_prin[principal] = {}
        try:
            return cache_prin_per[permission]
        except KeyError:
            pass
        if parent is None:
            # Top of the tree: consult the global grant registry.
            prinper = SettingAsBoolean[
                globalPrincipalPermissionSetting(permission, principal, None)]
            cache_prin_per[permission] = prinper
            return prinper
        prinper = IPrincipalPermissionMap(parent, None)
        if prinper is not None:
            prinper = SettingAsBoolean[
                prinper.getSetting(permission, principal, None)]
            if prinper is not None:
                # A local Allow/Deny overrides anything inherited.
                cache_prin_per[permission] = prinper
                return prinper
        # No local opinion; recurse into the (unproxied) parent location.
        parent = removeSecurityProxy(getattr(parent, '__parent__', None))
        prinper = self.cached_prinper(parent, principal, groups, permission)
        cache_prin_per[permission] = prinper
        return prinper
    def _group_based_cashed_prinper(self, parent, principal, groups,
                                    permission):
        """Derive a decision from the principal's groups, recursively.

        Any allowing group wins immediately; otherwise an explicit
        denial yields False; with no opinions at all, None is returned.

        NOTE: 'cashed' is a historical misspelling of 'cached'; the name
        is kept because cached_decision() calls it by this name.
        """
        denied = False
        for group_id, ggroups in groups:
            decision = self.cached_prinper(parent, group_id, ggroups,
                                           permission)
            if (decision is None) and ggroups:
                # The group itself has no opinion; ask its own groups.
                decision = self._group_based_cashed_prinper(
                    parent, group_id, ggroups, permission)
            if decision is None:
                continue
            if decision:
                return decision
            denied = True
        if denied:
            return False
        return None
    def cached_roles(self, parent, permission):
        """Return (and memoize) {role_id: 1} for the roles granting
        *permission* at *parent*, combining inherited and local grants."""
        cache = self.cache(parent)
        try:
            cache_roles = cache.roles
        except AttributeError:
            cache_roles = cache.roles = {}
        try:
            return cache_roles[permission]
        except KeyError:
            pass
        if parent is None:
            # Top of the tree: only the global Allow grants apply.
            roles = {
                role: 1
                for (role, setting) in globalRolesForPermission(permission)
                if setting is Allow}
            cache_roles[permission] = roles
            return roles
        roles = self.cached_roles(
            removeSecurityProxy(getattr(parent, '__parent__', None)),
            permission)
        roleper = IRolePermissionMap(parent, None)
        if roleper:
            # A local Allow adds a role; a local Deny masks an
            # inherited one.
            roles = roles.copy()
            for role, setting in roleper.getRolesForPermission(permission):
                if setting is Allow:
                    roles[role] = 1
                elif role in roles:
                    del roles[role]
        cache_roles[permission] = roles
        return roles
    def cached_principal_roles_w_groups(self, parent,
                                        principal, groups, prin_roles):
        """Merge role settings from the principal's groups into its own.

        Precedence (weakest to strongest): group denials, group
        allowances, the principal's direct settings.
        """
        denied = {}
        allowed = {}
        for group_id, ggroups in groups:
            group_roles = dict(self.cached_principal_roles(parent, group_id))
            if ggroups:
                group_roles = self.cached_principal_roles_w_groups(
                    parent, group_id, ggroups, group_roles)
            for role, setting in group_roles.items():
                if setting:
                    allowed[role] = setting
                else:
                    denied[role] = setting
        # Later updates win: allowances override denials, and the
        # principal's own settings override both.
        denied.update(allowed)
        denied.update(prin_roles)
        return denied
def cached_principal_roles(self, parent, principal):
    """Return a mapping ``{role_id: bool}`` of role settings for *principal*.

    Results are memoized per location via ``self.cache(parent)``.  At
    the root the global principal/role grants apply, and everybody
    implicitly carries the 'zope.Anonymous' role; elsewhere the
    parent's settings are inherited and overridden by the local
    ``IPrincipalRoleMap``.
    """
    cache = self.cache(parent)
    try:
        cache_principal_roles = cache.principal_roles
    except AttributeError:
        cache_principal_roles = cache.principal_roles = {}
    try:
        return cache_principal_roles[principal]
    except KeyError:
        pass
    if parent is None:
        # Root: translate the Allow/Deny settings into booleans.
        roles = {
            role: SettingAsBoolean[setting]
            for (role, setting) in globalRolesForPrincipal(principal)}
        roles['zope.Anonymous'] = True  # Everybody has Anonymous
        cache_principal_roles[principal] = roles
        return roles
    roles = self.cached_principal_roles(
        removeSecurityProxy(getattr(parent, '__parent__', None)),
        principal)
    prinrole = IPrincipalRoleMap(parent, None)
    if prinrole:
        # Copy so the parent's cached mapping is not mutated.
        roles = roles.copy()
        for role, setting in prinrole.getRolesForPrincipal(principal):
            roles[role] = SettingAsBoolean[setting]
    cache_principal_roles[principal] = roles
    return roles
def checkPermission(self, permission, object):
    """Return True iff every participating principal holds *permission*
    on *object*.

    ``CheckerPublic`` is always allowed, and the system user is exempt
    from checking.
    """
    if permission is CheckerPublic:
        return True
    object = removeSecurityProxy(object)
    seen = {}
    for participation in self.participations:
        principal = participation.principal
        if principal is system_user:
            continue  # always allow system_user
        if principal.id in seen:
            continue  # each principal only needs one check
        if not self.cached_decision(
            object, principal.id, self._groupsFor(principal), permission,
        ):
            return False
        seen[principal.id] = 1
    return True
def _findGroupsFor(self, principal, getPrincipal, seen):
    """Build the nested group tree for *principal*.

    Returns a tuple of ``(group_id, subgroups)`` pairs, where
    ``subgroups`` is the same structure computed recursively.  *seen*
    is the stack of group ids on the current path, used to break
    cycles.
    """
    collected = []
    for gid in getattr(principal, 'groups', ()):
        if gid in seen:
            # Dang, we have a cycle.  We don't want to raise an
            # exception here (or do we?), so we skip the repeat.
            continue
        seen.append(gid)
        try:
            group = getPrincipal(gid)
        except PrincipalLookupError:
            # An undefined principal is bad, but we don't fail here;
            # we simply won't honor any grants for that group.
            # (Note: the id deliberately stays on *seen*.)
            continue
        subgroups = self._findGroupsFor(group, getPrincipal, seen)
        collected.append((gid, subgroups))
        seen.pop()
    return tuple(collected)
def _groupsFor(self, principal):
    """Return the (possibly nested) group tree for *principal*.

    Memoized in ``self._cache`` keyed by principal id; principals
    without a ``groups`` attribute get an empty tuple.
    """
    groups = self._cache.get(principal.id)
    if groups is None:
        groups = getattr(principal, 'groups', ())
        if groups:
            # Resolve group ids through the authentication utility.
            getPrincipal = getUtility(IAuthentication).getPrincipal
            groups = self._findGroupsFor(principal, getPrincipal, [])
        else:
            groups = ()
        self._cache[principal.id] = groups
    return groups
def settingsForObject(ob):
    """Analysis tool to show all of the grants to a process

    Walks the ``__parent__`` chain of *ob*, collecting the local
    principal/permission, principal/role, and role/permission grants at
    each location, and finishes with the global settings.  Returns a
    list of ``(name, data)`` pairs.
    """
    def _prin_perms(settings):
        return [{'principal': pr, 'permission': p, 'setting': s}
                for (p, pr, s) in sorted(settings)]

    def _prin_roles(settings):
        return [{'principal': p, 'role': r, 'setting': s}
                for (r, p, s) in sorted(settings)]

    def _role_perms(settings):
        return [{'permission': p, 'role': r, 'setting': s}
                for (p, r, s) in sorted(settings)]

    result = []
    while ob is not None:
        data = {}
        result.append((getattr(ob, '__name__', '(no name)'), data))

        principalPermissions = IPrincipalPermissionMap(ob, None)
        if principalPermissions is not None:
            data['principalPermissions'] = _prin_perms(
                principalPermissions.getPrincipalsAndPermissions())

        principalRoles = IPrincipalRoleMap(ob, None)
        if principalRoles is not None:
            data['principalRoles'] = _prin_roles(
                principalRoles.getPrincipalsAndRoles())

        rolePermissions = IRolePermissionMap(ob, None)
        if rolePermissions is not None:
            data['rolePermissions'] = _role_perms(
                rolePermissions.getRolesAndPermissions())

        ob = getattr(ob, '__parent__', None)

    # Finally, the global (site-wide) grants.
    data = {}
    result.append(('global settings', data))
    data['principalPermissions'] = _prin_perms(
        principalPermissionManager.getPrincipalsAndPermissions())
    data['principalRoles'] = _prin_roles(
        principalRoleManager.getPrincipalsAndRoles())
    data['rolePermissions'] = _role_perms(
        rolePermissionManager.getRolesAndPermissions())
    return result
"""Role implementation
"""
__docformat__ = 'restructuredtext'
from persistent import Persistent
from zope.component import getUtilitiesFor
from zope.i18nmessageid import ZopeMessageFactory as _
from zope.interface import implementer
from zope.location import Location
from zope.securitypolicy.interfaces import IRole
NULL_ID = _('<role not activated>')
@implementer(IRole)
class Role:
    """A simple in-memory role: an id, a title and an optional description."""

    def __init__(self, id, title, description=""):
        self.id, self.title, self.description = id, title, description
@implementer(IRole)
class LocalRole(Persistent, Location):
    """A persistent, locatable role.

    The id starts out as ``NULL_ID`` and is filled in by
    ``setIdOnActivation`` when the role utility is registered.
    """

    def __init__(self, title, description=""):
        self.id = NULL_ID
        self.title, self.description = title, description
def setIdOnActivation(role, event):
    """Set the role id upon registration activation.

    Let's see how this notifier can be used. First we need to create an event
    using the role instance and a registration stub:

    >>> class Registration:
    ...     def __init__(self, obj, name):
    ...         self.component = obj
    ...         self.name = name

    >>> role1 = LocalRole('Role 1', 'A first role')
    >>> role1.id
    '<role not activated>'
    >>> import zope.interface.interfaces
    >>> event = zope.interface.interfaces.Registered(
    ...     Registration(role1, 'role1'))

    Now we pass the event into this function, and the id of the role should be
    set to 'role1'.

    >>> setIdOnActivation(role1, event)
    >>> role1.id
    'role1'
    """
    # Adopt the utility registration name as the role's id.
    role.id = event.object.name
def unsetIdOnDeactivation(role, event):
    """Unset the role id upon registration deactivation.

    Let's see how this notifier can be used. First we need to create an event
    using the role instance and a registration stub:

    >>> class Registration:
    ...     def __init__(self, obj, name):
    ...         self.component = obj
    ...         self.name = name

    >>> role1 = LocalRole('Role 1', 'A first role')
    >>> role1.id = 'role1'
    >>> import zope.interface.interfaces
    >>> event = zope.interface.interfaces.Unregistered(
    ...     Registration(role1, 'role1'))

    Now we pass the event into this function, and the id of the role should be
    set to NULL_ID.

    >>> unsetIdOnDeactivation(role1, event)
    >>> role1.id
    '<role not activated>'
    """
    # Revert to the "not activated" sentinel.
    role.id = NULL_ID
def checkRole(context, role_id):
    """Verify that *role_id* names a registered ``IRole`` utility.

    Raises :exc:`ValueError` if no such role is registered.  Uses a
    short-circuiting scan instead of materializing the full list of
    utility names.
    """
    if not any(name == role_id
               for name, _util in getUtilitiesFor(IRole, context)):
        raise ValueError("Undefined role id", role_id)
"""Permission to Roles Manager (Adapter)
"""
from zope.interface import implementer
from zope.security.permission import allPermissions
from zope.securitypolicy.interfaces import Allow
from zope.securitypolicy.interfaces import Deny
from zope.securitypolicy.interfaces import IRolePermissionManager
from zope.securitypolicy.interfaces import Unset
from zope.securitypolicy.role import checkRole
from zope.securitypolicy.securitymap import AnnotationSecurityMap
from zope.securitypolicy.securitymap import SecurityMap
@implementer(IRolePermissionManager)
class AnnotationRolePermissionManager(AnnotationSecurityMap):
    """Provide adapter that manages role permission data in an object attribute

    Permissions are the rows and roles are the columns of the
    underlying annotation-backed security map.
    """

    # the annotation key is a holdover from this module's old
    # location, but cannot change without breaking existing databases
    key = 'zope.app.security.AnnotationRolePermissionManager'

    def grantPermissionToRole(self, permission_id, role_id):
        """Record an Allow setting for (permission_id, role_id)."""
        AnnotationSecurityMap.addCell(self, permission_id, role_id, Allow)

    def denyPermissionToRole(self, permission_id, role_id):
        """Record a Deny setting for (permission_id, role_id)."""
        AnnotationSecurityMap.addCell(self, permission_id, role_id, Deny)

    # Straight delegations to the underlying map.
    unsetPermissionFromRole = AnnotationSecurityMap.delCell
    getRolesForPermission = AnnotationSecurityMap.getRow
    getPermissionsForRole = AnnotationSecurityMap.getCol
    getRolesAndPermissions = AnnotationSecurityMap.getAllCells

    def getSetting(self, permission_id, role_id, default=Unset):
        """Return the stored setting for the pair, or *default*."""
        return AnnotationSecurityMap.queryCell(
            self, permission_id, role_id, default)
@implementer(IRolePermissionManager)
class RolePermissionManager(SecurityMap):
    """Global mappings between roles and permissions.

    Permissions are the rows of the underlying SecurityMap and roles
    are the columns.
    """

    def grantPermissionToRole(self, permission_id, role_id, check=True):
        """Grant *permission_id* to *role_id* (see IRolePermissionMap)."""
        if check:
            checkRole(None, role_id)
        self.addCell(permission_id, role_id, Allow)

    def grantAllPermissionsToRole(self, role_id):
        """Grant every known permission to *role_id*."""
        for permission_id in allPermissions(None):
            self.grantPermissionToRole(permission_id, role_id, False)

    def denyPermissionToRole(self, permission_id, role_id, check=True):
        """Deny *permission_id* to *role_id* (see IRolePermissionMap)."""
        if check:
            checkRole(None, role_id)
        self.addCell(permission_id, role_id, Deny)

    def unsetPermissionFromRole(self, permission_id, role_id):
        """Drop any grant/denial of *permission_id* for *role_id*.

        Ids are deliberately not validated: we certainly want to be
        able to unset invalid ones.
        """
        self.delCell(permission_id, role_id)

    def getRolesForPermission(self, permission_id):
        """Return ``[(role_id, setting), ...]`` for *permission_id*."""
        return self.getRow(permission_id)

    def getPermissionsForRole(self, role_id):
        """Return ``[(permission_id, setting), ...]`` for *role_id*."""
        return self.getCol(role_id)

    def getSetting(self, permission_id, role_id, default=Unset):
        """Return the setting for the pair, or *default* if unset."""
        return self.queryCell(permission_id, role_id, default)

    def getRolesAndPermissions(self):
        """Return all cells as ``(permission_id, role_id, setting)``."""
        return self.getAllCells()
# Permissions are our rows, and roles are our columns
# Module-global registry consulted by the ZCML grant/deny directives.
rolePermissionManager = RolePermissionManager()

# Register our cleanup with Testing.CleanUp to make writing unit tests
# simpler.
try:
    from zope.testing.cleanup import addCleanUp
except ImportError:  # pragma: no cover
    pass
else:
    addCleanUp(rolePermissionManager._clear)
    del addCleanUp
"""Generic two-dimensional array type (in context of security)
"""
from persistent import Persistent
from zope.annotation import IAnnotations
from zope.security.management import queryInteraction
class SecurityMap:
    """A sparse two-dimensional mapping of ``(row, column) -> value``.

    The data is held in two redundant indexes, ``_byrow`` and
    ``_bycol``, so that both row-wise and column-wise lookups are
    cheap.  Mutating operations keep the two indexes in sync and
    invalidate the current interaction's security cache.
    """

    def __init__(self):
        self._clear()

    def _clear(self):
        # row -> {col: value} and col -> {row: value}
        self._byrow = {}
        self._bycol = {}

    def __nonzero__(self):
        # Truthy iff the map holds at least one cell.
        return bool(self._byrow)
    __bool__ = __nonzero__

    def addCell(self, rowentry, colentry, value):
        """Store *value* at (rowentry, colentry).

        Returns True if the map changed, False if the identical value
        was already stored.  Note the identity (``is``) comparison,
        which suits singleton settings used as values.
        """
        # setdefault may get expensive if an empty mapping is
        # expensive to create, for PersistentDict for instance.
        row = self._byrow.get(rowentry)
        if row:
            if row.get(colentry) is value:
                return False
        else:
            row = self._byrow[rowentry] = {}
        col = self._bycol.get(colentry)
        if not col:
            col = self._bycol[colentry] = {}
        row[colentry] = value
        col[rowentry] = value
        self._invalidated_interaction_cache()
        return True

    def _invalidated_interaction_cache(self):
        # Invalidate this thread's interaction cache, if the
        # interaction supports it.
        interaction = queryInteraction()
        if interaction is not None:
            try:
                invalidate_cache = interaction.invalidate_cache
            except AttributeError:
                pass
            else:
                invalidate_cache()

    def delCell(self, rowentry, colentry):
        """Remove the cell at (rowentry, colentry), pruning empty buckets.

        Returns True if a cell was removed, False otherwise.
        """
        row = self._byrow.get(rowentry)
        if row and (colentry in row):
            del row[colentry]
            if not row:
                del self._byrow[rowentry]
            col = self._bycol[colentry]
            del col[rowentry]
            if not col:
                del self._bycol[colentry]
            self._invalidated_interaction_cache()
            return True
        return False

    def queryCell(self, rowentry, colentry, default=None):
        """Return the value at (rowentry, colentry), or *default*."""
        row = self._byrow.get(rowentry)
        if row:
            return row.get(colentry, default)
        else:
            return default

    def getCell(self, rowentry, colentry):
        """Return the value at (rowentry, colentry); raise KeyError if unset."""
        marker = object()
        cell = self.queryCell(rowentry, colentry, marker)
        if cell is marker:
            raise KeyError('Not a valid row and column pair.')
        return cell

    def getRow(self, rowentry):
        """Return ``[(col, value), ...]`` for *rowentry* ([] if none)."""
        row = self._byrow.get(rowentry)
        if row:
            return list(row.items())
        else:
            return []

    def getCol(self, colentry):
        """Return ``[(row, value), ...]`` for *colentry* ([] if none)."""
        col = self._bycol.get(colentry)
        if col:
            return list(col.items())
        else:
            return []

    def getAllCells(self):
        """Return a list of ``(row, col, value)`` triples for every cell."""
        res = []
        for r in self._byrow.keys():
            for c in self._byrow[r].items():
                res.append((r,) + c)
        return res
class PersistentSecurityMap(SecurityMap, Persistent):
    """A SecurityMap that flags itself changed for ZODB on every mutation."""

    def addCell(self, rowentry, colentry, value):
        # Only mark the object dirty when the map actually changed.
        if super().addCell(rowentry, colentry, value):
            self._p_changed = 1

    def delCell(self, rowentry, colentry):
        if super().delCell(rowentry, colentry):
            self._p_changed = 1
class AnnotationSecurityMap(SecurityMap):
    """A SecurityMap stored in the context's annotations under ``self.key``.

    The map starts out as a plain in-memory structure and is promoted
    to a ``PersistentSecurityMap`` (and written to the annotations) the
    first time it is mutated.
    """

    def __init__(self, context):
        self.__parent__ = context
        self._context = context
        stored = IAnnotations(self._context).get(self.key)
        if stored is None:
            self._byrow = {}
            self._bycol = {}
        else:
            # Share the stored map's indexes directly.
            self._byrow = stored._byrow
            self._bycol = stored._bycol
        self.map = stored

    def _changed(self):
        stored = self.map
        if isinstance(stored, PersistentSecurityMap):
            # Already persisted; just mark it dirty.
            stored._p_changed = 1
            return
        # First mutation: promote to a persistent map and store it.
        stored = self.map = PersistentSecurityMap()
        stored._byrow = self._byrow
        stored._bycol = self._bycol
        IAnnotations(self._context)[self.key] = stored

    def addCell(self, rowentry, colentry, value):
        if SecurityMap.addCell(self, rowentry, colentry, value):
            self._changed()

    def delCell(self, rowentry, colentry):
        if SecurityMap.delCell(self, rowentry, colentry):
            self._changed()
""" Register security related configuration directives.
"""
from zope.component.zcml import utility
from zope.configuration.exceptions import ConfigurationError
from zope.securitypolicy.interfaces import IRole
from zope.securitypolicy.principalpermission import \
principalPermissionManager as principal_perm_mgr
from zope.securitypolicy.principalrole import \
principalRoleManager as principal_role_mgr
from zope.securitypolicy.role import Role
from zope.securitypolicy.rolepermission import \
rolePermissionManager as role_perm_mgr
def grant(_context, principal=None, role=None, permission=None,
          permissions=None):
    """Handle the ``grant`` ZCML directive.

    Exactly two of principal / role / permission(s) must be given; the
    corresponding grant is deferred as a configuration action on the
    appropriate global manager.
    """
    count = ((principal is not None)
             + (role is not None)
             + (permission is not None)
             + (permissions is not None))
    perm_count = ((permission is not None)
                  + (permissions is not None))
    if count != 2 or perm_count == 2:
        raise ConfigurationError(
            "Exactly two of the principal, role, and permission resp. "
            "permissions attributes must be specified")
    if permission:
        # Normalize a single permission to the list form.
        permissions = [permission]

    if principal and role:
        _context.action(
            discriminator=('grantRoleToPrincipal', role, principal),
            callable=principal_role_mgr.assignRoleToPrincipal,
            args=(role, principal),
        )
    elif principal and permissions:
        for permission in permissions:
            _context.action(
                discriminator=('grantPermissionToPrincipal',
                               permission,
                               principal),
                callable=principal_perm_mgr.grantPermissionToPrincipal,
                args=(permission, principal),
            )
    elif role and permissions:
        for permission in permissions:
            _context.action(
                discriminator=('grantPermissionToRole', permission, role),
                callable=role_perm_mgr.grantPermissionToRole,
                args=(permission, role),
            )
def deny(_context, principal=None, role=None, permission=None,
         permissions=None):
    """Handle the ``deny`` ZCML directive.

    Mirrors :func:`grant`: exactly two of principal / role /
    permission(s) must be given, and the matching removal or denial is
    deferred as a configuration action.
    """
    count = ((principal is not None)
             + (role is not None)
             + (permission is not None)
             + (permissions is not None))
    perm_count = ((permission is not None)
                  + (permissions is not None))
    if count != 2 or perm_count == 2:
        raise ConfigurationError(
            "Exactly two of the principal, role, and permission resp. "
            "permissions attributes must be specified")
    if permission:
        # Normalize a single permission to the list form.
        permissions = [permission]

    if principal and role:
        _context.action(
            discriminator=('denyRoleFromPrincipal', role, principal),
            callable=principal_role_mgr.removeRoleFromPrincipal,
            args=(role, principal),
        )
    elif principal and permissions:
        for permission in permissions:
            _context.action(
                discriminator=('denyPermissionToPrincipal',
                               permission,
                               principal),
                callable=principal_perm_mgr.denyPermissionToPrincipal,
                args=(permission, principal),
            )
    elif role and permissions:
        for permission in permissions:
            _context.action(
                discriminator=('denyPermissionToRole', permission, role),
                callable=role_perm_mgr.denyPermissionToRole,
                args=(permission, role),
            )
def grantAll(_context, principal=None, role=None):
    """Grant all permissions to a role or principal

    Exactly one of *principal* and *role* must be specified.
    """
    # Both-or-neither specified is an error.
    if (principal is not None) == (role is not None):
        raise ConfigurationError(
            "Exactly one of the principal and role attributes "
            "must be specified")

    if principal:
        _context.action(
            discriminator=('grantAllPermissionsToPrincipal',
                           principal),
            callable=principal_perm_mgr.grantAllPermissionsToPrincipal,
            args=(principal, ),
        )
    else:
        _context.action(
            discriminator=('grantAllPermissionsToRole', role),
            callable=role_perm_mgr.grantAllPermissionsToRole,
            args=(role, ),
        )
def defineRole(_context, id, title, description=''):
    """Handle the ``defineRole`` ZCML directive.

    Creates a transient :class:`Role` and registers it as a named
    ``IRole`` utility.
    """
    utility(_context, IRole, Role(id, title, description), name=id)
from zope.authentication.principal import checkPrincipal
from zope.interface import implementer
from zope.security.permission import allPermissions
from zope.securitypolicy.interfaces import Allow
from zope.securitypolicy.interfaces import Deny
from zope.securitypolicy.interfaces import IPrincipalPermissionManager
from zope.securitypolicy.interfaces import Unset
from zope.securitypolicy.securitymap import AnnotationSecurityMap
from zope.securitypolicy.securitymap import SecurityMap
@implementer(IPrincipalPermissionManager)
class AnnotationPrincipalPermissionManager(AnnotationSecurityMap):
    """Mappings between principals and permissions.

    Permissions are the rows and principals are the columns of the
    underlying annotation-backed security map.
    """

    # the annotation key is a holdover from this module's old
    # location, but cannot change without breaking existing databases
    # It is also is misspelled, but that's OK. It just has to be unique.
    # we'll keep it as is, to prevent breaking old data:
    key = 'zopel.app.security.AnnotationPrincipalPermissionManager'

    def grantPermissionToPrincipal(self, permission_id, principal_id):
        """Record an Allow setting for (permission_id, principal_id)."""
        AnnotationSecurityMap.addCell(self, permission_id, principal_id, Allow)

    def denyPermissionToPrincipal(self, permission_id, principal_id):
        """Record a Deny setting for (permission_id, principal_id)."""
        AnnotationSecurityMap.addCell(self, permission_id, principal_id, Deny)

    # Straight delegations to the underlying map.
    unsetPermissionForPrincipal = AnnotationSecurityMap.delCell
    getPrincipalsForPermission = AnnotationSecurityMap.getRow
    getPermissionsForPrincipal = AnnotationSecurityMap.getCol

    def getSetting(self, permission_id, principal_id, default=Unset):
        """Return the stored setting for the pair, or *default*."""
        return AnnotationSecurityMap.queryCell(
            self, permission_id, principal_id, default)

    getPrincipalsAndPermissions = AnnotationSecurityMap.getAllCells
@implementer(IPrincipalPermissionManager)
class PrincipalPermissionManager(SecurityMap):
    """Global mappings between principals and permissions.

    Permissions are the rows of the underlying SecurityMap and
    principals are the columns.
    """

    def grantPermissionToPrincipal(self, permission_id, principal_id,
                                   check=True):
        """Grant *permission_id* to *principal_id*
        (see IPrincipalPermissionManager)."""
        if check:
            checkPrincipal(None, principal_id)
        self.addCell(permission_id, principal_id, Allow)

    def grantAllPermissionsToPrincipal(self, principal_id):
        """Grant every known permission to *principal_id*."""
        for permission_id in allPermissions(None):
            self.grantPermissionToPrincipal(permission_id, principal_id, False)

    def denyPermissionToPrincipal(self, permission_id, principal_id,
                                  check=True):
        """Deny *permission_id* to *principal_id*
        (see IPrincipalPermissionManager)."""
        if check:
            checkPrincipal(None, principal_id)
        self.addCell(permission_id, principal_id, Deny)

    def unsetPermissionForPrincipal(self, permission_id, principal_id):
        """Drop any grant/denial for the pair.

        Ids are deliberately not validated: we certainly want to be
        able to unset invalid ones.
        """
        self.delCell(permission_id, principal_id)

    def getPrincipalsForPermission(self, permission_id):
        """Return ``[(principal_id, setting), ...]`` for *permission_id*."""
        return self.getRow(permission_id)

    def getPermissionsForPrincipal(self, principal_id):
        """Return ``[(permission_id, setting), ...]`` for *principal_id*."""
        return self.getCol(principal_id)

    def getSetting(self, permission_id, principal_id, default=Unset):
        """Return the setting for the pair, or *default* if unset."""
        return self.queryCell(permission_id, principal_id, default)

    def getPrincipalsAndPermissions(self):
        """Return all cells as ``(permission_id, principal_id, setting)``."""
        return self.getAllCells()
# Permissions are our rows, and principals are our columns
# Module-global registry consulted by the ZCML grant/deny directives.
principalPermissionManager = PrincipalPermissionManager()

# Register our cleanup with Testing.CleanUp to make writing unit tests
# simpler.
try:
    from zope.testing.cleanup import addCleanUp
except ImportError:  # pragma: no cover
    pass
else:
    addCleanUp(principalPermissionManager._clear)
    del addCleanUp
=========
Changes
=========
6.0 (2023-08-22)
================
- Drop support for Python 2.7, 3.5, 3.6.
- Add support for Python 3.11.
5.3 (2022-10-06)
================
- Add support for Python 3.10.
- Do not try to send queued emails to an empty address
(`#45 <https://github.com/zopefoundation/zope.sendmail/issues/45>`_).
5.2 (2021-01-18)
================
- Add minimal savepoint support, so we do not fail if any code tries to create a savepoint.
(`#35 <https://github.com/zopefoundation/zope.sendmail/issues/35>`_).
- Fix TypeError: 'error' object is not subscriptable during error handling on
Windows (`#33 <https://github.com/zopefoundation/zope.sendmail/pull/33>`_).
- Add support for Python 3.9.
5.1 (2020-07-31)
================
- Use ``pywin32`` again, not any longer the meanwhile outdated fork named ``pypiwin32``.
Add some information for installation with buildout.
(`#30 <https://github.com/zopefoundation/zope.sendmail/issues/30>`_)
- Support ``bytes`` messages; consistently convert messages
using a "text" type (i.e. ``str`` for Python 3, ``unicode`` for Python 2)
into ``bytes`` via utf-8 encoding.
Prerequisite to fix
`Products.MailHost#30
<https://github.com/zopefoundation/Products.MailHost/issues/30>`_.
5.0 (2019-04-03)
================
- Drop support for Python 3.4.
- Add support for Python 3.8a3.
- Fix text/bytes issue in MailDir for Python 3.
(`#24 <https://github.com/zopefoundation/zope.sendmail/pull/24>`_)
4.2.1 (2019-02-07)
==================
- Fix SMTP authentication on Python 3. See `issue 16
<https://github.com/zopefoundation/zope.sendmail/issues/16>`_.
4.2 (2018-10-10)
================
- Add support for Python 3.7.
4.1.0 (2017-09-02)
==================
- Host documentation at https://zopesendmail.readthedocs.io/
- Make the data manager sort key a string, this fixes Python 3 where
strings and integers are not sortable. This would happen when using
other data managers with string sort keys.
- Add support for Python 3.5 and 3.6.
- Drop support for Python 2.6 and 3.3.
- Declare explicit dependency on ``pywin32`` on Windows.
- Replace hard-coded constants with equivalents from the standard
``errno`` module.
- Fix SSL support on Python 3. See `issue 9
<https://github.com/zopefoundation/zope.sendmail/issues/9>`_.
- Reach 100% test coverage and maintain it via tox.ini and Travis CI.
- Replaced deprecated dependency on ``optparse`` with equivalent
``argparse``. The help messages have changed and errors are
generally more clear. Specifying a ``--config`` path that doesn't
exist is now an error instead of being silently ignored.
- Fix SMTPMailer sending more than one message. It now reconnects to
the SMTP server as needed. Previously it could only send one message
since it closed the connection after each send. This also makes the
SMTPMailer thread safe. See `issue 1
<https://github.com/zopefoundation/zope.sendmail/issues/1>`_.
4.0.1 (2014-12-29)
==================
- Add support for PyPy3.
4.0.0 (2014-12-20)
==================
- Add support for testing on Travis-CI against supported Python versions.
- Drop use of ``zope.testrunner`` for testing.
- Drop dependency on ``six``.
- Replace doctests with equivalent unittests.
4.0.0a2 (2013-02-26)
====================
- Fix license Trove classifier.
4.0.0a1 (2013-02-25)
====================
- Add support for Python 3.3.
- Delete event fossils (interfaces ``zope.sendmail.interfaces.IMailSent`` and
``zope.sendmail.interfaces.IMailError``. plus the ``zope.sendmail.events``
module and associated tests). These events were never emitted, and couldn't
have been used safely even if they had been, due to two-phase commit.
https://bugs.launchpad.net/zope3/+bug/177739
- Replace deprecated ``zope.interface.classProvides`` usage with equivalent
``zope.interface.provider`` decorator.
- Replace deprecated ``zope.interface.implements`` usage with equivalent
``zope.interface.implementer`` decorator.
- Drop support for Python 2.4 and 2.5.
- Add a vote method to Mailer implementations to allow them to abort a
transaction if it is known to be unsafe.
- Prevent fatal errors in mail delivery causing potential database corruption.
- Add not declared, but needed test dependency on `zope.component [test]`.
- Add handling for unicode usernames and passwords, encoding them to UTF-8.
Fix for https://bugs.launchpad.net/zope.sendmail/+bug/597143
- Give the background queue processor thread a name.
- Document the ini file keys for ``zope-sendmail --config`` in the help
message printed by ``zope-sendmail --help``. Also rewrote the command-line
parsing to use optparse (not argparse, since Python 2.6 is still supported).
3.7.5 (2012-05-23)
==================
- Ensure that the 'queuedDelivery' directive has the same discriminator
as the 'directDelivery' directive (they are mutually incompatible).
https://bugs.launchpad.net/zope.sendmail/+bug/191143
- Avoid requeuing messages after an SMTP "recipients refused" error.
https://bugs.launchpad.net/zope.sendmail/+bug/1003288
3.7.4 (2010-10-01)
==================
- Handle unicode usernames and passwords, encoding them to UTF-8. Fix for
https://bugs.launchpad.net/zope.sendmail/+bug/597143
3.7.3 (2010-09-25)
==================
- Add not declared, but needed test dependency on `zope.component [test]`.
3.7.2 (2010-04-30)
==================
- Remove no longer required testing dependency on zope.testing.
- Maildir storage for queue can now handle unicode passed in for message or
to/from addresses (change backported from repoze.sendmail).
- Tests use stdlib doctest instead of zope.testing.doctest.
3.7.1 (2010-01-13)
==================
- Backward compatibility import of zope.sendmail.queue.QueueProcessorThread in
zope.sendmail.delivery.
3.7.0 (2010-01-12)
==================
- Remove dependency on ``zope.security``: the security support is optional,
and only available if the ``zope.security`` package is available. This change
is similar to the optional security support introduced in ``zope.component``
3.8.0, and in fact it uses the same helpers.
- Sort by modification time the messages in zope.sendmail.maildir so earlier
messages are sent before later messages during queue processing.
- Add the new parameter ``processorThread`` to the queuedDelivery ZCML
directive: if False, the QueueProcessorThread is not started and thus an
independent process must process the queue; it defaults to True for b/c.
- Provide a console script ``zope-sendmail`` which can be used to process the
delivery queue in case processorThread is False. The console script can
either process the messages in the queue once, or run in "daemon" mode.
3.6.1 (2009-11-16)
==================
- Depend on ``zope.component`` >= 3.8.0, which supports the new semantic of
zope.component.zcml.proxify needed by zope.sendmail.zcml.
3.6.0 (2009-09-14)
==================
- Use simple vocabulary factory function instead of custom `UtilityTerm`
and `UtilityVocabulary` classes, copied from ``zope.app.component`` in
the previous release.
- Depend on the ``transaction`` package instead of ``ZODB3``.
- Remove zcml slugs and zpkg-related files.
- Work around problem when used with Python >=2.5.1. See
https://bugs.edge.launchpad.net/zope.sendmail/+bug/413335 .
3.5.1 (2009-01-26)
==================
- Copyover the UtilityTerm and UtilityVocabulary implementation from
zope.app.component to avoid a dependency.
- Work around a problem when smtp quit fails, the mail was considered not
delivered where just the quit failed.
3.5.0 (2008-07-05)
==================
- final release (identical with 3.5.0b2)
3.5.0b2 (2007-12-19)
====================
- If the SMTP server rejects a message (for example, when the sender or
recipient address is malformed), that email stays in the queue forever
(https://bugs.launchpad.net/zope3/+bug/157104).
3.5.0b1 (2007-11-08)
====================
- Add README.txt
- Can now talk to servers that don't implement EHLO
- Fix bug that caused files with very long names to be created
- Fix for https://bugs.launchpad.net/zope3/+bug/157104: move aside mail that's
causing 5xx server responses.
3.5.0a2 (2007-10-23)
====================
- Clean up ``does_esmtp`` in faux SMTP connection classes provided by the
tests.
- If the ``QueueProcessorThread`` is asked to stop while sending messages, do
so after sending the current message; previously if there were many, many
messages to send, the thread could stick around for quite a while.
3.5.0a1 (2007-10-23)
====================
- ``QueueProcessorThread`` now accepts an optional parameter *interval* for
defining how often to process the mail queue (default is 3 seconds)
- Several ``QueueProcessorThreads`` (either in the same process, or multiple
processes) can now deliver messages from a single maildir without duplicates
being sent.
3.4.0 (2007-08-20)
==================
- Bugfix: Don't keep open files around for every email message
to be sent on transaction commit. People who try to send many emails
in a single transaction now will not run out of file descriptors.
3.4.0a1 (2007-04-22)
====================
Initial release as a separate project, corresponds to ``zope.sendmail``
from Zope 3.4.0a1.
| zope.sendmail | /zope.sendmail-6.0.tar.gz/zope.sendmail-6.0/CHANGES.rst | CHANGES.rst |
===============
zope.sendmail
===============
.. image:: https://img.shields.io/pypi/v/zope.sendmail.svg
:target: https://pypi.python.org/pypi/zope.sendmail/
:alt: Latest release
.. image:: https://img.shields.io/pypi/pyversions/zope.sendmail.svg
:target: https://pypi.org/project/zope.sendmail/
:alt: Supported Python versions
.. image:: https://github.com/zopefoundation/zope.sendmail/actions/workflows/tests.yml/badge.svg
:target: https://github.com/zopefoundation/zope.sendmail/actions/workflows/tests.yml
.. image:: https://coveralls.io/repos/github/zopefoundation/zope.sendmail/badge.svg?branch=master
:target: https://coveralls.io/github/zopefoundation/zope.sendmail?branch=master
.. image:: https://readthedocs.org/projects/zopesendmail/badge/?version=latest
:target: https://zopesendmail.readthedocs.io/en/latest/
:alt: Documentation Status
zope.sendmail is a package for email sending from Zope 3 applications.
Email sending from Zope 3 applications works as follows:
A Zope 3 application locates a mail delivery utility
(``IMailDelivery``) and feeds a message to it. It gets back a unique
message ID so it can keep track of the message by subscribing to
``IMailEvent`` events.
The utility registers with the transaction system to make sure the
message is only sent when the transaction commits successfully.
(Among other things this avoids duplicate messages on
``ConflictErrors``.)
If the delivery utility is a ``IQueuedMailDelivery``, it puts the
message into a queue (a Maildir mailbox in the file system). A
separate process or thread (``IMailQueueProcessor``) watches the queue
and delivers messages asynchronously. Since the queue is located in
the file system, it survives Zope restarts or crashes and the mail is
not lost. The queue processor can implement batching to keep the
server load low.
If the delivery utility is a ``IDirectMailDelivery``, it delivers
messages synchronously during the transaction commit. This is not a
very good idea, as it makes the user wait. Note that transaction
commits must not fail, but that is not a problem, because mail
delivery problems dispatch an event instead of raising an exception.
However, there is a problem -- sending events causes unknown code to
be executed during the transaction commit phase. There should be a
way to start a new transaction for event processing after this one is
committed.
An ``IMailQueueProcessor`` or ``IDirectMailDelivery`` actually
delivers the messages by using a mailer (``IMailer``) component that
encapsulates the delivery process. There currently is only one
mailer:
``ISMTPMailer`` sends all messages to a relay host using SMTP.
Documentation is hosted at https://zopesendmail.readthedocs.io/
| zope.sendmail | /zope.sendmail-6.0.tar.gz/zope.sendmail-6.0/README.rst | README.rst |
=====================
Using zope.sendmail
=====================
This package is useful when your Zope 3 application wants to send email. It
integrates with the transaction mechanism and queues your emails to be sent on
successful commits only.
Installation on Windows/ Buildout
=================================
When using Windows, `pywin32 <https://github.com/mhammond/pywin32>`_ is required.
``pywin32`` can only be installed using ``pip``, it **does not work with ``zc.buildout``**.
When using buildout, prior to the execution of buildout, use ``pip`` to install ``pywin32``.
API
===
An application that wants to send an email can do so by getting the appropriate
:class:`zope.sendmail.interfaces.IMailDelivery` utility. The standard library's email module is useful for
formatting the message according to RFC-2822::
    import email.header
    import email.mime.text
    from zope.sendmail.interfaces import IMailDelivery
    from zope.component import getUtility
    def send_email(sender, recipient, subject, body):
        msg = email.mime.text.MIMEText(body, 'plain', 'UTF-8')
        msg["From"] = sender
        msg["To"] = recipient
        msg["Subject"] = email.header.Header(subject, 'UTF-8')
        mailer = getUtility(IMailDelivery, 'my-app.mailer')
        mailer.send(sender, [recipient], msg.as_string())
In real-world code you may need to do extra work to format the 'From' and 'To'
headers correctly, if the addresses contain a real-name part with non-ASCII
characters. You can find a recipe for that in this blog post:
http://mg.pov.lt/blog/unicode-emails-in-python.html
Configuration
=============
The code above used a named :class:`~.IMailDelivery` utility. It is your responsibility
to define one, as Zope 3 doesn't provide one by default. You can define
an IMailDelivery utility in your site.zcml with a configuration directive::
<configure xmlns="http://namespaces.zope.org/zope"
          xmlns:mail="http://namespaces.zope.org/mail">
<mail:queuedDelivery
name="my-app.mailer"
permission="zope.Public"
mailer="smtp"
queuePath="var/mailqueue"
/>
</configure>
The ``mail:queuedDelivery`` directive stores every email in a queue (a standard
Maildir folder on the file system in a given directory) and sends them from a
background thread. There's an alternative directive, ``mail:directDelivery``,
that sends them from the same thread. This may slow down transaction commits
(especially if the SMTP server is slow to respond) and increase the loading
time of web pages.
Mailers
=======
The ``mailer`` argument of the ``mail:queuedDelivery`` utility chooses the
appropriate IMailer utility that will be used to deliver email. There
are alternative ways of doing that, for example, SMTP or piping the message to
an external program. Currently ``zope.sendmail`` supports only plain SMTP.
[#]_
.. [#] There was once a mailer utility that invoked /usr/sbin/sendmail, but
it had security issues related to the difficulty of quoting command-line
arguments in a portable way.
If the same system that runs your Zope 3 server also has an SMTP server on
port 25, you can use the default ``smtp`` mailer. If you want to use a
different SMTP server, define your own utility like this::
<configure xmlns="http://namespaces.zope.org/zope"
          xmlns:mail="http://namespaces.zope.org/mail">
<mail:smtpMailer
name="my-app.smtp"
hostname="mail.my-app.com"
port="25"
/>
<mail:queuedDelivery
name="my-app.mailer"
permission="zope.Public"
mailer="my-app.smtp"
      queuePath="var/mailqueue"
/>
</configure>
Testing
=======
Obviously, you don't want your automated unit/functional test runs to send
real emails. You'll have to define a fake email delivery utility in your
test layer. Something like this will do the trick::
    @implementer(IMailDelivery)
class FakeMailDelivery(object):
def send(self, source, dest, body):
print("*** Sending email from %s to %s:" % (source, dest))
print(body)
return '[email protected]'
Register it with the standard ``utility`` directive::
<utility name="my-app.mailer" factory="my-app.testing.FakeMailDelivery" />
Problems with zope.sendmail
===========================
* The API is a bit inconvenient to use (e.g. you have to do the message
formatting by yourself).
* The configuration should be done in zope.conf, not in ZCML.
| zope.sendmail | /zope.sendmail-6.0.tar.gz/zope.sendmail-6.0/src/zope/sendmail/README.rst | README.rst |
"""'mail' ZCML Namespaces Schemas
"""
__docformat__ = 'restructuredtext'
from zope.component import getUtility
from zope.component.zcml import handler
from zope.configuration.exceptions import ConfigurationError
from zope.configuration.fields import Path
from zope.interface import Interface
from zope.schema import ASCIILine
from zope.schema import Bool
from zope.schema import Int
from zope.schema import TextLine
from zope.sendmail.delivery import DirectMailDelivery
from zope.sendmail.delivery import QueuedMailDelivery
from zope.sendmail.interfaces import IMailDelivery
from zope.sendmail.interfaces import IMailer
from zope.sendmail.mailer import SMTPMailer
from zope.sendmail.queue import QueueProcessorThread
try:
    from zope.component.security import proxify
    from zope.security.zcml import Permission
except ImportError: # pragma: no cover
    # zope.security is an optional dependency: without it, requesting a
    # permission in a delivery directive is a configuration error.
    SECURITY_SUPPORT = False
    Permission = TextLine
    def _assertPermission(permission, interfaces, component):
        raise ConfigurationError(
            "security proxied components are not "
            "supported because zope.security is not available")
else:
    SECURITY_SUPPORT = True
    def _assertPermission(permission, interfaces, component):
        # Wrap `component` in a security proxy requiring `permission`.
        return proxify(component, provides=interfaces, permission=permission)
class IDeliveryDirective(Interface):
    """This abstract directive describes a generic mail delivery utility
    registration."""
    # Shared schema for the queuedDelivery and directDelivery handlers below.
    name = TextLine(
        title="Name",
        description='Specifies the Delivery name of the mail utility. '
                    'The default is "Mail".',
        default="Mail",
        required=False)
    mailer = TextLine(
        title="Mailer",
        description="Defines the mailer to be used for sending mail.",
        required=True)
    permission = Permission(
        title="Permission",
        description="Defines the permission needed to use this service.",
        required=False)
class IQueuedDeliveryDirective(IDeliveryDirective):
    """This directive creates and registers a global queued mail utility. It
    should be only called once during startup."""
    queuePath = Path(
        title="Queue Path",
        description="Defines the path for the queue directory.",
        required=True)
    processorThread = Bool(
        title="Run Queue Processor Thread",
        description=("Indicates whether to run queue processor in a thread "
                     "in this process."),
        required=False,
        default=True)
def _get_mailer(mailer):
    """Look up the named ``IMailer`` utility, turning a missing
    registration into a ``ConfigurationError``."""
    try:
        mailer_component = getUtility(IMailer, mailer)
    except LookupError:
        raise ConfigurationError("Mailer %r is not defined" % mailer)
    return mailer_component
def queuedDelivery(_context, queuePath, mailer, permission=None, name="Mail",
                   processorThread=True):
    """Handler for the ``mail:queuedDelivery`` directive.

    Defers the actual work to configuration-action execution time:
    registers a ``QueuedMailDelivery`` utility (security-proxied when a
    permission is given) and, if requested, starts the background queue
    processor thread with the named mailer.
    """
    def _register():
        delivery = QueuedMailDelivery(queuePath)
        if permission is not None:
            delivery = _assertPermission(permission, IMailDelivery, delivery)
        handler('registerUtility', delivery, IMailDelivery, name)
        mailerObject = _get_mailer(mailer)
        if processorThread:
            processor = QueueProcessorThread()
            processor.setMailer(mailerObject)
            processor.setQueuePath(queuePath)
            processor.start()
    _context.action(
        discriminator=('utility', IMailDelivery, name),
        callable=_register,
        args=())
class IDirectDeliveryDirective(IDeliveryDirective):
    """This directive creates and registers a global direct mail utility. It
    should be only called once during startup."""
    # No additional fields: direct delivery needs no queue directory.
def directDelivery(_context, mailer, permission=None, name="Mail"):
    """Handler for the ``mail:directDelivery`` directive.

    At action-execution time, registers a ``DirectMailDelivery`` utility
    wrapping the named mailer, security-proxied when a permission is given.
    """
    def _register():
        delivery = DirectMailDelivery(_get_mailer(mailer))
        if permission is not None:
            delivery = _assertPermission(permission, IMailDelivery, delivery)
        handler('registerUtility', delivery, IMailDelivery, name)
    _context.action(
        discriminator=('utility', IMailDelivery, name),
        callable=_register,
        args=())
class IMailerDirective(Interface):
    """A generic directive registering a mailer for the mail utility."""
    # Base schema shared by concrete mailer directives (e.g. smtpMailer).
    name = TextLine(
        title="Name",
        description="Name of the Mailer.",
        required=True)
class ISMTPMailerDirective(IMailerDirective):
    """Registers a new SMTP mailer."""
    hostname = ASCIILine(
        title="Hostname",
        description="Hostname of the SMTP host.",
        default="localhost",
        required=False)
    port = Int(
        title="Port",
        description="Port of the SMTP server.",
        default=25,
        required=False)
    # Username and password are optional; SMTPMailer only authenticates
    # when both are provided.
    username = TextLine(
        title="Username",
        description="A username for SMTP AUTH.",
        required=False)
    password = TextLine(
        title="Password",
        description="A password for SMTP AUTH.",
        required=False)
def smtpMailer(_context, name, hostname="localhost", port=25,
               username=None, password=None):
    """Handler for the ``mail:smtpMailer`` directive.

    Registers an `SMTPMailer` as a named `IMailer` utility.

    The default for `port` is the integer 25, matching the ``Int`` field
    declared by `ISMTPMailerDirective` (it used to be the string "25",
    which only worked because `SMTPMailer` stringifies the port itself).
    """
    _context.action(
        discriminator=('utility', IMailer, name),
        callable=handler,
        args=('registerUtility',
              SMTPMailer(hostname, port, username, password), IMailer, name)
        )
__docformat__ = 'restructuredtext'
from zope.i18nmessageid import MessageFactory
from zope.interface import Attribute
from zope.interface import Interface
from zope.schema import Bool
from zope.schema import Int
from zope.schema import Password
from zope.schema import TextLine
_ = MessageFactory('zope')
class IMailDelivery(Interface):
    """A mail delivery utility allows someone to send an email to a group of
    people."""
    # Concrete strategies are IDirectMailDelivery and IQueuedMailDelivery,
    # declared below.
    def send(fromaddr, toaddrs, message):
        """Send an email message.
        `fromaddr` is the sender address (byte string),
        `toaddrs` is a sequence of recipient addresses (byte strings).
        `message` is a byte string that contains both headers and body
        formatted according to RFC 2822. If it does not contain a Message-Id
        header, it will be generated and added automatically.
        Returns the message ID.
        You can subscribe to `IMailEvent` events for notification about
        problems or successful delivery.
        Messages are actually sent during transaction commit.
        """
class IDirectMailDelivery(IMailDelivery):
    """A mail delivery utility that delivers messages synchronously during
    transaction commit.
    Not useful for production use, but simpler to set up and use.
    """
    # The mailer performs the actual SMTP conversation; see IMailer below.
    mailer = Attribute("IMailer that is used for message delivery")
class IQueuedMailDelivery(IMailDelivery):
    """A mail delivery utility that puts all messages into a queue in the
    filesystem.
    Messages will be delivered asynchronously by a separate component.
    """
    # The queue directory is a Maildir folder; see IMaildir below.
    queuePath = TextLine(
        title=_("Queue path"),
        description=_("Pathname of the directory used to queue mail."))
class IMailQueueProcessor(Interface):
    """A mail queue processor that delivers queued messages asynchronously.
    """
    queuePath = TextLine(
        title=_("Queue Path"),
        description=_("Pathname of the directory used to queue mail."))
    pollingInterval = Int(
        title=_("Polling Interval"),
        description=_("How often the queue is checked for new messages"
                      " (in milliseconds)"),
        default=5000)
    mailer = Attribute("IMailer that is used for message delivery")
class IMailer(Interface):
    """Mailer handles synchronous mail delivery."""
    def send(fromaddr, toaddrs, message):
        """Send an email message.
        `fromaddr` is the sender address (unicode string),
        `toaddrs` is a sequence of recipient addresses (unicode strings).
        `message` contains both headers and body formatted according to RFC
        2822. It should contain at least Date, From, To, and Message-Id
        headers.
        Messages are sent immediately.
        """
    def abort():
        """Abort sending the message for asynchronous subclasses."""
    def vote(fromaddr, toaddrs, message):
        """Raise an exception if there is a known reason why the message
        cannot be sent."""
class ISMTPMailer(IMailer):
    """A mailer that delivers mail to a relay host via SMTP."""
    hostname = TextLine(
        title=_("Hostname"),
        description=_("Name of server to be used as SMTP server."))
    port = Int(
        title=_("Port"),
        description=_("Port of SMTP service"),
        default=25)
    username = TextLine(
        title=_("Username"),
        description=_("Username used for optional SMTP authentication."))
    password = Password(
        title=_("Password"),
        description=_("Password used for optional SMTP authentication."))
    # NOTE(review): in SMTPMailer.send, no_tls suppresses STARTTLS while
    # force_tls raises when the server lacks TLS -- confirm intended
    # behavior before setting both flags at once.
    no_tls = Bool(
        title=_("No TLS"),
        description=_("Never use TLS for sending email."))
    force_tls = Bool(
        title=_("Force TLS"),
        description=_("Use TLS always for sending email."))
class IMaildirFactory(Interface):
    # Callable interface: the Maildir class itself is the factory.
    def __call__(dirname, create=False):
        """Opens a `Maildir` folder at a given filesystem path.
        If `create` is ``True``, the folder will be created when it does not
        exist. If `create` is ``False`` and the folder does not exist, an
        exception (``OSError``) will be raised.
        If path points to a file or an existing directory that is not a
        valid `Maildir` folder, an exception is raised regardless of the
        `create` argument.
        """
class IMaildir(Interface):
    """Read/write access to `Maildir` folders.
    See http://www.qmail.org/man/man5/maildir.html for detailed format
    description.
    """
    def __iter__():
        """Returns an iterator over the pathnames of messages in this folder.
        """
    def newMessage():
        """Creates a new message in the `maildir`.
        Returns a file-like object for a new file in the ``tmp`` subdirectory
        of the `Maildir`. After writing message contents to it, call the
        ``commit()`` or ``abort()`` method on it.
        The returned object implements `IMaildirMessageWriter`.
        """
class IMaildirMessageWriter(Interface):
    """A file-like object to a new message in a `Maildir`."""
    # write/writelines/close mirror the standard file protocol; commit and
    # abort implement the Maildir tmp -> new handoff.
    def write(str):
        """Writes a string to the file.
        There is no return value. Due to buffering, the string may not actually
        show up in the file until the ``commit()`` method is called.
        """
    def writelines(sequence):
        """Writes a sequence of strings to the file.
        The sequence can be any iterable object producing strings, typically a
        list of strings. There is no return value. ``writelines`` does not add
        any line separators.
        """
    def close():
        """Closes the message file.
        No further writes are allowed. You can call ``close()`` before calling
        ``commit()`` or ``abort()`` to avoid having too many open files.
        Calling ``close()`` more than once is allowed.
        """
    def commit():
        """Commits the new message using the `Maildir` protocol.
        First, the message file is flushed, closed, then it is moved from
        ``tmp`` into ``new`` subdirectory of the maildir.
        Calling ``commit()`` more than once is allowed.
        """
    def abort():
        """Aborts the new message.
        The message file is closed and removed from the ``tmp`` subdirectory
        of the `maildir`.
        Calling ``abort()`` more than once is allowed.
        """
"""Classes which abstract different channels a message could be sent to.
"""
__docformat__ = 'restructuredtext'
from smtplib import SMTP
from ssl import SSLError
from threading import local
from zope.interface import implementer
from zope.sendmail.interfaces import ISMTPMailer
class _SMTPState(local):
    # Thread-local SMTP state: each thread talking through one SMTPMailer
    # instance gets its own connection / last response code / response text.
    connection = None
    code = None
    response = None
@implementer(ISMTPMailer)
class SMTPMailer:
    """Implementation of :class:`zope.sendmail.interfaces.ISMTPMailer`."""
    # Class attribute so tests can substitute a stub SMTP factory.
    smtp = SMTP
    def __init__(self, hostname='localhost', port=25,
                 username=None, password=None, no_tls=False, force_tls=False):
        self.hostname = hostname
        self.port = port
        self.username = username
        self.password = password
        self.force_tls = force_tls
        self.no_tls = no_tls
        # Per-thread connection state, so one mailer instance can be used
        # from several threads concurrently.
        self._smtp = _SMTPState()
    def _make_property(name):
        # Executed during class-body evaluation (hence no `self` argument):
        # builds a property that delegates to the thread-local _SMTPState.
        return property(lambda self: getattr(self._smtp, name),
                        lambda self, nv: setattr(self._smtp, name, nv))
    connection = _make_property('connection')
    code = _make_property('code')
    response = _make_property('response')
    del _make_property
    def vote(self, fromaddr, toaddrs, message):
        """Open the SMTP connection and greet the server (EHLO, falling
        back to HELO), raising RuntimeError on a non-2xx reply.  Called
        during transaction voting so failures abort before sending."""
        self.connection = self.smtp(self.hostname, str(self.port))
        code, response = self.connection.ehlo()
        if code < 200 or code >= 300:
            code, response = self.connection.helo()
            if code < 200 or code >= 300:
                raise RuntimeError('Error sending HELO to the SMTP server '
                                   '(code=%s, response=%s)' % (code, response))
        self.code, self.response = code, response
    def _close_connection(self):
        # QUIT politely; fall back to a hard close on SSL trouble.
        try:
            self.connection.quit()
        except SSLError:
            # something weird happened while quiting
            self.connection.close()
        self.connection = None
    def abort(self):
        """Close any connection opened by vote(); no-op otherwise."""
        if self.connection is None:
            return
        self._close_connection()
    def send(self, fromaddr, toaddrs, message):
        """Deliver `message`, opening a connection via vote() if needed;
        negotiates STARTTLS per no_tls/force_tls, logs in when ESMTP is
        available and credentials are set, and always closes the
        connection afterwards."""
        connection = self.connection
        if connection is None:
            self.vote(fromaddr, toaddrs, message)
            connection = self.connection
        # encryption support
        have_tls = connection.has_extn('starttls')
        if not have_tls and self.force_tls:
            raise RuntimeError('TLS is not available but TLS is required')
        if have_tls and not self.no_tls:
            connection.starttls()
            connection.ehlo()
        if connection.does_esmtp:
            if self.username is not None and self.password is not None:
                username, password = self.username, self.password
                connection.login(username, password)
        elif self.username:
            raise RuntimeError(
                'Mailhost does not support ESMTP but a username is configured')
        try:
            connection.sendmail(fromaddr, toaddrs, message)
        finally:
            self._close_connection()
__docformat__ = 'restructuredtext'
import argparse
import atexit
import configparser
import errno
import logging
import os
import smtplib
import sys
import threading
import time
from zope.sendmail.maildir import Maildir
from zope.sendmail.mailer import SMTPMailer
if sys.platform == 'win32': # pragma: no cover
    import pywintypes
    import win32file
    import winerror
    def _os_link(src, dst):
        # os.link is not usable here on Windows; emulate a hard link
        # with the Win32 API instead.
        return win32file.CreateHardLink(dst, src, None)
else:
    _os_link = os.link
    # Sentinel so _process_one_file can test for Windows-specific errors.
    pywintypes = None
# The longest time sending a file is expected to take. Longer than this and
# the send attempt will be assumed to have failed. This means that sending
# very large files or using very slow mail servers could result in duplicate
# messages sent.
MAX_SEND_TIME = 60*60*3  # seconds (3 hours)
# The below diagram depicts the operations performed while sending a message in
# the ``run`` method of ``QueueProcessorThread``. This sequence of operations
# will be performed for each file in the maildir each time the thread "wakes
# up" to send messages.
#
# Any error conditions not depicted on the diagram will provoke the catch-all
# exception logging of the ``run`` method.
#
# In the diagram the "message file" is the file in the maildir's "cur"
# directory that contains the message and "tmp file" is a hard link to the
# message file created in the maildir's "tmp" directory.
#
# ( start trying to deliver a message )
# |
# |
# V
# +-----( get tmp file mtime )
# | |
# | | file exists
# | V
# | ( check age )-----------------------------+
# tmp file | | file is new |
# does not | | file is old |
# exist | | |
# | ( unlink tmp file )-----------------------+ |
# | | file does | |
# | | file unlinked not exist | |
# | V | |
# +---->( touch message file )------------------+ | |
# | file does | | |
# | not exist | | |
# V | | |
# ( link message file to tmp file )----------+ | | |
# | tmp file | | | |
# | already exists | | | |
# | | | | |
# V V V V V
# ( send message ) ( skip this message )
# |
# V
# ( unlink message file )---------+
# | |
# | file unlinked | file no longer exists
# | |
# | +-----------------+
# | |
# | V
# ( unlink tmp file )------------+
# | |
# | file unlinked | file no longer exists
# V |
# ( message delivered )<---------+
class QueueProcessorThread(threading.Thread):
    """This thread is started at configuration time from the
    `mail:queuedDelivery` directive handler if processorThread is True.

    It repeatedly scans a Maildir queue directory and hands each queued
    message to the configured mailer; see the state diagram above for the
    file-locking protocol used in ``_process_one_file``.
    """
    log = logging.getLogger("QueueProcessorThread")
    _stopped = False
    interval = 3.0 # process queue every X second
    maildir = None
    mailer = None
    def __init__(self, interval=3.0):
        # `interval`: seconds to sleep between queue scans in daemon mode.
        threading.Thread.__init__(
            self, name="zope.sendmail.queue.QueueProcessorThread")
        self.interval = interval
        # Serializes the send+unlink step against stop(); see the long
        # comment in _process_one_file.
        self._lock = threading.Lock()
        self.daemon = True
    def setMaildir(self, maildir):
        """Set the maildir.
        This method is used just to provide a `maildir` stub.
        """
        self.maildir = maildir
    def _makeMaildir(self, path):
        # Factory hook; create=True so a missing queue dir is created.
        return Maildir(path, True)
    def setQueuePath(self, path):
        """Point this processor at the queue directory at `path`."""
        self.setMaildir(self._makeMaildir(path))
    def setMailer(self, mailer):
        """Set the IMailer used to actually deliver messages."""
        self.mailer = mailer
    def _parseMessage(self, message):
        """Extract fromaddr and toaddrs from the first two lines of
        the `message`.
        Returns a fromaddr string, a toaddrs tuple and the message
        string.
        """
        fromaddr = ""
        toaddrs = ()
        rest = ""
        try:
            first, second, rest = message.split(b'\n', 2)
        except ValueError:
            return fromaddr, toaddrs, message
        if first.startswith(b"X-Zope-From: "):
            i = len(b"X-Zope-From: ")
            fromaddr = first[i:].decode()
        if second.startswith(b"X-Zope-To: "):
            i = len(b"X-Zope-To: ")
            toaddrs = tuple(
                addr.decode() for addr in second[i:].split(b", ") if addr
            )
        return fromaddr, toaddrs, rest
    def _action_if_exists(self, fname, func, default=None):
        # apply the func to the fname, ignoring exceptions that
        # happen when the file does not exist.
        try:
            return func(fname)
        except OSError as e:
            if e.errno != errno.ENOENT:
                # The file existed, but something unexpected
                # happened. Report it
                raise
            return default
    def _unlink_if_exists(self, fname):
        # Remove fname, tolerating its prior disappearance.
        self._action_if_exists(fname, os.unlink)
    def run(self, forever=True):
        """Process the queue; with forever=True (daemon mode) keep
        re-scanning every `interval` seconds until stop() is called,
        otherwise do a single pass (testing plug)."""
        atexit.register(self.stop)
        while not self._stopped:
            for filename in self.maildir:
                # if we are asked to stop while sending messages, do so
                if self._stopped:
                    break
                self._process_one_file(filename)
            else:
                if forever:
                    time.sleep(self.interval)
            # A testing plug
            if not forever:
                break
    def _process_one_file(self, filename):
        """Attempt to deliver a single queued message file, using the
        hard-link protocol depicted in the diagram above to avoid two
        processors sending the same message."""
        fromaddr = ''
        toaddrs = ()
        head, tail = os.path.split(filename)
        tmp_filename = os.path.join(head, '.sending-' + tail)
        rejected_filename = os.path.join(head, '.rejected-' + tail)
        try:
            # perform a series of operations in an attempt to ensure
            # that no two threads/processes send this message
            # simultaneously as well as attempting to not generate
            # spurious failure messages in the log; a diagram that
            # represents these operations is included in a
            # comment above this class
            # find the age of the tmp file (if it exists)
            mtime = self._action_if_exists(
                tmp_filename,
                lambda fname: os.stat(fname).st_mtime)
            age = time.time() - mtime if mtime is not None else None
            # if the tmp file exists, check its age
            if age is not None:
                if age > MAX_SEND_TIME:
                    # the tmp file is "too old"; this suggests
                    # that during an attempt to send it, the
                    # process died; remove the tmp file so we
                    # can try again
                    try:
                        os.unlink(tmp_filename)
                    except OSError as e:
                        if e.errno == errno.ENOENT: # file does not exist
                            # it looks like someone else removed the tmp
                            # file, that's fine, we'll try to deliver the
                            # message again later
                            return
                        # XXX: we're silently ignoring the exception here.
                        # Is that right?
                        # If permissions or something are not right, we'll fail
                        # on _os_link later on.
                    # if we get here, the file existed, but was too
                    # old, so it was unlinked
                else:
                    # the tmp file is "new", so someone else may
                    # be sending this message, try again later
                    return
            # now we know (hope, given the above XXX) that the
            # tmp file doesn't exist, we need to "touch" the
            # message before we create the tmp file so the
            # mtime will reflect the fact that the file is
            # being processed (there is a race here, but it's
            # OK for two or more processes to touch the file
            # "simultaneously")
            try:
                os.utime(filename, None)
            except OSError as e:
                if e.errno == errno.ENOENT: # file does not exist
                    # someone removed the message before we could
                    # touch it, no need to complain, we'll just keep
                    # going
                    return
                # XXX: Silently ignoring all other errors
            # creating this hard link will fail if another process is
            # also sending this message
            try:
                _os_link(filename, tmp_filename)
            except OSError as e:
                if e.errno == errno.EEXIST: # file exists, *nix
                    # it looks like someone else is sending this
                    # message too; we'll try again later
                    return
                # XXX: Silently ignoring all other errno
            except Exception as e: # pragma: no cover
                if (pywintypes is not None
                        and isinstance(e, pywintypes.error)
                        and e.funcname == 'CreateHardLink'
                        and e.winerror == winerror.ERROR_ALREADY_EXISTS):
                    # file exists, win32
                    return
                # XXX: Silently ignoring all other causes here.
            # read message file and send contents
            with open(filename, 'rb') as f:
                message = f.read()
            fromaddr, toaddrs, message = self._parseMessage(message)
            # The next block is the only one that is sensitive to
            # interruptions. Everywhere else, if this daemon thread
            # stops, we should be able to correctly handle a restart.
            # In this block, if we send the message, but we are
            # stopped before we unlink the file, we will resend the
            # message when we are restarted. We limit the likelihood
            # of this somewhat by using a lock to link the two
            # operations. When the process gets an interrupt, it
            # will call the atexit that we registered (``stop``
            # below). This will try to get the same lock before it
            # lets go. Because this can cause the daemon thread to
            # continue (that is, to not act like a daemon thread), we
            # still use the _stopped flag to communicate.
            with self._lock:
                try:
                    self.mailer.send(fromaddr, toaddrs, message)
                except smtplib.SMTPResponseException as e:
                    if 500 <= e.smtp_code <= 599:
                        # permanent error, ditch the message
                        self.log.error(
                            "Discarding email from %s to %s due to"
                            " a permanent error: %s",
                            fromaddr, ", ".join(toaddrs), str(e))
                        _os_link(filename, rejected_filename)
                    else:
                        # Log an error and retry later
                        raise
                except smtplib.SMTPRecipientsRefused as e:
                    # All recipients are refused by smtp
                    # server. Dont try to redeliver the message.
                    self.log.error("Email recipients refused: %s",
                                   ', '.join(e.recipients))
                    _os_link(filename, rejected_filename)
                self._unlink_if_exists(filename)
                self._unlink_if_exists(tmp_filename)
            # TODO: maybe log the Message-Id of the message sent
            self.log.info("Mail from %s to %s sent.",
                          fromaddr, ", ".join(toaddrs))
            # Blanket except because we don't want
            # this thread to ever die
        except Exception:
            if fromaddr != '' or toaddrs != ():
                self.log.error(
                    "Error while sending mail from %s to %s.",
                    fromaddr, ", ".join(toaddrs), exc_info=True)
            else:
                self.log.error(
                    "Error while sending mail : %s ",
                    filename, exc_info=True)
    def stop(self):
        """Ask the thread to stop; also registered with atexit.
        Acquiring/releasing the lock waits out any in-progress
        send+unlink step (see _process_one_file)."""
        self._stopped = True
        self._lock.acquire()
        self._lock.release()
def boolean(s):
    """Loosely interpret *s* as a boolean configuration value.

    Any value whose lower-cased string form starts with "t", "y" or "1"
    counts as true; everything else (including the empty string) is false.
    """
    first = str(s).lower()[:1]
    return first in ("t", "y", "1")
def string_or_none(s):
    """Map the literal string 'None' (as read back from an ini file)
    to the ``None`` object; return any other value unchanged."""
    return None if s == 'None' else s
class ConsoleApp:
    """Allows running of Queue Processor from the console."""
    INI_SECTION = "app:zope-sendmail"
    INI_NAMES = [
        "interval",
        "hostname",
        "port",
        "username",
        "password",
        "force_tls",
        "no_tls",
        "queue_path",
    ]
    # NOTE: the argument parser is built once at class-definition time and
    # shared by all instances.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--daemon', action='store_true',
        help=("Run in daemon mode, periodically checking queue "
              "and sending messages. Default is to send all "
              "messages in queue once and exit."))
    parser.add_argument(
        '--interval', metavar='<#secs>', type=float, default=3,
        help=("How often to check queue when in daemon mode. "
              "Default is %(default)s seconds."))
    smtp_group = parser.add_argument_group(
        "SMTP Server",
        "Connection information for the SMTP server")
    smtp_group.add_argument(
        '--hostname', default='localhost',
        help=("Name of SMTP host to use for delivery. Default is "
              "%(default)s."))
    smtp_group.add_argument(
        '--port', type=int, default=25,
        help=("Which port on SMTP server to deliver mail to. "
              "Default is %(default)s."))
    auth_group = parser.add_argument_group(
        "Authentication",
        ("Authentication information for the SMTP server. "
         "If one is provided, they must both be. One or both "
         "can be provided in the --config file."))
    auth_group.add_argument(
        '--username',
        help=("Username to use to log in to SMTP server. Default "
              "is none."))
    auth_group.add_argument(
        '--password',
        help=("Password to use to log in to SMTP server. Must be "
              "specified if username is specified."))
    del auth_group
    tls_group = smtp_group.add_mutually_exclusive_group()
    tls_group.add_argument(
        '--force-tls', action='store_true',
        help=("Do not connect if TLS is not available. Not "
              "enabled by default."))
    tls_group.add_argument(
        '--no-tls', action='store_true',
        help=("Do not use TLS even if is available. Not enabled "
              "by default."))
    del tls_group
    del smtp_group
    parser.add_argument(
        '--config', metavar='<inifile>',
        type=argparse.FileType(),
        help=("Get configuration from specified ini file; it must "
              "contain a section [%s] that can contain the "
              "following keys: %s. If you specify the queue path "
              "in the ini file, you don't need to specify it on "
              "the command line. With the exception of the queue path, "
              "options specified in the ini file override options on the "
              "command line." % (INI_SECTION, ', '.join(INI_NAMES))))
    parser.add_argument(
        "maildir", default=None, nargs="?",
        help=("The path to the mail queue directory."
              "If not given, it must be found in the --config file."
              "If given, this overrides a value in the --config file"))
    # Class-level defaults; overwritten per instance by _process_args.
    daemon = False
    interval = 3
    hostname = 'localhost'
    port = 25
    username = None
    password = None
    force_tls = False
    no_tls = False
    queue_path = None
    # Factory hooks, overridable for testing.
    QueueProcessorKind = QueueProcessorThread
    MailerKind = SMTPMailer
    def __init__(self, argv=None, verbose=True):
        # Parse command line (and optional ini file), then build the mailer.
        argv = sys.argv if argv is None else argv
        self.script_name = argv[0]
        self.verbose = verbose
        self._process_args(argv[1:])
        self.mailer = self.MailerKind(
            self.hostname, self.port, self.username, self.password,
            self.no_tls, self.force_tls)
    def main(self):
        """Create the queue processor and run it (once, or forever when
        --daemon was given)."""
        queue = self.QueueProcessorKind(self.interval)
        queue.setMailer(self.mailer)
        queue.setQueuePath(self.queue_path)
        queue.run(forever=self.daemon)
    def _process_args(self, args):
        """Populate instance attributes from parsed CLI options, letting
        the --config ini file override everything except the queue path."""
        opts = self.parser.parse_args(args)
        self.daemon = opts.daemon
        self.interval = opts.interval
        self.hostname = opts.hostname
        self.port = opts.port
        self.username = opts.username
        self.password = opts.password
        self.force_tls = opts.force_tls
        self.no_tls = opts.no_tls
        if opts.config:
            fname = opts.config.name
            opts.config.close()
            self._load_config(fname)
        self.queue_path = opts.maildir or self.queue_path
        if not self.queue_path:
            self.parser.error('please specify the queue path')
        if (self.username or self.password) and \
                not (self.username and self.password):
            self.parser.error('Must use username and password together.')
    def _load_config(self, path):
        """Read the [app:zope-sendmail] section of the ini file at `path`,
        using the current attribute values as defaults."""
        section = self.INI_SECTION
        names = self.INI_NAMES
        defaults = {name: str(getattr(self, name)) for name in names}
        config = configparser.ConfigParser(defaults)
        config.read(path)
        self.interval = float(config.get(section, "interval"))
        self.hostname = config.get(section, "hostname")
        self.port = int(config.get(section, "port"))
        self.username = string_or_none(config.get(section, "username"))
        self.password = string_or_none(config.get(section, "password"))
        self.force_tls = boolean(config.get(section, "force_tls"))
        self.no_tls = boolean(config.get(section, "no_tls"))
        self.queue_path = string_or_none(config.get(section, "queue_path"))
def run(argv=None):
    """Console entry point: configure basic logging, then build and run
    the queue processor application."""
    logging.basicConfig()
    ConsoleApp(argv).main()
if __name__ == '__main__':  # pragma: no cover - manual invocation only
    run()
__docformat__ = 'restructuredtext'
import email.parser
import logging
import os
import warnings
from random import randrange
from socket import gethostname
from time import strftime
import transaction
from transaction.interfaces import IDataManagerSavepoint
from transaction.interfaces import ISavepointDataManager
from zope.interface import implementer
from zope.sendmail.interfaces import IDirectMailDelivery
from zope.sendmail.interfaces import IQueuedMailDelivery
from zope.sendmail.maildir import Maildir
# BBB: this import is needed for backward compatibility with older versions of
# zope.sendmail which defined QueueProcessorThread in this module
from zope.sendmail.queue import QueueProcessorThread # noqa: F401
log = logging.getLogger("MailDataManager")
@implementer(IDataManagerSavepoint)
class _NoOpSavepoint:
    # Returned by MailDataManager.savepoint(); there is nothing to roll
    # back for outgoing mail, so rollback does nothing.
    def rollback(self):
        return
@implementer(ISavepointDataManager)
class MailDataManager:
    """Transaction data manager that runs `callable(*args)` only in
    tpc_finish, i.e. once the transaction is certain to commit, and calls
    `onAbort` when the transaction is aborted."""
    def __init__(self, callable, args=(), vote=None, onAbort=None):
        self.callable = callable
        self.args = args
        self.vote = vote
        self.onAbort = onAbort
        # Use the default thread transaction manager.
        self.transaction_manager = transaction.manager
    def commit(self, txn):
        # Nothing to do here; the real work is deferred to tpc_finish.
        pass
    def abort(self, txn):
        if self.onAbort:
            self.onAbort()
    def sortKey(self):
        # Arbitrary but stable ordering among participating data managers.
        return str(id(self))
    def savepoint(self):
        # We do not need savepoint/rollback, but some code (like CMFEditions)
        # uses savepoints, and breaks when one datamanager does not have this.
        # So provide a dummy implementation.
        return _NoOpSavepoint()
    # No subtransaction support.
    def abort_sub(self, txn):
        "This object does not do anything with subtransactions"
    commit_sub = abort_sub
    def beforeCompletion(self, txn):
        "This object does not do anything in beforeCompletion"
    afterCompletion = beforeCompletion
    def tpc_begin(self, txn, subtransaction=False):
        assert not subtransaction
    def tpc_vote(self, txn):
        # Delegate voting to the optional callback (e.g. SMTPMailer.vote).
        if self.vote is not None:
            return self.vote(*self.args)
    def tpc_finish(self, txn):
        try:
            self.callable(*self.args)
        except Exception:
            # Any exceptions here can cause database corruption.
            # Better to protect the data and potentially miss emails than
            # leave a database in an inconsistent state which requires a
            # guru to fix.
            log.exception("Failed in tpc_finish for %r", self.callable)
    tpc_abort = abort
class AbstractMailDelivery:
    """Base class for transactional mail delivery utilities.

    ``send`` normalizes the message to bytes, guarantees a ``Message-Id``
    header, and joins a data manager (built by ``createDataManager``) to
    the current transaction so delivery only happens on commit.
    """

    def newMessageId(self):
        """Generates a new message ID according to RFC 2822 rules"""
        randmax = 0x7fffffff
        left_part = '%s.%d.%d' % (strftime('%Y%m%d%H%M%S'),
                                  os.getpid(),
                                  randrange(0, randmax))
        return f"{left_part}@{gethostname()}"

    def send(self, fromaddr, toaddrs, message):
        """Queue *message* for delivery and return its Message-Id
        (without the angle brackets).

        Raises ValueError if an existing Message-Id header is not
        enclosed in ``<...>``.
        """
        # Switch the message to be bytes immediately, any encoding
        # peculiarities should be handled before.
        if message is None:
            header = b''
        else:
            if not isinstance(message, bytes):
                message = message.encode('utf-8')
            # determine line separator type (assumes consistency)
            nli = message.find(b'\n')
            # Compare with a one-byte *slice*: indexing a bytes object
            # yields an int, which would never equal b'\r', so CRLF mail
            # would always be misdetected as bare-LF.
            line_sep = b'\n' if nli < 1 or message[nli - 1:nli] != b'\r' \
                else b'\r\n'
            header = message.split(line_sep * 2, 1)[0]
        parse = email.parser.BytesParser().parsebytes
        messageid = parse(header).get('Message-Id')
        if messageid:
            if not messageid.startswith('<') or not messageid.endswith('>'):
                raise ValueError('Malformed Message-Id header')
            messageid = messageid[1:-1]
        else:
            messageid = self.newMessageId()
            message = b'Message-Id: <%s>\n%s' % (messageid.encode(), message)
        transaction.get().join(
            self.createDataManager(fromaddr, toaddrs, message))
        return messageid

    def createDataManager(self, fromaddr, toaddrs, message):
        """Return the transaction data manager; subclass responsibility."""
        raise NotImplementedError()
@implementer(IDirectMailDelivery)
class DirectMailDelivery(AbstractMailDelivery):
    __doc__ = IDirectMailDelivery.__doc__

    def __init__(self, mailer):
        # The mailer utility that performs the actual SMTP conversation.
        self.mailer = mailer

    def createDataManager(self, fromaddr, toaddrs, message):
        """Build a MailDataManager that sends through our mailer on commit."""
        mailer = self.mailer
        try:
            vote = mailer.vote
        except AttributeError:
            # We've got an old mailer, just pass through voting
            warnings.warn("The mailer %s does not provide a vote method"
                          % (repr(mailer)), DeprecationWarning)

            def vote(*args, **kwargs):
                pass

        return MailDataManager(mailer.send,
                               args=(fromaddr, toaddrs, message),
                               vote=vote, onAbort=mailer.abort)
@implementer(IQueuedMailDelivery)
class QueuedMailDelivery(AbstractMailDelivery):
    __doc__ = IQueuedMailDelivery.__doc__

    def __init__(self, queuePath):
        self._queuePath = queuePath

    @property
    def queuePath(self):
        """Directory of the Maildir queue (read-only)."""
        return self._queuePath

    def createDataManager(self, fromaddr, toaddrs, message):
        # Write the message into the Maildir's tmp area right away; the
        # returned data manager moves it to new/ on commit and deletes
        # it on abort.
        writer = Maildir(self.queuePath, True).newMessage()
        writer.write(b'X-Zope-From: %s\n' % fromaddr.encode())
        writer.write(b'X-Zope-To: %s\n' % ", ".join(toaddrs).encode())
        writer.write(message)
        writer.close()
        return MailDataManager(writer.commit, onAbort=writer.abort)
"""Read/write access to `Maildir` folders.
"""
import errno
import os
import random
import socket
import time
from zope.interface import implementer
from zope.interface import provider
from zope.sendmail.interfaces import IMaildir
from zope.sendmail.interfaces import IMaildirFactory
from zope.sendmail.interfaces import IMaildirMessageWriter
@provider(IMaildirFactory)
@implementer(IMaildir)
class Maildir:
    """See :class:`zope.sendmail.interfaces.IMaildir`"""

    def __init__(self, path, create=False):
        "See :class:`zope.sendmail.interfaces.IMaildirFactory`"
        self.path = path

        def access(path):
            return os.access(path, os.F_OK)

        subdir_cur = os.path.join(path, 'cur')
        subdir_new = os.path.join(path, 'new')
        subdir_tmp = os.path.join(path, 'tmp')
        if create and not access(path):
            # Build the canonical Maildir layout from scratch.
            os.mkdir(path)
            os.mkdir(subdir_cur)
            os.mkdir(subdir_new)
            os.mkdir(subdir_tmp)
            maildir = True
        else:
            # Pre-existing directory: all three subdirectories must be
            # present for it to qualify as a Maildir.
            maildir = (os.path.isdir(subdir_cur) and os.path.isdir(subdir_new)
                       and os.path.isdir(subdir_tmp))
        if not maildir:
            raise ValueError('%s is not a Maildir folder' % path)

    def __iter__(self):
        "See :class:`zope.sendmail.interfaces.IMaildir`"
        join = os.path.join
        subdir_cur = join(self.path, 'cur')
        subdir_new = join(self.path, 'new')
        # http://www.qmail.org/man/man5/maildir.html says:
        # "It is a good idea for readers to skip all filenames in new
        # and cur starting with a dot. Other than this, readers
        # should not attempt to parse filenames."
        new_messages = [join(subdir_new, x) for x in os.listdir(subdir_new)
                        if not x.startswith('.')]
        cur_messages = [join(subdir_cur, x) for x in os.listdir(subdir_cur)
                        if not x.startswith('.')]
        # Sort by modification time so earlier messages are sent before
        # later messages during queue processing.
        msgs_sorted = [(m, os.path.getmtime(m)) for m
                       in new_messages + cur_messages]
        msgs_sorted.sort(key=lambda x: x[1])
        return iter([m[0] for m in msgs_sorted])

    def newMessage(self):
        "See :class:`zope.sendmail.interfaces.IMaildir`"
        # NOTE: http://www.qmail.org/man/man5/maildir.html says, that the first
        # step of the delivery process should be a chdir. Chdirs and
        # threading do not mix. Is that chdir really necessary?
        join = os.path.join
        subdir_tmp = join(self.path, 'tmp')
        subdir_new = join(self.path, 'new')
        pid = os.getpid()
        host = socket.gethostname()
        randmax = 0x7fffffff
        counter = 0
        while True:
            timestamp = int(time.time())
            # Unique tmp/ name per maildir(5): time.pid.host.random.
            unique = '%d.%d.%s.%d' % (timestamp, pid, host,
                                      random.randrange(randmax))
            filename = join(subdir_tmp, unique)
            try:
                # O_EXCL ensures a concurrent writer's file is never
                # clobbered; a collision surfaces as EEXIST and we retry.
                fd = os.open(filename, os.O_CREAT | os.O_EXCL | os.O_WRONLY,
                             0o600)
            except OSError as e:
                if e.errno != errno.EEXIST:
                    raise
                # File exists
                counter += 1
                if counter >= 1000:
                    raise RuntimeError("Failed to create unique file name"
                                       " in %s, are we under a DoS attack?"
                                       % subdir_tmp)
                # NOTE: maildir.html (see above) says I should sleep for 2
                time.sleep(0.1)
            else:
                break
        return MaildirMessageWriter(os.fdopen(fd, 'wb'), filename,
                                    join(subdir_new, unique))
def _encode_utf8(s):
if isinstance(s, str):
s = s.encode('utf-8')
return s
@implementer(IMaildirMessageWriter)
class MaildirMessageWriter:
    """See :class:`zope.sendmail.interfaces.IMaildirMessageWriter`"""

    def __init__(self, fd, filename, new_filename):
        self._filename = filename          # path of the tmp/ file
        self._new_filename = new_filename  # destination path in new/
        self._fd = fd
        self._finished = False
        self._aborted = False

    def write(self, data):
        self._fd.write(_encode_utf8(data))

    def writelines(self, lines):
        self._fd.writelines(_encode_utf8(line) for line in lines)

    def close(self):
        self._fd.close()

    def commit(self):
        if self._aborted:
            raise RuntimeError('Cannot commit, message already aborted')
        if self._finished:
            # Committing twice is a no-op.
            return
        self._finished = True
        self.close()
        os.rename(self._filename, self._new_filename)
        # NOTE: the same maildir.html says it should be a link, followed by
        # unlink. But Win32 does not necessarily have hardlinks!

    def abort(self):
        # XXX mgedmin: I think it is dangerous to have an abort() that does
        # nothing when commit() already succeeded. But the tests currently
        # test that expectation.
        if self._finished:
            return
        self._finished = True
        self._aborted = True
        self.close()
        os.unlink(self._filename)

    # XXX: should there be a __del__ that calls abort()?
===============
API Reference
===============
Interfaces
==========
.. automodule:: zope.sendmail.interfaces
Delivery Implementations
========================
.. automodule:: zope.sendmail.delivery
SMTP Mailer
===========
.. automodule:: zope.sendmail.mailer
Maildir support
===============
.. automodule:: zope.sendmail.maildir
Queued Mail
===========
.. automodule:: zope.sendmail.queue
Vocabulary
==========
.. automodule:: zope.sendmail.vocabulary
ZCML Directives
===============
.. automodule:: zope.sendmail.zcml
| zope.sendmail | /zope.sendmail-6.0.tar.gz/zope.sendmail-6.0/docs/api.rst | api.rst |
from functools import cmp_to_key
from locale import strcoll
def cmp(lhs, rhs):
    """Three-way compare returning -1, 0 or 1.

    Mirrors the Python 2 built-in ``cmp`` but also tolerates ``None``
    operands, which always sort before any other value.
    """
    if lhs is None:
        return 0 if rhs is None else -1
    if rhs is None:
        return 1
    return (lhs > rhs) - (lhs < rhs)
class _Smallest:
""" Singleton: sorts below any other value.
"""
__slots__ = ()
def __lt__(self, other):
return True
def __eq__(self, other):
return other is self
def __gt__(self, other):
return False
_Smallest = _Smallest()
def sort(sequence, sort=(), _=None, mapping=0):
    """Return a sorted copy of 'sequence'.

    :param sequence: is a sequence of objects to be sorted

    :param sort: is a sequence of tuples (key,func,direction)
        that define the sort order:

        - *key* is the name of an attribute to sort the objects by

        - *func* is the name of a comparison function. This parameter is
          optional

          allowed values:

          - "cmp" -- the standard comparison function (default)

          - "nocase" -- case-insensitive comparison

          - "strcoll" or "locale" -- locale-aware string comparison

          - "strcoll_nocase" or "locale_nocase" -- locale-aware
            case-insensitive string comparison

          - "xxx" -- a user-defined comparison function

        - *direction* defines the sort direction for the key (optional).
          (allowed values: "asc" (default) , "desc")

    :param _: namespace (or a TemplateDict-like object) used to resolve
        user-defined comparison function names.

    :param mapping: if true, sort keys are looked up as items (``v[key]``)
        instead of attributes (``getattr(v, key)``).
    """
    # An "extended" sort (explicit function or direction on any key) needs
    # a comparator object; a plain key sort can use tuple ordering.
    need_sortfunc = 0
    if sort:
        for s in sort:
            if len(s) > 1:  # extended sort if there is reference to...
                # ...comparison function or sort order, even if they are
                # "cmp" and "asc"
                need_sortfunc = 1
                break
    sortfields = sort  # multi sort = key1,key2
    multsort = len(sortfields) > 1  # flag: is multiple sort
    if need_sortfunc:
        # prepare the list of functions and sort order multipliers
        sf_list = make_sortfunctions(sortfields, _)
        # clean the mess a bit
        if multsort:  # More than one sort key.
            sortfields = [x[0] for x in sf_list]
        else:
            sort = sf_list[0][0]
    elif sort:
        if multsort:  # More than one sort key.
            sortfields = [x[0] for x in sort]
        else:
            sort = sort[0][0]
    isort = not sort
    # Build a decorated list of (key, client) pairs to sort.
    s = []
    for client in sequence:
        k = _Smallest
        if isinstance(client, tuple) and len(client) == 2:
            # (key, value) pairs: sort by the value's attributes/items,
            # but return the whole pair.
            if isort:
                k = client[0]
            v = client[1]
        else:
            if isort:
                k = client
            v = client
        if sort:
            if multsort:  # More than one sort key.
                k = []
                for sk in sortfields:
                    try:
                        if mapping:
                            akey = v[sk]
                        else:
                            akey = getattr(v, sk)
                    except (AttributeError, KeyError):
                        # Missing key/attribute sorts below everything.
                        akey = _Smallest
                    else:
                        # Call non-basic keys (e.g. methods) to get a value.
                        if not isinstance(akey, BASIC_TYPES):
                            try:
                                akey = akey()
                            except BaseException:  # pylint:disable=bare-except
                                pass
                    k.append(akey)
            else:  # One sort key.
                try:
                    if mapping:
                        k = v[sort]
                    else:
                        k = getattr(v, sort)
                except (AttributeError, KeyError):
                    k = _Smallest
                if not isinstance(k, BASIC_TYPES):
                    try:
                        k = k()
                    except BaseException:  # pylint:disable=bare-except
                        pass
        s.append((k, client))
    if need_sortfunc:
        by = SortBy(multsort, sf_list)
        s.sort(key=cmp_to_key(by))
    else:
        s.sort()
    return [x[1] for x in s]
# Backwards-compatible alias: the historical public name for ``sort``.
SortEx = sort

# Types used directly as sort keys; instances of any other type are
# called (if possible) by ``sort`` to obtain the actual key value.
BASIC_TYPES = (
    str,
    bytes,
    int,
    float,
    tuple,
    list,
    type(None)
)
def nocase(str1, str2):
    """Case-insensitive three-way comparison of two strings."""
    lhs = str1.lower()
    rhs = str2.lower()
    return cmp(lhs, rhs)
def strcoll_nocase(str1, str2):
    """Locale-aware, case-insensitive three-way string comparison."""
    folded_a = str1.lower()
    folded_b = str2.lower()
    return strcoll(folded_a, folded_b)
# Predefined comparison functions, keyed by the names accepted in the
# *sort* spec of ``sort()``; unknown names are resolved from the caller's
# namespace by make_sortfunctions().
_SORT_FUNCTIONS = {
    "cmp": cmp,  # builtin
    "nocase": nocase,
    "locale": strcoll,
    "strcoll": strcoll,
    "locale_nocase": strcoll_nocase,
    "strcoll_nocase": strcoll_nocase,
}
def make_sortfunctions(sortfields, _):
    """Split each sort field and find its comparison function.

    Each *field* is a 1- to 3-tuple ``(key[, sorter_name[, direction]])``;
    missing parts default to "cmp" and "asc". The comparison function is
    looked up first among the predefined ones, then in the namespace *_*.
    Returns a list of 3-tuples (field, cmp_function, asc_multiplier).
    """
    result = []
    for field in sortfields:
        spec = list(field)
        if not 1 <= len(spec) <= 3:
            raise SyntaxError(
                "sort option: (Key [,sorter_name [,direction]])")
        # Pad the spec with the default sorter name and direction.
        spec += ["cmp", "asc"][len(spec) - 1:]
        key, f_name, direction = spec
        # predefined function?
        func = _SORT_FUNCTIONS.get(f_name)
        if func is None:
            # no - look it up in the namespace
            if hasattr(_, 'getitem'):
                # support for zope.documenttemplate.dt_util.TemplateDict
                func = _.getitem(f_name, 0)
            else:
                func = _[f_name]
        direction = direction.lower()
        if direction == "asc":
            multiplier = +1
        elif direction == "desc":
            multiplier = -1
        else:
            raise SyntaxError(
                "sort direction must be either ASC or DESC")
        result.append((key, func, multiplier))
    return result
class SortBy:
    """Old-style (cmp) comparator over the decorated (key, client) pairs
    built by ``sort()``; used via ``functools.cmp_to_key``.
    """

    def __init__(self, multsort, sf_list):
        # multsort: true when sorting by more than one key.
        self.multsort = multsort
        # sf_list: output of make_sortfunctions():
        # [(field, cmp_function, asc_multiplier), ...]
        self.sf_list = sf_list

    def __call__(self, o1, o2):
        """Return a negative/zero/positive number comparing o1 with o2."""
        n_fields = len(self.sf_list)
        if self.multsort:
            o1 = o1[0]  # if multsort - take the first element (key list)
            o2 = o2[0]
            req_len = n_fields
        else:
            req_len = n_fields + 1
        # assert that o1 and o2 are tuples of appropriate length
        if len(o1) != req_len:
            raise ValueError("%s, %d" % (o1, req_len))
        if len(o2) != req_len:
            raise ValueError("%s, %d" % (o2, req_len))
        # now run through the list of functions in sf_list and
        # compare every object in o1 and o2
        for i in range(n_fields):
            # if multsort - we already extracted the key list
            # if not multsort - i is 0, and the 0th element is the key
            c1, c2 = o1[i], o2[i]
            func, multiplier = self.sf_list[i][1:3]
            if c1 is _Smallest and c2 is _Smallest:
                return 0
            # NOTE(review): the asc/desc multiplier is *not* applied to the
            # _Smallest sentinel, so missing values sort first even for
            # "desc" keys — presumably intentional; confirm before changing.
            if c1 is _Smallest:
                return -1
            elif c2 is _Smallest:
                return 1
            n = func(c1, c2)
            if n:
                return n * multiplier
        # all functions returned 0 - identical sequences
        return 0
=========
CHANGES
=========
4.0.2 (2019-07-11)
==================
- Fix pipetrigger.close() to close the right file descriptor.
(This could've been causing EBADF errors in unrelated places!)
- Add Python 3.7 support.
4.0.1 (2017-10-31)
==================
- Fix Windows compatibility regression introduced in 4.0.0.
See `issue 9 <https://github.com/zopefoundation/zope.server/issues/9>`_.
4.0.0 (2017-10-30)
==================
- Drop Python 2.6 support.
- Add Python 3.4, 3.5, and 3.6 support.
- Add PyPy support.
- Made the HTTPTask not have ``command`` or ``uri`` values of
``"None"`` when the first request line cannot be parsed. Now they
are empty strings.
- Reimplement ``buffers.OverflowableBuffer`` in terms of the standard
library's ``tempfile.SpooledTemporaryFile``. This is much simpler.
See `issue 5 <https://github.com/zopefoundation/zope.server/issues/5>`_.
- Achieve and maintain 100% test coverage.
- Remove all the custom logging implementations in
``zope.server.logging`` and change the ``CommonAccessLogger`` and
``CommonFTPActivityLogger`` to only work with Python standard
library loggers. The standard library supports all the logging
functions this package previously provided. It can be easily configured
with ZConfig. See `issue 4
<https://github.com/zopefoundation/zope.server/issues/4>`_.
3.9.0 (2013-03-13)
==================
- Better adherence to WSGI:
* Call close method if present on iterables returned by
``start_response``.
* Don't include non-string values in the CGI environment
(``CHANNEL_CREATION_TIME``).
* Always include ``QUERY_STRING`` to avoid the cgi module falling back
to ``sys.argv``.
* Add tests based on `paste.lint` middleware.
- Replaced deprecated ``zope.interface.implements`` usage with equivalent
``zope.interface.implementer`` decorator.
- Dropped support for Python 2.4 and 2.5.
- Exceptions that happen in the handler thread main loop are logged so that
the unexpected death of a handler thread does not happen in silence.
3.8.6 (2012-01-07)
==================
- On startup, HTTPServer prints a clickable URL after the hostname/port.
3.8.5 (2011-09-13)
==================
- fixed bug: requests lasting over 15 minutes were sometimes closed
prematurely.
3.8.4 (2011-06-07)
==================
- Fix syntax error in tests on Python < 2.6.
3.8.3 (2011-05-18)
==================
- Made ``start_response`` method of WSGI server implementation more compliant
with spec:
http://www.python.org/dev/peps/pep-0333/#the-start-response-callable
3.8.2 (2010-12-04)
==================
- Corrected license version in ``zope/server/http/tests/test_wsgiserver.py``.
3.8.1 (2010-08-24)
==================
- When the result of a WSGI application was received, ``task.write()`` was
only called once to transmit the data. This prohibited the transmission of
partial results. Now the WSGI server iterates through the result itself
making multiple ``task.write()`` calls, which will cause partial data to be
transmitted.
- Created a second test case instance for the post-mortem WSGI server, so it
is tested as well.
- Using python's ``doctest`` module instead of deprecated
``zope.testing.doctest``.
3.8.0 (2010-08-05)
==================
- Implemented correct server proxy behavior. The HTTP server would always add
a "Server" and "Date" response header to the list of response headers
regardless whether one had been set already. The HTTP 1.1 spec specifies
that a proxy server must not modify the "Server" and "Date" header but add a
"Via" header instead.
3.7.0 (2010-08-01)
==================
- Implemented proxy support. Proxy requests contain a full URIs and the
request parser used to throw that information away. Using
``urlparse.urlsplit()``, all pieces of the URL are recorded.
- The proxy scheme and netloc/hostname are exposed in the WSGI environment as
``zserver.proxy.scheme`` and ``zserver.proxy.host``.
- Made tests runnable via buildout again.
3.6.2 (2010-06-11)
==================
- The log message "Exception during task" is no longer logged to the root
logger but to zope.server.taskthreads.
3.6.1 (2009-10-07)
==================
- Made tests pass with current zope.publisher which restricts redirects to the
current host by default.
3.6.0 (2009-05-27)
==================
- Moved some imports from test modules to their setUp to prevent
failures when ZEO tests are run by the same testrunner
- Removed unused dependency on zope.deprecation.
- Remove old zpkg-related DEPENDENCIES.cfg file.
3.5.0 (2008-03-01)
==================
- Improve package meta-data.
- Fix of 599 error on conflict error in request
see: http://mail.zope.org/pipermail/zope-dev/2008-January/030844.html
- Removed dependency on ZODB.
3.5.0a2 (2007-06-02)
====================
- Made WSGI server really WSGI-compliant by adding variables to the
environment that are required by the spec.
3.5.0a1 (2007-06-02)
====================
- Added a factory and entry point for PasteDeploy.
3.4.3 (2008-08-18)
==================
- Moved some imports from test modules to their setUp to prevent
failures when ZEO tests are run by the same testrunner
3.4.2 (2008-02-02)
==================
- Fix of 599 error on conflict error in request
see: http://mail.zope.org/pipermail/zope-dev/2008-January/030844.html
3.4.1 (2007-06-02)
==================
- Made WSGI server really WSGI-compliant by adding variables to the
environment that are required by the spec.
3.4.0 (2007-06-02)
==================
- Removed an unused import. Unchanged otherwise.
3.4.0a1 (2007-04-22)
====================
- Initial release as a separate project, corresponds to zope.server
from Zope 3.4.0a1
- Made WSGI server really WSGI-compliant by adding variables to the
environment that are required by the spec.
| zope.server | /zope.server-4.0.2.tar.gz/zope.server-4.0.2/CHANGES.rst | CHANGES.rst |
.. image:: https://img.shields.io/pypi/v/zope.server.svg
:target: https://pypi.python.org/pypi/zope.server/
:alt: Latest release
.. image:: https://img.shields.io/pypi/pyversions/zope.server.svg
:target: https://pypi.org/project/zope.server/
:alt: Supported Python versions
.. image:: https://travis-ci.org/zopefoundation/zope.server.svg?branch=master
:target: https://travis-ci.org/zopefoundation/zope.server
.. image:: https://coveralls.io/repos/github/zopefoundation/zope.server/badge.svg?branch=master
:target: https://coveralls.io/github/zopefoundation/zope.server?branch=master
This package contains generic base classes for channel-based servers, the
servers themselves and helper objects, such as tasks and requests.
============
WSGI Support
============
`zope.server`'s HTTP server comes with WSGI_ support.
``zope.server.http.wsgihttpserver.WSGIHTTPServer`` can act as a WSGI gateway.
There's also an entry point for PasteDeploy_ that lets you use zope.server's
WSGI gateway from a configuration file, e.g.::
[server:main]
use = egg:zope.server
host = 127.0.0.1
port = 8080
.. _WSGI: http://www.python.org/dev/peps/pep-0333/
.. _PasteDeploy: http://pythonpaste.org/deploy/
| zope.server | /zope.server-4.0.2.tar.gz/zope.server-4.0.2/README.rst | README.rst |
# Vendored zc.buildout bootstrap script (version below); prefer syncing
# from upstream over editing locally.
import os
import shutil
import sys
import tempfile
from optparse import OptionParser
__version__ = '2015-07-01'
# See zc.buildout's changelog if this version is up to date.
# Scratch directory where bootstrap-time eggs are installed; removed at exit.
tmpeggs = tempfile.mkdtemp(prefix='bootstrap-')
usage = '''\
[DESIRED PYTHON FOR BUILDOUT] bootstrap.py [options]
Bootstraps a buildout-based project.
Simply run this script in a directory containing a buildout.cfg, using the
Python that you want bin/buildout to use.
Note that by using --find-links to point to local resources, you can keep
this script from going over the network.
'''
parser = OptionParser(usage=usage)
parser.add_option("--version",
                  action="store_true", default=False,
                  help=("Return bootstrap.py version."))
parser.add_option("-t", "--accept-buildout-test-releases",
                  dest='accept_buildout_test_releases',
                  action="store_true", default=False,
                  help=("Normally, if you do not specify a --version, the "
                        "bootstrap script and buildout gets the newest "
                        "*final* versions of zc.buildout and its recipes and "
                        "extensions for you.  If you use this flag, "
                        "bootstrap and buildout will get the newest releases "
                        "even if they are alphas or betas."))
parser.add_option("-c", "--config-file",
                  help=("Specify the path to the buildout configuration "
                        "file to be used."))
parser.add_option("-f", "--find-links",
                  help=("Specify a URL to search for buildout releases"))
parser.add_option("--allow-site-packages",
                  action="store_true", default=False,
                  help=("Let bootstrap.py use existing site packages"))
parser.add_option("--buildout-version",
                  help="Use a specific zc.buildout version")
parser.add_option("--setuptools-version",
                  help="Use a specific setuptools version")
parser.add_option("--setuptools-to-dir",
                  help=("Allow for re-use of existing directory of "
                        "setuptools versions"))
options, args = parser.parse_args()
if options.version:
    print("bootstrap.py version %s" % __version__)
    sys.exit(0)
######################################################################
# load/install setuptools
# urlopen lives in different modules on Python 3 vs Python 2.
try:
    from urllib.request import urlopen
except ImportError:
    from urllib2 import urlopen
ez = {}
# Prefer a local ez_setup.py, falling back to downloading it.
if os.path.exists('ez_setup.py'):
    exec(open('ez_setup.py').read(), ez)
else:
    exec(urlopen('https://bootstrap.pypa.io/ez_setup.py').read(), ez)
if not options.allow_site_packages:
    # ez_setup imports site, which adds site packages
    # this will remove them from the path to ensure that incompatible versions
    # of setuptools are not in the path
    import site
    # inside a virtualenv, there is no 'getsitepackages'.
    # We can't remove these reliably
    if hasattr(site, 'getsitepackages'):
        for sitepackage_path in site.getsitepackages():
            # Strip all site-packages directories from sys.path that
            # are not sys.prefix; this is because on Windows
            # sys.prefix is a site-package directory.
            if sitepackage_path != sys.prefix:
                sys.path[:] = [x for x in sys.path
                               if sitepackage_path not in x]
setup_args = dict(to_dir=tmpeggs, download_delay=0)
if options.setuptools_version is not None:
    setup_args['version'] = options.setuptools_version
if options.setuptools_to_dir is not None:
    setup_args['to_dir'] = options.setuptools_to_dir
ez['use_setuptools'](**setup_args)
import setuptools
import pkg_resources
# This does not (always?) update the default working set.  We will
# do it.
for path in sys.path:
    if path not in pkg_resources.working_set.entries:
        pkg_resources.working_set.add_entry(path)
######################################################################
# Install buildout
ws = pkg_resources.working_set
setuptools_path = ws.find(
    pkg_resources.Requirement.parse('setuptools')).location
# Fix sys.path here as easy_install.pth added before PYTHONPATH
cmd = [sys.executable, '-c',
       'import sys; sys.path[0:0] = [%r]; ' % setuptools_path +
       'from setuptools.command.easy_install import main; main()',
       '-mZqNxd', tmpeggs]
find_links = os.environ.get(
    'bootstrap-testing-find-links',
    options.find_links or
    ('http://downloads.buildout.org/'
     if options.accept_buildout_test_releases else None)
    )
if find_links:
    cmd.extend(['-f', find_links])
requirement = 'zc.buildout'
version = options.buildout_version
if version is None and not options.accept_buildout_test_releases:
    # Figure out the most recent final version of zc.buildout.
    import setuptools.package_index
    _final_parts = '*final-', '*final'

    def _final_version(parsed_version):
        # True when parsed_version denotes a final (non pre-release)
        # version, on both new and old setuptools.
        try:
            return not parsed_version.is_prerelease
        except AttributeError:
            # Older setuptools
            for part in parsed_version:
                if (part[:1] == '*') and (part not in _final_parts):
                    return False
            return True

    index = setuptools.package_index.PackageIndex(
        search_path=[setuptools_path])
    if find_links:
        index.add_find_links((find_links,))
    req = pkg_resources.Requirement.parse(requirement)
    if index.obtain(req) is not None:
        best = []
        bestv = None
        for dist in index[req.project_name]:
            distv = dist.parsed_version
            if _final_version(distv):
                if bestv is None or distv > bestv:
                    best = [dist]
                    bestv = distv
                elif distv == bestv:
                    best.append(dist)
        if best:
            best.sort()
            version = best[-1].version
if version:
    requirement = '=='.join((requirement, version))
cmd.append(requirement)
import subprocess
if subprocess.call(cmd) != 0:
    raise Exception(
        "Failed to execute command:\n%s" % repr(cmd)[1:-1])
######################################################################
# Import and run buildout
ws.add_entry(tmpeggs)
ws.require(requirement)
import zc.buildout.buildout
# Default to the 'bootstrap' command when no command argument was given.
if not [a for a in args if '=' not in a]:
    args.append('bootstrap')
# if -c was provided, we push it back into args for buildout' main function
if options.config_file is not None:
    args[0:0] = ['-c', options.config_file]
zc.buildout.buildout.main(args)
shutil.rmtree(tmpeggs)
"""Buffers
"""
from io import BytesIO
import tempfile
# copy_bytes controls the size of temp. strings for shuffling data around.
COPY_BYTES = 1 << 18 # 256K
# The maximum number of bytes to buffer in a simple string.
STRBUF_LIMIT = 8192
class FileBasedBuffer(object):
    """FIFO byte buffer stored in a seekable file object.

    The file's read position marks the start of unconsumed data; appends
    always go to the end of the file.  ``remain`` tracks the number of
    unconsumed bytes.
    """

    # Number of unconsumed bytes currently in the buffer.
    remain = 0

    def __init__(self, file, from_buffer=None):
        self.file = file
        if from_buffer is not None:
            # This code base no longer uses this
            # function except tests that are designed
            # just to test it.
            from_file = from_buffer.getfile()
            read_pos = from_file.tell()
            from_file.seek(0)
            while 1:
                data = from_file.read(COPY_BYTES)
                if not data:
                    break
                file.write(data)
            self.remain = int(file.tell() - read_pos)
            from_file.seek(read_pos)
            file.seek(read_pos)

    def __len__(self):
        return self.remain

    def append(self, s):
        """Append bytes *s* at the end without disturbing the read position."""
        file = self.file
        read_pos = file.tell()
        file.seek(0, 2)
        file.write(s)
        file.seek(read_pos)
        self.remain = self.remain + len(s)

    def get(self, bytes=-1, skip=0):
        """Return up to *bytes* unconsumed bytes (all if negative).

        When *skip* is true the returned bytes are consumed; otherwise the
        read position is restored so they can be read again.
        """
        file = self.file
        if not skip:
            read_pos = file.tell()
        if bytes < 0:
            # Read all
            res = file.read()
        else:
            res = file.read(bytes)
        if skip:
            self.remain -= len(res)
        else:
            file.seek(read_pos)
        return res

    def skip(self, bytes, allow_prune=0):
        """Discard *bytes* bytes from the front of the buffer."""
        if self.remain < bytes:
            raise ValueError("Can't skip %d bytes in buffer of %d bytes" % (
                bytes, self.remain))
        self.file.seek(bytes, 1)
        self.remain = self.remain - bytes

    def newfile(self):
        # Subclass responsibility: return a fresh empty backing file.
        raise NotImplementedError()

    def prune(self):
        """Drop already-consumed data by copying the rest to a new file."""
        file = self.file
        if self.remain == 0:
            read_pos = file.tell()
            file.seek(0, 2)
            sz = file.tell()
            file.seek(read_pos)
            if sz == 0:
                # Nothing to prune.
                return
        nf = self.newfile()
        while 1:
            data = file.read(COPY_BYTES)
            if not data:
                break
            nf.write(data)
        self.file.close()
        self.file = nf

    def getfile(self):
        return self.file

    def close(self):
        self.file.close()
class TempfileBasedBuffer(FileBasedBuffer):
    """Buffer backed by an anonymous temporary file on disk."""

    def __init__(self, from_buffer=None):
        FileBasedBuffer.__init__(self, self.newfile(), from_buffer)

    def newfile(self):
        """Return a fresh unnamed temporary file for buffer storage."""
        return tempfile.TemporaryFile(
            mode='w+b', suffix='zope_server_buffer.tmp')
class StringIOBasedBuffer(FileBasedBuffer):
    """Buffer held entirely in memory, backed by a ``BytesIO`` object."""

    def __init__(self, from_buffer=None):
        FileBasedBuffer.__init__(self, self.newfile(), from_buffer)

    def newfile(self):
        """Return a fresh in-memory byte stream for buffer storage."""
        return BytesIO()
class OverflowableBuffer(TempfileBasedBuffer):
    """
    A buffer based on a :class:`tempfile.SpooledTemporaryFile`,
    buffering up to *overflow* (plus some extra) in memory, and
    automatically spooling that to disk when exceeded.

    .. versionchanged:: 4.0.0
       Re-implement in terms of ``SpooledTemporaryFile``.
       Internal attributes of this object such as ``overflowed`` and
       ``strbuf`` no longer exist.
    """

    def __init__(self, overflow):
        # overflow is the maximum to be stored in a SpooledTemporaryFile
        # (padded by STRBUF_LIMIT to preserve historical behaviour).
        self.overflow = overflow + STRBUF_LIMIT
        TempfileBasedBuffer.__init__(self)

    def newfile(self):
        return tempfile.SpooledTemporaryFile(max_size=self.overflow,
                                             mode='w+b',
                                             suffix='zope_server_buffer.tmp')

    def getfile(self):
        # Return the underlying file object, not the spooled file
        # (despite the _ prefix, this is a documented attribute).
        # If we haven't rolled to disk, this will be the StringIO object.
        # This improves backwards compatibility for code that assumes
        # it can do getfile().getvalue() (which before would work for
        # small values)
        # NOTE(review): ``_file`` is an implementation detail of
        # SpooledTemporaryFile — verify it still exists on new Pythons.
        return self.file._file
"""Dual-mode channel
"""
import asyncore
import socket
from time import time
from zope.server import trigger
from zope.server.adjustments import default_adj
from zope.server.buffers import OverflowableBuffer
# Create the main trigger if it doesn't exist yet.
# Module-level singleton shared by every DualModeChannel via pull_trigger()
# to wake up the asyncore main loop.
the_trigger = trigger.trigger()
class DualModeChannel(asyncore.dispatcher, object):
"""Channel that switches between asynchronous and synchronous mode.
Call set_sync() before using a channel in a thread other than
the thread handling the main loop.
Call set_async() to give the channel back to the thread handling
the main loop.
"""
# will_close is set to True to close the socket.
will_close = False
# boolean: async or sync mode
async_mode = True
last_activity = 0
def __init__(self, conn, addr, adj=None):
self.addr = addr
if adj is None:
adj = default_adj
self.adj = adj
self.outbuf = OverflowableBuffer(adj.outbuf_overflow)
self.creation_time = time()
asyncore.dispatcher.__init__(self, conn)
#
# ASYNCHRONOUS METHODS
#
def handle_close(self):
self.close()
def writable(self):
if not self.async_mode:
return 0
return self.will_close or self.outbuf
def handle_write(self):
if not self.async_mode:
return
if self.outbuf:
try:
self._flush_some()
except socket.error:
self.handle_comm_error()
elif self.will_close:
self.close()
self.last_activity = time()
def readable(self):
if not self.async_mode:
return 0
return not self.will_close
def handle_read(self):
if not self.async_mode or self.will_close:
return
try:
data = self.recv(self.adj.recv_bytes)
except socket.error:
self.handle_comm_error()
return
self.last_activity = time()
self.received(data)
def received(self, data):
"""
Override to receive data in async mode.
"""
def handle_comm_error(self):
"""
Designed for handling communication errors that occur
during asynchronous operations *only*. Probably should log
this, but in a different place.
"""
self.handle_error()
def set_sync(self):
"""Switches to synchronous mode.
The main thread will stop calling received().
"""
self.async_mode = False
#
# SYNCHRONOUS METHODS
#
def flush(self, block=True):
"""Sends pending data.
If block is set, this pauses the application. If it is turned
off, only the amount of data that can be sent without blocking
is sent.
"""
if not block:
while self._flush_some():
pass
return
blocked = False
try:
while self.outbuf:
# We propagate errors to the application on purpose.
if not blocked:
self.socket.setblocking(1)
blocked = True
self._flush_some()
finally:
if blocked:
self.socket.setblocking(0)
def set_async(self):
"""Switches to asynchronous mode.
The main thread will begin calling received() again.
"""
self.async_mode = True
self.pull_trigger()
self.last_activity = time()
#
# METHODS USED IN BOTH MODES
#
def write(self, data):
wrote = 0
if isinstance(data, bytes):
if data:
self.outbuf.append(data)
wrote = len(data)
else:
for v in data:
if v:
self.outbuf.append(v)
wrote += len(v)
while len(self.outbuf) >= self.adj.send_bytes:
# Send what we can without blocking.
# We propagate errors to the application on purpose
# (to stop the application if the connection closes).
if not self._flush_some():
break
return wrote
def pull_trigger(self):
"""Wakes up the main loop.
"""
the_trigger.pull_trigger()
def _flush_some(self):
"""Flushes data.
Returns 1 if some data was sent."""
outbuf = self.outbuf
if outbuf and self.connected:
chunk = outbuf.get(self.adj.send_bytes)
num_sent = self.send(chunk)
if num_sent:
outbuf.skip(num_sent, 1)
return 1
return 0
def close_when_done(self):
    """Flush what we can, then arrange for the channel to close.

    The actual close happens in the main (async) thread; closing
    from a worker thread could leave the select loop holding a bad
    file descriptor.
    """
    # Push out as much pending data as possible first.
    while self._flush_some():
        pass
    self.will_close = True
    if not self.async_mode:
        # For safety, don't close the socket until the main thread
        # calls handle_write(); hand control back to it now.
        self.async_mode = True
        self.pull_trigger()
def close(self):
    """Close the channel.

    Must only be called in asynchronous mode: if the connection were
    closed from a worker thread, the main loop could end up selecting
    on a bad file descriptor.
    """
    assert self.async_mode
    self.connected = False
    try:
        asyncore.dispatcher.close(self)
    except AttributeError:  # pragma: no cover (tox coverage environment is python 3)
        # On Python 2.7 close() is not idempotent.  If we were
        # already closed (or never fully opened) it raises an
        # AttributeError because it tries to call close() on
        # self.socket, which is None.
        pass
import time
import sys
import asyncore
from threading import Lock
from six import reraise
from zope.interface import implementer
from zope.server.dualmodechannel import DualModeChannel
from zope.server.interfaces import IServerChannel, ITask
# task_lock guards the task-related channel attributes (``tasks``,
# ``running_tasks``) that are touched from both the async main loop
# and the worker threads.
task_lock = Lock()
@implementer(IServerChannel, ITask)
class ServerChannelBase(DualModeChannel):
    """Base class for a high-performance, mixed-mode server-side channel.

    Requests are parsed asynchronously in the main loop; completed
    requests are wrapped in tasks and executed in worker threads (see
    ``queue_task`` / ``service``).
    """

    # See zope.server.interfaces.IServerChannel
    parser_class = None          # Subclasses must provide a parser class
    task_class = None            # ... and a task class.

    active_channels = {}         # Class-specific channel tracker
    next_channel_cleanup = [0]   # Class-specific cleanup time; a one-element
                                 # list so all instances share/mutate it
    proto_request = None         # A request parser instance
    last_activity = 0            # Time of last activity
    tasks = None                 # List of channel-related tasks to execute
    running_tasks = False        # True when another thread is running tasks

    #
    # ASYNCHRONOUS METHODS (including __init__)
    #

    def __init__(self, server, conn, addr, adj=None):
        """See async.dispatcher"""
        DualModeChannel.__init__(self, conn, addr, adj)
        self.server = server
        self.last_activity = t = self.creation_time
        self.check_maintenance(t)

    def add_channel(self, map=None):
        """See async.dispatcher

        This hook keeps track of opened channels.
        """
        DualModeChannel.add_channel(self, map)
        self.__class__.active_channels[self._fileno] = self

    def del_channel(self, map=None):
        """See async.dispatcher

        This hook keeps track of closed channels.
        """
        DualModeChannel.del_channel(self, map)
        ac = self.__class__.active_channels
        fd = self._fileno
        if fd in ac:
            del ac[fd]

    def check_maintenance(self, now):
        """See async.dispatcher

        Performs maintenance if necessary, at most once every
        ``adj.cleanup_interval`` seconds.
        """
        ncc = self.__class__.next_channel_cleanup
        if now < ncc[0]:
            return
        ncc[0] = now + self.adj.cleanup_interval
        self.maintenance()

    def maintenance(self):
        """See async.dispatcher

        Kills off dead connections.
        """
        self.kill_zombies()

    def kill_zombies(self):
        """See async.dispatcher

        Closes connections that have not had any activity in a while.

        The timeout is configured through adj.channel_timeout (seconds).
        """
        now = time.time()
        cutoff = now - self.adj.channel_timeout
        # channel.close calls channel.del_channel, which can change
        # the size of the map.
        for channel in list(self.active_channels.values()):
            if (channel is not self and not channel.running_tasks and
                    channel.last_activity < cutoff):
                channel.close()

    def received(self, data):
        """See async.dispatcher

        Receives input asynchronously and sends completed requests to
        handle_request().
        """
        preq = self.proto_request
        while data:
            if preq is None:
                preq = self.parser_class(self.adj)
            n = preq.received(data)
            if preq.completed:
                # The request is ready to use.
                self.proto_request = None
                if not preq.empty:
                    self.handle_request(preq)
                preq = None
            else:
                # Partial request: remember the parser for the next read.
                self.proto_request = preq
            if n >= len(data):
                break
            # The parser consumed only part of the data; loop to feed it
            # the rest (pipelined requests).
            data = data[n:]

    def handle_request(self, req):
        """Creates and queues a task for processing a request.

        Subclasses may override this method to handle some requests
        immediately in the main async thread.
        """
        task = self.task_class(self, req)
        self.queue_task(task)

    def handle_error(self):
        """See async.dispatcher

        Handles program errors (not communication errors)
        """
        t, v = sys.exc_info()[:2]
        if issubclass(t, (SystemExit, KeyboardInterrupt)):
            # Never swallow shutdown signals.
            reraise(*sys.exc_info())
        asyncore.dispatcher.handle_error(self)

    def handle_comm_error(self):
        """See async.dispatcher

        Handles communication errors (not program errors)
        """
        if self.adj.log_socket_errors:
            self.handle_error()
        else:
            # Ignore socket errors.
            self.close()

    #
    # BOTH MODES
    #

    def queue_task(self, task):
        """Queue a channel-related task to be executed in another thread."""
        start = False
        with task_lock:
            if self.tasks is None:
                self.tasks = []
            self.tasks.append(task)
            if not self.running_tasks:
                self.running_tasks = True
                start = True
        if start:
            # First task queued: stop async reads and hand the channel
            # (itself an ITask) to the server's dispatcher.
            self.set_sync()
            self.server.addTask(self)

    #
    # ITask implementation. Delegates to the queued tasks.
    #

    def service(self):
        """Execute all pending tasks"""
        while True:
            task = None
            with task_lock:
                if self.tasks:
                    task = self.tasks.pop(0)
                else:
                    # No more tasks
                    self.running_tasks = False
                    self.set_async()
                    break
            try:
                task.service()
            except:
                # propagate the exception, but keep executing tasks
                self.server.addTask(self)
                raise

    def cancel(self):
        """Cancels all pending tasks"""
        with task_lock:
            old = () if not self.tasks else list(self.tasks)
            self.tasks = []
            self.running_tasks = False
        try:
            for task in old:
                task.cancel()
        finally:
            self.set_async()

    def defer(self):
        pass
try:
    from zope.testing.cleanup import addCleanUp
except ImportError:  # pragma: no cover
    pass
else:
    # Tests are very bad about actually closing all the channels that
    # they create, which leads to an ever-growing active_channels map.
    # Register a zope.testing cleanup hook that closes and forgets them.
    def _clean_active_channels():
        for c in list(ServerChannelBase.active_channels.values()):
            try:
                c.close()
            except BaseException:  # pragma: no cover
                pass
        ServerChannelBase.active_channels.clear()
    addCleanUp(_clean_active_channels)
"""Threaded Task Dispatcher
"""
import logging
import threading
from time import time, sleep
from six.moves.queue import Queue, Empty
from zope.server.interfaces import ITaskDispatcher
from zope.interface import implementer
log = logging.getLogger(__name__)
@implementer(ITaskDispatcher)
class ThreadedTaskDispatcher(object):
    """A task dispatcher that hands tasks to a pool of handler threads.

    (The previous docstring claimed one thread per task; the pool size
    is actually controlled via ``setThreadCount``.)
    """

    stop_count = 0  # Number of threads that will stop soon.

    def __init__(self):
        self.threads = {}  # { thread number -> 1 }
        self.queue = Queue()
        self.thread_mgmt_lock = threading.Lock()

    def handlerThread(self, thread_no):
        """Worker loop: pop tasks off the queue and service them.

        A ``None`` task is the sentinel telling this thread to exit.
        """
        threads = self.threads
        try:
            while threads.get(thread_no):
                task = self.queue.get()
                if task is None:
                    # Special value: kill this thread.
                    break
                try:
                    task.service()
                except:
                    log.exception('Exception during task')
        except:
            log.exception('Exception in thread main loop')
        finally:
            # Account for this thread's exit, however it happened.
            mlock = self.thread_mgmt_lock
            with mlock:
                self.stop_count -= 1
                try:
                    del threads[thread_no]
                except KeyError:
                    pass

    def setThreadCount(self, count):
        """See zope.server.interfaces.ITaskDispatcher"""
        mlock = self.thread_mgmt_lock
        with mlock:
            threads = self.threads
            thread_no = 0
            running = len(threads) - self.stop_count
            while running < count:
                # Start threads.
                while thread_no in threads:
                    thread_no = thread_no + 1
                threads[thread_no] = 1
                running += 1
                t = threading.Thread(target=self.handlerThread,
                                     args=(thread_no,),
                                     name='zope.server-%d' % thread_no)
                # Use the attribute form: setDaemon() was deprecated and
                # removed in Python 3.13.
                t.daemon = True
                t.start()
                thread_no = thread_no + 1
            if running > count:
                # Stop threads by feeding one sentinel per surplus thread.
                to_stop = running - count
                self.stop_count += to_stop
                for _n in range(to_stop):
                    self.queue.put(None)
                    running -= 1

    def addTask(self, task):
        """See zope.server.interfaces.ITaskDispatcher"""
        if task is None:
            raise ValueError("No task passed to addTask().")
        # assert ITask.providedBy(task)
        try:
            task.defer()
            self.queue.put(task)
        except:
            # Don't leave a deferred task dangling if queueing failed.
            task.cancel()
            raise

    def shutdown(self, cancel_pending=True, timeout=5):
        """See zope.server.interfaces.ITaskDispatcher"""
        self.setThreadCount(0)
        # Ensure the threads shut down.
        threads = self.threads
        expiration = time() + timeout
        while threads:
            if time() >= expiration:
                log.error("%d thread(s) still running", len(threads))
                break
            sleep(0.1)
        if cancel_pending:
            # Cancel remaining tasks.  Use a non-blocking get: with the
            # blocking form, the empty()/get() pair could hang forever if
            # a racing consumer drained the queue in between; it also
            # makes the Empty handler below actually reachable.
            try:
                queue = self.queue
                while not queue.empty():
                    task = queue.get(block=False)
                    if task is not None:
                        task.cancel()
            except Empty:
                pass

    def getPendingTasksEstimate(self):
        """See zope.server.interfaces.ITaskDispatcher"""
        return self.queue.qsize()
"""Adjustments are tunable parameters.
"""
import socket
from zope.server import maxsockets
class Adjustments(object):
    """This class contains tunable communication parameters.

    You can either change default_adj to adjust parameters for
    all sockets, or you can create a new instance of this class,
    change its attributes, and pass it to the channel constructors.
    """

    # backlog is the argument to pass to socket.listen().
    backlog = 1024

    # recv_bytes is the argument to pass to socket.recv().
    recv_bytes = 8192

    # send_bytes is the number of bytes to send to socket.send().
    # Multiples of 9000 should avoid partly-filled packets, but don't
    # set this larger than the TCP write buffer size. In Linux,
    # /proc/sys/net/ipv4/tcp_wmem controls the minimum, default, and
    # maximum sizes of TCP write buffers.
    send_bytes = 9000

    # copy_bytes is the number of bytes to copy from one file to another.
    copy_bytes = 65536

    # Create a tempfile if the pending output data gets larger
    # than outbuf_overflow. With RAM so cheap, this probably
    # ought to be set to the 16-32 MB range (circa 2001) for
    # good performance with big transfers. The default is
    # conservative.
    outbuf_overflow = 1050000

    # Create a tempfile if the data received gets larger
    # than inbuf_overflow.
    inbuf_overflow = 525000

    # Stop accepting new connections if too many are already active;
    # keep a few descriptors in reserve for other uses.
    connection_limit = maxsockets.max_select_sockets() - 3  # Safe

    # Minimum seconds between cleaning up inactive channels.
    cleanup_interval = 300

    # Maximum seconds to leave an inactive connection open.
    channel_timeout = 900

    # Boolean: turn off to not log premature client disconnects.
    log_socket_errors = 1

    # The socket options to set on receiving a connection.
    # It is a list of (level, optname, value) tuples.
    # TCP_NODELAY is probably good for Zope, since Zope buffers
    # data itself.
    socket_options = [
        (socket.SOL_TCP, socket.TCP_NODELAY, 1),
    ]
default_adj = Adjustments() | zope.server | /zope.server-4.0.2.tar.gz/zope.server-4.0.2/src/zope/server/adjustments.py | adjustments.py |
import asyncore
import logging
import socket
from zope.server.adjustments import default_adj
from zope.server.interfaces import IServer
from zope.interface import implementer
@implementer(IServer)
class ServerBase(asyncore.dispatcher, object):
    """Async. server base for launching derivatives of ServerChannelBase.

    Listens on (ip, port) and creates a ``channel_class`` instance for
    every accepted connection.
    """

    # See zope.server.interfaces.IServer
    channel_class = None    # Override with a channel class.
    SERVER_IDENT = 'zope.server.serverbase'  # Override.

    def __init__(self, ip, port, task_dispatcher=None, adj=None, start=1,
                 hit_log=None, verbose=0):
        if adj is None:
            adj = default_adj
        self.adj = adj
        asyncore.dispatcher.__init__(self)
        self.port = port
        self.task_dispatcher = task_dispatcher
        self.verbose = verbose
        self.hit_log = hit_log
        self.logger = logging.getLogger(self.__class__.__name__)
        self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            self.set_reuse_addr()
            self.bind((ip, port))
            self.server_name = self.computeServerName(ip)
            if start:
                self.accept_connections()
        except BaseException:
            # Don't leak the listening socket if startup fails part way.
            self.close()
            raise

    # Maps log_info()'s ``type`` argument to stdlib logging levels.
    level_mapping = {
        'info': logging.INFO,
        'error': logging.ERROR,
        'warning': logging.WARN,
    }

    def log_info(self, message, type='info'):
        """See zope.server.interfaces.IDispatcherLogging"""
        self.logger.log(self.level_mapping.get(type, logging.INFO), message)

    log = log_info

    def computeServerName(self, ip=''):
        """Given an IP, try to determine the server name."""
        if ip:
            server_name = str(ip)
        else:
            server_name = str(socket.gethostname())
        # Convert to a host name if necessary.
        is_hostname = 0
        for c in server_name:
            # XXX: What about ipv6?
            if c != '.' and not c.isdigit():
                is_hostname = 1
                break
        if not is_hostname:
            if self.verbose:
                self.log_info('Computing hostname', 'info')
            try:
                server_name = socket.gethostbyaddr(server_name)[0]
            except socket.error:
                if self.verbose:
                    self.log_info('Cannot do reverse lookup', 'info')
        return server_name

    def accept_connections(self):
        """Start accepting connections on the bound socket."""
        self.accepting = 1
        self.socket.listen(self.adj.backlog)  # Circumvent asyncore's NT limit
        if self.verbose:
            self.log_info('%s started.\n'
                          '\tHostname: %s\n\tPort: %d%s' % (
                              self.SERVER_IDENT,
                              self.server_name,
                              self.port,
                              self.getExtraLogMessage()
                          ))

    def getExtraLogMessage(self):
        r"""Additional information to be logged on startup.

        If not empty, should start with '\n\t', and every line break should
        be followed by a '\t'.
        """
        return ''

    def addTask(self, task):
        """See zope.server.interfaces.ITaskDispatcher"""
        td = self.task_dispatcher
        if td is not None:
            td.addTask(task)
        else:
            # No dispatcher configured: run the task inline.
            task.service()

    def readable(self):
        """See zope.server.interfaces.IDispatcher"""
        return (self.accepting and
                len(asyncore.socket_map) < self.adj.connection_limit)

    def writable(self):
        """See zope.server.interfaces.IDispatcher"""
        return 0

    def handle_read(self):
        """See zope.server.interfaces.IDispatcherEventHandler"""

    def handle_connect(self):
        """See zope.server.interfaces.IDispatcherEventHandler"""

    def handle_accept(self):
        """See zope.server.interfaces.IDispatcherEventHandler"""
        try:
            v = self.accept()
            if v is None:
                return
            conn, addr = v
        except socket.error:
            # Linux: On rare occasions we get a bogus socket back from
            # accept. socketmodule.c:makesockaddr complains that the
            # address family is unknown. We don't want the whole server
            # to shut down because of this.
            if self.adj.log_socket_errors:
                self.log_info('warning: server accept() threw an exception',
                              'warning')
            return
        for (level, optname, value) in self.adj.socket_options:
            conn.setsockopt(level, optname, value)
        # The channel registers itself with the async map on creation.
        self.channel_class(self, conn, addr, self.adj)
from __future__ import print_function
import asyncore
import os
import socket
import struct
from threading import Lock
import errno
# One more than the largest value a platform pointer can hold.
_ADDRESS_MASK = 256 ** struct.calcsize('P')


def positive_id(obj):
    """Return id(obj) as a non-negative integer.

    On platforms where id() can yield a negative (signed) address,
    the value is wrapped into the corresponding unsigned range; the
    output therefore depends on the size of void* on the platform.
    """
    address = id(obj) % _ADDRESS_MASK
    assert address > 0
    return address
# Original comments follow; they're hard to follow in the context of
# ZEO's use of triggers. TODO: rewrite from a ZEO perspective.
# Wake up a call to select() running in the main thread.
#
# This is useful in a context where you are using Medusa's I/O
# subsystem to deliver data, but the data is generated by another
# thread. Normally, if Medusa is in the middle of a call to
# select(), new output data generated by another thread will have
# to sit until the call to select() either times out or returns.
# If the trigger is 'pulled' by another thread, it should immediately
# generate a READ event on the trigger object, which will force the
# select() invocation to return.
#
# A common use for this facility: letting Medusa manage I/O for a
# large number of connections; but routing each request through a
# thread chosen from a fixed-size thread pool. When a thread is
# acquired, a transaction is performed, but output data is
# accumulated into buffers that will be emptied more efficiently
# by Medusa. [picture a server that can process database queries
# rapidly, but doesn't want to tie up threads waiting to send data
# to low-bandwidth connections]
#
# The other major feature provided by this class is the ability to
# move work back into the main thread: if you call pull_trigger()
# with a thunk argument, when select() wakes up and receives the
# event it will call your thunk from within that thread. The main
# purpose of this is to remove the need to wrap thread locks around
# Medusa's data structures, which normally do not need them. [To see
# why this is true, imagine this scenario: A thread tries to push some
# new data onto a channel's outgoing data queue at the same time that
# the main thread is trying to remove some]
class _triggerbase(object):
"""OS-independent base class for OS-dependent trigger class."""
kind = None # subclass must set to "pipe" or "loopback"; used by repr
def __init__(self):
self._closed = False
# `lock` protects the `thunks` list from being traversed and
# appended to simultaneously.
self.lock = Lock()
# List of no-argument callbacks to invoke when the trigger is
# pulled. These run in the thread running the asyncore mainloop,
# regardless of which thread pulls the trigger.
self.thunks = []
def readable(self):
return 1
def writable(self):
return 0
def handle_connect(self):
pass
def handle_close(self):
self.close()
# Override the asyncore close() method, because it doesn't know about
# (so can't close) all the gimmicks we have open. Subclass must
# supply a _close() method to do platform-specific closing work. _close()
# will be called iff we're not already closed.
def close(self):
if not self._closed:
self._closed = True
self.del_channel()
self._close() # subclass does OS-specific stuff
def _close(self): # see close() above; subclass must supply
raise NotImplementedError
def pull_trigger(self, thunk=None):
if thunk:
with self.lock:
self.thunks.append(thunk)
self._physical_pull()
# Subclass must supply _physical_pull, which does whatever the OS
# needs to do to provoke the "write" end of the trigger.
def _physical_pull(self):
raise NotImplementedError
def handle_read(self):
try:
self.recv(8192)
except socket.error:
return
with self.lock:
for thunk in self.thunks:
try:
thunk()
except:
_nil, t, v, tbinfo = asyncore.compact_traceback()
print('exception in trigger thunk:'
' (%s:%s %s)' % (t, v, tbinfo))
self.thunks = []
def __repr__(self):
return '<select-trigger (%s) at %x>' % (self.kind, positive_id(self))
if hasattr(asyncore, 'file_dispatcher'):
    # asyncore.file_dispatcher does not exist on Windows
    class pipetrigger(_triggerbase, asyncore.file_dispatcher):
        """POSIX trigger: wakes select() by writing a byte to a pipe."""
        kind = "pipe"

        def __init__(self):
            _triggerbase.__init__(self)
            r, self.trigger = os.pipe()
            asyncore.file_dispatcher.__init__(self, r)
            if self.socket.fd != r:
                # Starting in Python 2.6, the descriptor passed to
                # file_dispatcher gets duped and assigned to
                # self.socket.fd. This breaks the instantiation semantics and
                # is a bug imo. I doubt it will get fixed, but maybe
                # it will. Who knows. For that reason, we test for the
                # fd changing rather than just checking the Python version.
                os.close(r)

        def _close(self):
            # Close both ends; the read end may live on as self.socket
            # (see the dup note in __init__).
            if self.socket is not None:
                self.socket.close()
                self.socket = None
            if self.trigger is not None:
                os.close(self.trigger)
                self.trigger = None

        def _physical_pull(self):
            os.write(self.trigger, b'x')
class BindError(Exception):
    """Raised when the loopback trigger cannot obtain a usable port."""
    pass
class sockettrigger(_triggerbase, asyncore.dispatcher):
    # Windows version; uses just sockets, because a pipe isn't select'able
    # on Windows.
    kind = "loopback"

    # errno values meaning "Address already in use" (POSIX and Winsock).
    ADDR_IN_USE_CODES = (getattr(errno, 'EADDRINUSE', -1),
                         getattr(errno, 'WSAEADDRINUSE', -1))

    def __init__(self):
        _triggerbase.__init__(self)
        # Get a pair of connected sockets. The trigger is the 'w'
        # end of the pair, which is connected to 'r'. 'r' is put
        # in the asyncore socket map. "pulling the trigger" then
        # means writing something on w, which will wake up r.
        w = socket.socket()
        # Disable buffering -- pulling the trigger sends 1 byte,
        # and we want that sent immediately, to wake up asyncore's
        # select() ASAP.
        w.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
        count = 0
        while 1:
            count += 1
            # Bind to a local port; for efficiency, let the OS pick
            # a free port for us.
            # Unfortunately, stress tests showed that we may not
            # be able to connect to that port ("Address already in
            # use") despite that the OS picked it. This appears
            # to be a race bug in the Windows socket implementation.
            # So we loop until a connect() succeeds (almost always
            # on the first try). See the long thread at
            # http://mail.zope.org/pipermail/zope/2005-July/160433.html
            # for hideous details.
            a = socket.socket()
            a.bind(("127.0.0.1", 0))
            connect_address = a.getsockname()  # assigned (host, port) pair
            a.listen(1)
            try:
                self._connect_client(w, connect_address)
                break  # success
            except socket.error as detail:
                if detail.args[0] not in self.ADDR_IN_USE_CODES:
                    # "Address already in use" is the only error
                    # I've seen on two WinXP Pro SP2 boxes, under
                    # Pythons 2.3.5 and 2.4.1.
                    # (Original commit: https://github.com/zopefoundation/ZEO/commit/c4f736a78ca6713fc3dec21f8aa1fa6f144dd82f)
                    a.close()
                    w.close()
                    raise
                # (10048, 'Address already in use')
                # assert count <= 2 # never triggered in Tim's tests
                if count >= 10:  # I've never seen it go above 2
                    a.close()
                    w.close()
                    raise BindError("Cannot bind trigger!")
                # Close `a` and try again. Note: I originally put a short
                # sleep() here, but it didn't appear to help or hurt.
                a.close()
        r, addr = a.accept()  # r becomes asyncore's (self.)socket
        a.close()
        self.trigger = w
        asyncore.dispatcher.__init__(self, r)

    def _connect_client(self, w, connect_address):
        w.connect(connect_address)

    def _close(self):
        # self.socket is r, and self.trigger is w, from __init__
        self.socket.close()
        self.trigger.close()

    def _physical_pull(self):
        self.trigger.send(b'x')
# Select the platform-appropriate trigger implementation.
if os.name == 'posix':
    trigger = pipetrigger
else:  # pragma: no cover
    trigger = sockettrigger
"""FTP server specific interfaces.
"""
from zope.interface import Interface
class IFTPCommandHandler(Interface):
    """This interface defines all the FTP commands that are supported by the
    server.

    Every command takes the command line as first argument, since it is
    responsible for parsing its own arguments.
    """

    def cmd_abor(args):
        """Abort operation. No read access required.
        """

    def cmd_appe(args):
        """Append to a file. Write access required.
        """

    def cmd_cdup(args):
        """Change to parent of current working directory.
        """

    def cmd_cwd(args):
        """Change working directory.
        """

    def cmd_dele(args):
        """Delete a file. Write access required.
        """

    def cmd_help(args):
        """Give help information. No read access required.
        """

    def cmd_list(args):
        """List files in a directory, or display the info of one file.
        """

    def cmd_mdtm(args):
        """Show last modification time of file.

        Example output: 213 19960301204320

        There also seems to be a second syntax for this command, where
        one can set the modification time using:
        MDTM datestring pathname
        """

    def cmd_mkd(args):
        """Make a directory. Write access required.
        """

    def cmd_mode(args):
        """Set file transfer mode. No read access required. Obsolete.
        """

    def cmd_nlst(args):
        """Give name list of files in directory.
        """

    def cmd_noop(args):
        """Do nothing. No read access required.
        """

    def cmd_pass(args):
        """Specify password.
        """

    def cmd_pasv(args):
        """Prepare for server-to-server transfer. No read access required.
        """

    def cmd_port(args):
        """Specify data connection port. No read access required.
        """

    def cmd_pwd(args):
        """Print the current working directory.
        """

    def cmd_quit(args):
        """Terminate session. No read access required.
        """

    def cmd_rest(args):
        """Restart incomplete transfer.
        """

    def cmd_retr(args):
        """Retrieve a file.
        """

    def cmd_rmd(args):
        """Remove a directory. Write access required.
        """

    def cmd_rnfr(args):
        """Specify rename-from file name. Write access required.
        """

    def cmd_rnto(args):
        """Specify rename-to file name. Write access required.
        """

    def cmd_size(args):
        """Return size of file.
        """

    def cmd_stat(args):
        """Return status of server. No read access required.
        """

    def cmd_stor(args):
        """Store a file. Write access required.
        """

    def cmd_stru(args):
        """Set file transfer structure. Obsolete."""

    def cmd_syst(args):
        """Show operating system type of server system.

        No read access required.

        Replying to this command is of questionable utility,
        because this server does not behave in a predictable way
        w.r.t. the output of the LIST command. We emulate Unix ls
        output, but on win32 the pathname can contain drive
        information at the front Currently, the combination of
        ensuring that os.sep == '/' and removing the leading slash
        when necessary seems to work. [cd'ing to another drive
        also works]

        This is how wuftpd responds, and is probably the most
        expected. The main purpose of this reply is so that the
        client knows to expect Unix ls-style LIST output.

        one disadvantage to this is that some client programs
        assume they can pass args to /bin/ls. a few typical
        responses:

        215 UNIX Type: L8 (wuftpd)
        215 Windows_NT version 3.51
        215 VMS MultiNet V3.3
        500 'SYST': command not understood. (SVR4)
        """

    def cmd_type(args):
        """Specify data transfer type. No read access required.
        """

    def cmd_user(args):
        """Specify user name. No read access required.
        """

    # this is the command list from the wuftpd man page
    # '!' requires write access
    #
    not_implemented_commands = {
        'acct': 'specify account (ignored)',
        'allo': 'allocate storage (vacuously)',
        'site': 'non-standard commands (see next section)',
        'stou': 'store a file with a unique name', #!
        'xcup': 'change to parent of current working directory (deprecated)',
        'xcwd': 'change working directory (deprecated)',
        'xmkd': 'make a directory (deprecated)', #!
        'xpwd': 'print the current working directory (deprecated)',
        'xrmd': 'remove a directory (deprecated)', #!
    }
class IFileSystemAccess(Interface):
    """Provides authenticated access to a filesystem."""

    def authenticate(credentials):
        """Verifies filesystem access based on the presented credentials.

        Should raise zope.security.interfaces.Unauthorized if the user can
        not be authenticated.

        This method checks only general access and is not used for each
        call to open(). Rather, open() should do its own verification.

        Credentials are passed as (username, password) tuples.
        """

    def open(credentials):
        """Returns an IFileSystem.

        Should raise zope.security.interfaces.Unauthorized if the user
        can not be authenticated.

        Credentials are passed as (username, password) tuples.
        """
class IFileSystem(Interface):
    """An abstract filesystem.

    Opening files for reading, and listing directories, should
    return a producer.

    All paths are POSIX paths, even when run on Windows,
    which mainly means that FS implementations always expect forward
    slashes, and filenames are case-sensitive.

    `IFileSystem`, in general, could be created many times per
    request. Thus it is not advisable to store state in them. However, if
    you have a special kind of `IFileSystemAccess` object that somehow
    manages an `IFileSystem` for each set of credentials, then it would be
    possible to store some state on this object.
    """

    def type(path):
        """Return the file type at `path`.

        The return value is 'd', for a directory, 'f', for a file, and
        None if there is no file at `path`.

        This method doesn't raise exceptions.
        """

    def names(path, filter=None):
        """Return a sequence of the names in a directory.

        If `filter` is not None, include only those names for which
        `filter` returns a true value.
        """

    def ls(path, filter=None):
        """Return a sequence of information objects.

        Return item info objects (see the `lsinfo` operation) for the files
        in a directory.

        If `filter` is not None, include only those names for which
        `filter` returns a true value.
        """

    def readfile(path, outstream, start=0, end=None):
        """Outputs the file at `path` to a stream.

        Data are copied starting from `start`. If `end` is not None,
        data are copied up to `end`.
        """

    def lsinfo(path):
        """Return information for a unix-style ls listing for `path`.

        Information is returned as a dictionary containing the following keys:

        type
            The path type, either 'd' or 'f'.
        owner_name
            Defaults to "na". Must not include spaces.
        owner_readable
            Defaults to True.
        owner_writable
            Defaults to True.
        owner_executable
            Defaults to True for directories and False otherwise.
        group_name
            Defaults to "na". Must not include spaces.
        group_readable
            Defaults to True.
        group_writable
            Defaults to True.
        group_executable
            Defaults to True for directories and False otherwise.
        other_readable
            Defaults to False.
        other_writable
            Defaults to False.
        other_executable
            Defaults to True for directories and False otherwise.
        mtime
            Optional time, as a datetime.datetime object.
        nlinks
            The number of links. Defaults to 1.
        size
            The file size. Defaults to 0.
        name
            The file name.
        """

    def mtime(path):
        """Return the modification time for the file at `path`.

        This method returns the modification time. It is assumed that the path
        exists. You can use the `type(path)` method to determine whether
        `path` points to a valid file.

        If the modification time is unknown, then return `None`.
        """

    def size(path):
        """Return the size of the file at path.

        This method returns the size. It is assumed that the path
        exists. You can use the `type(path)` method to determine whether
        `path` points to a valid file.
        """

    def mkdir(path):
        """Create a directory.

        If it is not possible or allowed to create the directory, an `OSError`
        should be raised describing the reason of failure.
        """

    def remove(path):
        """Remove a file. Same as unlink.

        If it is not possible or allowed to remove the file, an `OSError`
        should be raised describing the reason of failure.
        """

    def rmdir(path):
        """Remove a directory.

        If it is not possible or allowed to remove the directory, an `OSError`
        should be raised describing the reason of failure.
        """

    def rename(old, new):
        """Rename a file or directory."""

    def writefile(path, instream, start=None, end=None, append=False):
        """Write data to a file.

        Both `start` and `end` must be either None or a non-negative
        integer.

        If `append` is true, `start` and `end` are ignored.

        If `start` or `end` is not None, they specify the part of the
        file that is to be written.

        If `end` is None, the file is truncated after the data are
        written. If `end` is not None, any parts of the file after
        `end` are left unchanged.

        Note that if `end` is not `None`, and there is not enough data
        in the `instream` it will fill the file up to `end`, then the missing
        data are undefined.

        If both `start` is `None` and `end` is `None`, then the file contents
        are overwritten.

        If `start` is specified and the file doesn't exist or is shorter
        than `start`, the data in the file before `start` file will be
        undefined.

        If you do not want to handle incorrect starting and ending indices,
        you can also raise an `IOError`, which will be properly handled by the
        server.
        """

    def writable(path):
        """Return boolean indicating whether a file at path is writable.

        Note that a true value should be returned if the file doesn't
        exist but its directory is writable.
        """
"""Server interfaces.
"""
from zope.interface import Interface
from zope.interface import Attribute
class ISocket(Interface):
    """Represents a socket.

    Note: Most of this documentation is adapted from the Python Library
    Reference.
    """

    def listen(backlog):
        """Listen for connections made to the socket.

        The 'backlog' argument specifies the maximum number of queued
        connections and should be at least 1; the maximum value is
        system-dependent (usually 5).
        """

    def bind(addr):
        """Bind the socket to address.

        The socket must not already be bound.
        """

    def connect(address):
        """Connect to a remote socket at address."""

    def accept():
        """Accept a connection.

        The socket must be bound to an address and listening for
        connections. The return value is a pair (conn, address) where conn is
        a new socket object usable to send and receive data on the connection,
        and address is the address bound to the socket on the other end of the
        connection.
        """

    def recv(buffer_size):
        """Receive data from the socket.

        The return value is a string representing the data received. The
        maximum amount of data to be received at once is specified by
        'buffer_size'.
        """

    def send(data):
        """Send data to the socket.

        The socket must be connected to a remote socket. Returns the number
        of bytes sent. Applications are responsible for checking that all data
        has been sent; if only some of the data was transmitted, the
        application needs to attempt delivery of the remaining data.
        """

    def close():
        """Close the socket.

        All future operations on the socket object will fail. The remote end
        will receive no more data (after queued data is flushed). Sockets are
        automatically closed when they are garbage-collected.
        """
class ITaskDispatcher(Interface):
    """An object that accepts tasks and dispatches them to threads.
    """

    def setThreadCount(count):
        """Sets the number of handler threads.
        """

    def addTask(task):
        """Receives a task and dispatches it to a thread.

        Note that, depending on load, a task may have to wait a
        while for its turn.
        """

    def shutdown(cancel_pending=True, timeout=5):
        """Shuts down all handler threads and may cancel pending tasks.

        'timeout' is in seconds; whether still-pending tasks are
        cancelled is controlled by 'cancel_pending' (see the concrete
        implementation in taskthreads for exact semantics).
        """

    def getPendingTasksEstimate():
        """Returns an estimate of the number of tasks waiting to be serviced.

        This method may be useful for monitoring purposes.  If the
        number of pending tasks is continually climbing, your server
        is becoming overloaded and the operator should be notified.
        """
class ITask(Interface):
    """
    The interface expected of an object placed in the queue of
    a ThreadedTaskDispatcher.  Provides facilities for executing
    or canceling the task.
    """

    def service():
        """
        Services the task.  Exactly one of service() or cancel() is
        called for every task queued.
        """

    def cancel():
        """
        Called instead of service() during shutdown or if an
        exception occurs that prevents the task from being
        serviced.  Must return quickly and should not throw exceptions.
        """

    def defer():
        """
        Called just before the task is queued to be executed in
        a different thread.
        """
class IDispatcherEventHandler(Interface):
    """The Dispatcher can receive several different types of events. This
    interface describes the necessary methods that handle these common
    event types.
    """

    def handle_read_event():
        """Given a read event, a server has to handle the event and
        read the input from the client.
        """

    def handle_write_event():
        """Given a write event, a server has to handle the event and
        write the output to the client.
        """

    def handle_expt_event():
        """An exception event was handed to the server.
        """

    def handle_error():
        """An error occurred, but we are still trying to fix it.
        """

    def handle_expt():
        """Handle unhandled exceptions.  This is usually a time to log.
        """

    def handle_read():
        """Read input sent by the client.
        """

    def handle_write():
        """Write output via the socket to the client.
        """

    def handle_connect():
        """A client requests a connection, now we need to do something.
        """

    def handle_accept():
        """A connection is accepted.
        """

    def handle_close():
        """A connection is being closed.
        """
class IStreamConsumer(Interface):
    """Consumes a data stream until reaching a completion point.

    The actual amount to be consumed might not be known ahead of time
    (e.g. a chunked HTTP request body).
    """

    def received(data):
        """Accepts data, returning the number of bytes consumed."""

    completed = Attribute(
        'completed', 'Set to a true value when finished consuming data.')
class IServer(Interface):
    """This interface describes the basic base server.

    The most unusual part about the Zope servers (since they all
    implement this interface or inherit its base class) is that it
    uses a mix of asynchronous and thread-based mechanism to
    serve.  While the low-level socket listener uses async, the
    actual request is executed in a thread.  This is important
    because even if a request takes a long time to process, the
    server can service other requests simultaneously.
    """

    channel_class = Attribute("""
                        The channel class defines the type of channel
                        to be used by the server. See IServerChannel
                        for more information.
                        """)

    SERVER_IDENT = Attribute("""
                        This string identifies the server. By default
                        this is 'zope.server.' and should be
                        overridden.
                        """)
class IDispatcherLogging(Interface):
    """This interface provides methods through which the Dispatcher will
    write its logs. A distinction is made between hit and message logging,
    since they often go to different output types and can have very
    different structure.
    """

    def log (message):
        """Logs general requests made to the server.
        """

    def log_info(message, type='info'):
        """Logs informational messages, warnings and errors.
        """
class IServerChannel(Interface):
    """A channel ties a client connection to a request parser and a
    queue of tasks to be executed in sequence."""

    parser_class = Attribute("""Subclasses must provide a parser class""")
    task_class = Attribute("""Specifies the ITask class to be used for
                           generating tasks.""")

    def queue_task(task):
        """Queues a channel-related task to be processed in sequence.
        """
class IDispatcher(ISocket, IDispatcherEventHandler, IDispatcherLogging):
    """The dispatcher is the most low-level component of a server.

    1. It manages the socket connections and distributes the
       request to the appropriate channel.

    2. It handles the events passed to it, such as reading input,
       writing output and handling errors. More about this
       functionality can be found in IDispatcherEventHandler.

    3. It handles logging of the requests passed to the server as
       well as other informational messages and errors. Please see
       IDispatcherLogging for more details.

    Note: Most of this documentation is taken from the Python
    Library Reference.
    """

    def add_channel(map=None):
        """After the low-level socket connection negotiation is
        completed, a channel is created that handles all requests
        and responses until the end of the connection.
        """

    def del_channel(map=None):
        """Delete a channel. This should include also closing the
        socket to the client.
        """

    def create_socket(family, type):
        """This is identical to the creation of a normal socket, and
        will use the same options for creation. Refer to the socket
        documentation for information on creating sockets.
        """

    def readable():
        """Each time through the select() loop, the set of sockets is
        scanned, and this method is called to see if there is any
        interest in reading. The default method simply returns 1,
        indicating that by default, all channels will be
        interested.
        """

    def writable():
        """Each time through the select() loop, the set of sockets is
        scanned, and this method is called to see if there is any
        interest in writing. The default method simply returns 1,
        indicating that by default, all channels will be
        interested.
        """
"""WSGI-compliant HTTP Server that uses the Zope Publisher for executing a task.
"""
import asyncore
import re
import sys
from contextlib import closing
import six
import zope.security.management
from zope.server.http.httpserver import HTTPServer
from zope.server.taskthreads import ThreadedTaskDispatcher
def fakeWrite(body):
    """Stand-in for the WSGI ``write`` callable; always fails.

    This server deliberately does not implement the deprecated
    imperative ``write()`` mechanism of the WSGI specification, so any
    attempt to use it raises NotImplementedError.
    """
    raise NotImplementedError(
        "Zope 3's HTTP Server does not support the WSGI write() function.")
def curriedStartResponse(task):
    """Return a WSGI ``start_response`` callable bound to *task*.

    The returned callable implements the PEP 3333 contract: it may only
    be called a second time with a non-None *exc_info*, it re-raises the
    exception if headers were already sent, and it returns the (fake)
    ``write`` callable.
    """
    def start_response(status, headers, exc_info=None):
        # *status* is a native string like '200 OK'; *headers* is a list
        # of (name, value) pairs.
        if task.wroteResponseHeader() and not exc_info:
            raise AssertionError("start_response called a second time "
                                 "without providing exc_info.")
        if exc_info:
            try:
                if task.wroteResponseHeader():
                    # higher levels will catch and handle raised exception:
                    # 1. "service" method in httptask.py
                    # 2. "service" method in severchannelbase.py
                    # 3. "handlerThread" method in taskthreads.py
                    six.reraise(*exc_info)
                else:
                    # As per WSGI spec existing headers must be cleared
                    task.accumulated_headers = None
                    task.response_headers = {}
            finally:
                # Break the traceback reference cycle (PEP 3333).
                exc_info = None

        # Prepare the headers for output: split '200 OK' into code and
        # reason phrase.
        status, reason = re.match('([0-9]*) (.*)', status).groups()
        task.setResponseStatus(status, reason)
        task.appendResponseHeaders(['%s: %s' % i for i in headers])

        # Return the write method used to write the response data.
        return fakeWrite
    return start_response
class WSGIHTTPServer(HTTPServer):
    """Zope Publisher-specific WSGI-compliant HTTP Server"""

    #: The WSGI application callable that requests are dispatched to.
    application = None

    def __init__(self, application, sub_protocol=None, *args, **kw):
        """Create the server.

        :param application: the WSGI application callable.
        :param sub_protocol: optional name appended to ``SERVER_IDENT``.

        All remaining arguments are passed through to ``HTTPServer``.
        """
        on_windows = sys.platform[:3] == "win"
        if on_windows and args[:1] == ('localhost',): # pragma: no cover
            # On Windows a 'localhost' bind is rewritten to '' (all
            # interfaces).
            args = ('',) + args[1:]
        self.application = application
        if sub_protocol:
            self.SERVER_IDENT += ' (%s)' % str(sub_protocol)
        HTTPServer.__init__(self, *args, **kw)

    @classmethod
    def _constructWSGIEnvironment(cls, task):
        """Build the WSGI environ dict for *task* from its CGI variables."""
        env = task.getCGIEnvironment()

        # Deduce the URL scheme (http or https).
        secure = (env.get('HTTPS', '').lower() == "on"
                  or env.get('SERVER_PORT_SECURE') == "1")

        # The following environment variables are required by the WSGI spec.
        env.update({
            'wsgi.version': (1, 0),
            'wsgi.url_scheme': 'https' if secure else 'http',
            'wsgi.errors': sys.stderr,  # apps should use the logging module
            'wsgi.multithread': True,
            'wsgi.multiprocess': True,
            'wsgi.run_once': False,
            'wsgi.input': task.request_data.getBodyStream(),
        })

        # Add some proprietary proxy information.
        # Note: Derived request parsers might not have these new attributes,
        # so fail gracefully.
        try:
            env['zserver.proxy.scheme'] = task.request_data.proxy_scheme
            env['zserver.proxy.host'] = task.request_data.proxy_netloc
        except AttributeError:
            pass
        return env

    def executeRequest(self, task):
        """Overrides HTTPServer.executeRequest()."""
        environ = self._constructWSGIEnvironment(task)

        # Call the application to handle the request and write a response.
        app_iter = self.application(environ, curriedStartResponse(task))

        try:
            # Iterate explicitly so that each chunk is handed to
            # task.write() as the application produces it, allowing
            # partial data to be sent.
            for chunk in app_iter:
                task.write(chunk)
        finally:
            close = getattr(app_iter, "close", None)
            if close is not None:
                close()
class PMDBWSGIHTTPServer(WSGIHTTPServer):
    """Enter the post-mortem debugger when there's an error"""

    def executeRequest(self, task):
        """Overrides HTTPServer.executeRequest().

        Like WSGIHTTPServer.executeRequest(), but sets
        ``wsgi.handleErrors`` to False and drops into the pdb
        post-mortem debugger on any exception.
        """
        env = self._constructWSGIEnvironment(task)
        env['wsgi.handleErrors'] = False

        # Call the application to handle the request and write a response
        result = None
        try:
            result = self.application(env, curriedStartResponse(task))
            # By iterating manually at this point, we execute task.write()
            # multiple times, allowing partial data to be sent.
            for value in result:
                task.write(value)
        except:
            # Deliberately bare so that *every* failure reaches the
            # debugger; post_mortem() re-raises afterwards.
            self.post_mortem(sys.exc_info())
        finally:
            if hasattr(result, "close"):
                result.close()

    @classmethod
    def post_mortem(cls, exc_info):
        """Run pdb.post_mortem on *exc_info*'s traceback, then re-raise.

        The Zope security interaction is restored for the debugging
        session and ended again afterwards.
        """
        import pdb
        print("%s:" % exc_info[0])
        print(exc_info[1])
        zope.security.management.restoreInteraction()
        try:
            pdb.post_mortem(exc_info[2])
            six.reraise(*exc_info)
        finally:
            # Avoid keeping the traceback (and its frames) alive.
            del exc_info
            zope.security.management.endInteraction()
def run_paste(wsgi_app, global_conf, name='zope.server.http',
              host='127.0.0.1', port=8080, threads=4):
    """PasteDeploy-style server entry point.

    Serves *wsgi_app* on *host*:*port* with a pool of *threads* worker
    threads, blocking in the asyncore loop until interrupted.  *port*
    and *threads* may be strings (as supplied by a .ini file).
    """
    dispatcher = ThreadedTaskDispatcher()
    dispatcher.setThreadCount(int(threads))
    server = WSGIHTTPServer(wsgi_app, name, host, int(port),
                            task_dispatcher=dispatcher)
    with closing(server):
        asyncore.loop()
from zope.server.utilities import find_double_newline
from zope.server.interfaces import IStreamConsumer
from zope.interface import implementer
@implementer(IStreamConsumer)
class ChunkedReceiver(object):
    """Stream consumer that decodes an HTTP/1.1 ``chunked`` request body.

    Decoded chunk data is appended to the buffer object passed to the
    constructor; any trailer headers are accumulated and discarded.
    """

    # Here's the production for a chunk:
    # (http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html)
    # chunk          = chunk-size [ chunk-extension ] CRLF
    #                  chunk-data CRLF
    # chunk-size     = 1*HEX
    # chunk-extension= *( ";" chunk-ext-name [ "=" chunk-ext-val ] )
    # chunk-ext-name = token
    # chunk-ext-val  = token | quoted-string

    # This implementation is quite lax on what it will accept, and is
    # probably even vulnerable to malicious input (denial of service due to
    # space exhaustion) on carefully crafted badly formed chunk
    # control lines.

    chunk_remainder = 0      # bytes of the current chunk still expected
    control_line = b''       # partial chunk-size line carried between calls
    all_chunks_received = 0  # set once the zero-size terminating chunk is seen
    trailer = b''            # accumulated trailer bytes (discarded)
    completed = 0            # set once body and trailer are fully consumed

    # max_control_line = 1024
    # max_trailer = 65536

    def __init__(self, buf):
        # buf is a buffer object providing append() and getfile()
        # (e.g. an OverflowableBuffer).
        self.buf = buf

    def received(self, s):
        # Returns the number of bytes consumed.
        if self.completed:
            return 0
        orig_size = len(s)
        while s:
            rm = self.chunk_remainder
            if rm > 0:
                # Receive the remainder of a chunk.
                to_write = s[:rm]
                self.buf.append(to_write)
                written = len(to_write)
                s = s[written:]
                self.chunk_remainder -= written
            elif not self.all_chunks_received:
                # Receive a control line.
                s = self.control_line + s
                pos = s.find(b'\n')
                if pos < 0:
                    # Control line not finished.
                    self.control_line = s
                    s = b''
                else:
                    # Control line finished.
                    line = s[:pos]
                    s = s[pos + 1:]
                    self.control_line = b''
                    line = line.strip()
                    if line:
                        # Begin a new chunk.
                        semi = line.find(b';')
                        if semi >= 0:
                            # discard extension info.
                            line = line[:semi]
                        sz = int(line.strip(), 16) # hexadecimal
                        if sz > 0:
                            # Start a new chunk.
                            self.chunk_remainder = sz
                        else:
                            # Finished chunks.
                            self.all_chunks_received = 1
                    # else expect a control line.
            else:
                # Receive the trailer.
                trailer = self.trailer + s
                if trailer.startswith(b'\r\n'):
                    # No trailer.
                    self.completed = 1
                    return orig_size - (len(trailer) - 2)
                if trailer.startswith(b'\n'):
                    # No trailer.
                    self.completed = 1
                    return orig_size - (len(trailer) - 1)
                pos = find_double_newline(trailer)
                if pos < 0:
                    # Trailer not finished.
                    self.trailer = trailer
                    s = b''
                else:
                    # Finished the trailer.
                    self.completed = 1
                    self.trailer = trailer[:pos]
                    return orig_size - (len(trailer) - pos)
        return orig_size

    def getfile(self):
        # File-like view of the accumulated, decoded body.
        return self.buf.getfile()
"""HTTP Server Date/Time utilities
"""
import re
import time
import calendar
def concat(*args):
    """Concatenate all positional string arguments into one string."""
    return ''.join(args)
def join(seq, field=' '):
    """Join the strings in *seq* using *field* (default: single space)."""
    return field.join(seq)
def group(s):
    """Wrap *s* in parentheses, forming a regex group expression."""
    return ''.join(['(', s, ')'])
# Day-name tables; daymap maps both 'sun' and 'sunday' -> 0 .. 6.
short_days = ['sun', 'mon', 'tue', 'wed', 'thu', 'fri', 'sat']
long_days = ['sunday', 'monday', 'tuesday', 'wednesday',
             'thursday', 'friday', 'saturday']

short_day_reg = group(join(short_days, '|'))
long_day_reg = group(join(long_days, '|'))

daymap = {}
for i in range(7):
    daymap[short_days[i]] = i
    daymap[long_days[i]] = i

# hh:mm:ss
hms_reg = join(3 * [group('[0-9][0-9]')], ':')

# Month-name table; monmap maps 'jan' -> 1 .. 'dec' -> 12.
months = ['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul',
          'aug', 'sep', 'oct', 'nov', 'dec']

monmap = {}
for i in range(12):
    monmap[months[i]] = i+1

months_reg = group(join(months, '|'))

# From draft-ietf-http-v11-spec-07.txt/3.3.1
#       Sun, 06 Nov 1994 08:49:37 GMT  ; RFC 822, updated by RFC 1123
#       Sunday, 06-Nov-94 08:49:37 GMT ; RFC 850, obsoleted by RFC 1036
#       Sun Nov  6 08:49:37 1994       ; ANSI C's asctime() format

# rfc822 format (matched against lower-cased input; hence 'gmt')
rfc822_date = join(
    [concat(short_day_reg, ','),  # day
     group('[0-9][0-9]?'),        # date
     months_reg,                  # month
     group('[0-9]+'),             # year
     hms_reg,                     # hour minute second
     'gmt'
    ],
    ' '
)

rfc822_reg = re.compile(rfc822_date)
def unpack_rfc822(m):
    """Convert a regex match over an RFC 822 date into a 9-item tuple
    suitable for calendar.timegm() (weekday/yearday/dst fields zeroed).
    """
    return (
        int(m.group(4)),     # year
        monmap[m.group(3)],  # month name -> 1..12
        int(m.group(2)),     # day of month
        int(m.group(5)),     # hour
        int(m.group(6)),     # minute
        int(m.group(7)),     # second
        0,
        0,
        0,
    )
# rfc850 format, e.g. 'sunday, 06-nov-94 08:49:37 gmt' (note the
# dash-separated date and the possibly two-digit year).
rfc850_date = join(
    [concat(long_day_reg, ','),
     join(
         [group('[0-9][0-9]?'),
          months_reg,
          group('[0-9]+')
          ],
         '-'
     ),
     hms_reg,
     'gmt'
     ],
    ' '
)

rfc850_reg = re.compile(rfc850_date)

# they actually unpack the same way (identical group numbering)
unpack_rfc850 = unpack_rfc822
# Benchmarks: parsedate ~700/sec vs. parse_http_date ~1333/sec.
# Abbreviated names used when *formatting* HTTP dates (RFC 1123 style).
weekdayname = 'Mon Tue Wed Thu Fri Sat Sun'.split()
# Leading None placeholder so that monthname[1] == 'Jan' ... [12] == 'Dec'.
monthname = [None] + 'Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec'.split()
def build_http_date(when):
    """Format the POSIX timestamp *when* as an RFC 1123 HTTP date,
    e.g. 'Sun, 06 Nov 1994 08:49:37 GMT'.
    """
    t = time.gmtime(when)
    return "%s, %02d %3s %4d %02d:%02d:%02d GMT" % (
        weekdayname[t.tm_wday],
        t.tm_mday, monthname[t.tm_mon], t.tm_year,
        t.tm_hour, t.tm_min, t.tm_sec,
    )
def parse_http_date(d):
    """Parse an HTTP date string in RFC 850 or RFC 822/1123 format.

    Returns the POSIX timestamp as an int, or 0 if *d* is not a
    recognized, fully-matching date string.
    """
    d = d.lower()
    # Try the RFC 850 form first; fall back to RFC 822/1123.
    match = rfc850_reg.match(d)
    if match is None or match.end() != len(d):
        match = rfc822_reg.match(d)
    if match is None or match.end() != len(d):
        return 0
    # Both formats unpack identically (unpack_rfc850 is an alias of
    # unpack_rfc822), so it doesn't matter which matched.
    return int(calendar.timegm(unpack_rfc850(match)))
import time
from zope.server.http.http_date import monthname
from zope.server.logger.pythonlogger import PythonLogger
from zope.server.logger.resolvinglogger import ResolvingLogger
class CommonAccessLogger(object):
    """Outputs accesses in common (Apache-style) HTTP log format.
    """

    def __init__(self, logger_object='accesslog', resolver=None):
        """
        :keyword logger_object: Either a Python :class:`logging.Logger`
            object, or a string giving the name of a Python logger to find.

        .. versionchanged:: 4.0.0
           Remove support for arbitrary ``IMessageLogger`` objects in
           *logger_object*. Logging is now always directed through the
           Python standard logging library.
        """
        output = PythonLogger(logger_object)
        # PythonLogger implements IRequestLogger without resolving host
        # names; wrap it when a resolver was supplied.
        if resolver is not None:
            output = ResolvingLogger(resolver, output)
        self.output = output

    @classmethod
    def compute_timezone_for_log(cls, tz):
        """Convert *tz* (seconds west of UTC, as in ``time.timezone``)
        into the ``[+-]HHMM`` offset string used in log timestamps."""
        sign = '-' if tz > 0 else '+'
        if tz <= 0:
            tz = -tz
        hours, leftover = divmod(tz, 3600)
        minutes, _ = divmod(leftover, 60)
        return '%s%02d%02d' % (sign, hours, minutes)

    # Lazily computed timezone strings for standard and DST offsets.
    tz_for_log = None
    tz_for_log_alt = None

    _localtime = staticmethod(time.localtime)

    def log_date_string(self, when):
        """Return the timestamp *when* formatted for the access log,
        e.g. ``2/Jan/2006:15:04:05 -0700``."""
        year, month, day, hour, minute, second = self._localtime(when)[:6]
        if time.daylight:
            offset = self.tz_for_log_alt
            if offset is None:
                offset = self.compute_timezone_for_log(time.altzone)
                self.tz_for_log_alt = offset
        else:
            offset = self.tz_for_log
            if offset is None:
                offset = self.compute_timezone_for_log(time.timezone)
                self.tz_for_log = offset
        return '%d/%s/%02d:%02d:%02d:%02d %s' % (
            day, monthname[month], year, hour, minute, second, offset)

    def log(self, task):
        """Receives a completed task and logs it in the common log format."""
        now = time.time()
        request_data = task.request_data
        req_headers = request_data.headers
        self.output.logRequest(
            task.channel.addr[0],
            ' - %s [%s] "%s" %s %d "%s" "%s"\n' % (
                task.auth_user_name or 'anonymous',
                self.log_date_string(now),
                request_data.first_line,
                task.status,
                task.bytes_written,
                req_headers.get('REFERER', ''),
                req_headers.get('USER_AGENT', ''),
            )
        )
import re
import sys
from io import BytesIO
from six.moves.urllib.parse import unquote, urlsplit
from zope.server.fixedstreamreceiver import FixedStreamReceiver
from zope.server.buffers import OverflowableBuffer
from zope.server.utilities import find_double_newline
from zope.server.interfaces import IStreamConsumer
from zope.interface import implementer
# True on Python 3; request lines/headers are then decoded from latin-1.
PY3 = sys.version_info >= (3, )
@implementer(IStreamConsumer)
class HTTPRequestParser(object):
    """A structure that collects the HTTP request.

    Once the stream is completed, the instance is passed to
    a server task constructor.
    """
    # Parsing status
    completed = 0  # Set once request is completed.
    empty = 0  # Set if no request was made.
    in_header = False     # cleared once the header block has been received
    header_plus = b''     # raw bytes of the (still incomplete) header block
    chunked = 0           # set when Transfer-Encoding: chunked is in effect
    content_length = 0    # parsed Content-Length (0 when absent or invalid)
    body_rcv = None       # IStreamConsumer for the body, once one is needed

    # Data from parsing. native strings.
    first_line = ''
    header = ''
    command = ''
    uri = ''
    version = ''
    proxy_scheme = None   # scheme part of a proxy-style absolute URI
    proxy_netloc = None   # netloc part of a proxy-style absolute URI
    path = None
    fragment = None
    query = None

    # headers is a mapping containing native string keys translated to
    # uppercase with dashes turned into underscores.  The values
    # are also native strings.

    def __init__(self, adj):
        """
        adj is an Adjustments object (provides inbuf_overflow, etc.).
        """
        self.headers = {}
        self.adj = adj

    def received(self, data):
        """
        Receives the HTTP stream for one request.
        Returns the number of bytes consumed.
        Sets the completed flag once both the header and the
        body have been received.
        """
        if self.completed:
            return 0  # Can't consume any more.
        datalen = len(data)
        br = self.body_rcv
        if br is None:
            # In header.
            s = self.header_plus + data
            index = find_double_newline(s)
            if index >= 0:
                # Header finished.
                header_plus = s[:index]
                consumed = len(data) - (len(s) - index)
                self.in_header = 0
                # Remove preceding blank lines.
                header_plus = header_plus.lstrip()
                if not header_plus:
                    # Only blank lines were received: an empty "request".
                    self.empty = 1
                    self.completed = 1
                else:
                    self.parse_header(header_plus)
                    if self.body_rcv is None:
                        # There is no request body to wait for.
                        self.completed = 1
                return consumed
            else:
                # Header not finished yet.
                self.header_plus = s
                return datalen
        else:
            # In body.
            consumed = br.received(data)
            if br.completed:
                self.completed = 1
            return consumed

    def parse_header(self, header_plus):
        """
        Parses the header_plus block of text (the headers plus the
        first line of the request).
        """
        index = header_plus.find(b'\n')
        if index >= 0:
            first_line = header_plus[:index].rstrip()
            header = header_plus[index + 1:]
        else:
            first_line = header_plus.rstrip()
            header = b''
        if PY3:
            # On Python 3, decode bytes to native (unicode) strings.
            first_line = first_line.decode('latin1')
            header = header.decode('latin1')
        self.first_line = first_line
        self.header = header

        lines = self.get_header_lines()
        headers = self.headers
        for line in lines:
            index = line.find(':')
            if index > 0:
                key = line[:index]
                value = line[index + 1:].strip()
                key1 = key.upper().replace('-', '_')
                # If a header already exists, we append subsequent values
                # separated by a comma. Applications already need to handle
                # the comma separated values, as HTTP front ends might do
                # the concatenation for you (behavior specified in RFC2616).
                try:
                    headers[key1] += ', %s' % value
                except KeyError:
                    headers[key1] = value
            # else there's garbage in the headers?

        assert isinstance(self.first_line, str)
        command, uri, version = self.crack_first_line()
        self.command = command or ''
        self.uri = uri or ''
        self.version = version
        self.split_uri()

        if version == '1.1':
            te = headers.get('TRANSFER_ENCODING', '')
            if te == 'chunked':
                # Deferred import to avoid a circular module dependency.
                from zope.server.http.chunking import ChunkedReceiver
                self.chunked = 1
                buf = OverflowableBuffer(self.adj.inbuf_overflow)
                self.body_rcv = ChunkedReceiver(buf)
        if not self.chunked:
            try:
                cl = int(headers.get('CONTENT_LENGTH', 0))
            except ValueError:
                cl = 0
            self.content_length = cl
            if cl > 0:
                buf = OverflowableBuffer(self.adj.inbuf_overflow)
                self.body_rcv = FixedStreamReceiver(cl, buf)

    def get_header_lines(self):
        """
        Splits the header into lines, putting multi-line headers together.
        """
        r = []
        lines = self.header.split('\n')
        for line in lines:
            if line and line[0] in ' \t':
                # Continuation line: append to the previous header line.
                r[-1] = r[-1] + line[1:]
            else:
                r.append(line)
        return r

    # METHOD URI [HTTP/x.y]; the URI may be absolute (proxy style),
    # i.e. include scheme://netloc.
    first_line_re = re.compile(
        '([^ ]+) ((?:[^ :?#]+://[^ ?#/]*(?:[0-9]{1,5})?)?[^ ]+)'
        '(( HTTP/([0-9.]+))$|$)')

    def crack_first_line(self):
        """Split the request line into (method, uri, version).

        Each element is None when the line does not match; version is
        None for HTTP/0.9-style requests without a version token.
        """
        method = uri = version = None
        r = self.first_line
        m = self.first_line_re.match(r)
        if m is not None and m.end() == len(r):
            if m.group(3):
                version = m.group(5)
            method = m.group(1).upper()
            uri = m.group(2)
        return (method, uri, version)

    def split_uri(self):
        """Populate proxy_scheme/proxy_netloc/path/query/fragment from uri."""
        (self.proxy_scheme, self.proxy_netloc, path, self.query,
         self.fragment) = urlsplit(self.uri)
        # Only unquote when needed; unquoting is comparatively expensive.
        if path and '%' in path:
            path = unquote(path)
        self.path = path
        if self.query == '':
            self.query = None

    def getBodyStream(self):
        """Return a file-like object for the (possibly empty) request body."""
        body_rcv = self.body_rcv
        if body_rcv is not None:
            return body_rcv.getfile()
        return BytesIO(b'')
from zope.server.http.http_date import build_http_date
from zope.publisher.interfaces.http import IHeaderOutput
from zope.server.task import AbstractTask
from zope.server.interfaces import ITask
from zope.interface import implementer
# Request headers whose CGI variable name does *not* get the usual
# HTTP_ prefix; CONNECTION is additionally renamed to CONNECTION_TYPE.
rename_headers = {
    'CONTENT_LENGTH' : 'CONTENT_LENGTH',
    'CONTENT_TYPE' : 'CONTENT_TYPE',
    'CONNECTION' : 'CONNECTION_TYPE',
}
@implementer(ITask, IHeaderOutput)  #, IOutputStream
class HTTPTask(AbstractTask):
    """An HTTP task accepts a request and writes to a channel.

    Subclass this and override the execute() method.
    """

    # NOTE(review): 'instream' is not referenced in this class; presumably
    # reserved for subclasses — confirm before removing.
    instream = None
    close_on_finish = 1       # whether the connection closes after this task
    status = '200'            # response status code, as a native string
    reason = 'OK'             # response reason phrase
    wrote_header = 0          # set once the response header has been sent
    accumulated_headers = None  # list of pre-formatted "Name: value" lines
    bytes_written = 0         # total bytes (header + body) written
    auth_user_name = ''       # authenticated user name, for access logging
    cgi_env = None            # cached CGI environment dict

    def __init__(self, channel, request_data):
        # request_data is a httprequestparser.HTTPRequestParser
        AbstractTask.__init__(self, channel)
        self.request_data = request_data
        self.response_headers = {}
        version = request_data.version
        if version not in ('1.0', '1.1'):
            # fall back to a version we support.
            version = '1.0'
        self.version = version

    def _do_service(self):
        # Delegate the actual request handling to the server.
        self.channel.server.executeRequest(self)

    def setResponseStatus(self, status, reason):
        """See zope.publisher.interfaces.http.IHeaderOutput"""
        self.status = status
        self.reason = reason

    def setResponseHeaders(self, mapping):
        """See zope.publisher.interfaces.http.IHeaderOutput"""
        self.response_headers.update(mapping)

    def appendResponseHeaders(self, lst):
        """See zope.publisher.interfaces.http.IHeaderOutput"""
        accum = self.accumulated_headers
        if accum is None:
            self.accumulated_headers = accum = []
        accum.extend(lst)

    def wroteResponseHeader(self):
        """See zope.publisher.interfaces.http.IHeaderOutput"""
        return self.wrote_header

    def setAuthUserName(self, name):
        """See zope.publisher.interfaces.http.IHeaderOutput"""
        self.auth_user_name = name

    def prepareResponseHeaders(self):
        """Decide keep-alive vs. close and fill in Connection/Server/Date.

        Sets self.close_on_finish and mutates self.response_headers.
        """
        version = self.version
        # Figure out whether the connection should be closed.
        connection = self.request_data.headers.get('CONNECTION', '').lower()
        close_it = 0
        response_headers = self.response_headers
        accumulated_headers = self.accumulated_headers
        if accumulated_headers is None:
            accumulated_headers = []

        if version == '1.0':
            # HTTP/1.0 closes by default; keep-alive only when requested
            # *and* a Content-Length allows message delimiting.
            if connection == 'keep-alive':
                if 'Content-Length' not in response_headers:
                    close_it = 1
                else:
                    response_headers['Connection'] = 'Keep-Alive'
            else:
                close_it = 1
        elif version == '1.1':
            # HTTP/1.1 keeps alive by default; close when either side
            # asked for it or the response length cannot be determined.
            if 'connection: close' in (header.lower() for header in
                                       accumulated_headers):
                close_it = 1
            if connection == 'close':
                close_it = 1
            elif 'Transfer-Encoding' in response_headers:
                if response_headers['Transfer-Encoding'] != 'chunked':
                    close_it = 1
            elif self.status == '304':
                # Replying with headers only.
                pass
            elif 'Content-Length' not in response_headers:
                # accumulated_headers is a simple list, we need to cut off
                # the value of content-length manually
                if 'content-length' not in (header[:14].lower() for header in
                                            accumulated_headers):
                    close_it = 1
            # under HTTP 1.1 keep-alive is default, no need to set the header
        else:
            # Close if unrecognized HTTP version.
            close_it = 1

        self.close_on_finish = close_it
        if close_it:
            self.response_headers['Connection'] = 'close'

        # Set the Server and Date field, if not yet specified. This is needed
        # if the server is used as a proxy.
        if 'server' not in (header[:6].lower() for header in
                            accumulated_headers):
            self.response_headers['Server'] = self.channel.server.SERVER_IDENT
        else:
            self.response_headers['Via'] = self.channel.server.SERVER_IDENT
        if 'date' not in (header[:4].lower() for header in
                          accumulated_headers):
            self.response_headers['Date'] = build_http_date(self.start_time)

    def buildResponseHeader(self):
        """Return the full response header block as UTF-8 bytes."""
        self.prepareResponseHeaders()
        first_line = 'HTTP/%s %s %s' % (self.version, self.status, self.reason)
        lines = [first_line] + ['%s: %s' % hv
                                for hv in self.response_headers.items()]
        accum = self.accumulated_headers
        if accum is not None:
            lines.extend(accum)
        res = '%s\r\n\r\n' % '\r\n'.join(lines)
        return res.encode('utf-8')

    def getCGIEnvironment(self):
        """Returns a CGI-like environment."""
        env = self.cgi_env
        if env is not None:
            # Return the cached copy.
            return env

        request_data = self.request_data
        path = request_data.path
        channel = self.channel
        server = channel.server

        # Normalize away leading slashes; PATH_INFO re-adds exactly one.
        while path and path.startswith('/'):
            path = path[1:]

        env = {}
        env['REQUEST_METHOD'] = request_data.command.upper()
        env['SERVER_PORT'] = str(server.port)
        env['SERVER_NAME'] = server.server_name
        env['SERVER_SOFTWARE'] = server.SERVER_IDENT
        env['SERVER_PROTOCOL'] = "HTTP/%s" % self.version
        env['CHANNEL_CREATION_TIME'] = str(channel.creation_time)
        env['SCRIPT_NAME'] = ''
        env['PATH_INFO'] = '/' + path
        env['QUERY_STRING'] = request_data.query or ''
        env['GATEWAY_INTERFACE'] = 'CGI/1.1'

        addr = channel.addr[0]
        env['REMOTE_ADDR'] = addr

        # If the server has a resolver, try to get the
        # remote host from the resolver's cache.
        resolver = getattr(server, 'resolver', None)
        if resolver is not None:
            dns_cache = resolver.cache
            if addr in dns_cache:
                remote_host = dns_cache[addr][2]
                if remote_host is not None:
                    env['REMOTE_HOST'] = remote_host

        for key, value in request_data.headers.items():
            value = value.strip()
            mykey = rename_headers.get(key, None)
            if mykey is None:
                mykey = 'HTTP_%s' % key
            if mykey not in env:
                env[mykey] = value

        self.cgi_env = env
        return env

    def finish(self):
        # Ensure the header is sent even for bodyless responses.
        if not self.wrote_header:
            self.write(b'')
        AbstractTask.finish(self)

    def write(self, data):
        """Write *data* (bytes) to the channel, sending the header first."""
        channel = self.channel
        if not self.wrote_header:
            rh = self.buildResponseHeader()
            channel.write(rh)
            self.bytes_written += len(rh)
            self.wrote_header = 1
        if data:
            self.bytes_written += channel.write(data)

    def flush(self):
        # Flush pending output to the client.
        self.channel.flush()
from asyncore import compact_traceback
import os
import sys
from zope.server.serverchannelbase import ServerChannelBase
from zope.server.linereceiver.linecommandparser import LineCommandParser
from zope.server.linereceiver.linetask import LineTask
# Truthy when the ZOPE_SERVER_DEBUG environment variable is set; enables
# traceback printing in LineServerChannel.exception().
DEBUG = os.environ.get('ZOPE_SERVER_DEBUG')
class LineServerChannel(ServerChannelBase):
    """The Line Server Channel represents a connection to a particular
    client. We can therefore store information here."""

    # Wrapper class that is used to execute a command in a different thread
    task_class = LineTask

    # Class that is being initialized to parse the input
    parser_class = LineCommandParser

    # List of commands that are always available (even unauthenticated)
    special_commands = ('cmd_quit',)

    # Commands that are run in a separate thread
    thread_commands = ()

    # Define the authentication status of the channel. Note that only the
    # "special commands" can be executed without having authenticated.
    authenticated = 0

    # Define the reply code for non-authenticated responses
    not_auth_reply = 'LOGIN_REQUIRED'

    # Define the reply code for an unrecognized command
    unknown_reply = 'CMD_UNKNOWN'

    # Define the error message that occurs, when the reply code was not found.
    reply_error = '500 Unknown Reply Code: %s.'

    # Define the status messages
    status_messages = {
        'CMD_UNKNOWN'      : "500 '%s': command not understood.",
        'INTERNAL_ERROR'   : "500 Internal error: %s",
        'LOGIN_REQUIRED'   : '530 Please log in with USER and PASS',
        }

    def handle_request(self, command):
        """Processes a command.

        Some commands use an alternate thread.
        """
        assert isinstance(command, LineCommandParser)
        cmd = command.cmd
        method = 'cmd_' + cmd.lower()
        if not self.authenticated and method not in self.special_commands:
            # The user is not logged in, therefore don't allow anything
            self.reply(self.not_auth_reply)
        elif method in self.thread_commands:
            # Process in another thread.
            task = self.task_class(self, command, method)
            self.queue_task(task)
        elif hasattr(self, method):
            try:
                getattr(self, method)(command.args)
            except:
                # Deliberately broad: a failing command handler must not
                # kill the channel; report it to the client instead.
                self.exception()
        else:
            self.reply(self.unknown_reply, cmd.upper())

    def reply(self, code, args=(), flush=1):
        """Send the status message for *code*, formatted with *args*.

        Unknown codes fall back to the generic ``reply_error`` message.
        """
        try:
            msg = self.status_messages[code] % args
        except:
            msg = self.reply_error % code

        self.write(msg.encode('utf-8') + b'\r\n')

        if flush:
            self.flush(0)

        # TODO: Some logging should go on here.

    def handle_error_no_close(self):
        """See asyncore.dispatcher.handle_error()"""
        _nil, t, v, tbinfo = compact_traceback()

        # sometimes a user repr method will crash.
        try:
            self_repr = repr(self)
        except:
            self_repr = '<__repr__(self) failed for object at %0x>' % id(self)

        self.log_info(
            'uncaptured python exception, closing channel %s (%s:%s %s)' % (
                self_repr,
                t,
                v,
                tbinfo
                ),
            'error'
            )

    def exception(self):
        """Report the current exception to the client and log, then close."""
        if DEBUG: # pragma: no cover
            import traceback
            traceback.print_exc()
        t, v = sys.exc_info()[:2]
        try:
            info = '%s: %s' % (getattr(t, '__name__', t), v)
        except:
            info = str(t)
        self.reply('INTERNAL_ERROR', info)
        self.handle_error_no_close()
        self.close_when_done()
import posixpath
from io import BytesIO
from zope.server.interfaces.ftp import IFileSystem
from zope.server.interfaces.ftp import IFileSystemAccess
from zope.server.ftp.server import FTPServer
from zope.publisher.publish import publish
from zope.interface import implementer
@implementer(IFileSystem)
class PublisherFileSystem(object):
    """Generic Publisher FileSystem implementation.

    Every filesystem operation is turned into a published request: the
    command name, the (split or unsplit) path and any keyword arguments
    are placed in the request environment and handed to the publisher.
    """

    def __init__(self, credentials, request_factory):
        # credentials: passed along in the environment of every request.
        # request_factory: builds the request objects fed to publish().
        self.credentials = credentials
        self.request_factory = request_factory

    def type(self, path):
        # The root always exists and is a directory; anything else is
        # resolved by publishing a 'type' command.
        if path == '/':
            return 'd'

        return self._execute(path, 'type')

    def readfile(self, path, outstream, start=0, end=None):
        return self._execute(path, 'readfile',
                             outstream=outstream, start=start, end=end)

    # Generate simple delegating methods in the class body.  Each one
    # publishes the like-named command for the given path; the _name=_name
    # default freezes the loop variable in each lambda (late binding).
    _name = None
    # 'names' and 'ls' additionally accept a filter and keep the path
    # unsplit (split=False).
    for _name in ('names', 'ls'):
        f = locals()[_name] = lambda self, path, filter=None, _name=_name: self._execute(
            path,
            _name,
            split=False,
            filter=filter)
        f.__name__ = _name

    for _name in ('lsinfo', 'mtime', 'size', 'mkdir', 'remove', 'rmdir'):
        f = locals()[_name] = lambda self, path, _name=_name: self._execute(path, _name)
        f.__name__ = _name
    # NOTE(review): only _name is cleaned up; 'f' is left behind as a
    # class attribute (harmless, but it is not part of the interface).
    del _name

    def rename(self, old, new):
        'See IWriteFileSystem'
        old = self._translate(old)
        new = self._translate(new)
        path0, old = posixpath.split(old)
        path1, new = posixpath.split(new)
        # Renames are only supported within a single directory.
        assert path0 == path1
        return self._execute(path0, 'rename', split=False, old=old, new=new)

    def writefile(self, path, instream, start=None, end=None, append=False):
        'See IWriteFileSystem'
        return self._execute(
            path, 'writefile',
            instream=instream, start=start, end=end, append=append)

    def writable(self, path):
        'See IWriteFileSystem'
        return self._execute(path, 'writable')

    def _execute(self, path, command, split=True, **kw):
        """Publish *command* for *path* and return the response result.

        Keyword arguments are copied into the request environment.  When
        *split* is true the path is divided into 'path' and 'name' keys.
        """
        env = {}
        env.update(kw)
        env['command'] = command

        path = self._translate(path)

        if split:
            env['path'], env['name'] = posixpath.split(path)
        else:
            env['path'] = path

        env['credentials'] = self.credentials
        # The body is empty; everything of interest travels in env.
        request = self.request_factory(BytesIO(b''), env)

        # Note that publish() calls close() on request, which deletes the
        # response from the request, so that we need to keep track of it.
        # agroszer: 2008.feb.1.: currently the above seems not to be true
        # request will KEEP the response on close()
        # even more if a retry occurs in the publisher,
        # the response will be LOST, so we must accept the returned request
        request = publish(request)
        return request.response.getResult()

    def _translate(self, path):
        # Normalize
        path = posixpath.normpath(path)
        if path.startswith('..'):
            # Someone is trying to get lower than the permitted root.
            # We just ignore it.
            path = '/'
        return path
class PublisherFTPServer(FTPServer):
    """Generic FTP Server backed by the publisher.

    Wraps *request_factory* in a PublisherFileSystemAccess and passes it
    to the base FTPServer.
    """

    def __init__(self, request_factory, name, ip, port, *args, **kw):
        # NOTE(review): 'name' is accepted but never used, and
        # request_factory is not stored on self (it lives inside
        # fs_access) -- confirm callers do not rely on either.
        fs_access = PublisherFileSystemAccess(request_factory)
        super(PublisherFTPServer, self).__init__(ip, port, fs_access,
                                                 *args, **kw)
@implementer(IFileSystemAccess)
class PublisherFileSystemAccess(object):
    """Hands out PublisherFileSystem instances bound to credentials."""

    def __init__(self, request_factory):
        self.request_factory = request_factory

    def authenticate(self, credentials):
        """Accept any credentials without checking them.

        We can't actually do any authentication initially, as the user
        may not be defined at the root.
        """

    def open(self, credentials):
        """Return a filesystem view for *credentials*."""
        return PublisherFileSystem(credentials, self.request_factory)
"""FTP Server
"""
import asyncore
import posixpath
import socket
from datetime import date, timedelta
from getopt import getopt, GetoptError
from zope.security.interfaces import Unauthorized
from zope.interface import implementer
from zope.server.buffers import OverflowableBuffer
from zope.server.task import AbstractTask
from zope.server.interfaces import ITask
from zope.server.interfaces.ftp import IFileSystemAccess
from zope.server.interfaces.ftp import IFTPCommandHandler
from zope.server.linereceiver.lineserverchannel import LineServerChannel
from zope.server.serverbase import ServerBase
from zope.server.dualmodechannel import DualModeChannel, the_trigger
# Map of symbolic reply codes to FTP wire messages (RFC 959 reply text).
# Messages containing %-placeholders are filled in by
# LineServerChannel.reply() with the args the caller supplies.
status_messages = {
    'OPEN_DATA_CONN': '150 Opening %s mode data connection for file list',
    'OPEN_CONN': '150 Opening %s connection for %s',
    'SUCCESS_200': '200 %s command successful.',
    'TYPE_SET_OK': '200 Type set to %s.',
    'STRU_OK': '200 STRU F Ok.',
    'MODE_OK': '200 MODE S Ok.',
    'FILE_DATE': '213 %4d%02d%02d%02d%02d%02d',
    'FILE_SIZE': '213 %d Bytes',
    'HELP_START': '214-The following commands are recognized',
    'HELP_END': '214 Help done.',
    'SERVER_TYPE': '215 %s Type: %s',
    'SERVER_READY': '220 %s FTP server (Zope Async/Thread V0.1) ready.',
    'GOODBYE': '221 Goodbye.',
    'SUCCESS_226': '226 %s command successful.',
    'TRANS_SUCCESS': '226 Transfer successful.',
    'PASV_MODE_MSG': '227 Entering Passive Mode (%s,%d,%d)',
    'LOGIN_SUCCESS': '230 Login Successful.',
    'SUCCESS_250': '250 %s command successful.',
    'SUCCESS_257': '257 %s command successful.',
    'ALREADY_CURRENT': '257 "%s" is the current directory.',
    'PASS_REQUIRED': '331 Password required',
    'RESTART_TRANSFER': '350 Restarting at %d. Send STORE or '
                        'RETRIEVE to initiate transfer.',
    'READY_FOR_DEST': '350 File exists, ready for destination.',
    'NO_DATA_CONN': "425 Can't build data connection",
    'TRANSFER_ABORTED': '426 Connection closed; transfer aborted.',
    'CMD_UNKNOWN': "500 '%s': command not understood.",
    'INTERNAL_ERROR': "500 Internal error: %s",
    'ERR_ARGS': '500 Bad command arguments',
    # Bug fix: cmd_mode() replies with 'MODE_UNKNOWN', but only the
    # misspelled 'MODE_UNKOWN' key existed, so clients got a generic
    # fallback error instead of the 502.  Provide the correct spelling
    # and keep the old key for backward compatibility.
    'MODE_UNKNOWN': '502 Unimplemented MODE type',
    'MODE_UNKOWN': '502 Unimplemented MODE type',
    'WRONG_BYTE_SIZE': '504 Byte size must be 8',
    'STRU_UNKNOWN': '504 Unimplemented STRU type',
    'NOT_AUTH': "530 You are not authorized to perform the "
                "'%s' command",
    'LOGIN_REQUIRED': '530 Please log in with USER and PASS',
    'LOGIN_MISMATCH': '530 The username and password do not match.',
    'ERR_NO_LIST': '550 Could not list directory or file: %s',
    'ERR_NO_DIR': '550 "%s": No such directory.',
    'ERR_NO_FILE': '550 "%s": No such file.',
    'ERR_NO_DIR_FILE': '550 "%s": No such file or directory.',
    'ERR_IS_NOT_FILE': '550 "%s": Is not a file',
    'ERR_CREATE_FILE': '550 Error creating file.',
    'ERR_CREATE_DIR': '550 Error creating directory: %s',
    'ERR_DELETE_FILE': '550 Error deleting file: %s',
    'ERR_DELETE_DIR': '550 Error removing directory: %s',
    'ERR_OPEN_READ': '553 Could not open file for reading: %s',
    'ERR_OPEN_WRITE': '553 Could not open file for writing: %s',
    'ERR_IO': '553 I/O Error: %s',
    'ERR_RENAME': '560 Could not rename "%s" to "%s": %s',
    # Grammar fix: "specify" -> "specified".
    'ERR_RNFR_SOURCE': '560 No source filename specified. Call RNFR first.',
}
@implementer(IFTPCommandHandler)
class FTPServerChannel(LineServerChannel):
    """The FTP Server Channel represents a connection to a particular
    client. We can therefore store information here."""

    # List of commands that are always available
    special_commands = (
        'cmd_quit', 'cmd_type', 'cmd_noop', 'cmd_user', 'cmd_pass')

    # These are the commands that are accessing the filesystem.
    # Since this could be also potentially a longer process, these commands
    # are also the ones that are executed in a different thread.
    thread_commands = (
        'cmd_appe', 'cmd_cdup', 'cmd_cwd', 'cmd_dele',
        'cmd_list', 'cmd_nlst', 'cmd_mdtm', 'cmd_mkd',
        'cmd_pass', 'cmd_retr', 'cmd_rmd', 'cmd_rnfr',
        'cmd_rnto', 'cmd_size', 'cmd_stor', 'cmd_stru')

    # Define the status messages
    status_messages = status_messages

    # Define the type of directory listing this server is returning
    system = ('UNIX', 'L8')

    # comply with (possibly troublesome) RFC959 requirements
    # This is necessary to correctly run an active data connection
    # through a firewall that triggers on the source port (expected
    # to be 'L-1', or 20 in the normal case).
    bind_local_minus_one = 0

    # Offset requested by a previous REST command; consumed by the
    # next RETR/STOR.
    restart_position = 0

    type_map = {'a': 'ASCII', 'i': 'Binary', 'e': 'EBCDIC', 'l': 'Binary'}

    type_mode_map = {'a': 't', 'i': 'b', 'e': 'b', 'l': 'b'}

    def __init__(self, server, conn, addr, adj=None):
        super(FTPServerChannel, self).__init__(server, conn, addr, adj)

        self.port_addr = None         # The client's PORT address
        self.passive_listener = None  # The PASV listener
        self.client_dc = None         # The data connection
        self.transfer_mode = 'a'      # Have to default to ASCII :-|
        self.passive_mode = 0
        self.cwd = '/'
        self._rnfr = None             # Source path of a pending RNFR

        self.username = ''
        self.credentials = None

        self.reply('SERVER_READY', self.server.server_name)

    def _getFileSystem(self):
        """Open the filesystem using the current credentials."""
        return self.server.fs_access.open(self.credentials)

    def cmd_abor(self, args):
        'See IFTPCommandHandler'
        assert self.async_mode
        self.reply('TRANSFER_ABORTED')
        self.abortPassive()
        self.abortData()

    def cmd_appe(self, args):
        'See IFTPCommandHandler'
        # APPE is STOR in append mode.
        return self.cmd_stor(args, 'a')

    def cmd_cdup(self, args):
        'See IFTPCommandHandler'
        path = self._generatePath('../')
        if self._getFileSystem().type(path):
            self.cwd = path
            self.reply('SUCCESS_250', 'CDUP')
        else:  # pragma: no cover
            self.reply('ERR_NO_FILE', path)

    def cmd_cwd(self, args):
        'See IFTPCommandHandler'
        path = self._generatePath(args)
        if self._getFileSystem().type(path) == 'd':
            self.cwd = path
            self.reply('SUCCESS_250', 'CWD')
        else:
            self.reply('ERR_NO_DIR', path)

    def cmd_dele(self, args):
        'See IFTPCommandHandler'
        if not args:
            self.reply('ERR_ARGS')
            return
        path = self._generatePath(args)

        try:
            self._getFileSystem().remove(path)
        except OSError as err:
            self.reply('ERR_DELETE_FILE', str(err))
        else:
            self.reply('SUCCESS_250', 'DELE')

    def cmd_help(self, args):
        'See IFTPCommandHandler'
        self.reply('HELP_START', flush=0)
        self.write(b'Help goes here somewhen.\r\n')
        self.reply('HELP_END')

    def cmd_list(self, args, long=1):
        'See IFTPCommandHandler'
        opts = ()
        if args.strip().startswith('-'):
            # Client passed ls-style options; parse them out first.
            try:
                opts, args = getopt(args.split(), 'Llad')
            except GetoptError:
                self.reply('ERR_ARGS')
                return

            if len(args) > 1:
                self.reply('ERR_ARGS')
                return

            args = args[0] if args else ''

        fs = self._getFileSystem()
        path = self._generatePath(args)
        if not fs.type(path):
            self.reply('ERR_NO_DIR_FILE', path)
            return
        args = args.split()
        try:
            s = self.getList(
                args, long,
                directory=bool([opt for opt in opts if opt[0] == '-d'])
            )
        except OSError as err:  # pragma: no cover
            self.reply('ERR_NO_LIST', str(err))
            return
        ok_reply = ('OPEN_DATA_CONN', self.type_map[self.transfer_mode])
        cdc = RETRChannel(self, ok_reply)
        try:
            cdc.write(s.encode('utf-8'))
            cdc.close_when_done()
        except OSError as err:  # pragma: no cover
            self.reply('ERR_NO_LIST', str(err))
            cdc.reported = True
            cdc.close_when_done()

    def getList(self, args, long=0, directory=0):
        """Produce the directory-listing text for cmd_list/cmd_nlst."""
        # we need to scan the command line for arguments to '/bin/ls'...
        fs = self._getFileSystem()
        path_args = []
        for arg in args:
            if arg[0] != '-':
                path_args.append(arg)
        if len(path_args) < 1:
            path = '.'
        else:
            path = path_args[0]
        path = self._generatePath(path)

        if fs.type(path) == 'd' and not directory:
            if long:
                file_list = map(ls, fs.ls(path))
            else:  # pragma: no cover
                file_list = fs.names(path)
        else:
            if long:
                file_list = [ls(fs.lsinfo(path))]
            else:  # pragma: no cover
                file_list = [posixpath.split(path)[1]]
        return '\r\n'.join(file_list) + '\r\n'

    def cmd_mdtm(self, args):
        'See IFTPCommandHandler'
        fs = self._getFileSystem()
        # We simply do not understand this non-standard extension to MDTM
        if len(args.split()) > 1:
            self.reply('ERR_ARGS')
            return
        path = self._generatePath(args)

        if fs.type(path) != 'f':
            self.reply('ERR_IS_NOT_FILE', path)
        else:
            mtime = fs.mtime(path)
            if mtime is not None:
                mtime = (mtime.year, mtime.month, mtime.day,
                         mtime.hour, mtime.minute, mtime.second)
            else:
                mtime = 0, 0, 0, 0, 0, 0

            self.reply('FILE_DATE', mtime)

    def cmd_mkd(self, args):
        'See IFTPCommandHandler'
        if not args:
            self.reply('ERR_ARGS')
            return
        path = self._generatePath(args)
        try:
            self._getFileSystem().mkdir(path)
        except OSError as err:
            self.reply('ERR_CREATE_DIR', str(err))
        else:
            self.reply('SUCCESS_257', 'MKD')

    def cmd_mode(self, args):
        'See IFTPCommandHandler'
        # Only stream mode is supported.
        if len(args) == 1 and args in 'sS':
            self.reply('MODE_OK')
        else:
            self.reply('MODE_UNKNOWN')

    def cmd_nlst(self, args):
        'See IFTPCommandHandler'
        self.cmd_list(args, 0)

    def cmd_noop(self, args):
        'See IFTPCommandHandler'
        self.reply('SUCCESS_200', 'NOOP')

    def cmd_pass(self, args):
        'See IFTPCommandHandler'
        self.authenticated = 0
        password = args
        credentials = (self.username, password)
        try:
            self.server.fs_access.authenticate(credentials)
        except Unauthorized:
            self.reply('LOGIN_MISMATCH')
            self.close_when_done()
        else:
            self.credentials = credentials
            self.authenticated = 1
            self.reply('LOGIN_SUCCESS')

    def cmd_pasv(self, args):
        'See IFTPCommandHandler'
        assert self.async_mode
        # Kill any existing passive listener first.
        self.abortPassive()
        local_addr = self.socket.getsockname()[0]
        self.passive_listener = PassiveListener(self, local_addr)
        port = self.passive_listener.port
        # Bug fix: use floor division -- ``port / 256`` yields a float on
        # Python 3 and garbles the 227 reply's %d fields.
        self.reply('PASV_MODE_MSG', (','.join(local_addr.split('.')),
                                     port // 256,
                                     port % 256))

    def cmd_port(self, args):
        'See IFTPCommandHandler'
        info = args.split(',')
        ip = '.'.join(info[:4])
        port = int(info[4]) * 256 + int(info[5])
        # how many data connections at a time?
        # I'm assuming one for now...
        # TODO: we should (optionally) verify that the
        # ip number belongs to the client. [wu-ftpd does this?]
        self.port_addr = (ip, port)
        self.reply('SUCCESS_200', 'PORT')

    def cmd_pwd(self, args):
        'See IFTPCommandHandler'
        self.reply('ALREADY_CURRENT', self.cwd)

    def cmd_quit(self, args):
        'See IFTPCommandHandler'
        self.reply('GOODBYE')
        self.close_when_done()

    def cmd_retr(self, args):
        'See IFTPCommandHandler'
        fs = self._getFileSystem()
        if not args:
            self.reply('CMD_UNKNOWN', 'RETR')
            # Bug fix: bail out -- previously execution fell through and
            # sent a second (spurious) error reply for the empty path.
            return
        path = self._generatePath(args)
        if fs.type(path) != 'f':
            self.reply('ERR_IS_NOT_FILE', path)
            return
        start = 0
        if self.restart_position:
            # Honor a preceding REST command, then clear it.
            start = self.restart_position
            self.restart_position = 0

        ok_reply = 'OPEN_CONN', (self.type_map[self.transfer_mode], path)
        cdc = RETRChannel(self, ok_reply)
        outstream = ApplicationOutputStream(cdc)

        try:
            fs.readfile(path, outstream, start)
            cdc.close_when_done()
        except OSError as err:  # pragma: no cover
            self.reply('ERR_OPEN_READ', str(err))
            cdc.reported = True
            cdc.close_when_done()
        except IOError as err:  # pragma: no cover XXX Same thing on Python 3
            self.reply('ERR_IO', str(err))
            cdc.reported = True
            cdc.close_when_done()

    def cmd_rest(self, args):
        'See IFTPCommandHandler'
        try:
            pos = int(args)
        except ValueError:
            self.reply('ERR_ARGS')
            return
        self.restart_position = pos
        self.reply('RESTART_TRANSFER', pos)

    def cmd_rmd(self, args):
        'See IFTPCommandHandler'
        if not args:
            self.reply('ERR_ARGS')
            return
        path = self._generatePath(args)
        try:
            self._getFileSystem().rmdir(path)
        except OSError as err:
            self.reply('ERR_DELETE_DIR', str(err))
        else:
            self.reply('SUCCESS_250', 'RMD')  # pragma: no cover

    def cmd_rnfr(self, args):
        'See IFTPCommandHandler'
        path = self._generatePath(args)
        if self._getFileSystem().type(path):
            self._rnfr = path
            self.reply('READY_FOR_DEST')
        else:
            self.reply('ERR_NO_FILE', path)

    def cmd_rnto(self, args):
        'See IFTPCommandHandler'
        path = self._generatePath(args)
        if self._rnfr is None:
            # Bug fix: RNTO without a preceding RNFR now reports
            # ERR_RNFR_SOURCE.  The old code replied 'ERR_RENAME', whose
            # message requires three format arguments and therefore could
            # never format correctly with none supplied.
            self.reply('ERR_RNFR_SOURCE')
            return
        try:
            self._getFileSystem().rename(self._rnfr, path)
        except OSError as err:
            self.reply('ERR_RENAME', (self._rnfr, path, str(err)))
        else:
            self.reply('SUCCESS_250', 'RNTO')  # pragma: no cover
        self._rnfr = None

    def cmd_size(self, args):
        'See IFTPCommandHandler'
        path = self._generatePath(args)
        fs = self._getFileSystem()
        if fs.type(path) != 'f':
            self.reply('ERR_NO_FILE', path)
        else:
            self.reply('FILE_SIZE', fs.size(path))

    def cmd_stor(self, args, write_mode='w'):
        'See IFTPCommandHandler'
        if not args:
            self.reply('ERR_ARGS')
            return
        path = self._generatePath(args)

        start = 0
        if self.restart_position:
            # Bug fix: the old code assigned ``self.start`` (a stray
            # attribute), so the REST offset was silently ignored and
            # never cleared.  Mirror cmd_retr().
            start = self.restart_position
            self.restart_position = 0
        mode = write_mode + self.type_mode_map[self.transfer_mode]

        if not self._getFileSystem().writable(path):
            self.reply('ERR_OPEN_WRITE', "Can't write file")
            return

        cdc = STORChannel(self, (path, mode, start))
        self.syncConnectData(cdc)
        self.reply('OPEN_CONN', (self.type_map[self.transfer_mode], path))

    def finishSTOR(self, buffer, finish_args):
        """Called by STORChannel when the client has sent all data."""
        (path, mode, start) = finish_args
        assert not self.async_mode
        try:
            infile = buffer.getfile()
            infile.seek(0)
            self._getFileSystem().writefile(path, infile, start,
                                            append=(mode[0] == 'a'))
        except OSError as err:  # pragma: no cover
            self.reply('ERR_OPEN_WRITE', str(err))
        except IOError as err:  # pragma: no cover XXX On Py3 this is OSError
            self.reply('ERR_IO', str(err))
        except Exception:  # pragma: no cover
            # Last-resort catch; report an internal error to the client.
            # (Was a bare ``except:``.)
            self.exception()
        else:
            self.reply('TRANS_SUCCESS')

    def cmd_stru(self, args):
        'See IFTPCommandHandler'
        # Only file structure is supported.
        if len(args) == 1 and args in 'fF':
            self.reply('STRU_OK')
        else:
            self.reply('STRU_UNKNOWN')

    def cmd_syst(self, args):
        'See IFTPCommandHandler'
        self.reply('SERVER_TYPE', self.system)

    def cmd_type(self, args):
        'See IFTPCommandHandler'
        # ascii, ebcdic, image, local <byte size>
        args = args.split()
        if not args:
            # Robustness fix: an empty TYPE used to IndexError below and
            # surface as a 500 internal error.
            self.reply('ERR_ARGS')
            return
        t = args[0].lower()
        # no support for EBCDIC
        # if t not in ['a','e','i','l']:
        if t not in ('a', 'i', 'l'):
            self.reply('ERR_ARGS')
        elif t == 'l' and (len(args) > 2 and args[2] != '8'):
            self.reply('WRONG_BYTE_SIZE')
        else:
            self.transfer_mode = t
            self.reply('TYPE_SET_OK', self.type_map[t])

    def cmd_user(self, args):
        'See IFTPCommandHandler'
        self.authenticated = 0
        # Bug fix: accept any non-empty username.  The old check
        # ``len(args) > 1`` rejected legitimate one-character usernames.
        if args:
            self.username = args
            self.reply('PASS_REQUIRED')
        else:
            self.reply('ERR_ARGS')

    ############################################################

    def _generatePath(self, args):
        """Convert relative paths to absolute paths."""
        # We use posixpath even on non-Posix platforms because we don't want
        # slashes converted to backslashes.
        path = posixpath.join(self.cwd, args)
        return posixpath.normpath(path)

    def syncConnectData(self, cdc):
        """Calls asyncConnectData in the asynchronous thread."""
        the_trigger.pull_trigger(lambda: self.asyncConnectData(cdc))

    def asyncConnectData(self, cdc):
        """Starts connecting the data channel.

        This is a little complicated because the data connection might
        be established already (in passive mode) or might be
        established in the near future (in port or passive mode.) If
        the connection has already been established,
        self.passive_listener already has a socket and is waiting for
        a call to connectData(). If the connection has not been
        established in passive mode, the passive listener will
        remember the data channel and send it when it's ready. In port
        mode, this method tells the data connection to connect.
        """
        self.abortData()
        self.client_dc = cdc
        if self.passive_listener is not None:
            # Connect via PASV
            self.passive_listener.connectData(cdc)
        if self.port_addr:  # pragma: no cover
            # Connect via PORT
            a = self.port_addr
            self.port_addr = None
            cdc.connectPort(a)

    def connectedPassive(self):
        """Accepted a passive connection."""
        self.passive_listener = None

    def abortPassive(self):
        """Close the passive listener."""
        if self.passive_listener is not None:
            self.passive_listener.abort()
            self.passive_listener = None

    def abortData(self):
        """Close the data connection."""
        if self.client_dc is not None:
            self.client_dc.abort()
            self.client_dc = None

    def closedData(self):
        self.client_dc = None

    def close(self):
        # Make sure the passive listener and active client DC get closed.
        self.abortPassive()
        self.abortData()
        LineServerChannel.close(self)
def ls(ls_info):
    """Formats a directory entry similarly to the 'ls' command.
    """
    # Start from permissive owner/group defaults; directories are
    # additionally executable (searchable) for everyone.
    entry = {
        'owner_name': 'na',
        'owner_readable': True,
        'owner_writable': True,
        'group_name': "na",
        'group_readable': True,
        'group_writable': True,
        'other_readable': False,
        'other_writable': False,
        'nlinks': 1,
        'size': 0,
    }
    is_dir = ls_info['type'] == 'd'
    for who in ('owner', 'group', 'other'):
        entry[who + '_executable'] = is_dir
    # Explicit values from the caller win over every default.
    entry.update(ls_info)

    mtime = entry.get('mtime')
    if mtime is None:
        when = "Jan 02 0000"
    else:
        # ls convention: show the year for entries older than ~6 months,
        # the time of day otherwise.
        old = date.today() - mtime.date() > timedelta(days=180)
        when = mtime.strftime('%b %d %Y' if old else '%b %d %H:%M')

    # Assemble the 10-character mode string: type flag then r/w/x
    # triplets for owner, group and other.
    flags = ['d' if entry['type'] == 'd' else '-']
    for who in ('owner', 'group', 'other'):
        for perm, char in (('readable', 'r'),
                           ('writable', 'w'),
                           ('executable', 'x')):
            flags.append(char if entry['%s_%s' % (who, perm)] else '-')

    return "%s %3d %-8s %-8s %8d %s %s" % (
        ''.join(flags),
        entry['nlinks'],
        entry['owner_name'],
        entry['group_name'],
        entry['size'],
        when,
        entry['name'],
    )
class PassiveListener(asyncore.dispatcher):
    """This socket accepts a data connection, used when the server has
    been placed in passive mode.  Although the RFC implies that we
    ought to be able to use the same listener over and over again,
    this presents a problem: how do we shut it off, so that we are
    accepting connections only when we expect them?  [we can't]

    wuftpd, and probably all the other servers, solve this by
    allowing only one connection to hit this listener.  They then
    close it.  Any subsequent data-connection command will then try
    for the default port on the client side [which is of course
    never there]. So the 'always-send-PORT/PASV' behavior seems
    required.

    Another note: wuftpd will also be listening on the channel as
    soon as the PASV command is sent.  It does not wait for a data
    command first.
    """

    def __init__(self, control_channel, local_addr):
        asyncore.dispatcher.__init__(self)
        self.control_channel = control_channel
        self.accepted = None    # The accepted socket address
        self.client_dc = None   # The data connection to accept the socket
        self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
        self.closed = False
        # bind to an address on the interface where the
        # control connection is connected.
        # Port 0 lets the OS pick a free ephemeral port; the chosen
        # port is reported back to the client in the 227 reply.
        self.bind((local_addr, 0))
        self.port = self.socket.getsockname()[1]
        self.listen(1)

    def log(self, *ignore):  # pragma: no cover
        # Silence asyncore's default per-event logging.
        pass

    def abort(self):
        """Abort the passive listener."""
        if not self.closed:  # pragma: no cover
            self.closed = True
            self.close()
        if self.accepted is not None:
            self.accepted.close()

    def handle_accept(self):
        """Accept a connection from the client.

        For some reason, sometimes accept() returns None instead of a
        socket.  This code ignores that case.
        """
        v = self.accept()
        if v is None:  # pragma: no cover
            return
        self.accepted, _addr = v
        if self.accepted is None:  # pragma: no cover
            return
        self.accepted.setblocking(0)
        # One-shot listener: stop listening as soon as the single
        # expected data connection arrives (see class docstring).
        self.closed = True
        self.close()
        if self.client_dc is not None:  # pragma: no cover
            self.connectData(self.client_dc)

    def connectData(self, cdc):
        """Sends the connection to the data channel.

        If the connection has not yet been made, sends the connection
        when it becomes available.
        """
        if self.accepted is not None:
            cdc.connecting = True
            cdc.set_socket(self.accepted)
            # Note that this method will be called twice, once by the
            # control channel, and once by handle_accept, and the two
            # calls may come in either order. If handle_accept calls
            # first, we don't want to call set_socket() on the data
            # connection twice, so set self.accepted = None to keep a
            # record that the data connection already has the socket.
            self.accepted = None
            self.control_channel.connectedPassive()
        else:  # pragma: no cover
            self.client_dc = cdc
class FTPDataChannel(DualModeChannel):
    """Base class for FTP data connections.

    Note that data channels are always in async mode.
    """

    def __init__(self, control_channel):
        self.control_channel = control_channel
        self.reported = False  # True once a final status reply was sent
        self.closed = False
        # No socket/addr yet: the connection is established later via
        # connectPort() (active) or set_socket() (passive).
        DualModeChannel.__init__(self, None, None, control_channel.adj)

    def connectPort(self, client_addr):
        """Connect to a port on the client"""
        self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
        #if bind_local_minus_one:
        #    self.bind(('', self.control_channel.server.port - 1))
        try:
            self.connect(client_addr)
        except socket.error:  # pragma: no cover
            self.report('NO_DATA_CONN')

    def abort(self):
        """Abort the data connection without reporting."""
        self.reported = True
        if not self.closed:
            self.closed = True
            self.close()

    def report(self, *reply_args):
        """Reports the result of the data transfer."""
        self.reported = True
        if self.control_channel is not None:
            self.control_channel.reply(*reply_args)

    def reportDefault(self):
        """Provide a default report on close.

        Subclasses override this; the base implementation says nothing.
        """

    def close(self):
        """Notifies the control channel when the data connection closes."""
        c = self.control_channel
        try:
            # Send a final status reply first, but only if the control
            # connection is still up and nothing was reported yet.
            if c is not None and c.connected and not self.reported:
                self.reportDefault()
        finally:
            # Break the reference cycle before closing, then tell the
            # control channel its data connection is gone.
            self.control_channel = None
            DualModeChannel.close(self)
            if c is not None:
                c.closedData()
class STORChannel(FTPDataChannel):
    """Channel for uploading one file from client to server"""

    complete_transfer = 0
    _fileno = None  # provide a default for asyncore.dispatcher._fileno

    def __init__(self, control_channel, finish_args):
        # finish_args: (path, mode, start) tuple handed through to
        # control_channel.finishSTOR() once the upload completes.
        self.finish_args = finish_args
        self.inbuf = OverflowableBuffer(control_channel.adj.inbuf_overflow)
        FTPDataChannel.__init__(self, control_channel)
        # Note that this channel starts in async mode.

    def writable(self):
        # Upload-only channel: we never have outgoing data.
        return 0

    def handle_connect(self):
        pass

    def received(self, data):
        # Accumulate uploaded bytes until the client signals EOF.
        if data:
            self.inbuf.append(data)

    def handle_close(self):
        """Client closed, indicating EOF."""
        c = self.control_channel
        # Build the task before close() clears self.control_channel.
        task = FinishSTORTask(c, self.inbuf, self.finish_args)
        self.complete_transfer = 1
        self.close()
        c.queue_task(task)

    def reportDefault(self):
        if not self.complete_transfer:  # pragma: no cover
            self.report('TRANSFER_ABORTED')
        # else the transfer completed and FinishSTORTask will
        # provide a complete reply through finishSTOR().
@implementer(ITask)
class FinishSTORTask(AbstractTask):
    """Calls control_channel.finishSTOR() in an application thread.

    This task executes after the client has finished uploading.
    """

    def __init__(self, control_channel, inbuf, finish_args):
        # inbuf: buffer holding the uploaded data.
        # finish_args: (path, mode, start) forwarded to finishSTOR().
        AbstractTask.__init__(self, control_channel)
        self.inbuf = inbuf
        self.finish_args = finish_args

    def _do_service(self):
        # self.channel is the control channel (set by AbstractTask).
        self.channel.finishSTOR(self.inbuf, self.finish_args)

    def finish(self):
        """Does nothing"""
class RETRChannel(FTPDataChannel):
    """Channel for downloading one file from server to client

    Also used for directory listings.
    """

    opened = 0
    _fileno = None  # provide a default for asyncore.dispatcher._fileno

    def __init__(self, control_channel, ok_reply_args):
        # ok_reply_args: reply() arguments sent to the client when the
        # first write opens the connection (e.g. a 150 status).
        self.ok_reply_args = ok_reply_args
        FTPDataChannel.__init__(self, control_channel)

    def _open(self):
        """Signal the client to open the connection."""
        self.opened = 1
        self.control_channel.reply(*self.ok_reply_args)
        self.control_channel.asyncConnectData(self)

    def write(self, data):
        if self.control_channel is None:
            raise IOError('Client FTP connection closed')
        # Lazily open the data connection on the first write.
        if not self.opened:
            self._open()
        return FTPDataChannel.write(self, data)

    def readable(self):
        # Watch for readability only until the connection is made; a
        # successful read attempt below confirms the connect.
        return not self.connected

    def handle_read(self):
        # This may be called upon making the connection.
        try:
            self.recv(1)
        except socket.error:
            # The connection failed.
            self.report('NO_DATA_CONN')
            self.close()

    def handle_connect(self):
        pass

    def handle_comm_error(self):
        self.report('TRANSFER_ABORTED')
        self.close()

    def reportDefault(self):
        if not len(self.outbuf):
            # All data transferred
            if not self.opened:
                # Zero-length file
                self._open()  # pragma: no cover
            self.report('TRANS_SUCCESS')
        else:
            # Not all data transferred
            self.report('TRANSFER_ABORTED')  # pragma: no cover
class ApplicationOutputStream(object):
    """Provide stream output to RETRChannel.

    Maps close() to close_when_done().
    """

    def __init__(self, retr_channel):
        self._channel = retr_channel

    def write(self, *args, **kw):
        return self._channel.write(*args, **kw)

    def flush(self, *args, **kw):
        return self._channel.flush(*args, **kw)

    def close(self, *args, **kw):
        # Closing the stream only schedules the channel to close once
        # its buffered output has drained.
        return self._channel.close_when_done(*args, **kw)
class FTPServer(ServerBase):
    """Generic FTP Server"""

    channel_class = FTPServerChannel
    SERVER_IDENT = 'zope.server.ftp'

    def __init__(self, ip, port, fs_access, *args, **kw):
        # fs_access authenticates credentials and opens per-user
        # filesystem views; it must provide IFileSystemAccess.
        assert IFileSystemAccess.providedBy(fs_access)
        self.fs_access = fs_access

        super(FTPServer, self).__init__(ip, port, *args, **kw)
=========
CHANGES
=========
5.1 (2023-08-28)
================
- Declare ``zope.traversing`` as install dependency.
5.0 (2023-03-02)
================
- Drop support for Python 2.7, 3.5, 3.6.
- Add support for Python 3.11.
4.5 (2022-08-30)
================
- Add support for Python 3.5, 3.9, 3.10.
4.4.0 (2020-10-16)
==================
- Fix inconsistent resolution order with zope.interface v5.
- Add support for Python 3.8.
- Drop support for Python 3.4 and 3.5.
4.3.0 (2018-10-19)
==================
- Add support for Python 3.7.
- Host documentation at https://zopesession.readthedocs.io
4.2.0 (2017-09-22)
==================
- Add support for Python 3.5 and 3.6.
- Drop support for Python 2.6 and 3.3
- Reach 100% code coverage and maintain it via tox.ini and Travis CI.
4.1.0 (2015-06-02)
==================
- Add support for PyPy and PyPy3.
4.0.0 (2014-12-24)
==================
- Add support for Python 3.4.
- Add support for testing on Travis.
4.0.0a2 (2013-08-27)
====================
- Fix test that fails on any timezone east of GMT
4.0.0a1 (2013-02-21)
====================
- Add support for Python 3.3
- Replace deprecated ``zope.component.adapts`` usage with equivalent
``zope.component.adapter`` decorator.
- Replace deprecated ``zope.interface.implements`` usage with equivalent
``zope.interface.implementer`` decorator.
- Drop support for Python 2.4 and 2.5.
3.9.5 (2011-08-11)
==================
- LP #824355: enable support for HttpOnly cookies.
- Fix a bug in ``zope.session.session.Session`` that would trigger an
infinite loop if either iteration or a containment test were
attempted on an instance.
3.9.4 (2011-03-07)
==================
- Add an explicit `provides` to the IClientId adapter declaration in
adapter.zcml.
- Add option to disable implicit sweeps in
PersistentSessionDataContainer.
3.9.3 (2010-09-25)
==================
- Add test extra to declare test dependency on ``zope.testing``.
- Use Python's ``doctest`` module instead of deprecated
``zope.testing.doctest``.
3.9.2 (2009-11-23)
==================
- Fix Python 2.4 hmac compatibility issue by only using hashlib in
Python versions 2.5 and above.
- Use the CookieClientIdManager's secret as the hmac key instead of the
message when constructing and verifying client ids.
- Make it possible to construct CookieClientIdManager passing cookie namespace
and/or secret as constructor's arguments.
- Use zope.schema.fieldproperty.FieldProperty for "namespace" attribute of
CookieClientIdManager, just like for other attributes in its interface.
Also, make ICookieClientIdManager's "namespace" field an ASCIILine, so
it accepts only non-unicode strings for cookie names.
3.9.1 (2009-04-20)
==================
- Restore compatibility with Python 2.4.
3.9.0 (2009-03-19)
==================
- Don't raise deprecation warnings on Python 2.6.
- Drop dependency on ``zope.annotation``. Instead, we make classes implement
`IAttributeAnnotatable` in ZCML configuration, only if ``zope.annotation``
is available. If your code relies on annotatable `CookieClientIdManager`
and `PersistentSessionDataContainer` and you don't include the zcml classes
configuration of this package, you'll need to use `classImplements` function
from ``zope.interface`` to make those classes implement `IAttributeAnnotatable`
again.
- Drop dependency on zope.app.http, use standard date formatting function
from the ``email.utils`` module.
- Zope 3 application bootstrapping code for session utilities was moved into
zope.app.appsetup package, thus drop dependency on zope.app.appsetup in this
package.
- Drop testing dependencies, as we don't need anything behind zope.testing and
  previous dependencies were simply migrated from zope.app.session before.
- Remove zpkg files and zcml slugs.
- Update package's description a bit.
3.8.1 (2009-02-23)
==================
- Add an ability to set cookie effective domain for CookieClientIdManager.
This is useful for simple cases when you have your application set up on
  one domain and you want your identification cookie to be active for subdomains.
- Python 2.6 compatibility change. Encode strings before calling hmac.new()
as the function no longer accepts the unicode() type.
3.8.0 (2008-12-31)
==================
- Add missing test dependency on ``zope.site`` and
``zope.app.publication``.
3.7.1 (2008-12-30)
==================
- Specify i18n_domain for titles in apidoc.zcml
- ZODB 3.9 no longer contains
ZODB.utils.ConflictResolvingMappingStorage, fixed tests, so they
work both with ZODB 3.8 and 3.9.
3.7.0 (2008-10-03)
==================
New features:
- Added a 'postOnly' option on CookieClientIdManagers to only allow setting
the client id cookie on POST requests. This is to further reduce risk from
broken caches handing the same client id out to multiple users. (Of
course, it doesn't help if caches are broken enough to cache POSTs.)
3.6.0 (2008-08-12)
==================
New features:
- Added a 'secure' option on CookieClientIdManagers to cause the secure
set-cookie option to be used, which tells the browser not to send the
cookie over http.
This provides enhanced security for ssl-only applications.
- Only set the client-id cookie if it isn't already set and try to
prevent the header from being cached. This is to minimize risk from
broken caches handing the same client id out to multiple users.
3.5.2 (2008-06-12)
==================
- Remove ConflictErrors on SessionData caused by setting
  ``lastAccessTime``.
3.5.1 (2008-04-30)
==================
- Split up the ZCML to make it possible to re-use more reasonably.
3.5.0 (2008-03-11)
==================
- Change the default session "resolution" to a sane value and document/test it.
3.4.1 (2007-09-25)
==================
- Fixed some metadata and switched to tgz release.
3.4.0 (2007-09-25)
==================
- Initial release
- Moved parts from ``zope.app.session`` to this packages
| zope.session | /zope.session-5.1.tar.gz/zope.session-5.1/CHANGES.rst | CHANGES.rst |
==============
zope.session
==============
.. image:: https://img.shields.io/pypi/v/zope.session.svg
:target: https://pypi.python.org/pypi/zope.session/
:alt: Latest release
.. image:: https://img.shields.io/pypi/pyversions/zope.session.svg
:target: https://pypi.org/project/zope.session/
:alt: Supported Python versions
.. image:: https://github.com/zopefoundation/zope.session/actions/workflows/tests.yml/badge.svg
:target: https://github.com/zopefoundation/zope.session/actions/workflows/tests.yml
.. image:: https://coveralls.io/repos/github/zopefoundation/zope.session/badge.svg?branch=master
:target: https://coveralls.io/github/zopefoundation/zope.session?branch=master
.. image:: https://readthedocs.org/projects/zopesession/badge/?version=latest
:target: https://zopesession.readthedocs.io/en/latest/?badge=latest
:alt: Documentation Status
This package provides interfaces for client identification and session
support and their implementations for the request objects of
`zope.publisher <https://zopepublisher.readthedocs.io/>`_.
Documentation is hosted at https://zopesession.readthedocs.io/
| zope.session | /zope.session-5.1.tar.gz/zope.session-5.1/README.rst | README.rst |
"""Interfaces for session utility.
"""
from zope.i18nmessageid import ZopeMessageFactory as _
from zope.interface import Interface
from zope.interface.common.mapping import IMapping
from zope.interface.common.mapping import IReadMapping
from zope.interface.common.mapping import IWriteMapping
from zope import schema
__docformat__ = 'restructuredtext'
class IClientIdManager(Interface):
    """
    Manages client identifiers.

    .. seealso:: `zope.session.http.ICookieClientIdManager`
    """

    def getClientId(request):
        """
        Return the client id for the given request as a string.

        If the request doesn't have an attached sessionId a new one will be
        generated.

        This will do whatever is possible with the HTTP request to ensure the
        session id will be preserved. Depending on the specific method,
        further action might be necessary on the part of the user. See the
        documentation for the specific implementation and its interfaces.
        """
class IClientId(Interface):
    """A unique id representing a session."""

    def __str__():
        """Return the id as a unique ASCII string."""
class ISessionDataContainer(IReadMapping, IWriteMapping):
    """
    Stores data objects for sessions.

    The object implementing this interface is responsible for expiring data as
    it feels appropriate.

    Usage::

        session_data_container[client_id][product_id][key] = value

    Note that this interface does not support the full mapping interface -
    the keys need to remain secret so we can't give access to :meth:`keys`,
    :meth:`values` etc.
    """

    timeout = schema.Int(
        title=_("Timeout"),
        description=_(
            "Number of seconds before data becomes stale and may "
            "be removed. A value of '0' means no expiration."),
        default=3600,
        required=True,
        min=0,
    )
    resolution = schema.Int(
        title=_("Timeout resolution (in seconds)"),
        description=_(
            "Defines what the 'resolution' of item timeout is. "
            "Setting this higher allows the transience machinery to "
            "do fewer 'writes' at the expense of causing items to time "
            "out later than the 'Data object timeout value' by a factor "
            "of (at most) this many seconds."
        ),
        default=10 * 60,
        required=True,
        min=0,
    )

    # NOTE(review): the usage example above keys this container by client id
    # and the default implementation stores ISessionData objects here, while
    # the docstrings below say ISessionPkgData — the wording may be
    # historical; verify before relying on it.
    def __getitem__(self, product_id):
        """Return an ISessionPkgData"""

    def __setitem__(self, product_id, value):
        """Store an ISessionPkgData"""
class ISession(Interface):
    """
    This object allows retrieval of the correct `ISessionData` for a
    particular product id.

    For example::

        session = ISession(request)[product_id]
        session['color'] = 'red'
        assert ISessionData.providedBy(session)
    """

    def __getitem__(product_id):
        """
        Return the relevant `ISessionData`.

        This involves locating the correct `ISessionDataContainer` for the
        given product id, determining the client id, and returning the
        relevant `ISessionData`.

        .. caution::
            This method implicitly creates a new session for the user
            when it does not exist yet.
        """

    # NOTE(review): this is documented to return ISessionPkgData while
    # __getitem__ above returns ISessionData — most likely the same kind of
    # object is meant in both; verify before changing either docstring.
    def get(product_id, default=None):
        """
        Return the relevant `ISessionPkgData` or *default* if not
        available.
        """
class ISessionData(IMapping):
    """
    Storage for a particular product id's session data.

    Contains 0 or more `ISessionPkgData` instances.
    """

    def getLastAccessTime():
        """
        Return approximate epoch time this `ISessionData` was last
        retrieved.
        """

    def setLastAccessTime():
        """
        An API for `ISessionDataContainer` to set the last retrieved epoch
        time.
        """

    # consider deprecating this property, or at least making it readonly. The
    # setter should be used instead of setting this property because of
    # conflict resolution: see https://bugs.launchpad.net/zope3/+bug/239531
    lastAccessTime = schema.Int(
        title=_("Last Access Time"),
        description=_(
            "Approximate epoch time this ISessionData was last retrieved "
            "from its ISessionDataContainer"
        ),
        default=0,
        required=True,
    )

    # Note that only IReadMapping and IWriteMapping are implemented.
    # We cannot give access to the keys, as they need to remain secret.

    # NOTE(review): the parameter name ``client_id`` looks historical — per
    # the usage example in ISessionDataContainer, the key at this level is a
    # product id; verify against callers before renaming.
    def __getitem__(self, client_id):
        """Return an `ISessionPkgData`"""

    def __setitem__(self, client_id, session_pkg_data):
        """Store an `ISessionPkgData`"""
class ISessionPkgData(IMapping):
    """
    Storage for a particular product id and browser id's session data.

    Data is stored persistently and transactionally. Data stored must
    be persistent or picklable.
    """
"""Session implementation using cookies
"""
import hmac
import logging
import random
import re
import time
from email.utils import formatdate
from hashlib import sha1
from time import process_time
import zope.location
from persistent import Persistent
from zope.i18nmessageid import ZopeMessageFactory as _
from zope.interface import implementer
from zope.publisher.interfaces.http import IHTTPApplicationRequest
from zope.publisher.interfaces.http import IHTTPRequest
from zope.schema.fieldproperty import FieldProperty
from zope import component
from zope import schema
from zope.session.interfaces import IClientIdManager
from zope.session.session import digestEncode
logger = logging.getLogger(__name__)
class MissingClientIdException(Exception):
    """No ClientId found in Request.

    Raised by `CookieClientIdManager.getClientId` when no id is present
    and one cannot be issued — either a third party manages the cookie
    (``thirdparty``), or ``postOnly`` is set and the request is not a POST.
    """
class ICookieClientIdManager(IClientIdManager):
    """
    Manages client identification using a cookie.

    .. seealso:: `CookieClientIdManager`
    """

    namespace = schema.ASCIILine(
        title=_('Cookie Name'),
        description=_(
            "Name of cookie used to maintain state. "
            "Must be unique to the site domain name, and only contain "
            "ASCII letters, digits and '_'"
        ),
        required=True,
        min_length=1,
        max_length=30,
        constraint=re.compile(r"^[\d\w_]+$").search,
    )

    cookieLifetime = schema.Int(
        title=_('Cookie Lifetime'),
        description=_(
            # Fixed wording: was "Leave blank expire the cookie ..."
            "Number of seconds until the browser expires the cookie. "
            "Leave blank to expire the cookie when the browser is quit. "
            "Set to 0 to never expire. "
        ),
        min=0,
        required=False,
        default=None,
        missing_value=None,
    )

    thirdparty = schema.Bool(
        title=_('Third party cookie'),
        description=_(
            # Fixed typo: was "beeing used"
            "Is a third party issuing the identification cookie? "
            "Servers like Apache or Nginx have capabilities to issue "
            "identification cookies too. If Third party cookies are "
            "being used, Zope will never send a cookie back, just check "
            "for them."
        ),
        required=False,
        default=False,
    )

    domain = schema.TextLine(
        title=_('Effective domain'),
        description=_(
            "An identification cookie can be restricted to a specific domain "
            "using this option. This option sets the ``domain`` attribute "
            "for the cookie header. It is useful for setting one "
            "identification cookie for multiple subdomains. So if this "
            "option is set to ``.example.org``, the cookie will be available "
            "for subdomains like ``yourname.example.org``. "
            "Note that if you set this option to some domain, the "
            "identification cookie won't be available for other domains, so, "
            "for example you won't be able to login using the "
            "SessionCredentials plugin via another domain."),
        required=False,
    )

    secure = schema.Bool(
        title=_('Request Secure communication'),
        required=False,
        default=False,
    )

    postOnly = schema.Bool(
        title=_('Only set cookie on POST requests'),
        required=False,
        default=False,
    )

    httpOnly = schema.Bool(
        title=_('The cookie cannot be accessed through client side scripts'),
        required=False,
        default=False,
    )
@implementer(ICookieClientIdManager)
class CookieClientIdManager(zope.location.Location, Persistent):
    """
    Default implementation of `ICookieClientIdManager`.
    """

    thirdparty = FieldProperty(ICookieClientIdManager['thirdparty'])
    cookieLifetime = FieldProperty(ICookieClientIdManager['cookieLifetime'])
    secure = FieldProperty(ICookieClientIdManager['secure'])
    postOnly = FieldProperty(ICookieClientIdManager['postOnly'])
    domain = FieldProperty(ICookieClientIdManager['domain'])
    namespace = FieldProperty(ICookieClientIdManager['namespace'])
    httpOnly = FieldProperty(ICookieClientIdManager['httpOnly'])

    def __init__(self, namespace=None, secret=None):
        """Create the cookie-based client id manager

        We can pass namespace (cookie name) and/or secret string
        for generating client unique ids.

        If we don't pass either of them, they will be generated
        automatically, this is very handy when storing id manager
        in the persistent database, so they are saved between
        application restarts.

        >>> manager1 = CookieClientIdManager()
        >>> len(manager1.namespace) > 0
        True
        >>> len(manager1.secret) > 0
        True

        We can specify cookie name by hand.

        >>> manager2 = CookieClientIdManager('service_cookie')
        >>> manager2.namespace
        'service_cookie'

        If we want to use `CookieClientIdManager` object as a non-persistent
        utility, we need to specify some constant secret, so it won't be
        recreated on each application restart.

        >>> manager3 = CookieClientIdManager(secret='some_secret')
        >>> manager3.secret
        'some_secret'

        Of course, we can specify both cookie name and secret.

        >>> manager4 = CookieClientIdManager('service_cookie', 'some_secret')
        >>> manager4.namespace
        'service_cookie'
        >>> manager4.secret
        'some_secret'
        """
        if namespace is None:
            # Derive a reasonably unique default cookie name from the clock.
            namespace = "zope3_cs_%x" % (int(time.time()) - 1000000000)
        if secret is None:
            # NOTE(review): random.random() is not a cryptographically strong
            # source for the HMAC secret; consider the ``secrets`` module for
            # new deployments — TODO confirm persistence compatibility first.
            secret = '%.20f' % random.random()
        self.namespace = namespace
        self.secret = secret

    def getClientId(self, request):
        """Get the client id

        This creates one if necessary:

        >>> from io import BytesIO
        >>> from zope.publisher.http import HTTPRequest
        >>> request = HTTPRequest(BytesIO(), {})
        >>> bim = CookieClientIdManager()
        >>> id = bim.getClientId(request)
        >>> id == bim.getClientId(request)
        True

        The id is retained across requests:

        >>> request2 = HTTPRequest(BytesIO(), {})
        >>> request2._cookies = dict(
        ...     [(name, cookie['value'])
        ...      for (name, cookie) in request.response._cookies.items()
        ...     ])
        >>> id == bim.getClientId(request2)
        True
        >>> bool(id)
        True

        Note that the return value of this function is a string, not
        an `.IClientId`. This is because this method is used to implement
        the `.IClientId` Adapter.

        >>> type(id) == str
        True

        We don't set the client id unless we need to, so, for example,
        the second response doesn't have cookies set:

        >>> request2.response._cookies
        {}

        An exception to this is if the ``cookieLifetime`` is set to a
        non-zero integer value, in which case we do set it on every
        request, regardless of when it was last set:

        >>> bim.cookieLifetime = 3600 # one hour
        >>> id == bim.getClientId(request2)
        True
        >>> bool(request2.response._cookies)
        True

        If the ``postOnly`` attribute is set to a true value, then cookies
        will only be set on POST requests.

        >>> bim.postOnly = True
        >>> request = HTTPRequest(BytesIO(), {})
        >>> bim.getClientId(request)
        Traceback (most recent call last):
        ...
        zope.session.http.MissingClientIdException
        >>> print(request.response.getCookie(bim.namespace))
        None

        >>> request = HTTPRequest(BytesIO(), {'REQUEST_METHOD': 'POST'})
        >>> id = bim.getClientId(request)
        >>> id == bim.getClientId(request)
        True
        >>> request.response.getCookie(bim.namespace) is not None
        True
        >>> bim.postOnly = False

        It's also possible to use third-party cookies. E.g. Apache ``mod_uid``
        or Nginx ``ngx_http_userid_module`` are able to issue user tracking
        cookies in front of Zope. In case ``thirdparty`` is activated Zope may
        not set a cookie.

        >>> bim.thirdparty = True
        >>> request = HTTPRequest(BytesIO(), {})
        >>> bim.getClientId(request)
        Traceback (most recent call last):
        ...
        zope.session.http.MissingClientIdException
        >>> print(request.response.getCookie(bim.namespace))
        None
        """
        sid = self.getRequestId(request)
        if sid is None:
            # No (valid) id in the request: refuse to issue one when a third
            # party owns the cookie, or when postOnly forbids it here.
            if (self.thirdparty
                    or (self.postOnly and request.method != 'POST')):
                raise MissingClientIdException
            sid = self.generateUniqueId()
            self.setRequestId(request, sid)
        elif (not self.thirdparty) and self.cookieLifetime:
            # If we have a finite cookie lifetime, then set the cookie
            # on each request to avoid losing it.
            self.setRequestId(request, sid)
        return sid

    def generateUniqueId(self):
        """Generate a new, random, unique id.

        >>> bim = CookieClientIdManager()
        >>> id1 = bim.generateUniqueId()
        >>> id2 = bim.generateUniqueId()
        >>> id1 != id2
        True
        """
        data = "{:.20f}{:.20f}{:.20f}".format(
            random.random(), time.time(), process_time())
        digest = sha1(data.encode()).digest()
        s = digestEncode(digest)
        # we store a HMAC of the random value together with it, which makes
        # our session ids unforgeable.
        mac = hmac.new(self.secret.encode(), s, digestmod=sha1).digest()
        # 27 chars of encoded digest + 27 chars of encoded HMAC — this is the
        # 54-char format that getRequestId() validates.
        return (s + digestEncode(mac)).decode()

    def getRequestId(self, request):
        """Return the browser id encoded in request as a string

        Return `None` if an id is not set.

        For example:

        >>> from io import BytesIO
        >>> from zope.publisher.http import HTTPRequest
        >>> request = HTTPRequest(BytesIO(), {}, None)
        >>> bim = CookieClientIdManager()

        Because no cookie has been set, we get no id:

        >>> bim.getRequestId(request) is None
        True

        We can set an id:

        >>> id1 = bim.generateUniqueId()
        >>> bim.setRequestId(request, id1)

        And get it back:

        >>> bim.getRequestId(request) == id1
        True

        When we set the request id, we also set a response cookie. We
        can simulate getting this cookie back in a subsequent request:

        >>> request2 = HTTPRequest(BytesIO(), {}, None)
        >>> request2._cookies = dict(
        ...     [(name, cookie['value'])
        ...      for (name, cookie) in request.response._cookies.items()
        ...     ])

        And we get the same id back from the new request:

        >>> bim.getRequestId(request) == bim.getRequestId(request2)
        True

        We allow unicode values as input, even though we work in the
        byte-based realm of HMAC:

        >>> id_uni = bim.generateUniqueId()
        >>> bim.setRequestId(request, id_uni)
        >>> bim.getRequestId(request) == id_uni
        True

        If the cookie data has been tampered with (doesn't correspond to our
        secret), we will refuse to return an id:

        >>> cookie = request.response.getCookie(bim.namespace)
        >>> cookie['value'] = 'x' * len(cookie['value'])
        >>> bim.getRequestId(request) is None
        True

        If another server is managing the ClientId cookies (Apache, Nginx)
        we're returning their value without checking:

        >>> bim.namespace = 'uid'
        >>> bim.thirdparty = True
        >>> request3 = HTTPRequest(BytesIO(), {}, None)
        >>> request3._cookies = {'uid': 'AQAAf0Y4gjgAAAQ3AwMEAg=='}
        >>> bim.getRequestId(request3)
        'AQAAf0Y4gjgAAAQ3AwMEAg=='
        """
        response_cookie = request.response.getCookie(self.namespace)
        if response_cookie:
            sid = response_cookie['value']
        else:
            request = IHTTPApplicationRequest(request)
            sid = request.getCookies().get(self.namespace, None)
        if self.thirdparty:
            return sid

        # If there is an id set on the response, use that but
        # don't trust it. We need to check the response in case
        # there has already been a new session created during the
        # course of this request.
        # A valid id is 27 chars of digest + 27 chars of HMAC, see
        # generateUniqueId().
        if sid is None or len(sid) != 54:
            return None
        s, mac = sid[:27], sid[27:]

        # HMAC is specified to work on byte strings only so make
        # sure to feed it that by encoding
        mac_with_my_secret = hmac.new(self.secret.encode(), s.encode(),
                                      digestmod=sha1).digest()
        mac_with_my_secret = digestEncode(mac_with_my_secret).decode()

        if mac_with_my_secret != mac:
            return None

        return sid

    def setRequestId(self, request, id):
        """Set cookie with id on request.

        This sets the response cookie:

        See the examples in `getRequestId`.

        Note that the id is checked for validity. Setting an
        invalid value is silently ignored:

        >>> from io import BytesIO
        >>> from zope.publisher.http import HTTPRequest
        >>> request = HTTPRequest(BytesIO(), {}, None)
        >>> bim = CookieClientIdManager()
        >>> bim.getRequestId(request)
        >>> bim.setRequestId(request, 'invalid id')
        >>> bim.getRequestId(request)

        For now, the cookie path is the application URL:

        >>> cookie = request.response.getCookie(bim.namespace)
        >>> cookie['path'] == request.getApplicationURL(path_only=True)
        True

        By default, session cookies don't expire:

        >>> 'expires' in cookie
        False

        Expiry time of 0 means never (well - close enough)

        >>> bim.cookieLifetime = 0
        >>> request = HTTPRequest(BytesIO(), {}, None)
        >>> bid = bim.getClientId(request)
        >>> cookie = request.response.getCookie(bim.namespace)
        >>> cookie['expires']
        'Tue, 19 Jan 2038 00:00:00 GMT'

        A non-zero value means to expire after that number of seconds:

        >>> bim.cookieLifetime = 3600
        >>> request = HTTPRequest(BytesIO(), {}, None)
        >>> bid = bim.getClientId(request)
        >>> cookie = request.response.getCookie(bim.namespace)
        >>> import email.utils
        >>> c_expires = email.utils.parsedate(cookie['expires'])
        >>> from datetime import datetime, timedelta
        >>> expires = datetime(*c_expires[:7])
        >>> now = datetime.utcnow()
        >>> expires > now + timedelta(minutes=55)
        True

        If another server in front of Zope (Apache, Nginx) is managing the
        cookies we won't set any ClientId cookies:

        >>> request = HTTPRequest(BytesIO(), {}, None)
        >>> bim.thirdparty = True
        >>> from zope.testing.loggingsupport import InstalledHandler
        >>> handler = InstalledHandler('zope.session.http')
        >>> bim.setRequestId(request, '2345')
        >>> handler.uninstall()
        >>> len(handler.records)
        1
        >>> cookie = request.response.getCookie(bim.namespace)
        >>> cookie

        If the secure attribute is set to a true value, then the
        secure cookie option is included.

        >>> bim.thirdparty = False
        >>> bim.cookieLifetime = None
        >>> request = HTTPRequest(BytesIO(), {}, None)
        >>> bim.secure = True
        >>> bim.setRequestId(request, '1234')
        >>> from pprint import pprint
        >>> pprint(request.response.getCookie(bim.namespace))
        {'path': '/', 'secure': True, 'value': '1234'}

        If the domain is specified, it will be set as a cookie attribute.

        >>> bim.domain = '.example.org'
        >>> bim.setRequestId(request, '1234')
        >>> cookie = request.response.getCookie(bim.namespace)
        >>> print(cookie['domain'])
        .example.org

        When the cookie is set, cache headers are added to the
        response to try to prevent the cookie header from being cached:

        >>> request.response.getHeader('Cache-Control')
        'no-cache="Set-Cookie,Set-Cookie2"'

        >>> request.response.getHeader('Pragma')
        'no-cache'

        >>> request.response.getHeader('Expires')
        'Mon, 26 Jul 1997 05:00:00 GMT'

        If the httpOnly attribute is set to a true value, then the
        HttpOnly cookie option is included.

        >>> request = HTTPRequest(BytesIO(), {}, None)
        >>> bim.secure = False
        >>> bim.httpOnly = True
        >>> bim.setRequestId(request, '1234')
        >>> cookie = request.response.getCookie(bim.namespace)
        >>> print(cookie['httponly'])
        True
        """
        # TODO: Currently, the path is the ApplicationURL. This is reasonable,
        #       and will be adequate for most purposes.
        #
        #       A better path to use would be that of the folder that contains
        #       the site manager this service is registered within. However,
        #       that would be expensive to look up on each request, and would
        #       have to be altered to take virtual hosting into account.
        #
        #       Seeing as this utility instance has a unique namespace for its
        #       cookie, using ApplicationURL shouldn't be a problem.
        if self.thirdparty:
            logger.warning('ClientIdManager is using thirdparty cookies, '
                           'ignoring setIdRequest call')
            return

        response = request.response
        options = {}
        if self.cookieLifetime is not None:
            if self.cookieLifetime:
                expires = formatdate(time.time() + self.cookieLifetime,
                                     localtime=False, usegmt=True)
            else:
                # cookieLifetime == 0 means "never expire"; use the 32-bit
                # epoch horizon as a far-future date.
                expires = 'Tue, 19 Jan 2038 00:00:00 GMT'
            options['expires'] = expires
        if self.secure:
            options['secure'] = True
        if self.domain:
            options['domain'] = self.domain
        if self.httpOnly:
            options['HttpOnly'] = True
        response.setCookie(
            self.namespace, id,
            path=request.getApplicationURL(path_only=True),
            **options)
        # Try to keep shared caches from serving this Set-Cookie to other
        # clients.
        response.setHeader(
            'Cache-Control',
            'no-cache="Set-Cookie,Set-Cookie2"')
        response.setHeader('Pragma', 'no-cache')
        response.setHeader('Expires', 'Mon, 26 Jul 1997 05:00:00 GMT')
def notifyVirtualHostChanged(event):
    """
    Adjust cookie paths when
    `zope.publisher.interfaces.http.IVirtualHostRequest` information
    changes.

    Given an event, this method should call a `CookieClientIdManager`'s
    setRequestId if a cookie is present in the response for that manager. To
    demonstrate we create a dummy manager object and event:

    >>> from io import BytesIO
    >>> @implementer(ICookieClientIdManager)
    ... class DummyManager(object):
    ...     namespace = 'foo'
    ...     thirdparty = False
    ...     request_id = None
    ...     def setRequestId(self, request, id):
    ...         self.request_id = id
    ...
    >>> manager = DummyManager()
    >>> component.provideUtility(manager, IClientIdManager)
    >>> from zope.publisher.http import HTTPRequest
    >>> class DummyEvent (object):
    ...     request = HTTPRequest(BytesIO(), {}, None)
    >>> event = DummyEvent()

    With no cookies present, the manager should not be called:

    >>> notifyVirtualHostChanged(event)
    >>> manager.request_id is None
    True

    However, when a cookie *has* been set, the manager is called so it can
    update the cookie if need be:

    >>> event.request.response.setCookie('foo', 'bar')
    >>> notifyVirtualHostChanged(event)
    >>> manager.request_id
    'bar'

    If a server in front of Zope manages the ClientIds (Apache, Nginx), we
    don't need to take care about the cookies:

    >>> manager2 = DummyManager()
    >>> manager2.thirdparty = True
    >>> event2 = DummyEvent()

    However, when a cookie *has* been set, the manager is called so it can
    update the cookie if need be:

    >>> event2.request.response.setCookie('foo2', 'bar2')
    >>> notifyVirtualHostChanged(event2)
    >>> id = manager2.request_id
    >>> id is None
    True

    Of course, if there is no request associated with the event,
    nothing happens:

    >>> event2.request = None
    >>> notifyVirtualHostChanged(event2)

    .. doctest::
        :hide:

        >>> import zope.component.testing
        >>> zope.component.testing.tearDown()
    """
    if event.request is None:
        return
    # the event sends us a IHTTPApplicationRequest, but we need a
    # IHTTPRequest for the response attribute, and so does the cookie-
    # manager.
    request = IHTTPRequest(event.request, None)
    if request is None:
        # Bug fix: the original adapted before the None check and never
        # guarded against a failed adaptation, so request.response below
        # could raise AttributeError on None. Without an IHTTPRequest there
        # is no response, hence no cookie to fix up.
        return
    for _name, manager in component.getUtilitiesFor(IClientIdManager):
        if manager and ICookieClientIdManager.providedBy(manager):
            # Third party ClientId Managers need no modification at all
            if not manager.thirdparty:
                cookie = request.response.getCookie(manager.namespace)
                if cookie:
                    manager.setRequestId(request, cookie['value'])
"""Session implementation
"""
import base64
import random
import time
from collections import UserDict
from heapq import heapify
from heapq import heappop
from threading import get_ident
import persistent
import ZODB
import ZODB.MappingStorage
import zope.component
import zope.interface
import zope.location
import zope.minmax
from BTrees.OOBTree import OOBTree
from zope.interface.interfaces import ComponentLookupError
from zope.publisher.interfaces import IRequest
from zope.session.interfaces import IClientId
from zope.session.interfaces import IClientIdManager
from zope.session.interfaces import ISession
from zope.session.interfaces import ISessionData
from zope.session.interfaces import ISessionDataContainer
from zope.session.interfaces import ISessionPkgData
# Base64 uses '+' and '/', which are awkward inside cookies; swap them for
# '-' and '.' respectively.
transtable = bytes.maketrans(b'+/', b'-.')


def digestEncode(s):
    """Encode SHA digest for cookie.

    Base64-encodes *s*, drops the trailing padding/newline pair, and
    replaces the two cookie-unsafe base64 characters via ``transtable``.
    """
    encoded = base64.encodebytes(s)
    trimmed = encoded[:-2]
    return trimmed.translate(transtable)
@zope.interface.implementer(IClientId)
@zope.component.adapter(IRequest)
class ClientId(str):
    """
    Default implementation of `zope.session.interfaces.IClientId`.

    .. doctest::
        :hide:

        >>> from zope.publisher.http import HTTPRequest
        >>> from io import BytesIO
        >>> from zope.session.interfaces import IClientIdManager
        >>> from zope.session.http import CookieClientIdManager
        >>> zope.component.provideUtility(
        ...     CookieClientIdManager(), IClientIdManager)

    `ClientId` objects for the same request should be equal:

    >>> request = HTTPRequest(BytesIO(), {}, None)
    >>> id1 = ClientId(request)
    >>> id2 = ClientId(request)
    >>> id1 == id2
    True

    .. doctest::
        :hide:

        >>> from zope.testing import cleanup
        >>> cleanup.tearDown()
    """

    def __new__(cls, request):
        # Delegate to the registered IClientIdManager utility; the string
        # value of this object *is* the client id it assigns to the request.
        id_manager = zope.component.getUtility(IClientIdManager)
        cid = id_manager.getClientId(request)
        return str.__new__(cls, cid)
@zope.interface.implementer(ISessionDataContainer)
class PersistentSessionDataContainer(zope.location.Location,
persistent.Persistent,
UserDict):
"""
A `zope.session.interfaces.ISessionDataContainer` that stores data in the
ZODB.
"""
_v_last_sweep = 0 # Epoch time sweep last run
disable_implicit_sweeps = False
def __init__(self):
self.data = OOBTree()
self.timeout = 1 * 60 * 60
# The "resolution" should be a small fraction of the timeout.
self.resolution = 10 * 60
def __getitem__(self, pkg_id):
"""Retrieve an `zope.session.interfaces.ISessionData`
>>> sdc = PersistentSessionDataContainer()
>>> sdc.timeout = 60
>>> sdc.resolution = 3
>>> sdc['clientid'] = sd = SessionData()
To ensure stale data is removed, we can wind
back the clock using undocumented means...
>>> sd.setLastAccessTime(sd.getLastAccessTime() - 64)
>>> sdc._v_last_sweep = sdc._v_last_sweep - 4
Now the data should be garbage collected
>>> sdc['clientid']
Traceback (most recent call last):
[...]
KeyError: 'clientid'
Can you disable the automatic removal of stale data.
>>> sdc.disable_implicit_sweeps = True
>>> sdc['stale'] = stale = SessionData()
Now we try the same method of winding back the clock.
>>> stale.setLastAccessTime(sd.getLastAccessTime() - 64)
>>> sdc._v_last_sweep = sdc._v_last_sweep - 4
But the data is not automatically removed.
>>> sdc['stale'] is stale
True
We can manually remove stale data by calling sweep() if stale
data isn't being automatically removed.
>>> stale.setLastAccessTime(sd.getLastAccessTime() - 64)
>>> sdc.sweep()
>>> sdc['stale']
Traceback (most recent call last):
[...]
KeyError: 'stale'
Now we turn automatic removal back on.
>>> sdc.disable_implicit_sweeps = False
Ensure the ``lastAccessTime`` on the `.ISessionData` is being updated
occasionally. The `.ISessionDataContainer` maintains this whenever
the `.ISessionData` is set or retrieved.
``lastAccessTime`` on the ``ISessionData`` is set when it is added
to the ``ISessionDataContainer``
>>> sdc['client_id'] = sd = SessionData()
>>> sd.getLastAccessTime() > 0
True
The ``lastAccessTime`` is also updated whenever the ``ISessionData``
is retrieved through the ``ISessionDataContainer``, at most
once every ``resolution`` seconds.
>>> then = sd.getLastAccessTime() - 4
>>> sd.setLastAccessTime(then)
>>> now = sdc['client_id'].getLastAccessTime()
>>> now > then
True
>>> time.sleep(1)
>>> now == sdc['client_id'].getLastAccessTime()
True
Ensure the ``lastAccessTime`` is not modified and no garbage collection
occurs when timeout == 0. We test this by faking a stale
``ISessionData`` object.
>>> sdc.timeout = 0
>>> sd.setLastAccessTime(sd.getLastAccessTime() - 5000)
>>> lastAccessTime = sd.getLastAccessTime()
>>> sdc['client_id'].getLastAccessTime() == lastAccessTime
True
Next, we test session expiration functionality beyond transactions.
>>> import transaction
>>> from ZODB.DB import DB
>>> from ZODB.DemoStorage import DemoStorage
>>> sdc = PersistentSessionDataContainer()
>>> sdc.timeout = 60
>>> sdc.resolution = 3
>>> db = DB(DemoStorage('test_storage'))
>>> c = db.open()
>>> c.root()['sdc'] = sdc
>>> sdc['pkg_id'] = sd = SessionData()
>>> sd['name'] = 'bob'
>>> transaction.commit()
Access immediately. the data should be accessible.
>>> c.root()['sdc']['pkg_id']['name']
'bob'
Change the clock time and stale the session data.
>>> sdc = c.root()['sdc']
>>> sd = sdc['pkg_id']
>>> sd.setLastAccessTime(sd.getLastAccessTime() - 64)
>>> sdc._v_last_sweep = sdc._v_last_sweep - 4
>>> transaction.commit()
The data should be garbage collected.
>>> c.root()['sdc']['pkg_id']['name']
Traceback (most recent call last):
[...]
KeyError: 'pkg_id'
Then abort transaction and access the same data again.
The previous GC was cancelled, but deadline is over.
The data should be garbage collected again.
>>> transaction.abort()
>>> c.root()['sdc']['pkg_id']['name']
Traceback (most recent call last):
[...]
KeyError: 'pkg_id'
Cleanup:
>>> transaction.abort()
>>> c.close()
"""
if self.timeout == 0:
return UserDict.__getitem__(self, pkg_id)
now = time.time()
# TODO: When scheduler exists, sweeping should be done by
# a scheduled job since we are currently busy handling a
# request and may end up doing simultaneous sweeps
# If transaction is aborted after sweep. _v_last_sweep keep
# incorrect sweep time. So when self.data is ghost, revert the time
# to the previous _v_last_sweep time(_v_old_sweep).
if self.data._p_state < 0:
try:
self._v_last_sweep = self._v_old_sweep
del self._v_old_sweep
except AttributeError:
pass
if (self._v_last_sweep + self.resolution < now
and not self.disable_implicit_sweeps):
self.sweep()
if getattr(self, '_v_old_sweep', None) is None:
self._v_old_sweep = self._v_last_sweep
self._v_last_sweep = now
rv = UserDict.__getitem__(self, pkg_id)
# Only update the lastAccessTime once every few minutes, rather than
# every hit, to avoid ZODB bloat and conflicts
if rv.getLastAccessTime() + self.resolution < now:
rv.setLastAccessTime(int(now))
return rv
def __setitem__(self, pkg_id, session_data):
    """Set an `zope.session.interfaces.ISessionData`

    >>> sdc = PersistentSessionDataContainer()
    >>> sad = SessionData()

    ``__setitem__`` sets the ``ISessionData``'s ``lastAccessTime``

    >>> sad.getLastAccessTime()
    0
    >>> sdc['1'] = sad
    >>> 0 < sad.getLastAccessTime() <= time.time()
    True

    We can retrieve the same object we put in

    >>> sdc['1'] is sad
    True
    """
    # Stamp the incoming session data with the current wall-clock time
    # (truncated to an int) so the expiry machinery in sweep() treats it
    # as freshly accessed, then store it through the UserDict base class.
    now = int(time.time())
    session_data.setLastAccessTime(now)
    return UserDict.__setitem__(self, pkg_id, session_data)
def sweep(self):
    """Clean out stale data

    >>> sdc = PersistentSessionDataContainer()
    >>> sdc['1'] = SessionData()
    >>> sdc['2'] = SessionData()

    Wind back the clock on one of the
    `zope.session.interfaces.ISessionData`'s so it gets garbage collected

    >>> sdc['2'].setLastAccessTime(
    ...     sdc['2'].getLastAccessTime() - sdc.timeout * 2)

    Sweep should leave '1' and remove '2'

    >>> sdc.sweep()
    >>> sd1 = sdc['1']
    >>> sd2 = sdc['2']
    Traceback (most recent call last):
    [...]
    KeyError: '2'
    """
    # lastAccessTime is only refreshed once every `resolution` seconds,
    # so a record can appear up to `resolution` seconds staler than it
    # really is.  Pad the cutoff by that much so data accessed within
    # `timeout` seconds is never discarded.
    cutoff = time.time() - self.timeout - self.resolution
    stale_keys = [key for key, record in self.data.items()
                  if record.getLastAccessTime() < cutoff]
    for key in stale_keys:
        del self.data[key]
class RAMSessionDataContainer(PersistentSessionDataContainer):
    """
    A `zope.session.interfaces.ISessionDataContainer` that stores data in RAM.

    Currently session data is not shared between Zope clients, so
    server affinity will need to be maintained to use this in a ZEO cluster.

    >>> sdc = RAMSessionDataContainer()
    >>> sdc['1'] = SessionData()
    >>> sdc['1'] is sdc['1']
    True
    >>> ISessionData.providedBy(sdc['1'])
    True
    """

    def __init__(self):
        self.resolution = 300  # 5 minutes
        self.timeout = 3600  # 1 hour
        # Something unique per instance, used as this container's key in
        # the shared in-memory database below.
        self.key = f'{time.time()}.{random.random()}.{id(self)}'

    # All instances share one in-memory ZODB; each container keeps its
    # own OOBTree in it under self.key.
    _ram_storage = ZODB.MappingStorage.MappingStorage()
    _ram_db = ZODB.DB(_ram_storage)
    _conns = {}

    def _getData(self):
        # Lazily open one connection to _ram_storage per thread.
        thread_id = get_ident()
        conn = self._conns.get(thread_id)
        if conn is None:
            conn = self._conns[thread_id] = self._ram_db.open()
        root = conn.root()
        bucket = root.get(self.key)
        if bucket is None:
            bucket = root[self.key] = OOBTree()
        return bucket

    data = property(_getData)

    def sweep(self):
        # After the normal sweep, pack the RAM database so removed
        # records actually release their memory.
        super().sweep()
        self._ram_db.pack(time.time())
@zope.interface.implementer(ISession)
@zope.component.adapter(IRequest)
class Session:
    """
    Default implementation of `zope.session.interfaces.ISession`
    """

    def __init__(self, request):
        # The client id (adapted from the request) is the key under which
        # all of this client's data is stored; normalized to str because
        # it is used as a mapping key below.
        self.client_id = str(IClientId(request))

    def _sdc(self, pkg_id):
        # Locate the ISessionDataContainer by looking up the named
        # Utility, and falling back to the unnamed one.
        try:
            return zope.component.getUtility(ISessionDataContainer, pkg_id)
        except ComponentLookupError:
            return zope.component.getUtility(ISessionDataContainer)

    def get(self, pkg_id, default=None):
        """
        Get session data.

        .. doctest::
            :hide:

            >>> from zope.publisher.interfaces import IRequest
            >>> from zope.publisher.http import HTTPRequest
            >>> from io import BytesIO
            >>> from zope.session.interfaces import IClientIdManager, IClientId
            >>> from zope.session.interfaces import ISessionDataContainer
            >>> from zope.session.http import CookieClientIdManager
            >>> zope.component.provideUtility(
            ...     CookieClientIdManager(), IClientIdManager)
            >>> zope.component.provideAdapter(ClientId, (IRequest,), IClientId)
            >>> sdc = PersistentSessionDataContainer()
            >>> zope.component.provideUtility(sdc, ISessionDataContainer, '')

        If we use `get` we get `None` or *default* returned if the *pkg_id*
        is not there:

            >>> request = HTTPRequest(BytesIO(), {}, None)
            >>> session = Session(request).get('not.there', 'default')
            >>> session
            'default'

        This method is lazy and does not create the session data:

            >>> session = Session(request).get('not.there')
            >>> session is None
            True

        The ``__getitem__`` method instead creates the data:

            >>> session = Session(request)['not.there']
            >>> session is None
            False
            >>> session = Session(request).get('not.there')
            >>> session is None
            False

        .. doctest::
            :hide:

            >>> import zope.testing.cleanup
            >>> zope.testing.cleanup.tearDown()
        """
        # The ISessionDataContainer contains two levels:
        # ISessionDataContainer[client_id] == ISessionData
        # ISessionDataContainer[client_id][pkg_id] == ISessionPkgData
        sdc = self._sdc(pkg_id)
        try:
            sd = sdc[self.client_id]
        except KeyError:
            # Nothing stored for this client yet; stay lazy and do not
            # create anything (unlike __getitem__).
            return default
        return sd.get(pkg_id, default)

    def __getitem__(self, pkg_id):
        """
        Get or create session data.

        .. doctest::
            :hide:

            >>> from zope.publisher.interfaces import IRequest
            >>> from zope.publisher.http import HTTPRequest
            >>> from io import BytesIO
            >>> from zope.session.interfaces import IClientIdManager, IClientId
            >>> from zope.session.http import CookieClientIdManager
            >>> from zope.session.interfaces import ISessionDataContainer
            >>> zope.component.provideUtility(
            ...     CookieClientIdManager(), IClientIdManager)
            >>> zope.component.provideAdapter(ClientId, (IRequest,), IClientId)
            >>> sdc = PersistentSessionDataContainer()
            >>> for product_id in ('', 'products.foo', 'products.bar'):
            ...     zope.component.provideUtility(
            ...         sdc, ISessionDataContainer, product_id)

        Setup some sessions, each with a distinct namespace:

            >>> request = HTTPRequest(BytesIO(), {}, None)
            >>> request2 = HTTPRequest(BytesIO(), {}, None)
            >>> ISession.providedBy(Session(request))
            True
            >>> session1 = Session(request)['products.foo']
            >>> session2 = Session(request)['products.bar']
            >>> session3 = Session(request2)['products.bar']

        If we use the same parameters, we should retrieve the
        same object:

            >>> session1 is Session(request)['products.foo']
            True

        Make sure it returned sane values:

            >>> ISessionPkgData.providedBy(session1)
            True

        Make sure that pkg_ids don't share a namespace:

            >>> session1['color'] = 'red'
            >>> session2['color'] = 'blue'
            >>> session3['color'] = 'vomit'
            >>> session1['color']
            'red'
            >>> session2['color']
            'blue'
            >>> session3['color']
            'vomit'

        .. doctest::
            :hide:

            >>> from zope.testing import cleanup
            >>> cleanup.tearDown()
        """
        sdc = self._sdc(pkg_id)
        # The ISessionDataContainer contains two levels:
        # ISessionDataContainer[client_id] == ISessionData
        # ISessionDataContainer[client_id][pkg_id] == ISessionPkgData
        # Both levels are created on demand.
        try:
            sd = sdc[self.client_id]
        except KeyError:
            sd = sdc[self.client_id] = SessionData()
        try:
            return sd[pkg_id]
        except KeyError:
            spd = sd[pkg_id] = SessionPkgData()
            return spd

    # These methods, part of the sequence protocol, are implemented to
    # raise exceptions. If they are not implemented and the `in`
    # operator is used, then __getitem__ is called with integer keys
    # until IndexError is raised...and since __getitem__ auto-creates
    # any requested keys, that can never happen, leaving us in an
    # infinite loop. The __contains__ method takes precedence over
    # __iter__, but for consistency and BWC we must also not be iterable.
    # They raise two different exceptions for BWC as well
    def __iter__(self):
        # Section 5.9 of the language spec says:
        # > for classes which do not define __contains__() but do define
        # >__iter__() [the object is iterated]. If an exception is
        # > raised during the iteration, it is as if in raised that exception.
        # However, CPython turns NotImplementedError into a TypeError, but
        # PyPy lets it propagate (like the spec says). Now that we implement
        # __contains__, we emulate this behaviour both places.
        raise NotImplementedError

    def __contains__(self, x):
        raise TypeError
@zope.interface.implementer(ISessionData)
class SessionData(persistent.Persistent, UserDict):
    """
    Default implementation of `zope.session.interfaces.ISessionData`

    >>> session = SessionData()
    >>> ISessionData.providedBy(session)
    True
    >>> session.getLastAccessTime()
    0

    Before the `zope.minmax.Maximum` object this class used to have an
    attribute ``lastAccessTime`` initialized in the class itself to zero. To
    avoid changing the interface, that attribute has been turned into a
    property. This part tests the behavior of a legacy session which would
    have the lastAccessTime attribute loaded from the database. The
    implementation should work for that case as well as with the new session
    where ``lastAccessTime`` is a property. These tests will be removed in a
    later release (see the comments in the code below).

    First, create an instance of `SessionData` and remove a protected attribute
    ``_lastAccessTime`` from it to make it more like the legacy `SessionData`.
    The subsequent attempt to get ``lastAccessTime`` will return a 0, because
    the ``lastAccessTime`` is not there and the dictionary returns the default
    value zero supplied to its `get` method.

    >>> legacy_session = SessionData()
    >>> del legacy_session._lastAccessTime
    >>> legacy_session.getLastAccessTime()
    0

    Now, artificially add ``lastAccessTime`` to the instance's dictionary. This
    should make it exactly like the legacy `SessionData`.

    >>> legacy_session.__dict__['lastAccessTime'] = 42
    >>> legacy_session.getLastAccessTime()
    42

    Finally, assign to ``lastAccessTime``. Since the instance now looks like a
    legacy instance, this will trigger, through the property mechanism, a
    creation of a `zope.minmax.Maximum` object which will take over the
    handling of this value and its conflict resolution from now on.

    >>> legacy_session.setLastAccessTime(13)
    >>> legacy_session._lastAccessTime.value
    13
    """

    # this is for support of legacy sessions; this comment and
    # the next line will be removed in a later release
    _lastAccessTime = None

    def __init__(self):
        # Maps pkg_id -> ISessionPkgData (see Session.__getitem__).
        self.data = OOBTree()
        self._lastAccessTime = zope.minmax.Maximum(0)

    # we include this for parallelism with setLastAccessTime
    def getLastAccessTime(self):
        """Return the last access time as an integer (0 if never set)."""
        # this conditional is for legacy sessions; this comment and
        # the next two lines will be removed in a later release
        if self._lastAccessTime is None:
            return self.__dict__.get('lastAccessTime', 0)
        return self._lastAccessTime.value

    # we need to set this value with setters in order to get optimal conflict
    # resolution behavior
    def setLastAccessTime(self, value):
        # this conditional is for legacy sessions; this comment and
        # the next two lines will be removed in a later release
        if self._lastAccessTime is None:
            self._lastAccessTime = zope.minmax.Maximum(0)
        # The Maximum wrapper takes over conflict resolution for this
        # value (see the class docstring).
        self._lastAccessTime.value = value

    lastAccessTime = property(fget=getLastAccessTime,
                              fset=setLastAccessTime,  # consider deprecating
                              doc='integer value of the last access time')
@zope.interface.implementer(ISessionPkgData)
class SessionPkgData(persistent.Persistent, UserDict):
    """
    Default implementation of `zope.session.interfaces.ISessionPkgData`

    >>> session = SessionPkgData()
    >>> ISessionPkgData.providedBy(session)
    True
    """

    # Note that this does not extend persistent.mapping.PersistentDict
    # (which is also a UserDict subclass); we don't mark ourself
    # modified when keys in our data are assigned/deleted

    def __init__(self):
        # The OOBTree holds the actual key/value pairs; mutating it does
        # not mark this wrapper object as changed (see note above).
        self.data = OOBTree()
========================
Using ``zope.session``
========================
.. currentmodule:: zope.session.interfaces
Overview
========
.. caution::
Session data is maintained on the server. This gives a security
advantage in that we can assume that a client has not tampered with
the data. However, this can have major implications for scalability
as modifying session data too frequently can put a significant load
on servers and in extreme situations render your site unusable.
Developers should keep this in mind when writing code or risk
problems when their application is run in a production environment.
Applications requiring write-intensive session implementations (such
as page counters) should consider using cookies or specialized
session implementations.
Setup
-----
This package provides a ``configure.zcml`` for use with
`zope.configuration.xmlconfig` that provides the default adapters for
`IClientId` (`.ClientId`), `ISession` (`.Session`) and the
`zope.traversing.interfaces.IPathAdapter` named ``session``.
It also provides ``zope.security`` declarations and marks
`.CookieClientIdManager` and `.PersistentSessionDataContainer` as
implementing `zope.annotation.interfaces.IAttributeAnnotatable` if
that package is installed.
This document assumes that configuration has been completed:
>>> from zope.configuration import xmlconfig
>>> import zope.session
>>> _ = xmlconfig.file('configure.zcml', zope.session)
Note that it does **not** install any `ISessionDataContainer`
or `IClientIdManager` utilities. We do that manually:
>>> from zope.component import provideUtility
>>> from zope.session.interfaces import IClientIdManager
>>> from zope.session.interfaces import ISessionDataContainer
>>> from zope.session.http import CookieClientIdManager
>>> from zope.session.session import RAMSessionDataContainer
>>> provideUtility(CookieClientIdManager(), IClientIdManager)
>>> sdc = RAMSessionDataContainer()
>>> for product_id in ('', 'products.foo', 'products.bar'):
... provideUtility(sdc, ISessionDataContainer, product_id)
Sessions
--------
Sessions allow us to fake state over a stateless protocol - HTTP. We
do this by having a unique identifier stored across multiple HTTP
requests, be it a cookie or some id mangled into the URL.
The `IClientIdManager` Utility provides this unique id. It is
responsible for propagating this id so that future requests from the
client get the same id (eg. by setting an HTTP cookie). This utility
is used when we adapt the request to the unique client id:
>>> from zope.session.interfaces import IClientId
>>> from zope.publisher.http import HTTPRequest
>>> from io import BytesIO
>>> request = HTTPRequest(BytesIO(), {}, None)
>>> client_id = IClientId(request)
The `ISession` adapter gives us a mapping that can be used to store
and retrieve session data. A unique key (the package id) is used to
avoid namespace clashes:
>>> from zope.session.interfaces import ISession
>>> pkg_id = 'products.foo'
>>> session = ISession(request)[pkg_id]
>>> session['color'] = 'red'
>>> session2 = ISession(request)['products.bar']
>>> session2['color'] = 'blue'
>>> session['color']
'red'
>>> session2['color']
'blue'
Data Storage
============
The actual data is stored in an `ISessionDataContainer` utility.
`ISession` chooses which `ISessionDataContainer` should be used by
looking up as a named utility using the package id. This allows the
site administrator to configure where the session data is actually
stored by adding a registration for desired `ISessionDataContainer`
with the correct name.
>>> import zope.component
>>> from zope.session.interfaces import ISessionDataContainer
>>> sdc = zope.component.getUtility(ISessionDataContainer, pkg_id)
>>> sdc[client_id][pkg_id] is session
True
>>> sdc[client_id][pkg_id]['color']
'red'
If no `ISessionDataContainer` utility can be located by name using the
package id, then the unnamed `ISessionDataContainer` utility is used
as a fallback.
>>> ISession(request)['unknown'] \
... is zope.component.getUtility(ISessionDataContainer)[client_id]\
... ['unknown']
True
The `ISessionDataContainer` contains `ISessionData` objects, and
`ISessionData` objects in turn contain `ISessionPkgData` objects. You
should never need to know this unless you are writing administrative
views for the session machinery.
>>> from zope.session.interfaces import ISessionData
>>> from zope.session.interfaces import ISessionPkgData
>>> ISessionData.providedBy(sdc[client_id])
True
>>> ISessionPkgData.providedBy(sdc[client_id][pkg_id])
True
The `ISessionDataContainer` is responsible for expiring session data.
The expiry time can be configured by settings its ``timeout``
attribute.
>>> sdc.timeout = 1200 # 1200 seconds or 20 minutes
Restrictions
============
Data stored in the session must be persistent or picklable. (Exactly
which builtin and standard objects can be pickled depends on the
Python version, the Python implementation, and the ZODB version, so we
demonstrate with a custom object.)
>>> import transaction
>>> class NoPickle(object):
... def __reduce__(self):
... raise TypeError("I cannot be pickled")
>>> session['oops'] = NoPickle()
>>> transaction.commit()
Traceback (most recent call last):
[...]
TypeError: I cannot be pickled
Page Templates
==============
Session data may be accessed in page template documents using TALES
thanks to the ``session`` path adapter:
.. code-block:: xml
<span tal:content="request/session:products.foo/color | default">
green
</span>
or:
.. code-block:: xml
<div tal:define="session request/session:products.foo">
<script type="text/server-python">
try:
session['count'] += 1
except KeyError:
session['count'] = 1
</script>
<span tal:content="session/count" />
</div>
Session Timeout
===============
Sessions have a timeout (defaulting to an hour, in seconds).
>>> import zope.session.session
>>> data_container = zope.session.session.PersistentSessionDataContainer()
>>> data_container.timeout
3600
We need to keep up with when the session was last used (to know when
it needs to be expired), but it would be too resource-intensive to
write the last access time every single time the session data is
touched. The session machinery compromises by only recording the last
access time periodically. That period is called the "resolution". That
also means that if the last-access-time + the-resolution < now, then
the session is considered to have timed out.
The default resolution is 10 minutes (600 seconds), meaning that a
user's session will actually time out sometime between 50 and 60
minutes.
>>> data_container.resolution
600
.. testcleanup::
transaction.abort()
from zope.testing import cleanup
cleanup.tearDown()
| zope.session | /zope.session-5.1.tar.gz/zope.session-5.1/docs/api.rst | api.rst |
============================
Sessions and Design Issues
============================
Sessions provide a way to temporarily associate information with a
client without requiring the authentication of a principal. We
associate an identifier with a particular client. Whenever we get a
request from that client, we compute the identifier and use the
identifier to look up associated information, which is stored on the
server.
A major disadvantage of sessions is that they require management of
information on the server. This can have major implications for
scalability. It is possible for a framework to make use of session
data very easy for the developer. This is great if scalability is not
an issue; otherwise, it is a booby trap.
Sessions introduce a number of issues to be considered.
Client Identification
=====================
Clients have to be identified. A number of approaches are possible,
including:
Using HTTP cookies
The application assigns a client identifier,
which is stored in a cookie. This technique is the most
straightforward, but can be defeated if the client does not support
HTTP cookies (usually because the feature has been disabled).
Using URLs.
The application assigns a client identifier, which is
stored in the URL. This makes URLs a bit uglier and requires some
care. If people copy URLs and send them to others, then you could
end up with multiple clients with the same session identifier. There
are a number of ways to reduce the risk of accidental reuse of
session identifiers:
- Embed the client IP address in the identifier
- Expire the identifier
Use hidden form variables.
This complicates applications. It
requires all requests to be POST requests and requires the
maintenance of the hidden variables.
Use the client IP address.
This doesn't work very well, because an IP address may be shared by
many clients.
Data Storage
============
Data can be simply stored in the object database. This provides lots
of flexibility. You can store pretty much anything you want as long as
it is persistent. You get the full benefit of the object database,
such as transactions, transparency, clustering, and so on. Using the
object database is especially useful when:
- Writes are infrequent
- Data are complex
If writes are frequent, then the object database introduces
scalability problems. Really, any transactional database is likely to
introduce problems with frequent writes. If you are tempted to update
session data on every request, think very hard about it. You are
creating a scalability problem.
If you know that scalability is not (and never will be) an issue,
you can just use the object database.
If you have client data that needs to be updated often (as in every
request), consider storing the data on the client. (Like all data
received from a client, it may be tainted and, in most instances,
should not be trusted. Sensitive information that the user should not
see should likewise not be stored on the client, unless encrypted with
a key the client has no access to.) If you can't store it on the
client, then consider some other storage mechanism, like a fast
database, possibly without transaction support.
You may be tempted to store session data in memory for speed. This
doesn't turn out to work very well. If you need scalability, then you
need to be able to use an application-server cluster and storage of
session data in memory defeats that. You can use "server-affinity" to
assure that requests from a client always go back to the same server,
but not all load balancers support server affinity, and, for those
that do, enabling server affinity tends to defeat load balancing.
Session Expiration
==================
You may wish to ensure that sessions terminate after some period of
time. This may be for security reasons, or to avoid accidental sharing
of a session among multiple clients. The policy might be expressed in
terms of total session time, or maximum inactive time, or some
combination.
There are a number of ways to approach this. You can expire client
identifiers. You can expire session data.
Data Expiration
===============
Because HTTP is a stateless protocol, you can't tell whether a user is
thinking about a task or has simply stopped working on it. Some means
is needed to free server session storage that is no longer needed.
The simplest strategy is to never remove data. This strategy has some
obvious disadvantages. Other strategies can be viewed as optimizations
of the basic strategy. It is important to realize that a data
expiration strategy can be informed by, but need not be constrained by
a session-expiration strategy.
| zope.session | /zope.session-5.1.tar.gz/zope.session-5.1/docs/design.rst | design.rst |
Sites and Local Site Managers
=============================
This is an introduction of location-based component architecture.
Creating and Accessing Sites
----------------------------
*Sites* are used to provide custom component setups for parts of your
application or web site. Every folder:
>>> from zope.site import folder
>>> myfolder = folder.rootFolder()
has the potential to become a site:
>>> from zope.component.interfaces import ISite, IPossibleSite
>>> IPossibleSite.providedBy(myfolder)
True
but is not yet one:
>>> ISite.providedBy(myfolder)
False
If you would like your custom content component to be able to become a site,
you can use the `SiteManagerContainer` mix-in class:
>>> from zope import site
>>> class MyContentComponent(site.SiteManagerContainer):
... pass
>>> myContent = MyContentComponent()
>>> IPossibleSite.providedBy(myContent)
True
>>> ISite.providedBy(myContent)
False
To convert a possible site to a real site, we have to provide a site manager:
>>> sm = site.LocalSiteManager(myfolder)
>>> myfolder.setSiteManager(sm)
>>> ISite.providedBy(myfolder)
True
>>> myfolder.getSiteManager() is sm
True
Note that an event is generated when a local site manager is created:
>>> from zope.component.eventtesting import getEvents
>>> from zope.site.interfaces import INewLocalSite
>>> [event] = getEvents(INewLocalSite)
>>> event.manager is sm
True
If one tries to set a bogus site manager, a `ValueError` will be raised:
>>> myfolder2 = folder.Folder()
>>> myfolder2.setSiteManager(object)
Traceback (most recent call last):
...
ValueError: setSiteManager requires an IComponentLookup
If the possible site has been changed to a site already, a `TypeError`
is raised when one attempts to add a new site manager:
>>> myfolder.setSiteManager(site.LocalSiteManager(myfolder))
Traceback (most recent call last):
...
TypeError: Already a site
There is also an adapter you can use to get the next site manager from any
location:
>>> myfolder['mysubfolder'] = folder.Folder()
>>> import zope.interface.interfaces
>>> zope.interface.interfaces.IComponentLookup(myfolder['mysubfolder']) is sm
True
If the location passed is a site, the site manager of that site is returned:
>>> zope.interface.interfaces.IComponentLookup(myfolder) is sm
True
Using the Site Manager
----------------------
A site manager contains several *site management folders*, which are used to
logically organize the software. When a site manager is initialized, a default
site management folder is created:
>>> sm = myfolder.getSiteManager()
>>> default = sm['default']
>>> default.__class__
<class 'zope.site.site.SiteManagementFolder'>
However, you can tell not to create the default site manager folder on
LocalSiteManager creation:
>>> nodefault = site.LocalSiteManager(myfolder, default_folder=False)
>>> 'default' in nodefault
False
Also, note that when creating a LocalSiteManager, its __parent__ is set to
the site that was passed to the constructor and the __name__ is set to ++etc++site.
>>> nodefault.__parent__ is myfolder
True
>>> nodefault.__name__ == '++etc++site'
True
You can easily create a new site management folder:
>>> sm['mySMF'] = site.SiteManagementFolder()
>>> sm['mySMF'].__class__
<class 'zope.site.site.SiteManagementFolder'>
Once you have your site management folder -- let's use the default one -- we
can register some components. Let's start with a utility (we define it
in a ``__module__`` that can be pickled):
>>> import zope.interface
>>> __name__ = 'zope.site.tests'
>>> class IMyUtility(zope.interface.Interface):
... pass
>>> import persistent
>>> from zope.container.contained import Contained
>>> @zope.interface.implementer(IMyUtility)
... class MyUtility(persistent.Persistent, Contained):
... def __init__(self, title):
... self.title = title
... def __repr__(self):
... return "%s('%s')" %(self.__class__.__name__, self.title)
Now we can create an instance of our utility and put it in the site
management folder and register it:
>>> myutil = MyUtility('My custom utility')
>>> default['myutil'] = myutil
>>> sm.registerUtility(myutil, IMyUtility, 'u1')
Now we can ask the site manager for the utility:
>>> sm.queryUtility(IMyUtility, 'u1')
MyUtility('My custom utility')
Of course, the local site manager has also access to the global component
registrations:
>>> gutil = MyUtility('Global Utility')
>>> from zope.component import getGlobalSiteManager
>>> gsm = getGlobalSiteManager()
>>> gsm.registerUtility(gutil, IMyUtility, 'gutil')
>>> sm.queryUtility(IMyUtility, 'gutil')
MyUtility('Global Utility')
Next let's see whether we can also successfully register an adapter as
well. Here the adapter will provide the size of a file:
>>> class IFile(zope.interface.Interface):
... pass
>>> class ISized(zope.interface.Interface):
... pass
>>> @zope.interface.implementer(IFile)
... class File(object):
... pass
>>> @zope.interface.implementer(ISized)
... class FileSize(object):
... def __init__(self, context):
... self.context = context
Now that we have the adapter we need to register it:
>>> sm.registerAdapter(FileSize, [IFile])
Finally, we can get the adapter for a file:
>>> file = File()
>>> size = sm.queryAdapter(file, ISized, name='')
>>> isinstance(size, FileSize)
True
>>> size.context is file
True
By the way, once you set a site
>>> from zope.component import hooks
>>> hooks.setSite(myfolder)
you can simply use the zope.component's `getSiteManager()` method to get
the nearest site manager:
>>> from zope.component import getSiteManager
>>> getSiteManager() is sm
True
This also means that you can simply use zope.component to look up your utility
>>> from zope.component import getUtility
>>> getUtility(IMyUtility, 'gutil')
MyUtility('Global Utility')
or the adapter via the interface's `__call__` method:
>>> size = ISized(file)
>>> isinstance(size, FileSize)
True
>>> size.context is file
True
Multiple Sites
--------------
Until now we have only dealt with one local and the global site. But things
really become interesting, once we have multiple sites. We can override other
local configuration.
This behaviour uses the notion of location, therefore we need to configure the
zope.location package first:
>>> import zope.configuration.xmlconfig
>>> _ = zope.configuration.xmlconfig.string("""
... <configure xmlns="http://namespaces.zope.org/zope">
... <include package="zope.component" file="meta.zcml"/>
... <include package="zope.location" />
... </configure>
... """)
Let's now create a new folder called `folder11`, add it to `myfolder` and make
it a site:
>>> myfolder11 = folder.Folder()
>>> myfolder['myfolder11'] = myfolder11
>>> myfolder11.setSiteManager(site.LocalSiteManager(myfolder11))
>>> sm11 = myfolder11.getSiteManager()
If we ask the second site manager for its next, we get
>>> sm11.__bases__ == (sm, )
True
and the first site manager should have the following sub manager:
>>> sm.subs == (sm11,)
True
If we now register a second utility with the same name and interface with the
new site manager folder,
>>> default11 = sm11['default']
>>> myutil11 = MyUtility('Utility, uno & uno')
>>> default11['myutil'] = myutil11
>>> sm11.registerUtility(myutil11, IMyUtility, 'u1')
then it will be available in the second site manager
>>> sm11.queryUtility(IMyUtility, 'u1')
MyUtility('Utility, uno & uno')
but not in the first one:
>>> sm.queryUtility(IMyUtility, 'u1')
MyUtility('My custom utility')
It is also interesting to look at the use cases of moving and copying a
site. To do that we create a second root folder and make it a site, so that
site hierarchy is as follows:
::
_____ global site _____
/ \
myfolder myfolder2
|
myfolder11
>>> myfolder2 = folder.rootFolder()
>>> myfolder2.setSiteManager(site.LocalSiteManager(myfolder2))
Before we can move or copy sites, we need to register two event subscribers
that manage the wiring of site managers after moving or copying:
>>> import zope.lifecycleevent.interfaces
>>> gsm.registerHandler(
... site.changeSiteConfigurationAfterMove,
... (ISite, zope.lifecycleevent.interfaces.IObjectMovedEvent),
... )
We only have to register one event listener, since the copy action causes an
`IObjectAddedEvent` to be created, which is just a special type of
`IObjectMovedEvent`.
First, make sure that everything is setup correctly in the first place:
>>> myfolder11.getSiteManager().__bases__ == (myfolder.getSiteManager(), )
True
>>> myfolder.getSiteManager().subs[0] is myfolder11.getSiteManager()
True
>>> myfolder2.getSiteManager().subs
()
Let's now move ``myfolder11`` from ``myfolder`` to ``myfolder2``:
>>> myfolder2['myfolder21'] = myfolder11
>>> del myfolder['myfolder11']
Now the next site manager for ``myfolder11``'s site manager should have changed:
>>> myfolder21 = myfolder11
>>> myfolder21.getSiteManager().__bases__ == (myfolder2.getSiteManager(), )
True
>>> myfolder2.getSiteManager().subs[0] is myfolder21.getSiteManager()
True
>>> myfolder.getSiteManager().subs
()
Make sure that our interfaces and classes are picklable:
>>> import sys
>>> sys.modules['zope.site.tests'].IMyUtility = IMyUtility
>>> sys.modules['zope.site.tests'].MyUtility = MyUtility
>>> from pickle import dumps, loads
>>> data = dumps(myfolder2['myfolder21'])
>>> myfolder['myfolder11'] = loads(data)
>>> myfolder11 = myfolder['myfolder11']
>>> myfolder11.getSiteManager().__bases__ == (myfolder.getSiteManager(), )
True
>>> myfolder.getSiteManager().subs[0] is myfolder11.getSiteManager()
True
>>> myfolder2.getSiteManager().subs[0] is myfolder21.getSiteManager()
True
Finally, let's check that everything works fine when our folder is moved
to the folder that doesn't contain any site manager. Our folder's
sitemanager's bases should be set to global site manager.
>>> myfolder11.getSiteManager().__bases__ == (myfolder.getSiteManager(), )
True
>>> nosm = folder.Folder()
>>> nosm['root'] = myfolder11
>>> myfolder11.getSiteManager().__bases__ == (gsm, )
True
Deleting a site unregisters its site manger from its parent site manager:
>>> del myfolder2['myfolder21']
>>> myfolder2.getSiteManager().subs
()
The removed site manager now has no bases:
>>> myfolder21.getSiteManager().__bases__
()
| zope.site | /zope.site-5.0-py3-none-any.whl/zope/site/site.rst | site.rst |
import zope.component.interfaces
import zope.container.constraints
import zope.container.interfaces
import zope.interface
import zope.interface.interfaces
import zope.location.interfaces
from zope.annotation.interfaces import IAttributeAnnotatable
class INewLocalSite(zope.interface.Interface):
    """Event: a local site was created.

    Notified (via :class:`NewLocalSite`) after a site manager has been
    installed on a possible-site object, turning it into a site.
    """

    # The local site manager that was just installed.
    manager = zope.interface.Attribute("The new site manager")
@zope.interface.implementer(INewLocalSite)
class NewLocalSite:
    """Default :class:`INewLocalSite` event implementation.

    Simply records the newly installed site manager.
    """

    def __init__(self, manager):
        # The site manager that was just attached to the new site.
        self.manager = manager
class ILocalSiteManager(zope.interface.interfaces.IComponents):
    """Site Managers act as containers for registerable components.

    If a Site Manager is asked for an adapter or utility, it checks for those
    it contains before using a context-based lookup to find another site
    manager to delegate to. If no other site manager is found they defer to
    the global site manager which contains file based utilities and adapters.
    """

    subs = zope.interface.Attribute(
        "A collection of registries that describe the next level "
        "of the registry tree. They are the children of this "
        "registry node. This attribute should never be "
        "manipulated manually. Use `addSub()` and `removeSub()` "
        "instead.")

    def addSub(sub):
        """Add a new sub-registry to the node.

        .. caution:: This method should *not* be used manually. It is
            automatically called by `setNext`. To add a new registry to the
            tree, use ``sub.setNext(self, self.base)`` instead!
        """

    def removeSub(sub):
        """Remove a sub-registry from the node.

        .. caution:: This method should *not* be used manually. It is
            automatically called by `setNext`. To remove a registry from the
            tree, use ``sub.setNext(None)`` instead!
        """
class ISiteManagementFolder(zope.container.interfaces.IContainer):
    """Component and component registration containers."""

    # May only be contained in a local site manager or another site
    # management folder.  The second constraint uses a dotted name
    # because the interface cannot reference itself while it is still
    # being defined.
    zope.container.constraints.containers(
        ILocalSiteManager, '.ISiteManagementFolder')
class IFolder(zope.container.interfaces.IContainer,
              zope.component.interfaces.IPossibleSite,
              IAttributeAnnotatable):
    """The standard Zope Folder object interface.

    Combines a generic container with the ability to become a site
    (carry a local site manager) and attribute annotations.
    """
class IRootFolder(zope.location.interfaces.IRoot, IFolder):
    """
    The standard Zope root Folder object interface.

    The root folder sits at the top of the location tree (``IRoot``);
    see ``rootFolder()`` in ``zope.site.folder`` for the factory.

    .. versionchanged:: 4.5.0
        ``IRoot`` is now defined to come before ``IFolder`` in the
        interface resolution (priority) order.
    """
import zope.component
import zope.component.hooks
import zope.component.interfaces
import zope.event
import zope.interface
import zope.lifecycleevent.interfaces
import zope.location
import zope.location.interfaces
from zope.component.hooks import setSite
from zope.component.interfaces import ISite
from zope.component.persistentregistry import PersistentAdapterRegistry
from zope.component.persistentregistry import PersistentComponents
from zope.container.btree import BTreeContainer
from zope.container.contained import Contained
from zope.deprecation import deprecated
from zope.filerepresentation.interfaces import IDirectoryFactory
from zope.interface.interfaces import ComponentLookupError
from zope.interface.interfaces import IComponentLookup
from zope.lifecycleevent import ObjectCreatedEvent
from zope.location.interfaces import ILocationInfo
from zope.location.interfaces import IRoot
from zope.site import interfaces
# BBB. Remove in Version 5.0 including imports
# Re-exported here only for backward compatibility; ``deprecated`` wraps
# the original so that using ``zope.site.site.setSite`` emits a warning.
setSite = deprecated(
    setSite,
    '``zope.site.site.setSite`` is deprecated '
    'and will be removed in zope.site Version 5.0. '
    'Use it from ``zope.component.hooks`` instead.')  # noqa
@zope.interface.implementer(interfaces.ISiteManagementFolder)
class SiteManagementFolder(BTreeContainer):
    """Implementation of a :class:`~.ISiteManagementFolder`"""
    # All behaviour comes from BTreeContainer; this class only adds the
    # marker interface.
@zope.interface.implementer(IDirectoryFactory)
class SMFolderFactory:
    """:class:`~.IDirectoryFactory` adapter producing site management folders.

    Whatever ``name`` is requested, a fresh, empty
    :class:`SiteManagementFolder` is returned.
    """

    def __init__(self, context):
        # The adapted object; kept by adapter convention even though the
        # factory itself never consults it.
        self.context = context

    def __call__(self, name):
        # ``name`` is not used when constructing the folder.
        return SiteManagementFolder()
@zope.interface.implementer(zope.component.interfaces.IPossibleSite)
class SiteManagerContainer(Contained):
    """Mix-in granting access to a local site manager (``++etc++site``).

    Implements the :class:`~.IPossibleSite` interface; the Folder
    implementation, for example, builds on it.
    """

    # Holds the local site manager once one has been installed.
    _sm = None

    def getSiteManager(self):
        sm = self._sm
        if sm is None:
            raise ComponentLookupError('no site manager defined')
        return sm

    def setSiteManager(self, sm):
        # pylint:disable=no-value-for-parameter
        if ISite.providedBy(self):
            raise TypeError("Already a site")
        if not IComponentLookup.providedBy(sm):
            raise ValueError('setSiteManager requires an IComponentLookup')
        self._sm = sm
        # Mark ourselves as a full ISite now that a manager is present,
        # then announce the new local site.
        zope.interface.directlyProvides(
            self, zope.component.interfaces.ISite,
            zope.interface.directlyProvidedBy(self))
        zope.event.notify(interfaces.NewLocalSite(sm))
def _findNextSiteManager(site):
    """Return the site manager of the nearest enclosing site, or None.

    Walks up the location tree from ``site``.  Returns ``None`` when
    ``site`` is the root, or when no parent can be determined (e.g. not
    enough context when run from a test).
    """
    # pylint:disable=no-value-for-parameter, too-many-function-args
    # pylint:disable=assignment-from-no-return
    current = site
    while not IRoot.providedBy(current):
        try:
            current = ILocationInfo(current).getParent()
        except TypeError:
            # There was not enough location context; probably run from a
            # test.
            return None
        if ISite.providedBy(current):
            return current.getSiteManager()
    # Reached the root without finding an enclosing site.
    return None
class _LocalAdapterRegistry(
    PersistentAdapterRegistry,
    zope.location.Location,
):
    """Persistent adapter registry that also knows its location.

    Mixing in ``Location`` gives the registry ``__parent__`` and
    ``__name__`` attributes, which ``LocalSiteManager._init_registries``
    sets.
    """
    pass
@zope.interface.implementer(interfaces.ILocalSiteManager)
class LocalSiteManager(BTreeContainer,
                       PersistentComponents):
    """Local Site Manager (:class:`~.ILocalSiteManager`) implementation"""

    # Child registries; see ILocalSiteManager.subs.  Maintained only via
    # addSub()/removeSub(), which _setBases invokes on the base registries.
    subs = ()

    def _setBases(self, bases):
        """Keep the parents' ``subs`` collections in sync with our bases.

        Called by the base class machinery when ``__bases__`` is
        assigned: first detach from bases we are leaving, then attach to
        new ones, and finally delegate to the registry implementation.
        """
        # Update base subs
        for base in self.__bases__:
            if ((base not in bases)  # pragma: no cover
                    # pylint:disable=no-value-for-parameter
                    and interfaces.ILocalSiteManager.providedBy(base)):
                base.removeSub(self)
        for base in bases:
            if ((base not in self.__bases__)
                    # pylint:disable=no-value-for-parameter
                    and interfaces.ILocalSiteManager.providedBy(base)):
                base.addSub(self)
        super()._setBases(bases)

    def __init__(self, site, default_folder=True):
        BTreeContainer.__init__(self)
        PersistentComponents.__init__(self)
        # Locate the site manager
        self.__parent__ = site
        self.__name__ = '++etc++site'
        # Set base site manager
        next_sm = _findNextSiteManager(site)
        if next_sm is None:
            next_sm = zope.component.getGlobalSiteManager()
        self.__bases__ = (next_sm, )
        # Setup default site management folder if requested
        if default_folder:
            folder = SiteManagementFolder()
            zope.event.notify(ObjectCreatedEvent(folder))
            self['default'] = folder

    def _init_registries(self):
        # Use locatable registries and give each one a location below
        # this site manager.
        self.adapters = _LocalAdapterRegistry()
        self.utilities = _LocalAdapterRegistry()
        self.adapters.__parent__ = self.utilities.__parent__ = self
        self.adapters.__name__ = 'adapters'
        self.utilities.__name__ = 'utilities'

    def _p_repr(self):
        # NOTE(review): appears to be the persistent repr hook; delegates
        # to the component-registry repr rather than the container one.
        return PersistentComponents.__repr__(self)

    def addSub(self, sub):
        """See :meth:`zope.site.interfaces.ILocalSiteManager.addSub`"""
        self.subs += (sub, )

    def removeSub(self, sub):
        """See :meth:`zope.site.interfaces.ILocalSiteManager.removeSub`"""
        self.subs = tuple(
            [s for s in self.subs if s is not sub])
def threadSiteSubscriber(ob, event):
    """Multi-subscriber to `zope.component.interfaces.ISite` and
    `zope.traversing.interfaces.BeforeTraverseEvent`.

    Whenever a site object is traversed, make it the active site for the
    current thread.

    .. note::
        The ``configure.zcml`` included in this package does *not*
        install this subscriber.  That must be configured separately;
        ``zope.app.publication`` includes such configuration.
    """
    zope.component.hooks.setSite(ob)
def clearThreadSiteSubscriber(event):
    """Subscriber to `zope.publisher.interfaces.EndRequestEvent`.

    Resets the thread-local site once the request has been processed.

    .. note::
        The ``configure.zcml`` included in this package does *not*
        install this subscriber.  That must be configured separately;
        ``zope.app.publication`` includes such configuration.
    """
    clearSite()
# Clear the site thread global
# NOTE(review): ``zope.component.hooks.setSite`` defaults its argument to
# None, so calling it with no arguments clears the active site -- which is
# why the same function can serve as ``clearSite``.  Confirm against
# zope.component.hooks if that default ever changes.
clearSite = zope.component.hooks.setSite
try:
    from zope.testing.cleanup import addCleanUp
except ImportError:  # pragma: no cover
    pass
else:
    # Reset the active site between tests when zope.testing is available.
    addCleanUp(clearSite)
@zope.component.adapter(zope.interface.Interface)
@zope.interface.implementer(IComponentLookup)
def SiteManagerAdapter(ob):
    """An adapter from :class:`~.ILocation` to :class:`~.IComponentLookup`.

    The ILocation is interpreted flexibly: we merely follow
    ``__parent__`` pointers.  When no enclosing site is found, the
    global site manager is returned.
    """
    # pylint:disable=no-value-for-parameter
    current = ob
    while current is not None:
        if ISite.providedBy(current):
            return current.getSiteManager()
        current = getattr(current, '__parent__', None)
    # Not a location, or we ran out of parents: fall back to the global
    # site manager.
    return zope.component.getGlobalSiteManager()
def changeSiteConfigurationAfterMove(site, event):
    """
    After a site is (re-)moved, its site manager links have to be
    updated.

    Subscriber to :class:`~.ISite` objects in a :class:`~.IObjectMovedEvent`.
    """
    local_sm = site.getSiteManager()
    if event.newParent is None:
        # The site was removed entirely; cut its site manager loose.
        local_sm.__bases__ = ()
        return
    # Re-point the local site manager at the nearest enclosing site
    # manager, falling back to the global one when there is none.
    next_sm = _findNextSiteManager(site)
    if next_sm is None:
        next_sm = zope.component.getGlobalSiteManager()
    local_sm.__bases__ = (next_sm, )
@zope.component.adapter(
    SiteManagerContainer,
    zope.lifecycleevent.interfaces.IObjectMovedEvent)
def siteManagerContainerRemoved(container, event):
    """Propagate a move/removal event to the container's site manager.

    SiteManagerContainer and LocalSiteManager form a containment-like
    hierarchy, but the link is the ``_sm`` attribute rather than real
    containment, so when the parent is moved or deleted the event must
    be forwarded to the site manager by hand.
    """
    try:
        sm = container.getSiteManager()
    except ComponentLookupError:
        # No site manager was ever installed; nothing to notify.
        return
    zope.component.handle(sm, event)
import zope.component.interfaces
import zope.container.folder
from zope.interface import directlyProvides
from zope.interface import implementer
from zope.site.interfaces import IFolder
from zope.site.interfaces import IRootFolder
from zope.site.site import SiteManagerContainer
@implementer(IFolder)
class Folder(zope.container.folder.Folder, SiteManagerContainer):
    """Implementation of :class:`zope.site.interfaces.IFolder`"""
    # Combines generic container behaviour with the ability to become a
    # site (via the SiteManagerContainer mix-in).
def rootFolder():
    """Factory for a :class:`zope.site.interfaces.IRootFolder`"""
    # A root folder is an ordinary Folder additionally marked with
    # IRootFolder.
    root = Folder()
    directlyProvides(root, IRootFolder)
    return root
class FolderSublocations:
    """
    Adapter for an :class:`zope.site.interfaces.IFolder` to
    :class:`zope.location.interfaces.ISublocations`.

    The subobjects of a folder include it's contents and it's site
    manager if it is a site::

        >>> from zope.container.contained import Contained
        >>> folder = Folder()
        >>> folder['ob1'] = Contained()
        >>> folder['ob2'] = Contained()
        >>> folder['ob3'] = Contained()
        >>> subs = list(FolderSublocations(folder).sublocations())
        >>> subs.remove(folder['ob1'])
        >>> subs.remove(folder['ob2'])
        >>> subs.remove(folder['ob3'])
        >>> subs
        []

        >>> sm = Contained()
        >>> from zope.interface import directlyProvides
        >>> from zope.interface.interfaces import IComponentLookup
        >>> directlyProvides(sm, IComponentLookup)
        >>> folder.setSiteManager(sm)
        >>> directlyProvides(folder, zope.component.interfaces.ISite)
        >>> subs = list(FolderSublocations(folder).sublocations())
        >>> subs.remove(folder['ob1'])
        >>> subs.remove(folder['ob2'])
        >>> subs.remove(folder['ob3'])
        >>> subs.remove(sm)
        >>> subs
        []
    """
    def __init__(self, folder):
        # The folder being adapted.
        self.folder = folder
    def sublocations(self):
        """Iterate the folder's items, then its site manager if present."""
        folder = self.folder
        for key in folder:
            yield folder[key]
        if zope.component.interfaces.ISite.providedBy(folder):
            yield folder.getSiteManager()
Changes
=======
3.0 (2023-06-01)
----------------
- Add support for SQLAlchemy 2.0 and for new psycopg v3 backend.
(`#79 <https://github.com/zopefoundation/zope.sqlalchemy/pull/79>`_)
**Breaking Changes**
- No longer allow calling ``session.commit()`` within a manual nested database
transaction (a savepoint). If you want to use savepoints directly in code that is
not aware of ``transaction.savepoint()`` with ``session.begin_nested()`` then
use the savepoint returned by the function to commit just the nested transaction
i.e. ``savepoint = session.begin_nested(); savepoint.commit()`` or use it as a
context manager i.e. ``with session.begin_nested():``.
(`for details see #79 <https://github.com/zopefoundation/zope.sqlalchemy/pull/79#issuecomment-1516069841>`_)
2.0 (2023-02-06)
----------------
- Drop support for Python 2.7, 3.5, 3.6.
- Drop support for ``SQLAlchemy < 1.1``
(`#65 <https://github.com/zopefoundation/zope.sqlalchemy/issues/65>`_)
- Add support for Python 3.10, 3.11.
1.6 (2021-09-06)
----------------
- Add support for Python 2.7 on SQLAlchemy 1.4.
(`#71 <https://github.com/zopefoundation/zope.sqlalchemy/issues/71>`_)
1.5 (2021-07-14)
----------------
- Call ``mark_changed`` also on the ``do_orm_execute`` event if the operation
is an insert, update or delete. This is SQLAlchemy >= 1.4 only, as it
introduced that event.
(`#67 <https://github.com/zopefoundation/zope.sqlalchemy/issues/67>`_)
- Fixup get transaction. There was regression introduced in 1.4.
(`#66 <https://github.com/zopefoundation/zope.sqlalchemy/issues/66>`_)
1.4 (2021-04-26)
----------------
- Add ``mark_changed`` and ``join_transaction`` methods to
``ZopeTransactionEvents``.
(`#46 <https://github.com/zopefoundation/zope.sqlalchemy/issues/46>`_)
- Reduce DeprecationWarnings with SQLAlchemy 1.4 and require at least
SQLAlchemy >= 0.9.
(`#54 <https://github.com/zopefoundation/zope.sqlalchemy/issues/54>`_)
- Add support for SQLAlchemy 1.4.
(`#58 <https://github.com/zopefoundation/zope.sqlalchemy/issues/58>`_)
- Prevent using an SQLAlchemy 1.4 version with broken flush support.
(`#57 <https://github.com/zopefoundation/zope.sqlalchemy/issues/57>`_)
1.3 (2020-02-17)
----------------
* ``.datamanager.register()`` now returns the ``ZopeTransactionEvents``
instance which was used to register the events. This allows to change its
parameters afterwards.
(`#40 <https://github.com/zopefoundation/zope.sqlalchemy/pull/40>`_)
* Add preliminary support for Python 3.9a3.
1.2 (2019-10-17)
----------------
**Breaking Changes**
* Drop support for Python 3.4.
* Add support for Python 3.7 and 3.8.
* Fix deprecation warnings for the event system. We already used it in general
but still leveraged the old extension mechanism in some places.
(`#31 <https://github.com/zopefoundation/zope.sqlalchemy/issues/31>`_)
To make things clearer we renamed the ``ZopeTransactionExtension`` class
to ``ZopeTransactionEvents``. Existing code using the 'register' version
stays compatible.
**Upgrade from 1.1**
Your old code like this:
.. code-block:: python
from zope.sqlalchemy import ZopeTransactionExtension
DBSession = scoped_session(sessionmaker(extension=ZopeTransactionExtension(), **options))
becomes:
.. code-block:: python
from zope.sqlalchemy import register
DBSession = scoped_session(sessionmaker(**options))
register(DBSession)
1.1 (2019-01-03)
----------------
* Add support to MySQL using pymysql.
1.0 (2018-01-31)
----------------
* Add support for Python 3.4 up to 3.6.
* Support SQLAlchemy 1.2.
* Drop support for Python 2.6, 3.2 and 3.3.
* Drop support for transaction < 1.6.0.
* Fix hazard that could cause SQLAlchemy session not to be committed when
transaction is committed in rare situations.
(`#23 <https://github.com/zopefoundation/zope.sqlalchemy/pull/23>`_)
0.7.7 (2016-06-23)
------------------
* Support SQLAlchemy 1.1.
(`#15 <https://github.com/zopefoundation/zope.sqlalchemy/issues/15>`_)
0.7.6 (2015-03-20)
------------------
* Make version check in register compatible with prereleases.
0.7.5 (2014-06-17)
------------------
* Ensure mapped objects are expired following a ``transaction.commit()`` when
no database commit was required.
(`#8 <https://github.com/zopefoundation/zope.sqlalchemy/issues/8>`_)
0.7.4 (2014-01-06)
------------------
* Allow ``session.commit()`` on nested transactions to facilitate integration
of existing code that might not use ``transaction.savepoint()``.
(`#1 <https://github.com/zopefoundation/zope.sqlalchemy/issues/1>`_)
* Add a new function zope.sqlalchemy.register(), which replaces the
direct use of ZopeTransactionExtension to make use
of the newer SQLAlchemy event system to establish instrumentation on
the given Session instance/class/factory. Requires at least
SQLAlchemy 0.7.
(`#4 <https://github.com/zopefoundation/zope.sqlalchemy/issues/4>`_)
* Fix bug where `keep_session=True` didn't work when a transaction was
  joined by a flush and other managers.
  (`#5 <https://github.com/zopefoundation/zope.sqlalchemy/issues/5>`_)
0.7.3 (2013-09-25)
------------------
* Prevent the ``Session`` object from getting into a "wedged" state if joining
a transaction fails. With thread scoped sessions that are reused this can cause
persistent errors requiring a server restart.
(`#2 <https://github.com/zopefoundation/zope.sqlalchemy/issues/2>`_)
0.7.2 (2013-02-19)
------------------
* Make life-time of sessions configurable. Specify `keep_session=True` when
setting up the SA extension.
* Python 3.3 compatibility.
0.7.1 (2012-05-19)
------------------
* Use ``@implementer`` as a class decorator instead of ``implements()`` at
class scope for compatibility with ``zope.interface`` 4.0. This requires
``zope.interface`` >= 3.6.0.
0.7 (2011-12-06)
----------------
* Python 3.2 compatibility.
0.6.1 (2011-01-08)
------------------
* Update datamanager.mark_changed to handle sessions which have not yet logged
a (ORM) query.
0.6 (2010-07-24)
----------------
* Implement should_retry for sqlalchemy.orm.exc.ConcurrentModificationError
and serialization errors from PostgreSQL and Oracle.
(Specify transaction>=1.1 to use this functionality.)
* Include license files.
* Add ``transaction_manager`` attribute to data managers for compliance with
IDataManager interface.
0.5 (2010-06-07)
----------------
* Remove redundant session.flush() / session.clear() on savepoint operations.
These were only needed with SQLAlchemy 0.4.x.
* SQLAlchemy 0.6.x support. Require SQLAlchemy >= 0.5.1.
* Add support for running ``python setup.py test``.
* Pull in pysqlite explicitly as a test dependency.
* Setup sqlalchemy mappers in test setup and clear them in tear down. This
makes the tests more robust and clears up the global state after. It
caused the tests to fail when other tests in the same run called
clear_mappers.
0.4 (2009-01-20)
----------------
Bugs fixed:
* Only raise errors in tpc_abort if we have committed.
* Remove the session id from the SESSION_STATE just before we de-reference the
  session (i.e. all work is already successfully completed). This fixes cases
  where the transaction commit failed but SESSION_STATE was already cleared. In
  those cases, the transaction was wedged as abort would always error. This
  happened on PostgreSQL where invalid SQL was used and the error caught.
* Call session.flush() unconditionally in tpc_begin.
* Change error message on session.commit() to be friendlier to non zope users.
Feature changes:
* Support for bulk update and delete with SQLAlchemy 0.5.1
0.3 (2008-07-29)
----------------
Bugs fixed:
* New objects added to a session did not cause a transaction join, so were not
committed at the end of the transaction unless the database was accessed.
SQLAlchemy 0.4.7 or 0.5beta3 now required.
Feature changes:
* For correctness and consistency with ZODB, renamed the function 'invalidate'
to 'mark_changed' and the status 'invalidated' to 'changed'.
0.2 (2008-06-28)
----------------
Feature changes:
* Updated to support SQLAlchemy 0.5. (0.4.6 is still supported).
0.1 (2008-05-15)
----------------
* Initial public release.
| zope.sqlalchemy | /zope.sqlalchemy-3.0.tar.gz/zope.sqlalchemy-3.0/CHANGES.rst | CHANGES.rst |
***************
zope.sqlalchemy
***************
.. contents::
:local:
Introduction
============
The aim of this package is to unify the plethora of existing packages
integrating SQLAlchemy with Zope's transaction management. As such it seeks
only to provide a data manager and makes no attempt to define a `zopeish` way
to configure engines.
For WSGI applications, Zope style automatic transaction management is
available with `repoze.tm2`_ (used by `Turbogears 2`_ and other systems).
This package is also used by `pyramid_tm`_ (an add-on of the `Pyramid`_) web
framework.
You need to understand `SQLAlchemy`_ and the `Zope transaction manager`_ for
this package and this README to make any sense.
.. _repoze.tm2: https://repozetm2.readthedocs.io/en/latest/
.. _pyramid_tm: https://docs.pylonsproject.org/projects/pyramid_tm/en/latest/
.. _Pyramid: https://pylonsproject.org/
.. _Turbogears 2: https://turbogears.org/
.. _SQLAlchemy: https://sqlalchemy.org/docs/
.. _Zope transaction manager: https://www.zodb.org/en/latest/#transactions
Running the tests
=================
This package is distributed as a buildout. Using your desired python run:
$ python bootstrap.py
$ ./bin/buildout
This will download the dependent packages and setup the test script, which may
be run with:
$ ./bin/test
or with the standard setuptools test command:
$ ./bin/py setup.py test
To enable testing with your own database set the TEST_DSN environment variable
to your sqlalchemy database dsn. Two-phase commit behaviour may be tested by
setting the TEST_TWOPHASE variable to a non empty string. e.g:
$ TEST_DSN=postgres://test:test@localhost/test TEST_TWOPHASE=True bin/test
Usage in short
==============
The integration between Zope transactions and the SQLAlchemy event system is
done using the ``register()`` function on the session factory class.
.. code-block:: python
from zope.sqlalchemy import register
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker, scoped_session
engine = sqlalchemy.create_engine("postgresql://scott:tiger@localhost/test")
DBSession = scoped_session(sessionmaker(bind=engine))
register(DBSession)
Instantiated sessions commits and rollbacks will now be integrated with Zope
transactions.
.. code-block:: python
import transaction
from sqlalchemy.sql import text
session = DBSession()
result = session.execute(text("DELETE FROM objects WHERE id=:id"), {"id": 2})
row = result.fetchone()
transaction.commit()
Full Example
============
This example is lifted directly from the SQLAlchemy declarative documentation.
First the necessary imports.
>>> from sqlalchemy import *
>>> from sqlalchemy.orm import declarative_base, scoped_session, sessionmaker, relationship
>>> from sqlalchemy.sql import text
>>> from zope.sqlalchemy import register
>>> import transaction
Now to define the mapper classes.
>>> Base = declarative_base()
>>> class User(Base):
... __tablename__ = 'test_users'
... id = Column('id', Integer, primary_key=True)
... name = Column('name', String(50))
... addresses = relationship("Address", backref="user")
>>> class Address(Base):
... __tablename__ = 'test_addresses'
... id = Column('id', Integer, primary_key=True)
... email = Column('email', String(50))
... user_id = Column('user_id', Integer, ForeignKey('test_users.id'))
Create an engine and setup the tables. Note that for this example to work a
recent version of sqlite/pysqlite is required. 3.4.0 seems to be sufficient.
>>> engine = create_engine(TEST_DSN)
>>> Base.metadata.create_all(engine)
Now to create the session itself. As zope is a threaded web server we must use
scoped sessions. Zope and SQLAlchemy sessions are tied together by using the
register
>>> Session = scoped_session(sessionmaker(bind=engine,
... twophase=TEST_TWOPHASE))
Call the scoped session factory to retrieve a session. You may call this as
many times as you like within a transaction and you will always retrieve the
same session. At present there are no users in the database.
>>> session = Session()
>>> register(session)
<zope.sqlalchemy.datamanager.ZopeTransactionEvents object at ...>
>>> session.query(User).all()
[]
We can now create a new user and commit the changes using Zope's transaction
machinery, just as Zope's publisher would.
>>> session.add(User(id=1, name='bob'))
>>> transaction.commit()
Engine level connections are outside the scope of the transaction integration.
>>> engine.connect().execute(text('SELECT * FROM test_users')).fetchall()
[(1, ...'bob')]
A new transaction requires a new session. Let's add an address.
>>> session = Session()
>>> bob = session.query(User).all()[0]
>>> str(bob.name)
'bob'
>>> bob.addresses
[]
>>> bob.addresses.append(Address(id=1, email='[email protected]'))
>>> transaction.commit()
>>> session = Session()
>>> bob = session.query(User).all()[0]
>>> bob.addresses
[<Address object at ...>]
>>> str(bob.addresses[0].email)
'[email protected]'
>>> bob.addresses[0].email = 'wrong@wrong'
To rollback a transaction, use transaction.abort().
>>> transaction.abort()
>>> session = Session()
>>> bob = session.query(User).all()[0]
>>> str(bob.addresses[0].email)
'[email protected]'
>>> transaction.abort()
By default, zope.sqlalchemy puts sessions in an 'active' state when they are
first used. ORM write operations automatically move the session into a
'changed' state. This avoids unnecessary database commits. Sometimes it
is necessary to interact with the database directly through SQL. It is not
possible to guess whether such an operation is a read or a write. Therefore we
must manually mark the session as changed when manual SQL statements write
to the DB.
>>> session = Session()
>>> conn = session.connection()
>>> users = Base.metadata.tables['test_users']
>>> conn.execute(users.update().where(users.c.name=='bob'), {'name': 'ben'})
<sqlalchemy.engine... object at ...>
>>> from zope.sqlalchemy import mark_changed
>>> mark_changed(session)
>>> transaction.commit()
>>> session = Session()
>>> str(session.query(User).all()[0].name)
'ben'
>>> transaction.abort()
If this is a problem you may register the events and tell them to place the
session in the 'changed' state initially.
>>> Session.remove()
>>> register(Session, 'changed')
<zope.sqlalchemy.datamanager.ZopeTransactionEvents object at ...>
>>> session = Session()
>>> conn = session.connection()
>>> conn.execute(users.update().where(users.c.name=='ben'), {'name': 'bob'})
<sqlalchemy.engine... object at ...>
>>> transaction.commit()
>>> session = Session()
>>> str(session.query(User).all()[0].name)
'bob'
>>> transaction.abort()
The `mark_changed` function accepts a kwarg for `keep_session` which defaults
to `False` and is unaware of the registered extensions `keep_session`
configuration.
If you intend for `keep_session` to be True, you can specify it explicitly:
>>> from zope.sqlalchemy import mark_changed
>>> mark_changed(session, keep_session=True)
>>> transaction.commit()
You can also use a configured extension to preserve this argument:
>>> sessionExtension = register(session, keep_session=True)
>>> sessionExtension.mark_changed(session)
>>> transaction.commit()
Long-lasting session scopes
---------------------------
The default behaviour of the transaction integration is to close the session
after a commit. You can tell by trying to access an object after committing:
>>> bob = session.query(User).all()[0]
>>> transaction.commit()
>>> bob.name
Traceback (most recent call last):
sqlalchemy.orm.exc.DetachedInstanceError: Instance <User at ...> is not bound to a Session; attribute refresh operation cannot proceed...
To support cases where a session needs to last longer than a transaction (useful
in test suites) you can specify to keep a session when registering the events:
>>> Session = scoped_session(sessionmaker(bind=engine,
... twophase=TEST_TWOPHASE))
>>> register(Session, keep_session=True)
<zope.sqlalchemy.datamanager.ZopeTransactionEvents object at ...>
>>> session = Session()
>>> bob = session.query(User).all()[0]
>>> bob.name = 'bobby'
>>> transaction.commit()
>>> bob.name
'bobby'
The session must then be closed manually:
>>> session.close()
Development version
===================
`GIT version <https://github.com/zopefoundation/zope.sqlalchemy>`_
| zope.sqlalchemy | /zope.sqlalchemy-3.0.tar.gz/zope.sqlalchemy-3.0/src/zope/sqlalchemy/README.rst | README.rst |
from weakref import WeakKeyDictionary
import transaction as zope_transaction
from packaging.version import Version as parse_version
from sqlalchemy import __version__ as sqlalchemy_version
from sqlalchemy.engine.base import Engine
from sqlalchemy.exc import DBAPIError
from sqlalchemy.orm.exc import ConcurrentModificationError
from transaction._transaction import Status as ZopeStatus
from transaction.interfaces import IDataManagerSavepoint
from transaction.interfaces import ISavepointDataManager
from zope.interface import implementer
# Registry of (exception class, predicate-or-None) pairs describing
# driver-level errors that indicate a transient conflict which may be
# resolved by retrying the transaction (consumed by
# SessionDataManager.should_retry).  A predicate of None means any
# instance of the class is retryable.  Each driver is optional.
_retryable_errors = []
try:
    import psycopg2.extensions
except ImportError:
    pass
else:
    _retryable_errors.append(
        (psycopg2.extensions.TransactionRollbackError, None))
# Error Class 40: Transaction Rollback, for details
# see https://www.psycopg.org/psycopg3/docs/api/errors.html
try:
    import psycopg.errors
except ImportError:
    pass
else:
    _retryable_errors.append(
        (psycopg.errors.OperationalError,
         lambda e: e.sqlstate.startswith('40'))
    )
# ORA-08177: can't serialize access for this transaction
try:
    import cx_Oracle
except ImportError:
    pass
else:
    _retryable_errors.append(
        (cx_Oracle.DatabaseError, lambda e: e.args[0].code == 8177)
    )
# 1213: Deadlock found when trying to get lock; try restarting transaction
try:
    import pymysql
except ImportError:
    pass
else:
    _retryable_errors.append(
        (pymysql.err.OperationalError, lambda e: e.args[0] == 1213)
    )
# The status of the session is stored on the connection info
STATUS_ACTIVE = "active"  # session joined to transaction, writes allowed.
STATUS_CHANGED = "changed"  # data has been written
# session joined to transaction, no writes allowed.
STATUS_READONLY = "readonly"
STATUS_INVALIDATED = STATUS_CHANGED  # BBB
# Backends for which the savepoint property refuses to work (see
# SessionDataManager.savepoint).
NO_SAVEPOINT_SUPPORT = {"sqlite"}
_SESSION_STATE = WeakKeyDictionary()  # a mapping of session -> status
# This is thread safe because you are using scoped sessions
# True when the installed SQLAlchemy is 1.4 or newer; used to select the
# appropriate API in SessionDataManager.__init__.
SA_GE_14 = parse_version(sqlalchemy_version) >= parse_version('1.4.0')
#
# The two variants of the DataManager.
#
@implementer(ISavepointDataManager)
class SessionDataManager:
    """Integrate a top level sqlalchemy session transaction into a
    zope transaction.

    One phase variant.

    The manager joins the current zope transaction on construction and
    commits the SQLAlchemy transaction during ``tpc_vote`` (voting last;
    see ``sortKey``) so the zope transaction can still abort cleanly if
    the database raises a conflict.
    """
    def __init__(
            self, session, status, transaction_manager, keep_session=False):
        self.transaction_manager = transaction_manager
        if SA_GE_14:
            root_transaction = session.get_transaction() or session.begin()
        else:
            # Support both SQLAlchemy 1.0 and 1.1
            # https://github.com/zopefoundation/zope.sqlalchemy/issues/15
            _iterate_parents = (
                getattr(session.transaction, "_iterate_self_and_parents", None)
                or session.transaction._iterate_parents
            )
            root_transaction = _iterate_parents()[-1]
        # The outermost SQLAlchemy transaction for this session; committed
        # or discarded in step with the zope transaction.
        self.tx = root_transaction
        self.session = session
        transaction_manager.get().join(self)
        _SESSION_STATE[session] = status
        self.state = "init"
        # If true, the session is only expired (never closed) in _finish.
        self.keep_session = keep_session
    def _finish(self, final_state):
        # Drop all references to the session/transaction and record the
        # final state before touching the session again.
        assert self.tx is not None
        session = self.session
        del _SESSION_STATE[self.session]
        self.tx = self.session = None
        self.state = final_state
        # closing the session is the last thing we do. If it fails the
        # transactions don't get wedged and the error propagates
        if not self.keep_session:
            session.close()
        else:
            session.expire_all()
    def abort(self, trans):
        if self.tx is not None:  # there may have been no work to do
            self._finish("aborted")
    def tpc_begin(self, trans):
        # Push pending ORM state to the database before voting starts.
        self.session.flush()
    def commit(self, trans):
        # If no database work was done (status is not "changed"), expire
        # mapped objects when configured and detach now; for changed
        # sessions the actual database commit happens in tpc_vote.
        status = _SESSION_STATE[self.session]
        if status is not STATUS_INVALIDATED:
            session = self.session
            if session.expire_on_commit:
                session.expire_all()
            self._finish("no work")
    def tpc_vote(self, trans):
        # for a one phase data manager commit last in tpc_vote
        if self.tx is not None:  # there may have been no work to do
            self.tx.commit()
            self._finish("committed")
    def tpc_finish(self, trans):
        pass
    def tpc_abort(self, trans):
        # By the time tpc_abort runs we must not have committed already.
        assert self.state != "committed"
    def sortKey(self):
        # Try to sort last, so that we vote last - we may commit in tpc_vote(),
        # which allows Zope to roll back its transaction if the RDBMS
        # threw a conflict error.
        return "~sqlalchemy:%d" % id(self.tx)
    @property
    def savepoint(self):
        """Savepoints are only supported when all connections support
        subtransactions.
        """
        # ATT: the following check is weak since the savepoint capability
        # of a RDBMS also depends on its version. E.g. Postgres 7.X does not
        # support savepoints but Postgres is whitelisted independent of its
        # version. Possibly additional version information should be taken
        # into account (ajung)
        if {
            engine.url.drivername
            for engine in self.tx._connections.keys()
            if isinstance(engine, Engine)
        }.intersection(NO_SAVEPOINT_SUPPORT):
            # Raising AttributeError makes callers probing for savepoint
            # support (hasattr) see none.
            raise AttributeError("savepoint")
        return self._savepoint
    def _savepoint(self):
        return SessionSavepoint(self.session)
    def should_retry(self, error):
        """Return True for errors indicating a retryable conflict.

        Falls through (returning None, i.e. falsy) for everything else.
        """
        if isinstance(error, ConcurrentModificationError):
            return True
        if isinstance(error, DBAPIError):
            orig = error.orig
            for error_type, test in _retryable_errors:
                if isinstance(orig, error_type):
                    if test is None:
                        return True
                    if test(orig):
                        return True
class TwoPhaseSessionDataManager(SessionDataManager):
    """Two phase variant.
    """

    def tpc_vote(self, trans):
        # No transaction means there was no work to do.
        if self.tx is None:
            return
        self.tx.prepare()
        self.state = "voted"

    def tpc_finish(self, trans):
        if self.tx is None:
            return
        self.tx.commit()
        self._finish("committed")

    def tpc_abort(self, trans):
        # We may never have voted and been aborted already; only roll
        # back a still-live transaction.
        if self.tx is None:
            return
        self.tx.rollback()
        self._finish("aborted commit")

    def sortKey(self):
        # Sort normally
        return "sqlalchemy.twophase:%d" % id(self.tx)
@implementer(IDataManagerSavepoint)
class SessionSavepoint:
    """Adapt a sqlalchemy nested transaction to the zope transaction
    savepoint API.
    """

    def __init__(self, session):
        self.session = session
        # The nested transaction is created eagerly; it is the savepoint.
        self.transaction = session.begin_nested()

    def rollback(self):
        # no need to check validity, sqlalchemy should raise an exception.
        self.transaction.rollback()
def join_transaction(
    session,
    initial_state=STATUS_ACTIVE,
    transaction_manager=zope_transaction.manager,
    keep_session=False,
):
    """Join a session to a transaction using the appropriate datamanager.
    Safe to call repeatedly: a session that is already joined is left
    untouched.
    `initial_state` is either STATUS_ACTIVE, STATUS_INVALIDATED or
    STATUS_READONLY
    With the default initial status of STATUS_ACTIVE you must ensure that
    mark_changed(session) is called when data is written to the database;
    the ZopeTransactionEvents can be used to ensure that this is
    called automatically after session write operations.
    """
    if _SESSION_STATE.get(session) is not None:
        # Already joined: nothing to do.
        return
    factory = (
        TwoPhaseSessionDataManager if session.twophase else SessionDataManager
    )
    factory(
        session, initial_state, transaction_manager, keep_session=keep_session
    )
def mark_changed(
    session, transaction_manager=zope_transaction.manager, keep_session=False
):
    """Mark a session as needing to be committed.
    """
    current = _SESSION_STATE.get(session, None)
    assert current is not STATUS_READONLY, (
        "Session already registered as read only"
    )
    # Joining is a no-op when the session is already registered.
    join_transaction(
        session, STATUS_CHANGED, transaction_manager, keep_session)
    _SESSION_STATE[session] = STATUS_CHANGED
class ZopeTransactionEvents:
    """Record that a flush has occurred on a session's connection. This allows
    the DataManager to rollback rather than commit on read only transactions.
    """

    def __init__(
        self,
        initial_state=STATUS_ACTIVE,
        transaction_manager=zope_transaction.manager,
        keep_session=False,
    ):
        if initial_state == "invalidated":
            initial_state = STATUS_CHANGED  # BBB
        self.initial_state = initial_state
        self.transaction_manager = transaction_manager
        self.keep_session = keep_session

    def after_begin(self, session, transaction, connection):
        self.join_transaction(session)

    def after_attach(self, session, instance):
        self.join_transaction(session)

    def after_flush(self, session, flush_context):
        self.mark_changed(session)

    def after_bulk_update(self, update_context):
        self.mark_changed(update_context.session)

    def after_bulk_delete(self, delete_context):
        self.mark_changed(delete_context.session)

    def before_commit(self, session):
        if SA_GE_14:
            in_nested_transaction = session.in_nested_transaction()
        else:
            # support sqlalchemy 1.3 and below
            in_nested_transaction = session.transaction.nested
        assert (
            in_nested_transaction
            or self.transaction_manager.get().status == ZopeStatus.COMMITTING
        ), "Transaction must be committed using the transaction manager"

    def do_orm_execute(self, execute_state):
        # Only ORM-level DML statements mark the session as changed.
        is_dml = (
            execute_state.is_update
            or execute_state.is_insert
            or execute_state.is_delete
        )
        if execute_state.is_orm_statement and is_dml:
            self.mark_changed(execute_state.session)

    def mark_changed(self, session):
        """Developer interface to `mark_changed` that preserves the extension's
        active configuration.
        """
        mark_changed(session, self.transaction_manager, self.keep_session)

    def join_transaction(self, session):
        """Developer interface to `join_transaction` that preserves the
        extension's active configuration.
        """
        join_transaction(
            session, self.initial_state, self.transaction_manager,
            self.keep_session
        )
def register(
    session,
    initial_state=STATUS_ACTIVE,
    transaction_manager=zope_transaction.manager,
    keep_session=False,
):
    """Register ZopeTransaction listener events on the
    given Session or Session factory/class.
    This function requires at least SQLAlchemy 0.7 and makes use
    of the newer sqlalchemy.event package in order to register event listeners
    on the given Session.
    The session argument here may be a Session class or subclass, a
    sessionmaker or scoped_session instance, or a specific Session instance.
    Event listening will be specific to the scope of the type of argument
    passed, including specificity to its subclass as well as its identity.
    Returns the ZopeTransactionEvents instance whose bound methods were
    registered as the event listeners.
    """
    from sqlalchemy import event

    ext = ZopeTransactionEvents(
        initial_state=initial_state,
        transaction_manager=transaction_manager,
        keep_session=keep_session,
    )
    listeners = [
        ("after_begin", ext.after_begin),
        ("after_attach", ext.after_attach),
        ("after_flush", ext.after_flush),
        ("after_bulk_update", ext.after_bulk_update),
        ("after_bulk_delete", ext.after_bulk_delete),
        ("before_commit", ext.before_commit),
    ]
    if SA_GE_14:
        listeners.append(("do_orm_execute", ext.do_orm_execute))
    for name, handler in listeners:
        event.listen(session, name, handler)
    return ext
""" HTML renderer for STX documents.
"""
from functools import partial
from html import escape

# quote=False: the renderer only puts text into element content, never
# into attribute values, so only &, < and > need escaping.
escape = partial(escape, quote=False)
class HTML:
    """Render a StructuredText DOM tree as HTML.

    Handlers are looked up by node name via ``element_types`` and all
    write their output by calling ``output`` with string fragments.
    """

    # Node types allowed inline, inside a paragraph.
    paragraph_nestable = {
        '#text': '_text',
        'StructuredTextLiteral': 'literal',
        'StructuredTextEmphasis': 'emphasis',
        'StructuredTextStrong': 'strong',
        'StructuredTextLink': 'link',
        'StructuredTextXref': 'xref',
        'StructuredTextInnerLink': 'innerLink',
        'StructuredTextNamedLink': 'namedLink',
        'StructuredTextUnderline': 'underline',
        'StructuredTextSGML': 'sgml',  # this might or might not be valid
    }

    # Complete dispatch table: node name -> handler method name.
    element_types = dict(
        paragraph_nestable,
        StructuredTextDocument='document',
        StructuredTextParagraph='paragraph',
        StructuredTextExample='example',
        StructuredTextBullet='bullet',
        StructuredTextNumbered='numbered',
        StructuredTextDescription='description',
        StructuredTextDescriptionTitle='descriptionTitle',
        StructuredTextDescriptionBody='descriptionBody',
        StructuredTextSection='section',
        StructuredTextSectionTitle='sectionTitle',
        StructuredTextTable='table',
    )

    def dispatch(self, doc, level, output):
        # Route the node to the handler registered for its node name.
        handler = getattr(self, self.element_types[doc.getNodeName()])
        handler(doc, level, output)

    def __call__(self, doc, level=1, header=True):
        fragments = []
        self.header = header
        self.dispatch(doc, level - 1, fragments.append)
        return ''.join(fragments)

    def _text(self, doc, level, output):
        output(doc.getNodeValue())

    def document(self, doc, level, output):
        children = doc.getChildNodes()
        if self.header:
            output('<html>\n')
            # Use the first section's title, when present, as the page title.
            if (children
                    and children[0].getNodeName() == 'StructuredTextSection'):
                output('<head>\n<title>%s</title>\n</head>\n' %
                       children[0].getChildNodes()[0].getNodeValue())
            output('<body>\n')
        for child in children:
            self.dispatch(child, level, output)
        if self.header:
            output('</body>\n')
            output('</html>\n')

    def section(self, doc, level, output):
        # Sections only deepen the heading level of their children.
        for child in doc.getChildNodes():
            self.dispatch(child, level + 1, output)

    def sectionTitle(self, doc, level, output):
        output('<h%d>' % (level))
        for child in doc.getChildNodes():
            self.dispatch(child, level, output)
        output('</h%d>\n' % (level))

    def descriptionTitle(self, doc, level, output):
        output('<dt>')
        for child in doc.getChildNodes():
            self.dispatch(child, level, output)
        output('</dt>\n')

    def descriptionBody(self, doc, level, output):
        output('<dd>')
        for child in doc.getChildNodes():
            self.dispatch(child, level, output)
        output('</dd>\n')

    def _list(self, doc, level, output, list_tag, item_tag='li'):
        # Open the list only at the first consecutive sibling of this type.
        previous = doc.getPreviousSibling()
        if previous is None or previous.getNodeName() != doc.getNodeName():
            output('\n<' + list_tag + '>\n')
        if item_tag:
            output('<' + item_tag + '>')
        for child in doc.getChildNodes():
            self.dispatch(child, level, output)
        following = doc.getNextSibling()
        if item_tag:
            output('</' + item_tag + '>\n')
        # Close the list after the last consecutive sibling of this type.
        if following is None or following.getNodeName() != doc.getNodeName():
            output('\n</' + list_tag + '>\n')

    def description(self, doc, level, output):
        # <dt>/<dd> pairs come from the children; no per-item wrapper.
        self._list(doc, level, output, 'dl', item_tag=None)

    def bullet(self, doc, level, output):
        self._list(doc, level, output, "ul")

    def numbered(self, doc, level, output):
        self._list(doc, level, output, "ol")

    def example(self, doc, level, output):
        # Examples are emitted verbatim, HTML-escaped, in <pre> blocks.
        for child in doc.getChildNodes():
            output('\n<pre>\n')
            output(escape(child.getNodeValue()))
            output('\n</pre>\n')

    def paragraph(self, doc, level, output):
        output('<p>')
        open_p = True
        for child in doc.getChildNodes():
            if child.getNodeName() in self.paragraph_nestable:
                # Inline content belongs inside a <p>; reopen if needed.
                if not open_p:
                    output('<p>')
                    open_p = True
                self.dispatch(child, level, output)
            else:
                # Block-level content closes the current <p> first.
                if open_p:
                    output('</p>\n')
                    open_p = False
                self.dispatch(child, level, output)
        if open_p:
            output('</p>\n')
            open_p = False

    def link(self, doc, level, output):
        output('<a href="%s">' % doc.href)
        for child in doc.getChildNodes():
            self.dispatch(child, level, output)
        output('</a>')

    def emphasis(self, doc, level, output):
        output('<em>')
        for child in doc.getChildNodes():
            self.dispatch(child, level, output)
        output('</em>')

    def literal(self, doc, level, output):
        output('<code>')
        for child in doc.getChildNodes():
            output(escape(child.getNodeValue()))
        output('</code>')

    def strong(self, doc, level, output):
        output('<strong>')
        for child in doc.getChildNodes():
            self.dispatch(child, level, output)
        output('</strong>')

    def underline(self, doc, level, output):
        output("<u>")
        for child in doc.getChildNodes():
            self.dispatch(child, level, output)
        output("</u>")

    def innerLink(self, doc, level, output):
        # The children are rendered twice: once inside the href anchor
        # and once as the visible [..] text.
        output('<a href="#ref')
        for child in doc.getChildNodes():
            self.dispatch(child, level, output)
        output('">[')
        for child in doc.getChildNodes():
            self.dispatch(child, level, output)
        output(']</a>')

    def namedLink(self, doc, level, output):
        # Same double rendering as innerLink, but as the anchor target.
        output('<a name="ref')
        for child in doc.getChildNodes():
            self.dispatch(child, level, output)
        output('">[')
        for child in doc.getChildNodes():
            self.dispatch(child, level, output)
        output(']</a>')

    def sgml(self, doc, level, output):
        for child in doc.getChildNodes():
            self.dispatch(child, level, output)

    def xref(self, doc, level, output):
        val = doc.getNodeValue()
        output('<a href="#ref%s">[%s]</a>' % (val, val))

    def table(self, doc, level, output):
        """
        A StructuredTextTable holds StructuredTextRow(s) which
        holds StructuredTextColumn(s). A StructuredTextColumn
        is a type of StructuredTextParagraph and thus holds
        the actual data.
        """
        output('<table border="1" cellpadding="2">\n')
        for row in doc.getRows()[0]:
            output("<tr>\n")
            for column in row.getColumns()[0]:
                cell_tag = column.getType()
                output('<%s colspan="%s" align="%s" valign="%s">'
                       % (cell_tag,
                          column.getSpan(),
                          column.getAlign(),
                          column.getValign()))
                for child in column.getChildNodes():
                    self.dispatch(child, level, output)
                output("</" + cell_tag + ">\n")
            output("</tr>\n")
        output("</table>\n")
class HTMLWithImages(HTML):
    """HTML renderer that additionally understands image nodes."""

    paragraph_nestable = dict(
        HTML.paragraph_nestable, StructuredTextImage='image')

    element_types = dict(
        HTML.element_types, StructuredTextImage='image')

    def image(self, doc, level, output):
        has_key = hasattr(doc, 'key')
        if has_key:
            # Emit an anchor so figures can be referenced by key.
            output('<a name="%s"></a>\n' % doc.key)
        output('<img src="%s" alt="%s" />\n'
               % (doc.href, doc.getNodeValue()))
        if doc.getNodeValue() and has_key:
            # Keyed, captioned images also get a figure caption line.
            output('<p><b>Figure %s</b> %s</p>\n'
                   % (doc.key, doc.getNodeValue()))
""" Structured text document parser
"""
import re
from zope.structuredtext.stletters import dbl_quoted_punc
from zope.structuredtext.stletters import letters
from zope.structuredtext.stletters import literal_punc
from zope.structuredtext.stletters import phrase_delimiters
from zope.structuredtext.stletters import strongem_punc
from zope.structuredtext.stletters import under_punc
from zope.structuredtext.stng import StructuredTextBullet
from zope.structuredtext.stng import StructuredTextDescription
from zope.structuredtext.stng import StructuredTextDocument
from zope.structuredtext.stng import StructuredTextEmphasis
from zope.structuredtext.stng import StructuredTextExample
from zope.structuredtext.stng import StructuredTextImage
from zope.structuredtext.stng import StructuredTextInnerLink
from zope.structuredtext.stng import StructuredTextLink
from zope.structuredtext.stng import StructuredTextLiteral
from zope.structuredtext.stng import StructuredTextNamedLink
from zope.structuredtext.stng import StructuredTextNumbered
from zope.structuredtext.stng import StructuredTextParagraph
from zope.structuredtext.stng import StructuredTextSection
from zope.structuredtext.stng import StructuredTextSGML
from zope.structuredtext.stng import StructuredTextStrong
from zope.structuredtext.stng import StructuredTextTable
from zope.structuredtext.stng import StructuredTextUnderline
from zope.structuredtext.stng import StructuredTextXref
from zope.structuredtext.stng import structurize
class Document:
    """
    Class instance calls [ex.=> x()] require a structured text
    structure. Doc will then parse each paragraph in the structure
    and will find the special structures within each paragraph.
    Each special structure will be stored as an instance. Special
    structures within another special structure are stored within
    the 'top' structure
    EX : '-underline this-' => would be turned into an underline
    instance. '-underline **this**' would be stored as an underline
    instance with a strong instance stored in its string
    """

    # Paragraph-level recognizers, tried in order; the first match wins.
    paragraph_types = [
        'doc_bullet',
        'doc_numbered',
        'doc_description',
        'doc_header',
        'doc_table',
    ]
    # 'doc_inner_link',
    # 'doc_named_link',
    # 'doc_underline'

    # Inline (colorizable text) recognizers, applied in order to each text.
    text_types = [
        'doc_literal',
        'doc_sgml',
        'doc_inner_link',
        'doc_named_link',
        'doc_href1',
        'doc_href2',
        'doc_strong',
        'doc_emphasize',
        'doc_underline',
        'doc_sgml',  # NOTE(review): 'doc_sgml' is listed twice; the
                     # second pass looks redundant -- confirm before removing.
        'doc_xref',
    ]

    def __call__(self, doc):
        # Accept either raw structured text (str) or an already
        # structurized tree; always return a colored document tree.
        if isinstance(doc, str):
            doc = structurize(doc)
            doc.setSubparagraphs(self.color_paragraphs(
                doc.getSubparagraphs()))
        else:
            doc = StructuredTextDocument(self.color_paragraphs(
                doc.getSubparagraphs()))
        return doc

    def parse(self, raw_string, text_type, type=type):
        """
        Parse accepts a raw_string, an expr to test the raw_string,
        and the raw_string's subparagraphs.
        Parse will continue to search through raw_string until
        all instances of expr in raw_string are found.
        If no instances of expr are found, raw_string is returned.
        Otherwise a list of substrings and instances is returned
        """
        tmp = []  # the list to be returned if raw_string is split
        if isinstance(text_type, str):
            text_type = getattr(self, text_type)
        while True:
            # Each recognizer returns (node-or-str, start, end) or falsey.
            t = text_type(raw_string)
            if not t:
                break
            # an instance of expr was found
            t, start, end = t
            if start:
                tmp.append(raw_string[:start])
            if isinstance(t, str):
                # if we get a string back, add it to text to be parsed
                raw_string = t + raw_string[end:len(raw_string)]
            else:
                if isinstance(t, list):
                    # if we get a list, append its elements
                    tmp.extend(t)
                else:
                    # normal case, an object
                    tmp.append(t)
                raw_string = raw_string[end:len(raw_string)]
        if not tmp:
            return raw_string  # nothing found
        if raw_string:
            tmp.append(raw_string)
        elif len(tmp) == 1:
            # A single node with no trailing text is returned bare.
            return tmp[0]
        return tmp

    def color_text(self, text, types=None):
        """Search the paragraph for each special structure
        """
        if types is None:
            types = self.text_types
        for text_type in types:
            if isinstance(text, str):
                text = self.parse(text, text_type)
            elif isinstance(text, list):  # Waaaa
                result = []
                for s in text:
                    if isinstance(s, str):
                        s = self.parse(s, text_type)
                        if isinstance(s, list):
                            result.extend(s)
                        else:
                            result.append(s)
                    else:
                        # Already a node: recurse into its own texts.
                        s.setColorizableTexts(
                            [self.color_text(t)
                             for t in s.getColorizableTexts()])
                        result.append(s)
                text = result
            else:
                # A node: color each of its texts with this one type only.
                result = []
                color = self.color_text
                for s in text.getColorizableTexts():
                    color(s, (text_type, ))
                    result.append(s)
                text.setColorizableTexts(result)
        return text

    def color_paragraphs(self, raw_paragraphs,
                         type=type,
                         sequence_types=(tuple, list),
                         sts=str):
        # Classify each plain paragraph via paragraph_types, then color
        # the inline structures of the result; recurses into subparagraphs.
        result = []
        for paragraph in raw_paragraphs:
            if paragraph.getNodeName() != 'StructuredTextParagraph':
                result.append(paragraph)
                continue
            for pt in self.paragraph_types:
                if isinstance(pt, sts):
                    # grab the corresponding function
                    pt = getattr(self, pt)
                # evaluate the paragraph
                new_paragraphs = pt(paragraph)
                if new_paragraphs:
                    if not isinstance(new_paragraphs, sequence_types):
                        new_paragraphs = (new_paragraphs, )
                    for paragraph in new_paragraphs:
                        subs = self.color_paragraphs(
                            paragraph.getSubparagraphs()
                        )
                        paragraph.setSubparagraphs(subs)
                    break
            else:
                # copy, retain attributes
                atts = getattr(paragraph, '_attributes', [])
                kw = {att: getattr(paragraph, att) for att in atts}
                subs = self.color_paragraphs(paragraph.getSubparagraphs())
                # Trailing comma: wrap the single paragraph in a 1-tuple.
                new_paragraphs = StructuredTextParagraph(
                    paragraph.getColorizableTexts()[0], subs, **kw),
            # color the inline StructuredText types
            # for each StructuredTextParagraph
            for paragraph in new_paragraphs:
                if paragraph.getNodeName() == "StructuredTextTable":
                    # cells = paragraph.getColumns()
                    # Table cell texts are themselves structured text:
                    # structurize and color them recursively.
                    text = paragraph.getColorizableTexts()
                    text = [structurize(t) for t in text]
                    text = [self(t) for t in text]
                    text = [t.getSubparagraphs() for t in text]
                    paragraph.setColorizableTexts(text)
                paragraph.setColorizableTexts(
                    [self.color_text(t)
                     for t in paragraph.getColorizableTexts()])
                result.append(paragraph)
        return result

    def doc_table(self, paragraph, expr=re.compile(r'\s*\|[-]+\|').match):
        # Recognize an ASCII-art table: rows delimited by |---| / |===|
        # divider lines, columns delimited by | characters.
        text = paragraph.getColorizableTexts()[0]
        m = expr(text)
        subs = paragraph.getSubparagraphs()
        if not m:
            return None
        rows = []
        spans = []
        ROWS = []
        COLS = []
        indexes = []
        ignore = []
        TDdivider = re.compile(r"[\-]+").match
        THdivider = re.compile(r"[\=]+").match
        col = re.compile(r'\|').search
        innertable = re.compile(r'\|([-]+|[=]+)\|').search
        text = text.strip()
        rows = text.split('\n')
        foo = ""
        rows = [x.strip() for x in rows]
        # have indexes store if a row is a divider
        # or a cell part
        for index in range(len(rows)):
            tmpstr = rows[index][1:len(rows[index]) - 1]
            if TDdivider(tmpstr):
                indexes.append("TDdivider")
            elif THdivider(tmpstr):
                indexes.append("THdivider")
            else:
                indexes.append("cell")
        for index in range(len(indexes)):
            if indexes[index] in ("TDdivider", "THdivider"):
                ignore = []  # reset ignore
                # continue # skip dividers
            tmp = rows[index].strip()  # clean the row up
            tmp = tmp[1:-1]  # remove leading + trailing |
            offset = 0
            # find the start and end of inner
            # tables. ignore everything between
            if innertable(tmp):
                tmpstr = tmp.strip()
                while innertable(tmpstr):
                    start, end = innertable(tmpstr).span()
                    if not (start, end - 1) in ignore:
                        ignore.append((start, end - 1))
                    tmpstr = " " + tmpstr[end:]
            # find the location of column dividers
            # NOTE: |'s in inner tables do not count
            # as column dividers
            if col(tmp):
                while col(tmp):
                    bar = 1  # true if start is not in ignore
                    start, end = col(tmp).span()
                    if not start + offset in spans:
                        for s, e in ignore:
                            # NOTE(review): this condition is true for any
                            # position once ``ignore`` is non-empty (s < e,
                            # so every value is >= s or <= e); containment
                            # would be ``and``. Long-standing behavior --
                            # confirm before changing.
                            if start + offset >= s or start + offset <= e:
                                bar = None
                                break
                        if bar:  # start is clean
                            spans.append(start + offset)
                    if not bar:
                        foo += tmp[:end]
                        tmp = tmp[end:]
                        offset += end
                    else:
                        COLS.append((foo + tmp[:start], start + offset))
                        foo = ""
                        tmp = " " + tmp[end:]
                        offset = offset + start
            # Trailing cell after the last | divider.
            if not offset + len(tmp) in spans:
                spans.append(offset + len(tmp))
            COLS.append((foo + tmp, offset + len(tmp)))
            foo = ""
            ROWS.append(COLS)
            COLS = []
        spans.sort()
        ROWS = ROWS[1:]
        # find each column span
        cols = []
        tmp = []
        for row in ROWS:
            for c in row:
                tmp.append(c[1])
            cols.append(tmp)
            tmp = []
        cur = 1
        tmp = []
        C = []
        # NOTE(review): the loop variable shadows the ``col`` search
        # function bound above; harmless here since it is not used again.
        for col in cols:
            for span in spans:
                if span not in col:
                    cur += 1
                else:
                    tmp.append(cur)
                    cur = 1
            C.append(tmp)
            tmp = []
        for index in range(len(C)):
            for i in range(len(C[index])):
                ROWS[index][i] = (ROWS[index][i][0], C[index][i])
        rows = ROWS
        # label things as either TableData or
        # Table header
        TD = []
        TH = []
        all = []
        for index in range(len(indexes)):
            if indexes[index] == "TDdivider":
                TD.append(index)
                all.append(index)
            if indexes[index] == "THdivider":
                TH.append(index)
                all.append(index)
        TD = TD[1:]
        dividers = all[1:]
        # print "TD => ", TD
        # print "TH => ", TH
        # print "all => ", all, "\n"
        for div in dividers:
            if div in TD:
                index = all.index(div)
                for rowindex in range(all[index - 1], all[index]):
                    for i in range(len(rows[rowindex])):
                        rows[rowindex][i] = (rows[rowindex][i][0],
                                             rows[rowindex][i][1],
                                             "td")
            else:
                index = all.index(div)
                for rowindex in range(all[index - 1], all[index]):
                    for i in range(len(rows[rowindex])):
                        rows[rowindex][i] = (rows[rowindex][i][0],
                                             rows[rowindex][i][1],
                                             "th")
        # now munge the multi-line cells together
        # as paragraphs
        ROWS = []
        COLS = []
        for row in rows:
            for index in range(len(row)):
                if not COLS:
                    COLS = list(range(len(row)))
                    COLS = [["", 1, ""] for _ in COLS]
                # for i in range(len(COLS)):
                #     COLS[i] = ["", 1 ,""]
                if TDdivider(row[index][0]) or THdivider(row[index][0]):
                    ROWS.append(COLS)
                    COLS = []
                else:
                    COLS[index][0] = COLS[index][0] + (row[index][0]) + "\n"
                    COLS[index][1] = row[index][1]
                    COLS[index][2] = row[index][2]
        # now that each cell has been munged together,
        # determine the cell's alignment.
        # Default is to center. Also determine the cell's
        # vertical alignment, top, middle, bottom. Default is
        # to middle
        rows = []
        cols = []
        for row in ROWS:
            for index in range(len(row)):
                topindent = 0
                bottomindent = 0
                leftindent = 0
                rightindent = 0
                left = []
                right = []
                text = row[index][0]
                text = text.split('\n')
                text = text[:-1]
                align = ""
                valign = ""
                for t in text:
                    t = t.strip()
                    if not t:
                        topindent += 1
                    else:
                        break
                text.reverse()
                for t in text:
                    t = t.strip()
                    if not t:
                        bottomindent += 1
                    else:
                        break
                text.reverse()
                tmp = '\n'.join(text[topindent:len(text) - bottomindent])
                pars = re.compile(r"\n\s*\n").split(tmp)
                for par in pars:
                    if index > 0:
                        par = par[1:]
                    par = par.split(' ')
                    for p in par:
                        if not p:
                            leftindent += 1
                        else:
                            break
                    left.append(leftindent)
                    leftindent = 0
                    par.reverse()
                    for p in par:
                        if not p:
                            rightindent += 1
                        else:
                            break
                    right.append(rightindent)
                    rightindent = 0
                left.sort()
                right.sort()
                valign = "middle"
                if topindent == bottomindent:
                    valign = "middle"
                elif topindent < 1:
                    valign = "top"
                elif bottomindent < 1:
                    valign = "bottom"
                if left[0] < 1:
                    align = "left"
                elif right[0] < 1:
                    align = "right"
                elif left[0] > 1 and right[0] > 1:
                    align = "center"
                else:
                    align = "left"
                cols.append(
                    (row[index][0], row[index][1],
                     align, valign, row[index][2])
                )
            rows.append(cols)
            cols = []
        # NOTE(review): ``text`` was rebound inside the alignment loop and
        # is the last cell's line list at this point, not the original
        # table source -- long-standing behavior, confirm before changing.
        return StructuredTextTable(rows,
                                   text, subs, indent=paragraph.indent)

    def doc_bullet(self, paragraph, expr=re.compile(r'\s*[-*o]\s+').match):
        # Paragraph starting with -, * or o followed by whitespace.
        top = paragraph.getColorizableTexts()[0]
        m = expr(top)
        if not m:
            return None
        subs = paragraph.getSubparagraphs()
        if top[-2:] == '::':
            # '::' suffix marks the subparagraphs as a literal example.
            subs = [StructuredTextExample(subs)]
            top = top[:-1]
        return StructuredTextBullet(top[m.span()[1]:], subs,
                                    indent=paragraph.indent,
                                    bullet=top[:m.span()[1]])

    def doc_numbered(
            self, paragraph, expr=re.compile(
                r'(\s*[%s]\.)|(\s*[0-9]+\.)|(\s*[0-9]+\s+)' %
                letters).match):
        # This is the old expression. It had a nasty habit
        # of grabbing paragraphs that began with a single
        # letter word even if there was no following period.
        # expr = re.compile('\s*'
        #                   '(([a-zA-Z]|[0-9]+|[ivxlcdmIVXLCDM]+)\.)*'
        #                   '([a-zA-Z]|[0-9]+|[ivxlcdmIVXLCDM]+)\.?'
        #                   '\s+').match):
        top = paragraph.getColorizableTexts()[0]
        m = expr(top)
        if not m:
            return None
        subs = paragraph.getSubparagraphs()
        if top[-2:] == '::':
            subs = [StructuredTextExample(subs)]
            top = top[:-1]
        return StructuredTextNumbered(top[m.span()[1]:], subs,
                                      indent=paragraph.indent,
                                      number=top[:m.span()[1]])

    def doc_description(self, paragraph,
                        delim=re.compile(r'\s+--\s+').search,
                        nb=re.compile(r'[^\000- ]').search):
        # 'title -- body' definition-list style paragraph; the title
        # must be single-line and contain at least one printable char.
        top = paragraph.getColorizableTexts()[0]
        d = delim(top)
        if not d:
            return None
        start, end = d.span()
        title = top[:start]
        if title.find('\n') >= 0:
            return None
        if not nb(title):
            return None
        d = top[start:end]
        top = top[end:]
        subs = paragraph.getSubparagraphs()
        if top[-2:] == '::':
            subs = [StructuredTextExample(subs)]
            top = top[:-1]
        return StructuredTextDescription(
            title, top, subs,
            indent=paragraph.indent,
            delim=d)

    def doc_header(self, paragraph):
        # A single-line paragraph with subparagraphs becomes a section
        # header; a '::' suffix turns the subparagraphs into an example.
        subs = paragraph.getSubparagraphs()
        if not subs:
            return None
        top = paragraph.getColorizableTexts()[0]
        if not top.strip():
            return None
        if top[-2:] == '::':
            subs = StructuredTextExample(subs)
            if top.strip() == '::':
                # Bare '::' line: only the example remains.
                return subs
            # copy attrs when returning a paragraph
            kw = {}
            atts = getattr(paragraph, '_attributes', [])
            for att in atts:
                kw[att] = getattr(paragraph, att)
            return StructuredTextParagraph(top[:-1], [subs], **kw)
        if top.find('\n') >= 0:
            return None
        return StructuredTextSection(top, subs, indent=paragraph.indent)

    def doc_literal(self,
                    s,
                    expr=re.compile(
                        r"(\W+|^)'([\w%s\s]+)'([%s]+|$)"
                        % (literal_punc, phrase_delimiters),
                        re.UNICODE).search):
        # 'literal text' in single quotes; returns (node, start, end).
        r = expr(s)
        if r:
            start, end = r.span(2)
            return (StructuredTextLiteral(s[start:end]), start - 1, end + 1)

    def doc_emphasize(self,
                      s,
                      expr=re.compile(r'\*([\w%s\s]+?)\*'
                                      % (strongem_punc), re.UNICODE).search):
        # *emphasized* text.
        r = expr(s)
        if r:
            start, end = r.span(1)
            return (StructuredTextEmphasis(s[start:end]),
                    start - 1, end + 1)

    def doc_inner_link(self,
                       s,
                       expr1=re.compile(r"\.\.\s*").search,
                       expr2=re.compile(r"\[[\w]+\]", re.UNICODE).search):
        # [ref] occurrences that are not '.. [name]' definitions.
        # make sure we dont grab a named link
        if expr2(s) and expr1(s):
            start1, end1 = expr1(s).span()
            start2, end2 = expr2(s).span()
            if end1 == start2:
                # uh-oh, looks like a named link
                return None
            # the .. is somewhere else, ignore it
            return (StructuredTextInnerLink(s[start2 + 1:end2 - 1]),
                    start2, end2)
        elif expr2(s) and not expr1(s):
            start, end = expr2(s).span()
            return (StructuredTextInnerLink(s[start + 1:end - 1]),
                    start, end)

    def doc_named_link(self,
                       s,
                       expr=re.compile(r"(\.\.\s)(\[[\w]+\])",
                                       re.UNICODE).search):
        # '.. [name]' link-target definition.
        result = expr(s)
        if result:
            start, end = result.span(2)
            # NOTE(review): 'str' shadows the builtin here.
            str = s[start + 1:end - 1]
            st, en = result.span()
            return (StructuredTextNamedLink(str), st, en)

    def doc_underline(self,
                      s,
                      expr=re.compile(r'_([\w%s\s]+)_([\s%s]|$)'
                                      % (under_punc, phrase_delimiters),
                                      re.UNICODE).search):
        # _underlined_ text; a leading double underscore is rejected.
        result = expr(s)
        if result:
            if result.group(1)[:1] == '_':
                return None  # no double unders
            start, end = result.span(1)
            st, e = result.span()
            return (StructuredTextUnderline(s[start:end]),
                    st, e - len(result.group(2)))

    def doc_strong(self,
                   s,
                   expr=re.compile(r'\*\*([\w%s\s]+?)\*\*'
                                   % (strongem_punc), re.UNICODE).search):
        # **strong** text.
        r = expr(s)
        if r:
            start, end = r.span(1)
            return (StructuredTextStrong(s[start:end]),
                    start - 2, end + 2)

    # Some constants to make the doc_href() regex easier to read.
    # ## double quoted text
    _DQUOTEDTEXT = r'("[ \w\n\r%s]+")' % (dbl_quoted_punc)
    _ABSOLUTE_URL = (r'((http|https|ftp|mailto|file|about)[:/]'
                     r'+?[\w\@\.\,\?\!\/\:\;\-\#\~\=\&\%%\+]+)')
    _ABS_AND_RELATIVE_URL = r'([\w\@\.\,\?\!\/\:\;\-\#\~\=\&\%%\+]+)'
    _SPACES = r'(\s*)'

    def doc_href1(self,
                  s,
                  expr=re.compile(_DQUOTEDTEXT
                                  + "(:)"
                                  + _ABS_AND_RELATIVE_URL
                                  + _SPACES, re.UNICODE).search):
        # "title":url style link.
        return self.doc_href(s, expr)

    def doc_href2(self,
                  s,
                  expr=re.compile(_DQUOTEDTEXT
                                  + r'(\,\s+)'
                                  + _ABSOLUTE_URL
                                  + _SPACES, re.UNICODE).search):
        # "title", url style link (absolute URLs only).
        return self.doc_href(s, expr)

    def doc_href(self,
                 s,
                 expr,
                 punctuation=re.compile(r"[\,\.\?\!\;]+", re.UNICODE).match):
        # Shared worker for doc_href1/doc_href2.
        r = expr(s)
        if r:
            # need to grab the href part and the
            # beginning part
            start, e = r.span(1)
            name = s[start:e]
            name = name.replace('"', '', 2)
            # start = start + 1
            st, end = r.span(3)
            if punctuation(s[end - 1:end]):
                # Trailing sentence punctuation is not part of the URL.
                end = end - 1
            link = s[st:end]
            # end = end - 1
            # name is the href title, link is the target
            # of the href
            return (StructuredTextLink(name, href=link),
                    start, end)

    def doc_sgml(self,
                 s,
                 expr=re.compile(r"\<[\w\.\=\'\"\:\/\-\#\+\s\*]+\>",
                                 re.UNICODE).search):
        """SGML text is ignored and outputed as-is
        """
        r = expr(s)
        if r:
            start, end = r.span()
            text = s[start:end]
            return (StructuredTextSGML(text),
                    start, end)

    def doc_xref(self,
                 s,
                 expr=re.compile(r'\[([\w\-.:/;,\n\r\~]+)\]',
                                 re.UNICODE).search):
        # [reference] cross reference.
        r = expr(s)
        if r:
            start, end = r.span(1)
            return (StructuredTextXref(s[start:end]),
                    start - 1, end + 1)
class DocumentWithImages(Document):
    """Document parser that additionally recognizes image expressions."""

    # Try images before all inherited inline recognizers.
    text_types = [
        'doc_img',
    ] + Document.text_types

    def doc_img(self,
                s,
                expr=re.compile(Document._DQUOTEDTEXT
                                + ":img:"
                                + Document._ABS_AND_RELATIVE_URL,
                                re.UNICODE).search):
        # "alt text":img:url  -> image node; returns (node, start, end).
        match = expr(s)
        if match is None:
            return None
        alt_start, alt_end = match.span(1)
        href_start, href_end = match.span(2)
        start, end = match.span()
        node = StructuredTextImage(s[alt_start + 1:alt_end - 1],
                                   href=s[href_start:href_end])
        return (node, start, end)
return None | zope.structuredtext | /zope.structuredtext-5.0-py3-none-any.whl/zope/structuredtext/document.py | document.py |
class DocBook:
    """ Structured text document renderer for Docbook.

    Walks a StructuredText DOM (nodes exposing ``getNodeName``,
    ``getNodeValue``, ``getChildNodes``, ``getPreviousSibling`` and
    ``getNextSibling``) and emits DocBook 4.1 markup through an
    ``output`` callable (typically ``list.append``).
    """
    # Maps a StructuredText DOM node name to the name of the method on
    # this class that renders that node type; ``dispatch`` does the lookup.
    element_types = {
        '#text': '_text',
        'StructuredTextDocument': 'document',
        'StructuredTextParagraph': 'paragraph',
        'StructuredTextExample': 'example',
        'StructuredTextBullet': 'bullet',
        'StructuredTextNumbered': 'numbered',
        'StructuredTextDescription': 'description',
        'StructuredTextDescriptionTitle': 'descriptionTitle',
        'StructuredTextDescriptionBody': 'descriptionBody',
        'StructuredTextSection': 'section',
        'StructuredTextSectionTitle': 'sectionTitle',
        'StructuredTextLiteral': 'literal',
        'StructuredTextEmphasis': 'emphasis',
        'StructuredTextStrong': 'strong',
        'StructuredTextUnderline': 'underline',
        'StructuredTextLink': 'link',
        'StructuredTextInnerLink': 'innerLink',
        'StructuredTextNamedLink': 'namedLink',
        'StructuredTextXref': 'xref',
        'StructuredTextSGML': 'sgml',
    }
    def dispatch(self, doc, level, output):
        """Route *doc* to the renderer method registered for its node type."""
        getattr(self, self.element_types[doc.getNodeName()]
                )(doc, level, output)
    def __call__(self, doc, level=1):
        """Render *doc* and return the resulting DocBook markup as a string."""
        r = []
        self.dispatch(doc, level - 1, r.append)
        return ''.join(r)
    def _text(self, doc, level, output):
        """Emit a text node; literal nodes keep leading whitespace."""
        if doc.getNodeName() == 'StructuredTextLiteral':
            output(doc.getNodeValue())
        else:
            output(doc.getNodeValue().lstrip())
    def document(self, doc, level, output):
        """Render the document root as a complete DocBook <book>."""
        output('<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook V4.1//EN">\n')
        output('<book>\n')
        children = doc.getChildNodes()
        # If the document starts with a section, reuse that section's title
        # as the book title as well.
        if (children
                and children[0].getNodeName() == 'StructuredTextSection'):
            output('<title>%s</title>'
                   % children[0].getChildNodes()[0].getNodeValue())
        for c in children:
            getattr(self, self.element_types[c.getNodeName()]
                    )(c, level, output)
        output('</book>\n')
    def section(self, doc, level, output):
        """Render a section and recurse into its children one level deeper."""
        output('\n<section>\n')
        children = doc.getChildNodes()
        for c in children:
            getattr(self, self.element_types[c.getNodeName()]
                    )(c, level + 1, output)
        output('\n</section>\n')
    def sectionTitle(self, doc, level, output):
        """Render a section's <title>, ignoring children that fail to render."""
        output('<title>')
        for c in doc.getChildNodes():
            try:
                getattr(self, self.element_types[c.getNodeName()]
                        )(c, level, output)
            except Exception:  # pragma: no cover
                print("failed", c.getNodeName(), c)
        output('</title>\n')
    def description(self, doc, level, output):
        """Render a description item; wrap a run of them in <variablelist>."""
        # Open the list only at the first description node of a sibling run.
        p = doc.getPreviousSibling()
        if p is None or p.getNodeName() != doc.getNodeName():
            output('<variablelist>\n')
        for c in doc.getChildNodes():
            getattr(self, self.element_types[c.getNodeName()]
                    )(c, level, output)
        # Close the list only after the last description node of the run.
        n = doc.getNextSibling()
        if n is None or n.getNodeName() != doc.getNodeName():
            output('</variablelist>\n')
    def descriptionTitle(self, doc, level, output):
        """Open a <varlistentry>; the entry is closed by descriptionBody."""
        output('<varlistentry><term>\n')
        for c in doc.getChildNodes():
            getattr(self, self.element_types[c.getNodeName()]
                    )(c, level, output)
        output('</term>\n')
    def descriptionBody(self, doc, level, output):
        """Render the body and close the <varlistentry> opened by the title."""
        output('<listitem><para>\n')
        for c in doc.getChildNodes():
            getattr(self, self.element_types[c.getNodeName()]
                    )(c, level, output)
        output('</para></listitem>\n')
        output('</varlistentry>\n')
    def _list(self, doc, level, output, list_tag):
        """Render one list item; open/close *list_tag* around the sibling run."""
        p = doc.getPreviousSibling()
        if p is None or p.getNodeName() != doc.getNodeName():
            output('<' + list_tag + '>\n')
        output('<listitem><para>\n')
        for c in doc.getChildNodes():
            getattr(self, self.element_types[c.getNodeName()]
                    )(c, level, output)
        n = doc.getNextSibling()
        output('</para></listitem>\n')
        if n is None or n.getNodeName() != doc.getNodeName():
            output('</' + list_tag + '>\n')
    def bullet(self, doc, level, output):
        """Render a bulleted item inside an <itemizedlist>."""
        self._list(doc, level, output, 'itemizedlist')
    def numbered(self, doc, level, output):
        """Render a numbered item inside an <orderedlist>."""
        self._list(doc, level, output, 'orderedlist')
    def example(self, doc, level, output):
        """Render example text as a CDATA <programlisting>."""
        for c in doc.getChildNodes():
            output('<programlisting>\n<![CDATA[\n')
            ##
            # eek. A ']]>' in your body will break this...
            ##
            output(prestrip(c.getNodeValue()))
            output('\n]]></programlisting>\n')
    def paragraph(self, doc, level, output):
        """Render a paragraph as <para>."""
        output('<para>\n\n')
        for c in doc.getChildNodes():
            getattr(self, self.element_types[c.getNodeName()])(
                c, level, output)
        output('</para>\n\n')
    def link(self, doc, level, output):
        """Render an external link as <ulink url="...">."""
        output('<ulink url="%s">' % doc.href)
        for c in doc.getChildNodes():
            getattr(self, self.element_types[c.getNodeName()]
                    )(c, level, output)
        output('</ulink>')
    def innerLink(self, doc, level, output):
        """Render a document-internal link; children give the target name."""
        # NOTE(review): DocBook's ulink element takes a ``url`` attribute
        # (as ``link`` above correctly emits); ``href`` here looks wrong —
        # confirm against the DocBook DTD before changing the output.
        output('<ulink href="#ref')
        # Children are emitted twice: once inside the target fragment id...
        for c in doc.getChildNodes():
            getattr(self, self.element_types[c.getNodeName()]
                    )(c, level, output)
        output('">[')
        # ...and once as the bracketed visible link text.
        for c in doc.getChildNodes():
            getattr(self, self.element_types[c.getNodeName()]
                    )(c, level, output)
        output(']</ulink>')
    def namedLink(self, doc, level, output):
        """Render a link target as an <anchor>, children forming the id."""
        output('<anchor id="ref')
        for c in doc.getChildNodes():
            getattr(self, self.element_types[c.getNodeName()]
                    )(c, level, output)
        output('"/>[')
        for c in doc.getChildNodes():
            getattr(self, self.element_types[c.getNodeName()]
                    )(c, level, output)
        output(']')
    def _emphasis(self, doc, level, output, role):
        """Render children inside <emphasis Role="...">."""
        output('<emphasis Role="%s">' % role)
        for c in doc.getChildNodes():
            getattr(self, self.element_types[c.getNodeName()]
                    )(c, level, output)
        output('</emphasis> ')
    def emphasis(self, doc, level, output):
        self._emphasis(doc, level, output, 'emphasis')
    def strong(self, doc, level, output):
        self._emphasis(doc, level, output, 'strong')
    def underline(self, doc, level, output):
        self._emphasis(doc, level, output, 'underline')
    def literal(self, doc, level, output):
        """Render children verbatim inside <literal>."""
        output('<literal>')
        for c in doc.getChildNodes():
            output(c.getNodeValue())
        output('</literal>')
    def xref(self, doc, level, output):
        """Render a cross-reference as an empty <xref linkend="..."/>."""
        output('<xref linkend="%s"/>' % doc.getNodeValue())
    def sgml(self, doc, level, output):
        """Pass raw SGML through unchanged."""
        output(doc.getNodeValue())
def prestrip(v):
    """Normalize line endings and tabs, then strip the common left margin.

    CR/LF and bare CR become LF, each tab becomes a single space, and the
    smallest leading-space count found on any non-empty line is removed
    from every line.
    """
    text = v.replace('\r\n', '\n').replace('\r', '\n').replace('\t', ' ')
    lines = text.split('\n')
    # Start from the first line's full length (matches: any non-empty line
    # can only shrink the margin from there).
    margin = len(lines[0])
    for line in lines:
        if line:
            margin = min(margin, len(line) - len(line.lstrip()))
    return '\n'.join(line[margin:] for line in lines)
class DocBookChapter(DocBook):
    """Renderer that emits a single DocBook <chapter> instead of a <book>."""

    def document(self, doc, level, output):
        """Render the document root as a <chapter>.

        When the first child is a section, its title becomes the chapter
        title and the section's remaining children are rendered directly;
        otherwise only the empty chapter wrapper is emitted.
        """
        output('<chapter>\n')
        kids = doc.getChildNodes()
        if kids and kids[0].getNodeName() == 'StructuredTextSection':
            section_kids = kids[0].getChildNodes()
            output('<title>%s</title>' % section_kids[0].getNodeValue())
            for node in section_kids[1:]:
                handler = getattr(self, self.element_types[node.getNodeName()])
                handler(node, level, output)
        output('</chapter>\n')
class DocBookChapterWithFigures(DocBookChapter):
    """Chapter renderer that additionally turns image nodes into <figure>s."""

    # Extend the inherited dispatch table with the image node type.
    element_types = DocBook.element_types.copy()
    element_types.update({'StructuredTextImage': 'image'})

    def image(self, doc, level, output):
        """Render an image node as a <figure> with caption and <graphic>."""
        caption = doc.getNodeValue()
        if hasattr(doc, 'key'):
            output('<figure id="%s"><title>%s</title>\n' % (doc.key, caption))
        else:
            output('<figure><title>%s</title>\n' % caption)
        output('<graphic fileref="%s"></graphic>\n</figure>\n' % doc.href)
class DocBookArticle(DocBook):
    """Renderer that emits a DocBook <article> document."""

    def document(self, doc, level, output):
        """Render the document root as an <article>.

        A leading section's title is hoisted into <articleinfo>; all
        children (including that section) are then rendered in order.
        """
        output('<!DOCTYPE article PUBLIC "-//OASIS//DTD DocBook V4.1//EN">\n')
        output('<article>\n')
        kids = doc.getChildNodes()
        if kids and kids[0].getNodeName() == 'StructuredTextSection':
            title = kids[0].getChildNodes()[0].getNodeValue()
            output('<articleinfo>\n<title>%s</title>\n</articleinfo>\n'
                   % title)
        for node in kids:
            handler = getattr(self, self.element_types[node.getNodeName()])
            handler(node, level, output)
        output('</article>\n')
class DocBookBook:
    """Container that stitches pre-rendered chapter strings into a book.

    Chapters are plain DocBook markup strings (e.g. produced by
    ``DocBookChapter``); ``read`` wraps them in a <book> document.
    """

    def __init__(self, title=''):
        # Book title, interpolated into the <title> element verbatim.
        self.title = title
        self.chapters = []

    def addChapter(self, chapter):
        """Append an already-rendered chapter string to the book."""
        self.chapters.append(chapter)

    def read(self):
        """Return the complete DocBook source for the book as a string."""
        # Build via a parts list + join rather than repeated string
        # concatenation (the original was quadratic in chapter count).
        parts = [
            '<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook V4.1//EN">\n'
            '<book>\n',
            '<title>%s</title>\n' % self.title,
        ]
        for chapter in self.chapters:
            parts.append(chapter + '\n')
        parts.append('\n</book>\n')
        return ''.join(parts)

    def __str__(self):
        return self.read()
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.