code
stringlengths 26
870k
| docstring
stringlengths 1
65.6k
| func_name
stringlengths 1
194
| language
stringclasses 1
value | repo
stringlengths 8
68
| path
stringlengths 5
194
| url
stringlengths 46
254
| license
stringclasses 4
values |
---|---|---|---|---|---|---|---|
def get_node_name(name: str) -> str:
    """Guess the SQL command text for a parser node name.

    For example, ``CreateTableStmt`` becomes ``CREATE TABLE``.
    """
    stripped = name.removesuffix('Stmt').removesuffix('Expr')
    # Insert a space before every capital letter except a leading one,
    # splitting the CamelCase name into words.
    spaced = re.sub(r'(?<!^)(?=[A-Z])', ' ', stripped)
    return spaced.upper()
def code(self) -> str:
    """Render the CREATE TRIGGER DDL statement for this trigger."""
    trg = self.trigger
    constr = 'CONSTRAINT ' if trg.is_constraint else ''
    deferred = 'DEFERRABLE INITIALLY DEFERRED' if trg.deferred else ''
    condition = f'WHEN ({trg.condition})' if trg.condition else ''
    template = textwrap.dedent(
        '''\
        CREATE {constr}TRIGGER {trigger_name} {timing} {events}
        ON {table_name}
        {deferred}
        FOR EACH {granularity} {condition}
        EXECUTE PROCEDURE {procedure}
        '''
    )
    return template.format(
        constr=constr,
        trigger_name=qi(trg.name),
        timing=trg.timing,
        events=' OR '.join(trg.events),
        table_name=qn(*trg.table_name),
        deferred=deferred,
        granularity=trg.granularity,
        condition=condition,
        procedure=f'{qn(*trg.procedure)}()',
    )
def encode_value(val: Any) -> str:
    """Encode value into an appropriate SQL expression.

    The checks below form an ordered chain: an object with its own
    ``to_sql_expr`` wins, then tuples/Structs become ROW(...), other
    containers become ARRAY[...], None becomes NULL, non-numeric values
    are quoted as string literals, and numbers are rendered verbatim.
    """
    if hasattr(val, 'to_sql_expr'):
        return val.to_sql_expr()
    if isinstance(val, tuple):
        parts = ', '.join(encode_value(el) for el in val)
        return f'ROW({parts})'
    if isinstance(val, struct.Struct):
        parts = ', '.join(encode_value(el) for el in val.as_tuple())
        return f'ROW({parts})'
    if typeutils.is_container(val):
        parts = ', '.join(encode_value(el) for el in val)
        return f'ARRAY[{parts}]'
    if val is None:
        return 'NULL'
    if not isinstance(val, numbers.Number):
        return ql(str(val))
    if isinstance(val, int):
        # int() normalizes bool to 0/1 before stringification.
        return str(int(val))
    return str(val)
def compile_grouping_atom(
    el: qlast.GroupingAtom,
    stmt: irast.GroupStmt, *, ctx: context.CompilerContextLevel
) -> pgast.Base:
    '''Compile a GroupingAtom into sql grouping sets'''
    if isinstance(el, qlast.GroupingIdentList):
        # A parenthesized list of atoms compiles to a nested
        # grouping operation over its parts.
        parts = [
            compile_grouping_atom(sub, stmt, ctx=ctx)
            for sub in el.elements
        ]
        return pgast.GroupingOperation(args=parts)

    # A bare reference: look up the aliased set it was desugared to
    # and emit its value var.
    assert isinstance(el, qlast.ObjectRef)
    alias_set, _ = stmt.using[el.name]
    return pathctx.get_path_value_var(
        ctx.rel, alias_set.path_id, env=ctx.env)
def compile_grouping_el(
    el: qlast.GroupingElement,
    stmt: irast.GroupStmt, *, ctx: context.CompilerContextLevel
) -> pgast.Base:
    '''Compile a GroupingElement into sql grouping sets'''
    if isinstance(el, qlast.GroupingSets):
        nested = [compile_grouping_el(s, stmt, ctx=ctx) for s in el.sets]
        return pgast.GroupingOperation(
            operation='GROUPING SETS',
            args=nested,
        )
    if isinstance(el, qlast.GroupingOperation):
        # ROLLUP/CUBE/etc: compile each atom and keep the operation name.
        atoms = [
            compile_grouping_atom(a, stmt, ctx=ctx) for a in el.elements
        ]
        return pgast.GroupingOperation(operation=el.oper, args=atoms)
    if isinstance(el, qlast.GroupingSimple):
        return compile_grouping_atom(el.element, stmt, ctx=ctx)
    raise AssertionError('Unknown GroupingElement')
def _compile_grouping_value(
    stmt: irast.GroupStmt, used_args: AbstractSet[str], *,
    ctx: context.CompilerContextLevel) -> pgast.BaseExpr:
    '''Produce the value for the grouping binding saying what is grouped on

    Returns a SQL expression that evaluates to an array of the key names
    (as produced by ``desugar_group.key_name``) that are actually grouped
    on in the current grouping set.

    :param stmt: the GROUP statement IR; must have a grouping_binding.
    :param used_args: the subset of ``stmt.using`` keys that are referenced.
    '''
    assert stmt.grouping_binding
    grouprel = ctx.rel

    # If there is only one grouping set, hardcode the output
    if all(isinstance(b, qlast.GroupingSimple) for b in stmt.by):
        return pgast.ArrayExpr(
            elements=[
                pgast.StringConstant(val=desugar_group.key_name(arg))
                for arg in used_args
            ],
        )

    using = {k: stmt.using[k] for k in used_args}

    # The value vars for each grouped-on alias, in `using` order.
    args = [
        pathctx.get_path_var(
            grouprel,
            alias_set.path_id,
            aspect=pgce.PathAspect.VALUE,
            env=ctx.env,
        )
        for alias_set, _ in using.values()
    ]

    # Call grouping on each element we group on to produce a bitmask
    grouping_alias = ctx.env.aliases.get('g')
    grouping_call = pgast.FuncCall(name=('grouping',), args=args)
    subq = pgast.SelectStmt(
        target_list=[
            pgast.ResTarget(name=grouping_alias, val=grouping_call),
        ]
    )
    q = pgast.SelectStmt(
        from_clause=[pgast.RangeSubselect(
            subquery=subq,
            alias=pgast.Alias(aliasname=ctx.env.aliases.get())
        )]
    )
    grouping_ref = pgast.ColumnRef(name=(grouping_alias,))

    # Generate a call to ARRAY[...] with a case for each grouping
    # element, then array_remove out the NULLs.
    els: List[pgast.BaseExpr] = []
    for i, name in enumerate(using):
        name = desugar_group.key_name(name)
        # GROUPING() sets the bit when the argument is *not* grouped on;
        # the i-th argument corresponds to the i-th most significant bit.
        mask = 1 << (len(using) - i - 1)
        # (CASE (e & <mask>) WHEN 0 THEN '<name>' ELSE NULL END)
        els.append(pgast.CaseExpr(
            arg=pgast.Expr(
                name='&',
                lexpr=grouping_ref,
                rexpr=pgast.LiteralExpr(expr=str(mask))
            ),
            args=[
                pgast.CaseWhen(
                    expr=pgast.LiteralExpr(expr='0'),
                    result=pgast.StringConstant(val=name)
                )
            ],
            defresult=pgast.NullConstant()
        ))

    val = pgast.FuncCall(
        name=('array_remove',),
        args=[pgast.ArrayExpr(elements=els), pgast.NullConstant()]
    )
    q.target_list.append(pgast.ResTarget(val=val))
    return q
def get_path_var(
    rel: pgast.Query,
    path_id: irast.PathId,
    *,
    flavor: str='normal',
    aspect: pgce.PathAspect,
    env: context.Environment,
) -> pgast.BaseExpr:
    """
    Return a value expression for a given *path_id* in a given *rel*.

    This function is a part of "recursive column injection" algorithm,
    described in [./ARCHITECTURE.md].

    :param flavor: 'normal' or 'packed'; selects which path namespace
        the var is looked up (and cached) in.
    :raises LookupError: if no range var can produce the path.
    """
    if isinstance(rel, pgast.CommonTableExpr):
        rel = rel.query

    # Check the namespace cache first; 'normal' flavor additionally
    # applies the rel's view path_id remapping.
    if flavor == 'normal':
        if rel.view_path_id_map:
            path_id = map_path_id(path_id, rel.view_path_id_map)
        if (path_id, aspect) in rel.path_namespace:
            return rel.path_namespace[path_id, aspect]
    elif flavor == 'packed':
        if (
            rel.packed_path_namespace
            and (path_id, aspect) in rel.packed_path_namespace
        ):
            return rel.packed_path_namespace[path_id, aspect]

    # Set operations (UNION etc.) need per-branch handling.
    if astutils.is_set_op_query(rel):
        return _get_path_var_in_setop(
            rel, path_id, aspect=aspect, flavor=flavor, env=env)

    ptrref = path_id.rptr()
    ptrref_dir = path_id.rptr_dir()
    is_type_intersection = path_id.is_type_intersection_path()
    src_path_id: Optional[irast.PathId] = None

    if ptrref is not None and not is_type_intersection:
        ptr_info = pg_types.get_ptrref_storage_info(
            ptrref, resolve_type=False, link_bias=False, allow_missing=True)
        ptr_dir = path_id.rptr_dir()
        is_inbound = ptr_dir == s_pointers.PointerDirection.Inbound
        if is_inbound:
            src_path_id = path_id
        else:
            src_path_id = path_id.src_path()
            assert src_path_id is not None
            src_rptr = src_path_id.rptr()

            if (
                irtyputils.is_id_ptrref(ptrref)
                and (
                    src_rptr is None
                    or ptrref_dir is not s_pointers.PointerDirection.Inbound
                )
            ):
                # When there is a reference to the id property of
                # an object which is linked to by a link stored
                # inline, we want to route the reference to the
                # inline attribute.  For example,
                # Foo.__type__.id gets resolved to the Foo.__type__
                # column.  This can only be done if Foo is visible
                # in scope, and Foo.__type__ is not a computable.
                pid = src_path_id
                while pid.is_type_intersection_path():
                    # Skip type intersection step(s).
                    src_pid = pid.src_path()
                    if src_pid is not None:
                        src_rptr = src_pid.rptr()
                        pid = src_pid
                    else:
                        break

                if (src_rptr is not None
                        and not irtyputils.is_computable_ptrref(src_rptr)
                        and env.ptrref_source_visibility.get(src_rptr)):
                    src_ptr_info = pg_types.get_ptrref_storage_info(
                        src_rptr, resolve_type=False, link_bias=False,
                        allow_missing=True)
                    if (src_ptr_info
                            and src_ptr_info.table_type == 'ObjectType'):
                        # Route the `.id` reference to the inline link
                        # column on the source object table.
                        src_path_id = src_path_id.src_path()
                        ptr_info = src_ptr_info
    else:
        ptr_info = None
        ptr_dir = None

    var: Optional[pgast.BaseExpr]

    if ptrref is None:
        if len(path_id) == 1:
            # This is a scalar set derived from an expression.
            src_path_id = path_id
    elif ptrref.source_ptr is not None:
        if ptr_info and ptr_info.table_type != 'link' and not is_inbound:
            # This is a link prop that is stored in source rel,
            # step back to link source rvar.
            _prefix_pid = path_id.src_path()
            assert _prefix_pid is not None
            src_path_id = _prefix_pid.src_path()
    elif is_type_intersection:
        src_path_id = path_id

    assert src_path_id is not None

    # Find which rvar will have path_id as an output
    src_aspect, rel_rvar, found_path_var = _find_rel_rvar(
        rel, path_id, src_path_id, aspect=aspect, flavor=flavor)

    if found_path_var:
        return found_path_var

    # Slight hack: Inject the __type__ field of a FreeObject when necessary
    if (
        rel_rvar is None
        and ptrref
        and ptrref.shortname.name == '__type__'
        and irtyputils.is_free_object(src_path_id.target)
    ):
        return astutils.compile_typeref(src_path_id.target.real_material_type)

    if isinstance(rel_rvar, pgast.DynamicRangeVar):
        # Dynamic rvars compute their paths via a callback, which may
        # produce a var directly, redirect to another rvar, or fail.
        var = rel_rvar.dynamic_get_path(
            rel, path_id, flavor=flavor, aspect=aspect, env=env)
        if isinstance(var, pgast.PathRangeVar):
            rel_rvar = var
        elif var:
            put_path_var(rel, path_id, var, aspect=aspect, flavor=flavor)
            return var
        else:
            rel_rvar = None

    if rel_rvar is None:
        raise LookupError(
            f'there is no range var for '
            f'{src_path_id} {src_aspect} in {rel}')

    if isinstance(rel_rvar, pgast.IntersectionRangeVar):
        if (
            (path_id.is_objtype_path() and src_path_id == path_id)
            or (ptrref is not None and irtyputils.is_id_ptrref(ptrref))
        ):
            # Object id is available on every component; use the last one.
            rel_rvar = rel_rvar.component_rvars[-1]
        else:
            # Intersection rvars are basically JOINs of the relevant
            # parts of the type intersection, and so we need to make
            # sure we pick the correct component relation of that JOIN.
            rel_rvar = _find_rvar_in_intersection_by_typeref(
                path_id,
                rel_rvar.component_rvars,
            )

    source_rel = rel_rvar.query

    # Recurse into the source relation and surface its output through
    # the chosen range var, caching the result on *rel*.
    outvar = get_path_output(
        source_rel, path_id, aspect=aspect, flavor=flavor, env=env)

    var = astutils.get_rvar_var(rel_rvar, outvar)
    put_path_var(rel, path_id, var, aspect=aspect, flavor=flavor)

    if isinstance(var, pgast.TupleVar):
        # Cache each tuple element as well, so element references
        # don't have to repeat the whole lookup.
        for element in var.elements:
            put_path_var_if_not_exists(
                rel, element.path_id, element.val, flavor=flavor,
                aspect=aspect)

    return var
def _find_rel_rvar(
    rel: pgast.Query,
    path_id: irast.PathId,
    src_path_id: irast.PathId,
    *,
    aspect: pgce.PathAspect,
    flavor: str,
) -> Tuple[str, Optional[pgast.PathRangeVar], Optional[pgast.BaseExpr]]:
    """Rummage around rel looking for an appropriate rvar for path_id.

    Somewhat unfortunately, some checks to find the actual path var
    (in a particular tuple case) need to occur in the middle of the
    rvar rel search, so we can also find the actual path var in passing.

    Returns a triple of (source aspect used, rvar or None, path var or
    None); at most one of the last two is non-None.
    """
    src_aspect = aspect
    # Most specific lookup first: the path itself under the requested
    # aspect.
    rel_rvar = maybe_get_path_rvar(rel, path_id, aspect=aspect, flavor=flavor)

    if rel_rvar is None:
        # Retry under a less specific aspect, if one exists.
        alt_aspect = get_less_specific_aspect(path_id, aspect)
        if alt_aspect is not None:
            rel_rvar = maybe_get_path_rvar(rel, path_id, aspect=alt_aspect)
    else:
        alt_aspect = None

    if rel_rvar is None:
        # Fall back to looking for the *source* path.
        if flavor == 'packed':
            src_aspect = aspect
        elif src_path_id.is_objtype_path():
            src_aspect = pgce.PathAspect.SOURCE
        else:
            src_aspect = aspect

        if src_path_id.is_tuple_path():
            if src_aspect == pgce.PathAspect.IDENTITY:
                src_aspect = pgce.PathAspect.VALUE

            # The tuple may already be present as a whole output;
            # if so, indirect into it and return the var directly.
            if var := _find_in_output_tuple(rel, path_id, src_aspect):
                return src_aspect, None, var

            rel_rvar = maybe_get_path_rvar(
                rel, src_path_id, aspect=src_aspect)

            if rel_rvar is None:
                # Try one more step up the path.
                _src_path_id_prefix = src_path_id.src_path()
                if _src_path_id_prefix is not None:
                    rel_rvar = maybe_get_path_rvar(
                        rel, _src_path_id_prefix, aspect=src_aspect)
        else:
            rel_rvar = maybe_get_path_rvar(
                rel, src_path_id, aspect=src_aspect)

        if (
            rel_rvar is None
            and src_aspect != pgce.PathAspect.SOURCE
            and path_id != src_path_id
        ):
            # Last resort: the source path under the SOURCE aspect.
            rel_rvar = maybe_get_path_rvar(
                rel,
                src_path_id,
                aspect=pgce.PathAspect.SOURCE
            )

    if rel_rvar is None and alt_aspect is not None and flavor == 'normal':
        # There is no source range var for the requested aspect,
        # check if there is a cached var with less specificity.
        var = rel.path_namespace.get((path_id, alt_aspect))
        if var is not None:
            put_path_var(rel, path_id, var, aspect=aspect, flavor=flavor)
            return src_aspect, None, var

    return src_aspect, rel_rvar, None
def _find_in_output_tuple(
    rel: pgast.Query, path_id: irast.PathId, aspect: pgce.PathAspect
) -> Optional[pgast.BaseExpr]:
    """Try indirecting a source tuple already present as an output.

    Normally tuple indirections are handled by
    process_set_as_tuple_indirection, but UNIONing an explicit tuple with a
    tuple coming from a base relation (like `(Foo.bar UNION (1,2)).0`)
    can lead to us looking for a tuple path in relations that only have
    the actual full tuple.
    (See test_edgeql_coalesce_tuple_{08,09}).

    We handle this by checking whether some prefix of the tuple path
    is present in the path_outputs.

    This is sufficient because the relevant cases are all caused by
    set ops, and the "fixup" done in set op cases ensures that the
    tuple will be already present.
    """
    # Walk up the path, collecting the tuple-indirection steps taken,
    # until a prefix is found in the rel's path namespace.
    steps = []
    src_path_id = path_id.src_path()
    ptrref = path_id.rptr()
    while (
        src_path_id
        and src_path_id.is_tuple_path()
        and isinstance(ptrref, irast.TupleIndirectionPointerRef)
    ):
        steps.append((ptrref.shortname.name, src_path_id))

        if (
            (var := rel.path_namespace.get((src_path_id, aspect)))
            and not isinstance(var, pgast.TupleVarBase)
        ):
            # Found a materialized prefix: replay the recorded
            # indirections (outermost first) to get the element.
            for name, src in reversed(steps):
                var = astutils.tuple_getattr(var, src.target, name)
            put_path_var(rel, path_id, var, aspect=aspect)
            return var

        ptrref = src_path_id.rptr()
        src_path_id = src_path_id.src_path()

    return None
def put_path_bond(
stmt: pgast.BaseRelation, path_id: irast.PathId, iterator: bool=False
) -> None:
'''Register a path id that should be joined on when joining stmt
iterator indicates whether the identity or iterator aspect should
be used.
'''
stmt.path_bonds.add((path_id, iterator)) | Register a path id that should be joined on when joining stmt
iterator indicates whether the identity or iterator aspect should
be used. | put_path_bond | python | geldata/gel | edb/pgsql/compiler/pathctx.py | https://github.com/geldata/gel/blob/master/edb/pgsql/compiler/pathctx.py | Apache-2.0 |
def get_rvar_path_var(
    rvar: pgast.PathRangeVar,
    path_id: irast.PathId,
    aspect: pgce.PathAspect,
    *,
    flavor: str='normal',
    env: context.Environment,
) -> pgast.OutputVar:
    """Return ColumnRef for a given *path_id* in a given *range var*."""
    # Resolve the output column in the rvar's underlying query, then
    # reference that column through the range var itself.
    out = get_path_output(
        rvar.query, path_id, aspect=aspect, flavor=flavor, env=env)
    return astutils.get_rvar_var(rvar, out)
def get_set_rvar(
    ir_set: irast.Set, *,
    ctx: context.CompilerContextLevel) -> pgast.PathRangeVar:
    """Return a PathRangeVar for a given IR Set.

    Basically all of compilation comes through here for each set.

    :param ir_set: IR Set node.
    :returns: the range var registered (or already cached) for the
        set's path_id on the current rel.
    """
    path_id = ir_set.path_id

    scope_stmt = relctx.maybe_get_scope_stmt(path_id, ctx=ctx)
    # Reuse a previously compiled rvar if one is cached.
    if rvar := _lookup_set_rvar(ir_set, scope_stmt=scope_stmt, ctx=ctx):
        return rvar

    if ctx.toplevel_stmt is context.NO_STMT:
        # Top level query
        return _process_toplevel_query(ir_set, ctx=ctx)

    with contextlib.ExitStack() as cstack:
        # If there was a scope_stmt registered for our path, we compile
        # as a subrel of that scope_stmt.  Otherwise we use whatever the
        # current rel was.
        if scope_stmt is not None:
            newctx = cstack.enter_context(ctx.new())
            newctx.rel = scope_stmt
        else:
            newctx = ctx
            scope_stmt = newctx.rel

        subctx = cstack.enter_context(newctx.subrel())
        # *stmt* here is a tentative container for the relation generated
        # by processing the *ir_set*.  However, the actual compilation
        # is free to return something else instead of a range var over
        # stmt.
        stmt = subctx.rel
        stmt.name = ctx.env.aliases.get(get_set_rel_alias(ir_set, ctx=ctx))

        # If ir.Set compilation needs to produce a subquery,
        # make sure it uses the current subrel.  This makes it
        # possible to set up the path scope here and don't worry
        # about it later.
        subctx.pending_query = stmt

        is_empty_set = isinstance(ir_set.expr, irast.EmptySet)

        path_scope = relctx.get_scope(ir_set, ctx=subctx)
        new_scope = path_scope or subctx.scope_tree
        # The set needs an "optional wrapper" if it may be empty in a
        # context that must still produce a row, unless the wrapper can
        # be statically proven unnecessary.
        is_optional = (
            subctx.scope_tree.is_optional(path_id) or
            new_scope.is_optional(path_id) or
            path_id in subctx.force_optional
        ) and not can_omit_optional_wrapper(ir_set, new_scope, ctx=ctx)

        optional_wrapping = is_optional and not is_empty_set

        if optional_wrapping:
            stmt, optrel = prepare_optional_rel(
                ir_set=ir_set, stmt=stmt, ctx=subctx)
            subctx.pending_query = subctx.rel = stmt

        # XXX: This is pretty dodgy, because it updates the path_scope
        # *before* we call new_child() on it.  Removing it only breaks two
        # tests of lprops on backlinks.
        if path_scope and path_scope.is_visible(path_id):
            subctx.path_scope[path_id] = scope_stmt

        # If this set has a scope in the scope tree associated with it,
        # register paths in that scope to be compiled with this stmt
        # as their scope_stmt.
        if path_scope:
            relctx.update_scope(ir_set, stmt, ctx=subctx)

        # Actually compile the set
        rvars = _get_expr_set_rvar(ir_set.expr, ir_set, ctx=subctx)
        relctx.update_scope_masks(ir_set, rvars.main.rvar, ctx=subctx)

        if ctx.env.is_explain:
            # Track which IR sets produced each rvar, for EXPLAIN output.
            for srvar in rvars.new:
                if not srvar.rvar.ir_origins:
                    srvar.rvar.ir_origins = []
                srvar.rvar.ir_origins.append(ir_set)

        if optional_wrapping:
            rvars = finalize_optional_rel(ir_set, optrel=optrel,
                                          rvars=rvars, ctx=subctx)
            relctx.update_scope_masks(ir_set, rvars.main.rvar, ctx=subctx)
        elif not is_optional and is_empty_set:
            # In most cases it is totally fine for us to represent an
            # empty set as an empty relation.
            # (except when it needs to be fed to an optional argument)
            null_query = rvars.main.rvar.query
            assert isinstance(
                null_query, (pgast.SelectStmt, pgast.NullRelation))
            null_query.where_clause = pgast.BooleanConstant(val=False)

        result_rvar = _include_rvars(rvars, scope_stmt=scope_stmt, ctx=subctx)
        # Register the result under every aspect the compilation produced.
        for aspect in rvars.main.aspects:
            pathctx.put_path_rvar_if_not_exists(
                ctx.rel,
                path_id,
                result_rvar,
                aspect=aspect,
            )

        return result_rvar
def ensure_source_rvar(
    ir_set: irast.Set,
    stmt: pgast.Query,
    *,
    ctx: context.CompilerContextLevel,
) -> pgast.PathRangeVar:
    """Make sure that a source aspect is available for ir_set.

    If no aspect is available, compile it.  If value/identity is available
    but source is not, select from the base relation and join it in.
    """
    # Fast path: a source rvar is already registered on stmt.
    rvar = relctx.maybe_get_path_rvar(
        stmt, ir_set.path_id, aspect=pgce.PathAspect.SOURCE, ctx=ctx)
    if rvar is None:
        # Nothing compiled yet at all -- compile the set and retry.
        get_set_rvar(ir_set, ctx=ctx)
        rvar = relctx.maybe_get_path_rvar(
            stmt, ir_set.path_id, aspect=pgce.PathAspect.SOURCE, ctx=ctx)

    if rvar is None:
        # Fall back to the scope statement for this path (or the
        # current rel if the path has no registered scope statement).
        scope_stmt = relctx.maybe_get_scope_stmt(ir_set.path_id, ctx=ctx)
        if scope_stmt is None:
            scope_stmt = ctx.rel
        rvar = relctx.maybe_get_path_rvar(
            scope_stmt,
            ir_set.path_id,
            aspect=pgce.PathAspect.SOURCE,
            ctx=ctx,
        )
        if rvar is None:
            if irtyputils.is_free_object(ir_set.path_id.target):
                # Free objects don't have a real source, and
                # generating a new fake source doesn't work because
                # the ids don't match, so instead we call the existing
                # value rvar a source.
                rvar = relctx.get_path_rvar(
                    scope_stmt,
                    ir_set.path_id,
                    aspect=pgce.PathAspect.VALUE,
                    ctx=ctx,
                )
            else:
                # Select from the base relation and join it in.
                rvar = _get_source_rvar(ir_set, scope_stmt, ctx=ctx)
            pathctx.put_path_rvar(
                stmt, ir_set.path_id, rvar, aspect=pgce.PathAspect.SOURCE,
            )

    return rvar
def can_omit_optional_wrapper(
    ir_set: irast.Set, new_scope: irast.ScopeTreeNode, *,
    ctx: context.CompilerContextLevel) -> bool:
    """Determine whether it is safe to omit the optional wrapper.

    Doing so is safe when the expression is guaranteed to result in
    a NULL and not an empty set.

    The main such case implemented is a path `foo.bar` where foo
    is visible and bar is a single non-computed property, which we know
    will be stored as NULL in the database.

    We also handle trivial SELECTs wrapping such an expression.
    """
    # Look through trivial SELECT wrappers.
    if ir_set.expr and irutils.is_trivial_select(ir_set.expr):
        return can_omit_optional_wrapper(
            ir_set.expr.result,
            relctx.get_scope(ir_set.expr.result, ctx=ctx) or new_scope,
            ctx=ctx,
        )

    if isinstance(ir_set.expr, irast.Parameter):
        return True

    # Our base json casts should all preserve nullity (instead of
    # turning it into an empty set), so allow passing through those
    # cases.  This is mainly an optimization for passing globals to
    # functions, where we need to convert a bunch of optional params
    # to json, and for casting out of json there and in schema updates.
    if (
        isinstance(ir_set.expr, irast.TypeCast)
        and ((
            irtyputils.is_scalar(ir_set.expr.expr.typeref)
            and irtyputils.is_json(ir_set.expr.to_type)
        ) or (
            irtyputils.is_json(ir_set.expr.expr.typeref)
            and irtyputils.is_scalar(ir_set.expr.to_type)
        ))
    ):
        return can_omit_optional_wrapper(
            ir_set.expr.expr,
            relctx.get_scope(ir_set.expr.expr, ctx=ctx) or new_scope,
            ctx=ctx,
        )

    # Tuple indirections preserve NULL-ness of their source.
    if isinstance(ir_set.expr, irast.TupleIndirectionPointer):
        return can_omit_optional_wrapper(
            ir_set.expr.source, new_scope, ctx=ctx)

    # The core case: a plain (non-computed) single pointer off a
    # visible source, stored inline and thus NULL when absent.
    return bool(
        isinstance(ir_set.expr, irast.Pointer)
        and (rptr := ir_set.expr)
        and rptr.expr is None
        and not ir_set.path_id.is_objtype_path()
        and not ir_set.path_id.is_type_intersection_path()
        and new_scope.is_visible(rptr.source.path_id)
        and not rptr.is_inbound
        and rptr.ptrref.out_cardinality.is_single()
        and not rptr.ptrref.is_computable
    )
def _source_path_needs_semi_join(
ir_source: irast.Set,
ctx: context.CompilerContextLevel) -> bool:
"""Check if the path might need a semi-join
It does not need one if it has a visible prefix followed by single
pointers. Otherwise it might.
This is an optimization that allows us to avoid doing a semi-join
when there is a chain of single links referenced (probably in a filter
or a computable).
"""
if ctx.scope_tree.is_visible(ir_source.path_id):
return False
while (
isinstance(ir_source.expr, irast.Pointer)
and ir_source.expr.dir_cardinality.is_single()
and not ir_source.expr.expr
):
ir_source = ir_source.expr.source
if ctx.scope_tree.is_visible(ir_source.path_id):
return False
return True | Check if the path might need a semi-join
It does not need one if it has a visible prefix followed by single
pointers. Otherwise it might.
This is an optimization that allows us to avoid doing a semi-join
when there is a chain of single links referenced (probably in a filter
or a computable). | _source_path_needs_semi_join | python | geldata/gel | edb/pgsql/compiler/relgen.py | https://github.com/geldata/gel/blob/master/edb/pgsql/compiler/relgen.py | Apache-2.0 |
def process_set_as_existence_assertion(
    ir_set: irast.SetE[irast.Call],
    *,
    ctx: context.CompilerContextLevel,
) -> SetRVars:
    """Implementation of std::assert_exists

    Compiles the argument and wraps its value in a call to
    edgedb.raise_on_null(), so that an empty result raises a
    cardinality_violation error instead of producing no rows.
    """
    expr = ir_set.expr
    stmt = ctx.rel
    msg_arg = expr.args['message']
    ir_arg = expr.args[0]
    ir_arg_set = ir_arg.expr

    if (
        not ir_arg.cardinality.can_be_zero()
        and not msg_arg.cardinality.is_multi()
    ):
        # If the argument has been statically proven to be non empty,
        # elide the entire assertion.
        arg_ref = dispatch.compile(ir_arg_set, ctx=ctx)
        pathctx.put_path_value_var(stmt, ir_set.path_id, arg_ref)
        pathctx.put_path_id_map(stmt, ir_set.path_id, ir_arg_set.path_id)
        return new_stmt_set_rvar(ir_set, stmt, ctx=ctx)

    with ctx.subrel() as newctx:
        # The solution to assert_exists() is as simple as
        # calling raise_on_null().
        newctx.expr_exposed = False
        # Force the argument to be compiled as optional so that an
        # empty result surfaces as NULL (which raise_on_null detects).
        newctx.force_optional |= {ir_arg_set.path_id}
        pathctx.put_path_id_map(newctx.rel, ir_set.path_id, ir_arg_set.path_id)
        arg_ref = dispatch.compile(ir_arg_set, ctx=newctx)
        arg_val = output.output_as_value(arg_ref, env=newctx.env)
        msg = dispatch.compile(msg_arg.expr, ctx=newctx)
        set_expr = pgast.FuncCall(
            name=astutils.edgedb_func('raise_on_null', ctx=ctx),
            args=[
                arg_val,
                pgast.StringConstant(val='cardinality_violation'),
                pgast.NamedFuncArg(
                    name='msg',
                    val=pgast.CoalesceExpr(
                        args=[
                            msg,
                            pgast.StringConstant(
                                val='assert_exists violation: expression '
                                    'returned an empty set',
                            ),
                        ]
                    ),
                ),
                pgast.NamedFuncArg(
                    name='constraint',
                    val=pgast.StringConstant(val='std::assert_exists'),
                ),
            ],
        )
        pathctx.put_path_value_var(
            newctx.rel,
            ir_arg_set.path_id,
            set_expr,
            force=True,
        )
        # Also overwrite the identity/serialized var so every access
        # route goes through the raising expression.
        other_aspect = (
            pgce.PathAspect.IDENTITY
            if ir_set.path_id.is_objtype_path() else
            pgce.PathAspect.SERIALIZED
        )
        pathctx.put_path_var(
            newctx.rel,
            ir_arg_set.path_id,
            set_expr,
            force=True,
            aspect=other_aspect,
        )

    # It is important that we do not provide source, which could allow
    # fields on the object to be accessed without triggering the
    # raise_on_null.  Not providing source means another join is
    # needed, which will trigger it.
    func_rvar = relctx.new_rel_rvar(ir_set, newctx.rel, ctx=ctx)
    relctx.include_rvar(
        stmt,
        func_rvar,
        ir_set.path_id,
        aspects=(pgce.PathAspect.VALUE,),
        ctx=ctx,
    )

    return new_stmt_set_rvar(
        ir_set,
        stmt,
        aspects=(pgce.PathAspect.VALUE,),
        ctx=ctx,
    )
def process_set_as_multiplicity_assertion(
    ir_set: irast.SetE[irast.Call],
    *,
    ctx: context.CompilerContextLevel,
) -> SetRVars:
    """Implementation of std::assert_distinct

    Compiles the argument and raises a cardinality_violation error at
    runtime if the compiled set contains duplicate elements.
    """
    expr = ir_set.expr
    msg_arg = expr.args['message']
    ir_arg = expr.args[0]
    ir_arg_set = ir_arg.expr

    if (
        not ir_arg.multiplicity.is_duplicate()
        and not msg_arg.cardinality.is_multi()
    ):
        # If the argument has been statically proven to be distinct,
        # elide the entire assertion.
        arg_ref = dispatch.compile(ir_arg_set, ctx=ctx)
        pathctx.put_path_value_var(ctx.rel, ir_set.path_id, arg_ref)
        pathctx.put_path_id_map(ctx.rel, ir_set.path_id, ir_arg_set.path_id)
        return new_stmt_set_rvar(ir_set, ctx.rel, ctx=ctx)

    # Generate a distinct set assertion as the following SQL:
    #
    #    SELECT
    #        <target_set>,
    #        (CASE WHEN
    #            <target_set>
    #            IS DISTINCT FROM
    #            lag(<target_set>) OVER (ORDER BY <target_set>)
    #        THEN <target_set>
    #        ELSE edgedb.raise(ConstraintViolationError)) AS check_expr
    #    FROM
    #        (SELECT <target_set>, row_number() OVER () AS i) AS q
    #    ORDER BY
    #        q.i, check_expr
    #
    # NOTE: sorting over original row_number() is necessary to preserve
    # order, as assert_distinct() must be completely transparent for
    # compliant sets.
    with ctx.subrel() as newctx:
        with newctx.subrel() as subctx:
            dispatch.visit(ir_arg_set, ctx=subctx)
            arg_ref = pathctx.get_path_output(
                subctx.rel,
                ir_arg_set.path_id,
                aspect=pgce.PathAspect.VALUE,
                env=subctx.env,
            )
            arg_val = output.output_as_value(arg_ref, env=newctx.env)
            sub_rvar = relctx.new_rel_rvar(ir_arg_set, subctx.rel, ctx=subctx)

            aspects = pathctx.list_path_aspects(subctx.rel, ir_arg_set.path_id)
            relctx.include_rvar(
                newctx.rel, sub_rvar, ir_arg_set.path_id,
                aspects=aspects, ctx=subctx,
            )

            # Record the original row order so it can be restored
            # after sorting for the lag() comparison.
            alias = ctx.env.aliases.get('i')
            subctx.rel.target_list.append(
                pgast.ResTarget(
                    name=alias,
                    val=pgast.FuncCall(
                        name=('row_number',),
                        args=[],
                        over=pgast.WindowDef(),
                    )
                )
            )

        msg = dispatch.compile(msg_arg.expr, ctx=newctx)

        do_raise = pgast.FuncCall(
            name=astutils.edgedb_func('raise', ctx=ctx),
            args=[
                # NULL cast to the argument's type, so that the raise
                # call has the right result type for the CASE branch.
                pgast.TypeCast(
                    arg=pgast.NullConstant(),
                    type_name=pgast.TypeName(
                        name=pg_types.pg_type_from_ir_typeref(
                            ir_arg_set.typeref),
                    ),
                ),
                pgast.StringConstant(val='cardinality_violation'),
                pgast.NamedFuncArg(
                    name='msg',
                    val=pgast.CoalesceExpr(
                        args=[
                            msg,
                            pgast.StringConstant(
                                val='assert_distinct violation: expression '
                                    'returned a set with duplicate elements',
                            ),
                        ],
                    ),
                ),
                pgast.NamedFuncArg(
                    name='constraint',
                    val=pgast.StringConstant(val='std::assert_distinct'),
                ),
            ],
        )

        check_expr = pgast.CaseExpr(
            args=[
                pgast.CaseWhen(
                    expr=astutils.new_binop(
                        lexpr=arg_val,
                        op='IS DISTINCT FROM',
                        rexpr=pgast.FuncCall(
                            name=('lag',),
                            args=[arg_val],
                            over=pgast.WindowDef(
                                order_clause=[pgast.SortBy(node=arg_val)],
                            ),
                        ),
                    ),
                    result=arg_val,
                ),
            ],
            defresult=do_raise,
        )

        alias2 = ctx.env.aliases.get('v')

        newctx.rel.target_list.append(
            pgast.ResTarget(
                val=check_expr,
                name=alias2,
            )
        )

        pathctx.put_path_var(
            newctx.rel,
            ir_set.path_id,
            check_expr,
            aspect=pgce.PathAspect.VALUE,
        )

        if newctx.rel.sort_clause is None:
            newctx.rel.sort_clause = []
        # Sort by original row number first (transparency), then by the
        # check expression to force its evaluation.
        newctx.rel.sort_clause.extend([
            pgast.SortBy(
                node=pgast.ColumnRef(name=[sub_rvar.alias.aliasname, alias]),
            ),
            pgast.SortBy(
                node=pgast.ColumnRef(name=[alias2]),
            ),
        ])

        pathctx.put_path_id_map(newctx.rel, ir_set.path_id, ir_arg_set.path_id)

    func_rvar = relctx.new_rel_rvar(ir_set, newctx.rel, ctx=ctx)
    relctx.include_rvar(
        ctx.rel, func_rvar, ir_set.path_id, aspects=aspects, ctx=ctx
    )

    return new_stmt_set_rvar(ir_set, ctx.rel, aspects=aspects, ctx=ctx)
def _compile_call_args(
    ir_set: irast.Set,
    *,
    skip: Collection[int] = (),
    no_subquery_args: bool = False,
    ctx: context.CompilerContextLevel,
) -> list[pgast.BaseExpr]:
    """
    Compiles function call arguments, whose index is not in `skip`.

    Implicit global arguments (if any) are compiled first, followed by
    the explicit arguments in declaration order.  For object-typed
    arguments an extra type-id expression is appended right after the
    argument itself (needed for overload dispatch), and a trailing
    empty VARIADIC array is appended when the call has an empty
    variadic part.
    """
    expr = ir_set.expr
    assert isinstance(expr, irast.Call)
    args = []
    # Implicit globals are passed as extra leading arguments.
    if isinstance(expr, irast.FunctionCall) and expr.global_args:
        for glob_arg in expr.global_args:
            arg_ref = dispatch.compile(glob_arg, ctx=ctx)
            args.append(output.output_as_value(arg_ref, env=ctx.env))
    for ir_key, ir_arg in expr.args.items():
        if ir_key in skip:
            continue
        assert ir_arg.multiplicity != qltypes.Multiplicity.UNKNOWN
        typemod = ir_arg.param_typemod
        # Support a mode where we try to compile arguments as pure
        # subqueries. This is occasionally valuable as it lets us
        # "push down" the subqueries from the top level, which is
        # important for things like hitting pgvector indexes in an
        # ORDER BY.
        arg_typeref = ir_arg.expr.typeref
        make_subquery = (
            expr.prefer_subquery_args
            and typemod != qltypes.TypeModifier.SetOfType
            and ir_arg.cardinality.is_single()
            and (arg_typeref.is_scalar or arg_typeref.collection)
            and not _needs_arg_null_check(expr, ir_arg, typemod, ctx=ctx)
            and not no_subquery_args
        )
        if make_subquery:
            # Compile as a standalone subquery and collapse it down to
            # a plain expression when possible.
            arg_ref = set_as_subquery(ir_arg.expr, as_value=True, ctx=ctx)
            arg_ref.nullable = ir_arg.cardinality.can_be_zero()
            arg_ref = astutils.collapse_query(arg_ref)
        else:
            arg_ref = dispatch.compile(ir_arg.expr, ctx=ctx)
            arg_ref = output.output_as_value(arg_ref, env=ctx.env)
        args.append(arg_ref)
        # May wrap arg_ref in a null check, depending on the typemod.
        _compile_arg_null_check(expr, ir_arg, arg_ref, typemod, ctx=ctx)
        if (
            isinstance(expr, irast.FunctionCall)
            and ir_arg.expr_type_path_id is not None
        ):
            # Object type arguments are represented by two
            # SQL arguments: object id and object type id.
            # The latter is needed for proper overload
            # dispatch.
            ensure_source_rvar(ir_arg.expr, ctx.rel, ctx=ctx)
            type_ref = relctx.get_path_var(
                ctx.rel,
                ir_arg.expr_type_path_id,
                aspect=pgce.PathAspect.IDENTITY,
                ctx=ctx,
            )
            args.append(type_ref)
    if (
        isinstance(expr, irast.FunctionCall)
        and expr.has_empty_variadic
        and expr.variadic_param_type is not None
    ):
        # An empty variadic part must still be passed explicitly, as a
        # correctly-typed empty array.
        var = pgast.TypeCast(
            arg=pgast.ArrayExpr(elements=[]),
            type_name=pgast.TypeName(
                name=pg_types.pg_type_from_ir_typeref(
                    expr.variadic_param_type)
            )
        )
        args.append(pgast.VariadicArgument(expr=var))
    return args
def create_subrel_for_expr(
    expr: pgast.BaseExpr, *, ctx: context.CompilerContextLevel
) -> irast.PathId:
    """Wrap *expr* in its own subquery relation joined into ctx.rel.

    Returns a freshly minted dummy PathId under which the expression's
    value can be retrieved from the enclosing relation.
    """
    # A dummy path id standing in for a dummy object that "carries"
    # the expression.
    result_id = irast.PathId.new_dummy(ctx.env.aliases.get('d'))
    with ctx.subrel() as subctx:
        # Register the expression as the VALUE aspect of the dummy path.
        pathctx.put_path_var(
            subctx.rel,
            result_id,
            expr,
            aspect=pgce.PathAspect.VALUE,
        )
        # Splice the subquery into the parent relation.
        rvar = relctx.rvar_for_rel(subctx.rel, ctx=ctx)
        relctx.include_rvar(
            ctx.rel,
            rvar,
            result_id,
            aspects=(pgce.PathAspect.VALUE,),
            ctx=ctx,
        )
    return result_id
def init_dml_stmt(
    ir_stmt: irast.MutatingStmt,
    *,
    ctx: context.CompilerContextLevel,
) -> DMLParts:
    """Prepare the common structure of the query representing a DML stmt.

    Args:
        ir_stmt:
            IR of the DML statement.

    Returns:
        A ``DMLParts`` tuple containing a map of DML CTEs (one per
        relevant concrete type) as well as the common range CTE for
        UPDATE/DELETE statements and, for INSERT ... UNLESS CONFLICT
        ... ELSE, a placeholder CTE/rvar pair for the ELSE body.
    """
    range_cte: Optional[pgast.CommonTableExpr]
    range_rvar: Optional[pgast.RelRangeVar]
    clauses.compile_volatile_bindings(ir_stmt, ctx=ctx)
    if isinstance(ir_stmt, (irast.UpdateStmt, irast.DeleteStmt)):
        # UPDATE and DELETE operate over a range, so generate
        # the corresponding CTE and connect it to the DML statements.
        range_cte = get_dml_range(ir_stmt, ctx=ctx)
        range_rvar = pgast.RelRangeVar(
            relation=range_cte,
            alias=pgast.Alias(
                aliasname=ctx.env.aliases.get(hint='range')
            )
        )
    else:
        # INSERT has no pre-existing range to operate over.
        range_cte = None
        range_rvar = None
    top_typeref = ir_stmt.material_type
    typerefs = [top_typeref]
    if isinstance(ir_stmt, (irast.UpdateStmt, irast.DeleteStmt)):
        # UPDATE/DELETE must touch the subject type along with all of
        # its descendants (and union components, for union subjects).
        if top_typeref.union:
            for component in top_typeref.union:
                if component.material_type:
                    component = component.material_type
                typerefs.append(component)
                typerefs.extend(irtyputils.get_typeref_descendants(component))
        typerefs.extend(irtyputils.get_typeref_descendants(top_typeref))
        # Only update/delete concrete types. (Except in the degenerate
        # corner case where there are none, in which case keep using
        # everything so as to avoid needing a more complex special case.)
        concrete_typerefs = [t for t in typerefs if not t.is_abstract]
        if concrete_typerefs:
            typerefs = concrete_typerefs
    dml_map = {}
    for typeref in typerefs:
        if typeref.union:
            continue
        # System/config types don't live in regular tables; skip them.
        if (
            isinstance(typeref.name_hint, sn.QualName)
            and typeref.name_hint.module in ('sys', 'cfg')
        ):
            continue
        dml_cte, dml_rvar = gen_dml_cte(
            ir_stmt,
            range_rvar=range_rvar,
            typeref=typeref,
            ctx=ctx,
        )
        dml_map[typeref] = (dml_cte, dml_rvar)
    else_cte = None
    if (
        isinstance(ir_stmt, irast.InsertStmt)
        and ir_stmt.on_conflict and ir_stmt.on_conflict.else_ir is not None
    ):
        # Reserve an (initially empty) CTE for the ELSE branch of
        # UNLESS CONFLICT ... ELSE; its query is filled in later.
        dml_cte = pgast.CommonTableExpr(
            query=pgast.SelectStmt(),
            name=ctx.env.aliases.get(hint='melse'),
            for_dml_stmt=ctx.get_current_dml_stmt(),
        )
        dml_rvar = relctx.rvar_for_rel(dml_cte, ctx=ctx)
        else_cte = (dml_cte, dml_rvar)
    put_iterator_bond(ctx.enclosing_cte_iterator, ctx.rel)
    ctx.dml_stmt_stack.append(ir_stmt)
    return DMLParts(
        dml_ctes=dml_map,
        range_cte=range_cte,
        else_cte=else_cte,
    )
Args:
ir_stmt:
IR of the DML statement.
Returns:
A ``DMLParts`` tuple containing a map of DML CTEs as well as the
common range CTE for UPDATE/DELETE statements. | init_dml_stmt | python | geldata/gel | edb/pgsql/compiler/dml.py | https://github.com/geldata/gel/blob/master/edb/pgsql/compiler/dml.py | Apache-2.0 |
def get_dml_range(
    ir_stmt: Union[irast.UpdateStmt, irast.DeleteStmt],
    *,
    ctx: context.CompilerContextLevel,
) -> pgast.CommonTableExpr:
    """Create a range CTE for the given DML statement.

    The range is the set of rows the UPDATE/DELETE applies to: the
    compiled subject set, restricted by the FILTER clause (if any).

    Args:
        ir_stmt:
            IR of the DML statement.

    Returns:
        A CommonTableExpr node representing the range affected
        by the DML statement.
    """
    target_ir_set = ir_stmt.subject
    ir_qual_expr = ir_stmt.where
    ir_qual_card = ir_stmt.where_card
    with ctx.newrel() as subctx:
        subctx.expr_exposed = False
        range_stmt = subctx.rel
        # Correlate with any enclosing FOR-loop iterator.
        merge_iterator(ctx.enclosing_cte_iterator, range_stmt, ctx=subctx)
        dispatch.visit(target_ir_set, ctx=subctx)
        pathctx.get_path_identity_output(
            range_stmt, target_ir_set.path_id, env=subctx.env)
        if ir_qual_expr is not None:
            # Compile the FILTER clause into the WHERE of the range.
            with subctx.new() as wctx:
                clauses.setup_iterator_volatility(target_ir_set, ctx=wctx)
                range_stmt.where_clause = astutils.extend_binop(
                    range_stmt.where_clause,
                    clauses.compile_filter_clause(
                        ir_qual_expr, ir_qual_card, ctx=wctx))
        # Expose the subject path so consumers of the CTE can join on it.
        range_stmt.path_id_mask.discard(target_ir_set.path_id)
        pathctx.put_path_bond(range_stmt, target_ir_set.path_id)
        range_cte = pgast.CommonTableExpr(
            query=range_stmt,
            name=ctx.env.aliases.get('range'),
            for_dml_stmt=ctx.get_current_dml_stmt(),
        )
        return range_cte
Args:
ir_stmt:
IR of the DML statement.
Returns:
A CommonTableExpr node representing the range affected
by the DML statement. | get_dml_range | python | geldata/gel | edb/pgsql/compiler/dml.py | https://github.com/geldata/gel/blob/master/edb/pgsql/compiler/dml.py | Apache-2.0 |
def _mk_dynamic_get_path(
    ptr_map: Dict[sn.Name, pgast.BaseExpr],
    typeref: irast.TypeRef,
    fallback_rvar: Optional[pgast.PathRangeVar] = None,
) -> pgast.DynamicRangeVarFunc:
    """Build a dynamic rvar function for insert/update.

    The returned callback resolves path lookups against a select by
    material rptr alone, so the select behaves like a base relation.
    This makes it easy for access policies to operate on the results.
    """
    def dynamic_get_path(
        rel: pgast.Query, path_id: irast.PathId, *,
        flavor: str,
        aspect: str, env: context.Environment
    ) -> Optional[pgast.BaseExpr | pgast.PathRangeVar]:
        # Only normal-flavor value/identity lookups are handled here.
        wanted = (pgce.PathAspect.VALUE, pgce.PathAspect.IDENTITY)
        if flavor != 'normal' or aspect not in wanted:
            return None
        rptr = path_id.rptr()
        if rptr:
            mat_ptr = rptr.real_material_ptr
            hit = ptr_map.get(mat_ptr.name)
            if hit:
                return hit
            if mat_ptr.shortname.name == '__type__':
                # __type__ is computed straight from the static typeref.
                return astutils.compile_typeref(typeref)
        # If a fallback rvar is specified, defer to that.
        # This is used in rewrites to go back to the original
        if fallback_rvar:
            return fallback_rvar
        if not rptr:
            raise LookupError('only pointers appear in insert fallback')
        # Properties that aren't specified are {}
        return pgast.NullConstant()
    return dynamic_get_path
It returns values out of a select purely based on material rptr,
as if it was a base relation. This is to make it easy for access
policies to operate on the results. | _mk_dynamic_get_path | python | geldata/gel | edb/pgsql/compiler/dml.py | https://github.com/geldata/gel/blob/master/edb/pgsql/compiler/dml.py | Apache-2.0 |
def process_insert_body(
    *,
    ir_stmt: irast.InsertStmt,
    insert_cte: pgast.CommonTableExpr,
    dml_parts: DMLParts,
    ctx: context.CompilerContextLevel,
) -> None:
    """Generate SQL DML CTEs from an InsertStmt IR.

    Args:
        ir_stmt:
            IR of the DML statement.
        insert_cte:
            A CommonTableExpr node representing the SQL INSERT into
            the main relation of the DML subject.
        dml_parts:
            A DMLParts tuple returned by init_dml_stmt().  Its
            ``else_cte`` (if present) holds the CTE/rvar pair for the
            body of an ELSE clause in an UNLESS CONFLICT construct.
    """
    # We build the tuples to insert in a select we put into a CTE
    select = pgast.SelectStmt(target_list=[])
    # The main INSERT query of this statement will always be
    # present to insert at least the `id` property.
    insert_stmt = insert_cte.query
    assert isinstance(insert_stmt, pgast.InsertStmt)
    typeref = ir_stmt.subject.typeref.real_material_type
    # Handle an UNLESS CONFLICT if we need it
    # If there is an UNLESS CONFLICT, we need to know that there is a
    # conflict *before* we execute DML for fields stored in the object
    # itself, so we can prevent that execution from happening. If that
    # is necessary, compile_insert_else_body will generate an iterator
    # CTE with a row for each non-conflicting insert we want to do. We
    # then use that as the iterator for any DML in inline fields.
    #
    # (For DML in the definition of pointers stored in link tables, we
    # don't need to worry about this, because we can run that DML
    # after the enclosing INSERT, using the enclosing INSERT as the
    # iterator.)
    on_conflict_fake_iterator = None
    if ir_stmt.on_conflict:
        assert not insert_stmt.on_conflict
        on_conflict_fake_iterator = compile_insert_else_body(
            insert_stmt,
            ir_stmt,
            ir_stmt.on_conflict,
            ctx.enclosing_cte_iterator,
            dml_parts.else_cte,
            ctx=ctx,
        )
    iterator = ctx.enclosing_cte_iterator
    inner_iterator = on_conflict_fake_iterator or iterator
    # ptr_map needs to be set up in advance of compiling the shape
    # because defaults might reference earlier pointers.
    ptr_map: Dict[sn.Name, pgast.BaseExpr] = {}
    # Use a dynamic rvar to return values out of the select purely
    # based on material rptr, as if it was a base relation.
    # This is to make it easy for access policies to operate on the result
    # of the INSERT.
    fallback_rvar = pgast.DynamicRangeVar(
        dynamic_get_path=_mk_dynamic_get_path(ptr_map, typeref))
    pathctx.put_path_source_rvar(
        select, ir_stmt.subject.path_id, fallback_rvar
    )
    pathctx.put_path_value_rvar(select, ir_stmt.subject.path_id, fallback_rvar)
    # compile contents CTE
    elements: List[Tuple[irast.SetE[irast.Pointer], irast.BasePointerRef]] = []
    for shape_el, shape_op in ir_stmt.subject.shape:
        # INSERT only ever assigns; += / -= are UPDATE-only.
        assert shape_op is qlast.ShapeOp.ASSIGN
        # If the shape element is a linkprop, we do nothing.
        # It will be picked up by the enclosing DML.
        if shape_el.path_id.is_linkprop_path():
            continue
        ptrref = shape_el.expr.ptrref
        if ptrref.material_ptr is not None:
            ptrref = ptrref.material_ptr
        assert shape_el.expr.expr
        elements.append((shape_el, ptrref))
    external_inserts = process_insert_shape(
        ir_stmt, select, ptr_map, elements, iterator, inner_iterator, ctx
    )
    # Single links with link properties live in link tables but must
    # also be reflected in the object table; they get special handling.
    single_external = [
        ir for ir in external_inserts
        if ir.expr.dir_cardinality.is_single()
    ]
    # Put the select that builds the tuples to insert into its own CTE.
    # We do this for two reasons:
    # 1. Generating the object ids outside of the actual SQL insert allows
    #    us to join any enclosing iterators into any nested external inserts.
    # 2. We can use the contents CTE to evaluate insert access policies
    #    before we actually try the insert. This is important because
    #    otherwise an exclusive constraint could be raised first,
    #    which leaks information.
    pathctx.put_path_bond(select, ir_stmt.subject.path_id)
    contents_cte = pgast.CommonTableExpr(
        query=select,
        name=ctx.env.aliases.get('ins_contents'),
        for_dml_stmt=ctx.get_current_dml_stmt(),
    )
    ctx.toplevel_stmt.append_cte(contents_cte)
    contents_rvar = relctx.rvar_for_rel(contents_cte, ctx=ctx)
    rewrites = ir_stmt.rewrites and ir_stmt.rewrites.by_type.get(typeref)
    pol_expr = ir_stmt.write_policies.get(typeref.id)
    pol_ctx = None
    if pol_expr or rewrites or single_external:
        # Create a context for handling policies/rewrites that we will
        # use later. We do this in advance so that the link update code
        # can populate overlay fields in it.
        with ctx.new() as pol_ctx:
            pass
    needs_insert_on_conflict = bool(
        ir_stmt.on_conflict and not on_conflict_fake_iterator)
    # The first serious bit of trickiness: if there are rewrites, the link
    # table updates need to be done *before* we compute the rewrites, since
    # the rewrites might refer to them.
    #
    # However, we can't unconditionally do it like this, because we
    # want to be able to use ON CONFLICT to implement UNLESS CONFLICT
    # ON when possible, and in that case the link table operations
    # need to be done after the *actual insert*, because it is the actual
    # insert that filters out conflicting rows. (This also means that we
    # can't use ON CONFLICT if there are rewrites.)
    #
    # Similar issues obtain with access policies: we can't use ON
    # CONFLICT if there are access policies, since we can't "see" all
    # possible conflicting objects.
    #
    # We *also* need link tables to go first if there are any single links
    # with link properties. We do the actual computation for those in a link
    # table and then join it in to the main table, where it is duplicated.
    link_ctes = []

    def _update_link_tables(inp_cte: pgast.CommonTableExpr) -> None:
        # Process necessary updates to the link tables.
        for shape_el in external_inserts:
            link_cte, check_cte = process_link_update(
                ir_stmt=ir_stmt,
                ir_set=shape_el,
                dml_cte=inp_cte,
                source_typeref=typeref,
                iterator=iterator,
                policy_ctx=pol_ctx,
                ctx=ctx,
            )
            if link_cte:
                link_ctes.append(link_cte)
            if check_cte is not None:
                ctx.env.check_ctes.append(check_cte)

    if not needs_insert_on_conflict:
        _update_link_tables(contents_cte)
    # compile rewrites CTE
    if rewrites or single_external:
        rewrites = rewrites or {}
        assert not needs_insert_on_conflict
        assert pol_ctx
        with pol_ctx.reenter(), pol_ctx.newrel() as rctx:
            # Pull in ptr rel overlays, so we can see the pointers
            merge_overlays_globally((ir_stmt,), ctx=rctx)
            contents_cte, contents_rvar = process_insert_rewrites(
                ir_stmt,
                contents_cte=contents_cte,
                iterator=iterator,
                inner_iterator=inner_iterator,
                rewrites=rewrites,
                single_external=single_external,
                elements=elements,
                ctx=rctx,
            )
    # Populate the real insert statement based on the select we generated
    insert_stmt.cols = [
        pgast.InsertTarget(name=name)
        for value in contents_cte.query.target_list
        # Filter out generated columns; only keep concrete ones
        if '~' not in (name := not_none(value.name))
    ]
    insert_stmt.select_stmt = pgast.SelectStmt(
        target_list=[
            pgast.ResTarget(val=col) for col in insert_stmt.cols
        ],
        from_clause=[contents_rvar],
    )
    pathctx.put_path_bond(insert_stmt, ir_stmt.subject.path_id)
    real_insert_cte = pgast.CommonTableExpr(
        query=insert_stmt,
        name=ctx.env.aliases.get('ins'),
        for_dml_stmt=ctx.get_current_dml_stmt(),
    )
    # Create the final CTE for the insert that joins the insert
    # and the select together.
    with ctx.newrel() as ictx:
        merge_iterator(iterator, ictx.rel, ctx=ictx)
        insert_rvar = relctx.rvar_for_rel(real_insert_cte, ctx=ctx)
        relctx.include_rvar(
            ictx.rel, insert_rvar, ir_stmt.subject.path_id, ctx=ictx)
        relctx.include_rvar(
            ictx.rel, contents_rvar, ir_stmt.subject.path_id, ctx=ictx)
    # TODO: set up dml_parts with a SelectStmt for inserts always?
    insert_cte.query = ictx.rel
    # If there is an ON CONFLICT clause, insert the CTEs now so that the
    # link inserts can depend on it. Otherwise we have the link updates
    # depend on the contents cte so that policies can operate before
    # doing any actual INSERTs.
    if needs_insert_on_conflict:
        ctx.toplevel_stmt.append_cte(real_insert_cte)
        ctx.toplevel_stmt.append_cte(insert_cte)
        link_op_cte = insert_cte
    else:
        link_op_cte = contents_cte
    if needs_insert_on_conflict:
        _update_link_tables(link_op_cte)
    if pol_expr:
        assert pol_ctx
        assert not needs_insert_on_conflict
        with pol_ctx.reenter():
            policy_cte = compile_policy_check(
                contents_cte, ir_stmt, pol_expr, typeref=typeref, ctx=pol_ctx
            )
        # Make the DML queries depend on the policy check so that it
        # runs before any actual modification.
        force_policy_checks(
            policy_cte,
            (insert_stmt,) + tuple(cte.query for cte in link_ctes),
            ctx=ctx)
    for link_cte in link_ctes:
        ctx.toplevel_stmt.append_cte(link_cte)
    if not needs_insert_on_conflict:
        ctx.toplevel_stmt.append_cte(real_insert_cte)
        ctx.toplevel_stmt.append_cte(insert_cte)
    # Additional conflict checks (e.g. from constraints elsewhere in
    # the query) are compiled without an ELSE body.
    for extra_conflict in (ir_stmt.conflict_checks or ()):
        compile_insert_else_body(
            insert_stmt,
            ir_stmt,
            extra_conflict,
            inner_iterator,
            None,
            ctx=ctx,
        )
Args:
ir_stmt:
IR of the DML statement.
insert_cte:
A CommonTableExpr node representing the SQL INSERT into
the main relation of the DML subject.
else_cte_rvar:
If present, a tuple containing a CommonTableExpr and
a RangeVar for it, which represent the body of an
ELSE clause in an UNLESS CONFLICT construct.
dml_parts:
A DMLParts tuple returned by init_dml_stmt(). | process_insert_body | python | geldata/gel | edb/pgsql/compiler/dml.py | https://github.com/geldata/gel/blob/master/edb/pgsql/compiler/dml.py | Apache-2.0 |
def process_update_body(
    *,
    ir_stmt: irast.UpdateStmt,
    update_cte: pgast.CommonTableExpr,
    dml_parts: DMLParts,
    typeref: irast.TypeRef,
    ctx: context.CompilerContextLevel,
) -> None:
    """Generate SQL DML CTEs from an UpdateStmt IR.

    Args:
        ir_stmt:
            IR of the DML statement.
        update_cte:
            CTE representing the SQL UPDATE to the main relation of the
            UPDATE subject.
        dml_parts:
            A DMLParts tuple returned by init_dml_stmt().
        typeref:
            A TypeRef corresponding the the type of a subject being
            updated by the update_cte.
    """
    assert isinstance(update_cte.query, pgast.SelectStmt)
    contents_select = update_cte.query
    toplevel = ctx.toplevel_stmt
    put_iterator_bond(ctx.enclosing_cte_iterator, contents_select)
    assert dml_parts.range_cte
    # Iterate over the range CTE computed by init_dml_stmt: one row
    # per object affected by the UPDATE.
    iterator = pgast.IteratorCTE(
        path_id=ir_stmt.subject.path_id,
        cte=dml_parts.range_cte,
        parent=ctx.enclosing_cte_iterator,
    )
    with ctx.newscope() as subctx:
        # It is necessary to process the expressions in
        # the UpdateStmt shape body in the context of the
        # UPDATE statement so that references to the current
        # values of the updated object are resolved correctly.
        subctx.parent_rel = contents_select
        subctx.expr_exposed = False
        subctx.enclosing_cte_iterator = iterator
        clauses.setup_iterator_volatility(iterator, ctx=subctx)
        # compile contents CTE
        elements = [
            (shape_el, shape_el.expr.ptrref, shape_op)
            for shape_el, shape_op in ir_stmt.subject.shape
            if shape_op != qlast.ShapeOp.MATERIALIZE
        ]
        values, external_updates, ptr_map = process_update_shape(
            ir_stmt, contents_select, elements, typeref, subctx
        )
    relation = contents_select.from_clause[0]
    assert isinstance(relation, pgast.PathRangeVar)
    # Use a dynamic rvar to return values out of the select purely
    # based on material rptr, as if it was a base relation (and to
    # fall back to the base relation if the value wasn't updated.)
    fallback_rvar = pgast.DynamicRangeVar(
        dynamic_get_path=_mk_dynamic_get_path(ptr_map, typeref, relation),
    )
    pathctx.put_path_source_rvar(
        contents_select,
        ir_stmt.subject.path_id,
        fallback_rvar,
    )
    pathctx.put_path_value_rvar(
        contents_select,
        ir_stmt.subject.path_id,
        fallback_rvar,
    )
    update_stmt = None
    # Single links with link properties are computed in link tables
    # but also duplicated into the object table.
    single_external = [
        ir for ir, _ in external_updates
        if ir.expr.dir_cardinality.is_single()
    ]
    rewrites = ir_stmt.rewrites and ir_stmt.rewrites.by_type.get(typeref)
    pol_expr = ir_stmt.write_policies.get(typeref.id)
    pol_ctx = None
    if pol_expr or rewrites or single_external:
        # Create a context for handling policies/rewrites that we will
        # use later. We do this in advance so that the link update code
        # can populate overlay fields in it.
        with ctx.new() as pol_ctx:
            pass
    no_update = not values and not rewrites and not single_external
    if no_update:
        # No updates directly to the set target table,
        # so convert the UPDATE statement into a SELECT.
        update_cte.query = contents_select
        contents_cte = update_cte
    else:
        contents_cte = pgast.CommonTableExpr(
            query=contents_select,
            name=ctx.env.aliases.get("upd_contents"),
            for_dml_stmt=ctx.get_current_dml_stmt(),
        )
        toplevel.append_cte(contents_cte)
    # Process necessary updates to the link tables.
    # We do link tables before we do the main update so that
    # the rewrites/policies compiled below (via pol_ctx, which
    # process_link_update populates with overlays) can see the new
    # link values.
    link_ctes = []
    for expr, shape_op in external_updates:
        link_cte, check_cte = process_link_update(
            ir_stmt=ir_stmt,
            ir_set=expr,
            dml_cte=contents_cte,
            iterator=iterator,
            shape_op=shape_op,
            source_typeref=typeref,
            ctx=ctx,
            policy_ctx=pol_ctx,
        )
        if link_cte:
            link_ctes.append(link_cte)
        if check_cte is not None:
            ctx.env.check_ctes.append(check_cte)
    if not no_update:
        table_relation = contents_select.from_clause[0]
        assert isinstance(table_relation, pgast.RelRangeVar)
        range_relation = contents_select.from_clause[1]
        assert isinstance(range_relation, pgast.PathRangeVar)
        contents_rvar = relctx.rvar_for_rel(contents_cte, ctx=ctx)
        subject_path_id = ir_stmt.subject.path_id
        # Compile rewrites CTE
        if rewrites or single_external:
            rewrites = rewrites or {}
            assert pol_ctx
            with pol_ctx.reenter(), pol_ctx.new() as rctx:
                # Pull in ptr rel overlays, so we can see the pointers
                merge_overlays_globally((ir_stmt,), ctx=rctx)
                contents_cte, contents_rvar, values = process_update_rewrites(
                    ir_stmt,
                    typeref=typeref,
                    contents_cte=contents_cte,
                    contents_rvar=contents_rvar,
                    iterator=iterator,
                    contents_select=contents_select,
                    table_relation=table_relation,
                    range_relation=range_relation,
                    single_external=single_external,
                    rewrites=rewrites,
                    elements=elements,
                    ctx=rctx,
                )
        # The actual UPDATE: join the contents CTE on id and assign
        # all computed columns in one multi-assignment.
        update_stmt = pgast.UpdateStmt(
            relation=table_relation,
            where_clause=astutils.new_binop(
                lexpr=pgast.ColumnRef(
                    name=[table_relation.alias.aliasname, "id"]
                ),
                op="=",
                rexpr=pathctx.get_rvar_path_identity_var(
                    contents_rvar, subject_path_id, env=ctx.env
                ),
            ),
            from_clause=[contents_rvar],
            targets=[
                pgast.MultiAssignRef(
                    columns=[not_none(value.name) for value, _ in values],
                    source=pgast.SelectStmt(
                        target_list=[
                            pgast.ResTarget(
                                val=pgast.ColumnRef(
                                    name=[
                                        contents_rvar.alias.aliasname,
                                        not_none(value.name),
                                    ]
                                )
                            )
                            for value, _ in values
                        ],
                    ),
                )
            ],
        )
        relctx.pull_path_namespace(
            target=update_stmt, source=contents_rvar, ctx=ctx
        )
        pathctx.put_path_value_rvar(
            update_stmt, subject_path_id, table_relation
        )
        pathctx.put_path_source_rvar(
            update_stmt, subject_path_id, table_relation
        )
        put_iterator_bond(ctx.enclosing_cte_iterator, update_stmt)
        update_cte.query = update_stmt
    if pol_expr:
        assert pol_ctx
        with pol_ctx.reenter():
            policy_cte = compile_policy_check(
                contents_cte, ir_stmt, pol_expr, typeref=typeref, ctx=pol_ctx
            )
        # Make the DML queries depend on the policy check so that it
        # runs before any actual modification.
        force_policy_checks(
            policy_cte,
            ((update_stmt,) if update_stmt else ())
            + tuple(cte.query for cte in link_ctes),
            ctx=ctx,
        )
    if values:
        toplevel.append_cte(update_cte)
    for link_cte in link_ctes:
        toplevel.append_cte(link_cte)
Args:
ir_stmt:
IR of the DML statement.
update_cte:
CTE representing the SQL UPDATE to the main relation of the UPDATE
subject.
dml_parts:
A DMLParts tuple returned by init_dml_stmt().
typeref:
A TypeRef corresponding the the type of a subject being updated
by the update_cte. | process_update_body | python | geldata/gel | edb/pgsql/compiler/dml.py | https://github.com/geldata/gel/blob/master/edb/pgsql/compiler/dml.py | Apache-2.0 |
def check_update_type(
    val: pgast.BaseExpr,
    rel_or_rvar: Union[pgast.BaseExpr, pgast.PathRangeVar],
    *,
    is_subquery: bool,
    ir_stmt: irast.UpdateStmt,
    ir_set: irast.Set,
    shape_ptrref: irast.BasePointerRef,
    actual_ptrref: irast.BasePointerRef,
    ctx: context.CompilerContextLevel,
) -> pgast.BaseExpr:
    """Possibly insert a type check on an UPDATE to a link.

    Because edgedb allows subtypes to covariantly override the target
    types of links, we need to insert runtime type checks when
    the target in a base type being UPDATEd does not match the
    target type for this concrete subtype being handled.

    Returns ``val`` unchanged when no check is needed; otherwise an
    expression that evaluates to ``val`` but raises
    ``wrong_object_type`` at runtime if the new target's dynamic type
    is not a subtype of the concrete link's target type.
    """
    assert isinstance(actual_ptrref, irast.PointerRef)
    base_ptrref = shape_ptrref.real_material_ptr
    # We skip the check if either the base type matches exactly
    # or the shape type matches exactly. FIXME: *Really* we want to do
    # a subtype check, here, though, since this could do a needless
    # check if we have multiple levels of overloading, but we don't
    # have the infrastructure here.
    if (
        not irtyputils.is_object(ir_set.typeref)
        or base_ptrref.out_target.id == actual_ptrref.out_target.id
        or shape_ptrref.out_target.id == actual_ptrref.out_target.id
    ):
        return val
    if isinstance(rel_or_rvar, pgast.PathRangeVar):
        rvar = rel_or_rvar
    else:
        # Wrap a bare relation in an rvar so paths can be resolved on it.
        assert isinstance(rel_or_rvar, pgast.BaseRelation)
        rvar = relctx.rvar_for_rel(rel_or_rvar, ctx=ctx)
    # Make up a ptrref for the __type__ link on our actual target type
    # and make up a new path_id to access it. Relies on __type__ always
    # being named __type__, but that's fine.
    # (Arranging to actually get a legit pointer ref is pointlessly expensive.)
    el_name = sn.QualName('__', '__type__')
    actual_type_ptrref = irast.SpecialPointerRef(
        name=el_name,
        shortname=el_name,
        out_source=actual_ptrref.out_target,
        # HACK: This is obviously not the right target type, but we don't
        # need it for anything and the pathid never escapes this function.
        out_target=actual_ptrref.out_target,
        out_cardinality=qltypes.Cardinality.AT_MOST_ONE,
    )
    type_pathid = ir_set.path_id.extend(ptrref=actual_type_ptrref)
    # Grab the actual value we have inserted and pull the __type__ out
    rval = pathctx.get_rvar_path_identity_var(
        rvar, ir_set.path_id, env=ctx.env)
    typ = pathctx.get_rvar_path_identity_var(rvar, type_pathid, env=ctx.env)
    typeref_val = dispatch.compile(actual_ptrref.out_target, ctx=ctx)
    # Do the check! Include the ptrref for this concrete class and
    # also the (dynamic) type of the argument, so that we can produce
    # a good error message.
    check_result = pgast.FuncCall(
        name=astutils.edgedb_func('issubclass', ctx=ctx),
        args=[typ, typeref_val],
    )
    # NULL when the check fails, so that raise_on_null fires below.
    maybe_null = pgast.CaseExpr(
        args=[pgast.CaseWhen(expr=check_result, result=rval)])
    maybe_raise = pgast.FuncCall(
        name=astutils.edgedb_func('raise_on_null', ctx=ctx),
        args=[
            maybe_null,
            pgast.StringConstant(val='wrong_object_type'),
            pgast.NamedFuncArg(
                name='msg',
                val=pgast.StringConstant(val='covariance error')
            ),
            pgast.NamedFuncArg(
                name='column',
                val=pgast.StringConstant(val=str(actual_ptrref.id)),
            ),
            pgast.NamedFuncArg(
                name='table',
                val=pgast.TypeCast(
                    arg=typ, type_name=pgast.TypeName(name=('text',))
                ),
            ),
        ],
    )
    if is_subquery:
        # If this is supposed to be a subquery (because it is an
        # update of a single link), wrap the result query in a new one,
        # since we need to access two outputs from it and produce just one
        # from this query
        return pgast.SelectStmt(
            from_clause=[rvar],
            target_list=[pgast.ResTarget(val=maybe_raise)],
        )
    else:
        return maybe_raise
Because edgedb allows subtypes to covariantly override the target
types of links, we need to insert runtime type checks when
the target in a base type being UPDATEd does not match the
target type for this concrete subtype being handled. | check_update_type | python | geldata/gel | edb/pgsql/compiler/dml.py | https://github.com/geldata/gel/blob/master/edb/pgsql/compiler/dml.py | Apache-2.0 |
def process_link_update(
*,
ir_stmt: irast.MutatingStmt,
ir_set: irast.SetE[irast.Pointer],
shape_op: qlast.ShapeOp = qlast.ShapeOp.ASSIGN,
source_typeref: irast.TypeRef,
dml_cte: pgast.CommonTableExpr,
iterator: Optional[pgast.IteratorCTE] = None,
ctx: context.CompilerContextLevel,
policy_ctx: Optional[context.CompilerContextLevel],
) -> Tuple[Optional[pgast.CommonTableExpr], Optional[pgast.CommonTableExpr]]:
"""Perform updates to a link relation as part of a DML statement.
Args:
ir_stmt:
IR of the DML statement.
ir_set:
IR of the INSERT/UPDATE body element.
shape_op:
The operation of the UPDATE body element (:=, +=, -=). For
INSERT this should always be :=.
source_typeref:
An ir.TypeRef instance representing the specific type of an object
being updated.
dml_cte:
CTE representing the SQL INSERT or UPDATE to the main
relation of the DML subject.
iterator:
IR and CTE representing the iterator range in the FOR clause
of the EdgeQL DML statement (if present).
policy_ctx:
Optionally, a context in which to populate overlays that
use the select CTE for overlays instead of the
actual insert CTE. This is needed if an access policy is to
be applied, and requires disabling a potential optimization.
We need separate overlay contexts because default values for
link properties don't currently get populated in our IR, so we
need to do actual SQL DML to get their values. (And so we disallow
their use in policies.)
"""
toplevel = ctx.toplevel_stmt
is_insert = isinstance(ir_stmt, irast.InsertStmt)
rptr = ir_set.expr
ptrref = rptr.ptrref
assert isinstance(ptrref, irast.PointerRef)
target_is_scalar = not irtyputils.is_object(ir_set.typeref)
path_id = ir_set.path_id
# The links in the dml class shape have been derived,
# but we must use the correct specialized link class for the
# base material type.
mptrref = irtyputils.find_actual_ptrref(source_typeref, ptrref)
assert isinstance(mptrref, irast.PointerRef)
target_rvar = relctx.range_for_ptrref(
mptrref, for_mutation=True, only_self=True, ctx=ctx)
assert isinstance(target_rvar, pgast.RelRangeVar)
assert isinstance(target_rvar.relation, pgast.Relation)
target_alias = target_rvar.alias.aliasname
dml_cte_rvar = pgast.RelRangeVar(
relation=dml_cte,
alias=pgast.Alias(
aliasname=ctx.env.aliases.get('m')
)
)
# Turn the IR of the expression on the right side of :=
# into a subquery returning records for the link table.
data_cte, specified_cols = process_link_values(
ir_stmt=ir_stmt,
ir_expr=ir_set,
dml_rvar=dml_cte_rvar,
source_typeref=source_typeref,
target_is_scalar=target_is_scalar,
enforce_cardinality=(shape_op is qlast.ShapeOp.ASSIGN),
dml_cte=dml_cte,
iterator=iterator,
ctx=ctx,
)
toplevel.append_cte(data_cte)
delqry: Optional[pgast.DeleteStmt]
data_select = pgast.SelectStmt(
target_list=[
pgast.ResTarget(
val=pgast.ColumnRef(
name=[data_cte.name, pgast.Star()]
),
),
],
from_clause=[
pgast.RelRangeVar(relation=data_cte),
],
)
if not is_insert and shape_op is not qlast.ShapeOp.APPEND:
source_ref = pathctx.get_rvar_path_identity_var(
dml_cte_rvar,
ir_stmt.subject.path_id,
env=ctx.env,
)
if shape_op is qlast.ShapeOp.SUBTRACT:
data_rvar = relctx.rvar_for_rel(data_select, ctx=ctx)
if target_is_scalar:
# MULTI properties are not distinct, and since `-=` must
# be a proper inverse of `+=` we cannot simply DELETE
# all property values matching the `-=` expression, and
# instead have to resort to careful deletion of no more
# than the number of tuples returned by the expression.
# Here, we rely on the "ctid" system column to refer to
# specific tuples.
#
# DELETE
# FROM <link-tab>
# WHERE
# ctid IN (
# SELECT
# shortlist.ctid
# FROM
# (SELECT
# source,
# target,
# count(target) AS cnt
# FROM
# <data-expr>
# GROUP BY source, target
# ) AS counts,
# LATERAL (
# SELECT
# candidates.ctid
# FROM
# (SELECT
# ctid,
# row_number() OVER (
# PARTITION BY data
# ORDER BY data
# ) AS rn
# FROM
# <link-tab>
# WHERE
# source = counts.source
# AND target = counts.target
# ) AS candidates
# WHERE
# candidates.rn <= counts.cnt
# ) AS shortlist
# );
val_src_ref = pgast.ColumnRef(
name=[data_rvar.alias.aliasname, 'source'],
)
val_tgt_ref = pgast.ColumnRef(
name=[data_rvar.alias.aliasname, 'target'],
)
counts_select = pgast.SelectStmt(
target_list=[
pgast.ResTarget(name='source', val=val_src_ref),
pgast.ResTarget(name='target', val=val_tgt_ref),
pgast.ResTarget(
name='cnt',
val=pgast.FuncCall(
name=('count',),
args=[val_tgt_ref],
),
),
],
from_clause=[data_rvar],
group_clause=[val_src_ref, val_tgt_ref],
)
counts_rvar = relctx.rvar_for_rel(counts_select, ctx=ctx)
counts_alias = counts_rvar.alias.aliasname
target_ref = pgast.ColumnRef(name=[target_alias, 'target'])
candidates_select = pgast.SelectStmt(
target_list=[
pgast.ResTarget(
name='ctid',
val=pgast.ColumnRef(
name=[target_alias, 'ctid'],
),
),
pgast.ResTarget(
name='rn',
val=pgast.FuncCall(
name=('row_number',),
args=[],
over=pgast.WindowDef(
partition_clause=[target_ref],
order_clause=[
pgast.SortBy(node=target_ref),
],
),
),
),
],
from_clause=[target_rvar],
where_clause=astutils.new_binop(
lexpr=astutils.new_binop(
lexpr=pgast.ColumnRef(
name=[counts_alias, 'source'],
),
op='=',
rexpr=pgast.ColumnRef(
name=[target_alias, 'source'],
),
),
op='AND',
rexpr=astutils.new_binop(
lexpr=target_ref,
op='=',
rexpr=pgast.ColumnRef(
name=[counts_alias, 'target']),
),
),
)
candidates_rvar = relctx.rvar_for_rel(
candidates_select, ctx=ctx)
candidates_alias = candidates_rvar.alias.aliasname
shortlist_select = pgast.SelectStmt(
target_list=[
pgast.ResTarget(
name='ctid',
val=pgast.ColumnRef(
name=[candidates_alias, 'ctid'],
),
),
],
from_clause=[candidates_rvar],
where_clause=astutils.new_binop(
lexpr=pgast.ColumnRef(name=[candidates_alias, 'rn']),
op='<=',
rexpr=pgast.ColumnRef(name=[counts_alias, 'cnt']),
),
)
shortlist_rvar = relctx.rvar_for_rel(
shortlist_select, lateral=True, ctx=ctx)
shortlist_alias = shortlist_rvar.alias.aliasname
ctid_select = pgast.SelectStmt(
target_list=[
pgast.ResTarget(
name='ctid',
val=pgast.ColumnRef(name=[shortlist_alias, 'ctid'])
),
],
from_clause=[
counts_rvar,
shortlist_rvar,
],
)
delqry = pgast.DeleteStmt(
relation=target_rvar,
where_clause=astutils.new_binop(
lexpr=pgast.ColumnRef(
name=[target_alias, 'ctid'],
),
op='=',
rexpr=pgast.SubLink(
operator="ANY",
expr=ctid_select,
),
),
returning_list=[
pgast.ResTarget(
val=pgast.ColumnRef(
name=[target_alias, pgast.Star()],
),
)
]
)
else:
# Links are always distinct, so we can simply
# DELETE the tuples matching the `-=` expression.
delqry = pgast.DeleteStmt(
relation=target_rvar,
where_clause=astutils.new_binop(
lexpr=astutils.new_binop(
lexpr=source_ref,
op='=',
rexpr=pgast.ColumnRef(
name=[target_alias, 'source'],
),
),
op='AND',
rexpr=astutils.new_binop(
lexpr=pgast.ColumnRef(
name=[target_alias, 'target'],
),
op='=',
rexpr=pgast.ColumnRef(
name=[data_rvar.alias.aliasname, 'target'],
),
),
),
using_clause=[
dml_cte_rvar,
data_rvar,
],
returning_list=[
pgast.ResTarget(
val=pgast.ColumnRef(
name=[target_alias, pgast.Star()],
),
)
]
)
else:
# Drop all previous link records for this source.
delqry = pgast.DeleteStmt(
relation=target_rvar,
where_clause=astutils.new_binop(
lexpr=source_ref,
op='=',
rexpr=pgast.ColumnRef(
name=[target_alias, 'source'],
),
),
using_clause=[dml_cte_rvar],
returning_list=[
pgast.ResTarget(
val=pgast.ColumnRef(
name=[target_alias, pgast.Star()],
),
)
]
)
delcte = pgast.CommonTableExpr(
name=ctx.env.aliases.get(hint='d'),
query=delqry,
for_dml_stmt=ctx.get_current_dml_stmt(),
)
if shape_op is not qlast.ShapeOp.SUBTRACT:
# Correlate the deletion with INSERT to make sure
# link properties get erased properly and we aren't
# just ON CONFLICT UPDATE-ing the link rows.
# This basically just tacks on a
# WHERE (SELECT count(*) FROM delcte) IS NOT NULL)
del_select = pgast.SelectStmt(
target_list=[
pgast.ResTarget(
val=pgast.FuncCall(
name=['count'],
args=[pgast.ColumnRef(name=[pgast.Star()])],
),
),
],
from_clause=[
pgast.RelRangeVar(relation=delcte),
],
)
data_select.where_clause = astutils.extend_binop(
data_select.where_clause,
pgast.NullTest(arg=del_select, negated=True),
)
pathctx.put_path_value_rvar(
delcte.query, path_id.ptr_path(), target_rvar
)
# Record the effect of this removal in the relation overlay
# context to ensure that references to the link in the result
# of this DML statement yield the expected results.
relctx.add_ptr_rel_overlay(
mptrref,
context.OverlayOp.EXCEPT,
delcte,
path_id=path_id.ptr_path(),
dml_stmts=ctx.dml_stmt_stack,
ctx=ctx
)
toplevel.append_cte(delcte)
else:
delqry = None
if shape_op is qlast.ShapeOp.SUBTRACT:
if mptrref.dir_cardinality(rptr.direction).can_be_zero():
# The pointer is OPTIONAL, no checks or further processing
# is needed.
return None, None
else:
# The pointer is REQUIRED, so we must take the result of
# the subtraction produced by the "delcte" above, apply it
# as a subtracting overlay, and re-compute the pointer relation
# to see if there are any newly created empty sets.
#
# The actual work is done via raise_on_null injection performed
# by "process_link_values()" below (hence "enforce_cardinality").
#
# The other part of this enforcement is in doing it when a
# target is deleted and the link policy is ALLOW. This is
# handled in _get_outline_link_trigger_proc_text in
# pgsql/delta.py.
# Turn `foo := <expr>` into just `foo`.
ptr_ref_set = irast.Set(
path_id=ir_set.path_id,
path_scope_id=ir_set.path_scope_id,
typeref=ir_set.typeref,
expr=ir_set.expr.replace(expr=None),
)
assert irutils.is_set_instance(ptr_ref_set, irast.Pointer)
with ctx.new() as subctx:
# TODO: Do we really need a copy here? things /seem/
# to work without it
subctx.rel_overlays = subctx.rel_overlays.copy()
relctx.add_ptr_rel_overlay(
ptrref,
context.OverlayOp.EXCEPT,
delcte,
path_id=path_id.ptr_path(),
ctx=subctx
)
check_cte, _ = process_link_values(
ir_stmt=ir_stmt,
ir_expr=ptr_ref_set,
dml_rvar=dml_cte_rvar,
source_typeref=source_typeref,
target_is_scalar=target_is_scalar,
enforce_cardinality=True,
dml_cte=dml_cte,
iterator=iterator,
ctx=subctx,
)
toplevel.append_cte(check_cte)
return None, check_cte
cols = [pgast.ColumnRef(name=[col]) for col in specified_cols]
conflict_cols = ['source', 'target']
if is_insert or target_is_scalar:
conflict_clause = None
elif (
len(cols) == len(conflict_cols)
and delqry is not None
and not policy_ctx
):
# There are no link properties, so we can optimize the
# link replacement operation by omitting the overlapping
# link rows from deletion.
filter_select = pgast.SelectStmt(
target_list=[
pgast.ResTarget(
val=pgast.ColumnRef(name=['source']),
),
pgast.ResTarget(
val=pgast.ColumnRef(name=['target']),
),
],
from_clause=[pgast.RelRangeVar(relation=data_cte)],
)
delqry.where_clause = astutils.extend_binop(
delqry.where_clause,
astutils.new_binop(
lexpr=pgast.ImplicitRowExpr(
args=[
pgast.ColumnRef(name=['source']),
pgast.ColumnRef(name=['target']),
],
),
rexpr=pgast.SubLink(
operator="ALL",
expr=filter_select,
),
op='!=',
)
)
conflict_clause = pgast.OnConflictClause(
action='nothing',
infer=pgast.InferClause(
index_elems=[
pgast.ColumnRef(name=[col]) for col in conflict_cols
]
),
)
else:
# Inserting rows into the link table may produce cardinality
# constraint violations, since the INSERT into the link table
# is executed in the snapshot where the above DELETE from
# the link table is not visible. Hence, we need to use
# the ON CONFLICT clause to resolve this.
conflict_inference = [
pgast.ColumnRef(name=[col])
for col in conflict_cols
]
target_cols = [
col.name[0]
for col in cols
if isinstance(col.name[0], str) and col.name[0] not in conflict_cols
]
if len(target_cols) == 0:
conflict_clause = pgast.OnConflictClause(
action='nothing',
infer=pgast.InferClause(
index_elems=conflict_inference
)
)
else:
conflict_data = pgast.RowExpr(
args=[
pgast.ColumnRef(name=['excluded', col])
for col in target_cols
],
)
conflict_clause = pgast.OnConflictClause(
action='update',
infer=pgast.InferClause(
index_elems=conflict_inference
),
target_list=[
pgast.MultiAssignRef(
columns=target_cols,
source=conflict_data
)
]
)
update = pgast.CommonTableExpr(
name=ctx.env.aliases.get(hint='i'),
query=pgast.InsertStmt(
relation=target_rvar,
select_stmt=data_select,
cols=[
pgast.InsertTarget(name=downcast(str, col.name[0]))
for col in cols
],
on_conflict=conflict_clause,
returning_list=[
pgast.ResTarget(
val=pgast.ColumnRef(name=[pgast.Star()])
)
]
),
for_dml_stmt=ctx.get_current_dml_stmt(),
)
pathctx.put_path_value_rvar(update.query, path_id.ptr_path(), target_rvar)
def register_overlays(
overlay_cte: pgast.CommonTableExpr, octx: context.CompilerContextLevel
) -> None:
assert isinstance(mptrref, irast.PointerRef)
# Record the effect of this insertion in the relation overlay
# context to ensure that references to the link in the result
# of this DML statement yield the expected results.
if shape_op is qlast.ShapeOp.APPEND and not target_is_scalar:
# When doing an UPDATE with +=, we need to do an anti-join
# based filter to filter out links that were already present
# and have been re-added.
relctx.add_ptr_rel_overlay(
mptrref,
context.OverlayOp.FILTER,
overlay_cte,
dml_stmts=ctx.dml_stmt_stack,
path_id=path_id.ptr_path(),
ctx=octx
)
relctx.add_ptr_rel_overlay(
mptrref,
context.OverlayOp.UNION,
overlay_cte,
dml_stmts=ctx.dml_stmt_stack,
path_id=path_id.ptr_path(),
ctx=octx
)
if policy_ctx:
policy_ctx.rel_overlays = policy_ctx.rel_overlays.copy()
register_overlays(data_cte, policy_ctx)
register_overlays(update, ctx)
return update, None | Perform updates to a link relation as part of a DML statement.
Args:
ir_stmt:
IR of the DML statement.
ir_set:
IR of the INSERT/UPDATE body element.
shape_op:
The operation of the UPDATE body element (:=, +=, -=). For
INSERT this should always be :=.
source_typeref:
An ir.TypeRef instance representing the specific type of an object
being updated.
dml_cte:
CTE representing the SQL INSERT or UPDATE to the main
relation of the DML subject.
iterator:
IR and CTE representing the iterator range in the FOR clause
of the EdgeQL DML statement (if present).
policy_ctx:
Optionally, a context in which to populate overlays that
use the select CTE for overlays instead of the
actual insert CTE. This is needed if an access policy is to
be applied, and requires disabling a potential optimization.
We need separate overlay contexts because default values for
link properties don't currently get populated in our IR, so we
need to do actual SQL DML to get their values. (And so we disallow
their use in policies.) | process_link_update | python | geldata/gel | edb/pgsql/compiler/dml.py | https://github.com/geldata/gel/blob/master/edb/pgsql/compiler/dml.py | Apache-2.0 |
def process_link_values(
*,
ir_stmt: irast.MutatingStmt,
ir_expr: irast.SetE[irast.Pointer],
dml_rvar: pgast.PathRangeVar,
dml_cte: pgast.CommonTableExpr,
source_typeref: irast.TypeRef,
target_is_scalar: bool,
enforce_cardinality: bool,
iterator: Optional[pgast.IteratorCTE],
ctx: context.CompilerContextLevel,
) -> Tuple[pgast.CommonTableExpr, List[str]]:
"""Produce a pointer relation for a given body element of an INSERT/UPDATE.
Given an INSERT/UPDATE body shape element that mutates a MULTI pointer,
produce a (source, target [, link properties]) relation as a CTE and
return it along with a list of relation attribute names.
Args:
ir_stmt:
IR of the DML statement.
ir_set:
IR of the INSERT/UPDATE body element.
dml_rvar:
The RangeVar over the SQL INSERT/UPDATE of the main relation
of the object being updated.
dml_cte:
CTE representing the SQL INSERT or UPDATE to the main
relation of the DML subject.
source_typeref:
An ir.TypeRef instance representing the specific type of an object
being updated.
target_is_scalar:
True, if mutating a property, False if a link.
enforce_cardinality:
Whether an explicit empty set check should be generated.
Used for REQUIRED pointers.
iterator:
IR and CTE representing the iterator range in the FOR clause
of the EdgeQL DML statement (if present).
Returns:
A tuple containing the pointer relation CTE and a list of attribute
names in it.
"""
old_dml_count = len(ctx.dml_stmts)
with ctx.newrel() as subrelctx:
# For inserts, we need to use the main DML statement as the
# iterator, while for updates, we need to use the DML range
# CTE as the iterator (and so arrange for it to be passed in).
# This is because, for updates, we need to execute any nested
# DML once for each row in the range over all types, while
# dml_cte contains just one subtype.
if isinstance(ir_stmt, irast.InsertStmt):
subrelctx.enclosing_cte_iterator = pgast.IteratorCTE(
path_id=ir_stmt.subject.path_id, cte=dml_cte,
parent=iterator)
else:
subrelctx.enclosing_cte_iterator = iterator
row_query = subrelctx.rel
merge_iterator(iterator, row_query, ctx=subrelctx)
relctx.include_rvar(row_query, dml_rvar, pull_namespace=False,
path_id=ir_stmt.subject.path_id, ctx=subrelctx)
subrelctx.path_scope[ir_stmt.subject.path_id] = row_query
ir_rptr = ir_expr.expr
ptrref = ir_rptr.ptrref
if ptrref.material_ptr is not None:
ptrref = ptrref.material_ptr
assert isinstance(ptrref, irast.PointerRef)
ptr_is_multi_required = (
ptrref.out_cardinality == qltypes.Cardinality.AT_LEAST_ONE
)
with subrelctx.newscope() as sctx, sctx.subrel() as input_rel_ctx:
input_rel = input_rel_ctx.rel
input_rel_ctx.expr_exposed = False
input_rel_ctx.volatility_ref = (
lambda _stmt, _ctx: pathctx.get_path_identity_var(
row_query, ir_stmt.subject.path_id,
env=input_rel_ctx.env),)
# Check if some nested Set provides a shape that is
# visible here.
shape_expr = ir_expr.shape_source or ir_expr
# Register that this shape needs to be compiled for use by DML,
# so that the values will be there for us to grab later.
input_rel_ctx.shapes_needed_by_dml.add(shape_expr)
if ptr_is_multi_required and enforce_cardinality:
input_rel_ctx.force_optional |= {ir_expr.path_id}
dispatch.visit(ir_expr, ctx=input_rel_ctx)
input_stmt: pgast.Query = input_rel
input_rvar = pgast.RangeSubselect(
subquery=input_rel,
lateral=True,
alias=pgast.Alias(
aliasname=ctx.env.aliases.get('val')
)
)
if len(ctx.dml_stmts) > old_dml_count:
# If there were any nested inserts, we need to join them in.
pathctx.put_rvar_path_bond(input_rvar, ir_stmt.subject.path_id)
relctx.include_rvar(row_query, input_rvar,
path_id=ir_stmt.subject.path_id,
ctx=ctx)
source_data: Dict[str, Tuple[irast.PathId, pgast.BaseExpr]] = {}
if isinstance(input_stmt, pgast.SelectStmt) and input_stmt.op is not None:
# UNION
assert input_stmt.rarg
input_stmt = input_stmt.rarg
path_id = ir_expr.path_id
target_ref: pgast.BaseExpr
if shape_expr.shape:
for element, _ in shape_expr.shape:
if not element.path_id.is_linkprop_path():
continue
val = pathctx.get_rvar_path_value_var(
input_rvar, element.path_id, env=ctx.env)
rptr = element.path_id.rptr()
assert isinstance(rptr, irast.PointerRef)
actual_rptr = irtyputils.find_actual_ptrref(source_typeref, rptr)
ptr_info = pg_types.get_ptrref_storage_info(actual_rptr)
real_path_id = path_id.ptr_path().extend(ptrref=actual_rptr)
source_data.setdefault(
ptr_info.column_name, (real_path_id, val))
if not target_is_scalar and 'target' not in source_data:
target_ref = pathctx.get_rvar_path_identity_var(
input_rvar, path_id, env=ctx.env)
else:
if target_is_scalar:
target_ref = pathctx.get_rvar_path_value_var(
input_rvar, path_id, env=ctx.env)
target_ref = output.output_as_value(target_ref, env=ctx.env)
else:
target_ref = pathctx.get_rvar_path_identity_var(
input_rvar, path_id, env=ctx.env)
if isinstance(ir_stmt, irast.UpdateStmt) and not target_is_scalar:
actual_ptrref = irtyputils.find_actual_ptrref(source_typeref, ptrref)
target_ref = check_update_type(
target_ref,
input_rvar,
is_subquery=False,
ir_stmt=ir_stmt,
ir_set=ir_expr,
shape_ptrref=ptrref,
actual_ptrref=actual_ptrref,
ctx=ctx,
)
if ptr_is_multi_required and enforce_cardinality:
target_ref = pgast.FuncCall(
name=astutils.edgedb_func('raise_on_null', ctx=ctx),
args=[
target_ref,
pgast.StringConstant(val='not_null_violation'),
pgast.NamedFuncArg(
name='msg',
val=pgast.StringConstant(val='missing value'),
),
pgast.NamedFuncArg(
name='column',
val=pgast.StringConstant(val=str(ptrref.id)),
),
],
)
source_data['target'] = (path_id, target_ref)
row_query.target_list.append(
pgast.ResTarget(
name='source',
val=pathctx.get_rvar_path_identity_var(
dml_rvar,
ir_stmt.subject.path_id,
env=ctx.env,
),
),
)
specified_cols = ['source']
for col, (col_path_id, expr) in source_data.items():
row_query.target_list.append(
pgast.ResTarget(
val=expr,
name=col,
),
)
specified_cols.append(col)
# XXX: This is dodgy. Do we need to do the dynamic rvar thing?
# XXX: And can we make defaults work?
pathctx._put_path_output_var(
row_query, col_path_id, aspect=pgce.PathAspect.VALUE,
var=pgast.ColumnRef(name=[col]),
)
link_rows = pgast.CommonTableExpr(
query=row_query,
name=ctx.env.aliases.get(hint='r'),
for_dml_stmt=ctx.get_current_dml_stmt(),
)
return link_rows, specified_cols | Produce a pointer relation for a given body element of an INSERT/UPDATE.
Given an INSERT/UPDATE body shape element that mutates a MULTI pointer,
produce a (source, target [, link properties]) relation as a CTE and
return it along with a list of relation attribute names.
Args:
ir_stmt:
IR of the DML statement.
ir_set:
IR of the INSERT/UPDATE body element.
dml_rvar:
The RangeVar over the SQL INSERT/UPDATE of the main relation
of the object being updated.
dml_cte:
CTE representing the SQL INSERT or UPDATE to the main
relation of the DML subject.
source_typeref:
An ir.TypeRef instance representing the specific type of an object
being updated.
target_is_scalar:
True, if mutating a property, False if a link.
enforce_cardinality:
Whether an explicit empty set check should be generated.
Used for REQUIRED pointers.
iterator:
IR and CTE representing the iterator range in the FOR clause
of the EdgeQL DML statement (if present).
Returns:
A tuple containing the pointer relation CTE and a list of attribute
names in it. | process_link_values | python | geldata/gel | edb/pgsql/compiler/dml.py | https://github.com/geldata/gel/blob/master/edb/pgsql/compiler/dml.py | Apache-2.0 |
def process_delete_body(
*,
ir_stmt: irast.DeleteStmt,
delete_cte: pgast.CommonTableExpr,
typeref: irast.TypeRef,
ctx: context.CompilerContextLevel,
) -> None:
"""Finalize DELETE on an object.
The actual DELETE was generated in gen_dml_cte, so we only
have work to do here if there are link tables to clean up.
"""
ctx.toplevel_stmt.append_cte(delete_cte)
put_iterator_bond(ctx.enclosing_cte_iterator, delete_cte.query)
pointers = ir_stmt.links_to_delete[typeref.id]
for ptrref in pointers:
target_rvar = relctx.range_for_ptrref(
ptrref, for_mutation=True, only_self=True, ctx=ctx)
assert isinstance(target_rvar, pgast.RelRangeVar)
range_rvar = pgast.RelRangeVar(
relation=delete_cte,
alias=pgast.Alias(
aliasname=ctx.env.aliases.get(hint='range')
)
)
where_clause = astutils.new_binop(
lexpr=pgast.ColumnRef(name=[
target_rvar.alias.aliasname, 'source'
]),
op='=',
rexpr=pathctx.get_rvar_path_identity_var(
range_rvar, ir_stmt.result.path_id, env=ctx.env)
)
del_query = pgast.DeleteStmt(
relation=target_rvar,
where_clause=where_clause,
using_clause=[range_rvar],
)
ctx.toplevel_stmt.append_cte(pgast.CommonTableExpr(
query=del_query,
name=ctx.env.aliases.get(hint='mlink'),
for_dml_stmt=ctx.get_current_dml_stmt(),
)) | Finalize DELETE on an object.
The actual DELETE was generated in gen_dml_cte, so we only
have work to do here if there are link tables to clean up. | process_delete_body | python | geldata/gel | edb/pgsql/compiler/dml.py | https://github.com/geldata/gel/blob/master/edb/pgsql/compiler/dml.py | Apache-2.0 |
def request(self, method, uri, headers=None, bodyProducer=None):
"""
Issue a new request via the configured proxy.
"""
uri = _ensureValidURI(uri.strip())
# Cache *all* connections under the same key, since we are only
# connecting to a single destination, the proxy:
key = ("http-proxy", self._proxyEndpoint)
# To support proxying HTTPS via CONNECT, we will use key
# ("http-proxy-CONNECT", scheme, host, port), and an endpoint that
# wraps _proxyEndpoint with an additional callback to do the CONNECT.
return self._requestWithEndpoint(key, self._proxyEndpoint, method,
URI.fromBytes(uri), headers,
bodyProducer, uri) | Issue a new request via the configured proxy. | request | python | wistbean/learn_python3_spider | stackoverflow/venv/lib/python3.6/site-packages/twisted/web/client.py | https://github.com/wistbean/learn_python3_spider/blob/master/stackoverflow/venv/lib/python3.6/site-packages/twisted/web/client.py | MIT |
def __init__(self, uri):
"""
Create a fake Urllib2 request.
@param uri: Request URI.
@type uri: L{bytes}
"""
self.uri = nativeString(uri)
self.headers = Headers()
_uri = URI.fromBytes(uri)
self.type = nativeString(_uri.scheme)
self.host = nativeString(_uri.host)
if (_uri.scheme, _uri.port) not in ((b'http', 80), (b'https', 443)):
# If it's not a schema on the regular port, add the port.
self.host += ":" + str(_uri.port)
if _PY3:
self.origin_req_host = nativeString(_uri.host)
self.unverifiable = lambda _: False | Create a fake Urllib2 request.
@param uri: Request URI.
@type uri: L{bytes} | __init__ | python | wistbean/learn_python3_spider | stackoverflow/venv/lib/python3.6/site-packages/twisted/web/client.py | https://github.com/wistbean/learn_python3_spider/blob/master/stackoverflow/venv/lib/python3.6/site-packages/twisted/web/client.py | MIT |
def request(self, method, uri, headers=None, bodyProducer=None):
"""
Issue a new request to the wrapped L{Agent}.
Send a I{Cookie} header if a cookie for C{uri} is stored in
L{CookieAgent.cookieJar}. Cookies are automatically extracted and
stored from requests.
If a C{'cookie'} header appears in C{headers} it will override the
automatic cookie header obtained from the cookie jar.
@see: L{Agent.request}
"""
if headers is None:
headers = Headers()
lastRequest = _FakeUrllib2Request(uri)
# Setting a cookie header explicitly will disable automatic request
# cookies.
if not headers.hasHeader(b'cookie'):
self.cookieJar.add_cookie_header(lastRequest)
cookieHeader = lastRequest.get_header('Cookie', None)
if cookieHeader is not None:
headers = headers.copy()
headers.addRawHeader(b'cookie', networkString(cookieHeader))
d = self._agent.request(method, uri, headers, bodyProducer)
d.addCallback(self._extractCookies, lastRequest)
return d | Issue a new request to the wrapped L{Agent}.
Send a I{Cookie} header if a cookie for C{uri} is stored in
L{CookieAgent.cookieJar}. Cookies are automatically extracted and
stored from requests.
If a C{'cookie'} header appears in C{headers} it will override the
automatic cookie header obtained from the cookie jar.
@see: L{Agent.request} | request | python | wistbean/learn_python3_spider | stackoverflow/venv/lib/python3.6/site-packages/twisted/web/client.py | https://github.com/wistbean/learn_python3_spider/blob/master/stackoverflow/venv/lib/python3.6/site-packages/twisted/web/client.py | MIT |
def _extractCookies(self, response, request):
"""
Extract response cookies and store them in the cookie jar.
@type response: L{twisted.web.iweb.IResponse}
@param response: Twisted Web response.
@param request: A urllib2 compatible request object.
"""
resp = _FakeUrllib2Response(response)
self.cookieJar.extract_cookies(resp, request)
return response | Extract response cookies and store them in the cookie jar.
@type response: L{twisted.web.iweb.IResponse}
@param response: Twisted Web response.
@param request: A urllib2 compatible request object. | _extractCookies | python | wistbean/learn_python3_spider | stackoverflow/venv/lib/python3.6/site-packages/twisted/web/client.py | https://github.com/wistbean/learn_python3_spider/blob/master/stackoverflow/venv/lib/python3.6/site-packages/twisted/web/client.py | MIT |
def deliverBody(self, protocol):
"""
Override C{deliverBody} to wrap the given C{protocol} with
L{_GzipProtocol}.
"""
self.original.deliverBody(_GzipProtocol(protocol, self.original)) | Override C{deliverBody} to wrap the given C{protocol} with
L{_GzipProtocol}. | deliverBody | python | wistbean/learn_python3_spider | stackoverflow/venv/lib/python3.6/site-packages/twisted/web/client.py | https://github.com/wistbean/learn_python3_spider/blob/master/stackoverflow/venv/lib/python3.6/site-packages/twisted/web/client.py | MIT |
def dataReceived(self, data):
"""
Decompress C{data} with the zlib decompressor, forwarding the raw data
to the original protocol.
"""
try:
rawData = self._zlibDecompress.decompress(data)
except zlib.error:
raise ResponseFailed([Failure()], self._response)
if rawData:
self.original.dataReceived(rawData) | Decompress C{data} with the zlib decompressor, forwarding the raw data
to the original protocol. | dataReceived | python | wistbean/learn_python3_spider | stackoverflow/venv/lib/python3.6/site-packages/twisted/web/client.py | https://github.com/wistbean/learn_python3_spider/blob/master/stackoverflow/venv/lib/python3.6/site-packages/twisted/web/client.py | MIT |
def connectionLost(self, reason):
"""
Forward the connection lost event, flushing remaining data from the
decompressor if any.
"""
try:
rawData = self._zlibDecompress.flush()
except zlib.error:
raise ResponseFailed([reason, Failure()], self._response)
if rawData:
self.original.dataReceived(rawData)
self.original.connectionLost(reason) | Forward the connection lost event, flushing remaining data from the
decompressor if any. | connectionLost | python | wistbean/learn_python3_spider | stackoverflow/venv/lib/python3.6/site-packages/twisted/web/client.py | https://github.com/wistbean/learn_python3_spider/blob/master/stackoverflow/venv/lib/python3.6/site-packages/twisted/web/client.py | MIT |
def request(self, method, uri, headers=None, bodyProducer=None):
"""
Send a client request which declares supporting compressed content.
@see: L{Agent.request}.
"""
if headers is None:
headers = Headers()
else:
headers = headers.copy()
headers.addRawHeader(b'accept-encoding', self._supported)
deferred = self._agent.request(method, uri, headers, bodyProducer)
return deferred.addCallback(self._handleResponse) | Send a client request which declares supporting compressed content.
@see: L{Agent.request}. | request | python | wistbean/learn_python3_spider | stackoverflow/venv/lib/python3.6/site-packages/twisted/web/client.py | https://github.com/wistbean/learn_python3_spider/blob/master/stackoverflow/venv/lib/python3.6/site-packages/twisted/web/client.py | MIT |
def _handleResponse(self, response):
"""
Check if the response is encoded, and wrap it to handle decompression.
"""
contentEncodingHeaders = response.headers.getRawHeaders(
b'content-encoding', [])
contentEncodingHeaders = b','.join(contentEncodingHeaders).split(b',')
while contentEncodingHeaders:
name = contentEncodingHeaders.pop().strip()
decoder = self._decoders.get(name)
if decoder is not None:
response = decoder(response)
else:
# Add it back
contentEncodingHeaders.append(name)
break
if contentEncodingHeaders:
response.headers.setRawHeaders(
b'content-encoding', [b','.join(contentEncodingHeaders)])
else:
response.headers.removeHeader(b'content-encoding')
return response | Check if the response is encoded, and wrap it to handle decompression. | _handleResponse | python | wistbean/learn_python3_spider | stackoverflow/venv/lib/python3.6/site-packages/twisted/web/client.py | https://github.com/wistbean/learn_python3_spider/blob/master/stackoverflow/venv/lib/python3.6/site-packages/twisted/web/client.py | MIT |
def request(self, method, uri, headers=None, bodyProducer=None):
"""
Send a client request following HTTP redirects.
@see: L{Agent.request}.
"""
deferred = self._agent.request(method, uri, headers, bodyProducer)
return deferred.addCallback(
self._handleResponse, method, uri, headers, 0) | Send a client request following HTTP redirects.
@see: L{Agent.request}. | request | python | wistbean/learn_python3_spider | stackoverflow/venv/lib/python3.6/site-packages/twisted/web/client.py | https://github.com/wistbean/learn_python3_spider/blob/master/stackoverflow/venv/lib/python3.6/site-packages/twisted/web/client.py | MIT |
def _resolveLocation(self, requestURI, location):
"""
Resolve the redirect location against the request I{URI}.
@type requestURI: C{bytes}
@param requestURI: The request I{URI}.
@type location: C{bytes}
@param location: The redirect location.
@rtype: C{bytes}
@return: Final resolved I{URI}.
"""
return _urljoin(requestURI, location) | Resolve the redirect location against the request I{URI}.
@type requestURI: C{bytes}
@param requestURI: The request I{URI}.
@type location: C{bytes}
@param location: The redirect location.
@rtype: C{bytes}
@return: Final resolved I{URI}. | _resolveLocation | python | wistbean/learn_python3_spider | stackoverflow/venv/lib/python3.6/site-packages/twisted/web/client.py | https://github.com/wistbean/learn_python3_spider/blob/master/stackoverflow/venv/lib/python3.6/site-packages/twisted/web/client.py | MIT |
def _handleRedirect(self, response, method, uri, headers, redirectCount):
"""
Handle a redirect response, checking the number of redirects already
followed, and extracting the location header fields.
"""
if redirectCount >= self._redirectLimit:
err = error.InfiniteRedirection(
response.code,
b'Infinite redirection detected',
location=uri)
raise ResponseFailed([Failure(err)], response)
locationHeaders = response.headers.getRawHeaders(b'location', [])
if not locationHeaders:
err = error.RedirectWithNoLocation(
response.code, b'No location header field', uri)
raise ResponseFailed([Failure(err)], response)
location = self._resolveLocation(uri, locationHeaders[0])
deferred = self._agent.request(method, location, headers)
def _chainResponse(newResponse):
newResponse.setPreviousResponse(response)
return newResponse
deferred.addCallback(_chainResponse)
return deferred.addCallback(
self._handleResponse, method, uri, headers, redirectCount + 1) | Handle a redirect response, checking the number of redirects already
followed, and extracting the location header fields. | _handleRedirect | python | wistbean/learn_python3_spider | stackoverflow/venv/lib/python3.6/site-packages/twisted/web/client.py | https://github.com/wistbean/learn_python3_spider/blob/master/stackoverflow/venv/lib/python3.6/site-packages/twisted/web/client.py | MIT |
def _handleResponse(self, response, method, uri, headers, redirectCount):
"""
Handle the response, making another request if it indicates a redirect.
"""
if response.code in self._redirectResponses:
if method not in (b'GET', b'HEAD'):
err = error.PageRedirect(response.code, location=uri)
raise ResponseFailed([Failure(err)], response)
return self._handleRedirect(response, method, uri, headers,
redirectCount)
elif response.code in self._seeOtherResponses:
return self._handleRedirect(response, b'GET', uri, headers,
redirectCount)
return response | Handle the response, making another request if it indicates a redirect. | _handleResponse | python | wistbean/learn_python3_spider | stackoverflow/venv/lib/python3.6/site-packages/twisted/web/client.py | https://github.com/wistbean/learn_python3_spider/blob/master/stackoverflow/venv/lib/python3.6/site-packages/twisted/web/client.py | MIT |
def __init__(self, status, message, deferred):
"""
@param status: Status of L{IResponse}
@ivar status: L{int}
@param message: Message of L{IResponse}
@type message: L{bytes}
@param deferred: deferred to fire when response is complete
@type deferred: L{Deferred} firing with L{bytes}
"""
self.deferred = deferred
self.status = status
self.message = message
self.dataBuffer = [] | @param status: Status of L{IResponse}
@ivar status: L{int}
@param message: Message of L{IResponse}
@type message: L{bytes}
@param deferred: deferred to fire when response is complete
@type deferred: L{Deferred} firing with L{bytes} | __init__ | python | wistbean/learn_python3_spider | stackoverflow/venv/lib/python3.6/site-packages/twisted/web/client.py | https://github.com/wistbean/learn_python3_spider/blob/master/stackoverflow/venv/lib/python3.6/site-packages/twisted/web/client.py | MIT |
def dataReceived(self, data):
"""
Accumulate some more bytes from the response.
"""
self.dataBuffer.append(data) | Accumulate some more bytes from the response. | dataReceived | python | wistbean/learn_python3_spider | stackoverflow/venv/lib/python3.6/site-packages/twisted/web/client.py | https://github.com/wistbean/learn_python3_spider/blob/master/stackoverflow/venv/lib/python3.6/site-packages/twisted/web/client.py | MIT |
def connectionLost(self, reason):
"""
Deliver the accumulated response bytes to the waiting L{Deferred}, if
the response body has been completely received without error.
"""
if reason.check(ResponseDone):
self.deferred.callback(b''.join(self.dataBuffer))
elif reason.check(PotentialDataLoss):
self.deferred.errback(
PartialDownloadError(self.status, self.message,
b''.join(self.dataBuffer)))
else:
self.deferred.errback(reason) | Deliver the accumulated response bytes to the waiting L{Deferred}, if
the response body has been completely received without error. | connectionLost | python | wistbean/learn_python3_spider | stackoverflow/venv/lib/python3.6/site-packages/twisted/web/client.py | https://github.com/wistbean/learn_python3_spider/blob/master/stackoverflow/venv/lib/python3.6/site-packages/twisted/web/client.py | MIT |
def cancel(deferred):
"""
Cancel a L{readBody} call, close the connection to the HTTP server
immediately, if it is still open.
@param deferred: The cancelled L{defer.Deferred}.
"""
abort = getAbort()
if abort is not None:
abort() | Cancel a L{readBody} call, close the connection to the HTTP server
immediately, if it is still open.
@param deferred: The cancelled L{defer.Deferred}. | readBody.cancel | python | wistbean/learn_python3_spider | stackoverflow/venv/lib/python3.6/site-packages/twisted/web/client.py | https://github.com/wistbean/learn_python3_spider/blob/master/stackoverflow/venv/lib/python3.6/site-packages/twisted/web/client.py | MIT |
def readBody(response):
"""
Get the body of an L{IResponse} and return it as a byte string.
This is a helper function for clients that don't want to incrementally
receive the body of an HTTP response.
@param response: The HTTP response for which the body will be read.
@type response: L{IResponse} provider
@return: A L{Deferred} which will fire with the body of the response.
Cancelling it will close the connection to the server immediately.
"""
def cancel(deferred):
"""
Cancel a L{readBody} call, close the connection to the HTTP server
immediately, if it is still open.
@param deferred: The cancelled L{defer.Deferred}.
"""
abort = getAbort()
if abort is not None:
abort()
d = defer.Deferred(cancel)
protocol = _ReadBodyProtocol(response.code, response.phrase, d)
def getAbort():
return getattr(protocol.transport, 'abortConnection', None)
response.deliverBody(protocol)
if protocol.transport is not None and getAbort() is None:
warnings.warn(
'Using readBody with a transport that does not have an '
'abortConnection method',
category=DeprecationWarning,
stacklevel=2)
return d | Get the body of an L{IResponse} and return it as a byte string.
This is a helper function for clients that don't want to incrementally
receive the body of an HTTP response.
@param response: The HTTP response for which the body will be read.
@type response: L{IResponse} provider
@return: A L{Deferred} which will fire with the body of the response.
Cancelling it will close the connection to the server immediately. | readBody | python | wistbean/learn_python3_spider | stackoverflow/venv/lib/python3.6/site-packages/twisted/web/client.py | https://github.com/wistbean/learn_python3_spider/blob/master/stackoverflow/venv/lib/python3.6/site-packages/twisted/web/client.py | MIT |
def saveMark(self):
'''Get the line number and column of the last character parsed'''
# This gets replaced during dataReceived, restored afterwards
return (self.lineno, self.colno) | Get the line number and column of the last character parsed | saveMark | python | wistbean/learn_python3_spider | stackoverflow/venv/lib/python3.6/site-packages/twisted/web/sux.py | https://github.com/wistbean/learn_python3_spider/blob/master/stackoverflow/venv/lib/python3.6/site-packages/twisted/web/sux.py | MIT |
def _buildStateTable(self):
'''Return a dictionary of begin, do, end state function tuples'''
# _buildStateTable leaves something to be desired but it does what it
# does.. probably slowly, so I'm doing some evil caching so it doesn't
# get called more than once per class.
stateTable = getattr(self.__class__, '__stateTable', None)
if stateTable is None:
stateTable = self.__class__.__stateTable = zipfndict(
*[prefixedMethodObjDict(self, prefix)
for prefix in ('begin_', 'do_', 'end_')])
return stateTable | Return a dictionary of begin, do, end state function tuples | _buildStateTable | python | wistbean/learn_python3_spider | stackoverflow/venv/lib/python3.6/site-packages/twisted/web/sux.py | https://github.com/wistbean/learn_python3_spider/blob/master/stackoverflow/venv/lib/python3.6/site-packages/twisted/web/sux.py | MIT |
def connectionLost(self, reason):
"""
End the last state we were in.
"""
stateTable = self._buildStateTable()
stateTable[self.state][END_HANDLER]() | End the last state we were in. | connectionLost | python | wistbean/learn_python3_spider | stackoverflow/venv/lib/python3.6/site-packages/twisted/web/sux.py | https://github.com/wistbean/learn_python3_spider/blob/master/stackoverflow/venv/lib/python3.6/site-packages/twisted/web/sux.py | MIT |
def gotTagStart(self, name, attributes):
'''Encountered an opening tag.
Default behaviour is to print.'''
print('begin', name, attributes) | Encountered an opening tag.
Default behaviour is to print. | gotTagStart | python | wistbean/learn_python3_spider | stackoverflow/venv/lib/python3.6/site-packages/twisted/web/sux.py | https://github.com/wistbean/learn_python3_spider/blob/master/stackoverflow/venv/lib/python3.6/site-packages/twisted/web/sux.py | MIT |
def gotText(self, data):
'''Encountered text
Default behaviour is to print.'''
print('text:', repr(data)) | Encountered text
Default behaviour is to print. | gotText | python | wistbean/learn_python3_spider | stackoverflow/venv/lib/python3.6/site-packages/twisted/web/sux.py | https://github.com/wistbean/learn_python3_spider/blob/master/stackoverflow/venv/lib/python3.6/site-packages/twisted/web/sux.py | MIT |
def gotEntityReference(self, entityRef):
'''Encountered mnemonic entity reference
Default behaviour is to print.'''
print('entityRef: &%s;' % entityRef) | Encountered mnemonic entity reference
Default behaviour is to print. | gotEntityReference | python | wistbean/learn_python3_spider | stackoverflow/venv/lib/python3.6/site-packages/twisted/web/sux.py | https://github.com/wistbean/learn_python3_spider/blob/master/stackoverflow/venv/lib/python3.6/site-packages/twisted/web/sux.py | MIT |
def gotComment(self, comment):
'''Encountered comment.
Default behaviour is to ignore.'''
pass | Encountered comment.
Default behaviour is to ignore. | gotComment | python | wistbean/learn_python3_spider | stackoverflow/venv/lib/python3.6/site-packages/twisted/web/sux.py | https://github.com/wistbean/learn_python3_spider/blob/master/stackoverflow/venv/lib/python3.6/site-packages/twisted/web/sux.py | MIT |
def gotCData(self, cdata):
'''Encountered CDATA
Default behaviour is to call the gotText method'''
self.gotText(cdata) | Encountered CDATA
Default behaviour is to call the gotText method | gotCData | python | wistbean/learn_python3_spider | stackoverflow/venv/lib/python3.6/site-packages/twisted/web/sux.py | https://github.com/wistbean/learn_python3_spider/blob/master/stackoverflow/venv/lib/python3.6/site-packages/twisted/web/sux.py | MIT |
def gotDoctype(self, doctype):
"""Encountered DOCTYPE
This is really grotty: it basically just gives you everything between
'<!DOCTYPE' and '>' as an argument.
"""
print('!DOCTYPE', repr(doctype)) | Encountered DOCTYPE
This is really grotty: it basically just gives you everything between
'<!DOCTYPE' and '>' as an argument. | gotDoctype | python | wistbean/learn_python3_spider | stackoverflow/venv/lib/python3.6/site-packages/twisted/web/sux.py | https://github.com/wistbean/learn_python3_spider/blob/master/stackoverflow/venv/lib/python3.6/site-packages/twisted/web/sux.py | MIT |
def gotTagEnd(self, name):
'''Encountered closing tag
Default behaviour is to print.'''
print('end', name) | Encountered closing tag
Default behaviour is to print. | gotTagEnd | python | wistbean/learn_python3_spider | stackoverflow/venv/lib/python3.6/site-packages/twisted/web/sux.py | https://github.com/wistbean/learn_python3_spider/blob/master/stackoverflow/venv/lib/python3.6/site-packages/twisted/web/sux.py | MIT |
def output(func, *args, **kw):
"""output(func, *args, **kw) -> html string
Either return the result of a function (which presumably returns an
HTML-legal string) or a sparse HTMLized error message and a message
in the server log.
"""
try:
return func(*args, **kw)
except:
log.msg("Error calling %r:" % (func,))
log.err()
return PRE("An error occurred.") | output(func, *args, **kw) -> html string
Either return the result of a function (which presumably returns an
HTML-legal string) or a sparse HTMLized error message and a message
in the server log. | output | python | wistbean/learn_python3_spider | stackoverflow/venv/lib/python3.6/site-packages/twisted/web/html.py | https://github.com/wistbean/learn_python3_spider/blob/master/stackoverflow/venv/lib/python3.6/site-packages/twisted/web/html.py | MIT |
def __init__(self, data, type):
"""
@param data: The bytes that make up this data resource.
@type data: L{bytes}
@param type: A native string giving the Internet media type for this
content.
@type type: L{str}
"""
resource.Resource.__init__(self)
self.data = data
self.type = type | @param data: The bytes that make up this data resource.
@type data: L{bytes}
@param type: A native string giving the Internet media type for this
content.
@type type: L{str} | __init__ | python | wistbean/learn_python3_spider | stackoverflow/venv/lib/python3.6/site-packages/twisted/web/static.py | https://github.com/wistbean/learn_python3_spider/blob/master/stackoverflow/venv/lib/python3.6/site-packages/twisted/web/static.py | MIT |
def addSlash(request):
"""
Add a trailing slash to C{request}'s URI. Deprecated, do not use.
"""
return _addSlash(request) | Add a trailing slash to C{request}'s URI. Deprecated, do not use. | addSlash | python | wistbean/learn_python3_spider | stackoverflow/venv/lib/python3.6/site-packages/twisted/web/static.py | https://github.com/wistbean/learn_python3_spider/blob/master/stackoverflow/venv/lib/python3.6/site-packages/twisted/web/static.py | MIT |
def _addSlash(request):
"""
Add a trailing slash to C{request}'s URI.
@param request: The incoming request to add the ending slash to.
@type request: An object conforming to L{twisted.web.iweb.IRequest}
@return: A URI with a trailing slash, with query and fragment preserved.
@rtype: L{bytes}
"""
url = URL.fromText(request.uri.decode('ascii'))
# Add an empty path segment at the end, so that it adds a trailing slash
url = url.replace(path=list(url.path) + [u""])
return url.asText().encode('ascii') | Add a trailing slash to C{request}'s URI.
@param request: The incoming request to add the ending slash to.
@type request: An object conforming to L{twisted.web.iweb.IRequest}
@return: A URI with a trailing slash, with query and fragment preserved.
@rtype: L{bytes} | _addSlash | python | wistbean/learn_python3_spider | stackoverflow/venv/lib/python3.6/site-packages/twisted/web/static.py | https://github.com/wistbean/learn_python3_spider/blob/master/stackoverflow/venv/lib/python3.6/site-packages/twisted/web/static.py | MIT |
def loadMimeTypes(mimetype_locations=None, init=mimetypes.init):
"""
Produces a mapping of extensions (with leading dot) to MIME types.
It does this by calling the C{init} function of the L{mimetypes} module.
This will have the side effect of modifying the global MIME types cache
in that module.
Multiple file locations containing mime-types can be passed as a list.
The files will be sourced in that order, overriding mime-types from the
files sourced beforehand, but only if a new entry explicitly overrides
the current entry.
@param mimetype_locations: Optional. List of paths to C{mime.types} style
files that should be used.
@type mimetype_locations: iterable of paths or L{None}
@param init: The init function to call. Defaults to the global C{init}
function of the C{mimetypes} module. For internal use (testing) only.
@type init: callable
"""
init(mimetype_locations)
mimetypes.types_map.update(
{
'.conf': 'text/plain',
'.diff': 'text/plain',
'.flac': 'audio/x-flac',
'.java': 'text/plain',
'.oz': 'text/x-oz',
'.swf': 'application/x-shockwave-flash',
'.wml': 'text/vnd.wap.wml',
'.xul': 'application/vnd.mozilla.xul+xml',
'.patch': 'text/plain'
}
)
return mimetypes.types_map | Produces a mapping of extensions (with leading dot) to MIME types.
It does this by calling the C{init} function of the L{mimetypes} module.
This will have the side effect of modifying the global MIME types cache
in that module.
Multiple file locations containing mime-types can be passed as a list.
The files will be sourced in that order, overriding mime-types from the
files sourced beforehand, but only if a new entry explicitly overrides
the current entry.
@param mimetype_locations: Optional. List of paths to C{mime.types} style
files that should be used.
@type mimetype_locations: iterable of paths or L{None}
@param init: The init function to call. Defaults to the global C{init}
function of the C{mimetypes} module. For internal use (testing) only.
@type init: callable | loadMimeTypes | python | wistbean/learn_python3_spider | stackoverflow/venv/lib/python3.6/site-packages/twisted/web/static.py | https://github.com/wistbean/learn_python3_spider/blob/master/stackoverflow/venv/lib/python3.6/site-packages/twisted/web/static.py | MIT |
def __init__(self, path, defaultType="text/html", ignoredExts=(), registry=None, allowExt=0):
"""
Create a file with the given path.
@param path: The filename of the file from which this L{File} will
serve data.
@type path: C{str}
@param defaultType: A I{major/minor}-style MIME type specifier
indicating the I{Content-Type} with which this L{File}'s data
will be served if a MIME type cannot be determined based on
C{path}'s extension.
@type defaultType: C{str}
@param ignoredExts: A sequence giving the extensions of paths in the
filesystem which will be ignored for the purposes of child
lookup. For example, if C{ignoredExts} is C{(".bar",)} and
C{path} is a directory containing a file named C{"foo.bar"}, a
request for the C{"foo"} child of this resource will succeed
with a L{File} pointing to C{"foo.bar"}.
@param registry: The registry object being used to handle this
request. If L{None}, one will be created.
@type registry: L{Registry}
@param allowExt: Ignored parameter, only present for backwards
compatibility. Do not pass a value for this parameter.
"""
resource.Resource.__init__(self)
filepath.FilePath.__init__(self, path)
self.defaultType = defaultType
if ignoredExts in (0, 1) or allowExt:
warnings.warn("ignoredExts should receive a list, not a boolean")
if ignoredExts or allowExt:
self.ignoredExts = ['*']
else:
self.ignoredExts = []
else:
self.ignoredExts = list(ignoredExts)
self.registry = registry or Registry() | Create a file with the given path.
@param path: The filename of the file from which this L{File} will
serve data.
@type path: C{str}
@param defaultType: A I{major/minor}-style MIME type specifier
indicating the I{Content-Type} with which this L{File}'s data
will be served if a MIME type cannot be determined based on
C{path}'s extension.
@type defaultType: C{str}
@param ignoredExts: A sequence giving the extensions of paths in the
filesystem which will be ignored for the purposes of child
lookup. For example, if C{ignoredExts} is C{(".bar",)} and
C{path} is a directory containing a file named C{"foo.bar"}, a
request for the C{"foo"} child of this resource will succeed
with a L{File} pointing to C{"foo.bar"}.
@param registry: The registry object being used to handle this
request. If L{None}, one will be created.
@type registry: L{Registry}
@param allowExt: Ignored parameter, only present for backwards
compatibility. Do not pass a value for this parameter. | __init__ | python | wistbean/learn_python3_spider | stackoverflow/venv/lib/python3.6/site-packages/twisted/web/static.py | https://github.com/wistbean/learn_python3_spider/blob/master/stackoverflow/venv/lib/python3.6/site-packages/twisted/web/static.py | MIT |
def ignoreExt(self, ext):
"""Ignore the given extension.
Serve file.ext if file is requested
"""
self.ignoredExts.append(ext) | Ignore the given extension.
Serve file.ext if file is requested | ignoreExt | python | wistbean/learn_python3_spider | stackoverflow/venv/lib/python3.6/site-packages/twisted/web/static.py | https://github.com/wistbean/learn_python3_spider/blob/master/stackoverflow/venv/lib/python3.6/site-packages/twisted/web/static.py | MIT |
def directoryListing(self):
"""
Return a resource that generates an HTML listing of the
directory this path represents.
@return: A resource that renders the directory to HTML.
@rtype: L{DirectoryLister}
"""
if _PY3:
path = self.path
names = self.listNames()
else:
# DirectoryLister works in terms of native strings, so on
# Python 2, ensure we have a bytes paths for this
# directory and its contents. We use the asBytesMode
# method inherited from FilePath to ensure consistent
# encoding of the actual path. This returns a FilePath
# instance even when called on subclasses, however, so we
# have to create a new File instance.
nativeStringPath = self.createSimilarFile(self.asBytesMode().path)
path = nativeStringPath.path
names = nativeStringPath.listNames()
return DirectoryLister(path,
names,
self.contentTypes,
self.contentEncodings,
self.defaultType) | Return a resource that generates an HTML listing of the
directory this path represents.
@return: A resource that renders the directory to HTML.
@rtype: L{DirectoryLister} | directoryListing | python | wistbean/learn_python3_spider | stackoverflow/venv/lib/python3.6/site-packages/twisted/web/static.py | https://github.com/wistbean/learn_python3_spider/blob/master/stackoverflow/venv/lib/python3.6/site-packages/twisted/web/static.py | MIT |
def getChild(self, path, request):
"""
If this L{File}"s path refers to a directory, return a L{File}
referring to the file named C{path} in that directory.
If C{path} is the empty string, return a L{DirectoryLister}
instead.
@param path: The current path segment.
@type path: L{bytes}
@param request: The incoming request.
@type request: An that provides L{twisted.web.iweb.IRequest}.
@return: A resource representing the requested file or
directory, or L{NoResource} if the path cannot be
accessed.
@rtype: An object that provides L{resource.IResource}.
"""
if isinstance(path, bytes):
try:
# Request calls urllib.unquote on each path segment,
# leaving us with raw bytes.
path = path.decode('utf-8')
except UnicodeDecodeError:
log.err(None,
"Could not decode path segment as utf-8: %r" % (path,))
return self.childNotFound
self.restat(reraise=False)
if not self.isdir():
return self.childNotFound
if path:
try:
fpath = self.child(path)
except filepath.InsecurePath:
return self.childNotFound
else:
fpath = self.childSearchPreauth(*self.indexNames)
if fpath is None:
return self.directoryListing()
if not fpath.exists():
fpath = fpath.siblingExtensionSearch(*self.ignoredExts)
if fpath is None:
return self.childNotFound
extension = fpath.splitext()[1]
if platformType == "win32":
# don't want .RPY to be different than .rpy, since that would allow
# source disclosure.
processor = InsensitiveDict(self.processors).get(extension)
else:
processor = self.processors.get(extension)
if processor:
return resource.IResource(processor(fpath.path, self.registry))
return self.createSimilarFile(fpath.path) | If this L{File}"s path refers to a directory, return a L{File}
referring to the file named C{path} in that directory.
If C{path} is the empty string, return a L{DirectoryLister}
instead.
@param path: The current path segment.
@type path: L{bytes}
@param request: The incoming request.
@type request: An that provides L{twisted.web.iweb.IRequest}.
@return: A resource representing the requested file or
directory, or L{NoResource} if the path cannot be
accessed.
@rtype: An object that provides L{resource.IResource}. | getChild | python | wistbean/learn_python3_spider | stackoverflow/venv/lib/python3.6/site-packages/twisted/web/static.py | https://github.com/wistbean/learn_python3_spider/blob/master/stackoverflow/venv/lib/python3.6/site-packages/twisted/web/static.py | MIT |
def openForReading(self):
"""Open a file and return it."""
return self.open() | Open a file and return it. | openForReading | python | wistbean/learn_python3_spider | stackoverflow/venv/lib/python3.6/site-packages/twisted/web/static.py | https://github.com/wistbean/learn_python3_spider/blob/master/stackoverflow/venv/lib/python3.6/site-packages/twisted/web/static.py | MIT |
def getFileSize(self):
"""Return file size."""
return self.getsize() | Return file size. | getFileSize | python | wistbean/learn_python3_spider | stackoverflow/venv/lib/python3.6/site-packages/twisted/web/static.py | https://github.com/wistbean/learn_python3_spider/blob/master/stackoverflow/venv/lib/python3.6/site-packages/twisted/web/static.py | MIT |
def _parseRangeHeader(self, range):
"""
Parse the value of a Range header into (start, stop) pairs.
In a given pair, either of start or stop can be None, signifying that
no value was provided, but not both.
@return: A list C{[(start, stop)]} of pairs of length at least one.
@raise ValueError: if the header is syntactically invalid or if the
Bytes-Unit is anything other than "bytes'.
"""
try:
kind, value = range.split(b'=', 1)
except ValueError:
raise ValueError("Missing '=' separator")
kind = kind.strip()
if kind != b'bytes':
raise ValueError("Unsupported Bytes-Unit: %r" % (kind,))
unparsedRanges = list(filter(None, map(bytes.strip, value.split(b','))))
parsedRanges = []
for byteRange in unparsedRanges:
try:
start, end = byteRange.split(b'-', 1)
except ValueError:
raise ValueError("Invalid Byte-Range: %r" % (byteRange,))
if start:
try:
start = int(start)
except ValueError:
raise ValueError("Invalid Byte-Range: %r" % (byteRange,))
else:
start = None
if end:
try:
end = int(end)
except ValueError:
raise ValueError("Invalid Byte-Range: %r" % (byteRange,))
else:
end = None
if start is not None:
if end is not None and start > end:
# Start must be less than or equal to end or it is invalid.
raise ValueError("Invalid Byte-Range: %r" % (byteRange,))
elif end is None:
# One or both of start and end must be specified. Omitting
# both is invalid.
raise ValueError("Invalid Byte-Range: %r" % (byteRange,))
parsedRanges.append((start, end))
return parsedRanges | Parse the value of a Range header into (start, stop) pairs.
In a given pair, either of start or stop can be None, signifying that
no value was provided, but not both.
@return: A list C{[(start, stop)]} of pairs of length at least one.
@raise ValueError: if the header is syntactically invalid or if the
Bytes-Unit is anything other than "bytes'. | _parseRangeHeader | python | wistbean/learn_python3_spider | stackoverflow/venv/lib/python3.6/site-packages/twisted/web/static.py | https://github.com/wistbean/learn_python3_spider/blob/master/stackoverflow/venv/lib/python3.6/site-packages/twisted/web/static.py | MIT |
def _rangeToOffsetAndSize(self, start, end):
"""
Convert a start and end from a Range header to an offset and size.
This method checks that the resulting range overlaps with the resource
being served (and so has the value of C{getFileSize()} as an indirect
input).
Either but not both of start or end can be L{None}:
- Omitted start means that the end value is actually a start value
relative to the end of the resource.
- Omitted end means the end of the resource should be the end of
the range.
End is interpreted as inclusive, as per RFC 2616.
If this range doesn't overlap with any of this resource, C{(0, 0)} is
returned, which is not otherwise a value return value.
@param start: The start value from the header, or L{None} if one was
not present.
@param end: The end value from the header, or L{None} if one was not
present.
@return: C{(offset, size)} where offset is how far into this resource
this resource the range begins and size is how long the range is,
or C{(0, 0)} if the range does not overlap this resource.
"""
size = self.getFileSize()
if start is None:
start = size - end
end = size
elif end is None:
end = size
elif end < size:
end += 1
elif end > size:
end = size
if start >= size:
start = end = 0
return start, (end - start) | Convert a start and end from a Range header to an offset and size.
This method checks that the resulting range overlaps with the resource
being served (and so has the value of C{getFileSize()} as an indirect
input).
Either but not both of start or end can be L{None}:
- Omitted start means that the end value is actually a start value
relative to the end of the resource.
- Omitted end means the end of the resource should be the end of
the range.
End is interpreted as inclusive, as per RFC 2616.
If this range doesn't overlap with any of this resource, C{(0, 0)} is
returned, which is not otherwise a value return value.
@param start: The start value from the header, or L{None} if one was
not present.
@param end: The end value from the header, or L{None} if one was not
present.
@return: C{(offset, size)} where offset is how far into this resource
this resource the range begins and size is how long the range is,
or C{(0, 0)} if the range does not overlap this resource. | _rangeToOffsetAndSize | python | wistbean/learn_python3_spider | stackoverflow/venv/lib/python3.6/site-packages/twisted/web/static.py | https://github.com/wistbean/learn_python3_spider/blob/master/stackoverflow/venv/lib/python3.6/site-packages/twisted/web/static.py | MIT |
def _contentRange(self, offset, size):
"""
Return a string suitable for the value of a Content-Range header for a
range with the given offset and size.
The offset and size are not sanity checked in any way.
@param offset: How far into this resource the range begins.
@param size: How long the range is.
@return: The value as appropriate for the value of a Content-Range
header.
"""
return networkString('bytes %d-%d/%d' % (
offset, offset + size - 1, self.getFileSize())) | Return a string suitable for the value of a Content-Range header for a
range with the given offset and size.
The offset and size are not sanity checked in any way.
@param offset: How far into this resource the range begins.
@param size: How long the range is.
@return: The value as appropriate for the value of a Content-Range
header. | _contentRange | python | wistbean/learn_python3_spider | stackoverflow/venv/lib/python3.6/site-packages/twisted/web/static.py | https://github.com/wistbean/learn_python3_spider/blob/master/stackoverflow/venv/lib/python3.6/site-packages/twisted/web/static.py | MIT |
def _doSingleRangeRequest(self, request, startAndEnd):
"""
Set up the response for Range headers that specify a single range.
This method checks if the request is satisfiable and sets the response
code and Content-Range header appropriately. The return value
indicates which part of the resource to return.
@param request: The Request object.
@param startAndEnd: A 2-tuple of start of the byte range as specified by
the header and the end of the byte range as specified by the header.
At most one of the start and end may be L{None}.
@return: A 2-tuple of the offset and size of the range to return.
offset == size == 0 indicates that the request is not satisfiable.
"""
start, end = startAndEnd
offset, size = self._rangeToOffsetAndSize(start, end)
if offset == size == 0:
# This range doesn't overlap with any of this resource, so the
# request is unsatisfiable.
request.setResponseCode(http.REQUESTED_RANGE_NOT_SATISFIABLE)
request.setHeader(
b'content-range', networkString('bytes */%d' % (self.getFileSize(),)))
else:
request.setResponseCode(http.PARTIAL_CONTENT)
request.setHeader(
b'content-range', self._contentRange(offset, size))
return offset, size | Set up the response for Range headers that specify a single range.
This method checks if the request is satisfiable and sets the response
code and Content-Range header appropriately. The return value
indicates which part of the resource to return.
@param request: The Request object.
@param startAndEnd: A 2-tuple of start of the byte range as specified by
the header and the end of the byte range as specified by the header.
At most one of the start and end may be L{None}.
@return: A 2-tuple of the offset and size of the range to return.
offset == size == 0 indicates that the request is not satisfiable. | _doSingleRangeRequest | python | wistbean/learn_python3_spider | stackoverflow/venv/lib/python3.6/site-packages/twisted/web/static.py | https://github.com/wistbean/learn_python3_spider/blob/master/stackoverflow/venv/lib/python3.6/site-packages/twisted/web/static.py | MIT |
def _doMultipleRangeRequest(self, request, byteRanges):
    """
    Set up the response for Range headers that specify multiple ranges.

    This method checks if the request is satisfiable and sets the
    response code and Content-Type and Content-Length headers
    appropriately.  The return value indicates which parts of the
    resource to return and the boundaries that should separate those
    parts.

    In detail, the return value is a list C{rangeInfo} of 3-tuples
    C{(partSeparator, partOffset, partSize)}.  The response to this
    request should be, for each element of C{rangeInfo},
    C{partSeparator} followed by C{partSize} bytes of the resource
    starting at C{partOffset}.  Each C{partSeparator} includes the
    MIME-style boundary and the part-specific Content-type and
    Content-range headers.  It is convenient to return the separator as
    a concrete string from this method, because this method needs to
    compute the number of bytes that will make up the response to be
    able to set the Content-Length header of the response accurately.

    @param request: The Request object.
    @param byteRanges: A list of C{(start, end)} values as specified by
        the header.  For each range, at most one of C{start} and C{end}
        may be L{None}.
    @return: See above.
    """
    matchingRangeFound = False
    rangeInfo = []
    contentLength = 0
    # MIME boundary unlikely to occur in the data: the current
    # microsecond timestamp and this process's PID, in hex.
    boundary = networkString("%x%x" % (int(time.time()*1000000), os.getpid()))
    if self.type:
        contentType = self.type
    else:
        contentType = b'bytes' # It's what Apache does...
    for start, end in byteRanges:
        partOffset, partSize = self._rangeToOffsetAndSize(start, end)
        if partOffset == partSize == 0:
            # This range does not overlap the resource; skip it.
            continue
        contentLength += partSize
        matchingRangeFound = True
        partContentRange = self._contentRange(partOffset, partSize)
        partSeparator = networkString((
            "\r\n"
            "--%s\r\n"
            "Content-type: %s\r\n"
            "Content-range: %s\r\n"
            "\r\n") % (nativeString(boundary), nativeString(contentType), nativeString(partContentRange)))
        # The separators themselves count towards the Content-Length.
        contentLength += len(partSeparator)
        rangeInfo.append((partSeparator, partOffset, partSize))
    if not matchingRangeFound:
        # No requested range overlaps the resource: prepare a
        # REQUESTED_RANGE_NOT_SATISFIABLE response with an empty body.
        request.setResponseCode(http.REQUESTED_RANGE_NOT_SATISFIABLE)
        request.setHeader(
            b'content-length', b'0')
        request.setHeader(
            b'content-range', networkString('bytes */%d' % (self.getFileSize(),)))
        # NOTE(review): this path returns a 2-tuple while the success
        # path returns a list; callers appear to rely only on it being
        # empty of (separator, offset, size) entries -- confirm.
        return [], b''
    finalBoundary = b"\r\n--" + boundary + b"--\r\n"
    rangeInfo.append((finalBoundary, 0, 0))
    request.setResponseCode(http.PARTIAL_CONTENT)
    request.setHeader(
        b'content-type', networkString('multipart/byteranges; boundary="%s"' % (nativeString(boundary),)))
    request.setHeader(
        b'content-length', intToBytes(contentLength + len(finalBoundary)))
    return rangeInfo
This method checks if the request is satisfiable and sets the response
code and Content-Type and Content-Length headers appropriately. The
return value, which is a little complicated, indicates which parts of
the resource to return and the boundaries that should separate the
parts.
In detail, the return value is a tuple rangeInfo C{rangeInfo} is a
list of 3-tuples C{(partSeparator, partOffset, partSize)}. The
response to this request should be, for each element of C{rangeInfo},
C{partSeparator} followed by C{partSize} bytes of the resource
starting at C{partOffset}. Each C{partSeparator} includes the
MIME-style boundary and the part-specific Content-type and
Content-range headers. It is convenient to return the separator as a
concrete string from this method, because this method needs to compute
the number of bytes that will make up the response to be able to set
the Content-Length header of the response accurately.
@param request: The Request object.
@param byteRanges: A list of C{(start, end)} values as specified by
the header. For each range, at most one of C{start} and C{end}
may be L{None}.
@return: See above. | _doMultipleRangeRequest | python | wistbean/learn_python3_spider | stackoverflow/venv/lib/python3.6/site-packages/twisted/web/static.py | https://github.com/wistbean/learn_python3_spider/blob/master/stackoverflow/venv/lib/python3.6/site-packages/twisted/web/static.py | MIT |
def _setContentHeaders(self, request, size=None):
    """
    Set the Content-Length and, when known, the Content-Type and
    Content-Encoding headers for this request.

    This method is not appropriate for requests for multiple byte
    ranges; L{_doMultipleRangeRequest} computes those headers itself.

    @param request: The L{twisted.web.http.Request} object.
    @param size: The size of the response body; defaults to
        C{self.getFileSize()}.
    """
    request.setHeader(
        b'content-length',
        intToBytes(self.getFileSize() if size is None else size))
    if self.type:
        request.setHeader(b'content-type', networkString(self.type))
    if self.encoding:
        request.setHeader(b'content-encoding', networkString(self.encoding))
This method is not appropriate for requests for multiple byte ranges;
L{_doMultipleRangeRequest} will set these headers in that case.
@param request: The L{twisted.web.http.Request} object.
@param size: The size of the response. If not specified, default to
C{self.getFileSize()}. | _setContentHeaders | python | wistbean/learn_python3_spider | stackoverflow/venv/lib/python3.6/site-packages/twisted/web/static.py | https://github.com/wistbean/learn_python3_spider/blob/master/stackoverflow/venv/lib/python3.6/site-packages/twisted/web/static.py | MIT |
def makeProducer(self, request, fileForReading):
    """
    Create the L{StaticProducer} that will produce the body of this
    response.  Also sets the response code and the Content-* headers.

    @param request: The L{twisted.web.http.Request} object.
    @param fileForReading: The file object containing the resource.
    @return: A L{StaticProducer}.  Calling C{.start()} on this will
        begin producing the response.
    """
    byteRange = request.getHeader(b'range')
    parsedRanges = None
    if byteRange is not None:
        try:
            parsedRanges = self._parseRangeHeader(byteRange)
        except ValueError:
            log.msg("Ignoring malformed Range header %r" % (byteRange.decode(),))
    if parsedRanges is None:
        # No Range header (or an unusable one): serve the whole file.
        self._setContentHeaders(request)
        request.setResponseCode(http.OK)
        return NoRangeStaticProducer(request, fileForReading)
    if len(parsedRanges) == 1:
        offset, size = self._doSingleRangeRequest(request, parsedRanges[0])
        self._setContentHeaders(request, size)
        return SingleRangeStaticProducer(
            request, fileForReading, offset, size)
    rangeInfo = self._doMultipleRangeRequest(request, parsedRanges)
    return MultipleRangeStaticProducer(request, fileForReading, rangeInfo)
This method will also set the response code and Content-* headers.
@param request: The L{twisted.web.http.Request} object.
@param fileForReading: The file object containing the resource.
@return: A L{StaticProducer}. Calling C{.start()} on this will begin
producing the response. | makeProducer | python | wistbean/learn_python3_spider | stackoverflow/venv/lib/python3.6/site-packages/twisted/web/static.py | https://github.com/wistbean/learn_python3_spider/blob/master/stackoverflow/venv/lib/python3.6/site-packages/twisted/web/static.py | MIT |
def render_GET(self, request):
    """
    Begin sending the contents of this L{File} (or a subset of the
    contents, based on the 'range' header) to the given request.

    @param request: The L{twisted.web.http.Request} to respond to.
    @return: C{server.NOT_DONE_YET} once a producer has been started,
        or an already-rendered error/redirect/empty body otherwise.
    """
    # Refresh cached stat information before deciding how to respond.
    self.restat(False)
    if self.type is None:
        self.type, self.encoding = getTypeAndEncoding(self.basename(),
                                                      self.contentTypes,
                                                      self.contentEncodings,
                                                      self.defaultType)
    if not self.exists():
        return self.childNotFound.render(request)
    if self.isdir():
        # Directories are answered with a redirect, not file contents.
        return self.redirect(request)
    request.setHeader(b'accept-ranges', b'bytes')
    try:
        fileForReading = self.openForReading()
    except IOError as e:
        if e.errno == errno.EACCES:
            # Unreadable file: respond with the "forbidden" resource.
            return self.forbidden.render(request)
        else:
            raise
    if request.setLastModified(self.getModificationTime()) is http.CACHED:
        # `setLastModified` also sets the response code for us, so if the
        # request is cached, we close the file now that we've made sure that
        # the request would otherwise succeed and return an empty body.
        fileForReading.close()
        return b''
    if request.method == b'HEAD':
        # Set the content headers here, rather than making a producer.
        self._setContentHeaders(request)
        # We've opened the file to make sure it's accessible, so close it
        # now that we don't need it.
        fileForReading.close()
        return b''
    producer = self.makeProducer(request, fileForReading)
    producer.start()
    # and make sure the connection doesn't get closed
    return server.NOT_DONE_YET
contents, based on the 'range' header) to the given request. | render_GET | python | wistbean/learn_python3_spider | stackoverflow/venv/lib/python3.6/site-packages/twisted/web/static.py | https://github.com/wistbean/learn_python3_spider/blob/master/stackoverflow/venv/lib/python3.6/site-packages/twisted/web/static.py | MIT |
def __init__(self, request, fileObject):
    """
    Record the request being answered and the open file whose contents
    will be written to it.
    """
    self.fileObject = fileObject
    self.request = request
def stopProducing(self):
    """
    Stop producing data and release the underlying file.

    L{twisted.internet.interfaces.IProducer.stopProducing} is invoked
    when our consumer has died; subclasses also call this method once
    they have finished producing data.
    """
    self.fileObject.close()
    self.request = None
L{twisted.internet.interfaces.IProducer.stopProducing}
is called when our consumer has died, and subclasses also call this
method when they are done producing data. | stopProducing | python | wistbean/learn_python3_spider | stackoverflow/venv/lib/python3.6/site-packages/twisted/web/static.py | https://github.com/wistbean/learn_python3_spider/blob/master/stackoverflow/venv/lib/python3.6/site-packages/twisted/web/static.py | MIT |
def __init__(self, request, fileObject, offset, size):
    """
    Initialize the instance.

    @param request: See L{StaticProducer}.
    @param fileObject: See L{StaticProducer}.
    @param offset: The offset into the file at which the chunk to be
        written begins.
    @param size: The number of bytes of the chunk to write.
    """
    StaticProducer.__init__(self, request, fileObject)
    self.offset, self.size = offset, size
@param request: See L{StaticProducer}.
@param fileObject: See L{StaticProducer}.
@param offset: The offset into the file of the chunk to be written.
@param size: The size of the chunk to write. | __init__ | python | wistbean/learn_python3_spider | stackoverflow/venv/lib/python3.6/site-packages/twisted/web/static.py | https://github.com/wistbean/learn_python3_spider/blob/master/stackoverflow/venv/lib/python3.6/site-packages/twisted/web/static.py | MIT |
def __init__(self, request, fileObject, rangeInfo):
    """
    Initialize the instance.

    @param request: See L{StaticProducer}.
    @param fileObject: See L{StaticProducer}.
    @param rangeInfo: A list of C{(boundary, offset, size)} tuples,
        where C{boundary} is written to the request first, then C{size}
        bytes of the file starting at C{offset}.
    """
    StaticProducer.__init__(self, request, fileObject)
    self.rangeInfo = rangeInfo
@param request: See L{StaticProducer}.
@param fileObject: See L{StaticProducer}.
@param rangeInfo: A list of tuples C{[(boundary, offset, size)]}
where:
- C{boundary} will be written to the request first.
- C{offset} the offset into the file of chunk to write.
- C{size} the size of the chunk to write. | __init__ | python | wistbean/learn_python3_spider | stackoverflow/venv/lib/python3.6/site-packages/twisted/web/static.py | https://github.com/wistbean/learn_python3_spider/blob/master/stackoverflow/venv/lib/python3.6/site-packages/twisted/web/static.py | MIT |
def formatFileSize(size):
    """
    Format the given file size in bytes in a human-readable way,
    truncating to whole units (B, K, M or G).
    """
    for threshold, suffix in ((1024 ** 3, 'G'), (1024 ** 2, 'M'), (1024, 'K')):
        if size >= threshold:
            return '%i%s' % (size / threshold, suffix)
    return '%iB' % size
def _getFilesAndDirectories(self, directory):
    """
    Split a directory listing into directories and files, each entry
    carrying the attributes consumed by C{self.linePattern}.

    @param directory: The names of the children of C{self.path}.
    @return: tuple of (directories, files)
    @rtype: C{tuple} of C{list}
    """
    dirs = []
    files = []
    for name in directory:
        if _PY3 and isinstance(name, bytes):
            name = name.decode("utf8")
        url = quote(name, "/")
        escapedPath = escape(name)
        child = filepath.FilePath(self.path).child(name)
        if child.isdir():
            dirs.append({
                'text': escapedPath + "/",
                'href': url + "/",
                'size': '',
                'type': '[Directory]',
                'encoding': '',
            })
            continue
        mimetype, encoding = getTypeAndEncoding(
            name, self.contentTypes, self.contentEncodings,
            self.defaultType)
        try:
            size = child.getsize()
        except OSError:
            # The file vanished (or is unstatable); skip it rather than
            # fail the whole listing.
            continue
        files.append({
            'text': escapedPath,
            "href": url,
            'type': '[%s]' % mimetype,
            'encoding': (encoding and '[%s]' % encoding or ''),
            'size': formatFileSize(size),
        })
    return dirs, files
attributes to be used to build a table content with
C{self.linePattern}.
@return: tuple of (directories, files)
@rtype: C{tuple} of C{list} | _getFilesAndDirectories | python | wistbean/learn_python3_spider | stackoverflow/venv/lib/python3.6/site-packages/twisted/web/static.py | https://github.com/wistbean/learn_python3_spider/blob/master/stackoverflow/venv/lib/python3.6/site-packages/twisted/web/static.py | MIT |
def _buildTableContent(self, elements):
    """
    Render each element with C{self.linePattern}, tagging rows with
    alternating 'odd'/'even' CSS classes.
    """
    rows = []
    for index, element in enumerate(elements):
        element["class"] = 'odd' if index % 2 == 0 else 'even'
        rows.append(self.linePattern % element)
    return rows
and even classes. | _buildTableContent | python | wistbean/learn_python3_spider | stackoverflow/venv/lib/python3.6/site-packages/twisted/web/static.py | https://github.com/wistbean/learn_python3_spider/blob/master/stackoverflow/venv/lib/python3.6/site-packages/twisted/web/static.py | MIT |
def render(self, request):
    """
    Render an HTML listing of the contents of C{self.path}.
    """
    request.setHeader(b"content-type", b"text/html; charset=utf-8")
    if self.dirs is None:
        directory = sorted(os.listdir(self.path))
    else:
        # An explicit listing was supplied; use it verbatim (unsorted).
        directory = self.dirs
    dirs, files = self._getFilesAndDirectories(directory)
    tableContent = "".join(self._buildTableContent(dirs + files))
    header = "Directory listing for %s" % (
        escape(unquote(nativeString(request.uri))),)
    page = self.template % {"header": header, "tableContent": tableContent}
    if _PY3:
        page = page.encode("utf8")
    return page
def connectionMade(self):
    """
    Begin the HTTP/2 session once a connection is established.

    Called by the reactor when a connection is received.  May also be
    called by the L{twisted.web.http._GenericHTTPChannelProtocol}
    during upgrade to HTTP/2.
    """
    self.setTimeout(self.timeOut)
    self.conn.initiate_connection()
    # Flush the connection preamble produced by initiate_connection().
    preamble = self.conn.data_to_send()
    self.transport.write(preamble)
by the L{twisted.web.http._GenericHTTPChannelProtocol} during upgrade
to HTTP/2. | connectionMade | python | wistbean/learn_python3_spider | stackoverflow/venv/lib/python3.6/site-packages/twisted/web/_http2.py | https://github.com/wistbean/learn_python3_spider/blob/master/stackoverflow/venv/lib/python3.6/site-packages/twisted/web/_http2.py | MIT |
def dataReceived(self, data):
    """
    Feed a chunk of bytes from the transport into the HTTP/2 state
    machine and dispatch the resulting events to the per-event
    handlers.

    @param data: The data received from the transport.
    @type data: L{bytes}
    """
    # Any traffic from the peer counts as activity for timeout purposes.
    self.resetTimeout()
    try:
        events = self.conn.receive_data(data)
    except h2.exceptions.ProtocolError:
        # A remote protocol error terminates the connection: flush any
        # pending frames (e.g. the GOAWAY h2 queued), then drop the
        # transport and notify all streams.
        dataToSend = self.conn.data_to_send()
        self.transport.write(dataToSend)
        self.transport.loseConnection()
        self.connectionLost(Failure())
        return
    for event in events:
        if isinstance(event, h2.events.RequestReceived):
            self._requestReceived(event)
        elif isinstance(event, h2.events.DataReceived):
            self._requestDataReceived(event)
        elif isinstance(event, h2.events.StreamEnded):
            self._requestEnded(event)
        elif isinstance(event, h2.events.StreamReset):
            self._requestAborted(event)
        elif isinstance(event, h2.events.WindowUpdated):
            self._handleWindowUpdate(event)
        elif isinstance(event, h2.events.PriorityUpdated):
            self._handlePriorityUpdate(event)
        elif isinstance(event, h2.events.ConnectionTerminated):
            self.transport.loseConnection()
            self.connectionLost(ConnectionLost("Remote peer sent GOAWAY"))
    # Handling the events may have queued outbound frames of our own
    # (acknowledgements, window updates, ...); send them now.
    dataToSend = self.conn.data_to_send()
    if dataToSend:
        self.transport.write(dataToSend)
@param data: The data received from the transport.
@type data: L{bytes} | dataReceived | python | wistbean/learn_python3_spider | stackoverflow/venv/lib/python3.6/site-packages/twisted/web/_http2.py | https://github.com/wistbean/learn_python3_spider/blob/master/stackoverflow/venv/lib/python3.6/site-packages/twisted/web/_http2.py | MIT |
def timeoutConnection(self):
    """
    Tear the connection down after it has been inactive for
    L{self.timeOut<twisted.protocols.policies.TimeoutMixin.timeOut>}
    seconds, attempting to notify the peer.

    Beyond the base behaviour this logs the timeout and sends a GOAWAY
    frame -- with an error code indicating whether the shutdown was
    clean or not -- before the connection is torn down.
    """
    self._log.info(
        "Timing out client {client}", client=self.transport.getPeer()
    )
    # A timeout while streams are still open is a protocol-level
    # failure (PROTOCOL_ERROR); an idle connection closes cleanly
    # (NO_ERROR).
    if self.conn.open_outbound_streams > 0 or self.conn.open_inbound_streams > 0:
        error_code = h2.errors.ErrorCodes.PROTOCOL_ERROR
    else:
        error_code = h2.errors.ErrorCodes.NO_ERROR
    self.conn.close_connection(error_code=error_code)
    self.transport.write(self.conn.data_to_send())
    if self.abortTimeout is not None:
        # Schedule a hard abort in case the client keeps the connection
        # open anyway.  We use self.callLater because that's what
        # TimeoutMixin does, even though we have a perfectly good
        # reactor sitting around.  See
        # https://twistedmatrix.com/trac/ticket/8488.
        self._abortingCall = self.callLater(
            self.abortTimeout, self.forceAbortClient
        )
    # We're done, throw the connection away.
    self.transport.loseConnection()
L{self.timeOut<twisted.protocols.policies.TimeoutMixin.timeOut>}
seconds. Cleanly tears the connection down, attempting to notify the
peer if needed.
We override this method to add two extra bits of functionality:
- We want to log the timeout.
- We want to send a GOAWAY frame indicating that the connection is
being terminated, and whether it was clean or not. We have to do this
before the connection is torn down. | timeoutConnection | python | wistbean/learn_python3_spider | stackoverflow/venv/lib/python3.6/site-packages/twisted/web/_http2.py | https://github.com/wistbean/learn_python3_spider/blob/master/stackoverflow/venv/lib/python3.6/site-packages/twisted/web/_http2.py | MIT |
def forceAbortClient(self):
    """
    Hard-close the transport.

    Invoked if C{abortTimeout} seconds have passed since the timeout
    fired and the connection still hasn't gone away -- which really
    only happens on extremely bad connections or when clients
    maliciously attempt to keep connections open.
    """
    self._log.info(
        "Forcibly timing out client: {client}",
        client=self.transport.getPeer()
    )
    # Forget the pending call so that no-one tries to cancel it.
    self._abortingCall = None
    self.transport.abortConnection()
and the connection still hasn't gone away. This can really only happen
on extremely bad connections or when clients are maliciously attempting
to keep connections open. | forceAbortClient | python | wistbean/learn_python3_spider | stackoverflow/venv/lib/python3.6/site-packages/twisted/web/_http2.py | https://github.com/wistbean/learn_python3_spider/blob/master/stackoverflow/venv/lib/python3.6/site-packages/twisted/web/_http2.py | MIT |
def connectionLost(self, reason):
    """
    Handle the loss of the transport connection.

    Informs all outstanding response handlers that the connection has
    been lost, and cleans up all internal state.
    """
    self._stillProducing = False
    self.setTimeout(None)
    # First notify every stream, then tear each one down.
    for stream in self.streams.values():
        stream.connectionLost(reason)
    for streamID in list(self.streams):
        self._requestDone(streamID)
    # Any scheduled force-close is no longer needed.
    abortingCall, self._abortingCall = self._abortingCall, None
    if abortingCall is not None:
        abortingCall.cancel()
Informs all outstanding response handlers that the connection has been
lost, and cleans up all internal state. | connectionLost | python | wistbean/learn_python3_spider | stackoverflow/venv/lib/python3.6/site-packages/twisted/web/_http2.py | https://github.com/wistbean/learn_python3_spider/blob/master/stackoverflow/venv/lib/python3.6/site-packages/twisted/web/_http2.py | MIT |
def stopProducing(self):
    """
    Stop producing data for good.

    Tells this L{H2Connection} that its consumer has died, so all
    connection state must be torn down.
    """
    self.connectionLost(ConnectionLost("Producing stopped"))
This tells the L{H2Connection} that its consumer has died, so it must
stop producing data for good. | stopProducing | python | wistbean/learn_python3_spider | stackoverflow/venv/lib/python3.6/site-packages/twisted/web/_http2.py | https://github.com/wistbean/learn_python3_spider/blob/master/stackoverflow/venv/lib/python3.6/site-packages/twisted/web/_http2.py | MIT |
def pauseProducing(self):
    """
    Pause producing data.

    The consumer cannot keep up for the time being; park a deferred
    that the sending loop will wait behind until L{resumeProducing} is
    called.
    """
    self._consumerBlocked = Deferred()
Tells the L{H2Connection} that it has produced too much data to process
for the time being, and to stop until resumeProducing() is called. | pauseProducing | python | wistbean/learn_python3_spider | stackoverflow/venv/lib/python3.6/site-packages/twisted/web/_http2.py | https://github.com/wistbean/learn_python3_spider/blob/master/stackoverflow/venv/lib/python3.6/site-packages/twisted/web/_http2.py | MIT |
def resumeProducing(self):
    """
    Resume producing data.

    Fires the deferred parked by L{pauseProducing}, which re-adds this
    L{H2Connection} to the main loop so it produces more data for the
    consumer.
    """
    blocked, self._consumerBlocked = self._consumerBlocked, None
    if blocked is not None:
        blocked.callback(None)
This tells the L{H2Connection} to re-add itself to the main loop and
produce more data for the consumer. | resumeProducing | python | wistbean/learn_python3_spider | stackoverflow/venv/lib/python3.6/site-packages/twisted/web/_http2.py | https://github.com/wistbean/learn_python3_spider/blob/master/stackoverflow/venv/lib/python3.6/site-packages/twisted/web/_http2.py | MIT |
def _sendPrioritisedData(self, *args):
    """
    The data sending loop.  This function repeatedly calls itself,
    either from L{Deferred}s or from
    L{reactor.callLater<twisted.internet.interfaces.IReactorTime.callLater>}.

    It sends data on streams according to the rules of HTTP/2 priority:
    the data of each stream is interleaved according to the priority
    signalled by the client, making sure the connection is used with
    maximal efficiency.

    This function executes while data is available; once all data is
    exhausted it parks a L{Deferred} on this L{H2Connection} and waits
    for it to be fired before resuming.

    @param args: Ignored; accepted so this method can be chained
        directly as a L{Deferred} callback.
    """
    # If producing has stopped, we're done. Don't reschedule ourselves
    if not self._stillProducing:
        return
    # Ask the priority tree which stream may send next.
    stream = None
    while stream is None:
        try:
            stream = next(self.priority)
        except priority.DeadlockError:
            # All streams are currently blocked or not progressing. Wait
            # until a new one becomes available.
            assert self._sendingDeferred is None
            self._sendingDeferred = Deferred()
            self._sendingDeferred.addCallback(self._sendPrioritisedData)
            return
    # Wait behind the transport.
    if self._consumerBlocked is not None:
        self._consumerBlocked.addCallback(self._sendPrioritisedData)
        return
    # Sending data counts as activity for timeout purposes.
    self.resetTimeout()
    remainingWindow = self.conn.local_flow_control_window(stream)
    frameData = self._outboundStreamQueues[stream].popleft()
    maxFrameSize = min(self.conn.max_outbound_frame_size, remainingWindow)
    if frameData is _END_STREAM_SENTINEL:
        # There's no error handling here even though this can throw
        # ProtocolError because we really shouldn't encounter this problem.
        # If we do, that's a nasty bug.
        self.conn.end_stream(stream)
        self.transport.write(self.conn.data_to_send())
        # Clean up the stream
        self._requestDone(stream)
    else:
        # Respect the max frame size: push any excess back onto the
        # front of this stream's queue for the next pass.
        if len(frameData) > maxFrameSize:
            excessData = frameData[maxFrameSize:]
            frameData = frameData[:maxFrameSize]
            self._outboundStreamQueues[stream].appendleft(excessData)
        # There's deliberately no error handling here, because this just
        # absolutely should not happen.
        # If for whatever reason the max frame length is zero and so we
        # have no frame data to send, don't send any.
        if frameData:
            self.conn.send_data(stream, frameData)
            self.transport.write(self.conn.data_to_send())
        # If there's no data left, this stream is now blocked.
        if not self._outboundStreamQueues[stream]:
            self.priority.block(stream)
        # Also, if the stream's flow control window is exhausted, tell it
        # to stop.
        if self.remainingOutboundWindow(stream) <= 0:
            self.streams[stream].flowControlBlocked()
    # Reschedule ourselves to keep the loop running.
    self._reactor.callLater(0, self._sendPrioritisedData)
from L{Deferred}s or from
L{reactor.callLater<twisted.internet.interfaces.IReactorTime.callLater>}
This function sends data on streams according to the rules of HTTP/2
priority. It ensures that the data from each stream is interleved
according to the priority signalled by the client, making sure that the
connection is used with maximal efficiency.
This function will execute if data is available: if all data is
exhausted, the function will place a deferred onto the L{H2Connection}
object and wait until it is called to resume executing. | _sendPrioritisedData | python | wistbean/learn_python3_spider | stackoverflow/venv/lib/python3.6/site-packages/twisted/web/_http2.py | https://github.com/wistbean/learn_python3_spider/blob/master/stackoverflow/venv/lib/python3.6/site-packages/twisted/web/_http2.py | MIT |
def _requestReceived(self, event):
    """
    Handle a HEADERS frame that opens a new inbound stream: create the
    L{H2Stream}, register its bookkeeping structures, and add the
    stream to the priority tree.

    @param event: The Hyper-h2 event that encodes information about the
        received request.
    @type event: L{h2.events.RequestReceived}
    """
    streamID = event.stream_id
    stream = H2Stream(
        streamID,
        self, event.headers,
        self.requestFactory,
        self.site,
        self.factory
    )
    self.streams[streamID] = stream
    self._streamCleanupCallbacks[streamID] = Deferred()
    self._outboundStreamQueues[streamID] = deque()
    # Add the stream to the priority tree but immediately block it.
    try:
        self.priority.insert_stream(streamID)
    except priority.DuplicateStreamError:
        # A PRIORITY frame for this stream arrived before its HEADERS
        # frame and already created (and blocked) the node -- see
        # _handlePriorityUpdate.  Nothing more to do.
        pass
    else:
        self.priority.block(streamID)
@param event: The Hyper-h2 event that encodes information about the
received request.
@type event: L{h2.events.RequestReceived} | _requestReceived | python | wistbean/learn_python3_spider | stackoverflow/venv/lib/python3.6/site-packages/twisted/web/_http2.py | https://github.com/wistbean/learn_python3_spider/blob/master/stackoverflow/venv/lib/python3.6/site-packages/twisted/web/_http2.py | MIT |
def _requestDataReceived(self, event):
    """
    Deliver a chunk of request body data to the stream it belongs to.

    @param event: The Hyper-h2 event that encodes information about the
        received data.
    @type event: L{h2.events.DataReceived}
    """
    self.streams[event.stream_id].receiveDataChunk(
        event.data, event.flow_controlled_length)
request.
@param event: The Hyper-h2 event that encodes information about the
received data.
@type event: L{h2.events.DataReceived} | _requestDataReceived | python | wistbean/learn_python3_spider | stackoverflow/venv/lib/python3.6/site-packages/twisted/web/_http2.py | https://github.com/wistbean/learn_python3_spider/blob/master/stackoverflow/venv/lib/python3.6/site-packages/twisted/web/_http2.py | MIT |
def _requestEnded(self, event):
    """
    Handle the end of an inbound stream: the request is complete and no
    further data is expected for it.

    @param event: The Hyper-h2 event that encodes information about the
        completed stream.
    @type event: L{h2.events.StreamEnded}
    """
    self.streams[event.stream_id].requestComplete()
further data for that request.
@param event: The Hyper-h2 event that encodes information about the
completed stream.
@type event: L{h2.events.StreamEnded} | _requestEnded | python | wistbean/learn_python3_spider | stackoverflow/venv/lib/python3.6/site-packages/twisted/web/_http2.py | https://github.com/wistbean/learn_python3_spider/blob/master/stackoverflow/venv/lib/python3.6/site-packages/twisted/web/_http2.py | MIT |
def _requestAborted(self, event):
    """
    Handle a stream reset by the remote peer: notify the stream of the
    loss and discard its state.

    @param event: The Hyper-h2 event that encodes information about the
        reset stream.
    @type event: L{h2.events.StreamReset}
    """
    failure = ConnectionLost(
        "Stream reset with code %s" % event.error_code)
    self.streams[event.stream_id].connectionLost(failure)
    self._requestDone(event.stream_id)
@param event: The Hyper-h2 event that encodes information about the
reset stream.
@type event: L{h2.events.StreamReset} | _requestAborted | python | wistbean/learn_python3_spider | stackoverflow/venv/lib/python3.6/site-packages/twisted/web/_http2.py | https://github.com/wistbean/learn_python3_spider/blob/master/stackoverflow/venv/lib/python3.6/site-packages/twisted/web/_http2.py | MIT |
def _handlePriorityUpdate(self, event):
    """
    React to a PRIORITY frame by moving the stream within the priority
    tree.

    @param event: The Hyper-h2 event that encodes information about the
        stream reprioritization.
    @type event: L{h2.events.PriorityUpdated}
    """
    placement = dict(
        stream_id=event.stream_id,
        depends_on=event.depends_on or None,
        weight=event.weight,
        exclusive=event.exclusive,
    )
    try:
        self.priority.reprioritize(**placement)
    except priority.MissingStreamError:
        # The PRIORITY frame arrived before the HEADERS frame that
        # would trigger us to insert the stream into the tree.  That's
        # fine: create the stream here and mark it as blocked.
        self.priority.insert_stream(**placement)
        self.priority.block(event.stream_id)
@param event: The Hyper-h2 event that encodes information about the
stream reprioritization.
@type event: L{h2.events.PriorityUpdated} | _handlePriorityUpdate | python | wistbean/learn_python3_spider | stackoverflow/venv/lib/python3.6/site-packages/twisted/web/_http2.py | https://github.com/wistbean/learn_python3_spider/blob/master/stackoverflow/venv/lib/python3.6/site-packages/twisted/web/_http2.py | MIT |
def writeHeaders(self, version, code, reason, headers, streamID):
    """
    Called by L{twisted.web.http.Request} objects to write a complete set
    of HTTP headers to a stream.

    @param version: The HTTP version in use. Unused in HTTP/2.
    @type version: L{bytes}

    @param code: The HTTP status code to write.
    @type code: L{bytes}

    @param reason: The HTTP reason phrase to write. Unused in HTTP/2.
    @type reason: L{bytes}

    @param headers: The headers to write to the stream, as a list of
        (name, value) tuples; mutated in place to prepend the
        C{:status} pseudo-header.

    @param streamID: The ID of the stream to write the headers to.
    @type streamID: L{int}
    """
    # HTTP/2 carries the status code as the :status pseudo-header, which
    # must precede all regular header fields.
    headers.insert(0, (b':status', code))
    try:
        self.conn.send_headers(streamID, headers)
    except h2.exceptions.StreamClosedError:
        # Stream was closed by the client at some point. We need to not
        # explode here: just swallow the error. That's what write() does
        # when a connection is lost, so that's what we do too.
        return
    self.transport.write(self.conn.data_to_send())
def writeDataToStream(self, streamID, data):
    """
    May be called by L{H2Stream} objects to write response data to a given
    stream. Writes a single data frame.

    @param streamID: The ID of the stream to write the data to.
    @type streamID: L{int}

    @param data: The data chunk to write to the stream.
    @type data: L{bytes}
    """
    self._outboundStreamQueues[streamID].append(data)

    # Unblocking this stream (and waking the sending loop) is pointless
    # unless flow control actually permits sending, so check that first.
    if self.conn.local_flow_control_window(streamID) > 0:
        self.priority.unblock(streamID)
        if self._sendingDeferred is not None:
            d, self._sendingDeferred = self._sendingDeferred, None
            d.callback(streamID)

    if self.remainingOutboundWindow(streamID) <= 0:
        self.streams[streamID].flowControlBlocked()
def endRequest(self, streamID):
    """
    Called by L{H2Stream} objects to signal completion of a response.

    @param streamID: The ID of the stream whose response is complete.
    @type streamID: L{int}
    """
    # Queue the end-of-stream marker and make sure the sending loop will
    # visit this stream to flush it out.
    self._outboundStreamQueues[streamID].append(_END_STREAM_SENTINEL)
    self.priority.unblock(streamID)
    if self._sendingDeferred is not None:
        d, self._sendingDeferred = self._sendingDeferred, None
        d.callback(streamID)