code | docstring | func_name | language | repo | path | url | license |
---|---|---|---|---|---|---|---|
def _get_target_from_range(
target: pgast.BaseExpr, rvar: pgast.BaseRangeVar
) -> Optional[pgast.BaseExpr]:
"""Try to read a target out of a very simple rvar.
The goal here is to allow collapsing trivial pass-through subqueries.
In particular, given a target `foo.bar` and an rvar
`(SELECT <expr> as "bar") AS "foo"`, we produce <expr>.
We can also recursively handle the nested case.
"""
if (
not isinstance(rvar, pgast.RangeSubselect)
# Check that the relation name matches the rvar
or not isinstance(target, pgast.ColumnRef)
or not target.name
or target.name[0] != rvar.alias.aliasname
# And that the rvar is a simple subquery with one target
# and at most one from clause
or not (subq := rvar.subquery)
or not isinstance(subq, pgast.SelectStmt)
or len(subq.target_list) != 1
or not select_is_simple(subq)
or len(subq.from_clause) > 1
# And that the one target matches
or not (inner_tgt := rvar.subquery.target_list[0])
or inner_tgt.name != target.name[1]
):
return None
if subq.from_clause:
return _get_target_from_range(inner_tgt.val, subq.from_clause[0])
else:
return inner_tgt.val | Try to read a target out of a very simple rvar.
The goal here is to allow collapsing trivial pass-through subqueries.
In particular, given a target `foo.bar` and an rvar
`(SELECT <expr> as "bar") AS "foo"`, we produce <expr>.
We can also recursively handle the nested case. | _get_target_from_range | python | geldata/gel | edb/pgsql/compiler/astutils.py | https://github.com/geldata/gel/blob/master/edb/pgsql/compiler/astutils.py | Apache-2.0 |
def collapse_query(query: pgast.Query) -> pgast.BaseExpr:
"""Try to collapse trivial queries into simple expressions.
In particular, we want to transform
`(SELECT foo.bar FROM LATERAL (SELECT <expr> as "bar") AS "foo")`
into simply `<expr>`.
"""
if not isinstance(query, pgast.SelectStmt):
return query
if (
len(query.target_list) == 1
and len(query.from_clause) == 0
and select_is_simple(query)
):
return query.target_list[0].val
if (
len(query.target_list) != 1
or len(query.from_clause) != 1
):
return query
val = _get_target_from_range(
query.target_list[0].val, query.from_clause[0])
if val:
return val
else:
return query | Try to collapse trivial queries into simple expressions.
In particular, we want to transform
`(SELECT foo.bar FROM LATERAL (SELECT <expr> as "bar") AS "foo")`
into simply `<expr>`. | collapse_query | python | geldata/gel | edb/pgsql/compiler/astutils.py | https://github.com/geldata/gel/blob/master/edb/pgsql/compiler/astutils.py | Apache-2.0 |
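A minimal runnable sketch of the collapse these two functions perform together. The dataclasses below are simplified stand-ins for the real `pgast` nodes (`ColumnRef`, `ResTarget`, `RangeSubselect`), not the actual API:

```python
from dataclasses import dataclass, field
from typing import Optional

@dataclass
class Column:            # stand-in for pgast.ColumnRef
    name: tuple

@dataclass
class Target:            # stand-in for pgast.ResTarget
    name: str
    val: object

@dataclass
class Subselect:         # stand-in for pgast.RangeSubselect
    alias: str
    targets: list
    from_clause: list = field(default_factory=list)

def get_target_from_range(target, rvar) -> Optional[object]:
    # Mirrors the guard above: a single-target subselect whose alias
    # matches the column's first path component.
    if (
        not isinstance(rvar, Subselect)
        or not isinstance(target, Column)
        or target.name[0] != rvar.alias
        or len(rvar.targets) != 1
        or rvar.targets[0].name != target.name[1]
    ):
        return None
    if rvar.from_clause:   # recurse through nested pass-throughs
        return get_target_from_range(rvar.targets[0].val, rvar.from_clause[0])
    return rvar.targets[0].val

# SELECT foo.bar FROM (SELECT '<expr>' AS "bar") AS "foo"  -->  '<expr>'
rvar = Subselect(alias='foo', targets=[Target(name='bar', val='<expr>')])
assert get_target_from_range(Column(name=('foo', 'bar')), rvar) == '<expr>'
```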
def visit(ir: irast.Base, *, ctx: context.CompilerContextLevel) -> None:
"""A compilation version that does not pull the value eagerly."""
compile(ir, ctx=ctx) | A compilation version that does not pull the value eagerly. | visit | python | geldata/gel | edb/pgsql/compiler/dispatch.py | https://github.com/geldata/gel/blob/master/edb/pgsql/compiler/dispatch.py | Apache-2.0 |
def serialize_custom_tuple(
expr: pgast.BaseExpr,
*,
styperef: irast.TypeRef,
env: context.Environment,
) -> pgast.BaseExpr:
"""Serialize a tuple that needs custom serialization for a component"""
vals: List[pgast.BaseExpr] = []
obj: pgast.BaseExpr
if irtyputils.is_persistent_tuple(styperef):
for el_idx, el_type in enumerate(styperef.subtypes):
val: pgast.BaseExpr = pgast.Indirection(
arg=expr,
indirection=[
pgast.RecordIndirectionOp(name=str(el_idx)),
],
)
val = output_as_value(
val, ser_typeref=el_type, env=env)
vals.append(val)
obj = _row(vals)
else:
coldeflist = []
for el_idx, el_type in enumerate(styperef.subtypes):
coldeflist.append(pgast.ColumnDef(
name=str(el_idx),
typename=pgast.TypeName(
name=pgtypes.pg_type_from_ir_typeref(el_type),
),
))
val = pgast.ColumnRef(name=[str(el_idx)])
val = output_as_value(
val, ser_typeref=el_type, env=env)
vals.append(val)
obj = _row(vals)
obj = pgast.SelectStmt(
target_list=[
pgast.ResTarget(
val=obj,
),
],
from_clause=[
pgast.RangeFunction(
functions=[
pgast.FuncCall(
name=('unnest',),
args=[
pgast.ArrayExpr(
elements=[expr],
)
],
coldeflist=coldeflist,
)
]
)
] if styperef.subtypes else []
)
if expr.nullable:
obj = pgast.SelectStmt(
target_list=[pgast.ResTarget(val=obj)],
where_clause=pgast.NullTest(arg=expr, negated=True)
)
return obj | Serialize a tuple that needs custom serialization for a component | serialize_custom_tuple | python | geldata/gel | edb/pgsql/compiler/output.py | https://github.com/geldata/gel/blob/master/edb/pgsql/compiler/output.py | Apache-2.0 |
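For the non-persistent branch, the tuple value is passed through `unnest` with a column definition list and reassembled with `ROW(...)`. A hand-written illustration of that SQL shape, with invented element types (this is not captured compiler output):

```python
# "0" needs custom (text) serialization, "1" does not; the coldeflist
# names and types here are made up for the example.
ILLUSTRATIVE_SQL = """
SELECT ROW("0"::text, "1")
FROM unnest(ARRAY[<expr>]) AS ("0" some_custom_type, "1" int8)
"""
```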
def serialize_custom_array(
expr: pgast.BaseExpr,
*,
styperef: irast.TypeRef,
env: context.Environment,
) -> pgast.BaseExpr:
"""Serialize an array that needs custom serialization for a component"""
el_type = styperef.subtypes[0]
is_tuple = irtyputils.is_tuple(el_type)
if is_tuple:
coldeflist = []
out_alias = env.aliases.get('q')
val: pgast.BaseExpr
args: List[pgast.BaseExpr] = []
is_named = any(st.element_name for st in el_type.subtypes)
for i, st in enumerate(el_type.subtypes):
if is_named:
colname = st.element_name
assert colname
args.append(pgast.StringConstant(val=colname))
else:
colname = str(i)
val = pgast.ColumnRef(name=[colname])
val = output_as_value(val, ser_typeref=st, env=env)
args.append(val)
if not irtyputils.is_persistent_tuple(el_type):
# Column definition list is only allowed for functions
# returning "record", i.e. an anonymous tuple, which
# would not be the case for schema-persistent tuple types.
coldeflist.append(
pgast.ColumnDef(
name=colname,
typename=pgast.TypeName(
name=pgtypes.pg_type_from_ir_typeref(st)
)
)
)
agg_arg: pgast.BaseExpr = _row(args)
return pgast.SelectStmt(
target_list=[
pgast.ResTarget(
val=pgast.CoalesceExpr(
args=[
pgast.FuncCall(
name=('array_agg',),
args=[agg_arg],
),
pgast.TypeCast(
arg=pgast.ArrayExpr(elements=[]),
type_name=pgast.TypeName(name=('record[]',)),
),
]
),
ser_safe=True,
)
],
from_clause=[
pgast.RangeFunction(
alias=pgast.Alias(aliasname=out_alias),
is_rowsfrom=True,
functions=[
pgast.FuncCall(
name=('unnest',),
args=[expr],
coldeflist=coldeflist,
)
]
)
]
)
else:
el_sql_type = el_type.real_base_type.custom_sql_serialization
return pgast.TypeCast(
arg=expr,
type_name=pgast.TypeName(name=(f'{el_sql_type}[]',)),
) | Serialize an array that needs custom serialization for a component | serialize_custom_array | python | geldata/gel | edb/pgsql/compiler/output.py | https://github.com/geldata/gel/blob/master/edb/pgsql/compiler/output.py | Apache-2.0 |
def output_as_value(
expr: pgast.BaseExpr, *,
ser_typeref: Optional[irast.TypeRef] = None,
env: context.Environment) -> pgast.BaseExpr:
"""Format an expression as a proper value.
Normally this just means packing TupleVars into real expressions,
but if ser_typeref is provided, we will also do binary serialization.
In particular, certain types actually need to be serialized as text
or some other format, and we handle that here.
"""
needs_custom_serialization = ser_typeref and (
irtyputils.needs_custom_serialization(ser_typeref))
val = expr
if isinstance(expr, pgast.TupleVar):
if (
env.output_format is context.OutputFormat.NATIVE_INTERNAL
and len(expr.elements) == 1
and (path_id := (el0 := expr.elements[0]).path_id) is not None
and (rptr_name := path_id.rptr_name()) is not None
and (rptr_name.name == 'id')
):
# This is a special mode whereby bare refs to objects
# are serialized to UUID values.
return output_as_value(el0.val, env=env)
ser_typerefs = [
ser_typeref.subtypes[i]
if ser_typeref and ser_typeref.subtypes else None
for i in range(len(expr.elements))
]
val = _row([
output_as_value(e.val, ser_typeref=ser_typerefs[i], env=env)
for i, e in enumerate(expr.elements)
])
if (expr.typeref is not None
and not needs_custom_serialization
and not env.singleton_mode
and irtyputils.is_persistent_tuple(expr.typeref)):
pg_type = pgtypes.pg_type_from_ir_typeref(expr.typeref)
val = pgast.TypeCast(
arg=val,
type_name=pgast.TypeName(
name=pg_type,
),
)
elif (needs_custom_serialization and not expr.ser_safe):
assert ser_typeref is not None
if irtyputils.is_array(ser_typeref):
return serialize_custom_array(expr, styperef=ser_typeref, env=env)
elif irtyputils.is_tuple(ser_typeref):
return serialize_custom_tuple(expr, styperef=ser_typeref, env=env)
else:
el_sql_type = ser_typeref.real_base_type.custom_sql_serialization
assert el_sql_type is not None
val = pgast.TypeCast(
arg=val,
type_name=pgast.TypeName(name=(el_sql_type,)),
)
return val | Format an expression as a proper value.
Normally this just means packing TupleVars into real expressions,
but if ser_typeref is provided, we will also do binary serialization.
In particular, certain types actually need to be serialized as text
or some other format, and we handle that here. | output_as_value | python | geldata/gel | edb/pgsql/compiler/output.py | https://github.com/geldata/gel/blob/master/edb/pgsql/compiler/output.py | Apache-2.0 |
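A pure-Python analogue of the recursion in `output_as_value`: tuples recurse element-wise and custom serialization applies at the leaves. The serializer table and type names are hypothetical:

```python
from typing import Any, Optional

# Hypothetical stand-in for custom_sql_serialization on leaf typerefs.
CUSTOM = {'vector': lambda v: '[' + ','.join(map(str, v)) + ']'}

def as_value(value: Any, typename: Optional[Any]) -> Any:
    if isinstance(typename, tuple):           # tuple type: recurse per element
        return tuple(as_value(v, t) for v, t in zip(value, typename))
    ser = CUSTOM.get(typename)                # leaf: custom serializer, if any
    return ser(value) if ser else value

print(as_value(('a', [1.0, 2.0]), ('str', 'vector')))  # ('a', '[1.0,2.0]')
```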
def top_output_as_value(
stmt: pgast.SelectStmt,
ir_set: irast.Set, *,
env: context.Environment) -> pgast.SelectStmt:
"""Finalize output serialization on the top level."""
if (env.output_format is context.OutputFormat.JSON and
not env.expected_cardinality_one):
# For JSON we just want to aggregate the whole thing
# into a JSON array.
return aggregate_json_output(stmt, ir_set, env=env)
elif (
env.explicit_top_cast is not None
and (
env.output_format is context.OutputFormat.NATIVE
or env.output_format is context.OutputFormat.NATIVE_INTERNAL
)
):
typecast = pgast.TypeCast(
arg=stmt.target_list[0].val,
type_name=pgast.TypeName(
name=pgtypes.pg_type_from_ir_typeref(
env.explicit_top_cast,
persistent_tuples=True,
),
),
)
stmt.target_list[0] = pgast.ResTarget(
name=env.aliases.get('v'),
val=typecast,
)
return stmt
elif env.output_format is context.OutputFormat.NONE:
return wrap_script_stmt(stmt, env=env)
else:
# JSON_ELEMENTS and BINARY don't require any wrapping
return stmt | Finalize output serialization on the top level. | top_output_as_value | python | geldata/gel | edb/pgsql/compiler/output.py | https://github.com/geldata/gel/blob/master/edb/pgsql/compiler/output.py | Apache-2.0 |
def get_volatility_ref(
path_id: irast.PathId,
stmt: pgast.SelectStmt,
*,
ctx: context.CompilerContextLevel) -> Optional[pgast.BaseExpr]:
"""Produce an appropriate volatility_ref from a path_id."""
ref: Optional[pgast.BaseExpr] = relctx.maybe_get_path_var(
stmt, path_id, aspect=pgce.PathAspect.ITERATOR, ctx=ctx)
if not ref:
ref = relctx.maybe_get_path_var(
stmt, path_id, aspect=pgce.PathAspect.IDENTITY, ctx=ctx)
if not ref:
rvar = relctx.maybe_get_path_rvar(
stmt, path_id, aspect=pgce.PathAspect.VALUE, ctx=ctx)
if (
rvar
and isinstance(rvar.query, pgast.ReturningQuery)
# Expanded inhviews might be unions, which can't naively have
# a row_number stuck on; they should be safe to just grab
# the path_id value from, though
and rvar.tag != 'expanded-inhview'
):
# If we are selecting from a nontrivial subquery, manually
# add a volatility ref based on row_number. We do it
# manually because the row number isn't /really/ the
# identity of the set.
name = ctx.env.aliases.get('key')
rvar.query.target_list.append(
pgast.ResTarget(
name=name,
val=pgast.FuncCall(name=('row_number',), args=[],
over=pgast.WindowDef())
)
)
ref = pgast.ColumnRef(name=[rvar.alias.aliasname, name])
else:
ref = relctx.maybe_get_path_var(
stmt, path_id, aspect=pgce.PathAspect.VALUE, ctx=ctx)
return ref | Produce an appropriate volatility_ref from a path_id. | get_volatility_ref | python | geldata/gel | edb/pgsql/compiler/clauses.py | https://github.com/geldata/gel/blob/master/edb/pgsql/compiler/clauses.py | Apache-2.0 |
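The `row_number()` fallback can be exercised in isolation. A runnable SQLite analogue (window functions require SQLite 3.25+) of tagging a nontrivial subquery with a synthetic per-row key:

```python
import sqlite3

conn = sqlite3.connect(':memory:')
conn.executescript("CREATE TABLE t(v); INSERT INTO t VALUES ('a'), ('b'), ('a');")
rows = conn.execute(
    # The appended target: row_number() OVER () as a synthetic key.
    "SELECT v, row_number() OVER () AS key FROM (SELECT v FROM t)"
).fetchall()
print(rows)  # [('a', 1), ('b', 2), ('a', 3)]
```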
def find_rvar(
stmt: pgast.Query, *,
flavor: str='normal',
source_stmt: Optional[pgast.Query]=None,
path_id: irast.PathId,
ctx: context.CompilerContextLevel) -> \
Optional[pgast.PathRangeVar]:
"""Find an existing range var for a given *path_id* in stmt hierarchy.
If a range var is visible in a given SQL scope denoted by *stmt*, or,
optionally, *source_stmt*, record it on *stmt* for future reference.
:param stmt:
The statement to ensure range var visibility in.
:param flavor:
Whether to look for normal rvars or packed rvars
:param source_stmt:
An optional statement object which is used as the starting SQL scope
for range var search. If not specified, *stmt* is used as the
starting scope.
:param path_id:
The path ID of the range var being searched.
:param ctx:
Compiler context.
:return:
A range var instance if found, ``None`` otherwise.
"""
if source_stmt is None:
source_stmt = stmt
rvar = maybe_get_path_rvar(
source_stmt,
path_id=path_id,
aspect=pgce.PathAspect.VALUE,
flavor=flavor,
ctx=ctx,
)
if rvar is not None:
pathctx.put_path_rvar_if_not_exists(
stmt,
path_id,
rvar,
aspect=pgce.PathAspect.VALUE,
flavor=flavor,
)
src_rvar = maybe_get_path_rvar(
source_stmt,
path_id=path_id,
aspect=pgce.PathAspect.SOURCE,
flavor=flavor,
ctx=ctx
)
if src_rvar is not None:
pathctx.put_path_rvar_if_not_exists(
stmt,
path_id,
src_rvar,
aspect=pgce.PathAspect.SOURCE,
flavor=flavor,
)
return rvar | Find an existing range var for a given *path_id* in stmt hierarchy.
If a range var is visible in a given SQL scope denoted by *stmt*, or,
optionally, *source_stmt*, record it on *stmt* for future reference.
:param stmt:
The statement to ensure range var visibility in.
:param flavor:
Whether to look for normal rvars or packed rvars
:param source_stmt:
An optional statement object which is used as the starting SQL scope
for range var search. If not specified, *stmt* is used as the
starting scope.
:param path_id:
The path ID of the range var being searched.
:param ctx:
Compiler context.
:return:
A range var instance if found, ``None`` otherwise. | find_rvar | python | geldata/gel | edb/pgsql/compiler/relctx.py | https://github.com/geldata/gel/blob/master/edb/pgsql/compiler/relctx.py | Apache-2.0 |
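The lookup-then-record pattern above, reduced to plain dicts; a sketch only, not the real path-rvar registries:

```python
def find_and_record(stmt_rvars: dict, source_rvars: dict, path_id):
    # Look the rvar up in the source scope...
    rvar = source_rvars.get(path_id)
    if rvar is not None:
        # ...and record it on the target statement for future reference,
        # without clobbering an existing entry (put_path_rvar_if_not_exists).
        stmt_rvars.setdefault(path_id, rvar)
    return rvar
```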
def include_rvar(
stmt: pgast.SelectStmt,
rvar: pgast.PathRangeVar,
path_id: irast.PathId, *,
overwrite_path_rvar: bool=False,
pull_namespace: bool=True,
update_mask: bool=True,
flavor: str='normal',
aspects: Optional[
Tuple[pgce.PathAspect, ...]
| AbstractSet[pgce.PathAspect]
]=None,
ctx: context.CompilerContextLevel,
) -> pgast.PathRangeVar:
"""Ensure that *rvar* is visible in *stmt* as a value/source aspect.
:param stmt:
The statement to include *rvar* in.
:param rvar:
The range var node to join.
:param path_id:
The path ID that *rvar* represents.
:param flavor:
Whether this is a normal or packed rvar
:param aspects:
The reference aspects of the range var.
:param ctx:
Compiler context.
"""
if aspects is None:
aspects = (pgce.PathAspect.VALUE,)
if path_id.is_objtype_path():
if isinstance(rvar, pgast.RangeSubselect):
if pathctx.has_path_aspect(
rvar.query,
path_id,
aspect=pgce.PathAspect.SOURCE,
):
aspects += (pgce.PathAspect.SOURCE,)
else:
aspects += (pgce.PathAspect.SOURCE,)
elif path_id.is_tuple_path():
aspects += (pgce.PathAspect.SOURCE,)
return include_specific_rvar(
stmt, rvar=rvar, path_id=path_id,
overwrite_path_rvar=overwrite_path_rvar,
pull_namespace=pull_namespace,
update_mask=update_mask,
flavor=flavor,
aspects=aspects,
ctx=ctx) | Ensure that *rvar* is visible in *stmt* as a value/source aspect.
:param stmt:
The statement to include *rvar* in.
:param rvar:
The range var node to join.
:param path_id:
The path ID that *rvar* represents.
:param flavor:
Whether this is a normal or packed rvar
:param aspects:
The reference aspects of the range var.
:param ctx:
Compiler context. | include_rvar | python | geldata/gel | edb/pgsql/compiler/relctx.py | https://github.com/geldata/gel/blob/master/edb/pgsql/compiler/relctx.py | Apache-2.0 |
def include_specific_rvar(
stmt: pgast.SelectStmt,
rvar: pgast.PathRangeVar,
path_id: irast.PathId, *,
overwrite_path_rvar: bool=False,
pull_namespace: bool=True,
update_mask: bool=True,
flavor: str='normal',
aspects: Iterable[pgce.PathAspect]=(pgce.PathAspect.VALUE,),
ctx: context.CompilerContextLevel,
) -> pgast.PathRangeVar:
"""Make the *aspect* of *path_id* visible in *stmt* as *rvar*.
:param stmt:
The statement to include *rel* in.
:param rvar:
The range var node to join.
:param join_type:
JOIN type to use when including *rel*.
:param flavor:
Whether this is a normal or packed rvar
:param aspect:
The reference aspect of the range var.
:param ctx:
Compiler context.
"""
if not has_rvar(stmt, rvar, ctx=ctx):
rel_join(stmt, rvar, ctx=ctx)
# Make sure that the path namespace of *rvar* is mapped
# onto the path namespace of *stmt*.
if pull_namespace:
pull_path_namespace(target=stmt, source=rvar, ctx=ctx)
for aspect in aspects:
if overwrite_path_rvar:
pathctx.put_path_rvar(
stmt, path_id, rvar, flavor=flavor, aspect=aspect
)
else:
pathctx.put_path_rvar_if_not_exists(
stmt, path_id, rvar, flavor=flavor, aspect=aspect
)
if update_mask:
scopes = [ctx.scope_tree]
parent_scope = ctx.scope_tree.parent
if parent_scope is not None:
scopes.append(parent_scope)
tpath_id = path_id.tgt_path()
if not any(scope.path_id == tpath_id or
scope.find_child(tpath_id) for scope in scopes):
pathctx.put_path_id_mask(stmt, path_id)
return rvar | Make the *aspects* of *path_id* visible in *stmt* as *rvar*.
:param stmt:
The statement to include *rvar* in.
:param rvar:
The range var node to join.
:param path_id:
The path ID that *rvar* represents.
:param flavor:
Whether this is a normal or packed rvar
:param aspects:
The reference aspects of the range var.
:param ctx:
Compiler context. | include_specific_rvar | python | geldata/gel | edb/pgsql/compiler/relctx.py | https://github.com/geldata/gel/blob/master/edb/pgsql/compiler/relctx.py | Apache-2.0 |
def new_free_object_rvar(
typeref: irast.TypeRef,
path_id: irast.PathId,
*,
lateral: bool=False,
ctx: context.CompilerContextLevel,
) -> pgast.PathRangeVar:
"""Create a fake source rel for a free object
Free objects don't *really* have ids, but the compiler needs all
objects to have ids, so we just inject the type id as if it was an
id. It shouldn't get used for anything but NULL testing, so no
problem.
The only thing other than ids that need to come from a free object
is __type__, which we inject in a special case way in
pathctx.get_path_var.
We also have a special case in relgen.ensure_source_rvar to reuse an
existing value rvar instead of creating a new root rvar.
(We inject __type__ in get_path_var instead of injecting it here because
we don't have the pathid for it available to us here and because it
allows ensure_source_rvar to simply reuse a value rvar.)
"""
with ctx.subrel() as subctx:
qry = subctx.rel
id_expr = astutils.compile_typeref(typeref.real_material_type)
pathctx.put_path_identity_var(qry, path_id, id_expr)
pathctx.put_path_value_var(qry, path_id, id_expr)
return rvar_for_rel(qry, typeref=typeref, lateral=lateral, ctx=ctx) | Create a fake source rel for a free object
Free objects don't *really* have ids, but the compiler needs all
objects to have ids, so we just inject the type id as if it was an
id. It shouldn't get used for anything but NULL testing, so no
problem.
The only thing other than ids that need to come from a free object
is __type__, which we inject in a special case way in
pathctx.get_path_var.
We also have a special case in relgen.ensure_source_rvar to reuse an
existing value rvar instead of creating a new root rvar.
(We inject __type__ in get_path_var instead of injecting it here because
we don't have the pathid for it available to us here and because it
allows ensure_source_rvar to simply reuse a value rvar.) | new_free_object_rvar | python | geldata/gel | edb/pgsql/compiler/relctx.py | https://github.com/geldata/gel/blob/master/edb/pgsql/compiler/relctx.py | Apache-2.0 |
def deep_copy_primitive_rvar_path_var(
orig_id: irast.PathId, new_id: irast.PathId,
rvar: pgast.PathRangeVar, *,
env: context.Environment
) -> None:
"""Copy one identity path to another in a primitive rvar.
The trickiness here is because primitive rvars might have an
overlay stack, which means if they are joined on, it might be
using _lateral_union_join, which requires every component of
the union to have all the path bonds.
"""
if isinstance(rvar, pgast.RangeSubselect):
for component in astutils.each_query_in_set(rvar.query):
rref = pathctx.get_path_var(
component, orig_id, aspect=pgce.PathAspect.IDENTITY, env=env
)
pathctx.put_path_var(
component,
new_id,
rref,
aspect=pgce.PathAspect.IDENTITY,
)
else:
rref = pathctx.get_path_output(
rvar.query, orig_id, aspect=pgce.PathAspect.IDENTITY, env=env
)
pathctx.put_rvar_path_output(
rvar,
new_id,
aspect=pgce.PathAspect.IDENTITY,
var=rref,
) | Copy one identity path to another in a primitive rvar.
The trickiness here is because primitive rvars might have an
overlay stack, which means if they are joined on, it might be
using _lateral_union_join, which requires every component of
the union to have all the path bonds. | deep_copy_primitive_rvar_path_var | python | geldata/gel | edb/pgsql/compiler/relctx.py | https://github.com/geldata/gel/blob/master/edb/pgsql/compiler/relctx.py | Apache-2.0 |
def semi_join(
stmt: pgast.SelectStmt,
ir_set: irast.SetE[irast.Pointer], src_rvar: pgast.PathRangeVar, *,
ctx: context.CompilerContextLevel) -> pgast.PathRangeVar:
"""Join an IR Set using semi-join."""
rptr = ir_set.expr
# Target set range.
set_rvar = new_root_rvar(ir_set, lateral=True, ctx=ctx)
ptrref = rptr.ptrref
ptr_info = pg_types.get_ptrref_storage_info(
ptrref, resolve_type=False, allow_missing=True)
if ptr_info and ptr_info.table_type == 'ObjectType':
if rptr.is_inbound:
far_pid = ir_set.path_id.src_path()
assert far_pid is not None
else:
far_pid = ir_set.path_id
else:
far_pid = ir_set.path_id
# Link range.
map_rvar = new_pointer_rvar(ir_set, src_rvar=src_rvar, ctx=ctx)
include_rvar(
ctx.rel, map_rvar,
path_id=ir_set.path_id.ptr_path(), ctx=ctx)
tgt_ref = pathctx.get_rvar_path_identity_var(
set_rvar, far_pid, env=ctx.env)
pathctx.get_path_identity_output(
ctx.rel, far_pid, env=ctx.env)
cond = astutils.new_binop(tgt_ref, ctx.rel, 'IN')
stmt.where_clause = astutils.extend_binop(
stmt.where_clause, cond)
return set_rvar | Join an IR Set using semi-join. | semi_join | python | geldata/gel | edb/pgsql/compiler/relctx.py | https://github.com/geldata/gel/blob/master/edb/pgsql/compiler/relctx.py | Apache-2.0 |
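The generated condition is the classic `IN`-subquery semi-join. A hand-written illustration with hypothetical table names (not compiler output):

```python
ILLUSTRATIVE_SQL = """
SELECT ...
FROM "Target" AS t                 -- the new root rvar for the set
WHERE t.id IN (SELECT m.target     -- ctx.rel, with the link range joined in
               FROM "Source.link" AS m)
"""
```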
def update_scope(
ir_set: irast.Set, stmt: pgast.SelectStmt, *,
ctx: context.CompilerContextLevel) -> None:
"""Update the scope of an ir set to be a pg stmt.
If ir_set has a scope node associated with it, update path_scope
so that any paths bound in that scope will be compiled in the
context of stmt.
This, combined with maybe_get_scope_stmt, is the mechanism by
which the scope tree influences the shape of the output query.
"""
scope_tree = get_scope(ir_set, ctx=ctx)
if scope_tree is None:
return
ctx.scope_tree = scope_tree
ctx.path_scope = ctx.path_scope.new_child()
# Register paths in the current scope to be compiled as a subrel
# of stmt.
for p in scope_tree.path_children:
assert p.path_id is not None
ctx.path_scope[p.path_id] = stmt | Update the scope of an ir set to be a pg stmt.
If ir_set has a scope node associated with it, update path_scope
so that any paths bound in that scope will be compiled in the
context of stmt.
This, combined with maybe_get_scope_stmt, is the mechanism by
which the scope tree influences the shape of the output query. | update_scope | python | geldata/gel | edb/pgsql/compiler/relctx.py | https://github.com/geldata/gel/blob/master/edb/pgsql/compiler/relctx.py | Apache-2.0 |
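`path_scope.new_child()` implies ChainMap-style shadowing; a sketch of the behaviour being relied on, assuming those semantics:

```python
from collections import ChainMap

outer = ChainMap({'path_a': 'outer_stmt'})
inner = outer.new_child()               # ctx.path_scope = ctx.path_scope.new_child()
inner['path_b'] = 'inner_stmt'          # paths in this scope compile into stmt
assert inner['path_a'] == 'outer_stmt'  # outer bindings stay visible
assert 'path_b' not in outer            # without leaking to the parent scope
```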
def set_to_array(
path_id: irast.PathId, query: pgast.Query, *,
for_group_by: bool=False,
ctx: context.CompilerContextLevel) -> pgast.Query:
"""Collapse a set into an array."""
subrvar = pgast.RangeSubselect(
subquery=query,
alias=pgast.Alias(
aliasname=ctx.env.aliases.get('aggw')
)
)
result = pgast.SelectStmt()
aspects = pathctx.list_path_aspects(subrvar.query, path_id)
include_rvar(result, subrvar, path_id=path_id, aspects=aspects, ctx=ctx)
val: Optional[pgast.BaseExpr] = (
pathctx.maybe_get_path_serialized_var(
result, path_id, env=ctx.env)
)
if val is None:
value_var = pathctx.get_path_value_var(result, path_id, env=ctx.env)
val = output.serialize_expr(value_var, path_id=path_id, env=ctx.env)
pathctx.put_path_serialized_var(result, path_id, val, force=True)
if isinstance(val, pgast.TupleVarBase):
val = output.serialize_expr(
val, path_id=path_id, env=ctx.env)
pg_type = output.get_pg_type(path_id.target, ctx=ctx)
agg_filter_safe = True
if for_group_by:
# When doing this as part of a GROUP, the stuff being aggregated
# needs to actually appear *inside* of the aggregate call...
result.target_list = [pgast.ResTarget(val=val, ser_safe=val.ser_safe)]
val = result
try_collapse = astutils.collapse_query(val)
if isinstance(try_collapse, pgast.ColumnRef):
val = try_collapse
else:
agg_filter_safe = False
result = pgast.SelectStmt()
orig_val = val
if (path_id.is_array_path()
and ctx.env.output_format is context.OutputFormat.NATIVE):
# We cannot aggregate arrays straight away, as
# they may be of different length, so we have to
# encase each element into a record.
val = pgast.RowExpr(args=[val], ser_safe=val.ser_safe)
pg_type = ('record',)
array_agg = pgast.FuncCall(
name=('array_agg',),
args=[val],
agg_filter=(
astutils.new_binop(orig_val, pgast.NullConstant(),
'IS DISTINCT FROM')
if orig_val.nullable and agg_filter_safe else None
),
ser_safe=val.ser_safe,
)
# If this is for a group by, and the body isn't just a column ref,
# then we need to remove NULLs after the fact.
if orig_val.nullable and not agg_filter_safe:
array_agg = pgast.FuncCall(
name=('array_remove',),
args=[array_agg, pgast.NullConstant()]
)
agg_expr = pgast.CoalesceExpr(
args=[
array_agg,
pgast.TypeCast(
arg=pgast.ArrayExpr(elements=[]),
type_name=pgast.TypeName(name=pg_type, array_bounds=[-1])
)
],
ser_safe=array_agg.ser_safe,
nullable=False,
)
result.target_list = [
pgast.ResTarget(
name=ctx.env.aliases.get('v'),
val=agg_expr,
ser_safe=agg_expr.ser_safe,
)
]
return result | Collapse a set into an array. | set_to_array | python | geldata/gel | edb/pgsql/compiler/relctx.py | https://github.com/geldata/gel/blob/master/edb/pgsql/compiler/relctx.py | Apache-2.0 |
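The record-encasing step exists because Postgres arrays of arrays must be rectangular. A hand-written pair of psql queries showing the failure and the workaround (not compiler output):

```python
# ERROR: multidimensional arrays must have array expressions
# with matching dimensions.
FAILS = "SELECT array_agg(a) FROM (VALUES (ARRAY[1]), (ARRAY[1, 2])) AS v(a)"
# Wrapping each array in a record sidesteps the restriction, which is
# what the pgast.RowExpr above does.
WORKS = "SELECT array_agg(ROW(a)) FROM (VALUES (ARRAY[1]), (ARRAY[1, 2])) AS v(a)"
```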
def _needs_cte(typeref: irast.TypeRef) -> bool:
"""Check whether a typeref needs to be forced into a materialized CTE.
The main use case here is for sys::SystemObjects which are stored as
views that populate their data by parsing JSON metadata embedded in
comments on the SQL system objects. The query plans when fetching multi
links from these objects wind up being pretty pathologically quadratic.
So instead we force the objects and links into materialized CTEs
so that they *can't* be shoved into nested loops.
"""
assert isinstance(typeref.name_hint, sn.QualName)
return typeref.name_hint.module == 'sys' | Check whether a typeref needs to be forced into a materialized CTE.
The main use case here is for sys::SystemObjects which are stored as
views that populate their data by parsing JSON metadata embedded in
comments on the SQL system objects. The query plans when fetching multi
links from these objects wind up being pretty pathologically quadratic.
So instead we force the objects and links into materialized CTEs
so that they *can't* be shoved into nested loops. | _needs_cte | python | geldata/gel | edb/pgsql/compiler/relctx.py | https://github.com/geldata/gel/blob/master/edb/pgsql/compiler/relctx.py | Apache-2.0 |
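The SQL-level device being forced is explicit CTE materialization (Postgres 12+), which acts as an optimization fence; a hand-written illustration:

```python
# AS MATERIALIZED computes the CTE once, so the planner cannot inline the
# JSON-parsing view body into a nested loop per outer row.
ILLUSTRATIVE_SQL = """
WITH sys_objs AS MATERIALIZED (
    SELECT ...  -- view body parsing JSON metadata from comments
)
SELECT ... FROM sys_objs JOIN other_rel ON ...
"""
```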
def anti_join(
lhs: pgast.SelectStmt, rhs: pgast.SelectStmt,
path_id: Optional[irast.PathId], *,
aspect: pgce.PathAspect=pgce.PathAspect.IDENTITY,
ctx: context.CompilerContextLevel,
) -> None:
"""Filter elements out of the LHS that appear on the RHS"""
if path_id:
# grab the identity from the LHS and do an
# anti-join against the RHS.
src_ref = pathctx.get_path_var(
lhs, path_id=path_id, aspect=aspect, env=ctx.env)
pathctx.get_path_output(
rhs, path_id=path_id, aspect=aspect, env=ctx.env)
cond_expr: pgast.BaseExpr = astutils.new_binop(
src_ref, rhs, 'NOT IN')
else:
# No path we care about. Just check existence.
cond_expr = pgast.SubLink(operator="NOT EXISTS", expr=rhs)
lhs.where_clause = astutils.extend_binop(lhs.where_clause, cond_expr) | Filter elements out of the LHS that appear on the RHS | anti_join | python | geldata/gel | edb/pgsql/compiler/relctx.py | https://github.com/geldata/gel/blob/master/edb/pgsql/compiler/relctx.py | Apache-2.0 |
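Both branches, runnable against SQLite as a stand-in for the generated Postgres:

```python
import sqlite3

conn = sqlite3.connect(':memory:')
conn.executescript("""
    CREATE TABLE lhs(id); INSERT INTO lhs VALUES (1), (2), (3);
    CREATE TABLE rhs(id); INSERT INTO rhs VALUES (2);
""")
# With a path_id: compare identities, as in the NOT IN branch.
print(conn.execute(
    "SELECT id FROM lhs WHERE id NOT IN (SELECT id FROM rhs)"
).fetchall())  # [(1,), (3,)]
# Without one: a bare existence check, as in the NOT EXISTS branch.
print(conn.execute(
    "SELECT id FROM lhs WHERE NOT EXISTS (SELECT 1 FROM rhs)"
).fetchall())  # []
```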
def range_for_ptrref(
ptrref: irast.BasePointerRef, *,
dml_source: Sequence[irast.MutatingLikeStmt]=(),
for_mutation: bool=False,
only_self: bool=False,
path_id: Optional[irast.PathId]=None,
ctx: context.CompilerContextLevel,
) -> pgast.PathRangeVar:
""""Return a Range subclass corresponding to a given ptr step.
The return value may potentially be a UNION of all tables
corresponding to a set of specialized links computed from the given
`ptrref` taking source inheritance into account.
"""
if ptrref.union_components:
component_refs = ptrref.union_components
if only_self and len(component_refs) > 1:
raise errors.InternalServerError(
'unexpected union link'
)
elif ptrref.intersection_components:
# This is a little funky, but in an intersection, the pointer
# needs to appear in *all* of the tables, so we just pick any
# one of them.
component_refs = {next(iter(ptrref.intersection_components))}
elif ptrref.computed_link_alias:
component_refs = {ptrref.computed_link_alias}
else:
component_refs = {ptrref}
assert isinstance(ptrref.out_source.name_hint, sn.QualName)
include_descendants = not ptrref.union_is_exhaustive
output_cols = ('source', 'target')
set_ops = []
for component_ref in component_refs:
assert isinstance(component_ref, irast.PointerRef), \
"expected regular PointerRef"
component_rvar = _range_for_component_ptrref(
component_ref,
output_cols,
dml_source=dml_source,
include_descendants=include_descendants,
for_mutation=for_mutation,
path_id=path_id,
ctx=ctx,
)
component_qry = pgast.SelectStmt(
target_list=[
pgast.ResTarget(
val=pgast.ColumnRef(
name=[output_colname]
),
name=output_colname
)
for output_colname in output_cols
],
from_clause=[component_rvar]
)
if path_id:
target_ref = pgast.ColumnRef(
name=[component_rvar.alias.aliasname, output_cols[1]]
)
pathctx.put_path_identity_var(
component_qry, path_id, var=target_ref
)
pathctx.put_path_source_rvar(
component_qry, path_id, component_rvar
)
set_ops.append((context.OverlayOp.UNION, component_qry))
return range_from_queryset(
set_ops,
ptrref.shortname,
path_id=path_id,
ctx=ctx,
) | Return a Range subclass corresponding to a given ptr step.
The return value may potentially be a UNION of all tables
corresponding to a set of specialized links computed from the given
`ptrref` taking source inheritance into account. | range_for_ptrref | python | geldata/gel | edb/pgsql/compiler/relctx.py | https://github.com/geldata/gel/blob/master/edb/pgsql/compiler/relctx.py | Apache-2.0 |
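For a union link, the resulting range is roughly a union over the component link tables (hypothetical names; the exact union flavour is decided by `range_from_queryset`):

```python
ILLUSTRATIVE_SQL = """
SELECT source, target FROM "A.knows"
UNION ALL
SELECT source, target FROM "B.knows"
"""
```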
def _table_from_ptrref(
ptrref: irast.PointerRef,
ptr_info: pg_types.PointerStorageInfo,
*,
ctx: context.CompilerContextLevel,
) -> pgast.RelRangeVar:
"""Return a Table corresponding to a given Link."""
aspect = 'table'
table_schema_name, table_name = common.update_aspect(
ptr_info.table_name, aspect
)
typeref = ptrref.out_source if ptrref else None
relation = pgast.Relation(
schemaname=table_schema_name,
name=table_name,
type_or_ptr_ref=ptrref,
)
# Pseudo pointers (tuple and type intersection) have no schema id.
sobj_id = ptrref.id if isinstance(ptrref, irast.PointerRef) else None
rvar = pgast.RelRangeVar(
schema_object_id=sobj_id,
typeref=typeref,
relation=relation,
alias=pgast.Alias(
aliasname=ctx.env.aliases.get(ptrref.shortname.name)
)
)
return rvar | Return a Table corresponding to a given Link. | _table_from_ptrref | python | geldata/gel | edb/pgsql/compiler/relctx.py | https://github.com/geldata/gel/blob/master/edb/pgsql/compiler/relctx.py | Apache-2.0 |
def reuse_type_rel_overlays(
*,
dml_stmts: Iterable[irast.MutatingLikeStmt] = (),
dml_source: irast.MutatingLikeStmt,
ctx: context.CompilerContextLevel,
) -> None:
"""Update type rel overlays when a DML statement is reused.
When a WITH bound DML is used, we need to add it (and all of its
nested overlays) as an overlay for all the enclosing DML
statements.
"""
ref_overlays = ctx.rel_overlays.type.get(dml_source, immu.Map())
for tid, overlays in ref_overlays.items():
for op, rel, path_id in overlays:
_add_type_rel_overlay(
tid, op, rel, dml_stmts=dml_stmts, path_id=path_id, ctx=ctx
)
ptr_overlays = ctx.rel_overlays.ptr.get(dml_source, immu.Map())
for (obj, ptr), poverlays in ptr_overlays.items():
for op, rel, path_id in poverlays:
_add_ptr_rel_overlay(
obj, ptr, op, rel, path_id=path_id, dml_stmts=dml_stmts,
ctx=ctx
) | Update type rel overlays when a DML statement is reused.
When a WITH bound DML is used, we need to add it (and all of its
nested overlays) as an overlay for all the enclosing DML
statements. | reuse_type_rel_overlays | python | geldata/gel | edb/pgsql/compiler/relctx.py | https://github.com/geldata/gel/blob/master/edb/pgsql/compiler/relctx.py | Apache-2.0 |
def new_external_rvar(
*,
rel_name: Tuple[str, ...],
path_id: irast.PathId,
outputs: Mapping[Tuple[irast.PathId, Tuple[pgce.PathAspect, ...]], str],
) -> pgast.RelRangeVar:
"""Construct a ``RangeVar`` instance given a relation name and a path id.
Given an optionally-qualified relation name *rel_name* and a *path_id*,
return a ``RangeVar`` instance over the specified relation that is
then assumed to represent the *path_id* binding.
This is useful in situations where it is necessary to "prime" the compiler
with a list of external relations that exist in a larger SQL expression
that _this_ expression is being embedded into.
The *outputs* mapping optionally specifies a set of outputs in the
resulting range var as a ``(path_id, tuple-of-aspects): attribute name``
mapping.
"""
rel = new_external_rel(rel_name=rel_name, path_id=path_id)
assert rel.name
alias = pgast.Alias(aliasname=rel.name)
if not path_id.is_ptr_path():
rvar = pgast.RelRangeVar(
relation=rel, typeref=path_id.target, alias=alias)
else:
rvar = pgast.RelRangeVar(
relation=rel, alias=alias)
for (output_pid, output_aspects), colname in outputs.items():
var = pgast.ColumnRef(name=[colname])
for aspect in output_aspects:
rel.path_outputs[output_pid, aspect] = var
return rvar | Construct a ``RangeVar`` instance given a relation name and a path id.
Given an optionally-qualified relation name *rel_name* and a *path_id*,
return a ``RangeVar`` instance over the specified relation that is
then assumed to represent the *path_id* binding.
This is useful in situations where it is necessary to "prime" the compiler
with a list of external relations that exist in a larger SQL expression
that _this_ expression is being embedded into.
The *outputs* mapping optionally specifies a set of outputs in the
resulting range var as a ``(path_id, tuple-of-aspects): attribute name``
mapping. | new_external_rvar | python | geldata/gel | edb/pgsql/compiler/__init__.py | https://github.com/geldata/gel/blob/master/edb/pgsql/compiler/__init__.py | Apache-2.0 |
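A hypothetical priming call following the signature above; `pid` is assumed to be a `PathId` obtained elsewhere, and `"glob_rel"` a relation that the enclosing SQL expression will provide, exposing its value under column `v`. A sketch, not runnable standalone:

```python
# Hypothetical: pid and pgce are assumed to already be in scope.
rvar = new_external_rvar(
    rel_name=('edgedb', 'glob_rel'),
    path_id=pid,
    outputs={(pid, (pgce.PathAspect.VALUE,)): 'v'},
)
```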
def _uncompile_dml_stmt(stmt: pgast.DMLQuery, *, ctx: Context):
"""
Takes an SQL DML query and produces an equivalent EdgeQL query plus a bunch
of metadata needed to extract associated CTEs from the result of the EdgeQL
compiler.
In this context:
- subject is the object type/pointer being updated,
- source is the source of the subject (when subject is a pointer),
- value is the relation that provides new value to be inserted/updated,
- ptr-s are (usually) pointers on the subject.
"""
raise dispatch._raise_unsupported(stmt) | Takes an SQL DML query and produces an equivalent EdgeQL query plus a bunch
of metadata needed to extract associated CTEs from the result of the EdgeQL
compiler.
In this context:
- subject is the object type/pointer being updated,
- source is the source of the subject (when subject is a pointer),
- value is the relation that provides new value to be inserted/updated,
- ptr-s are (usually) pointers on the subject. | _uncompile_dml_stmt | python | geldata/gel | edb/pgsql/resolver/command.py | https://github.com/geldata/gel/blob/master/edb/pgsql/resolver/command.py | Apache-2.0 |
def _uncompile_subject_columns(
sub: s_objtypes.ObjectType | s_links.Link | s_properties.Property,
sub_table: context.Table,
res: UncompiledDML,
*,
ctx: Context,
):
'''
Instruct UncompiledDML to wrap the EdgeQL DML into a select shape that
selects all pointers.
This is applied when a RETURNING clause is present and these columns might
be used in the clause.
'''
for column in sub_table.columns:
if column.hidden:
continue
_, ptr_name, _ = _get_pointer_for_column(column, sub, ctx)
res.subject_columns.append((column.name, ptr_name)) | Instruct UncompiledDML to wrap the EdgeQL DML into a select shape that
selects all pointers.
This is applied when a RETURNING clause is present and these columns might
be used in the clause. | _uncompile_subject_columns | python | geldata/gel | edb/pgsql/resolver/command.py | https://github.com/geldata/gel/blob/master/edb/pgsql/resolver/command.py | Apache-2.0 |
def _uncompile_insert_object_stmt(
stmt: pgast.InsertStmt,
sub: s_objtypes.ObjectType,
sub_table: context.Table,
expected_columns: List[context.Column],
*,
ctx: Context,
) -> UncompiledDML:
"""
Translates a 'SQL INSERT into an object type table' to an EdgeQL insert.
"""
# handle DEFAULT and prepare the value relation
value_relation, expected_columns = _uncompile_default_value(
stmt.select_stmt, stmt.ctes, expected_columns, sub, ctx=ctx
)
# if we are sure that we are inserting a single row
# we can skip for-loops and iterators, which produces better SQL
is_value_single = _has_at_most_one_row(stmt.select_stmt)
# prepare anchors for inserted value columns
value_name = ctx.alias_generator.get('ins_val')
iterator_name = ctx.alias_generator.get('ins_iter')
value_id = irast.PathId.from_type(
ctx.schema,
sub,
typename=sn.QualName('__derived__', value_name),
env=None,
)
value_ql: qlast.PathElement = (
qlast.IRAnchor(name=value_name)
if is_value_single
else qlast.ObjectRef(name=iterator_name)
)
# a phantom relation that is supposed to hold the inserted value
# (in the resolver, this will be replaced by the real value relation)
value_cte_name = ctx.alias_generator.get('ins_value')
value_rel = pgast.Relation(
name=value_cte_name,
strip_output_namespaces=True,
)
value_columns = []
insert_shape = []
stype_refs: Dict[uuid.UUID, List[qlast.Set]] = {}
for index, expected_col in enumerate(expected_columns):
ptr, ptr_name, is_link = _get_pointer_for_column(expected_col, sub, ctx)
value_columns.append((ptr_name, is_link))
# inject type annotation into value relation
_try_inject_ptr_type_cast(value_relation, index, ptr, ctx)
# prepare the outputs of the source CTE
ptr_id = _get_ptr_id(value_id, ptr, ctx)
output = pgast.ColumnRef(name=(ptr_name,), nullable=True)
if is_link:
value_rel.path_outputs[(ptr_id, pgce.PathAspect.IDENTITY)] = output
value_rel.path_outputs[(ptr_id, pgce.PathAspect.VALUE)] = output
else:
value_rel.path_outputs[(ptr_id, pgce.PathAspect.VALUE)] = output
if ptr_name == 'id':
value_rel.path_outputs[(value_id, pgce.PathAspect.VALUE)] = output
# prepare insert shape that will use the paths from source_outputs
insert_shape.append(
_construct_assign_element_for_ptr(
value_ql,
ptr_name,
ptr,
is_link,
ctx,
stype_refs,
)
)
# source needs an iterator column, so we need to invent one.
# Here we only decide on the name of that iterator column; the actual column
# is generated later, when resolving the DML stmt.
value_iterator = ctx.alias_generator.get('iter')
output = pgast.ColumnRef(name=(value_iterator,))
value_rel.path_outputs[(value_id, pgce.PathAspect.ITERATOR)] = output
if not any(c.name == 'id' for c in expected_columns):
value_rel.path_outputs[(value_id, pgce.PathAspect.VALUE)] = output
# construct the EdgeQL DML AST
sub_name = sub.get_name(ctx.schema)
ql_stmt: qlast.Expr = qlast.InsertQuery(
subject=s_utils.name_to_ast_ref(sub_name),
shape=insert_shape,
)
if not is_value_single:
# value relation might contain multiple rows
# to express this in EdgeQL, we must wrap `insert` into a `for` query
ql_stmt = qlast.ForQuery(
iterator=qlast.Path(steps=[qlast.IRAnchor(name=value_name)]),
iterator_alias=iterator_name,
result=ql_stmt,
)
ql_returning_shape: List[qlast.ShapeElement] = []
if stmt.returning_list:
# construct the shape that will extract all needed columns of the subject
# table (because they might be used by the RETURNING clause)
for column in sub_table.columns:
if column.hidden:
continue
_, ptr_name, _ = _get_pointer_for_column(column, sub, ctx)
ql_returning_shape.append(
qlast.ShapeElement(
expr=qlast.Path(steps=[qlast.Ptr(name=ptr_name)]),
)
)
return UncompiledDML(
input=stmt,
subject=sub,
ql_stmt=ql_stmt,
ql_returning_shape=ql_returning_shape,
ql_singletons={value_id},
ql_anchors={value_name: value_id},
external_rels={
value_id: (
value_rel,
(pgce.PathAspect.SOURCE,),
)
},
stype_refs=stype_refs,
early_result=context.CompiledDML(
value_cte_name=value_cte_name,
value_relation_input=value_relation,
value_columns=value_columns,
value_iterator_name=value_iterator,
# these will be populated after compilation
output_ctes=[],
output_relation_name='',
output_namespace={},
),
# these will be populated by _uncompile_dml_stmt
subject_columns=[],
) | Translates a 'SQL INSERT into an object type table' to an EdgeQL insert. | _uncompile_insert_object_stmt | python | geldata/gel | edb/pgsql/resolver/command.py | https://github.com/geldata/gel/blob/master/edb/pgsql/resolver/command.py | Apache-2.0 |
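The constructed EdgeQL has roughly this shape (hypothetical `Person` type; `ins_val` and `ins_iter` stand for the generated anchor and iterator aliases):

```python
SQL_IN = """INSERT INTO "Person" (first_name) VALUES ('Alice'), ('Bob')"""
# Multi-row values take the for-loop form; single-row inserts skip it.
EDGEQL_OUT = """
for ins_iter in ins_val union (
    insert Person { first_name := ins_iter.first_name }
)
"""
```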
def _uncompile_insert_pointer_stmt(
stmt: pgast.InsertStmt,
sub: s_links.Link | s_properties.Property,
sub_table: context.Table,
expected_columns: List[context.Column],
*,
ctx: Context,
) -> UncompiledDML:
"""
Translates a SQL 'INSERT INTO a link / multi-property table' into
an `EdgeQL update SourceObject { subject: ... }`.
"""
if not any(c.name == 'source' for c in expected_columns):
raise errors.QueryError(
'column source is required when inserting into link tables',
span=stmt.span,
)
if not any(c.name == 'target' for c in expected_columns):
raise errors.QueryError(
'column target is required when inserting into link tables',
span=stmt.span,
)
sub_source = sub.get_source(ctx.schema)
assert isinstance(sub_source, s_objtypes.ObjectType)
sub_target = sub.get_target(ctx.schema)
assert sub_target
# handle DEFAULT and prepare the value relation
value_relation, expected_columns = _uncompile_default_value(
stmt.select_stmt, stmt.ctes, expected_columns, sub, ctx=ctx
)
# if we are sure that we are inserting a single row
# we can skip for-loops and iterators, which produces better SQL
# is_value_single = _has_at_most_one_row(stmt.select_stmt)
is_value_single = False
free_obj_ty = ctx.schema.get('std::FreeObject', type=s_objtypes.ObjectType)
ctx.schema, dummy_ty = free_obj_ty.derive_subtype(
ctx.schema,
name=sn.QualName('__derived__', ctx.alias_generator.get('ins_ty')),
mark_derived=True,
transient=True,
)
src_ptr = _add_pointer(dummy_ty, '__source__', sub_source, ctx=ctx)
tgt_ptr = _add_pointer(dummy_ty, '__target__', sub_target, ctx=ctx)
# prepare anchors for inserted value columns
value_name = ctx.alias_generator.get('ins_val')
iterator_name = ctx.alias_generator.get('ins_iter')
base_id = irast.PathId.from_type(
ctx.schema,
dummy_ty,
typename=sn.QualName('__derived__', value_name),
env=None,
)
source_id = _get_ptr_id(base_id, src_ptr, ctx=ctx)
target_id = _get_ptr_id(base_id, tgt_ptr, ctx=ctx)
value_ql: qlast.PathElement = (
qlast.IRAnchor(name=value_name)
if is_value_single
else qlast.ObjectRef(name=iterator_name)
)
# a phantom relation that is supposed to hold the inserted value
# (in the resolver, this will be replaced by the real value relation)
value_cte_name = ctx.alias_generator.get('ins_value')
value_rel = pgast.Relation(
name=value_cte_name,
strip_output_namespaces=True,
)
value_columns: List[Tuple[str, bool]] = []
for index, expected_col in enumerate(expected_columns):
ptr: Optional[s_pointers.Pointer] = None
if expected_col.name == 'source':
ptr_name = 'source'
is_link = True
ptr_id = source_id
elif expected_col.name == 'target':
ptr_name = 'target'
is_link = isinstance(sub, s_links.Link)
ptr = sub
ptr_id = target_id
else:
# link pointer
assert isinstance(sub, s_links.Link)
ptr_name = expected_col.name
ptr = sub.maybe_get_ptr(ctx.schema, sn.UnqualName(ptr_name))
assert ptr
lprop_tgt = not_none(ptr.get_target(ctx.schema))
lprop_ptr = _add_pointer(dummy_ty, ptr_name, lprop_tgt, ctx=ctx)
ptr_id = _get_ptr_id(base_id, lprop_ptr, ctx=ctx)
is_link = False
var = pgast.ColumnRef(name=(ptr_name,), nullable=True)
value_rel.path_outputs[(ptr_id, pgce.PathAspect.VALUE)] = var
# inject type annotation into value relation
if is_link:
_try_inject_type_cast(
value_relation, index, pgast.TypeName(name=('uuid',))
)
else:
assert ptr
_try_inject_ptr_type_cast(value_relation, index, ptr, ctx)
value_columns.append((ptr_name, is_link))
# source needs an iterator column, so we need to invent one.
# Here we only decide on the name of that iterator column; the actual column
# is generated later, when resolving the DML stmt.
value_iterator = ctx.alias_generator.get('iter')
var = pgast.ColumnRef(name=(value_iterator,))
value_rel.path_outputs[(base_id, pgce.PathAspect.ITERATOR)] = var
value_rel.path_outputs[(base_id, pgce.PathAspect.VALUE)] = var
# construct the EdgeQL DML AST
stype_refs: Dict[uuid.UUID, List[qlast.Set]] = {}
sub_name = sub.get_shortname(ctx.schema)
target_ql: qlast.Expr = qlast.Path(
steps=[value_ql, qlast.Ptr(name='__target__')]
)
if isinstance(sub_target, s_objtypes.ObjectType):
assert isinstance(target_ql, qlast.Path)
target_ql = _construct_cast_from_uuid_to_obj_type(
target_ql, sub_target, stype_refs, optional=True, ctx=ctx
)
ql_ptr_val: qlast.Expr
if isinstance(sub, s_links.Link):
ql_ptr_val = qlast.Shape(
expr=target_ql,
elements=[
qlast.ShapeElement(
expr=qlast.Path(
steps=[qlast.Ptr(name=ptr_name, type='property')],
),
compexpr=qlast.Path(
steps=[
value_ql,
# qlast.Ptr(name=sub_name.name),
qlast.Ptr(name=ptr_name),
],
),
)
for ptr_name, _ in value_columns
if ptr_name not in ('source', 'target')
],
)
else:
# multi pointer
ql_ptr_val = target_ql
source_ql_p = qlast.Path(steps=[value_ql, qlast.Ptr(name='__source__')])
# XXX: rewrites are getting missed when we do this cast! Now, we
# *want* rewrites getting missed tbh, but I think it's a broader
# bug.
source_ql = _construct_cast_from_uuid_to_obj_type(
source_ql_p,
sub_source,
stype_refs,
optional=True,
ctx=ctx,
)
is_multi = sub.get_cardinality(ctx.schema) == qltypes.SchemaCardinality.Many
# Update the source_ql directly -- the filter is done there.
ql_stmt: qlast.Expr = qlast.UpdateQuery(
subject=source_ql,
shape=[
qlast.ShapeElement(
expr=qlast.Path(steps=[qlast.Ptr(name=sub_name.name)]),
operation=(
qlast.ShapeOperation(op=qlast.ShapeOp.APPEND)
if is_multi
else qlast.ShapeOperation(op=qlast.ShapeOp.ASSIGN)
),
compexpr=ql_ptr_val,
)
],
sql_mode_link_only=is_multi,
)
if not is_value_single:
# value relation might contain multiple rows
# to express this in EdgeQL, we must wrap `insert` into a `for` query
ql_stmt = qlast.ForQuery(
iterator=qlast.Path(steps=[qlast.IRAnchor(name=value_name)]),
iterator_alias=iterator_name,
result=ql_stmt,
)
ql_returning_shape: List[qlast.ShapeElement] = []
if stmt.returning_list:
# construct the shape that will extract all needed columns of the subject
# table (because they might be used by the RETURNING clause)
for column in sub_table.columns:
if column.hidden:
continue
if column.name in ('source', 'target'):
# no need to include in shape, they will be present anyway
continue
ql_returning_shape.append(
qlast.ShapeElement(
expr=qlast.Path(steps=[qlast.Ptr(name=column.name)]),
compexpr=qlast.Path(
partial=True,
steps=[
qlast.Ptr(name=sub_name.name),
qlast.Ptr(name=column.name, type='property'),
],
),
)
)
return UncompiledDML(
input=stmt,
subject=sub,
ql_stmt=ql_stmt,
ql_returning_shape=ql_returning_shape,
ql_singletons={base_id},
ql_anchors={value_name: base_id},
external_rels={
base_id: (
value_rel,
(pgce.PathAspect.SOURCE,),
)
},
stype_refs=stype_refs,
early_result=context.CompiledDML(
value_cte_name=value_cte_name,
value_relation_input=value_relation,
value_columns=value_columns,
value_iterator_name=value_iterator,
# these will be populated after compilation
output_ctes=[],
output_relation_name='',
output_namespace={},
),
# these will be populated by _uncompile_dml_stmt
subject_columns=[],
) | Translates a SQL 'INSERT INTO a link / multi-property table' into
an `EdgeQL update SourceObject { subject: ... }`. | _uncompile_insert_pointer_stmt | python | geldata/gel | edb/pgsql/resolver/command.py | https://github.com/geldata/gel/blob/master/edb/pgsql/resolver/command.py | Apache-2.0 |
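Here the same machinery lands on an `update` that appends to the link (`+=` for multi links, `:=` for single ones); roughly, with a hypothetical `friends` link:

```python
SQL_IN = """INSERT INTO "Person.friends" (source, target) VALUES (...)"""
# __source__/__target__ are the derived uuid pointers, cast back to objects.
EDGEQL_OUT = """
for ins_iter in ins_val union (
    update (<Person>ins_iter.__source__)
    set { friends += <Person>ins_iter.__target__ }
)
"""
```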
def _uncompile_delete_object_stmt(
stmt: pgast.DeleteStmt,
sub: s_objtypes.ObjectType,
sub_table: context.Table,
*,
ctx: Context,
) -> UncompiledDML:
"""
Translates a 'SQL DELETE from an object type table' to an EdgeQL delete.
"""
# prepare value relation
# For deletes, value relation contains a single column of ids of all the
# objects that need to be deleted. We construct this relation from WHERE
# and USING clauses of DELETE.
assert isinstance(stmt.relation, pgast.RelRangeVar)
val_sub_rvar = stmt.relation.alias.aliasname or stmt.relation.relation.name
assert val_sub_rvar
value_relation = pgast.SelectStmt(
ctes=stmt.ctes,
target_list=[
pgast.ResTarget(
val=pgast.ColumnRef(
name=(val_sub_rvar, 'id'),
)
)
],
from_clause=[
pgast.RelRangeVar(
relation=stmt.relation.relation,
alias=pgast.Alias(aliasname=val_sub_rvar),
# DELETE ONLY
include_inherited=stmt.relation.include_inherited,
)
]
+ stmt.using_clause,
where_clause=stmt.where_clause,
)
stmt.ctes = []
# prepare anchors for inserted value columns
value_name = ctx.alias_generator.get('del_val')
value_id = irast.PathId.from_type(
ctx.schema,
sub,
typename=sn.QualName('__derived__', value_name),
env=None,
)
value_ql = qlast.IRAnchor(name=value_name)
# a phantom relation that contains a single column, which is the id of all
# the objects that should be deleted.
value_cte_name = ctx.alias_generator.get('del_value')
value_rel = pgast.Relation(
name=value_cte_name,
strip_output_namespaces=True,
)
value_columns = [('id', False)]
output_var = pgast.ColumnRef(name=('id',), nullable=False)
value_rel.path_outputs[(value_id, pgce.PathAspect.IDENTITY)] = output_var
value_rel.path_outputs[(value_id, pgce.PathAspect.VALUE)] = output_var
value_rel.path_outputs[(value_id, pgce.PathAspect.ITERATOR)] = output_var
# construct the EdgeQL DML AST
sub_name = sub.get_name(ctx.schema)
where = qlast.BinOp(
left=qlast.Path(partial=True, steps=[qlast.Ptr(name='id')]),
op='IN',
right=qlast.Path(steps=[value_ql, qlast.Ptr(name='id')]),
)
ql_stmt: qlast.Expr = qlast.DeleteQuery(
subject=qlast.Path(steps=[s_utils.name_to_ast_ref(sub_name)]),
where=where,
)
ql_returning_shape: List[qlast.ShapeElement] = []
if stmt.returning_list:
# construct the shape that will extract all needed columns of the subject
# table (because they might be used by the RETURNING clause)
for column in sub_table.columns:
if column.hidden:
continue
_, ptr_name, _ = _get_pointer_for_column(column, sub, ctx)
ql_returning_shape.append(
qlast.ShapeElement(
expr=qlast.Path(steps=[qlast.Ptr(name=ptr_name)]),
)
)
return UncompiledDML(
input=stmt,
subject=sub,
ql_stmt=ql_stmt,
ql_returning_shape=ql_returning_shape,
ql_singletons={value_id},
ql_anchors={value_name: value_id},
external_rels={
value_id: (
value_rel,
(pgce.PathAspect.SOURCE,),
)
},
stype_refs={},
early_result=context.CompiledDML(
value_cte_name=value_cte_name,
value_relation_input=value_relation,
value_columns=value_columns,
value_iterator_name=None,
# these will be populated after compilation
output_ctes=[],
output_relation_name='',
output_namespace={},
),
# these will be populated by _uncompile_dml_stmt
subject_columns=[],
) | Translates a 'SQL DELETE from an object type table' to an EdgeQL delete. | _uncompile_delete_object_stmt | python | geldata/gel | edb/pgsql/resolver/command.py | https://github.com/geldata/gel/blob/master/edb/pgsql/resolver/command.py | Apache-2.0 |
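So the SQL DELETE becomes, roughly (hypothetical type name; `del_val` is the generated anchor over the ids collected from WHERE/USING):

```python
SQL_IN = """DELETE FROM "Person" WHERE first_name = 'Alice'"""
EDGEQL_OUT = "delete Person filter .id in del_val.id"
```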
def _uncompile_delete_pointer_stmt(
stmt: pgast.DeleteStmt,
sub: s_links.Link | s_properties.Property,
sub_table: context.Table,
*,
ctx: Context,
) -> UncompiledDML:
"""
Translates a SQL 'DELETE FROM a link / multi-property table' into
an `EdgeQL update SourceObject { subject: ... }`.
"""
sub_source = sub.get_source(ctx.schema)
assert isinstance(sub_source, s_objtypes.ObjectType)
sub_target = sub.get_target(ctx.schema)
assert sub_target
# prepare value relation
# For link deletes, value relation contains two columns: source and target
# of all links that need to be deleted. We construct this relation from
# WHERE and USING clauses of DELETE.
assert isinstance(stmt.relation, pgast.RelRangeVar)
val_sub_rvar = stmt.relation.alias.aliasname or stmt.relation.relation.name
assert val_sub_rvar
value_relation = pgast.SelectStmt(
ctes=stmt.ctes,
target_list=[
pgast.ResTarget(
val=pgast.ColumnRef(
name=(val_sub_rvar, 'source'),
)
),
pgast.ResTarget(
val=pgast.ColumnRef(
name=(val_sub_rvar, 'target'),
)
),
],
from_clause=[
pgast.RelRangeVar(
relation=stmt.relation.relation,
alias=pgast.Alias(aliasname=val_sub_rvar),
)
]
+ stmt.using_clause,
where_clause=stmt.where_clause,
)
stmt.ctes = []
# if we are sure that we are updating a single source object,
# we can skip for-loops and iterators, which produces better SQL
is_value_single = False
# prepare anchors for inserted value columns
value_name = ctx.alias_generator.get('ins_val')
iterator_name = ctx.alias_generator.get('ins_iter')
source_id = irast.PathId.from_type(
ctx.schema,
sub_source,
typename=sn.QualName('__derived__', value_name),
env=None,
)
link_ref = irtypeutils.ptrref_from_ptrcls(
schema=ctx.schema, ptrcls=sub, cache=None, typeref_cache=None
)
value_id: irast.PathId = source_id.extend(ptrref=link_ref)
value_ql: qlast.PathElement = (
qlast.IRAnchor(name=value_name)
if is_value_single
else qlast.ObjectRef(name=iterator_name)
)
# a phantom relation that is supposed to hold the two source and target
# columns of rows that need to be deleted.
value_cte_name = ctx.alias_generator.get('del_value')
value_rel = pgast.Relation(
name=value_cte_name,
strip_output_namespaces=True,
)
value_columns = [('source', False), ('target', False)]
var = pgast.ColumnRef(name=('source',), nullable=True)
value_rel.path_outputs[(source_id, pgce.PathAspect.VALUE)] = var
value_rel.path_outputs[(source_id, pgce.PathAspect.IDENTITY)] = var
tgt_id = value_id.tgt_path()
var = pgast.ColumnRef(name=('target',), nullable=True)
value_rel.path_outputs[(tgt_id, pgce.PathAspect.VALUE)] = var
value_rel.path_outputs[(tgt_id, pgce.PathAspect.IDENTITY)] = var
# source needs an iterator column, so we need to invent one.
# Here we only decide on the name of that iterator column; the actual column
# is generated later, when resolving the DML stmt.
value_iterator = ctx.alias_generator.get('iter')
var = pgast.ColumnRef(name=(value_iterator,))
value_rel.path_outputs[(source_id, pgce.PathAspect.ITERATOR)] = var
value_rel.path_outputs[(value_id, pgce.PathAspect.ITERATOR)] = var
# construct the EdgeQL DML AST
sub_name = sub.get_name(ctx.schema)
sub_source_name = sub_source.get_name(ctx.schema)
sub_target_name = sub_target.get_name(ctx.schema)
sub_name = sub.get_shortname(ctx.schema)
ql_sub_source_ref = s_utils.name_to_ast_ref(sub_source_name)
ql_sub_target_ref = s_utils.name_to_ast_ref(sub_target_name)
ql_ptr_val: qlast.Expr = qlast.Path(
steps=[value_ql, qlast.Ptr(name=sub_name.name)]
)
if isinstance(sub, s_links.Link):
ql_ptr_val = qlast.TypeCast(
expr=ql_ptr_val,
type=qlast.TypeName(maintype=ql_sub_target_ref),
)
ql_stmt: qlast.Expr = qlast.UpdateQuery(
subject=qlast.Path(steps=[ql_sub_source_ref]),
where=qlast.BinOp( # ObjectType == value.source
left=qlast.Path(steps=[ql_sub_source_ref]),
op='=',
right=qlast.Path(steps=[value_ql]),
),
shape=[
qlast.ShapeElement(
expr=qlast.Path(steps=[qlast.Ptr(name=sub_name.name)]),
operation=qlast.ShapeOperation(op=qlast.ShapeOp.SUBTRACT),
compexpr=ql_ptr_val,
)
],
)
if not is_value_single:
# value relation might contain multiple rows
# to express this in EdgeQL, we must wrap `delete` into a `for` query
ql_stmt = qlast.ForQuery(
iterator=qlast.Path(steps=[qlast.IRAnchor(name=value_name)]),
iterator_alias=iterator_name,
result=ql_stmt,
)
ql_returning_shape: List[qlast.ShapeElement] = []
if stmt.returning_list:
# construct the shape that will extract all needed columns of the subject
# table (because they might be used by the RETURNING clause)
for column in sub_table.columns:
if column.hidden:
continue
if column.name in ('source', 'target'):
# no need to include in shape, they will be present anyway
continue
ql_returning_shape.append(
qlast.ShapeElement(
expr=qlast.Path(steps=[qlast.Ptr(name=column.name)]),
compexpr=qlast.Path(
partial=True,
steps=[
qlast.Ptr(name=sub_name.name),
qlast.Ptr(name=column.name, type='property'),
],
),
)
)
return UncompiledDML(
input=stmt,
subject=sub,
ql_stmt=ql_stmt,
ql_returning_shape=ql_returning_shape,
ql_singletons={source_id},
ql_anchors={value_name: source_id},
external_rels={
source_id: (
value_rel,
(pgce.PathAspect.SOURCE,),
)
},
stype_refs={},
early_result=context.CompiledDML(
value_cte_name=value_cte_name,
value_relation_input=value_relation,
value_columns=value_columns,
value_iterator_name=value_iterator,
# these will be populated after compilation
output_ctes=[],
output_relation_name='',
output_namespace={},
),
# these will be populated by _uncompile_dml_stmt
subject_columns=[],
) | Translates a SQL 'DELETE FROM a link / multi-property table' into
an `EdgeQL update SourceObject { subject: ... }`. | _uncompile_delete_pointer_stmt | python | geldata/gel | edb/pgsql/resolver/command.py | https://github.com/geldata/gel/blob/master/edb/pgsql/resolver/command.py | Apache-2.0 |
def _uncompile_update_object_stmt(
stmt: pgast.UpdateStmt,
sub: s_objtypes.ObjectType,
sub_table: context.Table,
column_updates: List[Tuple[context.Column, pgast.BaseExpr]],
*,
ctx: Context,
) -> UncompiledDML:
"""
Translates a 'SQL UPDATE into an object type table' to an EdgeQL update.
"""
def is_default(e: pgast.BaseExpr) -> bool:
return isinstance(e, pgast.Keyword) and e.name == 'DEFAULT'
# prepare value relation
# For updates, value relation contains:
# - `id` column, that contains the id of the subject,
# - one column for each of the pointers on the subject to be updated,
# We construct this relation from WHERE and FROM clauses of UPDATE.
assert isinstance(stmt.relation, pgast.RelRangeVar)
val_sub_rvar = stmt.relation.alias.aliasname or stmt.relation.relation.name
assert val_sub_rvar
value_relation = pgast.SelectStmt(
ctes=stmt.ctes,
target_list=[
pgast.ResTarget(
val=pgast.ColumnRef(
name=(val_sub_rvar, 'id'),
)
)
]
+ [
pgast.ResTarget(val=val, name=c.name)
for c, val in column_updates
if not is_default(val) # skip DEFAULT column updates
],
from_clause=[
pgast.RelRangeVar(
relation=stmt.relation.relation,
alias=pgast.Alias(aliasname=val_sub_rvar),
# UPDATE ONLY
include_inherited=stmt.relation.include_inherited,
)
]
+ stmt.from_clause,
where_clause=stmt.where_clause,
)
stmt.ctes = []
# prepare anchors for inserted value columns
value_name = ctx.alias_generator.get('upd_val')
iterator_name = ctx.alias_generator.get('upd_iter')
value_id = irast.PathId.from_type(
ctx.schema,
sub,
typename=sn.QualName('__derived__', value_name),
env=None,
)
value_ql: qlast.PathElement = qlast.ObjectRef(name=iterator_name)
# a phantom relation that is supposed to hold the inserted value
# (in the resolver, this will be replaced by the real value relation)
value_cte_name = ctx.alias_generator.get('upd_value')
value_rel = pgast.Relation(
name=value_cte_name,
strip_output_namespaces=True,
)
output_var = pgast.ColumnRef(name=('id',))
value_rel.path_outputs[(value_id, pgce.PathAspect.ITERATOR)] = output_var
value_rel.path_outputs[(value_id, pgce.PathAspect.VALUE)] = output_var
value_columns = [('id', False)]
update_shape = []
stype_refs: Dict[uuid.UUID, List[qlast.Set]] = {}
for index, (col, val) in enumerate(column_updates):
ptr, ptr_name, is_link = _get_pointer_for_column(col, sub, ctx)
if not is_default(val):
value_columns.append((ptr_name, is_link))
# inject type annotation into value relation
_try_inject_ptr_type_cast(value_relation, index + 1, ptr, ctx)
# prepare the outputs of the source CTE
ptr_id = _get_ptr_id(value_id, ptr, ctx)
output_var = pgast.ColumnRef(name=(ptr_name,), nullable=True)
if is_link:
value_rel.path_outputs[(ptr_id, pgce.PathAspect.IDENTITY)] = (
output_var
)
value_rel.path_outputs[(ptr_id, pgce.PathAspect.VALUE)] = output_var
else:
value_rel.path_outputs[(ptr_id, pgce.PathAspect.VALUE)] = output_var
# prepare insert shape that will use the paths from source_outputs
if is_default(val):
# special case: DEFAULT
default_ql: qlast.Expr
if ptr.get_default(ctx.schema) is None:
default_ql = qlast.Set(elements=[]) # NULL
else:
default_ql = qlast.Path(
steps=[qlast.SpecialAnchor(name='__default__')]
)
update_shape.append(
qlast.ShapeElement(
expr=qlast.Path(steps=[qlast.Ptr(name=ptr_name)]),
operation=qlast.ShapeOperation(op=qlast.ShapeOp.ASSIGN),
compexpr=default_ql,
)
)
else:
# base case
update_shape.append(
_construct_assign_element_for_ptr(
value_ql,
ptr_name,
ptr,
is_link,
ctx,
stype_refs,
)
)
# construct the EdgeQL DML AST
sub_name = sub.get_name(ctx.schema)
ql_sub_ref = s_utils.name_to_ast_ref(sub_name)
where = qlast.BinOp( # ObjectType == value.source
left=qlast.Path(steps=[ql_sub_ref]),
op='=',
right=qlast.Path(steps=[value_ql]),
)
ql_stmt: qlast.Expr = qlast.UpdateQuery(
subject=qlast.Path(steps=[ql_sub_ref]),
where=where,
shape=update_shape,
)
# value relation might contain multiple rows
# to express this in EdgeQL, we must wrap `update` into a `for` query
ql_stmt = qlast.ForQuery(
iterator=qlast.Path(steps=[qlast.IRAnchor(name=value_name)]),
iterator_alias=iterator_name,
result=ql_stmt,
)
ql_returning_shape: List[qlast.ShapeElement] = []
if stmt.returning_list:
        # construct the shape that will extract all needed columns of the
        # subject table (because they might be used by the RETURNING clause)
for column in sub_table.columns:
if column.hidden:
continue
_, ptr_name, _ = _get_pointer_for_column(column, sub, ctx)
ql_returning_shape.append(
qlast.ShapeElement(
expr=qlast.Path(steps=[qlast.Ptr(name=ptr_name)]),
)
)
return UncompiledDML(
input=stmt,
subject=sub,
ql_stmt=ql_stmt,
ql_returning_shape=ql_returning_shape,
ql_singletons={value_id},
ql_anchors={value_name: value_id},
external_rels={
value_id: (
value_rel,
(pgce.PathAspect.SOURCE,),
)
},
stype_refs=stype_refs,
early_result=context.CompiledDML(
value_cte_name=value_cte_name,
value_relation_input=value_relation,
value_columns=value_columns,
value_iterator_name=None,
# these will be populated after compilation
output_ctes=[],
output_relation_name='',
output_namespace={},
),
# these will be populated by _uncompile_dml_stmt
subject_columns=[],
) | Translates a 'SQL UPDATE into an object type table' to an EdgeQL update. | _uncompile_update_object_stmt | python | geldata/gel | edb/pgsql/resolver/command.py | https://github.com/geldata/gel/blob/master/edb/pgsql/resolver/command.py | Apache-2.0 |
def _try_inject_type_cast(
rel: pgast.BaseRelation,
pos: int,
ty: pgast.TypeName,
):
"""
If a relation is simple, injects type annotation for a column.
This is needed for Postgres to correctly infer the type so it will be able
to bind to correct parameter types. For example:
INSERT x (a, b) VALUES ($1, $2)
is compiled into something like:
WITH cte AS (VALUES ($1, $2))
INSERT x (a, b) SELECT * FROM cte
This function adds type casts into `cte`.
"""
if not isinstance(rel, pgast.SelectStmt):
return
if rel.values:
for row_i, row in enumerate(rel.values):
if isinstance(row, pgast.ImplicitRowExpr) and pos < len(row.args):
args = list(row.args)
args[pos] = pgast.TypeCast(arg=args[pos], type_name=ty)
rel.values[row_i] = row.replace(args=args)
elif rel.target_list and pos < len(rel.target_list):
target = rel.target_list[pos]
rel.target_list[pos] = target.replace(
val=pgast.TypeCast(arg=target.val, type_name=ty)
) | If a relation is simple, injects type annotation for a column.
This is needed for Postgres to correctly infer the type so it will be able
to bind to correct parameter types. For example:
INSERT x (a, b) VALUES ($1, $2)
is compiled into something like:
WITH cte AS (VALUES ($1, $2))
INSERT x (a, b) SELECT * FROM cte
This function adds type casts into `cte`. | _try_inject_type_cast | python | geldata/gel | edb/pgsql/resolver/command.py | https://github.com/geldata/gel/blob/master/edb/pgsql/resolver/command.py | Apache-2.0 |
def eval(expr: pgast.BaseExpr, *, ctx: Context) -> Optional[pgast.BaseExpr]:
"""
Tries to statically evaluate expr, recursing into sub-expressions.
Returns None if that is not possible.
"""
return None | Tries to statically evaluate expr, recursing into sub-expressions.
Returns None if that is not possible. | eval | python | geldata/gel | edb/pgsql/resolver/static.py | https://github.com/geldata/gel/blob/master/edb/pgsql/resolver/static.py | Apache-2.0 |
def eval_list(
exprs: List[pgast.BaseExpr], *, ctx: Context
) -> Optional[List[pgast.BaseExpr]]:
"""
Tries to statically evaluate exprs, recursing into sub-expressions.
Returns None if that is not possible.
Raises DisableNormalization if param refs are encountered.
"""
res = []
for expr in exprs:
r = eval(expr, ctx=ctx)
if not r:
return None
res.append(r)
return res | Tries to statically evaluate exprs, recursing into sub-expressions.
Returns None if that is not possible.
Raises DisableNormalization if param refs are encountered. | eval_list | python | geldata/gel | edb/pgsql/resolver/static.py | https://github.com/geldata/gel/blob/master/edb/pgsql/resolver/static.py | Apache-2.0 |
def name_in_pg_catalog(name: Sequence[str]) -> Optional[str]:
"""
    Strips `pg_catalog.` schema name from an SQL ident. Because pg_catalog is
    always the first schema in search_path, every ident without a schema name
    is treated as if it were in pg_catalog.
"""
if len(name) == 1 or name[0] == 'pg_catalog':
return name[-1]
return None | Strips `pg_catalog.` schema name from an SQL ident. Because pg_catalog is
always the first schema in search_path, every ident without a schema name
is treated as if it were in pg_catalog. | name_in_pg_catalog | python | geldata/gel | edb/pgsql/resolver/static.py | https://github.com/geldata/gel/blob/master/edb/pgsql/resolver/static.py | Apache-2.0
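A few sanity checks of the rule above (the idents are illustrative):

assert name_in_pg_catalog(('pg_catalog', 'pg_class')) == 'pg_class'
assert name_in_pg_catalog(('pg_class',)) == 'pg_class'  # bare ident: pg_catalog
assert name_in_pg_catalog(('public', 'pg_class')) is None  # other schema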
def cast_to_regclass(param: pgast.BaseExpr, ctx: Context) -> pgast.BaseExpr:
"""
Equivalent to `::regclass` in SQL.
    Converts a string constant or an oid to a "registered class"
    (fully-qualified name of the table/index/sequence).
    In practice, the type of the resulting expression is oid.
"""
expr = eval(param, ctx=ctx)
res: pgast.BaseExpr
if isinstance(expr, pgast.NullConstant):
res = pgast.NullConstant()
elif isinstance(expr, pgast.StringConstant) and expr.val.isnumeric():
# We need to treat numeric string constants as numbers, apparently.
res = pgast.NumericConstant(val=expr.val)
elif isinstance(expr, pgast.StringConstant):
res = to_regclass(expr.val, ctx=ctx)
elif isinstance(expr, pgast.NumericConstant):
res = expr
else:
# This is a complex expression of unknown type.
        # If we knew the type were numeric, we could look up the internal oid
        # by the public oid.
        # But if the type is string, we'd have to implement to_regclass in SQL.
# The problem is that we don't know the type statically.
# So let's insert a runtime type check with an 'unsupported' message for
# strings.
param = dispatch.resolve(param, ctx=ctx)
res = pgast.CaseExpr(
args=[
pgast.CaseWhen(
expr=pgast.Expr(
lexpr=pgast.FuncCall(
name=('pg_typeof',),
args=[param]
),
name='IN',
rexpr=pgast.ImplicitRowExpr(
args=[
pgast.StringConstant(val='integer'),
pgast.StringConstant(val='smallint'),
pgast.StringConstant(val='bigint'),
pgast.StringConstant(val='oid'),
]
)
),
result=param
)
],
defresult=pgast.FuncCall(
name=(V('edgedb'), 'raise'),
args=[
pgast.NumericConstant(val='1'),
pgast.StringConstant(
val=pgerror.ERROR_FEATURE_NOT_SUPPORTED
),
pgast.StringConstant(val='cannot cast text to regclass'),
]
)
)
return pgast.TypeCast(
arg=res,
type_name=pgast.TypeName(name=('pg_catalog', 'regclass')),
) | Equivalent to `::regclass` in SQL.
Converts a string constant or an oid to a "registered class"
(fully-qualified name of the table/index/sequence).
In practice, the type of the resulting expression is oid. | cast_to_regclass | python | geldata/gel | edb/pgsql/resolver/static.py | https://github.com/geldata/gel/blob/master/edb/pgsql/resolver/static.py | Apache-2.0
def to_regclass(reg_class_name: str, ctx: Context) -> pgast.BaseExpr:
"""
Equivalent to `to_regclass(text reg_class_name)` in SQL.
Parses a string as an SQL identifier (with optional schema name and
database name) and returns an SQL expression that evaluates to the
"registered class" of that ident.
"""
from edb.pgsql.common import quote_literal as ql
try:
[stmt] = pgparser.parse(f'SELECT {reg_class_name}')
assert isinstance(stmt, pgast.SelectStmt)
[target] = stmt.target_list
assert isinstance(target.val, pgast.ColumnRef)
name = target.val.name
except Exception:
return pgast.NullConstant()
if len(name) < 2:
name = (ctx.options.search_path[0], name[0])
namespace, rel_name = name
assert isinstance(namespace, str)
assert isinstance(rel_name, str)
# A bit hacky to parse SQL here, but I don't want to construct pgast
[stmt] = pgparser.parse(
f'''
SELECT pc.oid
FROM {V('edgedbsql')}.pg_class pc
JOIN {V('edgedbsql')}.pg_namespace pn ON pn.oid = pc.relnamespace
WHERE {ql(namespace)} = pn.nspname AND pc.relname = {ql(rel_name)}
'''
)
assert isinstance(stmt, pgast.SelectStmt)
return pgast.SubLink(operator=None, expr=stmt) | Equivalent to `to_regclass(text reg_class_name)` in SQL.
Parses a string as an SQL identifier (with optional schema name and
database name) and returns an SQL expression that evaluates to the
"registered class" of that ident. | to_regclass | python | geldata/gel | edb/pgsql/resolver/static.py | https://github.com/geldata/gel/blob/master/edb/pgsql/resolver/static.py | Apache-2.0 |
def empty(
self,
) -> compiler.CompilerContextManager[ResolverContextLevel]:
"""Create a new empty context"""
return self.new(ContextSwitchMode.EMPTY) | Create a new empty context | empty | python | geldata/gel | edb/pgsql/resolver/context.py | https://github.com/geldata/gel/blob/master/edb/pgsql/resolver/context.py | Apache-2.0 |
def child(self) -> compiler.CompilerContextManager[ResolverContextLevel]:
"""Clone current context, prevent changes from leaking to parent"""
return self.new(ContextSwitchMode.CHILD) | Clone current context, prevent changes from leaking to parent | child | python | geldata/gel | edb/pgsql/resolver/context.py | https://github.com/geldata/gel/blob/master/edb/pgsql/resolver/context.py | Apache-2.0 |
def lateral(self) -> compiler.CompilerContextManager[ResolverContextLevel]:
"""Clone current context, prevent changes from leaking to parent"""
return self.new(ContextSwitchMode.LATERAL) | Clone current context, prevent changes from leaking to parent | lateral | python | geldata/gel | edb/pgsql/resolver/context.py | https://github.com/geldata/gel/blob/master/edb/pgsql/resolver/context.py | Apache-2.0 |
def must_fail(exc_type, exc_msg_re=None, **kwargs):
"""A decorator to ensure that the test fails with a specific exception.
If exc_msg_re is passed, assertRaisesRegex will be used to match the
exception message.
Example:
@must_fail(EdgeQLSyntaxError,
'non-default argument follows', line=2, col=61)
def test_edgeql_syntax_1(self):
...
"""
def wrap(func):
args = (exc_type,)
if exc_msg_re is not None:
args += (exc_msg_re,)
_set_spec(func, 'must_fail', (args, kwargs))
return func
return wrap | A decorator to ensure that the test fails with a specific exception.
If exc_msg_re is passed, assertRaisesRegex will be used to match the
exception message.
Example:
@must_fail(EdgeQLSyntaxError,
'non-default argument follows', line=2, col=61)
def test_edgeql_syntax_1(self):
... | must_fail | python | geldata/gel | edb/testbase/lang.py | https://github.com/geldata/gel/blob/master/edb/testbase/lang.py | Apache-2.0 |
def try_until_succeeds(
*,
ignore: Union[Type[Exception], Tuple[Type[Exception]]] | None = None,
ignore_regexp: str | None = None,
delay: float=0.5,
timeout: float=5
):
"""Retry a block of code a few times ignoring the specified errors.
Example:
async for tr in self.try_until_succeeds(
ignore=edgedb.AuthenticationError):
async with tr:
await edgedb.connect(...)
"""
if ignore is None and ignore_regexp is None:
raise ValueError('Expect at least one of ignore or ignore_regexp')
return retryloop.RetryLoop(
backoff=retryloop.const_backoff(delay),
timeout=timeout,
ignore=ignore,
ignore_regexp=ignore_regexp,
) | Retry a block of code a few times ignoring the specified errors.
Example:
async for tr in self.try_until_succeeds(
ignore=edgedb.AuthenticationError):
async with tr:
await edgedb.connect(...) | try_until_succeeds | python | geldata/gel | edb/testbase/server.py | https://github.com/geldata/gel/blob/master/edb/testbase/server.py | Apache-2.0 |
def try_until_fails(
*,
wait_for: Union[Type[Exception], Tuple[Type[Exception]]] | None = None,
wait_for_regexp: str | None = None,
delay: float=0.5,
timeout: float=5
):
"""Retry a block of code a few times until the specified error happens.
Example:
async for tr in self.try_until_fails(
wait_for=edgedb.AuthenticationError):
async with tr:
await edgedb.connect(...)
"""
if wait_for is None and wait_for_regexp is None:
raise ValueError(
'Expect at least one of wait_for or wait_for_regexp'
)
return retryloop.RetryLoop(
backoff=retryloop.const_backoff(delay),
timeout=timeout,
wait_for=wait_for,
wait_for_regexp=wait_for_regexp,
) | Retry a block of code a few times until the specified error happens.
Example:
async for tr in self.try_until_fails(
wait_for=edgedb.AuthenticationError):
async with tr:
await edgedb.connect(...) | try_until_fails | python | geldata/gel | edb/testbase/server.py | https://github.com/geldata/gel/blob/master/edb/testbase/server.py | Apache-2.0 |
async def assertRaisesRegexTx(self, exception, regex, msg=None, **kwargs):
"""A version of assertRaisesRegex with automatic transaction recovery
"""
with super().assertRaisesRegex(exception, regex, msg=msg, **kwargs):
try:
tx = self.con.transaction()
await tx.start()
yield
finally:
await tx.rollback() | A version of assertRaisesRegex with automatic transaction recovery | assertRaisesRegexTx | python | geldata/gel | edb/testbase/server.py | https://github.com/geldata/gel/blob/master/edb/testbase/server.py | Apache-2.0 |
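A sketch of typical use inside a test method; the query and the error class
are illustrative assumptions, not taken from the source:

async def test_duplicate_insert(self):
    async with self.assertRaisesRegexTx(
        edgedb.ConstraintViolationError, 'violates exclusivity constraint'
    ):
        await self.con.execute('insert User { name := "dup" };')
    # The wrapping transaction was rolled back, so the connection
    # is still usable for the rest of the test.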
def repl(self):
"""Open interactive EdgeQL REPL right in the test.
This is obviously only for debugging purposes. Just add
`self.repl()` at any point in your test.
"""
conargs = self.get_connect_args()
cmd = [
'python', '-m', 'edb.cli',
'--database', self.con.dbname,
'--user', conargs['user'],
'--tls-ca-file', conargs['tls_ca_file'],
]
env = os.environ.copy()
env['EDGEDB_HOST'] = conargs['host']
env['EDGEDB_PORT'] = str(conargs['port'])
if password := conargs.get('password'):
env['EDGEDB_PASSWORD'] = password
if secret_key := conargs.get('secret_key'):
env['EDGEDB_SECRET_KEY'] = secret_key
proc = subprocess.Popen(
cmd, stdin=sys.stdin, stdout=sys.stdout, env=env)
while proc.returncode is None:
try:
proc.wait()
except KeyboardInterrupt:
pass | Open interactive EdgeQL REPL right in the test.
This is obviously only for debugging purposes. Just add
`self.repl()` at any point in your test. | repl | python | geldata/gel | edb/testbase/server.py | https://github.com/geldata/gel/blob/master/edb/testbase/server.py | Apache-2.0 |
async def get_backend_sql_connection(cls, dbname=None):
"""Get a raw connection to the underlying SQL server, if possible
This is useful when we want to do things like querying the pg_catalog
of the underlying database.
"""
try:
import asyncpg
except ImportError:
raise unittest.SkipTest(
'SQL test skipped: asyncpg not installed')
pgdsn = cls.get_backend_sql_dsn(dbname=dbname)
return await asyncpg.connect(pgdsn) | Get a raw connection to the underlying SQL server, if possible
This is useful when we want to do things like querying the pg_catalog
of the underlying database. | get_backend_sql_connection | python | geldata/gel | edb/testbase/server.py | https://github.com/geldata/gel/blob/master/edb/testbase/server.py | Apache-2.0 |
def assert_shape(self, res: Any, rows: int, columns: int | List[str]):
"""
    Fail if the query result does not conform to the specified shape,
    defined in terms of:
    - number of rows,
    - number of columns (not checked if there are no rows),
- column names.
"""
self.assertEqual(len(res), rows)
if isinstance(columns, int):
if rows > 0:
self.assertEqual(len(res[0]), columns)
elif isinstance(columns, list):
        self.assertListEqual(columns, list(res[0].keys())) | Fail if the query result does not conform to the specified shape, defined in
terms of:
- number of rows,
- number of columns (not checked if there are no rows),
- column names. | assert_shape | python | geldata/gel | edb/testbase/server.py | https://github.com/geldata/gel/blob/master/edb/testbase/server.py | Apache-2.0 |
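For example, assuming `res` holds the rows returned for a hypothetical
SELECT id, name FROM "User":

self.assert_shape(res, 2, ['id', 'name'])  # two rows with exactly these columns
self.assert_shape(res, 2, 2)               # or check only the column count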
def assert_shape(
self,
sqlres: Iterable[Any],
eqlres: Iterable[asyncpg.Record],
) -> None:
"""
Compare the shape of results produced by a SQL query and an EdgeQL
query.
"""
assert_data_shape.assert_data_shape(
list(sqlres),
[dataclasses.asdict(r) for r in eqlres],
self.fail,
from_sql=True,
) | Compare the shape of results produced by a SQL query and an EdgeQL
query. | assert_shape | python | geldata/gel | edb/testbase/server.py | https://github.com/geldata/gel/blob/master/edb/testbase/server.py | Apache-2.0 |
def single_link_subquery(
self,
source: str,
link: str,
target: str,
link_props: Optional[Iterable[str]] = None
) -> str:
"""Propduce a subquery fetching a single link as a record.
If no link properties are specified then the array of records will be
made up of target types.
If the link properties are specified then the array of records will be
made up of link records.
"""
if link_props:
return (
f'(SELECT x FROM "{target}"'
f' JOIN "{source}.{link}" x ON x.target = "{target}".id'
f' WHERE x.source = "{source}".id) AS _{link}'
)
else:
return (
f'(SELECT "{target}" FROM "{target}"'
f' WHERE "{target}".id = "{source}".{link}_id) AS {link}'
        ) | Produce a subquery fetching a single link as a record.
If no link properties are specified then the record will be
of the target type.
If the link properties are specified then the record will be
a link record. | single_link_subquery | python | geldata/gel | edb/testbase/server.py | https://github.com/geldata/gel/blob/master/edb/testbase/server.py | Apache-2.0
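For concreteness, with hypothetical arguments source='User', link='profile',
target='Profile' and no link properties, the returned fragment is:

sql = self.single_link_subquery('User', 'profile', 'Profile')
# sql == '(SELECT "Profile" FROM "Profile"'
#        ' WHERE "Profile".id = "User".profile_id) AS profile'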
def multi_link_subquery(
self,
source: str,
link: str,
target: str,
link_props: Optional[Iterable[str]] = None
) -> str:
"""Propduce a subquery fetching a multi link as an array or records.
If no link properties are specified then the array of records will be
made up of target types.
If the link properties are specified then the array of records will be
made up of link records.
"""
if link_props:
return (
f'(SELECT array_agg(x) FROM "{target}"'
f' JOIN "{source}.{link}" x ON x.target = "{target}".id'
f' WHERE x.source = "{source}".id) AS _{link}'
)
else:
return (
f'(SELECT array_agg("{target}") FROM "{target}"'
f' JOIN "{source}.{link}" x ON x.target = "{target}".id'
f' WHERE x.source = "{source}".id) AS {link}'
        ) | Produce a subquery fetching a multi link as an array of records.
If no link properties are specified then the array of records will be
made up of target types.
If the link properties are specified then the array of records will be
made up of link records. | multi_link_subquery | python | geldata/gel | edb/testbase/server.py | https://github.com/geldata/gel/blob/master/edb/testbase/server.py | Apache-2.0 |
def max_colors():
"""Max colors current terminal supports.
:returns: Integer. For instance, for 'xterm' it is usually 256
.. note:: Uses :mod:`curses`
"""
global _COLORS
if _COLORS is None:
try:
import curses
try:
curses.setupterm()
_COLORS = curses.tigetnum('colors')
except (OSError, curses.error):
pass
except ImportError:
pass
if _COLORS is None:
_COLORS = 1
return _COLORS | Max colors current terminal supports.
:returns: Integer. For instance, for 'xterm' it is usually 256
.. note:: Uses :mod:`curses` | max_colors | python | geldata/gel | edb/common/term.py | https://github.com/geldata/gel/blob/master/edb/common/term.py | Apache-2.0 |
def supports_colors(fileno):
"""Check if ``fileno`` file-descriptor supports colored output.
    :param int fileno: file-descriptor
:returns: bool
"""
return (
isatty(fileno) and os.getenv('TERM') != 'dumb' and
os.getenv('ANSI_COLORS_DISABLED') is None) | Check if ``fileno`` file-descriptor supports colored output.
:param int fileno: file-descriptor
:returns: bool | supports_colors | python | geldata/gel | edb/common/term.py | https://github.com/geldata/gel/blob/master/edb/common/term.py | Apache-2.0 |
def size(fileno):
"""Current terminal height and width (lines and columns).
    :param int fileno: file-descriptor
:returns: Tuple of two integers - lines and columns respectively.
``(None, None)`` if ``fileno`` is not a terminal
"""
if not isatty(fileno):
return None, None
try:
size = struct.unpack(
'2h', fcntl.ioctl(fileno, termios.TIOCGWINSZ, ' '))
except Exception:
size = (os.getenv('LINES', 25), os.getenv('COLUMNS', 80))
return size | Current terminal height and width (lines and columns).
:param int fileno: file-descriptor
:returns: Tuple of two integers - lines and columns respectively.
``(None, None)`` if ``fileno`` is not a terminal | size | python | geldata/gel | edb/common/term.py | https://github.com/geldata/gel/blob/master/edb/common/term.py | Apache-2.0 |
def use_colors(fileno=None):
"""Check on whether use colored output or not.
Checks ``shell.MainCommand.colorize`` config setting and
``fileno`` for being capable of displaying colors.
:param int fileno: File-descriptor. If ``None``, checks on ``sys.stdout``
:returns bool: Whether you can or can not use color terminal output
"""
if _colorize == 'on':
return True
if _colorize == 'off':
return False
assert _colorize == 'auto'
if fileno is None:
try:
fileno = sys.stdout.fileno()
except OSError:
return False
    return supports_colors(fileno) | Check whether to use colored output or not.
Checks ``shell.MainCommand.colorize`` config setting and
``fileno`` for being capable of displaying colors.
:param int fileno: File-descriptor. If ``None``, checks on ``sys.stdout``
:returns bool: Whether you can or cannot use color terminal output | use_colors | python | geldata/gel | edb/common/term.py | https://github.com/geldata/gel/blob/master/edb/common/term.py | Apache-2.0
def apply(self, str):
"""Apply ANSI escape sequences to :param:str.
If the result can be printed to a terminal that supports styling.
"""
        return self._term_prefix + str + self._term_postfix | Apply ANSI escape sequences to :param:str so that the result
can be printed to a terminal that supports styling. | apply | python | geldata/gel | edb/common/term.py | https://github.com/geldata/gel/blob/master/edb/common/term.py | Apache-2.0
async def deferred_shield(arg: Awaitable[_T]) -> _T:
'''Wait for a future, deferring cancellation until it is complete.
If you do
await deferred_shield(something())
it is approximately equivalent to
await something()
except that if the coroutine containing it is cancelled,
something() is protected from cancellation, and *additionally*
CancelledError is not raised in the caller until something()
completes.
This can be useful if something() contains something that
shouldn't be interrupted but also can't be safely left running
asynchronously.
'''
task = asyncio.ensure_future(arg)
ex = None
while not task.done():
try:
await asyncio.shield(task)
except asyncio.CancelledError as cex:
if ex is not None:
cex.__context__ = ex
ex = cex
except Exception:
if ex:
raise ex from None
raise
if ex:
raise ex
return task.result() | Wait for a future, deferring cancellation until it is complete.
If you do
await deferred_shield(something())
it is approximately equivalent to
await something()
except that if the coroutine containing it is cancelled,
something() is protected from cancellation, and *additionally*
CancelledError is not raised in the caller until something()
completes.
This can be useful if something() contains something that
shouldn't be interrupted but also can't be safely left running
asynchronously. | deferred_shield | python | geldata/gel | edb/common/asyncutil.py | https://github.com/geldata/gel/blob/master/edb/common/asyncutil.py | Apache-2.0 |
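A minimal sketch of the intended use; conn.commit() is a hypothetical
coroutine:

async def finalize(conn):
    # Even if finalize() is cancelled here, the commit still runs to
    # completion, and CancelledError surfaces only afterwards.
    await deferred_shield(conn.commit())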
async def debounce(
input: Callable[[], Awaitable[_T]],
output: Callable[[list[_T]], Awaitable[None]],
*,
max_wait: float,
delay_amt: float,
max_batch_size: int,
) -> None:
'''Debounce and batch async events.
Loops forever unless an operation fails, so should probably be run
from a task.
The basic algorithm is that if an event comes in less than
`delay_amt` since the previous one, then instead of sending it
immediately, we wait an additional `delay_amt` from then. If we are
already waiting, any message also extends the wait, up to
`max_wait`.
Also, cap the maximum batch size to `max_batch_size`.
'''
# I think the algorithm reads more clearly with the params
# capitalized as constants, though we don't want them like that in
# the argument list, so reassign them.
MAX_WAIT, DELAY_AMT, MAX_BATCH_SIZE = max_wait, delay_amt, max_batch_size
loop = asyncio.get_running_loop()
batch = []
last_signal = -MAX_WAIT
target_time = None
while True:
try:
if target_time is None:
v = await input()
else:
async with asyncio.timeout_at(target_time):
v = await input()
except TimeoutError:
t = loop.time()
else:
batch.append(v)
t = loop.time()
# If we aren't current waiting, and we got a
# notification recently, arrange to wait some before
# sending it.
if (
target_time is None
and t - last_signal < DELAY_AMT
):
target_time = t + DELAY_AMT
# If we were already waiting, wait a little longer, though
# not longer than MAX_WAIT.
elif (
target_time is not None
):
target_time = min(
max(t + DELAY_AMT, target_time),
last_signal + MAX_WAIT,
)
# Skip sending the event if we need to wait longer.
if (
target_time is not None
and t < target_time
and len(batch) < MAX_BATCH_SIZE
):
continue
await output(batch)
batch = []
last_signal = t
target_time = None | Debounce and batch async events.
Loops forever unless an operation fails, so should probably be run
from a task.
The basic algorithm is that if an event comes in less than
`delay_amt` since the previous one, then instead of sending it
immediately, we wait an additional `delay_amt` from then. If we are
already waiting, any message also extends the wait, up to
`max_wait`.
Also, cap the maximum batch size to `max_batch_size`. | debounce | python | geldata/gel | edb/common/asyncutil.py | https://github.com/geldata/gel/blob/master/edb/common/asyncutil.py | Apache-2.0 |
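A sketch wiring debounce() to an asyncio.Queue; all names are hypothetical:

import asyncio

async def main() -> None:
    events: asyncio.Queue = asyncio.Queue()

    async def flush(batch: list) -> None:
        print('flushing', batch)

    # Flush at most 50 events per batch, waiting up to 0.2s after each
    # event for more, but never delaying a batch more than 1s in total.
    task = asyncio.create_task(debounce(
        events.get, flush,
        max_wait=1.0, delay_amt=0.2, max_batch_size=50,
    ))
    await events.put('event-1')
    await task  # runs forever unless flush() raises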
def schedule(self) -> None:
"""Schedule to run the task as soon as possible.
If already scheduled, nothing happens; it won't queue up.
If the task is already running, it will be scheduled to run again as
soon as the running task is done.
"""
if not self._stop_requested:
self._scheduled = True
if self._task is None:
self._task = asyncio.create_task(self._run()) | Schedule to run the task as soon as possible.
If already scheduled, nothing happens; it won't queue up.
If the task is already running, it will be scheduled to run again as
soon as the running task is done. | schedule | python | geldata/gel | edb/common/asyncutil.py | https://github.com/geldata/gel/blob/master/edb/common/asyncutil.py | Apache-2.0 |
async def stop(self) -> None:
"""Cancel scheduled task and wait for the running one to finish.
After an ExclusiveTask is stopped, no more new schedules are allowed.
Note: "cancel scheduled task" only means setting self._scheduled to
False; if an asyncio task is scheduled, stop() will still wait for it.
"""
self._scheduled = False
self._stop_requested = True
if self._task is not None:
await self._task | Cancel scheduled task and wait for the running one to finish.
After an ExclusiveTask is stopped, no more new schedules are allowed.
Note: "cancel scheduled task" only means setting self._scheduled to
False; if an asyncio task is scheduled, stop() will still wait for it. | stop | python | geldata/gel | edb/common/asyncutil.py | https://github.com/geldata/gel/blob/master/edb/common/asyncutil.py | Apache-2.0 |
def exclusive_task(
handler: HandlerFunction | HandlerMethod | None = None,
*,
slot: str | None = None,
) -> ExclusiveTask | ExclusiveTaskProperty | ExclusiveTaskDecorator:
"""Convert an async function into an ExclusiveTask.
This decorator can be applied to either top-level functions or methods
in a class. In the latter case, the exclusiveness is bound to each object
of the owning class. If the owning class defines __slots__, you must also
define an extra slot to store the exclusive state and tell exclusive_task()
by providing the `slot` argument.
"""
if handler is None:
def decorator(
handler: HandlerFunction | HandlerMethod,
) -> ExclusiveTask | ExclusiveTaskProperty:
return _exclusive_task(handler, slot=slot)
return decorator
return _exclusive_task(handler, slot=slot) | Convert an async function into an ExclusiveTask.
This decorator can be applied to either top-level functions or methods
in a class. In the latter case, the exclusiveness is bound to each object
of the owning class. If the owning class defines __slots__, you must also
define an extra slot to store the exclusive state and tell exclusive_task()
by providing the `slot` argument. | exclusive_task | python | geldata/gel | edb/common/asyncutil.py | https://github.com/geldata/gel/blob/master/edb/common/asyncutil.py | Apache-2.0 |
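A usage sketch inferred from the schedule()/stop() methods above; the
Service class is made up:

class Service:
    @exclusive_task
    async def flush(self) -> None:
        ...  # at most one flush per Service instance runs at a time

svc = Service()
svc.flush.schedule()    # runs soon, or re-runs after the current pass
await svc.flush.stop()  # wait for any in-flight run; blocks new schedules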
def lru_method_cache(size: int | None=128) -> Callable[[Tf], Tf]:
"""A version of lru_cache for methods that shouldn't leak memory.
Basically the idea is that we generate a per-object lru-cached
partially applied method.
Since pickling an lru_cache of a lambda or a functools.partial
doesn't work, we wrap it in a _NoPickle object that doesn't pickle
its contents.
"""
def transformer(f: Tf) -> Tf:
key = f'__{f.__name__}_cached'
def func(self, *args, **kwargs):
_m = getattr(self, key, None)
if not _m:
_m = _NoPickle(
functools.lru_cache(size)(functools.partial(f, self))
)
setattr(self, key, _m)
return _m.obj(*args, **kwargs)
return func # type: ignore
return transformer | A version of lru_cache for methods that shouldn't leak memory.
Basically the idea is that we generate a per-object lru-cached
partially applied method.
Since pickling an lru_cache of a lambda or a functools.partial
doesn't work, we wrap it in a _NoPickle object that doesn't pickle
its contents. | lru_method_cache | python | geldata/gel | edb/common/lru.py | https://github.com/geldata/gel/blob/master/edb/common/lru.py | Apache-2.0 |
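A sketch of the decorator in use; each Catalog instance gets its own cache,
so cached entries do not keep unrelated objects alive:

class Catalog:
    @lru_method_cache(32)
    def lookup(self, name: str) -> str:
        print('computing', name)  # runs once per (instance, name)
        return name.upper()

c = Catalog()
c.lookup('user')  # prints 'computing user'
c.lookup('user')  # served from the per-instance cache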
def distance(s: str, t: str) -> int:
"""Calculates Levenshtein distance between s and t."""
m, n = len(s), len(t)
if m > n:
s, t = t, s
m, n = n, m
ri = list(range(m + 1))
for i in range(1, n + 1):
ri_1, ri = ri, [i] + [0] * m
for j in range(1, m + 1):
ri[j] = min(ri_1[j] + 1,
ri[j - 1] + 1,
ri_1[j - 1] + int(s[j - 1] != t[i - 1]))
return ri[m] | Calculates Levenshtein distance between s and t. | distance | python | geldata/gel | edb/common/levenshtein.py | https://github.com/geldata/gel/blob/master/edb/common/levenshtein.py | Apache-2.0 |
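The implementation keeps only one row of the DP matrix at a time. A few
sanity checks:

assert distance('kitten', 'sitting') == 3  # two substitutions + one insertion
assert distance('', 'abc') == 3            # three insertions
assert distance('same', 'same') == 0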
def _check_type(cls, value: Any) -> T:
"""Ensure `value` is of type T and return it."""
if not isinstance(value, cls.type):
raise ValueError(
f"{cls!r} accepts only values of type {cls.type!r}, "
f"got {type(value)!r}"
)
return cast(T, value) | Ensure `value` is of type T and return it. | _check_type | python | geldata/gel | edb/common/checked.py | https://github.com/geldata/gel/blob/master/edb/common/checked.py | Apache-2.0 |
def _check_type(cls, value: Any) -> T:
"""Ensure `value` is of type T and return it."""
if not isinstance(value, cls.type):
raise ValueError(
f"{cls!r} accepts only values of type {cls.type!r}, "
f"got {type(value)!r}"
)
return cast(T, value) | Ensure `value` is of type T and return it. | _check_type | python | geldata/gel | edb/common/checked.py | https://github.com/geldata/gel/blob/master/edb/common/checked.py | Apache-2.0 |
def _check_key_type(cls, key: Any) -> K:
"""Ensure `key` is of type K and return it."""
if not isinstance(key, cls.keytype):
raise KeyError(
f"{cls!r} accepts only keys of type {cls.keytype!r}, "
f"got {type(key)!r}"
)
return cast(K, key) | Ensure `key` is of type K and return it. | _check_key_type | python | geldata/gel | edb/common/checked.py | https://github.com/geldata/gel/blob/master/edb/common/checked.py | Apache-2.0 |
def _check_value_type(cls, value: Any) -> V:
"""Ensure `value` is of type V and return it."""
if not isinstance(value, cls.valuetype):
raise ValueError(
f"{cls!r} accepts only values of type "
"{cls.valuetype!r}, got {type(value)!r}"
)
return cast(V, value) | Ensure `value` is of type V and return it. | _check_value_type | python | geldata/gel | edb/common/checked.py | https://github.com/geldata/gel/blob/master/edb/common/checked.py | Apache-2.0 |
def uuid1mc() -> uuid.UUID:
"""Generate a v1 UUID using a pseudo-random multicast node address."""
# Note: cannot use pgproto.UUID since it's UUID v1
node = int.from_bytes(os.urandom(6), byteorder='little') | (1 << 40)
return UUID(uuid.uuid1(node=node).bytes) | Generate a v1 UUID using a pseudo-random multicast node address. | uuid1mc | python | geldata/gel | edb/common/uuidgen.py | https://github.com/geldata/gel/blob/master/edb/common/uuidgen.py | Apache-2.0 |
def uuid4() -> uuid.UUID:
"""Generate a random UUID."""
return UUID(uuid.uuid4().bytes) | Generate a random UUID. | uuid4 | python | geldata/gel | edb/common/uuidgen.py | https://github.com/geldata/gel/blob/master/edb/common/uuidgen.py | Apache-2.0 |
def uuid5_bytes(namespace: uuid.UUID, name: bytes | bytearray) -> uuid.UUID:
"""Generate a UUID from the SHA-1 hash of a namespace UUID and a name."""
# Do the hashing ourselves because the stdlib version only supports str
hasher = hashlib.sha1(namespace.bytes)
hasher.update(name)
return UUID(uuid.UUID(bytes=hasher.digest()[:16], version=5).bytes) | Generate a UUID from the SHA-1 hash of a namespace UUID and a name. | uuid5_bytes | python | geldata/gel | edb/common/uuidgen.py | https://github.com/geldata/gel/blob/master/edb/common/uuidgen.py | Apache-2.0 |
def uuid5(namespace: uuid.UUID, name: str) -> uuid.UUID:
"""Generate a UUID from the SHA-1 hash of a namespace UUID and a name."""
return uuid5_bytes(namespace, name.encode("utf-8")) | Generate a UUID from the SHA-1 hash of a namespace UUID and a name. | uuid5 | python | geldata/gel | edb/common/uuidgen.py | https://github.com/geldata/gel/blob/master/edb/common/uuidgen.py | Apache-2.0 |
def rgb_to_xyz(r, g, b):
"""Converts RGB color to XYZ
:param float r: Red value in ``0..1`` range
:param float g: Green value in ``0..1`` range
:param float b: Blue value in ``0..1`` range
:returns: ``(x, y, z)``, all values normalized to
the ``(0..1, 0..1, 0..1)`` range
"""
# Formulae from http://www.easyrgb.com/index.php?X=MATH
if r > 0.04045:
r = ((r + 0.055) / 1.055) ** 2.4
else:
r /= 12.92
if g > 0.04045:
g = ((g + 0.055) / 1.055) ** 2.4
else:
g /= 12.92
if b > 0.04045:
b = ((b + 0.055) / 1.055) ** 2.4
else:
b /= 12.92
r *= 100.0
g *= 100.0
b *= 100.0
x = min((r * 0.4124 + g * 0.3576 + b * 0.1805) / XYZ_MAX_X, 1.0)
y = min((r * 0.2126 + g * 0.7152 + b * 0.0722) / XYZ_MAX_Y, 1.0)
z = min((r * 0.0193 + g * 0.1192 + b * 0.9505) / XYZ_MAX_Z, 1.0)
return (x, y, z) | Converts RGB color to XYZ
:param float r: Red value in ``0..1`` range
:param float g: Green value in ``0..1`` range
:param float b: Blue value in ``0..1`` range
:returns: ``(x, y, z)``, all values normalized to
the ``(0..1, 0..1, 0..1)`` range | rgb_to_xyz | python | geldata/gel | edb/common/colorsys.py | https://github.com/geldata/gel/blob/master/edb/common/colorsys.py | Apache-2.0 |
def xyz_to_lab(x, y, z):
"""Converts XYZ color to LAB
:param float x: Value from ``0..1``
:param float y: Value from ``0..1``
:param float z: Value from ``0..1``
:returns: ``(L, a, b)``, values in
range ``(0..100, -127..128, -127..128)``
"""
# Formulae from http://www.easyrgb.com/index.php?X=MATH
if x > 0.008856:
x **= _1_3
else:
x = (7.787 * x) + _16_116
if y > 0.008856:
y **= _1_3
else:
y = (7.787 * y) + _16_116
if z > 0.008856:
z **= _1_3
else:
z = (7.787 * z) + _16_116
lum = 116.0 * y - 16.0
a = 500 * (x - y)
b = 200 * (y - z)
return (lum, a, b) | Converts XYZ color to LAB
:param float x: Value from ``0..1``
:param float y: Value from ``0..1``
:param float z: Value from ``0..1``
:returns: ``(L, a, b)``, values in
range ``(0..100, -127..128, -127..128)`` | xyz_to_lab | python | geldata/gel | edb/common/colorsys.py | https://github.com/geldata/gel/blob/master/edb/common/colorsys.py | Apache-2.0 |
def rgb_distance(r1, g1, b1, r2, g2, b2):
"""Calculates numerical distance between two colors in RGB color space.
The distance is calculated by CIE94 formula.
:params: Two colors with ``r, g, b`` values in ``0..1`` range
    :returns: A number in the ``0..100`` range. The smaller the
    number, the closer the colors are.
"""
# Formulae from wikipedia article re CIE94
    L1, A1, B1 = xyz_to_lab(*rgb_to_xyz(r1, g1, b1))  # rgb order, not r, b, g
    L2, A2, B2 = xyz_to_lab(*rgb_to_xyz(r2, g2, b2))
dL = L1 - L2
C1 = _sqrt(A1 * A1 + B1 * B1)
C2 = _sqrt(A2 * A2 + B2 * B2)
dCab = C1 - C2
dA = A1 - A2
dB = B1 - B2
dEab = _sqrt(dL ** 2 + dA ** 2 + dB ** 2)
dHab = _sqrt(max(dEab ** 2 - dL ** 2 - dCab ** 2, 0.0))
dE = _sqrt((dL ** 2) + ((dCab / (1 + 0.045 * C1)) ** 2) + (
dHab / (1 + 0.015 * C1)) ** 2)
return dE | Calculates numerical distance between two colors in RGB color space.
The distance is calculated by CIE94 formula.
:params: Two colors with ``r, g, b`` values in ``0..1`` range
:returns: A number in the ``0..100`` range. The smaller the
number, the closer the colors are. | rgb_distance | python | geldata/gel | edb/common/colorsys.py | https://github.com/geldata/gel/blob/master/edb/common/colorsys.py | Apache-2.0
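A quick check of the metric's intent: identical colors score 0, and red vs.
blue scores far higher than red vs. a slightly darker red:

assert rgb_distance(1.0, 0.0, 0.0, 1.0, 0.0, 0.0) == 0.0
assert rgb_distance(1.0, 0.0, 0.0, 0.0, 0.0, 1.0) > \
    rgb_distance(1.0, 0.0, 0.0, 0.9, 0.0, 0.0)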
def wrap_function_to_infer_spans(func):
"""Provide automatic span for Nonterm production rules."""
def wrapper(*args, **kwargs):
result = func(*args, **kwargs)
obj, *args = args
if len(args) == 1:
# apparently it's a production rule that just returns its
# only arg, so don't need to change the context
#
arg = args[0]
if getattr(arg, 'val', None) is obj.val:
if hasattr(arg, 'span'):
obj.span = arg.span
if hasattr(obj.val, 'span'):
obj.val.span = obj.span
return result
# Avoid mangling existing span
if getattr(obj, 'span', None) is None:
obj.span = get_span(*args)
# we have the span for the nonterminal, but now we need to
# enforce span in the obj.val, recursively, in case it was
# a complex production with nested AST nodes
infer_span_from_children(obj.val, obj.span)
return result
return wrapper | Provide automatic span for Nonterm production rules. | wrap_function_to_infer_spans | python | geldata/gel | edb/common/span.py | https://github.com/geldata/gel/blob/master/edb/common/span.py | Apache-2.0 |
def __init__(
self,
type_: Type[T],
default: Union[T, NoDefaultT] = NoDefault,
*,
coerce: bool = False,
str_formatter: Callable[[T], str] = str,
repr_formatter: Callable[[T], str] = repr,
frozen: bool = False,
) -> None:
"""
:param type:
The type of the value in the field.
:param default:
Default field value. If not specified, the field would
be considered required and a failure to specify its
value when initializing a ``Struct`` will raise
:exc:`TypeError`. `default` can be a callable taking
no arguments.
:param bool coerce:
If set to ``True`` - coerce field's value to its type.
"""
self.type = type_
self.default = default
self.coerce = coerce
self.frozen = frozen
self.formatters = {'str': str_formatter, 'repr': repr_formatter} | :param type:
The type of the value in the field.
:param default:
Default field value. If not specified, the field would
be considered required and a failure to specify its
value when initializing a ``Struct`` will raise
:exc:`TypeError`. `default` can be a callable taking
no arguments.
:param bool coerce:
If set to ``True`` - coerce field's value to its type. | __init__ | python | geldata/gel | edb/common/struct.py | https://github.com/geldata/gel/blob/master/edb/common/struct.py | Apache-2.0 |
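A sketch of how Field is meant to be declared on a Struct subclass; the
Point class is made up:

class Point(Struct):
    x = Field(int, default=0)
    y = Field(int, default=0)
    label = Field(str, default='origin')

p = Point(x=1, y=2)  # label falls back to its default
p.update(y=5)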
def __init__(self, **kwargs: Any) -> None:
"""
:raises: TypeError if invalid field value was provided or a value was
not provided for a field without a default value.
"""
self._check_init_argnames(kwargs)
self._init_fields(kwargs) | :raises: TypeError if invalid field value was provided or a value was
not provided for a field without a default value. | __init__ | python | geldata/gel | edb/common/struct.py | https://github.com/geldata/gel/blob/master/edb/common/struct.py | Apache-2.0 |
def update(self, *args: Any, **kwargs: Any) -> None:
"""Update the field values."""
values: Dict[str, Any] = {}
values.update(*args, **kwargs)
self._check_init_argnames(values)
for k, v in values.items():
setattr(self, k, v) | Update the field values. | update | python | geldata/gel | edb/common/struct.py | https://github.com/geldata/gel/blob/master/edb/common/struct.py | Apache-2.0 |
def setdefaults(self) -> List[str]:
"""Initialize unset fields with default values."""
fields_set = []
for field_name, field in self.__class__._fields.items():
value = getattr(self, field_name)
if value is None and field.default is not None:
value = self._getdefault(field_name, field)
self.set_default_value(field_name, value)
fields_set.append(field_name)
return fields_set | Initialize unset fields with default values. | setdefaults | python | geldata/gel | edb/common/struct.py | https://github.com/geldata/gel/blob/master/edb/common/struct.py | Apache-2.0 |
def formatfields(
self,
formatter: str = 'str',
) -> Iterator[Tuple[str, str]]:
"""Return an iterator over fields formatted using `formatter`."""
for name, field in self.__class__._fields.items():
formatter_obj = field.formatters.get(formatter)
if formatter_obj:
yield (name, formatter_obj(getattr(self, name))) | Return an iterator over fields formatted using `formatter`. | formatfields | python | geldata/gel | edb/common/struct.py | https://github.com/geldata/gel/blob/master/edb/common/struct.py | Apache-2.0 |
def __init__(self, **kwargs: Any) -> None:
"""
:raises: TypeError if invalid field value was provided or a value was
not provided for a field without a default value.
"""
self._check_init_argnames(kwargs)
self._in_init_ = True
try:
self._init_fields(kwargs)
finally:
self._in_init_ = False | :raises: TypeError if invalid field value was provided or a value was
not provided for a field without a default value. | __init__ | python | geldata/gel | edb/common/struct.py | https://github.com/geldata/gel/blob/master/edb/common/struct.py | Apache-2.0 |
def _init_parametric_base(cls) -> None:
"""Initialize a direct subclass of ParametricType"""
# Direct subclasses of ParametricType must declare
# ClassVar attributes corresponding to the Generic type vars.
# For example:
# class P(ParametricType, Generic[T, V]):
# t: ClassVar[Type[T]]
# v: ClassVar[Type[V]]
params = getattr(cls, '__parameters__', None)
if not params:
raise TypeError(
f'{cls} must be declared as Generic'
)
mod = sys.modules[cls.__module__]
annos = get_type_hints(cls, mod.__dict__)
param_map = {}
for attr, t in annos.items():
if not typing_inspect.is_classvar(t):
continue
args = typing_inspect.get_args(t)
# ClassVar constructor should have the check, but be extra safe.
assert len(args) == 1
arg = args[0]
if typing_inspect.get_origin(arg) is not type:
continue
arg_args = typing_inspect.get_args(arg)
# Likewise, rely on Type checking its stuff in the constructor
assert len(arg_args) == 1
if not typing_inspect.is_typevar(arg_args[0]):
continue
if arg_args[0] in params:
param_map[arg_args[0]] = attr
for param in params:
if param not in param_map:
raise TypeError(
f'{cls.__name__}: missing ClassVar for'
f' generic parameter {param}'
)
cls._type_param_map = param_map | Initialize a direct subclass of ParametricType | _init_parametric_base | python | geldata/gel | edb/common/parametric.py | https://github.com/geldata/gel/blob/master/edb/common/parametric.py | Apache-2.0 |
def _init_parametric_user(cls) -> None:
"""Initialize an indirect descendant of ParametricType."""
# For ParametricType grandchildren we have to deal with possible
# TypeVar remapping and generally check for type sanity.
ob = getattr(cls, '__orig_bases__', ())
generic_params: list[type] = []
for b in ob:
if (
isinstance(b, type)
and not isinstance(b, GenericAlias)
and issubclass(b, ParametricType)
and b is not ParametricType
):
raise TypeError(
f'{cls.__name__}: missing one or more type arguments for'
f' base {b.__name__!r}'
)
if not typing_inspect.is_generic_type(b):
continue
org = typing_inspect.get_origin(b)
if not isinstance(org, type):
continue
if not issubclass(org, ParametricType):
generic_params.extend(getattr(b, '__parameters__', ()))
continue
base_params = getattr(org, '__parameters__', ())
base_non_type_params = getattr(org, '_non_type_params', {})
args = typing_inspect.get_args(b)
expected = len(base_params)
if len(args) != expected:
raise TypeError(
f'{b.__name__} expects {expected} type arguments'
f' got {len(args)}'
)
base_map = dict(cls._type_param_map)
subclass_map = {}
for i, arg in enumerate(args):
if i in base_non_type_params:
continue
if not typing_inspect.is_typevar(arg):
raise TypeError(
f'{b.__name__} expects all arguments to be'
f' TypeVars'
)
base_typevar = base_params[i]
attr = base_map.get(base_typevar)
if attr is not None:
subclass_map[arg] = attr
if len(subclass_map) != len(base_map):
raise TypeError(
f'{cls.__name__}: missing one or more type arguments for'
f' base {org.__name__!r}'
)
cls._type_param_map = subclass_map
cls._non_type_params = {
i: p for i, p in enumerate(generic_params)
if p not in cls._type_param_map
} | Initialize an indirect descendant of ParametricType. | _init_parametric_user | python | geldata/gel | edb/common/parametric.py | https://github.com/geldata/gel/blob/master/edb/common/parametric.py | Apache-2.0 |
def __class_getitem__(
cls, params: Union[Union[type, str], Tuple[Union[type, str], ...]]
) -> Type[ParametricType]:
"""Return a dynamic subclass parametrized with `params`.
We cannot use `_GenericAlias` provided by `Generic[T]` because the
default `__class_getitem__` on `_GenericAlias` is not a real type and
so it doesn't retain information on generics on the class. Even on
the object, it adds the relevant `__orig_class__` link too late, after
`__init__()` is called. That means we wouldn't be able to type-check
in the initializer using built-in `Generic[T]`.
"""
if cls.types is not None:
raise TypeError(f"{cls!r} is already parametrized")
if not isinstance(params, tuple):
params = (params,)
all_params = params
type_params = []
for i, param in enumerate(all_params):
if i not in cls._non_type_params:
type_params.append(param)
params_str = ", ".join(_type_repr(a) for a in all_params)
name = f"{cls.__name__}[{params_str}]"
bases = (cls,)
type_dict: Dict[str, Any] = {
"types": tuple(type_params),
"orig_args": all_params,
"__module__": cls.__module__,
}
forward_refs: Dict[str, Tuple[int, str]] = {}
tuple_to_attr: Dict[int, str] = {}
if cls._type_param_map:
gen_params = getattr(cls, '__parameters__', ())
for i, gen_param in enumerate(gen_params):
attr = cls._type_param_map.get(gen_param)
if attr:
tuple_to_attr[i] = attr
expected = len(gen_params)
actual = len(params)
if expected != actual:
raise TypeError(
f"type {cls.__name__!r} expects {expected} type"
f" parameter{'s' if expected != 1 else ''},"
f" got {actual}"
)
for i, attr in tuple_to_attr.items():
type_dict[attr] = all_params[i]
if not all(isinstance(param, type) for param in type_params):
if all(
type(param) is TypeVar # type: ignore[comparison-overlap]
for param in type_params
):
# All parameters are type variables: return the regular generic
# alias to allow proper subclassing.
generic = super(ParametricType, cls)
return generic.__class_getitem__(all_params) # type: ignore
else:
forward_refs = {
param: (i, tuple_to_attr[i])
for i, param in enumerate(type_params)
if isinstance(param, str)
}
if not forward_refs:
raise TypeError(
f"{cls!r} expects types as type parameters")
result = type(name, bases, type_dict)
assert issubclass(result, ParametricType)
result._forward_refs = forward_refs
return result | Return a dynamic subclass parametrized with `params`.
We cannot use `_GenericAlias` provided by `Generic[T]` because the
default `__class_getitem__` on `_GenericAlias` is not a real type and
so it doesn't retain information on generics on the class. Even on
the object, it adds the relevant `__orig_class__` link too late, after
`__init__()` is called. That means we wouldn't be able to type-check
in the initializer using built-in `Generic[T]`. | __class_getitem__ | python | geldata/gel | edb/common/parametric.py | https://github.com/geldata/gel/blob/master/edb/common/parametric.py | Apache-2.0 |
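A sketch of the contract above, mirroring the example in the
_init_parametric_base comment; Box is hypothetical:

from typing import ClassVar, Generic, Type, TypeVar

T = TypeVar('T')

class Box(ParametricType, Generic[T]):
    contents: ClassVar[Type[T]]  # filled in by __class_getitem__

IntBox = Box[int]
assert IntBox.types == (int,)
assert IntBox.contents is int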
def calc_buckets(
start: float, upper_bound: float, /, *, increment_ratio: float = 1.20
) -> tuple[float, ...]:
"""Calculate histogram buckets on a logarithmic scale."""
# See https://amplitude.com/blog/2014/08/06/optimal-streaming-histograms
# for more details.
# (Says a long standing comment, but this isn't what that post recommends!)
result: list[float] = []
while start <= upper_bound:
result.append(start)
start *= increment_ratio
return tuple(result) | Calculate histogram buckets on a logarithmic scale. | calc_buckets | python | geldata/gel | edb/common/prometheus.py | https://github.com/geldata/gel/blob/master/edb/common/prometheus.py | Apache-2.0 |
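For example, latency buckets from 1 ms up to 1 s, growing 20% per step:

import math

buckets = calc_buckets(0.001, 1.0)
assert buckets[0] == 0.001 and buckets[-1] <= 1.0
assert all(math.isclose(b / a, 1.2) for a, b in zip(buckets, buckets[1:]))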
def _into_list_of_frames(a_list: StackSummaryLike):
"""
Create a StackSummary object from a supplied list of
FrameSummary objects or old-style list of tuples.
"""
# While doing a fast-path check for isinstance(a_list, StackSummary) is
# appealing, idlelib.run.cleanup_traceback and other similar code may
# break this by making arbitrary frames plain tuples, so we need to
# check on a frame by frame basis.
result = []
for frame in a_list:
if isinstance(frame, traceback.FrameSummary):
result.append(frame)
else:
filename, lineno, name, line = frame
result.append(
traceback.FrameSummary(filename, lineno, name, line=line)
)
return result | Create a StackSummary object from a supplied list of
FrameSummary objects or old-style list of tuples. | _into_list_of_frames | python | geldata/gel | edb/common/traceback.py | https://github.com/geldata/gel/blob/master/edb/common/traceback.py | Apache-2.0 |
def _format_stack_summary(stack: typing.List[traceback.FrameSummary]):
"""Format the stack ready for printing.
Returns a list of strings ready for printing. Each string in the
resulting list corresponds to a single frame from the stack.
Each string ends in a newline; the strings may contain internal
newlines as well, for those items with source text lines.
For long sequences of the same frame and line, the first few
repetitions are shown, followed by a summary line stating the exact
number of further repetitions.
"""
result = []
last_file = None
last_line = None
last_name = None
count = 0
for frame_summary in stack:
formatted_frame = _format_frame_summary(frame_summary)
if formatted_frame is None:
continue
if (
last_file is None
or last_file != frame_summary.filename
or last_line is None
or last_line != frame_summary.lineno
or last_name is None
or last_name != frame_summary.name
):
if count > traceback._RECURSIVE_CUTOFF:
count -= traceback._RECURSIVE_CUTOFF
result.append(
f' [Previous line repeated {count} more '
f'time{"s" if count > 1 else ""}]\n'
)
last_file = frame_summary.filename
last_line = frame_summary.lineno
last_name = frame_summary.name
count = 0
count += 1
if count > traceback._RECURSIVE_CUTOFF:
continue
result.append(formatted_frame)
if count > traceback._RECURSIVE_CUTOFF:
count -= traceback._RECURSIVE_CUTOFF
result.append(
f' [Previous line repeated {count} more '
f'time{"s" if count > 1 else ""}]\n'
)
return result | Format the stack ready for printing.
Returns a list of strings ready for printing. Each string in the
resulting list corresponds to a single frame from the stack.
Each string ends in a newline; the strings may contain internal
newlines as well, for those items with source text lines.
For long sequences of the same frame and line, the first few
repetitions are shown, followed by a summary line stating the exact
number of further repetitions. | _format_stack_summary | python | geldata/gel | edb/common/traceback.py | https://github.com/geldata/gel/blob/master/edb/common/traceback.py | Apache-2.0 |
def _format_frame_summary(frame: traceback.FrameSummary):
"""Format the lines for a single FrameSummary.
Returns a string representing one frame involved in the stack. This
gets called for every frame to be printed in the stack summary.
"""
row = [f' {frame.filename}:{frame.lineno}, in {frame.name}\n']
if frame.line:
stripped_line = frame.line.strip()
row.append(' {}\n'.format(stripped_line))
orig_line_len = len(frame._original_line)
frame_line_len = len(frame.line.lstrip())
stripped_characters = orig_line_len - frame_line_len
if frame.colno is not None and frame.end_colno is not None:
start_offset = (
traceback._byte_offset_to_character_offset(
frame._original_line, frame.colno
)
+ 1
)
end_offset = (
traceback._byte_offset_to_character_offset(
frame._original_line, frame.end_colno
)
+ 1
)
anchors = None
if frame.lineno == frame.end_lineno:
with suppress(Exception):
anchors = (
traceback._extract_caret_anchors_from_line_segment(
frame._original_line[
start_offset - 1 : end_offset - 1
]
)
)
else:
end_offset = stripped_characters + len(stripped_line)
# show indicators if primary char doesn't span the frame line
if end_offset - start_offset < len(stripped_line) or (
anchors
and anchors.right_start_offset - anchors.left_end_offset > 0
):
row.append(' ')
row.append(' ' * (start_offset - stripped_characters))
if anchors:
row.append(anchors.primary_char * (anchors.left_end_offset))
row.append(
anchors.secondary_char
* (anchors.right_start_offset - anchors.left_end_offset)
)
row.append(
anchors.primary_char
* (
end_offset
- start_offset
- anchors.right_start_offset
)
)
else:
row.append('^' * (end_offset - start_offset))
row.append('\n')
if frame.locals:
for name, value in sorted(frame.locals.items()):
row.append(' {name} = {value}\n'.format(name=name, value=value))
return ''.join(row) | Format the lines for a single FrameSummary.
Returns a string representing one frame involved in the stack. This
gets called for every frame to be printed in the stack summary. | _format_frame_summary | python | geldata/gel | edb/common/traceback.py | https://github.com/geldata/gel/blob/master/edb/common/traceback.py | Apache-2.0 |
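A small illustration of the per-frame output shape (file and line, source line, then sorted locals); the values are invented:

import traceback

# FrameSummary repr()s the locals it is given, so 42 is stored as '42'.
frame = traceback.FrameSummary(
    'app.py', 10, 'main', line='x = compute()', locals={'x': 42})
# Formatted by the function above, this frame renders roughly as:
#   app.py:10, in main
#     x = compute()
#     x = 42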
def inline(argument_index: int):
"""
When added to grammar productions, it makes the method equivalent to:
self.val = kids[argument_index].val
"""
def decorator(func: Any):
func.inline_index = argument_index
return func
return decorator | When added to grammar productions, it makes the method equivalent to:
self.val = kids[argument_index].val | inline | python | geldata/gel | edb/common/parsing.py | https://github.com/geldata/gel/blob/master/edb/common/parsing.py | Apache-2.0 |
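A hedged sketch of how `@inline` is used in a production; the nonterminal and token names are invented:

class ParenExpr(Nonterm):           # hypothetical nonterminal
    @inline(1)
    def reduce_LPAREN_Expr_RPAREN(self, *kids):
        pass  # self.val is taken from kids[1], the inner Expr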
def __init_subclass__(cls, *, is_internal=False, **kwargs):
"""Add docstrings to class and reduce functions
If no class docstring is present, set it to '%nonterm'.
If any reduce function (ie. of the form `reduce(_\\w+)+` does not
have a docstring, a new one is generated based on the function name.
See https://github.com/MagicStack/parsing for more information.
Keyword arguments:
is_internal -- internal classes do not need docstrings and processing
can be skipped
"""
super().__init_subclass__(**kwargs)
if is_internal:
return
if not cls.__doc__:
cls.__doc__ = '%nonterm'
for name, attr in cls.__dict__.items():
if (name.startswith('reduce_') and
isinstance(attr, types.FunctionType)):
inline_index = getattr(attr, 'inline_index', None)
if attr.__doc__ is None:
tokens = name.split('_')
if name == 'reduce_empty':
tokens = ['reduce', '<e>']
doc = r'%reduce {}'.format(' '.join(tokens[1:]))
prec = getattr(attr, '__parsing_precedence__', None)
if prec is not None:
doc += ' [{}]'.format(prec)
attr = lambda self, *args, meth=attr: meth(self, *args)
attr.__doc__ = doc
a = span.wrap_function_to_infer_spans(attr)
a.__doc__ = attr.__doc__
a.inline_index = inline_index
setattr(cls, name, a) | Add docstrings to class and reduce functions
If no class docstring is present, set it to '%nonterm'.
If any reduce function (ie. of the form `reduce(_\\w+)+` does not
have a docstring, a new one is generated based on the function name.
See https://github.com/MagicStack/parsing for more information.
Keyword arguments:
is_internal -- internal classes do not need docstrings and processing
can be skipped | __init_subclass__ | python | geldata/gel | edb/common/parsing.py | https://github.com/geldata/gel/blob/master/edb/common/parsing.py | Apache-2.0 |
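A hedged sketch of the docstring generation: a reduce method without a docstring gets a `%reduce` grammar docstring derived from its name (nonterminal and tokens invented):

class Sum(Nonterm):                 # hypothetical nonterminal
    def reduce_Expr_PLUS_Expr(self, lhs, op, rhs):
        self.val = lhs.val + rhs.val

# After class creation the grammar docstring is synthesized:
# Sum.reduce_Expr_PLUS_Expr.__doc__ == '%reduce Expr PLUS Expr'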
def __init_subclass__(
cls,
*,
element,
separator=None,
is_internal=False,
allow_trailing_separator=False,
**kwargs,
):
"""Create reductions for list classes.
If trailing separator is not allowed, the class can handle all
reductions directly.
L := E
L := L S E
If trailing separator is allowed, create an inner class to handle
all non-trailing reductions. Then the class handles the trailing
separator.
I := E
I := I S E
L := I
L := I S
The inner class is added to the same module as the class.
"""
if not is_internal:
if not allow_trailing_separator:
# directly handle the list
ListNonterm.add_list_reductions(
cls, element=element, separator=separator
)
else:
# create inner list class and add to same module
mod = sys.modules[cls.__module__]
def inner_cls_exec(ns):
ns['__module__'] = mod.__name__
return ns
inner_cls_name = cls.__name__ + 'Inner'
inner_cls_kwds = dict(element=element, separator=separator)
inner_cls = types.new_class(inner_cls_name, (ListNonterm,),
inner_cls_kwds, inner_cls_exec)
setattr(mod, inner_cls_name, inner_cls)
# create reduce_inner function
separator_name = ListNonterm.component_name(separator)
setattr(cls,
'reduce_{}'.format(inner_cls_name),
lambda self, inner: (
ListNonterm._reduce_inner(self, inner)
))
setattr(cls,
'reduce_{}_{}'.format(inner_cls_name, separator_name),
lambda self, inner, sep: (
ListNonterm._reduce_inner(self, inner)
))
# reduce functions must be present before calling superclass
super().__init_subclass__(is_internal=is_internal, **kwargs) | Create reductions for list classes.
If trailing separator is not allowed, the class can handle all
reductions directly.
L := E
L := L S E
If trailing separator is allowed, create an inner class to handle
all non-trailing reductions. Then the class handles the trailing
separator.
I := E
I := I S E
L := I
L := I S
The inner class is added to the same module as the class. | __init_subclass__ | python | geldata/gel | edb/common/parsing.py | https://github.com/geldata/gel/blob/master/edb/common/parsing.py | Apache-2.0 |
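A hedged sketch of declaring a list nonterminal under these rules; the element and separator classes (Expr, T_COMMA) are assumed to exist:

class ExprList(ListNonterm, element=Expr, separator=T_COMMA,
               allow_trailing_separator=True):
    pass

# An ExprListInner class (I := E, I := I S E) is generated in the
# same module; ExprList itself adds L := I and L := I S.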
def precedence(precedence):
"""Decorator to set production precedence."""
def decorator(func):
func.__parsing_precedence__ = precedence.__name__
return func
return decorator | Decorator to set production precedence. | precedence | python | geldata/gel | edb/common/parsing.py | https://github.com/geldata/gel/blob/master/edb/common/parsing.py | Apache-2.0 |
def value_dispatch(func: Callable[..., _T]) -> _ValueDispatchCallable[_T]:
"""Like singledispatch() but dispatches by value of the first arg.
Example:
@value_dispatch
def eat(fruit):
return f"I don't want a {fruit}..."
@eat.register('apple')
def _eat_apple(fruit):
return "I love apples!"
@eat.register('eggplant')
@eat.register('squash')
def _eat_what(fruit):
return f"I didn't know {fruit} is a fruit!"
An alternative to applying multiple `register` decorators is to
use the `register_for_all` helper:
@eat.register_for_all({'eggplant', 'squash'})
def _eat_what(fruit):
return f"I didn't know {fruit} is a fruit!"
"""
registry: dict[Any, Callable[..., _T]] = {}
@functools.wraps(func)
def wrapper(arg0: Any, *args: Any, **kwargs: Any) -> _T:
try:
delegate = registry[arg0]
except KeyError:
pass
else:
return delegate(arg0, *args, **kwargs)
return func(arg0, *args, **kwargs)
def register(
value: Any,
) -> Callable[[Callable[..., _T]], Callable[..., _T]]:
if inspect.isfunction(value):
raise TypeError(
"value_dispatch.register() decorator requires a value")
def wrap(func: Callable[..., _T]) -> Callable[..., _T]:
if value in registry:
raise ValueError(
f'@value_dispatch: there is already a handler '
f'registered for {value!r}'
)
registry[value] = func
return func
return wrap
def register_for_all(
values: Iterable[Any],
) -> Callable[[Callable[..., _T]], Callable[..., _T]]:
def wrap(func: Callable[..., _T]) -> Callable[..., _T]:
for value in values:
if value in registry:
raise ValueError(
f'@value_dispatch: there is already a handler '
f'registered for {value!r}'
)
registry[value] = func
return func
return wrap
wrapper.register = register # type: ignore [attr-defined]
wrapper.register_for_all = register_for_all # type: ignore [attr-defined]
return wrapper # type: ignore [return-value] | Like singledispatch() but dispatches by value of the first arg.
Example:
@value_dispatch
def eat(fruit):
return f"I don't want a {fruit}..."
@eat.register('apple')
def _eat_apple(fruit):
return "I love apples!"
@eat.register('eggplant')
@eat.register('squash')
def _eat_what(fruit):
return f"I didn't know {fruit} is a fruit!"
An alternative to applying multiple `register` decorators is to
use the `register_for_all` helper:
@eat.register_for_all({'eggplant', 'squash'})
def _eat_what(fruit):
return f"I didn't know {fruit} is a fruit!" | value_dispatch | python | geldata/gel | edb/common/value_dispatch.py | https://github.com/geldata/gel/blob/master/edb/common/value_dispatch.py | Apache-2.0 |
def set_trace(**kwargs):
"""Debugger hook that works inside worker processes.
Set PYTHONBREAKPOINT=edb.common.debug.set_trace, and this will be triggered
by `breakpoint()`.
Unfortunately readline doesn't work when not using stdin itself,
so try running the server wrapped with `rlwrap`.
"""
from pdb import Pdb
new_stdin = open("/dev/tty", "r")
Pdb(stdin=new_stdin, stdout=sys.stdout).set_trace(
sys._getframe().f_back, **kwargs) | Debugger hook that works inside worker processes.
Set PYTHONBREAKPOINT=edb.common.debug.set_trace, and this will be triggered
by `breakpoint()`.
Unfortunately readline doesn't work when not using stdin itself,
so try running the server wrapped with `rlwrap`. | set_trace | python | geldata/gel | edb/common/debug.py | https://github.com/geldata/gel/blob/master/edb/common/debug.py | Apache-2.0 |
def xrepr(obj, *, max_len=None):
"""Extended ``builtins.repr`` function.
Examples:
.. code-block:: pycon
>>> xrepr('1234567890', max_len=7)
'12'...
:param int max_len: When defined, limits the maximum length of the result
string representation.
:returns str:
"""
result = str(repr(obj))
if max_len is not None and len(result) > max_len:
ext = '...'
if result[0] in ('"', "'"):
ext = result[0] + ext
elif result[0] == '<':
ext = '>' + ext
result = result[:(max_len - len(ext))] + ext
return result | Extended ``builtins.repr`` function.
Examples:
.. code-block:: pycon
>>> xrepr('1234567890', max_len=7)
'12'...
:param int max_len: When defined, limits the maximum length of the result
string representation.
:returns str: | xrepr | python | geldata/gel | edb/common/markup/format.py | https://github.com/geldata/gel/blob/master/edb/common/markup/format.py | Apache-2.0 |
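A runnable demo of the truncation rule: the closing quote is folded into the ellipsis so the result still reads as a clipped literal.

assert xrepr('1234567890') == "'1234567890'"
assert xrepr('1234567890', max_len=7) == "'12'..."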
def no_ref_detect(func: T) -> T:
"""Serializer decorated with ``no_ref_detect`` will be executed without
prior checking the memo if object was already serialized"""
func.no_ref_detect = True # type: ignore
return func | Serializer decorated with ``no_ref_detect`` will be executed without
prior checking the memo if object was already serialized | no_ref_detect | python | geldata/gel | edb/common/markup/serializer/base.py | https://github.com/geldata/gel/blob/master/edb/common/markup/serializer/base.py | Apache-2.0 |
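A hedged sketch of decorating a serializer for a type that cannot participate in reference cycles; the function is hypothetical and the element construction is elided:

@no_ref_detect
def serialize_int(obj, *, ctx):     # hypothetical serializer
    ...                             # ints are immutable and cheap, so
                                    # memo bookkeeping is safely skipped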
def serializer(obj, *, ctx):
"""Markup serializers dispatcher"""
raise NotImplementedError | Markup serializers dispatcher | serializer | python | geldata/gel | edb/common/markup/serializer/base.py | https://github.com/geldata/gel/blob/master/edb/common/markup/serializer/base.py | Apache-2.0 |
def serialize(obj, *, ctx):
"""Serialize arbitrary python object to Markup elements"""
tobj = type(obj)
sr = serializer.dispatch(tobj)
if sr is serializer:
raise LookupError(f'unable to find serializer for object {obj!r}')
if (sr is serialize_unknown_object and
hasattr(tobj, '__dataclass_fields__')):
sr = serialize_dataclass
ctx.level += 1
ctx.run_cnt += 1
try:
if ctx.level >= OVERFLOW_BARIER or ctx.run_cnt >= RUN_OVERFLOW_BARIER:
return elements.base.OverflowBarier()
ref_detect = True
try:
# Was the serializer decorated with ``@no_ref_detect``?
#
ref_detect = not sr.no_ref_detect
except AttributeError:
pass
if ref_detect:
# OK, so if we've already serialized obj, don't do that again, just
# return ``markup.Ref`` element.
#
obj_id = id(obj)
if obj_id in ctx.memo:
return elements.lang.Ref(ref=obj_id, refname=repr(obj))
else:
ctx.memo.add(obj_id)
ctx.keep_alive.append(obj)
try:
return sr(obj, ctx=ctx)
except Exception as ex:
return elements.base.SerializationError(
text=str(ex), cls='{}.{}'.format(
ex.__class__.__module__, ex.__class__.__name__))
finally:
ctx.level -= 1 | Serialize an arbitrary Python object to Markup elements | serialize | python | geldata/gel | edb/common/markup/serializer/base.py | https://github.com/geldata/gel/blob/master/edb/common/markup/serializer/base.py | Apache-2.0 |
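A minimal standalone sketch (not the real serializer context) of the memo-based ref detection above: the second encounter of an object yields a reference instead of re-serializing it.

memo: set[int] = set()

def toy_serialize(obj):
    obj_id = id(obj)
    if obj_id in memo:
        return ('Ref', obj_id)      # already serialized: emit a reference
    memo.add(obj_id)
    return ('Value', obj_id, repr(obj))

shared = [1, 2]
assert toy_serialize(shared)[0] == 'Value'
assert toy_serialize(shared)[0] == 'Ref'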
def main() -> None:
f = open('edb/edgeql-parser/src/ast.rs', 'w')
f.write(
textwrap.dedent(
'''\
// DO NOT EDIT. This file was generated with:
//
// $ edb gen-rust-ast
//! Abstract Syntax Tree for EdgeQL
#![allow(non_camel_case_types)]
use indexmap::IndexMap;
#[cfg(feature = "python")]
use edgeql_parser_derive::IntoPython;
'''
)
)
# discover all nodes
for name, typ in qlast.__dict__.items():
if not isinstance(typ, type) or not hasattr(typ, '_direct_fields'):
continue
if not issubclass(typ, qlast.Base):
continue
# re-run field collection to correctly handle forward-references
typ = typ._collect_direct_fields() # type: ignore
ast_classes[typ.__name__] = ASTClass(name=name, typ=typ)
# build inheritance graph
for ast_class in ast_classes.values():
for base in ast_class.typ.__bases__:
if base.__name__ not in ast_classes:
continue
ast_classes[base.__name__].children.append(ast_class.typ)
# generate structs
for ast_class in ast_classes.values():
f.write(codegen_struct(ast_class))
while len(union_types) > 0:
f.write(codegen_union(union_types.pop(0)))
# generate enums
for name, typ in chain(qlast.__dict__.items(), qltypes.__dict__.items()):
if not isinstance(typ, type) or not issubclass(typ, s_enum.StrEnum):
continue
f.write(codegen_enum(name, typ)) | \
// DO NOT EDIT. This file was generated with:
//
// $ edb gen-rust-ast
//! Abstract Syntax Tree for EdgeQL
#![allow(non_camel_case_types)]
use indexmap::IndexMap;
#[cfg(feature = "python")]
use edgeql_parser_derive::IntoPython; | main | python | geldata/gel | edb/tools/gen_rust_ast.py | https://github.com/geldata/gel/blob/master/edb/tools/gen_rust_ast.py | Apache-2.0 |
def config(make_include: bool, pg_config: bool) -> None:
'''Query certain parameters about an edgedb environment'''
if make_include:
share = buildmeta.get_extension_dir_path()
base = share.parent.parent.parent
# XXX: It should not be here.
if not devmode.is_in_dev_mode():
base = base / 'share'
mk = (
base / 'tests' / 'extension-testing' / 'exts.mk'
)
print(mk)
if pg_config:
print(buildmeta.get_pg_config_path()) | Query certain parameters about an edgedb environment | config | python | geldata/gel | edb/tools/config.py | https://github.com/geldata/gel/blob/master/edb/tools/config.py | Apache-2.0 |
def print_header(f):
print(
textwrap.dedent(
'''
# AUTOGENERATED FROM _localdev postgres instance WITH
# $ edb gen-sql-introspection
"""Declarations of information schema and pg_catalog"""
from typing import Tuple, Dict, List
ColumnName = str
ColumnType = str | None
'''
)[1:],
file=f,
) | # AUTOGENERATED FROM _localdev postgres instance WITH
# $ edb gen-sql-introspection
"""Declarations of information schema and pg_catalog | print_header | python | geldata/gel | edb/tools/gen_sql_introspection.py | https://github.com/geldata/gel/blob/master/edb/tools/gen_sql_introspection.py | Apache-2.0 |
def print_schema(
f,
schema_name: str,
schemas_by_version: Mapping[
int, Mapping[str, Mapping[str, List[Tuple[str, str]]]]
],
):
"""
Generates Python dict source for tables of a given PostgreSQL schema and
writes it into a file. Param `schemas_by_version` can contain data for more than one
version of PostgreSQL. This function will generate code for the latest
PostgreSQL version and search previous versions to determine the first
version that contains each column.
"""
print(f'Code generation of schema "{schema_name}"')
version_latest = max(iter(schemas_by_version.keys()))
versions_desc = list(schemas_by_version.keys())
versions_desc.sort(reverse=True)
versions_desc = versions_desc[1:]
schemas_latest = schemas_by_version[version_latest]
tables_latest = schemas_latest[schema_name]
typ = ': Dict[str, List[Tuple[ColumnName, ColumnType, int]]]'
print(schema_name.upper() + typ + " = {", file=f)
for index, (table, columns) in enumerate(tables_latest.items()):
print(f' "{table}": [', file=f)
for [col_name, col_typ] in columns:
ver_since = version_latest
for v in versions_desc:
schema = schemas_by_version.get(v)
assert schema
tbls = schema.get(schema_name)
assert tbls
tbl = tbls.get(table, None)
if tbl is None:
break
c = next((True for c, _ in tbl if c == col_name), False)
if not c:
break
ver_since = v
if col_typ == "ARRAY" or col_typ.startswith("any"):
col_typ = "None"
else:
col_typ = col_typ.replace('"', '\\"')
col_typ = f'"{col_typ}"'
print(f' ("{col_name}", {col_typ}, {ver_since}),', file=f)
last = index == len(tables_latest) - 1
comma = ',' if not last else ''
print(f' ]{comma}', file=f)
print('}', file=f) | Generates Python dict source for tables of a given PostgreSQL schema and
writes it into a file. Param `schemas_by_version` can contain data for more than one
version of PostgreSQL. This function will generate code for the latest
PostgreSQL version and search previous versions to determine the first
version that contains each column. | print_schema | python | geldata/gel | edb/tools/gen_sql_introspection.py | https://github.com/geldata/gel/blob/master/edb/tools/gen_sql_introspection.py | Apache-2.0 |
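For reference, a hedged illustration (invented table, columns, and versions) of the module shape this emits:

from typing import Dict, List, Tuple

ColumnName = str
ColumnType = str | None

PG_CATALOG: Dict[str, List[Tuple[ColumnName, ColumnType, int]]] = {
    "pg_class": [
        ("oid", "oid", 13),         # column present since PG 13
        ("relname", "name", 13),
    ],
}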
def lift(f: Callable[..., Data]) -> LiftedFunc:
"""Lifts a function operating on base data to operator on sets.
The result is the usual cartesian product."""
def inner(*args: Result) -> Result:
out = []
for args1 in itertools.product(*args):
val = f(*args1)
out.append(val)
return out
return inner | Lifts a function operating on base data to operator on sets.
The result is the usual cartesian product. | lift | python | geldata/gel | edb/tools/toy_eval_model.py | https://github.com/geldata/gel/blob/master/edb/tools/toy_eval_model.py | Apache-2.0 |
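A runnable demo of the cartesian-product semantics:

import operator

add = lift(operator.add)
assert add([1, 2], [10, 20]) == [11, 21, 12, 22]
assert add([1], []) == []           # an empty set absorbs the product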