Dataset schema (column, type, value stats):

    code        string    lengths 26–870k
    docstring   string    lengths 1–65.6k
    func_name   string    lengths 1–194
    language    string    1 distinct value
    repo        string    lengths 8–68
    path        string    lengths 5–194
    url         string    lengths 46–254
    license     string    4 distinct values
def simplify_union_types(
    schema: s_schema.Schema,
    types: Sequence[s_types.Type],
) -> Sequence[s_types.Type]:
    """Minimize the types used to create a union of types.

    Any union types are unwrapped. Then, any unnecessary
    subclasses are removed.
    """
    from edb.schema import types as s_types

    components: Set[s_types.Type] = set()
    for t in types:
        union_of = t.get_union_of(schema)
        if union_of:
            components.update(union_of.objects(schema))
        else:
            components.add(t)

    if all(isinstance(c, s_types.InheritingType) for c in components):
        return list(minimize_class_set_by_most_generic(
            schema,
            cast(Set[s_types.InheritingType], components),
        ))
    else:
        return list(components)
Minimize the types used to create a union of types. Any union types are unwrapped. Then, any unnecessary subclasses are removed.
simplify_union_types
python
geldata/gel
edb/schema/utils.py
https://github.com/geldata/gel/blob/master/edb/schema/utils.py
Apache-2.0
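The minimization step above keeps only the most general members of a set. A minimal runnable sketch of the same idea, using plain Python classes and issubclass() as a stand-in for schema types (illustrative only, not the gel API):

# Illustrative sketch: keep only the most generic classes in a set,
# mirroring minimize_class_set_by_most_generic for plain Python classes.

def minimize_by_most_generic(classes: set[type]) -> list[type]:
    # Drop any class that is a strict subclass of another member.
    return [
        c for c in classes
        if not any(c is not other and issubclass(c, other)
                   for other in classes)
    ]

class Animal: pass
class Dog(Animal): pass
class Poodle(Dog): pass

# A union of Animal | Dog | Poodle collapses to just Animal.
assert minimize_by_most_generic({Animal, Dog, Poodle}) == [Animal]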
def simplify_union_types_preserve_derived(
    schema: s_schema.Schema,
    types: Sequence[s_types.Type],
) -> Sequence[s_types.Type]:
    """Minimize the types used to create a union of types.

    Any union types are unwrapped. Then, any unnecessary
    subclasses are removed.

    Derived types are always preserved for 'std::UNION',
    'std::IF', and 'std::??'.
    """
    from edb.schema import types as s_types

    components: Set[s_types.Type] = set()
    for t in types:
        union_of = t.get_union_of(schema)
        if union_of:
            components.update(union_of.objects(schema))
        else:
            components.add(t)

    derived = set(
        t
        for t in components
        if (
            isinstance(t, s_types.InheritingType)
            and t.get_is_derived(schema)
        )
    )
    nonderived: Sequence[s_types.Type] = [
        t for t in components if t not in derived
    ]
    nonderived = minimize_class_set_by_most_generic(
        schema,
        cast(Set[s_types.InheritingType], nonderived),
    )

    return list(nonderived) + list(derived)
Minimize the types used to create a union of types. Any union types are unwrapped. Then, any unnecessary subclasses are removed. Derived types are always preserved for 'std::UNION', 'std::IF', and 'std::??'.
simplify_union_types_preserve_derived
python
geldata/gel
edb/schema/utils.py
https://github.com/geldata/gel/blob/master/edb/schema/utils.py
Apache-2.0
def get_type_expr_non_overlapping_union(
    type: s_types.Type,
    schema: s_schema.Schema,
) -> Tuple[FrozenSet[s_types.Type], bool]:
    """Get a non-overlapping set of the type's descendants"""
    from edb.schema import types as s_types

    expanded_types = expand_type_expr_descendants(type, schema)

    # filter out subclasses
    expanded_types = {
        type
        for type in expanded_types
        if not any(
            type is not other and type.issubclass(schema, other)
            for other in expanded_types
        )
    }

    non_overlapping, union_is_exhaustive = get_non_overlapping_union(
        schema, cast(set[so.InheritingObject], expanded_types)
    )

    return cast(FrozenSet[s_types.Type], non_overlapping), union_is_exhaustive
Get a non-overlapping set of the type's descendants
get_type_expr_non_overlapping_union
python
geldata/gel
edb/schema/utils.py
https://github.com/geldata/gel/blob/master/edb/schema/utils.py
Apache-2.0
def expand_type_expr_descendants(
    type: s_types.Type,
    schema: s_schema.Schema,
    *,
    expand_opaque_union: bool = True,
) -> set[s_types.Type]:
    """Expand types and type expressions to get descendants"""
    from edb.schema import types as s_types

    if sub_union := type.get_union_of(schema):
        # Expanding a union
        # Get the union of the component descendants
        return set.union(*(
            expand_type_expr_descendants(
                component, schema,
            )
            for component in sub_union.objects(schema)
        ))

    elif sub_intersection := type.get_intersection_of(schema):
        # Expanding an intersection
        # Get the intersection of component descendants
        return set.intersection(*(
            expand_type_expr_descendants(
                component, schema
            )
            for component in sub_intersection.objects(schema)
        ))

    elif type.is_view(schema):
        # When expanding a view, simply unpeel the view.
        return expand_type_expr_descendants(
            type.peel_view(schema), schema
        )

    # Return simple type and all its descendants.
    # Some types (eg. BaseObject) have non-simple descendants,
    # filter them out.
    return {type} | {
        c
        for c in cast(
            set[s_types.Type],
            set(cast(so.InheritingObject, type).descendants(schema)),
        )
        if (
            not c.is_union_type(schema)
            and not c.is_intersection_type(schema)
            and not c.is_view(schema)
        )
    }
Expand types and type expressions to get descendants
expand_type_expr_descendants
python
geldata/gel
edb/schema/utils.py
https://github.com/geldata/gel/blob/master/edb/schema/utils.py
Apache-2.0
def simplify_intersection_types(
    schema: s_schema.Schema,
    types: Sequence[s_types.Type],
) -> Sequence[s_types.Type]:
    """Minimize the types used to create an intersection of types.

    Any intersection types are unwrapped. Then, any unnecessary
    superclasses are removed.
    """
    from edb.schema import types as s_types

    components: Set[s_types.Type] = set()
    for t in types:
        intersection_of = t.get_intersection_of(schema)
        if intersection_of:
            components.update(intersection_of.objects(schema))
        else:
            components.add(t)

    if all(isinstance(c, s_types.InheritingType) for c in components):
        return minimize_class_set_by_least_generic(
            schema,
            cast(Set[s_types.InheritingType], components),
        )
    else:
        return list(components)
Minimize the types used to create an intersection of types. Any intersection types are unwrapped. Then, any unnecessary superclasses are removed.
simplify_intersection_types
python
geldata/gel
edb/schema/utils.py
https://github.com/geldata/gel/blob/master/edb/schema/utils.py
Apache-2.0
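Dually to the union case, an intersection keeps only the least generic members, since a more specific member already implies its superclasses. A toy version of that minimization with plain classes (illustrative only, not minimize_class_set_by_least_generic itself):

# Illustrative sketch: keep only the least generic classes in a set;
# an intersection A & B is already implied by the most specific member.

def minimize_by_least_generic(classes: set[type]) -> list[type]:
    # Drop any class that is a strict superclass of another member.
    return [
        c for c in classes
        if not any(c is not other and issubclass(other, c)
                   for other in classes)
    ]

class Named: pass
class Owned: pass
class NamedOwned(Named, Owned): pass

# Named & Owned & NamedOwned collapses to just NamedOwned.
assert minimize_by_least_generic({Named, Owned, NamedOwned}) == [NamedOwned]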
def test_polymorphic(self, schema: s_schema.Schema, poly: Type) -> bool:
    """Check if this type can be matched by a polymorphic type.

    Examples:

        - `array<anyscalar>`.test_polymorphic(`array<anytype>`) -> True
        - `array<str>`.test_polymorphic(`array<anytype>`) -> True
        - `array<int64>`.test_polymorphic(`anyscalar`) -> False
        - `float32`.test_polymorphic(`anyint`) -> False
        - `int32`.test_polymorphic(`anyint`) -> True
    """
    if not poly.is_polymorphic(schema):
        raise TypeError('expected a polymorphic type as a second argument')

    if poly.is_any(schema):
        return True

    if poly.is_anyobject(schema) and self.is_object_type():
        return True

    return self._test_polymorphic(schema, poly)
Check if this type can be matched by a polymorphic type.

Examples:
    - `array<anyscalar>`.test_polymorphic(`array<anytype>`) -> True
    - `array<str>`.test_polymorphic(`array<anytype>`) -> True
    - `array<int64>`.test_polymorphic(`anyscalar`) -> False
    - `float32`.test_polymorphic(`anyint`) -> False
    - `int32`.test_polymorphic(`anyint`) -> True
test_polymorphic
python
geldata/gel
edb/schema/types.py
https://github.com/geldata/gel/blob/master/edb/schema/types.py
Apache-2.0
def resolve_polymorphic(
    self, schema: s_schema.Schema, other: Type
) -> Optional[Type]:
    """Resolve the polymorphic type component.

    Examples:

        - `array<anytype>`.resolve_polymorphic(`array<int>`) -> `int`
        - `array<anytype>`.resolve_polymorphic(`tuple<int>`) -> None
    """
    if not self.is_polymorphic(schema):
        return None

    return self._resolve_polymorphic(schema, other)
Resolve the polymorphic type component.

Examples:
    - `array<anytype>`.resolve_polymorphic(`array<int>`) -> `int`
    - `array<anytype>`.resolve_polymorphic(`tuple<int>`) -> None
resolve_polymorphic
python
geldata/gel
edb/schema/types.py
https://github.com/geldata/gel/blob/master/edb/schema/types.py
Apache-2.0
def to_nonpolymorphic(
    self: TypeT, schema: s_schema.Schema, concrete_type: Type
) -> typing.Tuple[s_schema.Schema, Type]:
    """Produce a non-polymorphic version of self.

    Example:
        `array<anytype>`.to_nonpolymorphic(`int`) -> `array<int>`
        `tuple<int, anytype>`.to_nonpolymorphic(`str`) -> `tuple<int, str>`
    """
    if not self.is_polymorphic(schema):
        raise TypeError('non-polymorphic type')

    return self._to_nonpolymorphic(schema, concrete_type)
Produce a non-polymorphic version of self.

Example:
    `array<anytype>`.to_nonpolymorphic(`int`) -> `array<int>`
    `tuple<int, anytype>`.to_nonpolymorphic(`str`) -> `tuple<int, str>`
to_nonpolymorphic
python
geldata/gel
edb/schema/types.py
https://github.com/geldata/gel/blob/master/edb/schema/types.py
Apache-2.0
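The three polymorphic hooks above share one pattern: match a concrete type against a type expression containing `anytype`, extract the binding, then substitute it back. A self-contained sketch of that pattern over nested tuples as a stand-in type representation (hypothetical, not the schema classes):

# Illustrative sketch: types as nested tuples, e.g. ('array', 'anytype');
# resolve the binding of 'anytype', then substitute it back in.

def resolve_polymorphic(poly, concrete):
    # Return the type bound to 'anytype', or None if the shapes differ.
    if poly == 'anytype':
        return concrete
    if (isinstance(poly, tuple) and isinstance(concrete, tuple)
            and poly[0] == concrete[0]):
        return resolve_polymorphic(poly[1], concrete[1])
    return None

def to_nonpolymorphic(poly, concrete_type):
    # Substitute concrete_type for every 'anytype' occurrence.
    if poly == 'anytype':
        return concrete_type
    if isinstance(poly, tuple):
        return (poly[0], to_nonpolymorphic(poly[1], concrete_type))
    return poly

assert resolve_polymorphic(('array', 'anytype'), ('array', 'int64')) == 'int64'
assert resolve_polymorphic(('array', 'anytype'), ('tuple', 'int64')) is None
assert to_nonpolymorphic(('array', 'anytype'), 'int64') == ('array', 'int64')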
def as_type_delete_if_unused(
    self: TypeT,
    schema: s_schema.Schema,
) -> Optional[sd.DeleteObject[TypeT]]:
    """If this type is owned by other objects, delete it if unused.

    For types that get created behind the scenes as part of
    another object, such as collection types and union types,
    this should generate an appropriate deletion. Otherwise,
    it should return None.
    """
    return None
If this type is owned by other objects, delete it if unused. For types that get created behind the scenes as part of another object, such as collection types and union types, this should generate an appropriate deletion. Otherwise, it should return None.
as_type_delete_if_unused
python
geldata/gel
edb/schema/types.py
https://github.com/geldata/gel/blob/master/edb/schema/types.py
Apache-2.0
def get_generated_name(self, schema: s_schema.Schema) -> s_name.UnqualName:
    """Return collection type name generated from element types.

    Unlike get_name(), which might return a custom name, this will
    always return a name derived from the names of the collection
    element type(s).
    """
    raise NotImplementedError
Return collection type name generated from element types. Unlike get_name(), which might return a custom name, this will always return a name derived from the names of the collection element type(s).
get_generated_name
python
geldata/gel
edb/schema/types.py
https://github.com/geldata/gel/blob/master/edb/schema/types.py
Apache-2.0
def get_underlying_schema_class(cls) -> typing.Type[Collection]:
    """Return the concrete collection class for this ExprAlias class."""
    raise NotImplementedError
Return the concrete collection class for this ExprAlias class.
get_underlying_schema_class
python
geldata/gel
edb/schema/types.py
https://github.com/geldata/gel/blob/master/edb/schema/types.py
Apache-2.0
def as_underlying_type_delete_if_unused(
    self,
    schema: s_schema.Schema,
) -> sd.DeleteObject[Type]:
    """Return a conditional deletion command for the underlying type object
    """
    return sd.get_object_delta_command(
        objtype=type(self).get_underlying_schema_class(),
        cmdtype=sd.DeleteObject,
        schema=schema,
        name=self.get_generated_name(schema),
        if_unused=True,
        if_exists=True,
    )
Return a conditional deletion command for the underlying type object
as_underlying_type_delete_if_unused
python
geldata/gel
edb/schema/types.py
https://github.com/geldata/gel/blob/master/edb/schema/types.py
Apache-2.0
def is_type_compatible(
    type_a: Type,
    type_b: Type,
    *,
    schema: s_schema.Schema,
) -> bool:
    """Check whether two types have compatible SQL representations.

    EdgeQL implicit casts need to be turned into explicit casts in
    some places, since the semantics differ from SQL's.
    """

    schema, material_type_a = type_a.material_type(schema)
    schema, material_type_b = type_b.material_type(schema)

    def labels_compatible(t_a: Type, t_b: Type) -> bool:
        if t_a == t_b:
            return True

        if isinstance(t_a, Tuple) and isinstance(t_b, Tuple):
            if t_a.get_is_persistent(schema) and t_b.get_is_persistent(schema):
                return False

            # For tuples, we also (recursively) check that the element
            # names match
            return all(
                name_a == name_b and labels_compatible(st_a, st_b)
                for (name_a, st_a), (name_b, st_b)
                in zip(t_a.iter_subtypes(schema), t_b.iter_subtypes(schema))
            )
        elif isinstance(t_a, Array) and isinstance(t_b, Array):
            t_as = t_a.get_element_type(schema)
            t_bs = t_b.get_element_type(schema)
            return (
                not isinstance(t_as, Tuple) and labels_compatible(t_as, t_bs)
            )
        elif isinstance(t_a, Range) and isinstance(t_b, Range):
            t_as = t_a.get_element_type(schema)
            t_bs = t_b.get_element_type(schema)
            return labels_compatible(t_as, t_bs)
        elif isinstance(t_a, MultiRange) and isinstance(t_b, MultiRange):
            t_as = t_a.get_element_type(schema)
            t_bs = t_b.get_element_type(schema)
            return labels_compatible(t_as, t_bs)
        else:
            return True

    return (
        material_type_b.issubclass(schema, material_type_a)
        and labels_compatible(material_type_a, material_type_b)
    )
Check whether two types have compatible SQL representations. EdgeQL implicit casts need to be turned into explicit casts in some places, since the semantics differ from SQL's.
is_type_compatible
python
geldata/gel
edb/schema/types.py
https://github.com/geldata/gel/blob/master/edb/schema/types.py
Apache-2.0
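The recursive element-name walk in labels_compatible can be mimicked with plain dicts standing in for named tuple types (illustrative only, not the schema model):

# Illustrative sketch: two named-tuple types are compatible only if
# element names match pairwise and element types are compatible.

def labels_compatible(t_a, t_b) -> bool:
    if t_a == t_b:
        return True
    if isinstance(t_a, dict) and isinstance(t_b, dict):
        return (
            list(t_a) == list(t_b)  # same element names, same order
            and all(labels_compatible(t_a[k], t_b[k]) for k in t_a)
        )
    return False

assert labels_compatible({'x': 'int64'}, {'x': 'int64'})
# Nested element names differ, so the types are not compatible.
assert not labels_compatible({'pt': {'x': 'int64'}}, {'pt': {'y': 'int64'}})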
def get_addon_columns(
    self, schema: s_schema.Schema
) -> Sequence[Tuple[str, str, Tuple[str, str]]]:
    """
    Returns a list of columns that are present in the backing table of
    this source, apart from the columns for pointers.
    """
    res = []
    from edb.common import debug

    if not debug.flags.zombodb:
        fts_index, _ = indexes.get_effective_object_index(
            schema, self, sn.QualName("std::fts", "index")
        )
        if fts_index:
            res.append(
                (
                    '__fts_document__',
                    '__fts_document__',
                    (
                        'pg_catalog',
                        'tsvector',
                    ),
                )
            )

    ext_ai_index, _ = indexes.get_effective_object_index(
        schema, self, sn.QualName("ext::ai", "index")
    )
    if ext_ai_index:
        idx_id = indexes.get_ai_index_id(schema, ext_ai_index)
        dimensions = ext_ai_index.must_get_json_annotation(
            schema,
            sn.QualName("ext::ai", "embedding_dimensions"),
            int,
        )
        res.append(
            (
                f'__ext_ai_{idx_id}_embedding__',
                f'__ext_ai_{idx_id}_embedding__',
                (
                    'edgedb',
                    f'vector({dimensions})',
                ),
            )
        )

    return res
Returns a list of columns that are present in the backing table of this source, apart from the columns for pointers.
get_addon_columns
python
geldata/gel
edb/schema/sources.py
https://github.com/geldata/gel/blob/master/edb/schema/sources.py
Apache-2.0
def _delete_to_delist(
    delete: sd.DeleteObject[so.Object],
    schema: s_schema.Schema,
) -> s_schema.Schema:
    """Delist all of the objects mentioned in a delete tree.

    This removes their names from the schema but preserves
    the actual objects.
    """
    schema = schema.delist(delete.classname)
    for sub in delete.get_subcommands(type=sd.DeleteObject):
        schema = _delete_to_delist(sub, schema)
    return schema
Delist all of the objects mentioned in a delete tree. This removes their names from the schema but preserves the actual objects.
_delete_to_delist
python
geldata/gel
edb/schema/objtypes.py
https://github.com/geldata/gel/blob/master/edb/schema/objtypes.py
Apache-2.0
def validate_object(
    self,
    schema: s_schema.Schema,
    context: sd.CommandContext,
) -> None:
    """Check that link definition is sound."""
    super().validate_object(schema, context)

    scls = self.scls
    assert isinstance(scls, Link)

    if not scls.get_owned(schema):
        return

    target = scls.get_target(schema)
    assert target is not None

    if not target.is_object_type():
        span = self.get_attribute_span('target')
        if isinstance(target, s_types.Array):
            # Custom error message for link -> array<...>
            link_dn = scls.get_displayname(schema)
            el_dn = target.get_subtypes(schema)[0].get_displayname(schema)
            hint = f"did you mean 'multi link {link_dn} -> {el_dn}'?"
        else:
            hint = None

        raise errors.InvalidLinkTargetError(
            f'invalid link target type, expected object type, got '
            f'{target.get_verbosename(schema)}',
            span=span,
            hint=hint,
        )

    if target.is_free_object_type(schema):
        span = self.get_attribute_span('target')
        raise errors.InvalidLinkTargetError(
            f'{target.get_verbosename(schema)} is not a valid link target',
            span=span,
        )

    if (
        not scls.is_pure_computable(schema)
        and not scls.get_from_alias(schema)
        and target.is_view(schema)
    ):
        span = self.get_attribute_span('target')
        raise errors.InvalidLinkTargetError(
            f'invalid link type: {target.get_displayname(schema)!r}'
            f' is an expression alias, not a proper object type',
            span=span,
        )

    if (
        scls.get_required(schema)
        and scls.get_on_target_delete(schema) ==
            qltypes.LinkTargetDeleteAction.DeferredRestrict
    ):
        raise errors.InvalidLinkTargetError(
            'required links may not use `on target delete '
            'deferred restrict`',
            span=self.span,
        )
Check that link definition is sound.
validate_object
python
geldata/gel
edb/schema/links.py
https://github.com/geldata/gel/blob/master/edb/schema/links.py
Apache-2.0
def _cmd_tree_from_ast(
    cls,
    schema: s_schema.Schema,
    astnode: qlast.DDLOperation,
    context: sd.CommandContext,
) -> sd.Command:
    """
    Converts a single `qlast.RewriteCommand` into multiple
    `schema.RewriteCommand`s, one for each kind.
    """
    group = sd.CommandGroup()

    assert isinstance(astnode, qlast.RewriteCommand)

    for kind in astnode.kinds:
        # use kind for the name
        newnode = astnode.replace(
            name=qlast.ObjectRef(module='__', name=str(kind)),
            kinds=kind,
        )

        cmd = super()._cmd_tree_from_ast(schema, newnode, context)
        assert isinstance(cmd, RewriteCommand)

        cmd.set_attribute_value('kind', kind)
        group.add(cmd)
    return group
Converts a single `qlast.RewriteCommand` into multiple `schema.RewriteCommand`s, one for each kind.
_cmd_tree_from_ast
python
geldata/gel
edb/schema/rewrites.py
https://github.com/geldata/gel/blob/master/edb/schema/rewrites.py
Apache-2.0
def merge_required(
    ptr: Pointer,
    bases: List[Pointer],
    field_name: str,
    *,
    ignore_local: bool = False,
    schema: s_schema.Schema,
) -> Optional[bool]:
    """Merge function for the REQUIRED qualifier on links and properties."""
    local_required = ptr.get_explicit_local_field_value(
        schema, field_name, None)

    if ignore_local or local_required is None:
        # No explicit local declaration, so True if any of the bases
        # have it as required, and False otherwise.
        return utils.merge_reduce(
            ptr,
            bases,
            field_name=field_name,
            ignore_local=ignore_local,
            schema=schema,
            f=operator.or_,
            type=bool,
        )
    elif local_required:
        # If set locally and True, just use that.
        assert isinstance(local_required, bool)
        return local_required
    else:
        # Explicitly set locally as False, check if any of the bases
        # are REQUIRED, and if so, raise.
        for base in bases:
            base_required = base.get_field_value(schema, field_name)
            if base_required:
                ptr_repr = ptr.get_verbosename(schema, with_parent=True)
                base_repr = base.get_verbosename(schema, with_parent=True)
                raise errors.SchemaDefinitionError(
                    f'cannot make {ptr_repr} optional: its parent '
                    f'{base_repr} is defined as required'
                )

        return False
Merge function for the REQUIRED qualifier on links and properties.
merge_required
python
geldata/gel
edb/schema/pointers.py
https://github.com/geldata/gel/blob/master/edb/schema/pointers.py
Apache-2.0
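The merge rule above reduces to: inherit the OR of the bases when there is no local declaration, accept a local True, and reject a local False that contradicts a required base. A compact sketch with plain booleans standing in for pointers (illustrative only):

# Illustrative sketch of the REQUIRED merge rule, with a pointer's
# local declaration modeled as an Optional[bool].
import functools
import operator

def merge_required(local: "bool | None", bases: list[bool]) -> bool:
    if local is None:
        # No local declaration: required if any base is required.
        return functools.reduce(operator.or_, bases, False)
    if local:
        return True
    if any(bases):
        raise ValueError(
            'cannot make pointer optional: a base is defined as required')
    return False

assert merge_required(None, [False, True]) is True
assert merge_required(True, [False]) is True
try:
    merge_required(False, [True])  # contradicts a required base
except ValueError:
    pass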
def _get_target_name_in_diff(
    *,
    schema: s_schema.Schema,
    orig_schema: Optional[s_schema.Schema],
    object: Optional[so.Object],
    orig_object: Optional[so.Object],
) -> sn.Name:
    """Compute the target type name for a fill/conv expr.

    Called from record_diff_annotations to produce annotation
    information for migrations. The trickiness here is that this
    information is generated when producing the diff, where we have
    somewhat limited information.
    """
    # Prefer getting the target type from the original object instead
    # of the new one, for a cheesy reason: if we change both
    # required/cardinality and target type, we do the cardinality
    # change before the cast, for reasons of alphabetical order.
    if isinstance(orig_object, Pointer):
        assert orig_schema
        target = orig_object.get_target(orig_schema)
        return not_none(target).get_name(orig_schema)
    else:
        assert isinstance(object, Pointer)
        target = object.get_target(schema)
        return not_none(target).get_name(schema)
Compute the target type name for a fill/conv expr. Called from record_diff_annotations to produce annotation information for migrations. The trickiness here is that this information is generated when producing the diff, where we have somewhat limited information.
_get_target_name_in_diff
python
geldata/gel
edb/schema/pointers.py
https://github.com/geldata/gel/blob/master/edb/schema/pointers.py
Apache-2.0
def get_nearest_defined(self, schema: s_schema.Schema) -> Pointer:
    """
    Find the pointer definition site.

    For view pointers, find the place where the pointer is "really"
    defined, that is, either its schema definition site or where it
    last had an expression defining it.
    """
    ptrcls = self
    while (
        ptrcls.get_is_derived(schema)
        and not ptrcls.get_defined_here(schema)
        # schema defined computeds don't have the ephemeral defined_here
        # set, but they do have expr set, so we check that also.
        and not ptrcls.get_expr(schema)
        and (bases := ptrcls.get_bases(schema).objects(schema))
        and len(bases) == 1
        and bases[0].get_source(schema)
    ):
        ptrcls = bases[0]

    return ptrcls
Find the pointer definition site. For view pointers, find the place where the pointer is "really" defined, that is, either its schema definition site or where it last had an expression defining it.
get_nearest_defined
python
geldata/gel
edb/schema/pointers.py
https://github.com/geldata/gel/blob/master/edb/schema/pointers.py
Apache-2.0
def get_schema_reflection_default(
    self,
    schema: s_schema.Schema,
) -> Optional[str]:
    """Return the default expression if this is a reflection of a
    schema class field and the field has a defined default value.
    """
    ptr = self.get_nearest_non_derived_parent(schema)

    src = ptr.get_source(schema)
    if src is None:
        # This is an abstract pointer
        return None

    ptr_name = ptr.get_name(schema)
    if ptr_name.module not in {'schema', 'sys', 'cfg'}:
        # This isn't a reflection type
        return None

    if isinstance(src, Pointer):
        # This is a link property
        tgt = src.get_target(schema)
        assert tgt is not None
        schema_objtype = tgt
    else:
        assert isinstance(src, s_types.Type)
        schema_objtype = src

    assert isinstance(schema_objtype, so.QualifiedObject)
    src_name = schema_objtype.get_name(schema)
    mcls = so.ObjectMeta.maybe_get_schema_class(src_name.name)
    if mcls is None:
        # This schema class is not (publicly) reflected.
        return None
    fname = ptr.get_shortname(schema).name
    if not mcls.has_field(fname):
        # This pointer is not a schema field.
        return None
    field = mcls.get_field(fname)
    if not isinstance(field, so.SchemaField):
        # Not a schema field, no default possible.
        return None
    f_default = field.default
    if (
        f_default is None
        or f_default is so.NoDefault
    ):
        # No explicit default value.
        return None

    tgt = ptr.get_target(schema)
    assert tgt is not None

    if f_default is so.DEFAULT_CONSTRUCTOR:
        if (
            issubclass(
                field.type,
                (collections.abc.Set, collections.abc.Sequence),
            )
            and not issubclass(field.type, (str, bytes))
        ):
            return f'<{tgt.get_displayname(schema)}>[]'
        else:
            return None

    default = qlquote.quote_literal(json.dumps(f_default))

    if tgt.is_enum(schema):
        return f'<{tgt.get_displayname(schema)}><str>to_json({default})'
    else:
        return f'<{tgt.get_displayname(schema)}>to_json({default})'
Return the default expression if this is a reflection of a schema class field and the field has a defined default value.
get_schema_reflection_default
python
geldata/gel
edb/schema/pointers.py
https://github.com/geldata/gel/blob/master/edb/schema/pointers.py
Apache-2.0
def validate_object(
    self,
    schema: s_schema.Schema,
    context: sd.CommandContext,
) -> None:
    """Check that pointer definition is sound."""
    from edb.ir import ast as irast

    referrer_ctx = self.get_referrer_context(context)
    if referrer_ctx is None:
        return

    self._validate_computables(schema, context)

    scls: Pointer = self.scls
    if not scls.get_owned(schema):
        return

    default_expr: Optional[s_expr.Expression] = scls.get_default(schema)

    if default_expr is not None:
        if not default_expr.irast:
            default_expr = self._compile_expr(
                schema, context, default_expr, detached=True,
            )
            assert default_expr.irast

        if scls.is_id_pointer(schema):
            self._check_id_default(
                schema, context, default_expr.irast.expr)

        span = self.get_attribute_span('default')
        ir = default_expr.irast
        default_schema = ir.schema
        default_type = ir.stype
        assert default_type is not None
        ptr_target = scls.get_target(schema)
        assert ptr_target is not None

        if (
            default_type.is_view(default_schema)
            # Using an alias/global always creates a new subtype view,
            # but we want to allow those here, so check whether there
            # is a shape more directly.
            and not (
                len(shape := ir.view_shapes.get(default_type, [])) == 1
                and shape[0].is_id_pointer(default_schema)
            )
        ):
            raise errors.SchemaDefinitionError(
                f'default expression may not include a shape',
                span=span,
            )
        if not default_type.assignment_castable_to(
                ptr_target, default_schema):
            raise errors.SchemaDefinitionError(
                f'default expression is of invalid type: '
                f'{default_type.get_displayname(default_schema)}, '
                f'expected {ptr_target.get_displayname(schema)}',
                span=span,
            )
        # "required" status of defaults should not be enforced
        # because it's impossible to actually guarantee that any
        # SELECT involving a path is non-empty
        ptr_cardinality = scls.get_cardinality(schema)
        _default_required, default_cardinality = \
            default_expr.irast.cardinality.to_schema_value()

        if (ptr_cardinality is qltypes.SchemaCardinality.One
                and default_cardinality != ptr_cardinality):
            raise errors.SchemaDefinitionError(
                f'possibly more than one element returned by '
                f'the default expression for '
                f'{scls.get_verbosename(schema)} declared as '
                f"'single'",
                span=span,
            )

        # prevent references to local links, only properties
        pointers = ast.find_children(default_expr.irast, irast.Pointer)
        scls_source = scls.get_source(schema)
        assert scls_source
        for pointer in pointers:
            if pointer.source.typeref.id != scls_source.id:
                continue
            if not isinstance(pointer.ptrref, irast.PointerRef):
                continue
            s_pointer = schema.get_by_id(pointer.ptrref.id, type=Pointer)

            card = s_pointer.get_cardinality(schema)
            if s_pointer.is_property(schema) and card.is_multi():
                raise errors.SchemaDefinitionError(
                    f"default expression cannot refer to multi properties "
                    "of inserted object",
                    span=span,
                    hint="this is a temporary implementation restriction",
                )

            if not s_pointer.is_property(schema):
                raise errors.SchemaDefinitionError(
                    f"default expression cannot refer to links "
                    "of inserted object",
                    span=span,
                    hint='this is a temporary implementation restriction'
                )

    if (
        self.scls.get_rewrite(schema, qltypes.RewriteKind.Update)
        or self.scls.get_rewrite(schema, qltypes.RewriteKind.Insert)
    ):
        if self.scls.get_cardinality(schema).is_multi():
            raise errors.SchemaDefinitionError(
                f"cannot specify a rewrite for "
                f"{scls.get_verbosename(schema, with_parent=True)} "
                f"because it is multi",
                span=self.span,
                hint='this is a temporary implementation restriction'
            )

        if self.scls.has_user_defined_properties(schema):
            raise errors.SchemaDefinitionError(
                f"cannot specify a rewrite for "
                f"{scls.get_verbosename(schema, with_parent=True)} "
                f"because it has link properties",
                span=self.span,
                hint='this is a temporary implementation restriction'
            )
Check that pointer definition is sound.
validate_object
python
geldata/gel
edb/schema/pointers.py
https://github.com/geldata/gel/blob/master/edb/schema/pointers.py
Apache-2.0
def _check_id_default(
    self,
    schema: s_schema.Schema,
    context: sd.CommandContext,
    expr: irast.Base,
) -> None:
    """If default is being set on id, check it against a whitelist"""
    from edb.ir import ast as irast
    from edb.ir import utils as irutils

    # If we add more, we probably want a better mechanism
    ID_ALLOWLIST = (
        'std::uuid_generate_v1mc',
        'std::uuid_generate_v4',
    )

    while (
        isinstance(expr, irast.Set)
        and expr.expr
        and irutils.is_trivial_select(expr.expr)
    ):
        expr = expr.expr.result

    if not (
        isinstance(expr, irast.Set)
        and isinstance(expr.expr, irast.FunctionCall)
        and str(expr.expr.func_shortname) in ID_ALLOWLIST
    ):
        span = self.get_attribute_span('default')
        options = ', '.join(ID_ALLOWLIST)
        raise errors.SchemaDefinitionError(
            "invalid default value for 'id' property",
            hint=f'default must be a call to one of: {options}',
            span=span,
        )
If default is being set on id, check it against a whitelist
_check_id_default
python
geldata/gel
edb/schema/pointers.py
https://github.com/geldata/gel/blob/master/edb/schema/pointers.py
Apache-2.0
def _process_create_or_alter_ast(
    self,
    schema: s_schema.Schema,
    astnode: qlast.CreateConcretePointer,
    context: sd.CommandContext,
) -> None:
    """Handle the CREATE {PROPERTY|LINK} AST node.

    This may be called in the context of either Create or Alter.
    """
    from edb.schema import sources as s_sources

    if astnode.is_required is not None:
        self.set_attribute_value(
            'required',
            astnode.is_required,
            span=astnode.span,
        )

    if astnode.cardinality is not None:
        if isinstance(self, sd.CreateObject):
            self.set_attribute_value(
                'cardinality',
                astnode.cardinality,
                span=astnode.span,
            )
        else:
            handler = sd.get_special_field_alter_handler_for_context(
                'cardinality', context)
            assert handler is not None
            set_field = qlast.SetField(
                name='cardinality',
                value=qlast.Constant.string(
                    str(astnode.cardinality),
                ),
                special_syntax=True,
                span=astnode.span,
            )
            apc = handler._cmd_tree_from_ast(schema, set_field, context)
            self.add(apc)

    parent_ctx = self.get_referrer_context_or_die(context)
    source_name = context.get_referrer_name(parent_ctx)
    self.set_attribute_value(
        'source',
        so.ObjectShell(name=source_name, schemaclass=s_sources.Source),
    )

    target_ref: Union[None, s_types.TypeShell[s_types.Type], ComputableRef]

    if astnode.target:
        if isinstance(astnode.target, qlast.TypeExpr):
            target_ref = utils.ast_to_type_shell(
                astnode.target,
                metaclass=s_types.Type,
                modaliases=context.modaliases,
                module=source_name.module,
                schema=schema,
            )
        else:
            # computable
            qlcompiler.normalize(
                astnode.target,
                schema=schema,
                modaliases=context.modaliases
            )
            target_ref = ComputableRef(astnode.target)
    else:
        # Target is inherited.
        target_ref = None

    if isinstance(self, sd.CreateObject):
        assert astnode.target is not None
        self.set_attribute_value(
            'target',
            target_ref,
            span=astnode.target.span,
        )
    elif target_ref is not None:
        assert astnode.target is not None
        self.set_attribute_value(
            'target',
            target_ref,
            span=astnode.target.span,
        )
Handle the CREATE {PROPERTY|LINK} AST node. This may be called in the context of either Create or Alter.
_process_create_or_alter_ast
python
geldata/gel
edb/schema/pointers.py
https://github.com/geldata/gel/blob/master/edb/schema/pointers.py
Apache-2.0
def _process_alter_ast(
    self,
    schema: s_schema.Schema,
    astnode: qlast.DDLOperation,
    context: sd.CommandContext,
) -> None:
    """Handle the ALTER {PROPERTY|LINK} AST node."""
    expr_cmd = qlast.get_ddl_field_command(astnode, 'expr')
    if expr_cmd is not None:
        expr = expr_cmd.value
        if expr is not None:
            assert isinstance(expr, qlast.Expr)
            qlcompiler.normalize(
                expr,
                schema=schema,
                modaliases=context.modaliases
            )
            target_ref = ComputableRef(
                expr,
                specified_type=self.get_attribute_value('target'),
            )
            self.set_attribute_value(
                'target',
                target_ref,
                span=expr.span,
            )
            self.discard_attribute('expr')
Handle the ALTER {PROPERTY|LINK} AST node.
_process_alter_ast
python
geldata/gel
edb/schema/pointers.py
https://github.com/geldata/gel/blob/master/edb/schema/pointers.py
Apache-2.0
def is_parent_ref(
    self,
    schema: s_schema.Schema,
    reference: so.Object,
) -> bool:
    """Return True if *reference* is a structural ancestor of self"""
    obj = self.get_referrer(schema)
    while obj is not None:
        if obj == reference:
            return True
        elif isinstance(obj, ReferencedObject):
            obj = obj.get_referrer(schema)
        else:
            break

    return False
Return True if *reference* is a structural ancestor of self
is_parent_ref
python
geldata/gel
edb/schema/referencing.py
https://github.com/geldata/gel/blob/master/edb/schema/referencing.py
Apache-2.0
def should_propagate(self, schema: s_schema.Schema) -> bool:
    """Whether this object should be propagated to subtypes of the owner"""
    return True
Whether this object should be propagated to subtypes of the owner
should_propagate
python
geldata/gel
edb/schema/referencing.py
https://github.com/geldata/gel/blob/master/edb/schema/referencing.py
Apache-2.0
def get_name_impacting_ancestors(
    self: ReferencedInheritingObjectT,
    schema: s_schema.Schema,
) -> List[ReferencedInheritingObjectT]:
    """Return ancestors that have an impact on the name of this object.

    For most types this is the same as implicit ancestors.
    (For constraints it is not.)
    """
    return self.get_implicit_ancestors(schema)
Return ancestors that have an impact on the name of this object. For most types this is the same as implicit ancestors. (For constraints it is not.)
get_name_impacting_ancestors
python
geldata/gel
edb/schema/referencing.py
https://github.com/geldata/gel/blob/master/edb/schema/referencing.py
Apache-2.0
def get_referrer_context(
    cls,
    context: sd.CommandContext,
) -> Optional[sd.ObjectCommandContext[so.Object]]:
    """Get the context of the command for the referring object, if any.

    E.g. for a `create/alter/etc concrete link` command this would
    be the context of the `create/alter/etc type` command.
    """
    ctxcls = cls.get_referrer_context_class()
    return context.get(ctxcls)
Get the context of the command for the referring object, if any. E.g. for a `create/alter/etc concrete link` command this would be the context of the `create/alter/etc type` command.
get_referrer_context
python
geldata/gel
edb/schema/referencing.py
https://github.com/geldata/gel/blob/master/edb/schema/referencing.py
Apache-2.0
def _propagate_ref_field_alter_in_inheritance(
    self,
    schema: s_schema.Schema,
    context: sd.CommandContext,
    field_name: str,
    require_inheritance_consistency: bool = True,
) -> None:
    """Validate and propagate a field alteration to children.

    This method also performs consistency checks against base
    objects to ensure that the new value matches that of the
    parents.
    """
    scls = self.scls

    currently_altered = context.change_log[type(scls), field_name]
    currently_altered.add(scls)

    if require_inheritance_consistency:
        implicit_bases = scls.get_implicit_bases(schema)
        non_altered_bases = []

        value = scls.get_field_value(schema, field_name)
        for base in {
            x for x in implicit_bases if x not in currently_altered
        }:
            base_value = base.get_field_value(schema, field_name)

            if isinstance(value, so.SubclassableObject):
                if not value.issubclass(schema, base_value):
                    non_altered_bases.append(base)
            else:
                if value != base_value:
                    non_altered_bases.append(base)

        # This object is inherited from one or more ancestors that
        # are not altered in the same op, and this is an error.
        if non_altered_bases:
            bases_str = ', '.join(
                b.get_verbosename(schema, with_parent=True)
                for b in non_altered_bases
            )

            vn = scls.get_verbosename(schema, with_parent=True)
            desc = self.get_friendly_description(
                schema=schema,
                object_desc=f'inherited {vn}',
            )

            raise errors.SchemaDefinitionError(
                f'cannot {desc}',
                details=(
                    f'{vn} is inherited from '
                    f'{bases_str}'
                ),
                span=self.span,
            )

    value = self.get_attribute_value(field_name)

    def _propagate(
        alter_cmd: sd.ObjectCommand[so.Object],
        refname: sn.Name,
    ) -> None:
        assert isinstance(alter_cmd, sd.QualifiedObjectCommand)
        s_t: sd.ObjectCommand[ReferencedInheritingObjectT]
        if isinstance(self, sd.AlterSpecialObjectField):
            s_t = self.clone(alter_cmd.classname)
        else:
            s_t = type(self)(classname=alter_cmd.classname)
        orig_value = scls.get_explicit_field_value(
            schema, field_name, default=None)
        s_t.set_attribute_value(
            field_name,
            value,
            orig_value=orig_value,
            inherited=True,
        )
        alter_cmd.add(s_t)

    self._propagate_ref_op(schema, context, scls, cb=_propagate)
Validate and propagate a field alteration to children. This method also performs consistency checks against base objects to ensure that the new value matches that of the parents.
_propagate_ref_field_alter_in_inheritance
python
geldata/gel
edb/schema/referencing.py
https://github.com/geldata/gel/blob/master/edb/schema/referencing.py
Apache-2.0
def validate_object(
    self,
    schema: s_schema.Schema,
    context: sd.CommandContext,
) -> None:
    """Check that property definition is sound."""
    super().validate_object(schema, context)

    scls = self.scls
    if not scls.get_owned(schema):
        return

    if scls.is_special_pointer(schema):
        return

    if (
        scls.is_link_property(schema)
        and not scls.is_pure_computable(schema)
    ):
        # link properties cannot be required or multi
        if self.get_attribute_value('required'):
            raise errors.InvalidPropertyDefinitionError(
                'link properties cannot be required',
                span=self.span,
            )
        if (self.get_attribute_value('cardinality')
                is qltypes.SchemaCardinality.Many):
            raise errors.InvalidPropertyDefinitionError(
                "multi properties aren't supported for links",
                span=self.span,
            )

    target_type = scls.get_target(schema)
    if target_type is None:
        raise TypeError(f'missing target type in scls {scls}')

    if target_type.is_polymorphic(schema):
        span = self.get_attribute_span('target')
        raise errors.InvalidPropertyTargetError(
            f'invalid property type: '
            f'{target_type.get_verbosename(schema)} '
            f'is a generic type',
            span=span,
        )

    if (target_type.is_object_type()
            or (isinstance(target_type, s_types.Collection)
                and target_type.contains_object(schema))):
        span = self.get_attribute_span('target')
        raise errors.InvalidPropertyTargetError(
            f'invalid property type: expected a scalar type, '
            f'or a scalar collection, got '
            f'{target_type.get_verbosename(schema)}',
            span=span,
        )
Check that property definition is sound.
validate_object
python
geldata/gel
edb/schema/properties.py
https://github.com/geldata/gel/blob/master/edb/schema/properties.py
Apache-2.0
def parse(self) -> qlast_.Expr:
    """Parse the expression text into an AST. Cached."""
    if self._qlast is None:
        self._qlast = qlparser.parse_fragment(
            self.text,
            filename=f'<{self.origin}>' if self.origin else "")
    return self._qlast
Parse the expression text into an AST. Cached.
parse
python
geldata/gel
edb/schema/expr.py
https://github.com/geldata/gel/blob/master/edb/schema/expr.py
Apache-2.0
def set_origin(self, id: uuid.UUID, field: str) -> None:
    """
    Set the origin of the expression based on field and enclosing object.

    We base the origin on the id of the object, not on its name,
    because these strings should be useful to a client, which can't
    do a lookup based on the mangled internal names.
    """
    self.origin = f'{id} {field}'
Set the origin of the expression based on field and enclosing object. We base the origin on the id of the object, not on its name, because these strings should be useful to a client, which can't do a lookup based on the mangled internal names.
set_origin
python
geldata/gel
edb/schema/expr.py
https://github.com/geldata/gel/blob/master/edb/schema/expr.py
Apache-2.0
def ir_statement(self) -> irast_.Statement:
    """Assert this expr is a compiled EdgeQL statement and return its IR"""
    from edb.ir import ast as irast_

    if not self.is_compiled():
        raise AssertionError('expected a compiled expression')
    ir = self.irast
    if not isinstance(ir, irast_.Statement):
        raise AssertionError(
            'expected the result of an expression to be a Statement')
    return ir
Assert this expr is a compiled EdgeQL statement and return its IR
ir_statement
python
geldata/gel
edb/schema/expr.py
https://github.com/geldata/gel/blob/master/edb/schema/expr.py
Apache-2.0
def compare_values(
    cls: Type[ExpressionList],
    ours: Optional[ExpressionList],
    theirs: Optional[ExpressionList],
    *,
    our_schema: s_schema.Schema,
    their_schema: s_schema.Schema,
    context: so.ComparisonContext,
    compcoef: float,
) -> float:
    """See the comment in Object.compare_values"""
    if not ours and not theirs:
        basecoef = 1.0
    elif (not ours or not theirs) or (len(ours) != len(theirs)):
        basecoef = 0.2
    else:
        similarity = []

        for expr1, expr2 in zip(ours, theirs):
            similarity.append(
                Expression.compare_values(
                    expr1, expr2,
                    our_schema=our_schema,
                    their_schema=their_schema,
                    context=context,
                    compcoef=compcoef))

        basecoef = sum(similarity) / len(similarity)

    return basecoef + (1 - basecoef) * compcoef
See the comment in Object.compare_values
compare_values
python
geldata/gel
edb/schema/expr.py
https://github.com/geldata/gel/blob/master/edb/schema/expr.py
Apache-2.0
def compare_values(
    cls: Type[ExpressionDict],
    ours: Optional[ExpressionDict],
    theirs: Optional[ExpressionDict],
    *,
    our_schema: s_schema.Schema,
    their_schema: s_schema.Schema,
    context: so.ComparisonContext,
    compcoef: float,
) -> float:
    """See the comment in Object.compare_values"""
    if not ours and not theirs:
        basecoef = 1.0
    elif (not ours or not theirs) or (len(ours) != len(theirs)):
        basecoef = 0.2
    elif set(ours.keys()) != set(theirs.keys()):
        # Same length dicts can still have different keys, which is
        # similar to having mismatched length.
        basecoef = 0.2
    else:
        # We have the same keys, so just compare the values.
        similarity = []

        for ((_, expr1), (_, expr2)) in zip(
            sorted(ours.items()), sorted(theirs.items())
        ):
            similarity.append(
                Expression.compare_values(
                    expr1, expr2,
                    our_schema=our_schema,
                    their_schema=their_schema,
                    context=context,
                    compcoef=compcoef))

        basecoef = sum(similarity) / len(similarity)

    return basecoef + (1 - basecoef) * compcoef
See the comment in Object.compare_values
compare_values
python
geldata/gel
edb/schema/expr.py
https://github.com/geldata/gel/blob/master/edb/schema/expr.py
Apache-2.0
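Both compare_values overloads above funnel into the same scoring rule: average the pairwise similarities, then squash the result toward 1 by compcoef. As a worked bit of arithmetic (illustrative numbers):

# Illustrative arithmetic for the similarity scoring used above:
# basecoef is the mean pairwise similarity; the final score is
# basecoef + (1 - basecoef) * compcoef.

def score(similarities: list[float], compcoef: float) -> float:
    basecoef = sum(similarities) / len(similarities)
    return basecoef + (1 - basecoef) * compcoef

# Two identical exprs and one 60%-similar expr, with compcoef = 0.5:
# mean = 2.6 / 3 = 0.8667; final = 0.8667 + 0.1333 * 0.5 = 0.9333
assert abs(score([1.0, 1.0, 0.6], 0.5) - 0.9333333333333333) < 1e-9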
def get_expr_referrers(
    schema: s_schema.Schema, obj: so.Object
) -> Dict[so.Object, List[str]]:
    """Return schema referrers with refs in expressions."""
    refs: Dict[Tuple[Type[so.Object], str], FrozenSet[so.Object]] = (
        schema.get_referrers_ex(obj))
    result: Dict[so.Object, List[str]] = {}

    for (mcls, fn), referrers in refs.items():
        field = mcls.get_field(fn)
        if issubclass(field.type, (Expression, ExpressionList)):
            for ref in referrers:
                result.setdefault(ref, []).append(fn)

    return result
Return schema referrers with refs in expressions.
get_expr_referrers
python
geldata/gel
edb/schema/expr.py
https://github.com/geldata/gel/blob/master/edb/schema/expr.py
Apache-2.0
def schema_reduce(self) -> typing.Any:
    """Return a primitive representation of the object.

    The return value must consist of primitive Python objects.
    """
    raise NotImplementedError
Return a primitive representation of the object. The return value must consist of primitive Python objects.
schema_reduce
python
geldata/gel
edb/schema/abc.py
https://github.com/geldata/gel/blob/master/edb/schema/abc.py
Apache-2.0
def schema_restore(
    cls,
    data: typing.Any,
) -> Reducible:
    """Restore object from data returned by *schema_reduce*."""
    raise NotImplementedError
Restore object from data returned by *schema_reduce*.
schema_restore
python
geldata/gel
edb/schema/abc.py
https://github.com/geldata/gel/blob/master/edb/schema/abc.py
Apache-2.0
def find_object_param_overloads(
    self,
    schema: s_schema.Schema,
    *,
    span: Optional[parsing.Span] = None,
) -> Optional[Tuple[List[Function], int]]:
    """Find if this function overloads another in object parameter.

    If so, check the following rules:

        - in the signatures of functions, only the overloaded object
          parameter must differ; the number and the types of the other
          parameters must be the same across all object-overloaded
          functions;
        - the names of arguments in object-overloaded functions must
          match.

    If there are object overloads, return a tuple containing the list
    of overloaded functions and the position of the overloaded parameter.
    """
    params = self.get_params(schema)
    if not params.has_objects(schema):
        return None

    new_params = params.objects(schema)
    new_pt = tuple(p.get_type(schema) for p in new_params)

    diff_param = -1
    overloads = []
    sn = self.get_shortname(schema)
    for f in schema.get_functions(sn):
        if f == self:
            continue

        f_params = f.get_params(schema)
        if not f_params.has_objects(schema):
            continue

        ext_params = f_params.objects(schema)
        ext_pt = (p.get_type(schema) for p in ext_params)

        this_diff_param = -1
        non_obj_param_diff = False
        multi_overload = False

        for i, (new_t, ext_t) in enumerate(zip(new_pt, ext_pt)):
            if new_t != ext_t:
                if new_t.is_object_type() and ext_t.is_object_type():
                    if (
                        this_diff_param != -1
                        or (
                            this_diff_param != -1
                            and diff_param != -1
                            and diff_param != this_diff_param
                        )
                        or non_obj_param_diff
                    ):
                        multi_overload = True
                        break
                    else:
                        this_diff_param = i
                else:
                    non_obj_param_diff = True
                    if this_diff_param != -1:
                        multi_overload = True
                        break

        if this_diff_param != -1:
            if not multi_overload:
                multi_overload = len(new_params) != len(ext_params)

            if multi_overload:
                # Multiple dispatch of object-taking functions is
                # not supported.
                my_sig = self.get_signature_as_str(schema)
                other_sig = f.get_signature_as_str(schema)
                raise errors.UnsupportedFeatureError(
                    f'cannot create the `{my_sig}` function: '
                    f'overloading an object type-receiving '
                    f'function with differences in the remaining '
                    f'parameters is not supported',
                    span=span,
                    details=(
                        f"Other function is defined as `{other_sig}`"
                    )
                )

            if not all(
                new_p.get_parameter_name(schema)
                == ext_p.get_parameter_name(schema)
                for new_p, ext_p in zip(new_params, ext_params)
            ):
                # And also _all_ parameter names must match due to
                # current implementation constraints.
                my_sig = self.get_signature_as_str(schema)
                other_sig = f.get_signature_as_str(schema)
                raise errors.UnsupportedFeatureError(
                    f'cannot create the `{my_sig}` '
                    f'function: overloading an object type-receiving '
                    f'function with differences in the names of '
                    f'parameters is not supported',
                    span=span,
                    details=(
                        f"Other function is defined as `{other_sig}`"
                    )
                )

            if not all(
                new_p.get_typemod(schema) == ext_p.get_typemod(schema)
                for new_p, ext_p in zip(new_params, ext_params)
            ):
                # And also _all_ parameter type modifiers must match
                # due to current implementation constraints.
                my_sig = self.get_signature_as_str(schema)
                other_sig = f.get_signature_as_str(schema)
                raise errors.UnsupportedFeatureError(
                    f'cannot create the `{my_sig}` '
                    f'function: overloading an object type-receiving '
                    f'function with differences in the type modifiers of '
                    f'parameters is not supported',
                    span=span,
                    details=(
                        f"Other function is defined as `{other_sig}`"
                    )
                )

            if (
                new_params[this_diff_param].get_typemod(schema)
                != ft.TypeModifier.SingletonType
            ):
                my_sig = self.get_signature_as_str(schema)
                raise errors.UnsupportedFeatureError(
                    f'cannot create the `{my_sig}` function: '
                    f'object type-receiving '
                    f'functions may not be overloaded on an OPTIONAL '
                    f'parameter',
                    span=span,
                )

            diff_param = this_diff_param
            overloads.append(f)

    if diff_param == -1:
        return None
    else:
        return (overloads, diff_param)
Find if this function overloads another in object parameter.

If so, check the following rules:
    - in the signatures of functions, only the overloaded object parameter must differ; the number and the types of the other parameters must be the same across all object-overloaded functions;
    - the names of arguments in object-overloaded functions must match.

If there are object overloads, return a tuple containing the list of overloaded functions and the position of the overloaded parameter.
find_object_param_overloads
python
geldata/gel
edb/schema/functions.py
https://github.com/geldata/gel/blob/master/edb/schema/functions.py
Apache-2.0
def compile_function_inline(
    schema: s_schema.Schema,
    context: sd.CommandContext,
    *,
    body: s_expr.Expression,
    func_name: sn.QualName,
    params: FuncParameterList,
    language: qlast.Language,
    return_type: s_types.Type,
    return_typemod: ft.TypeModifier,
    track_schema_ref_exprs: bool=False,
    inlining_context: qlcontext.ContextLevel,
) -> irast.Set:
    """Compile a function body to be inlined."""
    assert language is qlast.Language.EdgeQL

    from edb.edgeql.compiler import dispatch
    from edb.edgeql.compiler import pathctx
    from edb.edgeql.compiler import setgen
    from edb.edgeql.compiler import stmtctx

    ctx = stmtctx.init_context(
        schema=schema,
        options=get_compiler_options(
            schema,
            context,
            func_name=func_name,
            params=params,
            track_schema_ref_exprs=track_schema_ref_exprs,
            inlining_context=inlining_context,
        ),
        inlining_context=inlining_context,
    )

    ql_expr = body.parse()

    # Wrap argument paths
    param_names: set[str] = {
        param.get_parameter_name(inlining_context.env.schema)
        for param in params.objects(inlining_context.env.schema)
    }
    argument_path_wrapper = ArgumentPathWrapper(param_names)
    ql_expr = argument_path_wrapper.visit(ql_expr)

    # Add implicit limit if present
    if ctx.implicit_limit:
        ql_expr = qlast.SelectQuery(result=ql_expr, implicit=True)
        ql_expr.limit = qlast.Constant.integer(ctx.implicit_limit)

    ir_set: irast.Set = dispatch.compile(ql_expr, ctx=ctx)

    # Copy schema back to inlining context
    if inlining_context:
        inlining_context.env.schema = ctx.env.schema

    # Create scoped set if necessary
    if pathctx.get_set_scope(ir_set, ctx=ctx) is None:
        ir_set = setgen.scoped_set(ir_set, ctx=ctx)

    return ir_set
Compile a function body to be inlined.
compile_function_inline
python
geldata/gel
edb/schema/functions.py
https://github.com/geldata/gel/blob/master/edb/schema/functions.py
Apache-2.0
def linearize_delta(
    delta: sd.DeltaRoot,
    old_schema: Optional[s_schema.Schema],
    new_schema: s_schema.Schema,
) -> sd.DeltaRoot:
    """Reorder the *delta* tree in-place to satisfy command dependency order.

    Args:
        delta:
            Input delta command tree.
        old_schema:
            Schema used to resolve original object state.
        new_schema:
            Schema used to resolve final schema state.

    Returns:
        Input delta tree reordered according to topological
        ordering of commands.
    """

    # We take the scatter-sort-gather approach here, where the original
    # tree is broken up into linear branches, which are then sorted
    # and reassembled back into a tree.

    # A map of commands to root->command paths through the tree.
    # Nodes are duplicated so the interior nodes of the path are
    # distinct.
    opmap: Dict[sd.Command, List[sd.Command]] = {}
    strongrefs: Dict[sn.Name, sn.Name] = {}

    for op in _get_sorted_subcommands(delta):
        _break_down(opmap, strongrefs, [delta, op])

    depgraph: DepGraph = {}
    renames: Dict[sn.Name, sn.Name] = {}
    renames_r: Dict[sn.Name, sn.Name] = {}
    deletions: Set[sn.Name] = set()

    for op in opmap:
        if isinstance(op, sd.RenameObject):
            renames[op.classname] = op.new_name
            renames_r[op.new_name] = op.classname
        elif isinstance(op, sd.DeleteObject):
            deletions.add(op.classname)

    for op, opbranch in opmap.items():
        if isinstance(op, sd.AlterObject) and not op.get_subcommands():
            continue

        _trace_op(op, opbranch, depgraph, renames,
                  renames_r, strongrefs, old_schema, new_schema)

    depgraph = dict(
        filter(lambda i: i[1].item != (), depgraph.items()))
    everything = set(depgraph)
    for item in depgraph.values():
        item.deps &= everything
        item.weak_deps &= everything

    try:
        sortedlist = [i[1] for i in topological.sort_ex(depgraph)]
    except topological.CycleError as ex:
        cycle = [depgraph[k].item for k in (ex.item,) + ex.path + (ex.item,)]
        messages = [
            '  ' + nodes[-1].get_friendly_description(parent_op=nodes[-2])
            for nodes in cycle
        ]
        raise errors.SchemaDefinitionError(
            'cannot produce migration because of a dependency cycle:\n'
            + ' depends on\n'.join(messages)
        ) from None

    reconstructed = reconstruct_tree(sortedlist, depgraph)
    delta.replace_all(reconstructed.get_subcommands())
    return delta
Reorder the *delta* tree in-place to satisfy command dependency order.

Args:
    delta: Input delta command tree.
    old_schema: Schema used to resolve original object state.
    new_schema: Schema used to resolve final schema state.

Returns:
    Input delta tree reordered according to topological ordering of commands.
linearize_delta
python
geldata/gel
edb/schema/ordering.py
https://github.com/geldata/gel/blob/master/edb/schema/ordering.py
Apache-2.0
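linearize_delta's scatter-sort-gather hinges on a topological sort that reports cycles with the offending path. A minimal stand-in showing the same sort-or-report-cycle contract (illustrative only, not the edb.common.topological API):

# Illustrative sketch: depth-first topological sort that raises with
# the dependency path when it detects a cycle.

def toposort(deps: dict[str, set[str]]) -> list[str]:
    order: list[str] = []
    state: dict[str, int] = {}  # 0 = unvisited, 1 = in progress, 2 = done

    def visit(node: str, path: list[str]) -> None:
        if state.get(node) == 2:
            return
        if state.get(node) == 1:
            cycle = path[path.index(node):] + [node]
            raise ValueError('dependency cycle: '
                             + ' depends on '.join(cycle))
        state[node] = 1
        for dep in deps.get(node, set()):
            visit(dep, path + [node])
        state[node] = 2
        order.append(node)

    for node in deps:
        visit(node, [])
    return order

# Dependencies are listed before their dependents in the result.
assert toposort({'c': {'b'}, 'b': {'a'}, 'a': set()}) == ['a', 'b', 'c']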
def ok_to_attach_to(
    op_to_attach: sd.Command,
    op_to_attach_to: sd.ObjectCommand[so.Object],
    only_if_confident: bool = False,
) -> bool:
    """Determine if a given command can be attached to another.

    Returns True if *op_to_attach* can be attached to
    *op_to_attach_to* without violating the dependency order.
    """
    if only_if_confident and isinstance(op_to_attach, sd.ObjectCommand):
        # Avoid reattaching the subcommand if confidence is below 100%,
        # so that granular prompts can be generated.
        confidence = op_to_attach.get_annotation('confidence')
        if confidence is not None and confidence < 1.0:
            return False

    tgt_offset = offsets[op_to_attach_to]
    tgt_offset_len = len(tgt_offset)
    deps = dependencies[op_to_attach]
    return all(
        offsets[dep][:tgt_offset_len] <= tgt_offset
        for dep in deps
    )
Determine if a given command can be attached to another.

Returns True if *op_to_attach* can be attached to *op_to_attach_to* without violating the dependency order.
reconstruct_tree.ok_to_attach_to
python
geldata/gel
edb/schema/ordering.py
https://github.com/geldata/gel/blob/master/edb/schema/ordering.py
Apache-2.0
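The offsets test in ok_to_attach_to relies on Python's lexicographic tuple ordering: a dependency at tree address d may precede a target at address t exactly when d, truncated to t's length, compares <= t. A tiny demonstration (illustrative):

# Illustrative sketch: tree addresses as tuples of sibling indexes;
# a dependency is ordered no later than the target if its truncated
# address compares <= the target's address.

def may_attach(dep_offset: tuple, tgt_offset: tuple) -> bool:
    return dep_offset[:len(tgt_offset)] <= tgt_offset

assert may_attach((0, 1), (0, 2))      # earlier sibling subtree: ok
assert may_attach((0, 2, 5), (0, 2))   # nested inside the target: ok
assert not may_attach((1, 0), (0, 2))  # later subtree: not ok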
def attach(
    opbranch: Tuple[sd.Command, ...],
    new_parent: sd.Command,
    slice_start: int = 1,
    as_implicit: bool = False,
) -> None:
    """Attach a portion of a given command branch to another parent.

    Args:
        opbranch:
            Command branch to attach to *new_parent*.
        new_parent:
            Command node to attach the specified portion
            of *opbranch* to.
        slice_start:
            Offset into *opbranch* that determines which
            commands get attached.
        as_implicit:
            If True, the command branch is considered to be
            implicit, i.e. it is not recorded in the command index.
    """
    parent = opbranch[slice_start]
    op = opbranch[-1]

    offset_within_parent = new_parent.get_nonattr_subcommand_count()
    if not isinstance(new_parent, sd.DeltaRoot):
        parent_offset = offsets[new_parent] + (offset_within_parent,)
    else:
        parent_offset = (offset_within_parent,)
    old_parent = parents[parent]
    old_parent.discard(parent)
    new_parent.add_caused(parent)
    parents[parent] = new_parent

    for i in range(slice_start, len(opbranch)):
        op = opbranch[i]
        if isinstance(op, sd.ObjectCommand):
            ancestor_key = (type(op), op.classname, as_implicit)
            opindex[ancestor_key] = op

        if op in offsets:
            op_offset = offsets[op][slice_start:]
        else:
            op_offset = (0,) * (i - slice_start)

        offsets[op] = parent_offset + op_offset
Attach a portion of a given command branch to another parent.

Args:
    opbranch: Command branch to attach to *new_parent*.
    new_parent: Command node to attach the specified portion of *opbranch* to.
    slice_start: Offset into *opbranch* that determines which commands get attached.
    as_implicit: If True, the command branch is considered to be implicit, i.e. it is not recorded in the command index.
reconstruct_tree.attach
python
geldata/gel
edb/schema/ordering.py
https://github.com/geldata/gel/blob/master/edb/schema/ordering.py
Apache-2.0
def maybe_replace_preceding(
    op: sd.Command,
) -> bool:
    """Possibly merge and replace an earlier command with *op*.

    If *op* is a DELETE command, or an ALTER command that has no
    subcommands, and there is an earlier ALTER command operating
    on the same object as *op*, merge that command into *op* and
    replace it with *op*.

    Returns:
        True if merge and replace happened, False otherwise.
    """
    if not (
        isinstance(op, sd.DeleteObject)
        or (
            isinstance(op, sd.AlterObject)
            and op.get_nonattr_subcommand_count() == 0
        )
    ):
        return False

    alter_cmd_cls = sd.get_object_command_class(
        sd.AlterObject, op.get_schema_metaclass())

    if alter_cmd_cls is None:
        # ALTER isn't even defined for this object class, bail.
        return False

    alter_key = ((alter_cmd_cls), op.classname, False)
    alter_op = opindex.get(alter_key)
    if alter_op is None:
        # No preceding ALTER, bail.
        return False

    if (
        not ok_to_attach_to(op, alter_op)
        or (
            isinstance(parents[op], sd.DeltaRoot)
            != isinstance(parents[alter_op], sd.DeltaRoot)
        )
        or bool(alter_op.get_subcommands(type=sd.RenameObject))
    ):
        return False

    for alter_sub in reversed(alter_op.get_prerequisites()):
        op.prepend_prerequisite(alter_sub)
        parents[alter_sub] = op

    for alter_sub in reversed(
        alter_op.get_subcommands(include_prerequisites=False)
    ):
        op.prepend(alter_sub)
        parents[alter_sub] = op

    attached_root = parents[alter_op]
    attached_root.replace(alter_op, op)
    opindex[alter_key] = op
    opindex[type(op), op.classname, False] = op
    offsets[op] = offsets[alter_op]
    parents[op] = attached_root

    return True
Possibly merge and replace an earlier command with *op*.

If *op* is a DELETE command, or an ALTER command that has no subcommands, and there is an earlier ALTER command operating on the same object as *op*, merge that command into *op* and replace it with *op*.

Returns:
    True if merge and replace happened, False otherwise.
reconstruct_tree.maybe_replace_preceding
python
geldata/gel
edb/schema/ordering.py
https://github.com/geldata/gel/blob/master/edb/schema/ordering.py
Apache-2.0
def maybe_attach_to_preceding(
    opbranch: Tuple[sd.Command, ...],
    parent_candidates: List[sn.Name],
    allowed_op_types: List[Type[sd.ObjectCommand[so.Object]]],
    as_implicit: bool = False,
    slice_start: int = 1,
) -> bool:
    """Find a parent and attach a given portion of command branch to it.

    Args:
        opbranch:
            Command branch to consider.
        parent_candidates:
            A list of parent object names to consider when
            looking for a parent command.
        allowed_op_types:
            A list of command types to consider when looking
            for a parent command.
        as_implicit:
            If True, the command branch is considered to be
            implicit, i.e. it is not recorded in the command index.
        slice_start:
            Offset into *opbranch* that determines which
            commands get attached.
    """
    for candidate in parent_candidates:
        for op_type in allowed_op_types:
            parent_op = opindex.get((op_type, candidate, False))
            # implicit ops are allowed to attach to other implicit
            # ops. (Since we want them to chain properly in
            # inheritance order.)
            if parent_op is None and as_implicit:
                parent_op = opindex.get((op_type, candidate, True))

            if (
                parent_op is not None
                and ok_to_attach_to(
                    op,
                    parent_op,
                    only_if_confident=not as_implicit,
                )
            ):
                attach(
                    opbranch,
                    parent_op,
                    as_implicit=as_implicit,
                    slice_start=slice_start,
                )
                return True

    return False
Find a parent and attach a given portion of command branch to it.

Args:
    opbranch: Command branch to consider.
    parent_candidates: A list of parent object names to consider when looking for a parent command.
    allowed_op_types: A list of command types to consider when looking for a parent command.
    as_implicit: If True, the command branch is considered to be implicit, i.e. it is not recorded in the command index.
    slice_start: Offset into *opbranch* that determines which commands get attached.
reconstruct_tree.maybe_attach_to_preceding
python
geldata/gel
edb/schema/ordering.py
https://github.com/geldata/gel/blob/master/edb/schema/ordering.py
Apache-2.0
def reconstruct_tree( sortedlist: List[DepGraphEntry], depgraph: DepGraph, ) -> sd.DeltaRoot: result = sd.DeltaRoot() # Child to parent mapping. parents: Dict[sd.Command, sd.Command] = {} # A mapping of commands to their dependencies. dependencies: Dict[sd.Command, Set[sd.Command]] = ( collections.defaultdict(set)) # Current address of command within a tree in the form of # a tuple of indexes where each index represents relative # position within the tree rank. offsets: Dict[sd.Command, Tuple[int, ...]] = {} # Object commands indexed by command type and object name and # implicitness, where each entry represents the latest seen # command of the type for a particular object. Implicit commands # are included, but can only be attached to by other implicit # commands. opindex: Dict[ Tuple[Type[sd.ObjectCommand[so.Object]], sn.Name, bool], sd.ObjectCommand[so.Object] ] = {} def ok_to_attach_to( op_to_attach: sd.Command, op_to_attach_to: sd.ObjectCommand[so.Object], only_if_confident: bool = False, ) -> bool: """Determine if a given command can be attached to another. Returns True, if *op_to_attach* can be attached to *op_to_attach_to* without violating the dependency order. """ if only_if_confident and isinstance(op_to_attach, sd.ObjectCommand): # Avoid reattaching the subcommand if confidence is below 100%, # so that granular prompts can be generated. confidence = op_to_attach.get_annotation('confidence') if confidence is not None and confidence < 1.0: return False tgt_offset = offsets[op_to_attach_to] tgt_offset_len = len(tgt_offset) deps = dependencies[op_to_attach] return all(offsets[dep][:tgt_offset_len] <= tgt_offset for dep in deps) def attach( opbranch: Tuple[sd.Command, ...], new_parent: sd.Command, slice_start: int = 1, as_implicit: bool = False, ) -> None: """Attach a portion of a given command branch to another parent. Args: opbranch: Command branch to attach to *new_parent*. new_parent: Command node to attach the specified portion of *opbranch* to. slice_start: Offset into *opbranch* that determines which commands get attached. as_implicit: If True, the command branch is considered to be implicit, i.e. it is not recorded in the command index. """ parent = opbranch[slice_start] op = opbranch[-1] offset_within_parent = new_parent.get_nonattr_subcommand_count() if not isinstance(new_parent, sd.DeltaRoot): parent_offset = offsets[new_parent] + (offset_within_parent,) else: parent_offset = (offset_within_parent,) old_parent = parents[parent] old_parent.discard(parent) new_parent.add_caused(parent) parents[parent] = new_parent for i in range(slice_start, len(opbranch)): op = opbranch[i] if isinstance(op, sd.ObjectCommand): ancestor_key = (type(op), op.classname, as_implicit) opindex[ancestor_key] = op if op in offsets: op_offset = offsets[op][slice_start:] else: op_offset = (0,) * (i - slice_start) offsets[op] = parent_offset + op_offset def maybe_replace_preceding( op: sd.Command, ) -> bool: """Possibly merge and replace an earlier command with *op*. If *op* is a DELETE command, or an ALTER command that has no subcommands, and there is an earlier ALTER command operating on the same object as *op*, merge that command into *op* and replace it with *op*. Returns: True if merge and replace happened, False otherwise. 
""" if not ( isinstance(op, sd.DeleteObject) or ( isinstance(op, sd.AlterObject) and op.get_nonattr_subcommand_count() == 0 ) ): return False alter_cmd_cls = sd.get_object_command_class( sd.AlterObject, op.get_schema_metaclass()) if alter_cmd_cls is None: # ALTER isn't even defined for this object class, bail. return False alter_key = ((alter_cmd_cls), op.classname, False) alter_op = opindex.get(alter_key) if alter_op is None: # No preceding ALTER, bail. return False if ( not ok_to_attach_to(op, alter_op) or ( isinstance(parents[op], sd.DeltaRoot) != isinstance(parents[alter_op], sd.DeltaRoot) ) or bool(alter_op.get_subcommands(type=sd.RenameObject)) ): return False for alter_sub in reversed(alter_op.get_prerequisites()): op.prepend_prerequisite(alter_sub) parents[alter_sub] = op for alter_sub in reversed( alter_op.get_subcommands(include_prerequisites=False) ): op.prepend(alter_sub) parents[alter_sub] = op attached_root = parents[alter_op] attached_root.replace(alter_op, op) opindex[alter_key] = op opindex[type(op), op.classname, False] = op offsets[op] = offsets[alter_op] parents[op] = attached_root return True def maybe_attach_to_preceding( opbranch: Tuple[sd.Command, ...], parent_candidates: List[sn.Name], allowed_op_types: List[Type[sd.ObjectCommand[so.Object]]], as_implicit: bool = False, slice_start: int = 1, ) -> bool: """Find a parent and attach a given portion of command branch to it. Args: opbranch: Command branch to consider. parent_candidates: A list of parent object names to consider when looking for a parent command. allowed_op_types: A list of command types to consider when looking for a parent command. as_implicit: If True, the command branch is considered to be implicit, i.e. it is not recorded in the command index. slice_start: Offset into *opbranch* that determines which commands get attached. """ for candidate in parent_candidates: for op_type in allowed_op_types: parent_op = opindex.get((op_type, candidate, False)) # implicit ops are allowed to attach to other implicit # ops. (Since we want them to chain properly in # inheritance order.) if parent_op is None and as_implicit: parent_op = opindex.get((op_type, candidate, True)) if ( parent_op is not None and ok_to_attach_to( op, parent_op, only_if_confident=not as_implicit, ) ): attach( opbranch, parent_op, as_implicit=as_implicit, slice_start=slice_start, ) return True return False # First, build parents and dependencies maps. for info in sortedlist: opbranch = info.item op = opbranch[-1] for j, pop in enumerate(opbranch[1:]): parents[pop] = opbranch[j] for dep in info.deps: dep_item = depgraph[dep] dep_stack = dep_item.item dep_op = dep_stack[-1] dependencies[op].add(dep_op) for info in sortedlist: opbranch = info.item op = opbranch[-1] # Elide empty ALTER statements from output. if isinstance(op, sd.AlterObject) and not op.get_subcommands(): continue # If applicable, replace a preceding command with this op. if maybe_replace_preceding(op): continue if ( isinstance(op, sd.ObjectCommand) and not isinstance(op, sd.CreateObject) and info.extra is not None and info.extra.implicit_ancestors ): # This command is deemed to be an implicit effect of another # command, such as when alteration is propagated through the # inheritance chain. If so, find a command that operates on # a parent object and attach this branch to it. 
allowed_ops = [type(op)] if isinstance(op, sd.DeleteObject): allowed_ops.append(op.get_other_command_class(sd.DeleteObject)) if maybe_attach_to_preceding( opbranch, info.extra.implicit_ancestors, allowed_ops, as_implicit=True, ): continue # Walking the branch toward root, see if there's a matching # branch prefix we could attach to. for depth, ancestor_op in enumerate(reversed(opbranch[1:-1])): assert isinstance(ancestor_op, sd.ObjectCommand) allowed_ops = [] create_cmd_t = ancestor_op.get_other_command_class(sd.CreateObject) if type(ancestor_op) is not create_cmd_t: allowed_ops.append(create_cmd_t) allowed_ops.append(type(ancestor_op)) if maybe_attach_to_preceding( opbranch, [ancestor_op.classname], allowed_ops, slice_start=len(opbranch) - (depth + 1), ): break else: # No branches to attach to, so attach to root. attach(opbranch, result) return result
Determine if a given command can be attached to another. Returns True if *op_to_attach* can be attached to *op_to_attach_to* without violating the dependency order.
reconstruct_tree
python
geldata/gel
edb/schema/ordering.py
https://github.com/geldata/gel/blob/master/edb/schema/ordering.py
Apache-2.0
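reconstruct_tree attaches each topologically sorted branch under the longest prefix already present in the tree. The standalone sketch below shows just that attachment strategy on tuples of command names; the offsets, dependency checks, and implicit attachment of the real implementation are omitted, and attach_branches is a made-up helper.

from typing import Dict, List, Tuple

def attach_branches(branches: List[Tuple[str, ...]]) -> Dict[str, dict]:
    root: Dict[str, dict] = {}
    index: Dict[Tuple[str, ...], Dict[str, dict]] = {(): root}
    for branch in branches:
        # Find the longest prefix of this branch already in the tree.
        k = len(branch)
        while branch[:k] not in index:
            k -= 1
        node = index[branch[:k]]
        # Materialize the remaining suffix underneath it.
        for i in range(k, len(branch)):
            node = node.setdefault(branch[i], {})
            index[branch[:i + 1]] = node
    return root

tree = attach_branches([
    ('ALTER TYPE User',),
    ('ALTER TYPE User', 'ALTER PROPERTY name'),
    ('CREATE TYPE Post',),
])
assert 'ALTER PROPERTY name' in tree['ALTER TYPE User']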
def delta_schemas(
    schema_a: Optional[s_schema.Schema],
    schema_b: s_schema.Schema,
    *,
    included_modules: Optional[Iterable[sn.Name]]=None,
    excluded_modules: Optional[Iterable[sn.Name]]=None,
    included_items: Optional[Iterable[sn.Name]]=None,
    excluded_items: Optional[Iterable[sn.Name]]=None,
    schema_a_filters: Iterable[
        Callable[[s_schema.Schema, so.Object], bool]
    ] = (),
    schema_b_filters: Iterable[
        Callable[[s_schema.Schema, so.Object], bool]
    ] = (),
    include_module_diff: bool=True,
    include_std_diff: bool=False,
    include_derived_types: bool=True,
    include_extensions: bool=False,
    linearize_delta: bool=True,
    descriptive_mode: bool=False,
    generate_prompts: bool=False,
    guidance: Optional[so.DeltaGuidance]=None,
) -> sd.DeltaRoot:
    """Return the difference between *schema_a* and *schema_b*.

    The returned object is a delta tree that, when applied to
    *schema_a*, results in *schema_b*.

    Args:
        schema_a:
            Schema to use as a starting state.  If ``None``,
            then a schema with only standard modules is assumed,
            unless *include_std_diff* is ``True``, in which case
            an entirely empty schema is assumed as a starting point.

        schema_b:
            Schema to use as the ending state.

        included_modules:
            Optional list of modules to include in the delta.

        excluded_modules:
            Optional list of modules to exclude from the delta.
            Takes precedence over *included_modules*.
            NOTE: standard library modules are always excluded,
            unless *include_std_diff* is ``True``.

        included_items:
            Optional list of names of objects to include in the delta.

        excluded_items:
            Optional list of names of objects to exclude from the delta.
            Takes precedence over *included_items*.

        schema_a_filters:
            Optional list of additional filters to place on *schema_a*.

        schema_b_filters:
            Optional list of additional filters to place on *schema_b*.

        include_module_diff:
            Whether to include create/drop module operations
            in the delta diff.

        include_std_diff:
            Whether to include the standard library in the diff.

        include_derived_types:
            Whether to include derived types, like unions, in the diff.

        linearize_delta:
            Whether the resulting diff should be properly ordered
            using the dependencies between objects.

        descriptive_mode:
            DESCRIBE AS TEXT mode.

        generate_prompts:
            Whether to generate prompts that can be used in
            DESCRIBE MIGRATION.

        guidance:
            Optional explicit guidance to schema diff.

    Returns:
        A :class:`schema.delta.DeltaRoot` instance representing
        the delta between *schema_a* and *schema_b*.
""" result = sd.DeltaRoot() schema_a_filters = list(schema_a_filters) schema_b_filters = list(schema_b_filters) context = so.ComparisonContext( generate_prompts=generate_prompts, descriptive_mode=descriptive_mode, guidance=guidance, ) if schema_a is None: if include_std_diff: schema_a = s_schema.EMPTY_SCHEMA else: schema_a = schema_b def _filter(schema: s_schema.Schema, obj: so.Object) -> bool: return ( ( isinstance(obj, so.QualifiedObject) and ( obj.get_name(schema).get_module_name() in s_schema.STD_MODULES ) ) or ( isinstance(obj, s_mod.Module) and obj.get_name(schema) in s_schema.STD_MODULES ) ) schema_a_filters.append(_filter) my_modules = { m.get_name(schema_b) for m in schema_b.get_objects( type=s_mod.Module, extra_filters=schema_b_filters, ) } other_modules = { m.get_name(schema_a) for m in schema_a.get_objects( type=s_mod.Module, extra_filters=schema_a_filters, ) } added_modules = my_modules - other_modules dropped_modules = other_modules - my_modules if included_modules is not None: included_modules = set(included_modules) added_modules &= included_modules dropped_modules &= included_modules else: included_modules = set() if excluded_modules is None: excluded_modules = set() else: excluded_modules = set(excluded_modules) if not include_std_diff: excluded_modules.update(s_schema.STD_MODULES) def _filter(schema: s_schema.Schema, obj: so.Object) -> bool: return not obj.get_builtin(schema) schema_a_filters.append(_filter) schema_b_filters.append(_filter) # In theory, __derived__ is ephemeral and should not need to be # included. In practice, unions created by computed links put # persistent things into __derived__ and need to be included in # diffs. # TODO: Fix this. if not include_derived_types: excluded_modules.add(sn.UnqualName('__derived__')) excluded_modules.add(sn.UnqualName('__ext_casts__')) excluded_modules.add(sn.UnqualName('__ext_index_matches__')) # Don't analyze the objects from extensions. if not include_extensions and isinstance(schema_b, s_schema.ChainedSchema): ext_packages = schema_b._global_schema.get_objects( type=s_ext.ExtensionPackage) ext_mods = set() for pkg in ext_packages: if not (modname := pkg.get_ext_module(schema_b)): continue if schema_a and schema_a.get_referrers(pkg): ext_mods.add(sn.UnqualName(modname)) if schema_b.get_referrers(pkg): ext_mods.add(sn.UnqualName(modname)) for ext_mod in ext_mods: if ext_mod not in included_modules: excluded_modules.add(ext_mod) if excluded_modules: added_modules -= excluded_modules dropped_modules -= excluded_modules if included_items is not None: included_items = set(included_items) if excluded_items is not None: excluded_items = set(excluded_items) if include_module_diff: for added_module in sorted(added_modules): if ( guidance is None or ( (s_mod.Module, added_module) not in guidance.banned_creations ) ): mod = schema_b.get_global(s_mod.Module, added_module, None) assert mod is not None create = mod.as_create_delta( schema=schema_b, context=context, ) assert isinstance(create, sd.CreateObject) create.if_not_exists = True # We currently fully assume that modules are created # or deleted and never renamed. This is fine, because module # objects are never actually referenced directly, only by # the virtue of being the leading part of a fully-qualified # name. 
                create.set_annotation('confidence', 1.0)
                result.add(create)

    excluded_classes = (
        so.GlobalObject,
        s_mod.Module,
        s_func.Parameter,
        s_pseudo.PseudoType,
        s_migr.Migration,
    )

    schemaclasses = [
        schemacls
        for schemacls in so.ObjectMeta.get_schema_metaclasses()
        if (
            not issubclass(schemacls, excluded_classes)
            and not schemacls.is_abstract()
        )
    ]

    assert not context.renames

    # We retry performing the diff until we stop finding new renames
    # and deletions. This allows us to be agnostic to the order that
    # we process schemaclasses.
    old_count = -1, -1
    while old_count != (len(context.renames), len(context.deletions)):
        old_count = len(context.renames), len(context.deletions)
        objects = sd.DeltaRoot()

        for sclass in schemaclasses:
            filters: List[Callable[[s_schema.Schema, so.Object], bool]] = []

            if not issubclass(sclass, so.QualifiedObject):
                # UnqualifiedObjects (like anonymous tuples and arrays)
                # should not use an included_modules filter.
                incl_modules = None
            else:
                if issubclass(sclass, so.DerivableObject):
                    def _only_generic(
                        schema: s_schema.Schema,
                        obj: so.Object,
                    ) -> bool:
                        assert isinstance(obj, so.DerivableObject)
                        return obj.is_non_concrete(schema) or (
                            isinstance(obj, s_types.Type)
                            and obj.get_from_global(schema)
                        )
                    filters.append(_only_generic)
                incl_modules = included_modules

            new = schema_b.get_objects(
                type=sclass,
                included_modules=incl_modules,
                excluded_modules=excluded_modules,
                included_items=included_items,
                excluded_items=excluded_items,
                extra_filters=filters + schema_b_filters,
            )
            old = schema_a.get_objects(
                type=sclass,
                included_modules=incl_modules,
                excluded_modules=excluded_modules,
                included_items=included_items,
                excluded_items=excluded_items,
                extra_filters=filters + schema_a_filters,
            )

            objects.add(
                sd.delta_objects(
                    old,
                    new,
                    sclass=sclass,
                    old_schema=schema_a,
                    new_schema=schema_b,
                    context=context,
                )
            )

    # We don't properly understand the dependencies on extensions, so
    # instead of having s_ordering sort them, we just put all
    # CreateExtension commands first and all DeleteExtension commands
    # last.
    create_exts: list[s_ext.CreateExtension] = []
    delete_exts = []
    for cmd in list(objects.get_subcommands()):
        if isinstance(cmd, s_ext.CreateExtension):
            cmd.canonical = False
            objects.discard(cmd)
            create_exts.append(cmd)
        elif isinstance(cmd, s_ext.DeleteExtension):
            cmd.canonical = False
            objects.discard(cmd)
            delete_exts.append(cmd)

    if linearize_delta:
        objects = s_ordering.linearize_delta(
            objects, old_schema=schema_a, new_schema=schema_b)

    if include_derived_types:
        result.add(objects)
    else:
        for cmd in objects.get_subcommands():
            if isinstance(cmd, s_objtypes.ObjectTypeCommand):
                if isinstance(cmd, s_objtypes.DeleteObjectType):
                    relevant_schema = schema_a
                else:
                    relevant_schema = schema_b

                obj = cast(s_objtypes.ObjectType,
                           relevant_schema.get(cmd.classname))
                if obj.is_union_type(relevant_schema):
                    continue

            result.add(cmd)

    if include_module_diff:
        # Process dropped modules in *reverse* sorted order, so that
        # `foo::bar` gets dropped before `foo`.
for dropped_module in reversed(sorted(dropped_modules)): if ( guidance is None or ( (s_mod.Module, dropped_module) not in guidance.banned_deletions ) ): mod = schema_a.get_global(s_mod.Module, dropped_module, None) assert mod is not None dropped = mod.as_delete_delta( schema=schema_a, context=context, ) dropped.set_annotation('confidence', 1.0) result.add(dropped) create_exts_sorted = sd.sort_by_cross_refs_key( schema_b, create_exts, key=lambda x: x.scls) delete_exts_sorted = sd.sort_by_cross_refs_key( schema_a, delete_exts, key=lambda x: x.scls) for op in create_exts_sorted: result.prepend(op) result.update(delete_exts_sorted) return result
Return the difference between *schema_a* and *schema_b*.

The returned object is a delta tree that, when applied to
*schema_a*, results in *schema_b*.

Args:
    schema_a:
        Schema to use as a starting state.  If ``None``,
        then a schema with only standard modules is assumed,
        unless *include_std_diff* is ``True``, in which case
        an entirely empty schema is assumed as a starting point.

    schema_b:
        Schema to use as the ending state.

    included_modules:
        Optional list of modules to include in the delta.

    excluded_modules:
        Optional list of modules to exclude from the delta.
        Takes precedence over *included_modules*.
        NOTE: standard library modules are always excluded,
        unless *include_std_diff* is ``True``.

    included_items:
        Optional list of names of objects to include in the delta.

    excluded_items:
        Optional list of names of objects to exclude from the delta.
        Takes precedence over *included_items*.

    schema_a_filters:
        Optional list of additional filters to place on *schema_a*.

    schema_b_filters:
        Optional list of additional filters to place on *schema_b*.

    include_module_diff:
        Whether to include create/drop module operations
        in the delta diff.

    include_std_diff:
        Whether to include the standard library in the diff.

    include_derived_types:
        Whether to include derived types, like unions, in the diff.

    linearize_delta:
        Whether the resulting diff should be properly ordered
        using the dependencies between objects.

    descriptive_mode:
        DESCRIBE AS TEXT mode.

    generate_prompts:
        Whether to generate prompts that can be used in
        DESCRIBE MIGRATION.

    guidance:
        Optional explicit guidance to schema diff.

Returns:
    A :class:`schema.delta.DeltaRoot` instance representing
    the delta between *schema_a* and *schema_b*.
delta_schemas
python
geldata/gel
edb/schema/ddl.py
https://github.com/geldata/gel/blob/master/edb/schema/ddl.py
Apache-2.0
def ddl_text_from_delta( schema_a: Optional[s_schema.Schema], schema_b: s_schema.Schema, delta: sd.DeltaRoot, *, include_ext_version: bool = True, ) -> str: """Return DDL text corresponding to a delta plan. Args: schema_a: The original schema (or None if starting from empty/std) schema_b: The schema to which the *delta* has **already** been applied. delta: The delta plan. Returns: DDL text corresponding to *delta*. """ return text_from_delta( schema_a, schema_b, delta, sdlmode=False, include_ext_version=include_ext_version, )
Return DDL text corresponding to a delta plan. Args: schema_a: The original schema (or None if starting from empty/std) schema_b: The schema to which the *delta* has **already** been applied. delta: The delta plan. Returns: DDL text corresponding to *delta*.
ddl_text_from_delta
python
geldata/gel
edb/schema/ddl.py
https://github.com/geldata/gel/blob/master/edb/schema/ddl.py
Apache-2.0
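A hypothetical end-to-end use of the two functions above: compute the delta between two schemas, then render it as DDL. This is a sketch; real s_schema.Schema instances have to come from a Gel server or its bootstrap machinery, so the helper is shown but never called, and describe_migration_ddl is a made-up name.

from edb.schema import ddl as s_ddl
from edb.schema import schema as s_schema

def describe_migration_ddl(
    schema_a: s_schema.Schema,
    schema_b: s_schema.Schema,
) -> str:
    # Delta that, when applied to schema_a, results in schema_b.
    delta = s_ddl.delta_schemas(schema_a, schema_b)
    # DDL text corresponding to that delta plan.
    return s_ddl.ddl_text_from_delta(schema_a, schema_b, delta)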
def sdl_text_from_delta( schema_a: Optional[s_schema.Schema], schema_b: s_schema.Schema, delta: sd.DeltaRoot, ) -> str: """Return SDL text corresponding to a delta plan. Args: schema_a: The original schema (or None if starting from empty/std) schema_b: The schema to which the *delta* has **already** been applied. delta: The delta plan. Returns: SDL text corresponding to *delta*. """ return text_from_delta(schema_a, schema_b, delta, sdlmode=True)
Return SDL text corresponding to a delta plan. Args: schema_a: The original schema (or None if starting from empty/std) schema_b: The schema to which the *delta* has **already** been applied. delta: The delta plan. Returns: SDL text corresponding to *delta*.
sdl_text_from_delta
python
geldata/gel
edb/schema/ddl.py
https://github.com/geldata/gel/blob/master/edb/schema/ddl.py
Apache-2.0
def descriptive_text_from_delta( schema_a: Optional[s_schema.Schema], schema_b: s_schema.Schema, delta: sd.DeltaRoot, *, limit_ref_classes: Iterable[so.ObjectMeta]=tuple(), ) -> str: """Return descriptive text corresponding to a delta plan. Args: schema_a: The original schema (or None if starting from empty/std) schema_b: The schema to which the *delta* has **already** been applied. delta: The delta plan. limit_ref_classes: If specified, limit the output of referenced objects to the specified classes. Returns: Descriptive text corresponding to *delta*. """ return text_from_delta( schema_a, schema_b, delta, sdlmode=True, descriptive_mode=True, limit_ref_classes=limit_ref_classes, )
Return descriptive text corresponding to a delta plan. Args: schema_a: The original schema (or None if starting from empty/std) schema_b: The schema to which the *delta* has **already** been applied. delta: The delta plan. limit_ref_classes: If specified, limit the output of referenced objects to the specified classes. Returns: Descriptive text corresponding to *delta*.
descriptive_text_from_delta
python
geldata/gel
edb/schema/ddl.py
https://github.com/geldata/gel/blob/master/edb/schema/ddl.py
Apache-2.0
def _search_with_getter( self, name: Union[str, sn.Name], *, getter: Callable[[FlatSchema, sn.Name], Any], default: Any, module_aliases: Optional[Mapping[Optional[str], str]], disallow_module: Optional[Callable[[str], bool]], ) -> Any: """ Find something in the schema with a given name. This function mostly mirrors edgeql.tracer.resolve_name except: - When searching in std, disallow some modules (often the base modules) - If no result found, return default """ if isinstance(name, str): name = sn.name_from_string(name) shortname = name.name module = name.module if isinstance(name, sn.QualName) else None orig_module = module if module == '__std__': fqname = sn.QualName('std', shortname) result = getter(self, fqname) if result is not None: return result else: return default # Apply module aliases current_module = ( module_aliases[None] if module_aliases and None in module_aliases else None ) is_current, module = apply_module_aliases( module, module_aliases, current_module, ) if is_current and current_module is None: return default no_std = is_current # Check if something matches the name if module is not None: fqname = sn.QualName(module, shortname) result = getter(self, fqname) if result is not None: return result # Try something in std if __current__ was not specified if not no_std: # If module == None, look in std if orig_module is None: mod_name = 'std' fqname = sn.QualName(mod_name, shortname) result = getter(self, fqname) if result is not None: return result # Ensure module is not a base module. # Then try the module as part of std. if module and not ( self.has_module(fmod := module.split('::')[0]) or (disallow_module and disallow_module(fmod)) ): mod_name = f'std::{module}' fqname = sn.QualName(mod_name, shortname) result = getter(self, fqname) if result is not None: return result return default
Find something in the schema with a given name. This function mostly mirrors edgeql.tracer.resolve_name except: - When searching in std, disallow some modules (often the base modules) - If no result found, return default
_search_with_getter
python
geldata/gel
edb/schema/schema.py
https://github.com/geldata/gel/blob/master/edb/schema/schema.py
Apache-2.0
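The fallback order implemented by _search_with_getter (the aliased or explicit module first, then std for bare names, then the module as part of std) can be modeled with a plain dict standing in for the schema. The REGISTRY contents and the lookup helper below are made up for illustration and skip the alias map, __current__/__std__ handling, and disallow_module hooks.

from typing import Optional

REGISTRY = {
    'std::len': '<std::len>',
    'std::fts::search': '<std::fts::search>',
    'app::helper': '<app::helper>',
}

def lookup(name: str, module: Optional[str], default=None):
    candidates = []
    if module is not None:
        candidates.append(f'{module}::{name}')        # explicit module
        candidates.append(f'std::{module}::{name}')   # module as part of std
    else:
        candidates.append(f'std::{name}')             # bare name: look in std
    for fqname in candidates:
        if fqname in REGISTRY:
            return REGISTRY[fqname]
    return default

assert lookup('len', None) == '<std::len>'
assert lookup('search', 'fts') == '<std::fts::search>'
assert lookup('missing', 'app', default='fallback') == 'fallback'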
def upgrade_schema(schema: FlatSchema) -> FlatSchema: """Repair a schema object serialized by an older patch version When an edgeql+schema patch adds fields to schema types, old serialized schemas will be broken, since their tuples are missing the fields. In this situation, we run through all the data tuples and fill them out. The upgraded version will then be cached. """ cls_fields = {} for py_cls in so.ObjectMeta.get_schema_metaclasses(): if isinstance(py_cls, adapter.Adapter): continue fields = py_cls._schema_fields.values() cls_fields[py_cls] = sorted(fields, key=lambda f: f.index) id_to_data = schema._id_to_data fixes = {} for id, typ_name in schema._id_to_type.items(): data = id_to_data[id] obj = so.Object.schema_restore((typ_name, id)) typ = type(obj) tfields = cls_fields[typ] exp_len = len(tfields) if len(data) < exp_len: ldata = list(data) for _ in range(len(ldata), exp_len): ldata.append(None) fixes[id] = tuple(ldata) return schema._replace(id_to_data=id_to_data.update(fixes))
Repair a schema object serialized by an older patch version When an edgeql+schema patch adds fields to schema types, old serialized schemas will be broken, since their tuples are missing the fields. In this situation, we run through all the data tuples and fill them out. The upgraded version will then be cached.
upgrade_schema
python
geldata/gel
edb/schema/schema.py
https://github.com/geldata/gel/blob/master/edb/schema/schema.py
Apache-2.0
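The core of upgrade_schema is tuple padding: any stored data tuple shorter than the current field count for its class is extended with None. A minimal runnable model, with plain dicts in place of the immutable schema maps:

def pad_tuples(id_to_data: dict, expected_len: dict) -> dict:
    fixes = {}
    for obj_id, data in id_to_data.items():
        exp = expected_len[obj_id]
        if len(data) < exp:
            # New trailing fields default to None, matching the
            # fill-out loop in upgrade_schema.
            fixes[obj_id] = data + (None,) * (exp - len(data))
    return {**id_to_data, **fixes}

old = {'a': (1, 2), 'b': (3, 4, 5)}
assert pad_tuples(old, {'a': 4, 'b': 3}) == {
    'a': (1, 2, None, None),
    'b': (3, 4, 5),
}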
def get_ordered_migrations( schema: s_schema.Schema, ) -> list[Migration]: '''Get all the migrations, in order. It would be nice if our toposort could do this for us, but toposort is implemented recursively, and it would be a pain to change that. ''' output = [] mig = schema.get_last_migration() while mig: output.append(mig) parents = mig.get_parents(schema).objects(schema) assert len(parents) <= 1, "only one parent supported currently" mig = parents[0] if parents else None output.reverse() return output
Get all the migrations, in order. It would be nice if our toposort could do this for us, but toposort is implemented recursively, and it would be a pain to change that.
get_ordered_migrations
python
geldata/gel
edb/schema/migrations.py
https://github.com/geldata/gel/blob/master/edb/schema/migrations.py
Apache-2.0
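The same iterative walk-then-reverse pattern used by get_ordered_migrations, shown on a toy single-parent chain (the Node class is a stand-in for Migration objects):

class Node:
    def __init__(self, name: str, parent: 'Node | None' = None) -> None:
        self.name = name
        self.parent = parent

def ordered(last: 'Node | None') -> list[str]:
    out = []
    node = last
    while node is not None:      # walk the single-parent chain to the root
        out.append(node.name)
        node = node.parent
    out.reverse()                # oldest migration first
    return out

m1 = Node('m1')
m2 = Node('m2', m1)
m3 = Node('m3', m2)
assert ordered(m3) == ['m1', 'm2', 'm3']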
def merge_deferrability( idx: Index, bases: List[Index], field_name: str, *, ignore_local: bool = False, schema: s_schema.Schema, ) -> Optional[qltypes.IndexDeferrability]: """Merge function for abstract index deferrability.""" return utils.merge_reduce( idx, bases, field_name=field_name, ignore_local=ignore_local, schema=schema, f=_merge_deferrability, type=qltypes.IndexDeferrability, )
Merge function for abstract index deferrability.
merge_deferrability
python
geldata/gel
edb/schema/indexes.py
https://github.com/geldata/gel/blob/master/edb/schema/indexes.py
Apache-2.0
def merge_deferred( idx: Index, bases: List[Index], field_name: str, *, ignore_local: bool = False, schema: s_schema.Schema, ) -> Optional[bool]: """Merge function for the DEFERRED qualifier on indexes.""" if idx.is_non_concrete(schema): return None if bases: deferrability = next(iter(bases)).get_deferrability(schema) else: deferrability = qltypes.IndexDeferrability.Prohibited local_deferred = idx.get_explicit_local_field_value( schema, field_name, None) idx_repr = idx.get_verbosename(schema, with_parent=True) if not idx.is_defined_here(schema): ignore_local = True if ignore_local: return deferrability is qltypes.IndexDeferrability.Required elif local_deferred is None: # No explicit local declaration, derive from abstract index # deferrability. if deferrability is qltypes.IndexDeferrability.Required: raise errors.SchemaDefinitionError( f"{idx_repr} must be declared as deferred" ) else: return False else: if ( local_deferred and deferrability is qltypes.IndexDeferrability.Prohibited ): raise errors.SchemaDefinitionError( f"{idx_repr} cannot be declared as deferred" ) elif ( not local_deferred and deferrability is qltypes.IndexDeferrability.Required ): raise errors.SchemaDefinitionError( f"{idx_repr} must be declared as deferred" ) return local_deferred # type: ignore
Merge function for the DEFERRED qualifier on indexes.
merge_deferred
python
geldata/gel
edb/schema/indexes.py
https://github.com/geldata/gel/blob/master/edb/schema/indexes.py
Apache-2.0
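The validation rules in merge_deferred reduce to a small decision table over the local deferred flag and the inherited deferrability. The pure function below mirrors the branches shown above; the Permitted member and the use of ValueError in place of SchemaDefinitionError are assumptions made for a self-contained example, and the ignore_local/abstract handling is omitted.

import enum
from typing import Optional

class Deferrability(enum.Enum):
    Prohibited = 'Prohibited'
    Permitted = 'Permitted'   # assumed third value between the two shown
    Required = 'Required'

def resolve_deferred(
    local: Optional[bool],
    deferrability: Deferrability,
) -> bool:
    if local is None:
        # No explicit local declaration: derive from the abstract index.
        if deferrability is Deferrability.Required:
            raise ValueError('index must be declared as deferred')
        return False
    if local and deferrability is Deferrability.Prohibited:
        raise ValueError('index cannot be declared as deferred')
    if not local and deferrability is Deferrability.Required:
        raise ValueError('index must be declared as deferred')
    return local

assert resolve_deferred(None, Deferrability.Permitted) is False
assert resolve_deferred(True, Deferrability.Permitted) is True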
def is_defined_here( self, schema: s_schema.Schema, ) -> bool: """ Returns True iff the index has not been inherited from a parent subject, and was originally defined on the subject. """ return all( base.get_abstract(schema) for base in self.get_bases(schema).objects(schema) )
Returns True iff the index has not been inherited from a parent subject, and was originally defined on the subject.
is_defined_here
python
geldata/gel
edb/schema/indexes.py
https://github.com/geldata/gel/blob/master/edb/schema/indexes.py
Apache-2.0
def ast_ignore_field_ownership(self, field: str) -> bool: """Whether to force generating an AST even though field isn't owned""" return field == "deferred"
Whether to force generating an AST even though field isn't owned
ast_ignore_field_ownership
python
geldata/gel
edb/schema/indexes.py
https://github.com/geldata/gel/blob/master/edb/schema/indexes.py
Apache-2.0
def get_effective_object_index(
    schema: s_schema.Schema,
    subject: IndexableSubject,
    base_idx_name: sn.QualName,
    span: Optional[parsing.Span] = None,
) -> tuple[Optional[Index], Sequence[Index]]:
    """
    Returns the effective index of a subject and any overridden
    inherited indexes.
    """
    indexes: so.ObjectIndexByFullname[Index] = subject.get_indexes(schema)

    base = schema.get(base_idx_name, type=Index, default=None)
    if base is None:
        # Abstract base index does not exist.
        return (None, ())

    object_indexes = [
        ind
        for ind in indexes.objects(schema)
        if ind.issubclass(schema, base)
    ]
    if len(object_indexes) == 0:
        return (None, ())

    object_indexes_defined_here = [
        ind for ind in object_indexes if ind.is_defined_here(schema)
    ]

    if len(object_indexes_defined_here) > 0:
        # indexes defined here have priority
        if len(object_indexes_defined_here) > 1:
            subject_name = subject.get_displayname(schema)
            raise errors.InvalidDefinitionError(
                f'multiple {base_idx_name} indexes defined for {subject_name}',
                span=span,
            )
        effective = object_indexes_defined_here[0]
        overridden = [
            i.get_implicit_bases(schema)[0]
            for i in object_indexes
            if i != effective
        ]

    else:
        # there are no object-scoped indexes defined on the subject;
        # the inherited indexes take effect
        if len(object_indexes) > 1:
            subject_name = subject.get_displayname(schema)
            raise errors.InvalidDefinitionError(
                f'multiple {base_idx_name} indexes '
                f'inherited for {subject_name}',
                span=span,
            )

        effective = object_indexes[0]
        overridden = []

    return (effective, overridden)
Returns the effective index of a subject and any overridden inherited indexes.
get_effective_object_index
python
geldata/gel
edb/schema/indexes.py
https://github.com/geldata/gel/blob/master/edb/schema/indexes.py
Apache-2.0
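The selection rule in get_effective_object_index, reduced to its essentials: indexes defined directly on the subject beat inherited ones, and more than one candidate at the winning level is an error. The (name, defined_here) tuple representation below is illustrative only.

from typing import List, Tuple

def effective_index(indexes: List[Tuple[str, bool]]) -> str:
    # Indexes defined directly on the subject win over inherited ones.
    local = [name for name, defined_here in indexes if defined_here]
    pool = local or [name for name, _ in indexes]
    if not pool:
        raise LookupError('no matching index')
    if len(pool) > 1:
        raise ValueError('multiple indexes defined for subject')
    return pool[0]

assert effective_index([('inherited_idx', False), ('own_idx', True)]) == 'own_idx'
assert effective_index([('inherited_idx', False)]) == 'inherited_idx'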
def _classify_object_field(field: s_obj.Field[Any]) -> FieldStorage: """Determine FieldStorage for a given schema class field.""" ftype = field.type shadow_ptr_kind = None shadow_ptr_type = None fieldtype = FieldType.OTHER is_array = is_multiprop = False if issubclass(ftype, s_obj.MultiPropSet): is_multiprop = True ftype = ftype.type elif ( issubclass( ftype, (checked.CheckedList, checked.FrozenCheckedList, checked.CheckedSet, checked.FrozenCheckedSet)) and not issubclass(ftype, s_expr.ExpressionList) ): is_array = True ftype = ftype.type # type: ignore if issubclass(ftype, s_obj.ObjectCollection): ptr_kind = 'multi link' ptr_type = 'schema::Object' if issubclass(ftype, s_obj.ObjectDict): fieldtype = FieldType.OBJ_DICT elif issubclass(ftype, s_obj.Object): ptr_kind = 'link' ptr_type = f'schema::{ftype.__name__}' elif issubclass(ftype, s_expr.Expression): shadow_ptr_kind = 'property' shadow_ptr_type = 'tuple<text: str, refs: array<uuid>>' ptr_kind = 'property' ptr_type = 'str' fieldtype = FieldType.EXPR elif issubclass(ftype, s_expr.ExpressionList): shadow_ptr_kind = 'property' shadow_ptr_type = ( 'array<tuple<text: str, refs: array<uuid>>>' ) ptr_kind = 'property' ptr_type = 'array<str>' fieldtype = FieldType.EXPR_LIST elif issubclass(ftype, s_expr.ExpressionDict): shadow_ptr_kind = 'property' shadow_ptr_type = '''array<tuple< name: str, expr: tuple<text: str, refs: array<uuid>> >>''' ptr_kind = 'property' ptr_type = 'array<tuple<name: str, expr: str>>' fieldtype = FieldType.EXPR_DICT elif issubclass(ftype, collections.abc.Mapping): ptr_kind = 'property' ptr_type = 'json' elif issubclass(ftype, (str, sn.Name)): ptr_kind = 'property' ptr_type = 'str' if field.name == 'name': # TODO: consider shadow-reflecting names as tuples shadow_ptr_kind = 'property' shadow_ptr_type = 'str' elif issubclass(ftype, bool): ptr_kind = 'property' ptr_type = 'bool' elif issubclass(ftype, int): ptr_kind = 'property' ptr_type = 'int64' elif issubclass(ftype, uuid.UUID): ptr_kind = 'property' ptr_type = 'uuid' elif issubclass(ftype, verutils.Version): ptr_kind = 'property' ptr_type = ''' tuple< major: std::int64, minor: std::int64, stage: sys::VersionStage, stage_no: std::int64, local: array<std::str>, > ''' else: raise RuntimeError( f'no metaschema reflection for field {field.name} of type {ftype}' ) if is_multiprop: ptr_kind = 'multi property' if is_array: ptr_type = f'array<{ptr_type}>' return FieldStorage( fieldtype=fieldtype, ptrkind=ptr_kind, ptrtype=ptr_type, shadow_ptrkind=shadow_ptr_kind, shadow_ptrtype=shadow_ptr_type, )
Determine FieldStorage for a given schema class field.
_classify_object_field
python
geldata/gel
edb/schema/reflection/structure.py
https://github.com/geldata/gel/blob/master/edb/schema/reflection/structure.py
Apache-2.0
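_classify_object_field is essentially an issubclass() dispatch chain from Python field types to reflected pointer kinds. A stripped-down runnable analogue for a few scalar types (note that bool must be tested before int, since bool subclasses int):

import uuid

def classify(ftype: type) -> tuple[str, str]:
    # Return (pointer kind, pointer type) for a toy subset of field types.
    if issubclass(ftype, bool):   # bool subclasses int, so test it first
        return 'property', 'bool'
    if issubclass(ftype, int):
        return 'property', 'int64'
    if issubclass(ftype, str):
        return 'property', 'str'
    if issubclass(ftype, uuid.UUID):
        return 'property', 'uuid'
    raise RuntimeError(f'no metaschema reflection for type {ftype!r}')

assert classify(bool) == ('property', 'bool')
assert classify(int) == ('property', 'int64')
assert classify(uuid.UUID) == ('property', 'uuid')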
def generate_structure( schema: s_schema.Schema, *, make_funcs: bool=True, ) -> SchemaReflectionParts: """Generate schema reflection structure from Python schema classes. Returns: A quadruple (as a SchemaReflectionParts instance) containing: - Delta, which, when applied to stdlib, yields an enhanced version of the `schema` module that contains all types and properties, not just those that are publicly exposed for introspection. - A mapping, containing type layout description for all schema classes. - A sequence of EdgeQL queries necessary to introspect a database schema. - A sequence of EdgeQL queries necessary to introspect global objects, such as roles and databases. """ delta = sd.DeltaRoot() classlayout: Dict[ Type[s_obj.Object], SchemaTypeLayout, ] = {} ordered_link = schema.get('schema::ordered', type=s_links.Link) if make_funcs: schema = _run_ddl( ''' CREATE FUNCTION sys::_get_pg_type_for_edgedb_type( typeid: std::uuid, kind: std::str, elemid: OPTIONAL std::uuid, sql_type: OPTIONAL std::str, ) -> std::int64 { USING SQL FUNCTION 'edgedb.get_pg_type_for_edgedb_type'; SET volatility := 'STABLE'; SET impl_is_strict := false; }; CREATE FUNCTION sys::_expr_from_json( data: json ) -> OPTIONAL tuple<text: str, refs: array<uuid>> { USING SQL $$ SELECT "data"->>'text' AS text, coalesce(r.refs, ARRAY[]::uuid[]) AS refs FROM (SELECT array_agg(v::uuid) AS refs FROM jsonb_array_elements_text("data"->'refs') AS v ) AS r WHERE jsonb_typeof("data") != 'null' $$; SET volatility := 'IMMUTABLE'; }; # A strictly-internal get config function that bypasses # the redaction of secrets in the public-facing one. CREATE FUNCTION cfg::_get_config_json_internal( NAMED ONLY sources: OPTIONAL array<std::str> = {}, NAMED ONLY max_source: OPTIONAL std::str = {} ) -> std::json { USING SQL $$ SELECT coalesce(jsonb_object_agg(cfg.name, cfg), '{}'::jsonb) FROM edgedb_VER._read_sys_config( sources::edgedb._sys_config_source_t[], max_source::edgedb._sys_config_source_t ) AS cfg $$; }; ''', schema=schema, delta=delta, ) py_classes = [] for py_cls in s_obj.ObjectMeta.get_schema_metaclasses(): if isinstance(py_cls, adapter.Adapter): continue if py_cls is s_obj.GlobalObject: continue py_classes.append(py_cls) read_sets: Dict[Type[s_obj.Object], List[str]] = {} for py_cls in py_classes: rschema_name = get_schema_name_for_pycls(py_cls) schema_objtype = schema.get( rschema_name, type=s_objtypes.ObjectType, default=None, ) bases = [] for base in py_cls.__bases__: if base in py_classes: bases.append(get_schema_name_for_pycls(base)) default_base = get_default_base_for_pycls(py_cls) if not bases and rschema_name != default_base: bases.append(default_base) reflection = py_cls.get_reflection_method() is_simple_wrapper = issubclass(py_cls, s_types.CollectionExprAlias) if schema_objtype is None: as_abstract = ( reflection is s_obj.ReflectionMethod.REGULAR and not is_simple_wrapper and ( py_cls is s_obj.InternalObject or not issubclass(py_cls, s_obj.InternalObject) ) and py_cls._abstract is not False ) schema = _run_ddl( f''' CREATE {'ABSTRACT' if as_abstract else ''} TYPE {rschema_name} EXTENDING {', '.join(str(b) for b in bases)}; ''', schema=schema, delta=delta, ) schema_objtype = schema.get( rschema_name, type=s_objtypes.ObjectType) else: ex_bases = schema_objtype.get_bases(schema).names(schema) _, added_bases = s_inh.delta_bases( ex_bases, bases, t=type(schema_objtype), ) if added_bases: for subset, position in added_bases: # XXX: Don't generate changes for just moving around the # order of types when the mismatch between python and 
# the schema, since it doesn't work anyway and causes mass # grief when trying to patch the schema. subset = [x for x in subset if x.name not in ex_bases] if not subset: continue if isinstance(position, tuple): position_clause = ( f'{position[0]} {position[1].name}' ) else: position_clause = position bases_expr = ', '.join(str(t.name) for t in subset) stmt = f''' ALTER TYPE {rschema_name} {{ EXTENDING {bases_expr} {position_clause} }} ''' schema = _run_ddl( stmt, schema=schema, delta=delta, ) if reflection is s_obj.ReflectionMethod.NONE: continue referrers = py_cls.get_referring_classes() if reflection is s_obj.ReflectionMethod.AS_LINK: if not referrers: raise RuntimeError( f'schema class {py_cls.__name__} is declared with AS_LINK ' f'reflection method but is not referenced in any RefDict' ) is_concrete = not schema_objtype.get_abstract(schema) if ( is_concrete and not is_simple_wrapper and any( not b.get_abstract(schema) for b in schema_objtype.get_ancestors(schema).objects(schema) ) ): raise RuntimeError( f'non-abstract {schema_objtype.get_verbosename(schema)} has ' f'non-abstract ancestors' ) read_shape = read_sets[py_cls] = [] if is_concrete: read_shape.append( '_tname := .__type__[IS schema::ObjectType].name' ) classlayout[py_cls] = {} ownfields = py_cls.get_ownfields() for fn, field in py_cls.get_fields().items(): sfn = field.sname if ( field.ephemeral or ( field.reflection_method is not s_obj.ReflectionMethod.REGULAR ) ): continue storage = _classify_object_field(field) ptr = schema_objtype.maybe_get_ptr(schema, sn.UnqualName(sfn)) if fn in ownfields: qual = "REQUIRED" if field.required else "OPTIONAL" otd = " { ON TARGET DELETE ALLOW }" if field.weak_ref else "" if ptr is None: schema = _run_ddl( f''' ALTER TYPE {rschema_name} {{ CREATE {qual} {storage.ptrkind} {sfn} -> {storage.ptrtype} {otd}; }} ''', schema=schema, delta=delta, ) ptr = schema_objtype.getptr(schema, sn.UnqualName(fn)) if storage.shadow_ptrkind is not None: pn = f'{sfn}__internal' internal_ptr = schema_objtype.maybe_get_ptr( schema, sn.UnqualName(pn)) if internal_ptr is None: ptrkind = storage.shadow_ptrkind ptrtype = storage.shadow_ptrtype schema = _run_ddl( f''' ALTER TYPE {rschema_name} {{ CREATE {qual} {ptrkind} {pn} -> {ptrtype}; }} ''', schema=schema, delta=delta, ) else: assert ptr is not None if is_concrete: read_ptr = sfn if field.type_is_generic_self: read_ptr = f'{read_ptr}[IS {rschema_name}]' if field.reflection_proxy: _proxy_type, proxy_link = field.reflection_proxy read_ptr = ( f'{read_ptr}: {{name, value := .{proxy_link}.id}}' ) if ptr.issubclass(schema, ordered_link): read_ptr = f'{read_ptr} ORDER BY @index' read_shape.append(read_ptr) if storage.shadow_ptrkind is not None: read_shape.append(f'{sfn}__internal') if field.reflection_proxy: proxy_type_name, proxy_link_name = field.reflection_proxy proxy_obj = schema.get( proxy_type_name, type=s_objtypes.ObjectType) proxy_link_obj = proxy_obj.getptr( schema, sn.UnqualName(proxy_link_name)) tgt = proxy_link_obj.get_target(schema) else: tgt = ptr.get_target(schema) assert tgt is not None cardinality = ptr.get_cardinality(schema) assert cardinality is not None classlayout[py_cls][sfn] = SchemaFieldDesc( fieldname=fn, schema_fieldname=sfn, type=tgt, cardinality=cardinality, properties={}, storage=storage, is_ordered=ptr.issubclass(schema, ordered_link), reflection_proxy=field.reflection_proxy, ) # Second pass: deal with RefDicts, which are reflected as links. 
for py_cls in py_classes: rschema_name = get_schema_name_for_pycls(py_cls) schema_cls = schema.get(rschema_name, type=s_objtypes.ObjectType) for refdict in py_cls.get_own_refdicts().values(): ref_ptr = schema_cls.maybe_get_ptr( schema, sn.UnqualName(refdict.attr)) ref_cls = refdict.ref_cls assert issubclass(ref_cls, s_obj.Object) shadow_ref_ptr = None reflect_as_link = ( ref_cls.get_reflection_method() is s_obj.ReflectionMethod.AS_LINK ) if reflect_as_link: reflection_link = ref_cls.get_reflection_link() assert reflection_link is not None target_field = ref_cls.get_field(reflection_link) target_cls = target_field.type shadow_pn = f'{refdict.attr}__internal' shadow_ref_ptr = schema_cls.maybe_get_ptr( schema, sn.UnqualName(shadow_pn)) if reflect_as_link and not shadow_ref_ptr: schema = _run_ddl( f''' ALTER TYPE {rschema_name} {{ CREATE OPTIONAL MULTI LINK {shadow_pn} EXTENDING schema::reference -> {get_schema_name_for_pycls(ref_cls)} {{ ON TARGET DELETE ALLOW; }}; }} ''', schema=schema, delta=delta, ) shadow_ref_ptr = schema_cls.getptr( schema, sn.UnqualName(shadow_pn)) else: target_cls = ref_cls if ref_ptr is None: ptr_type = get_schema_name_for_pycls(target_cls) schema = _run_ddl( f''' ALTER TYPE {rschema_name} {{ CREATE OPTIONAL MULTI LINK {refdict.attr} EXTENDING schema::reference -> {ptr_type} {{ ON TARGET DELETE ALLOW; }}; }} ''', schema=schema, delta=delta, ) ref_ptr = schema_cls.getptr( schema, sn.UnqualName(refdict.attr)) assert isinstance(ref_ptr, s_links.Link) if py_cls not in classlayout: classlayout[py_cls] = {} # First, fields declared to be reflected as link properties. props = _get_reflected_link_props( ref_ptr=ref_ptr, target_cls=ref_cls, schema=schema, ) if reflect_as_link: # Then, because it's a passthrough reflection, all scalar # fields of the proxy object. 
fields_as_props = [ f for f in ref_cls.get_ownfields().values() if ( not f.ephemeral and ( f.reflection_method is not s_obj.ReflectionMethod.AS_LINK ) and f.name != refdict.backref_attr and f.name != ref_cls.get_reflection_link() ) ] extra_props = _classify_scalar_object_fields(fields_as_props) for field, storage in {**props, **extra_props}.items(): sfn = field.sname prop_ptr = ref_ptr.maybe_get_ptr(schema, sn.UnqualName(sfn)) if prop_ptr is None: pty = storage.ptrtype schema = _run_ddl( f''' ALTER TYPE {rschema_name} {{ ALTER LINK {refdict.attr} {{ CREATE OPTIONAL PROPERTY {sfn} -> {pty}; }} }} ''', schema=schema, delta=delta, ) if shadow_ref_ptr is not None: assert isinstance(shadow_ref_ptr, s_links.Link) shadow_pn = shadow_ref_ptr.get_shortname(schema).name for field, storage in props.items(): sfn = field.sname prop_ptr = shadow_ref_ptr.maybe_get_ptr( schema, sn.UnqualName(sfn)) if prop_ptr is None: pty = storage.ptrtype schema = _run_ddl( f''' ALTER TYPE {rschema_name} {{ ALTER LINK {shadow_pn} {{ CREATE OPTIONAL PROPERTY {sfn} -> {pty}; }} }} ''', schema=schema, delta=delta, ) for py_cls in py_classes: rschema_name = get_schema_name_for_pycls(py_cls) schema_cls = schema.get(rschema_name, type=s_objtypes.ObjectType) is_concrete = not schema_cls.get_abstract(schema) read_shape = read_sets[py_cls] for refdict in py_cls.get_refdicts(): if py_cls not in classlayout: classlayout[py_cls] = {} ref_ptr = schema_cls.getptr( schema, sn.UnqualName(refdict.attr), type=s_links.Link) tgt = ref_ptr.get_target(schema) assert tgt is not None cardinality = ref_ptr.get_cardinality(schema) assert cardinality is not None classlayout[py_cls][refdict.attr] = SchemaFieldDesc( fieldname=refdict.attr, schema_fieldname=refdict.attr, type=tgt, cardinality=cardinality, properties={}, is_ordered=ref_ptr.issubclass(schema, ordered_link), reflection_proxy=None, is_refdict=True, ) target_cls = refdict.ref_cls props = _get_reflected_link_props( ref_ptr=ref_ptr, target_cls=target_cls, schema=schema, ) reflect_as_link = ( target_cls.get_reflection_method() is s_obj.ReflectionMethod.AS_LINK ) prop_layout = {} extra_prop_layout = {} for field, storage in props.items(): prop_ptr = ref_ptr.getptr(schema, sn.UnqualName(field.sname)) prop_tgt = prop_ptr.get_target(schema) assert prop_tgt is not None prop_layout[field.name] = (prop_tgt, storage.fieldtype) if reflect_as_link: # Then, because it's a passthrough reflection, all scalar # fields of the proxy object. 
fields_as_props = [ f for f in target_cls.get_ownfields().values() if ( not f.ephemeral and ( f.reflection_method is not s_obj.ReflectionMethod.AS_LINK ) and f.name != refdict.backref_attr and f.name != target_cls.get_reflection_link() ) ] extra_props = _classify_scalar_object_fields(fields_as_props) for field, storage in extra_props.items(): prop_ptr = ref_ptr.getptr( schema, sn.UnqualName(field.sname)) prop_tgt = prop_ptr.get_target(schema) assert prop_tgt is not None extra_prop_layout[field.name] = ( prop_tgt, storage.fieldtype) else: extra_prop_layout = {} classlayout[py_cls][refdict.attr].properties.update({ **prop_layout, **extra_prop_layout, }) if reflect_as_link: shadow_tgt = schema.get( get_schema_name_for_pycls(ref_cls), type=s_objtypes.ObjectType, ) iname = f'{refdict.attr}__internal' classlayout[py_cls][iname] = ( SchemaFieldDesc( fieldname=refdict.attr, schema_fieldname=iname, type=shadow_tgt, cardinality=qltypes.SchemaCardinality.Many, properties=prop_layout, is_refdict=True, ) ) if is_concrete: read_ptr = refdict.attr prop_shape_els = [] if reflect_as_link: read_ptr = f'{read_ptr}__internal' ref_ptr = schema_cls.getptr( schema, sn.UnqualName(f'{refdict.attr}__internal'), ) for field in props: sfn = field.sname prop_shape_els.append(f'@{sfn}') if prop_shape_els: prop_shape = ',\n'.join(prop_shape_els) read_ptr = f'{read_ptr}: {{id, {prop_shape}}}' if ref_ptr.issubclass(schema, ordered_link): read_ptr = f'{read_ptr} ORDER BY @index' read_shape.append(read_ptr) local_parts = [] global_parts = [] for py_cls, shape_els in read_sets.items(): if ( not shape_els # The CollectionExprAlias family needs to be excluded # because TupleExprAlias and ArrayExprAlias inherit from # concrete classes and so are picked up from those. or issubclass(py_cls, s_types.CollectionExprAlias) ): continue rschema_name = get_schema_name_for_pycls(py_cls) shape = ',\n'.join(shape_els) qry = f''' SELECT {rschema_name} {{ {shape} }} ''' if not issubclass(py_cls, (s_types.Collection, s_obj.GlobalObject)): qry += ' FILTER NOT .builtin' if issubclass(py_cls, s_obj.GlobalObject): global_parts.append(qry) else: local_parts.append(qry) delta.canonical = True return SchemaReflectionParts( intro_schema_delta=delta, class_layout=classlayout, local_intro_parts=local_parts, global_intro_parts=global_parts, )
Generate schema reflection structure from Python schema classes. Returns: A quadruple (as a SchemaReflectionParts instance) containing: - Delta, which, when applied to stdlib, yields an enhanced version of the `schema` module that contains all types and properties, not just those that are publicly exposed for introspection. - A mapping, containing type layout description for all schema classes. - A sequence of EdgeQL queries necessary to introspect a database schema. - A sequence of EdgeQL queries necessary to introspect global objects, such as roles and databases.
generate_structure
python
geldata/gel
edb/schema/reflection/structure.py
https://github.com/geldata/gel/blob/master/edb/schema/reflection/structure.py
Apache-2.0
def _hoist_if_unused_deletes( cmd: sd.Command, target: Optional[sd.DeleteObject[so.Object]] = None, ) -> None: """Hoist up if_unused deletes higher in the tree. if_unused deletes for things like union and collection types need to be done *after* the referring object that triggered the deletion is deleted. There is special handling in the write_meta case for DeleteObject to support this, but we need to also handle the case where the delete of the union/collection happens down in a nested delete of a child. Work around this by hoisting up if_unused to the outermost enclosing delete. (We can't just hoist to the actual toplevel, because that might move the command after something that needs to go *after*, like a delete of one of the union components.) Don't hoist the if_unused all the way *outside* an extension. We want the effects of deleting an extension to be contained in the DeleteExtension command. If there are union/collection types used outside this extension, they won't be deleted. If the union/collection types are used only by this extension, there is a chance that they also rely on the types *from* the extension. This means that it will be impossible to delete the base types if we defer deleting the union/collection types until all extension content is removed. FIXME: Could we instead *generate* the deletions at the outermost point? """ new_target = target if ( not new_target and isinstance(cmd, sd.DeleteObject) and not isinstance(cmd, s_ext.DeleteExtension) ): new_target = cmd for sub in cmd.get_subcommands(): if ( isinstance(sub, sd.DeleteObject) and target and sub.if_unused ): cmd.discard(sub) target.add_caused(sub) else: _hoist_if_unused_deletes(sub, new_target)
Hoist up if_unused deletes higher in the tree. if_unused deletes for things like union and collection types need to be done *after* the referring object that triggered the deletion is deleted. There is special handling in the write_meta case for DeleteObject to support this, but we need to also handle the case where the delete of the union/collection happens down in a nested delete of a child. Work around this by hoisting up if_unused to the outermost enclosing delete. (We can't just hoist to the actual toplevel, because that might move the command after something that needs to go *after*, like a delete of one of the union components.) Don't hoist the if_unused all the way *outside* an extension. We want the effects of deleting an extension to be contained in the DeleteExtension command. If there are union/collection types used outside this extension, they won't be deleted. If the union/collection types are used only by this extension, there is a chance that they also rely on the types *from* the extension. This means that it will be impossible to delete the base types if we defer deleting the union/collection types until all extension content is removed. FIXME: Could we instead *generate* the deletions at the outermost point?
_hoist_if_unused_deletes
python
geldata/gel
edb/schema/reflection/writer.py
https://github.com/geldata/gel/blob/master/edb/schema/reflection/writer.py
Apache-2.0
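The hoisting traversal in _hoist_if_unused_deletes can be demonstrated on a generic tree: flagged children are detached and re-attached to the outermost enclosing "delete" node found above them. The Node class here is a toy stand-in, not sd.Command, and the extension special case is left out.

import dataclasses
from typing import Optional

@dataclasses.dataclass
class Node:
    name: str
    is_delete: bool = False
    if_unused: bool = False
    children: list['Node'] = dataclasses.field(default_factory=list)

def hoist(node: Node, target: Optional[Node] = None) -> None:
    # The outermost enclosing delete becomes the hoist target for
    # everything underneath it.
    new_target = target
    if new_target is None and node.is_delete:
        new_target = node
    for child in list(node.children):
        if child.if_unused and target is not None:
            # Detach and re-attach to the outermost enclosing delete.
            node.children.remove(child)
            target.children.append(child)
        else:
            hoist(child, new_target)

root = Node('root')
outer = Node('delete Link', is_delete=True)
inner = Node('delete Target', is_delete=True)
union = Node('delete Union (if_unused)', if_unused=True)
inner.children.append(union)
outer.children.append(inner)
root.children.append(outer)
hoist(root)
assert union in outer.children and union not in inner.children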
def write_meta( cmd: sd.Command, *, classlayout: Dict[Type[so.Object], sr_struct.SchemaTypeLayout], schema: s_schema.Schema, context: sd.CommandContext, blocks: List[Tuple[str, Dict[str, Any]]], internal_schema_mode: bool, stdmode: bool, ) -> None: """Generate EdgeQL statements populating schema metadata. Args: cmd: Delta command tree for which EdgeQL DML must be generated. classlayout: Schema class layout as returned from :func:`schema.reflection.structure.generate_structure`. schema: A schema instance. context: Delta context corresponding to *cmd*. blocks: A list where a sequence of (edgeql, args) tuples will be appended to. internal_schema_mode: If True, *cmd* represents internal `schema` modifications. stdmode: If True, *cmd* represents a standard library bootstrap DDL. """ raise NotImplementedError(f"cannot handle {cmd!r}")
Generate EdgeQL statements populating schema metadata. Args: cmd: Delta command tree for which EdgeQL DML must be generated. classlayout: Schema class layout as returned from :func:`schema.reflection.structure.generate_structure`. schema: A schema instance. context: Delta context corresponding to *cmd*. blocks: A list where a sequence of (edgeql, args) tuples will be appended to. internal_schema_mode: If True, *cmd* represents internal `schema` modifications. stdmode: If True, *cmd* represents a standard library bootstrap DDL.
write_meta
python
geldata/gel
edb/schema/reflection/writer.py
https://github.com/geldata/gel/blob/master/edb/schema/reflection/writer.py
Apache-2.0
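write_meta above raises NotImplementedError because it acts as the generic fallback of a per-command-type dispatch; specializations such as write_meta_create_object below handle concrete command classes. The excerpt does not show the registration mechanism, so the functools.singledispatch sketch below is only one plausible shape of such a scheme, with made-up command classes:

import functools

@functools.singledispatch
def emit_meta(cmd: object) -> str:
    raise NotImplementedError(f'cannot handle {cmd!r}')

class CreateObject: ...
class DeleteObject: ...

@emit_meta.register
def _(cmd: CreateObject) -> str:
    return 'INSERT ...'

@emit_meta.register
def _(cmd: DeleteObject) -> str:
    return 'DELETE ...'

assert emit_meta(CreateObject()) == 'INSERT ...'
assert emit_meta(DeleteObject()) == 'DELETE ...'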
def _build_object_mutation_shape( cmd: sd.ObjectCommand[so.Object], *, classlayout: Dict[Type[so.Object], sr_struct.SchemaTypeLayout], lprop_fields: Optional[ Dict[str, Tuple[s_types.Type, sr_struct.FieldType]] ] = None, lprops_only: bool = False, internal_schema_mode: bool, stdmode: bool, var_prefix: str = '', schema: s_schema.Schema, context: sd.CommandContext, ) -> Tuple[str, Dict[str, Any]]: props = cmd.get_resolved_attributes(schema, context) mcls = cmd.get_schema_metaclass() layout = classlayout[mcls] if lprop_fields is None: lprop_fields = {} # XXX: This is a hack around the fact that _update_lprops works by # removing all the links and recreating them. Since that will lose # data in situations where not every lprop attribute is specified, # merge AlterOwned props up into the enclosing command. (This avoids # trouble with annotations, which is the main place where we have # multiple interesting lprops at once.) if isinstance(cmd, s_ref.AlterOwned): return '', {} for sub in cmd.get_subcommands(type=s_ref.AlterOwned): props.update(sub.get_resolved_attributes(schema, context)) assignments = [] variables: Dict[str, str] = {} if isinstance(cmd, sd.CreateObject): empties = { v.fieldname: None for f, v in layout.items() if ( f != 'backend_id' and v.storage is not None and v.storage.ptrkind != 'link' and v.storage.ptrkind != 'multi link' ) } all_props = {**empties, **props} else: all_props = props for n, v in sorted(all_props.items(), key=lambda i: i[0]): ns = mcls.get_field(n).sname lprop_target = lprop_fields.get(n) if lprop_target is not None: target, ftype = lprop_target cardinality = qltypes.SchemaCardinality.One is_ordered = False reflection_proxy = None elif lprops_only: continue else: layout_entry = layout.get(ns) if layout_entry is None: # The field is ephemeral, skip it. continue else: target = layout_entry.type cardinality = layout_entry.cardinality is_ordered = layout_entry.is_ordered reflection_proxy = layout_entry.reflection_proxy assert layout_entry.storage is not None ftype = layout_entry.storage.fieldtype target_value: Any var_n = f'__{var_prefix}{n}' if ( issubclass(mcls, s_constr.Constraint) and n == 'params' and isinstance(cmd, s_ref.ReferencedObjectCommand) and cmd.get_referrer_context(context) is not None ): # Constraint args are represented as a `@value` link property # on the `params` link. # TODO: replace this hack by a generic implementation of # an ObjectKeyDict collection that allow associating objects # with arbitrary values (a transposed ObjectDict). 
target_expr = f"""assert_distinct(( FOR v IN {{ enumerate(json_array_unpack(<json>${var_n})) }} UNION ( SELECT {target.get_name(schema)} {{ @index := v.0, @value := <str>v.1[1], }} FILTER .id = <uuid>v.1[0] ) ))""" args = props.get('args', []) target_value = [] if v is not None: for i, param in enumerate(v.objects(schema)): if i == 0: # skip the implicit __subject__ parameter arg_expr = '' else: try: arg = args[i - 1] except IndexError: arg_expr = '' else: pkind = param.get_kind(schema) if pkind is qltypes.ParameterKind.VariadicParam: rest = [arg.text for arg in args[i - 1:]] arg_expr = f'[{",".join(rest)}]' else: arg_expr = arg.text target_value.append((str(param.id), arg_expr)) elif n == 'name': target_expr = f'<str>${var_n}' assignments.append(f'{ns}__internal := <str>${var_n}__internal') if v is not None: target_value = mcls.get_displayname_static(v) variables[f'{var_n}__internal'] = json.dumps(str(v)) else: target_value = None variables[f'{var_n}__internal'] = json.dumps(None) elif isinstance(target, s_objtypes.ObjectType): if cardinality is qltypes.SchemaCardinality.Many: if ftype is sr_struct.FieldType.OBJ_DICT: target_expr, target_value = _reflect_object_dict_value( schema=schema, value=v, is_ordered=is_ordered, value_var_name=var_n, target=target, reflection_proxy=reflection_proxy, ) elif is_ordered: target_expr = f'''( FOR v IN {{ enumerate(assert_distinct( <uuid>json_array_unpack(<json>${var_n}) )) }} UNION ( SELECT (DETACHED {target.get_name(schema)}) {{ @index := v.0, }} FILTER .id = v.1 ) )''' if v is not None: target_value = [str(i) for i in v.ids(schema)] else: target_value = [] else: target_expr = f'''( SELECT (DETACHED {target.get_name(schema)}) FILTER .id IN <uuid>json_array_unpack(<json>${var_n}) )''' if v is not None: target_value = [str(i) for i in v.ids(schema)] else: target_value = [] else: target_expr = f'''( SELECT (DETACHED {target.get_name(schema)}) FILTER .id = <uuid>${var_n} )''' if v is not None: target_value = str(v.id) else: target_value = None elif ftype is sr_struct.FieldType.EXPR: target_expr = f'<str>${var_n}' if v is not None: target_value = v.text else: target_value = None shadow_target_expr = ( f'sys::_expr_from_json(<json>${var_n}_expr)' ) assignments.append(f'{ns}__internal := {shadow_target_expr}') if v is not None: ids = [str(i) for i in v.refs.ids(schema)] variables[f'{var_n}_expr'] = json.dumps( {'text': v.text, 'refs': ids} ) else: variables[f'{var_n}_expr'] = json.dumps(None) elif ftype is sr_struct.FieldType.EXPR_LIST: target_expr = f''' array_agg(<str>json_array_unpack(<json>${var_n})["text"]) ''' if v is not None: target_value = [ { 'text': ex.text, 'refs': ( [str(i) for i in ex.refs.ids(schema)] if ex.refs else [] ) } for ex in v ] else: target_value = [] shadow_target_expr = f''' (SELECT array_agg( sys::_expr_from_json( json_array_unpack(<json>${var_n}) ) ) ) ''' assignments.append(f'{ns}__internal := {shadow_target_expr}') elif ftype is sr_struct.FieldType.EXPR_DICT: target_expr = f''' ( WITH orig_json := json_array_unpack(<json>${var_n}) SELECT array_agg(( for orig_json in orig_json union ( name := <str>orig_json['name'], expr := <str>orig_json['expr']['text'], ) )) ) ''' if v is not None: target_value = [ { 'name': key, 'expr': { 'text': ex.text, 'refs': ( [str(i) for i in ex.refs.ids(schema)] if ex.refs else [] ) } } for key, ex in v.items() ] else: target_value = [] shadow_target_expr = f''' ( WITH orig_json := json_array_unpack(<json>${var_n}) SELECT array_agg(( for orig_json in orig_json union ( name := <str>orig_json['name'], 
expr := sys::_expr_from_json( orig_json['expr'] ) ) )) ) ''' assignments.append(f'{ns}__internal := {shadow_target_expr}') elif isinstance(target, s_types.Array): eltype = target.get_element_type(schema) target_expr = f''' array_agg(<{eltype.get_name(schema)}> json_array_unpack(<json>${var_n})) IF json_typeof(<json>${var_n}) != 'null' ELSE <array<{eltype.get_name(schema)}>>{{}} ''' if v is not None: target_value = list(v) else: target_value = None else: target_expr = f'${var_n}' if cardinality and cardinality.is_multi(): target_expr = f'json_array_unpack(<json>{target_expr})' if target.is_enum(schema): target_expr = f'<str>{target_expr}' target_expr = f'<{target.get_name(schema)}>{target_expr}' if v is not None and cardinality.is_multi(): target_value = list(v) elif v is None or isinstance(v, numbers.Number): target_value = v else: target_value = str(v) if lprop_target is not None: assignments.append(f'@{ns} := {target_expr}') else: assignments.append(f'{ns} := {target_expr}') variables[var_n] = json.dumps(target_value) object_actually_exists = schema.has_object(cmd.scls.id) if ( isinstance(cmd, sd.CreateObject) and object_actually_exists and issubclass(mcls, (s_scalars.ScalarType, s_types.Collection)) and not issubclass(mcls, s_types.CollectionExprAlias) and not cmd.get_attribute_value('abstract') and not cmd.get_attribute_value('transient') ): kind = f'"schema::{mcls.__name__}"' if issubclass(mcls, (s_types.Array, s_types.Range, s_types.MultiRange)): assignments.append( f'backend_id := sys::_get_pg_type_for_edgedb_type(' f'<uuid>$__{var_prefix}id, ' f'{kind}, ' f'<uuid>$__{var_prefix}element_type, ' f'<str>$__{var_prefix}sql_type2), ' ) else: assignments.append( f'backend_id := sys::_get_pg_type_for_edgedb_type(' f'<uuid>$__{var_prefix}id, {kind}, <uuid>{{}}, ' f'<str>$__{var_prefix}sql_type2), ' ) sql_type = None if isinstance(cmd.scls, s_scalars.ScalarType): sql_type, _ = cmd.scls.resolve_sql_type_scheme(schema) variables[f'__{var_prefix}id'] = json.dumps( str(cmd.get_attribute_value('id')) ) variables[f'__{var_prefix}sql_type2'] = json.dumps(sql_type) shape = ',\n'.join(assignments) return shape, variables
Build the EdgeQL mutation shape and the JSON-encoded query variables reflecting the resolved attributes of a schema object command.
_build_object_mutation_shape
python
geldata/gel
edb/schema/reflection/writer.py
https://github.com/geldata/gel/blob/master/edb/schema/reflection/writer.py
Apache-2.0
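The function above returns a (shape, variables) pair: the shape is a comma-separated list of EdgeQL assignments referencing $-parameters, and variables maps each parameter name to a JSON-encoded value. A minimal sketch of that contract, with purely illustrative names (not taken from the real reflection schema):

import json

# Hypothetical single-attribute shape; real shapes cover many fields.
shape = 'name__internal := <str>$__name__internal'
variables = {'__name__internal': json.dumps('default::Foo')}

insert_query = f'INSERT schema::ObjectType {{ {shape} }}'
assert '$__name__internal' in insert_query
assert json.loads(variables['__name__internal']) == 'default::Foo'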
def write_meta_create_object( cmd: sd.CreateObject, # type: ignore *, classlayout: Dict[Type[so.Object], sr_struct.SchemaTypeLayout], schema: s_schema.Schema, context: sd.CommandContext, blocks: List[Tuple[str, Dict[str, Any]]], internal_schema_mode: bool, stdmode: bool, ) -> None: _descend( cmd, classlayout=classlayout, schema=schema, context=context, blocks=blocks, prerequisites=True, internal_schema_mode=internal_schema_mode, stdmode=stdmode, ) mcls = cmd.maybe_get_schema_metaclass() if mcls is not None and not issubclass(mcls, so.GlobalObject): if isinstance(cmd, s_ref.ReferencedObjectCommand): refctx = cmd.get_referrer_context(context) else: refctx = None if refctx is None: shape, variables = _build_object_mutation_shape( cmd, classlayout=classlayout, internal_schema_mode=internal_schema_mode, stdmode=stdmode, schema=schema, context=context, ) insert_query = f''' INSERT schema::{mcls.__name__} {{ {shape} }} ''' blocks.append((insert_query, variables)) else: refop = refctx.op refcls = refop.get_schema_metaclass() refdict = refcls.get_refdict_for_class(mcls) layout = classlayout[refcls][refdict.attr] lprops = layout.properties reflect_as_link = ( mcls.get_reflection_method() is so.ReflectionMethod.AS_LINK ) shape, variables = _build_object_mutation_shape( cmd, classlayout=classlayout, lprop_fields=lprops, lprops_only=reflect_as_link, internal_schema_mode=internal_schema_mode, stdmode=stdmode, schema=schema, context=context, ) assignments = [] if reflect_as_link: target_link = mcls.get_reflection_link() assert target_link is not None target_field = mcls.get_field(target_link) target = cmd.get_attribute_value(target_link) append_query = f''' SELECT DETACHED schema::{target_field.type.__name__} {{ {shape} }} FILTER .name__internal = <str>$__{target_link} ''' variables[f'__{target_link}'] = ( json.dumps(str(target.get_name(schema))) ) shadow_clslayout = classlayout[refcls] shadow_link_layout = ( shadow_clslayout[f'{refdict.attr}__internal']) shadow_shape, shadow_variables = _build_object_mutation_shape( cmd, classlayout=classlayout, internal_schema_mode=internal_schema_mode, lprop_fields=shadow_link_layout.properties, stdmode=stdmode, var_prefix='shadow_', schema=schema, context=context, ) variables.update(shadow_variables) shadow_append_query = f''' INSERT schema::{mcls.__name__} {{ {shadow_shape} }} ''' assignments.append(f''' {refdict.attr}__internal += ( {shadow_append_query} ) ''') else: append_query = f''' INSERT schema::{mcls.__name__} {{ {shape} }} ''' assignments.append(f''' {refdict.attr} += ( {append_query} ) ''') update_shape = ',\n'.join(assignments) parent_update_query = f''' UPDATE schema::{refcls.__name__} FILTER .name__internal = <str>$__parent_classname SET {{ {update_shape} }} ''' ref_name = context.get_referrer_name(refctx) variables['__parent_classname'] = json.dumps(str(ref_name)) blocks.append((parent_update_query, variables)) _descend( cmd, classlayout=classlayout, schema=schema, context=context, blocks=blocks, internal_schema_mode=internal_schema_mode, stdmode=stdmode, )
Emit EdgeQL queries reflecting a CreateObject command into the schema introspection data: a plain INSERT for top-level objects, or an update of the referring object for referenced objects, including the shadow link for objects reflected as links.
write_meta_create_object
python
geldata/gel
edb/schema/reflection/writer.py
https://github.com/geldata/gel/blob/master/edb/schema/reflection/writer.py
Apache-2.0
def _update_lprops( cmd: s_ref.ReferencedObjectCommand, # type: ignore *, classlayout: Dict[Type[so.Object], sr_struct.SchemaTypeLayout], schema: s_schema.Schema, blocks: List[Tuple[str, Dict[str, Any]]], context: sd.CommandContext, internal_schema_mode: bool, stdmode: bool, ) -> None: mcls = cmd.get_schema_metaclass() refctx = cmd.get_referrer_context_or_die(context) refop = refctx.op refcls = refop.get_schema_metaclass() refdict = refcls.get_refdict_for_class(mcls) layout = classlayout[refcls][refdict.attr] lprops = layout.properties if not lprops: return reflect_as_link = ( mcls.get_reflection_method() is so.ReflectionMethod.AS_LINK ) if reflect_as_link: target_link = mcls.get_reflection_link() assert target_link is not None target_field = mcls.get_field(target_link) target_obj = cmd.get_ddl_identity(target_link) if target_obj is None: raise AssertionError( f'cannot find link target in ddl_identity of a command for ' f'schema class reflected as link: {cmd!r}' ) target_clsname = target_field.type.__name__ else: referrer_cls = refop.get_schema_metaclass() target_field = referrer_cls.get_field(refdict.attr) if issubclass(target_field.type, so.ObjectCollection): target_type = target_field.type.type else: target_type = target_field.type target_clsname = target_type.__name__ target_link = refdict.attr target_obj = cmd.scls shape, append_variables = _build_object_mutation_shape( cmd, classlayout=classlayout, lprop_fields=lprops, lprops_only=True, internal_schema_mode=internal_schema_mode, stdmode=stdmode, schema=schema, context=context, ) if shape: parent_variables = {} parent_variables[f'__{target_link}'] = json.dumps(str(target_obj.id)) ref_name = context.get_referrer_name(refctx) parent_variables['__parent_classname'] = json.dumps(str(ref_name)) # XXX: we have to do a -= followed by a += because # support for filtered nested link property updates # is currently broken. # This is fragile! If not all of the lprops are specified, # we will drop them. 
assignments = [] assignments.append(textwrap.dedent( f'''\ {refdict.attr} -= ( SELECT DETACHED (schema::{target_clsname}) FILTER .id = <uuid>$__{target_link} )''' )) if reflect_as_link: parent_variables[f'__{target_link}_shadow'] = ( json.dumps(str(cmd.classname))) assignments.append(textwrap.dedent( f'''\ {refdict.attr}__internal -= ( SELECT DETACHED (schema::{mcls.__name__}) FILTER .name__internal = <str>$__{target_link}_shadow )''' )) update_shape = textwrap.indent( '\n' + ',\n'.join(assignments), ' ' * 4) parent_update_query = textwrap.dedent(f'''\ UPDATE schema::{refcls.__name__} FILTER .name__internal = <str>$__parent_classname SET {{{update_shape} }} ''') blocks.append((parent_update_query, parent_variables)) assignments = [] shape = textwrap.indent(f'\n{shape}', ' ' * 5) assignments.append(textwrap.dedent( f'''\ {refdict.attr} += ( SELECT DETACHED schema::{target_clsname} {{{shape} }} FILTER .id = <uuid>$__{target_link} )''' )) if reflect_as_link: shadow_clslayout = classlayout[refcls] shadow_link_layout = shadow_clslayout[f'{refdict.attr}__internal'] shadow_shape, shadow_variables = _build_object_mutation_shape( cmd, classlayout=classlayout, internal_schema_mode=internal_schema_mode, lprop_fields=shadow_link_layout.properties, lprops_only=True, stdmode=stdmode, var_prefix='shadow_', schema=schema, context=context, ) shadow_shape = textwrap.indent(f'\n{shadow_shape}', ' ' * 6) assignments.append(textwrap.dedent( f'''\ {refdict.attr}__internal += ( SELECT DETACHED schema::{mcls.__name__} {{{shadow_shape} }} FILTER .name__internal = <str>$__{target_link}_shadow )''' )) parent_variables.update(shadow_variables) update_shape = textwrap.indent( '\n' + ',\n'.join(assignments), ' ' * 4) parent_update_query = textwrap.dedent(f''' UPDATE schema::{refcls.__name__} FILTER .name__internal = <str>$__parent_classname SET {{{update_shape} }} ''') parent_variables.update(append_variables) blocks.append((parent_update_query, parent_variables))
Update the link properties stored on the reflection link of a referenced object. Implemented as a -= followed by a +=, because filtered nested link property updates are currently broken.
_update_lprops
python
geldata/gel
edb/schema/reflection/writer.py
https://github.com/geldata/gel/blob/master/edb/schema/reflection/writer.py
Apache-2.0
def write_meta_delete_object( cmd: sd.DeleteObject, # type: ignore *, classlayout: Dict[Type[so.Object], sr_struct.SchemaTypeLayout], schema: s_schema.Schema, context: sd.CommandContext, blocks: List[Tuple[str, Dict[str, Any]]], internal_schema_mode: bool, stdmode: bool, ) -> None: _descend( cmd, classlayout=classlayout, schema=schema, context=context, blocks=blocks, prerequisites=True, internal_schema_mode=internal_schema_mode, stdmode=stdmode, ) defer_filter = ( lambda cmd: isinstance(cmd, sd.DeleteObject) and cmd.if_unused ) _descend( cmd, classlayout=classlayout, schema=schema, context=context, blocks=blocks, internal_schema_mode=internal_schema_mode, stdmode=stdmode, cmd_filter=lambda cmd: not defer_filter(cmd), ) mcls = cmd.maybe_get_schema_metaclass() if mcls is not None and not issubclass(mcls, so.GlobalObject): if isinstance(cmd, s_ref.ReferencedObjectCommand): refctx = cmd.get_referrer_context(context) else: refctx = None if ( refctx is not None and mcls.get_reflection_method() is so.ReflectionMethod.AS_LINK ): refop = refctx.op refcls = refop.get_schema_metaclass() refdict = refcls.get_refdict_for_class(mcls) target_link = mcls.get_reflection_link() assert target_link is not None target_field = mcls.get_field(target_link) target = cmd.get_orig_attribute_value(target_link) parent_variables = {} parent_variables[f'__{target_link}'] = ( json.dumps(str(target.id)) ) parent_update_query = f''' UPDATE schema::{refcls.__name__} FILTER .name__internal = <str>$__parent_classname SET {{ {refdict.attr} -= ( SELECT DETACHED (schema::{target_field.type.__name__}) FILTER .id = <uuid>$__{target_link} ) }} ''' ref_name = context.get_referrer_name(refctx) parent_variables['__parent_classname'] = ( json.dumps(str(ref_name)) ) blocks.append((parent_update_query, parent_variables)) # We need to delete any links created via reflection_proxy layout = classlayout[mcls] proxy_links = [ link for link, layout_entry in layout.items() if layout_entry.reflection_proxy ] to_delete = ['D'] + [f'D.{link}' for link in proxy_links] operations = [f'(DELETE {x})' for x in to_delete] query = f''' WITH D := (SELECT schema::{mcls.__name__} FILTER .name__internal = <str>$__classname), SELECT {{{", ".join(operations)}}}; ''' variables = {'__classname': json.dumps(str(cmd.classname))} blocks.append((query, variables)) _descend( cmd, classlayout=classlayout, schema=schema, context=context, blocks=blocks, internal_schema_mode=internal_schema_mode, stdmode=stdmode, cmd_filter=defer_filter, )
Emit EdgeQL queries reflecting a DeleteObject command: unlink link-reflected objects from their referrer, then delete the object together with any links created via reflection_proxy.
write_meta_delete_object
python
geldata/gel
edb/schema/reflection/writer.py
https://github.com/geldata/gel/blob/master/edb/schema/reflection/writer.py
Apache-2.0
def parse_into( base_schema: s_schema.Schema, schema: s_schema.FlatSchema, data: Union[str, bytes], schema_class_layout: SchemaClassLayout, ) -> s_schema.FlatSchema: """Parse JSON-encoded schema objects and populate the schema with them. Args: schema: A schema instance to use as a starting point. data: A JSON-encoded schema object data as returned by an introspection query. schema_class_layout: A mapping describing schema class layout in the reflection, as returned from :func:`schema.reflection.structure.generate_structure`. Returns: A schema instance including objects encoded in the provided JSON sequence. """ id_to_type = {} id_to_data = {} name_to_id = {} shortname_to_id = collections.defaultdict(set) globalname_to_id = {} dict_of_dicts: Callable[ [], Dict[Tuple[Type[s_obj.Object], str], Dict[uuid.UUID, None]], ] = functools.partial(collections.defaultdict, dict) refs_to: Dict[ uuid.UUID, Dict[Tuple[Type[s_obj.Object], str], Dict[uuid.UUID, None]] ] = collections.defaultdict(dict_of_dicts) objects: Dict[uuid.UUID, Tuple[s_obj.Object, Dict[str, Any]]] = {} objid: uuid.UUID for entry in json.loads(data): _, _, clsname = entry['_tname'].rpartition('::') mcls = s_obj.ObjectMeta.maybe_get_schema_class(clsname) if mcls is None: raise ValueError( f'unexpected type in schema reflection: {clsname}') objid = uuidgen.UUID(entry['id']) objects[objid] = (mcls._create_from_id(objid), entry) refdict_updates = {} for objid, (obj, entry) in objects.items(): mcls = type(obj) name = s_name.name_from_string(entry['name__internal']) layout = schema_class_layout[mcls] if ( base_schema.has_object(objid) and not isinstance(obj, s_ver.BaseSchemaVersion) ): continue if isinstance(obj, s_obj.QualifiedObject): name_to_id[name] = objid else: name = s_name.UnqualName(str(name)) globalname_to_id[mcls, name] = objid if isinstance(obj, (s_func.Function, s_oper.Operator)): shortname = mcls.get_shortname_static(name) shortname_to_id[mcls, shortname].add(objid) id_to_type[objid] = type(obj).__name__ all_fields = mcls.get_schema_fields() objdata: List[Any] = [None] * len(all_fields) val: Any refid: uuid.UUID for k, v in entry.items(): desc = layout.get(k) if desc is None: continue fn = desc.fieldname field = all_fields.get(fn) if field is None: continue findex = field.index if desc.storage is not None: if v is None: pass elif desc.storage.ptrkind == 'link': refid = uuidgen.UUID(v['id']) newobj = objects.get(refid) if newobj is not None: val = newobj[0] else: val = base_schema.get_by_id(refid) objdata[findex] = val.schema_reduce() refs_to[val.id][mcls, fn][objid] = None elif desc.storage.ptrkind == 'multi link': ftype = mcls.get_field(fn).type if issubclass(ftype, s_obj.ObjectDict): refids = ftype._container( uuidgen.UUID(e['value']) for e in v) refkeys = tuple(e['name'] for e in v) val = ftype(refids, refkeys, _private_init=True) else: refids = ftype._container( uuidgen.UUID(e['id']) for e in v) val = ftype(refids, _private_init=True) objdata[findex] = val.schema_reduce() for refid in refids: refs_to[refid][mcls, fn][objid] = None elif desc.storage.shadow_ptrkind: val = entry[f'{k}__internal'] ftype = mcls.get_field(fn).type if val is not None and type(val) is not ftype: if issubclass(ftype, s_expr.Expression): val = _parse_expression(val, objid, k) for refid in val.refs.ids(schema): refs_to[refid][mcls, fn][objid] = None elif issubclass(ftype, s_expr.ExpressionList): exprs = [] for e_dict in val: e = _parse_expression(e_dict, objid, k) assert e.refs is not None for refid in e.refs.ids(schema): refs_to[refid][mcls, 
fn][objid] = None exprs.append(e) val = ftype(exprs) elif issubclass(ftype, s_expr.ExpressionDict): expr_dict = dict() for e_dict in val: e = _parse_expression( e_dict['expr'], objid, k) assert e.refs is not None for refid in e.refs.ids(schema): refs_to[refid][mcls, fn][objid] = None expr_dict[e_dict['name']] = e val = ftype(expr_dict) elif issubclass(ftype, s_obj.Object): val = val.id elif issubclass(ftype, s_name.Name): if isinstance(obj, s_obj.QualifiedObject): val = s_name.name_from_string(val) else: val = s_name.UnqualName(val) else: val = ftype(val) if issubclass(ftype, s_abc.Reducible): val = val.schema_reduce() objdata[findex] = val else: ftype = mcls.get_field(fn).type if type(v) is not ftype: if issubclass(ftype, verutils.Version): objdata[findex] = _parse_version(v) elif issubclass(ftype, s_name.Name): objdata[findex] = s_name.name_from_string(v) elif ( issubclass(ftype, checked.ParametricContainer) and ftype.types and len(ftype.types) == 1 ): # Coerce the elements in a parametric container # type. # XXX: Or should we do it in the container? subtyp = ftype.types[0] objdata[findex] = ftype( subtyp(x) for x in v) # type: ignore else: objdata[findex] = ftype(v) else: objdata[findex] = v elif desc.is_refdict: ftype = mcls.get_field(fn).type refids = ftype._container(uuidgen.UUID(e['id']) for e in v) for refid in refids: refs_to[refid][mcls, fn][objid] = None val = ftype(refids, _private_init=True) objdata[findex] = val.schema_reduce() if desc.properties: for e_dict in v: refdict_updates[uuidgen.UUID(e_dict['id'])] = { p: pv for p in desc.properties if (pv := e_dict[f'@{p}']) is not None } id_to_data[objid] = tuple(objdata) for objid, updates in refdict_updates.items(): if updates: sclass = s_obj.ObjectMeta.get_schema_class(id_to_type[objid]) updated_data = list(id_to_data[objid]) for fn, v in updates.items(): field = sclass.get_schema_field(fn) updated_data[field.index] = v id_to_data[objid] = tuple(updated_data) with schema._refs_to.mutate() as mm: for referred_id, refdata in refs_to.items(): try: refs = mm[referred_id] except KeyError: refs = immutables.Map(( (k, immutables.Map(r)) for k, r in refdata.items() )) else: refs_update = {} for k, referrers in refdata.items(): try: rt = refs[k] except KeyError: rt = immutables.Map(referrers) else: rt = rt.update(referrers) refs_update[k] = rt refs = refs.update(refs_update) mm[referred_id] = refs schema = schema._replace( id_to_type=schema._id_to_type.update(id_to_type), id_to_data=schema._id_to_data.update(id_to_data), name_to_id=schema._name_to_id.update(name_to_id), shortname_to_id=schema._shortname_to_id.update( (k, frozenset(v)) for k, v in shortname_to_id.items() ), globalname_to_id=schema._globalname_to_id.update(globalname_to_id), refs_to=mm.finish(), ) return schema
Parse JSON-encoded schema objects and populate the schema with them. Args: schema: A schema instance to use as a starting point. data: JSON-encoded schema object data, as returned by an introspection query. schema_class_layout: A mapping describing schema class layout in the reflection, as returned from :func:`schema.reflection.structure.generate_structure`. Returns: A schema instance including objects encoded in the provided JSON sequence.
parse_into
python
geldata/gel
edb/schema/reflection/reader.py
https://github.com/geldata/gel/blob/master/edb/schema/reflection/reader.py
Apache-2.0
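The central bookkeeping structure in parse_into is the refs_to back-reference index. A self-contained sketch of just that structure, using only the standard library (the class key here is a stand-in for a real schema class):

import collections
import functools
import uuid

# refs_to maps referred-object-id -> (referrer class, field name) ->
# {referrer id: None} (a dict used as an insertion-ordered set).
def make_refs_to():
    dict_of_dicts = functools.partial(collections.defaultdict, dict)
    return collections.defaultdict(dict_of_dicts)

refs_to = make_refs_to()
target, source = uuid.uuid4(), uuid.uuid4()
# Record: field 'target' of class `object` on `source` references `target`.
refs_to[target][object, 'target'][source] = None
assert source in refs_to[target][object, 'target']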
def __init__( self, *, nullable: typing.Optional[bool] = None, null_safe: bool = False, **kwargs, ) -> None: """Function call node. @param null_safe: Specifies whether this function is guaranteed to never return NULL on non-NULL input. """ if nullable is None and not null_safe: nullable = True super().__init__(nullable=nullable, **kwargs)
Function call node. @param null_safe: Specifies whether this function is guaranteed to never return NULL on non-NULL input.
__init__
python
geldata/gel
edb/pgsql/ast.py
https://github.com/geldata/gel/blob/master/edb/pgsql/ast.py
Apache-2.0
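The nullability defaulting rule reads off in isolation; a sketch of it outside the AST class, assuming nullable=None means the value is to be inferred elsewhere:

from typing import Optional

def infer_nullable(
    nullable: Optional[bool], null_safe: bool
) -> Optional[bool]:
    # A call that is not declared null-safe may return NULL on any input.
    if nullable is None and not null_safe:
        return True
    # Otherwise keep the caller's value (None: inferred elsewhere).
    return nullable

assert infer_nullable(None, null_safe=False) is True
assert infer_nullable(None, null_safe=True) is None
assert infer_nullable(False, null_safe=False) is False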
def apply( self, schema: s_schema.Schema, context: sd.CommandContext, ) -> s_schema.Schema: schema = super().apply(schema, context) ver_id = str(self.scls.id) ver_name = str(self.scls.get_name(schema)) ctx_backend_params = context.backend_runtime_params if ctx_backend_params is not None: backend_params = cast( params.BackendRuntimeParams, ctx_backend_params) else: backend_params = params.get_default_runtime_params() if not backend_params.has_create_database: key = f'{edbdef.EDGEDB_TEMPLATE_DB}metadata' lock = dbops.Query( trampoline.fixup_query(f''' SELECT json FROM edgedbinstdata_VER.instdata WHERE key = {ql(key)} FOR UPDATE INTO _dummy_text ''' )) elif backend_params.has_superuser_access: # Only superusers are generally allowed to make an UPDATE # lock on shared catalogs. lock = dbops.Query( f''' SELECT description FROM pg_catalog.pg_shdescription WHERE objoid = ( SELECT oid FROM pg_database WHERE datname = {V('edgedb')}.get_database_backend_name( {ql(edbdef.EDGEDB_TEMPLATE_DB)}) ) AND classoid = 'pg_database'::regclass::oid FOR UPDATE INTO _dummy_text ''' ) else: # Without superuser access we have to resort to lock polling. # This is racy, but is unfortunately the best we can do. lock = dbops.Query(f''' SELECT edgedb_VER.raise_on_not_null( ( SELECT 'locked' FROM pg_catalog.pg_locks WHERE locktype = 'object' AND classid = 'pg_database'::regclass::oid AND objid = ( SELECT oid FROM pg_database WHERE datname = {V('edgedb')}.get_database_backend_name( {ql(edbdef.EDGEDB_TEMPLATE_DB)}) ) AND mode = 'ShareUpdateExclusiveLock' AND granted AND pid != pg_backend_pid() ), 'serialization_failure', msg => ( 'Cannot serialize global DDL: ' || (SELECT version::text FROM {V('edgedb')}."_SysGlobalSchemaVersion") ) ) INTO _dummy_text ''') self.pgops.add(lock) expected_ver = self.get_orig_attribute_value('version') check = dbops.Query( f''' SELECT edgedb_VER.raise_on_not_null( (SELECT NULLIF( (SELECT version::text FROM {V('edgedb')}."_SysGlobalSchemaVersion" ), {ql(str(expected_ver))} )), 'serialization_failure', msg => ( 'Cannot serialize global DDL: ' || (SELECT version::text FROM {V('edgedb')}."_SysGlobalSchemaVersion") ) ) INTO _dummy_text ''' ) self.pgops.add(check) metadata = { ver_id: { 'id': ver_id, 'name': ver_name, 'version': str(self.scls.get_version(schema)), 'builtin': self.scls.get_builtin(schema), 'internal': self.scls.get_internal(schema), } } if backend_params.has_create_database: self.pgops.add( dbops.UpdateMetadataSection( dbops.DatabaseWithTenant(name=edbdef.EDGEDB_TEMPLATE_DB), section='GlobalSchemaVersion', metadata=metadata ) ) else: self.pgops.add( dbops.UpdateSingleDBMetadataSection( edbdef.EDGEDB_TEMPLATE_DB, section='GlobalSchemaVersion', metadata=metadata ) ) return schema
Reflect a change of the global schema version, taking a backend-appropriate lock (or lock polling without superuser access) and raising a serialization error if the current version does not match the expected one, so that global DDL is serialized.
apply
python
geldata/gel
edb/pgsql/delta.py
https://github.com/geldata/gel/blob/master/edb/pgsql/delta.py
Apache-2.0
def compile_edgeql_overloaded_function_body( self, func: s_funcs.Function, overloads: List[s_funcs.Function], ov_param_idx: int, schema: s_schema.Schema, context: sd.CommandContext, ) -> str: func_return_typemod = func.get_return_typemod(schema) set_returning = func_return_typemod is ql_ft.TypeModifier.SetOfType my_params = func.get_params(schema).objects(schema) param_name = my_params[ov_param_idx].get_parameter_name(schema) type_param_name = f'__{param_name}__type' cases = {} all_overloads = list(overloads) if not isinstance(self, DeleteFunction): all_overloads.append(func) for overload in all_overloads: ov_p = tuple(overload.get_params(schema).objects(schema)) ov_p_t = ov_p[ov_param_idx].get_type(schema) ov_body = self.compile_edgeql_function_body( overload, schema, context) if set_returning: case = ( f"(SELECT * FROM ({ov_body}) AS q " f"WHERE ancestor = {ql(str(ov_p_t.id))})" ) else: case = ( f"WHEN ancestor = {ql(str(ov_p_t.id))} " f"THEN \n({ov_body})" ) cases[ov_p_t] = case impl_ids = ', '.join(f'{ql(str(t.id))}::uuid' for t in cases) branches = list(cases.values()) # N.B: edgedb_VER.raise and coalesce are used below instead of # raise_on_null, because the latter somehow results in a # significantly more complex query plan. matching_impl = f""" coalesce( ( SELECT ancestor FROM (SELECT {qi(type_param_name)} AS ancestor, -1 AS index UNION ALL SELECT target AS ancestor, index FROM edgedb._object_ancestors WHERE source = {qi(type_param_name)} ) a WHERE ancestor IN ({impl_ids}) ORDER BY index LIMIT 1 ), edgedb.raise( NULL::uuid, 'assert_failure', msg => format( 'unhandled object type %s in overloaded function', {qi(type_param_name)} ) ) ) AS impl(ancestor) """ if set_returning: arms = "\nUNION ALL\n".join(branches) return f""" SELECT q.* FROM {matching_impl}, LATERAL ( {arms} ) AS q """ else: arms = "\n".join(branches) return f""" SELECT (CASE {arms} END) FROM {matching_impl} """
Compile the body of an overloaded EdgeQL function into SQL that dispatches on the ancestry of the overloaded object-type parameter, raising an assertion failure for unhandled object types.
compile_edgeql_overloaded_function_body
python
geldata/gel
edb/pgsql/delta.py
https://github.com/geldata/gel/blob/master/edb/pgsql/delta.py
Apache-2.0
def sql_strict_consistency_check( self, cobj: s_funcs.CallableObject, func: str, schema: s_schema.Schema, ) -> dbops.Command: fname = cobj.get_verbosename(schema) # impl_is_strict means that the function is strict in all # singleton arguments, so we don't need to do the check if # no such arguments exist. if ( not cobj.get_impl_is_strict(schema) or not cobj.get_params(schema).has_type_mod( schema, ql_ft.TypeModifier.SingletonType ) ): return dbops.CommandGroup() if '.' in func: ns, func = func.split('.') else: ns = 'pg_catalog' f_test = textwrap.dedent(f'''\ COALESCE(( SELECT bool_and(proisstrict) FROM pg_proc INNER JOIN pg_namespace ON pg_namespace.oid = pronamespace WHERE proname = {ql(func)} AND nspname = {ql(ns)} ), false) ''') check = dbops.Query(text=f''' PERFORM edgedb_VER.raise_on_null( NULLIF( false, {f_test} ), 'invalid_function_definition', msg => format( '%s is declared to have a strict impl but does not', {ql(fname)} ), hint => ( 'Add `impl_is_strict := false` to the declaration.' ) ); ''') return check
Return a check asserting that the SQL function backing a callable declared with impl_is_strict is actually declared STRICT in Postgres.
sql_strict_consistency_check
python
geldata/gel
edb/pgsql/delta.py
https://github.com/geldata/gel/blob/master/edb/pgsql/delta.py
Apache-2.0
def _undo_everything( self, schema: s_schema.Schema, context: sd.CommandContext, objs: Tuple[so.Object, ...], props: Dict[s_props.Property, s_types.TypeShell], ) -> s_schema.Schema: """Rewrite the type of everything that uses this scalar dangerously. See _get_problematic_refs above for details. """ # First we need to strip out any default value that might reference # one of the functions we are going to delete. # We also create any new types, in this pass. cmd = sd.DeltaRoot() for prop, new_typ in props.items(): try: cmd.add(new_typ.as_create_delta(schema)) except errors.UnsupportedFeatureError: pass if prop.get_default(schema): delta_alter, cmd_alter, _alter_context = prop.init_delta_branch( schema, context, cmdtype=sd.AlterObject) cmd_alter.set_attribute_value('default', None) cmd.add(delta_alter) cmd.apply(schema, context) acmd = CommandMeta.adapt(cmd) schema = acmd.apply(schema, context) self.pgops.update(acmd.get_subcommands()) # Now process all the objects in the appropriate order for obj in objs: if isinstance(obj, s_funcs.Function): # Force function deletions at the SQL level without ever # bothering to remove them from our schema. fc = FunctionCommand() variadic = obj.get_params(schema).find_variadic(schema) self.pgops.add( dbops.DropFunction( name=fc.get_pgname(obj, schema), args=fc.compile_args(obj, schema), has_variadic=variadic is not None, ) ) elif isinstance(obj, s_constr.Constraint): self.pgops.add(ConstraintCommand.delete_constraint(obj, schema)) elif isinstance(obj, s_indexes.Index): self.pgops.add(DeleteIndex.delete_index(obj, schema, context)) elif isinstance(obj, s_types.Tuple): self.pgops.add(dbops.DropCompositeType( name=common.get_backend_name(schema, obj, catenate=False), )) elif isinstance(obj, s_scalars.ScalarType): self.pgops.add(DeleteScalarType.delete_scalar(obj, schema)) elif isinstance(obj, s_props.Property): new_typ = props[obj] delta_alter, cmd_alter, _alter_context = obj.init_delta_branch( schema, context, cmdtype=sd.AlterObject) cmd_alter.set_attribute_value('target', new_typ) cmd_alter.set_attribute_value('default', None) delta_alter.apply(schema, context) acmd2 = CommandMeta.adapt(delta_alter) schema = acmd2.apply(schema, context) self.pgops.add(acmd2) return schema
Rewrite the type of everything that uses this scalar dangerously. See _get_problematic_refs above for details.
_undo_everything
python
geldata/gel
edb/pgsql/delta.py
https://github.com/geldata/gel/blob/master/edb/pgsql/delta.py
Apache-2.0
def _redo_everything( self, schema: s_schema.Schema, orig_schema: s_schema.Schema, context: sd.CommandContext, objs: Tuple[so.Object, ...], props: Dict[s_props.Property, s_types.TypeShell], ) -> s_schema.Schema: """Restore the type of everything that uses this scalar dangerously. See _get_problematic_refs above for details. """ for obj in reversed(objs): if isinstance(obj, s_funcs.Function): # Super hackily recreate the functions fc = CreateFunction( classname=obj.get_name(schema)) # type: ignore for f in ('language', 'params', 'return_type'): fc.set_attribute_value(f, obj.get_field_value(schema, f)) self.pgops.update(fc.make_op(obj, schema, context)) elif isinstance(obj, s_constr.Constraint): self.pgops.add(ConstraintCommand.create_constraint( self, obj, schema, context, create_triggers_if_needed=False, )) elif isinstance(obj, s_indexes.Index): self.pgops.add( CreateIndex.create_index(obj, orig_schema, context)) elif isinstance(obj, s_types.Tuple): self.pgops.add(CreateTuple.create_tuple(obj, orig_schema)) elif isinstance(obj, s_scalars.ScalarType): self.pgops.add( CreateScalarType.create_scalar( obj, obj.get_default(schema), orig_schema, context ) ) elif isinstance(obj, s_props.Property): new_typ = props[obj] delta_alter, cmd_alter, _ = obj.init_delta_branch( schema, context, cmdtype=sd.AlterObject) cmd_alter.set_attribute_value( 'target', obj.get_target(orig_schema)) delta_alter.apply(schema, context) acmd = CommandMeta.adapt(delta_alter) schema = acmd.apply(schema, context) self.pgops.add(acmd) # Restore defaults and prune newly created types cmd = sd.DeltaRoot() for prop, new_typ in props.items(): rnew_typ = new_typ.resolve(schema) if delete := rnew_typ.as_type_delete_if_unused(schema): cmd.add_caused(delete) delta_alter, cmd_alter, _ = prop.init_delta_branch( schema, context, cmdtype=sd.AlterObject) cmd_alter.set_attribute_value( 'default', prop.get_default(orig_schema)) cmd.add(delta_alter) # do an apply of the schema-level command to force it to canonicalize, # which prunes out duplicate deletions # # HACK: Clear out the context's stack so that # context.canonical is false while doing this. stack, context.stack = context.stack, [] cmd.apply(schema, context) context.stack = stack for sub in cmd.get_subcommands(): acmd2 = CommandMeta.adapt(sub) schema = acmd2.apply(schema, context) self.pgops.add(acmd2) return schema
Restore the type of everything that uses this scalar dangerously. See _get_problematic_refs above for details.
_redo_everything
python
geldata/gel
edb/pgsql/delta.py
https://github.com/geldata/gel/blob/master/edb/pgsql/delta.py
Apache-2.0
def drop_dependant_func_cache(pg_type: Tuple[str, ...]) -> dbops.PLQuery: if len(pg_type) == 1: types_cte = f''' SELECT pt.oid AS oid FROM pg_type pt WHERE pt.typname = {ql(pg_type[0])} OR pt.typname = {ql('_' + pg_type[0])}\ ''' else: types_cte = f''' SELECT pt.oid AS oid FROM pg_type pt JOIN pg_namespace pn ON pt.typnamespace = pn.oid WHERE pn.nspname = {ql(pg_type[0])} AND ( pt.typname = {ql(pg_type[1])} OR pt.typname = {ql('_' + pg_type[1])} )\ ''' drop_func_cache_sql = textwrap.dedent(f''' DECLARE qc RECORD; BEGIN FOR qc IN WITH types AS ({types_cte} ), class AS ( SELECT pc.oid AS oid FROM pg_class pc JOIN pg_namespace pn ON pc.relnamespace = pn.oid WHERE pn.nspname = 'pg_catalog' AND pc.relname = 'pg_type' ) SELECT substring(p.proname FROM 6)::uuid AS key FROM pg_proc p JOIN pg_depend d ON d.objid = p.oid JOIN types t ON d.refobjid = t.oid JOIN class c ON d.refclassid = c.oid WHERE p.proname LIKE '__qh_%' LOOP PERFORM edgedb_VER."_evict_query_cache"(qc.key); END LOOP; END; ''') return dbops.PLQuery(drop_func_cache_sql)
Return a PL/pgSQL query that evicts all cached query functions depending on the given Postgres type or its array type.
drop_dependant_func_cache
python
geldata/gel
edb/pgsql/delta.py
https://github.com/geldata/gel/blob/master/edb/pgsql/delta.py
Apache-2.0
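Postgres names the array type of t as _t, which is why the query above matches both spellings of typname. A sketch of just that matching rule; pg_type_names is a hypothetical helper, not part of the module:

def pg_type_names(pg_type: tuple) -> set:
    # Both the base type and its implicitly created array type.
    base = pg_type[-1]
    return {base, '_' + base}

assert pg_type_names(('timestamptz',)) == {'timestamptz', '_timestamptz'}
assert pg_type_names(('edgedbt', 'bigint_t')) == {'bigint_t', '_bigint_t'}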
def get_reindex_sql( obj: s_objtypes.ObjectType, restore_desc: sertypes.ShapeDesc, schema: s_schema.Schema, ) -> Optional[str]: """Generate SQL statement that repopulates the index after a restore. Currently this only applies to FTS indexes, and it only fires if __fts_document__ is not in the dump (which it wasn't prior to 5.0). AI index columns might also be missing if they were made with a 5.0rc1 dump, but the indexer will pick them up without our intervention. """ (fts_index, _) = s_indexes.get_effective_object_index( schema, obj, sn.QualName("std::fts", "index") ) if fts_index and '__fts_document__' not in restore_desc.fields: options = get_index_compile_options(fts_index, schema, {}, None) cmd = deltafts.update_fts_document(fts_index, options, schema) return cmd.code() return None
Generate SQL statement that repopulates the index after a restore. Currently this only applies to FTS indexes, and it only fires if __fts_document__ is not in the dump (which it wasn't prior to 5.0). AI index columns might also be missing if they were made with a 5.0rc1 dump, but the indexer will pick them up without our intervention.
get_reindex_sql
python
geldata/gel
edb/pgsql/delta.py
https://github.com/geldata/gel/blob/master/edb/pgsql/delta.py
Apache-2.0
def _compute_version(self, ext_spec: str) -> None: '''Emits a Query to compute the version. Dumps it in _dummy_text. ''' ext, vclauses = _parse_spec(ext_spec) # Dynamically select the highest version extension that matches # the provided version specification. lclauses = [] for op, ver in vclauses: pver = f"string_to_array({ql(ver)}, '.')::int8[]" assert op in {'=', '>', '>=', '<', '<='} lclauses.append(f'v.split {op} {pver}') cond = ' and '.join(lclauses) if lclauses else 'true' ver_regexp = r'^\d+(\.\d+)+$' qry = textwrap.dedent(f'''\ with v as ( select name, version, string_to_array(version, '.')::int8[] as split from pg_available_extension_versions where name = {ql(ext)} and version ~ '{ver_regexp}' ) select edgedb_VER.raise_on_null( ( select v.version from v where {cond} order by split desc limit 1 ), 'feature_not_supported', msg => ( 'could not find extension satisfying ' || {ql(ext_spec)} || ': ' || coalesce( 'only found versions ' || (select string_agg(v.version, ', ' order by v.split) from v), 'extension not found' ) ) ) into _dummy_text; ''') self.pgops.add(dbops.Query(qry))
Emits a Query to compute the version. Dumps it in _dummy_text.
_compute_version
python
geldata/gel
edb/pgsql/delta.py
https://github.com/geldata/gel/blob/master/edb/pgsql/delta.py
Apache-2.0
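A rough Python analogue of the SQL version selection above: versions split into integer tuples compare lexicographically (mirroring string_to_array(version, '.')::int8[]), the clauses filter, and the highest match wins. The helper name is illustrative:

import operator

OPS = {'=': operator.eq, '>': operator.gt, '>=': operator.ge,
       '<': operator.lt, '<=': operator.le}

def pick_version(available, clauses):
    def key(v):
        return tuple(int(p) for p in v.split('.'))
    matching = [v for v in available
                if all(OPS[op](key(v), key(ref)) for op, ref in clauses)]
    return max(matching, key=key, default=None)

assert pick_version(['3.3.2', '3.4.0', '3.4.1'], [('>=', '3.4')]) == '3.4.1'
assert pick_version(['3.3.2'], [('>=', '3.4')]) is None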
def has_table( obj: Optional[s_obj.InheritingObject], schema: s_schema.Schema ) -> bool: """Returns True for all schema objects that need a postgres table""" assert obj if isinstance(obj, s_objtypes.ObjectType): return not ( obj.is_compound_type(schema) or obj.get_is_derived(schema) or obj.is_view(schema) ) assert isinstance(obj, s_pointers.Pointer) if obj.is_pure_computable(schema) or obj.get_is_derived(schema): return False elif obj.is_non_concrete(schema): return ( not isinstance(obj, s_properties.Property) and str(obj.get_name(schema)) != 'std::link' ) elif obj.is_link_property(schema): return not obj.singular(schema) elif not has_table(obj.get_source(schema), schema): return False else: ptr_stor_info = get_pointer_storage_info( obj, resolve_type=False, schema=schema, link_bias=True) return ( ptr_stor_info is not None and ptr_stor_info.table_type == 'link' )
Returns True for all schema objects that need a postgres table
has_table
python
geldata/gel
edb/pgsql/types.py
https://github.com/geldata/gel/blob/master/edb/pgsql/types.py
Apache-2.0
def qtl(t: tuple[str, ...]) -> str: """Quote type literal""" return ql(f'{t[0]}.{t[1]}') if len(t) == 2 else ql(f'pg_catalog.{t[0]}')
Quote type literal
qtl
python
geldata/gel
edb/pgsql/metaschema.py
https://github.com/geldata/gel/blob/master/edb/pgsql/metaschema.py
Apache-2.0
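qtl composes with ql, the SQL string-literal quoter from edb.pgsql.common; a stand-in ql is shown here to make the behavior concrete (the stand-in's quoting rule is an assumption about standard Postgres literal quoting):

def ql(s: str) -> str:
    # Stand-in quoter: wrap in single quotes, double embedded quotes.
    return "'" + s.replace("'", "''") + "'"

def qtl(t: tuple) -> str:
    return ql(f'{t[0]}.{t[1]}') if len(t) == 2 else ql(f'pg_catalog.{t[0]}')

assert qtl(('edgedbt', 'timestamptz_t')) == "'edgedbt.timestamptz_t'"
assert qtl(('text',)) == "'pg_catalog.text'"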
def __init__(self, config_spec: edbconfig.Spec) -> None: variants_list = [] for setting in config_spec.values(): if ( setting.backend_setting and isinstance(setting.type, type) and issubclass(setting.type, statypes.ScalarType) ): conv_expr = setting.type.to_frontend_expr('"value"->>0') if conv_expr is not None: variants_list.append(f""" WHEN {ql(setting.backend_setting)} THEN to_jsonb({conv_expr}) """) variants = "\n".join(variants_list) text = f""" SELECT ( CASE "setting_name" {variants} ELSE "value" END ) """ super().__init__( name=('edgedb', '_postgres_json_config_value_to_fe_config_value'), args=[ ('setting_name', ('text',)), ('value', ('jsonb',)) ], returns=('jsonb',), volatility='immutable', text=text, )
Define edgedb._postgres_json_config_value_to_fe_config_value, converting a backend setting value into its frontend config representation.
__init__
python
geldata/gel
edb/pgsql/metaschema.py
https://github.com/geldata/gel/blob/master/edb/pgsql/metaschema.py
Apache-2.0
def __init__(self, config_spec: edbconfig.Spec) -> None: backend_settings = {} for setting_name in config_spec: setting = config_spec[setting_name] if setting.backend_setting and not setting.system: backend_settings[setting_name] = setting.backend_setting variants_list = [] for setting_name, backend_setting_name in backend_settings.items(): setting = config_spec[setting_name] valql = '"value"->>0' if ( isinstance(setting.type, type) and issubclass(setting.type, statypes.ScalarType) ): valql = setting.type.to_backend_expr(valql) variants_list.append(f''' WHEN "name" = {ql(setting_name)} THEN pg_catalog.set_config( {ql(backend_setting_name)}::text, {valql}, false ) ''') ext_config = ''' SELECT pg_catalog.set_config( (s.val->>'backend_setting')::text, "value"->>0, false ) FROM edgedbinstdata_VER.instdata as id, LATERAL jsonb_each(id.json) AS s(key, val) WHERE id.key = 'configspec_ext' AND s.key = "name" ''' variants = "\n".join(variants_list) text = f''' SELECT ( CASE WHEN "name" = any( ARRAY[{",".join(ql(str(bs)) for bs in backend_settings)}] ) THEN ( CASE WHEN (CASE {variants} END) IS NULL THEN "name" ELSE "name" END ) WHEN "name" LIKE '%::%' THEN CASE WHEN ({ext_config}) IS NULL THEN "name" ELSE "name" END ELSE "name" END ) ''' super().__init__( name=('edgedb', '_apply_session_config'), args=[ ('name', ('text',)), ('value', ('jsonb',)), ], returns=('text',), language='sql', volatility='volatile', text=text, )
Define edgedb._apply_session_config, applying a session-level config value to the corresponding backend setting via pg_catalog.set_config, including extension-defined settings looked up from instdata.
__init__
python
geldata/gel
edb/pgsql/metaschema.py
https://github.com/geldata/gel/blob/master/edb/pgsql/metaschema.py
Apache-2.0
def get_fixed_bootstrap_commands() -> dbops.CommandGroup: """Create metaschema objects that are truly global""" cmds = [ dbops.CreateSchema(name='edgedb'), dbops.CreateSchema(name='edgedbt'), dbops.CreateSchema(name='edgedbpub'), dbops.CreateSchema(name='edgedbstd'), dbops.CreateSchema(name='edgedbinstdata'), dbops.CreateTable( DBConfigTable(), ), # TODO: SHOULD THIS BE VERSIONED? dbops.CreateTable(QueryCacheTable()), dbops.CreateDomain(BigintDomain()), dbops.CreateDomain(ConfigMemoryDomain()), dbops.CreateDomain(TimestampTzDomain()), dbops.CreateDomain(TimestampDomain()), dbops.CreateDomain(DateDomain()), dbops.CreateDomain(DurationDomain()), dbops.CreateDomain(RelativeDurationDomain()), dbops.CreateDomain(DateDurationDomain()), dbops.CreateEnum(SysConfigSourceType()), dbops.CreateEnum(SysConfigScopeType()), dbops.CreateCompositeType(SysConfigValueType()), dbops.CreateCompositeType(SysConfigEntryType()), dbops.CreateRange(Float32Range()), dbops.CreateRange(Float64Range()), dbops.CreateRange(DatetimeRange()), dbops.CreateRange(LocalDatetimeRange()), ] commands = dbops.CommandGroup() commands.add_commands(cmds) return commands
Create metaschema objects that are truly global
get_fixed_bootstrap_commands
python
geldata/gel
edb/pgsql/metaschema.py
https://github.com/geldata/gel/blob/master/edb/pgsql/metaschema.py
Apache-2.0
def format_fields( schema: s_schema.Schema, obj: s_sources.Source, fields: dict[str, str], ) -> str: """Format a dictionary of column mappings for database views The reason we do it this way is because, since these views are overwriting existing temporary views, we need to put all the columns in the same order as the original view. """ ptrs = [obj.getptr(schema, s_name.UnqualName(s)) for s in fields] # Sort by the order the pointers were added to the source. # N.B: This only works because we are using the original in-memory # schema. If it was loaded from reflection it probably wouldn't # work. ptr_indexes = { v: i for i, v in enumerate(obj.get_pointers(schema).objects(schema)) } ptrs.sort(key=( lambda p: (not p.is_link_source_property(schema), ptr_indexes[p]) )) cols = [] for ptr in ptrs: name = ptr.get_shortname(schema).name val = fields[name] sname = qi(ptr_col_name(schema, obj, name)) cols.append(f' {val} AS {sname}') return ',\n'.join(cols)
Format a dictionary of column mappings for database views The reason we do it this way is because, since these views are overwriting existing temporary views, we need to put all the columns in the same order as the original view.
format_fields
python
geldata/gel
edb/pgsql/metaschema.py
https://github.com/geldata/gel/blob/master/edb/pgsql/metaschema.py
Apache-2.0
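The ordering rule is the subtle part of format_fields: columns for link source properties sort first, and everything else follows pointer declaration order. A toy reduction of that sort key using plain strings (pointer names and indexes illustrative):

ptr_indexes = {'target': 0, 'source': 1, 'value': 2}
link_source_props = {'source'}

ordered = sorted(
    ptr_indexes,
    # False sorts before True, so source properties come first;
    # declaration order breaks ties.
    key=lambda p: (p not in link_source_props, ptr_indexes[p]),
)
assert ordered == ['source', 'target', 'value']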
def _generate_branch_views(schema: s_schema.Schema) -> List[dbops.View]: Branch = schema.get('sys::Branch', type=s_objtypes.ObjectType) annos = Branch.getptr( schema, s_name.UnqualName('annotations'), type=s_links.Link) int_annos = Branch.getptr( schema, s_name.UnqualName('annotations__internal'), type=s_links.Link) view_fields = { 'id': "((d.description)->>'id')::uuid", 'internal': f"""(CASE WHEN (edgedb_VER.get_backend_capabilities() & {int(params.BackendCapabilities.CREATE_DATABASE)}) != 0 THEN datname IN ( edgedb_VER.get_database_backend_name( {ql(defines.EDGEDB_TEMPLATE_DB)}), edgedb_VER.get_database_backend_name( {ql(defines.EDGEDB_SYSTEM_DB)}) ) ELSE False END )""", 'name': ( 'edgedb_VER.get_database_frontend_name(datname) COLLATE "default"' ), 'name__internal': ( 'edgedb_VER.get_database_frontend_name(datname) COLLATE "default"' ), 'computed_fields': 'ARRAY[]::text[]', 'builtin': "((d.description)->>'builtin')::bool", 'last_migration': "(d.description)->>'last_migration'", } view_query = f''' SELECT {format_fields(schema, Branch, view_fields)} FROM pg_database dat CROSS JOIN LATERAL ( SELECT edgedb_VER.shobj_metadata(dat.oid, 'pg_database') AS description ) AS d WHERE (d.description)->>'id' IS NOT NULL AND (d.description)->>'tenant_id' = edgedb_VER.get_backend_tenant_id() ''' annos_link_fields = { 'source': "((d.description)->>'id')::uuid", 'target': "(annotations->>'id')::uuid", 'value': "(annotations->>'value')::text", 'owned': "(annotations->>'owned')::bool", } annos_link_query = f''' SELECT {format_fields(schema, annos, annos_link_fields)} FROM pg_database dat CROSS JOIN LATERAL ( SELECT edgedb_VER.shobj_metadata(dat.oid, 'pg_database') AS description ) AS d CROSS JOIN LATERAL ROWS FROM ( jsonb_array_elements((d.description)->'annotations') ) AS annotations ''' int_annos_link_fields = { 'source': "((d.description)->>'id')::uuid", 'target': "(annotations->>'id')::uuid", 'owned': "(annotations->>'owned')::bool", } int_annos_link_query = f''' SELECT {format_fields(schema, int_annos, int_annos_link_fields)} FROM pg_database dat CROSS JOIN LATERAL ( SELECT edgedb_VER.shobj_metadata(dat.oid, 'pg_database') AS description ) AS d CROSS JOIN LATERAL ROWS FROM ( jsonb_array_elements( (d.description)->'annotations__internal' ) ) AS annotations ''' objects = { Branch: view_query, annos: annos_link_query, int_annos: int_annos_link_query, } views: list[dbops.View] = [] for obj, query in objects.items(): tabview = trampoline.VersionedView( name=tabname(schema, obj), query=query) views.append(tabview) return views
Generate the sys::Branch introspection views over pg_database and its metadata annotations.
_generate_branch_views
python
geldata/gel
edb/pgsql/metaschema.py
https://github.com/geldata/gel/blob/master/edb/pgsql/metaschema.py
Apache-2.0
def _generate_extension_views(schema: s_schema.Schema) -> List[dbops.View]: ExtPkg = schema.get('sys::ExtensionPackage', type=s_objtypes.ObjectType) annos = ExtPkg.getptr( schema, s_name.UnqualName('annotations'), type=s_links.Link) int_annos = ExtPkg.getptr( schema, s_name.UnqualName('annotations__internal'), type=s_links.Link) ver = ExtPkg.getptr( schema, s_name.UnqualName('version'), type=s_props.Property) ver_t = common.get_backend_name( schema, not_none(ver.get_target(schema)), catenate=False, ) view_query_fields = { 'id': "(e.value->>'id')::uuid", 'name': "(e.value->>'name')", 'name__internal': "(e.value->>'name__internal')", 'script': "(e.value->>'script')", 'sql_extensions': ''' COALESCE( (SELECT array_agg(edgedb_VER.jsonb_extract_scalar(q.v, 'string')) FROM jsonb_array_elements( e.value->'sql_extensions' ) AS q(v)), ARRAY[]::text[] ) ''', 'dependencies': ''' COALESCE( (SELECT array_agg(edgedb_VER.jsonb_extract_scalar(q.v, 'string')) FROM jsonb_array_elements( e.value->'dependencies' ) AS q(v)), ARRAY[]::text[] ) ''', 'ext_module': "(e.value->>'ext_module')", 'sql_setup_script': "(e.value->>'sql_setup_script')", 'sql_teardown_script': "(e.value->>'sql_teardown_script')", 'computed_fields': 'ARRAY[]::text[]', 'builtin': "(e.value->>'builtin')::bool", 'internal': "(e.value->>'internal')::bool", 'version': f''' ( (e.value->'version'->>'major')::int, (e.value->'version'->>'minor')::int, (e.value->'version'->>'stage')::text, (e.value->'version'->>'stage_no')::int, COALESCE( (SELECT array_agg(q.v::text) FROM jsonb_array_elements( e.value->'version'->'local' ) AS q(v)), ARRAY[]::text[] ) )::{qt(ver_t)} ''', } view_query = f''' SELECT {format_fields(schema, ExtPkg, view_query_fields)} FROM jsonb_each( edgedb_VER.get_database_metadata( {ql(defines.EDGEDB_TEMPLATE_DB)} ) -> 'ExtensionPackage' ) AS e ''' annos_link_fields = { 'source': "(e.value->>'id')::uuid", 'target': "(annotations->>'id')::uuid", 'value': "(annotations->>'value')::text", 'owned': "(annotations->>'owned')::bool", } int_annos_link_fields = { 'source': "(e.value->>'id')::uuid", 'target': "(annotations->>'id')::uuid", 'owned': "(annotations->>'owned')::bool", } annos_link_query = f''' SELECT {format_fields(schema, annos, annos_link_fields)} FROM jsonb_each( edgedb_VER.get_database_metadata( {ql(defines.EDGEDB_TEMPLATE_DB)} ) -> 'ExtensionPackage' ) AS e CROSS JOIN LATERAL ROWS FROM ( jsonb_array_elements(e.value->'annotations') ) AS annotations ''' int_annos_link_query = f''' SELECT {format_fields(schema, int_annos, int_annos_link_fields)} FROM jsonb_each( edgedb_VER.get_database_metadata( {ql(defines.EDGEDB_TEMPLATE_DB)} ) -> 'ExtensionPackage' ) AS e CROSS JOIN LATERAL ROWS FROM ( jsonb_array_elements(e.value->'annotations__internal') ) AS annotations ''' objects = { ExtPkg: view_query, annos: annos_link_query, int_annos: int_annos_link_query, } views: list[dbops.View] = [] for obj, query in objects.items(): tabview = trampoline.VersionedView( name=tabname(schema, obj), query=query) views.append(tabview) return views
Generate the sys::ExtensionPackage introspection views over the template database metadata.
_generate_extension_views
python
geldata/gel
edb/pgsql/metaschema.py
https://github.com/geldata/gel/blob/master/edb/pgsql/metaschema.py
Apache-2.0
def _generate_extension_migration_views( schema: s_schema.Schema ) -> List[dbops.View]: ExtPkgMigration = schema.get( 'sys::ExtensionPackageMigration', type=s_objtypes.ObjectType) annos = ExtPkgMigration.getptr( schema, s_name.UnqualName('annotations'), type=s_links.Link) int_annos = ExtPkgMigration.getptr( schema, s_name.UnqualName('annotations__internal'), type=s_links.Link) from_ver = ExtPkgMigration.getptr( schema, s_name.UnqualName('from_version'), type=s_props.Property) ver_t = common.get_backend_name( schema, not_none(from_ver.get_target(schema)), catenate=False, ) view_query_fields = { 'id': "(e.value->>'id')::uuid", 'name': "(e.value->>'name')", 'name__internal': "(e.value->>'name__internal')", 'script': "(e.value->>'script')", 'sql_early_script': "(e.value->>'sql_early_script')", 'sql_late_script': "(e.value->>'sql_late_script')", 'computed_fields': 'ARRAY[]::text[]', 'builtin': "(e.value->>'builtin')::bool", 'internal': "(e.value->>'internal')::bool", # XXX: code duplication here 'from_version': f''' ( (e.value->'from_version'->>'major')::int, (e.value->'from_version'->>'minor')::int, (e.value->'from_version'->>'stage')::text, (e.value->'from_version'->>'stage_no')::int, COALESCE( (SELECT array_agg(q.v::text) FROM jsonb_array_elements( e.value->'from_version'->'local' ) AS q(v)), ARRAY[]::text[] ) )::{qt(ver_t)} ''', 'to_version': f''' ( (e.value->'to_version'->>'major')::int, (e.value->'to_version'->>'minor')::int, (e.value->'to_version'->>'stage')::text, (e.value->'to_version'->>'stage_no')::int, COALESCE( (SELECT array_agg(q.v::text) FROM jsonb_array_elements( e.value->'to_version'->'local' ) AS q(v)), ARRAY[]::text[] ) )::{qt(ver_t)} ''', } view_query = f''' SELECT {format_fields(schema, ExtPkgMigration, view_query_fields)} FROM jsonb_each( edgedb_VER.get_database_metadata( {ql(defines.EDGEDB_TEMPLATE_DB)} ) -> 'ExtensionPackageMigration' ) AS e ''' annos_link_fields = { 'source': "(e.value->>'id')::uuid", 'target': "(annotations->>'id')::uuid", 'value': "(annotations->>'value')::text", 'owned': "(annotations->>'owned')::bool", } int_annos_link_fields = { 'source': "(e.value->>'id')::uuid", 'target': "(annotations->>'id')::uuid", 'owned': "(annotations->>'owned')::bool", } annos_link_query = f''' SELECT {format_fields(schema, annos, annos_link_fields)} FROM jsonb_each( edgedb_VER.get_database_metadata( {ql(defines.EDGEDB_TEMPLATE_DB)} ) -> 'ExtensionPackageMigration' ) AS e CROSS JOIN LATERAL ROWS FROM ( jsonb_array_elements(e.value->'annotations') ) AS annotations ''' int_annos_link_query = f''' SELECT {format_fields(schema, int_annos, int_annos_link_fields)} FROM jsonb_each( edgedb_VER.get_database_metadata( {ql(defines.EDGEDB_TEMPLATE_DB)} ) -> 'ExtensionPackageMigration' ) AS e CROSS JOIN LATERAL ROWS FROM ( jsonb_array_elements(e.value->'annotations__internal') ) AS annotations ''' objects = { ExtPkgMigration: view_query, annos: annos_link_query, int_annos: int_annos_link_query, } views: list[dbops.View] = [] for obj, query in objects.items(): tabview = trampoline.VersionedView( name=tabname(schema, obj), query=query) views.append(tabview) return views
Generate the sys::ExtensionPackageMigration introspection views over the template database metadata.
_generate_extension_migration_views
python
geldata/gel
edb/pgsql/metaschema.py
https://github.com/geldata/gel/blob/master/edb/pgsql/metaschema.py
Apache-2.0
def _generate_role_views(schema: s_schema.Schema) -> List[dbops.View]: Role = schema.get('sys::Role', type=s_objtypes.ObjectType) member_of = Role.getptr( schema, s_name.UnqualName('member_of'), type=s_links.Link) bases = Role.getptr( schema, s_name.UnqualName('bases'), type=s_links.Link) ancestors = Role.getptr( schema, s_name.UnqualName('ancestors'), type=s_links.Link) annos = Role.getptr( schema, s_name.UnqualName('annotations'), type=s_links.Link) int_annos = Role.getptr( schema, s_name.UnqualName('annotations__internal'), type=s_links.Link) superuser = f''' a.rolsuper OR EXISTS ( SELECT FROM pg_auth_members m INNER JOIN pg_catalog.pg_roles g ON (m.roleid = g.oid) WHERE m.member = a.oid AND g.rolname = edgedb_VER.get_role_backend_name( {ql(defines.EDGEDB_SUPERGROUP)} ) ) ''' view_query_fields = { 'id': "((d.description)->>'id')::uuid", 'name': "(d.description)->>'name'", 'name__internal': "(d.description)->>'name'", 'superuser': f'{superuser}', 'abstract': 'False', 'is_derived': 'False', 'inherited_fields': 'ARRAY[]::text[]', 'computed_fields': 'ARRAY[]::text[]', 'builtin': "((d.description)->>'builtin')::bool", 'internal': 'False', 'password': "(d.description)->>'password_hash'", } view_query = f''' SELECT {format_fields(schema, Role, view_query_fields)} FROM pg_catalog.pg_roles AS a CROSS JOIN LATERAL ( SELECT edgedb_VER.shobj_metadata(a.oid, 'pg_authid') AS description ) AS d WHERE (d.description)->>'id' IS NOT NULL AND (d.description)->>'tenant_id' = edgedb_VER.get_backend_tenant_id() ''' member_of_link_query_fields = { 'source': "((d.description)->>'id')::uuid", 'target': "((md.description)->>'id')::uuid", } member_of_link_query = f''' SELECT {format_fields(schema, member_of, member_of_link_query_fields)} FROM pg_catalog.pg_roles AS a CROSS JOIN LATERAL ( SELECT edgedb_VER.shobj_metadata(a.oid, 'pg_authid') AS description ) AS d INNER JOIN pg_auth_members m ON m.member = a.oid CROSS JOIN LATERAL ( SELECT edgedb_VER.shobj_metadata(m.roleid, 'pg_authid') AS description ) AS md ''' bases_link_query_fields = { 'source': "((d.description)->>'id')::uuid", 'target': "((md.description)->>'id')::uuid", 'index': 'row_number() OVER (PARTITION BY a.oid ORDER BY m.roleid)', } bases_link_query = f''' SELECT {format_fields(schema, bases, bases_link_query_fields)} FROM pg_catalog.pg_roles AS a CROSS JOIN LATERAL ( SELECT edgedb_VER.shobj_metadata(a.oid, 'pg_authid') AS description ) AS d INNER JOIN pg_auth_members m ON m.member = a.oid CROSS JOIN LATERAL ( SELECT edgedb_VER.shobj_metadata(m.roleid, 'pg_authid') AS description ) AS md ''' ancestors_link_query = f''' SELECT {format_fields(schema, ancestors, bases_link_query_fields)} FROM pg_catalog.pg_roles AS a CROSS JOIN LATERAL ( SELECT edgedb_VER.shobj_metadata(a.oid, 'pg_authid') AS description ) AS d INNER JOIN pg_auth_members m ON m.member = a.oid CROSS JOIN LATERAL ( SELECT edgedb_VER.shobj_metadata(m.roleid, 'pg_authid') AS description ) AS md ''' annos_link_fields = { 'source': "((d.description)->>'id')::uuid", 'target': "(annotations->>'id')::uuid", 'value': "(annotations->>'value')::text", 'owned': "(annotations->>'owned')::bool", } annos_link_query = f''' SELECT {format_fields(schema, annos, annos_link_fields)} FROM pg_catalog.pg_roles AS a CROSS JOIN LATERAL ( SELECT edgedb_VER.shobj_metadata(a.oid, 'pg_authid') AS description ) AS d CROSS JOIN LATERAL ROWS FROM ( jsonb_array_elements( (d.description)->'annotations' ) ) AS annotations ''' int_annos_link_fields = { 'source': "((d.description)->>'id')::uuid", 'target': 
"(annotations->>'id')::uuid", 'owned': "(annotations->>'owned')::bool", } int_annos_link_query = f''' SELECT {format_fields(schema, int_annos, int_annos_link_fields)} FROM pg_catalog.pg_roles AS a CROSS JOIN LATERAL ( SELECT edgedb_VER.shobj_metadata(a.oid, 'pg_authid') AS description ) AS d CROSS JOIN LATERAL ROWS FROM ( jsonb_array_elements( (d.description)->'annotations__internal' ) ) AS annotations ''' objects = { Role: view_query, member_of: member_of_link_query, bases: bases_link_query, ancestors: ancestors_link_query, annos: annos_link_query, int_annos: int_annos_link_query, } views: list[dbops.View] = [] for obj, query in objects.items(): tabview = trampoline.VersionedView( name=tabname(schema, obj), query=query) views.append(tabview) return views
Generate the sys::Role introspection views over pg_roles and pg_auth_members metadata.
_generate_role_views
python
geldata/gel
edb/pgsql/metaschema.py
https://github.com/geldata/gel/blob/master/edb/pgsql/metaschema.py
Apache-2.0
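format_fields, used to render each field dict into the SELECT list of these views, is defined elsewhere in metaschema.py and is not part of this excerpt. As a rough mental model only (an assumption, not the real helper), it can be pictured as:

def format_fields_sketch(fields: dict) -> str:
    # Hypothetical: render {edgeql_field: sql_expression} into a SELECT list;
    # the real format_fields also consults the schema for pointer layout.
    return ',\n'.join(f'({expr}) AS {name}' for name, expr in fields.items())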
def _generate_single_role_views(schema: s_schema.Schema) -> List[dbops.View]:
    Role = schema.get('sys::Role', type=s_objtypes.ObjectType)
    member_of = Role.getptr(
        schema, s_name.UnqualName('member_of'), type=s_links.Link)
    bases = Role.getptr(
        schema, s_name.UnqualName('bases'), type=s_links.Link)
    ancestors = Role.getptr(
        schema, s_name.UnqualName('ancestors'), type=s_links.Link)
    annos = Role.getptr(
        schema, s_name.UnqualName('annotations'), type=s_links.Link)
    int_annos = Role.getptr(
        schema, s_name.UnqualName('annotations__internal'), type=s_links.Link)

    view_query_fields = {
        'id': "(json->>'id')::uuid",
        'name': "json->>'name'",
        'name__internal': "json->>'name'",
        'superuser': 'True',
        'abstract': 'False',
        'is_derived': 'False',
        'inherited_fields': 'ARRAY[]::text[]',
        'computed_fields': 'ARRAY[]::text[]',
        'builtin': 'True',
        'internal': 'False',
        'password': "json->>'password_hash'",
    }

    view_query = f'''
        SELECT
            {format_fields(schema, Role, view_query_fields)}
        FROM
            edgedbinstdata_VER.instdata
        WHERE
            key = 'single_role_metadata'
            AND json->>'tenant_id' = edgedb_VER.get_backend_tenant_id()
    '''

    member_of_link_query_fields = {
        'source': "'00000000-0000-0000-0000-000000000000'::uuid",
        'target': "'00000000-0000-0000-0000-000000000000'::uuid",
    }

    member_of_link_query = f'''
        SELECT
            {format_fields(schema, member_of, member_of_link_query_fields)}
        LIMIT 0
    '''

    bases_link_query_fields = {
        'source': "'00000000-0000-0000-0000-000000000000'::uuid",
        'target': "'00000000-0000-0000-0000-000000000000'::uuid",
        'index': "0::bigint",
    }

    bases_link_query = f'''
        SELECT
            {format_fields(schema, bases, bases_link_query_fields)}
        LIMIT 0
    '''

    ancestors_link_query = f'''
        SELECT
            {format_fields(schema, ancestors, bases_link_query_fields)}
        LIMIT 0
    '''

    annos_link_fields = {
        'source': "(json->>'id')::uuid",
        'target': "(annotations->>'id')::uuid",
        'value': "(annotations->>'value')::text",
        'owned': "(annotations->>'owned')::bool",
    }

    annos_link_query = f'''
        SELECT
            {format_fields(schema, annos, annos_link_fields)}
        FROM
            edgedbinstdata_VER.instdata
            CROSS JOIN LATERAL
                ROWS FROM (
                    jsonb_array_elements(json->'annotations')
                ) AS annotations
        WHERE
            key = 'single_role_metadata'
            AND json->>'tenant_id' = edgedb_VER.get_backend_tenant_id()
    '''

    int_annos_link_fields = {
        'source': "(json->>'id')::uuid",
        'target': "(annotations->>'id')::uuid",
        'owned': "(annotations->>'owned')::bool",
    }

    int_annos_link_query = f'''
        SELECT
            {format_fields(schema, int_annos, int_annos_link_fields)}
        FROM
            edgedbinstdata_VER.instdata
            CROSS JOIN LATERAL
                ROWS FROM (
                    jsonb_array_elements(json->'annotations__internal')
                ) AS annotations
        WHERE
            key = 'single_role_metadata'
            AND json->>'tenant_id' = edgedb_VER.get_backend_tenant_id()
    '''

    objects = {
        Role: view_query,
        member_of: member_of_link_query,
        bases: bases_link_query,
        ancestors: ancestors_link_query,
        annos: annos_link_query,
        int_annos: int_annos_link_query,
    }

    views: list[dbops.View] = []
    for obj, query in objects.items():
        tabview = trampoline.VersionedView(
            name=tabname(schema, obj), query=query)
        views.append(tabview)

    return views
Generate the sys::Role views for single-role backends, reading role metadata from the instdata table instead of pg_authid.
_generate_single_role_views
python
geldata/gel
edb/pgsql/metaschema.py
https://github.com/geldata/gel/blob/master/edb/pgsql/metaschema.py
Apache-2.0
def _generate_sql_information_schema(
    backend_version: params.BackendVersion
) -> List[dbops.Command]:

    # Helper to create wrappers around materialized views. For
    # performance, we use MATERIALIZED VIEW for some of our SQL
    # emulation tables. Unfortunately we can't use those directly,
    # since we need tableoid to match the real pg_catalog table.
    def make_wrapper_view(name: str) -> trampoline.VersionedView:
        return trampoline.VersionedView(
            name=("edgedbsql", name),
            query=f"""
                SELECT *,
                    'pg_catalog.{name}'::regclass::oid as tableoid,
                    xmin, cmin, xmax, cmax, ctid
                FROM edgedbsql_VER.{name}_
            """,
        )

    # A helper view that contains all data tables we expose over SQL,
    # excluding introspection tables.
    # It contains table & schema names and associated module id.
    virtual_tables = trampoline.VersionedView(
        name=('edgedbsql', 'virtual_tables'),
        materialized=True,
        query='''
        WITH obj_ty_pre AS (
            SELECT
                id,
                REGEXP_REPLACE(name, '::[^:]*$', '') AS module_name,
                REGEXP_REPLACE(name, '^.*::', '') as table_name
            FROM edgedb_VER."_SchemaObjectType"
            WHERE internal IS NOT TRUE
        ),
        obj_ty AS (
            SELECT
                id,
                REGEXP_REPLACE(module_name, '^default(?=::|$)', 'public')
                    AS schema_name,
                module_name,
                table_name
            FROM obj_ty_pre
        ),
        all_tables (id, schema_name, module_name, table_name) AS ((
            SELECT * FROM obj_ty
        ) UNION ALL (
            WITH qualified_links AS (
                -- multi links and links with at least one property
                -- (besides source and target)
                SELECT link.id
                FROM edgedb_VER."_SchemaLink" link
                JOIN edgedb_VER."_SchemaProperty" AS prop
                    ON link.id = prop.source
                WHERE prop.computable IS NOT TRUE
                    AND prop.internal IS NOT TRUE
                GROUP BY link.id, link.cardinality
                HAVING link.cardinality = 'Many' OR COUNT(*) > 2
            )
            SELECT link.id, obj_ty.schema_name, obj_ty.module_name,
                CONCAT(obj_ty.table_name, '.', link.name) AS table_name
            FROM edgedb_VER."_SchemaLink" link
            JOIN obj_ty ON obj_ty.id = link.source
            WHERE link.id IN (SELECT * FROM qualified_links)
        ) UNION ALL (
            -- multi properties
            SELECT prop.id, obj_ty.schema_name, obj_ty.module_name,
                CONCAT(obj_ty.table_name, '.', prop.name) AS table_name
            FROM edgedb_VER."_SchemaProperty" AS prop
            JOIN obj_ty ON obj_ty.id = prop.source
            WHERE prop.computable IS NOT TRUE
                AND prop.internal IS NOT TRUE
                AND prop.cardinality = 'Many'
        ))
        SELECT
            at.id,
            schema_name,
            table_name,
            sm.id AS module_id,
            pt.oid AS pg_type_id
        FROM all_tables at
        JOIN edgedb_VER."_SchemaModule" sm ON sm.name = at.module_name
        LEFT JOIN pg_type pt ON pt.typname = at.id::text
        WHERE schema_name not in (
            'cfg', 'sys', 'schema', 'std', 'std::net', 'std::net::http'
        )
        '''
    )

    # A few tables in here were causing problems, so let's hide them as an
    # implementation detail.
    # To be more specific:
    # - following tables were missing from information_schema:
    #   Link.properties, ObjectType.links, ObjectType.properties
    # - even though introspection worked, I wasn't able to select from some
    #   tables in cfg and sys

    # For making up oids of schemas that represent modules
    uuid_to_oid = trampoline.VersionedFunction(
        name=('edgedbsql', 'uuid_to_oid'),
        args=(
            ('id', 'uuid'),
            # extra is two extra bits to throw into the oid, for now
            ('extra', 'int4', '0'),
        ),
        returns=('oid',),
        volatility='immutable',
        text="""
            SELECT (
                ('x' || substring(id::text, 2, 7))::bit(28)::bigint * 4
                + extra + 40000
            )::oid;
        """
    )

    long_name = trampoline.VersionedFunction(
        name=('edgedbsql', '_long_name'),
        args=[
            ('origname', ('text',)),
            ('longname', ('text',)),
        ],
        returns=('text',),
        volatility='stable',
        text=r'''
            SELECT CASE WHEN length(longname) > 63
                THEN left(longname, 55) || left(origname, 8)
                ELSE longname
                END
        '''
    )

    type_rename = trampoline.VersionedFunction(
        name=('edgedbsql', '_pg_type_rename'),
        args=[
            ('typeoid', ('oid',)),
            ('typename', ('name',)),
        ],
        returns=('name',),
        volatility='stable',
        text=r'''
            SELECT COALESCE (
                -- is the name in virtual_tables?
                (
                    SELECT vt.table_name::name
                    FROM edgedbsql_VER.virtual_tables vt
                    WHERE vt.pg_type_id = typeoid
                ),
                -- is this a scalar or tuple?
                (
                    SELECT name::name
                    FROM (
                        -- get the built-in scalars
                        SELECT
                            split_part(name, '::', 2) AS name,
                            backend_id
                        FROM edgedb_VER."_SchemaScalarType"
                        WHERE NOT builtin AND arg_values IS NULL

                        UNION ALL

                        -- get the tuples
                        SELECT
                            edgedbsql_VER._long_name(typename, name),
                            backend_id
                        FROM edgedb_VER."_SchemaTuple"
                    ) x
                    WHERE x.backend_id = typeoid
                ),
                typename
            )
        '''
    )

    namespace_rename = trampoline.VersionedFunction(
        name=('edgedbsql', '_pg_namespace_rename'),
        args=[
            ('typeoid', ('oid',)),
            ('typens', ('oid',)),
        ],
        returns=('oid',),
        volatility='stable',
        text=r'''
            WITH
            nspub AS (
                SELECT oid FROM pg_namespace WHERE nspname = 'edgedbpub'
            ),
            nsdef AS (
                SELECT edgedbsql_VER.uuid_to_oid(id) AS oid
                FROM edgedb_VER."_SchemaModule"
                WHERE name = 'default'
            )
            SELECT COALESCE (
                (
                    SELECT edgedbsql_VER.uuid_to_oid(vt.module_id)
                    FROM edgedbsql_VER.virtual_tables vt
                    WHERE vt.pg_type_id = typeoid
                ),
                -- just replace "edgedbpub" with "public"
                (SELECT nsdef.oid WHERE typens = nspub.oid),
                typens
            )
            FROM nspub, nsdef
        '''
    )

    sql_ident = 'information_schema.sql_identifier'
    sql_str = 'information_schema.character_data'
    sql_bool = 'information_schema.yes_or_no'
    sql_card = 'information_schema.cardinal_number'
    tables_and_columns = [
        trampoline.VersionedView(
            name=('edgedbsql', 'tables'),
            query=(
                f'''
        SELECT
            edgedb_VER.get_current_database()::{sql_ident} AS table_catalog,
            vt.schema_name::{sql_ident} AS table_schema,
            vt.table_name::{sql_ident} AS table_name,
            ist.table_type,
            ist.self_referencing_column_name,
            ist.reference_generation,
            ist.user_defined_type_catalog,
            ist.user_defined_type_schema,
            ist.user_defined_type_name,
            ist.is_insertable_into,
            ist.is_typed,
            ist.commit_action
        FROM information_schema.tables ist
        JOIN edgedbsql_VER.virtual_tables vt ON vt.id::text = ist.table_name
                '''
            ),
        ),
        trampoline.VersionedView(
            name=('edgedbsql', 'columns'),
            query=(
                f'''
        SELECT
            edgedb_VER.get_current_database()::{sql_ident} AS table_catalog,
            vt_table_schema::{sql_ident} AS table_schema,
            vt_table_name::{sql_ident} AS table_name,
            v_column_name::{sql_ident} as column_name,
            ROW_NUMBER() OVER (
                PARTITION BY vt_table_schema, vt_table_name
                ORDER BY position, v_column_name
            ) AS ordinal_position,
            column_default,
            is_nullable,
            data_type,
            NULL::{sql_card} AS character_maximum_length,
            NULL::{sql_card} AS character_octet_length,
            NULL::{sql_card} AS numeric_precision,
            NULL::{sql_card} AS numeric_precision_radix,
            NULL::{sql_card} AS numeric_scale,
            NULL::{sql_card} AS datetime_precision,
            NULL::{sql_str} AS interval_type,
            NULL::{sql_card} AS interval_precision,
            NULL::{sql_ident} AS character_set_catalog,
            NULL::{sql_ident} AS character_set_schema,
            NULL::{sql_ident} AS character_set_name,
            NULL::{sql_ident} AS collation_catalog,
            NULL::{sql_ident} AS collation_schema,
            NULL::{sql_ident} AS collation_name,
            NULL::{sql_ident} AS domain_catalog,
            NULL::{sql_ident} AS domain_schema,
            NULL::{sql_ident} AS domain_name,
            edgedb_VER.get_current_database()::{sql_ident} AS udt_catalog,
            'pg_catalog'::{sql_ident} AS udt_schema,
            NULL::{sql_ident} AS udt_name,
            NULL::{sql_ident} AS scope_catalog,
            NULL::{sql_ident} AS scope_schema,
            NULL::{sql_ident} AS scope_name,
            NULL::{sql_card} AS maximum_cardinality,
            0::{sql_ident} AS dtd_identifier,
            'NO'::{sql_bool} AS is_self_referencing,
            'NO'::{sql_bool} AS is_identity,
            NULL::{sql_str} AS identity_generation,
            NULL::{sql_str} AS identity_start,
            NULL::{sql_str} AS identity_increment,
            NULL::{sql_str} AS identity_maximum,
            NULL::{sql_str} AS identity_minimum,
            'NO'::{sql_bool} AS identity_cycle,
            'NEVER'::{sql_str} AS is_generated,
            NULL::{sql_str} AS generation_expression,
            'YES'::{sql_bool} AS is_updatable
        FROM (
            SELECT
                vt.schema_name AS vt_table_schema,
                vt.table_name AS vt_table_name,
                COALESCE(
                    -- this happens for id and __type__
                    spec.name,
                    -- fallback to pointer name, with suffix '_id' for links
                    sp.name || case when sl.id is not null
                        then '_id' else '' end
                ) AS v_column_name,
                COALESCE(spec.position, 2) AS position,
                (sp.expr IS NOT NULL) AS is_computed,
                isc.column_default,
                CASE WHEN sp.required OR spec.k IS NOT NULL
                    THEN 'NO' ELSE 'YES' END AS is_nullable,

                -- HACK: computeds don't have backing rows in isc,
                -- so we just default to 'text'. This is wrong.
                COALESCE(isc.data_type, 'text') AS data_type
            FROM edgedb_VER."_SchemaPointer" sp
            LEFT JOIN information_schema.columns isc ON (
                isc.table_name = sp.source::TEXT AND CASE
                    WHEN length(isc.column_name) = 36 -- if column name is uuid
                    THEN isc.column_name = sp.id::text -- compare uuids
                    ELSE isc.column_name = sp.name -- for id, source, target
                END
            )

            -- needed for attaching `_id`
            LEFT JOIN edgedb_VER."_SchemaLink" sl ON sl.id = sp.id

            -- needed for determining table name
            JOIN edgedbsql_VER.virtual_tables vt ON vt.id = sp.source

            -- positions for special pointers
            -- duplicate id get both id and __type__ columns out of it
            LEFT JOIN (
                VALUES  ('id', 'id', 0),
                        ('id', '__type__', 1),
                        ('source', 'source', 0),
                        ('target', 'target', 1)
            ) spec(k, name, position) ON (spec.k = isc.column_name)

            WHERE isc.column_name IS NOT NULL -- normal pointers
               OR sp.expr IS NOT NULL AND sp.cardinality <> 'Many' -- computeds

            UNION ALL

            -- special case: multi properties source and target
            -- (this is needed, because schema does not create pointers for
            -- these two columns)
            SELECT
                vt.schema_name AS vt_table_schema,
                vt.table_name AS vt_table_name,
                isc.column_name AS v_column_name,
                spec.position as position,
                FALSE as is_computed,
                isc.column_default,
                'NO' as is_nullable,
                isc.data_type as data_type
            FROM edgedb_VER."_SchemaPointer" sp
            JOIN information_schema.columns isc
                ON isc.table_name = sp.id::TEXT

            -- needed for filtering out links
            LEFT JOIN edgedb_VER."_SchemaLink" sl ON sl.id = sp.id

            -- needed for determining table name
            JOIN edgedbsql_VER.virtual_tables vt ON vt.id = sp.id

            -- positions for special pointers
            JOIN (
                VALUES  ('source', 'source', 0),
                        ('target', 'target', 1)
            ) spec(k, name, position) ON (spec.k = isc.column_name)

            WHERE sl.id IS NULL -- property (non-link)
              AND sp.cardinality = 'Many' -- multi
              AND sp.expr IS NULL -- non-computed
        ) t
                '''
            ),
        ),
    ]

    pg_catalog_views = [
        trampoline.VersionedView(
            name=("edgedbsql", "pg_namespace_"),
            materialized=True,
            query="""
        -- system schemas
        SELECT
            oid,
            nspname,
            nspowner,
            nspacl
        FROM pg_namespace
        WHERE nspname IN ('pg_catalog', 'pg_toast', 'information_schema',
            'edgedb', 'edgedbstd', 'edgedbt',
            'edgedb_VER', 'edgedbstd_VER')

        UNION ALL

        -- virtual schemas
        SELECT
            edgedbsql_VER.uuid_to_oid(t.module_id) AS oid,
            t.schema_name AS nspname,
            (SELECT oid
             FROM pg_roles
             WHERE rolname = CURRENT_USER
             LIMIT 1) AS nspowner,
            NULL AS nspacl
        FROM (
            SELECT schema_name, module_id
            FROM edgedbsql_VER.virtual_tables

            UNION

            -- always include the default module,
            -- because it is needed for tuple types
            SELECT 'public' AS schema_name, id AS module_id
            FROM edgedb_VER."_SchemaModule"
            WHERE name = 'default'
        ) t
            """,
        ),
        make_wrapper_view("pg_namespace"),
        trampoline.VersionedView(
            name=("edgedbsql", "pg_type_"),
            materialized=True,
            query="""
        SELECT
            pt.oid,
            edgedbsql_VER._pg_type_rename(pt.oid, pt.typname)
                AS typname,
            edgedbsql_VER._pg_namespace_rename(pt.oid, pt.typnamespace)
                AS typnamespace,
            {0}
        FROM pg_type pt
        JOIN pg_namespace pn ON pt.typnamespace = pn.oid
        WHERE nspname IN ('pg_catalog', 'pg_toast', 'information_schema',
            'edgedb', 'edgedbstd', 'edgedb_VER', 'edgedbstd_VER',
            'edgedbpub', 'edgedbt')
            """.format(
                ",".join(
                    f"pt.{col}"
                    for col, _, _
                    in sql_introspection.PG_CATALOG["pg_type"][3:]
                )
            ),
        ),
        make_wrapper_view("pg_type"),

        # pg_class that contains classes only for tables
        # This is needed so we can use it to filter pg_index to indexes only
        # on visible tables.
        trampoline.VersionedView(
            name=("edgedbsql", "pg_class_tables"),
            materialized=True,
            query="""
        -- Postgres tables
        SELECT pc.*
        FROM pg_class pc
        JOIN pg_namespace pn ON pc.relnamespace = pn.oid
        WHERE nspname IN ('pg_catalog', 'pg_toast', 'information_schema')

        UNION ALL

        -- user-defined tables
        SELECT
            oid,
            vt.table_name as relname,
            edgedbsql_VER.uuid_to_oid(vt.module_id) as relnamespace,
            reltype,
            reloftype,
            relowner,
            relam,
            relfilenode,
            reltablespace,
            relpages,
            reltuples,
            relallvisible,
            reltoastrelid,
            relhasindex,
            relisshared,
            relpersistence,
            relkind,
            relnatts,
            0 as relchecks, -- don't care about CHECK constraints
            relhasrules,
            relhastriggers,
            relhassubclass,
            relrowsecurity,
            relforcerowsecurity,
            relispopulated,
            relreplident,
            relispartition,
            relrewrite,
            relfrozenxid,
            relminmxid,
            relacl,
            reloptions,
            relpartbound
        FROM pg_class pc
        JOIN edgedbsql_VER.virtual_tables vt ON vt.pg_type_id = pc.reltype
            """,
        ),
        trampoline.VersionedView(
            name=("edgedbsql", "pg_index_"),
            materialized=True,
            query=f"""
        SELECT
            pi.indexrelid,
            pi.indrelid,
            pi.indnatts,
            pi.indnkeyatts,
            CASE WHEN COALESCE(is_id.t, FALSE)
                THEN TRUE
                ELSE pi.indisprimary
            END AS indisunique,
            {'pi.indnullsnotdistinct,' if backend_version.major >= 15 else ''}
            CASE WHEN COALESCE(is_id.t, FALSE)
                THEN TRUE
                ELSE pi.indisprimary
            END AS indisprimary,
            pi.indisexclusion,
            pi.indimmediate,
            pi.indisclustered,
            pi.indisvalid,
            pi.indcheckxmin,
            CASE WHEN COALESCE(is_id.t, FALSE)
                THEN TRUE
                ELSE FALSE -- override so pg_dump won't try to recreate them
            END AS indisready,
            pi.indislive,
            pi.indisreplident,
            CASE WHEN COALESCE(is_id.t, FALSE)
                THEN ARRAY[1]::int2vector -- id: 1
                ELSE pi.indkey
            END AS indkey,
            pi.indcollation,
            pi.indclass,
            pi.indoption,
            pi.indexprs,
            pi.indpred
        FROM pg_index pi

        -- filter by tables visible in pg_class
        INNER JOIN edgedbsql_VER.pg_class_tables pr ON pi.indrelid = pr.oid

        -- find indexes that are on virtual tables and on `id` columns
        LEFT JOIN LATERAL (
            SELECT TRUE AS t
            FROM pg_attribute pa
            WHERE pa.attrelid = pi.indrelid
              AND pa.attnum = ANY(pi.indkey)
              AND pa.attname = 'id'
        ) is_id ON TRUE

        -- for our tables show only primary key indexes
        LEFT JOIN edgedbsql_VER.virtual_tables vt ON vt.pg_type_id = pr.reltype
        WHERE vt.id IS NULL OR is_id.t IS NOT NULL
            """,
        ),
        make_wrapper_view('pg_index'),
        trampoline.VersionedView(
            name=("edgedbsql", "pg_class_"),
            materialized=True,
            query="""
        -- tables
        SELECT pc.*
        FROM edgedbsql_VER.pg_class_tables pc

        UNION

        -- indexes
        SELECT pc.*
        FROM pg_class pc
        JOIN pg_index pi ON pc.oid = pi.indexrelid

        UNION

        -- compound types (tuples)
        SELECT
            pc.oid,
            edgedbsql_VER._long_name(pc.reltype::text, tup.name) as relname,
            nsdef.oid as relnamespace,
            pc.reltype,
            pc.reloftype,
            pc.relowner,
            pc.relam,
            pc.relfilenode,
            pc.reltablespace,
            pc.relpages,
            pc.reltuples,
            pc.relallvisible,
            pc.reltoastrelid,
            pc.relhasindex,
            pc.relisshared,
            pc.relpersistence,
            pc.relkind,
            pc.relnatts,
            0 as relchecks, -- don't care about CHECK constraints
            pc.relhasrules,
            pc.relhastriggers,
            pc.relhassubclass,
            pc.relrowsecurity,
            pc.relforcerowsecurity,
            pc.relispopulated,
            pc.relreplident,
            pc.relispartition,
            pc.relrewrite,
            pc.relfrozenxid,
            pc.relminmxid,
            pc.relacl,
            pc.reloptions,
            pc.relpartbound
        FROM pg_class pc
        JOIN edgedb_VER."_SchemaTuple" tup ON tup.backend_id = pc.reltype
        JOIN (
            SELECT edgedbsql_VER.uuid_to_oid(id) AS oid
            FROM edgedb_VER."_SchemaModule"
            WHERE name = 'default'
        ) nsdef ON TRUE
            """,
        ),
        make_wrapper_view("pg_class"),

        # Because we hide some columns and
        # because pg_dump expects attnum to be sequential numbers
        # we have to invent new attnums with ROW_NUMBER().
        # Since attnum is used elsewhere, we need to know the mapping from
        # constructed attnum into underlying attnum.
        # To do that, we have pg_attribute_ext view with additional
        # attnum_internal column.
        trampoline.VersionedView(
            name=("edgedbsql", "pg_attribute_ext"),
            materialized=True,
            query=r"""
        SELECT attrelid,
            attname,
            atttypid,
            attstattarget,
            attlen,
            attnum,
            attnum as attnum_internal,
            attndims,
            attcacheoff,
            atttypmod,
            attbyval,
            attstorage,
            attalign,
            attnotnull,
            atthasdef,
            atthasmissing,
            attidentity,
            attgenerated,
            attisdropped,
            attislocal,
            attinhcount,
            attcollation,
            attacl,
            attoptions,
            attfdwoptions,
            null::int[] as attmissingval
        FROM pg_attribute pa
        JOIN pg_class pc ON pa.attrelid = pc.oid
        JOIN pg_namespace pn ON pc.relnamespace = pn.oid
        LEFT JOIN edgedb_VER."_SchemaTuple" tup ON tup.backend_id = pc.reltype
        WHERE
            nspname IN ('pg_catalog', 'pg_toast', 'information_schema')
            OR tup.backend_id IS NOT NULL

        UNION ALL

        SELECT pc_oid as attrelid,
            col_name as attname,
            COALESCE(atttypid, 25) as atttypid, -- defaults to TEXT
            COALESCE(attstattarget, -1) as attstattarget,
            COALESCE(attlen, -1) as attlen,
            (ROW_NUMBER() OVER (
                PARTITION BY pc_oid
                ORDER BY col_position, col_name
            ) - 6)::smallint AS attnum,
            t.attnum as attnum_internal,
            COALESCE(attndims, 0) as attndims,
            COALESCE(attcacheoff, -1) as attcacheoff,
            COALESCE(atttypmod, -1) as atttypmod,
            COALESCE(attbyval, FALSE) as attbyval,
            COALESCE(attstorage, 'x') as attstorage,
            COALESCE(attalign, 'i') as attalign,
            required as attnotnull,
            -- Always report no default, to avoid expr trouble
            false as atthasdef,
            COALESCE(atthasmissing, FALSE) as atthasmissing,
            COALESCE(attidentity, '') as attidentity,
            COALESCE(attgenerated, '') as attgenerated,
            COALESCE(attisdropped, FALSE) as attisdropped,
            COALESCE(attislocal, TRUE) as attislocal,
            COALESCE(attinhcount, 0) as attinhcount,
            COALESCE(attcollation, 0) as attcollation,
            attacl,
            attoptions,
            attfdwoptions,
            null::int[] as attmissingval
        FROM (
            SELECT
                COALESCE(
                    spec.name, -- for special columns
                    sp.name || case when sl.id is not null
                        then '_id' else '' end,
                    pa.attname -- for system columns
                ) as col_name,
                COALESCE(spec.position, 2) AS col_position,
                (sp.required IS TRUE OR spec.k IS NOT NULL) as required,
                pc.oid AS pc_oid,
                pa.*
            FROM edgedb_VER."_SchemaPointer" sp
            JOIN edgedbsql_VER.virtual_tables vt ON vt.id = sp.source
            JOIN pg_class pc ON pc.reltype = vt.pg_type_id

            -- try to find existing pg_attribute
            -- (it will not exist for computeds)
            LEFT JOIN pg_attribute pa ON (
                pa.attrelid = pc.oid AND CASE
                    WHEN length(pa.attname) = 36 -- if column name is uuid
                    THEN pa.attname = sp.id::text -- compare uuids
                    ELSE pa.attname = sp.name -- for id, source, target
                END
            )

            -- positions for special pointers
            -- duplicate id get both id and __type__ columns out of it
            LEFT JOIN (
                VALUES  ('id', 'id', 0),
                        ('id', '__type__', 1),
                        ('source', 'source', 0),
                        ('target', 'target', 1)
            ) spec(k, name, position) ON (spec.k = pa.attname)

            -- needed for attaching `_id`
            LEFT JOIN edgedb_VER."_SchemaLink" sl ON sl.id = sp.id

            WHERE pa.attname IS NOT NULL -- non-computed pointers
               OR sp.expr IS NOT NULL AND sp.cardinality <> 'Many' -- computeds

            UNION ALL

            -- special case: multi properties source and target
            -- (this is needed, because schema does not create pointers for
            -- these two columns)
            SELECT
                pa.attname AS col_name,
                spec.position as position,
                TRUE as required,
                pa.attrelid as pc_oid,
                pa.*
            FROM edgedb_VER."_SchemaProperty" sp
            JOIN pg_class pc ON pc.relname = sp.id::TEXT
            JOIN pg_attribute pa ON pa.attrelid = pc.oid

            -- positions for special pointers
            JOIN (
                VALUES  ('source', 0),
                        ('target', 1)
            ) spec(k, position) ON (spec.k = pa.attname)

            WHERE sp.cardinality = 'Many' -- multi
              AND sp.expr IS NULL -- non-computed

            UNION ALL

            -- special case: system columns
            SELECT
                pa.attname AS col_name,
                pa.attnum as position,
                TRUE as required,
                pa.attrelid as pc_oid,
                pa.*
            FROM pg_attribute pa
            JOIN pg_class pc ON pc.oid = pa.attrelid
            JOIN edgedbsql_VER.virtual_tables vt ON vt.pg_type_id = pc.reltype
            WHERE pa.attnum < 0
        ) t
            """,
        ),
        trampoline.VersionedView(
            name=("edgedbsql", "pg_attribute"),
            query="""
        SELECT
            attrelid,
            attname,
            atttypid,
            attstattarget,
            attlen,
            attnum,
            attndims,
            attcacheoff,
            atttypmod,
            attbyval,
            attstorage,
            attalign,
            attnotnull,
            atthasdef,
            atthasmissing,
            attidentity,
            attgenerated,
            attisdropped,
            attislocal,
            attinhcount,
            attcollation,
            attacl,
            attoptions,
            attfdwoptions,
            attmissingval,
            'pg_catalog.pg_attribute'::regclass::oid as tableoid,
            xmin, cmin, xmax, cmax, ctid
        FROM edgedbsql_VER.pg_attribute_ext
            """,
        ),
        trampoline.VersionedView(
            name=("edgedbsql", "pg_database"),
            query="""
        SELECT
            oid,
            edgedb_VER.get_current_database()::name as datname,
            datdba,
            encoding,
            datcollate,
            datctype,
            datistemplate,
            datallowconn,
            datconnlimit,
            0::oid AS datlastsysoid,
            datfrozenxid,
            datminmxid,
            dattablespace,
            datacl,
            tableoid, xmin, cmin, xmax, cmax, ctid
        FROM pg_database
        WHERE datname LIKE '%_edgedb'
            """,
        ),

        # HACK: there were problems with pg_dump when exposing this table, so
        # I've added WHERE FALSE. The query could be simplified, but it may
        # be needed in the future. Its EXPLAIN cost is 0..0 anyway.
        trampoline.VersionedView(
            name=("edgedbsql", "pg_stats"),
            query="""
        SELECT n.nspname AS schemaname,
            c.relname AS tablename,
            a.attname,
            s.stainherit AS inherited,
            s.stanullfrac AS null_frac,
            s.stawidth AS avg_width,
            s.stadistinct AS n_distinct,
            NULL::real[] AS most_common_vals,
            s.stanumbers1 AS most_common_freqs,
            s.stanumbers1 AS histogram_bounds,
            s.stanumbers1[1] AS correlation,
            NULL::real[] AS most_common_elems,
            s.stanumbers1 AS most_common_elem_freqs,
            s.stanumbers1 AS elem_count_histogram
        FROM pg_statistic s
        JOIN pg_class c ON c.oid = s.starelid
        JOIN edgedbsql_VER.pg_attribute_ext a ON (
            c.oid = a.attrelid and a.attnum_internal = s.staattnum
        )
        LEFT JOIN pg_namespace n ON n.oid = c.relnamespace
        WHERE FALSE
            """,
        ),
        trampoline.VersionedView(
            name=("edgedbsql", "pg_constraint"),
            query=r"""
        -- primary keys for:
        --  - objects tables (that contains id)
        --  - link tables (that contains source and target)
        -- there exists a unique constraint for each of these
        SELECT
            pc.oid,
            vt.table_name || '_pk' AS conname,
            pc.connamespace,
            'p'::"char" AS contype,
            pc.condeferrable,
            pc.condeferred,
            pc.convalidated,
            pc.conrelid,
            pc.contypid,
            pc.conindid,
            pc.conparentid,
            NULL::oid AS confrelid,
            NULL::"char" AS confupdtype,
            NULL::"char" AS confdeltype,
            NULL::"char" AS confmatchtype,
            pc.conislocal,
            pc.coninhcount,
            pc.connoinherit,
            CASE WHEN pa.attname = 'id'
                THEN ARRAY[1]::int2[]    -- id will always have attnum 1
                ELSE ARRAY[1, 2]::int2[] -- source and target
            END AS conkey,
            NULL::int2[] AS confkey,
            NULL::oid[] AS conpfeqop,
            NULL::oid[] AS conppeqop,
            NULL::oid[] AS conffeqop,
            NULL::int2[] AS confdelsetcols,
            NULL::oid[] AS conexclop,
            pc.conbin,
            pc.tableoid, pc.xmin, pc.cmin, pc.xmax, pc.cmax, pc.ctid
        FROM pg_constraint pc
        JOIN edgedbsql_VER.pg_class_tables pct ON pct.oid = pc.conrelid
        JOIN edgedbsql_VER.virtual_tables vt ON vt.pg_type_id = pct.reltype
        JOIN pg_attribute pa
          ON (pa.attrelid = pct.oid
              AND pa.attnum = ANY(conkey)
              AND pa.attname IN ('id', 'source'))
        WHERE contype = 'u' -- our ids and all links will have unique constraint

        UNION ALL

        -- foreign keys for object tables
        SELECT
            -- uuid_to_oid needs "extra" arg to disambiguate from the link
            -- table keys below
            edgedbsql_VER.uuid_to_oid(sl.id, 0) as oid,
            vt.table_name || '_fk_' || sl.name AS conname,
            edgedbsql_VER.uuid_to_oid(vt.module_id) AS connamespace,
            'f'::"char" AS contype,
            FALSE AS condeferrable,
            FALSE AS condeferred,
            TRUE AS convalidated,
            pc.oid AS conrelid,
            0::oid AS contypid,
            0::oid AS conindid, -- let's hope this is not needed
            0::oid AS conparentid,
            pc_target.oid AS confrelid,
            'a'::"char" AS confupdtype,
            'a'::"char" AS confdeltype,
            's'::"char" AS confmatchtype,
            TRUE AS conislocal,
            0::int2 AS coninhcount,
            TRUE AS connoinherit,
            ARRAY[pa.attnum]::int2[] AS conkey,
            ARRAY[1]::int2[] AS confkey, -- id will always have attnum 1
            ARRAY['uuid_eq'::regproc]::oid[] AS conpfeqop,
            ARRAY['uuid_eq'::regproc]::oid[] AS conppeqop,
            ARRAY['uuid_eq'::regproc]::oid[] AS conffeqop,
            NULL::int2[] AS confdelsetcols,
            NULL::oid[] AS conexclop,
            NULL::pg_node_tree AS conbin,
            pa.tableoid, pa.xmin, pa.cmin, pa.xmax, pa.cmax, pa.ctid
        FROM edgedbsql_VER.virtual_tables vt
        JOIN pg_class pc ON pc.reltype = vt.pg_type_id
        JOIN edgedb_VER."_SchemaLink" sl
          ON sl.source = vt.id -- AND COALESCE(sl.cardinality = 'One', TRUE)
        JOIN edgedbsql_VER.virtual_tables vt_target
          ON sl.target = vt_target.id
        JOIN pg_class pc_target ON pc_target.reltype = vt_target.pg_type_id
        JOIN edgedbsql_VER.pg_attribute pa
          ON pa.attrelid = pc.oid
         AND pa.attname = sl.name || '_id'

        UNION ALL

        -- foreign keys for:
        -- - multi link tables (source & target),
        -- - multi property tables (source),
        -- - single link with link properties (source & target),
        -- these constraints do not actually exist, so we emulate it entirely
        SELECT
            -- uuid_to_oid needs "extra" arg to disambiguate from other
            -- constraints using this pointer
            edgedbsql_VER.uuid_to_oid(sp.id, spec.attnum) AS oid,
            vt.table_name || '_fk_' || spec.name AS conname,
            edgedbsql_VER.uuid_to_oid(vt.module_id) AS connamespace,
            'f'::"char" AS contype,
            FALSE AS condeferrable,
            FALSE AS condeferred,
            TRUE AS convalidated,
            pc.oid AS conrelid,
            pc.reltype AS contypid,
            0::oid AS conindid, -- TODO
            0::oid AS conparentid,
            pcf.oid AS confrelid,
            'r'::"char" AS confupdtype,
            'r'::"char" AS confdeltype,
            's'::"char" AS confmatchtype,
            TRUE AS conislocal,
            0::int2 AS coninhcount,
            TRUE AS connoinherit,
            ARRAY[spec.attnum]::int2[] AS conkey,
            ARRAY[1]::int2[] AS confkey, -- id will have attnum 1
            ARRAY['uuid_eq'::regproc]::oid[] AS conpfeqop,
            ARRAY['uuid_eq'::regproc]::oid[] AS conppeqop,
            ARRAY['uuid_eq'::regproc]::oid[] AS conffeqop,
            NULL::int2[] AS confdelsetcols,
            NULL::oid[] AS conexclop,
            pc.relpartbound AS conbin,
            pc.tableoid, pc.xmin, pc.cmin, pc.xmax, pc.cmax, pc.ctid
        FROM edgedb_VER."_SchemaPointer" sp

        -- find links with link properties
        LEFT JOIN LATERAL (
            SELECT sl.id
            FROM edgedb_VER."_SchemaLink" sl
            LEFT JOIN edgedb_VER."_SchemaProperty" AS slp ON slp.source = sl.id
            GROUP BY sl.id
            HAVING COUNT(*) > 2
        ) link_props ON link_props.id = sp.id

        JOIN pg_class pc ON pc.relname = sp.id::TEXT
        JOIN edgedbsql_VER.virtual_tables vt ON vt.pg_type_id = pc.reltype

        -- duplicate each row for source and target
        JOIN LATERAL (VALUES
            ('source', 1::int2, sp.source),
            ('target', 2::int2, sp.target)
        ) spec(name, attnum, foreign_id) ON TRUE
        JOIN edgedbsql_VER.virtual_tables vtf ON vtf.id = spec.foreign_id
        JOIN pg_class pcf ON pcf.reltype = vtf.pg_type_id

        WHERE
            sp.cardinality = 'Many' OR link_props.id IS NOT NULL
            AND sp.computable IS NOT TRUE
            AND sp.internal IS NOT TRUE
            """
        ),
        trampoline.VersionedView(
            name=("edgedbsql", "pg_statistic"),
            query="""
        SELECT
            starelid,
            a.attnum as staattnum,
            stainherit,
            stanullfrac,
            stawidth,
            stadistinct,
            stakind1, stakind2, stakind3, stakind4, stakind5,
            staop1, staop2, staop3, staop4, staop5,
            stacoll1, stacoll2, stacoll3, stacoll4, stacoll5,
            stanumbers1, stanumbers2, stanumbers3, stanumbers4, stanumbers5,
            NULL::real[] AS stavalues1,
            NULL::real[] AS stavalues2,
            NULL::real[] AS stavalues3,
            NULL::real[] AS stavalues4,
            NULL::real[] AS stavalues5,
            s.tableoid, s.xmin, s.cmin, s.xmax, s.cmax, s.ctid
        FROM pg_statistic s
        JOIN edgedbsql_VER.pg_attribute_ext a ON (
            a.attrelid = s.starelid AND a.attnum_internal = s.staattnum
        )
            """,
        ),
        trampoline.VersionedView(
            name=("edgedbsql", "pg_statistic_ext"),
            query="""
        SELECT
            oid,
            stxrelid,
            stxname,
            stxnamespace,
            stxowner,
            stxstattarget,
            stxkeys,
            stxkind,
            NULL::pg_node_tree as stxexprs,
            tableoid, xmin, cmin, xmax, cmax, ctid
        FROM pg_statistic_ext
            """,
        ),
        trampoline.VersionedView(
            name=("edgedbsql", "pg_statistic_ext_data"),
            query="""
        SELECT
            stxoid,
            stxdndistinct,
            stxddependencies,
            stxdmcv,
            NULL::oid AS stxdexpr,
            tableoid, xmin, cmin, xmax, cmax, ctid
        FROM pg_statistic_ext_data
            """,
        ),
        trampoline.VersionedView(
            name=("edgedbsql", "pg_rewrite"),
            query="""
        SELECT pr.*,
            pr.tableoid, pr.xmin, pr.cmin, pr.xmax, pr.cmax, pr.ctid
        FROM pg_rewrite pr
        JOIN edgedbsql_VER.pg_class pn ON pr.ev_class = pn.oid
            """,
        ),

        # HACK: Automatically generated cast function for ranges/multiranges
        # was causing issues for pg_dump. So at the end of the day we opt for
        # not exposing any casts at all here since there is no real reason for
        # this compatibility layer that is read-only to have elaborate casts
        # present.
        trampoline.VersionedView(
            name=("edgedbsql", "pg_cast"),
            query="""
        SELECT pc.*,
            pc.tableoid, pc.xmin, pc.cmin, pc.xmax, pc.cmax, pc.ctid
        FROM pg_cast pc
        WHERE FALSE
            """,
        ),

        # Omit all functions for now.
        trampoline.VersionedView(
            name=("edgedbsql", "pg_proc"),
            query="""
        SELECT *,
            tableoid, xmin, cmin, xmax, cmax, ctid
        FROM pg_proc
        WHERE FALSE
            """,
        ),

        # Omit all operators for now.
        trampoline.VersionedView(
            name=("edgedbsql", "pg_operator"),
            query="""
        SELECT *,
            tableoid, xmin, cmin, xmax, cmax, ctid
        FROM pg_operator
        WHERE FALSE
            """,
        ),

        # Omit all triggers for now.
        trampoline.VersionedView(
            name=("edgedbsql", "pg_trigger"),
            query="""
        SELECT *,
            tableoid, xmin, cmin, xmax, cmax, ctid
        FROM pg_trigger
        WHERE FALSE
            """,
        ),

        # Omit all subscriptions for now.
        # This table is queried by pg_dump with COUNT(*) when user does not
        # have permissions to access it. This should be allowed, but the
        # view expands the query to all columns, which is not allowed.
        # So we have to construct an empty view with correct signature that
        # does not reference pg_subscription.
        trampoline.VersionedView(
            name=("edgedbsql", "pg_subscription"),
            query="""
        SELECT
            NULL::oid AS oid,
            NULL::oid AS subdbid,
            NULL::name AS subname,
            NULL::oid AS subowner,
            NULL::boolean AS subenabled,
            NULL::text AS subconninfo,
            NULL::name AS subslotname,
            NULL::text AS subsynccommit,
            NULL::oid AS subpublications,
            tableoid, xmin, cmin, xmax, cmax, ctid
        FROM pg_namespace
        WHERE FALSE
            """,
        ),
        trampoline.VersionedView(
            name=("edgedbsql", "pg_tables"),
            query="""
        SELECT
            n.nspname AS schemaname,
            c.relname AS tablename,
            pg_get_userbyid(c.relowner) AS tableowner,
            t.spcname AS tablespace,
            c.relhasindex AS hasindexes,
            c.relhasrules AS hasrules,
            c.relhastriggers AS hastriggers,
            c.relrowsecurity AS rowsecurity
        FROM edgedbsql_VER.pg_class c
        LEFT JOIN edgedbsql_VER.pg_namespace n ON n.oid = c.relnamespace
        LEFT JOIN pg_tablespace t ON t.oid = c.reltablespace
        WHERE c.relkind = ANY (ARRAY['r'::"char", 'p'::"char"])
            """,
        ),
        trampoline.VersionedView(
            name=("edgedbsql", "pg_views"),
            query="""
        SELECT
            n.nspname AS schemaname,
            c.relname AS viewname,
            pg_get_userbyid(c.relowner) AS viewowner,
            pg_get_viewdef(c.oid) AS definition
        FROM edgedbsql_VER.pg_class c
        LEFT JOIN edgedbsql_VER.pg_namespace n ON n.oid = c.relnamespace
        WHERE c.relkind = 'v'::"char"
            """,
        ),

        # Omit all descriptions (comments), because all non-system comments
        # are our internal implementation details.
        trampoline.VersionedView(
            name=("edgedbsql", "pg_description"),
            query="""
        SELECT *,
            tableoid, xmin, cmin, xmax, cmax, ctid
        FROM pg_description
        WHERE FALSE
            """,
        ),
    ]

    # We expose most of the views as empty tables, just to prevent errors
    # when the tools do introspection.
    # For the tables that it turns out are actually needed, we handcraft
    # the views that expose the actual data.
    # I've been cautious about exposing too much data, for example limiting
    # pg_type to pg_catalog and pg_toast namespaces.
    views: list[dbops.View] = []
    views.extend(tables_and_columns)

    for table_name, columns in sql_introspection.INFORMATION_SCHEMA.items():
        if table_name in ["tables", "columns"]:
            continue
        views.append(
            trampoline.VersionedView(
                name=("edgedbsql", table_name),
                query="SELECT {} LIMIT 0".format(
                    ",".join(
                        f"NULL::information_schema.{type} AS {name}"
                        for name, type, _ver_since in columns
                    )
                ),
            )
        )

    PG_TABLES_SKIP = {
        'pg_type',
        'pg_attribute',
        'pg_namespace',
        'pg_class',
        'pg_database',
        'pg_proc',
        'pg_operator',
        'pg_pltemplate',
        'pg_stats',
        'pg_stats_ext_exprs',
        'pg_statistic',
        'pg_statistic_ext',
        'pg_statistic_ext_data',
        'pg_rewrite',
        'pg_cast',
        'pg_index',
        'pg_constraint',
        'pg_trigger',
        'pg_subscription',
        'pg_tables',
        'pg_views',
        'pg_description',
    }

    PG_TABLES_WITH_SYSTEM_COLS = {
        'pg_aggregate',
        'pg_am',
        'pg_amop',
        'pg_amproc',
        'pg_attrdef',
        'pg_attribute',
        'pg_auth_members',
        'pg_authid',
        'pg_cast',
        'pg_class',
        'pg_collation',
        'pg_constraint',
        'pg_conversion',
        'pg_database',
        'pg_db_role_setting',
        'pg_default_acl',
        'pg_depend',
        'pg_enum',
        'pg_event_trigger',
        'pg_extension',
        'pg_foreign_data_wrapper',
        'pg_foreign_server',
        'pg_foreign_table',
        'pg_index',
        'pg_inherits',
        'pg_init_privs',
        'pg_language',
        'pg_largeobject',
        'pg_largeobject_metadata',
        'pg_namespace',
        'pg_opclass',
        'pg_operator',
        'pg_opfamily',
        'pg_partitioned_table',
        'pg_policy',
        'pg_publication',
        'pg_publication_rel',
        'pg_range',
        'pg_replication_origin',
        'pg_rewrite',
        'pg_seclabel',
        'pg_sequence',
        'pg_shdepend',
        'pg_shdescription',
        'pg_shseclabel',
        'pg_statistic',
        'pg_statistic_ext',
        'pg_statistic_ext_data',
        'pg_subscription_rel',
        'pg_tablespace',
        'pg_transform',
        'pg_trigger',
        'pg_ts_config',
        'pg_ts_config_map',
        'pg_ts_dict',
        'pg_ts_parser',
        'pg_ts_template',
        'pg_type',
        'pg_user_mapping',
    }

    SYSTEM_COLUMNS = ['tableoid', 'xmin', 'cmin', 'xmax', 'cmax', 'ctid']

    def construct_pg_view(
        table_name: str, backend_version: params.BackendVersion
    ) -> Optional[dbops.View]:
        pg_columns = sql_introspection.PG_CATALOG[table_name]

        columns = []
        has_columns = False
        for c_name, c_typ, c_ver_since in pg_columns:
            if c_ver_since <= backend_version.major:
                columns.append('o.' + c_name)
                has_columns = True
            elif c_typ:
                columns.append(f'NULL::{c_typ} as {c_name}')
            else:
                columns.append(f'NULL as {c_name}')
        if not has_columns:
            return None

        if table_name in PG_TABLES_WITH_SYSTEM_COLS:
            for c_name in SYSTEM_COLUMNS:
                columns.append('o.' + c_name)

        return trampoline.VersionedView(
            name=("edgedbsql", table_name),
            query=f"SELECT {','.join(columns)} FROM pg_catalog.{table_name} o",
        )

    views.extend(pg_catalog_views)

    for table_name in sql_introspection.PG_CATALOG.keys():
        if table_name in PG_TABLES_SKIP:
            continue
        if v := construct_pg_view(table_name, backend_version):
            views.append(v)

    util_functions = [
        trampoline.VersionedFunction(
            name=('edgedbsql', 'has_schema_privilege'),
            args=(
                ('schema_name', 'text'),
                ('privilege', 'text'),
            ),
            returns=('bool',),
            text="""
                SELECT COALESCE((
                    SELECT has_schema_privilege(oid, privilege)
                    FROM edgedbsql_VER.pg_namespace
                    WHERE nspname = schema_name
                ), TRUE);
            """
        ),
        trampoline.VersionedFunction(
            name=('edgedbsql', 'has_schema_privilege'),
            args=(
                ('schema_oid', 'oid'),
                ('privilege', 'text'),
            ),
            returns=('bool',),
            text="""
                SELECT COALESCE(
                    has_schema_privilege(schema_oid, privilege), TRUE
                )
            """
        ),
        trampoline.VersionedFunction(
            name=('edgedbsql', 'has_table_privilege'),
            args=(
                ('table_name', 'text'),
                ('privilege', 'text'),
            ),
            returns=('bool',),
            text="""
                SELECT has_table_privilege(oid, privilege)
                FROM edgedbsql_VER.pg_class
                WHERE relname = table_name;
            """
        ),
        trampoline.VersionedFunction(
            name=('edgedbsql', 'has_table_privilege'),
            args=(
                ('schema_oid', 'oid'),
                ('privilege', 'text'),
            ),
            returns=('bool',),
            text="""
                SELECT has_table_privilege(schema_oid, privilege)
            """
        ),
        trampoline.VersionedFunction(
            name=('edgedbsql', 'has_column_privilege'),
            args=(
                ('tbl', 'oid'),
                ('col', 'smallint'),
                ('privilege', 'text'),
            ),
            returns=('bool',),
            text="""
                SELECT has_column_privilege(tbl, col, privilege)
            """
        ),
        trampoline.VersionedFunction(
            name=('edgedbsql', 'has_column_privilege'),
            args=(
                ('tbl', 'text'),
                ('col', 'smallint'),
                ('privilege', 'text'),
            ),
            returns=('bool',),
            text="""
                SELECT has_column_privilege(oid, col, privilege)
                FROM edgedbsql_VER.pg_class
                WHERE relname = tbl;
            """
        ),
        trampoline.VersionedFunction(
            name=('edgedbsql', 'has_column_privilege'),
            args=(
                ('tbl', 'oid'),
                ('col', 'text'),
                ('privilege', 'text'),
            ),
            returns=('bool',),
            text="""
                SELECT has_column_privilege(tbl, attnum_internal, privilege)
                FROM edgedbsql_VER.pg_attribute_ext pa
                WHERE attrelid = tbl AND attname = col
            """
        ),
        trampoline.VersionedFunction(
            name=('edgedbsql', 'has_column_privilege'),
            args=(
                ('tbl', 'text'),
                ('col', 'text'),
                ('privilege', 'text'),
            ),
            returns=('bool',),
            text="""
                SELECT has_column_privilege(pc.oid, attnum_internal, privilege)
                FROM edgedbsql_VER.pg_class pc
                JOIN edgedbsql_VER.pg_attribute_ext pa ON pa.attrelid = pc.oid
                WHERE pc.relname = tbl AND pa.attname = col;
            """
        ),
        trampoline.VersionedFunction(
            name=('edgedbsql', '_pg_truetypid'),
            args=(
                ('att', ('edgedbsql_VER', 'pg_attribute')),
                ('typ', ('edgedbsql_VER', 'pg_type')),
            ),
            returns=('oid',),
            volatility='IMMUTABLE',
            strict=True,
            text="""
                SELECT CASE
                    WHEN typ.typtype = 'd' THEN typ.typbasetype
                    ELSE att.atttypid
                END
            """
        ),
        trampoline.VersionedFunction(
            name=('edgedbsql', '_pg_truetypmod'),
            args=(
                ('att', ('edgedbsql_VER', 'pg_attribute')),
                ('typ', ('edgedbsql_VER', 'pg_type')),
            ),
            returns=('int4',),
            volatility='IMMUTABLE',
            strict=True,
            text="""
                SELECT CASE
                    WHEN typ.typtype = 'd' THEN typ.typtypmod
                    ELSE att.atttypmod
                END
            """
        ),
        trampoline.VersionedFunction(
            name=('edgedbsql', 'pg_table_is_visible'),
            args=[
                ('id', ('oid',)),
                ('search_path', ('text[]',)),
            ],
            returns=('bool',),
            volatility='stable',
            text=r'''
                SELECT pc.relnamespace IN (
                    SELECT oid
                    FROM edgedbsql_VER.pg_namespace pn
                    WHERE pn.nspname IN (select * from unnest(search_path))
                )
                FROM edgedbsql_VER.pg_class pc
                WHERE id = pc.oid
            '''
        ),
        trampoline.VersionedFunction(
            # Used instead of pg_catalog.format_type in pg_dump.
            name=('edgedbsql', '_format_type'),
            args=[
                ('typeoid', ('oid',)),
                ('typemod', ('integer',)),
            ],
            returns=('text',),
            volatility='STABLE',
            text=r'''
                SELECT
                    CASE
                        -- arrays
                        WHEN t.typcategory = 'A' THEN (
                            SELECT
                                quote_ident(nspname) || '.' ||
                                quote_ident(el.typname) || tm.mod || '[]'
                            FROM edgedbsql_VER.pg_namespace
                            WHERE oid = el.typnamespace
                        )
                        -- composite (tuples) and types in irregular schemas
                        WHEN (
                            t.typcategory = 'C'
                            OR COALESCE(tn.nspname IN (
                                'edgedb', 'edgedbt', 'edgedbpub', 'edgedbstd',
                                'edgedb_VER', 'edgedbstd_VER'
                            ), TRUE)
                        ) THEN (
                            SELECT
                                quote_ident(nspname) || '.' ||
                                quote_ident(t.typname) || tm.mod
                            FROM edgedbsql_VER.pg_namespace
                            WHERE oid = t.typnamespace
                        )
                        ELSE format_type(typeoid, typemod)
                    END
                FROM edgedbsql_VER.pg_type t
                LEFT JOIN pg_namespace tn ON t.typnamespace = tn.oid
                LEFT JOIN edgedbsql_VER.pg_type el ON t.typelem = el.oid
                CROSS JOIN (
                    SELECT CASE
                        WHEN typemod >= 0 THEN '(' || typemod::text || ')'
                        ELSE ''
                    END AS mod
                ) as tm
                WHERE t.oid = typeoid
            ''',
        ),
        trampoline.VersionedFunction(
            name=("edgedbsql", "pg_get_constraintdef"),
            args=[
                ('conid', ('oid',)),
            ],
            returns=('text',),
            volatility='stable',
            text=r"""
                -- Wrap in a subquery SELECT so that we get a clear failure
                -- if something is broken and this returns multiple rows.
                -- (By default it would silently return the first.)
                SELECT (
                    SELECT CASE
                        WHEN contype = 'p' THEN
                            'PRIMARY KEY(' || (
                                SELECT string_agg('"' || attname || '"', ', ')
                                FROM edgedbsql_VER.pg_attribute
                                WHERE attrelid = conrelid
                                  AND attnum = ANY(conkey)
                            ) || ')'
                        WHEN contype = 'f' THEN
                            'FOREIGN KEY ("' || (
                                SELECT attname
                                FROM edgedbsql_VER.pg_attribute
                                WHERE attrelid = conrelid
                                  AND attnum = ANY(conkey)
                            ) || '")' ||
                            ' REFERENCES "'
                            || pn.nspname || '"."' || pc.relname || '"(id)'
                        ELSE ''
                    END
                    FROM edgedbsql_VER.pg_constraint con
                    LEFT JOIN edgedbsql_VER.pg_class_tables pc
                        ON pc.oid = confrelid
                    LEFT JOIN edgedbsql_VER.pg_namespace pn
                        ON pc.relnamespace = pn.oid
                    WHERE con.oid = conid
                )
            """
        ),
        trampoline.VersionedFunction(
            name=("edgedbsql", "pg_get_constraintdef"),
            args=[
                ('conid', ('oid',)),
                ('pretty', ('bool',)),
            ],
            returns=('text',),
            volatility='stable',
            text=r"""
                SELECT pg_get_constraintdef(conid)
            """
        ),
    ]

    return (
        [cast(dbops.Command, dbops.CreateFunction(uuid_to_oid))]
        + [dbops.CreateView(virtual_tables)]
        + [
            cast(dbops.Command, dbops.CreateFunction(long_name)),
            cast(dbops.Command, dbops.CreateFunction(type_rename)),
            cast(dbops.Command, dbops.CreateFunction(namespace_rename)),
        ]
        + [dbops.CreateView(view) for view in views]
        + [dbops.CreateFunction(func) for func in util_functions]
    )
Generate the views and helper functions under the edgedbsql schema that emulate information_schema and pg_catalog for the SQL adapter.
_generate_sql_information_schema
python
geldata/gel
edb/pgsql/metaschema.py
https://github.com/geldata/gel/blob/master/edb/pgsql/metaschema.py
Apache-2.0
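One self-contained piece worth pulling out is uuid_to_oid: it packs 28 bits of the UUID's text form into an OID, shifted past the ranges Postgres reserves for itself. The function itself is the SQL above; the following is just a worked restatement of the same arithmetic in Python:

import uuid

def uuid_to_oid_sketch(id: uuid.UUID, extra: int = 0) -> int:
    # substring(id::text, 2, 7) is 1-based in SQL: characters 2..8 of the
    # dashed text form, which are hex digits 2..8 (7 nibbles = 28 bits)
    bits28 = int(id.hex[1:8], 16)
    return bits28 * 4 + extra + 40000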
def _pg_create_trigger(
    table_name: Tuple[str, str],
    exprs: Sequence[pgast.BaseExpr],
) -> dbops.CommandGroup:
    ops = dbops.CommandGroup()

    # prepare the expression to update __fts_document__
    document_exprs = []
    for expr in exprs:
        assert isinstance(expr, pgast.FTSDocument)

        lang_domain: Iterable[str] = expr.language_domain
        lang_domain = map(types.to_regconfig, lang_domain)
        unsupported = set(lang_domain).difference(types.pg_langs)
        if len(unsupported) > 0:
            _raise_unsupported_language_error(unsupported)

        text_sql = codegen.generate_source(expr.text)
        language_sql = codegen.generate_source(expr.language)

        document_expr = f'''
            to_tsvector(
                edgedb.fts_to_regconfig(({language_sql})::text),
                COALESCE({text_sql}, '')
            )
        '''
        if expr.weight:
            document_expr = f'setweight({document_expr}, {ql(expr.weight)})'
        document_exprs.append(document_expr)

    document_sql = ' || '.join(document_exprs) if document_exprs else 'NULL'

    # update existing rows
    ops.add_command(
        dbops.Query(
            f"""
            UPDATE {q(*table_name)} as NEW
            SET __fts_document__ = ({document_sql});
            """
        )
    )

    # create update function
    func_name = _pg_update_func_name(table_name)
    function = dbops.Function(
        name=func_name,
        text=f'''
            BEGIN
                NEW.__fts_document__ := ({document_sql});
                RETURN NEW;
            END;
        ''',
        volatility='immutable',
        returns='trigger',
        language='plpgsql',
    )
    ops.add_command(dbops.CreateFunction(function))

    # create trigger to update the __fts_document__
    trigger_name = _pg_trigger_name(table_name[1])
    trigger = dbops.Trigger(
        name=trigger_name,
        table_name=table_name,
        events=('insert', 'update'),
        timing=dbops.TriggerTiming.Before,
        procedure=func_name,
    )
    ops.add_command(dbops.CreateTrigger(trigger))

    return ops
Create the function and BEFORE INSERT/UPDATE trigger that keep a table's __fts_document__ column in sync, and backfill existing rows.
_pg_create_trigger
python
geldata/gel
edb/pgsql/deltafts.py
https://github.com/geldata/gel/blob/master/edb/pgsql/deltafts.py
Apache-2.0
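To see what the f-string above produces, here is the document expression for one hypothetical indexed expression; 'NEW."text"' and 'english' stand in for the compiled text and language SQL, so both are placeholders rather than actual compiler output:

# Illustrative only: inputs are placeholders for compiled SQL fragments.
text_sql = 'NEW."text"'
language_sql = "'english'"
document_expr = f"""
    to_tsvector(
        edgedb.fts_to_regconfig(({language_sql})::text),
        COALESCE({text_sql}, '')
    )
"""
# With a weight set, the expression is additionally wrapped:
weighted = f"setweight({document_expr}, 'A')"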
def _zombo_create_fts_document(
    index: s_indexes.Index,
    exprs: Sequence[pgast.BaseExpr],
    predicate_src: Optional[str],
    sql_kwarg_exprs: Dict[str, str],
    schema: s_schema.Schema,
) -> dbops.Command:
    ops = dbops.CommandGroup()

    table_name = common.get_index_table_backend_name(index, schema)
    module_name = index.get_name(schema).module
    index_name = common.get_index_backend_name(
        index.id, module_name, catenate=False
    )

    zombo_type_name = _zombo_type_name(table_name)
    ops.add_command(
        dbops.CreateCompositeType(
            dbops.CompositeType(
                name=zombo_type_name,
                columns=[
                    dbops.Column(
                        name=f'field{idx}',
                        type='text',
                    )
                    for idx, _ in enumerate(exprs)
                ],
            )
        )
    )

    type_mappings: List[Tuple[str, str]] = []

    document_exprs = []
    for idx, expr in enumerate(exprs):
        assert isinstance(expr, pgast.FTSDocument)

        text_sql = codegen.generate_source(expr.text)

        if len(expr.language_domain) != 1:
            raise errors.UnsupportedFeatureError(
                'zombo fts indexes support only exactly one language'
            )
        language = next(iter(expr.language_domain))

        document_exprs.append(text_sql)
        type_mappings.append((f'field{idx}', language))

    zombo_func_name = _zombo_func_name(table_name)
    ops.add_command(
        dbops.CreateFunction(
            dbops.Function(
                name=zombo_func_name,
                args=[('new', table_name)],
                returns=zombo_type_name,
                text=f'''
                    SELECT ROW(
                        {','.join(document_exprs)}
                    )::{q(*zombo_type_name)};
                ''',
            )
        )
    )

    for col_name, language in type_mappings:
        mapping = f'{{"type": "text", "analyzer": "{language}"}}'

        ops.add_command(
            dbops.Query(
                f"""PERFORM zdb.define_field_mapping(
                    {ql(q(*table_name))}::regclass,
                    {ql(col_name)}::text,
                    {ql(mapping)}::json
                )"""
            )
        )

    index_exprs = [f'{q(*zombo_func_name)}({qi(table_name[1])}.*)']

    pg_index = dbops.Index(
        name=index_name[1],
        table_name=table_name,  # type: ignore
        exprs=index_exprs,
        unique=False,
        inherit=True,
        with_clause={'url': ql('http://localhost:9200/')},
        predicate=predicate_src,
        metadata={
            'schemaname': str(index.get_name(schema)),
            'code': 'zombodb ((__col__))',
            'kwargs': sql_kwarg_exprs,
        },
    )
    ops.add_command(dbops.CreateIndex(pg_index))
    return ops
Create the composite type, row function, ZomboDB field mappings and index that back a full-text search index.
_zombo_create_fts_document
python
geldata/gel
edb/pgsql/deltafts.py
https://github.com/geldata/gel/blob/master/edb/pgsql/deltafts.py
Apache-2.0
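As a concrete illustration of the row function that _zombo_create_fts_document generates, here is the SELECT its f-string renders for two document expressions; all names below are hypothetical placeholders, not output of the real compiler:

# Hypothetical inputs standing in for compiled expressions and the
# generated composite type name.
document_exprs = ['NEW."title"', 'NEW."body"']
zombo_type = '"edgedbpub"."tbl_zombo_t"'
body = f"SELECT ROW({','.join(document_exprs)})::{zombo_type};"
# -> SELECT ROW(NEW."title",NEW."body")::"edgedbpub"."tbl_zombo_t";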
def quote_bytea_literal(data: bytes) -> str:
    """Return valid SQL representation of a bytes value."""

    if data:
        b = binascii.b2a_hex(data).decode('ascii')
        return f"'\\x{b}'::bytea"
    else:
        return "''::bytea"
Return valid SQL representation of a bytes value.
quote_bytea_literal
python
geldata/gel
edb/pgsql/common.py
https://github.com/geldata/gel/blob/master/edb/pgsql/common.py
Apache-2.0
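Because the escaping goes through binascii.b2a_hex, the emitted literals use the Postgres hex format for bytea. A quick usage check:

import binascii  # same import the function itself relies on

assert quote_bytea_literal(b'\x00\xff') == "'\\x00ff'::bytea"
assert quote_bytea_literal(b'') == "''::bytea"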
def edgedb_name_to_pg_name(name: str, prefix_length: int = 0) -> str:
    """Convert a Gel name to a valid PostgreSQL column name.

    PostgreSQL has a limit of 63 characters for column names.

    @param name: Gel name to convert
    @return: PostgreSQL column name
    """
    if not (0 <= prefix_length < s_def.MAX_NAME_LENGTH):
        raise ValueError('supplied name is too long '
                         'to be kept in original form')

    name = str(name)
    if len(name) <= s_def.MAX_NAME_LENGTH - prefix_length:
        return name

    return _edgedb_name_to_pg_name(name, prefix_length)
Convert a Gel name to a valid PostgreSQL column name. PostgreSQL has a limit of 63 characters for column names. @param name: Gel name to convert @return: PostgreSQL column name
edgedb_name_to_pg_name
python
geldata/gel
edb/pgsql/common.py
https://github.com/geldata/gel/blob/master/edb/pgsql/common.py
Apache-2.0
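The private _edgedb_name_to_pg_name that handles the over-long case is not included in this excerpt. A common shape for such helpers is truncate-plus-hash, so that distinct long names stay distinct under the 63-character limit; the sketch below is a hypothetical illustration of that idea, not the actual implementation:

import hashlib

def _shorten_name_sketch(name: str, prefix_length: int, max_len: int = 63) -> str:
    # Hypothetical: keep a readable prefix and append a short digest so
    # distinct long names remain distinct after truncation.
    digest = hashlib.md5(name.encode()).hexdigest()[:8]
    budget = max_len - prefix_length - len(digest) - 1
    return name[:budget] + '_' + digest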
def update_aspect(name, aspect):
    """Update the aspect on a non-catenated name.

    It also needs to be from an object that uses ids for names.
    """
    suffix = get_aspect_suffix(aspect)
    stripped = name[1].rsplit("_", 1)[0]
    if suffix:
        return (name[0], f'{stripped}_{suffix}')
    else:
        return (name[0], stripped)
Update the aspect on a non-catenated name. It also needs to be from an object that uses ids for names.
update_aspect
python
geldata/gel
edb/pgsql/common.py
https://github.com/geldata/gel/blob/master/edb/pgsql/common.py
Apache-2.0
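A worked example of the rename logic, with the suffix passed in directly since get_aspect_suffix is defined elsewhere in the module and not shown here:

def update_aspect_sketch(name: tuple, suffix: str) -> tuple:
    # mirrors update_aspect above for a ('schema', '<id>_<aspect>') pair
    stripped = name[1].rsplit('_', 1)[0]
    return (name[0], f'{stripped}_{suffix}' if suffix else stripped)

assert update_aspect_sketch(('edgedbpub', 'abc123_target'), 'index') == \
    ('edgedbpub', 'abc123_index')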
def get_version_key(num_patches: int):
    """Produce a version key to add to instdata keys after major patches.

    Patches that modify the schema class layout and introspection queries
    are not safe to downgrade from. So for such patches, we add a version
    suffix to the names of the core instdata entries that we would need to
    update, so that we don't clobber the old version.

    After a downgrade, we'll have more patches applied than we actually
    know exist in the running version, but since we compute the key based
    on the number of schema layout patches that we can *see*, we still
    compute the right key.
    """
    num_major = sum(
        p.startswith('edgeql+schema') for p, _ in PATCHES[:num_patches])
    if num_major == 0:
        return ''
    else:
        return f'_v{num_major}'
Produce a version key to add to instdata keys after major patches. Patches that modify the schema class layout and introspection queries are not safe to downgrade from. So for such patches, we add a version suffix to the names of the core instdata entries that we would need to update, so that we don't clobber the old version. After a downgrade, we'll have more patches applied than we actually know exist in the running version, but since we compute the key based on the number of schema layout patches that we can *see*, we still compute the right key.
get_version_key
python
geldata/gel
edb/pgsql/patches.py
https://github.com/geldata/gel/blob/master/edb/pgsql/patches.py
Apache-2.0
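A worked restatement of the counting rule, using a hypothetical patch list in place of the module-level PATCHES:

def get_version_key_sketch(patches, num_patches: int) -> str:
    # count only schema-layout patches among the ones we can see
    num_major = sum(
        kind.startswith('edgeql+schema') for kind, _ in patches[:num_patches])
    return f'_v{num_major}' if num_major else ''

hypothetical = [('sql', None), ('edgeql+schema', None), ('edgeql+schema', None)]
assert get_version_key_sketch(hypothetical, 1) == ''
assert get_version_key_sketch(hypothetical, 3) == '_v2'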
def enforce_ops(self) -> dbops.CommandGroup:
    ops = dbops.CommandGroup()

    tabconstr = self._table_constraint(self)

    constr_name = tabconstr.constraint_name()
    raw_constr_name = tabconstr.constraint_name(quote=False)

    for expr, relative_expr in zip(
        itertools.cycle(tabconstr._exprdata),
        tabconstr._relative_exprdata
    ):
        exprdata = expr.exprdata
        relative_exprdata = relative_expr.exprdata

        old_expr = relative_exprdata.old
        new_expr = exprdata.new

        assert relative_expr.subject_db_name
        schemaname, tablename = relative_expr.subject_db_name
        real_tablename = tabconstr.get_subject_name(quote=False)

        errmsg = 'duplicate key value violates unique ' \
                 'constraint {constr}'.format(constr=constr_name)
        detail = common.quote_literal(
            f"Key ({relative_exprdata.plain}) already exists."
        )

        if (
            isinstance(self.subject, s_pointers.Pointer)
            and self.pg_constr_data.table_type == 'link'
        ):
            key = "source"
        else:
            key = "id"

        except_data = tabconstr._except_data
        relative_except_data = relative_expr.except_data

        if except_data:
            assert relative_except_data
            except_part = f'''
                AND ({relative_except_data.old} is not true)
                AND ({except_data.new} is not true)
            '''
        else:
            except_part = ''

        check = dbops.Query(
            f'''
            SELECT
                edgedb_VER.raise(
                    NULL::text,
                    'unique_violation',
                    msg => '{errmsg}',
                    "constraint" => '{raw_constr_name}',
                    "table" => '{tablename}',
                    "schema" => '{schemaname}',
                    detail => {detail}
                )
            FROM {common.qname(schemaname, tablename)} AS OLD
            CROSS JOIN {common.qname(*real_tablename)} AS NEW
            WHERE {old_expr} = {new_expr}
                and OLD.{key} != NEW.{key}
                {except_part}
            INTO _dummy_text;
            '''
        )
        ops.add_command(check)

    return ops
Build the query that scans existing rows for violations of a newly created unique constraint and raises unique_violation if any are found.
enforce_ops
python
geldata/gel
edb/pgsql/schemamech.py
https://github.com/geldata/gel/blob/master/edb/pgsql/schemamech.py
Apache-2.0
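For orientation, the query enforce_ops emits has the following shape once the f-string placeholders are filled in; the schema, table and constraint names here are hypothetical stand-ins:

# Illustrative rendering of the enforce_ops check query (names are
# hypothetical; OLD and NEW alias two scans of the affected tables).
check_sql = '''
SELECT
    edgedb_VER.raise(
        NULL::text,
        'unique_violation',
        msg => 'duplicate key value violates unique constraint "c"',
        "constraint" => 'c',
        "table" => 'tbl',
        "schema" => 'sch',
        detail => 'Key (name) already exists.'
    )
FROM "sch"."tbl" AS OLD
CROSS JOIN "sch"."tbl" AS NEW
WHERE OLD.name = NEW.name and OLD.id != NEW.id
INTO _dummy_text;
'''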
def _rewrite_names_in_sql(text: str, schema: s_schema.Schema) -> str:
    """Rewrite the SQL output of the compiler to include real object names.

    Replace UUIDs with object names when possible. The output of this
    won't be valid, but will probably be easier to read.

    This is done by default when pretty printing our "reordered" output,
    which isn't anything like valid SQL anyway.
    """
    # Functions are actually named after their `backend_name` rather
    # than their id, so that overloaded functions all have the same
    # name. Build a map from `backend_name` to real names. (This dict
    # comprehension might have collisions, but that's fine; the names
    # we get out will be the same no matter which is picked.)
    func_map = {
        f.get_backend_name(schema): f
        for f in schema.get_objects(type=s_funcs.Function)
    }

    # Find all the uuids and try to rewrite them.
    for m in set(uuidgen.UUID_RE.findall(text)):
        uid = uuid.UUID(m)
        sobj = schema.get_by_id(uid, default=None)
        if not sobj:
            sobj = func_map.get(uid)
        if sobj:
            s = _obj_to_name(sobj, schema)
            text = text.replace(m, s)

    return text
Rewrite the SQL output of the compiler to include real object names. Replace UUIDs with object names when possible. The output of this won't be valid, but will probably be easier to read. This is done by default when pretty printing our "reordered" output, which isn't anything like valid SQL anyway.
_rewrite_names_in_sql
python
geldata/gel
edb/pgsql/debug.py
https://github.com/geldata/gel/blob/master/edb/pgsql/debug.py
Apache-2.0
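A self-contained sketch of the substitution step, with a plain dict standing in for the schema and function-map lookups; uuidgen.UUID_RE is part of edb and not shown here, so a conventional UUID regex is assumed in its place:

import re

UUID_RE_SKETCH = re.compile(
    r'[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}')

def rewrite_names_sketch(text: str, names: dict) -> str:
    # replace every known UUID occurrence with its mapped object name
    for m in set(UUID_RE_SKETCH.findall(text)):
        if m in names:
            text = text.replace(m, names[m])
    return text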
def get_trigger_proc_text(self):
    chunks = []

    constr_name = self.constraint_name()
    raw_constr_name = self.constraint_name(quote=False)

    errmsg = 'duplicate key value violates unique ' \
             'constraint {constr}'.format(constr=constr_name)

    for expr, relative_expr in zip(
        itertools.cycle(self._exprdata),
        self._relative_exprdata
    ):
        exprdata = expr.exprdata
        relative_exprdata = relative_expr.exprdata

        except_data = self._except_data
        relative_except_data = relative_expr.except_data

        if self._except_data:
            except_part = f'''
                AND ({relative_except_data.plain} is not true)
                AND ({except_data.new} is not true)
            '''
        else:
            except_part = ''

        # Link tables get updated by deleting and then reinserting
        # rows, and so the trigger might fire even on rows that
        # did not *really* change. Check `source` also to prevent
        # spurious errors in those cases. (Anything with the same
        # source must have the same type, so any genuine constraint
        # errors this filters away will get caught by the *actual*
        # constraint.)
        # We *could* do a check for id on object tables, but it
        # isn't needed and would take at least some time.
        src_check = (
            ' AND source != NEW.source'
            if self._table_type == 'link'
            else ''
        )

        schemaname, tablename = relative_expr.subject_db_name
        text = '''
            PERFORM
                TRUE
              FROM
                {table}
             WHERE
                {plain_expr} = {new_expr}{except_part}{src_check};
            IF FOUND THEN
                RAISE unique_violation
                    USING
                        TABLE = '{tablename}',
                        SCHEMA = '{schemaname}',
                        CONSTRAINT = '{constr}',
                        MESSAGE = '{errmsg}',
                        DETAIL = {detail};
            END IF;
        '''.format(
            table=common.qname(schemaname, tablename),
            plain_expr=relative_exprdata.plain,
            new_expr=exprdata.new,
            except_part=except_part,
            src_check=src_check,
            schemaname=schemaname,
            tablename=tablename,
            constr=raw_constr_name,
            errmsg=errmsg,
            detail=common.quote_literal(
                f"Key ({relative_exprdata.plain}) already exists."
            ),
        )

        chunks.append(text)

    text = 'BEGIN\n' + '\n\n'.join(chunks) + '\nRETURN NEW;\nEND;'

    return text
Build the PL/pgSQL trigger procedure body that raises unique_violation when an insert or update conflicts with an existing row.
get_trigger_proc_text
python
geldata/gel
edb/pgsql/deltadbops.py
https://github.com/geldata/gel/blob/master/edb/pgsql/deltadbops.py
Apache-2.0
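For orientation, one rendered chunk of the trigger body looks like this once the template is filled in; every name below is a hypothetical placeholder for the real schema, table, constraint and expression strings:

chunk = '''
PERFORM TRUE
  FROM "sch"."tbl"
 WHERE "name" = NEW."name" AND source != NEW.source;
IF FOUND THEN
    RAISE unique_violation
        USING
            TABLE = 'tbl',
            SCHEMA = 'sch',
            CONSTRAINT = 'c',
            MESSAGE = 'duplicate key value violates unique constraint "c"',
            DETAIL = 'Key ("name") already exists.';
END IF;
'''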
def create_constraint_trigger_and_fuction(
    self, constraint: SchemaConstraintTableConstraint
):
    """Create constraint trigger FUNCTION and TRIGGER.

    Adds the new function to the trigger.
    Disables the trigger if possible.
    """
    if constraint.requires_triggers():
        # Create trigger function
        self.add_commands(self.create_constr_trigger_function(constraint))

        proc_name = constraint.get_trigger_procname()
        cr_trigger = self.create_constr_trigger(
            self.name, constraint, proc_name)
        self.add_commands(cr_trigger)

        if constraint.can_disable_triggers():
            self.add_commands(
                self.disable_constr_trigger(self.name, constraint))
Create constraint trigger FUNCTION and TRIGGER. Adds the new function to the trigger. Disables the trigger if possible.
create_constraint_trigger_and_fuction
python
geldata/gel
edb/pgsql/deltadbops.py
https://github.com/geldata/gel/blob/master/edb/pgsql/deltadbops.py
Apache-2.0
def drop_constraint_trigger_and_fuction(
    self, constraint: SchemaConstraintTableConstraint
):
    """Drop constraint trigger FUNCTION and TRIGGER."""
    if constraint.requires_triggers():
        self.add_commands(self.drop_constr_trigger(
            constraint._subject_name, constraint))
        proc_name = constraint.get_trigger_procname()
        self.add_commands(self.drop_constr_trigger_function(proc_name))
Drop constraint trigger FUNCTION and TRIGGER.
drop_constraint_trigger_and_fuction
python
geldata/gel
edb/pgsql/deltadbops.py
https://github.com/geldata/gel/blob/master/edb/pgsql/deltadbops.py
Apache-2.0