Dataset columns:

    code         string, length 26 to 870k
    docstring    string, length 1 to 65.6k
    func_name    string, length 1 to 194
    language     string, 1 distinct value
    repo         string, length 8 to 68
    path         string, length 5 to 194
    url          string, length 46 to 254
    license      string, 4 distinct values
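A minimal sketch of reading rows with this column layout, assuming the Hugging Face `datasets` library is available; the dataset identifier below is a placeholder, since the dump does not name the dataset.

# Minimal sketch, assuming the Hugging Face `datasets` library; the dataset
# name "org/code-docstring-corpus" is a hypothetical placeholder, not the
# real identifier of this dump.
from datasets import load_dataset

ds = load_dataset("org/code-docstring-corpus", split="train")

# Each record carries the columns listed above.
for row in ds.select(range(3)):
    print(row["func_name"], row["repo"], row["path"])
    print(row["url"], row["license"])
    print(row["code"][:120], "...")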
async def test_edgeql_functions_inline_delete_policy_source_02(self): await self.con.execute(''' create type Bar { create required property a -> int64; }; create type Baz { create required property b -> int64; create link bar -> Bar { on source delete delete target; }; }; create function foo(x: int64) -> set of int64 { set is_inlined := true; using ( (delete Baz filter .b <= x).b ); }; ''') async def reset_data(): await self.con.execute(''' delete Baz; delete Bar; insert Baz{b := 4, bar := (insert Bar{a := 1})}; insert Baz{b := 5, bar := (insert Bar{a := 2})}; insert Baz{b := 6, bar := (insert Bar{a := 3})}; ''') await reset_data() await self.assert_query_result( 'select foo(0)', [], ) await self.assert_query_result( 'select Bar.a', [1, 2, 3], sort=True, ) await self.assert_query_result( 'select Baz{a := .bar.a, b} order by .b', [ {'a': 1, 'b': 4}, {'a': 2, 'b': 5}, {'a': 3, 'b': 6}, ], ) await reset_data() await self.assert_query_result( 'select foo(4)', [4], ) await self.assert_query_result( 'select Bar.a', [2, 3], sort=True, ) await self.assert_query_result( 'select Baz{a := .bar.a, b} order by .b', [ {'a': 2, 'b': 5}, {'a': 3, 'b': 6}, ], ) await reset_data() await self.assert_query_result( 'select foo(5)', [4, 5], sort=True, ) await self.assert_query_result( 'select Bar.a', [3], ) await self.assert_query_result( 'select Baz{a := .bar.a, b} order by .b', [ {'a': 3, 'b': 6}, ], ) await reset_data() await self.assert_query_result( 'select foo(6)', [4, 5, 6], sort=True, ) await self.assert_query_result( 'select Bar.a', [], ) await self.assert_query_result( 'select Baz{a := .bar.a, b} order by .b', [], )
) async def reset_data(): await self.con.execute(
test_edgeql_functions_inline_delete_policy_source_02
python
geldata/gel
tests/test_edgeql_functions_inline.py
https://github.com/geldata/gel/blob/master/tests/test_edgeql_functions_inline.py
Apache-2.0
async def test_edgeql_functions_inline_delete_policy_source_03(self): await self.con.execute(''' create type Bar { create required property a -> int64; }; create type Baz { create required property b -> int64; create link bar -> Bar { on source delete delete target if orphan; }; }; create function foo(x: int64) -> set of int64 { set is_inlined := true; using ( (delete Baz filter .b <= x).b ); }; ''') async def reset_data(): await self.con.execute(''' delete Baz; delete Bar; insert Baz{b := 4, bar := (insert Bar{a := 1})}; insert Baz{b := 5, bar := (insert Bar{a := 2})}; insert Baz{b := 6, bar := (insert Bar{a := 3})}; insert Baz{ b := 7, bar := assert_exists((select Bar filter .a = 1 limit 1)), }; ''') await reset_data() await self.assert_query_result( 'select foo(0)', [], ) await self.assert_query_result( 'select Bar.a', [1, 2, 3], sort=True, ) await self.assert_query_result( 'select Baz{a := .bar.a, b} order by .b', [ {'a': 1, 'b': 4}, {'a': 2, 'b': 5}, {'a': 3, 'b': 6}, {'a': 1, 'b': 7}, ], ) await reset_data() await self.assert_query_result( 'select foo(4)', [4], ) await self.assert_query_result( 'select Bar.a', [1, 2, 3], sort=True, ) await self.assert_query_result( 'select Baz{a := .bar.a, b} order by .b', [ {'a': 2, 'b': 5}, {'a': 3, 'b': 6}, {'a': 1, 'b': 7}, ], ) await reset_data() await self.assert_query_result( 'select foo(5)', [4, 5], sort=True, ) await self.assert_query_result( 'select Bar.a', [1, 3], ) await self.assert_query_result( 'select Baz{a := .bar.a, b} order by .b', [ {'a': 3, 'b': 6}, {'a': 1, 'b': 7}, ], ) await reset_data() await self.assert_query_result( 'select foo(6)', [4, 5, 6], sort=True, ) await self.assert_query_result( 'select Bar.a', [1], ) await self.assert_query_result( 'select Baz{a := .bar.a, b} order by .b', [ {'a': 1, 'b': 7}, ], )
) async def reset_data(): await self.con.execute(
test_edgeql_functions_inline_delete_policy_source_03
python
geldata/gel
tests/test_edgeql_functions_inline.py
https://github.com/geldata/gel/blob/master/tests/test_edgeql_functions_inline.py
Apache-2.0
async def test_edgeql_functions_inline_delete_nested_01(self): await self.con.execute(''' create type Bar { create required property a -> int64; }; create function inner(x: int64) -> set of Bar { set is_inlined := true; using ((delete Bar filter .a <= x)); }; create function foo(x: int64) -> set of Bar { set is_inlined := true; using (inner(x)); }; ''') async def reset_data(): await self.con.execute(''' delete Bar; insert Bar{a := 1}; insert Bar{a := 2}; insert Bar{a := 3}; ''') await reset_data() await self.assert_query_result( 'select foo(1).a', [1], ) await self.assert_query_result( 'select Bar.a', [2, 3], sort=True, ) await reset_data() await self.assert_query_result( 'select foo(2).a', [1, 2], sort=True, ) await self.assert_query_result( 'select Bar.a', [3], )
) async def reset_data(): await self.con.execute(
test_edgeql_functions_inline_delete_nested_01
python
geldata/gel
tests/test_edgeql_functions_inline.py
https://github.com/geldata/gel/blob/master/tests/test_edgeql_functions_inline.py
Apache-2.0
async def test_edgeql_ext_pg_trgm_similarity(self): await self.assert_query_result( """ SELECT Gist { p_str, sim := ext::pg_trgm::similarity(.p_str, "qwertyu0988") } FILTER ext::pg_trgm::similar(.p_str, "qwertyu0988") ORDER BY .sim DESC THEN .p_str LIMIT 11 """, [ { "p_str": "qwertyu0988", "sim": 1.0, }, { "p_str": "qwertyu0980", "sim": 0.714286, }, { "p_str": "qwertyu0981", "sim": 0.714286, }, { "p_str": "qwertyu0982", "sim": 0.714286, }, { "p_str": "qwertyu0983", "sim": 0.714286, }, { "p_str": "qwertyu0984", "sim": 0.714286, }, { "p_str": "qwertyu0985", "sim": 0.714286, }, { "p_str": "qwertyu0986", "sim": 0.714286, }, { "p_str": "qwertyu0987", "sim": 0.714286, }, { "p_str": "qwertyu0989", "sim": 0.714286, }, { "p_str": "qwertyu0088", "sim": 0.6, }, ] ) qry = """ SELECT Gist { p_str, sim_dist := ext::pg_trgm::similarity_dist( .p_str, "q0987wertyu0988" ) } ORDER BY .sim_dist EMPTY LAST LIMIT 2 """ await self.assert_query_result( qry, [ { "p_str": "qwertyu0988", "sim_dist": 0.411765, }, { "p_str": "qwertyu0987", "sim_dist": 0.5, }, ] ) await self.assert_index_use( qry, index_type="ext::pg_trgm::gist", ) qry = """ SELECT Gist2 { p_str, sim_dist := ext::pg_trgm::similarity_dist( .p_str, "q0987wertyu0988" ), p_str_2, sim_dist_2 := ext::pg_trgm::similarity_dist( .p_str_2, "q0987opasdf0988" ), } ORDER BY .sim_dist EMPTY LAST THEN .sim_dist_2 EMPTY LAST LIMIT 2 """ await self.assert_query_result( qry, [ { "p_str": "qwertyu0988", "sim_dist": 0.411765, "p_str_2": "iopasdf0988", "sim_dist_2": 0.5, }, { "p_str": "qwertyu0987", "sim_dist": 0.5, "p_str_2": "iopasdf0987", "sim_dist_2": 0.57894737, }, ] ) await self.assert_index_use( qry, index_type="ext::pg_trgm::gist", )
SELECT Gist { p_str, sim := ext::pg_trgm::similarity(.p_str, "qwertyu0988") } FILTER ext::pg_trgm::similar(.p_str, "qwertyu0988") ORDER BY .sim DESC THEN .p_str LIMIT 11 """, [ { "p_str": "qwertyu0988", "sim": 1.0, }, { "p_str": "qwertyu0980", "sim": 0.714286, }, { "p_str": "qwertyu0981", "sim": 0.714286, }, { "p_str": "qwertyu0982", "sim": 0.714286, }, { "p_str": "qwertyu0983", "sim": 0.714286, }, { "p_str": "qwertyu0984", "sim": 0.714286, }, { "p_str": "qwertyu0985", "sim": 0.714286, }, { "p_str": "qwertyu0986", "sim": 0.714286, }, { "p_str": "qwertyu0987", "sim": 0.714286, }, { "p_str": "qwertyu0989", "sim": 0.714286, }, { "p_str": "qwertyu0088", "sim": 0.6, }, ] ) qry =
test_edgeql_ext_pg_trgm_similarity
python
geldata/gel
tests/test_edgeql_ext_pg_trgm.py
https://github.com/geldata/gel/blob/master/tests/test_edgeql_ext_pg_trgm.py
Apache-2.0
async def test_edgeql_ext_pg_trgm_word_similarity(self): await self.assert_query_result( """ SELECT Gist { p_str, sim := ext::pg_trgm::word_similarity("Kabankala", .p_str) } FILTER ext::pg_trgm::word_similar("Kabankala", .p_str) ORDER BY .sim DESC THEN .p_str """, [ { "p_str": "Kabankala", "sim": 1.0, }, { "p_str": "Kabankalan City Public Plaza", "sim": 0.9, }, { "p_str": "Abankala", "sim": 0.7, }, { "p_str": "Ntombankala School", "sim": 0.6, }, ] ) qry = """ SELECT Gist { p_str, word_sim_dist := ext::pg_trgm::word_similarity_dist( "Kabankala", .p_str ) } ORDER BY .word_sim_dist EMPTY LAST LIMIT 7 """ await self.assert_query_result( qry, [ { "p_str": "Kabankala", "word_sim_dist": 0.0, }, { "p_str": "Kabankalan City Public Plaza", "word_sim_dist": 0.1, }, { "p_str": "Abankala", "word_sim_dist": 0.3, }, { "p_str": "Ntombankala School", "word_sim_dist": 0.4, }, { "p_str": "Kabakala", "word_sim_dist": 0.416667, }, { "p_str": "Nehalla Bankalah Reserved Forest", "word_sim_dist": 0.5, }, { "p_str": "Kabikala", "word_sim_dist": 0.538462, }, ] ) await self.assert_index_use( qry, index_type="ext::pg_trgm::gist", ) qry = """ SELECT Gist2 { p_str, word_sim_dist := ext::pg_trgm::word_similarity_dist( "Kabankala", .p_str ), p_str_2, word_sim_dist_2 := ext::pg_trgm::word_similarity_dist( "Pub", .p_str_2 ) } ORDER BY .word_sim_dist EMPTY LAST THEN .word_sim_dist_2 EMPTY LAST LIMIT 2 """ await self.assert_query_result( qry, [ { "p_str": "Kabankala", "word_sim_dist": 0.0, }, { "p_str": "Kabankalan City Public Plaza", "word_sim_dist": 0.1, }, ] ) await self.assert_index_use( qry, index_type="ext::pg_trgm::gist", )
SELECT Gist { p_str, sim := ext::pg_trgm::word_similarity("Kabankala", .p_str) } FILTER ext::pg_trgm::word_similar("Kabankala", .p_str) ORDER BY .sim DESC THEN .p_str """, [ { "p_str": "Kabankala", "sim": 1.0, }, { "p_str": "Kabankalan City Public Plaza", "sim": 0.9, }, { "p_str": "Abankala", "sim": 0.7, }, { "p_str": "Ntombankala School", "sim": 0.6, }, ] ) qry =
test_edgeql_ext_pg_trgm_word_similarity
python
geldata/gel
tests/test_edgeql_ext_pg_trgm.py
https://github.com/geldata/gel/blob/master/tests/test_edgeql_ext_pg_trgm.py
Apache-2.0
async def test_edgeql_ext_pg_trgm_strict_word_similarity(self): await self.assert_query_result( """ SELECT Gist { p_str, sim := ext::pg_trgm::strict_word_similarity( "Kabankala", .p_str ) } FILTER ext::pg_trgm::strict_word_similar("Kabankala", .p_str) ORDER BY .sim DESC THEN .p_str """, [ { "p_str": "Kabankala", "sim": 1.0, }, { "p_str": "Kabankalan City Public Plaza", "sim": 0.75, }, { "p_str": "Abankala", "sim": 0.583333, }, { "p_str": "Kabakala", "sim": 0.583333, }, ] ) qry = """ SELECT Gist { p_str, word_sim_dist := ext::pg_trgm::strict_word_similarity_dist( "Alaikallupoddakulam", .p_str ) } ORDER BY .word_sim_dist EMPTY LAST LIMIT 7 """ await self.assert_query_result( qry, [ { "p_str": "Alaikallupoddakulam", "word_sim_dist": 0.0, }, { "p_str": "Alaikallupodda Alankulam", "word_sim_dist": 0.25, }, { "p_str": "Alaikalluppodda Kulam", "word_sim_dist": 0.32, }, { "p_str": "Mulaikallu Kulam", "word_sim_dist": 0.615385, }, { "p_str": "Koraikalapu Kulam", "word_sim_dist": 0.724138, }, { "p_str": "Vaikaliththevakulam", "word_sim_dist": 0.75, }, { "p_str": "Karaivaikal Kulam", "word_sim_dist": 0.766667, }, ] ) await self.assert_index_use( qry, index_type="ext::pg_trgm::gist", )
SELECT Gist { p_str, sim := ext::pg_trgm::strict_word_similarity( "Kabankala", .p_str ) } FILTER ext::pg_trgm::strict_word_similar("Kabankala", .p_str) ORDER BY .sim DESC THEN .p_str """, [ { "p_str": "Kabankala", "sim": 1.0, }, { "p_str": "Kabankalan City Public Plaza", "sim": 0.75, }, { "p_str": "Abankala", "sim": 0.583333, }, { "p_str": "Kabakala", "sim": 0.583333, }, ] ) qry =
test_edgeql_ext_pg_trgm_strict_word_similarity
python
geldata/gel
tests/test_edgeql_ext_pg_trgm.py
https://github.com/geldata/gel/blob/master/tests/test_edgeql_ext_pg_trgm.py
Apache-2.0
async def test_edgeql_ext_pg_trgm_config(self): # We are going to fiddle with the similarity_threshold config # and make sure it works right. sim_query = """ WITH similar := ( SELECT Gist { p_str, sim := ext::pg_trgm::similarity(.p_str, "qwertyu0988") } FILTER ext::pg_trgm::similar(.p_str, "qwertyu0988") ), SELECT exists similar and all(similar.sim >= <float32>$sim) """ cfg_query = """ select cfg::Config.extensions[is ext::pg_trgm::Config] .similarity_threshold; """ await self.assert_query_result( sim_query, [True], variables=dict(sim=0.3), ) await self.assert_query_result( sim_query, [False], variables=dict(sim=0.5), ) await self.assert_query_result( sim_query, [False], variables=dict(sim=0.9), ) await self.assert_query_result( cfg_query, [0.3], ) await self.con.execute(''' configure session set ext::pg_trgm::Config::similarity_threshold := 0.5 ''') await self.assert_query_result( sim_query, [True], variables=dict(sim=0.3), ) await self.assert_query_result( sim_query, [True], variables=dict(sim=0.5), ) await self.assert_query_result( sim_query, [False], variables=dict(sim=0.9), ) await self.assert_query_result( cfg_query, [0.5], ) await self.con.execute(''' configure session set ext::pg_trgm::Config::similarity_threshold := 0.9 ''') await self.assert_query_result( sim_query, [True], variables=dict(sim=0.3), ) await self.assert_query_result( sim_query, [True], variables=dict(sim=0.5), ) await self.assert_query_result( sim_query, [True], variables=dict(sim=0.9), ) await self.assert_query_result( cfg_query, [0.9], ) await self.con.execute(''' configure session reset ext::pg_trgm::Config::similarity_threshold ''') await self.assert_query_result( sim_query, [True], variables=dict(sim=0.3), ) await self.assert_query_result( sim_query, [False], variables=dict(sim=0.5), ) await self.assert_query_result( sim_query, [False], variables=dict(sim=0.9), ) await self.assert_query_result( cfg_query, [0.3], )
cfg_query =
test_edgeql_ext_pg_trgm_config
python
geldata/gel
tests/test_edgeql_ext_pg_trgm.py
https://github.com/geldata/gel/blob/master/tests/test_edgeql_ext_pg_trgm.py
Apache-2.0
def test_graphql_schema_reflection_05(self):
    # `Fixed` is not supposed to have either "input" or "update" types.
    self.assert_graphql_query_result(r"""
        query {
            in: __type(name: "InsertFixed") {
                inputFields { name }
            }
            up: __type(name: "UpdateFixed") {
                inputFields { name }
            }
        }
    """, {
        'in': None,
        'up': None,
    })

    # `NotEditable` is only supposed to have "input" type.
    self.assert_graphql_query_result(r"""
        query {
            in: __type(name: "InsertNotEditable") {
                inputFields { name }
            }
            up: __type(name: "UpdateNotEditable") {
                inputFields { name }
            }
        }
    """, {
        'in': {
            'inputFields': [{'name': 'once'}],
        },
        'up': None,
    })
, { 'in': None, 'up': None, }) # `NotEditable` is only supposed to have "input" type. self.assert_graphql_query_result(r
test_graphql_schema_reflection_05
python
geldata/gel
tests/test_http_graphql_schema.py
https://github.com/geldata/gel/blob/master/tests/test_http_graphql_schema.py
Apache-2.0
async def test_edgeql_volatility_for_11(self):
    await self.assert_query_result(
        r'''
            WITH X := ((FOR x in {(Obj { x := random() })} UNION (x.x))),
            SELECT count(DISTINCT X)
        ''',
        [3],
    )

    await self.assert_query_result(
        r'''
            WITH X := ((FOR x in {(Obj { x := random() })} UNION (x.x))),
            SELECT count(X)
        ''',
        [3],
    )
, [3], ) await self.assert_query_result( r
test_edgeql_volatility_for_11
python
geldata/gel
tests/test_edgeql_volatility.py
https://github.com/geldata/gel/blob/master/tests/test_edgeql_volatility.py
Apache-2.0
async def test_edgeql_volatility_for_12(self):
    await self.assert_query_result(
        r'''
            WITH X := ((FOR x in {(Obj { x := random() }).x} UNION (x))),
            SELECT count(DISTINCT X)
        ''',
        [3],
    )

    await self.assert_query_result(
        r'''
            WITH X := ((FOR x in {(Obj { x := random() }).x} UNION (x))),
            SELECT count(X)
        ''',
        [3],
    )
, [3], ) await self.assert_query_result( r
test_edgeql_volatility_for_12
python
geldata/gel
tests/test_edgeql_volatility.py
https://github.com/geldata/gel/blob/master/tests/test_edgeql_volatility.py
Apache-2.0
async def test_edgeql_volatility_with_and_use_01(self):
    await self.assert_query_result(
        r'''
            WITH X := (Obj { x := random() }).x,
            SELECT count(DISTINCT X);
        ''',
        [3],
    )

    await self.assert_query_result(
        r'''
            WITH X := (Obj { x := random() }).x,
            SELECT count(X);
        ''',
        [3],
    )
, [3], ) await self.assert_query_result( r
test_edgeql_volatility_with_and_use_01
python
geldata/gel
tests/test_edgeql_volatility.py
https://github.com/geldata/gel/blob/master/tests/test_edgeql_volatility.py
Apache-2.0
async def test_edgeql_volatility_with_and_use_02(self):
    await self.assert_query_result(
        r'''
            WITH X := (SELECT Obj { x := random() }).x,
            SELECT count(DISTINCT X);
        ''',
        [3],
    )

    await self.assert_query_result(
        r'''
            WITH X := (SELECT Obj { x := random() }).x,
            SELECT count(X);
        ''',
        [3],
    )
, [3], ) await self.assert_query_result( r
test_edgeql_volatility_with_and_use_02
python
geldata/gel
tests/test_edgeql_volatility.py
https://github.com/geldata/gel/blob/master/tests/test_edgeql_volatility.py
Apache-2.0
async def test_edgeql_volatility_with_05(self):
    await self.con.execute(r'''
        CREATE TYPE Foo {
            CREATE PROPERTY asdf -> tuple<float64>
        };
    ''')

    await self.con.query(r'''
        WITH X := (random(),)
        SELECT X.0;
    ''')

    await self.con.query(r'''
        WITH X := {(random(),),(random(),)}
        SELECT X.0;
    ''')
) await self.con.query(r
test_edgeql_volatility_with_05
python
geldata/gel
tests/test_edgeql_volatility.py
https://github.com/geldata/gel/blob/master/tests/test_edgeql_volatility.py
Apache-2.0
async def test_edgeql_volatility_update_clause_01(self):
    # Spurious failure probability: 1/2^99
    await self.con.execute(r'''
        FOR x in {_gen_series(4,100)} UNION (
            INSERT Obj { n := x })
    ''')

    await self.assert_query_result(
        r'''
            SELECT count(Obj)
        ''',
        [100],
    )

    await self.assert_query_result(
        r'''
            WITH X := (UPDATE Obj FILTER random() > 0.5 SET { n := -1 })
            SELECT count(X) NOT IN {0, 100}
        ''',
        [True],
    )

    await self.assert_query_result(
        r'''
            WITH X := (SELECT Obj FILTER .n < 0)
            SELECT count(X) != 0 AND count(X) != 100
        ''',
        [True],
    )
) await self.assert_query_result( r
test_edgeql_volatility_update_clause_01
python
geldata/gel
tests/test_edgeql_volatility.py
https://github.com/geldata/gel/blob/master/tests/test_edgeql_volatility.py
Apache-2.0
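The "Spurious failure probability: 1/2^99" comment in test_edgeql_volatility_update_clause_01 above (and in test_edgeql_volatility_delete_clause_01 below) follows from a simple counting argument: each of the 100 objects passes the random() > 0.5 filter independently with probability 1/2, and the assertion only fails spuriously when all 100 or none of them do. A standalone check of that arithmetic, in plain Python with no test harness assumed:

# Back-of-the-envelope check for the "Spurious failure probability: 1/2^99"
# comment: the test fails spuriously only when every object is touched or
# none is, each object being touched independently with probability 1/2.
from fractions import Fraction

p_all = Fraction(1, 2) ** 100     # every one of the 100 objects touched
p_none = Fraction(1, 2) ** 100    # none of the 100 objects touched
p_spurious = p_all + p_none

assert p_spurious == Fraction(1, 2 ** 99)
print(p_spurious)                 # == 1 / 2**99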
async def test_edgeql_volatility_delete_clause_01(self):
    # Spurious failure probability: 1/2^99
    await self.con.execute(r'''
        FOR x in {_gen_series(4,100)} UNION (
            INSERT Obj { n := x })
    ''')

    await self.assert_query_result(
        r'''
            WITH X := (DELETE Obj FILTER random() > 0.5)
            SELECT count(X) NOT IN {0, 100}
        ''',
        [True],
    )

    await self.assert_query_result(
        r'''
            SELECT count(Obj) != 0 AND count(Obj) != 100
        ''',
        [True],
    )
) await self.assert_query_result( r
test_edgeql_volatility_delete_clause_01
python
geldata/gel
tests/test_edgeql_volatility.py
https://github.com/geldata/gel/blob/master/tests/test_edgeql_volatility.py
Apache-2.0
async def test_edgeql_volatility_select_tuples_03(self):
    await self.assert_query_result(r'''
        WITH X := ((SELECT Obj { m := next() }),),
             Y := ((SELECT Obj { m := next() }),),
        SELECT count((SELECT (X, Y) FILTER .0 = .1));
    ''', [
        3,
    ])

    await self.assert_query_result(r'''
        WITH X := ((SELECT Obj { m := next() }),),
             Y := ((SELECT Obj { m := next() }),),
        SELECT count((SELECT (X, Y) FILTER .0 < .1));
    ''', [
        3,
    ])

    await self.assert_query_result(r'''
        WITH X := ((SELECT Obj { m := next() }),),
             Y := (Obj,),
        SELECT count((SELECT (X, Y) FILTER .0 < .1));
    ''', [
        3,
    ])
, [ 3, ]) await self.assert_query_result(r
test_edgeql_volatility_select_tuples_03
python
geldata/gel
tests/test_edgeql_volatility.py
https://github.com/geldata/gel/blob/master/tests/test_edgeql_volatility.py
Apache-2.0
async def test_edgeql_volatility_errors_01(self): async with self._run_and_rollback(): with self.assertRaisesRegex( edgedb.QueryError, "can not take cross product of volatile operation", _position=36): await self.con.execute( r""" SELECT Obj.n + random() """ ) async with self._run_and_rollback(): with self.assertRaisesRegex( edgedb.QueryError, "can not take cross product of volatile operation", _position=36): await self.con.execute( r""" SELECT (Obj.n, random()) """ ) async with self._run_and_rollback(): with self.assertRaisesRegex( edgedb.QueryError, "can not take cross product of volatile operation"): await self.con.execute( r""" SELECT ({1,2}, random()) """ ) async with self._run_and_rollback(): with self.assertRaisesRegex( edgedb.QueryError, "can not take cross product of volatile operation", _position=28): await self.con.execute( r""" SELECT random() + Obj.n """ ) async with self._run_and_rollback(): with self.assertRaisesRegex( edgedb.QueryError, "can not take cross product of volatile operation", _position=37): await self.con.execute( r""" SELECT {1,2} + (FOR x in {1,2,3} UNION (x*random())) """ ) async with self._run_and_rollback(): with self.assertRaisesRegex( edgedb.QueryError, "can not take cross product of volatile operation", _position=37): await self.con.execute( r""" SELECT ({1,2}, (INSERT Obj { n := 100 })) """ ) async with self._run_and_rollback(): with self.assertRaisesRegex( edgedb.QueryError, "can not take cross product of volatile operation", _position=65): await self.con.execute( r""" SELECT ({1,2}, (FOR i in {1,2,3} UNION ( INSERT Obj { n := i }))) """ ) async with self._run_and_rollback(): with self.assertRaisesRegex( edgedb.QueryError, "can not take cross product of volatile operation"): await self.con.execute( r""" WITH X := (WITH x := {1,2}, SELECT (x, Obj {m := vol_id(x)})).1 SELECT X; """ )
) async with self._run_and_rollback(): with self.assertRaisesRegex( edgedb.QueryError, "can not take cross product of volatile operation", _position=36): await self.con.execute( r
test_edgeql_volatility_errors_01
python
geldata/gel
tests/test_edgeql_volatility.py
https://github.com/geldata/gel/blob/master/tests/test_edgeql_volatility.py
Apache-2.0
async def test_edgeql_explain_insert_01(self):
    # Use an ad-hoc connection to avoid TRANSACTION_ISOLATION
    con = await self.connect(database=self.con.dbname)
    try:
        res = await self.explain('''
            insert User { name := 'Fantix' }
        ''', execute=True, con=con)
        self.assert_plan(res['fine_grained'], {
            'pipeline': [{'plan_type': 'NestedLoop'}],
        })
        self.assertFalse(await con.query('''
            select User { id, name } filter .name = 'Fantix'
        '''))
    finally:
        await con.aclose()
, execute=True, con=con) self.assert_plan(res['fine_grained'], { 'pipeline': [{'plan_type': 'NestedLoop'}], }) self.assertFalse(await con.query(
test_edgeql_explain_insert_01
python
geldata/gel
tests/test_edgeql_explain.py
https://github.com/geldata/gel/blob/master/tests/test_edgeql_explain.py
Apache-2.0
async def test_edgeql_explain_insert_02(self):
    async with self.con.transaction():
        await self.con.execute('''
            insert User { name := 'Sully' }
        ''')
        res = await self.explain('''
            insert User { name := 'Fantix' }
        ''', execute=True)
        self.assert_plan(res['fine_grained'], {
            'pipeline': [{'plan_type': 'NestedLoop'}],
        })
        self.assertTrue(await self.con.query('''
            select User { id, name } filter .name = 'Sully'
        '''))
        self.assertFalse(await self.con.query('''
            select User { id, name } filter .name = 'Fantix'
        '''))

    self.assertTrue(await self.con.query('''
        select User { id, name } filter .name = 'Sully'
    '''))
    self.assertFalse(await self.con.query('''
        select User { id, name } filter .name = 'Fantix'
    '''))
) res = await self.explain(
test_edgeql_explain_insert_02
python
geldata/gel
tests/test_edgeql_explain.py
https://github.com/geldata/gel/blob/master/tests/test_edgeql_explain.py
Apache-2.0
async def test_edgeql_explain_options_01(self):
    res = await self.explain('''
        select User
    ''', execute=False)
    self.assertNotIn(
        'actual_startup_time', res['fine_grained']['pipeline'][0],
    )
    self.assertEqual(
        {'buffers': False, 'execute': False},
        res['arguments'],
    )

    res = json.loads(await self.con.query_single('''
        analyze (buffers := True) select User
    '''))
    self.assertIn('shared_read_blocks', res['fine_grained']['pipeline'][0])
    self.assertEqual({'buffers': True, 'execute': True}, res['arguments'])

    res = json.loads(await self.con.query_single('''
        analyze (buffers := false) select User
    '''))
    self.assertNotIn(
        'shared_read_blocks', res['fine_grained']['pipeline'][0],
    )
    self.assertEqual({'buffers': False, 'execute': True}, res['arguments'])
, execute=False) self.assertNotIn( 'actual_startup_time', res['fine_grained']['pipeline'][0], ) self.assertEqual( {'buffers': False, 'execute': False}, res['arguments'], ) res = json.loads(await self.con.query_single(
test_edgeql_explain_options_01
python
geldata/gel
tests/test_edgeql_explain.py
https://github.com/geldata/gel/blob/master/tests/test_edgeql_explain.py
Apache-2.0
async def test_edgeql_explain_options_02(self):
    async with self.assertRaisesRegexTx(
        edgedb.QueryError,
        r"unknown ANALYZE argument"
    ):
        await self.con.query_single('''
            analyze (bogus_argument := True) select User
        ''')

    async with self.assertRaisesRegexTx(
        edgedb.QueryError,
        r"incorrect type"
    ):
        await self.con.query_single('''
            analyze (execute := "hell yeah") select User
        ''')
) async with self.assertRaisesRegexTx( edgedb.QueryError, r"incorrect type" ): await self.con.query_single(
test_edgeql_explain_options_02
python
geldata/gel
tests/test_edgeql_explain.py
https://github.com/geldata/gel/blob/master/tests/test_edgeql_explain.py
Apache-2.0
async def test_edgeql_explain_ranges_contains_01(self): res = await self.explain(''' select RangeTest {id, rval} filter contains(.rval, 295) ''') self.assert_index_in_plan(res, 'rval') res = await self.explain(''' select RangeTest {id, mval} filter contains(.mval, 295) ''') self.assert_index_in_plan(res, 'mval') res = await self.explain(''' select RangeTest {id, rdate} filter contains(.rdate, <cal::local_date>'2000-01-05') ''') self.assert_index_in_plan(res, 'rdate') res = await self.explain(''' select RangeTest {id, mdate} filter contains(.mdate, <cal::local_date>'2000-01-05') ''') self.assert_index_in_plan(res, 'mdate')
) self.assert_index_in_plan(res, 'rval') res = await self.explain(
test_edgeql_explain_ranges_contains_01
python
geldata/gel
tests/test_edgeql_explain.py
https://github.com/geldata/gel/blob/master/tests/test_edgeql_explain.py
Apache-2.0
async def test_edgeql_explain_ranges_contains_02(self): res = await self.explain(''' select RangeTest {id, rval} filter contains(.rval, range(295, 299)) ''') self.assert_index_in_plan(res, 'rval') res = await self.explain(''' select RangeTest {id, mval} filter contains(.mval, range(295, 299)) ''') self.assert_index_in_plan(res, 'mval') res = await self.explain(''' select RangeTest {id, rdate} filter contains( .rdate, range(<cal::local_date>'2000-01-05', <cal::local_date>'2000-01-10') ) ''') self.assert_index_in_plan(res, 'rdate') res = await self.explain(''' select RangeTest {id, mdate} filter contains( .mdate, range(<cal::local_date>'2000-01-05', <cal::local_date>'2000-01-10') ) ''') self.assert_index_in_plan(res, 'mdate')
) self.assert_index_in_plan(res, 'rval') res = await self.explain(
test_edgeql_explain_ranges_contains_02
python
geldata/gel
tests/test_edgeql_explain.py
https://github.com/geldata/gel/blob/master/tests/test_edgeql_explain.py
Apache-2.0
async def test_edgeql_explain_ranges_contains_03(self): res = await self.explain(''' select RangeTest {id, mval} filter contains( .mval, multirange([ range(-299, 297), range(297, 299), ]) ) ''') self.assert_index_in_plan(res, 'mval') res = await self.explain(''' select RangeTest {id, mdate} filter contains( .mdate, multirange([ range(<cal::local_date>'2000-01-05', <cal::local_date>'2000-01-10'), range(<cal::local_date>'2010-01-05', <cal::local_date>'2010-01-10'), ]) ) ''') self.assert_index_in_plan(res, 'mdate')
) self.assert_index_in_plan(res, 'mval') res = await self.explain(
test_edgeql_explain_ranges_contains_03
python
geldata/gel
tests/test_edgeql_explain.py
https://github.com/geldata/gel/blob/master/tests/test_edgeql_explain.py
Apache-2.0
async def test_edgeql_explain_ranges_overlaps_01(self): # The field is the first arg in `overlaps` res = await self.explain(''' select RangeTest {id, rval} filter overlaps(.rval, range(295, 299)) ''') self.assert_index_in_plan(res, 'rval') res = await self.explain(''' select RangeTest {id, mval} filter overlaps(.mval, range(295, 299)) ''') self.assert_index_in_plan(res, 'mval') res = await self.explain(''' select RangeTest {id, rdate} filter overlaps( .rdate, range(<cal::local_date>'2000-01-05', <cal::local_date>'2000-01-10'), ) ''') self.assert_index_in_plan(res, 'rdate') res = await self.explain(''' select RangeTest {id, mdate} filter overlaps( .mdate, range(<cal::local_date>'2000-01-05', <cal::local_date>'2000-01-10'), ) ''') self.assert_index_in_plan(res, 'mdate')
) self.assert_index_in_plan(res, 'rval') res = await self.explain(
test_edgeql_explain_ranges_overlaps_01
python
geldata/gel
tests/test_edgeql_explain.py
https://github.com/geldata/gel/blob/master/tests/test_edgeql_explain.py
Apache-2.0
async def test_edgeql_explain_ranges_overlaps_02(self): # The field is the second arg in `overlaps` res = await self.explain(''' select RangeTest {id, rval} filter overlaps(range(295, 299), .rval) ''') self.assert_index_in_plan(res, 'rval') res = await self.explain(''' select RangeTest {id, mval} filter overlaps(range(295, 299), .mval) ''') self.assert_index_in_plan(res, 'mval') res = await self.explain(''' select RangeTest {id, rdate} filter overlaps( range(<cal::local_date>'2000-01-05', <cal::local_date>'2000-01-10'), .rdate, ) ''') self.assert_index_in_plan(res, 'rdate') res = await self.explain(''' select RangeTest {id, mdate} filter overlaps( range(<cal::local_date>'2000-01-05', <cal::local_date>'2000-01-10'), .mdate, ) ''') self.assert_index_in_plan(res, 'mdate')
) self.assert_index_in_plan(res, 'rval') res = await self.explain(
test_edgeql_explain_ranges_overlaps_02
python
geldata/gel
tests/test_edgeql_explain.py
https://github.com/geldata/gel/blob/master/tests/test_edgeql_explain.py
Apache-2.0
async def test_edgeql_explain_ranges_adjacent_01(self): # The field is the first arg in `adjacent` res = await self.explain(''' select RangeTest {id, rval} filter adjacent(.rval, range(295, 299)) ''') self.assert_index_in_plan(res, 'rval') res = await self.explain(''' select RangeTest {id, mval} filter adjacent(.mval, range(295, 299)) ''') self.assert_index_in_plan(res, 'mval') res = await self.explain(''' select RangeTest {id, rdate} filter adjacent( .rdate, range(<cal::local_date>'2000-01-05', <cal::local_date>'2000-01-10'), ) ''') self.assert_index_in_plan(res, 'rdate') res = await self.explain(''' select RangeTest {id, mdate} filter adjacent( .mdate, range(<cal::local_date>'2000-01-05', <cal::local_date>'2000-01-10'), ) ''') self.assert_index_in_plan(res, 'mdate')
) self.assert_index_in_plan(res, 'rval') res = await self.explain(
test_edgeql_explain_ranges_adjacent_01
python
geldata/gel
tests/test_edgeql_explain.py
https://github.com/geldata/gel/blob/master/tests/test_edgeql_explain.py
Apache-2.0
async def test_edgeql_explain_ranges_adjacent_02(self): # The field is the second arg in `adjacent` res = await self.explain(''' select RangeTest {id, rval} filter adjacent(range(295, 299), .rval) ''') self.assert_index_in_plan(res, 'rval') res = await self.explain(''' select RangeTest {id, mval} filter adjacent(range(295, 299), .mval) ''') self.assert_index_in_plan(res, 'mval') res = await self.explain(''' select RangeTest {id, rdate} filter adjacent( range(<cal::local_date>'2000-01-05', <cal::local_date>'2000-01-10'), .rdate, ) ''') self.assert_index_in_plan(res, 'rdate') res = await self.explain(''' select RangeTest {id, mdate} filter adjacent( range(<cal::local_date>'2000-01-05', <cal::local_date>'2000-01-10'), .mdate, ) ''') self.assert_index_in_plan(res, 'mdate')
) self.assert_index_in_plan(res, 'rval') res = await self.explain(
test_edgeql_explain_ranges_adjacent_02
python
geldata/gel
tests/test_edgeql_explain.py
https://github.com/geldata/gel/blob/master/tests/test_edgeql_explain.py
Apache-2.0
async def test_edgeql_explain_ranges_strictly_below_01(self): # The field is the first arg in `strictly_below` res = await self.explain(''' select RangeTest {id, rval} filter strictly_below(.rval, range(-50, 50)) ''') self.assert_index_in_plan(res, 'rval') res = await self.explain(''' select RangeTest {id, mval} filter strictly_below(.mval, range(-50, 50)) ''') self.assert_index_in_plan(res, 'mval') res = await self.explain(''' select RangeTest {id, rdate} filter strictly_below( .rdate, range(<cal::local_date>'2005-01-05', <cal::local_date>'2012-02-10'), ) ''') self.assert_index_in_plan(res, 'rdate') res = await self.explain(''' select RangeTest {id, mdate} filter strictly_below( .mdate, range(<cal::local_date>'2005-01-05', <cal::local_date>'2012-02-10'), ) ''') self.assert_index_in_plan(res, 'mdate')
) self.assert_index_in_plan(res, 'rval') res = await self.explain(
test_edgeql_explain_ranges_strictly_below_01
python
geldata/gel
tests/test_edgeql_explain.py
https://github.com/geldata/gel/blob/master/tests/test_edgeql_explain.py
Apache-2.0
async def test_edgeql_explain_ranges_strictly_below_02(self): # The field is the second arg in `strictly_below` res = await self.explain(''' select RangeTest {id, rval} filter strictly_below(range(-50, 50), .rval) ''') self.assert_index_in_plan(res, 'rval') res = await self.explain(''' select RangeTest {id, mval} filter strictly_below(range(-50, 50), .mval) ''') self.assert_index_in_plan(res, 'mval') res = await self.explain(''' select RangeTest {id, rdate} filter strictly_below( range(<cal::local_date>'2005-01-05', <cal::local_date>'2012-02-10'), .rdate, ) ''') self.assert_index_in_plan(res, 'rdate') res = await self.explain(''' select RangeTest {id, mdate} filter strictly_below( range(<cal::local_date>'2005-01-05', <cal::local_date>'2012-02-10'), .mdate, ) ''') self.assert_index_in_plan(res, 'mdate')
) self.assert_index_in_plan(res, 'rval') res = await self.explain(
test_edgeql_explain_ranges_strictly_below_02
python
geldata/gel
tests/test_edgeql_explain.py
https://github.com/geldata/gel/blob/master/tests/test_edgeql_explain.py
Apache-2.0
async def test_edgeql_explain_ranges_strictly_above_01(self): # The field is the first arg in `strictly_above` res = await self.explain(''' select RangeTest {id, rval} filter strictly_above(.rval, range(-50, 50)) ''') self.assert_index_in_plan(res, 'rval') res = await self.explain(''' select RangeTest {id, mval} filter strictly_above(.mval, range(-50, 50)) ''') self.assert_index_in_plan(res, 'mval') res = await self.explain(''' select RangeTest {id, rdate} filter strictly_above( .rdate, range(<cal::local_date>'2005-01-05', <cal::local_date>'2012-02-10'), ) ''') self.assert_index_in_plan(res, 'rdate') res = await self.explain(''' select RangeTest {id, mdate} filter strictly_above( .mdate, range(<cal::local_date>'2005-01-05', <cal::local_date>'2012-02-10'), ) ''') self.assert_index_in_plan(res, 'mdate')
) self.assert_index_in_plan(res, 'rval') res = await self.explain(
test_edgeql_explain_ranges_strictly_above_01
python
geldata/gel
tests/test_edgeql_explain.py
https://github.com/geldata/gel/blob/master/tests/test_edgeql_explain.py
Apache-2.0
async def test_edgeql_explain_ranges_strictly_above_02(self): # The field is the second arg in `strictly_above` res = await self.explain(''' select RangeTest {id, rval} filter strictly_above(range(-50, 50), .rval) ''') self.assert_index_in_plan(res, 'rval') res = await self.explain(''' select RangeTest {id, mval} filter strictly_above(range(-50, 50), .mval) ''') self.assert_index_in_plan(res, 'mval') res = await self.explain(''' select RangeTest {id, rdate} filter strictly_above( range(<cal::local_date>'2005-01-05', <cal::local_date>'2012-02-10'), .rdate, ) ''') self.assert_index_in_plan(res, 'rdate') res = await self.explain(''' select RangeTest {id, mdate} filter strictly_above( range(<cal::local_date>'2005-01-05', <cal::local_date>'2012-02-10'), .mdate, ) ''') self.assert_index_in_plan(res, 'mdate')
) self.assert_index_in_plan(res, 'rval') res = await self.explain(
test_edgeql_explain_ranges_strictly_above_02
python
geldata/gel
tests/test_edgeql_explain.py
https://github.com/geldata/gel/blob/master/tests/test_edgeql_explain.py
Apache-2.0
async def test_edgeql_explain_ranges_bounded_below_01(self): # The field is the first arg in `bounded_below` res = await self.explain(''' select RangeTest {id, rval} filter bounded_below(.rval, range(-50, 50)) ''') self.assert_index_in_plan(res, 'rval') res = await self.explain(''' select RangeTest {id, mval} filter bounded_below(.mval, range(-50, 50)) ''') self.assert_index_in_plan(res, 'mval') res = await self.explain(''' select RangeTest {id, rdate} filter bounded_below( .rdate, range(<cal::local_date>'2012-01-05', <cal::local_date>'2015-02-10'), ) ''') self.assert_index_in_plan(res, 'rdate') res = await self.explain(''' select RangeTest {id, mdate} filter bounded_below( .mdate, range(<cal::local_date>'2012-01-05', <cal::local_date>'2015-02-10'), ) ''') self.assert_index_in_plan(res, 'mdate')
) self.assert_index_in_plan(res, 'rval') res = await self.explain(
test_edgeql_explain_ranges_bounded_below_01
python
geldata/gel
tests/test_edgeql_explain.py
https://github.com/geldata/gel/blob/master/tests/test_edgeql_explain.py
Apache-2.0
async def test_edgeql_explain_ranges_bounded_above_01(self): # The field is the first arg in `bounded_above` res = await self.explain(''' select RangeTest {id, rval} filter bounded_above(.rval, range(-50, 50)) ''') self.assert_index_in_plan(res, 'rval') res = await self.explain(''' select RangeTest {id, mval} filter bounded_above(.mval, range(-50, 50)) ''') self.assert_index_in_plan(res, 'mval') res = await self.explain(''' select RangeTest {id, rdate} filter bounded_above( .rdate, range(<cal::local_date>'2005-01-05', <cal::local_date>'2012-02-10'), ) ''') self.assert_index_in_plan(res, 'rdate') res = await self.explain(''' select RangeTest {id, mdate} filter bounded_above( .mdate, range(<cal::local_date>'2005-01-05', <cal::local_date>'2012-02-10'), ) ''') self.assert_index_in_plan(res, 'mdate')
) self.assert_index_in_plan(res, 'rval') res = await self.explain(
test_edgeql_explain_ranges_bounded_above_01
python
geldata/gel
tests/test_edgeql_explain.py
https://github.com/geldata/gel/blob/master/tests/test_edgeql_explain.py
Apache-2.0
def main(argv):
    con = edgedb.create_client()
    dbs = con.query('''
        select sys::Database.name
    ''')
    con.close()

    datas = {}
    for db in dbs:
        con = edgedb.create_client(database=db)
        output = json.loads(con.query_single('''
            administer prepare_upgrade()
        '''))
        datas[db] = output

    print(json.dumps(datas))
) con.close() datas = {} for db in dbs: con = edgedb.create_client(database=db) output = json.loads(con.query_single(
main
python
geldata/gel
tests/inplace-testing/prep-upgrades.py
https://github.com/geldata/gel/blob/master/tests/inplace-testing/prep-upgrades.py
Apache-2.0
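The script above prints a single JSON object mapping each database name to the output of `administer prepare_upgrade()`. A hedged sketch of consuming that output from another process follows; invoking the script directly with plain `python` is an assumption, since the dump does not show a `__main__` guard or the intended invocation.

# Hedged sketch of consuming the JSON printed by prep-upgrades.py; the direct
# `python` invocation is an assumption, not shown in the dump.
import json
import subprocess

out = subprocess.run(
    ["python", "tests/inplace-testing/prep-upgrades.py"],
    capture_output=True, text=True, check=True,
).stdout

datas = json.loads(out)  # {database_name: prepare_upgrade() output}
for db, payload in datas.items():
    print(db, type(payload).__name__)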
def __init__(self, test_prog, global_prog=""):
    self._args = (
        sys.executable,
        "-m",
        "edb.testbase.proc",
        textwrap.dedent(global_prog) + "\n" + textwrap.dedent(
            """\
            import signal
            from edb.common import signalctl
            """
        ),
        textwrap.dedent(test_prog),
    )
\ import signal from edb.common import signalctl
__init__
python
geldata/gel
tests/common/test_signalctl.py
https://github.com/geldata/gel/blob/master/tests/common/test_signalctl.py
Apache-2.0
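The prelude passed to the child process above relies on textwrap.dedent together with the `"""\` line continuation to strip the indentation of the embedded source. A quick standalone check of that stdlib behaviour:

# Standalone check of the textwrap.dedent behaviour relied on above: the
# prelude is written indented in the test module, but the child process
# receives it with the common leading whitespace stripped.
import textwrap

prelude = textwrap.dedent(
    """\
    import signal
    from edb.common import signalctl
    """
)
assert prelude == "import signal\nfrom edb.common import signalctl\n"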
def _test1(self, do_something=True): foo = 'foo' bar = 'bar' left = ''' [1, 2, 3, 4, 5, 6] ''' things = [] for thing in ['spam', 'eggs', 'ham']: things.append(f''' reify( {thing} ) ''') sep = ",\n" if do_something: orig = f''' do_something( {foo}, {bar}, {X(left)}, {X(sep.join(X(x) for x in things))} ) ''' else: orig = xdedent.LINE_BLANK return xdedent.xdedent(f''' call_something() {X(foo)} = 10 while True: if {bar}: {X(orig)} another more ''')
things = [] for thing in ['spam', 'eggs', 'ham']: things.append(f
_test1
python
geldata/gel
tests/common/test_xdedent.py
https://github.com/geldata/gel/blob/master/tests/common/test_xdedent.py
Apache-2.0
def __init__(self, edb_schema: s_schema.Schema) -> None: '''Create a graphql schema based on edgedb schema.''' self.edb_schema = edb_schema # extract and sort modules to have a consistent type ordering self.modules = list(sorted({ m.get_name(self.edb_schema) for m in self.edb_schema.get_objects(type=s_mod.Module) } - HIDDEN_MODULES)) self._gql_interfaces = {} self._gql_uniontypes: Set[s_name.QualName] = set() self._gql_objtypes_from_alias = {} self._gql_objtypes = {} self._gql_inobjtypes = {} self._gql_ordertypes = {} self._gql_enums = {} self._define_types() # Use a fake name as a placeholder. Query = s_name.QualName(module='__graphql__', name='Query') query = self._gql_objtypes[Query] = GraphQLObjectType( name='Query', fields=self.get_fields(Query), ) # If a database only has abstract types and scalars, no # mutations will be possible (such as in a blank database), # but we would still want the reflection to work without # error, even if all that can be discovered through GraphQL # then is the schema. Mutation = s_name.QualName(module='__graphql__', name='Mutation') fields = self.get_fields(Mutation) if not fields: mutation = None else: mutation = self._gql_objtypes[Mutation] = GraphQLObjectType( name='Mutation', fields=fields, ) # get a sorted list of types relevant for the Schema types = [ objt for name, objt in itertools.chain(self._gql_objtypes.items(), self._gql_inobjtypes.items()) # the Query is included separately if name not in TOP_LEVEL_TYPES ] types = sorted(types, key=lambda x: x.name) self._gql_schema = GraphQLSchema( query=query, mutation=mutation, types=types) # this map is used for GQL -> EQL translator needs self._type_map = {}
Create a graphql schema based on edgedb schema.
__init__
python
geldata/gel
edb/graphql/types.py
https://github.com/geldata/gel/blob/master/edb/graphql/types.py
Apache-2.0
def gql_to_edb_name(self, name: str) -> str:
    '''Convert the GraphQL field name into a Gel type/view name.'''
    if '__' in name:
        return name.replace('__', '::')
    else:
        return name
Convert the GraphQL field name into a Gel type/view name.
gql_to_edb_name
python
geldata/gel
edb/graphql/types.py
https://github.com/geldata/gel/blob/master/edb/graphql/types.py
Apache-2.0
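As a standalone illustration of the mapping rule above (a double underscore standing in for the `::` module separator, which GraphQL identifiers cannot contain), a hypothetical free function with the same behaviour and a couple of assertions:

# Hypothetical standalone version of the same name-mapping rule, for
# illustration only; the real method lives on the GraphQL schema class above.
def gql_to_edb_name(name: str) -> str:
    # '__' is used as the module separator in reflected GraphQL names and is
    # converted back to '::' here.
    return name.replace('__', '::') if '__' in name else name

assert gql_to_edb_name('default__User') == 'default::User'
assert gql_to_edb_name('User') == 'User'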
def get(self, name: str, *, dummy: bool = False) -> GQLBaseType: '''Get a special GQL type either by name or based on Gel type.''' # normalize name and possibly add 'edb_base' to kwargs edb_base = None kwargs: Dict[str, Any] = {'dummy': dummy} if not name.startswith('__graphql__::'): # The name may potentially contain the suffix "_Type", # which in 99% cases indicates that it's a GraphQL # internal type generated from the EdgeDB base type, but # we technically need to check both. if name.endswith('_Type'): names = [name[:-len('_Type')], name] else: names = [name] for tname in names: if edb_base is None: module: Union[s_name.Name, str] if '::' in tname: edb_base = self.edb_schema.get( tname, type=s_types.Type, ) elif '__' in tname: # Looks like it's coming from a specific module edb_base = self.edb_schema.get( f"{tname.replace('__', '::')}", type=s_types.Type, ) else: for module in self.modules: edb_base = self.edb_schema.get( f'{module}::{tname}', type=s_types.Type, default=None, ) if edb_base: break # XXX: find a better way to do this for stype in [s_types.Array, s_types.Tuple, s_types.Range, s_types.MultiRange]: if edb_base is None: edb_base = self.edb_schema.get_global( stype, tname, default=None ) else: break if edb_base is None: raise AssertionError( f'unresolved type: {name}') kwargs['edb_base'] = edb_base # check if the type already exists fkey = (name, dummy) gqltype = self._type_map.get(fkey) if not gqltype: _type = GQLTypeMeta.edb_map.get(name, GQLShadowType) gqltype = _type(schema=self, **kwargs) self._type_map[fkey] = gqltype return gqltype
Get a special GQL type either by name or based on Gel type.
get
python
geldata/gel
edb/graphql/types.py
https://github.com/geldata/gel/blob/master/edb/graphql/types.py
Apache-2.0
def get_template(
    self,
) -> Tuple[qlast.Base, Optional[qlast.Expr], Optional[qlast.SelectQuery]]:
    '''Provide an EQL AST template to be filled.

    Return the overall ast, a reference to where the shape element with
    placeholder is, and a reference to the element which may be filtered.
    '''
    if self.dummy:
        return parse_fragment(f'''to_json("xxx")'''), None, None

    eql = parse_fragment(f'''
        SELECT {self.edb_base_name} {{
            xxx
        }}
    ''')

    filterable = eql
    assert isinstance(filterable, qlast.SelectQuery)
    shape = filterable.result

    return eql, shape, filterable
Provide an EQL AST template to be filled. Return the overall ast, a reference to where the shape element with placeholder is, and a reference to the element which may be filtered.
get_template
python
geldata/gel
edb/graphql/types.py
https://github.com/geldata/gel/blob/master/edb/graphql/types.py
Apache-2.0
async def connect(self,
                  *,
                  source_description: str,
                  apply_init_script: bool = False,
                  **kwargs: Unpack[pgconnparams.CreateParamsKwargs]
                  ) -> pgcon.PGConnection:
    """Connect to this cluster, with optional overriding parameters.

    If overriding parameters are specified, they are applied to a copy of
    the connection parameters before the connection takes place."""
    from edb.server import pgcon

    connection = copy.copy(self.get_connection_params())
    addr = self._get_connection_addr()
    assert addr is not None
    connection.update(hosts=[addr])
    connection.update(**kwargs)
    conn = await pgcon.pg_connect(
        connection,
        source_description=source_description,
        backend_params=self.get_runtime_params(),
        apply_init_script=apply_init_script,
    )
    return conn
Connect to this cluster, with optional overriding parameters. If overriding parameters are specified, they are applied to a copy of the connection parameters before the connection takes place.
connect
python
geldata/gel
edb/server/pgcluster.py
https://github.com/geldata/gel/blob/master/edb/server/pgcluster.py
Apache-2.0
async def init(self, **settings: str) -> None:
    """Initialize cluster."""
    if await self.get_status() != 'not-initialized':
        raise ClusterError(
            'cluster in {!r} has already been initialized'.format(
                self._data_dir))

    if settings:
        settings_args = ['--{}={}'.format(k.replace('_', '-'), v)
                         for k, v in settings.items()]
        extra_args = ['-o'] + [' '.join(settings_args)]
    else:
        extra_args = []

    await _run_logged_subprocess(
        [self._pg_ctl, 'init', '-D', str(self._data_dir)] + extra_args,
        logger=initdb_logger,
    )
Initialize cluster.
init
python
geldata/gel
edb/server/pgcluster.py
https://github.com/geldata/gel/blob/master/edb/server/pgcluster.py
Apache-2.0
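A small standalone sketch of just the settings-to-arguments step in `init` above, showing how keyword settings end up as a single `-o` option string for `pg_ctl init`; the `data_checksums` and `auth_local` settings are made-up example inputs.

# Standalone sketch of the argument construction used by `init` above; the
# example settings are invented inputs, not defaults from the repo.
def initdb_extra_args(settings: dict[str, str]) -> list[str]:
    if not settings:
        return []
    settings_args = ['--{}={}'.format(k.replace('_', '-'), v)
                     for k, v in settings.items()]
    # pg_ctl init forwards everything after -o to initdb as one string.
    return ['-o', ' '.join(settings_args)]

print(initdb_extra_args({'data_checksums': 'on', 'auth_local': 'trust'}))
# ['-o', '--data-checksums=on --auth-local=trust']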
async def start( self, wait: int = 60, *, server_settings: Optional[Mapping[str, str]] = None, **opts: str, ) -> None: """Start the cluster.""" status = await self.get_status() if status == 'running': return elif status == 'not-initialized': raise ClusterError( 'cluster in {!r} has not been initialized'.format( self._data_dir)) extra_args = ['--{}={}'.format(k, v) for k, v in opts.items()] start_settings = { 'listen_addresses': '', # we use Unix sockets 'unix_socket_permissions': '0700', 'unix_socket_directories': str(self._runstate_dir), # here we are not setting superuser_reserved_connections because # we're using superuser only now (so all connections available), # and we don't support reserving connections for now 'max_connections': str(self._instance_params.max_connections), # From Postgres docs: # # You might need to raise this value if you have queries that # touch many different tables in a single transaction, e.g., # query of a parent table with many children. # # EdgeDB queries might touch _lots_ of tables, especially in deep # inheritance hierarchies. This is especially important in low # `max_connections` scenarios. 'max_locks_per_transaction': 1024, 'max_pred_locks_per_transaction': 1024, "shared_preload_libraries": ",".join( [ "edb_stat_statements", ] ), "edb_stat_statements.track_planning": "true", } if os.getenv('EDGEDB_DEBUG_PGSERVER'): start_settings['log_min_messages'] = 'info' start_settings['log_statement'] = 'all' else: log_level_map = { 'd': 'INFO', 'i': 'WARNING', # NOTICE in Postgres is quite noisy 'w': 'WARNING', 'e': 'ERROR', 's': 'PANIC', } start_settings['log_min_messages'] = log_level_map[self._log_level] start_settings['log_statement'] = 'none' start_settings['log_line_prefix'] = '' if server_settings: start_settings.update(server_settings) ssl_key = start_settings.get('ssl_key_file') if ssl_key: # Make sure server certificate key file has correct permissions. keyfile = os.path.join(self._data_dir, 'srvkey.pem') assert isinstance(ssl_key, str) shutil.copy(ssl_key, keyfile) os.chmod(keyfile, 0o600) start_settings['ssl_key_file'] = keyfile for k, v in start_settings.items(): extra_args.extend(['-c', '{}={}'.format(k, v)]) self._daemon_process, *loggers = await _start_logged_subprocess( [self._postgres, '-D', str(self._data_dir), *extra_args], capture_stdout=False, capture_stderr=False, logger=postgres_logger, log_processor=postgres_log_processor, ) self._daemon_pid = self._daemon_process.pid sup = await supervisor.Supervisor.create(name="postgres loggers") for logger_coro in loggers: sup.create_task(logger_coro) self._daemon_supervisor = sup await self._test_connection(timeout=wait)
Start the cluster.
start
python
geldata/gel
edb/server/pgcluster.py
https://github.com/geldata/gel/blob/master/edb/server/pgcluster.py
Apache-2.0
async def reload(self) -> None:
    """Reload server configuration."""
    status = await self.get_status()
    if status != 'running':
        raise ClusterError('cannot reload: cluster is not running')

    await _run_logged_subprocess(
        [self._pg_ctl, 'reload', '-D', str(self._data_dir)],
        logger=pg_ctl_logger,
    )
Reload server configuration.
reload
python
geldata/gel
edb/server/pgcluster.py
https://github.com/geldata/gel/blob/master/edb/server/pgcluster.py
Apache-2.0
def reset_hba(self) -> None:
    """Remove all records from pg_hba.conf."""
    pg_hba = os.path.join(self._data_dir, 'pg_hba.conf')

    try:
        with open(pg_hba, 'w'):
            pass
    except IOError as e:
        raise ClusterError(
            'cannot modify HBA records: {}'.format(e)) from e
Remove all records from pg_hba.conf.
reset_hba
python
geldata/gel
edb/server/pgcluster.py
https://github.com/geldata/gel/blob/master/edb/server/pgcluster.py
Apache-2.0
def add_hba_entry(
    self,
    *,
    type: str = 'host',
    database: str,
    user: str,
    address: Optional[str] = None,
    auth_method: str,
    auth_options: Optional[Mapping[str, Any]] = None,
) -> None:
    """Add a record to pg_hba.conf."""
    if type not in {'local', 'host', 'hostssl', 'hostnossl'}:
        raise ValueError('invalid HBA record type: {!r}'.format(type))

    pg_hba = os.path.join(self._data_dir, 'pg_hba.conf')

    record = '{} {} {}'.format(type, database, user)

    if type != 'local':
        if address is None:
            raise ValueError(
                '{!r} entry requires a valid address'.format(type))
        else:
            record += ' {}'.format(address)

    record += ' {}'.format(auth_method)

    if auth_options is not None:
        record += ' ' + ' '.join(
            '{}={}'.format(k, v) for k, v in auth_options.items())

    try:
        with open(pg_hba, 'a') as f:
            print(record, file=f)
    except IOError as e:
        raise ClusterError(
            'cannot modify HBA records: {}'.format(e)) from e
Add a record to pg_hba.conf.
add_hba_entry
python
geldata/gel
edb/server/pgcluster.py
https://github.com/geldata/gel/blob/master/edb/server/pgcluster.py
Apache-2.0
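To make the record format built by add_hba_entry above concrete, here is a small standalone sketch that assembles the same pg_hba.conf line without touching the filesystem; the database, user, and address values are invented examples.

# Standalone sketch of the pg_hba.conf line format produced by add_hba_entry
# above; "mydb", "myuser" and the address are invented example values.
from typing import Any, Mapping, Optional

def format_hba_record(
    *,
    type: str = 'host',
    database: str,
    user: str,
    address: Optional[str] = None,
    auth_method: str,
    auth_options: Optional[Mapping[str, Any]] = None,
) -> str:
    record = f'{type} {database} {user}'
    if type != 'local':
        record += f' {address}'
    record += f' {auth_method}'
    if auth_options:
        record += ' ' + ' '.join(f'{k}={v}' for k, v in auth_options.items())
    return record

print(format_hba_record(
    database='mydb', user='myuser',
    address='10.0.0.0/24', auth_method='scram-sha-256'))
# host mydb myuser 10.0.0.0/24 scram-sha-256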
async def _detect_capabilities( conn: pgcon.PGConnection, ) -> pgparams.BackendCapabilities: from edb.server import pgcon from edb.server.pgcon import errors caps = pgparams.BackendCapabilities.NONE try: cur_cluster_name = await conn.sql_fetch_val( b""" SELECT setting FROM pg_file_settings WHERE setting = 'cluster_name' AND sourcefile = (( SELECT setting FROM pg_settings WHERE name = 'data_directory' ) || '/postgresql.auto.conf') """, ) except pgcon.BackendPrivilegeError: configfile_access = False else: try: await conn.sql_execute(b""" ALTER SYSTEM SET cluster_name = 'edgedb-test' """) except pgcon.BackendPrivilegeError: configfile_access = False except pgcon.BackendError as e: # Stolon keeper symlinks postgresql.auto.conf to /dev/null # making ALTER SYSTEM fail with InternalServerError, # see https://github.com/sorintlab/stolon/pull/343 if 'could not fsync file "postgresql.auto.conf"' in e.args[0]: configfile_access = False else: raise else: configfile_access = True if cur_cluster_name: cn = pgcommon.quote_literal( cur_cluster_name.decode("utf-8")) await conn.sql_execute( f""" ALTER SYSTEM SET cluster_name = {cn} """.encode("utf-8"), ) else: await conn.sql_execute( b""" ALTER SYSTEM SET cluster_name = DEFAULT """, ) if configfile_access: caps |= pgparams.BackendCapabilities.CONFIGFILE_ACCESS await conn.sql_execute(b"START TRANSACTION") rname = str(uuidgen.uuid1mc()) try: await conn.sql_execute( f"CREATE ROLE {pgcommon.quote_ident(rname)} WITH SUPERUSER" .encode("utf-8"), ) except pgcon.BackendPrivilegeError: can_make_superusers = False except pgcon.BackendError as e: if e.code_is( errors.ERROR_INTERNAL_ERROR ) and "not in permitted superuser list" in str(e): # DigitalOcean raises a custom error: # XX000: Role ... not in permitted superuser list can_make_superusers = False else: raise else: can_make_superusers = True finally: await conn.sql_execute(b"ROLLBACK") if can_make_superusers: caps |= pgparams.BackendCapabilities.SUPERUSER_ACCESS coll = await conn.sql_fetch_val(b""" SELECT collname FROM pg_collation WHERE lower(replace(collname, '-', '')) = 'c.utf8' LIMIT 1; """) if coll is not None: caps |= pgparams.BackendCapabilities.C_UTF8_LOCALE roles = json.loads(await conn.sql_fetch_val( b""" SELECT json_build_object( 'rolcreaterole', rolcreaterole, 'rolcreatedb', rolcreatedb ) FROM pg_roles WHERE rolname = (SELECT current_user); """, )) if roles['rolcreaterole']: caps |= pgparams.BackendCapabilities.CREATE_ROLE if roles['rolcreatedb']: caps |= pgparams.BackendCapabilities.CREATE_DATABASE stats_ver = await conn.sql_fetch_val(b""" SELECT default_version FROM pg_available_extensions WHERE name = 'edb_stat_statements'; """) if stats_ver in (b"1.0",): caps |= pgparams.BackendCapabilities.STAT_STATEMENTS return caps
, ) except pgcon.BackendPrivilegeError: configfile_access = False else: try: await conn.sql_execute(b
get_remote_pg_cluster._detect_capabilities
python
geldata/gel
edb/server/pgcluster.py
https://github.com/geldata/gel/blob/master/edb/server/pgcluster.py
Apache-2.0
async def get_remote_pg_cluster( dsn: str, *, tenant_id: Optional[str] = None, specified_capabilities: Optional[srvargs.BackendCapabilitySets] = None, ) -> RemoteCluster: from edb.server import pgcon parsed = urllib.parse.urlparse(dsn) ha_backend = None if parsed.scheme not in {'postgresql', 'postgres'}: ha_backend = ha_base.get_backend(parsed) if ha_backend is None: raise ValueError( 'invalid DSN: scheme is expected to be "postgresql", ' '"postgres" or one of the supported HA backend, ' 'got {!r}'.format(parsed.scheme)) addr = await ha_backend.get_cluster_consensus() dsn = 'postgresql://{}:{}'.format(*addr) if parsed.query: # Allow passing through Postgres connection parameters from the HA # backend DSN as "pg" prefixed query strings. For example, an HA # backend DSN with `?pgpassword=123` will result an actual backend # DSN with `?password=123`. They have higher priority than the `PG` # prefixed environment variables like `PGPASSWORD`. pq = urllib.parse.parse_qs(parsed.query, strict_parsing=True) query = {} for k, v in pq.items(): if k.startswith("pg") and k not in ["pghost", "pgport"]: if isinstance(v, list): val = v[-1] else: val = cast(str, v) query[k[2:]] = val if query: dsn += f"?{urllib.parse.urlencode(query)}" if tenant_id is None: t_id = buildmeta.get_default_tenant_id() else: t_id = tenant_id async def _get_cluster_type( conn: pgcon.PGConnection, ) -> Tuple[Type[RemoteCluster], Optional[str]]: managed_clouds = { 'rds_superuser': RemoteCluster, # Amazon RDS 'cloudsqlsuperuser': RemoteCluster, # GCP Cloud SQL 'azure_pg_admin': RemoteCluster, # Azure Postgres } managed_cloud_super = await conn.sql_fetch_val( b""" SELECT rolname FROM pg_roles WHERE rolname IN (SELECT json_array_elements_text($1::json)) LIMIT 1 """, args=[json.dumps(list(managed_clouds)).encode("utf-8")], ) if managed_cloud_super is not None: rolname = managed_cloud_super.decode("utf-8") return managed_clouds[rolname], rolname else: return RemoteCluster, None async def _detect_capabilities( conn: pgcon.PGConnection, ) -> pgparams.BackendCapabilities: from edb.server import pgcon from edb.server.pgcon import errors caps = pgparams.BackendCapabilities.NONE try: cur_cluster_name = await conn.sql_fetch_val( b""" SELECT setting FROM pg_file_settings WHERE setting = 'cluster_name' AND sourcefile = (( SELECT setting FROM pg_settings WHERE name = 'data_directory' ) || '/postgresql.auto.conf') """, ) except pgcon.BackendPrivilegeError: configfile_access = False else: try: await conn.sql_execute(b""" ALTER SYSTEM SET cluster_name = 'edgedb-test' """) except pgcon.BackendPrivilegeError: configfile_access = False except pgcon.BackendError as e: # Stolon keeper symlinks postgresql.auto.conf to /dev/null # making ALTER SYSTEM fail with InternalServerError, # see https://github.com/sorintlab/stolon/pull/343 if 'could not fsync file "postgresql.auto.conf"' in e.args[0]: configfile_access = False else: raise else: configfile_access = True if cur_cluster_name: cn = pgcommon.quote_literal( cur_cluster_name.decode("utf-8")) await conn.sql_execute( f""" ALTER SYSTEM SET cluster_name = {cn} """.encode("utf-8"), ) else: await conn.sql_execute( b""" ALTER SYSTEM SET cluster_name = DEFAULT """, ) if configfile_access: caps |= pgparams.BackendCapabilities.CONFIGFILE_ACCESS await conn.sql_execute(b"START TRANSACTION") rname = str(uuidgen.uuid1mc()) try: await conn.sql_execute( f"CREATE ROLE {pgcommon.quote_ident(rname)} WITH SUPERUSER" .encode("utf-8"), ) except pgcon.BackendPrivilegeError: can_make_superusers = False except pgcon.BackendError 
as e: if e.code_is( errors.ERROR_INTERNAL_ERROR ) and "not in permitted superuser list" in str(e): # DigitalOcean raises a custom error: # XX000: Role ... not in permitted superuser list can_make_superusers = False else: raise else: can_make_superusers = True finally: await conn.sql_execute(b"ROLLBACK") if can_make_superusers: caps |= pgparams.BackendCapabilities.SUPERUSER_ACCESS coll = await conn.sql_fetch_val(b""" SELECT collname FROM pg_collation WHERE lower(replace(collname, '-', '')) = 'c.utf8' LIMIT 1; """) if coll is not None: caps |= pgparams.BackendCapabilities.C_UTF8_LOCALE roles = json.loads(await conn.sql_fetch_val( b""" SELECT json_build_object( 'rolcreaterole', rolcreaterole, 'rolcreatedb', rolcreatedb ) FROM pg_roles WHERE rolname = (SELECT current_user); """, )) if roles['rolcreaterole']: caps |= pgparams.BackendCapabilities.CREATE_ROLE if roles['rolcreatedb']: caps |= pgparams.BackendCapabilities.CREATE_DATABASE stats_ver = await conn.sql_fetch_val(b""" SELECT default_version FROM pg_available_extensions WHERE name = 'edb_stat_statements'; """) if stats_ver in (b"1.0",): caps |= pgparams.BackendCapabilities.STAT_STATEMENTS return caps async def _get_pg_settings( conn: pgcon.PGConnection, name: str, ) -> str: return await conn.sql_fetch_val( # type: ignore b"SELECT setting FROM pg_settings WHERE name = $1", args=[name.encode("utf-8")], ) async def _get_reserved_connections( conn: pgcon.PGConnection, ) -> int: rv = int( await _get_pg_settings(conn, 'superuser_reserved_connections') ) for name in [ 'rds.rds_superuser_reserved_connections', ]: value = await _get_pg_settings(conn, name) if value: rv += int(value) return rv probe_connection = pgconnparams.ConnectionParams(dsn=dsn) conn = await pgcon.pg_connect( probe_connection, source_description="remote cluster probe", backend_params=pgparams.get_default_runtime_params(), apply_init_script=False ) params = conn.connection addr = conn.addr try: data = json.loads(await conn.sql_fetch_val( b""" SELECT json_build_object( 'user', current_user, 'dbname', current_database(), 'connlimit', ( select rolconnlimit from pg_roles where rolname = current_user ) )""", )) params.update( user=data["user"], database=data["dbname"] ) cluster_type, superuser_name = await _get_cluster_type(conn) max_connections = data["connlimit"] pg_max_connections = await _get_pg_settings(conn, 'max_connections') if max_connections == -1 or not isinstance(max_connections, int): max_connections = pg_max_connections else: max_connections = min(max_connections, pg_max_connections) capabilities = await _detect_capabilities(conn) if ( specified_capabilities is not None and specified_capabilities.must_be_absent ): disabled = [] for cap in specified_capabilities.must_be_absent: if capabilities & cap: capabilities &= ~cap disabled.append(cap) if disabled: logger.info( f"the following backend capabilities are explicitly " f"disabled by server command line: " f"{', '.join(str(cap.name) for cap in disabled)}" ) if t_id != buildmeta.get_default_tenant_id(): # GOTCHA: This tenant_id check cannot protect us from running # multiple EdgeDB servers using the default tenant_id with # different catalog versions on the same backend. However, that # would fail during bootstrap in single-role/database mode. if not capabilities & pgparams.BackendCapabilities.CREATE_ROLE: raise ClusterError( "The remote backend doesn't support CREATE ROLE; " "multi-tenancy is disabled." 
) if not capabilities & pgparams.BackendCapabilities.CREATE_DATABASE: raise ClusterError( "The remote backend doesn't support CREATE DATABASE; " "multi-tenancy is disabled." ) pg_ver_string = conn.get_server_parameter_status("server_version") if pg_ver_string is None: raise ClusterError( "remote server did not report its version " "in ParameterStatus") if capabilities & pgparams.BackendCapabilities.CREATE_DATABASE: # If we can create databases, assume we're free to create # extensions in them as well. ext_schema = "edgedbext" existing_exts = {} else: ext_schema = (await conn.sql_fetch_val( b''' SELECT COALESCE( (SELECT schema_name FROM information_schema.schemata WHERE schema_name = 'heroku_ext'), 'edgedbext') ''', )).decode("utf-8") existing_exts_data = await conn.sql_fetch( b""" SELECT extname, nspname FROM pg_extension INNER JOIN pg_namespace ON (pg_extension.extnamespace = pg_namespace.oid) """ ) existing_exts = { r[0].decode("utf-8"): r[1].decode("utf-8") for r in existing_exts_data } instance_params = pgparams.BackendInstanceParams( capabilities=capabilities, version=buildmeta.parse_pg_version(pg_ver_string), base_superuser=superuser_name, max_connections=int(max_connections), reserved_connections=await _get_reserved_connections(conn), tenant_id=t_id, ext_schema=ext_schema, existing_exts=existing_exts, ) finally: conn.terminate() return cluster_type( connection_addr=addr, connection_params=params, instance_params=instance_params, ha_backend=ha_backend, )
, args=[json.dumps(list(managed_clouds)).encode("utf-8")], ) if managed_cloud_super is not None: rolname = managed_cloud_super.decode("utf-8") return managed_clouds[rolname], rolname else: return RemoteCluster, None async def _detect_capabilities( conn: pgcon.PGConnection, ) -> pgparams.BackendCapabilities: from edb.server import pgcon from edb.server.pgcon import errors caps = pgparams.BackendCapabilities.NONE try: cur_cluster_name = await conn.sql_fetch_val( b
get_remote_pg_cluster
python
geldata/gel
edb/server/pgcluster.py
https://github.com/geldata/gel/blob/master/edb/server/pgcluster.py
Apache-2.0
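The HA-backend branch in get_remote_pg_cluster above rewrites "pg"-prefixed query parameters of the HA DSN into plain Postgres connection parameters. Below is a minimal standalone sketch of that translation using only the standard library; the HA DSN and the consensus address are invented for illustration.

import urllib.parse

ha_dsn = 'stolon+consul://localhost:8500/main?pgpassword=123&pghost=ignored'
parsed = urllib.parse.urlparse(ha_dsn)
pq = urllib.parse.parse_qs(parsed.query, strict_parsing=True)

query = {}
for k, v in pq.items():
    # pghost/pgport are skipped: the actual address comes from the HA
    # consensus, not from the DSN.
    if k.startswith("pg") and k not in ["pghost", "pgport"]:
        query[k[2:]] = v[-1]

dsn = 'postgresql://10.0.0.1:5432'  # address obtained from the HA backend
if query:
    dsn += f"?{urllib.parse.urlencode(query)}"

print(dsn)  # postgresql://10.0.0.1:5432?password=123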
async def get_patch_count(self, conn: pgcon.PGConnection) -> int: """Get the number of applied patches.""" num_patches = await instdata.get_instdata( conn, 'num_patches', 'json') res: int = json.loads(num_patches) if num_patches else 0 return res
Get the number of applied patches.
get_patch_count
python
geldata/gel
edb/server/server.py
https://github.com/geldata/gel/blob/master/edb/server/server.py
Apache-2.0
def get_debug_info(self): """Used to render the /server-info endpoint in dev/test modes. Some tests depend on the exact layout of the returned structure. """ return dict( params=dict( dev_mode=self._devmode, test_mode=self._testmode, default_auth_methods=str(self._default_auth_method_spec), listen_hosts=self._listen_hosts, listen_port=self._listen_port, ), instance_config=config.debug_serialize_config( self._get_sys_config()), compiler_pool=( self._compiler_pool.get_debug_info() if self._compiler_pool else None ), )
Used to render the /server-info endpoint in dev/test modes. Some tests depend on the exact layout of the returned structure.
get_debug_info
python
geldata/gel
edb/server/server.py
https://github.com/geldata/gel/blob/master/edb/server/server.py
Apache-2.0
async def _prepare_patches( self, conn: pgcon.PGConnection ) -> dict[int, bootstrap.PatchEntry]: """Prepare all the patches""" num_patches = await self.get_patch_count(conn) if num_patches < len(pg_patches.PATCHES): logger.info("preparing patches for database upgrade") patches = {} patch_list = list(enumerate(pg_patches.PATCHES)) for num, (kind, patch) in patch_list[num_patches:]: from . import bootstrap # noqa: F402 idx = num_patches + num if not (entry := await self._get_patch_log(conn, idx)): patch_info = await bootstrap.gather_patch_info( num, kind, patch, conn ) entry = bootstrap.prepare_patch( num, kind, patch, self._std_schema, self._refl_schema, self._schema_class_layout, self._tenant.get_backend_runtime_params(), patch_info=patch_info, ) await bootstrap._store_static_bin_cache_conn( conn, f'patch_log_{idx}', pickle.dumps(entry)) patches[num] = entry _, _, updates = entry if 'std_and_reflection_schema' in updates: self._std_schema, self._refl_schema = updates[ 'std_and_reflection_schema'] # +config patches might modify config_spec, which requires # a reload of it from the schema. if '+config' in kind: config_spec = config.load_spec_from_schema(self._std_schema) self._config_settings = config_spec if 'local_intro_query' in updates: self._local_intro_query = updates['local_intro_query'] if 'global_intro_query' in updates: self._global_intro_query = updates['global_intro_query'] if 'classlayout' in updates: self._schema_class_layout = updates['classlayout'] if 'sysqueries' in updates: queries = json.loads(updates['sysqueries']) self._sys_queries = immutables.Map( {k: q.encode() for k, q in queries.items()}) if 'report_configs_typedesc' in updates: self._report_config_typedesc = ( updates['report_configs_typedesc']) return patches
Prepare all the patches
_prepare_patches
python
geldata/gel
edb/server/server.py
https://github.com/geldata/gel/blob/master/edb/server/server.py
Apache-2.0
async def _maybe_apply_patches( self, dbname: str, conn: pgcon.PGConnection, patches: dict[int, bootstrap.PatchEntry], sys: bool=False, ) -> None: """Apply any un-applied patches to the database.""" num_patches = await self.get_patch_count(conn) for num, (sql_b, syssql, keys) in patches.items(): if num_patches <= num: if sys: sql_b += syssql logger.info("applying patch %d to database '%s'", num, dbname) sql = tuple(x.encode('utf-8') for x in sql_b) # For certain things, we need to actually run it # against each user database. if keys.get('is_user_update'): from . import bootstrap kind, patch = pg_patches.PATCHES[num] patch_info = await bootstrap.gather_patch_info( num, kind, patch, conn ) # Reload the compiler state from this database in # particular, so we can compiler from exactly the # right state. (Since self._std_schema and the like might # be further advanced.) state = (await edbcompiler.new_compiler_from_pg(conn)).state assert state.global_intro_query and state.local_intro_query global_schema = self._parse_global_schema( await conn.sql_fetch_val( state.global_intro_query.encode('utf-8')), ) user_schema = self._parse_user_schema( await conn.sql_fetch_val( state.local_intro_query.encode('utf-8')), global_schema, ) entry = bootstrap.prepare_patch( num, kind, patch, state.std_schema, state.refl_schema, state.schema_class_layout, self._tenant.get_backend_runtime_params(), patch_info=patch_info, user_schema=user_schema, global_schema=global_schema, dbname=dbname, ) sql += tuple(x.encode('utf-8') for x in entry[0]) if sql: await conn.sql_execute(sql) logger.info( "finished applying patch %d to database '%s'", num, dbname)
Apply any un-applied patches to the database.
_maybe_apply_patches
python
geldata/gel
edb/server/server.py
https://github.com/geldata/gel/blob/master/edb/server/server.py
Apache-2.0
async def _maybe_patch(self) -> None: """Apply patches to all the databases""" async with self._tenant.use_sys_pgcon() as syscon: patches = await self._prepare_patches(syscon) if not patches: return dbnames = await self.get_dbnames(syscon) async with asyncio.TaskGroup() as g: # Cap the parallelism used when applying patches, to avoid # having huge numbers of in flight patches that make # little visible progress in the logs. sem = asyncio.Semaphore(16) # Patch all the databases for dbname in dbnames: if dbname != defines.EDGEDB_SYSTEM_DB: g.create_task( self._maybe_patch_db(dbname, patches, sem)) # Patch the template db, so that any newly created databases # will have the patches. g.create_task(self._maybe_patch_db( defines.EDGEDB_TEMPLATE_DB, patches, sem)) await self._tenant.ensure_database_not_connected( defines.EDGEDB_TEMPLATE_DB ) # Patch the system db last. The system db needs to go last so # that it only gets updated if all of the other databases have # been succesfully patched. This is important, since we don't check # other databases for patches unless the system db is patched. # # Driving everything from the system db like this lets us # always use the correct schema when compiling patches. async with self._tenant.use_sys_pgcon() as syscon: await self._maybe_apply_patches( defines.EDGEDB_SYSTEM_DB, syscon, patches, sys=True)
Apply patches to all the databases
_maybe_patch
python
geldata/gel
edb/server/server.py
https://github.com/geldata/gel/blob/master/edb/server/server.py
Apache-2.0
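_maybe_patch above fans the patching work out over an asyncio.TaskGroup while a semaphore caps how many patch jobs run at once. A self-contained sketch of that bounded-concurrency pattern (requires Python 3.11+ for TaskGroup); the database names and the per-database work below are stand-ins, not server APIs.

import asyncio

async def patch_one(dbname: str, sem: asyncio.Semaphore) -> None:
    async with sem:
        await asyncio.sleep(0.1)  # stand-in for the real patch work
        print(f"patched {dbname}")

async def patch_all(dbnames: list[str]) -> None:
    sem = asyncio.Semaphore(16)  # at most 16 jobs in flight, as above
    async with asyncio.TaskGroup() as g:
        for dbname in dbnames:
            g.create_task(patch_one(dbname, sem))

asyncio.run(patch_all([f"db{i}" for i in range(40)]))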
async def run_startup_script_and_exit(self): """Run the script specified in *startup_script* and exit immediately""" if self._startup_script is None: raise AssertionError('startup script is not defined') self._compiler_pool = await compiler_pool.create_compiler_pool( **self._get_compiler_args() ) self.compilation_config_serializer = ( await self._compiler_pool.make_compilation_config_serializer() ) try: await binary.run_script( server=self, tenant=self._tenant, database=self._startup_script.database, user=self._startup_script.user, script=self._startup_script.text, ) finally: await self._destroy_compiler_pool()
Run the script specified in *startup_script* and exit immediately
run_startup_script_and_exit
python
geldata/gel
edb/server/server.py
https://github.com/geldata/gel/blob/master/edb/server/server.py
Apache-2.0
def _cleanup_wildcard_addrs( hosts: Sequence[str], ) -> tuple[list[str], list[str], bool, bool]: """Filter out conflicting addresses in presence of INADDR_ANY wildcards. Attempting to bind to 0.0.0.0 (or ::) _and_ a non-wildcard address will usually result in EADDRINUSE. To avoid this, filter out all specific addresses if a wildcard is present in the *hosts* sequence. Returns a tuple: first element is the new list of hosts, second element is a list of rejected host addrs/names. """ ipv4_hosts = set() ipv6_hosts = set() named_hosts = set() ipv4_wc = ipaddress.ip_address('0.0.0.0') ipv6_wc = ipaddress.ip_address('::') for host in hosts: if host == "*": ipv4_hosts.add(ipv4_wc) ipv6_hosts.add(ipv6_wc) continue try: ip = ipaddress.IPv4Address(host) except ValueError: pass else: ipv4_hosts.add(ip) continue try: ip6 = ipaddress.IPv6Address(host) except ValueError: pass else: ipv6_hosts.add(ip6) continue named_hosts.add(host) if not ipv4_hosts and not ipv6_hosts: return (list(hosts), [], False, False) if ipv4_wc not in ipv4_hosts and ipv6_wc not in ipv6_hosts: return (list(hosts), [], False, False) if ipv4_wc in ipv4_hosts and ipv6_wc in ipv6_hosts: return ( ['0.0.0.0', '::'], [ str(a) for a in ((named_hosts | ipv4_hosts | ipv6_hosts) - {ipv4_wc, ipv6_wc}) ], True, True, ) if ipv4_wc in ipv4_hosts: return ( [str(a) for a in ({ipv4_wc} | ipv6_hosts)], [str(a) for a in ((named_hosts | ipv4_hosts) - {ipv4_wc})], True, False, ) if ipv6_wc in ipv6_hosts: return ( [str(a) for a in ({ipv6_wc} | ipv4_hosts)], [str(a) for a in ((named_hosts | ipv6_hosts) - {ipv6_wc})], False, True, ) raise AssertionError('unreachable')
Filter out conflicting addresses in presence of INADDR_ANY wildcards. Attempting to bind to 0.0.0.0 (or ::) _and_ a non-wildcard address will usually result in EADDRINUSE. To avoid this, filter out all specific addresses if a wildcard is present in the *hosts* sequence. Returns a tuple: first element is the new list of hosts, second element is a list of rejected host addrs/names.
_cleanup_wildcard_addrs
python
geldata/gel
edb/server/server.py
https://github.com/geldata/gel/blob/master/edb/server/server.py
Apache-2.0
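A usage example for _cleanup_wildcard_addrs, assuming the function is importable; the host values are made up. Note that the returned tuple has four elements: the filtered hosts, the rejected hosts, and two flags reporting whether an IPv4 or an IPv6 wildcard was seen.

hosts, rejected, has_ipv4_wc, has_ipv6_wc = _cleanup_wildcard_addrs(
    ['0.0.0.0', '127.0.0.1', 'example.com'])
assert hosts == ['0.0.0.0']  # the wildcard displaces specific addresses
assert sorted(rejected) == ['127.0.0.1', 'example.com']
assert (has_ipv4_wc, has_ipv6_wc) == (True, False)

# '*' expands to both wildcards and nothing is rejected.
hosts, rejected, has_ipv4_wc, has_ipv6_wc = _cleanup_wildcard_addrs(['*'])
assert hosts == ['0.0.0.0', '::']
assert (rejected, has_ipv4_wc, has_ipv6_wc) == ([], True, True)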
async def _get_cluster_mode(ctx: BootstrapContext) -> ClusterMode: backend_params = ctx.cluster.get_runtime_params() tenant_id = backend_params.tenant_id # First, check the existence of EDGEDB_SUPERGROUP - the role which is # usually created at the beginning of bootstrap. is_default_tenant = tenant_id == buildmeta.get_default_tenant_id() ignore_others = is_default_tenant and ctx.args.ignore_other_tenants if is_default_tenant: result = await ctx.conn.sql_fetch_col( b""" SELECT r.rolname FROM pg_catalog.pg_roles AS r WHERE r.rolname LIKE ('%' || $1) """, args=[ edbdef.EDGEDB_SUPERGROUP.encode("utf-8"), ], ) else: result = await ctx.conn.sql_fetch_col( b""" SELECT r.rolname FROM pg_catalog.pg_roles AS r WHERE r.rolname = $1 """, args=[ ctx.cluster.get_role_name( edbdef.EDGEDB_SUPERGROUP).encode("utf-8"), ], ) if result: if not ignore_others: # Either our tenant slot is occupied, or there is # a default tenant present. return ClusterMode.regular # We were explicitly asked to ignore the other default tenant, # so check specifically if our tenant slot is occupied and ignore # the others. # This mode is used for in-place upgrade. for rolname in result: other_tenant_id = rolname[: -(len(edbdef.EDGEDB_SUPERGROUP) + 1)] if other_tenant_id == tenant_id.encode("utf-8"): return ClusterMode.regular # Then, check if the current database was bootstrapped in single-db mode. has_instdata = await ctx.conn.sql_fetch_val( trampoline.fixup_query(''' SELECT tablename FROM pg_catalog.pg_tables WHERE schemaname = 'edgedbinstdata_VER' AND tablename = 'instdata' ''').encode('utf-8'), ) if has_instdata: return ClusterMode.single_database # At last, check for single-role-bootstrapped instance by trying to find # the Gel System DB with the assumption that we are not running in # single-db mode. If not found, this is a pristine backend cluster. if is_default_tenant: result = await ctx.conn.sql_fetch_col( b''' SELECT datname FROM pg_database WHERE datname LIKE '%' || $1 ''', args=( edbdef.EDGEDB_SYSTEM_DB.encode("utf-8"), ), ) else: result = await ctx.conn.sql_fetch_col( b''' SELECT datname FROM pg_database WHERE datname = $1 ''', args=( ctx.cluster.get_db_name( edbdef.EDGEDB_SYSTEM_DB).encode("utf-8"), ), ) if result: if not ignore_others: # Either our tenant slot is occupied, or there is # a default tenant present. return ClusterMode.single_role # We were explicitly asked to ignore the other default tenant, # so check specifically if our tenant slot is occupied and ignore # the others. # This mode is used for in-place upgrade. for dbname in result: other_tenant_id = dbname[: -(len(edbdef.EDGEDB_SYSTEM_DB) + 1)] if other_tenant_id == tenant_id.encode("utf-8"): return ClusterMode.single_role return ClusterMode.pristine
, args=[ edbdef.EDGEDB_SUPERGROUP.encode("utf-8"), ], ) else: result = await ctx.conn.sql_fetch_col( b
_get_cluster_mode
python
geldata/gel
edb/server/bootstrap.py
https://github.com/geldata/gel/blob/master/edb/server/bootstrap.py
Apache-2.0
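The ignore-other-tenants path in _get_cluster_mode recovers a tenant id by stripping the supergroup suffix plus one separator character from a role name. A tiny sketch of that slicing; the constant's value and the role naming scheme shown here are assumptions for illustration only.

EDGEDB_SUPERGROUP = 'edgedb_supergroup'  # assumed value, for illustration
rolname = b'sometenant_' + EDGEDB_SUPERGROUP.encode('utf-8')
other_tenant_id = rolname[: -(len(EDGEDB_SUPERGROUP) + 1)]
print(other_tenant_id)  # b'sometenant'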
async def gather_patch_info( num: int, kind: str, patch: str, conn: pgcon.PGConnection, ) -> Optional[dict[str, list[str]]]: """Fetch info for a patch that needs to use the connection. Currently, the only thing we need is, for config updates, the order that columns appear in the config views in SQL. We need this because we need to preserve that order when we update the view. """ if '+config' in kind: # Find all the config views (they are pg_classes where # there is also a table with the same name but "_dummy" # at the end) and collect all their columns in order. return json.loads(await conn.sql_fetch_val('''\ select json_object_agg(v.relname, ( select json_agg(a.attname order by a.attnum) from pg_catalog.pg_attribute as a where v.oid = a.attrelid )) from pg_catalog.pg_class as v inner join pg_catalog.pg_tables as t on v.relname || '_dummy' = t.tablename '''.encode('utf-8'))) else: return None
Fetch info for a patch that needs to use the connection. Currently, the only thing we need is, for config updates, the order that columns appear in the config views in SQL. We need this because we need to preserve that order when we update the view.
gather_patch_info
python
geldata/gel
edb/server/bootstrap.py
https://github.com/geldata/gel/blob/master/edb/server/bootstrap.py
Apache-2.0
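For '+config' patch kinds, gather_patch_info returns a mapping from each config view name to its columns in their current order (and None for every other kind). A sketch of that shape; the view and column names below are invented.

# Hypothetical return value for a '+config' patch.
existing_view_columns = {
    'SystemConfig': ['id', 'name', 'value'],
    'DatabaseConfig': ['id', 'name', 'value', 'source'],
}

# The recorded order is reused when a view is recreated, so patching does
# not reshuffle its column layout.
for view, columns in existing_view_columns.items():
    print(view, '->', ', '.join(columns))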
def prepare_patch( num: int, kind: str, patch: str, schema: s_schema.Schema, reflschema: s_schema.Schema, schema_class_layout: s_refl.SchemaClassLayout, backend_params: params.BackendRuntimeParams, patch_info: Optional[dict[str, list[str]]], user_schema: Optional[s_schema.Schema]=None, global_schema: Optional[s_schema.Schema]=None, *, dbname: Optional[str]=None, ) -> PatchEntry: val = f'{pg_common.quote_literal(json.dumps(num + 1))}::jsonb' # TODO: This is an INSERT because 2.0 shipped without num_patches. # We can just make this an UPDATE for 3.0 update = trampoline.fixup_query(f"""\ INSERT INTO edgedbinstdata_VER.instdata (key, json) VALUES('num_patches', {val}) ON CONFLICT (key) DO UPDATE SET json = {val}; """) existing_view_columns = patch_info if '+testmode' in kind: if schema.get('cfg::TestSessionConfig', default=None): kind = kind.replace('+testmode', '') else: return (update,), (), {} # Pure SQL patches are simple if kind == 'sql': return (patch, update), (), {} # metaschema-sql: just recreate a function from metaschema if kind == 'metaschema-sql': func = getattr(metaschema, patch) create = dbops.CreateFunction(func(), or_replace=True) block = dbops.PLTopBlock() create.generate(block) return (block.to_string(), update), (), {} if kind == 'repair': assert not patch if not user_schema: return (update,), (), dict(is_user_update=True) assert global_schema # TODO: Implement the last-repair-only optimization? try: logger.info("repairing database '%s'", dbname) sql = prepare_repair_patch( schema, reflschema, user_schema, global_schema, schema_class_layout, backend_params ) except errors.EdgeDBError as e: if isinstance(e, errors.InternalServerError): raise raise errors.SchemaError( f'Could not repair schema inconsistencies in ' f'database branch "{dbname}". Probably the schema is ' f'no longer valid due to a bug fix.\n' f'Downgrade to the last working version, fix ' f'the schema issue, and try again.' ) from e return (update, sql), (), {} # EdgeQL and reflection schema patches need to be compiled. current_block = dbops.PLTopBlock() preblock = current_block.add_block() subblock = current_block.add_block() std_plans = [] updates: dict[str, Any] = {} global_schema_update = kind == 'ext-pkg' sys_update_only = global_schema_update or kind.endswith('+globalonly') if kind == 'ext-pkg': # N.B: We process this without actually having the global # schema present, so we don't do any check for if it already # exists. The backend code will overwrite an older version's # JSON in the global metadata if it was already present. patch = s_std.get_std_module_text(sn.UnqualName(f'ext/{patch}')) if ( kind == 'edgeql' or kind == 'ext-pkg' or kind.startswith('edgeql+schema') ): assert '+user_ext' not in kind for ddl_cmd in edgeql.parse_block(patch): assert isinstance(ddl_cmd, qlast.DDLCommand) # First apply it to the regular schema, just so we can update # stdschema delta_command = s_ddl.delta_from_ddl( ddl_cmd, modaliases={}, schema=schema, stdmode=True) schema, _, _ = _process_delta_params( delta_command, schema, backend_params) # We need to extract all ids of new objects created when # applying it to the regular schema, so that we can make sure # to use the same ids in the reflschema. schema_object_ids = _get_schema_object_ids(delta_command) # Then apply it to the reflschema, which we will use to drive # the actual table updating. 
delta_command = s_ddl.delta_from_ddl( ddl_cmd, modaliases={}, schema=reflschema, schema_object_ids=schema_object_ids, stdmode=True) reflschema, plan, tplan = _process_delta_params( delta_command, reflschema, backend_params) std_plans.append(delta_command) plan.generate(subblock) tplan.generate(subblock) metadata_user_schema = reflschema elif kind.startswith('edgeql+user_ext'): assert '+schema' not in kind # There isn't anything to do on the system database for # userext updates. if user_schema is None: return (update,), (), dict(is_user_update=True) # Only run a userext update if the extension we are trying to # update is installed. extension_name = kind.split('|')[-1] extension = user_schema.get_global( s_exts.Extension, extension_name, default=None) if not extension: return (update,), (), {} assert global_schema cschema = s_schema.ChainedSchema( schema, user_schema, global_schema, ) for ddl_cmd in edgeql.parse_block(patch): assert isinstance(ddl_cmd, qlast.DDLCommand) delta_command = s_ddl.delta_from_ddl( ddl_cmd, modaliases={}, schema=cschema, stdmode=False, testmode=True, ) cschema, plan, tplan = _process_delta_params( delta_command, cschema, backend_params) std_plans.append(delta_command) plan.generate(subblock) tplan.generate(subblock) if '+config' in kind: views = metaschema.get_config_views(cschema, existing_view_columns) views.generate(subblock) metadata_user_schema = cschema.get_top_schema() elif kind == 'sql-introspection': support_view_commands = dbops.CommandGroup() support_view_commands.add_commands( metaschema._generate_sql_information_schema( backend_params.instance_params.version ) ) support_view_commands.generate(subblock) _generate_drop_views(list(support_view_commands), preblock) metadata_user_schema = reflschema else: raise AssertionError(f'unknown patch type {kind}') if kind.startswith('edgeql+schema'): # If we are modifying the schema layout, we need to rerun # generate_structure to collect schema changes not reflected # in the public schema and to discover the new introspection # query. 
reflection = s_refl.generate_structure( reflschema, make_funcs=False, ) reflschema, plan, tplan = _process_delta_params( reflection.intro_schema_delta, reflschema, backend_params) plan.generate(subblock) tplan.generate(subblock) compiler = edbcompiler.new_compiler( std_schema=schema, reflection_schema=reflschema, schema_class_layout=schema_class_layout ) local_intro_sql, global_intro_sql = compile_intro_queries_stdlib( compiler=compiler, user_schema=reflschema, reflection=reflection, ) updates.update(dict( classlayout=reflection.class_layout, local_intro_query=local_intro_sql.encode('utf-8'), global_intro_query=global_intro_sql.encode('utf-8'), )) # This part is wildly hinky # We need to delete all the support views and recreate them at the end support_view_commands = dbops.CommandGroup() support_view_commands.add_commands([ dbops.CreateView(view) for view in metaschema._generate_schema_alias_views( reflschema, sn.UnqualName('schema') ) + metaschema._generate_schema_alias_views( reflschema, sn.UnqualName('sys') ) ]) support_view_commands.add_commands( metaschema._generate_sql_information_schema( backend_params.instance_params.version ) ) wrapper_views = metaschema._get_wrapper_views() support_view_commands.add_commands(list(wrapper_views)) trampolines = metaschema.trampoline_command(wrapper_views) _generate_drop_views( tuple(support_view_commands) + tuple(trampolines), preblock, ) # Now add the trampolines to support_view_commands support_view_commands.add_commands([t.make() for t in trampolines]) # We want to limit how much unconditional work we do, so only recreate # extension views if requested. if '+exts' in kind: for extview in metaschema._generate_extension_views(reflschema): support_view_commands.add_command( dbops.CreateView(extview, or_replace=True)) # Though we always update the instdata for the config system, # because it is currently the most convenient way to make sure # all the versioned fields get updated. config_spec = config.load_spec_from_schema(schema) # Similarly, only do config system updates if requested. 
if '+config' in kind: support_view_commands.add_command( metaschema.get_config_views(schema, existing_view_columns)) support_view_commands.add_command( metaschema._get_regenerated_config_support_functions( config_spec ) ) ( sysqueries, report_configs_typedesc_1_0, report_configs_typedesc_2_0, ) = compile_sys_queries( reflschema, compiler, config_spec, ) updates.update(dict( sysqueries=json.dumps(sysqueries).encode('utf-8'), report_configs_typedesc_1_0=report_configs_typedesc_1_0, report_configs_typedesc_2_0=report_configs_typedesc_2_0, configspec=config.spec_to_json(config_spec).encode('utf-8'), )) support_view_commands.generate(subblock) compiler = edbcompiler.new_compiler( std_schema=schema, reflection_schema=reflschema, schema_class_layout=schema_class_layout ) compilerctx = edbcompiler.new_compiler_context( compiler_state=compiler.state, user_schema=metadata_user_schema, bootstrap_mode=user_schema is None, ) for std_plan in std_plans: edbcompiler.compile_schema_storage_in_delta( ctx=compilerctx, delta=std_plan, block=subblock, ) patch = current_block.to_string() if debug.flags.delta_execute: debug.header('Patch Script') debug.dump_code(patch, lexer='sql') if not global_schema_update: updates.update(dict( std_and_reflection_schema=(schema, reflschema), )) bins = ( 'std_and_reflection_schema', 'global_schema', 'classlayout', 'report_configs_typedesc_1_0', 'report_configs_typedesc_2_0', ) rawbin = ( 'report_configs_typedesc_1_0', 'report_configs_typedesc_2_0', ) jsons = ( 'sysqueries', 'configspec', ) # This is unversioned because it is consumed by a function in metaschema. # (And only by a function in metaschema.) unversioned = ( 'configspec', ) # Just for the system database, we need to update the cached pickle # of everything. version_key = patches.get_version_key(num + 1) sys_updates: tuple[str, ...] = () spatches: tuple[str, ...] = (patch,) for k, v in updates.items(): key = f"'{k}{version_key}'" if k not in unversioned else f"'{k}'" if k in bins: if k not in rawbin: v = pickle.dumps(v, protocol=pickle.HIGHEST_PROTOCOL) val = f'{pg_common.quote_bytea_literal(v)}' sys_updates += (trampoline.fixup_query(f''' INSERT INTO edgedbinstdata_VER.instdata (key, bin) VALUES({key}, {val}) ON CONFLICT (key) DO UPDATE SET bin = {val}; '''),) else: typ, col = ('jsonb', 'json') if k in jsons else ('text', 'text') val = f'{pg_common.quote_literal(v.decode("utf-8"))}::{typ}' sys_updates += (trampoline.fixup_query(f''' INSERT INTO edgedbinstdata_VER.instdata (key, {col}) VALUES({key}, {val}) ON CONFLICT (key) DO UPDATE SET {col} = {val}; '''),) if k in unversioned: spatches += (sys_updates[-1],) # If we're updating the global schema (for extension packages, # perhaps), only run the script once, on the system connection. # Since the state is global, we only should update it once. regular_updates: tuple[str, ...] if sys_update_only: regular_updates = (update,) sys_updates = (patch,) + sys_updates else: regular_updates = spatches + (update,) # FIXME: This is a hack to make the is_user_update cases # work (by ensuring we can always read their current state), # but this is actually a pretty dumb approach and we can do # better. regular_updates += sys_updates return regular_updates, sys_updates, updates
) existing_view_columns = patch_info if '+testmode' in kind: if schema.get('cfg::TestSessionConfig', default=None): kind = kind.replace('+testmode', '') else: return (update,), (), {} # Pure SQL patches are simple if kind == 'sql': return (patch, update), (), {} # metaschema-sql: just recreate a function from metaschema if kind == 'metaschema-sql': func = getattr(metaschema, patch) create = dbops.CreateFunction(func(), or_replace=True) block = dbops.PLTopBlock() create.generate(block) return (block.to_string(), update), (), {} if kind == 'repair': assert not patch if not user_schema: return (update,), (), dict(is_user_update=True) assert global_schema # TODO: Implement the last-repair-only optimization? try: logger.info("repairing database '%s'", dbname) sql = prepare_repair_patch( schema, reflschema, user_schema, global_schema, schema_class_layout, backend_params ) except errors.EdgeDBError as e: if isinstance(e, errors.InternalServerError): raise raise errors.SchemaError( f'Could not repair schema inconsistencies in ' f'database branch "{dbname}". Probably the schema is ' f'no longer valid due to a bug fix.\n' f'Downgrade to the last working version, fix ' f'the schema issue, and try again.' ) from e return (update, sql), (), {} # EdgeQL and reflection schema patches need to be compiled. current_block = dbops.PLTopBlock() preblock = current_block.add_block() subblock = current_block.add_block() std_plans = [] updates: dict[str, Any] = {} global_schema_update = kind == 'ext-pkg' sys_update_only = global_schema_update or kind.endswith('+globalonly') if kind == 'ext-pkg': # N.B: We process this without actually having the global # schema present, so we don't do any check for if it already # exists. The backend code will overwrite an older version's # JSON in the global metadata if it was already present. patch = s_std.get_std_module_text(sn.UnqualName(f'ext/{patch}')) if ( kind == 'edgeql' or kind == 'ext-pkg' or kind.startswith('edgeql+schema') ): assert '+user_ext' not in kind for ddl_cmd in edgeql.parse_block(patch): assert isinstance(ddl_cmd, qlast.DDLCommand) # First apply it to the regular schema, just so we can update # stdschema delta_command = s_ddl.delta_from_ddl( ddl_cmd, modaliases={}, schema=schema, stdmode=True) schema, _, _ = _process_delta_params( delta_command, schema, backend_params) # We need to extract all ids of new objects created when # applying it to the regular schema, so that we can make sure # to use the same ids in the reflschema. schema_object_ids = _get_schema_object_ids(delta_command) # Then apply it to the reflschema, which we will use to drive # the actual table updating. delta_command = s_ddl.delta_from_ddl( ddl_cmd, modaliases={}, schema=reflschema, schema_object_ids=schema_object_ids, stdmode=True) reflschema, plan, tplan = _process_delta_params( delta_command, reflschema, backend_params) std_plans.append(delta_command) plan.generate(subblock) tplan.generate(subblock) metadata_user_schema = reflschema elif kind.startswith('edgeql+user_ext'): assert '+schema' not in kind # There isn't anything to do on the system database for # userext updates. if user_schema is None: return (update,), (), dict(is_user_update=True) # Only run a userext update if the extension we are trying to # update is installed. 
extension_name = kind.split('|')[-1] extension = user_schema.get_global( s_exts.Extension, extension_name, default=None) if not extension: return (update,), (), {} assert global_schema cschema = s_schema.ChainedSchema( schema, user_schema, global_schema, ) for ddl_cmd in edgeql.parse_block(patch): assert isinstance(ddl_cmd, qlast.DDLCommand) delta_command = s_ddl.delta_from_ddl( ddl_cmd, modaliases={}, schema=cschema, stdmode=False, testmode=True, ) cschema, plan, tplan = _process_delta_params( delta_command, cschema, backend_params) std_plans.append(delta_command) plan.generate(subblock) tplan.generate(subblock) if '+config' in kind: views = metaschema.get_config_views(cschema, existing_view_columns) views.generate(subblock) metadata_user_schema = cschema.get_top_schema() elif kind == 'sql-introspection': support_view_commands = dbops.CommandGroup() support_view_commands.add_commands( metaschema._generate_sql_information_schema( backend_params.instance_params.version ) ) support_view_commands.generate(subblock) _generate_drop_views(list(support_view_commands), preblock) metadata_user_schema = reflschema else: raise AssertionError(f'unknown patch type {kind}') if kind.startswith('edgeql+schema'): # If we are modifying the schema layout, we need to rerun # generate_structure to collect schema changes not reflected # in the public schema and to discover the new introspection # query. reflection = s_refl.generate_structure( reflschema, make_funcs=False, ) reflschema, plan, tplan = _process_delta_params( reflection.intro_schema_delta, reflschema, backend_params) plan.generate(subblock) tplan.generate(subblock) compiler = edbcompiler.new_compiler( std_schema=schema, reflection_schema=reflschema, schema_class_layout=schema_class_layout ) local_intro_sql, global_intro_sql = compile_intro_queries_stdlib( compiler=compiler, user_schema=reflschema, reflection=reflection, ) updates.update(dict( classlayout=reflection.class_layout, local_intro_query=local_intro_sql.encode('utf-8'), global_intro_query=global_intro_sql.encode('utf-8'), )) # This part is wildly hinky # We need to delete all the support views and recreate them at the end support_view_commands = dbops.CommandGroup() support_view_commands.add_commands([ dbops.CreateView(view) for view in metaschema._generate_schema_alias_views( reflschema, sn.UnqualName('schema') ) + metaschema._generate_schema_alias_views( reflschema, sn.UnqualName('sys') ) ]) support_view_commands.add_commands( metaschema._generate_sql_information_schema( backend_params.instance_params.version ) ) wrapper_views = metaschema._get_wrapper_views() support_view_commands.add_commands(list(wrapper_views)) trampolines = metaschema.trampoline_command(wrapper_views) _generate_drop_views( tuple(support_view_commands) + tuple(trampolines), preblock, ) # Now add the trampolines to support_view_commands support_view_commands.add_commands([t.make() for t in trampolines]) # We want to limit how much unconditional work we do, so only recreate # extension views if requested. if '+exts' in kind: for extview in metaschema._generate_extension_views(reflschema): support_view_commands.add_command( dbops.CreateView(extview, or_replace=True)) # Though we always update the instdata for the config system, # because it is currently the most convenient way to make sure # all the versioned fields get updated. config_spec = config.load_spec_from_schema(schema) # Similarly, only do config system updates if requested. 
if '+config' in kind: support_view_commands.add_command( metaschema.get_config_views(schema, existing_view_columns)) support_view_commands.add_command( metaschema._get_regenerated_config_support_functions( config_spec ) ) ( sysqueries, report_configs_typedesc_1_0, report_configs_typedesc_2_0, ) = compile_sys_queries( reflschema, compiler, config_spec, ) updates.update(dict( sysqueries=json.dumps(sysqueries).encode('utf-8'), report_configs_typedesc_1_0=report_configs_typedesc_1_0, report_configs_typedesc_2_0=report_configs_typedesc_2_0, configspec=config.spec_to_json(config_spec).encode('utf-8'), )) support_view_commands.generate(subblock) compiler = edbcompiler.new_compiler( std_schema=schema, reflection_schema=reflschema, schema_class_layout=schema_class_layout ) compilerctx = edbcompiler.new_compiler_context( compiler_state=compiler.state, user_schema=metadata_user_schema, bootstrap_mode=user_schema is None, ) for std_plan in std_plans: edbcompiler.compile_schema_storage_in_delta( ctx=compilerctx, delta=std_plan, block=subblock, ) patch = current_block.to_string() if debug.flags.delta_execute: debug.header('Patch Script') debug.dump_code(patch, lexer='sql') if not global_schema_update: updates.update(dict( std_and_reflection_schema=(schema, reflschema), )) bins = ( 'std_and_reflection_schema', 'global_schema', 'classlayout', 'report_configs_typedesc_1_0', 'report_configs_typedesc_2_0', ) rawbin = ( 'report_configs_typedesc_1_0', 'report_configs_typedesc_2_0', ) jsons = ( 'sysqueries', 'configspec', ) # This is unversioned because it is consumed by a function in metaschema. # (And only by a function in metaschema.) unversioned = ( 'configspec', ) # Just for the system database, we need to update the cached pickle # of everything. version_key = patches.get_version_key(num + 1) sys_updates: tuple[str, ...] = () spatches: tuple[str, ...] = (patch,) for k, v in updates.items(): key = f"'{k}{version_key}'" if k not in unversioned else f"'{k}'" if k in bins: if k not in rawbin: v = pickle.dumps(v, protocol=pickle.HIGHEST_PROTOCOL) val = f'{pg_common.quote_bytea_literal(v)}' sys_updates += (trampoline.fixup_query(f
prepare_patch
python
geldata/gel
edb/server/bootstrap.py
https://github.com/geldata/gel/blob/master/edb/server/bootstrap.py
Apache-2.0
async def create_branch( cluster: pgcluster.BaseCluster, schema: s_schema.Schema, conn: metaschema.PGConnection, src_dbname: str, tgt_dbname: str, mode: str, backend_id_fixup_sql: bytes, ) -> None: """Create a new database (branch) based on an existing one.""" # Dump the edgedbpub schema that holds user data and any extensions. schema_dump = await cluster.dump_database( src_dbname, include_schemas=('edgedbpub',), include_extensions=('*',), schema_only=True, ) # Tuples types are always kept in edgedbpub, but some already # exist from the std schema, so we need to skip those. We also # need to skip recreating the schema. This requires doing some # annoying postprocessing. to_skip = [ str(obj.id) for obj in schema.get_objects(type=s_types.Tuple) ] old_lines = schema_dump.decode('utf-8').split('\n') new_lines = [] skipping = False for line in old_lines: if line == ');' and skipping: skipping = False continue elif line.startswith('CREATE SCHEMA'): continue elif line.startswith('CREATE TYPE'): if any(skip in line for skip in to_skip): skipping = True elif line == 'SET transaction_timeout = 0;': continue if skipping: continue new_lines.append(line) s_schema_dump = '\n'.join(new_lines) await conn.sql_execute(s_schema_dump.encode('utf-8')) # Copy database config variables over directly copy_cfg_query = f''' select edgedb._copy_database_configs( {pg_common.quote_literal(src_dbname)}) '''.encode('utf-8') await conn.sql_execute(copy_cfg_query) # HACK: Empty out all schema multi property tables. This is # because the original template has the stdschema in it, and so we # use --on-conflict-do-nothing to avoid conflicts since the dump # will have that in it too. That works, except for multi properties # where it won't conflict, and modules, which might have a different # 'default' module on each side. (Since it isn't in the stdschema, # and could have an old id persisted from an in-place upgrade.) to_delete: set[s_obj.Object] = { prop for prop in schema.get_objects(type=s_props.Property) if prop.get_cardinality(schema).is_multi() and prop.get_name(schema).module not in irtyputils.VIEW_MODULES } to_delete.add(schema.get('schema::Module')) for target in to_delete: name = pg_common.get_backend_name(schema, target, catenate=True) await conn.sql_execute(f'delete from {name}'.encode('utf-8')) await conn.sql_execute(trampoline.fixup_query(f''' delete from edgedbinstdata_VER.instdata where key = 'configspec_ext' ''').encode('utf-8')) # Do the dump/restore for the data. We always need to copy over # edgedbstd, since it has the reflected schema. We copy over # edgedbpub when it is a data branch. data_arg = ['--table=edgedbpub.*'] if mode == qlast.BranchType.DATA else [] dump_args = [ '--data-only', '--table=edgedbstd.*', f'--table={pg_common.versioned_schema("edgedbstd")}.*', '--table=edgedb._db_config', f'--table={pg_common.versioned_schema("edgedbinstdata")}.instdata', *data_arg, '--disable-triggers', # We need to use --inserts so that we can use --on-conflict-do-nothing. # (See above, in discussion of the HACK.) '--inserts', '--rows-per-insert=100', '--on-conflict-do-nothing', ] await cluster._copy_database( src_dbname, tgt_dbname, dump_args, [], ) # Restore the search_path as the dump might have altered it. await conn.sql_execute( b"SELECT pg_catalog.set_config('search_path', 'edgedb', false)") # Fixup the backend ids in the schema to match what is actually in pg. await conn.sql_execute(backend_id_fixup_sql)
Create a new database (branch) based on an existing one.
create_branch
python
geldata/gel
edb/server/bootstrap.py
https://github.com/geldata/gel/blob/master/edb/server/bootstrap.py
Apache-2.0
def compile_intro_queries_stdlib( *, compiler: edbcompiler.Compiler, user_schema: s_schema.Schema, global_schema: s_schema.Schema=s_schema.EMPTY_SCHEMA, reflection: s_refl.SchemaReflectionParts, ) -> Tuple[str, str]: compilerctx = edbcompiler.new_compiler_context( compiler_state=compiler.state, user_schema=user_schema, global_schema=global_schema, schema_reflection_mode=True, output_format=edbcompiler.OutputFormat.JSON_ELEMENTS, ) # The introspection query bits are returned in chunks # because it's a large UNION and we currently generate SQL # that is much harder for Postgres to plan as opposed to a # straight flat UNION. sql_intro_local_parts = [] sql_intro_global_parts = [] for intropart in reflection.local_intro_parts: sql_intro_local_parts.append( compile_single_query( intropart, compilerctx=compilerctx, ), ) for intropart in reflection.global_intro_parts: sql_intro_global_parts.append( compile_single_query( intropart, compilerctx=compilerctx, ), ) local_intro_sql = ' UNION ALL '.join( f'({x})' for x in sql_intro_local_parts) local_intro_sql = f''' WITH intro(c) AS ({local_intro_sql}) SELECT json_agg(intro.c) FROM intro ''' global_intro_sql = ' UNION ALL '.join( f'({x})' for x in sql_intro_global_parts) global_intro_sql = f''' WITH intro(c) AS ({global_intro_sql}) SELECT json_agg(intro.c) FROM intro ''' return local_intro_sql, global_intro_sql
global_intro_sql = ' UNION ALL '.join( f'({x})' for x in sql_intro_global_parts) global_intro_sql = f
compile_intro_queries_stdlib
python
geldata/gel
edb/server/bootstrap.py
https://github.com/geldata/gel/blob/master/edb/server/bootstrap.py
Apache-2.0
def compile_sys_queries( schema: s_schema.Schema, compiler: edbcompiler.Compiler, config_spec: config.Spec, ) -> tuple[dict[str, str], bytes, bytes]: queries = {} _, sql = compile_bootstrap_script( compiler, schema, 'SELECT cfg::_get_config_json_internal()', expected_cardinality_one=True, ) queries['config'] = sql _, sql = compile_bootstrap_script( compiler, schema, "SELECT cfg::_get_config_json_internal(sources := ['database'])", expected_cardinality_one=True, ) queries['dbconfig'] = sql _, sql = compile_bootstrap_script( compiler, schema, """ SELECT cfg::_get_config_json_internal(max_source := 'system override') """, expected_cardinality_one=True, ) queries['sysconfig'] = sql _, sql = compile_bootstrap_script( compiler, schema, """ SELECT cfg::_get_config_json_internal(max_source := 'postgres client') """, expected_cardinality_one=True, ) queries['sysconfig_default'] = sql _, sql = compile_bootstrap_script( compiler, schema, f"""SELECT ( SELECT sys::Branch FILTER .name != "{edbdef.EDGEDB_TEMPLATE_DB}" ).name""", expected_cardinality_one=False, ) queries['listdbs'] = sql role_query = ''' SELECT sys::Role { name, superuser, password, }; ''' _, sql = compile_bootstrap_script( compiler, schema, role_query, expected_cardinality_one=False, ) queries['roles'] = sql tids_query = ''' SELECT schema::ScalarType { id, backend_id, } FILTER .id IN <uuid>json_array_unpack(<json>$ids); ''' _, sql = compile_bootstrap_script( compiler, schema, tids_query, expected_cardinality_one=False, ) queries['backend_tids'] = sql # When we restore a database from a dump, OIDs for non-system # Postgres types might get skewed as they are not part of the dump. # A good example of that is `std::bigint` which is implemented as # a custom domain type. The OIDs are stored under # `schema::Object.backend_id` property and are injected into # array query arguments. # # The code below re-syncs backend_id properties of Gel builtin # types with the actual OIDs in the DB. backend_id_fixup_edgeql = ''' UPDATE schema::ScalarType FILTER NOT (.abstract ?? False) AND NOT (.transient ?? False) SET { backend_id := sys::_get_pg_type_for_edgedb_type( .id, .__type__.name, <uuid>{}, [is schema::ScalarType].sql_type ?? ( select [is schema::ScalarType] .bases[is schema::ScalarType] limit 1 ).sql_type, ) }; UPDATE schema::Tuple FILTER NOT (.abstract ?? False) AND NOT (.transient ?? False) SET { backend_id := sys::_get_pg_type_for_edgedb_type( .id, .__type__.name, <uuid>{}, [is schema::ScalarType].sql_type ?? ( select [is schema::ScalarType] .bases[is schema::ScalarType] limit 1 ).sql_type, ) }; UPDATE {schema::Range, schema::MultiRange} FILTER NOT (.abstract ?? False) AND NOT (.transient ?? False) SET { backend_id := sys::_get_pg_type_for_edgedb_type( .id, .__type__.name, .element_type.id, <str>{}, ) }; UPDATE schema::Array FILTER NOT (.abstract ?? False) AND NOT (.transient ?? 
False) SET { backend_id := sys::_get_pg_type_for_edgedb_type( .id, .__type__.name, .element_type.id, <str>{}, ) }; ''' _, sql = compile_bootstrap_script( compiler, schema, backend_id_fixup_edgeql, ) queries['backend_id_fixup'] = sql report_settings: list[str] = [] for setname in config_spec: setting = config_spec[setname] if setting.report: report_settings.append(setname) report_configs_query = f''' SELECT assert_single(cfg::Config {{ {', '.join(report_settings)} }}); ''' units = edbcompiler.compile( ctx=edbcompiler.new_compiler_context( compiler_state=compiler.state, user_schema=schema, expected_cardinality_one=True, json_parameters=False, output_format=edbcompiler.OutputFormat.BINARY, bootstrap_mode=True, ), source=edgeql.Source.from_string(report_configs_query), ).units assert len(units) == 1 report_configs_typedesc_2_0 = units[0].out_type_id + units[0].out_type_data queries['report_configs'] = units[0].sql.decode() units = edbcompiler.compile( ctx=edbcompiler.new_compiler_context( compiler_state=compiler.state, user_schema=schema, expected_cardinality_one=True, json_parameters=False, output_format=edbcompiler.OutputFormat.BINARY, bootstrap_mode=True, protocol_version=(1, 0), ), source=edgeql.Source.from_string(report_configs_query), ).units assert len(units) == 1 report_configs_typedesc_1_0 = units[0].out_type_id + units[0].out_type_data return ( queries, report_configs_typedesc_1_0, report_configs_typedesc_2_0, )
SELECT cfg::_get_config_json_internal(max_source := 'system override') """, expected_cardinality_one=True, ) queries['sysconfig'] = sql _, sql = compile_bootstrap_script( compiler, schema,
compile_sys_queries
python
geldata/gel
edb/server/bootstrap.py
https://github.com/geldata/gel/blob/master/edb/server/bootstrap.py
Apache-2.0
async def _check_catalog_compatibility( ctx: BootstrapContext, ) -> PGConnectionProxy: tenant_id = ctx.cluster.get_runtime_params().tenant_id if ctx.mode == ClusterMode.single_database: sys_db = await ctx.conn.sql_fetch_val( trampoline.fixup_query(""" SELECT current_database() FROM edgedbinstdata_VER.instdata WHERE key = $1 AND json->>'tenant_id' = $2 """).encode('utf-8'), args=[ f"{edbdef.EDGEDB_TEMPLATE_DB}metadata".encode("utf-8"), tenant_id.encode("utf-8"), ], ) else: is_default_tenant = tenant_id == buildmeta.get_default_tenant_id() if is_default_tenant: sys_db = await ctx.conn.sql_fetch_val( b""" SELECT datname FROM pg_database WHERE datname LIKE '%' || $1 ORDER BY datname = $1, datname DESC LIMIT 1 """, args=[ edbdef.EDGEDB_SYSTEM_DB.encode("utf-8"), ], ) else: sys_db = await ctx.conn.sql_fetch_val( b""" SELECT datname FROM pg_database WHERE datname = $1 """, args=[ ctx.cluster.get_db_name( edbdef.EDGEDB_SYSTEM_DB).encode("utf-8"), ], ) if not sys_db: raise errors.ConfigurationError( 'database instance is corrupt', details=( f'The database instance does not appear to have been fully ' f'initialized or has been corrupted.' ) ) conn = PGConnectionProxy( ctx.cluster, source_description="_check_catalog_compatibility", dbname=sys_db.decode("utf-8") ) try: # versioned=False so we can properly fail on version/catalog mismatches. instancedata = await _get_instance_data(conn, versioned=False) datadir_version = instancedata.get('version') if datadir_version: datadir_major = datadir_version.get('major') expected_ver = buildmeta.get_version() datadir_catver = instancedata.get('catver') expected_catver = edbdef.EDGEDB_CATALOG_VERSION status = dict( data_catalog_version=datadir_catver, expected_catalog_version=expected_catver, ) if datadir_major != expected_ver.major: for status_sink in ctx.args.status_sinks: status_sink(f'INCOMPATIBLE={json.dumps(status)}') raise errors.ConfigurationError( 'database instance incompatible with this version of Gel', details=( f'The database instance was initialized with ' f'Gel version {datadir_major}, ' f'which is incompatible with this version ' f'{expected_ver.major}' ), hint=( f'You need to either recreate the instance and upgrade ' f'using dump/restore, or do an inplace upgrade.' ) ) if datadir_catver != expected_catver: for status_sink in ctx.args.status_sinks: status_sink(f'INCOMPATIBLE={json.dumps(status)}') raise errors.ConfigurationError( 'database instance incompatible with this version of Gel', details=( f'The database instance was initialized with ' f'Gel format version {datadir_catver}, ' f'but this version of the server expects ' f'format version {expected_catver}' ), hint=( f'You need to either recreate the instance and upgrade ' f'using dump/restore, or do an inplace upgrade.' ) ) except Exception: conn.terminate() raise return conn
).encode('utf-8'), args=[ f"{edbdef.EDGEDB_TEMPLATE_DB}metadata".encode("utf-8"), tenant_id.encode("utf-8"), ], ) else: is_default_tenant = tenant_id == buildmeta.get_default_tenant_id() if is_default_tenant: sys_db = await ctx.conn.sql_fetch_val( b
_check_catalog_compatibility
python
geldata/gel
edb/server/bootstrap.py
https://github.com/geldata/gel/blob/master/edb/server/bootstrap.py
Apache-2.0
async def ensure_bootstrapped( cluster: pgcluster.BaseCluster, args: edbargs.ServerConfig, ) -> tuple[bool, edbcompiler.Compiler]: """Bootstraps Gel instance if it hasn't been bootstrapped already. Returns True if bootstrap happened and False if the instance was already bootstrapped, along with the bootstrap compiler state. """ pgconn = PGConnectionProxy( cluster, source_description="ensure_bootstrapped" ) ctx = BootstrapContext(cluster=cluster, conn=pgconn, args=args) try: mode = await _get_cluster_mode(ctx) ctx = dataclasses.replace(ctx, mode=mode) if mode == ClusterMode.pristine: compiler = await _bootstrap(ctx) return True, compiler else: compiler = await _start(ctx) return False, compiler finally: pgconn.terminate()
Bootstraps Gel instance if it hasn't been bootstrapped already. Returns True if bootstrap happened and False if the instance was already bootstrapped, along with the bootstrap compiler state.
ensure_bootstrapped
python
geldata/gel
edb/server/bootstrap.py
https://github.com/geldata/gel/blob/master/edb/server/bootstrap.py
Apache-2.0
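A sketch of how a caller might consume ensure_bootstrapped; the wrapper function and the print statements are illustrative, while cluster and args stand for the objects the server startup code already holds.

async def start(cluster, args):
    # True only when this call performed the bootstrap; either way the
    # compiler built for the instance is returned.
    ran_bootstrap, compiler = await ensure_bootstrapped(cluster, args)
    if ran_bootstrap:
        print('instance bootstrapped on this start')
    else:
        print('instance was already bootstrapped')
    return compiler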
def oxford_comma(els: Sequence[str]) -> str:
    '''Join *els* into an English list, using the Oxford comma.'''
    assert els
    if len(els) == 1:
        return els[0]
    elif len(els) == 2:
        return f'{els[0]} and {els[1]}'
    else:
        return f'{", ".join(els[:-1])}, and {els[-1]}'
Join *els* into an English list, using the Oxford comma.
oxford_comma
python
geldata/gel
edb/server/args.py
https://github.com/geldata/gel/blob/master/edb/server/args.py
Apache-2.0
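Example outputs of oxford_comma, read directly off its three branches:

print(oxford_comma(['red']))                   # red
print(oxford_comma(['red', 'green']))          # red and green
print(oxford_comma(['red', 'green', 'blue']))  # red, green, and blue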
def _compile_schema_fixup( ctx: bootstrap.BootstrapContext, schema: s_schema.ChainedSchema, keys: dict[str, Any], ) -> str: """Compile any schema-specific fixes that need to be applied.""" current_block = dbops.PLTopBlock() backend_params = ctx.cluster.get_runtime_params() # Recompile functions that reference stdlib types (like # std::BaseObject or schema::Object), since new subtypes may have # been added. to_recompile = schema._top_schema.get_objects(type=s_func.Function) for func in to_recompile: if func.get_name(schema).get_root_module_name() == s_schema.EXT_MODULE: continue # If none of the types referenced in the function are standard # library types, we don't need to recompile. if not ( (expr := func.get_nativecode(schema)) and expr.refs and any( isinstance(dep, s_objtypes.ObjectType) and _is_stdlib_target(dep, schema) for dep in expr.refs.objects(schema) ) ): continue alter_func = func.init_delta_command( schema, sd.AlterObject ) alter_func.set_attribute_value( 'nativecode', func.get_nativecode(schema) ) alter_func.canonical = True # N.B: We are ignoring the schema changes, since we aren't # updating the schema version. _, plan, _ = bootstrap._process_delta_params( sd.DeltaRoot.from_commands(alter_func), schema, backend_params, stdmode=False, **keys, ) plan.generate(current_block) # Regenerate on_target_delete triggers for any links targeting a # stdlib type. links = schema._top_schema.get_objects(type=s_links.Link) for link in links: if link.get_name(schema).get_root_module_name() == s_schema.EXT_MODULE: continue source = link.get_source(schema) if ( not source or not source.is_material_object_type(schema) or link.get_computable(schema) or link.get_shortname(schema).name == '__type__' or not _is_stdlib_target(link.get_target(schema), schema) ): continue pol = link.get_on_target_delete(schema) # HACK: Set the policy in a temporary in-memory schema to be # something else, so that we can set it back to the real value # and pgdelta will generate code for it. fake_pol = ( s_links.LinkTargetDeleteAction.Allow if pol == s_links.LinkTargetDeleteAction.Restrict else s_links.LinkTargetDeleteAction.Restrict ) fake_schema = link.set_field_value(schema, 'on_target_delete', fake_pol) alter_delta, alter_link, _ = link.init_delta_branch( schema, sd.CommandContext(), sd.AlterObject ) alter_link.set_attribute_value('on_target_delete', pol) # N.B: We are ignoring the schema changes, since we aren't # updating the schema version. _, plan, _ = bootstrap._process_delta_params( sd.DeltaRoot.from_commands(alter_delta), fake_schema, backend_params, stdmode=False, **keys, ) plan.generate(current_block) return current_block.to_string()
Compile any schema-specific fixes that need to be applied.
_compile_schema_fixup
python
geldata/gel
edb/server/inplace_upgrade.py
https://github.com/geldata/gel/blob/master/edb/server/inplace_upgrade.py
Apache-2.0
async def inplace_upgrade( cluster: pgcluster.BaseCluster, args: edbargs.ServerConfig, ) -> None: """Perform some or all of the inplace upgrade operations""" pgconn = bootstrap.PGConnectionProxy( cluster, source_description="inplace_upgrade" ) ctx = bootstrap.BootstrapContext(cluster=cluster, conn=pgconn, args=args) try: # XXX: Do we need to do this? mode = await bootstrap._get_cluster_mode(ctx) ctx = dataclasses.replace(ctx, mode=mode) if args.inplace_upgrade_rollback: await _rollback_all(ctx) if args.inplace_upgrade_prepare: await _upgrade_all(ctx) if args.inplace_upgrade_finalize: await _finalize_all(ctx) finally: pgconn.terminate()
Perform some or all of the inplace upgrade operations
inplace_upgrade
python
geldata/gel
edb/server/inplace_upgrade.py
https://github.com/geldata/gel/blob/master/edb/server/inplace_upgrade.py
Apache-2.0
async def introspect_db(
    self,
    dbname: str,
    *,
    conn: Optional[pgcon.PGConnection] = None,
    reintrospection: bool = False,
) -> None:
    """Use this method to (re-)introspect a DB.

    If the DB is already registered in self._dbindex, its schema, config,
    etc. would simply be updated. If it's missing, an entry for it will be
    created.

    All remote notifications of remote events should use this method to
    refresh the state. Even if the remote event was a simple config change,
    a lot of other events could happen between the remote server sending it
    and us receiving it. E.g. a DB could have been dropped and recreated
    again. It's safer to refresh the entire state than refreshing individual
    components of it. Besides, DDL and database-level config modifications
    are supposed to be rare events.

    This supports passing in a connection to use as well, so that we can
    synchronously introspect on config changes without risking deadlock by
    acquiring two connections at once.

    Returns True if the query cache mode changed.
    """
    cm = (
        contextlib.nullcontext(conn)
        if conn
        else self._with_intro_pgcon(dbname)
    )
    async with cm as conn:
        if not conn:
            return

        # Acquire a per-db lock for doing the introspection, to avoid
        # race conditions where an older introspection might overwrite
        # a newer one.
        async with self.get_introspection_lock(dbname):
            await self._introspect_db(
                dbname, conn=conn, reintrospection=reintrospection
            )
Use this method to (re-)introspect a DB. If the DB is already registered in self._dbindex, its schema, config, etc. would simply be updated. If it's missing, an entry for it will be created. All remote notifications of remote events should use this method to refresh the state. Even if the remote event was a simple config change, a lot of other events could happen between the remote server sending it and us receiving it. E.g. a DB could have been dropped and recreated again. It's safer to refresh the entire state than refreshing individual components of it. Besides, DDL and database-level config modifications are supposed to be rare events. This supports passing in a connection to use as well, so that we can synchronously introspect on config changes without risking deadlock by acquiring two connections at once. Returns True if the query cache mode changed.
introspect_db
python
geldata/gel
edb/server/tenant.py
https://github.com/geldata/gel/blob/master/edb/server/tenant.py
Apache-2.0
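The `contextlib.nullcontext(conn) if conn else ...` pattern in introspect_db is worth calling out: it lets a single code path either borrow a caller-supplied connection or acquire (and later release) its own. A minimal, self-contained sketch of the same idea, with a dummy connection type standing in for the real pgcon connection:

import asyncio
import contextlib

class DummyConn:
    async def close(self) -> None:
        print("closed")

@contextlib.asynccontextmanager
async def open_connection():
    # Stand-in for acquiring a real connection from a pool.
    conn = DummyConn()
    try:
        yield conn
    finally:
        await conn.close()

async def introspect(conn: DummyConn | None = None) -> None:
    # Reuse the caller's connection if given, otherwise acquire our own;
    # nullcontext() makes both cases look identical to the body below and
    # never closes a connection it did not open.
    cm = contextlib.nullcontext(conn) if conn else open_connection()
    async with cm as c:
        print("introspecting with", c)

asyncio.run(introspect())             # acquires and closes its own connection
asyncio.run(introspect(DummyConn()))  # borrows the caller's connection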
async def _introspect_db(
    self,
    dbname: str,
    conn: pgcon.PGConnection,
    reintrospection: bool,
) -> None:
    from edb.pgsql import trampoline

    logger.info("introspecting database '%s'", dbname)
    assert self._dbindex is not None

    if db := self._dbindex.maybe_get_db(dbname):
        cache_mode_val = db.lookup_config('query_cache_mode')
    else:
        cache_mode_val = self._dbindex.lookup_config('query_cache_mode')
    old_cache_mode = config.QueryCacheMode.effective(cache_mode_val)

    # Introspection
    user_schema_json = (
        await self._server.introspect_user_schema_json(conn)
    )

    reflection_cache_json = await conn.sql_fetch_val(
        trampoline.fixup_query("""
            SELECT json_agg(o.c)
            FROM (
                SELECT
                    json_build_object(
                        'eql_hash', t.eql_hash,
                        'argnames', array_to_json(t.argnames)
                    ) AS c
                FROM
                    ROWS FROM(edgedb_VER._get_cached_reflection())
                        AS t(eql_hash text, argnames text[])
            ) AS o;
        """).encode('utf-8'),
    )

    reflection_cache = immutables.Map(
        {
            r["eql_hash"]: tuple(r["argnames"])
            for r in json.loads(reflection_cache_json)
        }
    )

    backend_ids_json = await conn.sql_fetch_val(
        trampoline.fixup_query("""
            SELECT
                json_object_agg(
                    "id"::text,
                    json_build_array("backend_id", "name")
                )::text
            FROM
                edgedb_VER."_SchemaType"
        """).encode('utf-8'),
    )
    backend_ids = json.loads(backend_ids_json)

    db_config_json = await self._server.introspect_db_config(conn)

    extensions = await self._introspect_extensions(conn)

    query_cache: list[tuple[bytes, ...]] | None = None
    if (
        not reintrospection
        and old_cache_mode is not config.QueryCacheMode.InMemory
    ):
        query_cache = await self._load_query_cache(conn)

    # Analysis
    compiler_pool = self._server.get_compiler_pool()
    parsed_db = await compiler_pool.parse_user_schema_db_config(
        user_schema_json, db_config_json, self.get_global_schema_pickle()
    )

    db = self._dbindex.register_db(
        dbname,
        user_schema_pickle=parsed_db.user_schema_pickle,
        schema_version=parsed_db.schema_version,
        db_config=parsed_db.database_config,
        reflection_cache=reflection_cache,
        backend_ids=backend_ids,
        extensions=extensions,
        ext_config_settings=parsed_db.ext_config_settings,
        feature_used_metrics=parsed_db.feature_used_metrics,
    )
    db.set_state_serializer(
        parsed_db.protocol_version,
        parsed_db.state_serializer,
    )

    cache_mode = config.QueryCacheMode.effective(
        db.lookup_config('query_cache_mode')
    )
    if query_cache and cache_mode is not config.QueryCacheMode.InMemory:
        db.hydrate_cache(query_cache)
    elif old_cache_mode is not cache_mode:
        logger.info(
            "clearing query cache for database '%s'", dbname)
        await conn.sql_execute(
            b'SELECT edgedb._clear_query_cache()')
        assert self._dbindex
        self._dbindex.get_db(dbname).clear_query_cache()
Introspect a database: fetch the user schema, reflection cache, backend type ids, database config and extensions, then register the database in the index and hydrate or clear its query cache as needed.
_introspect_db
python
geldata/gel
edb/server/tenant.py
https://github.com/geldata/gel/blob/master/edb/server/tenant.py
Apache-2.0
async def _early_introspect_db(self, dbname: str) -> None:
    """We need to always introspect the extensions for each database.

    Otherwise, we won't know to accept connections for graphql or http,
    for example, until a native connection is made.
    """
    current_tenant.set(self.get_instance_name())
    logger.info("introspecting extensions for database '%s'", dbname)

    async with self._with_intro_pgcon(dbname) as conn:
        if not conn:
            return

        assert self._dbindex is not None
        if not self._dbindex.has_db(dbname):
            extensions = await self._introspect_extensions(conn)
            # Re-check in case we have a concurrent introspection task.
            if not self._dbindex.has_db(dbname):
                self._dbindex.register_db(
                    dbname,
                    user_schema_pickle=None,
                    schema_version=None,
                    db_config=None,
                    reflection_cache=None,
                    backend_ids=None,
                    extensions=extensions,
                    ext_config_settings=None,
                    early=True,
                )

    # Early introspection runs *before* we start accepting tasks.
    # This means that if we are one of multiple frontends, and we
    # get an ensure-database-not-used message, we aren't able to
    # handle it. This can result in us hanging onto a connection
    # that another frontend wants to get rid of.
    #
    # We still want to use the pool, though, since it limits our
    # connections in the way we want.
    #
    # Hack around this by pruning the connection ourself.
    await self._pg_pool.prune_inactive_connections(dbname)
We need to always introspect the extensions for each database. Otherwise, we won't know to accept connections for graphql or http, for example, until a native connection is made.
_early_introspect_db
python
geldata/gel
edb/server/tenant.py
https://github.com/geldata/gel/blob/master/edb/server/tenant.py
Apache-2.0
async def _load_query_cache(
    self,
    conn: pgcon.PGConnection,
    keys: Optional[Iterable[uuid.UUID]] = None,
) -> list[tuple[bytes, ...]] | None:
    if keys is None:
        return await conn.sql_fetch(
            b'''
            SELECT "schema_version", "input", "output"
            FROM "edgedb"."_query_cache"
            ''',
            use_prep_stmt=True,
        )
    else:
        # If keys were specified, just load those keys.
        # TODO: Or should we do something time based?
        return await conn.sql_fetch(
            b'''
            SELECT "schema_version", "input", "output"
            FROM
                ROWS FROM (json_array_elements($1)) j(ikey)
                INNER JOIN "edgedb"."_query_cache"
                ON (to_jsonb(ARRAY[ikey])->>0)::uuid = key
            ''',
            args=(json.dumps(keys).encode('utf-8'),),
            use_prep_stmt=True,
        )
Load the query cache from the edgedb._query_cache table, either in full or only for the specified keys.
_load_query_cache
python
geldata/gel
edb/server/tenant.py
https://github.com/geldata/gel/blob/master/edb/server/tenant.py
Apache-2.0
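The keyed branch of _load_query_cache passes all requested keys as one JSON array parameter and joins it against the cache table instead of interpolating an IN (...) list. A hedged, generic sketch of that technique; the table name query_cache and the asyncpg-style conn.fetch call are assumptions, not the server's real API:

import json
import uuid

async def load_some(conn, keys: list[uuid.UUID]):
    # One bind parameter carries any number of keys; Postgres unnests the
    # JSON array and joins it against the cache table.
    return await conn.fetch(
        '''
        SELECT c.key, c.payload
        FROM json_array_elements_text($1::json) AS j(ikey)
        JOIN query_cache AS c ON c.key = j.ikey::uuid
        ''',
        json.dumps([str(k) for k in keys]),
    )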
def with_context(
    self,
    *,
    base_url: Optional[str] = None,
    headers: HeaderType = None,
    url_munger: Optional[Callable[[str], str]] = None,
) -> Self:
    """Create an HttpClient with common optional base URL and headers
    that will be applied to all requests."""
    return HttpClientContext(
        http_client=self,
        base_url=base_url,
        headers=headers,
        url_munger=url_munger,
    )  # type: ignore
Create an HttpClient with common optional base URL and headers that will be applied to all requests.
with_context
python
geldata/gel
edb/server/http.py
https://github.com/geldata/gel/blob/master/edb/server/http.py
Apache-2.0
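A hypothetical usage sketch for with_context. The import path mirrors the file above, but the HttpClient constructor arguments and the way requests are issued are assumptions:

from edb.server.http import HttpClient   # assumed import path

client = HttpClient()                     # constructor arguments are an assumption
github = client.with_context(
    base_url="https://api.github.com",
    headers={"Accept": "application/vnd.github+json"},
)
# Requests issued through `github` now inherit the base URL and headers,
# while `client` itself is left untouched.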
async def acquire(self, dbname: str) -> C:
    """Acquire a connection from the database. This connection must be
    released."""
    if not self._task:
        raise asyncio.CancelledError()
    for i in range(config.CONNECT_FAILURE_RETRIES + 1):
        id = self._next_conn_id
        self._next_conn_id += 1
        acquire: asyncio.Future[int] = asyncio.Future()
        self._acquires[id] = acquire
        self._pool._acquire(id, dbname)
        self._try_read()
        # This may throw!
        try:
            conn = await acquire
            c = self._conns[conn]
            self._conns_held[c] = id
            return c
        except Exception as e:
            # 3D000 - INVALID CATALOG NAME, database does not exist
            # Skip retry and propagate the error immediately
            if getattr(e, 'fields', {}).get('C') == '3D000':
                raise
            # Allow the final exception to escape
            if i == config.CONNECT_FAILURE_RETRIES:
                logger.exception(
                    'Failed to acquire connection, will not '
                    f'retry {dbname} ({self._cur_capacity} '
                    'active)'
                )
                raise
            logger.exception(
                'Failed to acquire connection, will retry: '
                f'{dbname} ({self._cur_capacity} active)'
            )
    raise AssertionError("Unreachable end of loop")
Acquire a connection from the database. This connection must be released.
acquire
python
geldata/gel
edb/server/connpool/pool2.py
https://github.com/geldata/gel/blob/master/edb/server/connpool/pool2.py
Apache-2.0
def release(self, dbname: str, conn: C, discard: bool = False) -> None:
    """Releases a connection back into the pool, discarding or returning
    it in the background."""
    id = self._conns_held.pop(conn)
    if discard:
        self._pool._discard(id)
    else:
        self._pool._release(id)
    self._try_read()
Releases a connection back into the pool, discarding or returning it in the background.
release
python
geldata/gel
edb/server/connpool/pool2.py
https://github.com/geldata/gel/blob/master/edb/server/connpool/pool2.py
Apache-2.0
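Taken together, acquire and release imply a try/except discipline on the caller's side: every acquired connection must go back to the pool, and a connection that failed mid-query is better discarded than reused. A sketch under those assumptions; `pool` stands for an instance of the surrounding pool class and conn.sql_fetch is an illustrative connection call:

async def run_query(pool, dbname: str, query: bytes):
    conn = await pool.acquire(dbname)
    try:
        result = await conn.sql_fetch(query)   # assumed connection API
    except Exception:
        # A connection that failed mid-query should not be reused.
        pool.release(dbname, conn, discard=True)
        raise
    else:
        pool.release(dbname, conn)
        return result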
def _check_object_set_uniqueness(
    setting: spec.Setting, objs: Iterable[types.CompositeConfigType]
) -> frozenset[types.CompositeConfigType]:
    """Check the unique constraints for an object set"""
    new_values = set()
    exclusive_keys: dict[tuple[str, str], Any] = {}
    for new_value in objs:
        tspec = new_value._tspec
        for name in tspec.fields:
            if (val := getattr(new_value, name, None)) is None:
                continue
            if (site := tspec.get_field_unique_site(name)):
                key = (site.name, name)
                current = exclusive_keys.setdefault(key, set())
                if val in current:
                    raise errors.ConstraintViolationError(
                        f'{setting.type.__name__}.{name} '
                        f'violates exclusivity constraint'
                    )
                current.add(val)

        if new_value in new_values:
            raise errors.ConstraintViolationError(
                f'{setting.type.__name__} has no unique values'
            )
        new_values.add(new_value)

    if len(new_values) > MAX_CONFIG_SET_SIZE:
        raise errors.ConfigurationError(
            f'invalid value for the '
            f'{setting.name!r} setting: set is too large')

    return frozenset(new_values)
Check the unique constraints for an object set
_check_object_set_uniqueness
python
geldata/gel
edb/server/config/ops.py
https://github.com/geldata/gel/blob/master/edb/server/config/ops.py
Apache-2.0
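The exclusivity bookkeeping above (one set per unique site, built lazily with setdefault, raising on the first duplicate) lifts out cleanly into a standalone form. A minimal sketch over plain dicts, assuming nothing about the config type machinery:

def check_unique(rows: list[dict], unique_fields: set[str]) -> None:
    # One "seen" set per exclusive field, created on first use.
    seen: dict[str, set] = {}
    for row in rows:
        for name in unique_fields:
            val = row.get(name)
            if val is None:
                continue
            bucket = seen.setdefault(name, set())
            if val in bucket:
                raise ValueError(f"{name}={val!r} violates exclusivity")
            bucket.add(val)

check_unique([{"dsn": "a"}, {"dsn": "b"}], {"dsn"})    # ok
# check_unique([{"dsn": "a"}, {"dsn": "a"}], {"dsn"})  # would raise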
def from_pyvalue(cls, v, *, tspec, spec, allow_missing=False):
    """Subclasses override this to allow creation from Python scalars."""
    raise NotImplementedError
Subclasses override this to allow creation from Python scalars.
from_pyvalue
python
geldata/gel
edb/server/config/types.py
https://github.com/geldata/gel/blob/master/edb/server/config/types.py
Apache-2.0
def is_process_running(pid: int):
    """Check if there is a running process with `pid`."""
    try:
        os.kill(pid, 0)
        return True
    except OSError as ex:
        if ex.errno == errno.ESRCH:
            return False
        else:
            raise
Check if there is a running process with `pid`.
is_process_running
python
geldata/gel
edb/server/daemon/lib.py
https://github.com/geldata/gel/blob/master/edb/server/daemon/lib.py
Apache-2.0
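A hypothetical way this helper gets used is stale-pidfile detection; the pidfile path below is purely illustrative:

def pidfile_is_stale(path: str = "/var/run/edgedb.pid") -> bool:
    # Uses is_process_running() from above; a missing or unreadable
    # pidfile is treated the same as a dead process.
    try:
        with open(path) as f:
            pid = int(f.read().strip())
    except (OSError, ValueError):
        return True
    return not is_process_running(pid)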
def lock_file(fileno: int):
    """Lock file. Returns ``True`` if succeeded, ``False`` otherwise."""
    try:
        # Try to lock file exclusively and in non-blocking fashion
        fcntl.flock(fileno, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except IOError:
        return False
    else:
        return True
Lock file. Returns ``True`` if succeeded, ``False`` otherwise.
lock_file
python
geldata/gel
edb/server/daemon/lib.py
https://github.com/geldata/gel/blob/master/edb/server/daemon/lib.py
Apache-2.0
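A sketch of the usual pairing: keep the locked descriptor open for the daemon's lifetime so the advisory lock stays held until exit. The lock file name here is an assumption:

import os

pidfile = open("daemon.lock", "w")   # must stay open; closing drops the lock
if not lock_file(pidfile.fileno()):
    raise SystemExit("another instance is already running")
pidfile.write(str(os.getpid()))
pidfile.flush()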
def make_readonly(path: str):
    """Make a file read-only."""
    assert os.path.isfile(path)
    os.chmod(path, stat.S_IROTH | stat.S_IRUSR | stat.S_IRGRP)
Make a file read-only.
make_readonly
python
geldata/gel
edb/server/daemon/lib.py
https://github.com/geldata/gel/blob/master/edb/server/daemon/lib.py
Apache-2.0
def change_working_directory(path: str):
    """Change the working directory for this process."""
    try:
        os.chdir(path)
    except OSError as ex:
        raise DaemonError(
            'Unable to change working directory to {!r}'.format(path)) from ex
Change the working directory for this process.
change_working_directory
python
geldata/gel
edb/server/daemon/lib.py
https://github.com/geldata/gel/blob/master/edb/server/daemon/lib.py
Apache-2.0
def change_process_gid(gid: int):
    """Change the GID of this process.

    Requires appropriate OS privileges for this process.
    """
    try:
        os.setgid(gid)
    except OSError as ex:
        raise DaemonError(
            'Unable to change the owning GID to {!r}'.format(gid)) from ex
Change the GID of this process. Requires appropriate OS privileges for this process.
change_process_gid
python
geldata/gel
edb/server/daemon/lib.py
https://github.com/geldata/gel/blob/master/edb/server/daemon/lib.py
Apache-2.0
def change_process_uid(uid: int):
    """Change the UID of this process.

    Requires appropriate OS privileges for this process.
    """
    try:
        os.setuid(uid)
    except OSError as ex:
        raise DaemonError(
            'Unable to change the owning UID to {!r}'.format(uid)) from ex
Change the UID of this process. Requires appropriate OS privileges for this process.
change_process_uid
python
geldata/gel
edb/server/daemon/lib.py
https://github.com/geldata/gel/blob/master/edb/server/daemon/lib.py
Apache-2.0
def change_umask(mask: int):
    """Change process umask."""
    try:
        os.umask(mask)
    except (OSError, OverflowError) as ex:
        raise DaemonError('Unable to set process umask to {:#o}'.format(
            mask)) from ex
Change process umask.
change_umask
python
geldata/gel
edb/server/daemon/lib.py
https://github.com/geldata/gel/blob/master/edb/server/daemon/lib.py
Apache-2.0
def prevent_core_dump():
    """Prevent this process from generating a core dump."""
    core_resource = resource.RLIMIT_CORE

    try:
        resource.getrlimit(core_resource)
    except ValueError as ex:
        raise DaemonError(
            'Unable to limit core dump size: '
            'system does not support RLIMIT_CORE resource limit') from ex

    # Set hard & soft limits to 0, i.e. no core dump at all
    resource.setrlimit(core_resource, (0, 0))
Prevent this process from generating a core dump.
prevent_core_dump
python
geldata/gel
edb/server/daemon/lib.py
https://github.com/geldata/gel/blob/master/edb/server/daemon/lib.py
Apache-2.0
def detach_process_context():
    """Detach process context.

    Does it in three steps:

    1. Forks and exits parent process.  This detaches us from shell,
       and since the child will have a new PID but will inherit the
       Group PID from parent, the new process will not be a group
       leader.

    2. Call 'setsid' to create a new session.  This makes the process
       a session leader of a new session, process becomes the process
       group leader of a new process group and it doesn't have a
       controlling terminal.

    3. Fork and exit parent again.  This guarantees that the daemon is
       not a session leader, which prevents it from acquiring a
       controlling terminal.

    Reference: “Advanced Programming in the Unix Environment”,
    section 13.3, by W. Richard Stevens.
    """

    def fork_and_exit_parent(error_message):
        try:
            if os.fork() > 0:
                # Don't need to call 'sys.exit', as we don't want to
                # run any python interpreter clean-up handlers
                os._exit(0)
        except OSError as ex:
            raise DaemonError(
                '{}: [{}] {}'.format(
                    error_message, ex.errno, ex.strerror)) from ex

    fork_and_exit_parent(error_message='Failed the first fork')
    os.setsid()
    fork_and_exit_parent(error_message='Failed the second fork')
Detach process context. Does it in three steps: 1. Forks and exits parent process. This detaches us from shell, and since the child will have a new PID but will inherit the Group PID from parent, the new process will not be a group leader. 2. Call 'setsid' to create a new session. This makes the process a session leader of a new session, process becomes the process group leader of a new process group and it doesn't have a controlling terminal. 3. Fork and exit parent again. This guarantees that the daemon is not a session leader, which prevents it from acquiring a controlling terminal. Reference: “Advanced Programming in the Unix Environment”, section 13.3, by W. Richard Stevens.
detach_process_context
python
geldata/gel
edb/server/daemon/lib.py
https://github.com/geldata/gel/blob/master/edb/server/daemon/lib.py
Apache-2.0
def is_process_started_by_init():
    """Determine if the current process is started by 'init'."""
    # The 'init' process has its PID set to 1.
    return os.getppid() == 1
Determine if the current process is started by 'init'.
is_process_started_by_init
python
geldata/gel
edb/server/daemon/lib.py
https://github.com/geldata/gel/blob/master/edb/server/daemon/lib.py
Apache-2.0
def is_socket(fd):
    """Determine if the file descriptor is a socket."""
    file_socket = socket.fromfd(fd, socket.AF_INET, socket.SOCK_RAW)

    try:
        file_socket.getsockopt(socket.SOL_SOCKET, socket.SO_TYPE)
    except socket.error as ex:
        return ex.args[0] != errno.ENOTSOCK
    else:
        return True
Determine if the file descriptor is a socket.
is_socket
python
geldata/gel
edb/server/daemon/lib.py
https://github.com/geldata/gel/blob/master/edb/server/daemon/lib.py
Apache-2.0
def is_process_started_by_superserver():
    """Determine if the current process is started by the superserver."""
    # The internet superserver creates a network socket, and
    # attaches it to the standard streams of the child process.
    try:
        fileno = sys.__stdin__.fileno()
    except Exception:
        return False
    else:
        return is_socket(fileno)
Determine if the current process is started by the superserver.
is_process_started_by_superserver
python
geldata/gel
edb/server/daemon/lib.py
https://github.com/geldata/gel/blob/master/edb/server/daemon/lib.py
Apache-2.0
def is_detach_process_context_required():
    """Determine whether detaching process context is required.

    Returns ``True`` unless:
        - Process was started by `init`; or
        - Process was started by `inetd`.
    """
    return (not is_process_started_by_init()
            and not is_process_started_by_superserver())
Determine whether detaching process context is required. Returns ``True`` unless: - Process was started by `init`; or - Process was started by `inetd`.
is_detach_process_context_required
python
geldata/gel
edb/server/daemon/lib.py
https://github.com/geldata/gel/blob/master/edb/server/daemon/lib.py
Apache-2.0
def get_max_fileno(default: int = 2048):
    """Return the maximum number of open file descriptors."""
    limit = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
    if limit == resource.RLIM_INFINITY:
        return default
    return limit
Return the maximum number of open file descriptors.
get_max_fileno
python
geldata/gel
edb/server/daemon/lib.py
https://github.com/geldata/gel/blob/master/edb/server/daemon/lib.py
Apache-2.0
def try_close_fileno(fileno: int):
    """Try to close fileno."""
    try:
        os.close(fileno)
    except OSError as ex:
        if ex.errno != errno.EBADF:
            raise DaemonError(
                'Failed to close file descriptor {}'.format(fileno))
Try to close fileno.
try_close_fileno
python
geldata/gel
edb/server/daemon/lib.py
https://github.com/geldata/gel/blob/master/edb/server/daemon/lib.py
Apache-2.0
def close_all_open_files(exclude: Optional[set] = None):
    """Close all open file descriptors."""
    maxfd = get_max_fileno()
    if exclude:
        for fd in reversed(range(maxfd)):
            if fd not in exclude:
                try_close_fileno(fd)
    else:
        for fd in reversed(range(maxfd)):
            try_close_fileno(fd)
Close all open file descriptors.
close_all_open_files
python
geldata/gel
edb/server/daemon/lib.py
https://github.com/geldata/gel/blob/master/edb/server/daemon/lib.py
Apache-2.0
def redirect_stream(stream_name: str, target_stream: io.FileIO):
    """Redirect a system stream to the specified file.

    If ``target_stream`` is None - redirect to devnull.
    """
    if target_stream is None:
        target_fd = os.open(os.devnull, os.O_RDWR)
    else:
        target_fd = target_stream.fileno()

    system_stream = getattr(sys, stream_name)
    os.dup2(target_fd, system_stream.fileno())
    setattr(sys, '__{}__'.format(stream_name), system_stream)
Redirect a system stream to the specified file. If ``target_stream`` is None - redirect to devnull.
redirect_stream
python
geldata/gel
edb/server/daemon/lib.py
https://github.com/geldata/gel/blob/master/edb/server/daemon/lib.py
Apache-2.0
def validate_stream(stream, *, stream_name):
    """Check if `stream` is an open io.IOBase instance."""
    if not isinstance(stream, io.IOBase):
        raise DaemonError(
            'Invalid {} stream object, an instance of io.IOBase is expected'.
            format(stream_name))

    if stream.closed:
        raise DaemonError('Stream {} is already closed'.format(stream_name))
Check if `stream` is an open io.IOBase instance.
validate_stream
python
geldata/gel
edb/server/daemon/lib.py
https://github.com/geldata/gel/blob/master/edb/server/daemon/lib.py
Apache-2.0
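The daemon helpers above compose into a classic double-fork daemonization sequence. A rough sketch under assumed ordering; the package's full daemon context implementation (not shown here) wires these steps together with more care (pidfiles, signal handlers, fd exclusions):

def daemonize(log_path: str = "daemon.log") -> None:
    # Order is an assumption: detach first, then sanitize the process
    # environment, then repoint the standard streams.
    if is_detach_process_context_required():
        detach_process_context()          # double fork + setsid
    change_working_directory('/')
    change_umask(0o022)
    prevent_core_dump()
    log = open(log_path, 'ab')
    redirect_stream('stdin', None)        # None -> /dev/null
    redirect_stream('stdout', log)
    redirect_stream('stderr', log)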
def remaining_time(self, max_delay: float) -> float:
    """How long before this timer is ready in seconds."""
    if self.urgent:
        if self.time is None:
            return 0
        else:
            # 1ms extra, just in case
            now = asyncio.get_running_loop().time()
            delay = self.time - now + 0.001
            return min(max(0, delay), max_delay)
    else:
        # If not urgent, wait as long as possible
        return max_delay
How long before this timer is ready in seconds.
remaining_time
python
geldata/gel
edb/server/protocol/request_scheduler.py
https://github.com/geldata/gel/blob/master/edb/server/protocol/request_scheduler.py
Apache-2.0
def combine(timers: Iterable[Timer]) -> Optional[Timer]:
    """Combine the timers to determine when to take the next action.

    If the timers are (1, False), (2, False), (3, True), it may be
    wasteful to act at times [1, 2, 3]. Instead, we would prefer to act
    only once, at time 3, since only the third action was urgent.
    """
    for target_urgency in [True, False]:
        if any(
            timer.time is None and timer.urgent == target_urgency
            for timer in timers
        ):
            # An action should be taken right away.
            return Timer(None, target_urgency)

        urgent_times = [
            timer.time
            for timer in timers
            if timer.time is not None and timer.urgent == target_urgency
        ]
        if len(urgent_times) > 0:
            # An action should be taken after some delay
            return Timer(min(urgent_times), target_urgency)

    # Nothing to do
    return None
Combine the timers to determine when to take the next action. If the timers are (1, False), (2, False), (3, True), it may be wasteful to act at times [1, 2, 3]. Instead, we would prefer to act only once, at time 3, since only the third action was urgent.
combine
python
geldata/gel
edb/server/protocol/request_scheduler.py
https://github.com/geldata/gel/blob/master/edb/server/protocol/request_scheduler.py
Apache-2.0
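A sketch of how remaining_time and combine could drive a single scheduling-loop iteration. The Timer(time, urgent) construction mirrors the code above, but exposing combine as Timer.combine, the svc.timer attribute, and the 60-second cap are all assumptions:

import asyncio

async def scheduler_tick(services) -> None:
    timers = [svc.timer for svc in services if svc.timer is not None]
    next_timer = Timer.combine(timers)    # assumed to be exposed on Timer
    if next_timer is None:
        await asyncio.sleep(60)           # nothing scheduled; nap
        return
    await asyncio.sleep(next_timer.remaining_time(max_delay=60))
    # ... run whichever services are now due ...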
async def get_params(
    self,
    context: Context,
) -> Optional[Sequence[Params[_T]]]:
    """Get parameters for the requests to run."""
    raise NotImplementedError
Get parameters for the requests to run.
get_params
python
geldata/gel
edb/server/protocol/request_scheduler.py
https://github.com/geldata/gel/blob/master/edb/server/protocol/request_scheduler.py
Apache-2.0
def finalize(self, execution_report: ExecutionReport) -> None:
    """An optional final step after executing requests"""
    pass
An optional final step after executing requests
finalize
python
geldata/gel
edb/server/protocol/request_scheduler.py
https://github.com/geldata/gel/blob/master/edb/server/protocol/request_scheduler.py
Apache-2.0
def next_delay(
    self,
    success_count: int,
    deferred_costs: dict[str, int],
    error_count: int,
    naptime: float,
) -> Timer:
    """When the service should be processed again."""
    if self.limits is not None:
        # Find the limit with the largest delay
        limit_delays: dict[str, Optional[float]] = {}
        for limit_names, service_limit in self.limits.items():
            if service_limit is None:
                # If no information is available, assume no limits
                limit_delays[limit_names] = None
            else:
                base_delay = service_limit.base_delay(
                    deferred_costs[limit_names],
                    guess=self.guess_delay,
                )
                if base_delay is None:
                    limit_delays[limit_names] = None
                else:
                    # If delay_factor is very high, it may take quite a long
                    # time for it to return to 1. A maximum delay prevents
                    # this service from never getting checked.
                    limit_delays[limit_names] = min(
                        base_delay * service_limit.delay_factor,
                        self.delay_max,
                    )

        delay = _get_maximum_delay(limit_delays)
    else:
        # We have absolutely no information about the delay, assume naptime.
        delay = naptime

    if error_count > 0:
        # There was an error, wait before trying again.
        # Use the larger of delay or naptime.
        delay = max(delay, naptime) if delay is not None else naptime
        urgent = False

    elif any(
        deferred_cost > 0
        for deferred_cost in deferred_costs.values()
    ):
        # There is some deferred work, apply the delay and run immediately.
        urgent = True

    elif success_count > 0:
        # Some work was done successfully. Run again to ensure no more work
        # needs to be done.
        delay = None
        urgent = True

    else:
        # No work left to do, wait before trying again.
        # Use the larger of delay or naptime.
        delay = max(delay, naptime) if delay is not None else naptime
        urgent = False

    return Timer.create_delay(delay, urgent)
When the service should be processed again.
next_delay
python
geldata/gel
edb/server/protocol/request_scheduler.py
https://github.com/geldata/gel/blob/master/edb/server/protocol/request_scheduler.py
Apache-2.0
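A small worked reading of the fallback rules at the end of next_delay, with made-up numbers: an error keeps the larger of the computed delay and naptime and marks the timer non-urgent, while pending deferred work keeps the delay but makes the timer urgent:

naptime, delay = 30.0, 10.0          # assumed values for illustration
error_count = 1
deferred_costs = {"requests": 0}

if error_count > 0:
    delay, urgent = max(delay, naptime), False     # -> (30.0, False)
elif any(cost > 0 for cost in deferred_costs.values()):
    urgent = True                                  # delay kept, run is urgent
else:
    urgent = False
print(delay, urgent)                               # 30.0 False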
def update_total(self, latest: Limits) -> Limits:
    """Update total based on the latest information.

    The total will change rarely. Always take the latest value if it exists
    """
    if latest.total is not None:
        self.total = latest.total
    return self
Update total based on the latest information. The total will change rarely. Always take the latest value if it exists
update_total
python
geldata/gel
edb/server/protocol/request_scheduler.py
https://github.com/geldata/gel/blob/master/edb/server/protocol/request_scheduler.py
Apache-2.0