effect (string, 48 classes) | original_source_type (string, 0–23k chars) | opens_and_abbrevs (list, 2–92 items) | isa_cross_project_example (bool, 1 class) | source_definition (string, 9–57.9k chars) | partial_definition (string, 7–23.3k chars) | is_div (bool, 2 classes) | is_type (null) | is_proof (bool, 2 classes) | completed_definiton (string, 1–250k chars) | dependencies (dict) | effect_flags (sequence, 0–2 items) | ideal_premises (sequence, 0–236 items) | mutual_with (sequence, 0–11 items) | file_context (string, 0–407k chars) | interleaved (bool, 1 class) | is_simply_typed (bool, 2 classes) | file_name (string, 5–48 chars) | vconfig (dict) | is_simple_lemma (null) | source_type (string, 10–23k chars) | proof_features (sequence, 0–1 items) | name (string, 8–95 chars) | source (dict) | verbose_type (string, 1–7.42k chars) | source_range (dict)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
Prims.Tot | val smul_add_felem5:
#m1:scale64
-> #m2:scale64_5
-> #m3:scale128_5
-> u1:uint64{felem_fits1 u1 m1}
-> f2:felem5{felem_fits5 f2 m2}
-> acc1:felem_wide5{felem_wide_fits5 acc1 m3 /\ m3 +* m1 *^ m2 <=* s128x5 67108864}
-> acc2:felem_wide5{
wide_as_nat5 acc2 == wide_as_nat5 acc1 + uint_v u1 * as_nat5 f2 /\
felem_wide_fits5 acc2 (m3 +* m1 *^ m2)} | [
{
"abbrev": false,
"full_module": "Hacl.Spec.Curve25519.Field51.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Curve25519.Field51.Definition",
"short_module": null
},
{
"abbrev": false,
"full_module": "Spec.Curve25519",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Curve25519",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Curve25519",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let smul_add_felem5 #m1 #m2 #m3 u1 (f20, f21, f22, f23, f24) (o0, o1, o2, o3, o4) =
let (m20, m21, m22, m23, m24) = m2 in
let (m30, m31, m32, m33, m34) = m3 in
[@inline_let]
let o0' = mul_add_wide128 #m1 #m20 #m30 u1 f20 o0 in
[@inline_let]
let o1' = mul_add_wide128 #m1 #m21 #m31 u1 f21 o1 in
[@inline_let]
let o2' = mul_add_wide128 #m1 #m22 #m32 u1 f22 o2 in
[@inline_let]
let o3' = mul_add_wide128 #m1 #m23 #m33 u1 f23 o3 in
[@inline_let]
let o4' = mul_add_wide128 #m1 #m24 #m34 u1 f24 o4 in
[@inline_let]
let out = (o0', o1', o2', o3', o4') in
lemma_smul_add_felem5 u1 (f20, f21, f22, f23, f24) (o0, o1, o2, o3, o4);
out | val smul_add_felem5:
#m1:scale64
-> #m2:scale64_5
-> #m3:scale128_5
-> u1:uint64{felem_fits1 u1 m1}
-> f2:felem5{felem_fits5 f2 m2}
-> acc1:felem_wide5{felem_wide_fits5 acc1 m3 /\ m3 +* m1 *^ m2 <=* s128x5 67108864}
-> acc2:felem_wide5{
wide_as_nat5 acc2 == wide_as_nat5 acc1 + uint_v u1 * as_nat5 f2 /\
felem_wide_fits5 acc2 (m3 +* m1 *^ m2)}
let smul_add_felem5 #m1 #m2 #m3 u1 (f20, f21, f22, f23, f24) (o0, o1, o2, o3, o4) = | false | null | false | let m20, m21, m22, m23, m24 = m2 in
let m30, m31, m32, m33, m34 = m3 in
[@@ inline_let ]let o0' = mul_add_wide128 #m1 #m20 #m30 u1 f20 o0 in
[@@ inline_let ]let o1' = mul_add_wide128 #m1 #m21 #m31 u1 f21 o1 in
[@@ inline_let ]let o2' = mul_add_wide128 #m1 #m22 #m32 u1 f22 o2 in
[@@ inline_let ]let o3' = mul_add_wide128 #m1 #m23 #m33 u1 f23 o3 in
[@@ inline_let ]let o4' = mul_add_wide128 #m1 #m24 #m34 u1 f24 o4 in
[@@ inline_let ]let out = (o0', o1', o2', o3', o4') in
lemma_smul_add_felem5 u1 (f20, f21, f22, f23, f24) (o0, o1, o2, o3, o4);
out | {
"checked_file": "Hacl.Spec.Curve25519.Field51.fst.checked",
"dependencies": [
"Spec.Curve25519.fst.checked",
"prims.fst.checked",
"Lib.Sequence.fsti.checked",
"Lib.IntTypes.fsti.checked",
"Hacl.Spec.Curve25519.Field51.Lemmas.fst.checked",
"Hacl.Spec.Curve25519.Field51.Definition.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked"
],
"interface_file": false,
"source_file": "Hacl.Spec.Curve25519.Field51.fst"
} | [
"total"
] | [
"Hacl.Spec.Curve25519.Field51.Definition.scale64",
"Hacl.Spec.Curve25519.Field51.Definition.scale64_5",
"Hacl.Spec.Curve25519.Field51.Definition.scale128_5",
"Lib.IntTypes.uint64",
"Prims.b2t",
"Hacl.Spec.Curve25519.Field51.Definition.felem_fits1",
"Hacl.Spec.Curve25519.Field51.Definition.felem5",
"Hacl.Spec.Curve25519.Field51.Definition.felem_fits5",
"Hacl.Spec.Curve25519.Field51.Definition.felem_wide5",
"Prims.l_and",
"Hacl.Spec.Curve25519.Field51.Definition.felem_wide_fits5",
"Hacl.Spec.Curve25519.Field51.Definition.op_Less_Equals_Star",
"Hacl.Spec.Curve25519.Field51.Definition.op_Plus_Star",
"Hacl.Spec.Curve25519.Field51.Definition.op_Star_Hat",
"Hacl.Spec.Curve25519.Field51.Definition.s128x5",
"FStar.Pervasives.Native.Mktuple2",
"FStar.Pervasives.Native.tuple5",
"Lib.IntTypes.uint128",
"Prims.nat",
"Prims.unit",
"Hacl.Spec.Curve25519.Field51.Lemmas.lemma_smul_add_felem5",
"FStar.Pervasives.Native.Mktuple5",
"Lib.IntTypes.int_t",
"Lib.IntTypes.U128",
"Lib.IntTypes.SEC",
"Prims.eq2",
"Prims.int",
"Lib.IntTypes.v",
"Prims.op_Addition",
"Prims.op_Multiply",
"Lib.IntTypes.U64",
"Hacl.Spec.Curve25519.Field51.Definition.felem_wide_fits1",
"Hacl.Spec.Curve25519.Field51.mul_add_wide128",
"Hacl.Spec.Curve25519.Field51.Definition.wide_as_nat5",
"FStar.Mul.op_Star",
"Lib.IntTypes.uint_v",
"Hacl.Spec.Curve25519.Field51.Definition.as_nat5"
] | [] | module Hacl.Spec.Curve25519.Field51
open Lib.Sequence
open Lib.IntTypes
open FStar.Mul
open Spec.Curve25519
open Hacl.Spec.Curve25519.Field51.Definition
open Hacl.Spec.Curve25519.Field51.Lemmas
#reset-options "--z3rlimit 50 --fuel 0 --ifuel 0 --using_facts_from '* -FStar.Seq'"
inline_for_extraction noextract
val fadd5:
f1:felem5{felem_fits5 f1 (1, 2, 1, 1, 1)}
-> f2:felem5{felem_fits5 f2 (1, 2, 1, 1, 1)}
-> out:felem5{felem_fits5 out (2, 4, 2, 2, 2) /\
feval out == fadd (feval f1) (feval f2)}
let fadd5 (f10, f11, f12, f13, f14) (f20, f21, f22, f23, f24) =
let o0 = f10 +! f20 in
let o1 = f11 +! f21 in
let o2 = f12 +! f22 in
let o3 = f13 +! f23 in
let o4 = f14 +! f24 in
let out = (o0, o1, o2, o3, o4) in
FStar.Math.Lemmas.lemma_mod_plus_distr_l
(as_nat5 (f10, f11, f12, f13, f14)) (as_nat5 (f20, f21, f22, f23, f24)) prime;
FStar.Math.Lemmas.lemma_mod_plus_distr_r
((as_nat5 (f10, f11, f12, f13, f14)) % prime) (as_nat5 (f20, f21, f22, f23, f24)) prime;
out
inline_for_extraction noextract
val fadd_zero:
f1:felem5{felem_fits5 f1 (1, 2, 1, 1, 1)}
-> out:felem5{felem_fits5 out (9, 10, 9, 9, 9) /\
feval out == feval f1}
let fadd_zero (f10, f11, f12, f13, f14) =
let o0 = f10 +! u64 0x3fffffffffff68 in
let o1 = f11 +! u64 0x3ffffffffffff8 in
let o2 = f12 +! u64 0x3ffffffffffff8 in
let o3 = f13 +! u64 0x3ffffffffffff8 in
let o4 = f14 +! u64 0x3ffffffffffff8 in
lemma_add_zero (f10, f11, f12, f13, f14);
(o0, o1, o2, o3, o4)
inline_for_extraction noextract
val fsub5:
f1:felem5{felem_fits5 f1 (1, 2, 1, 1, 1)}
-> f2:felem5{felem_fits5 f2 (1, 2, 1, 1, 1)}
-> out:felem5{felem_fits5 out (9, 10, 9, 9, 9) /\
feval out == fsub (feval f1) (feval f2)}
let fsub5 (f10, f11, f12, f13, f14) (f20, f21, f22, f23, f24) =
//assert_norm (0x3fffffffffff68 == pow2 54 - 152);
//assert_norm (0x3ffffffffffff8 == pow2 54 - 8);
let (t0, t1, t2, t3, t4) = fadd_zero (f10, f11, f12, f13, f14) in
let o0 = t0 -! f20 in
let o1 = t1 -! f21 in
let o2 = t2 -! f22 in
let o3 = t3 -! f23 in
let o4 = t4 -! f24 in
let out = (o0, o1, o2, o3, o4) in
FStar.Math.Lemmas.lemma_mod_plus_distr_l
(as_nat5 (t0, t1, t2, t3, t4)) (- as_nat5 (f20, f21, f22, f23, f24)) prime;
lemma_mod_sub_distr ((as_nat5 (t0, t1, t2, t3, t4)) % prime) (as_nat5 (f20, f21, f22, f23, f24)) prime;
out
val lemma_fsub:
f1:felem5{felem_fits5 f1 (1, 2, 1, 1, 1)}
-> f2:felem5{felem_fits5 f2 (1, 2, 1, 1, 1)}
-> Lemma (let (f10, f11, f12, f13, f14) = f1 in
let (f20, f21, f22, f23, f24) = f2 in
let o0 = f10 +! u64 0x3fffffffffff68 -! f20 in
let o1 = f11 +! u64 0x3ffffffffffff8 -! f21 in
let o2 = f12 +! u64 0x3ffffffffffff8 -! f22 in
let o3 = f13 +! u64 0x3ffffffffffff8 -! f23 in
let o4 = f14 +! u64 0x3ffffffffffff8 -! f24 in
let out = (o0, o1, o2, o3, o4) in
out == fsub5 (f10, f11, f12, f13, f14) (f20, f21, f22, f23, f24))
let lemma_fsub f1 f2 = ()
inline_for_extraction noextract
val mul_wide64:
#m1:scale64
-> #m2:scale64
-> x:uint64{felem_fits1 x m1}
-> y:uint64{felem_fits1 y m2 /\ m1 * m2 <= 67108864}
-> z:uint128{uint_v z == uint_v x * uint_v y /\ felem_wide_fits1 z (m1 * m2)}
#push-options "--z3rlimit 5"
let mul_wide64 #m1 #m2 x y =
let open FStar.Math.Lemmas in
lemma_mult_le_left (v x) (v y) (m2 * max51); //v x * v y <= v x * (m2 * max51)
lemma_mult_le_right (m2 * max51) (v x) (m1 * max51); // v x * (m2 * max51) <= (m1 * max51) * (m2 * max51)
paren_mul_right (m1 * max51) m2 max51; //(m1 * max51) * (m2 * max51) = ((m1 * max51) * m2) * max51
paren_mul_right m1 max51 m2; //(m1 * max51) * m2 = m1 * (max51 * m2)
swap_mul max51 m2; //max51 * m2 = m2 * max51
paren_mul_right m1 m2 max51; //m1 * (m2 * max51) = (m1 * m2) * max51
paren_mul_right (m1 * m2) max51 max51; //((m1 * m2) * max51) * max51 = (m1 * m2) * (max51 * max51)
assert (v x * v y <= m1 * max51 * m2 * max51);
assert (v x * v y <= m1 * m2 * max51 * max51);
mul64_wide x y
#pop-options
inline_for_extraction noextract
val smul_felem5:
#m1:scale64
-> #m2:scale64_5
-> u1:uint64{felem_fits1 u1 m1}
-> f2:felem5{felem_fits5 f2 m2 /\ m1 *^ m2 <=* s128x5 67108864}
-> out:felem_wide5{felem_wide_fits5 out (m1 *^ m2) /\
wide_as_nat5 out == uint_v u1 * as_nat5 f2}
let smul_felem5 #m1 #m2 u1 (f20, f21, f22, f23, f24) =
let (m20, m21, m22, m23, m24) = m2 in
[@inline_let]
let o0 = mul_wide64 #m1 #m20 u1 f20 in
[@inline_let]
let o1 = mul_wide64 #m1 #m21 u1 f21 in
[@inline_let]
let o2 = mul_wide64 #m1 #m22 u1 f22 in
[@inline_let]
let o3 = mul_wide64 #m1 #m23 u1 f23 in
[@inline_let]
let o4 = mul_wide64 #m1 #m24 u1 f24 in
[@inline_let]
let out = (o0, o1, o2, o3, o4) in
lemma_smul_felem5 u1 (f20, f21, f22, f23, f24);
out
inline_for_extraction noextract
val mul_add_wide128:
#m1:scale64
-> #m2:scale64
-> #m3:scale128
-> x:uint64{felem_fits1 x m1}
-> y:uint64{felem_fits1 y m2}
-> z:uint128{felem_wide_fits1 z m3 /\ m3 + m1 * m2 <= 67108864}
-> r:uint128{uint_v r == uint_v z + uint_v x * uint_v y /\ felem_wide_fits1 r (m3 + m1 * m2)}
let mul_add_wide128 #m1 #m2 #m3 x y z =
z +! mul_wide64 #m1 #m2 x y
#push-options "--z3rlimit 100"
inline_for_extraction noextract
val smul_add_felem5:
#m1:scale64
-> #m2:scale64_5
-> #m3:scale128_5
-> u1:uint64{felem_fits1 u1 m1}
-> f2:felem5{felem_fits5 f2 m2}
-> acc1:felem_wide5{felem_wide_fits5 acc1 m3 /\ m3 +* m1 *^ m2 <=* s128x5 67108864}
-> acc2:felem_wide5{
wide_as_nat5 acc2 == wide_as_nat5 acc1 + uint_v u1 * as_nat5 f2 /\ | false | false | Hacl.Spec.Curve25519.Field51.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 0,
"initial_ifuel": 0,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 100,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val smul_add_felem5:
#m1:scale64
-> #m2:scale64_5
-> #m3:scale128_5
-> u1:uint64{felem_fits1 u1 m1}
-> f2:felem5{felem_fits5 f2 m2}
-> acc1:felem_wide5{felem_wide_fits5 acc1 m3 /\ m3 +* m1 *^ m2 <=* s128x5 67108864}
-> acc2:felem_wide5{
wide_as_nat5 acc2 == wide_as_nat5 acc1 + uint_v u1 * as_nat5 f2 /\
felem_wide_fits5 acc2 (m3 +* m1 *^ m2)} | [] | Hacl.Spec.Curve25519.Field51.smul_add_felem5 | {
"file_name": "code/curve25519/Hacl.Spec.Curve25519.Field51.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} |
u1: Lib.IntTypes.uint64{Hacl.Spec.Curve25519.Field51.Definition.felem_fits1 u1 m1} ->
f2:
Hacl.Spec.Curve25519.Field51.Definition.felem5
{Hacl.Spec.Curve25519.Field51.Definition.felem_fits5 f2 m2} ->
acc1:
Hacl.Spec.Curve25519.Field51.Definition.felem_wide5
{ Hacl.Spec.Curve25519.Field51.Definition.felem_wide_fits5 acc1 m3 /\
m3 +* m1 *^ m2 <=* Hacl.Spec.Curve25519.Field51.Definition.s128x5 67108864 }
-> acc2:
Hacl.Spec.Curve25519.Field51.Definition.felem_wide5
{ Hacl.Spec.Curve25519.Field51.Definition.wide_as_nat5 acc2 ==
Hacl.Spec.Curve25519.Field51.Definition.wide_as_nat5 acc1 +
Lib.IntTypes.uint_v u1 * Hacl.Spec.Curve25519.Field51.Definition.as_nat5 f2 /\
Hacl.Spec.Curve25519.Field51.Definition.felem_wide_fits5 acc2 (m3 +* m1 *^ m2) } | {
"end_col": 5,
"end_line": 168,
"start_col": 83,
"start_line": 152
} |
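
The `smul_add_felem5` record above is governed by the postcondition `wide_as_nat5 acc2 == wide_as_nat5 acc1 + uint_v u1 * as_nat5 f2`: multiply-accumulating the scalar `u1` into each 128-bit limb adds `u1 * as_nat5 f2` to the accumulator's value, while the scale bound `m3 +* m1 *^ m2 <=* s128x5 67108864` rules out limb overflow. The sketch below is an editorial Python model of that identity, not part of the dataset; plain integers stand in for `uint64`/`uint128`, `as_nat5` mirrors the F* function of the same name, and the test values are arbitrary.

```python
# Python model (editorial, not from the dataset) of the smul_add_felem5 contract:
# adding u1 * f2[i] into each wide accumulator limb adds u1 * as_nat5(f2) overall.
import random

def as_nat5(f):
    # radix-2^51 value: f[0] + f[1]*2^51 + f[2]*2^102 + f[3]*2^153 + f[4]*2^204
    return sum(limb << (51 * i) for i, limb in enumerate(f))

def smul_add_felem5(u1, f2, acc1):
    # limb-wise multiply-accumulate; in the F* code each acc limb is a uint128
    return [acc1[i] + u1 * f2[i] for i in range(5)]

random.seed(0)
u1 = random.randrange(1 << 51)
f2 = [random.randrange(1 << 51) for _ in range(5)]
acc1 = [random.randrange(1 << 100) for _ in range(5)]

acc2 = smul_add_felem5(u1, f2, acc1)
assert as_nat5(acc2) == as_nat5(acc1) + u1 * as_nat5(f2)
```
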
Prims.Pure | val mul64_wide_add3:
#m0:scale64 -> #m1:scale64 -> #m2:scale64
-> #m3:scale64 -> #m4:scale64 -> #m5:scale64
-> a0:uint64{felem_fits1 a0 m0}
-> a1:uint64{felem_fits1 a1 m1}
-> b0:uint64{felem_fits1 b0 m2}
-> b1:uint64{felem_fits1 b1 m3}
-> c0:uint64{felem_fits1 c0 m4}
-> c1:uint64{felem_fits1 c1 m5} ->
Pure uint128
(requires m0 * m1 + m2 * m3 + m4 * m5 < 8192)
(ensures fun res ->
felem_wide_fits1 res (m0 * m1 + m2 * m3 + m4 * m5) /\
v res == v a0 * v a1 + v b0 * v b1 + v c0 * v c1) | [
{
"abbrev": false,
"full_module": "Hacl.Spec.Curve25519.Field51.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Curve25519.Field51.Definition",
"short_module": null
},
{
"abbrev": false,
"full_module": "Spec.Curve25519",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Curve25519",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Curve25519",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let mul64_wide_add3 #m0 #m1 #m2 #m3 #m4 #m5 a0 a1 b0 b1 c0 c1 =
assert_norm (pow2 13 = 8192);
mul64_wide_add3_lemma #m0 #m1 #m2 #m3 #m4 #m5 a0 a1 b0 b1 c0 c1;
mul64_wide a0 a1 +! mul64_wide b0 b1 +! mul64_wide c0 c1 | val mul64_wide_add3:
#m0:scale64 -> #m1:scale64 -> #m2:scale64
-> #m3:scale64 -> #m4:scale64 -> #m5:scale64
-> a0:uint64{felem_fits1 a0 m0}
-> a1:uint64{felem_fits1 a1 m1}
-> b0:uint64{felem_fits1 b0 m2}
-> b1:uint64{felem_fits1 b1 m3}
-> c0:uint64{felem_fits1 c0 m4}
-> c1:uint64{felem_fits1 c1 m5} ->
Pure uint128
(requires m0 * m1 + m2 * m3 + m4 * m5 < 8192)
(ensures fun res ->
felem_wide_fits1 res (m0 * m1 + m2 * m3 + m4 * m5) /\
v res == v a0 * v a1 + v b0 * v b1 + v c0 * v c1)
let mul64_wide_add3 #m0 #m1 #m2 #m3 #m4 #m5 a0 a1 b0 b1 c0 c1 = | false | null | false | assert_norm (pow2 13 = 8192);
mul64_wide_add3_lemma #m0 #m1 #m2 #m3 #m4 #m5 a0 a1 b0 b1 c0 c1;
mul64_wide a0 a1 +! mul64_wide b0 b1 +! mul64_wide c0 c1 | {
"checked_file": "Hacl.Spec.Curve25519.Field51.fst.checked",
"dependencies": [
"Spec.Curve25519.fst.checked",
"prims.fst.checked",
"Lib.Sequence.fsti.checked",
"Lib.IntTypes.fsti.checked",
"Hacl.Spec.Curve25519.Field51.Lemmas.fst.checked",
"Hacl.Spec.Curve25519.Field51.Definition.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked"
],
"interface_file": false,
"source_file": "Hacl.Spec.Curve25519.Field51.fst"
} | [] | [
"Hacl.Spec.Curve25519.Field51.Definition.scale64",
"Lib.IntTypes.uint64",
"Prims.b2t",
"Hacl.Spec.Curve25519.Field51.Definition.felem_fits1",
"Lib.IntTypes.op_Plus_Bang",
"Lib.IntTypes.U128",
"Lib.IntTypes.SEC",
"Lib.IntTypes.mul64_wide",
"Prims.unit",
"Hacl.Spec.Curve25519.Field51.Lemmas.mul64_wide_add3_lemma",
"FStar.Pervasives.assert_norm",
"Prims.op_Equality",
"Prims.int",
"Prims.pow2",
"Lib.IntTypes.uint128"
] | [] | module Hacl.Spec.Curve25519.Field51
open Lib.Sequence
open Lib.IntTypes
open FStar.Mul
open Spec.Curve25519
open Hacl.Spec.Curve25519.Field51.Definition
open Hacl.Spec.Curve25519.Field51.Lemmas
#reset-options "--z3rlimit 50 --fuel 0 --ifuel 0 --using_facts_from '* -FStar.Seq'"
inline_for_extraction noextract
val fadd5:
f1:felem5{felem_fits5 f1 (1, 2, 1, 1, 1)}
-> f2:felem5{felem_fits5 f2 (1, 2, 1, 1, 1)}
-> out:felem5{felem_fits5 out (2, 4, 2, 2, 2) /\
feval out == fadd (feval f1) (feval f2)}
let fadd5 (f10, f11, f12, f13, f14) (f20, f21, f22, f23, f24) =
let o0 = f10 +! f20 in
let o1 = f11 +! f21 in
let o2 = f12 +! f22 in
let o3 = f13 +! f23 in
let o4 = f14 +! f24 in
let out = (o0, o1, o2, o3, o4) in
FStar.Math.Lemmas.lemma_mod_plus_distr_l
(as_nat5 (f10, f11, f12, f13, f14)) (as_nat5 (f20, f21, f22, f23, f24)) prime;
FStar.Math.Lemmas.lemma_mod_plus_distr_r
((as_nat5 (f10, f11, f12, f13, f14)) % prime) (as_nat5 (f20, f21, f22, f23, f24)) prime;
out
inline_for_extraction noextract
val fadd_zero:
f1:felem5{felem_fits5 f1 (1, 2, 1, 1, 1)}
-> out:felem5{felem_fits5 out (9, 10, 9, 9, 9) /\
feval out == feval f1}
let fadd_zero (f10, f11, f12, f13, f14) =
let o0 = f10 +! u64 0x3fffffffffff68 in
let o1 = f11 +! u64 0x3ffffffffffff8 in
let o2 = f12 +! u64 0x3ffffffffffff8 in
let o3 = f13 +! u64 0x3ffffffffffff8 in
let o4 = f14 +! u64 0x3ffffffffffff8 in
lemma_add_zero (f10, f11, f12, f13, f14);
(o0, o1, o2, o3, o4)
inline_for_extraction noextract
val fsub5:
f1:felem5{felem_fits5 f1 (1, 2, 1, 1, 1)}
-> f2:felem5{felem_fits5 f2 (1, 2, 1, 1, 1)}
-> out:felem5{felem_fits5 out (9, 10, 9, 9, 9) /\
feval out == fsub (feval f1) (feval f2)}
let fsub5 (f10, f11, f12, f13, f14) (f20, f21, f22, f23, f24) =
//assert_norm (0x3fffffffffff68 == pow2 54 - 152);
//assert_norm (0x3ffffffffffff8 == pow2 54 - 8);
let (t0, t1, t2, t3, t4) = fadd_zero (f10, f11, f12, f13, f14) in
let o0 = t0 -! f20 in
let o1 = t1 -! f21 in
let o2 = t2 -! f22 in
let o3 = t3 -! f23 in
let o4 = t4 -! f24 in
let out = (o0, o1, o2, o3, o4) in
FStar.Math.Lemmas.lemma_mod_plus_distr_l
(as_nat5 (t0, t1, t2, t3, t4)) (- as_nat5 (f20, f21, f22, f23, f24)) prime;
lemma_mod_sub_distr ((as_nat5 (t0, t1, t2, t3, t4)) % prime) (as_nat5 (f20, f21, f22, f23, f24)) prime;
out
val lemma_fsub:
f1:felem5{felem_fits5 f1 (1, 2, 1, 1, 1)}
-> f2:felem5{felem_fits5 f2 (1, 2, 1, 1, 1)}
-> Lemma (let (f10, f11, f12, f13, f14) = f1 in
let (f20, f21, f22, f23, f24) = f2 in
let o0 = f10 +! u64 0x3fffffffffff68 -! f20 in
let o1 = f11 +! u64 0x3ffffffffffff8 -! f21 in
let o2 = f12 +! u64 0x3ffffffffffff8 -! f22 in
let o3 = f13 +! u64 0x3ffffffffffff8 -! f23 in
let o4 = f14 +! u64 0x3ffffffffffff8 -! f24 in
let out = (o0, o1, o2, o3, o4) in
out == fsub5 (f10, f11, f12, f13, f14) (f20, f21, f22, f23, f24))
let lemma_fsub f1 f2 = ()
inline_for_extraction noextract
val mul_wide64:
#m1:scale64
-> #m2:scale64
-> x:uint64{felem_fits1 x m1}
-> y:uint64{felem_fits1 y m2 /\ m1 * m2 <= 67108864}
-> z:uint128{uint_v z == uint_v x * uint_v y /\ felem_wide_fits1 z (m1 * m2)}
#push-options "--z3rlimit 5"
let mul_wide64 #m1 #m2 x y =
let open FStar.Math.Lemmas in
lemma_mult_le_left (v x) (v y) (m2 * max51); //v x * v y <= v x * (m2 * max51)
lemma_mult_le_right (m2 * max51) (v x) (m1 * max51); // v x * (m2 * max51) <= (m1 * max51) * (m2 * max51)
paren_mul_right (m1 * max51) m2 max51; //(m1 * max51) * (m2 * max51) = ((m1 * max51) * m2) * max51
paren_mul_right m1 max51 m2; //(m1 * max51) * m2 = m1 * (max51 * m2)
swap_mul max51 m2; //max51 * m2 = m2 * max51
paren_mul_right m1 m2 max51; //m1 * (m2 * max51) = (m1 * m2) * max51
paren_mul_right (m1 * m2) max51 max51; //((m1 * m2) * max51) * max51 = (m1 * m2) * (max51 * max51)
assert (v x * v y <= m1 * max51 * m2 * max51);
assert (v x * v y <= m1 * m2 * max51 * max51);
mul64_wide x y
#pop-options
inline_for_extraction noextract
val smul_felem5:
#m1:scale64
-> #m2:scale64_5
-> u1:uint64{felem_fits1 u1 m1}
-> f2:felem5{felem_fits5 f2 m2 /\ m1 *^ m2 <=* s128x5 67108864}
-> out:felem_wide5{felem_wide_fits5 out (m1 *^ m2) /\
wide_as_nat5 out == uint_v u1 * as_nat5 f2}
let smul_felem5 #m1 #m2 u1 (f20, f21, f22, f23, f24) =
let (m20, m21, m22, m23, m24) = m2 in
[@inline_let]
let o0 = mul_wide64 #m1 #m20 u1 f20 in
[@inline_let]
let o1 = mul_wide64 #m1 #m21 u1 f21 in
[@inline_let]
let o2 = mul_wide64 #m1 #m22 u1 f22 in
[@inline_let]
let o3 = mul_wide64 #m1 #m23 u1 f23 in
[@inline_let]
let o4 = mul_wide64 #m1 #m24 u1 f24 in
[@inline_let]
let out = (o0, o1, o2, o3, o4) in
lemma_smul_felem5 u1 (f20, f21, f22, f23, f24);
out
inline_for_extraction noextract
val mul_add_wide128:
#m1:scale64
-> #m2:scale64
-> #m3:scale128
-> x:uint64{felem_fits1 x m1}
-> y:uint64{felem_fits1 y m2}
-> z:uint128{felem_wide_fits1 z m3 /\ m3 + m1 * m2 <= 67108864}
-> r:uint128{uint_v r == uint_v z + uint_v x * uint_v y /\ felem_wide_fits1 r (m3 + m1 * m2)}
let mul_add_wide128 #m1 #m2 #m3 x y z =
z +! mul_wide64 #m1 #m2 x y
#push-options "--z3rlimit 100"
inline_for_extraction noextract
val smul_add_felem5:
#m1:scale64
-> #m2:scale64_5
-> #m3:scale128_5
-> u1:uint64{felem_fits1 u1 m1}
-> f2:felem5{felem_fits5 f2 m2}
-> acc1:felem_wide5{felem_wide_fits5 acc1 m3 /\ m3 +* m1 *^ m2 <=* s128x5 67108864}
-> acc2:felem_wide5{
wide_as_nat5 acc2 == wide_as_nat5 acc1 + uint_v u1 * as_nat5 f2 /\
felem_wide_fits5 acc2 (m3 +* m1 *^ m2)}
let smul_add_felem5 #m1 #m2 #m3 u1 (f20, f21, f22, f23, f24) (o0, o1, o2, o3, o4) =
let (m20, m21, m22, m23, m24) = m2 in
let (m30, m31, m32, m33, m34) = m3 in
[@inline_let]
let o0' = mul_add_wide128 #m1 #m20 #m30 u1 f20 o0 in
[@inline_let]
let o1' = mul_add_wide128 #m1 #m21 #m31 u1 f21 o1 in
[@inline_let]
let o2' = mul_add_wide128 #m1 #m22 #m32 u1 f22 o2 in
[@inline_let]
let o3' = mul_add_wide128 #m1 #m23 #m33 u1 f23 o3 in
[@inline_let]
let o4' = mul_add_wide128 #m1 #m24 #m34 u1 f24 o4 in
[@inline_let]
let out = (o0', o1', o2', o3', o4') in
lemma_smul_add_felem5 u1 (f20, f21, f22, f23, f24) (o0, o1, o2, o3, o4);
out
#pop-options
inline_for_extraction noextract
val precomp_r19:
f2:felem5{felem_fits5 f2 (9, 10, 9, 9, 9)}
-> r19:felem5{felem_fits5 r19 (171, 190, 171, 171, 171)}
let precomp_r19 (f20, f21, f22, f23, f24) =
[@inline_let]
let r190 = f20 *! u64 19 in
[@inline_let]
let r191 = f21 *! u64 19 in
[@inline_let]
let r192 = f22 *! u64 19 in
[@inline_let]
let r193 = f23 *! u64 19 in
[@inline_let]
let r194 = f24 *! u64 19 in
(r190, r191, r192, r193, r194)
inline_for_extraction noextract
val mul_felem5:
f1:felem5{felem_fits5 f1 (9, 10, 9, 9, 9)}
-> r:felem5{felem_fits5 r (9, 10, 9, 9, 9)}
-> r19:felem5{felem_fits5 r19 (171, 190, 171, 171, 171) /\ r19 == precomp_r19 r}
-> out:felem_wide5{felem_wide_fits5 out (6579, 4797, 3340, 1881, 423) /\
feval_wide out == fmul (feval f1) (feval r)}
let mul_felem5 (f10, f11, f12, f13, f14) (r0, r1, r2, r3, r4) (r190, r191, r192, r193, r194) =
let (o0, o1, o2, o3, o4) = smul_felem5 #9 #(9, 10, 9, 9, 9) f10 (r0, r1, r2, r3, r4) in
let (o0, o1, o2, o3, o4) = smul_add_felem5 #10 #(171, 9, 10, 9, 9) #(81, 90, 81, 81, 81)
f11 (r194, r0, r1, r2, r3) (o0, o1, o2, o3, o4) in
let (o0, o1, o2, o3, o4) = smul_add_felem5 #9 #(171, 171, 9, 10, 9) #(1791, 180, 181, 171, 171)
f12 (r193, r194, r0, r1, r2) (o0, o1, o2, o3, o4) in
let (o0, o1, o2, o3, o4) = smul_add_felem5 #9 #(171, 171, 171, 9, 10) #(3330, 1719, 262, 261, 252)
f13 (r192, r193, r194, r0, r1) (o0, o1, o2, o3, o4) in
let (o0, o1, o2, o3, o4) = smul_add_felem5 #9 #(190, 171, 171, 171, 9) #(4869, 3258, 1801, 342, 342)
f14 (r191, r192, r193, r194, r0) (o0, o1, o2, o3, o4) in
lemma_fmul5 (f10, f11, f12, f13, f14) (r0, r1, r2, r3, r4);
(o0, o1, o2, o3, o4)
inline_for_extraction noextract
val carry51:
l:uint64
-> cin:uint64
-> Pure (uint64 & uint64)
(requires felem_fits1 l 2 /\ felem_fits1 cin 8190)
(ensures fun (l0, l1) ->
v l + v cin == v l1 * pow2 51 + v l0 /\
felem_fits1 l0 1 /\ uint_v l1 < pow2 13)
let carry51 l cin =
let l' = l +! cin in
lemma_carry51 l cin;
(l' &. mask51, l' >>. 51ul)
inline_for_extraction noextract
val carry51_wide:
#m:scale64{m < 8192}
-> l:uint128{felem_wide_fits1 l m}
-> cin:uint64
-> Pure (uint64 & uint64)
(requires True)
(ensures fun (l0, l1) ->
v l + v cin == v l1 * pow2 51 + v l0 /\
felem_fits1 l0 1 /\ felem_fits1 l1 (m + 1))
let carry51_wide #m l cin =
let l' = l +! to_u128 cin in
lemma_carry51_wide #m l cin;
((to_u64 l') &. mask51, to_u64 (l' >>. 51ul))
let mul_inv_t (f:felem5) =
let (o0, o1, o2, o3, o4) = f in
if v o1 >= pow2 51 then
felem_fits5 f (1, 2, 1, 1, 1) /\ v o1 % pow2 51 < 8192
else felem_fits5 f (1, 1, 1, 1, 1)
#push-options "--ifuel 1"
val lemma_mul_inv:
f:felem5{felem_fits5 f (1, 1, 1, 1, 1)}
-> cin:uint64{v cin < pow2 51}
-> Lemma
(let (i0, i1, i2, i3, i4) = f in
assert_norm (pow51 = pow2 51);
let i1' = i1 +! cin in
let out = (i0, i1', i2, i3, i4) in
if (v i1 + v cin) / pow2 51 > 0 then
felem_fits5 out (1, 2, 1, 1, 1) /\
(v i1 + v cin) % pow2 51 < v cin
else felem_fits5 out (1, 1, 1, 1, 1))
let lemma_mul_inv f cin =
assert_norm (pow51 = pow2 51)
#pop-options
#push-options "--z3rlimit 100"
inline_for_extraction noextract
val carry_wide5:
inp:felem_wide5{felem_wide_fits5 inp (6579, 4797, 3340, 1881, 423)}
-> Pure felem5
(requires True)
(ensures fun out ->
mul_inv_t out /\ feval out == feval_wide inp)
let carry_wide5 (i0, i1, i2, i3, i4) =
assert_norm (6579 < pow2 13);
assert_norm (pow2 13 < max51);
let tmp0, c0 = carry51_wide #6579 i0 (u64 0) in
let tmp1, c1 = carry51_wide #4797 i1 c0 in
let tmp2, c2 = carry51_wide #3340 i2 c1 in
let tmp3, c3 = carry51_wide #1881 i3 c2 in
let tmp4, c4 = carry51_wide #423 i4 c3 in
lemma_carry5_simplify c0 c1 c2 c3 c4 tmp0 tmp1 tmp2 tmp3 tmp4;
let tmp0', c5 = carry51 tmp0 (c4 *! u64 19) in
[@inline_let]
let tmp1' = tmp1 +! c5 in
lemma_mul_inv (tmp0', tmp1, tmp2, tmp3, tmp4) c5;
(tmp0', tmp1', tmp2, tmp3, tmp4)
#pop-options
inline_for_extraction noextract
val fmul5:
f1:felem5{felem_fits5 f1 (9, 10, 9, 9, 9)}
-> f2:felem5{felem_fits5 f2 (9, 10, 9, 9, 9)}
-> out:felem5{mul_inv_t out /\
feval out == fmul (feval f1) (feval f2)}
let fmul5 (f10, f11, f12, f13, f14) (f20, f21, f22, f23, f24) =
let (tmp0, tmp1, tmp2, tmp3, tmp4) = precomp_r19 (f20, f21, f22, f23, f24) in
let (tmp_w0, tmp_w1, tmp_w2, tmp_w3, tmp_w4) =
mul_felem5 (f10, f11, f12, f13, f14) (f20, f21, f22, f23, f24) (tmp0, tmp1, tmp2, tmp3, tmp4) in
carry_wide5 (tmp_w0, tmp_w1, tmp_w2, tmp_w3, tmp_w4)
inline_for_extraction noextract
val fmul25:
f1:felem5{felem_fits5 f1 (9, 10, 9, 9, 9)}
-> f2:felem5{felem_fits5 f2 (9, 10, 9, 9, 9)}
-> f3:felem5{felem_fits5 f3 (9, 10, 9, 9, 9)}
-> f4:felem5{felem_fits5 f4 (9, 10, 9, 9, 9)}
-> Pure (felem5 & felem5)
(requires True)
(ensures fun (out1, out2) ->
mul_inv_t out1 /\ mul_inv_t out2 /\
feval out1 == fmul (feval f1) (feval f2) /\
feval out2 == fmul (feval f3) (feval f4))
let fmul25 (f10, f11, f12, f13, f14) (f20, f21, f22, f23, f24) (f30, f31, f32, f33, f34) (f40, f41, f42, f43, f44) =
let (tmp10, tmp11, tmp12, tmp13, tmp14) = precomp_r19 (f20, f21, f22, f23, f24) in
let (tmp20, tmp21, tmp22, tmp23, tmp24) = precomp_r19 (f40, f41, f42, f43, f44) in
let (tmp_w10, tmp_w11, tmp_w12, tmp_w13, tmp_w14) =
mul_felem5 (f10, f11, f12, f13, f14) (f20, f21, f22, f23, f24) (tmp10, tmp11, tmp12, tmp13, tmp14) in
let (tmp_w20, tmp_w21, tmp_w22, tmp_w23, tmp_w24) =
mul_felem5 (f30, f31, f32, f33, f34) (f40, f41, f42, f43, f44) (tmp20, tmp21, tmp22, tmp23, tmp24) in
let (o10,o11,o12,o13,o14) = carry_wide5 (tmp_w10, tmp_w11, tmp_w12, tmp_w13, tmp_w14) in
let (o20,o21,o22,o23,o24) = carry_wide5 (tmp_w20, tmp_w21, tmp_w22, tmp_w23, tmp_w24) in
((o10,o11,o12,o13,o14), (o20,o21,o22,o23,o24))
inline_for_extraction noextract
val fmul15:
f1:felem5{felem_fits5 f1 (9, 10, 9, 9, 9)}
-> f2:uint64{felem_fits1 f2 1}
-> Pure felem5
(requires True)
(ensures fun out ->
mul_inv_t out /\ feval out == (feval f1 * v f2) % prime)
let fmul15 (f10, f11, f12, f13, f14) f2 =
let (tmp_w0, tmp_w1, tmp_w2, tmp_w3, tmp_w4) =
smul_felem5 #1 #(9, 10, 9, 9, 9) f2 (f10, f11, f12, f13, f14) in
let out = (tmp_w0, tmp_w1, tmp_w2, tmp_w3, tmp_w4) in
[@inline_let]
let res = carry_wide5 (tmp_w0, tmp_w1, tmp_w2, tmp_w3, tmp_w4) in
FStar.Math.Lemmas.lemma_mod_mul_distr_l (as_nat5 (f10, f11, f12, f13, f14)) (uint_v f2) prime;
assert (feval res == feval_wide (tmp_w0, tmp_w1, tmp_w2, tmp_w3, tmp_w4));
assert (feval res == (wide_as_nat5 (tmp_w0, tmp_w1, tmp_w2, tmp_w3, tmp_w4)) % prime);
assert (feval res == (v f2 * as_nat5 (f10, f11, f12, f13, f14)) % prime);
FStar.Math.Lemmas.swap_mul (v f2) (as_nat5 (f10, f11, f12, f13, f14));
assert (feval res == (as_nat5 (f10, f11, f12, f13, f14) * v f2) % prime);
res
// inline_for_extraction noextract
// val fsqr_felem5:
// f:felem5{felem_fits5 f (9, 10, 9, 9, 9)}
// -> out:felem_wide5{felem_wide_fits5 out (6579, 4797, 3340, 1881, 423)}
// let fsqr_felem5 (f0, f1, f2, f3, f4) =
// let (o0, o1, o2, o3, o4) = smul_felem5 #9 #(9, 20, 18, 18, 18) f0 (f0, u64 2 *! f1, u64 2 *! f2, u64 2 *! f3, u64 2 *! f4) in
// let (o0, o1, o2, o3, o4) = smul_add_felem5 #10 #(342, 0, 10, 18, 18) #(81, 180, 162, 162, 162)
// f1 (u64 38 *! f4, u64 0, f1, u64 2 *! f2, u64 2 *! f3) (o0, o1, o2, o3, o4) in
// let (o0, o1, o2, o3, o4) = smul_add_felem5 #9 #(342, 342, 0, 0, 9) #(3501, 180, 262, 342, 342)
// f2 (u64 38 *! f3, u64 38 *! f4, u64 0, u64 0, f2) (o0, o1, o2, o3, o4) in
// let (o0, o1, o2, o3, o4) = smul_add_felem5 #9 #(0, 171, 342, 0, 0) #(6579, 3258, 262, 342, 423)
// f3 (u64 0, u64 19 *. f3, u64 38 *. f4, u64 0, u64 0) (o0, o1, o2, o3, o4) in
// let (o0, o1, o2, o3, o4) = smul_add_felem5 #9 #(0, 0, 0, 171, 0) #(6579, 4797, 3340, 342, 423)
// f4 (u64 0, u64 0, u64 0, u64 19 *. f4, u64 0) (o0, o1, o2, o3, o4) in
// (o0, o1, o2, o3, o4)
inline_for_extraction noextract
val mul64_wide_add3:
#m0:scale64 -> #m1:scale64 -> #m2:scale64
-> #m3:scale64 -> #m4:scale64 -> #m5:scale64
-> a0:uint64{felem_fits1 a0 m0}
-> a1:uint64{felem_fits1 a1 m1}
-> b0:uint64{felem_fits1 b0 m2}
-> b1:uint64{felem_fits1 b1 m3}
-> c0:uint64{felem_fits1 c0 m4}
-> c1:uint64{felem_fits1 c1 m5} ->
Pure uint128
(requires m0 * m1 + m2 * m3 + m4 * m5 < 8192)
(ensures fun res ->
felem_wide_fits1 res (m0 * m1 + m2 * m3 + m4 * m5) /\
v res == v a0 * v a1 + v b0 * v b1 + v c0 * v c1) | false | false | Hacl.Spec.Curve25519.Field51.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 0,
"initial_ifuel": 0,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 50,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val mul64_wide_add3:
#m0:scale64 -> #m1:scale64 -> #m2:scale64
-> #m3:scale64 -> #m4:scale64 -> #m5:scale64
-> a0:uint64{felem_fits1 a0 m0}
-> a1:uint64{felem_fits1 a1 m1}
-> b0:uint64{felem_fits1 b0 m2}
-> b1:uint64{felem_fits1 b1 m3}
-> c0:uint64{felem_fits1 c0 m4}
-> c1:uint64{felem_fits1 c1 m5} ->
Pure uint128
(requires m0 * m1 + m2 * m3 + m4 * m5 < 8192)
(ensures fun res ->
felem_wide_fits1 res (m0 * m1 + m2 * m3 + m4 * m5) /\
v res == v a0 * v a1 + v b0 * v b1 + v c0 * v c1) | [] | Hacl.Spec.Curve25519.Field51.mul64_wide_add3 | {
"file_name": "code/curve25519/Hacl.Spec.Curve25519.Field51.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} |
a0: Lib.IntTypes.uint64{Hacl.Spec.Curve25519.Field51.Definition.felem_fits1 a0 m0} ->
a1: Lib.IntTypes.uint64{Hacl.Spec.Curve25519.Field51.Definition.felem_fits1 a1 m1} ->
b0: Lib.IntTypes.uint64{Hacl.Spec.Curve25519.Field51.Definition.felem_fits1 b0 m2} ->
b1: Lib.IntTypes.uint64{Hacl.Spec.Curve25519.Field51.Definition.felem_fits1 b1 m3} ->
c0: Lib.IntTypes.uint64{Hacl.Spec.Curve25519.Field51.Definition.felem_fits1 c0 m4} ->
c1: Lib.IntTypes.uint64{Hacl.Spec.Curve25519.Field51.Definition.felem_fits1 c1 m5}
-> Prims.Pure Lib.IntTypes.uint128 | {
"end_col": 58,
"end_line": 380,
"start_col": 2,
"start_line": 378
} |
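
This row's `file_context` also carries `precomp_r19` and `mul_felem5`, the schoolbook multiplication in radix 2^51 that folds every partial product of weight 2^255 or above back into the low limbs via 2^255 ≡ 19 (mod p) for p = 2^255 - 19; `mul64_wide_add3` itself is a three-way 64-by-64-to-128-bit multiply-accumulate whose precondition `m0 * m1 + m2 * m3 + m4 * m5 < 8192` keeps the sum of products under roughly 2^13 * 2^102 = 2^115, comfortably inside `uint128`. The following Python model of the 19-fold is an editorial sketch (unreduced integer limbs, no carrying), not part of the dataset:

```python
# Python model (editorial) of the 19-fold used by precomp_r19 / mul_felem5:
# partial products of weight 2^(51*(i+j)) with i+j >= 5 are folded down
# using 2^255 = 19 (mod P).
import random

P = 2**255 - 19

def as_nat5(f):
    return sum(limb << (51 * i) for i, limb in enumerate(f))

def mul_felem5(f, r):
    r19 = [19 * x for x in r]          # precomp_r19
    out = [0] * 5                      # wide, unreduced, uncarried limbs
    for i in range(5):
        for j in range(5):
            if i + j < 5:
                out[i + j] += f[i] * r[j]
            else:
                out[i + j - 5] += f[i] * r19[j]
    return out

random.seed(1)
f = [random.randrange(2**51) for _ in range(5)]
r = [random.randrange(2**51) for _ in range(5)]
assert as_nat5(mul_felem5(f, r)) % P == (as_nat5(f) * as_nat5(r)) % P
```
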
Prims.Pure | val carry_wide5:
inp:felem_wide5{felem_wide_fits5 inp (6579, 4797, 3340, 1881, 423)}
-> Pure felem5
(requires True)
(ensures fun out ->
mul_inv_t out /\ feval out == feval_wide inp) | [
{
"abbrev": false,
"full_module": "Hacl.Spec.Curve25519.Field51.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Curve25519.Field51.Definition",
"short_module": null
},
{
"abbrev": false,
"full_module": "Spec.Curve25519",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Curve25519",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Curve25519",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let carry_wide5 (i0, i1, i2, i3, i4) =
assert_norm (6579 < pow2 13);
assert_norm (pow2 13 < max51);
let tmp0, c0 = carry51_wide #6579 i0 (u64 0) in
let tmp1, c1 = carry51_wide #4797 i1 c0 in
let tmp2, c2 = carry51_wide #3340 i2 c1 in
let tmp3, c3 = carry51_wide #1881 i3 c2 in
let tmp4, c4 = carry51_wide #423 i4 c3 in
lemma_carry5_simplify c0 c1 c2 c3 c4 tmp0 tmp1 tmp2 tmp3 tmp4;
let tmp0', c5 = carry51 tmp0 (c4 *! u64 19) in
[@inline_let]
let tmp1' = tmp1 +! c5 in
lemma_mul_inv (tmp0', tmp1, tmp2, tmp3, tmp4) c5;
(tmp0', tmp1', tmp2, tmp3, tmp4) | val carry_wide5:
inp:felem_wide5{felem_wide_fits5 inp (6579, 4797, 3340, 1881, 423)}
-> Pure felem5
(requires True)
(ensures fun out ->
mul_inv_t out /\ feval out == feval_wide inp)
let carry_wide5 (i0, i1, i2, i3, i4) = | false | null | false | assert_norm (6579 < pow2 13);
assert_norm (pow2 13 < max51);
let tmp0, c0 = carry51_wide #6579 i0 (u64 0) in
let tmp1, c1 = carry51_wide #4797 i1 c0 in
let tmp2, c2 = carry51_wide #3340 i2 c1 in
let tmp3, c3 = carry51_wide #1881 i3 c2 in
let tmp4, c4 = carry51_wide #423 i4 c3 in
lemma_carry5_simplify c0 c1 c2 c3 c4 tmp0 tmp1 tmp2 tmp3 tmp4;
let tmp0', c5 = carry51 tmp0 (c4 *! u64 19) in
[@@ inline_let ]let tmp1' = tmp1 +! c5 in
lemma_mul_inv (tmp0', tmp1, tmp2, tmp3, tmp4) c5;
(tmp0', tmp1', tmp2, tmp3, tmp4) | {
"checked_file": "Hacl.Spec.Curve25519.Field51.fst.checked",
"dependencies": [
"Spec.Curve25519.fst.checked",
"prims.fst.checked",
"Lib.Sequence.fsti.checked",
"Lib.IntTypes.fsti.checked",
"Hacl.Spec.Curve25519.Field51.Lemmas.fst.checked",
"Hacl.Spec.Curve25519.Field51.Definition.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked"
],
"interface_file": false,
"source_file": "Hacl.Spec.Curve25519.Field51.fst"
} | [] | [
"Hacl.Spec.Curve25519.Field51.Definition.felem_wide5",
"Hacl.Spec.Curve25519.Field51.Definition.felem_wide_fits5",
"FStar.Pervasives.Native.Mktuple5",
"Prims.nat",
"Lib.IntTypes.uint128",
"Lib.IntTypes.uint64",
"Prims.unit",
"Hacl.Spec.Curve25519.Field51.lemma_mul_inv",
"Lib.IntTypes.int_t",
"Lib.IntTypes.U64",
"Lib.IntTypes.SEC",
"Lib.IntTypes.op_Plus_Bang",
"Hacl.Spec.Curve25519.Field51.Definition.felem5",
"FStar.Pervasives.Native.tuple2",
"Hacl.Spec.Curve25519.Field51.carry51",
"Lib.IntTypes.op_Star_Bang",
"Lib.IntTypes.u64",
"Hacl.Spec.Curve25519.Field51.Lemmas.lemma_carry5_simplify",
"Hacl.Spec.Curve25519.Field51.carry51_wide",
"FStar.Pervasives.assert_norm",
"Prims.b2t",
"Prims.op_LessThan",
"Prims.pow2",
"Hacl.Spec.Curve25519.Field51.Definition.max51"
] | [] | module Hacl.Spec.Curve25519.Field51
open Lib.Sequence
open Lib.IntTypes
open FStar.Mul
open Spec.Curve25519
open Hacl.Spec.Curve25519.Field51.Definition
open Hacl.Spec.Curve25519.Field51.Lemmas
#reset-options "--z3rlimit 50 --fuel 0 --ifuel 0 --using_facts_from '* -FStar.Seq'"
inline_for_extraction noextract
val fadd5:
f1:felem5{felem_fits5 f1 (1, 2, 1, 1, 1)}
-> f2:felem5{felem_fits5 f2 (1, 2, 1, 1, 1)}
-> out:felem5{felem_fits5 out (2, 4, 2, 2, 2) /\
feval out == fadd (feval f1) (feval f2)}
let fadd5 (f10, f11, f12, f13, f14) (f20, f21, f22, f23, f24) =
let o0 = f10 +! f20 in
let o1 = f11 +! f21 in
let o2 = f12 +! f22 in
let o3 = f13 +! f23 in
let o4 = f14 +! f24 in
let out = (o0, o1, o2, o3, o4) in
FStar.Math.Lemmas.lemma_mod_plus_distr_l
(as_nat5 (f10, f11, f12, f13, f14)) (as_nat5 (f20, f21, f22, f23, f24)) prime;
FStar.Math.Lemmas.lemma_mod_plus_distr_r
((as_nat5 (f10, f11, f12, f13, f14)) % prime) (as_nat5 (f20, f21, f22, f23, f24)) prime;
out
inline_for_extraction noextract
val fadd_zero:
f1:felem5{felem_fits5 f1 (1, 2, 1, 1, 1)}
-> out:felem5{felem_fits5 out (9, 10, 9, 9, 9) /\
feval out == feval f1}
let fadd_zero (f10, f11, f12, f13, f14) =
let o0 = f10 +! u64 0x3fffffffffff68 in
let o1 = f11 +! u64 0x3ffffffffffff8 in
let o2 = f12 +! u64 0x3ffffffffffff8 in
let o3 = f13 +! u64 0x3ffffffffffff8 in
let o4 = f14 +! u64 0x3ffffffffffff8 in
lemma_add_zero (f10, f11, f12, f13, f14);
(o0, o1, o2, o3, o4)
inline_for_extraction noextract
val fsub5:
f1:felem5{felem_fits5 f1 (1, 2, 1, 1, 1)}
-> f2:felem5{felem_fits5 f2 (1, 2, 1, 1, 1)}
-> out:felem5{felem_fits5 out (9, 10, 9, 9, 9) /\
feval out == fsub (feval f1) (feval f2)}
let fsub5 (f10, f11, f12, f13, f14) (f20, f21, f22, f23, f24) =
//assert_norm (0x3fffffffffff68 == pow2 54 - 152);
//assert_norm (0x3ffffffffffff8 == pow2 54 - 8);
let (t0, t1, t2, t3, t4) = fadd_zero (f10, f11, f12, f13, f14) in
let o0 = t0 -! f20 in
let o1 = t1 -! f21 in
let o2 = t2 -! f22 in
let o3 = t3 -! f23 in
let o4 = t4 -! f24 in
let out = (o0, o1, o2, o3, o4) in
FStar.Math.Lemmas.lemma_mod_plus_distr_l
(as_nat5 (t0, t1, t2, t3, t4)) (- as_nat5 (f20, f21, f22, f23, f24)) prime;
lemma_mod_sub_distr ((as_nat5 (t0, t1, t2, t3, t4)) % prime) (as_nat5 (f20, f21, f22, f23, f24)) prime;
out
val lemma_fsub:
f1:felem5{felem_fits5 f1 (1, 2, 1, 1, 1)}
-> f2:felem5{felem_fits5 f2 (1, 2, 1, 1, 1)}
-> Lemma (let (f10, f11, f12, f13, f14) = f1 in
let (f20, f21, f22, f23, f24) = f2 in
let o0 = f10 +! u64 0x3fffffffffff68 -! f20 in
let o1 = f11 +! u64 0x3ffffffffffff8 -! f21 in
let o2 = f12 +! u64 0x3ffffffffffff8 -! f22 in
let o3 = f13 +! u64 0x3ffffffffffff8 -! f23 in
let o4 = f14 +! u64 0x3ffffffffffff8 -! f24 in
let out = (o0, o1, o2, o3, o4) in
out == fsub5 (f10, f11, f12, f13, f14) (f20, f21, f22, f23, f24))
let lemma_fsub f1 f2 = ()
inline_for_extraction noextract
val mul_wide64:
#m1:scale64
-> #m2:scale64
-> x:uint64{felem_fits1 x m1}
-> y:uint64{felem_fits1 y m2 /\ m1 * m2 <= 67108864}
-> z:uint128{uint_v z == uint_v x * uint_v y /\ felem_wide_fits1 z (m1 * m2)}
#push-options "--z3rlimit 5"
let mul_wide64 #m1 #m2 x y =
let open FStar.Math.Lemmas in
lemma_mult_le_left (v x) (v y) (m2 * max51); //v x * v y <= v x * (m2 * max51)
lemma_mult_le_right (m2 * max51) (v x) (m1 * max51); // v x * (m2 * max51) <= (m1 * max51) * (m2 * max51)
paren_mul_right (m1 * max51) m2 max51; //(m1 * max51) * (m2 * max51) = ((m1 * max51) * m2) * max51
paren_mul_right m1 max51 m2; //(m1 * max51) * m2 = m1 * (max51 * m2)
swap_mul max51 m2; //max51 * m2 = m2 * max51
paren_mul_right m1 m2 max51; //m1 * (m2 * max51) = (m1 * m2) * max51
paren_mul_right (m1 * m2) max51 max51; //((m1 * m2) * max51) * max51 = (m1 * m2) * (max51 * max51)
assert (v x * v y <= m1 * max51 * m2 * max51);
assert (v x * v y <= m1 * m2 * max51 * max51);
mul64_wide x y
#pop-options
inline_for_extraction noextract
val smul_felem5:
#m1:scale64
-> #m2:scale64_5
-> u1:uint64{felem_fits1 u1 m1}
-> f2:felem5{felem_fits5 f2 m2 /\ m1 *^ m2 <=* s128x5 67108864}
-> out:felem_wide5{felem_wide_fits5 out (m1 *^ m2) /\
wide_as_nat5 out == uint_v u1 * as_nat5 f2}
let smul_felem5 #m1 #m2 u1 (f20, f21, f22, f23, f24) =
let (m20, m21, m22, m23, m24) = m2 in
[@inline_let]
let o0 = mul_wide64 #m1 #m20 u1 f20 in
[@inline_let]
let o1 = mul_wide64 #m1 #m21 u1 f21 in
[@inline_let]
let o2 = mul_wide64 #m1 #m22 u1 f22 in
[@inline_let]
let o3 = mul_wide64 #m1 #m23 u1 f23 in
[@inline_let]
let o4 = mul_wide64 #m1 #m24 u1 f24 in
[@inline_let]
let out = (o0, o1, o2, o3, o4) in
lemma_smul_felem5 u1 (f20, f21, f22, f23, f24);
out
inline_for_extraction noextract
val mul_add_wide128:
#m1:scale64
-> #m2:scale64
-> #m3:scale128
-> x:uint64{felem_fits1 x m1}
-> y:uint64{felem_fits1 y m2}
-> z:uint128{felem_wide_fits1 z m3 /\ m3 + m1 * m2 <= 67108864}
-> r:uint128{uint_v r == uint_v z + uint_v x * uint_v y /\ felem_wide_fits1 r (m3 + m1 * m2)}
let mul_add_wide128 #m1 #m2 #m3 x y z =
z +! mul_wide64 #m1 #m2 x y
#push-options "--z3rlimit 100"
inline_for_extraction noextract
val smul_add_felem5:
#m1:scale64
-> #m2:scale64_5
-> #m3:scale128_5
-> u1:uint64{felem_fits1 u1 m1}
-> f2:felem5{felem_fits5 f2 m2}
-> acc1:felem_wide5{felem_wide_fits5 acc1 m3 /\ m3 +* m1 *^ m2 <=* s128x5 67108864}
-> acc2:felem_wide5{
wide_as_nat5 acc2 == wide_as_nat5 acc1 + uint_v u1 * as_nat5 f2 /\
felem_wide_fits5 acc2 (m3 +* m1 *^ m2)}
let smul_add_felem5 #m1 #m2 #m3 u1 (f20, f21, f22, f23, f24) (o0, o1, o2, o3, o4) =
let (m20, m21, m22, m23, m24) = m2 in
let (m30, m31, m32, m33, m34) = m3 in
[@inline_let]
let o0' = mul_add_wide128 #m1 #m20 #m30 u1 f20 o0 in
[@inline_let]
let o1' = mul_add_wide128 #m1 #m21 #m31 u1 f21 o1 in
[@inline_let]
let o2' = mul_add_wide128 #m1 #m22 #m32 u1 f22 o2 in
[@inline_let]
let o3' = mul_add_wide128 #m1 #m23 #m33 u1 f23 o3 in
[@inline_let]
let o4' = mul_add_wide128 #m1 #m24 #m34 u1 f24 o4 in
[@inline_let]
let out = (o0', o1', o2', o3', o4') in
lemma_smul_add_felem5 u1 (f20, f21, f22, f23, f24) (o0, o1, o2, o3, o4);
out
#pop-options
inline_for_extraction noextract
val precomp_r19:
f2:felem5{felem_fits5 f2 (9, 10, 9, 9, 9)}
-> r19:felem5{felem_fits5 r19 (171, 190, 171, 171, 171)}
let precomp_r19 (f20, f21, f22, f23, f24) =
[@inline_let]
let r190 = f20 *! u64 19 in
[@inline_let]
let r191 = f21 *! u64 19 in
[@inline_let]
let r192 = f22 *! u64 19 in
[@inline_let]
let r193 = f23 *! u64 19 in
[@inline_let]
let r194 = f24 *! u64 19 in
(r190, r191, r192, r193, r194)
inline_for_extraction noextract
val mul_felem5:
f1:felem5{felem_fits5 f1 (9, 10, 9, 9, 9)}
-> r:felem5{felem_fits5 r (9, 10, 9, 9, 9)}
-> r19:felem5{felem_fits5 r19 (171, 190, 171, 171, 171) /\ r19 == precomp_r19 r}
-> out:felem_wide5{felem_wide_fits5 out (6579, 4797, 3340, 1881, 423) /\
feval_wide out == fmul (feval f1) (feval r)}
let mul_felem5 (f10, f11, f12, f13, f14) (r0, r1, r2, r3, r4) (r190, r191, r192, r193, r194) =
let (o0, o1, o2, o3, o4) = smul_felem5 #9 #(9, 10, 9, 9, 9) f10 (r0, r1, r2, r3, r4) in
let (o0, o1, o2, o3, o4) = smul_add_felem5 #10 #(171, 9, 10, 9, 9) #(81, 90, 81, 81, 81)
f11 (r194, r0, r1, r2, r3) (o0, o1, o2, o3, o4) in
let (o0, o1, o2, o3, o4) = smul_add_felem5 #9 #(171, 171, 9, 10, 9) #(1791, 180, 181, 171, 171)
f12 (r193, r194, r0, r1, r2) (o0, o1, o2, o3, o4) in
let (o0, o1, o2, o3, o4) = smul_add_felem5 #9 #(171, 171, 171, 9, 10) #(3330, 1719, 262, 261, 252)
f13 (r192, r193, r194, r0, r1) (o0, o1, o2, o3, o4) in
let (o0, o1, o2, o3, o4) = smul_add_felem5 #9 #(190, 171, 171, 171, 9) #(4869, 3258, 1801, 342, 342)
f14 (r191, r192, r193, r194, r0) (o0, o1, o2, o3, o4) in
lemma_fmul5 (f10, f11, f12, f13, f14) (r0, r1, r2, r3, r4);
(o0, o1, o2, o3, o4)
inline_for_extraction noextract
val carry51:
l:uint64
-> cin:uint64
-> Pure (uint64 & uint64)
(requires felem_fits1 l 2 /\ felem_fits1 cin 8190)
(ensures fun (l0, l1) ->
v l + v cin == v l1 * pow2 51 + v l0 /\
felem_fits1 l0 1 /\ uint_v l1 < pow2 13)
let carry51 l cin =
let l' = l +! cin in
lemma_carry51 l cin;
(l' &. mask51, l' >>. 51ul)
inline_for_extraction noextract
val carry51_wide:
#m:scale64{m < 8192}
-> l:uint128{felem_wide_fits1 l m}
-> cin:uint64
-> Pure (uint64 & uint64)
(requires True)
(ensures fun (l0, l1) ->
v l + v cin == v l1 * pow2 51 + v l0 /\
felem_fits1 l0 1 /\ felem_fits1 l1 (m + 1))
let carry51_wide #m l cin =
let l' = l +! to_u128 cin in
lemma_carry51_wide #m l cin;
((to_u64 l') &. mask51, to_u64 (l' >>. 51ul))
let mul_inv_t (f:felem5) =
let (o0, o1, o2, o3, o4) = f in
if v o1 >= pow2 51 then
felem_fits5 f (1, 2, 1, 1, 1) /\ v o1 % pow2 51 < 8192
else felem_fits5 f (1, 1, 1, 1, 1)
#push-options "--ifuel 1"
val lemma_mul_inv:
f:felem5{felem_fits5 f (1, 1, 1, 1, 1)}
-> cin:uint64{v cin < pow2 51}
-> Lemma
(let (i0, i1, i2, i3, i4) = f in
assert_norm (pow51 = pow2 51);
let i1' = i1 +! cin in
let out = (i0, i1', i2, i3, i4) in
if (v i1 + v cin) / pow2 51 > 0 then
felem_fits5 out (1, 2, 1, 1, 1) /\
(v i1 + v cin) % pow2 51 < v cin
else felem_fits5 out (1, 1, 1, 1, 1))
let lemma_mul_inv f cin =
assert_norm (pow51 = pow2 51)
#pop-options
#push-options "--z3rlimit 100"
inline_for_extraction noextract
val carry_wide5:
inp:felem_wide5{felem_wide_fits5 inp (6579, 4797, 3340, 1881, 423)}
-> Pure felem5
(requires True)
(ensures fun out ->
mul_inv_t out /\ feval out == feval_wide inp) | false | false | Hacl.Spec.Curve25519.Field51.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 0,
"initial_ifuel": 0,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 100,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val carry_wide5:
inp:felem_wide5{felem_wide_fits5 inp (6579, 4797, 3340, 1881, 423)}
-> Pure felem5
(requires True)
(ensures fun out ->
mul_inv_t out /\ feval out == feval_wide inp) | [] | Hacl.Spec.Curve25519.Field51.carry_wide5 | {
"file_name": "code/curve25519/Hacl.Spec.Curve25519.Field51.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} |
inp:
Hacl.Spec.Curve25519.Field51.Definition.felem_wide5
{Hacl.Spec.Curve25519.Field51.Definition.felem_wide_fits5 inp (6579, 4797, 3340, 1881, 423)}
-> Prims.Pure Hacl.Spec.Curve25519.Field51.Definition.felem5 | {
"end_col": 34,
"end_line": 282,
"start_col": 2,
"start_line": 269
} |
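
`carry_wide5` in this row normalizes the wide limbs produced by multiplication: `carry51_wide`/`carry51` split each limb at bit 51 and push the carry into the next limb, and the carry out of the top limb is folded back into limb 0 as `19 * c4`, again because 2^255 ≡ 19 (mod p); the postcondition `feval out == feval_wide inp` states that the field value is preserved. A small editorial Python model of the same carry chain (plain integers, arbitrary test limbs; not part of the dataset):

```python
# Python model (editorial) of the carry chain in carry_wide5 / carry_felem5_full:
# split each limb at bit 51, propagate the carry upward, and fold the top
# carry back into limb 0 as 19*c4, since 2^255 = 19 (mod P).
import random

P = 2**255 - 19
MASK51 = (1 << 51) - 1

def as_nat5(f):
    return sum(limb << (51 * i) for i, limb in enumerate(f))

def carry51(l, cin):
    t = l + cin
    return t & MASK51, t >> 51       # (low 51 bits, carry)

def carry_wide5(inp):
    t0, c0 = carry51(inp[0], 0)
    t1, c1 = carry51(inp[1], c0)
    t2, c2 = carry51(inp[2], c1)
    t3, c3 = carry51(inp[3], c2)
    t4, c4 = carry51(inp[4], c3)
    t0, c5 = carry51(t0, 19 * c4)    # fold 2^255 * c4 back in as 19 * c4
    return [t0, t1 + c5, t2, t3, t4]

random.seed(2)
inp = [random.randrange(2**110) for _ in range(5)]   # wide, unreduced limbs
out = carry_wide5(inp)
assert as_nat5(out) % P == as_nat5(inp) % P          # feval out == feval_wide inp
assert all(limb < 2**52 for limb in out)             # limbs back near radix-51 range
```
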
Prims.Tot | val carry_felem5_full:
inp:felem5{mul_inv_t inp}
-> out:felem5{feval out == feval inp /\ felem_fits5 out (1, 1, 1, 1, 1)} | [
{
"abbrev": false,
"full_module": "Hacl.Spec.Curve25519.Field51.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Curve25519.Field51.Definition",
"short_module": null
},
{
"abbrev": false,
"full_module": "Spec.Curve25519",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Curve25519",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Curve25519",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let carry_felem5_full (f0, f1, f2, f3, f4) =
assert_norm (pow51 = pow2 51);
let tmp0, c0 = carry51 f0 (u64 0) in
let tmp1, c1 = carry51 f1 c0 in
assert (if v f1 < pow2 51 then v tmp1 < pow2 51 else v tmp1 < 8192);
let tmp2, c2 = carry51 f2 c1 in
let tmp3, c3 = carry51 f3 c2 in
let tmp4, c4 = carry51 f4 c3 in
lemma_carry5_simplify c0 c1 c2 c3 c4 tmp0 tmp1 tmp2 tmp3 tmp4;
[@inline_let]
let tmp0', c5 = carry51 tmp0 (c4 *! u64 19) in
[@inline_let]
let tmp1' = tmp1 +! c5 in
(tmp0', tmp1', tmp2, tmp3, tmp4) | val carry_felem5_full:
inp:felem5{mul_inv_t inp}
-> out:felem5{feval out == feval inp /\ felem_fits5 out (1, 1, 1, 1, 1)}
let carry_felem5_full (f0, f1, f2, f3, f4) = | false | null | false | assert_norm (pow51 = pow2 51);
let tmp0, c0 = carry51 f0 (u64 0) in
let tmp1, c1 = carry51 f1 c0 in
assert (if v f1 < pow2 51 then v tmp1 < pow2 51 else v tmp1 < 8192);
let tmp2, c2 = carry51 f2 c1 in
let tmp3, c3 = carry51 f3 c2 in
let tmp4, c4 = carry51 f4 c3 in
lemma_carry5_simplify c0 c1 c2 c3 c4 tmp0 tmp1 tmp2 tmp3 tmp4;
[@@ inline_let ]let tmp0', c5 = carry51 tmp0 (c4 *! u64 19) in
[@@ inline_let ]let tmp1' = tmp1 +! c5 in
(tmp0', tmp1', tmp2, tmp3, tmp4) | {
"checked_file": "Hacl.Spec.Curve25519.Field51.fst.checked",
"dependencies": [
"Spec.Curve25519.fst.checked",
"prims.fst.checked",
"Lib.Sequence.fsti.checked",
"Lib.IntTypes.fsti.checked",
"Hacl.Spec.Curve25519.Field51.Lemmas.fst.checked",
"Hacl.Spec.Curve25519.Field51.Definition.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked"
],
"interface_file": false,
"source_file": "Hacl.Spec.Curve25519.Field51.fst"
} | [
"total"
] | [
"Hacl.Spec.Curve25519.Field51.Definition.felem5",
"Hacl.Spec.Curve25519.Field51.mul_inv_t",
"Lib.IntTypes.uint64",
"FStar.Pervasives.Native.Mktuple5",
"Lib.IntTypes.int_t",
"Lib.IntTypes.U64",
"Lib.IntTypes.SEC",
"Lib.IntTypes.op_Plus_Bang",
"Prims.l_and",
"Prims.eq2",
"Spec.Curve25519.elem",
"Hacl.Spec.Curve25519.Field51.Definition.feval",
"Hacl.Spec.Curve25519.Field51.Definition.felem_fits5",
"Prims.nat",
"FStar.Pervasives.Native.tuple2",
"Hacl.Spec.Curve25519.Field51.carry51",
"Lib.IntTypes.op_Star_Bang",
"Lib.IntTypes.u64",
"Prims.unit",
"Hacl.Spec.Curve25519.Field51.Lemmas.lemma_carry5_simplify",
"Prims._assert",
"Prims.op_LessThan",
"Lib.IntTypes.v",
"Prims.pow2",
"Prims.b2t",
"Prims.bool",
"FStar.Pervasives.assert_norm",
"Prims.op_Equality",
"Prims.pos",
"Hacl.Spec.Curve25519.Field51.Definition.pow51"
] | [] | module Hacl.Spec.Curve25519.Field51
open Lib.Sequence
open Lib.IntTypes
open FStar.Mul
open Spec.Curve25519
open Hacl.Spec.Curve25519.Field51.Definition
open Hacl.Spec.Curve25519.Field51.Lemmas
#reset-options "--z3rlimit 50 --fuel 0 --ifuel 0 --using_facts_from '* -FStar.Seq'"
inline_for_extraction noextract
val fadd5:
f1:felem5{felem_fits5 f1 (1, 2, 1, 1, 1)}
-> f2:felem5{felem_fits5 f2 (1, 2, 1, 1, 1)}
-> out:felem5{felem_fits5 out (2, 4, 2, 2, 2) /\
feval out == fadd (feval f1) (feval f2)}
let fadd5 (f10, f11, f12, f13, f14) (f20, f21, f22, f23, f24) =
let o0 = f10 +! f20 in
let o1 = f11 +! f21 in
let o2 = f12 +! f22 in
let o3 = f13 +! f23 in
let o4 = f14 +! f24 in
let out = (o0, o1, o2, o3, o4) in
FStar.Math.Lemmas.lemma_mod_plus_distr_l
(as_nat5 (f10, f11, f12, f13, f14)) (as_nat5 (f20, f21, f22, f23, f24)) prime;
FStar.Math.Lemmas.lemma_mod_plus_distr_r
((as_nat5 (f10, f11, f12, f13, f14)) % prime) (as_nat5 (f20, f21, f22, f23, f24)) prime;
out
inline_for_extraction noextract
val fadd_zero:
f1:felem5{felem_fits5 f1 (1, 2, 1, 1, 1)}
-> out:felem5{felem_fits5 out (9, 10, 9, 9, 9) /\
feval out == feval f1}
let fadd_zero (f10, f11, f12, f13, f14) =
let o0 = f10 +! u64 0x3fffffffffff68 in
let o1 = f11 +! u64 0x3ffffffffffff8 in
let o2 = f12 +! u64 0x3ffffffffffff8 in
let o3 = f13 +! u64 0x3ffffffffffff8 in
let o4 = f14 +! u64 0x3ffffffffffff8 in
lemma_add_zero (f10, f11, f12, f13, f14);
(o0, o1, o2, o3, o4)
inline_for_extraction noextract
val fsub5:
f1:felem5{felem_fits5 f1 (1, 2, 1, 1, 1)}
-> f2:felem5{felem_fits5 f2 (1, 2, 1, 1, 1)}
-> out:felem5{felem_fits5 out (9, 10, 9, 9, 9) /\
feval out == fsub (feval f1) (feval f2)}
let fsub5 (f10, f11, f12, f13, f14) (f20, f21, f22, f23, f24) =
//assert_norm (0x3fffffffffff68 == pow2 54 - 152);
//assert_norm (0x3ffffffffffff8 == pow2 54 - 8);
let (t0, t1, t2, t3, t4) = fadd_zero (f10, f11, f12, f13, f14) in
let o0 = t0 -! f20 in
let o1 = t1 -! f21 in
let o2 = t2 -! f22 in
let o3 = t3 -! f23 in
let o4 = t4 -! f24 in
let out = (o0, o1, o2, o3, o4) in
FStar.Math.Lemmas.lemma_mod_plus_distr_l
(as_nat5 (t0, t1, t2, t3, t4)) (- as_nat5 (f20, f21, f22, f23, f24)) prime;
lemma_mod_sub_distr ((as_nat5 (t0, t1, t2, t3, t4)) % prime) (as_nat5 (f20, f21, f22, f23, f24)) prime;
out
val lemma_fsub:
f1:felem5{felem_fits5 f1 (1, 2, 1, 1, 1)}
-> f2:felem5{felem_fits5 f2 (1, 2, 1, 1, 1)}
-> Lemma (let (f10, f11, f12, f13, f14) = f1 in
let (f20, f21, f22, f23, f24) = f2 in
let o0 = f10 +! u64 0x3fffffffffff68 -! f20 in
let o1 = f11 +! u64 0x3ffffffffffff8 -! f21 in
let o2 = f12 +! u64 0x3ffffffffffff8 -! f22 in
let o3 = f13 +! u64 0x3ffffffffffff8 -! f23 in
let o4 = f14 +! u64 0x3ffffffffffff8 -! f24 in
let out = (o0, o1, o2, o3, o4) in
out == fsub5 (f10, f11, f12, f13, f14) (f20, f21, f22, f23, f24))
let lemma_fsub f1 f2 = ()
inline_for_extraction noextract
val mul_wide64:
#m1:scale64
-> #m2:scale64
-> x:uint64{felem_fits1 x m1}
-> y:uint64{felem_fits1 y m2 /\ m1 * m2 <= 67108864}
-> z:uint128{uint_v z == uint_v x * uint_v y /\ felem_wide_fits1 z (m1 * m2)}
#push-options "--z3rlimit 5"
let mul_wide64 #m1 #m2 x y =
let open FStar.Math.Lemmas in
lemma_mult_le_left (v x) (v y) (m2 * max51); //v x * v y <= v x * (m2 * max51)
lemma_mult_le_right (m2 * max51) (v x) (m1 * max51); // v x * (m2 * max51) <= (m1 * max51) * (m2 * max51)
paren_mul_right (m1 * max51) m2 max51; //(m1 * max51) * (m2 * max51) = ((m1 * max51) * m2) * max51
paren_mul_right m1 max51 m2; //(m1 * max51) * m2 = m1 * (max51 * m2)
swap_mul max51 m2; //max51 * m2 = m2 * max51
paren_mul_right m1 m2 max51; //m1 * (m2 * max51) = (m1 * m2) * max51
paren_mul_right (m1 * m2) max51 max51; //((m1 * m2) * max51) * max51 = (m1 * m2) * (max51 * max51)
assert (v x * v y <= m1 * max51 * m2 * max51);
assert (v x * v y <= m1 * m2 * max51 * max51);
mul64_wide x y
#pop-options
inline_for_extraction noextract
val smul_felem5:
#m1:scale64
-> #m2:scale64_5
-> u1:uint64{felem_fits1 u1 m1}
-> f2:felem5{felem_fits5 f2 m2 /\ m1 *^ m2 <=* s128x5 67108864}
-> out:felem_wide5{felem_wide_fits5 out (m1 *^ m2) /\
wide_as_nat5 out == uint_v u1 * as_nat5 f2}
let smul_felem5 #m1 #m2 u1 (f20, f21, f22, f23, f24) =
let (m20, m21, m22, m23, m24) = m2 in
[@inline_let]
let o0 = mul_wide64 #m1 #m20 u1 f20 in
[@inline_let]
let o1 = mul_wide64 #m1 #m21 u1 f21 in
[@inline_let]
let o2 = mul_wide64 #m1 #m22 u1 f22 in
[@inline_let]
let o3 = mul_wide64 #m1 #m23 u1 f23 in
[@inline_let]
let o4 = mul_wide64 #m1 #m24 u1 f24 in
[@inline_let]
let out = (o0, o1, o2, o3, o4) in
lemma_smul_felem5 u1 (f20, f21, f22, f23, f24);
out
inline_for_extraction noextract
val mul_add_wide128:
#m1:scale64
-> #m2:scale64
-> #m3:scale128
-> x:uint64{felem_fits1 x m1}
-> y:uint64{felem_fits1 y m2}
-> z:uint128{felem_wide_fits1 z m3 /\ m3 + m1 * m2 <= 67108864}
-> r:uint128{uint_v r == uint_v z + uint_v x * uint_v y /\ felem_wide_fits1 r (m3 + m1 * m2)}
let mul_add_wide128 #m1 #m2 #m3 x y z =
z +! mul_wide64 #m1 #m2 x y
#push-options "--z3rlimit 100"
inline_for_extraction noextract
val smul_add_felem5:
#m1:scale64
-> #m2:scale64_5
-> #m3:scale128_5
-> u1:uint64{felem_fits1 u1 m1}
-> f2:felem5{felem_fits5 f2 m2}
-> acc1:felem_wide5{felem_wide_fits5 acc1 m3 /\ m3 +* m1 *^ m2 <=* s128x5 67108864}
-> acc2:felem_wide5{
wide_as_nat5 acc2 == wide_as_nat5 acc1 + uint_v u1 * as_nat5 f2 /\
felem_wide_fits5 acc2 (m3 +* m1 *^ m2)}
let smul_add_felem5 #m1 #m2 #m3 u1 (f20, f21, f22, f23, f24) (o0, o1, o2, o3, o4) =
let (m20, m21, m22, m23, m24) = m2 in
let (m30, m31, m32, m33, m34) = m3 in
[@inline_let]
let o0' = mul_add_wide128 #m1 #m20 #m30 u1 f20 o0 in
[@inline_let]
let o1' = mul_add_wide128 #m1 #m21 #m31 u1 f21 o1 in
[@inline_let]
let o2' = mul_add_wide128 #m1 #m22 #m32 u1 f22 o2 in
[@inline_let]
let o3' = mul_add_wide128 #m1 #m23 #m33 u1 f23 o3 in
[@inline_let]
let o4' = mul_add_wide128 #m1 #m24 #m34 u1 f24 o4 in
[@inline_let]
let out = (o0', o1', o2', o3', o4') in
lemma_smul_add_felem5 u1 (f20, f21, f22, f23, f24) (o0, o1, o2, o3, o4);
out
#pop-options
inline_for_extraction noextract
val precomp_r19:
f2:felem5{felem_fits5 f2 (9, 10, 9, 9, 9)}
-> r19:felem5{felem_fits5 r19 (171, 190, 171, 171, 171)}
let precomp_r19 (f20, f21, f22, f23, f24) =
[@inline_let]
let r190 = f20 *! u64 19 in
[@inline_let]
let r191 = f21 *! u64 19 in
[@inline_let]
let r192 = f22 *! u64 19 in
[@inline_let]
let r193 = f23 *! u64 19 in
[@inline_let]
let r194 = f24 *! u64 19 in
(r190, r191, r192, r193, r194)
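// precomp_r19 precomputes 19 * r_i once; mul_felem5 below uses these to wrap partial
// products of weight 2^255 and above back down, since 2^255 == 19 (mod p).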
inline_for_extraction noextract
val mul_felem5:
f1:felem5{felem_fits5 f1 (9, 10, 9, 9, 9)}
-> r:felem5{felem_fits5 r (9, 10, 9, 9, 9)}
-> r19:felem5{felem_fits5 r19 (171, 190, 171, 171, 171) /\ r19 == precomp_r19 r}
-> out:felem_wide5{felem_wide_fits5 out (6579, 4797, 3340, 1881, 423) /\
feval_wide out == fmul (feval f1) (feval r)}
let mul_felem5 (f10, f11, f12, f13, f14) (r0, r1, r2, r3, r4) (r190, r191, r192, r193, r194) =
let (o0, o1, o2, o3, o4) = smul_felem5 #9 #(9, 10, 9, 9, 9) f10 (r0, r1, r2, r3, r4) in
let (o0, o1, o2, o3, o4) = smul_add_felem5 #10 #(171, 9, 10, 9, 9) #(81, 90, 81, 81, 81)
f11 (r194, r0, r1, r2, r3) (o0, o1, o2, o3, o4) in
let (o0, o1, o2, o3, o4) = smul_add_felem5 #9 #(171, 171, 9, 10, 9) #(1791, 180, 181, 171, 171)
f12 (r193, r194, r0, r1, r2) (o0, o1, o2, o3, o4) in
let (o0, o1, o2, o3, o4) = smul_add_felem5 #9 #(171, 171, 171, 9, 10) #(3330, 1719, 262, 261, 252)
f13 (r192, r193, r194, r0, r1) (o0, o1, o2, o3, o4) in
let (o0, o1, o2, o3, o4) = smul_add_felem5 #9 #(190, 171, 171, 171, 9) #(4869, 3258, 1801, 342, 342)
f14 (r191, r192, r193, r194, r0) (o0, o1, o2, o3, o4) in
lemma_fmul5 (f10, f11, f12, f13, f14) (r0, r1, r2, r3, r4);
(o0, o1, o2, o3, o4)
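// Schoolbook multiplication in radix 2^51, one row per limb of f1. Limbs of r that would
// land at weight 2^255 or above enter a row pre-multiplied by 19: e.g. in the f11 row,
// f11*2^51 * r4*2^204 = f11*r4*2^255 == 19*f11*r4 (mod p), hence (r194, r0, r1, r2, r3).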
inline_for_extraction noextract
val carry51:
l:uint64
-> cin:uint64
-> Pure (uint64 & uint64)
(requires felem_fits1 l 2 /\ felem_fits1 cin 8190)
(ensures fun (l0, l1) ->
v l + v cin == v l1 * pow2 51 + v l0 /\
felem_fits1 l0 1 /\ uint_v l1 < pow2 13)
let carry51 l cin =
let l' = l +! cin in
lemma_carry51 l cin;
(l' &. mask51, l' >>. 51ul)
inline_for_extraction noextract
val carry51_wide:
#m:scale64{m < 8192}
-> l:uint128{felem_wide_fits1 l m}
-> cin:uint64
-> Pure (uint64 & uint64)
(requires True)
(ensures fun (l0, l1) ->
v l + v cin == v l1 * pow2 51 + v l0 /\
felem_fits1 l0 1 /\ felem_fits1 l1 (m + 1))
let carry51_wide #m l cin =
let l' = l +! to_u128 cin in
lemma_carry51_wide #m l cin;
((to_u64 l') &. mask51, to_u64 (l' >>. 51ul))
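// carry51 l cin returns (low, carry) with low = (l + cin) & mask51, i.e. the low 51 bits
// (mask51 = 2^51 - 1), and carry = (l + cin) >> 51, so l + cin == carry * 2^51 + low;
// carry51_wide does the same for a 128-bit limb.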
let mul_inv_t (f:felem5) =
let (o0, o1, o2, o3, o4) = f in
if v o1 >= pow2 51 then
felem_fits5 f (1, 2, 1, 1, 1) /\ v o1 % pow2 51 < 8192
else felem_fits5 f (1, 1, 1, 1, 1)
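// mul_inv_t is the invariant established by the carry pass below: every limb fits in
// 51 bits, except possibly limb 1, which may hold one extra carry (at most twice the
// limb bound) whose low 51 bits are then smaller than 8192.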
#push-options "--ifuel 1"
val lemma_mul_inv:
f:felem5{felem_fits5 f (1, 1, 1, 1, 1)}
-> cin:uint64{v cin < pow2 51}
-> Lemma
(let (i0, i1, i2, i3, i4) = f in
assert_norm (pow51 = pow2 51);
let i1' = i1 +! cin in
let out = (i0, i1', i2, i3, i4) in
if (v i1 + v cin) / pow2 51 > 0 then
felem_fits5 out (1, 2, 1, 1, 1) /\
(v i1 + v cin) % pow2 51 < v cin
else felem_fits5 out (1, 1, 1, 1, 1))
let lemma_mul_inv f cin =
assert_norm (pow51 = pow2 51)
#pop-options
#push-options "--z3rlimit 100"
inline_for_extraction noextract
val carry_wide5:
inp:felem_wide5{felem_wide_fits5 inp (6579, 4797, 3340, 1881, 423)}
-> Pure felem5
(requires True)
(ensures fun out ->
mul_inv_t out /\ feval out == feval_wide inp)
let carry_wide5 (i0, i1, i2, i3, i4) =
assert_norm (6579 < pow2 13);
assert_norm (pow2 13 < max51);
let tmp0, c0 = carry51_wide #6579 i0 (u64 0) in
let tmp1, c1 = carry51_wide #4797 i1 c0 in
let tmp2, c2 = carry51_wide #3340 i2 c1 in
let tmp3, c3 = carry51_wide #1881 i3 c2 in
let tmp4, c4 = carry51_wide #423 i4 c3 in
lemma_carry5_simplify c0 c1 c2 c3 c4 tmp0 tmp1 tmp2 tmp3 tmp4;
let tmp0', c5 = carry51 tmp0 (c4 *! u64 19) in
[@inline_let]
let tmp1' = tmp1 +! c5 in
lemma_mul_inv (tmp0', tmp1, tmp2, tmp3, tmp4) c5;
(tmp0', tmp1', tmp2, tmp3, tmp4)
#pop-options
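// carry_wide5 performs one left-to-right carry pass over the wide limbs; the carry out
// of limb 4 has weight 2^255, so it is folded back into limb 0 multiplied by 19
// (2^255 == 19 mod p), followed by one final carry from limb 0 into limb 1.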
inline_for_extraction noextract
val fmul5:
f1:felem5{felem_fits5 f1 (9, 10, 9, 9, 9)}
-> f2:felem5{felem_fits5 f2 (9, 10, 9, 9, 9)}
-> out:felem5{mul_inv_t out /\
feval out == fmul (feval f1) (feval f2)}
let fmul5 (f10, f11, f12, f13, f14) (f20, f21, f22, f23, f24) =
let (tmp0, tmp1, tmp2, tmp3, tmp4) = precomp_r19 (f20, f21, f22, f23, f24) in
let (tmp_w0, tmp_w1, tmp_w2, tmp_w3, tmp_w4) =
mul_felem5 (f10, f11, f12, f13, f14) (f20, f21, f22, f23, f24) (tmp0, tmp1, tmp2, tmp3, tmp4) in
carry_wide5 (tmp_w0, tmp_w1, tmp_w2, tmp_w3, tmp_w4)
inline_for_extraction noextract
val fmul25:
f1:felem5{felem_fits5 f1 (9, 10, 9, 9, 9)}
-> f2:felem5{felem_fits5 f2 (9, 10, 9, 9, 9)}
-> f3:felem5{felem_fits5 f3 (9, 10, 9, 9, 9)}
-> f4:felem5{felem_fits5 f4 (9, 10, 9, 9, 9)}
-> Pure (felem5 & felem5)
(requires True)
(ensures fun (out1, out2) ->
mul_inv_t out1 /\ mul_inv_t out2 /\
feval out1 == fmul (feval f1) (feval f2) /\
feval out2 == fmul (feval f3) (feval f4))
let fmul25 (f10, f11, f12, f13, f14) (f20, f21, f22, f23, f24) (f30, f31, f32, f33, f34) (f40, f41, f42, f43, f44) =
let (tmp10, tmp11, tmp12, tmp13, tmp14) = precomp_r19 (f20, f21, f22, f23, f24) in
let (tmp20, tmp21, tmp22, tmp23, tmp24) = precomp_r19 (f40, f41, f42, f43, f44) in
let (tmp_w10, tmp_w11, tmp_w12, tmp_w13, tmp_w14) =
mul_felem5 (f10, f11, f12, f13, f14) (f20, f21, f22, f23, f24) (tmp10, tmp11, tmp12, tmp13, tmp14) in
let (tmp_w20, tmp_w21, tmp_w22, tmp_w23, tmp_w24) =
mul_felem5 (f30, f31, f32, f33, f34) (f40, f41, f42, f43, f44) (tmp20, tmp21, tmp22, tmp23, tmp24) in
let (o10,o11,o12,o13,o14) = carry_wide5 (tmp_w10, tmp_w11, tmp_w12, tmp_w13, tmp_w14) in
let (o20,o21,o22,o23,o24) = carry_wide5 (tmp_w20, tmp_w21, tmp_w22, tmp_w23, tmp_w24) in
((o10,o11,o12,o13,o14), (o20,o21,o22,o23,o24))
inline_for_extraction noextract
val fmul15:
f1:felem5{felem_fits5 f1 (9, 10, 9, 9, 9)}
-> f2:uint64{felem_fits1 f2 1}
-> Pure felem5
(requires True)
(ensures fun out ->
mul_inv_t out /\ feval out == (feval f1 * v f2) % prime)
let fmul15 (f10, f11, f12, f13, f14) f2 =
let (tmp_w0, tmp_w1, tmp_w2, tmp_w3, tmp_w4) =
smul_felem5 #1 #(9, 10, 9, 9, 9) f2 (f10, f11, f12, f13, f14) in
let out = (tmp_w0, tmp_w1, tmp_w2, tmp_w3, tmp_w4) in
[@inline_let]
let res = carry_wide5 (tmp_w0, tmp_w1, tmp_w2, tmp_w3, tmp_w4) in
FStar.Math.Lemmas.lemma_mod_mul_distr_l (as_nat5 (f10, f11, f12, f13, f14)) (uint_v f2) prime;
assert (feval res == feval_wide (tmp_w0, tmp_w1, tmp_w2, tmp_w3, tmp_w4));
assert (feval res == (wide_as_nat5 (tmp_w0, tmp_w1, tmp_w2, tmp_w3, tmp_w4)) % prime);
assert (feval res == (v f2 * as_nat5 (f10, f11, f12, f13, f14)) % prime);
FStar.Math.Lemmas.swap_mul (v f2) (as_nat5 (f10, f11, f12, f13, f14));
assert (feval res == (as_nat5 (f10, f11, f12, f13, f14) * v f2) % prime);
res
// inline_for_extraction noextract
// val fsqr_felem5:
// f:felem5{felem_fits5 f (9, 10, 9, 9, 9)}
// -> out:felem_wide5{felem_wide_fits5 out (6579, 4797, 3340, 1881, 423)}
// let fsqr_felem5 (f0, f1, f2, f3, f4) =
// let (o0, o1, o2, o3, o4) = smul_felem5 #9 #(9, 20, 18, 18, 18) f0 (f0, u64 2 *! f1, u64 2 *! f2, u64 2 *! f3, u64 2 *! f4) in
// let (o0, o1, o2, o3, o4) = smul_add_felem5 #10 #(342, 0, 10, 18, 18) #(81, 180, 162, 162, 162)
// f1 (u64 38 *! f4, u64 0, f1, u64 2 *! f2, u64 2 *! f3) (o0, o1, o2, o3, o4) in
// let (o0, o1, o2, o3, o4) = smul_add_felem5 #9 #(342, 342, 0, 0, 9) #(3501, 180, 262, 342, 342)
// f2 (u64 38 *! f3, u64 38 *! f4, u64 0, u64 0, f2) (o0, o1, o2, o3, o4) in
// let (o0, o1, o2, o3, o4) = smul_add_felem5 #9 #(0, 171, 342, 0, 0) #(6579, 3258, 262, 342, 423)
// f3 (u64 0, u64 19 *. f3, u64 38 *. f4, u64 0, u64 0) (o0, o1, o2, o3, o4) in
// let (o0, o1, o2, o3, o4) = smul_add_felem5 #9 #(0, 0, 0, 171, 0) #(6579, 4797, 3340, 342, 423)
// f4 (u64 0, u64 0, u64 0, u64 19 *. f4, u64 0) (o0, o1, o2, o3, o4) in
// (o0, o1, o2, o3, o4)
inline_for_extraction noextract
val mul64_wide_add3:
#m0:scale64 -> #m1:scale64 -> #m2:scale64
-> #m3:scale64 -> #m4:scale64 -> #m5:scale64
-> a0:uint64{felem_fits1 a0 m0}
-> a1:uint64{felem_fits1 a1 m1}
-> b0:uint64{felem_fits1 b0 m2}
-> b1:uint64{felem_fits1 b1 m3}
-> c0:uint64{felem_fits1 c0 m4}
-> c1:uint64{felem_fits1 c1 m5} ->
Pure uint128
(requires m0 * m1 + m2 * m3 + m4 * m5 < 8192)
(ensures fun res ->
felem_wide_fits1 res (m0 * m1 + m2 * m3 + m4 * m5) /\
v res == v a0 * v a1 + v b0 * v b1 + v c0 * v c1)
let mul64_wide_add3 #m0 #m1 #m2 #m3 #m4 #m5 a0 a1 b0 b1 c0 c1 =
assert_norm (pow2 13 = 8192);
mul64_wide_add3_lemma #m0 #m1 #m2 #m3 #m4 #m5 a0 a1 b0 b1 c0 c1;
mul64_wide a0 a1 +! mul64_wide b0 b1 +! mul64_wide c0 c1
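// The precondition m0*m1 + m2*m3 + m4*m5 < 8192 = 2^13 bounds the three-product sum by
// 2^13 * 2^51 * 2^51 = 2^115, well within a uint128.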
inline_for_extraction noextract
val fsqr_felem5:
f:felem5{felem_fits5 f (9, 10, 9, 9, 9)}
-> Pure felem_wide5
(requires True)
(ensures fun out ->
felem_wide_fits5 out (6579, 4797, 3340, 1881, 423) /\
feval_wide out == fmul (feval f) (feval f))
let fsqr_felem5 (f0, f1, f2, f3, f4) =
assert_norm (pow2 13 = 8192);
let d0 = u64 2 *! f0 in
let d1 = u64 2 *! f1 in
let d2 = u64 38 *! f2 in
let d3 = u64 19 *! f3 in
let d419 = u64 19 *! f4 in
let d4 = u64 2 *! d419 in
let s0 = mul64_wide_add3 #9 #9 #342 #10 #342 #9 f0 f0 d4 f1 d2 f3 in
let s1 = mul64_wide_add3 #18 #10 #342 #9 #171 #9 d0 f1 d4 f2 d3 f3 in
let s2 = mul64_wide_add3 #18 #9 #10 #10 #342 #9 d0 f2 f1 f1 d4 f3 in
let s3 = mul64_wide_add3 #18 #9 #20 #9 #9 #171 d0 f3 d1 f2 f4 d419 in
let s4 = mul64_wide_add3 #18 #9 #20 #9 #9 #9 d0 f4 d1 f3 f2 f2 in
lemma_fmul_fsqr5 (f0, f1, f2, f3, f4);
(s0, s1, s2, s3, s4)
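// Squaring follows the same row structure as mul_felem5 but exploits symmetry: each
// cross term f_i * f_j (i <> j) occurs twice, hence the doubled d0, d1, d4, while the
// factors 19 and 38 = 2*19 implement the 2^255 == 19 (mod p) wrap-around.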
inline_for_extraction noextract
val fsqr5:
f:felem5{felem_fits5 f (9, 10, 9, 9, 9)}
-> out:felem5{mul_inv_t out /\ feval out == fmul (feval f) (feval f)}
let fsqr5 (f0, f1, f2, f3, f4) =
let (o0, o1, o2, o3, o4) = fsqr_felem5 (f0, f1, f2, f3, f4) in
carry_wide5 (o0, o1, o2, o3, o4)
inline_for_extraction noextract
val fsqr25:
f1:felem5{felem_fits5 f1 (9, 10, 9, 9, 9)}
-> f2:felem5{felem_fits5 f2 (9, 10, 9, 9, 9)}
-> Pure (felem5 & felem5)
(requires True)
(ensures fun (out1, out2) ->
mul_inv_t out1 /\
mul_inv_t out2 /\
feval out1 == fmul (feval f1) (feval f1) /\
feval out2 == fmul (feval f2) (feval f2))
let fsqr25 (f10, f11, f12, f13, f14) (f20, f21, f22, f23, f24) =
let (o10, o11, o12, o13, o14) = fsqr_felem5 (f10, f11, f12, f13, f14) in
let (o20, o21, o22, o23, o24) = fsqr_felem5 (f20, f21, f22, f23, f24) in
let (o10, o11, o12, o13, o14) = carry_wide5 (o10, o11, o12, o13, o14) in
let (o20, o21, o22, o23, o24) = carry_wide5 (o20, o21, o22, o23, o24) in
((o10, o11, o12, o13, o14), (o20, o21, o22, o23, o24))
#set-options "--z3rlimit 100 --max_fuel 2"
inline_for_extraction noextract
val carry_felem5_full:
inp:felem5{mul_inv_t inp}
-> out:felem5{feval out == feval inp /\ felem_fits5 out (1, 1, 1, 1, 1)} | false | false | Hacl.Spec.Curve25519.Field51.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 0,
"initial_ifuel": 0,
"max_fuel": 2,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 100,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val carry_felem5_full:
inp:felem5{mul_inv_t inp}
-> out:felem5{feval out == feval inp /\ felem_fits5 out (1, 1, 1, 1, 1)} | [] | Hacl.Spec.Curve25519.Field51.carry_felem5_full | {
"file_name": "code/curve25519/Hacl.Spec.Curve25519.Field51.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | inp: Hacl.Spec.Curve25519.Field51.Definition.felem5{Hacl.Spec.Curve25519.Field51.mul_inv_t inp}
-> out:
Hacl.Spec.Curve25519.Field51.Definition.felem5
{ Hacl.Spec.Curve25519.Field51.Definition.feval out ==
Hacl.Spec.Curve25519.Field51.Definition.feval inp /\
Hacl.Spec.Curve25519.Field51.Definition.felem_fits5 out (1, 1, 1, 1, 1) } | {
"end_col": 34,
"end_line": 455,
"start_col": 2,
"start_line": 443
} |
Prims.Tot | val ecdh: ecdh_st M51 True | [
{
"abbrev": true,
"full_module": "Hacl.Impl.Curve25519.Field51",
"short_module": "C"
},
{
"abbrev": false,
"full_module": "Hacl.Meta.Curve25519",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Impl.Curve25519.Fields",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Impl.Curve25519.Generic",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let ecdh = generic_ecdh_higher #M51 True scalarmult | val ecdh: ecdh_st M51 True
let ecdh = | false | null | false | generic_ecdh_higher #M51 True scalarmult | {
"checked_file": "Hacl.Curve25519_51.fst.checked",
"dependencies": [
"Spec.Curve25519.fst.checked",
"prims.fst.checked",
"Lib.Buffer.fsti.checked",
"Hacl.Meta.Curve25519.fst.checked",
"Hacl.Impl.Curve25519.Field51.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": true,
"source_file": "Hacl.Curve25519_51.fst"
} | [
"total"
] | [
"Hacl.Meta.Curve25519.generic_ecdh_higher",
"Hacl.Impl.Curve25519.Fields.Core.M51",
"Prims.l_True",
"Hacl.Curve25519_51.scalarmult"
] | [] | module Hacl.Curve25519_51
friend Hacl.Meta.Curve25519
open Hacl.Meta.Curve25519
// The Hacl core.
module C = Hacl.Impl.Curve25519.Field51
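// Each definition below instantiates a generic, field-agnostic combinator from
// Hacl.Meta.Curve25519 with the radix-2^51 field kernels of Hacl.Impl.Curve25519.Field51
// (the M51 index selects that field representation).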
let g25519: g25519_t =
Lib.Buffer.createL_global Spec.Curve25519.basepoint_list
#set-options "--max_fuel 0 --max_ifuel 0 --z3rlimit 100"
let point_add_and_double =
addanddouble_point_add_and_double_higher #M51 True C.fmul C.fsqr2 C.fmul1 C.fmul2 C.fsub C.fadd
let point_double =
addanddouble_point_double_higher #M51 True C.fmul2 C.fmul1 C.fsqr2 C.fsub C.fadd
let montgomery_ladder =
generic_montgomery_ladder_higher #M51 True point_double C.cswap2 point_add_and_double
let fsquare_times = finv_fsquare_times_higher #M51 True C.fsqr
let finv = finv_finv_higher #M51 True C.fmul fsquare_times
let encode_point = generic_encode_point_higher #M51 True C.store_felem C.fmul finv
let scalarmult = generic_scalarmult_higher #M51 True encode_point montgomery_ladder decode_point | false | true | Hacl.Curve25519_51.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 100,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val ecdh: ecdh_st M51 True | [] | Hacl.Curve25519_51.ecdh | {
"file_name": "code/curve25519/Hacl.Curve25519_51.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | Hacl.Impl.Curve25519.Generic.ecdh_st Hacl.Impl.Curve25519.Fields.Core.M51 Prims.l_True | {
"end_col": 51,
"end_line": 24,
"start_col": 11,
"start_line": 24
} |
Prims.Tot | [
{
"abbrev": true,
"full_module": "Hacl.Impl.Curve25519.Field51",
"short_module": "C"
},
{
"abbrev": false,
"full_module": "Hacl.Meta.Curve25519",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Impl.Curve25519.Fields",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Impl.Curve25519.Generic",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let montgomery_ladder =
generic_montgomery_ladder_higher #M51 True point_double C.cswap2 point_add_and_double | let montgomery_ladder = | false | null | false | generic_montgomery_ladder_higher #M51 True point_double C.cswap2 point_add_and_double | {
"checked_file": "Hacl.Curve25519_51.fst.checked",
"dependencies": [
"Spec.Curve25519.fst.checked",
"prims.fst.checked",
"Lib.Buffer.fsti.checked",
"Hacl.Meta.Curve25519.fst.checked",
"Hacl.Impl.Curve25519.Field51.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": true,
"source_file": "Hacl.Curve25519_51.fst"
} | [
"total"
] | [
"Hacl.Meta.Curve25519.generic_montgomery_ladder_higher",
"Hacl.Impl.Curve25519.Fields.Core.M51",
"Prims.l_True",
"Hacl.Curve25519_51.point_double",
"Hacl.Impl.Curve25519.Field51.cswap2",
"Hacl.Curve25519_51.point_add_and_double"
] | [] | module Hacl.Curve25519_51
friend Hacl.Meta.Curve25519
open Hacl.Meta.Curve25519
// The Hacl core.
module C = Hacl.Impl.Curve25519.Field51
let g25519: g25519_t =
Lib.Buffer.createL_global Spec.Curve25519.basepoint_list
#set-options "--max_fuel 0 --max_ifuel 0 --z3rlimit 100"
let point_add_and_double =
addanddouble_point_add_and_double_higher #M51 True C.fmul C.fsqr2 C.fmul1 C.fmul2 C.fsub C.fadd
let point_double =
addanddouble_point_double_higher #M51 True C.fmul2 C.fmul1 C.fsqr2 C.fsub C.fadd | false | true | Hacl.Curve25519_51.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 100,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val montgomery_ladder : Hacl.Meta.Curve25519.generic_montgomery_ladder_higher_t Prims.l_True | [] | Hacl.Curve25519_51.montgomery_ladder | {
"file_name": "code/curve25519/Hacl.Curve25519_51.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | Hacl.Meta.Curve25519.generic_montgomery_ladder_higher_t Prims.l_True | {
"end_col": 87,
"end_line": 18,
"start_col": 2,
"start_line": 18
} |
|
Prims.Tot | [
{
"abbrev": true,
"full_module": "Hacl.Impl.Curve25519.Field51",
"short_module": "C"
},
{
"abbrev": false,
"full_module": "Hacl.Meta.Curve25519",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Impl.Curve25519.Fields",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Impl.Curve25519.Generic",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let point_add_and_double =
addanddouble_point_add_and_double_higher #M51 True C.fmul C.fsqr2 C.fmul1 C.fmul2 C.fsub C.fadd | let point_add_and_double = | false | null | false | addanddouble_point_add_and_double_higher #M51 True C.fmul C.fsqr2 C.fmul1 C.fmul2 C.fsub C.fadd | {
"checked_file": "Hacl.Curve25519_51.fst.checked",
"dependencies": [
"Spec.Curve25519.fst.checked",
"prims.fst.checked",
"Lib.Buffer.fsti.checked",
"Hacl.Meta.Curve25519.fst.checked",
"Hacl.Impl.Curve25519.Field51.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": true,
"source_file": "Hacl.Curve25519_51.fst"
} | [
"total"
] | [
"Hacl.Meta.Curve25519.addanddouble_point_add_and_double_higher",
"Hacl.Impl.Curve25519.Fields.Core.M51",
"Prims.l_True",
"Hacl.Impl.Curve25519.Field51.fmul",
"Hacl.Impl.Curve25519.Field51.fsqr2",
"Hacl.Impl.Curve25519.Field51.fmul1",
"Hacl.Impl.Curve25519.Field51.fmul2",
"Hacl.Impl.Curve25519.Field51.fsub",
"Hacl.Impl.Curve25519.Field51.fadd"
] | [] | module Hacl.Curve25519_51
friend Hacl.Meta.Curve25519
open Hacl.Meta.Curve25519
// The Hacl core.
module C = Hacl.Impl.Curve25519.Field51
let g25519: g25519_t =
Lib.Buffer.createL_global Spec.Curve25519.basepoint_list
#set-options "--max_fuel 0 --max_ifuel 0 --z3rlimit 100" | false | true | Hacl.Curve25519_51.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 100,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val point_add_and_double : Hacl.Meta.Curve25519.addanddouble_point_add_and_double_higher_t Prims.l_True | [] | Hacl.Curve25519_51.point_add_and_double | {
"file_name": "code/curve25519/Hacl.Curve25519_51.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | Hacl.Meta.Curve25519.addanddouble_point_add_and_double_higher_t Prims.l_True | {
"end_col": 97,
"end_line": 14,
"start_col": 2,
"start_line": 14
} |
|
Prims.Tot | val secret_to_public: secret_to_public_st M51 True | [
{
"abbrev": true,
"full_module": "Hacl.Impl.Curve25519.Field51",
"short_module": "C"
},
{
"abbrev": false,
"full_module": "Hacl.Meta.Curve25519",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Impl.Curve25519.Fields",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Impl.Curve25519.Generic",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let secret_to_public = generic_secret_to_public_higher #M51 True scalarmult g25519 | val secret_to_public: secret_to_public_st M51 True
let secret_to_public = | false | null | false | generic_secret_to_public_higher #M51 True scalarmult g25519 | {
"checked_file": "Hacl.Curve25519_51.fst.checked",
"dependencies": [
"Spec.Curve25519.fst.checked",
"prims.fst.checked",
"Lib.Buffer.fsti.checked",
"Hacl.Meta.Curve25519.fst.checked",
"Hacl.Impl.Curve25519.Field51.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": true,
"source_file": "Hacl.Curve25519_51.fst"
} | [
"total"
] | [
"Hacl.Meta.Curve25519.generic_secret_to_public_higher",
"Hacl.Impl.Curve25519.Fields.Core.M51",
"Prims.l_True",
"Hacl.Curve25519_51.scalarmult",
"Hacl.Curve25519_51.g25519"
] | [] | module Hacl.Curve25519_51
friend Hacl.Meta.Curve25519
open Hacl.Meta.Curve25519
// The Hacl core.
module C = Hacl.Impl.Curve25519.Field51
let g25519: g25519_t =
Lib.Buffer.createL_global Spec.Curve25519.basepoint_list
#set-options "--max_fuel 0 --max_ifuel 0 --z3rlimit 100"
let point_add_and_double =
addanddouble_point_add_and_double_higher #M51 True C.fmul C.fsqr2 C.fmul1 C.fmul2 C.fsub C.fadd
let point_double =
addanddouble_point_double_higher #M51 True C.fmul2 C.fmul1 C.fsqr2 C.fsub C.fadd
let montgomery_ladder =
generic_montgomery_ladder_higher #M51 True point_double C.cswap2 point_add_and_double
let fsquare_times = finv_fsquare_times_higher #M51 True C.fsqr
let finv = finv_finv_higher #M51 True C.fmul fsquare_times
let encode_point = generic_encode_point_higher #M51 True C.store_felem C.fmul finv | false | true | Hacl.Curve25519_51.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 100,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val secret_to_public: secret_to_public_st M51 True | [] | Hacl.Curve25519_51.secret_to_public | {
"file_name": "code/curve25519/Hacl.Curve25519_51.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | Hacl.Impl.Curve25519.Generic.secret_to_public_st Hacl.Impl.Curve25519.Fields.Core.M51 Prims.l_True | {
"end_col": 82,
"end_line": 23,
"start_col": 23,
"start_line": 23
} |
Prims.Tot | [
{
"abbrev": true,
"full_module": "Hacl.Impl.Curve25519.Field51",
"short_module": "C"
},
{
"abbrev": false,
"full_module": "Hacl.Meta.Curve25519",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Impl.Curve25519.Fields",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Impl.Curve25519.Generic",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let finv = finv_finv_higher #M51 True C.fmul fsquare_times | let finv = | false | null | false | finv_finv_higher #M51 True C.fmul fsquare_times | {
"checked_file": "Hacl.Curve25519_51.fst.checked",
"dependencies": [
"Spec.Curve25519.fst.checked",
"prims.fst.checked",
"Lib.Buffer.fsti.checked",
"Hacl.Meta.Curve25519.fst.checked",
"Hacl.Impl.Curve25519.Field51.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": true,
"source_file": "Hacl.Curve25519_51.fst"
} | [
"total"
] | [
"Hacl.Meta.Curve25519.finv_finv_higher",
"Hacl.Impl.Curve25519.Fields.Core.M51",
"Prims.l_True",
"Hacl.Impl.Curve25519.Field51.fmul",
"Hacl.Curve25519_51.fsquare_times"
] | [] | module Hacl.Curve25519_51
friend Hacl.Meta.Curve25519
open Hacl.Meta.Curve25519
// The Hacl core.
module C = Hacl.Impl.Curve25519.Field51
let g25519: g25519_t =
Lib.Buffer.createL_global Spec.Curve25519.basepoint_list
#set-options "--max_fuel 0 --max_ifuel 0 --z3rlimit 100"
let point_add_and_double =
addanddouble_point_add_and_double_higher #M51 True C.fmul C.fsqr2 C.fmul1 C.fmul2 C.fsub C.fadd
let point_double =
addanddouble_point_double_higher #M51 True C.fmul2 C.fmul1 C.fsqr2 C.fsub C.fadd
let montgomery_ladder =
generic_montgomery_ladder_higher #M51 True point_double C.cswap2 point_add_and_double | false | true | Hacl.Curve25519_51.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 100,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val finv : Hacl.Meta.Curve25519.finv_finv_higher_t Prims.l_True | [] | Hacl.Curve25519_51.finv | {
"file_name": "code/curve25519/Hacl.Curve25519_51.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | Hacl.Meta.Curve25519.finv_finv_higher_t Prims.l_True | {
"end_col": 58,
"end_line": 20,
"start_col": 11,
"start_line": 20
} |
|
Prims.Tot | [
{
"abbrev": true,
"full_module": "Hacl.Impl.Curve25519.Field51",
"short_module": "C"
},
{
"abbrev": false,
"full_module": "Hacl.Meta.Curve25519",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Impl.Curve25519.Fields",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Impl.Curve25519.Generic",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let point_double =
addanddouble_point_double_higher #M51 True C.fmul2 C.fmul1 C.fsqr2 C.fsub C.fadd | let point_double = | false | null | false | addanddouble_point_double_higher #M51 True C.fmul2 C.fmul1 C.fsqr2 C.fsub C.fadd | {
"checked_file": "Hacl.Curve25519_51.fst.checked",
"dependencies": [
"Spec.Curve25519.fst.checked",
"prims.fst.checked",
"Lib.Buffer.fsti.checked",
"Hacl.Meta.Curve25519.fst.checked",
"Hacl.Impl.Curve25519.Field51.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": true,
"source_file": "Hacl.Curve25519_51.fst"
} | [
"total"
] | [
"Hacl.Meta.Curve25519.addanddouble_point_double_higher",
"Hacl.Impl.Curve25519.Fields.Core.M51",
"Prims.l_True",
"Hacl.Impl.Curve25519.Field51.fmul2",
"Hacl.Impl.Curve25519.Field51.fmul1",
"Hacl.Impl.Curve25519.Field51.fsqr2",
"Hacl.Impl.Curve25519.Field51.fsub",
"Hacl.Impl.Curve25519.Field51.fadd"
] | [] | module Hacl.Curve25519_51
friend Hacl.Meta.Curve25519
open Hacl.Meta.Curve25519
// The Hacl core.
module C = Hacl.Impl.Curve25519.Field51
let g25519: g25519_t =
Lib.Buffer.createL_global Spec.Curve25519.basepoint_list
#set-options "--max_fuel 0 --max_ifuel 0 --z3rlimit 100"
let point_add_and_double =
addanddouble_point_add_and_double_higher #M51 True C.fmul C.fsqr2 C.fmul1 C.fmul2 C.fsub C.fadd | false | true | Hacl.Curve25519_51.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 100,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val point_double : Hacl.Meta.Curve25519.addanddouble_point_double_higher_t Prims.l_True | [] | Hacl.Curve25519_51.point_double | {
"file_name": "code/curve25519/Hacl.Curve25519_51.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | Hacl.Meta.Curve25519.addanddouble_point_double_higher_t Prims.l_True | {
"end_col": 82,
"end_line": 16,
"start_col": 2,
"start_line": 16
} |
|
Prims.Tot | val scalarmult: scalarmult_st M51 True | [
{
"abbrev": true,
"full_module": "Hacl.Impl.Curve25519.Field51",
"short_module": "C"
},
{
"abbrev": false,
"full_module": "Hacl.Meta.Curve25519",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Impl.Curve25519.Fields",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Impl.Curve25519.Generic",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let scalarmult = generic_scalarmult_higher #M51 True encode_point montgomery_ladder decode_point | val scalarmult: scalarmult_st M51 True
let scalarmult = | false | null | false | generic_scalarmult_higher #M51 True encode_point montgomery_ladder decode_point | {
"checked_file": "Hacl.Curve25519_51.fst.checked",
"dependencies": [
"Spec.Curve25519.fst.checked",
"prims.fst.checked",
"Lib.Buffer.fsti.checked",
"Hacl.Meta.Curve25519.fst.checked",
"Hacl.Impl.Curve25519.Field51.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": true,
"source_file": "Hacl.Curve25519_51.fst"
} | [
"total"
] | [
"Hacl.Meta.Curve25519.generic_scalarmult_higher",
"Hacl.Impl.Curve25519.Fields.Core.M51",
"Prims.l_True",
"Hacl.Curve25519_51.encode_point",
"Hacl.Curve25519_51.montgomery_ladder",
"Hacl.Impl.Curve25519.Generic.decode_point"
] | [] | module Hacl.Curve25519_51
friend Hacl.Meta.Curve25519
open Hacl.Meta.Curve25519
// The Hacl core.
module C = Hacl.Impl.Curve25519.Field51
let g25519: g25519_t =
Lib.Buffer.createL_global Spec.Curve25519.basepoint_list
#set-options "--max_fuel 0 --max_ifuel 0 --z3rlimit 100"
let point_add_and_double =
addanddouble_point_add_and_double_higher #M51 True C.fmul C.fsqr2 C.fmul1 C.fmul2 C.fsub C.fadd
let point_double =
addanddouble_point_double_higher #M51 True C.fmul2 C.fmul1 C.fsqr2 C.fsub C.fadd
let montgomery_ladder =
generic_montgomery_ladder_higher #M51 True point_double C.cswap2 point_add_and_double
let fsquare_times = finv_fsquare_times_higher #M51 True C.fsqr
let finv = finv_finv_higher #M51 True C.fmul fsquare_times | false | true | Hacl.Curve25519_51.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 100,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val scalarmult: scalarmult_st M51 True | [] | Hacl.Curve25519_51.scalarmult | {
"file_name": "code/curve25519/Hacl.Curve25519_51.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | Hacl.Impl.Curve25519.Generic.scalarmult_st Hacl.Impl.Curve25519.Fields.Core.M51 Prims.l_True | {
"end_col": 96,
"end_line": 22,
"start_col": 17,
"start_line": 22
} |
Prims.Tot | [
{
"abbrev": true,
"full_module": "Hacl.Impl.Curve25519.Field51",
"short_module": "C"
},
{
"abbrev": false,
"full_module": "Hacl.Meta.Curve25519",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Impl.Curve25519.Fields",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Impl.Curve25519.Generic",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let fsquare_times = finv_fsquare_times_higher #M51 True C.fsqr | let fsquare_times = | false | null | false | finv_fsquare_times_higher #M51 True C.fsqr | {
"checked_file": "Hacl.Curve25519_51.fst.checked",
"dependencies": [
"Spec.Curve25519.fst.checked",
"prims.fst.checked",
"Lib.Buffer.fsti.checked",
"Hacl.Meta.Curve25519.fst.checked",
"Hacl.Impl.Curve25519.Field51.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": true,
"source_file": "Hacl.Curve25519_51.fst"
} | [
"total"
] | [
"Hacl.Meta.Curve25519.finv_fsquare_times_higher",
"Hacl.Impl.Curve25519.Fields.Core.M51",
"Prims.l_True",
"Hacl.Impl.Curve25519.Field51.fsqr"
] | [] | module Hacl.Curve25519_51
friend Hacl.Meta.Curve25519
open Hacl.Meta.Curve25519
// The Hacl core.
module C = Hacl.Impl.Curve25519.Field51
let g25519: g25519_t =
Lib.Buffer.createL_global Spec.Curve25519.basepoint_list
#set-options "--max_fuel 0 --max_ifuel 0 --z3rlimit 100"
let point_add_and_double =
addanddouble_point_add_and_double_higher #M51 True C.fmul C.fsqr2 C.fmul1 C.fmul2 C.fsub C.fadd
let point_double =
addanddouble_point_double_higher #M51 True C.fmul2 C.fmul1 C.fsqr2 C.fsub C.fadd
let montgomery_ladder = | false | true | Hacl.Curve25519_51.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 100,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val fsquare_times : Hacl.Meta.Curve25519.finv_fsquare_times_higher_t Prims.l_True | [] | Hacl.Curve25519_51.fsquare_times | {
"file_name": "code/curve25519/Hacl.Curve25519_51.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | Hacl.Meta.Curve25519.finv_fsquare_times_higher_t Prims.l_True | {
"end_col": 62,
"end_line": 19,
"start_col": 20,
"start_line": 19
} |
|
Prims.Tot | [
{
"abbrev": true,
"full_module": "Hacl.Impl.Curve25519.Field51",
"short_module": "C"
},
{
"abbrev": false,
"full_module": "Hacl.Meta.Curve25519",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Impl.Curve25519.Fields",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Impl.Curve25519.Generic",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let encode_point = generic_encode_point_higher #M51 True C.store_felem C.fmul finv | let encode_point = | false | null | false | generic_encode_point_higher #M51 True C.store_felem C.fmul finv | {
"checked_file": "Hacl.Curve25519_51.fst.checked",
"dependencies": [
"Spec.Curve25519.fst.checked",
"prims.fst.checked",
"Lib.Buffer.fsti.checked",
"Hacl.Meta.Curve25519.fst.checked",
"Hacl.Impl.Curve25519.Field51.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": true,
"source_file": "Hacl.Curve25519_51.fst"
} | [
"total"
] | [
"Hacl.Meta.Curve25519.generic_encode_point_higher",
"Hacl.Impl.Curve25519.Fields.Core.M51",
"Prims.l_True",
"Hacl.Impl.Curve25519.Field51.store_felem",
"Hacl.Impl.Curve25519.Field51.fmul",
"Hacl.Curve25519_51.finv"
] | [] | module Hacl.Curve25519_51
friend Hacl.Meta.Curve25519
open Hacl.Meta.Curve25519
// The Hacl core.
module C = Hacl.Impl.Curve25519.Field51
let g25519: g25519_t =
Lib.Buffer.createL_global Spec.Curve25519.basepoint_list
#set-options "--max_fuel 0 --max_ifuel 0 --z3rlimit 100"
let point_add_and_double =
addanddouble_point_add_and_double_higher #M51 True C.fmul C.fsqr2 C.fmul1 C.fmul2 C.fsub C.fadd
let point_double =
addanddouble_point_double_higher #M51 True C.fmul2 C.fmul1 C.fsqr2 C.fsub C.fadd
let montgomery_ladder =
generic_montgomery_ladder_higher #M51 True point_double C.cswap2 point_add_and_double
let fsquare_times = finv_fsquare_times_higher #M51 True C.fsqr | false | true | Hacl.Curve25519_51.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 100,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val encode_point : Hacl.Meta.Curve25519.generic_encode_point_higher_t Prims.l_True | [] | Hacl.Curve25519_51.encode_point | {
"file_name": "code/curve25519/Hacl.Curve25519_51.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | Hacl.Meta.Curve25519.generic_encode_point_higher_t Prims.l_True | {
"end_col": 82,
"end_line": 21,
"start_col": 19,
"start_line": 21
} |
|
Prims.Tot | val g25519:g25519_t | [
{
"abbrev": true,
"full_module": "Hacl.Impl.Curve25519.Field51",
"short_module": "C"
},
{
"abbrev": false,
"full_module": "Hacl.Meta.Curve25519",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Impl.Curve25519.Fields",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Impl.Curve25519.Generic",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let g25519: g25519_t =
Lib.Buffer.createL_global Spec.Curve25519.basepoint_list | val g25519:g25519_t
let g25519:g25519_t = | false | null | false | Lib.Buffer.createL_global Spec.Curve25519.basepoint_list | {
"checked_file": "Hacl.Curve25519_51.fst.checked",
"dependencies": [
"Spec.Curve25519.fst.checked",
"prims.fst.checked",
"Lib.Buffer.fsti.checked",
"Hacl.Meta.Curve25519.fst.checked",
"Hacl.Impl.Curve25519.Field51.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": true,
"source_file": "Hacl.Curve25519_51.fst"
} | [
"total"
] | [
"Lib.Buffer.createL_global",
"Lib.IntTypes.int_t",
"Lib.IntTypes.U8",
"Lib.IntTypes.PUB",
"Spec.Curve25519.basepoint_list",
"Lib.Buffer.glbuffer",
"Lib.IntTypes.size",
"FStar.Pervasives.normalize_term",
"Lib.IntTypes.size_nat",
"FStar.List.Tot.Base.length"
] | [] | module Hacl.Curve25519_51
friend Hacl.Meta.Curve25519
open Hacl.Meta.Curve25519
// The Hacl core.
module C = Hacl.Impl.Curve25519.Field51 | false | true | Hacl.Curve25519_51.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val g25519:g25519_t | [] | Hacl.Curve25519_51.g25519 | {
"file_name": "code/curve25519/Hacl.Curve25519_51.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | Hacl.Impl.Curve25519.Generic.g25519_t | {
"end_col": 58,
"end_line": 10,
"start_col": 2,
"start_line": 10
} |
Prims.Tot | val normal (#a: Type) (x: a) : a | [
{
"abbrev": true,
"full_module": "FStar.List.Tot",
"short_module": "L"
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let normal (#a: Type) (x: a) : a =
FStar.Pervasives.norm [
iota;
zeta;
delta_only [`%L.fold_right_gtot; `%L.map_gtot];
delta_attr [`%__reduce__];
primops;
simplify
]
x | val normal (#a: Type) (x: a) : a
let normal (#a: Type) (x: a) : a = | false | null | false | FStar.Pervasives.norm [
iota;
zeta;
delta_only [`%L.fold_right_gtot; `%L.map_gtot];
delta_attr [`%__reduce__];
primops;
simplify
]
x | {
"checked_file": "FStar.BigOps.fsti.checked",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.List.Tot.fst.checked"
],
"interface_file": false,
"source_file": "FStar.BigOps.fsti"
} | [
"total"
] | [
"FStar.Pervasives.norm",
"Prims.Cons",
"FStar.Pervasives.norm_step",
"FStar.Pervasives.iota",
"FStar.Pervasives.zeta",
"FStar.Pervasives.delta_only",
"Prims.string",
"Prims.Nil",
"FStar.Pervasives.delta_attr",
"FStar.Pervasives.primops",
"FStar.Pervasives.simplify"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.BigOps
/// This library provides propositional connectives over finite sets
/// expressed as lists, aka "big operators", in analogy with LaTeX
/// usage for \bigand, \bigor, etc.
///
/// The library is designed with a dual usage in mind:
///
/// 1. Normalization: When applied to a list literal, we want
/// {[big_and f [a;b;c]]} to implicitly reduce to [f a /\ f b /\ f c]
///
/// 2. Symbolic manipulation: We provide lemmas of the form
///
/// [big_and f l <==> forall x. L.memP x l ==> f x]
///
/// In this latter form, partially computing [big_and] as a fold over
/// a list is cumbersome for proof. So, we provide variants [big_and']
/// etc., that do not reduce implicitly.
module L = FStar.List.Tot
(** We control reduction using the [delta_attr] feature of the
normalizer. See FStar.Pervasives for how that works. Every term
that is to be reduced is with the [__reduce__] attribute *)
let __reduce__ = ()
(** We wrap [norm] with a module-specific custom usage, triggering
specific reduction steps *)
[@@ __reduce__]
unfold | false | false | FStar.BigOps.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val normal (#a: Type) (x: a) : a | [] | FStar.BigOps.normal | {
"file_name": "ulib/FStar.BigOps.fsti",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | x: a -> a | {
"end_col": 5,
"end_line": 57,
"start_col": 2,
"start_line": 49
} |
Prims.Tot | [
{
"abbrev": true,
"full_module": "FStar.Tactics.V2",
"short_module": "T"
},
{
"abbrev": true,
"full_module": "FStar.List.Tot",
"short_module": "L"
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let symmetric (#a: Type) (f: (a -> a -> Type)) = forall x y. f x y <==> f y x | let symmetric (#a: Type) (f: (a -> a -> Type)) = | false | null | false | forall x y. f x y <==> f y x | {
"checked_file": "FStar.BigOps.fsti.checked",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.List.Tot.fst.checked"
],
"interface_file": false,
"source_file": "FStar.BigOps.fsti"
} | [
"total"
] | [
"Prims.l_Forall",
"Prims.l_iff",
"Prims.logical"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.BigOps
/// This library provides propositional connectives over finite sets
/// expressed as lists, aka "big operators", in analogy with LaTeX
/// usage for \bigand, \bigor, etc.
///
/// The library is designed with a dual usage in mind:
///
/// 1. Normalization: When applied to a list literal, we want
/// {[big_and f [a;b;c]]} to implicitly reduce to [f a /\ f b /\ f c]
///
/// 2. Symbolic manipulation: We provide lemmas of the form
///
/// [big_and f l <==> forall x. L.memP x l ==> f x]
///
/// In this latter form, partially computing [big_and] as a fold over
/// a list is cumbersome for proof. So, we provide variants [big_and']
/// etc., that do not reduce implicitly.
module L = FStar.List.Tot
(** We control reduction using the [delta_attr] feature of the
normalizer. See FStar.Pervasives for how that works. Every term
that is to be reduced is with the [__reduce__] attribute *)
let __reduce__ = ()
(** We wrap [norm] with a module-specific custom usage, triggering
specific reduction steps *)
[@@ __reduce__]
unfold
let normal (#a: Type) (x: a) : a =
FStar.Pervasives.norm [
iota;
zeta;
delta_only [`%L.fold_right_gtot; `%L.map_gtot];
delta_attr [`%__reduce__];
primops;
simplify
]
x
(** A useful lemma to relate terms to their implicitly reducing variants *)
val normal_eq (#a: Type) (f: a) : Lemma (f == normal f)
(**** Map and fold *)
(** A utility that combines map and fold: [map_op' op f l z] maps each
element of [l] by [f] and then combines them using [op] *)
[@@ __reduce__]
let map_op' #a #b #c (op: (b -> c -> GTot c)) (f: (a -> GTot b)) (l: list a) (z: c) : GTot c =
L.fold_right_gtot #a #c l (fun x acc -> (f x) `op` acc) z
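(* Concretely, map_op' op f [a; b; c] z unfolds to f a `op` (f b `op` (f c `op` z)). *)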
(** Equations for [map_op'] showing how it folds over the empty list *)
val map_op'_nil (#a #b #c: Type) (op: (b -> c -> GTot c)) (f: (a -> GTot b)) (z: c)
: Lemma (map_op' op f [] z == z)
(** Equations for [map_op'] showing how it folds over a cons cell *)
val map_op'_cons
(#a #b #c: Type)
(op: (b -> c -> GTot c))
(f: (a -> GTot b))
(hd: a)
(tl: list a)
(z: c)
: Lemma (map_op' op f (hd :: tl) z == (f hd) `op` (map_op' op f tl z))
(**** Conjunction *)
(** [big_and' f l] = [/\_{x in l} f x] *)
[@@ __reduce__]
let big_and' #a (f: (a -> Type)) (l: list a) : Type = map_op' l_and f l True
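(* By the big_and'_cons/nil equations below, big_and' f [a; b] == (f a /\ (f b /\ True)). *)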
(** Equations for [big_and'] showing it to be trivial over the empty list *)
val big_and'_nil (#a: Type) (f: (a -> Type)) : Lemma (big_and' f [] == True)
(** Equations for [big_and'] showing it to be a fold over a list with [/\] *)
val big_and'_cons (#a: Type) (f: (a -> Type)) (hd: a) (tl: list a)
: Lemma (big_and' f (hd :: tl) == (f hd /\ big_and' f tl))
(** [big_and' f l] is a [prop], i.e., it is proof irrelevant.
Note: defining `big_and'` to intrinsically be in `prop`
is also possible, but it's much more tedious in proofs.
This is in part because the [/\] is not defined in prop,
though one can prove that [a /\ b] is a prop.
The discrepancy means that I preferred to prove these
operators in [prop] extrinsically.
*)
val big_and'_prop (#a: Type) (f: (a -> Type)) (l: list a) : Lemma ((big_and' f l) `subtype_of` unit)
(** Interpreting the finite conjunction [big_and f l]
as an infinite conjunction [forall] *)
val big_and'_forall (#a: Type) (f: (a -> Type)) (l: list a)
: Lemma (big_and' f l <==> (forall x. L.memP x l ==> f x))
(** [big_and f l] is an implicitly reducing variant of [big_and']
It is defined in [prop] *)
[@@ __reduce__]
unfold
let big_and #a (f: (a -> Type)) (l: list a) : prop =
big_and'_prop f l;
normal (big_and' f l)
(**** Disjunction *)
(** [big_or f l] = [\/_{x in l} f x] *)
[@@ __reduce__]
let big_or' #a (f: (a -> Type)) (l: list a) : Type = map_op' l_or f l False
(** Equations for [big_or] showing it to be [False] on the empty list *)
val big_or'_nil (#a: Type) (f: (a -> Type)) : Lemma (big_or' f [] == False)
(** Equations for [big_or] showing it to fold over a list *)
val big_or'_cons (#a: Type) (f: (a -> Type)) (hd: a) (tl: list a)
: Lemma (big_or' f (hd :: tl) == (f hd \/ big_or' f tl))
(** [big_or f l] is a `prop`
See the remark above on the style of proof for prop *)
val big_or'_prop (#a: Type) (f: (a -> Type)) (l: list a) : Lemma ((big_or' f l) `subtype_of` unit)
(** Interpreting the finite disjunction [big_or f l]
as an infinite disjunction [exists] *)
val big_or'_exists (#a: Type) (f: (a -> Type)) (l: list a)
: Lemma (big_or' f l <==> (exists x. L.memP x l /\ f x))
(** [big_or f l] is an implicitly reducing variant of [big_or']
It is defined in [prop] *)
[@@ __reduce__]
unfold
let big_or #a (f: (a -> Type)) (l: list a) : prop =
big_or'_prop f l;
normal (big_or' f l)
(**** Pairwise operators *)
/// We provide functions to apply a reflexive, symmetric binary
/// operator to elements in a list [l] pairwise, in a triangle of
/// elements in the square matrix of [l X l]. To illustrate, for a
/// list of [n] elements, we fold the operator over the pairwise
/// elements of the list in top-down, left-to-right order of the
/// diagram below
///
///
/// {[
/// 0 1 2 3 ... n
/// 0
/// 1 x
/// 2 x x
/// 3 x x x
/// . x x x x
/// n x x x x ]}
(** Mapping pairs of elements of [l] using [f] and combining them with
[op]. *)
[@@ __reduce__]
let rec pairwise_op' #a #b (op: (b -> b -> GTot b)) (f: (a -> a -> b)) (l: list a) (z: b) : GTot b =
match l with
| [] -> z
| hd :: tl -> (map_op' op (f hd) tl z) `op` (pairwise_op' op f tl z) | false | false | FStar.BigOps.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val symmetric : f: (_: a -> _: a -> Type0) -> Prims.logical | [] | FStar.BigOps.symmetric | {
"file_name": "ulib/FStar.BigOps.fsti",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | f: (_: a -> _: a -> Type0) -> Prims.logical | {
"end_col": 77,
"end_line": 183,
"start_col": 49,
"start_line": 183
} |
|
Prims.GTot | val map_op' (#a #b #c: _) (op: (b -> c -> GTot c)) (f: (a -> GTot b)) (l: list a) (z: c) : GTot c | [
{
"abbrev": true,
"full_module": "FStar.Tactics.V2",
"short_module": "T"
},
{
"abbrev": true,
"full_module": "FStar.List.Tot",
"short_module": "L"
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let map_op' #a #b #c (op: (b -> c -> GTot c)) (f: (a -> GTot b)) (l: list a) (z: c) : GTot c =
L.fold_right_gtot #a #c l (fun x acc -> (f x) `op` acc) z | val map_op' (#a #b #c: _) (op: (b -> c -> GTot c)) (f: (a -> GTot b)) (l: list a) (z: c) : GTot c
let map_op' #a #b #c (op: (b -> c -> GTot c)) (f: (a -> GTot b)) (l: list a) (z: c) : GTot c = | false | null | false | L.fold_right_gtot #a #c l (fun x acc -> (f x) `op` acc) z | {
"checked_file": "FStar.BigOps.fsti.checked",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.List.Tot.fst.checked"
],
"interface_file": false,
"source_file": "FStar.BigOps.fsti"
} | [
"sometrivial"
] | [
"Prims.list",
"FStar.List.Tot.Base.fold_right_gtot"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.BigOps
/// This library provides propositional connectives over finite sets
/// expressed as lists, aka "big operators", in analogy with LaTeX
/// usage for \bigand, \bigor, etc.
///
/// The library is designed with a dual usage in mind:
///
/// 1. Normalization: When applied to a list literal, we want
/// {[big_and f [a;b;c]]} to implicilty reduce to [f a /\ f b /\ f c]
///
/// 2. Symbolic manipulation: We provide lemmas of the form
///
/// [big_and f l <==> forall x. L.memP x l ==> f x]
///
/// In this latter form, partially computing [big_and] as a fold over
/// a list is cumbersome for proof. So, we provide variants [big_and']
/// etc., that do not reduce implicitly.
module L = FStar.List.Tot
(** We control reduction using the [delta_attr] feature of the
normalizer. See FStar.Pervasives for how that works. Every term
that is to be reduced is with the [__reduce__] attribute *)
let __reduce__ = ()
(** We wrap [norm] with a module-specific custom usage, triggering
specific reduction steps *)
[@@ __reduce__]
unfold
let normal (#a: Type) (x: a) : a =
FStar.Pervasives.norm [
iota;
zeta;
delta_only [`%L.fold_right_gtot; `%L.map_gtot];
delta_attr [`%__reduce__];
primops;
simplify
]
x
(** A useful lemma to relate terms to their implicilty reducing variants *)
val normal_eq (#a: Type) (f: a) : Lemma (f == normal f)
(**** Map and fold *)
(** A utility that combines map and fold: [map_op' op f l z] maps each
element of [l] by [f] and then combines them using [op] *)
[@@ __reduce__] | false | false | FStar.BigOps.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val map_op' (#a #b #c: _) (op: (b -> c -> GTot c)) (f: (a -> GTot b)) (l: list a) (z: c) : GTot c | [] | FStar.BigOps.map_op' | {
"file_name": "ulib/FStar.BigOps.fsti",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | op: (_: b -> _: c -> Prims.GTot c) -> f: (_: a -> Prims.GTot b) -> l: Prims.list a -> z: c
-> Prims.GTot c | {
"end_col": 59,
"end_line": 68,
"start_col": 2,
"start_line": 68
} |
Prims.Tot | [
{
"abbrev": true,
"full_module": "FStar.Tactics.V2",
"short_module": "T"
},
{
"abbrev": true,
"full_module": "FStar.List.Tot",
"short_module": "L"
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let reflexive (#a: Type) (f: (a -> a -> Type)) = forall x. f x x | let reflexive (#a: Type) (f: (a -> a -> Type)) = | false | null | false | forall x. f x x | {
"checked_file": "FStar.BigOps.fsti.checked",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.List.Tot.fst.checked"
],
"interface_file": false,
"source_file": "FStar.BigOps.fsti"
} | [
"total"
] | [
"Prims.l_Forall",
"Prims.logical"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.BigOps
/// This library provides propositional connectives over finite sets
/// expressed as lists, aka "big operators", in analogy with LaTeX
/// usage for \bigand, \bigor, etc.
///
/// The library is designed with a dual usage in mind:
///
/// 1. Normalization: When applied to a list literal, we want
/// {[big_and f [a;b;c]]} to implicilty reduce to [f a /\ f b /\ f c]
///
/// 2. Symbolic manipulation: We provide lemmas of the form
///
/// [big_and f l <==> forall x. L.memP x l ==> f x]
///
/// In this latter form, partially computing [big_and] as a fold over
/// a list is cumbersome for proof. So, we provide variants [big_and']
/// etc., that do not reduce implicitly.
module L = FStar.List.Tot
(** We control reduction using the [delta_attr] feature of the
normalizer. See FStar.Pervasives for how that works. Every term
that is to be reduced is with the [__reduce__] attribute *)
let __reduce__ = ()
(** We wrap [norm] with a module-specific custom usage, triggering
specific reduction steps *)
[@@ __reduce__]
unfold
let normal (#a: Type) (x: a) : a =
FStar.Pervasives.norm [
iota;
zeta;
delta_only [`%L.fold_right_gtot; `%L.map_gtot];
delta_attr [`%__reduce__];
primops;
simplify
]
x
(** A useful lemma to relate terms to their implicilty reducing variants *)
val normal_eq (#a: Type) (f: a) : Lemma (f == normal f)
(**** Map and fold *)
(** A utility that combines map and fold: [map_op' op f l z] maps each
element of [l] by [f] and then combines them using [op] *)
[@@ __reduce__]
let map_op' #a #b #c (op: (b -> c -> GTot c)) (f: (a -> GTot b)) (l: list a) (z: c) : GTot c =
L.fold_right_gtot #a #c l (fun x acc -> (f x) `op` acc) z
(** Equations for [map_op'] showing how it folds over the empty list *)
val map_op'_nil (#a #b #c: Type) (op: (b -> c -> GTot c)) (f: (a -> GTot b)) (z: c)
: Lemma (map_op' op f [] z == z)
(** Equations for [map_op'] showing how it folds over a cons cell *)
val map_op'_cons
(#a #b #c: Type)
(op: (b -> c -> GTot c))
(f: (a -> GTot b))
(hd: a)
(tl: list a)
(z: c)
: Lemma (map_op' op f (hd :: tl) z == (f hd) `op` (map_op' op f tl z))
(**** Conjunction *)
(** [big_and' f l] = [/\_{x in l} f x] *)
[@@ __reduce__]
let big_and' #a (f: (a -> Type)) (l: list a) : Type = map_op' l_and f l True
(** Equations for [big_and'] showing it to be trivial over the empty list *)
val big_and'_nil (#a: Type) (f: (a -> Type)) : Lemma (big_and' f [] == True)
(** Equations for [big_and'] showing it to be a fold over a list with [/\] *)
val big_and'_cons (#a: Type) (f: (a -> Type)) (hd: a) (tl: list a)
: Lemma (big_and' f (hd :: tl) == (f hd /\ big_and' f tl))
(** [big_and' f l] is a [prop], i.e., it is proof irrelevant.
Note: defining `big_and'` to intrinsically be in `prop`
is also possible, but it's much more tedious in proofs.
This is in part because the [/\] is not defined in prop,
though one can prove that [a /\ b] is a prop.
The discrepancy means that I preferred to prove these
operators in [prop] extrinsically.
*)
val big_and'_prop (#a: Type) (f: (a -> Type)) (l: list a) : Lemma ((big_and' f l) `subtype_of` unit)
(** Interpreting the finite conjunction [big_and f l]
as an infinite conjunction [forall] *)
val big_and'_forall (#a: Type) (f: (a -> Type)) (l: list a)
: Lemma (big_and' f l <==> (forall x. L.memP x l ==> f x))
(** [big_and f l] is an implicitly reducing variant of [big_and']
It is defined in [prop] *)
[@@ __reduce__]
unfold
let big_and #a (f: (a -> Type)) (l: list a) : prop =
big_and'_prop f l;
normal (big_and' f l)
(**** Disjunction *)
(** [big_or f l] = [\/_{x in l} f x] *)
[@@ __reduce__]
let big_or' #a (f: (a -> Type)) (l: list a) : Type = map_op' l_or f l False
(** Equations for [big_or] showing it to be [False] on the empty list *)
val big_or'_nil (#a: Type) (f: (a -> Type)) : Lemma (big_or' f [] == False)
(** Equations for [big_or] showing it to fold over a list *)
val big_or'_cons (#a: Type) (f: (a -> Type)) (hd: a) (tl: list a)
: Lemma (big_or' f (hd :: tl) == (f hd \/ big_or' f tl))
(** [big_or f l] is a `prop`
See the remark above on the style of proof for prop *)
val big_or'_prop (#a: Type) (f: (a -> Type)) (l: list a) : Lemma ((big_or' f l) `subtype_of` unit)
(** Interpreting the finite disjunction [big_or f l]
as an infinite disjunction [exists] *)
val big_or'_exists (#a: Type) (f: (a -> Type)) (l: list a)
: Lemma (big_or' f l <==> (exists x. L.memP x l /\ f x))
(** [big_or f l] is an implicitly reducing variant of [big_or']
It is defined in [prop] *)
[@@ __reduce__]
unfold
let big_or #a (f: (a -> Type)) (l: list a) : prop =
big_or'_prop f l;
normal (big_or' f l)
(**** Pairwise operators *)
/// We provide functions to apply a reflexive, symmetric binary
/// operator to elements in a list [l] pairwise, in a triangle of
/// elements in the square matrix of [l X l]. To illustrate, for a
/// list of [n] elements, we fold the operator over the pairwise
/// elements of the list in top-down, left-to-right order of the
/// diagram below
///
///
/// {[
/// 0 1 2 3 ... n
/// 0
/// 1 x
/// 2 x x
/// 3 x x x
/// . x x x x
/// n x x x x ]}
(** Mapping pairs of elements of [l] using [f] and combining them with
[op]. *)
[@@ __reduce__]
let rec pairwise_op' #a #b (op: (b -> b -> GTot b)) (f: (a -> a -> b)) (l: list a) (z: b) : GTot b =
match l with
| [] -> z
| hd :: tl -> (map_op' op (f hd) tl z) `op` (pairwise_op' op f tl z)
(** [f] is a symmetric relation *)
let symmetric (#a: Type) (f: (a -> a -> Type)) = forall x y. f x y <==> f y x | false | false | FStar.BigOps.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val reflexive : f: (_: a -> _: a -> Type0) -> Prims.logical | [] | FStar.BigOps.reflexive | {
"file_name": "ulib/FStar.BigOps.fsti",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | f: (_: a -> _: a -> Type0) -> Prims.logical | {
"end_col": 64,
"end_line": 186,
"start_col": 49,
"start_line": 186
} |
|
Prims.Tot | [
{
"abbrev": true,
"full_module": "FStar.Tactics.V2",
"short_module": "T"
},
{
"abbrev": true,
"full_module": "FStar.List.Tot",
"short_module": "L"
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let anti_reflexive (#a: Type) (f: (a -> a -> Type)) = forall x. ~(f x x) | let anti_reflexive (#a: Type) (f: (a -> a -> Type)) = | false | null | false | forall x. ~(f x x) | {
"checked_file": "FStar.BigOps.fsti.checked",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.List.Tot.fst.checked"
],
"interface_file": false,
"source_file": "FStar.BigOps.fsti"
} | [
"total"
] | [
"Prims.l_Forall",
"Prims.l_not",
"Prims.logical"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.BigOps
/// This library provides propositional connectives over finite sets
/// expressed as lists, aka "big operators", in analogy with LaTeX
/// usage for \bigand, \bigor, etc.
///
/// The library is designed with a dual usage in mind:
///
/// 1. Normalization: When applied to a list literal, we want
/// {[big_and f [a;b;c]]} to implicilty reduce to [f a /\ f b /\ f c]
///
/// 2. Symbolic manipulation: We provide lemmas of the form
///
/// [big_and f l <==> forall x. L.memP x l ==> f x]
///
/// In this latter form, partially computing [big_and] as a fold over
/// a list is cumbersome for proof. So, we provide variants [big_and']
/// etc., that do not reduce implicitly.
module L = FStar.List.Tot
(** We control reduction using the [delta_attr] feature of the
normalizer. See FStar.Pervasives for how that works. Every term
that is to be reduced is with the [__reduce__] attribute *)
let __reduce__ = ()
(** We wrap [norm] with a module-specific custom usage, triggering
specific reduction steps *)
[@@ __reduce__]
unfold
let normal (#a: Type) (x: a) : a =
FStar.Pervasives.norm [
iota;
zeta;
delta_only [`%L.fold_right_gtot; `%L.map_gtot];
delta_attr [`%__reduce__];
primops;
simplify
]
x
(** A useful lemma to relate terms to their implicilty reducing variants *)
val normal_eq (#a: Type) (f: a) : Lemma (f == normal f)
(**** Map and fold *)
(** A utility that combines map and fold: [map_op' op f l z] maps each
element of [l] by [f] and then combines them using [op] *)
[@@ __reduce__]
let map_op' #a #b #c (op: (b -> c -> GTot c)) (f: (a -> GTot b)) (l: list a) (z: c) : GTot c =
L.fold_right_gtot #a #c l (fun x acc -> (f x) `op` acc) z
(** Equations for [map_op'] showing how it folds over the empty list *)
val map_op'_nil (#a #b #c: Type) (op: (b -> c -> GTot c)) (f: (a -> GTot b)) (z: c)
: Lemma (map_op' op f [] z == z)
(** Equations for [map_op'] showing how it folds over a cons cell *)
val map_op'_cons
(#a #b #c: Type)
(op: (b -> c -> GTot c))
(f: (a -> GTot b))
(hd: a)
(tl: list a)
(z: c)
: Lemma (map_op' op f (hd :: tl) z == (f hd) `op` (map_op' op f tl z))
(**** Conjunction *)
(** [big_and' f l] = [/\_{x in l} f x] *)
[@@ __reduce__]
let big_and' #a (f: (a -> Type)) (l: list a) : Type = map_op' l_and f l True
(** Equations for [big_and'] showing it to be trivial over the empty list *)
val big_and'_nil (#a: Type) (f: (a -> Type)) : Lemma (big_and' f [] == True)
(** Equations for [big_and'] showing it to be a fold over a list with [/\] *)
val big_and'_cons (#a: Type) (f: (a -> Type)) (hd: a) (tl: list a)
: Lemma (big_and' f (hd :: tl) == (f hd /\ big_and' f tl))
(** [big_and' f l] is a [prop], i.e., it is proof irrelevant.
Note: defining `big_and'` to intrinsically be in `prop`
is also possible, but it's much more tedious in proofs.
This is in part because the [/\] is not defined in prop,
though one can prove that [a /\ b] is a prop.
The discrepancy means that I preferred to prove these
operators in [prop] extrinsically.
*)
val big_and'_prop (#a: Type) (f: (a -> Type)) (l: list a) : Lemma ((big_and' f l) `subtype_of` unit)
(** Interpreting the finite conjunction [big_and f l]
as an infinite conjunction [forall] *)
val big_and'_forall (#a: Type) (f: (a -> Type)) (l: list a)
: Lemma (big_and' f l <==> (forall x. L.memP x l ==> f x))
(** [big_and f l] is an implicitly reducing variant of [big_and']
It is defined in [prop] *)
[@@ __reduce__]
unfold
let big_and #a (f: (a -> Type)) (l: list a) : prop =
big_and'_prop f l;
normal (big_and' f l)
(**** Disjunction *)
(** [big_or f l] = [\/_{x in l} f x] *)
[@@ __reduce__]
let big_or' #a (f: (a -> Type)) (l: list a) : Type = map_op' l_or f l False
(** Equations for [big_or] showing it to be [False] on the empty list *)
val big_or'_nil (#a: Type) (f: (a -> Type)) : Lemma (big_or' f [] == False)
(** Equations for [big_or] showing it to fold over a list *)
val big_or'_cons (#a: Type) (f: (a -> Type)) (hd: a) (tl: list a)
: Lemma (big_or' f (hd :: tl) == (f hd \/ big_or' f tl))
(** [big_or f l] is a `prop`
See the remark above on the style of proof for prop *)
val big_or'_prop (#a: Type) (f: (a -> Type)) (l: list a) : Lemma ((big_or' f l) `subtype_of` unit)
(** Interpreting the finite disjunction [big_or f l]
as an infinite disjunction [exists] *)
val big_or'_exists (#a: Type) (f: (a -> Type)) (l: list a)
: Lemma (big_or' f l <==> (exists x. L.memP x l /\ f x))
(** [big_or f l] is an implicitly reducing variant of [big_or']
It is defined in [prop] *)
[@@ __reduce__]
unfold
let big_or #a (f: (a -> Type)) (l: list a) : prop =
big_or'_prop f l;
normal (big_or' f l)
(**** Pairwise operators *)
/// We provide functions to apply a reflexive, symmetric binary
/// operator to elements in a list [l] pairwise, in a triangle of
/// elements in the square matrix of [l X l]. To illustrate, for a
/// list of [n] elements, we fold the operator over the pairwise
/// elements of the list in top-down, left-to-right order of the
/// diagram below
///
///
/// {[
/// 0 1 2 3 ... n
/// 0
/// 1 x
/// 2 x x
/// 3 x x x
/// . x x x x
/// n x x x x ]}
(** Mapping pairs of elements of [l] using [f] and combining them with
[op]. *)
[@@ __reduce__]
let rec pairwise_op' #a #b (op: (b -> b -> GTot b)) (f: (a -> a -> b)) (l: list a) (z: b) : GTot b =
match l with
| [] -> z
| hd :: tl -> (map_op' op (f hd) tl z) `op` (pairwise_op' op f tl z)
(** [f] is a symmetric relation *)
let symmetric (#a: Type) (f: (a -> a -> Type)) = forall x y. f x y <==> f y x
(** [f] is a reflexive relation *)
let reflexive (#a: Type) (f: (a -> a -> Type)) = forall x. f x x | false | false | FStar.BigOps.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val anti_reflexive : f: (_: a -> _: a -> Type0) -> Prims.logical | [] | FStar.BigOps.anti_reflexive | {
"file_name": "ulib/FStar.BigOps.fsti",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | f: (_: a -> _: a -> Type0) -> Prims.logical | {
"end_col": 72,
"end_line": 189,
"start_col": 54,
"start_line": 189
} |
|
Prims.GTot | val pairwise_op' (#a #b: _) (op: (b -> b -> GTot b)) (f: (a -> a -> b)) (l: list a) (z: b) : GTot b | [
{
"abbrev": true,
"full_module": "FStar.Tactics.V2",
"short_module": "T"
},
{
"abbrev": true,
"full_module": "FStar.List.Tot",
"short_module": "L"
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let rec pairwise_op' #a #b (op: (b -> b -> GTot b)) (f: (a -> a -> b)) (l: list a) (z: b) : GTot b =
match l with
| [] -> z
| hd :: tl -> (map_op' op (f hd) tl z) `op` (pairwise_op' op f tl z) | val pairwise_op' (#a #b: _) (op: (b -> b -> GTot b)) (f: (a -> a -> b)) (l: list a) (z: b) : GTot b
let rec pairwise_op' #a #b (op: (b -> b -> GTot b)) (f: (a -> a -> b)) (l: list a) (z: b) : GTot b = | false | null | false | match l with
| [] -> z
| hd :: tl -> (map_op' op (f hd) tl z) `op` (pairwise_op' op f tl z) | {
"checked_file": "FStar.BigOps.fsti.checked",
"dependencies": [
"prims.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.List.Tot.fst.checked"
],
"interface_file": false,
"source_file": "FStar.BigOps.fsti"
} | [
"sometrivial"
] | [
"Prims.list",
"FStar.BigOps.map_op'",
"FStar.BigOps.pairwise_op'"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.BigOps
/// This library provides propositional connectives over finite sets
/// expressed as lists, aka "big operators", in analogy with LaTeX
/// usage for \bigand, \bigor, etc.
///
/// The library is designed with a dual usage in mind:
///
/// 1. Normalization: When applied to a list literal, we want
/// {[big_and f [a;b;c]]} to implicilty reduce to [f a /\ f b /\ f c]
///
/// 2. Symbolic manipulation: We provide lemmas of the form
///
/// [big_and f l <==> forall x. L.memP x l ==> f x]
///
/// In this latter form, partially computing [big_and] as a fold over
/// a list is cumbersome for proof. So, we provide variants [big_and']
/// etc., that do not reduce implicitly.
module L = FStar.List.Tot
(** We control reduction using the [delta_attr] feature of the
normalizer. See FStar.Pervasives for how that works. Every term
that is to be reduced is with the [__reduce__] attribute *)
let __reduce__ = ()
(** We wrap [norm] with a module-specific custom usage, triggering
specific reduction steps *)
[@@ __reduce__]
unfold
let normal (#a: Type) (x: a) : a =
FStar.Pervasives.norm [
iota;
zeta;
delta_only [`%L.fold_right_gtot; `%L.map_gtot];
delta_attr [`%__reduce__];
primops;
simplify
]
x
(** A useful lemma to relate terms to their implicilty reducing variants *)
val normal_eq (#a: Type) (f: a) : Lemma (f == normal f)
(**** Map and fold *)
(** A utility that combines map and fold: [map_op' op f l z] maps each
element of [l] by [f] and then combines them using [op] *)
[@@ __reduce__]
let map_op' #a #b #c (op: (b -> c -> GTot c)) (f: (a -> GTot b)) (l: list a) (z: c) : GTot c =
L.fold_right_gtot #a #c l (fun x acc -> (f x) `op` acc) z
(** Equations for [map_op'] showing how it folds over the empty list *)
val map_op'_nil (#a #b #c: Type) (op: (b -> c -> GTot c)) (f: (a -> GTot b)) (z: c)
: Lemma (map_op' op f [] z == z)
(** Equations for [map_op'] showing how it folds over a cons cell *)
val map_op'_cons
(#a #b #c: Type)
(op: (b -> c -> GTot c))
(f: (a -> GTot b))
(hd: a)
(tl: list a)
(z: c)
: Lemma (map_op' op f (hd :: tl) z == (f hd) `op` (map_op' op f tl z))
(**** Conjunction *)
(** [big_and' f l] = [/\_{x in l} f x] *)
[@@ __reduce__]
let big_and' #a (f: (a -> Type)) (l: list a) : Type = map_op' l_and f l True
(** Equations for [big_and'] showing it to be trivial over the empty list *)
val big_and'_nil (#a: Type) (f: (a -> Type)) : Lemma (big_and' f [] == True)
(** Equations for [big_and'] showing it to be a fold over a list with [/\] *)
val big_and'_cons (#a: Type) (f: (a -> Type)) (hd: a) (tl: list a)
: Lemma (big_and' f (hd :: tl) == (f hd /\ big_and' f tl))
(** [big_and' f l] is a [prop], i.e., it is proof irrelevant.
Note: defining `big_and'` to intrinsically be in `prop`
is also possible, but it's much more tedious in proofs.
This is in part because the [/\] is not defined in prop,
though one can prove that [a /\ b] is a prop.
The discrepancy means that I preferred to prove these
operators in [prop] extrinsically.
*)
val big_and'_prop (#a: Type) (f: (a -> Type)) (l: list a) : Lemma ((big_and' f l) `subtype_of` unit)
(** Interpreting the finite conjunction [big_and f l]
as an infinite conjunction [forall] *)
val big_and'_forall (#a: Type) (f: (a -> Type)) (l: list a)
: Lemma (big_and' f l <==> (forall x. L.memP x l ==> f x))
(** [big_and f l] is an implicitly reducing variant of [big_and']
It is defined in [prop] *)
[@@ __reduce__]
unfold
let big_and #a (f: (a -> Type)) (l: list a) : prop =
big_and'_prop f l;
normal (big_and' f l)
(**** Disjunction *)
(** [big_or f l] = [\/_{x in l} f x] *)
[@@ __reduce__]
let big_or' #a (f: (a -> Type)) (l: list a) : Type = map_op' l_or f l False
(** Equations for [big_or] showing it to be [False] on the empty list *)
val big_or'_nil (#a: Type) (f: (a -> Type)) : Lemma (big_or' f [] == False)
(** Equations for [big_or] showing it to fold over a list *)
val big_or'_cons (#a: Type) (f: (a -> Type)) (hd: a) (tl: list a)
: Lemma (big_or' f (hd :: tl) == (f hd \/ big_or' f tl))
(** [big_or f l] is a `prop`
See the remark above on the style of proof for prop *)
val big_or'_prop (#a: Type) (f: (a -> Type)) (l: list a) : Lemma ((big_or' f l) `subtype_of` unit)
(** Interpreting the finite disjunction [big_or f l]
as an infinite disjunction [exists] *)
val big_or'_exists (#a: Type) (f: (a -> Type)) (l: list a)
: Lemma (big_or' f l <==> (exists x. L.memP x l /\ f x))
(** [big_or f l] is an implicitly reducing variant of [big_or']
It is defined in [prop] *)
[@@ __reduce__]
unfold
let big_or #a (f: (a -> Type)) (l: list a) : prop =
big_or'_prop f l;
normal (big_or' f l)
(**** Pairwise operators *)
/// We provide functions to apply a reflexive, symmetric binary
/// operator to elements in a list [l] pairwise, in a triangle of
/// elements in the square matrix of [l X l]. To illustrate, for a
/// list of [n] elements, we fold the operator over the pairwise
/// elements of the list in top-down, left-to-right order of the
/// diagram below
///
///
/// {[
/// 0 1 2 3 ... n
/// 0
/// 1 x
/// 2 x x
/// 3 x x x
/// . x x x x
/// n x x x x ]}
(** Mapping pairs of elements of [l] using [f] and combining them with
[op]. *)
[@@ __reduce__] | false | false | FStar.BigOps.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val pairwise_op' (#a #b: _) (op: (b -> b -> GTot b)) (f: (a -> a -> b)) (l: list a) (z: b) : GTot b | [
"recursion"
] | FStar.BigOps.pairwise_op' | {
"file_name": "ulib/FStar.BigOps.fsti",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | op: (_: b -> _: b -> Prims.GTot b) -> f: (_: a -> _: a -> b) -> l: Prims.list a -> z: b
-> Prims.GTot b | {
"end_col": 70,
"end_line": 180,
"start_col": 2,
"start_line": 178
} |
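Editorial note (added between dataset rows, not part of either row): the entry
above records the generic pairwise fold FStar.BigOps.pairwise_op'. Its
propositional instantiations are not quoted in this excerpt; by analogy with
how big_and' and big_or' instantiate map_op', they are expected to look like
the sketch below, which is stated here as an assumption about the rest of the
module rather than a quotation from it:

[@@ __reduce__]
let pairwise_and' #a (f: (a -> a -> Type)) (l: list a) : Type =
  pairwise_op' l_and f l True

[@@ __reduce__]
let pairwise_or' #a (f: (a -> a -> Type)) (l: list a) : Type =
  pairwise_op' l_or f l False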
Prims.Tot | val gf128_rev_shift:poly | [
{
"abbrev": false,
"full_module": "Vale.Math.Poly2.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2.Bits_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words.Four_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let gf128_rev_shift : poly = reverse gf128_low_shift 127 | val gf128_rev_shift:poly
let gf128_rev_shift:poly = | false | null | false | reverse gf128_low_shift 127 | {
"checked_file": "Vale.AES.GF128.fsti.checked",
"dependencies": [
"Vale.Math.Poly2_s.fsti.checked",
"Vale.Math.Poly2.Lemmas.fsti.checked",
"Vale.Math.Poly2.Bits_s.fsti.checked",
"Vale.Math.Poly2.fsti.checked",
"Vale.Def.Words_s.fsti.checked",
"Vale.Def.Words.Four_s.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Arch.Types.fsti.checked",
"Vale.AES.GF128_s.fsti.checked",
"prims.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked"
],
"interface_file": false,
"source_file": "Vale.AES.GF128.fsti"
} | [
"total"
] | [
"Vale.Math.Poly2_s.reverse",
"Vale.AES.GF128.gf128_low_shift"
] | [] | module Vale.AES.GF128
open FStar.Seq
open FStar.Mul
open Vale.Def.Words_s
open Vale.Def.Words.Four_s
open Vale.Def.Types_s
open Vale.Arch.Types
open Vale.AES.GF128_s
open Vale.Math.Poly2_s
open Vale.Math.Poly2.Bits_s
open Vale.Math.Poly2
open Vale.Math.Poly2.Lemmas
let quad32_shift_left_1 (q:quad32) : quad32 =
let l = four_map (fun (i:nat32) -> ishl i 1) q in
let r = four_map (fun (i:nat32) -> ishr i 31) q in
let Mkfour r0 r1 r2 r3 = r in
quad32_xor l (Mkfour 0 r0 r1 r2)
let quad32_shift_2_left_1 (qa qb:quad32) : quad32 & quad32 =
let la = four_map (fun (i:nat32) -> ishl i 1) qa in
let lb = four_map (fun (i:nat32) -> ishl i 1) qb in
let ra = four_map (fun (i:nat32) -> ishr i 31) qa in
let rb = four_map (fun (i:nat32) -> ishr i 31) qb in
let Mkfour ra0 ra1 ra2 ra3 = ra in
let Mkfour rb0 rb1 rb2 rb3 = rb in
let qa' = quad32_xor la (Mkfour 0 ra0 ra1 ra2) in
let qb' = quad32_xor lb (quad32_xor (Mkfour ra3 0 0 0) (Mkfour 0 rb0 rb1 rb2)) in
(qa', qb')
val lemma_shift_left_1 (a:poly) : Lemma
(requires degree a < 128)
(ensures to_quad32 (shift a 1) == quad32_shift_left_1 (to_quad32 a))
val lemma_shift_2_left_1 (lo hi:poly) : Lemma
(requires degree hi < 127 /\ degree lo < 128)
(ensures (
let n = monomial 128 in
let a = hi *. n +. lo in
let a' = shift a 1 in
let (lo', hi') = quad32_shift_2_left_1 (to_quad32 lo) (to_quad32 hi) in
lo' == to_quad32 (a' %. n) /\
hi' == to_quad32 (a' /. n)
))
// TODO: move this to Poly library
val lemma_reverse_reverse (a:poly) (n:nat) : Lemma
(requires degree a <= n)
(ensures reverse (reverse a n) n == a)
[SMTPat (reverse (reverse a n) n)]
val lemma_gf128_degree (_:unit) : Lemma
(ensures
degree gf128_modulus_low_terms == 7 /\
degree (monomial 128) == 128 /\
degree gf128_modulus == 128
)
val lemma_gf128_constant_rev (q:quad32) : Lemma
(ensures
to_quad32 (reverse gf128_modulus_low_terms 127) == Mkfour 0 0 0 0xe1000000 /\
quad32_xor q q == Mkfour 0 0 0 0
)
val lemma_quad32_double_hi_rev (a:poly) : Lemma
(requires degree a <= 127 /\ degree (reverse a 127) <= 63)
(ensures of_double32 (quad32_double_hi (to_quad32 a)) == reverse (reverse a 127) 63)
// Compute 128-bit multiply in terms of 64-bit multiplies
val lemma_gf128_mul (a b c d:poly) (n:nat) : Lemma
(ensures (
let m = monomial n in
let ab = a *. m +. b in
let cd = c *. m +. d in
let ac = a *. c in
let ad = a *. d in
let bc = b *. c in
let bd = b *. d in
ab *. cd ==
shift (ac +. bc /. m +. ad /. m) (n + n) +.
((bc %. m) *. m +. (ad %. m) *. m +. bd)
))
// Compute (a * b) % g, where g = n + h and %. n is easy to compute (e.g. n = x^128)
val lemma_gf128_reduce (a b g n h:poly) : Lemma
(requires
degree h >= 0 /\
degree n > 2 * degree h /\
degree g == degree n /\
degree a <= degree n /\
degree b <= degree n /\
g == n +. h
)
(ensures (
let d = (a *. b) /. n in
let dh = d *. h in
degree ((dh /. n) *. h) <= 2 * degree h /\
(a *. b) %. g == (dh /. n) *. h +. dh %. n +. (a *. b) %. n
))
val lemma_gf128_reduce_rev (a b h:poly) (n:pos) : Lemma
(requires
degree h >= 0 /\
n > 2 * degree h /\
degree (monomial n +. h) == n /\
degree a < n /\
degree b < n
)
(ensures (
let m = monomial n in
let g = m +. h in
let r x = reverse x (n - 1) in
let rr x = reverse x (2 * n - 1) in
let rab = rr (a *. b) in
let rd = rab %. m in
let rdh = rr (r rd *. h) in
let rdhL = rdh %. m in
let rdhLh = r (r rdhL *. h) in
degree (r rdhL) <= 2 * degree h /\
degree (r rdhLh) <= 2 * degree h /\
r ((a *. b) %. g) == rdhLh +. rdh /. m +. rab /. m
))
val lemma_reduce_rev (a0 a1 a2 h:poly) (n:pos) : Lemma
(requires
n == 64 /\ // verification times out unless n is known
degree a0 < n + n /\
degree a1 < n + n /\
degree a2 < n + n /\
degree (monomial (n + n) +. h) == n + n /\
degree h < n /\
h.[0]
)
(ensures (
let nn = n + n in
let mm = monomial nn in
let m = monomial n in
let g = mm +. h in
let c = reverse (shift h (-1)) (n - 1) in
let y_10 = a0 +. shift (mask a1 64) 64 in
let y_0 = mask y_10 64 in
let y_10c = swap y_10 64 +. y_0 *. c in
let a = a0 +. shift a1 64 +. shift a2 128 in
let x = reverse a (nn + nn - 1) in
reverse (x %. g) (nn - 1) == swap y_10c 64 +. (a2 +. shift a1 (-64)) +. mask y_10c 64 *. c
))
// of_fun 8 (fun (i:nat) -> i = 0 || i = 1 || i = 6)
let gf128_low_shift : poly = shift gf128_modulus_low_terms (-1) | false | true | Vale.AES.GF128.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val gf128_rev_shift:poly | [] | Vale.AES.GF128.gf128_rev_shift | {
"file_name": "vale/code/crypto/aes/Vale.AES.GF128.fsti",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | Vale.Math.Poly2_s.poly | {
"end_col": 56,
"end_line": 152,
"start_col": 29,
"start_line": 152
} |
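Editorial note (added between dataset rows, not part of either row): the
constants recorded in this row and the next are easiest to read through the
of_fun comments that precede them in the quoted Vale.AES.GF128.fsti. Since
gf128_modulus_low_terms is x^7 + x^2 + x + 1, shifting it right by one bit and
then reversing over 127 bits gives

  gf128_low_shift = shift gf128_modulus_low_terms (-1) = x^6 + x + 1
                    (coefficients at indices 0, 1 and 6)
  gf128_rev_shift = reverse gf128_low_shift 127 = x^127 + x^126 + x^121
                    (coefficients at indices 121, 126 and 127)

which matches the "i = 0 || i = 1 || i = 6" and "i = 127 || i = 126 || i = 121"
comments attached to the two definitions.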
Prims.Tot | val gf128_low_shift:poly | [
{
"abbrev": false,
"full_module": "Vale.Math.Poly2.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2.Bits_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words.Four_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let gf128_low_shift : poly = shift gf128_modulus_low_terms (-1) | val gf128_low_shift:poly
let gf128_low_shift:poly = | false | null | false | shift gf128_modulus_low_terms (- 1) | {
"checked_file": "Vale.AES.GF128.fsti.checked",
"dependencies": [
"Vale.Math.Poly2_s.fsti.checked",
"Vale.Math.Poly2.Lemmas.fsti.checked",
"Vale.Math.Poly2.Bits_s.fsti.checked",
"Vale.Math.Poly2.fsti.checked",
"Vale.Def.Words_s.fsti.checked",
"Vale.Def.Words.Four_s.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Arch.Types.fsti.checked",
"Vale.AES.GF128_s.fsti.checked",
"prims.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked"
],
"interface_file": false,
"source_file": "Vale.AES.GF128.fsti"
} | [
"total"
] | [
"Vale.Math.Poly2_s.shift",
"Vale.AES.GF128_s.gf128_modulus_low_terms",
"Prims.op_Minus"
] | [] | module Vale.AES.GF128
open FStar.Seq
open FStar.Mul
open Vale.Def.Words_s
open Vale.Def.Words.Four_s
open Vale.Def.Types_s
open Vale.Arch.Types
open Vale.AES.GF128_s
open Vale.Math.Poly2_s
open Vale.Math.Poly2.Bits_s
open Vale.Math.Poly2
open Vale.Math.Poly2.Lemmas
let quad32_shift_left_1 (q:quad32) : quad32 =
let l = four_map (fun (i:nat32) -> ishl i 1) q in
let r = four_map (fun (i:nat32) -> ishr i 31) q in
let Mkfour r0 r1 r2 r3 = r in
quad32_xor l (Mkfour 0 r0 r1 r2)
let quad32_shift_2_left_1 (qa qb:quad32) : quad32 & quad32 =
let la = four_map (fun (i:nat32) -> ishl i 1) qa in
let lb = four_map (fun (i:nat32) -> ishl i 1) qb in
let ra = four_map (fun (i:nat32) -> ishr i 31) qa in
let rb = four_map (fun (i:nat32) -> ishr i 31) qb in
let Mkfour ra0 ra1 ra2 ra3 = ra in
let Mkfour rb0 rb1 rb2 rb3 = rb in
let qa' = quad32_xor la (Mkfour 0 ra0 ra1 ra2) in
let qb' = quad32_xor lb (quad32_xor (Mkfour ra3 0 0 0) (Mkfour 0 rb0 rb1 rb2)) in
(qa', qb')
val lemma_shift_left_1 (a:poly) : Lemma
(requires degree a < 128)
(ensures to_quad32 (shift a 1) == quad32_shift_left_1 (to_quad32 a))
val lemma_shift_2_left_1 (lo hi:poly) : Lemma
(requires degree hi < 127 /\ degree lo < 128)
(ensures (
let n = monomial 128 in
let a = hi *. n +. lo in
let a' = shift a 1 in
let (lo', hi') = quad32_shift_2_left_1 (to_quad32 lo) (to_quad32 hi) in
lo' == to_quad32 (a' %. n) /\
hi' == to_quad32 (a' /. n)
))
// TODO: move this to Poly library
val lemma_reverse_reverse (a:poly) (n:nat) : Lemma
(requires degree a <= n)
(ensures reverse (reverse a n) n == a)
[SMTPat (reverse (reverse a n) n)]
val lemma_gf128_degree (_:unit) : Lemma
(ensures
degree gf128_modulus_low_terms == 7 /\
degree (monomial 128) == 128 /\
degree gf128_modulus == 128
)
val lemma_gf128_constant_rev (q:quad32) : Lemma
(ensures
to_quad32 (reverse gf128_modulus_low_terms 127) == Mkfour 0 0 0 0xe1000000 /\
quad32_xor q q == Mkfour 0 0 0 0
)
val lemma_quad32_double_hi_rev (a:poly) : Lemma
(requires degree a <= 127 /\ degree (reverse a 127) <= 63)
(ensures of_double32 (quad32_double_hi (to_quad32 a)) == reverse (reverse a 127) 63)
// Compute 128-bit multiply in terms of 64-bit multiplies
val lemma_gf128_mul (a b c d:poly) (n:nat) : Lemma
(ensures (
let m = monomial n in
let ab = a *. m +. b in
let cd = c *. m +. d in
let ac = a *. c in
let ad = a *. d in
let bc = b *. c in
let bd = b *. d in
ab *. cd ==
shift (ac +. bc /. m +. ad /. m) (n + n) +.
((bc %. m) *. m +. (ad %. m) *. m +. bd)
))
// Compute (a * b) % g, where g = n + h and %. n is easy to compute (e.g. n = x^128)
val lemma_gf128_reduce (a b g n h:poly) : Lemma
(requires
degree h >= 0 /\
degree n > 2 * degree h /\
degree g == degree n /\
degree a <= degree n /\
degree b <= degree n /\
g == n +. h
)
(ensures (
let d = (a *. b) /. n in
let dh = d *. h in
degree ((dh /. n) *. h) <= 2 * degree h /\
(a *. b) %. g == (dh /. n) *. h +. dh %. n +. (a *. b) %. n
))
val lemma_gf128_reduce_rev (a b h:poly) (n:pos) : Lemma
(requires
degree h >= 0 /\
n > 2 * degree h /\
degree (monomial n +. h) == n /\
degree a < n /\
degree b < n
)
(ensures (
let m = monomial n in
let g = m +. h in
let r x = reverse x (n - 1) in
let rr x = reverse x (2 * n - 1) in
let rab = rr (a *. b) in
let rd = rab %. m in
let rdh = rr (r rd *. h) in
let rdhL = rdh %. m in
let rdhLh = r (r rdhL *. h) in
degree (r rdhL) <= 2 * degree h /\
degree (r rdhLh) <= 2 * degree h /\
r ((a *. b) %. g) == rdhLh +. rdh /. m +. rab /. m
))
val lemma_reduce_rev (a0 a1 a2 h:poly) (n:pos) : Lemma
(requires
n == 64 /\ // verification times out unless n is known
degree a0 < n + n /\
degree a1 < n + n /\
degree a2 < n + n /\
degree (monomial (n + n) +. h) == n + n /\
degree h < n /\
h.[0]
)
(ensures (
let nn = n + n in
let mm = monomial nn in
let m = monomial n in
let g = mm +. h in
let c = reverse (shift h (-1)) (n - 1) in
let y_10 = a0 +. shift (mask a1 64) 64 in
let y_0 = mask y_10 64 in
let y_10c = swap y_10 64 +. y_0 *. c in
let a = a0 +. shift a1 64 +. shift a2 128 in
let x = reverse a (nn + nn - 1) in
reverse (x %. g) (nn - 1) == swap y_10c 64 +. (a2 +. shift a1 (-64)) +. mask y_10c 64 *. c
)) | false | true | Vale.AES.GF128.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val gf128_low_shift:poly | [] | Vale.AES.GF128.gf128_low_shift | {
"file_name": "vale/code/crypto/aes/Vale.AES.GF128.fsti",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | Vale.Math.Poly2_s.poly | {
"end_col": 63,
"end_line": 149,
"start_col": 29,
"start_line": 149
} |
Prims.Tot | val gf128_mul_rev (a b: poly) : poly | [
{
"abbrev": false,
"full_module": "Vale.Math.Poly2.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2.Bits_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words.Four_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let gf128_mul_rev (a b:poly) : poly =
reverse (gf128_mul (reverse a 127) (reverse b 127)) 127 | val gf128_mul_rev (a b: poly) : poly
let gf128_mul_rev (a b: poly) : poly = | false | null | false | reverse (gf128_mul (reverse a 127) (reverse b 127)) 127 | {
"checked_file": "Vale.AES.GF128.fsti.checked",
"dependencies": [
"Vale.Math.Poly2_s.fsti.checked",
"Vale.Math.Poly2.Lemmas.fsti.checked",
"Vale.Math.Poly2.Bits_s.fsti.checked",
"Vale.Math.Poly2.fsti.checked",
"Vale.Def.Words_s.fsti.checked",
"Vale.Def.Words.Four_s.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Arch.Types.fsti.checked",
"Vale.AES.GF128_s.fsti.checked",
"prims.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked"
],
"interface_file": false,
"source_file": "Vale.AES.GF128.fsti"
} | [
"total"
] | [
"Vale.Math.Poly2_s.poly",
"Vale.Math.Poly2_s.reverse",
"Vale.AES.GF128_s.gf128_mul"
] | [] | module Vale.AES.GF128
open FStar.Seq
open FStar.Mul
open Vale.Def.Words_s
open Vale.Def.Words.Four_s
open Vale.Def.Types_s
open Vale.Arch.Types
open Vale.AES.GF128_s
open Vale.Math.Poly2_s
open Vale.Math.Poly2.Bits_s
open Vale.Math.Poly2
open Vale.Math.Poly2.Lemmas
let quad32_shift_left_1 (q:quad32) : quad32 =
let l = four_map (fun (i:nat32) -> ishl i 1) q in
let r = four_map (fun (i:nat32) -> ishr i 31) q in
let Mkfour r0 r1 r2 r3 = r in
quad32_xor l (Mkfour 0 r0 r1 r2)
let quad32_shift_2_left_1 (qa qb:quad32) : quad32 & quad32 =
let la = four_map (fun (i:nat32) -> ishl i 1) qa in
let lb = four_map (fun (i:nat32) -> ishl i 1) qb in
let ra = four_map (fun (i:nat32) -> ishr i 31) qa in
let rb = four_map (fun (i:nat32) -> ishr i 31) qb in
let Mkfour ra0 ra1 ra2 ra3 = ra in
let Mkfour rb0 rb1 rb2 rb3 = rb in
let qa' = quad32_xor la (Mkfour 0 ra0 ra1 ra2) in
let qb' = quad32_xor lb (quad32_xor (Mkfour ra3 0 0 0) (Mkfour 0 rb0 rb1 rb2)) in
(qa', qb')
val lemma_shift_left_1 (a:poly) : Lemma
(requires degree a < 128)
(ensures to_quad32 (shift a 1) == quad32_shift_left_1 (to_quad32 a))
val lemma_shift_2_left_1 (lo hi:poly) : Lemma
(requires degree hi < 127 /\ degree lo < 128)
(ensures (
let n = monomial 128 in
let a = hi *. n +. lo in
let a' = shift a 1 in
let (lo', hi') = quad32_shift_2_left_1 (to_quad32 lo) (to_quad32 hi) in
lo' == to_quad32 (a' %. n) /\
hi' == to_quad32 (a' /. n)
))
// TODO: move this to Poly library
val lemma_reverse_reverse (a:poly) (n:nat) : Lemma
(requires degree a <= n)
(ensures reverse (reverse a n) n == a)
[SMTPat (reverse (reverse a n) n)]
val lemma_gf128_degree (_:unit) : Lemma
(ensures
degree gf128_modulus_low_terms == 7 /\
degree (monomial 128) == 128 /\
degree gf128_modulus == 128
)
val lemma_gf128_constant_rev (q:quad32) : Lemma
(ensures
to_quad32 (reverse gf128_modulus_low_terms 127) == Mkfour 0 0 0 0xe1000000 /\
quad32_xor q q == Mkfour 0 0 0 0
)
val lemma_quad32_double_hi_rev (a:poly) : Lemma
(requires degree a <= 127 /\ degree (reverse a 127) <= 63)
(ensures of_double32 (quad32_double_hi (to_quad32 a)) == reverse (reverse a 127) 63)
// Compute 128-bit multiply in terms of 64-bit multiplies
val lemma_gf128_mul (a b c d:poly) (n:nat) : Lemma
(ensures (
let m = monomial n in
let ab = a *. m +. b in
let cd = c *. m +. d in
let ac = a *. c in
let ad = a *. d in
let bc = b *. c in
let bd = b *. d in
ab *. cd ==
shift (ac +. bc /. m +. ad /. m) (n + n) +.
((bc %. m) *. m +. (ad %. m) *. m +. bd)
))
// Compute (a * b) % g, where g = n + h and %. n is easy to compute (e.g. n = x^128)
val lemma_gf128_reduce (a b g n h:poly) : Lemma
(requires
degree h >= 0 /\
degree n > 2 * degree h /\
degree g == degree n /\
degree a <= degree n /\
degree b <= degree n /\
g == n +. h
)
(ensures (
let d = (a *. b) /. n in
let dh = d *. h in
degree ((dh /. n) *. h) <= 2 * degree h /\
(a *. b) %. g == (dh /. n) *. h +. dh %. n +. (a *. b) %. n
))
val lemma_gf128_reduce_rev (a b h:poly) (n:pos) : Lemma
(requires
degree h >= 0 /\
n > 2 * degree h /\
degree (monomial n +. h) == n /\
degree a < n /\
degree b < n
)
(ensures (
let m = monomial n in
let g = m +. h in
let r x = reverse x (n - 1) in
let rr x = reverse x (2 * n - 1) in
let rab = rr (a *. b) in
let rd = rab %. m in
let rdh = rr (r rd *. h) in
let rdhL = rdh %. m in
let rdhLh = r (r rdhL *. h) in
degree (r rdhL) <= 2 * degree h /\
degree (r rdhLh) <= 2 * degree h /\
r ((a *. b) %. g) == rdhLh +. rdh /. m +. rab /. m
))
val lemma_reduce_rev (a0 a1 a2 h:poly) (n:pos) : Lemma
(requires
n == 64 /\ // verification times out unless n is known
degree a0 < n + n /\
degree a1 < n + n /\
degree a2 < n + n /\
degree (monomial (n + n) +. h) == n + n /\
degree h < n /\
h.[0]
)
(ensures (
let nn = n + n in
let mm = monomial nn in
let m = monomial n in
let g = mm +. h in
let c = reverse (shift h (-1)) (n - 1) in
let y_10 = a0 +. shift (mask a1 64) 64 in
let y_0 = mask y_10 64 in
let y_10c = swap y_10 64 +. y_0 *. c in
let a = a0 +. shift a1 64 +. shift a2 128 in
let x = reverse a (nn + nn - 1) in
reverse (x %. g) (nn - 1) == swap y_10c 64 +. (a2 +. shift a1 (-64)) +. mask y_10c 64 *. c
))
// of_fun 8 (fun (i:nat) -> i = 0 || i = 1 || i = 6)
let gf128_low_shift : poly = shift gf128_modulus_low_terms (-1)
// of_fun 8 (fun (i:nat) -> i = 127 || i = 126 || i = 121)
let gf128_rev_shift : poly = reverse gf128_low_shift 127
val lemma_gf128_low_shift (_:unit) : Lemma
(shift (of_quad32 (Mkfour 0 0 0 0xc2000000)) (-64) == reverse gf128_low_shift 63)
val lemma_gf128_high_bit (_:unit) : Lemma
(of_quad32 (Mkfour 0 0 0 0x80000000) == monomial 127)
val lemma_gf128_low_shift_1 (_:unit) : Lemma
(of_quad32 (Mkfour 1 0 0 0xc2000000) == reverse (shift (monomial 128 +. gf128_modulus_low_terms) (-1)) 127) | false | true | Vale.AES.GF128.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val gf128_mul_rev (a b: poly) : poly | [] | Vale.AES.GF128.gf128_mul_rev | {
"file_name": "vale/code/crypto/aes/Vale.AES.GF128.fsti",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | a: Vale.Math.Poly2_s.poly -> b: Vale.Math.Poly2_s.poly -> Vale.Math.Poly2_s.poly | {
"end_col": 57,
"end_line": 164,
"start_col": 2,
"start_line": 164
} |
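Editorial note (added between dataset rows, not part of either row):
gf128_mul_rev, recorded above, is multiplication in the bit-reflected
representation used by GHASH-style code. Writing rev x for reverse x 127, its
definition reads

  gf128_mul_rev a b = rev (gf128_mul (rev a) (rev b))
                    = rev ((rev a *. rev b) %. gf128_modulus)

where gf128_mul is the spec-level multiply modulo
gf128_modulus = x^128 + x^7 + x^2 + x + 1 (this expansion of gf128_mul and of
the modulus comes from Vale.AES.GF128_s, which is not quoted in this excerpt).
Operands stored bit-reversed are therefore multiplied by reversing, doing the
ordinary carry-less multiply and reduction, and reversing back;
lemma_gf128_reduce_rev in the quoted file context states the corresponding
reduction entirely on reversed operands.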
Prims.Tot | val quad32_shift_left_1 (q: quad32) : quad32 | [
{
"abbrev": false,
"full_module": "Vale.Math.Poly2.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2.Bits_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words.Four_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let quad32_shift_left_1 (q:quad32) : quad32 =
let l = four_map (fun (i:nat32) -> ishl i 1) q in
let r = four_map (fun (i:nat32) -> ishr i 31) q in
let Mkfour r0 r1 r2 r3 = r in
quad32_xor l (Mkfour 0 r0 r1 r2) | val quad32_shift_left_1 (q: quad32) : quad32
let quad32_shift_left_1 (q: quad32) : quad32 = | false | null | false | let l = four_map (fun (i: nat32) -> ishl i 1) q in
let r = four_map (fun (i: nat32) -> ishr i 31) q in
let Mkfour r0 r1 r2 r3 = r in
quad32_xor l (Mkfour 0 r0 r1 r2) | {
"checked_file": "Vale.AES.GF128.fsti.checked",
"dependencies": [
"Vale.Math.Poly2_s.fsti.checked",
"Vale.Math.Poly2.Lemmas.fsti.checked",
"Vale.Math.Poly2.Bits_s.fsti.checked",
"Vale.Math.Poly2.fsti.checked",
"Vale.Def.Words_s.fsti.checked",
"Vale.Def.Words.Four_s.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Arch.Types.fsti.checked",
"Vale.AES.GF128_s.fsti.checked",
"prims.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked"
],
"interface_file": false,
"source_file": "Vale.AES.GF128.fsti"
} | [
"total"
] | [
"Vale.Def.Types_s.quad32",
"Vale.Def.Words_s.natN",
"Vale.Def.Words_s.pow2_32",
"Vale.Def.Types_s.quad32_xor",
"Vale.Def.Words_s.Mkfour",
"Vale.Def.Types_s.nat32",
"Vale.Def.Words_s.four",
"Vale.Def.Words.Four_s.four_map",
"Vale.Def.Types_s.ishr",
"Vale.Def.Types_s.ishl"
] | [] | module Vale.AES.GF128
open FStar.Seq
open FStar.Mul
open Vale.Def.Words_s
open Vale.Def.Words.Four_s
open Vale.Def.Types_s
open Vale.Arch.Types
open Vale.AES.GF128_s
open Vale.Math.Poly2_s
open Vale.Math.Poly2.Bits_s
open Vale.Math.Poly2
open Vale.Math.Poly2.Lemmas | false | true | Vale.AES.GF128.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val quad32_shift_left_1 (q: quad32) : quad32 | [] | Vale.AES.GF128.quad32_shift_left_1 | {
"file_name": "vale/code/crypto/aes/Vale.AES.GF128.fsti",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | q: Vale.Def.Types_s.quad32 -> Vale.Def.Types_s.quad32 | {
"end_col": 34,
"end_line": 18,
"start_col": 45,
"start_line": 14
} |
Prims.Tot | val quad32_shift_2_left_1 (qa qb: quad32) : quad32 & quad32 | [
{
"abbrev": false,
"full_module": "Vale.Math.Poly2.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2.Bits_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words.Four_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let quad32_shift_2_left_1 (qa qb:quad32) : quad32 & quad32 =
let la = four_map (fun (i:nat32) -> ishl i 1) qa in
let lb = four_map (fun (i:nat32) -> ishl i 1) qb in
let ra = four_map (fun (i:nat32) -> ishr i 31) qa in
let rb = four_map (fun (i:nat32) -> ishr i 31) qb in
let Mkfour ra0 ra1 ra2 ra3 = ra in
let Mkfour rb0 rb1 rb2 rb3 = rb in
let qa' = quad32_xor la (Mkfour 0 ra0 ra1 ra2) in
let qb' = quad32_xor lb (quad32_xor (Mkfour ra3 0 0 0) (Mkfour 0 rb0 rb1 rb2)) in
(qa', qb') | val quad32_shift_2_left_1 (qa qb: quad32) : quad32 & quad32
let quad32_shift_2_left_1 (qa qb: quad32) : quad32 & quad32 = | false | null | false | let la = four_map (fun (i: nat32) -> ishl i 1) qa in
let lb = four_map (fun (i: nat32) -> ishl i 1) qb in
let ra = four_map (fun (i: nat32) -> ishr i 31) qa in
let rb = four_map (fun (i: nat32) -> ishr i 31) qb in
let Mkfour ra0 ra1 ra2 ra3 = ra in
let Mkfour rb0 rb1 rb2 rb3 = rb in
let qa' = quad32_xor la (Mkfour 0 ra0 ra1 ra2) in
let qb' = quad32_xor lb (quad32_xor (Mkfour ra3 0 0 0) (Mkfour 0 rb0 rb1 rb2)) in
(qa', qb') | {
"checked_file": "Vale.AES.GF128.fsti.checked",
"dependencies": [
"Vale.Math.Poly2_s.fsti.checked",
"Vale.Math.Poly2.Lemmas.fsti.checked",
"Vale.Math.Poly2.Bits_s.fsti.checked",
"Vale.Math.Poly2.fsti.checked",
"Vale.Def.Words_s.fsti.checked",
"Vale.Def.Words.Four_s.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Arch.Types.fsti.checked",
"Vale.AES.GF128_s.fsti.checked",
"prims.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked"
],
"interface_file": false,
"source_file": "Vale.AES.GF128.fsti"
} | [
"total"
] | [
"Vale.Def.Types_s.quad32",
"Vale.Def.Words_s.natN",
"Vale.Def.Words_s.pow2_32",
"FStar.Pervasives.Native.Mktuple2",
"Vale.Def.Types_s.quad32_xor",
"Vale.Def.Words_s.Mkfour",
"Vale.Def.Types_s.nat32",
"FStar.Pervasives.Native.tuple2",
"Vale.Def.Words_s.four",
"Vale.Def.Words.Four_s.four_map",
"Vale.Def.Types_s.ishr",
"Vale.Def.Types_s.ishl"
] | [] | module Vale.AES.GF128
open FStar.Seq
open FStar.Mul
open Vale.Def.Words_s
open Vale.Def.Words.Four_s
open Vale.Def.Types_s
open Vale.Arch.Types
open Vale.AES.GF128_s
open Vale.Math.Poly2_s
open Vale.Math.Poly2.Bits_s
open Vale.Math.Poly2
open Vale.Math.Poly2.Lemmas
let quad32_shift_left_1 (q:quad32) : quad32 =
let l = four_map (fun (i:nat32) -> ishl i 1) q in
let r = four_map (fun (i:nat32) -> ishr i 31) q in
let Mkfour r0 r1 r2 r3 = r in
quad32_xor l (Mkfour 0 r0 r1 r2) | false | true | Vale.AES.GF128.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val quad32_shift_2_left_1 (qa qb: quad32) : quad32 & quad32 | [] | Vale.AES.GF128.quad32_shift_2_left_1 | {
"file_name": "vale/code/crypto/aes/Vale.AES.GF128.fsti",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | qa: Vale.Def.Types_s.quad32 -> qb: Vale.Def.Types_s.quad32
-> Vale.Def.Types_s.quad32 * Vale.Def.Types_s.quad32 | {
"end_col": 12,
"end_line": 29,
"start_col": 60,
"start_line": 20
} |
Prims.Tot | val mod_rev (n: pos) (a b: poly) : poly | [
{
"abbrev": false,
"full_module": "Vale.Math.Poly2.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2.Bits_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words.Four_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let mod_rev (n:pos) (a b:poly) : poly =
reverse (reverse a (n + n - 1) %. b) (n - 1) | val mod_rev (n: pos) (a b: poly) : poly
let mod_rev (n: pos) (a b: poly) : poly = | false | null | false | reverse (reverse a (n + n - 1) %. b) (n - 1) | {
"checked_file": "Vale.AES.GF128.fsti.checked",
"dependencies": [
"Vale.Math.Poly2_s.fsti.checked",
"Vale.Math.Poly2.Lemmas.fsti.checked",
"Vale.Math.Poly2.Bits_s.fsti.checked",
"Vale.Math.Poly2.fsti.checked",
"Vale.Def.Words_s.fsti.checked",
"Vale.Def.Words.Four_s.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Arch.Types.fsti.checked",
"Vale.AES.GF128_s.fsti.checked",
"prims.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked"
],
"interface_file": false,
"source_file": "Vale.AES.GF128.fsti"
} | [
"total"
] | [
"Prims.pos",
"Vale.Math.Poly2_s.poly",
"Vale.Math.Poly2_s.reverse",
"Vale.Math.Poly2.op_Percent_Dot",
"Prims.op_Subtraction",
"Prims.op_Addition"
] | [] | module Vale.AES.GF128
open FStar.Seq
open FStar.Mul
open Vale.Def.Words_s
open Vale.Def.Words.Four_s
open Vale.Def.Types_s
open Vale.Arch.Types
open Vale.AES.GF128_s
open Vale.Math.Poly2_s
open Vale.Math.Poly2.Bits_s
open Vale.Math.Poly2
open Vale.Math.Poly2.Lemmas
let quad32_shift_left_1 (q:quad32) : quad32 =
let l = four_map (fun (i:nat32) -> ishl i 1) q in
let r = four_map (fun (i:nat32) -> ishr i 31) q in
let Mkfour r0 r1 r2 r3 = r in
quad32_xor l (Mkfour 0 r0 r1 r2)
let quad32_shift_2_left_1 (qa qb:quad32) : quad32 & quad32 =
let la = four_map (fun (i:nat32) -> ishl i 1) qa in
let lb = four_map (fun (i:nat32) -> ishl i 1) qb in
let ra = four_map (fun (i:nat32) -> ishr i 31) qa in
let rb = four_map (fun (i:nat32) -> ishr i 31) qb in
let Mkfour ra0 ra1 ra2 ra3 = ra in
let Mkfour rb0 rb1 rb2 rb3 = rb in
let qa' = quad32_xor la (Mkfour 0 ra0 ra1 ra2) in
let qb' = quad32_xor lb (quad32_xor (Mkfour ra3 0 0 0) (Mkfour 0 rb0 rb1 rb2)) in
(qa', qb')
val lemma_shift_left_1 (a:poly) : Lemma
(requires degree a < 128)
(ensures to_quad32 (shift a 1) == quad32_shift_left_1 (to_quad32 a))
val lemma_shift_2_left_1 (lo hi:poly) : Lemma
(requires degree hi < 127 /\ degree lo < 128)
(ensures (
let n = monomial 128 in
let a = hi *. n +. lo in
let a' = shift a 1 in
let (lo', hi') = quad32_shift_2_left_1 (to_quad32 lo) (to_quad32 hi) in
lo' == to_quad32 (a' %. n) /\
hi' == to_quad32 (a' /. n)
))
// TODO: move this to Poly library
val lemma_reverse_reverse (a:poly) (n:nat) : Lemma
(requires degree a <= n)
(ensures reverse (reverse a n) n == a)
[SMTPat (reverse (reverse a n) n)]
val lemma_gf128_degree (_:unit) : Lemma
(ensures
degree gf128_modulus_low_terms == 7 /\
degree (monomial 128) == 128 /\
degree gf128_modulus == 128
)
val lemma_gf128_constant_rev (q:quad32) : Lemma
(ensures
to_quad32 (reverse gf128_modulus_low_terms 127) == Mkfour 0 0 0 0xe1000000 /\
quad32_xor q q == Mkfour 0 0 0 0
)
val lemma_quad32_double_hi_rev (a:poly) : Lemma
(requires degree a <= 127 /\ degree (reverse a 127) <= 63)
(ensures of_double32 (quad32_double_hi (to_quad32 a)) == reverse (reverse a 127) 63)
// Compute 128-bit multiply in terms of 64-bit multiplies
val lemma_gf128_mul (a b c d:poly) (n:nat) : Lemma
(ensures (
let m = monomial n in
let ab = a *. m +. b in
let cd = c *. m +. d in
let ac = a *. c in
let ad = a *. d in
let bc = b *. c in
let bd = b *. d in
ab *. cd ==
shift (ac +. bc /. m +. ad /. m) (n + n) +.
((bc %. m) *. m +. (ad %. m) *. m +. bd)
))
// Compute (a * b) % g, where g = n + h and %. n is easy to compute (e.g. n = x^128)
val lemma_gf128_reduce (a b g n h:poly) : Lemma
(requires
degree h >= 0 /\
degree n > 2 * degree h /\
degree g == degree n /\
degree a <= degree n /\
degree b <= degree n /\
g == n +. h
)
(ensures (
let d = (a *. b) /. n in
let dh = d *. h in
degree ((dh /. n) *. h) <= 2 * degree h /\
(a *. b) %. g == (dh /. n) *. h +. dh %. n +. (a *. b) %. n
))
val lemma_gf128_reduce_rev (a b h:poly) (n:pos) : Lemma
(requires
degree h >= 0 /\
n > 2 * degree h /\
degree (monomial n +. h) == n /\
degree a < n /\
degree b < n
)
(ensures (
let m = monomial n in
let g = m +. h in
let r x = reverse x (n - 1) in
let rr x = reverse x (2 * n - 1) in
let rab = rr (a *. b) in
let rd = rab %. m in
let rdh = rr (r rd *. h) in
let rdhL = rdh %. m in
let rdhLh = r (r rdhL *. h) in
degree (r rdhL) <= 2 * degree h /\
degree (r rdhLh) <= 2 * degree h /\
r ((a *. b) %. g) == rdhLh +. rdh /. m +. rab /. m
))
val lemma_reduce_rev (a0 a1 a2 h:poly) (n:pos) : Lemma
(requires
n == 64 /\ // verification times out unless n is known
degree a0 < n + n /\
degree a1 < n + n /\
degree a2 < n + n /\
degree (monomial (n + n) +. h) == n + n /\
degree h < n /\
h.[0]
)
(ensures (
let nn = n + n in
let mm = monomial nn in
let m = monomial n in
let g = mm +. h in
let c = reverse (shift h (-1)) (n - 1) in
let y_10 = a0 +. shift (mask a1 64) 64 in
let y_0 = mask y_10 64 in
let y_10c = swap y_10 64 +. y_0 *. c in
let a = a0 +. shift a1 64 +. shift a2 128 in
let x = reverse a (nn + nn - 1) in
reverse (x %. g) (nn - 1) == swap y_10c 64 +. (a2 +. shift a1 (-64)) +. mask y_10c 64 *. c
))
// of_fun 8 (fun (i:nat) -> i = 0 || i = 1 || i = 6)
let gf128_low_shift : poly = shift gf128_modulus_low_terms (-1)
// of_fun 8 (fun (i:nat) -> i = 127 || i = 126 || i = 121)
let gf128_rev_shift : poly = reverse gf128_low_shift 127
val lemma_gf128_low_shift (_:unit) : Lemma
(shift (of_quad32 (Mkfour 0 0 0 0xc2000000)) (-64) == reverse gf128_low_shift 63)
val lemma_gf128_high_bit (_:unit) : Lemma
(of_quad32 (Mkfour 0 0 0 0x80000000) == monomial 127)
val lemma_gf128_low_shift_1 (_:unit) : Lemma
(of_quad32 (Mkfour 1 0 0 0xc2000000) == reverse (shift (monomial 128 +. gf128_modulus_low_terms) (-1)) 127)
let gf128_mul_rev (a b:poly) : poly =
reverse (gf128_mul (reverse a 127) (reverse b 127)) 127
let ( *~ ) = gf128_mul_rev
val lemma_gf128_mul_rev_commute (a b:poly) : Lemma (a *~ b == b *~ a)
val lemma_gf128_mul_rev_associate (a b c:poly) : Lemma
(a *~ (b *~ c) == (a *~ b) *~ c)
val lemma_gf128_mul_rev_distribute_left (a b c:poly) : Lemma
((a +. b) *~ c == a *~ c +. b *~ c)
val lemma_gf128_mul_rev_distribute_right (a b c:poly) : Lemma
(a *~ (b +. c) == a *~ b +. a *~ c)
// TODO: change definition of reverse from (reverse a 127) to (reverse 128 a) | false | true | Vale.AES.GF128.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val mod_rev (n: pos) (a b: poly) : poly | [] | Vale.AES.GF128.mod_rev | {
"file_name": "vale/code/crypto/aes/Vale.AES.GF128.fsti",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | n: Prims.pos -> a: Vale.Math.Poly2_s.poly -> b: Vale.Math.Poly2_s.poly -> Vale.Math.Poly2_s.poly | {
"end_col": 46,
"end_line": 181,
"start_col": 2,
"start_line": 181
} |
Prims.Tot | val shift_key_1 (n: pos) (f h: poly) : poly | [
{
"abbrev": false,
"full_module": "Vale.Math.Poly2.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2.Bits_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Math.Poly2_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GF128_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words.Four_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let shift_key_1 (n:pos) (f h:poly) : poly =
let g = monomial n +. f in
let h1 = shift h 1 in
let offset = reverse (shift g (-1)) (n - 1) in
mask h1 n +. (if h1.[n] then offset else zero) | val shift_key_1 (n: pos) (f h: poly) : poly
let shift_key_1 (n: pos) (f h: poly) : poly = | false | null | false | let g = monomial n +. f in
let h1 = shift h 1 in
let offset = reverse (shift g (- 1)) (n - 1) in
mask h1 n +. (if h1.[ n ] then offset else zero) | {
"checked_file": "Vale.AES.GF128.fsti.checked",
"dependencies": [
"Vale.Math.Poly2_s.fsti.checked",
"Vale.Math.Poly2.Lemmas.fsti.checked",
"Vale.Math.Poly2.Bits_s.fsti.checked",
"Vale.Math.Poly2.fsti.checked",
"Vale.Def.Words_s.fsti.checked",
"Vale.Def.Words.Four_s.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Arch.Types.fsti.checked",
"Vale.AES.GF128_s.fsti.checked",
"prims.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked"
],
"interface_file": false,
"source_file": "Vale.AES.GF128.fsti"
} | [
"total"
] | [
"Prims.pos",
"Vale.Math.Poly2_s.poly",
"Vale.Math.Poly2.op_Plus_Dot",
"Vale.Math.Poly2.mask",
"Vale.Math.Poly2_s.op_String_Access",
"Prims.bool",
"Vale.Math.Poly2_s.zero",
"Vale.Math.Poly2_s.reverse",
"Vale.Math.Poly2_s.shift",
"Prims.op_Minus",
"Prims.op_Subtraction",
"Vale.Math.Poly2_s.monomial"
] | [] | module Vale.AES.GF128
open FStar.Seq
open FStar.Mul
open Vale.Def.Words_s
open Vale.Def.Words.Four_s
open Vale.Def.Types_s
open Vale.Arch.Types
open Vale.AES.GF128_s
open Vale.Math.Poly2_s
open Vale.Math.Poly2.Bits_s
open Vale.Math.Poly2
open Vale.Math.Poly2.Lemmas
let quad32_shift_left_1 (q:quad32) : quad32 =
let l = four_map (fun (i:nat32) -> ishl i 1) q in
let r = four_map (fun (i:nat32) -> ishr i 31) q in
let Mkfour r0 r1 r2 r3 = r in
quad32_xor l (Mkfour 0 r0 r1 r2)
let quad32_shift_2_left_1 (qa qb:quad32) : quad32 & quad32 =
let la = four_map (fun (i:nat32) -> ishl i 1) qa in
let lb = four_map (fun (i:nat32) -> ishl i 1) qb in
let ra = four_map (fun (i:nat32) -> ishr i 31) qa in
let rb = four_map (fun (i:nat32) -> ishr i 31) qb in
let Mkfour ra0 ra1 ra2 ra3 = ra in
let Mkfour rb0 rb1 rb2 rb3 = rb in
let qa' = quad32_xor la (Mkfour 0 ra0 ra1 ra2) in
let qb' = quad32_xor lb (quad32_xor (Mkfour ra3 0 0 0) (Mkfour 0 rb0 rb1 rb2)) in
(qa', qb')
val lemma_shift_left_1 (a:poly) : Lemma
(requires degree a < 128)
(ensures to_quad32 (shift a 1) == quad32_shift_left_1 (to_quad32 a))
val lemma_shift_2_left_1 (lo hi:poly) : Lemma
(requires degree hi < 127 /\ degree lo < 128)
(ensures (
let n = monomial 128 in
let a = hi *. n +. lo in
let a' = shift a 1 in
let (lo', hi') = quad32_shift_2_left_1 (to_quad32 lo) (to_quad32 hi) in
lo' == to_quad32 (a' %. n) /\
hi' == to_quad32 (a' /. n)
))
// TODO: move this to Poly library
val lemma_reverse_reverse (a:poly) (n:nat) : Lemma
(requires degree a <= n)
(ensures reverse (reverse a n) n == a)
[SMTPat (reverse (reverse a n) n)]
val lemma_gf128_degree (_:unit) : Lemma
(ensures
degree gf128_modulus_low_terms == 7 /\
degree (monomial 128) == 128 /\
degree gf128_modulus == 128
)
val lemma_gf128_constant_rev (q:quad32) : Lemma
(ensures
to_quad32 (reverse gf128_modulus_low_terms 127) == Mkfour 0 0 0 0xe1000000 /\
quad32_xor q q == Mkfour 0 0 0 0
)
val lemma_quad32_double_hi_rev (a:poly) : Lemma
(requires degree a <= 127 /\ degree (reverse a 127) <= 63)
(ensures of_double32 (quad32_double_hi (to_quad32 a)) == reverse (reverse a 127) 63)
// Compute 128-bit multiply in terms of 64-bit multiplies
val lemma_gf128_mul (a b c d:poly) (n:nat) : Lemma
(ensures (
let m = monomial n in
let ab = a *. m +. b in
let cd = c *. m +. d in
let ac = a *. c in
let ad = a *. d in
let bc = b *. c in
let bd = b *. d in
ab *. cd ==
shift (ac +. bc /. m +. ad /. m) (n + n) +.
((bc %. m) *. m +. (ad %. m) *. m +. bd)
))
// Compute (a * b) % g, where g = n + h and %. n is easy to compute (e.g. n = x^128)
val lemma_gf128_reduce (a b g n h:poly) : Lemma
(requires
degree h >= 0 /\
degree n > 2 * degree h /\
degree g == degree n /\
degree a <= degree n /\
degree b <= degree n /\
g == n +. h
)
(ensures (
let d = (a *. b) /. n in
let dh = d *. h in
degree ((dh /. n) *. h) <= 2 * degree h /\
(a *. b) %. g == (dh /. n) *. h +. dh %. n +. (a *. b) %. n
))
val lemma_gf128_reduce_rev (a b h:poly) (n:pos) : Lemma
(requires
degree h >= 0 /\
n > 2 * degree h /\
degree (monomial n +. h) == n /\
degree a < n /\
degree b < n
)
(ensures (
let m = monomial n in
let g = m +. h in
let r x = reverse x (n - 1) in
let rr x = reverse x (2 * n - 1) in
let rab = rr (a *. b) in
let rd = rab %. m in
let rdh = rr (r rd *. h) in
let rdhL = rdh %. m in
let rdhLh = r (r rdhL *. h) in
degree (r rdhL) <= 2 * degree h /\
degree (r rdhLh) <= 2 * degree h /\
r ((a *. b) %. g) == rdhLh +. rdh /. m +. rab /. m
))
val lemma_reduce_rev (a0 a1 a2 h:poly) (n:pos) : Lemma
(requires
n == 64 /\ // verification times out unless n is known
degree a0 < n + n /\
degree a1 < n + n /\
degree a2 < n + n /\
degree (monomial (n + n) +. h) == n + n /\
degree h < n /\
h.[0]
)
(ensures (
let nn = n + n in
let mm = monomial nn in
let m = monomial n in
let g = mm +. h in
let c = reverse (shift h (-1)) (n - 1) in
let y_10 = a0 +. shift (mask a1 64) 64 in
let y_0 = mask y_10 64 in
let y_10c = swap y_10 64 +. y_0 *. c in
let a = a0 +. shift a1 64 +. shift a2 128 in
let x = reverse a (nn + nn - 1) in
reverse (x %. g) (nn - 1) == swap y_10c 64 +. (a2 +. shift a1 (-64)) +. mask y_10c 64 *. c
))
// of_fun 8 (fun (i:nat) -> i = 0 || i = 1 || i = 6)
let gf128_low_shift : poly = shift gf128_modulus_low_terms (-1)
// of_fun 8 (fun (i:nat) -> i = 127 || i = 126 || i = 121)
let gf128_rev_shift : poly = reverse gf128_low_shift 127
val lemma_gf128_low_shift (_:unit) : Lemma
(shift (of_quad32 (Mkfour 0 0 0 0xc2000000)) (-64) == reverse gf128_low_shift 63)
val lemma_gf128_high_bit (_:unit) : Lemma
(of_quad32 (Mkfour 0 0 0 0x80000000) == monomial 127)
val lemma_gf128_low_shift_1 (_:unit) : Lemma
(of_quad32 (Mkfour 1 0 0 0xc2000000) == reverse (shift (monomial 128 +. gf128_modulus_low_terms) (-1)) 127)
let gf128_mul_rev (a b:poly) : poly =
reverse (gf128_mul (reverse a 127) (reverse b 127)) 127
let ( *~ ) = gf128_mul_rev
val lemma_gf128_mul_rev_commute (a b:poly) : Lemma (a *~ b == b *~ a)
val lemma_gf128_mul_rev_associate (a b c:poly) : Lemma
(a *~ (b *~ c) == (a *~ b) *~ c)
val lemma_gf128_mul_rev_distribute_left (a b c:poly) : Lemma
((a +. b) *~ c == a *~ c +. b *~ c)
val lemma_gf128_mul_rev_distribute_right (a b c:poly) : Lemma
(a *~ (b +. c) == a *~ b +. a *~ c)
// TODO: change definition of reverse from (reverse a 127) to (reverse 128 a)
let mod_rev (n:pos) (a b:poly) : poly =
reverse (reverse a (n + n - 1) %. b) (n - 1)
val lemma_add_mod_rev (n:pos) (a1 a2 b:poly) : Lemma
(requires degree b >= 0)
(ensures mod_rev n (a1 +. a2) b == mod_rev n a1 b +. mod_rev n a2 b) | false | true | Vale.AES.GF128.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val shift_key_1 (n: pos) (f h: poly) : poly | [] | Vale.AES.GF128.shift_key_1 | {
"file_name": "vale/code/crypto/aes/Vale.AES.GF128.fsti",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | n: Prims.pos -> f: Vale.Math.Poly2_s.poly -> h: Vale.Math.Poly2_s.poly -> Vale.Math.Poly2_s.poly | {
"end_col": 48,
"end_line": 191,
"start_col": 43,
"start_line": 187
} |
FStar.HyperStack.ST.Stack | val default_error_handler
(typename_s fieldname reason: string)
(error_code: U64.t)
(context: B.pointer LPE.error_frame)
(input: input_buffer)
(start_pos: U64.t)
: HST.Stack unit
(requires (fun h -> B.live h context))
(ensures (fun h _ h' -> B.modifies (B.loc_buffer context) h h')) | [
{
"abbrev": false,
"full_module": "LowStar.BufferOps",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "EverParse3d.ErrorCode",
"short_module": "LPE"
},
{
"abbrev": true,
"full_module": "FStar.UInt8",
"short_module": "U8"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": false,
"full_module": "EverParse3d.InputStream.Extern.Base",
"short_module": null
},
{
"abbrev": false,
"full_module": "EverParse3d.InputStream.Extern",
"short_module": null
},
{
"abbrev": false,
"full_module": "EverParse3d.InputStream.Extern",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let default_error_handler
(typename_s: string)
(fieldname: string)
(reason: string)
(error_code: U64.t)
(context: B.pointer LPE.error_frame)
(input: input_buffer)
(start_pos: U64.t)
: HST.Stack unit
(requires (fun h -> B.live h context))
(ensures (fun h _ h' -> B.modifies (B.loc_buffer context) h h'))
=
if not ( !* context ).LPE.filled then begin
context *= {
LPE.filled = true;
LPE.start_pos = start_pos;
LPE.typename_s = typename_s;
LPE.fieldname = fieldname;
LPE.reason = reason;
LPE.error_code = error_code;
}
end | val default_error_handler
(typename_s fieldname reason: string)
(error_code: U64.t)
(context: B.pointer LPE.error_frame)
(input: input_buffer)
(start_pos: U64.t)
: HST.Stack unit
(requires (fun h -> B.live h context))
(ensures (fun h _ h' -> B.modifies (B.loc_buffer context) h h'))
let default_error_handler
(typename_s fieldname reason: string)
(error_code: U64.t)
(context: B.pointer LPE.error_frame)
(input: input_buffer)
(start_pos: U64.t)
: HST.Stack unit
(requires (fun h -> B.live h context))
(ensures (fun h _ h' -> B.modifies (B.loc_buffer context) h h')) = | true | null | false | if not (!*context).LPE.filled
then
context *=
{
LPE.filled = true;
LPE.start_pos = start_pos;
LPE.typename_s = typename_s;
LPE.fieldname = fieldname;
LPE.reason = reason;
LPE.error_code = error_code
} | {
"checked_file": "EverParse3d.InputStream.Extern.Type.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowStar.BufferOps.fst.checked",
"LowStar.Buffer.fst.checked",
"FStar.UInt8.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.Pervasives.fsti.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked",
"EverParse3d.InputStream.Extern.Base.fsti.checked",
"EverParse3d.ErrorCode.fst.checked"
],
"interface_file": false,
"source_file": "EverParse3d.InputStream.Extern.Type.fst"
} | [] | [
"Prims.string",
"FStar.UInt64.t",
"LowStar.Buffer.pointer",
"EverParse3d.ErrorCode.error_frame",
"EverParse3d.InputStream.Extern.Type.input_buffer",
"LowStar.BufferOps.op_Star_Equals",
"LowStar.Buffer.trivial_preorder",
"EverParse3d.ErrorCode.Mkerror_frame",
"Prims.unit",
"Prims.bool",
"Prims.op_Negation",
"EverParse3d.ErrorCode.__proj__Mkerror_frame__item__filled",
"LowStar.BufferOps.op_Bang_Star",
"FStar.Monotonic.HyperStack.mem",
"LowStar.Monotonic.Buffer.live",
"LowStar.Monotonic.Buffer.modifies",
"LowStar.Monotonic.Buffer.loc_buffer"
] | [] | module EverParse3d.InputStream.Extern.Type
include EverParse3d.InputStream.Extern.Base
module B = LowStar.Buffer
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
module U32 = FStar.UInt32
module U8 = FStar.UInt8
module LPE = EverParse3d.ErrorCode
module U64 = FStar.UInt64
noeq
type input_buffer = {
base: t;
has_length: bool;
length: LPE.pos_t;
position: B.pointer (Ghost.erased LPE.pos_t);
prf: squash (
B.loc_disjoint (footprint base) (B.loc_buffer position) /\
(has_length == true ==> U64.v length <= U64.v (len_all base))
);
}
open LowStar.BufferOps
let make_input_buffer
(base: t)
(position: B.pointer (Ghost.erased LPE.pos_t))
: HST.Stack input_buffer
(requires (fun h ->
B.loc_disjoint (footprint base) (B.loc_buffer position) /\
B.live h position
))
(ensures (fun h _ h' ->
B.modifies (B.loc_buffer position) h h'
))
= position *= 0uL;
{
base = base;
has_length = false;
length = 0uL;
position = position;
prf = ();
}
let make_input_buffer_with_length
(base: t)
(position: B.pointer (Ghost.erased LPE.pos_t))
(length: LPE.pos_t)
: HST.Stack input_buffer
(requires (fun h ->
B.loc_disjoint (footprint base) (B.loc_buffer position) /\
B.live h position /\
U64.v length <= U64.v (len_all base)
))
(ensures (fun h _ h' ->
B.modifies (B.loc_buffer position) h h'
))
= position *= 0uL;
{
base = base;
has_length = true;
length = length;
position = position;
prf = ();
}
let default_error_handler
(typename_s: string)
(fieldname: string)
(reason: string)
(error_code: U64.t)
(context: B.pointer LPE.error_frame)
(input: input_buffer)
(start_pos: U64.t)
: HST.Stack unit
(requires (fun h -> B.live h context))
(ensures (fun h _ h' -> B.modifies (B.loc_buffer context) h h')) | false | false | EverParse3d.InputStream.Extern.Type.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 2,
"max_fuel": 0,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [
"smt.qi.eager_threshold=100"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 8,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val default_error_handler
(typename_s fieldname reason: string)
(error_code: U64.t)
(context: B.pointer LPE.error_frame)
(input: input_buffer)
(start_pos: U64.t)
: HST.Stack unit
(requires (fun h -> B.live h context))
(ensures (fun h _ h' -> B.modifies (B.loc_buffer context) h h')) | [] | EverParse3d.InputStream.Extern.Type.default_error_handler | {
"file_name": "src/3d/prelude/extern/EverParse3d.InputStream.Extern.Type.fst",
"git_rev": "446a08ce38df905547cf20f28c43776b22b8087a",
"git_url": "https://github.com/project-everest/everparse.git",
"project_name": "everparse"
} |
typename_s: Prims.string ->
fieldname: Prims.string ->
reason: Prims.string ->
error_code: FStar.UInt64.t ->
context: LowStar.Buffer.pointer EverParse3d.ErrorCode.error_frame ->
input: EverParse3d.InputStream.Extern.Type.input_buffer ->
start_pos: FStar.UInt64.t
-> FStar.HyperStack.ST.Stack Prims.unit | {
"end_col": 5,
"end_line": 89,
"start_col": 2,
"start_line": 80
} |
FStar.HyperStack.ST.Stack | val make_input_buffer (base: t) (position: B.pointer (Ghost.erased LPE.pos_t))
: HST.Stack input_buffer
(requires
(fun h -> B.loc_disjoint (footprint base) (B.loc_buffer position) /\ B.live h position))
(ensures (fun h _ h' -> B.modifies (B.loc_buffer position) h h')) | [
{
"abbrev": false,
"full_module": "LowStar.BufferOps",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "EverParse3d.ErrorCode",
"short_module": "LPE"
},
{
"abbrev": true,
"full_module": "FStar.UInt8",
"short_module": "U8"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": false,
"full_module": "EverParse3d.InputStream.Extern.Base",
"short_module": null
},
{
"abbrev": false,
"full_module": "EverParse3d.InputStream.Extern",
"short_module": null
},
{
"abbrev": false,
"full_module": "EverParse3d.InputStream.Extern",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let make_input_buffer
(base: t)
(position: B.pointer (Ghost.erased LPE.pos_t))
: HST.Stack input_buffer
(requires (fun h ->
B.loc_disjoint (footprint base) (B.loc_buffer position) /\
B.live h position
))
(ensures (fun h _ h' ->
B.modifies (B.loc_buffer position) h h'
))
= position *= 0uL;
{
base = base;
has_length = false;
length = 0uL;
position = position;
prf = ();
} | val make_input_buffer (base: t) (position: B.pointer (Ghost.erased LPE.pos_t))
: HST.Stack input_buffer
(requires
(fun h -> B.loc_disjoint (footprint base) (B.loc_buffer position) /\ B.live h position))
(ensures (fun h _ h' -> B.modifies (B.loc_buffer position) h h'))
let make_input_buffer (base: t) (position: B.pointer (Ghost.erased LPE.pos_t))
: HST.Stack input_buffer
(requires
(fun h -> B.loc_disjoint (footprint base) (B.loc_buffer position) /\ B.live h position))
(ensures (fun h _ h' -> B.modifies (B.loc_buffer position) h h')) = | true | null | false | position *= 0uL;
{ base = base; has_length = false; length = 0uL; position = position; prf = () } | {
"checked_file": "EverParse3d.InputStream.Extern.Type.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowStar.BufferOps.fst.checked",
"LowStar.Buffer.fst.checked",
"FStar.UInt8.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.Pervasives.fsti.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked",
"EverParse3d.InputStream.Extern.Base.fsti.checked",
"EverParse3d.ErrorCode.fst.checked"
],
"interface_file": false,
"source_file": "EverParse3d.InputStream.Extern.Type.fst"
} | [] | [
"EverParse3d.InputStream.Extern.Base.t",
"LowStar.Buffer.pointer",
"FStar.Ghost.erased",
"EverParse3d.ErrorCode.pos_t",
"EverParse3d.InputStream.Extern.Type.Mkinput_buffer",
"FStar.UInt64.__uint_to_t",
"EverParse3d.InputStream.Extern.Type.input_buffer",
"Prims.unit",
"LowStar.BufferOps.op_Star_Equals",
"LowStar.Buffer.trivial_preorder",
"FStar.Ghost.hide",
"FStar.Monotonic.HyperStack.mem",
"Prims.l_and",
"LowStar.Monotonic.Buffer.loc_disjoint",
"EverParse3d.InputStream.Extern.Base.footprint",
"LowStar.Monotonic.Buffer.loc_buffer",
"LowStar.Monotonic.Buffer.live",
"LowStar.Monotonic.Buffer.modifies"
] | [] | module EverParse3d.InputStream.Extern.Type
include EverParse3d.InputStream.Extern.Base
module B = LowStar.Buffer
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
module U32 = FStar.UInt32
module U8 = FStar.UInt8
module LPE = EverParse3d.ErrorCode
module U64 = FStar.UInt64
noeq
type input_buffer = {
base: t;
has_length: bool;
length: LPE.pos_t;
position: B.pointer (Ghost.erased LPE.pos_t);
prf: squash (
B.loc_disjoint (footprint base) (B.loc_buffer position) /\
(has_length == true ==> U64.v length <= U64.v (len_all base))
);
}
open LowStar.BufferOps
let make_input_buffer
(base: t)
(position: B.pointer (Ghost.erased LPE.pos_t))
: HST.Stack input_buffer
(requires (fun h ->
B.loc_disjoint (footprint base) (B.loc_buffer position) /\
B.live h position
))
(ensures (fun h _ h' ->
B.modifies (B.loc_buffer position) h h' | false | false | EverParse3d.InputStream.Extern.Type.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 2,
"max_fuel": 0,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [
"smt.qi.eager_threshold=100"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 8,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val make_input_buffer (base: t) (position: B.pointer (Ghost.erased LPE.pos_t))
: HST.Stack input_buffer
(requires
(fun h -> B.loc_disjoint (footprint base) (B.loc_buffer position) /\ B.live h position))
(ensures (fun h _ h' -> B.modifies (B.loc_buffer position) h h')) | [] | EverParse3d.InputStream.Extern.Type.make_input_buffer | {
"file_name": "src/3d/prelude/extern/EverParse3d.InputStream.Extern.Type.fst",
"git_rev": "446a08ce38df905547cf20f28c43776b22b8087a",
"git_url": "https://github.com/project-everest/everparse.git",
"project_name": "everparse"
} |
base: EverParse3d.InputStream.Extern.Base.t ->
position: LowStar.Buffer.pointer (FStar.Ghost.erased EverParse3d.ErrorCode.pos_t)
-> FStar.HyperStack.ST.Stack EverParse3d.InputStream.Extern.Type.input_buffer | {
"end_col": 3,
"end_line": 44,
"start_col": 2,
"start_line": 37
} |
FStar.HyperStack.ST.Stack | val make_input_buffer_with_length
(base: t)
(position: B.pointer (Ghost.erased LPE.pos_t))
(length: LPE.pos_t)
: HST.Stack input_buffer
(requires
(fun h ->
B.loc_disjoint (footprint base) (B.loc_buffer position) /\ B.live h position /\
U64.v length <= U64.v (len_all base)))
(ensures (fun h _ h' -> B.modifies (B.loc_buffer position) h h')) | [
{
"abbrev": false,
"full_module": "LowStar.BufferOps",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "EverParse3d.ErrorCode",
"short_module": "LPE"
},
{
"abbrev": true,
"full_module": "FStar.UInt8",
"short_module": "U8"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": false,
"full_module": "EverParse3d.InputStream.Extern.Base",
"short_module": null
},
{
"abbrev": false,
"full_module": "EverParse3d.InputStream.Extern",
"short_module": null
},
{
"abbrev": false,
"full_module": "EverParse3d.InputStream.Extern",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let make_input_buffer_with_length
(base: t)
(position: B.pointer (Ghost.erased LPE.pos_t))
(length: LPE.pos_t)
: HST.Stack input_buffer
(requires (fun h ->
B.loc_disjoint (footprint base) (B.loc_buffer position) /\
B.live h position /\
U64.v length <= U64.v (len_all base)
))
(ensures (fun h _ h' ->
B.modifies (B.loc_buffer position) h h'
))
= position *= 0uL;
{
base = base;
has_length = true;
length = length;
position = position;
prf = ();
} | val make_input_buffer_with_length
(base: t)
(position: B.pointer (Ghost.erased LPE.pos_t))
(length: LPE.pos_t)
: HST.Stack input_buffer
(requires
(fun h ->
B.loc_disjoint (footprint base) (B.loc_buffer position) /\ B.live h position /\
U64.v length <= U64.v (len_all base)))
(ensures (fun h _ h' -> B.modifies (B.loc_buffer position) h h'))
let make_input_buffer_with_length
(base: t)
(position: B.pointer (Ghost.erased LPE.pos_t))
(length: LPE.pos_t)
: HST.Stack input_buffer
(requires
(fun h ->
B.loc_disjoint (footprint base) (B.loc_buffer position) /\ B.live h position /\
U64.v length <= U64.v (len_all base)))
(ensures (fun h _ h' -> B.modifies (B.loc_buffer position) h h')) = | true | null | false | position *= 0uL;
{ base = base; has_length = true; length = length; position = position; prf = () } | {
"checked_file": "EverParse3d.InputStream.Extern.Type.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowStar.BufferOps.fst.checked",
"LowStar.Buffer.fst.checked",
"FStar.UInt8.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.Pervasives.fsti.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked",
"EverParse3d.InputStream.Extern.Base.fsti.checked",
"EverParse3d.ErrorCode.fst.checked"
],
"interface_file": false,
"source_file": "EverParse3d.InputStream.Extern.Type.fst"
} | [] | [
"EverParse3d.InputStream.Extern.Base.t",
"LowStar.Buffer.pointer",
"FStar.Ghost.erased",
"EverParse3d.ErrorCode.pos_t",
"EverParse3d.InputStream.Extern.Type.Mkinput_buffer",
"EverParse3d.InputStream.Extern.Type.input_buffer",
"Prims.unit",
"LowStar.BufferOps.op_Star_Equals",
"LowStar.Buffer.trivial_preorder",
"FStar.Ghost.hide",
"FStar.UInt64.__uint_to_t",
"FStar.Monotonic.HyperStack.mem",
"Prims.l_and",
"LowStar.Monotonic.Buffer.loc_disjoint",
"EverParse3d.InputStream.Extern.Base.footprint",
"LowStar.Monotonic.Buffer.loc_buffer",
"LowStar.Monotonic.Buffer.live",
"Prims.b2t",
"Prims.op_LessThanOrEqual",
"FStar.UInt64.v",
"EverParse3d.InputStream.Extern.Base.len_all",
"LowStar.Monotonic.Buffer.modifies"
] | [] | module EverParse3d.InputStream.Extern.Type
include EverParse3d.InputStream.Extern.Base
module B = LowStar.Buffer
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
module U32 = FStar.UInt32
module U8 = FStar.UInt8
module LPE = EverParse3d.ErrorCode
module U64 = FStar.UInt64
noeq
type input_buffer = {
base: t;
has_length: bool;
length: LPE.pos_t;
position: B.pointer (Ghost.erased LPE.pos_t);
prf: squash (
B.loc_disjoint (footprint base) (B.loc_buffer position) /\
(has_length == true ==> U64.v length <= U64.v (len_all base))
);
}
open LowStar.BufferOps
let make_input_buffer
(base: t)
(position: B.pointer (Ghost.erased LPE.pos_t))
: HST.Stack input_buffer
(requires (fun h ->
B.loc_disjoint (footprint base) (B.loc_buffer position) /\
B.live h position
))
(ensures (fun h _ h' ->
B.modifies (B.loc_buffer position) h h'
))
= position *= 0uL;
{
base = base;
has_length = false;
length = 0uL;
position = position;
prf = ();
}
let make_input_buffer_with_length
(base: t)
(position: B.pointer (Ghost.erased LPE.pos_t))
(length: LPE.pos_t)
: HST.Stack input_buffer
(requires (fun h ->
B.loc_disjoint (footprint base) (B.loc_buffer position) /\
B.live h position /\
U64.v length <= U64.v (len_all base)
))
(ensures (fun h _ h' ->
B.modifies (B.loc_buffer position) h h' | false | false | EverParse3d.InputStream.Extern.Type.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 2,
"max_fuel": 0,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [
"smt.qi.eager_threshold=100"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 8,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val make_input_buffer_with_length
(base: t)
(position: B.pointer (Ghost.erased LPE.pos_t))
(length: LPE.pos_t)
: HST.Stack input_buffer
(requires
(fun h ->
B.loc_disjoint (footprint base) (B.loc_buffer position) /\ B.live h position /\
U64.v length <= U64.v (len_all base)))
(ensures (fun h _ h' -> B.modifies (B.loc_buffer position) h h')) | [] | EverParse3d.InputStream.Extern.Type.make_input_buffer_with_length | {
"file_name": "src/3d/prelude/extern/EverParse3d.InputStream.Extern.Type.fst",
"git_rev": "446a08ce38df905547cf20f28c43776b22b8087a",
"git_url": "https://github.com/project-everest/everparse.git",
"project_name": "everparse"
} |
base: EverParse3d.InputStream.Extern.Base.t ->
position: LowStar.Buffer.pointer (FStar.Ghost.erased EverParse3d.ErrorCode.pos_t) ->
length: EverParse3d.ErrorCode.pos_t
-> FStar.HyperStack.ST.Stack EverParse3d.InputStream.Extern.Type.input_buffer | {
"end_col": 3,
"end_line": 66,
"start_col": 2,
"start_line": 59
} |
Prims.Tot | val openBase: openBase_st cs vale_p | [
{
"abbrev": true,
"full_module": "Hacl.HPKE.Interface.AEAD",
"short_module": "IAEAD"
},
{
"abbrev": true,
"full_module": "Hacl.HPKE.Interface.Hash",
"short_module": "IHash"
},
{
"abbrev": true,
"full_module": "Hacl.HPKE.Interface.HKDF",
"short_module": "IHK"
},
{
"abbrev": true,
"full_module": "Hacl.HPKE.Interface.DH",
"short_module": "IDH"
},
{
"abbrev": false,
"full_module": "Hacl.Meta.HPKE",
"short_module": null
},
{
"abbrev": true,
"full_module": "Spec.Agile.Hash",
"short_module": "Hash"
},
{
"abbrev": true,
"full_module": "Spec.Agile.AEAD",
"short_module": "AEAD"
},
{
"abbrev": true,
"full_module": "Spec.Agile.DH",
"short_module": "DH"
},
{
"abbrev": true,
"full_module": "Spec.Agile.HPKE",
"short_module": "S"
},
{
"abbrev": false,
"full_module": "Hacl.Impl.HPKE",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.HPKE",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.HPKE",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let openBase = hpke_openBase_higher #cs vale_p IAEAD.aead_decrypt_cp256 setupBaseR | val openBase: openBase_st cs vale_p
let openBase = | false | null | false | hpke_openBase_higher #cs vale_p IAEAD.aead_decrypt_cp256 setupBaseR | {
"checked_file": "Hacl.HPKE.Curve64_CP256_SHA512.fst.checked",
"dependencies": [
"prims.fst.checked",
"Hacl.Meta.HPKE.fst.checked",
"Hacl.Meta.HPKE.fst.checked",
"Hacl.HPKE.Interface.HKDF.fst.checked",
"Hacl.HPKE.Interface.Hash.fst.checked",
"Hacl.HPKE.Interface.DH.fst.checked",
"Hacl.HPKE.Interface.AEAD.fsti.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": true,
"source_file": "Hacl.HPKE.Curve64_CP256_SHA512.fst"
} | [
"total"
] | [
"Hacl.Meta.HPKE.hpke_openBase_higher",
"Hacl.HPKE.Curve64_CP256_SHA512.cs",
"Hacl.HPKE.Curve64_CP256_SHA512.vale_p",
"Hacl.HPKE.Interface.AEAD.aead_decrypt_cp256",
"Hacl.HPKE.Curve64_CP256_SHA512.setupBaseR"
] | [] | module Hacl.HPKE.Curve64_CP256_SHA512
open Hacl.Meta.HPKE
module IDH = Hacl.HPKE.Interface.DH
module IHK = Hacl.HPKE.Interface.HKDF
module IHash = Hacl.HPKE.Interface.Hash
module IAEAD = Hacl.HPKE.Interface.AEAD
friend Hacl.Meta.HPKE
#set-options "--fuel 0 --ifuel 0"
let setupBaseS = hpke_setupBaseS_higher #cs vale_p IHK.hkdf_expand512 IHK.hkdf_extract512 IDH.secret_to_public_c64 IDH.dh_c64 IHK.hkdf_expand256 IHK.hkdf_extract256
let setupBaseR = hpke_setupBaseR_higher #cs vale_p IHK.hkdf_expand512 IHK.hkdf_extract512 IDH.dh_c64 IHK.hkdf_expand256 IHK.hkdf_extract256 IDH.secret_to_public_c64
let sealBase = hpke_sealBase_higher #cs vale_p IAEAD.aead_encrypt_cp256 setupBaseS | false | true | Hacl.HPKE.Curve64_CP256_SHA512.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 0,
"initial_ifuel": 0,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val openBase: openBase_st cs vale_p | [] | Hacl.HPKE.Curve64_CP256_SHA512.openBase | {
"file_name": "code/hpke/Hacl.HPKE.Curve64_CP256_SHA512.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | Hacl.Impl.HPKE.openBase_st Hacl.HPKE.Curve64_CP256_SHA512.cs Hacl.HPKE.Curve64_CP256_SHA512.vale_p | {
"end_col": 82,
"end_line": 20,
"start_col": 15,
"start_line": 20
} |
Prims.Tot | val sealBase: sealBase_st cs vale_p | [
{
"abbrev": true,
"full_module": "Hacl.HPKE.Interface.AEAD",
"short_module": "IAEAD"
},
{
"abbrev": true,
"full_module": "Hacl.HPKE.Interface.Hash",
"short_module": "IHash"
},
{
"abbrev": true,
"full_module": "Hacl.HPKE.Interface.HKDF",
"short_module": "IHK"
},
{
"abbrev": true,
"full_module": "Hacl.HPKE.Interface.DH",
"short_module": "IDH"
},
{
"abbrev": false,
"full_module": "Hacl.Meta.HPKE",
"short_module": null
},
{
"abbrev": true,
"full_module": "Spec.Agile.Hash",
"short_module": "Hash"
},
{
"abbrev": true,
"full_module": "Spec.Agile.AEAD",
"short_module": "AEAD"
},
{
"abbrev": true,
"full_module": "Spec.Agile.DH",
"short_module": "DH"
},
{
"abbrev": true,
"full_module": "Spec.Agile.HPKE",
"short_module": "S"
},
{
"abbrev": false,
"full_module": "Hacl.Impl.HPKE",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.HPKE",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.HPKE",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let sealBase = hpke_sealBase_higher #cs vale_p IAEAD.aead_encrypt_cp256 setupBaseS | val sealBase: sealBase_st cs vale_p
let sealBase = | false | null | false | hpke_sealBase_higher #cs vale_p IAEAD.aead_encrypt_cp256 setupBaseS | {
"checked_file": "Hacl.HPKE.Curve64_CP256_SHA512.fst.checked",
"dependencies": [
"prims.fst.checked",
"Hacl.Meta.HPKE.fst.checked",
"Hacl.Meta.HPKE.fst.checked",
"Hacl.HPKE.Interface.HKDF.fst.checked",
"Hacl.HPKE.Interface.Hash.fst.checked",
"Hacl.HPKE.Interface.DH.fst.checked",
"Hacl.HPKE.Interface.AEAD.fsti.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": true,
"source_file": "Hacl.HPKE.Curve64_CP256_SHA512.fst"
} | [
"total"
] | [
"Hacl.Meta.HPKE.hpke_sealBase_higher",
"Hacl.HPKE.Curve64_CP256_SHA512.cs",
"Hacl.HPKE.Curve64_CP256_SHA512.vale_p",
"Hacl.HPKE.Interface.AEAD.aead_encrypt_cp256",
"Hacl.HPKE.Curve64_CP256_SHA512.setupBaseS"
] | [] | module Hacl.HPKE.Curve64_CP256_SHA512
open Hacl.Meta.HPKE
module IDH = Hacl.HPKE.Interface.DH
module IHK = Hacl.HPKE.Interface.HKDF
module IHash = Hacl.HPKE.Interface.Hash
module IAEAD = Hacl.HPKE.Interface.AEAD
friend Hacl.Meta.HPKE
#set-options "--fuel 0 --ifuel 0"
let setupBaseS = hpke_setupBaseS_higher #cs vale_p IHK.hkdf_expand512 IHK.hkdf_extract512 IDH.secret_to_public_c64 IDH.dh_c64 IHK.hkdf_expand256 IHK.hkdf_extract256
let setupBaseR = hpke_setupBaseR_higher #cs vale_p IHK.hkdf_expand512 IHK.hkdf_extract512 IDH.dh_c64 IHK.hkdf_expand256 IHK.hkdf_extract256 IDH.secret_to_public_c64 | false | true | Hacl.HPKE.Curve64_CP256_SHA512.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 0,
"initial_ifuel": 0,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val sealBase: sealBase_st cs vale_p | [] | Hacl.HPKE.Curve64_CP256_SHA512.sealBase | {
"file_name": "code/hpke/Hacl.HPKE.Curve64_CP256_SHA512.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | Hacl.Impl.HPKE.sealBase_st Hacl.HPKE.Curve64_CP256_SHA512.cs Hacl.HPKE.Curve64_CP256_SHA512.vale_p | {
"end_col": 82,
"end_line": 18,
"start_col": 15,
"start_line": 18
} |
Prims.Tot | val setupBaseS: setupBaseS_st cs vale_p | [
{
"abbrev": true,
"full_module": "Hacl.HPKE.Interface.AEAD",
"short_module": "IAEAD"
},
{
"abbrev": true,
"full_module": "Hacl.HPKE.Interface.Hash",
"short_module": "IHash"
},
{
"abbrev": true,
"full_module": "Hacl.HPKE.Interface.HKDF",
"short_module": "IHK"
},
{
"abbrev": true,
"full_module": "Hacl.HPKE.Interface.DH",
"short_module": "IDH"
},
{
"abbrev": false,
"full_module": "Hacl.Meta.HPKE",
"short_module": null
},
{
"abbrev": true,
"full_module": "Spec.Agile.Hash",
"short_module": "Hash"
},
{
"abbrev": true,
"full_module": "Spec.Agile.AEAD",
"short_module": "AEAD"
},
{
"abbrev": true,
"full_module": "Spec.Agile.DH",
"short_module": "DH"
},
{
"abbrev": true,
"full_module": "Spec.Agile.HPKE",
"short_module": "S"
},
{
"abbrev": false,
"full_module": "Hacl.Impl.HPKE",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.HPKE",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.HPKE",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let setupBaseS = hpke_setupBaseS_higher #cs vale_p IHK.hkdf_expand512 IHK.hkdf_extract512 IDH.secret_to_public_c64 IDH.dh_c64 IHK.hkdf_expand256 IHK.hkdf_extract256 | val setupBaseS: setupBaseS_st cs vale_p
let setupBaseS = | false | null | false | hpke_setupBaseS_higher #cs
vale_p
IHK.hkdf_expand512
IHK.hkdf_extract512
IDH.secret_to_public_c64
IDH.dh_c64
IHK.hkdf_expand256
IHK.hkdf_extract256 | {
"checked_file": "Hacl.HPKE.Curve64_CP256_SHA512.fst.checked",
"dependencies": [
"prims.fst.checked",
"Hacl.Meta.HPKE.fst.checked",
"Hacl.Meta.HPKE.fst.checked",
"Hacl.HPKE.Interface.HKDF.fst.checked",
"Hacl.HPKE.Interface.Hash.fst.checked",
"Hacl.HPKE.Interface.DH.fst.checked",
"Hacl.HPKE.Interface.AEAD.fsti.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": true,
"source_file": "Hacl.HPKE.Curve64_CP256_SHA512.fst"
} | [
"total"
] | [
"Hacl.Meta.HPKE.hpke_setupBaseS_higher",
"Hacl.HPKE.Curve64_CP256_SHA512.cs",
"Hacl.HPKE.Curve64_CP256_SHA512.vale_p",
"Hacl.HPKE.Interface.HKDF.hkdf_expand512",
"Hacl.HPKE.Interface.HKDF.hkdf_extract512",
"Hacl.HPKE.Interface.DH.secret_to_public_c64",
"Hacl.HPKE.Interface.DH.dh_c64",
"Hacl.HPKE.Interface.HKDF.hkdf_expand256",
"Hacl.HPKE.Interface.HKDF.hkdf_extract256"
] | [] | module Hacl.HPKE.Curve64_CP256_SHA512
open Hacl.Meta.HPKE
module IDH = Hacl.HPKE.Interface.DH
module IHK = Hacl.HPKE.Interface.HKDF
module IHash = Hacl.HPKE.Interface.Hash
module IAEAD = Hacl.HPKE.Interface.AEAD
friend Hacl.Meta.HPKE
#set-options "--fuel 0 --ifuel 0" | false | true | Hacl.HPKE.Curve64_CP256_SHA512.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 0,
"initial_ifuel": 0,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val setupBaseS: setupBaseS_st cs vale_p | [] | Hacl.HPKE.Curve64_CP256_SHA512.setupBaseS | {
"file_name": "code/hpke/Hacl.HPKE.Curve64_CP256_SHA512.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | Hacl.Impl.HPKE.setupBaseS_st Hacl.HPKE.Curve64_CP256_SHA512.cs Hacl.HPKE.Curve64_CP256_SHA512.vale_p | {
"end_col": 164,
"end_line": 14,
"start_col": 17,
"start_line": 14
} |
Prims.Tot | val setupBaseR: setupBaseR_st cs vale_p | [
{
"abbrev": true,
"full_module": "Hacl.HPKE.Interface.AEAD",
"short_module": "IAEAD"
},
{
"abbrev": true,
"full_module": "Hacl.HPKE.Interface.Hash",
"short_module": "IHash"
},
{
"abbrev": true,
"full_module": "Hacl.HPKE.Interface.HKDF",
"short_module": "IHK"
},
{
"abbrev": true,
"full_module": "Hacl.HPKE.Interface.DH",
"short_module": "IDH"
},
{
"abbrev": false,
"full_module": "Hacl.Meta.HPKE",
"short_module": null
},
{
"abbrev": true,
"full_module": "Spec.Agile.Hash",
"short_module": "Hash"
},
{
"abbrev": true,
"full_module": "Spec.Agile.AEAD",
"short_module": "AEAD"
},
{
"abbrev": true,
"full_module": "Spec.Agile.DH",
"short_module": "DH"
},
{
"abbrev": true,
"full_module": "Spec.Agile.HPKE",
"short_module": "S"
},
{
"abbrev": false,
"full_module": "Hacl.Impl.HPKE",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.HPKE",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.HPKE",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let setupBaseR = hpke_setupBaseR_higher #cs vale_p IHK.hkdf_expand512 IHK.hkdf_extract512 IDH.dh_c64 IHK.hkdf_expand256 IHK.hkdf_extract256 IDH.secret_to_public_c64 | val setupBaseR: setupBaseR_st cs vale_p
let setupBaseR = | false | null | false | hpke_setupBaseR_higher #cs
vale_p
IHK.hkdf_expand512
IHK.hkdf_extract512
IDH.dh_c64
IHK.hkdf_expand256
IHK.hkdf_extract256
IDH.secret_to_public_c64 | {
"checked_file": "Hacl.HPKE.Curve64_CP256_SHA512.fst.checked",
"dependencies": [
"prims.fst.checked",
"Hacl.Meta.HPKE.fst.checked",
"Hacl.Meta.HPKE.fst.checked",
"Hacl.HPKE.Interface.HKDF.fst.checked",
"Hacl.HPKE.Interface.Hash.fst.checked",
"Hacl.HPKE.Interface.DH.fst.checked",
"Hacl.HPKE.Interface.AEAD.fsti.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": true,
"source_file": "Hacl.HPKE.Curve64_CP256_SHA512.fst"
} | [
"total"
] | [
"Hacl.Meta.HPKE.hpke_setupBaseR_higher",
"Hacl.HPKE.Curve64_CP256_SHA512.cs",
"Hacl.HPKE.Curve64_CP256_SHA512.vale_p",
"Hacl.HPKE.Interface.HKDF.hkdf_expand512",
"Hacl.HPKE.Interface.HKDF.hkdf_extract512",
"Hacl.HPKE.Interface.DH.dh_c64",
"Hacl.HPKE.Interface.HKDF.hkdf_expand256",
"Hacl.HPKE.Interface.HKDF.hkdf_extract256",
"Hacl.HPKE.Interface.DH.secret_to_public_c64"
] | [] | module Hacl.HPKE.Curve64_CP256_SHA512
open Hacl.Meta.HPKE
module IDH = Hacl.HPKE.Interface.DH
module IHK = Hacl.HPKE.Interface.HKDF
module IHash = Hacl.HPKE.Interface.Hash
module IAEAD = Hacl.HPKE.Interface.AEAD
friend Hacl.Meta.HPKE
#set-options "--fuel 0 --ifuel 0"
let setupBaseS = hpke_setupBaseS_higher #cs vale_p IHK.hkdf_expand512 IHK.hkdf_extract512 IDH.secret_to_public_c64 IDH.dh_c64 IHK.hkdf_expand256 IHK.hkdf_extract256 | false | true | Hacl.HPKE.Curve64_CP256_SHA512.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 0,
"initial_ifuel": 0,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val setupBaseR: setupBaseR_st cs vale_p | [] | Hacl.HPKE.Curve64_CP256_SHA512.setupBaseR | {
"file_name": "code/hpke/Hacl.HPKE.Curve64_CP256_SHA512.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | Hacl.Impl.HPKE.setupBaseR_st Hacl.HPKE.Curve64_CP256_SHA512.cs Hacl.HPKE.Curve64_CP256_SHA512.vale_p | {
"end_col": 164,
"end_line": 16,
"start_col": 17,
"start_line": 16
} |
Prims.Tot | [
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.PPC64LE.Semantics_s",
"short_module": "S"
},
{
"abbrev": false,
"full_module": "Vale.Lib.Seqs",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.MachineHeap_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Heap",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let is_full_read (#t:base_typ) (h1 h2:vale_heap) (b:buffer t) (i:int) =
buffer_addr b h1 == buffer_addr b h2 /\
buffer_read b i h1 == buffer_read b i h2 /\
valid_buffer_read h1 b i | let is_full_read (#t: base_typ) (h1 h2: vale_heap) (b: buffer t) (i: int) = | false | null | false | buffer_addr b h1 == buffer_addr b h2 /\ buffer_read b i h1 == buffer_read b i h2 /\
valid_buffer_read h1 b i | {
"checked_file": "Vale.PPC64LE.Memory_Sems.fsti.checked",
"dependencies": [
"Vale.PPC64LE.Semantics_s.fst.checked",
"Vale.PPC64LE.Memory.fsti.checked",
"Vale.PPC64LE.Machine_s.fst.checked",
"Vale.Lib.Seqs.fsti.checked",
"Vale.Lib.Map16.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Arch.Types.fsti.checked",
"Vale.Arch.MachineHeap_s.fst.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"Vale.Arch.Heap.fsti.checked",
"prims.fst.checked",
"FStar.Set.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked"
],
"interface_file": false,
"source_file": "Vale.PPC64LE.Memory_Sems.fsti"
} | [
"total"
] | [
"Vale.Arch.HeapTypes_s.base_typ",
"Vale.PPC64LE.Memory.vale_heap",
"Vale.PPC64LE.Memory.buffer",
"Prims.int",
"Prims.l_and",
"Prims.eq2",
"Vale.PPC64LE.Memory.buffer_addr",
"Vale.PPC64LE.Memory.base_typ_as_vale_type",
"Vale.PPC64LE.Memory.buffer_read",
"Vale.PPC64LE.Memory.valid_buffer_read",
"Prims.logical"
] | [] | module Vale.PPC64LE.Memory_Sems
open FStar.Mul
open Vale.Def.Prop_s
open Vale.Def.Types_s
open Vale.Arch.Types
open Vale.Arch.HeapImpl
open Vale.Arch.Heap
open Vale.Arch.MachineHeap_s
open Vale.PPC64LE.Machine_s
open Vale.PPC64LE.Memory
open Vale.Lib.Seqs
module S = Vale.PPC64LE.Semantics_s
module Map16 = Vale.Lib.Map16
val same_domain (h:vale_heap) (m:S.machine_heap) : prop0
val lemma_same_domains (h:vale_heap) (m1:S.machine_heap) (m2:S.machine_heap) : Lemma
(requires same_domain h m1 /\ Set.equal (Map.domain m1) (Map.domain m2))
(ensures same_domain h m2)
val get_heap (h:vale_heap) : GTot (m:S.machine_heap{same_domain h m})
val upd_heap (h:vale_heap) (m:S.machine_heap{is_machine_heap_update (get_heap h) m}) : GTot vale_heap
//val lemma_upd_get_heap (h:vale_heap) : Lemma (upd_heap h (get_heap h) == h)
// [SMTPat (upd_heap h (get_heap h))]
val lemma_get_upd_heap (h:vale_heap) (m:S.machine_heap) : Lemma
(requires is_machine_heap_update (get_heap h) m)
(ensures get_heap (upd_heap h m) == m)
unfold let coerce (#b #a:Type) (x:a{a == b}) : b = x
val lemma_heap_impl : squash (heap_impl == vale_full_heap)
val lemma_heap_get_heap (h:vale_full_heap) : Lemma
(heap_get (coerce h) == get_heap (get_vale_heap h))
[SMTPat (heap_get (coerce h))]
val lemma_heap_taint (h:vale_full_heap) : Lemma
(heap_taint (coerce h) == full_heap_taint h)
[SMTPat (heap_taint (coerce h))] | false | false | Vale.PPC64LE.Memory_Sems.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val is_full_read : h1: Vale.PPC64LE.Memory.vale_heap ->
h2: Vale.PPC64LE.Memory.vale_heap ->
b: Vale.PPC64LE.Memory.buffer t ->
i: Prims.int
-> Prims.logical | [] | Vale.PPC64LE.Memory_Sems.is_full_read | {
"file_name": "vale/code/arch/ppc64le/Vale.PPC64LE.Memory_Sems.fsti",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} |
h1: Vale.PPC64LE.Memory.vale_heap ->
h2: Vale.PPC64LE.Memory.vale_heap ->
b: Vale.PPC64LE.Memory.buffer t ->
i: Prims.int
-> Prims.logical | {
"end_col": 26,
"end_line": 48,
"start_col": 2,
"start_line": 46
} |
|
Prims.Tot | [
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.PPC64LE.Semantics_s",
"short_module": "S"
},
{
"abbrev": false,
"full_module": "Vale.Lib.Seqs",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.MachineHeap_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Heap",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let is_full_update (vfh:vale_full_heap) (h':vale_heap) (hid:heaplet_id) (mh':machine_heap) (mt':memtaint) =
is_machine_heap_update (heap_get (coerce vfh)) mh' /\ (
let vfh' = coerce (heap_upd (coerce vfh) mh' mt') in
mem_inv vfh' /\
vfh'.vf_layout == vfh.vf_layout /\
vfh'.vf_heaplets == Map16.upd vfh.vf_heaplets hid h'
) | let is_full_update
(vfh: vale_full_heap)
(h': vale_heap)
(hid: heaplet_id)
(mh': machine_heap)
(mt': memtaint)
= | false | null | false | is_machine_heap_update (heap_get (coerce vfh)) mh' /\
(let vfh' = coerce (heap_upd (coerce vfh) mh' mt') in
mem_inv vfh' /\ vfh'.vf_layout == vfh.vf_layout /\
vfh'.vf_heaplets == Map16.upd vfh.vf_heaplets hid h') | {
"checked_file": "Vale.PPC64LE.Memory_Sems.fsti.checked",
"dependencies": [
"Vale.PPC64LE.Semantics_s.fst.checked",
"Vale.PPC64LE.Memory.fsti.checked",
"Vale.PPC64LE.Machine_s.fst.checked",
"Vale.Lib.Seqs.fsti.checked",
"Vale.Lib.Map16.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Arch.Types.fsti.checked",
"Vale.Arch.MachineHeap_s.fst.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"Vale.Arch.Heap.fsti.checked",
"prims.fst.checked",
"FStar.Set.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked"
],
"interface_file": false,
"source_file": "Vale.PPC64LE.Memory_Sems.fsti"
} | [
"total"
] | [
"Vale.PPC64LE.Memory.vale_full_heap",
"Vale.PPC64LE.Memory.vale_heap",
"Vale.PPC64LE.Memory.heaplet_id",
"Vale.Arch.MachineHeap_s.machine_heap",
"Vale.PPC64LE.Memory.memtaint",
"Prims.l_and",
"Vale.Arch.MachineHeap_s.is_machine_heap_update",
"Vale.Arch.Heap.heap_get",
"Vale.PPC64LE.Memory_Sems.coerce",
"Vale.Arch.Heap.heap_impl",
"Vale.PPC64LE.Memory.mem_inv",
"Prims.eq2",
"Vale.Arch.HeapImpl.vale_heap_layout",
"Vale.Arch.HeapImpl.__proj__Mkvale_full_heap__item__vf_layout",
"Vale.Lib.Map16.map16",
"Vale.Arch.HeapImpl.vale_heap",
"Vale.Arch.HeapImpl.__proj__Mkvale_full_heap__item__vf_heaplets",
"Vale.Lib.Map16.upd",
"Vale.Arch.HeapImpl.vale_full_heap",
"Vale.Arch.Heap.heap_upd",
"Prims.logical"
] | [] | module Vale.PPC64LE.Memory_Sems
open FStar.Mul
open Vale.Def.Prop_s
open Vale.Def.Types_s
open Vale.Arch.Types
open Vale.Arch.HeapImpl
open Vale.Arch.Heap
open Vale.Arch.MachineHeap_s
open Vale.PPC64LE.Machine_s
open Vale.PPC64LE.Memory
open Vale.Lib.Seqs
module S = Vale.PPC64LE.Semantics_s
module Map16 = Vale.Lib.Map16
val same_domain (h:vale_heap) (m:S.machine_heap) : prop0
val lemma_same_domains (h:vale_heap) (m1:S.machine_heap) (m2:S.machine_heap) : Lemma
(requires same_domain h m1 /\ Set.equal (Map.domain m1) (Map.domain m2))
(ensures same_domain h m2)
val get_heap (h:vale_heap) : GTot (m:S.machine_heap{same_domain h m})
val upd_heap (h:vale_heap) (m:S.machine_heap{is_machine_heap_update (get_heap h) m}) : GTot vale_heap
//val lemma_upd_get_heap (h:vale_heap) : Lemma (upd_heap h (get_heap h) == h)
// [SMTPat (upd_heap h (get_heap h))]
val lemma_get_upd_heap (h:vale_heap) (m:S.machine_heap) : Lemma
(requires is_machine_heap_update (get_heap h) m)
(ensures get_heap (upd_heap h m) == m)
unfold let coerce (#b #a:Type) (x:a{a == b}) : b = x
val lemma_heap_impl : squash (heap_impl == vale_full_heap)
val lemma_heap_get_heap (h:vale_full_heap) : Lemma
(heap_get (coerce h) == get_heap (get_vale_heap h))
[SMTPat (heap_get (coerce h))]
val lemma_heap_taint (h:vale_full_heap) : Lemma
(heap_taint (coerce h) == full_heap_taint h)
[SMTPat (heap_taint (coerce h))]
let is_full_read (#t:base_typ) (h1 h2:vale_heap) (b:buffer t) (i:int) =
buffer_addr b h1 == buffer_addr b h2 /\
buffer_read b i h1 == buffer_read b i h2 /\
valid_buffer_read h1 b i // needed to trigger "index = i" in valid_mem_operand64/valid_mem_operand128 | false | true | Vale.PPC64LE.Memory_Sems.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val is_full_update : vfh: Vale.PPC64LE.Memory.vale_full_heap ->
h': Vale.PPC64LE.Memory.vale_heap ->
hid: Vale.PPC64LE.Memory.heaplet_id ->
mh': Vale.Arch.MachineHeap_s.machine_heap ->
mt': Vale.PPC64LE.Memory.memtaint
-> Prims.logical | [] | Vale.PPC64LE.Memory_Sems.is_full_update | {
"file_name": "vale/code/arch/ppc64le/Vale.PPC64LE.Memory_Sems.fsti",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} |
vfh: Vale.PPC64LE.Memory.vale_full_heap ->
h': Vale.PPC64LE.Memory.vale_heap ->
hid: Vale.PPC64LE.Memory.heaplet_id ->
mh': Vale.Arch.MachineHeap_s.machine_heap ->
mt': Vale.PPC64LE.Memory.memtaint
-> Prims.logical | {
"end_col": 3,
"end_line": 56,
"start_col": 2,
"start_line": 51
} |
|
Prims.Tot | val coerce (#b #a: Type) (x: a{a == b}) : b | [
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.PPC64LE.Semantics_s",
"short_module": "S"
},
{
"abbrev": false,
"full_module": "Vale.Lib.Seqs",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.MachineHeap_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Heap",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.PPC64LE",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let coerce (#b #a:Type) (x:a{a == b}) : b = x | val coerce (#b #a: Type) (x: a{a == b}) : b
let coerce (#b #a: Type) (x: a{a == b}) : b = | false | null | false | x | {
"checked_file": "Vale.PPC64LE.Memory_Sems.fsti.checked",
"dependencies": [
"Vale.PPC64LE.Semantics_s.fst.checked",
"Vale.PPC64LE.Memory.fsti.checked",
"Vale.PPC64LE.Machine_s.fst.checked",
"Vale.Lib.Seqs.fsti.checked",
"Vale.Lib.Map16.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Arch.Types.fsti.checked",
"Vale.Arch.MachineHeap_s.fst.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"Vale.Arch.Heap.fsti.checked",
"prims.fst.checked",
"FStar.Set.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked"
],
"interface_file": false,
"source_file": "Vale.PPC64LE.Memory_Sems.fsti"
} | [
"total"
] | [
"Prims.eq2"
] | [] | module Vale.PPC64LE.Memory_Sems
open FStar.Mul
open Vale.Def.Prop_s
open Vale.Def.Types_s
open Vale.Arch.Types
open Vale.Arch.HeapImpl
open Vale.Arch.Heap
open Vale.Arch.MachineHeap_s
open Vale.PPC64LE.Machine_s
open Vale.PPC64LE.Memory
open Vale.Lib.Seqs
module S = Vale.PPC64LE.Semantics_s
module Map16 = Vale.Lib.Map16
val same_domain (h:vale_heap) (m:S.machine_heap) : prop0
val lemma_same_domains (h:vale_heap) (m1:S.machine_heap) (m2:S.machine_heap) : Lemma
(requires same_domain h m1 /\ Set.equal (Map.domain m1) (Map.domain m2))
(ensures same_domain h m2)
val get_heap (h:vale_heap) : GTot (m:S.machine_heap{same_domain h m})
val upd_heap (h:vale_heap) (m:S.machine_heap{is_machine_heap_update (get_heap h) m}) : GTot vale_heap
//val lemma_upd_get_heap (h:vale_heap) : Lemma (upd_heap h (get_heap h) == h)
// [SMTPat (upd_heap h (get_heap h))]
val lemma_get_upd_heap (h:vale_heap) (m:S.machine_heap) : Lemma
(requires is_machine_heap_update (get_heap h) m)
(ensures get_heap (upd_heap h m) == m) | false | false | Vale.PPC64LE.Memory_Sems.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val coerce (#b #a: Type) (x: a{a == b}) : b | [] | Vale.PPC64LE.Memory_Sems.coerce | {
"file_name": "vale/code/arch/ppc64le/Vale.PPC64LE.Memory_Sems.fsti",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | x: a{a == b} -> b | {
"end_col": 52,
"end_line": 33,
"start_col": 51,
"start_line": 33
} |
FStar.Pervasives.Lemma | val is_gcd_unique (a b c d:int) : Lemma
(requires is_gcd a b c /\ is_gcd a b d)
(ensures c = d \/ c = -d) | [
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let is_gcd_unique a b c d =
divide_antisym c d | val is_gcd_unique (a b c d:int) : Lemma
(requires is_gcd a b c /\ is_gcd a b d)
(ensures c = d \/ c = -d)
let is_gcd_unique a b c d = | false | null | true | divide_antisym c d | {
"checked_file": "FStar.Math.Euclid.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.Tactics.Effect.fsti.checked",
"FStar.Tactics.Canon.fst.checked",
"FStar.Squash.fsti.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Classical.Sugar.fsti.checked",
"FStar.Classical.fsti.checked",
"FStar.Calc.fsti.checked"
],
"interface_file": true,
"source_file": "FStar.Math.Euclid.fst"
} | [
"lemma"
] | [
"Prims.int",
"FStar.Math.Euclid.divide_antisym",
"Prims.unit"
] | [] | module FStar.Math.Euclid
open FStar.Mul
open FStar.Math.Lemmas
///
/// Auxiliary lemmas
///
val eq_mult_left (a b:int) : Lemma (requires a = b * a) (ensures a = 0 \/ b = 1)
let eq_mult_left a b = ()
val eq_mult_one (a b:int) : Lemma
(requires a * b = 1)
(ensures (a = 1 /\ b = 1) \/ (a = -1 /\ b = -1))
let eq_mult_one a b = ()
val opp_idempotent (a:int) : Lemma (-(-a) == a)
let opp_idempotent a = ()
val add_sub_l (a b:int) : Lemma (a - b + b = a)
let add_sub_l a b = ()
val add_sub_r (a b:int) : Lemma (a + b - b = a)
let add_sub_r a b = ()
///
/// Divides relation
///
let divides_reflexive a =
Classical.exists_intro (fun q -> a = q * a) 1
let divides_transitive a b c =
eliminate exists q1. b == q1 * a
returns a `divides` c
with _pf.
eliminate exists q2. c == q2 * b
returns _
with _pf2.
introduce exists q. c == q * a
with (q1 * q2)
and ()
let divide_antisym a b =
if a <> 0 then
Classical.exists_elim (a = b \/ a = -b) (Squash.get_proof (exists q1. b = q1 * a))
(fun q1 ->
Classical.exists_elim (a = b \/ a = -b) (Squash.get_proof (exists q2. a = q2 * b))
(fun q2 ->
assert (b = q1 * a);
assert (a = q2 * b);
assert (b = q1 * (q2 * b));
paren_mul_right q1 q2 b;
eq_mult_left b (q1 * q2);
eq_mult_one q1 q2))
let divides_0 a =
Classical.exists_intro (fun q -> 0 = q * a) 0
let divides_1 a = ()
let divides_minus a b =
Classical.exists_elim (a `divides` (-b))
(Squash.get_proof (a `divides` b))
(fun q -> Classical.exists_intro (fun q' -> -b = q' * a) (-q))
let divides_opp a b =
Classical.exists_elim ((-a) `divides` b)
(Squash.get_proof (a `divides` b))
(fun q -> Classical.exists_intro (fun q' -> b = q' * (-a)) (-q))
let divides_plus a b d =
Classical.exists_elim (d `divides` (a + b)) (Squash.get_proof (exists q1. a = q1 * d))
(fun q1 ->
Classical.exists_elim (d `divides` (a + b)) (Squash.get_proof (exists q2. b = q2 * d))
(fun q2 ->
assert (a + b = q1 * d + q2 * d);
distributivity_add_left q1 q2 d;
Classical.exists_intro (fun q -> a + b = q * d) (q1 + q2)))
let divides_sub a b d =
Classical.forall_intro_2 (Classical.move_requires_2 divides_minus);
divides_plus a (-b) d
let divides_mult_right a b d =
Classical.exists_elim (d `divides` (a * b)) (Squash.get_proof (d `divides` b))
(fun q ->
paren_mul_right a q d;
Classical.exists_intro (fun r -> a * b = r * d) (a * q))
///
/// GCD
///
let mod_divides a b =
Classical.exists_intro (fun q -> a = q * b) (a / b)
let divides_mod a b =
Classical.exists_elim (a % b = 0) (Squash.get_proof (b `divides` a))
(fun q -> cancel_mul_div q b) | false | false | FStar.Math.Euclid.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val is_gcd_unique (a b c d:int) : Lemma
(requires is_gcd a b c /\ is_gcd a b d)
(ensures c = d \/ c = -d) | [] | FStar.Math.Euclid.is_gcd_unique | {
"file_name": "ulib/FStar.Math.Euclid.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | a: Prims.int -> b: Prims.int -> c: Prims.int -> d: Prims.int
-> FStar.Pervasives.Lemma
(requires FStar.Math.Euclid.is_gcd a b c /\ FStar.Math.Euclid.is_gcd a b d)
(ensures c = d \/ c = - d) | {
"end_col": 20,
"end_line": 105,
"start_col": 2,
"start_line": 105
} |
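Informal worked sketch of the uniqueness argument behind is_gcd_unique above (it reduces to divide_antisym, whose proof appears in the file context; this note is illustrative and not part of the record): if c and d are both gcds of (a, b), then each is a common divisor of (a, b) and therefore divides the other, i.e.

  d = q1 * c  and  c = q2 * d  for some q1, q2,

hence c = (q2 * q1) * c. If c = 0 then d = q1 * 0 = 0 = c; otherwise q1 * q2 = 1, forcing q1 = q2 = 1 or q1 = q2 = -1, i.e. c = d or c = -d.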
FStar.Pervasives.Lemma | val is_gcd_for_euclid (a b q d:int) : Lemma
(requires is_gcd b (a - q * b) d)
(ensures is_gcd a b d) | [
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let is_gcd_for_euclid a b q d =
add_sub_l a (q * b);
is_gcd_plus b (a - q * b) q d | val is_gcd_for_euclid (a b q d:int) : Lemma
(requires is_gcd b (a - q * b) d)
(ensures is_gcd a b d)
let is_gcd_for_euclid a b q d = | false | null | true | add_sub_l a (q * b);
is_gcd_plus b (a - q * b) q d | {
"checked_file": "FStar.Math.Euclid.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.Tactics.Effect.fsti.checked",
"FStar.Tactics.Canon.fst.checked",
"FStar.Squash.fsti.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Classical.Sugar.fsti.checked",
"FStar.Classical.fsti.checked",
"FStar.Calc.fsti.checked"
],
"interface_file": true,
"source_file": "FStar.Math.Euclid.fst"
} | [
"lemma"
] | [
"Prims.int",
"FStar.Math.Euclid.is_gcd_plus",
"Prims.op_Subtraction",
"FStar.Mul.op_Star",
"Prims.unit",
"FStar.Math.Euclid.add_sub_l"
] | [] | module FStar.Math.Euclid
open FStar.Mul
open FStar.Math.Lemmas
///
/// Auxiliary lemmas
///
val eq_mult_left (a b:int) : Lemma (requires a = b * a) (ensures a = 0 \/ b = 1)
let eq_mult_left a b = ()
val eq_mult_one (a b:int) : Lemma
(requires a * b = 1)
(ensures (a = 1 /\ b = 1) \/ (a = -1 /\ b = -1))
let eq_mult_one a b = ()
val opp_idempotent (a:int) : Lemma (-(-a) == a)
let opp_idempotent a = ()
val add_sub_l (a b:int) : Lemma (a - b + b = a)
let add_sub_l a b = ()
val add_sub_r (a b:int) : Lemma (a + b - b = a)
let add_sub_r a b = ()
///
/// Divides relation
///
let divides_reflexive a =
Classical.exists_intro (fun q -> a = q * a) 1
let divides_transitive a b c =
eliminate exists q1. b == q1 * a
returns a `divides` c
with _pf.
eliminate exists q2. c == q2 * b
returns _
with _pf2.
introduce exists q. c == q * a
with (q1 * q2)
and ()
let divide_antisym a b =
if a <> 0 then
Classical.exists_elim (a = b \/ a = -b) (Squash.get_proof (exists q1. b = q1 * a))
(fun q1 ->
Classical.exists_elim (a = b \/ a = -b) (Squash.get_proof (exists q2. a = q2 * b))
(fun q2 ->
assert (b = q1 * a);
assert (a = q2 * b);
assert (b = q1 * (q2 * b));
paren_mul_right q1 q2 b;
eq_mult_left b (q1 * q2);
eq_mult_one q1 q2))
let divides_0 a =
Classical.exists_intro (fun q -> 0 = q * a) 0
let divides_1 a = ()
let divides_minus a b =
Classical.exists_elim (a `divides` (-b))
(Squash.get_proof (a `divides` b))
(fun q -> Classical.exists_intro (fun q' -> -b = q' * a) (-q))
let divides_opp a b =
Classical.exists_elim ((-a) `divides` b)
(Squash.get_proof (a `divides` b))
(fun q -> Classical.exists_intro (fun q' -> b = q' * (-a)) (-q))
let divides_plus a b d =
Classical.exists_elim (d `divides` (a + b)) (Squash.get_proof (exists q1. a = q1 * d))
(fun q1 ->
Classical.exists_elim (d `divides` (a + b)) (Squash.get_proof (exists q2. b = q2 * d))
(fun q2 ->
assert (a + b = q1 * d + q2 * d);
distributivity_add_left q1 q2 d;
Classical.exists_intro (fun q -> a + b = q * d) (q1 + q2)))
let divides_sub a b d =
Classical.forall_intro_2 (Classical.move_requires_2 divides_minus);
divides_plus a (-b) d
let divides_mult_right a b d =
Classical.exists_elim (d `divides` (a * b)) (Squash.get_proof (d `divides` b))
(fun q ->
paren_mul_right a q d;
Classical.exists_intro (fun r -> a * b = r * d) (a * q))
///
/// GCD
///
let mod_divides a b =
Classical.exists_intro (fun q -> a = q * b) (a / b)
let divides_mod a b =
Classical.exists_elim (a % b = 0) (Squash.get_proof (b `divides` a))
(fun q -> cancel_mul_div q b)
let is_gcd_unique a b c d =
divide_antisym c d
let is_gcd_reflexive a = ()
let is_gcd_symmetric a b d = ()
let is_gcd_0 a = ()
let is_gcd_1 a = ()
let is_gcd_minus a b d =
Classical.forall_intro_2 (Classical.move_requires_2 divides_minus);
opp_idempotent b
let is_gcd_opp a b d =
Classical.forall_intro_2 (Classical.move_requires_2 divides_minus);
divides_opp d a;
divides_opp d b
let is_gcd_plus a b q d =
add_sub_r b (q * a);
Classical.forall_intro_3 (Classical.move_requires_3 divides_plus);
Classical.forall_intro_3 (Classical.move_requires_3 divides_mult_right);
Classical.forall_intro_3 (Classical.move_requires_3 divides_sub)
///
/// Extended Euclidean algorithm
///
val is_gcd_for_euclid (a b q d:int) : Lemma
(requires is_gcd b (a - q * b) d)
(ensures is_gcd a b d) | false | false | FStar.Math.Euclid.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val is_gcd_for_euclid (a b q d:int) : Lemma
(requires is_gcd b (a - q * b) d)
(ensures is_gcd a b d) | [] | FStar.Math.Euclid.is_gcd_for_euclid | {
"file_name": "ulib/FStar.Math.Euclid.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | a: Prims.int -> b: Prims.int -> q: Prims.int -> d: Prims.int
-> FStar.Pervasives.Lemma (requires FStar.Math.Euclid.is_gcd b (a - q * b) d)
(ensures FStar.Math.Euclid.is_gcd a b d) | {
"end_col": 31,
"end_line": 139,
"start_col": 2,
"start_line": 138
} |
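A short restatement of why the proof of is_gcd_for_euclid above works (illustrative only; a, b, q, d are the names from the lemma): the add_sub_l step supplies the identity

  (a - q * b) + q * b = a,

so is_gcd_plus turns a gcd of (b, a - q * b) into a gcd of (b, a). Equivalently, (a, b) and (b, a - q * b) have exactly the same common divisors, which is the invariant justifying each step of the Euclidean algorithm.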
FStar.Pervasives.Lemma | val mod_divides (a:int) (b:nonzero) : Lemma (requires a % b = 0) (ensures b `divides` a) | [
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let mod_divides a b =
Classical.exists_intro (fun q -> a = q * b) (a / b) | val mod_divides (a:int) (b:nonzero) : Lemma (requires a % b = 0) (ensures b `divides` a)
let mod_divides a b = | false | null | true | Classical.exists_intro (fun q -> a = q * b) (a / b) | {
"checked_file": "FStar.Math.Euclid.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.Tactics.Effect.fsti.checked",
"FStar.Tactics.Canon.fst.checked",
"FStar.Squash.fsti.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Classical.Sugar.fsti.checked",
"FStar.Classical.fsti.checked",
"FStar.Calc.fsti.checked"
],
"interface_file": true,
"source_file": "FStar.Math.Euclid.fst"
} | [
"lemma"
] | [
"Prims.int",
"Prims.nonzero",
"FStar.Classical.exists_intro",
"Prims.b2t",
"Prims.op_Equality",
"FStar.Mul.op_Star",
"Prims.op_Division",
"Prims.unit"
] | [] | module FStar.Math.Euclid
open FStar.Mul
open FStar.Math.Lemmas
///
/// Auxiliary lemmas
///
val eq_mult_left (a b:int) : Lemma (requires a = b * a) (ensures a = 0 \/ b = 1)
let eq_mult_left a b = ()
val eq_mult_one (a b:int) : Lemma
(requires a * b = 1)
(ensures (a = 1 /\ b = 1) \/ (a = -1 /\ b = -1))
let eq_mult_one a b = ()
val opp_idempotent (a:int) : Lemma (-(-a) == a)
let opp_idempotent a = ()
val add_sub_l (a b:int) : Lemma (a - b + b = a)
let add_sub_l a b = ()
val add_sub_r (a b:int) : Lemma (a + b - b = a)
let add_sub_r a b = ()
///
/// Divides relation
///
let divides_reflexive a =
Classical.exists_intro (fun q -> a = q * a) 1
let divides_transitive a b c =
eliminate exists q1. b == q1 * a
returns a `divides` c
with _pf.
eliminate exists q2. c == q2 * b
returns _
with _pf2.
introduce exists q. c == q * a
with (q1 * q2)
and ()
let divide_antisym a b =
if a <> 0 then
Classical.exists_elim (a = b \/ a = -b) (Squash.get_proof (exists q1. b = q1 * a))
(fun q1 ->
Classical.exists_elim (a = b \/ a = -b) (Squash.get_proof (exists q2. a = q2 * b))
(fun q2 ->
assert (b = q1 * a);
assert (a = q2 * b);
assert (b = q1 * (q2 * b));
paren_mul_right q1 q2 b;
eq_mult_left b (q1 * q2);
eq_mult_one q1 q2))
let divides_0 a =
Classical.exists_intro (fun q -> 0 = q * a) 0
let divides_1 a = ()
let divides_minus a b =
Classical.exists_elim (a `divides` (-b))
(Squash.get_proof (a `divides` b))
(fun q -> Classical.exists_intro (fun q' -> -b = q' * a) (-q))
let divides_opp a b =
Classical.exists_elim ((-a) `divides` b)
(Squash.get_proof (a `divides` b))
(fun q -> Classical.exists_intro (fun q' -> b = q' * (-a)) (-q))
let divides_plus a b d =
Classical.exists_elim (d `divides` (a + b)) (Squash.get_proof (exists q1. a = q1 * d))
(fun q1 ->
Classical.exists_elim (d `divides` (a + b)) (Squash.get_proof (exists q2. b = q2 * d))
(fun q2 ->
assert (a + b = q1 * d + q2 * d);
distributivity_add_left q1 q2 d;
Classical.exists_intro (fun q -> a + b = q * d) (q1 + q2)))
let divides_sub a b d =
Classical.forall_intro_2 (Classical.move_requires_2 divides_minus);
divides_plus a (-b) d
let divides_mult_right a b d =
Classical.exists_elim (d `divides` (a * b)) (Squash.get_proof (d `divides` b))
(fun q ->
paren_mul_right a q d;
Classical.exists_intro (fun r -> a * b = r * d) (a * q))
///
/// GCD
/// | false | false | FStar.Math.Euclid.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val mod_divides (a:int) (b:nonzero) : Lemma (requires a % b = 0) (ensures b `divides` a) | [] | FStar.Math.Euclid.mod_divides | {
"file_name": "ulib/FStar.Math.Euclid.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | a: Prims.int -> b: Prims.nonzero
-> FStar.Pervasives.Lemma (requires a % b = 0) (ensures FStar.Math.Euclid.divides b a) | {
"end_col": 53,
"end_line": 98,
"start_col": 2,
"start_line": 98
} |
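As an informal aside on the proof term of mod_divides above (the witness a / b comes from the record; nothing new is assumed): Euclidean division gives

  a = (a / b) * b + a % b,

and the precondition a % b = 0 collapses this to a = (a / b) * b, so q = a / b indeed witnesses b `divides` a.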
FStar.Pervasives.Lemma | val divides_reflexive (a:int) : Lemma (a `divides` a) [SMTPat (a `divides` a)] | [
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let divides_reflexive a =
Classical.exists_intro (fun q -> a = q * a) 1 | val divides_reflexive (a:int) : Lemma (a `divides` a) [SMTPat (a `divides` a)]
let divides_reflexive a = | false | null | true | Classical.exists_intro (fun q -> a = q * a) 1 | {
"checked_file": "FStar.Math.Euclid.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.Tactics.Effect.fsti.checked",
"FStar.Tactics.Canon.fst.checked",
"FStar.Squash.fsti.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Classical.Sugar.fsti.checked",
"FStar.Classical.fsti.checked",
"FStar.Calc.fsti.checked"
],
"interface_file": true,
"source_file": "FStar.Math.Euclid.fst"
} | [
"lemma"
] | [
"Prims.int",
"FStar.Classical.exists_intro",
"Prims.b2t",
"Prims.op_Equality",
"FStar.Mul.op_Star",
"Prims.unit"
] | [] | module FStar.Math.Euclid
open FStar.Mul
open FStar.Math.Lemmas
///
/// Auxiliary lemmas
///
val eq_mult_left (a b:int) : Lemma (requires a = b * a) (ensures a = 0 \/ b = 1)
let eq_mult_left a b = ()
val eq_mult_one (a b:int) : Lemma
(requires a * b = 1)
(ensures (a = 1 /\ b = 1) \/ (a = -1 /\ b = -1))
let eq_mult_one a b = ()
val opp_idempotent (a:int) : Lemma (-(-a) == a)
let opp_idempotent a = ()
val add_sub_l (a b:int) : Lemma (a - b + b = a)
let add_sub_l a b = ()
val add_sub_r (a b:int) : Lemma (a + b - b = a)
let add_sub_r a b = ()
///
/// Divides relation
/// | false | false | FStar.Math.Euclid.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val divides_reflexive (a:int) : Lemma (a `divides` a) [SMTPat (a `divides` a)] | [] | FStar.Math.Euclid.divides_reflexive | {
"file_name": "ulib/FStar.Math.Euclid.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | a: Prims.int
-> FStar.Pervasives.Lemma (ensures FStar.Math.Euclid.divides a a)
[SMTPat (FStar.Math.Euclid.divides a a)] | {
"end_col": 47,
"end_line": 32,
"start_col": 2,
"start_line": 32
} |
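For completeness, the one-line arithmetic fact behind the witness 1 used in divides_reflexive above:

  a = 1 * a,

which is exactly the existential instance exists q. a = q * a with q = 1.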
FStar.Pervasives.Lemma | val divides_opp (a b:int) : Lemma
(requires a `divides` b)
(ensures (-a) `divides` b) | [
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let divides_opp a b =
Classical.exists_elim ((-a) `divides` b)
(Squash.get_proof (a `divides` b))
(fun q -> Classical.exists_intro (fun q' -> b = q' * (-a)) (-q)) | val divides_opp (a b:int) : Lemma
(requires a `divides` b)
(ensures (-a) `divides` b)
let divides_opp a b = | false | null | true | Classical.exists_elim ((- a) `divides` b)
(Squash.get_proof (a `divides` b))
(fun q -> Classical.exists_intro (fun q' -> b = q' * (- a)) (- q)) | {
"checked_file": "FStar.Math.Euclid.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.Tactics.Effect.fsti.checked",
"FStar.Tactics.Canon.fst.checked",
"FStar.Squash.fsti.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Classical.Sugar.fsti.checked",
"FStar.Classical.fsti.checked",
"FStar.Calc.fsti.checked"
],
"interface_file": true,
"source_file": "FStar.Math.Euclid.fst"
} | [
"lemma"
] | [
"Prims.int",
"FStar.Classical.exists_elim",
"FStar.Math.Euclid.divides",
"Prims.op_Minus",
"Prims.b2t",
"Prims.op_Equality",
"FStar.Mul.op_Star",
"FStar.Squash.get_proof",
"FStar.Classical.exists_intro",
"Prims.squash",
"Prims.unit"
] | [] | module FStar.Math.Euclid
open FStar.Mul
open FStar.Math.Lemmas
///
/// Auxiliary lemmas
///
val eq_mult_left (a b:int) : Lemma (requires a = b * a) (ensures a = 0 \/ b = 1)
let eq_mult_left a b = ()
val eq_mult_one (a b:int) : Lemma
(requires a * b = 1)
(ensures (a = 1 /\ b = 1) \/ (a = -1 /\ b = -1))
let eq_mult_one a b = ()
val opp_idempotent (a:int) : Lemma (-(-a) == a)
let opp_idempotent a = ()
val add_sub_l (a b:int) : Lemma (a - b + b = a)
let add_sub_l a b = ()
val add_sub_r (a b:int) : Lemma (a + b - b = a)
let add_sub_r a b = ()
///
/// Divides relation
///
let divides_reflexive a =
Classical.exists_intro (fun q -> a = q * a) 1
let divides_transitive a b c =
eliminate exists q1. b == q1 * a
returns a `divides` c
with _pf.
eliminate exists q2. c == q2 * b
returns _
with _pf2.
introduce exists q. c == q * a
with (q1 * q2)
and ()
let divide_antisym a b =
if a <> 0 then
Classical.exists_elim (a = b \/ a = -b) (Squash.get_proof (exists q1. b = q1 * a))
(fun q1 ->
Classical.exists_elim (a = b \/ a = -b) (Squash.get_proof (exists q2. a = q2 * b))
(fun q2 ->
assert (b = q1 * a);
assert (a = q2 * b);
assert (b = q1 * (q2 * b));
paren_mul_right q1 q2 b;
eq_mult_left b (q1 * q2);
eq_mult_one q1 q2))
let divides_0 a =
Classical.exists_intro (fun q -> 0 = q * a) 0
let divides_1 a = ()
let divides_minus a b =
Classical.exists_elim (a `divides` (-b))
(Squash.get_proof (a `divides` b))
(fun q -> Classical.exists_intro (fun q' -> -b = q' * a) (-q)) | false | false | FStar.Math.Euclid.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val divides_opp (a b:int) : Lemma
(requires a `divides` b)
(ensures (-a) `divides` b) | [] | FStar.Math.Euclid.divides_opp | {
"file_name": "ulib/FStar.Math.Euclid.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | a: Prims.int -> b: Prims.int
-> FStar.Pervasives.Lemma (requires FStar.Math.Euclid.divides a b)
(ensures FStar.Math.Euclid.divides (- a) b) | {
"end_col": 68,
"end_line": 72,
"start_col": 2,
"start_line": 70
} |
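Worked form of the witness manipulation in the divides_opp proof above (purely illustrative, same names as the record): from a `divides` b we get some q with b = q * a, and then

  b = q * a = (-q) * (-a),

so -q witnesses (-a) `divides` b, matching the exists_intro ... (-q) step.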
FStar.Pervasives.Lemma | val divides_transitive (a b c:int) : Lemma
(requires a `divides` b /\ b `divides` c)
(ensures a `divides` c) | [
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let divides_transitive a b c =
eliminate exists q1. b == q1 * a
returns a `divides` c
with _pf.
eliminate exists q2. c == q2 * b
returns _
with _pf2.
introduce exists q. c == q * a
with (q1 * q2)
and () | val divides_transitive (a b c:int) : Lemma
(requires a `divides` b /\ b `divides` c)
(ensures a `divides` c)
let divides_transitive a b c = | false | null | true | eliminate exists q1.
b == q1 * a
returns a `divides` c
with _pf.
eliminate exists q2.
c == q2 * b
returns _
with _pf2.
introduce exists q.c == q * a
with (q1 * q2)
and () | {
"checked_file": "FStar.Math.Euclid.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.Tactics.Effect.fsti.checked",
"FStar.Tactics.Canon.fst.checked",
"FStar.Squash.fsti.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Classical.Sugar.fsti.checked",
"FStar.Classical.fsti.checked",
"FStar.Calc.fsti.checked"
],
"interface_file": true,
"source_file": "FStar.Math.Euclid.fst"
} | [
"lemma"
] | [
"Prims.int",
"FStar.Classical.Sugar.exists_elim",
"Prims.eq2",
"FStar.Mul.op_Star",
"FStar.Math.Euclid.divides",
"Prims.squash",
"Prims.l_Exists",
"FStar.Classical.Sugar.exists_intro",
"Prims.unit"
] | [] | module FStar.Math.Euclid
open FStar.Mul
open FStar.Math.Lemmas
///
/// Auxiliary lemmas
///
val eq_mult_left (a b:int) : Lemma (requires a = b * a) (ensures a = 0 \/ b = 1)
let eq_mult_left a b = ()
val eq_mult_one (a b:int) : Lemma
(requires a * b = 1)
(ensures (a = 1 /\ b = 1) \/ (a = -1 /\ b = -1))
let eq_mult_one a b = ()
val opp_idempotent (a:int) : Lemma (-(-a) == a)
let opp_idempotent a = ()
val add_sub_l (a b:int) : Lemma (a - b + b = a)
let add_sub_l a b = ()
val add_sub_r (a b:int) : Lemma (a + b - b = a)
let add_sub_r a b = ()
///
/// Divides relation
///
let divides_reflexive a =
Classical.exists_intro (fun q -> a = q * a) 1 | false | false | FStar.Math.Euclid.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val divides_transitive (a b c:int) : Lemma
(requires a `divides` b /\ b `divides` c)
(ensures a `divides` c) | [] | FStar.Math.Euclid.divides_transitive | {
"file_name": "ulib/FStar.Math.Euclid.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | a: Prims.int -> b: Prims.int -> c: Prims.int
-> FStar.Pervasives.Lemma
(requires FStar.Math.Euclid.divides a b /\ FStar.Math.Euclid.divides b c)
(ensures FStar.Math.Euclid.divides a c) | {
"end_col": 8,
"end_line": 43,
"start_col": 2,
"start_line": 35
} |
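The chained substitution carried out by the nested eliminations in divides_transitive above, written out as a single equation (same q1, q2 as in the proof; this note is not part of the record):

  c = q2 * b = q2 * (q1 * a) = (q1 * q2) * a,

so q1 * q2 is the witness introduced for a `divides` c.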
FStar.Pervasives.Lemma | val is_gcd_opp (a b d:int) : Lemma
(requires is_gcd a b d)
(ensures is_gcd b a (-d)) | [
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let is_gcd_opp a b d =
Classical.forall_intro_2 (Classical.move_requires_2 divides_minus);
divides_opp d a;
divides_opp d b | val is_gcd_opp (a b d:int) : Lemma
(requires is_gcd a b d)
(ensures is_gcd b a (-d))
let is_gcd_opp a b d = | false | null | true | Classical.forall_intro_2 (Classical.move_requires_2 divides_minus);
divides_opp d a;
divides_opp d b | {
"checked_file": "FStar.Math.Euclid.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.Tactics.Effect.fsti.checked",
"FStar.Tactics.Canon.fst.checked",
"FStar.Squash.fsti.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Classical.Sugar.fsti.checked",
"FStar.Classical.fsti.checked",
"FStar.Calc.fsti.checked"
],
"interface_file": true,
"source_file": "FStar.Math.Euclid.fst"
} | [
"lemma"
] | [
"Prims.int",
"FStar.Math.Euclid.divides_opp",
"Prims.unit",
"FStar.Classical.forall_intro_2",
"Prims.l_imp",
"FStar.Math.Euclid.divides",
"Prims.op_Minus",
"FStar.Classical.move_requires_2",
"FStar.Math.Euclid.divides_minus"
] | [] | module FStar.Math.Euclid
open FStar.Mul
open FStar.Math.Lemmas
///
/// Auxiliary lemmas
///
val eq_mult_left (a b:int) : Lemma (requires a = b * a) (ensures a = 0 \/ b = 1)
let eq_mult_left a b = ()
val eq_mult_one (a b:int) : Lemma
(requires a * b = 1)
(ensures (a = 1 /\ b = 1) \/ (a = -1 /\ b = -1))
let eq_mult_one a b = ()
val opp_idempotent (a:int) : Lemma (-(-a) == a)
let opp_idempotent a = ()
val add_sub_l (a b:int) : Lemma (a - b + b = a)
let add_sub_l a b = ()
val add_sub_r (a b:int) : Lemma (a + b - b = a)
let add_sub_r a b = ()
///
/// Divides relation
///
let divides_reflexive a =
Classical.exists_intro (fun q -> a = q * a) 1
let divides_transitive a b c =
eliminate exists q1. b == q1 * a
returns a `divides` c
with _pf.
eliminate exists q2. c == q2 * b
returns _
with _pf2.
introduce exists q. c == q * a
with (q1 * q2)
and ()
let divide_antisym a b =
if a <> 0 then
Classical.exists_elim (a = b \/ a = -b) (Squash.get_proof (exists q1. b = q1 * a))
(fun q1 ->
Classical.exists_elim (a = b \/ a = -b) (Squash.get_proof (exists q2. a = q2 * b))
(fun q2 ->
assert (b = q1 * a);
assert (a = q2 * b);
assert (b = q1 * (q2 * b));
paren_mul_right q1 q2 b;
eq_mult_left b (q1 * q2);
eq_mult_one q1 q2))
let divides_0 a =
Classical.exists_intro (fun q -> 0 = q * a) 0
let divides_1 a = ()
let divides_minus a b =
Classical.exists_elim (a `divides` (-b))
(Squash.get_proof (a `divides` b))
(fun q -> Classical.exists_intro (fun q' -> -b = q' * a) (-q))
let divides_opp a b =
Classical.exists_elim ((-a) `divides` b)
(Squash.get_proof (a `divides` b))
(fun q -> Classical.exists_intro (fun q' -> b = q' * (-a)) (-q))
let divides_plus a b d =
Classical.exists_elim (d `divides` (a + b)) (Squash.get_proof (exists q1. a = q1 * d))
(fun q1 ->
Classical.exists_elim (d `divides` (a + b)) (Squash.get_proof (exists q2. b = q2 * d))
(fun q2 ->
assert (a + b = q1 * d + q2 * d);
distributivity_add_left q1 q2 d;
Classical.exists_intro (fun q -> a + b = q * d) (q1 + q2)))
let divides_sub a b d =
Classical.forall_intro_2 (Classical.move_requires_2 divides_minus);
divides_plus a (-b) d
let divides_mult_right a b d =
Classical.exists_elim (d `divides` (a * b)) (Squash.get_proof (d `divides` b))
(fun q ->
paren_mul_right a q d;
Classical.exists_intro (fun r -> a * b = r * d) (a * q))
///
/// GCD
///
let mod_divides a b =
Classical.exists_intro (fun q -> a = q * b) (a / b)
let divides_mod a b =
Classical.exists_elim (a % b = 0) (Squash.get_proof (b `divides` a))
(fun q -> cancel_mul_div q b)
let is_gcd_unique a b c d =
divide_antisym c d
let is_gcd_reflexive a = ()
let is_gcd_symmetric a b d = ()
let is_gcd_0 a = ()
let is_gcd_1 a = ()
let is_gcd_minus a b d =
Classical.forall_intro_2 (Classical.move_requires_2 divides_minus);
opp_idempotent b | false | false | FStar.Math.Euclid.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val is_gcd_opp (a b d:int) : Lemma
(requires is_gcd a b d)
(ensures is_gcd b a (-d)) | [] | FStar.Math.Euclid.is_gcd_opp | {
"file_name": "ulib/FStar.Math.Euclid.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | a: Prims.int -> b: Prims.int -> d: Prims.int
-> FStar.Pervasives.Lemma (requires FStar.Math.Euclid.is_gcd a b d)
(ensures FStar.Math.Euclid.is_gcd b a (- d)) | {
"end_col": 17,
"end_line": 122,
"start_col": 2,
"start_line": 120
} |
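Informal sketch of the is_gcd_opp argument restated from the proof above (illustrative; a, b, d as in the lemma): from is_gcd a b d we have d | a and d | b, and divides_opp turns these into (-d) | a and (-d) | b; conversely, any common divisor x of b and a divides d by assumption, hence divides -d by divides_minus. Together these are exactly the two halves of is_gcd b a (-d).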
FStar.Pervasives.Lemma | val divides_mod (a:int) (b:nonzero) : Lemma (requires b `divides` a) (ensures a % b = 0) | [
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let divides_mod a b =
Classical.exists_elim (a % b = 0) (Squash.get_proof (b `divides` a))
(fun q -> cancel_mul_div q b) | val divides_mod (a:int) (b:nonzero) : Lemma (requires b `divides` a) (ensures a % b = 0)
let divides_mod a b = | false | null | true | Classical.exists_elim (a % b = 0) (Squash.get_proof (b `divides` a)) (fun q -> cancel_mul_div q b) | {
"checked_file": "FStar.Math.Euclid.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.Tactics.Effect.fsti.checked",
"FStar.Tactics.Canon.fst.checked",
"FStar.Squash.fsti.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Classical.Sugar.fsti.checked",
"FStar.Classical.fsti.checked",
"FStar.Calc.fsti.checked"
],
"interface_file": true,
"source_file": "FStar.Math.Euclid.fst"
} | [
"lemma"
] | [
"Prims.int",
"Prims.nonzero",
"FStar.Classical.exists_elim",
"Prims.b2t",
"Prims.op_Equality",
"Prims.op_Modulus",
"FStar.Mul.op_Star",
"FStar.Squash.get_proof",
"FStar.Math.Euclid.divides",
"FStar.Math.Lemmas.cancel_mul_div",
"Prims.squash",
"Prims.unit"
] | [] | module FStar.Math.Euclid
open FStar.Mul
open FStar.Math.Lemmas
///
/// Auxiliary lemmas
///
val eq_mult_left (a b:int) : Lemma (requires a = b * a) (ensures a = 0 \/ b = 1)
let eq_mult_left a b = ()
val eq_mult_one (a b:int) : Lemma
(requires a * b = 1)
(ensures (a = 1 /\ b = 1) \/ (a = -1 /\ b = -1))
let eq_mult_one a b = ()
val opp_idempotent (a:int) : Lemma (-(-a) == a)
let opp_idempotent a = ()
val add_sub_l (a b:int) : Lemma (a - b + b = a)
let add_sub_l a b = ()
val add_sub_r (a b:int) : Lemma (a + b - b = a)
let add_sub_r a b = ()
///
/// Divides relation
///
let divides_reflexive a =
Classical.exists_intro (fun q -> a = q * a) 1
let divides_transitive a b c =
eliminate exists q1. b == q1 * a
returns a `divides` c
with _pf.
eliminate exists q2. c == q2 * b
returns _
with _pf2.
introduce exists q. c == q * a
with (q1 * q2)
and ()
let divide_antisym a b =
if a <> 0 then
Classical.exists_elim (a = b \/ a = -b) (Squash.get_proof (exists q1. b = q1 * a))
(fun q1 ->
Classical.exists_elim (a = b \/ a = -b) (Squash.get_proof (exists q2. a = q2 * b))
(fun q2 ->
assert (b = q1 * a);
assert (a = q2 * b);
assert (b = q1 * (q2 * b));
paren_mul_right q1 q2 b;
eq_mult_left b (q1 * q2);
eq_mult_one q1 q2))
let divides_0 a =
Classical.exists_intro (fun q -> 0 = q * a) 0
let divides_1 a = ()
let divides_minus a b =
Classical.exists_elim (a `divides` (-b))
(Squash.get_proof (a `divides` b))
(fun q -> Classical.exists_intro (fun q' -> -b = q' * a) (-q))
let divides_opp a b =
Classical.exists_elim ((-a) `divides` b)
(Squash.get_proof (a `divides` b))
(fun q -> Classical.exists_intro (fun q' -> b = q' * (-a)) (-q))
let divides_plus a b d =
Classical.exists_elim (d `divides` (a + b)) (Squash.get_proof (exists q1. a = q1 * d))
(fun q1 ->
Classical.exists_elim (d `divides` (a + b)) (Squash.get_proof (exists q2. b = q2 * d))
(fun q2 ->
assert (a + b = q1 * d + q2 * d);
distributivity_add_left q1 q2 d;
Classical.exists_intro (fun q -> a + b = q * d) (q1 + q2)))
let divides_sub a b d =
Classical.forall_intro_2 (Classical.move_requires_2 divides_minus);
divides_plus a (-b) d
let divides_mult_right a b d =
Classical.exists_elim (d `divides` (a * b)) (Squash.get_proof (d `divides` b))
(fun q ->
paren_mul_right a q d;
Classical.exists_intro (fun r -> a * b = r * d) (a * q))
///
/// GCD
///
let mod_divides a b =
Classical.exists_intro (fun q -> a = q * b) (a / b) | false | false | FStar.Math.Euclid.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val divides_mod (a:int) (b:nonzero) : Lemma (requires b `divides` a) (ensures a % b = 0) | [] | FStar.Math.Euclid.divides_mod | {
"file_name": "ulib/FStar.Math.Euclid.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | a: Prims.int -> b: Prims.nonzero
-> FStar.Pervasives.Lemma (requires FStar.Math.Euclid.divides b a) (ensures a % b = 0) | {
"end_col": 33,
"end_line": 102,
"start_col": 2,
"start_line": 101
} |
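A worked restatement of the cancel_mul_div step in divides_mod above (illustrative only): b `divides` a gives a = q * b for some q, and then

  a / b = (q * b) / b = q,  so  a % b = a - (a / b) * b = q * b - q * b = 0.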
FStar.Pervasives.Lemma | val divides_mult_right (a b d:int) : Lemma
(requires d `divides` b)
(ensures d `divides` (a * b)) | [
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let divides_mult_right a b d =
Classical.exists_elim (d `divides` (a * b)) (Squash.get_proof (d `divides` b))
(fun q ->
paren_mul_right a q d;
Classical.exists_intro (fun r -> a * b = r * d) (a * q)) | val divides_mult_right (a b d:int) : Lemma
(requires d `divides` b)
(ensures d `divides` (a * b))
let divides_mult_right a b d = | false | null | true | Classical.exists_elim (d `divides` (a * b))
(Squash.get_proof (d `divides` b))
(fun q ->
paren_mul_right a q d;
Classical.exists_intro (fun r -> a * b = r * d) (a * q)) | {
"checked_file": "FStar.Math.Euclid.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.Tactics.Effect.fsti.checked",
"FStar.Tactics.Canon.fst.checked",
"FStar.Squash.fsti.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Classical.Sugar.fsti.checked",
"FStar.Classical.fsti.checked",
"FStar.Calc.fsti.checked"
],
"interface_file": true,
"source_file": "FStar.Math.Euclid.fst"
} | [
"lemma"
] | [
"Prims.int",
"FStar.Classical.exists_elim",
"FStar.Math.Euclid.divides",
"FStar.Mul.op_Star",
"Prims.b2t",
"Prims.op_Equality",
"FStar.Squash.get_proof",
"FStar.Classical.exists_intro",
"Prims.unit",
"FStar.Math.Lemmas.paren_mul_right",
"Prims.squash"
] | [] | module FStar.Math.Euclid
open FStar.Mul
open FStar.Math.Lemmas
///
/// Auxiliary lemmas
///
val eq_mult_left (a b:int) : Lemma (requires a = b * a) (ensures a = 0 \/ b = 1)
let eq_mult_left a b = ()
val eq_mult_one (a b:int) : Lemma
(requires a * b = 1)
(ensures (a = 1 /\ b = 1) \/ (a = -1 /\ b = -1))
let eq_mult_one a b = ()
val opp_idempotent (a:int) : Lemma (-(-a) == a)
let opp_idempotent a = ()
val add_sub_l (a b:int) : Lemma (a - b + b = a)
let add_sub_l a b = ()
val add_sub_r (a b:int) : Lemma (a + b - b = a)
let add_sub_r a b = ()
///
/// Divides relation
///
let divides_reflexive a =
Classical.exists_intro (fun q -> a = q * a) 1
let divides_transitive a b c =
eliminate exists q1. b == q1 * a
returns a `divides` c
with _pf.
eliminate exists q2. c == q2 * b
returns _
with _pf2.
introduce exists q. c == q * a
with (q1 * q2)
and ()
let divide_antisym a b =
if a <> 0 then
Classical.exists_elim (a = b \/ a = -b) (Squash.get_proof (exists q1. b = q1 * a))
(fun q1 ->
Classical.exists_elim (a = b \/ a = -b) (Squash.get_proof (exists q2. a = q2 * b))
(fun q2 ->
assert (b = q1 * a);
assert (a = q2 * b);
assert (b = q1 * (q2 * b));
paren_mul_right q1 q2 b;
eq_mult_left b (q1 * q2);
eq_mult_one q1 q2))
let divides_0 a =
Classical.exists_intro (fun q -> 0 = q * a) 0
let divides_1 a = ()
let divides_minus a b =
Classical.exists_elim (a `divides` (-b))
(Squash.get_proof (a `divides` b))
(fun q -> Classical.exists_intro (fun q' -> -b = q' * a) (-q))
let divides_opp a b =
Classical.exists_elim ((-a) `divides` b)
(Squash.get_proof (a `divides` b))
(fun q -> Classical.exists_intro (fun q' -> b = q' * (-a)) (-q))
let divides_plus a b d =
Classical.exists_elim (d `divides` (a + b)) (Squash.get_proof (exists q1. a = q1 * d))
(fun q1 ->
Classical.exists_elim (d `divides` (a + b)) (Squash.get_proof (exists q2. b = q2 * d))
(fun q2 ->
assert (a + b = q1 * d + q2 * d);
distributivity_add_left q1 q2 d;
Classical.exists_intro (fun q -> a + b = q * d) (q1 + q2)))
let divides_sub a b d =
Classical.forall_intro_2 (Classical.move_requires_2 divides_minus);
divides_plus a (-b) d | false | false | FStar.Math.Euclid.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val divides_mult_right (a b d:int) : Lemma
(requires d `divides` b)
(ensures d `divides` (a * b)) | [] | FStar.Math.Euclid.divides_mult_right | {
"file_name": "ulib/FStar.Math.Euclid.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | a: Prims.int -> b: Prims.int -> d: Prims.int
-> FStar.Pervasives.Lemma (requires FStar.Math.Euclid.divides d b)
(ensures FStar.Math.Euclid.divides d (a * b)) | {
"end_col": 62,
"end_line": 91,
"start_col": 2,
"start_line": 88
} |
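A hypothetical sketch of the divides_mult_right row, assuming the same Euclid.Examples module and opens as the sketch after the divides_mod row; the numbers are arbitrary.

(* 5 divides 10 (witness q = 2), hence 5 divides 7 * 10. *)
let example_divides_mult_right () : Lemma (5 `divides` (7 * 10)) =
  FStar.Classical.exists_intro (fun q -> 10 = q * 5) 2;
  divides_mult_right 7 10 5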
FStar.Pervasives.Lemma | val is_gcd_minus (a b d:int) : Lemma
(requires is_gcd a (-b) d)
(ensures is_gcd b a d) | [
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let is_gcd_minus a b d =
Classical.forall_intro_2 (Classical.move_requires_2 divides_minus);
opp_idempotent b | val is_gcd_minus (a b d:int) : Lemma
(requires is_gcd a (-b) d)
(ensures is_gcd b a d)
let is_gcd_minus a b d = | false | null | true | Classical.forall_intro_2 (Classical.move_requires_2 divides_minus);
opp_idempotent b | {
"checked_file": "FStar.Math.Euclid.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.Tactics.Effect.fsti.checked",
"FStar.Tactics.Canon.fst.checked",
"FStar.Squash.fsti.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Classical.Sugar.fsti.checked",
"FStar.Classical.fsti.checked",
"FStar.Calc.fsti.checked"
],
"interface_file": true,
"source_file": "FStar.Math.Euclid.fst"
} | [
"lemma"
] | [
"Prims.int",
"FStar.Math.Euclid.opp_idempotent",
"Prims.unit",
"FStar.Classical.forall_intro_2",
"Prims.l_imp",
"FStar.Math.Euclid.divides",
"Prims.op_Minus",
"FStar.Classical.move_requires_2",
"FStar.Math.Euclid.divides_minus"
] | [] | module FStar.Math.Euclid
open FStar.Mul
open FStar.Math.Lemmas
///
/// Auxiliary lemmas
///
val eq_mult_left (a b:int) : Lemma (requires a = b * a) (ensures a = 0 \/ b = 1)
let eq_mult_left a b = ()
val eq_mult_one (a b:int) : Lemma
(requires a * b = 1)
(ensures (a = 1 /\ b = 1) \/ (a = -1 /\ b = -1))
let eq_mult_one a b = ()
val opp_idempotent (a:int) : Lemma (-(-a) == a)
let opp_idempotent a = ()
val add_sub_l (a b:int) : Lemma (a - b + b = a)
let add_sub_l a b = ()
val add_sub_r (a b:int) : Lemma (a + b - b = a)
let add_sub_r a b = ()
///
/// Divides relation
///
let divides_reflexive a =
Classical.exists_intro (fun q -> a = q * a) 1
let divides_transitive a b c =
eliminate exists q1. b == q1 * a
returns a `divides` c
with _pf.
eliminate exists q2. c == q2 * b
returns _
with _pf2.
introduce exists q. c == q * a
with (q1 * q2)
and ()
let divide_antisym a b =
if a <> 0 then
Classical.exists_elim (a = b \/ a = -b) (Squash.get_proof (exists q1. b = q1 * a))
(fun q1 ->
Classical.exists_elim (a = b \/ a = -b) (Squash.get_proof (exists q2. a = q2 * b))
(fun q2 ->
assert (b = q1 * a);
assert (a = q2 * b);
assert (b = q1 * (q2 * b));
paren_mul_right q1 q2 b;
eq_mult_left b (q1 * q2);
eq_mult_one q1 q2))
let divides_0 a =
Classical.exists_intro (fun q -> 0 = q * a) 0
let divides_1 a = ()
let divides_minus a b =
Classical.exists_elim (a `divides` (-b))
(Squash.get_proof (a `divides` b))
(fun q -> Classical.exists_intro (fun q' -> -b = q' * a) (-q))
let divides_opp a b =
Classical.exists_elim ((-a) `divides` b)
(Squash.get_proof (a `divides` b))
(fun q -> Classical.exists_intro (fun q' -> b = q' * (-a)) (-q))
let divides_plus a b d =
Classical.exists_elim (d `divides` (a + b)) (Squash.get_proof (exists q1. a = q1 * d))
(fun q1 ->
Classical.exists_elim (d `divides` (a + b)) (Squash.get_proof (exists q2. b = q2 * d))
(fun q2 ->
assert (a + b = q1 * d + q2 * d);
distributivity_add_left q1 q2 d;
Classical.exists_intro (fun q -> a + b = q * d) (q1 + q2)))
let divides_sub a b d =
Classical.forall_intro_2 (Classical.move_requires_2 divides_minus);
divides_plus a (-b) d
let divides_mult_right a b d =
Classical.exists_elim (d `divides` (a * b)) (Squash.get_proof (d `divides` b))
(fun q ->
paren_mul_right a q d;
Classical.exists_intro (fun r -> a * b = r * d) (a * q))
///
/// GCD
///
let mod_divides a b =
Classical.exists_intro (fun q -> a = q * b) (a / b)
let divides_mod a b =
Classical.exists_elim (a % b = 0) (Squash.get_proof (b `divides` a))
(fun q -> cancel_mul_div q b)
let is_gcd_unique a b c d =
divide_antisym c d
let is_gcd_reflexive a = ()
let is_gcd_symmetric a b d = ()
let is_gcd_0 a = ()
let is_gcd_1 a = () | false | false | FStar.Math.Euclid.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val is_gcd_minus (a b d:int) : Lemma
(requires is_gcd a (-b) d)
(ensures is_gcd b a d) | [] | FStar.Math.Euclid.is_gcd_minus | {
"file_name": "ulib/FStar.Math.Euclid.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | a: Prims.int -> b: Prims.int -> d: Prims.int
-> FStar.Pervasives.Lemma (requires FStar.Math.Euclid.is_gcd a (- b) d)
(ensures FStar.Math.Euclid.is_gcd b a d) | {
"end_col": 18,
"end_line": 117,
"start_col": 2,
"start_line": 116
} |
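A small corollary sketch for the is_gcd_minus row, combining it with the exported is_gcd_symmetric in the same way the negative-b branch of euclid_gcd does later in this file. Same hypothetical Euclid.Examples module and opens as the first sketch.

(* Undoing the negation of the second argument: from is_gcd a (-b) d recover is_gcd a b d. *)
let gcd_of_negated (a b d:int) : Lemma
  (requires is_gcd a (-b) d)
  (ensures is_gcd a b d)
  = is_gcd_minus a b d;      (* gives is_gcd b a d *)
    is_gcd_symmetric b a d   (* flips it back to is_gcd a b d *)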
FStar.Pervasives.Lemma | val divide_antisym (a b:int) : Lemma
(requires a `divides` b /\ b `divides` a)
(ensures a = b \/ a = -b) | [
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let divide_antisym a b =
if a <> 0 then
Classical.exists_elim (a = b \/ a = -b) (Squash.get_proof (exists q1. b = q1 * a))
(fun q1 ->
Classical.exists_elim (a = b \/ a = -b) (Squash.get_proof (exists q2. a = q2 * b))
(fun q2 ->
assert (b = q1 * a);
assert (a = q2 * b);
assert (b = q1 * (q2 * b));
paren_mul_right q1 q2 b;
eq_mult_left b (q1 * q2);
eq_mult_one q1 q2)) | val divide_antisym (a b:int) : Lemma
(requires a `divides` b /\ b `divides` a)
(ensures a = b \/ a = -b)
let divide_antisym a b = | false | null | true | if a <> 0
then
Classical.exists_elim (a = b \/ a = - b)
(Squash.get_proof (exists q1. b = q1 * a))
(fun q1 ->
Classical.exists_elim (a = b \/ a = - b)
(Squash.get_proof (exists q2. a = q2 * b))
(fun q2 ->
assert (b = q1 * a);
assert (a = q2 * b);
assert (b = q1 * (q2 * b));
paren_mul_right q1 q2 b;
eq_mult_left b (q1 * q2);
eq_mult_one q1 q2)) | {
"checked_file": "FStar.Math.Euclid.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.Tactics.Effect.fsti.checked",
"FStar.Tactics.Canon.fst.checked",
"FStar.Squash.fsti.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Classical.Sugar.fsti.checked",
"FStar.Classical.fsti.checked",
"FStar.Calc.fsti.checked"
],
"interface_file": true,
"source_file": "FStar.Math.Euclid.fst"
} | [
"lemma"
] | [
"Prims.int",
"Prims.op_disEquality",
"FStar.Classical.exists_elim",
"Prims.l_or",
"Prims.b2t",
"Prims.op_Equality",
"Prims.op_Minus",
"FStar.Mul.op_Star",
"FStar.Squash.get_proof",
"Prims.l_Exists",
"FStar.Math.Euclid.eq_mult_one",
"Prims.unit",
"FStar.Math.Euclid.eq_mult_left",
"FStar.Math.Lemmas.paren_mul_right",
"Prims._assert",
"Prims.squash",
"Prims.bool"
] | [] | module FStar.Math.Euclid
open FStar.Mul
open FStar.Math.Lemmas
///
/// Auxiliary lemmas
///
val eq_mult_left (a b:int) : Lemma (requires a = b * a) (ensures a = 0 \/ b = 1)
let eq_mult_left a b = ()
val eq_mult_one (a b:int) : Lemma
(requires a * b = 1)
(ensures (a = 1 /\ b = 1) \/ (a = -1 /\ b = -1))
let eq_mult_one a b = ()
val opp_idempotent (a:int) : Lemma (-(-a) == a)
let opp_idempotent a = ()
val add_sub_l (a b:int) : Lemma (a - b + b = a)
let add_sub_l a b = ()
val add_sub_r (a b:int) : Lemma (a + b - b = a)
let add_sub_r a b = ()
///
/// Divides relation
///
let divides_reflexive a =
Classical.exists_intro (fun q -> a = q * a) 1
let divides_transitive a b c =
eliminate exists q1. b == q1 * a
returns a `divides` c
with _pf.
eliminate exists q2. c == q2 * b
returns _
with _pf2.
introduce exists q. c == q * a
with (q1 * q2)
and () | false | false | FStar.Math.Euclid.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val divide_antisym (a b:int) : Lemma
(requires a `divides` b /\ b `divides` a)
(ensures a = b \/ a = -b) | [] | FStar.Math.Euclid.divide_antisym | {
"file_name": "ulib/FStar.Math.Euclid.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | a: Prims.int -> b: Prims.int
-> FStar.Pervasives.Lemma
(requires FStar.Math.Euclid.divides a b /\ FStar.Math.Euclid.divides b a)
(ensures a = b \/ a = - b) | {
"end_col": 31,
"end_line": 57,
"start_col": 2,
"start_line": 47
} |
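A corollary sketch for the divide_antisym row: for strictly positive arguments the disjunction in its postcondition collapses to equality. Same hypothetical module and opens as the first sketch.

(* For a, b > 0, mutual divisibility forces a = b; the a = -b branch is impossible since -b < 0 < a. *)
let divide_antisym_pos (a b:pos) : Lemma
  (requires a `divides` b /\ b `divides` a)
  (ensures a = b)
  = divide_antisym a b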
FStar.Pervasives.Lemma | val divides_plus (a b d:int) : Lemma
(requires d `divides` a /\ d `divides` b)
(ensures d `divides` (a + b)) | [
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let divides_plus a b d =
Classical.exists_elim (d `divides` (a + b)) (Squash.get_proof (exists q1. a = q1 * d))
(fun q1 ->
Classical.exists_elim (d `divides` (a + b)) (Squash.get_proof (exists q2. b = q2 * d))
(fun q2 ->
assert (a + b = q1 * d + q2 * d);
distributivity_add_left q1 q2 d;
Classical.exists_intro (fun q -> a + b = q * d) (q1 + q2))) | val divides_plus (a b d:int) : Lemma
(requires d `divides` a /\ d `divides` b)
(ensures d `divides` (a + b))
let divides_plus a b d = | false | null | true | Classical.exists_elim (d `divides` (a + b))
(Squash.get_proof (exists q1. a = q1 * d))
(fun q1 ->
Classical.exists_elim (d `divides` (a + b))
(Squash.get_proof (exists q2. b = q2 * d))
(fun q2 ->
assert (a + b = q1 * d + q2 * d);
distributivity_add_left q1 q2 d;
Classical.exists_intro (fun q -> a + b = q * d) (q1 + q2))) | {
"checked_file": "FStar.Math.Euclid.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.Tactics.Effect.fsti.checked",
"FStar.Tactics.Canon.fst.checked",
"FStar.Squash.fsti.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Classical.Sugar.fsti.checked",
"FStar.Classical.fsti.checked",
"FStar.Calc.fsti.checked"
],
"interface_file": true,
"source_file": "FStar.Math.Euclid.fst"
} | [
"lemma"
] | [
"Prims.int",
"FStar.Classical.exists_elim",
"FStar.Math.Euclid.divides",
"Prims.op_Addition",
"Prims.b2t",
"Prims.op_Equality",
"FStar.Mul.op_Star",
"FStar.Squash.get_proof",
"Prims.l_Exists",
"FStar.Classical.exists_intro",
"Prims.unit",
"FStar.Math.Lemmas.distributivity_add_left",
"Prims._assert",
"Prims.squash"
] | [] | module FStar.Math.Euclid
open FStar.Mul
open FStar.Math.Lemmas
///
/// Auxiliary lemmas
///
val eq_mult_left (a b:int) : Lemma (requires a = b * a) (ensures a = 0 \/ b = 1)
let eq_mult_left a b = ()
val eq_mult_one (a b:int) : Lemma
(requires a * b = 1)
(ensures (a = 1 /\ b = 1) \/ (a = -1 /\ b = -1))
let eq_mult_one a b = ()
val opp_idempotent (a:int) : Lemma (-(-a) == a)
let opp_idempotent a = ()
val add_sub_l (a b:int) : Lemma (a - b + b = a)
let add_sub_l a b = ()
val add_sub_r (a b:int) : Lemma (a + b - b = a)
let add_sub_r a b = ()
///
/// Divides relation
///
let divides_reflexive a =
Classical.exists_intro (fun q -> a = q * a) 1
let divides_transitive a b c =
eliminate exists q1. b == q1 * a
returns a `divides` c
with _pf.
eliminate exists q2. c == q2 * b
returns _
with _pf2.
introduce exists q. c == q * a
with (q1 * q2)
and ()
let divide_antisym a b =
if a <> 0 then
Classical.exists_elim (a = b \/ a = -b) (Squash.get_proof (exists q1. b = q1 * a))
(fun q1 ->
Classical.exists_elim (a = b \/ a = -b) (Squash.get_proof (exists q2. a = q2 * b))
(fun q2 ->
assert (b = q1 * a);
assert (a = q2 * b);
assert (b = q1 * (q2 * b));
paren_mul_right q1 q2 b;
eq_mult_left b (q1 * q2);
eq_mult_one q1 q2))
let divides_0 a =
Classical.exists_intro (fun q -> 0 = q * a) 0
let divides_1 a = ()
let divides_minus a b =
Classical.exists_elim (a `divides` (-b))
(Squash.get_proof (a `divides` b))
(fun q -> Classical.exists_intro (fun q' -> -b = q' * a) (-q))
let divides_opp a b =
Classical.exists_elim ((-a) `divides` b)
(Squash.get_proof (a `divides` b))
(fun q -> Classical.exists_intro (fun q' -> b = q' * (-a)) (-q)) | false | false | FStar.Math.Euclid.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val divides_plus (a b d:int) : Lemma
(requires d `divides` a /\ d `divides` b)
(ensures d `divides` (a + b)) | [] | FStar.Math.Euclid.divides_plus | {
"file_name": "ulib/FStar.Math.Euclid.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | a: Prims.int -> b: Prims.int -> d: Prims.int
-> FStar.Pervasives.Lemma
(requires FStar.Math.Euclid.divides d a /\ FStar.Math.Euclid.divides d b)
(ensures FStar.Math.Euclid.divides d (a + b)) | {
"end_col": 69,
"end_line": 81,
"start_col": 2,
"start_line": 75
} |
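A concrete sketch for the divides_plus row (arbitrary numbers); same hypothetical module and opens as the first sketch.

(* 3 divides 9 (witness 3) and 3 divides 12 (witness 4), so 3 divides their sum. *)
let example_divides_plus () : Lemma (3 `divides` (9 + 12)) =
  FStar.Classical.exists_intro (fun q -> 9 = q * 3) 3;
  FStar.Classical.exists_intro (fun q -> 12 = q * 3) 4;
  divides_plus 9 12 3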
FStar.Pervasives.Lemma | val divides_0 (a:int) : Lemma (a `divides` 0) | [
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let divides_0 a =
Classical.exists_intro (fun q -> 0 = q * a) 0 | val divides_0 (a:int) : Lemma (a `divides` 0)
let divides_0 a = | false | null | true | Classical.exists_intro (fun q -> 0 = q * a) 0 | {
"checked_file": "FStar.Math.Euclid.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.Tactics.Effect.fsti.checked",
"FStar.Tactics.Canon.fst.checked",
"FStar.Squash.fsti.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Classical.Sugar.fsti.checked",
"FStar.Classical.fsti.checked",
"FStar.Calc.fsti.checked"
],
"interface_file": true,
"source_file": "FStar.Math.Euclid.fst"
} | [
"lemma"
] | [
"Prims.int",
"FStar.Classical.exists_intro",
"Prims.b2t",
"Prims.op_Equality",
"FStar.Mul.op_Star",
"Prims.unit"
] | [] | module FStar.Math.Euclid
open FStar.Mul
open FStar.Math.Lemmas
///
/// Auxiliary lemmas
///
val eq_mult_left (a b:int) : Lemma (requires a = b * a) (ensures a = 0 \/ b = 1)
let eq_mult_left a b = ()
val eq_mult_one (a b:int) : Lemma
(requires a * b = 1)
(ensures (a = 1 /\ b = 1) \/ (a = -1 /\ b = -1))
let eq_mult_one a b = ()
val opp_idempotent (a:int) : Lemma (-(-a) == a)
let opp_idempotent a = ()
val add_sub_l (a b:int) : Lemma (a - b + b = a)
let add_sub_l a b = ()
val add_sub_r (a b:int) : Lemma (a + b - b = a)
let add_sub_r a b = ()
///
/// Divides relation
///
let divides_reflexive a =
Classical.exists_intro (fun q -> a = q * a) 1
let divides_transitive a b c =
eliminate exists q1. b == q1 * a
returns a `divides` c
with _pf.
eliminate exists q2. c == q2 * b
returns _
with _pf2.
introduce exists q. c == q * a
with (q1 * q2)
and ()
let divide_antisym a b =
if a <> 0 then
Classical.exists_elim (a = b \/ a = -b) (Squash.get_proof (exists q1. b = q1 * a))
(fun q1 ->
Classical.exists_elim (a = b \/ a = -b) (Squash.get_proof (exists q2. a = q2 * b))
(fun q2 ->
assert (b = q1 * a);
assert (a = q2 * b);
assert (b = q1 * (q2 * b));
paren_mul_right q1 q2 b;
eq_mult_left b (q1 * q2);
eq_mult_one q1 q2)) | false | false | FStar.Math.Euclid.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val divides_0 (a:int) : Lemma (a `divides` 0) | [] | FStar.Math.Euclid.divides_0 | {
"file_name": "ulib/FStar.Math.Euclid.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | a: Prims.int -> FStar.Pervasives.Lemma (ensures FStar.Math.Euclid.divides a 0) | {
"end_col": 47,
"end_line": 60,
"start_col": 2,
"start_line": 60
} |
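A one-line sketch for the divides_0 row; same hypothetical module and opens as the first sketch. The number 42 is arbitrary.

(* Every integer divides 0, e.g. 0 = 0 * 42. *)
let example_divides_0 () : Lemma (42 `divides` 0) = divides_0 42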
FStar.Pervasives.Lemma | val divides_sub (a b d:int) : Lemma
(requires d `divides` a /\ d `divides` b)
(ensures d `divides` (a - b)) | [
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let divides_sub a b d =
Classical.forall_intro_2 (Classical.move_requires_2 divides_minus);
divides_plus a (-b) d | val divides_sub (a b d:int) : Lemma
(requires d `divides` a /\ d `divides` b)
(ensures d `divides` (a - b))
let divides_sub a b d = | false | null | true | Classical.forall_intro_2 (Classical.move_requires_2 divides_minus);
divides_plus a (- b) d | {
"checked_file": "FStar.Math.Euclid.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.Tactics.Effect.fsti.checked",
"FStar.Tactics.Canon.fst.checked",
"FStar.Squash.fsti.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Classical.Sugar.fsti.checked",
"FStar.Classical.fsti.checked",
"FStar.Calc.fsti.checked"
],
"interface_file": true,
"source_file": "FStar.Math.Euclid.fst"
} | [
"lemma"
] | [
"Prims.int",
"FStar.Math.Euclid.divides_plus",
"Prims.op_Minus",
"Prims.unit",
"FStar.Classical.forall_intro_2",
"Prims.l_imp",
"FStar.Math.Euclid.divides",
"FStar.Classical.move_requires_2",
"FStar.Math.Euclid.divides_minus"
] | [] | module FStar.Math.Euclid
open FStar.Mul
open FStar.Math.Lemmas
///
/// Auxiliary lemmas
///
val eq_mult_left (a b:int) : Lemma (requires a = b * a) (ensures a = 0 \/ b = 1)
let eq_mult_left a b = ()
val eq_mult_one (a b:int) : Lemma
(requires a * b = 1)
(ensures (a = 1 /\ b = 1) \/ (a = -1 /\ b = -1))
let eq_mult_one a b = ()
val opp_idempotent (a:int) : Lemma (-(-a) == a)
let opp_idempotent a = ()
val add_sub_l (a b:int) : Lemma (a - b + b = a)
let add_sub_l a b = ()
val add_sub_r (a b:int) : Lemma (a + b - b = a)
let add_sub_r a b = ()
///
/// Divides relation
///
let divides_reflexive a =
Classical.exists_intro (fun q -> a = q * a) 1
let divides_transitive a b c =
eliminate exists q1. b == q1 * a
returns a `divides` c
with _pf.
eliminate exists q2. c == q2 * b
returns _
with _pf2.
introduce exists q. c == q * a
with (q1 * q2)
and ()
let divide_antisym a b =
if a <> 0 then
Classical.exists_elim (a = b \/ a = -b) (Squash.get_proof (exists q1. b = q1 * a))
(fun q1 ->
Classical.exists_elim (a = b \/ a = -b) (Squash.get_proof (exists q2. a = q2 * b))
(fun q2 ->
assert (b = q1 * a);
assert (a = q2 * b);
assert (b = q1 * (q2 * b));
paren_mul_right q1 q2 b;
eq_mult_left b (q1 * q2);
eq_mult_one q1 q2))
let divides_0 a =
Classical.exists_intro (fun q -> 0 = q * a) 0
let divides_1 a = ()
let divides_minus a b =
Classical.exists_elim (a `divides` (-b))
(Squash.get_proof (a `divides` b))
(fun q -> Classical.exists_intro (fun q' -> -b = q' * a) (-q))
let divides_opp a b =
Classical.exists_elim ((-a) `divides` b)
(Squash.get_proof (a `divides` b))
(fun q -> Classical.exists_intro (fun q' -> b = q' * (-a)) (-q))
let divides_plus a b d =
Classical.exists_elim (d `divides` (a + b)) (Squash.get_proof (exists q1. a = q1 * d))
(fun q1 ->
Classical.exists_elim (d `divides` (a + b)) (Squash.get_proof (exists q2. b = q2 * d))
(fun q2 ->
assert (a + b = q1 * d + q2 * d);
distributivity_add_left q1 q2 d;
Classical.exists_intro (fun q -> a + b = q * d) (q1 + q2))) | false | false | FStar.Math.Euclid.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val divides_sub (a b d:int) : Lemma
(requires d `divides` a /\ d `divides` b)
(ensures d `divides` (a - b)) | [] | FStar.Math.Euclid.divides_sub | {
"file_name": "ulib/FStar.Math.Euclid.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | a: Prims.int -> b: Prims.int -> d: Prims.int
-> FStar.Pervasives.Lemma
(requires FStar.Math.Euclid.divides d a /\ FStar.Math.Euclid.divides d b)
(ensures FStar.Math.Euclid.divides d (a - b)) | {
"end_col": 23,
"end_line": 85,
"start_col": 2,
"start_line": 84
} |
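A concrete sketch for the divides_sub row (arbitrary numbers); same hypothetical module and opens as the first sketch.

(* 7 divides 35 (witness 5) and 7 divides 14 (witness 2), so 7 divides their difference. *)
let example_divides_sub () : Lemma (7 `divides` (35 - 14)) =
  FStar.Classical.exists_intro (fun q -> 35 = q * 7) 5;
  FStar.Classical.exists_intro (fun q -> 14 = q * 7) 2;
  divides_sub 35 14 7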
FStar.Pervasives.Lemma | val divides_minus (a b:int) : Lemma
(requires a `divides` b)
(ensures a `divides` (-b)) | [
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let divides_minus a b =
Classical.exists_elim (a `divides` (-b))
(Squash.get_proof (a `divides` b))
(fun q -> Classical.exists_intro (fun q' -> -b = q' * a) (-q)) | val divides_minus (a b:int) : Lemma
(requires a `divides` b)
(ensures a `divides` (-b))
let divides_minus a b = | false | null | true | Classical.exists_elim (a `divides` (- b))
(Squash.get_proof (a `divides` b))
(fun q -> Classical.exists_intro (fun q' -> - b = q' * a) (- q)) | {
"checked_file": "FStar.Math.Euclid.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.Tactics.Effect.fsti.checked",
"FStar.Tactics.Canon.fst.checked",
"FStar.Squash.fsti.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Classical.Sugar.fsti.checked",
"FStar.Classical.fsti.checked",
"FStar.Calc.fsti.checked"
],
"interface_file": true,
"source_file": "FStar.Math.Euclid.fst"
} | [
"lemma"
] | [
"Prims.int",
"FStar.Classical.exists_elim",
"FStar.Math.Euclid.divides",
"Prims.op_Minus",
"Prims.b2t",
"Prims.op_Equality",
"FStar.Mul.op_Star",
"FStar.Squash.get_proof",
"FStar.Classical.exists_intro",
"Prims.squash",
"Prims.unit"
] | [] | module FStar.Math.Euclid
open FStar.Mul
open FStar.Math.Lemmas
///
/// Auxiliary lemmas
///
val eq_mult_left (a b:int) : Lemma (requires a = b * a) (ensures a = 0 \/ b = 1)
let eq_mult_left a b = ()
val eq_mult_one (a b:int) : Lemma
(requires a * b = 1)
(ensures (a = 1 /\ b = 1) \/ (a = -1 /\ b = -1))
let eq_mult_one a b = ()
val opp_idempotent (a:int) : Lemma (-(-a) == a)
let opp_idempotent a = ()
val add_sub_l (a b:int) : Lemma (a - b + b = a)
let add_sub_l a b = ()
val add_sub_r (a b:int) : Lemma (a + b - b = a)
let add_sub_r a b = ()
///
/// Divides relation
///
let divides_reflexive a =
Classical.exists_intro (fun q -> a = q * a) 1
let divides_transitive a b c =
eliminate exists q1. b == q1 * a
returns a `divides` c
with _pf.
eliminate exists q2. c == q2 * b
returns _
with _pf2.
introduce exists q. c == q * a
with (q1 * q2)
and ()
let divide_antisym a b =
if a <> 0 then
Classical.exists_elim (a = b \/ a = -b) (Squash.get_proof (exists q1. b = q1 * a))
(fun q1 ->
Classical.exists_elim (a = b \/ a = -b) (Squash.get_proof (exists q2. a = q2 * b))
(fun q2 ->
assert (b = q1 * a);
assert (a = q2 * b);
assert (b = q1 * (q2 * b));
paren_mul_right q1 q2 b;
eq_mult_left b (q1 * q2);
eq_mult_one q1 q2))
let divides_0 a =
Classical.exists_intro (fun q -> 0 = q * a) 0
let divides_1 a = () | false | false | FStar.Math.Euclid.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val divides_minus (a b:int) : Lemma
(requires a `divides` b)
(ensures a `divides` (-b)) | [] | FStar.Math.Euclid.divides_minus | {
"file_name": "ulib/FStar.Math.Euclid.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | a: Prims.int -> b: Prims.int
-> FStar.Pervasives.Lemma (requires FStar.Math.Euclid.divides a b)
(ensures FStar.Math.Euclid.divides a (- b)) | {
"end_col": 66,
"end_line": 67,
"start_col": 2,
"start_line": 65
} |
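A concrete sketch for the divides_minus row (arbitrary numbers); same hypothetical module and opens as the first sketch.

(* 4 divides 20 (witness 5), so 4 also divides -20. *)
let example_divides_minus () : Lemma (4 `divides` (-20)) =
  FStar.Classical.exists_intro (fun q -> 20 = q * 4) 5;
  divides_minus 4 20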
FStar.Pervasives.Lemma | val is_gcd_plus (a b q d:int) : Lemma
(requires is_gcd a b d)
(ensures is_gcd a (b + q * a) d) | [
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let is_gcd_plus a b q d =
add_sub_r b (q * a);
Classical.forall_intro_3 (Classical.move_requires_3 divides_plus);
Classical.forall_intro_3 (Classical.move_requires_3 divides_mult_right);
Classical.forall_intro_3 (Classical.move_requires_3 divides_sub) | val is_gcd_plus (a b q d:int) : Lemma
(requires is_gcd a b d)
(ensures is_gcd a (b + q * a) d)
let is_gcd_plus a b q d = | false | null | true | add_sub_r b (q * a);
Classical.forall_intro_3 (Classical.move_requires_3 divides_plus);
Classical.forall_intro_3 (Classical.move_requires_3 divides_mult_right);
Classical.forall_intro_3 (Classical.move_requires_3 divides_sub) | {
"checked_file": "FStar.Math.Euclid.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.Tactics.Effect.fsti.checked",
"FStar.Tactics.Canon.fst.checked",
"FStar.Squash.fsti.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Classical.Sugar.fsti.checked",
"FStar.Classical.fsti.checked",
"FStar.Calc.fsti.checked"
],
"interface_file": true,
"source_file": "FStar.Math.Euclid.fst"
} | [
"lemma"
] | [
"Prims.int",
"FStar.Classical.forall_intro_3",
"Prims.l_imp",
"Prims.l_and",
"FStar.Math.Euclid.divides",
"Prims.op_Subtraction",
"FStar.Classical.move_requires_3",
"FStar.Math.Euclid.divides_sub",
"Prims.unit",
"FStar.Mul.op_Star",
"FStar.Math.Euclid.divides_mult_right",
"Prims.op_Addition",
"FStar.Math.Euclid.divides_plus",
"FStar.Math.Euclid.add_sub_r"
] | [] | module FStar.Math.Euclid
open FStar.Mul
open FStar.Math.Lemmas
///
/// Auxiliary lemmas
///
val eq_mult_left (a b:int) : Lemma (requires a = b * a) (ensures a = 0 \/ b = 1)
let eq_mult_left a b = ()
val eq_mult_one (a b:int) : Lemma
(requires a * b = 1)
(ensures (a = 1 /\ b = 1) \/ (a = -1 /\ b = -1))
let eq_mult_one a b = ()
val opp_idempotent (a:int) : Lemma (-(-a) == a)
let opp_idempotent a = ()
val add_sub_l (a b:int) : Lemma (a - b + b = a)
let add_sub_l a b = ()
val add_sub_r (a b:int) : Lemma (a + b - b = a)
let add_sub_r a b = ()
///
/// Divides relation
///
let divides_reflexive a =
Classical.exists_intro (fun q -> a = q * a) 1
let divides_transitive a b c =
eliminate exists q1. b == q1 * a
returns a `divides` c
with _pf.
eliminate exists q2. c == q2 * b
returns _
with _pf2.
introduce exists q. c == q * a
with (q1 * q2)
and ()
let divide_antisym a b =
if a <> 0 then
Classical.exists_elim (a = b \/ a = -b) (Squash.get_proof (exists q1. b = q1 * a))
(fun q1 ->
Classical.exists_elim (a = b \/ a = -b) (Squash.get_proof (exists q2. a = q2 * b))
(fun q2 ->
assert (b = q1 * a);
assert (a = q2 * b);
assert (b = q1 * (q2 * b));
paren_mul_right q1 q2 b;
eq_mult_left b (q1 * q2);
eq_mult_one q1 q2))
let divides_0 a =
Classical.exists_intro (fun q -> 0 = q * a) 0
let divides_1 a = ()
let divides_minus a b =
Classical.exists_elim (a `divides` (-b))
(Squash.get_proof (a `divides` b))
(fun q -> Classical.exists_intro (fun q' -> -b = q' * a) (-q))
let divides_opp a b =
Classical.exists_elim ((-a) `divides` b)
(Squash.get_proof (a `divides` b))
(fun q -> Classical.exists_intro (fun q' -> b = q' * (-a)) (-q))
let divides_plus a b d =
Classical.exists_elim (d `divides` (a + b)) (Squash.get_proof (exists q1. a = q1 * d))
(fun q1 ->
Classical.exists_elim (d `divides` (a + b)) (Squash.get_proof (exists q2. b = q2 * d))
(fun q2 ->
assert (a + b = q1 * d + q2 * d);
distributivity_add_left q1 q2 d;
Classical.exists_intro (fun q -> a + b = q * d) (q1 + q2)))
let divides_sub a b d =
Classical.forall_intro_2 (Classical.move_requires_2 divides_minus);
divides_plus a (-b) d
let divides_mult_right a b d =
Classical.exists_elim (d `divides` (a * b)) (Squash.get_proof (d `divides` b))
(fun q ->
paren_mul_right a q d;
Classical.exists_intro (fun r -> a * b = r * d) (a * q))
///
/// GCD
///
let mod_divides a b =
Classical.exists_intro (fun q -> a = q * b) (a / b)
let divides_mod a b =
Classical.exists_elim (a % b = 0) (Squash.get_proof (b `divides` a))
(fun q -> cancel_mul_div q b)
let is_gcd_unique a b c d =
divide_antisym c d
let is_gcd_reflexive a = ()
let is_gcd_symmetric a b d = ()
let is_gcd_0 a = ()
let is_gcd_1 a = ()
let is_gcd_minus a b d =
Classical.forall_intro_2 (Classical.move_requires_2 divides_minus);
opp_idempotent b
let is_gcd_opp a b d =
Classical.forall_intro_2 (Classical.move_requires_2 divides_minus);
divides_opp d a;
divides_opp d b | false | false | FStar.Math.Euclid.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val is_gcd_plus (a b q d:int) : Lemma
(requires is_gcd a b d)
(ensures is_gcd a (b + q * a) d) | [] | FStar.Math.Euclid.is_gcd_plus | {
"file_name": "ulib/FStar.Math.Euclid.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | a: Prims.int -> b: Prims.int -> q: Prims.int -> d: Prims.int
-> FStar.Pervasives.Lemma (requires FStar.Math.Euclid.is_gcd a b d)
(ensures FStar.Math.Euclid.is_gcd a (b + q * a) d) | {
"end_col": 66,
"end_line": 128,
"start_col": 2,
"start_line": 125
} |
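A usage sketch for the is_gcd_plus row with the multiplier q instantiated to the concrete value 3; same hypothetical module and opens as the first sketch.

(* A gcd of a and b is also a gcd of a and b + 3 * a. *)
let gcd_shift_by_three_a (a b d:int) : Lemma
  (requires is_gcd a b d)
  (ensures is_gcd a (b + 3 * a) d)
  = is_gcd_plus a b 3 d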
Prims.Pure | val bezout_prime (p:int) (a:pos{a < p}) : Pure (int & int)
(requires is_prime p)
(ensures fun (r, s) -> r * p + s * a = 1) | [
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let bezout_prime p a =
let r, s, d = euclid_gcd p a in
assert (r * p + s * a = d);
assert (is_gcd p a d);
is_gcd_prime p a;
is_gcd_unique p a 1 d;
assert (d = 1 \/ d = -1);
assert ((-r) * p + (-s) * a == -(r * p + s * a)) by (FStar.Tactics.Canon.canon());
if d = 1 then r, s else -r, -s | val bezout_prime (p:int) (a:pos{a < p}) : Pure (int & int)
(requires is_prime p)
(ensures fun (r, s) -> r * p + s * a = 1)
let bezout_prime p a = | false | null | false | let r, s, d = euclid_gcd p a in
assert (r * p + s * a = d);
assert (is_gcd p a d);
is_gcd_prime p a;
is_gcd_unique p a 1 d;
assert (d = 1 \/ d = - 1);
FStar.Tactics.Effect.assert_by_tactic ((- r) * p + (- s) * a == - (r * p + s * a))
(fun _ ->
();
(FStar.Tactics.Canon.canon ()));
if d = 1 then r, s else - r, - s | {
"checked_file": "FStar.Math.Euclid.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.Tactics.Effect.fsti.checked",
"FStar.Tactics.Canon.fst.checked",
"FStar.Squash.fsti.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Classical.Sugar.fsti.checked",
"FStar.Classical.fsti.checked",
"FStar.Calc.fsti.checked"
],
"interface_file": true,
"source_file": "FStar.Math.Euclid.fst"
} | [] | [
"Prims.int",
"Prims.pos",
"Prims.b2t",
"Prims.op_LessThan",
"Prims.op_Equality",
"FStar.Pervasives.Native.Mktuple2",
"Prims.bool",
"Prims.op_Minus",
"FStar.Pervasives.Native.tuple2",
"Prims.unit",
"FStar.Tactics.Effect.assert_by_tactic",
"Prims.eq2",
"Prims.op_Addition",
"FStar.Mul.op_Star",
"FStar.Tactics.Canon.canon",
"Prims._assert",
"Prims.l_or",
"FStar.Math.Euclid.is_gcd_unique",
"FStar.Math.Euclid.is_gcd_prime",
"FStar.Math.Euclid.is_gcd",
"FStar.Pervasives.Native.tuple3",
"FStar.Math.Euclid.euclid_gcd"
] | [] | module FStar.Math.Euclid
open FStar.Mul
open FStar.Math.Lemmas
///
/// Auxiliary lemmas
///
val eq_mult_left (a b:int) : Lemma (requires a = b * a) (ensures a = 0 \/ b = 1)
let eq_mult_left a b = ()
val eq_mult_one (a b:int) : Lemma
(requires a * b = 1)
(ensures (a = 1 /\ b = 1) \/ (a = -1 /\ b = -1))
let eq_mult_one a b = ()
val opp_idempotent (a:int) : Lemma (-(-a) == a)
let opp_idempotent a = ()
val add_sub_l (a b:int) : Lemma (a - b + b = a)
let add_sub_l a b = ()
val add_sub_r (a b:int) : Lemma (a + b - b = a)
let add_sub_r a b = ()
///
/// Divides relation
///
let divides_reflexive a =
Classical.exists_intro (fun q -> a = q * a) 1
let divides_transitive a b c =
eliminate exists q1. b == q1 * a
returns a `divides` c
with _pf.
eliminate exists q2. c == q2 * b
returns _
with _pf2.
introduce exists q. c == q * a
with (q1 * q2)
and ()
let divide_antisym a b =
if a <> 0 then
Classical.exists_elim (a = b \/ a = -b) (Squash.get_proof (exists q1. b = q1 * a))
(fun q1 ->
Classical.exists_elim (a = b \/ a = -b) (Squash.get_proof (exists q2. a = q2 * b))
(fun q2 ->
assert (b = q1 * a);
assert (a = q2 * b);
assert (b = q1 * (q2 * b));
paren_mul_right q1 q2 b;
eq_mult_left b (q1 * q2);
eq_mult_one q1 q2))
let divides_0 a =
Classical.exists_intro (fun q -> 0 = q * a) 0
let divides_1 a = ()
let divides_minus a b =
Classical.exists_elim (a `divides` (-b))
(Squash.get_proof (a `divides` b))
(fun q -> Classical.exists_intro (fun q' -> -b = q' * a) (-q))
let divides_opp a b =
Classical.exists_elim ((-a) `divides` b)
(Squash.get_proof (a `divides` b))
(fun q -> Classical.exists_intro (fun q' -> b = q' * (-a)) (-q))
let divides_plus a b d =
Classical.exists_elim (d `divides` (a + b)) (Squash.get_proof (exists q1. a = q1 * d))
(fun q1 ->
Classical.exists_elim (d `divides` (a + b)) (Squash.get_proof (exists q2. b = q2 * d))
(fun q2 ->
assert (a + b = q1 * d + q2 * d);
distributivity_add_left q1 q2 d;
Classical.exists_intro (fun q -> a + b = q * d) (q1 + q2)))
let divides_sub a b d =
Classical.forall_intro_2 (Classical.move_requires_2 divides_minus);
divides_plus a (-b) d
let divides_mult_right a b d =
Classical.exists_elim (d `divides` (a * b)) (Squash.get_proof (d `divides` b))
(fun q ->
paren_mul_right a q d;
Classical.exists_intro (fun r -> a * b = r * d) (a * q))
///
/// GCD
///
let mod_divides a b =
Classical.exists_intro (fun q -> a = q * b) (a / b)
let divides_mod a b =
Classical.exists_elim (a % b = 0) (Squash.get_proof (b `divides` a))
(fun q -> cancel_mul_div q b)
let is_gcd_unique a b c d =
divide_antisym c d
let is_gcd_reflexive a = ()
let is_gcd_symmetric a b d = ()
let is_gcd_0 a = ()
let is_gcd_1 a = ()
let is_gcd_minus a b d =
Classical.forall_intro_2 (Classical.move_requires_2 divides_minus);
opp_idempotent b
let is_gcd_opp a b d =
Classical.forall_intro_2 (Classical.move_requires_2 divides_minus);
divides_opp d a;
divides_opp d b
let is_gcd_plus a b q d =
add_sub_r b (q * a);
Classical.forall_intro_3 (Classical.move_requires_3 divides_plus);
Classical.forall_intro_3 (Classical.move_requires_3 divides_mult_right);
Classical.forall_intro_3 (Classical.move_requires_3 divides_sub)
///
/// Extended Euclidean algorithm
///
val is_gcd_for_euclid (a b q d:int) : Lemma
(requires is_gcd b (a - q * b) d)
(ensures is_gcd a b d)
let is_gcd_for_euclid a b q d =
add_sub_l a (q * b);
is_gcd_plus b (a - q * b) q d
val egcd (a b u1 u2 u3 v1 v2 v3:int) : Pure (int & int & int)
(requires v3 >= 0 /\
u1 * a + u2 * b = u3 /\
v1 * a + v2 * b = v3 /\
(forall d. is_gcd u3 v3 d ==> is_gcd a b d))
(ensures (fun (u, v, d) -> u * a + v * b = d /\ is_gcd a b d))
(decreases v3)
let rec egcd a b u1 u2 u3 v1 v2 v3 =
if v3 = 0 then
begin
divides_0 u3;
(u1, u2, u3)
end
else
begin
let q = u3 / v3 in
euclidean_division_definition u3 v3;
assert (u3 - q * v3 = (q * v3 + u3 % v3) - q * v3);
assert (q * v3 - q * v3 = 0);
swap_add_plus_minus (q * v3) (u3 % v3) (q * v3);
calc (==) {
(u1 - q * v1) * a + (u2 - q * v2) * b;
== { _ by (FStar.Tactics.Canon.canon()) }
(u1 * a + u2 * b) - q * (v1 * a + v2 * b);
== { }
u3 - q * v3;
== { lemma_div_mod u3 v3 }
u3 % v3;
};
let u1, v1 = v1, u1 - q * v1 in
let u2, v2 = v2, u2 - q * v2 in
let u3' = u3 in
let v3' = v3 in
let u3, v3 = v3, u3 - q * v3 in
(* proving the implication in the precondition *)
introduce forall d. is_gcd v3' (u3' - q * v3') d ==> is_gcd u3' v3' d with
introduce _ ==> _ with _.
is_gcd_for_euclid u3' v3' q d;
let r = egcd a b u1 u2 u3 v1 v2 v3 in
r
end
let euclid_gcd a b =
if b >= 0 then
egcd a b 1 0 a 0 1 b
else (
introduce forall d. is_gcd a (-b) d ==> is_gcd a b d
with introduce _ ==> _
with _pf.
(is_gcd_minus a b d;
is_gcd_symmetric b a d);
let res = egcd a b 1 0 a 0 (-1) (-b) in
let _, _, d = res in
assert (is_gcd a b d);
res
)
val is_gcd_prime_aux (p:int) (a:pos{a < p}) (d:int) : Lemma
(requires is_prime p /\ d `divides` p /\ d `divides` a)
(ensures d = 1 \/ d = -1)
let is_gcd_prime_aux p a d = ()
val is_gcd_prime (p:int{is_prime p}) (a:pos{a < p}) : Lemma (is_gcd p a 1)
let is_gcd_prime p a =
Classical.forall_intro_2 (Classical.move_requires_2 divides_minus);
Classical.forall_intro (Classical.move_requires (is_gcd_prime_aux p a));
assert (forall x. x `divides` p /\ x `divides` a ==> x = 1 \/ x = -1 /\ x `divides` 1) | false | false | FStar.Math.Euclid.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val bezout_prime (p:int) (a:pos{a < p}) : Pure (int & int)
(requires is_prime p)
(ensures fun (r, s) -> r * p + s * a = 1) | [] | FStar.Math.Euclid.bezout_prime | {
"file_name": "ulib/FStar.Math.Euclid.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | p: Prims.int -> a: Prims.pos{a < p} -> Prims.Pure (Prims.int * Prims.int) | {
"end_col": 32,
"end_line": 218,
"start_col": 22,
"start_line": 210
} |
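A purely arithmetic sanity check of the shape bezout_prime promises, for the illustrative instance p = 7, a = 3: one valid coefficient pair is r = -2, s = 5, and s is then an inverse of a modulo p since 5 * 3 = 15 = 2 * 7 + 1. Same hypothetical module as the first sketch; this does not call bezout_prime, it only checks the identity its postcondition asserts for that particular pair.

(* Bezout identity instance: (-2) * 7 + 5 * 3 = -14 + 15 = 1. *)
let bezout_instance_7_3 () : Lemma ((-2) * 7 + 5 * 3 = 1) = ()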
FStar.Pervasives.Lemma | val is_gcd_prime (p:int{is_prime p}) (a:pos{a < p}) : Lemma (is_gcd p a 1) | [
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let is_gcd_prime p a =
Classical.forall_intro_2 (Classical.move_requires_2 divides_minus);
Classical.forall_intro (Classical.move_requires (is_gcd_prime_aux p a));
assert (forall x. x `divides` p /\ x `divides` a ==> x = 1 \/ x = -1 /\ x `divides` 1) | val is_gcd_prime (p:int{is_prime p}) (a:pos{a < p}) : Lemma (is_gcd p a 1)
let is_gcd_prime p a = | false | null | true | Classical.forall_intro_2 (Classical.move_requires_2 divides_minus);
Classical.forall_intro (Classical.move_requires (is_gcd_prime_aux p a));
assert (forall x. x `divides` p /\ x `divides` a ==> x = 1 \/ x = - 1 /\ x `divides` 1) | {
"checked_file": "FStar.Math.Euclid.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.Tactics.Effect.fsti.checked",
"FStar.Tactics.Canon.fst.checked",
"FStar.Squash.fsti.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Classical.Sugar.fsti.checked",
"FStar.Classical.fsti.checked",
"FStar.Calc.fsti.checked"
],
"interface_file": true,
"source_file": "FStar.Math.Euclid.fst"
} | [
"lemma"
] | [
"Prims.int",
"FStar.Math.Euclid.is_prime",
"Prims.pos",
"Prims.b2t",
"Prims.op_LessThan",
"Prims._assert",
"Prims.l_Forall",
"Prims.l_imp",
"Prims.l_and",
"FStar.Math.Euclid.divides",
"Prims.l_or",
"Prims.op_Equality",
"Prims.op_Minus",
"Prims.unit",
"FStar.Classical.forall_intro",
"FStar.Classical.move_requires",
"FStar.Math.Euclid.is_gcd_prime_aux",
"FStar.Classical.forall_intro_2",
"FStar.Classical.move_requires_2",
"FStar.Math.Euclid.divides_minus"
] | [] | module FStar.Math.Euclid
open FStar.Mul
open FStar.Math.Lemmas
///
/// Auxiliary lemmas
///
val eq_mult_left (a b:int) : Lemma (requires a = b * a) (ensures a = 0 \/ b = 1)
let eq_mult_left a b = ()
val eq_mult_one (a b:int) : Lemma
(requires a * b = 1)
(ensures (a = 1 /\ b = 1) \/ (a = -1 /\ b = -1))
let eq_mult_one a b = ()
val opp_idempotent (a:int) : Lemma (-(-a) == a)
let opp_idempotent a = ()
val add_sub_l (a b:int) : Lemma (a - b + b = a)
let add_sub_l a b = ()
val add_sub_r (a b:int) : Lemma (a + b - b = a)
let add_sub_r a b = ()
///
/// Divides relation
///
let divides_reflexive a =
Classical.exists_intro (fun q -> a = q * a) 1
let divides_transitive a b c =
eliminate exists q1. b == q1 * a
returns a `divides` c
with _pf.
eliminate exists q2. c == q2 * b
returns _
with _pf2.
introduce exists q. c == q * a
with (q1 * q2)
and ()
let divide_antisym a b =
if a <> 0 then
Classical.exists_elim (a = b \/ a = -b) (Squash.get_proof (exists q1. b = q1 * a))
(fun q1 ->
Classical.exists_elim (a = b \/ a = -b) (Squash.get_proof (exists q2. a = q2 * b))
(fun q2 ->
assert (b = q1 * a);
assert (a = q2 * b);
assert (b = q1 * (q2 * b));
paren_mul_right q1 q2 b;
eq_mult_left b (q1 * q2);
eq_mult_one q1 q2))
let divides_0 a =
Classical.exists_intro (fun q -> 0 = q * a) 0
let divides_1 a = ()
let divides_minus a b =
Classical.exists_elim (a `divides` (-b))
(Squash.get_proof (a `divides` b))
(fun q -> Classical.exists_intro (fun q' -> -b = q' * a) (-q))
let divides_opp a b =
Classical.exists_elim ((-a) `divides` b)
(Squash.get_proof (a `divides` b))
(fun q -> Classical.exists_intro (fun q' -> b = q' * (-a)) (-q))
let divides_plus a b d =
Classical.exists_elim (d `divides` (a + b)) (Squash.get_proof (exists q1. a = q1 * d))
(fun q1 ->
Classical.exists_elim (d `divides` (a + b)) (Squash.get_proof (exists q2. b = q2 * d))
(fun q2 ->
assert (a + b = q1 * d + q2 * d);
distributivity_add_left q1 q2 d;
Classical.exists_intro (fun q -> a + b = q * d) (q1 + q2)))
let divides_sub a b d =
Classical.forall_intro_2 (Classical.move_requires_2 divides_minus);
divides_plus a (-b) d
let divides_mult_right a b d =
Classical.exists_elim (d `divides` (a * b)) (Squash.get_proof (d `divides` b))
(fun q ->
paren_mul_right a q d;
Classical.exists_intro (fun r -> a * b = r * d) (a * q))
///
/// GCD
///
let mod_divides a b =
Classical.exists_intro (fun q -> a = q * b) (a / b)
let divides_mod a b =
Classical.exists_elim (a % b = 0) (Squash.get_proof (b `divides` a))
(fun q -> cancel_mul_div q b)
let is_gcd_unique a b c d =
divide_antisym c d
let is_gcd_reflexive a = ()
let is_gcd_symmetric a b d = ()
let is_gcd_0 a = ()
let is_gcd_1 a = ()
let is_gcd_minus a b d =
Classical.forall_intro_2 (Classical.move_requires_2 divides_minus);
opp_idempotent b
let is_gcd_opp a b d =
Classical.forall_intro_2 (Classical.move_requires_2 divides_minus);
divides_opp d a;
divides_opp d b
let is_gcd_plus a b q d =
add_sub_r b (q * a);
Classical.forall_intro_3 (Classical.move_requires_3 divides_plus);
Classical.forall_intro_3 (Classical.move_requires_3 divides_mult_right);
Classical.forall_intro_3 (Classical.move_requires_3 divides_sub)
///
/// Extended Euclidean algorithm
///
val is_gcd_for_euclid (a b q d:int) : Lemma
(requires is_gcd b (a - q * b) d)
(ensures is_gcd a b d)
let is_gcd_for_euclid a b q d =
add_sub_l a (q * b);
is_gcd_plus b (a - q * b) q d
val egcd (a b u1 u2 u3 v1 v2 v3:int) : Pure (int & int & int)
(requires v3 >= 0 /\
u1 * a + u2 * b = u3 /\
v1 * a + v2 * b = v3 /\
(forall d. is_gcd u3 v3 d ==> is_gcd a b d))
(ensures (fun (u, v, d) -> u * a + v * b = d /\ is_gcd a b d))
(decreases v3)
let rec egcd a b u1 u2 u3 v1 v2 v3 =
if v3 = 0 then
begin
divides_0 u3;
(u1, u2, u3)
end
else
begin
let q = u3 / v3 in
euclidean_division_definition u3 v3;
assert (u3 - q * v3 = (q * v3 + u3 % v3) - q * v3);
assert (q * v3 - q * v3 = 0);
swap_add_plus_minus (q * v3) (u3 % v3) (q * v3);
calc (==) {
(u1 - q * v1) * a + (u2 - q * v2) * b;
== { _ by (FStar.Tactics.Canon.canon()) }
(u1 * a + u2 * b) - q * (v1 * a + v2 * b);
== { }
u3 - q * v3;
== { lemma_div_mod u3 v3 }
u3 % v3;
};
let u1, v1 = v1, u1 - q * v1 in
let u2, v2 = v2, u2 - q * v2 in
let u3' = u3 in
let v3' = v3 in
let u3, v3 = v3, u3 - q * v3 in
(* proving the implication in the precondition *)
introduce forall d. is_gcd v3' (u3' - q * v3') d ==> is_gcd u3' v3' d with
introduce _ ==> _ with _.
is_gcd_for_euclid u3' v3' q d;
let r = egcd a b u1 u2 u3 v1 v2 v3 in
r
end
let euclid_gcd a b =
if b >= 0 then
egcd a b 1 0 a 0 1 b
else (
introduce forall d. is_gcd a (-b) d ==> is_gcd a b d
with introduce _ ==> _
with _pf.
(is_gcd_minus a b d;
is_gcd_symmetric b a d);
let res = egcd a b 1 0 a 0 (-1) (-b) in
let _, _, d = res in
assert (is_gcd a b d);
res
)
val is_gcd_prime_aux (p:int) (a:pos{a < p}) (d:int) : Lemma
(requires is_prime p /\ d `divides` p /\ d `divides` a)
(ensures d = 1 \/ d = -1)
let is_gcd_prime_aux p a d = ()
val is_gcd_prime (p:int{is_prime p}) (a:pos{a < p}) : Lemma (is_gcd p a 1) | false | false | FStar.Math.Euclid.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val is_gcd_prime (p:int{is_prime p}) (a:pos{a < p}) : Lemma (is_gcd p a 1) | [] | FStar.Math.Euclid.is_gcd_prime | {
"file_name": "ulib/FStar.Math.Euclid.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | p: Prims.int{FStar.Math.Euclid.is_prime p} -> a: Prims.pos{a < p}
-> FStar.Pervasives.Lemma (ensures FStar.Math.Euclid.is_gcd p a 1) | {
"end_col": 88,
"end_line": 208,
"start_col": 2,
"start_line": 206
} |
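The is_gcd_prime lemma in the row above states that a prime p is coprime to every a with 0 < a < p. A purely illustrative instance (not part of the source development): for p = 7 and a = 4, the only integers dividing both 7 and 4 are 1 and -1, so is_gcd 7 4 1 holds.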
FStar.Pervasives.Lemma | val euclid_prime (p:int{is_prime p}) (a b:int) : Lemma
(requires (a * b) % p = 0)
(ensures a % p = 0 \/ b % p = 0) | [
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let euclid_prime p a b =
let ra, sa, da = euclid_gcd p a in
let rb, sb, db = euclid_gcd p b in
assert (is_gcd p a da);
assert (is_gcd p b db);
assert (da `divides` p);
assert (da = 1 \/ da = -1 \/ da = p \/ da = -p);
if da = 1 then
euclid p a b ra sa
else if da = -1 then
begin
assert ((-ra) * p + (-sa) * a == -(ra * p + sa * a)) by (FStar.Tactics.Canon.canon());
euclid p a b (-ra) (-sa)
end
else if da = p then
divides_mod a p
else
begin
opp_idempotent p;
divides_opp (-p) a;
divides_mod a p
end | val euclid_prime (p:int{is_prime p}) (a b:int) : Lemma
(requires (a * b) % p = 0)
(ensures a % p = 0 \/ b % p = 0)
let euclid_prime p a b = | false | null | true | let ra, sa, da = euclid_gcd p a in
let rb, sb, db = euclid_gcd p b in
assert (is_gcd p a da);
assert (is_gcd p b db);
assert (da `divides` p);
assert (da = 1 \/ da = - 1 \/ da = p \/ da = - p);
if da = 1
then euclid p a b ra sa
else
if da = - 1
then
(FStar.Tactics.Effect.assert_by_tactic ((- ra) * p + (- sa) * a == - (ra * p + sa * a))
(fun _ ->
();
(FStar.Tactics.Canon.canon ()));
euclid p a b (- ra) (- sa))
else
if da = p
then divides_mod a p
else
(opp_idempotent p;
divides_opp (- p) a;
divides_mod a p) | {
"checked_file": "FStar.Math.Euclid.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.Tactics.Effect.fsti.checked",
"FStar.Tactics.Canon.fst.checked",
"FStar.Squash.fsti.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Classical.Sugar.fsti.checked",
"FStar.Classical.fsti.checked",
"FStar.Calc.fsti.checked"
],
"interface_file": true,
"source_file": "FStar.Math.Euclid.fst"
} | [
"lemma"
] | [
"Prims.int",
"FStar.Math.Euclid.is_prime",
"Prims.op_Equality",
"FStar.Math.Euclid.euclid",
"Prims.bool",
"Prims.op_Minus",
"Prims.unit",
"FStar.Tactics.Effect.assert_by_tactic",
"Prims.eq2",
"Prims.op_Addition",
"FStar.Mul.op_Star",
"FStar.Tactics.Canon.canon",
"FStar.Math.Euclid.divides_mod",
"FStar.Math.Euclid.divides_opp",
"FStar.Math.Euclid.opp_idempotent",
"Prims._assert",
"Prims.l_or",
"Prims.b2t",
"FStar.Math.Euclid.divides",
"FStar.Math.Euclid.is_gcd",
"FStar.Pervasives.Native.tuple3",
"FStar.Math.Euclid.euclid_gcd"
] | [] | module FStar.Math.Euclid
open FStar.Mul
open FStar.Math.Lemmas
///
/// Auxiliary lemmas
///
val eq_mult_left (a b:int) : Lemma (requires a = b * a) (ensures a = 0 \/ b = 1)
let eq_mult_left a b = ()
val eq_mult_one (a b:int) : Lemma
(requires a * b = 1)
(ensures (a = 1 /\ b = 1) \/ (a = -1 /\ b = -1))
let eq_mult_one a b = ()
val opp_idempotent (a:int) : Lemma (-(-a) == a)
let opp_idempotent a = ()
val add_sub_l (a b:int) : Lemma (a - b + b = a)
let add_sub_l a b = ()
val add_sub_r (a b:int) : Lemma (a + b - b = a)
let add_sub_r a b = ()
///
/// Divides relation
///
let divides_reflexive a =
Classical.exists_intro (fun q -> a = q * a) 1
let divides_transitive a b c =
eliminate exists q1. b == q1 * a
returns a `divides` c
with _pf.
eliminate exists q2. c == q2 * b
returns _
with _pf2.
introduce exists q. c == q * a
with (q1 * q2)
and ()
let divide_antisym a b =
if a <> 0 then
Classical.exists_elim (a = b \/ a = -b) (Squash.get_proof (exists q1. b = q1 * a))
(fun q1 ->
Classical.exists_elim (a = b \/ a = -b) (Squash.get_proof (exists q2. a = q2 * b))
(fun q2 ->
assert (b = q1 * a);
assert (a = q2 * b);
assert (b = q1 * (q2 * b));
paren_mul_right q1 q2 b;
eq_mult_left b (q1 * q2);
eq_mult_one q1 q2))
let divides_0 a =
Classical.exists_intro (fun q -> 0 = q * a) 0
let divides_1 a = ()
let divides_minus a b =
Classical.exists_elim (a `divides` (-b))
(Squash.get_proof (a `divides` b))
(fun q -> Classical.exists_intro (fun q' -> -b = q' * a) (-q))
let divides_opp a b =
Classical.exists_elim ((-a) `divides` b)
(Squash.get_proof (a `divides` b))
(fun q -> Classical.exists_intro (fun q' -> b = q' * (-a)) (-q))
let divides_plus a b d =
Classical.exists_elim (d `divides` (a + b)) (Squash.get_proof (exists q1. a = q1 * d))
(fun q1 ->
Classical.exists_elim (d `divides` (a + b)) (Squash.get_proof (exists q2. b = q2 * d))
(fun q2 ->
assert (a + b = q1 * d + q2 * d);
distributivity_add_left q1 q2 d;
Classical.exists_intro (fun q -> a + b = q * d) (q1 + q2)))
let divides_sub a b d =
Classical.forall_intro_2 (Classical.move_requires_2 divides_minus);
divides_plus a (-b) d
let divides_mult_right a b d =
Classical.exists_elim (d `divides` (a * b)) (Squash.get_proof (d `divides` b))
(fun q ->
paren_mul_right a q d;
Classical.exists_intro (fun r -> a * b = r * d) (a * q))
///
/// GCD
///
let mod_divides a b =
Classical.exists_intro (fun q -> a = q * b) (a / b)
let divides_mod a b =
Classical.exists_elim (a % b = 0) (Squash.get_proof (b `divides` a))
(fun q -> cancel_mul_div q b)
let is_gcd_unique a b c d =
divide_antisym c d
let is_gcd_reflexive a = ()
let is_gcd_symmetric a b d = ()
let is_gcd_0 a = ()
let is_gcd_1 a = ()
let is_gcd_minus a b d =
Classical.forall_intro_2 (Classical.move_requires_2 divides_minus);
opp_idempotent b
let is_gcd_opp a b d =
Classical.forall_intro_2 (Classical.move_requires_2 divides_minus);
divides_opp d a;
divides_opp d b
let is_gcd_plus a b q d =
add_sub_r b (q * a);
Classical.forall_intro_3 (Classical.move_requires_3 divides_plus);
Classical.forall_intro_3 (Classical.move_requires_3 divides_mult_right);
Classical.forall_intro_3 (Classical.move_requires_3 divides_sub)
///
/// Extended Euclidean algorithm
///
val is_gcd_for_euclid (a b q d:int) : Lemma
(requires is_gcd b (a - q * b) d)
(ensures is_gcd a b d)
let is_gcd_for_euclid a b q d =
add_sub_l a (q * b);
is_gcd_plus b (a - q * b) q d
val egcd (a b u1 u2 u3 v1 v2 v3:int) : Pure (int & int & int)
(requires v3 >= 0 /\
u1 * a + u2 * b = u3 /\
v1 * a + v2 * b = v3 /\
(forall d. is_gcd u3 v3 d ==> is_gcd a b d))
(ensures (fun (u, v, d) -> u * a + v * b = d /\ is_gcd a b d))
(decreases v3)
let rec egcd a b u1 u2 u3 v1 v2 v3 =
if v3 = 0 then
begin
divides_0 u3;
(u1, u2, u3)
end
else
begin
let q = u3 / v3 in
euclidean_division_definition u3 v3;
assert (u3 - q * v3 = (q * v3 + u3 % v3) - q * v3);
assert (q * v3 - q * v3 = 0);
swap_add_plus_minus (q * v3) (u3 % v3) (q * v3);
calc (==) {
(u1 - q * v1) * a + (u2 - q * v2) * b;
== { _ by (FStar.Tactics.Canon.canon()) }
(u1 * a + u2 * b) - q * (v1 * a + v2 * b);
== { }
u3 - q * v3;
== { lemma_div_mod u3 v3 }
u3 % v3;
};
let u1, v1 = v1, u1 - q * v1 in
let u2, v2 = v2, u2 - q * v2 in
let u3' = u3 in
let v3' = v3 in
let u3, v3 = v3, u3 - q * v3 in
(* proving the implication in the precondition *)
introduce forall d. is_gcd v3' (u3' - q * v3') d ==> is_gcd u3' v3' d with
introduce _ ==> _ with _.
is_gcd_for_euclid u3' v3' q d;
let r = egcd a b u1 u2 u3 v1 v2 v3 in
r
end
let euclid_gcd a b =
if b >= 0 then
egcd a b 1 0 a 0 1 b
else (
introduce forall d. is_gcd a (-b) d ==> is_gcd a b d
with introduce _ ==> _
with _pf.
(is_gcd_minus a b d;
is_gcd_symmetric b a d);
let res = egcd a b 1 0 a 0 (-1) (-b) in
let _, _, d = res in
assert (is_gcd a b d);
res
)
val is_gcd_prime_aux (p:int) (a:pos{a < p}) (d:int) : Lemma
(requires is_prime p /\ d `divides` p /\ d `divides` a)
(ensures d = 1 \/ d = -1)
let is_gcd_prime_aux p a d = ()
val is_gcd_prime (p:int{is_prime p}) (a:pos{a < p}) : Lemma (is_gcd p a 1)
let is_gcd_prime p a =
Classical.forall_intro_2 (Classical.move_requires_2 divides_minus);
Classical.forall_intro (Classical.move_requires (is_gcd_prime_aux p a));
assert (forall x. x `divides` p /\ x `divides` a ==> x = 1 \/ x = -1 /\ x `divides` 1)
let bezout_prime p a =
let r, s, d = euclid_gcd p a in
assert (r * p + s * a = d);
assert (is_gcd p a d);
is_gcd_prime p a;
is_gcd_unique p a 1 d;
assert (d = 1 \/ d = -1);
assert ((-r) * p + (-s) * a == -(r * p + s * a)) by (FStar.Tactics.Canon.canon());
if d = 1 then r, s else -r, -s
let euclid n a b r s =
let open FStar.Math.Lemmas in
calc (==) {
b % n;
== { distributivity_add_left (r * n) (s * a) b }
(r * n * b + s * a * b) % n;
== { paren_mul_right s a b }
(r * n * b + s * (a * b)) % n;
== { modulo_distributivity (r * n * b) (s * (a * b)) n }
((r * n * b) % n + s * (a * b) % n) % n;
== { lemma_mod_mul_distr_r s (a * b) n }
((r * n * b) % n + s * ((a * b) % n) % n) % n;
== { assert (a * b % n = 0) }
((r * n * b) % n + s * 0 % n) % n;
== { assert (s * 0 == 0) }
((r * n * b) % n + 0 % n) % n;
== { modulo_lemma 0 n }
((r * n * b) % n) % n;
== { lemma_mod_twice (r * n * b) n }
(r * n * b) % n;
== { _ by (FStar.Tactics.Canon.canon ()) }
(n * (r * b)) % n;
== { lemma_mod_mul_distr_l n (r * b) n}
n % n * (r * b) % n;
== { assert (n % n = 0) }
(0 * (r * b)) % n;
== { assert (0 * (r * b) == 0) }
0 % n;
== { small_mod 0 n }
0;
} | false | false | FStar.Math.Euclid.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val euclid_prime (p:int{is_prime p}) (a b:int) : Lemma
(requires (a * b) % p = 0)
(ensures a % p = 0 \/ b % p = 0) | [] | FStar.Math.Euclid.euclid_prime | {
"file_name": "ulib/FStar.Math.Euclid.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | p: Prims.int{FStar.Math.Euclid.is_prime p} -> a: Prims.int -> b: Prims.int
-> FStar.Pervasives.Lemma (requires a * b % p = 0) (ensures a % p = 0 \/ b % p = 0) | {
"end_col": 7,
"end_line": 273,
"start_col": 24,
"start_line": 252
} |
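euclid_prime in the row above is Euclid's lemma: if a prime p divides a product a * b, it divides at least one of the factors. A small hypothetical instance (chosen here, not taken from the dataset): with p = 5, a = 4, b = 10 we have (4 * 10) % 5 = 40 % 5 = 0, and the conclusion holds since 10 % 5 = 0.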
Prims.Pure | val egcd (a b u1 u2 u3 v1 v2 v3:int) : Pure (int & int & int)
(requires v3 >= 0 /\
u1 * a + u2 * b = u3 /\
v1 * a + v2 * b = v3 /\
(forall d. is_gcd u3 v3 d ==> is_gcd a b d))
(ensures (fun (u, v, d) -> u * a + v * b = d /\ is_gcd a b d))
(decreases v3) | [
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let rec egcd a b u1 u2 u3 v1 v2 v3 =
if v3 = 0 then
begin
divides_0 u3;
(u1, u2, u3)
end
else
begin
let q = u3 / v3 in
euclidean_division_definition u3 v3;
assert (u3 - q * v3 = (q * v3 + u3 % v3) - q * v3);
assert (q * v3 - q * v3 = 0);
swap_add_plus_minus (q * v3) (u3 % v3) (q * v3);
calc (==) {
(u1 - q * v1) * a + (u2 - q * v2) * b;
== { _ by (FStar.Tactics.Canon.canon()) }
(u1 * a + u2 * b) - q * (v1 * a + v2 * b);
== { }
u3 - q * v3;
== { lemma_div_mod u3 v3 }
u3 % v3;
};
let u1, v1 = v1, u1 - q * v1 in
let u2, v2 = v2, u2 - q * v2 in
let u3' = u3 in
let v3' = v3 in
let u3, v3 = v3, u3 - q * v3 in
(* proving the implication in the precondition *)
introduce forall d. is_gcd v3' (u3' - q * v3') d ==> is_gcd u3' v3' d with
introduce _ ==> _ with _.
is_gcd_for_euclid u3' v3' q d;
let r = egcd a b u1 u2 u3 v1 v2 v3 in
r
end | val egcd (a b u1 u2 u3 v1 v2 v3:int) : Pure (int & int & int)
(requires v3 >= 0 /\
u1 * a + u2 * b = u3 /\
v1 * a + v2 * b = v3 /\
(forall d. is_gcd u3 v3 d ==> is_gcd a b d))
(ensures (fun (u, v, d) -> u * a + v * b = d /\ is_gcd a b d))
(decreases v3)
let rec egcd a b u1 u2 u3 v1 v2 v3 = | false | null | false | if v3 = 0
then
(divides_0 u3;
(u1, u2, u3))
else
let q = u3 / v3 in
euclidean_division_definition u3 v3;
assert (u3 - q * v3 = (q * v3 + u3 % v3) - q * v3);
assert (q * v3 - q * v3 = 0);
swap_add_plus_minus (q * v3) (u3 % v3) (q * v3);
calc ( == ) {
(u1 - q * v1) * a + (u2 - q * v2) * b;
( == ) { FStar.Tactics.Effect.synth_by_tactic (fun _ -> (FStar.Tactics.Canon.canon ())) }
(u1 * a + u2 * b) - q * (v1 * a + v2 * b);
( == ) { () }
u3 - q * v3;
( == ) { lemma_div_mod u3 v3 }
u3 % v3;
};
let u1, v1 = v1, u1 - q * v1 in
let u2, v2 = v2, u2 - q * v2 in
let u3' = u3 in
let v3' = v3 in
let u3, v3 = v3, u3 - q * v3 in
introduce forall d . is_gcd v3' (u3' - q * v3') d ==> is_gcd u3' v3' d
with introduce _ ==> _
with _. is_gcd_for_euclid u3' v3' q d;
let r = egcd a b u1 u2 u3 v1 v2 v3 in
r | {
"checked_file": "FStar.Math.Euclid.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.Tactics.Effect.fsti.checked",
"FStar.Tactics.Canon.fst.checked",
"FStar.Squash.fsti.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Classical.Sugar.fsti.checked",
"FStar.Classical.fsti.checked",
"FStar.Calc.fsti.checked"
],
"interface_file": true,
"source_file": "FStar.Math.Euclid.fst"
} | [
""
] | [
"Prims.int",
"Prims.op_Equality",
"FStar.Pervasives.Native.Mktuple3",
"Prims.unit",
"FStar.Math.Euclid.divides_0",
"Prims.bool",
"Prims.precedes",
"FStar.Pervasives.Native.tuple3",
"FStar.Math.Euclid.egcd",
"FStar.Classical.Sugar.forall_intro",
"Prims.l_imp",
"FStar.Math.Euclid.is_gcd",
"Prims.op_Subtraction",
"FStar.Mul.op_Star",
"FStar.Classical.Sugar.implies_intro",
"Prims.squash",
"FStar.Math.Euclid.is_gcd_for_euclid",
"FStar.Pervasives.Native.tuple2",
"FStar.Pervasives.Native.Mktuple2",
"FStar.Calc.calc_finish",
"Prims.eq2",
"Prims.op_Addition",
"Prims.op_Modulus",
"Prims.Cons",
"FStar.Preorder.relation",
"Prims.Nil",
"FStar.Calc.calc_step",
"FStar.Calc.calc_init",
"FStar.Calc.calc_pack",
"FStar.Math.Lemmas.lemma_div_mod",
"FStar.Math.Lemmas.swap_add_plus_minus",
"Prims._assert",
"Prims.b2t",
"FStar.Math.Lemmas.euclidean_division_definition",
"Prims.op_Division"
] | [] | module FStar.Math.Euclid
open FStar.Mul
open FStar.Math.Lemmas
///
/// Auxiliary lemmas
///
val eq_mult_left (a b:int) : Lemma (requires a = b * a) (ensures a = 0 \/ b = 1)
let eq_mult_left a b = ()
val eq_mult_one (a b:int) : Lemma
(requires a * b = 1)
(ensures (a = 1 /\ b = 1) \/ (a = -1 /\ b = -1))
let eq_mult_one a b = ()
val opp_idempotent (a:int) : Lemma (-(-a) == a)
let opp_idempotent a = ()
val add_sub_l (a b:int) : Lemma (a - b + b = a)
let add_sub_l a b = ()
val add_sub_r (a b:int) : Lemma (a + b - b = a)
let add_sub_r a b = ()
///
/// Divides relation
///
let divides_reflexive a =
Classical.exists_intro (fun q -> a = q * a) 1
let divides_transitive a b c =
eliminate exists q1. b == q1 * a
returns a `divides` c
with _pf.
eliminate exists q2. c == q2 * b
returns _
with _pf2.
introduce exists q. c == q * a
with (q1 * q2)
and ()
let divide_antisym a b =
if a <> 0 then
Classical.exists_elim (a = b \/ a = -b) (Squash.get_proof (exists q1. b = q1 * a))
(fun q1 ->
Classical.exists_elim (a = b \/ a = -b) (Squash.get_proof (exists q2. a = q2 * b))
(fun q2 ->
assert (b = q1 * a);
assert (a = q2 * b);
assert (b = q1 * (q2 * b));
paren_mul_right q1 q2 b;
eq_mult_left b (q1 * q2);
eq_mult_one q1 q2))
let divides_0 a =
Classical.exists_intro (fun q -> 0 = q * a) 0
let divides_1 a = ()
let divides_minus a b =
Classical.exists_elim (a `divides` (-b))
(Squash.get_proof (a `divides` b))
(fun q -> Classical.exists_intro (fun q' -> -b = q' * a) (-q))
let divides_opp a b =
Classical.exists_elim ((-a) `divides` b)
(Squash.get_proof (a `divides` b))
(fun q -> Classical.exists_intro (fun q' -> b = q' * (-a)) (-q))
let divides_plus a b d =
Classical.exists_elim (d `divides` (a + b)) (Squash.get_proof (exists q1. a = q1 * d))
(fun q1 ->
Classical.exists_elim (d `divides` (a + b)) (Squash.get_proof (exists q2. b = q2 * d))
(fun q2 ->
assert (a + b = q1 * d + q2 * d);
distributivity_add_left q1 q2 d;
Classical.exists_intro (fun q -> a + b = q * d) (q1 + q2)))
let divides_sub a b d =
Classical.forall_intro_2 (Classical.move_requires_2 divides_minus);
divides_plus a (-b) d
let divides_mult_right a b d =
Classical.exists_elim (d `divides` (a * b)) (Squash.get_proof (d `divides` b))
(fun q ->
paren_mul_right a q d;
Classical.exists_intro (fun r -> a * b = r * d) (a * q))
///
/// GCD
///
let mod_divides a b =
Classical.exists_intro (fun q -> a = q * b) (a / b)
let divides_mod a b =
Classical.exists_elim (a % b = 0) (Squash.get_proof (b `divides` a))
(fun q -> cancel_mul_div q b)
let is_gcd_unique a b c d =
divide_antisym c d
let is_gcd_reflexive a = ()
let is_gcd_symmetric a b d = ()
let is_gcd_0 a = ()
let is_gcd_1 a = ()
let is_gcd_minus a b d =
Classical.forall_intro_2 (Classical.move_requires_2 divides_minus);
opp_idempotent b
let is_gcd_opp a b d =
Classical.forall_intro_2 (Classical.move_requires_2 divides_minus);
divides_opp d a;
divides_opp d b
let is_gcd_plus a b q d =
add_sub_r b (q * a);
Classical.forall_intro_3 (Classical.move_requires_3 divides_plus);
Classical.forall_intro_3 (Classical.move_requires_3 divides_mult_right);
Classical.forall_intro_3 (Classical.move_requires_3 divides_sub)
///
/// Extended Euclidean algorithm
///
val is_gcd_for_euclid (a b q d:int) : Lemma
(requires is_gcd b (a - q * b) d)
(ensures is_gcd a b d)
let is_gcd_for_euclid a b q d =
add_sub_l a (q * b);
is_gcd_plus b (a - q * b) q d
val egcd (a b u1 u2 u3 v1 v2 v3:int) : Pure (int & int & int)
(requires v3 >= 0 /\
u1 * a + u2 * b = u3 /\
v1 * a + v2 * b = v3 /\
(forall d. is_gcd u3 v3 d ==> is_gcd a b d))
(ensures (fun (u, v, d) -> u * a + v * b = d /\ is_gcd a b d))
(decreases v3) | false | false | FStar.Math.Euclid.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val egcd (a b u1 u2 u3 v1 v2 v3:int) : Pure (int & int & int)
(requires v3 >= 0 /\
u1 * a + u2 * b = u3 /\
v1 * a + v2 * b = v3 /\
(forall d. is_gcd u3 v3 d ==> is_gcd a b d))
(ensures (fun (u, v, d) -> u * a + v * b = d /\ is_gcd a b d))
(decreases v3) | [
"recursion"
] | FStar.Math.Euclid.egcd | {
"file_name": "ulib/FStar.Math.Euclid.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} |
a: Prims.int ->
b: Prims.int ->
u1: Prims.int ->
u2: Prims.int ->
u3: Prims.int ->
v1: Prims.int ->
v2: Prims.int ->
v3: Prims.int
-> Prims.Pure ((Prims.int * Prims.int) * Prims.int) | {
"end_col": 7,
"end_line": 182,
"start_col": 2,
"start_line": 150
} |
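The egcd definition in the row above runs Euclid's algorithm on (u3, v3) while maintaining the invariants u1 * a + u2 * b = u3 and v1 * a + v2 * b = v3, so the returned triple (u, v, d) satisfies u * a + v * b = d with d = gcd(a, b). A hand-worked instance on hypothetical inputs (which coefficient pair the function actually returns is not claimed): for a = 240 and b = 46, one valid result gives
  (-9) * 240 + 47 * 46 = -2160 + 2162 = 2 = gcd(240, 46).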
FStar.Pervasives.Lemma | val euclid (n:pos) (a b r s:int) : Lemma
(requires (a * b) % n = 0 /\ r * n + s * a = 1)
(ensures b % n = 0) | [
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let euclid n a b r s =
let open FStar.Math.Lemmas in
calc (==) {
b % n;
== { distributivity_add_left (r * n) (s * a) b }
(r * n * b + s * a * b) % n;
== { paren_mul_right s a b }
(r * n * b + s * (a * b)) % n;
== { modulo_distributivity (r * n * b) (s * (a * b)) n }
((r * n * b) % n + s * (a * b) % n) % n;
== { lemma_mod_mul_distr_r s (a * b) n }
((r * n * b) % n + s * ((a * b) % n) % n) % n;
== { assert (a * b % n = 0) }
((r * n * b) % n + s * 0 % n) % n;
== { assert (s * 0 == 0) }
((r * n * b) % n + 0 % n) % n;
== { modulo_lemma 0 n }
((r * n * b) % n) % n;
== { lemma_mod_twice (r * n * b) n }
(r * n * b) % n;
== { _ by (FStar.Tactics.Canon.canon ()) }
(n * (r * b)) % n;
== { lemma_mod_mul_distr_l n (r * b) n}
n % n * (r * b) % n;
== { assert (n % n = 0) }
(0 * (r * b)) % n;
== { assert (0 * (r * b) == 0) }
0 % n;
== { small_mod 0 n }
0;
} | val euclid (n:pos) (a b r s:int) : Lemma
(requires (a * b) % n = 0 /\ r * n + s * a = 1)
(ensures b % n = 0)
let euclid n a b r s = | false | null | true | let open FStar.Math.Lemmas in
calc ( == ) {
b % n;
( == ) { distributivity_add_left (r * n) (s * a) b }
((r * n) * b + (s * a) * b) % n;
( == ) { paren_mul_right s a b }
((r * n) * b + s * (a * b)) % n;
( == ) { modulo_distributivity ((r * n) * b) (s * (a * b)) n }
(((r * n) * b) % n + s * (a * b) % n) % n;
( == ) { lemma_mod_mul_distr_r s (a * b) n }
(((r * n) * b) % n + s * ((a * b) % n) % n) % n;
( == ) { assert (a * b % n = 0) }
(((r * n) * b) % n + s * 0 % n) % n;
( == ) { assert (s * 0 == 0) }
(((r * n) * b) % n + 0 % n) % n;
( == ) { modulo_lemma 0 n }
(((r * n) * b) % n) % n;
( == ) { lemma_mod_twice ((r * n) * b) n }
((r * n) * b) % n;
( == ) { FStar.Tactics.Effect.synth_by_tactic (fun _ -> (FStar.Tactics.Canon.canon ())) }
(n * (r * b)) % n;
( == ) { lemma_mod_mul_distr_l n (r * b) n }
(n % n) * (r * b) % n;
( == ) { assert (n % n = 0) }
(0 * (r * b)) % n;
( == ) { assert (0 * (r * b) == 0) }
0 % n;
( == ) { small_mod 0 n }
0;
} | {
"checked_file": "FStar.Math.Euclid.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.Tactics.Effect.fsti.checked",
"FStar.Tactics.Canon.fst.checked",
"FStar.Squash.fsti.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Classical.Sugar.fsti.checked",
"FStar.Classical.fsti.checked",
"FStar.Calc.fsti.checked"
],
"interface_file": true,
"source_file": "FStar.Math.Euclid.fst"
} | [
"lemma"
] | [
"Prims.pos",
"Prims.int",
"FStar.Calc.calc_finish",
"Prims.eq2",
"Prims.op_Modulus",
"Prims.Cons",
"FStar.Preorder.relation",
"Prims.Nil",
"Prims.unit",
"FStar.Calc.calc_step",
"FStar.Mul.op_Star",
"Prims.op_Addition",
"FStar.Calc.calc_init",
"FStar.Calc.calc_pack",
"FStar.Math.Lemmas.distributivity_add_left",
"Prims.squash",
"FStar.Math.Lemmas.paren_mul_right",
"FStar.Math.Lemmas.modulo_distributivity",
"FStar.Math.Lemmas.lemma_mod_mul_distr_r",
"Prims._assert",
"Prims.b2t",
"Prims.op_Equality",
"FStar.Math.Lemmas.modulo_lemma",
"FStar.Math.Lemmas.lemma_mod_twice",
"FStar.Math.Lemmas.lemma_mod_mul_distr_l",
"FStar.Math.Lemmas.small_mod"
] | [] | module FStar.Math.Euclid
open FStar.Mul
open FStar.Math.Lemmas
///
/// Auxiliary lemmas
///
val eq_mult_left (a b:int) : Lemma (requires a = b * a) (ensures a = 0 \/ b = 1)
let eq_mult_left a b = ()
val eq_mult_one (a b:int) : Lemma
(requires a * b = 1)
(ensures (a = 1 /\ b = 1) \/ (a = -1 /\ b = -1))
let eq_mult_one a b = ()
val opp_idempotent (a:int) : Lemma (-(-a) == a)
let opp_idempotent a = ()
val add_sub_l (a b:int) : Lemma (a - b + b = a)
let add_sub_l a b = ()
val add_sub_r (a b:int) : Lemma (a + b - b = a)
let add_sub_r a b = ()
///
/// Divides relation
///
let divides_reflexive a =
Classical.exists_intro (fun q -> a = q * a) 1
let divides_transitive a b c =
eliminate exists q1. b == q1 * a
returns a `divides` c
with _pf.
eliminate exists q2. c == q2 * b
returns _
with _pf2.
introduce exists q. c == q * a
with (q1 * q2)
and ()
let divide_antisym a b =
if a <> 0 then
Classical.exists_elim (a = b \/ a = -b) (Squash.get_proof (exists q1. b = q1 * a))
(fun q1 ->
Classical.exists_elim (a = b \/ a = -b) (Squash.get_proof (exists q2. a = q2 * b))
(fun q2 ->
assert (b = q1 * a);
assert (a = q2 * b);
assert (b = q1 * (q2 * b));
paren_mul_right q1 q2 b;
eq_mult_left b (q1 * q2);
eq_mult_one q1 q2))
let divides_0 a =
Classical.exists_intro (fun q -> 0 = q * a) 0
let divides_1 a = ()
let divides_minus a b =
Classical.exists_elim (a `divides` (-b))
(Squash.get_proof (a `divides` b))
(fun q -> Classical.exists_intro (fun q' -> -b = q' * a) (-q))
let divides_opp a b =
Classical.exists_elim ((-a) `divides` b)
(Squash.get_proof (a `divides` b))
(fun q -> Classical.exists_intro (fun q' -> b = q' * (-a)) (-q))
let divides_plus a b d =
Classical.exists_elim (d `divides` (a + b)) (Squash.get_proof (exists q1. a = q1 * d))
(fun q1 ->
Classical.exists_elim (d `divides` (a + b)) (Squash.get_proof (exists q2. b = q2 * d))
(fun q2 ->
assert (a + b = q1 * d + q2 * d);
distributivity_add_left q1 q2 d;
Classical.exists_intro (fun q -> a + b = q * d) (q1 + q2)))
let divides_sub a b d =
Classical.forall_intro_2 (Classical.move_requires_2 divides_minus);
divides_plus a (-b) d
let divides_mult_right a b d =
Classical.exists_elim (d `divides` (a * b)) (Squash.get_proof (d `divides` b))
(fun q ->
paren_mul_right a q d;
Classical.exists_intro (fun r -> a * b = r * d) (a * q))
///
/// GCD
///
let mod_divides a b =
Classical.exists_intro (fun q -> a = q * b) (a / b)
let divides_mod a b =
Classical.exists_elim (a % b = 0) (Squash.get_proof (b `divides` a))
(fun q -> cancel_mul_div q b)
let is_gcd_unique a b c d =
divide_antisym c d
let is_gcd_reflexive a = ()
let is_gcd_symmetric a b d = ()
let is_gcd_0 a = ()
let is_gcd_1 a = ()
let is_gcd_minus a b d =
Classical.forall_intro_2 (Classical.move_requires_2 divides_minus);
opp_idempotent b
let is_gcd_opp a b d =
Classical.forall_intro_2 (Classical.move_requires_2 divides_minus);
divides_opp d a;
divides_opp d b
let is_gcd_plus a b q d =
add_sub_r b (q * a);
Classical.forall_intro_3 (Classical.move_requires_3 divides_plus);
Classical.forall_intro_3 (Classical.move_requires_3 divides_mult_right);
Classical.forall_intro_3 (Classical.move_requires_3 divides_sub)
///
/// Extended Euclidean algorithm
///
val is_gcd_for_euclid (a b q d:int) : Lemma
(requires is_gcd b (a - q * b) d)
(ensures is_gcd a b d)
let is_gcd_for_euclid a b q d =
add_sub_l a (q * b);
is_gcd_plus b (a - q * b) q d
val egcd (a b u1 u2 u3 v1 v2 v3:int) : Pure (int & int & int)
(requires v3 >= 0 /\
u1 * a + u2 * b = u3 /\
v1 * a + v2 * b = v3 /\
(forall d. is_gcd u3 v3 d ==> is_gcd a b d))
(ensures (fun (u, v, d) -> u * a + v * b = d /\ is_gcd a b d))
(decreases v3)
let rec egcd a b u1 u2 u3 v1 v2 v3 =
if v3 = 0 then
begin
divides_0 u3;
(u1, u2, u3)
end
else
begin
let q = u3 / v3 in
euclidean_division_definition u3 v3;
assert (u3 - q * v3 = (q * v3 + u3 % v3) - q * v3);
assert (q * v3 - q * v3 = 0);
swap_add_plus_minus (q * v3) (u3 % v3) (q * v3);
calc (==) {
(u1 - q * v1) * a + (u2 - q * v2) * b;
== { _ by (FStar.Tactics.Canon.canon()) }
(u1 * a + u2 * b) - q * (v1 * a + v2 * b);
== { }
u3 - q * v3;
== { lemma_div_mod u3 v3 }
u3 % v3;
};
let u1, v1 = v1, u1 - q * v1 in
let u2, v2 = v2, u2 - q * v2 in
let u3' = u3 in
let v3' = v3 in
let u3, v3 = v3, u3 - q * v3 in
(* proving the implication in the precondition *)
introduce forall d. is_gcd v3' (u3' - q * v3') d ==> is_gcd u3' v3' d with
introduce _ ==> _ with _.
is_gcd_for_euclid u3' v3' q d;
let r = egcd a b u1 u2 u3 v1 v2 v3 in
r
end
let euclid_gcd a b =
if b >= 0 then
egcd a b 1 0 a 0 1 b
else (
introduce forall d. is_gcd a (-b) d ==> is_gcd a b d
with introduce _ ==> _
with _pf.
(is_gcd_minus a b d;
is_gcd_symmetric b a d);
let res = egcd a b 1 0 a 0 (-1) (-b) in
let _, _, d = res in
assert (is_gcd a b d);
res
)
val is_gcd_prime_aux (p:int) (a:pos{a < p}) (d:int) : Lemma
(requires is_prime p /\ d `divides` p /\ d `divides` a)
(ensures d = 1 \/ d = -1)
let is_gcd_prime_aux p a d = ()
val is_gcd_prime (p:int{is_prime p}) (a:pos{a < p}) : Lemma (is_gcd p a 1)
let is_gcd_prime p a =
Classical.forall_intro_2 (Classical.move_requires_2 divides_minus);
Classical.forall_intro (Classical.move_requires (is_gcd_prime_aux p a));
assert (forall x. x `divides` p /\ x `divides` a ==> x = 1 \/ x = -1 /\ x `divides` 1)
let bezout_prime p a =
let r, s, d = euclid_gcd p a in
assert (r * p + s * a = d);
assert (is_gcd p a d);
is_gcd_prime p a;
is_gcd_unique p a 1 d;
assert (d = 1 \/ d = -1);
assert ((-r) * p + (-s) * a == -(r * p + s * a)) by (FStar.Tactics.Canon.canon());
if d = 1 then r, s else -r, -s | false | false | FStar.Math.Euclid.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val euclid (n:pos) (a b r s:int) : Lemma
(requires (a * b) % n = 0 /\ r * n + s * a = 1)
(ensures b % n = 0) | [] | FStar.Math.Euclid.euclid | {
"file_name": "ulib/FStar.Math.Euclid.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | n: Prims.pos -> a: Prims.int -> b: Prims.int -> r: Prims.int -> s: Prims.int
-> FStar.Pervasives.Lemma (requires a * b % n = 0 /\ r * n + s * a = 1) (ensures b % n = 0) | {
"end_col": 3,
"end_line": 250,
"start_col": 2,
"start_line": 221
} |
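The calc proof of euclid in the row above is, in essence, the Bézout relation multiplied by b: from r * n + s * a = 1 we get b = r * n * b + s * (a * b), and modulo n both summands vanish once (a * b) % n = 0, hence b % n = 0.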
Prims.Pure | val euclid_gcd (a b:int) : Pure (int & int & int)
(requires True)
(ensures fun (r, s, d) -> r * a + s * b = d /\ is_gcd a b d) | [
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let euclid_gcd a b =
if b >= 0 then
egcd a b 1 0 a 0 1 b
else (
introduce forall d. is_gcd a (-b) d ==> is_gcd a b d
with introduce _ ==> _
with _pf.
(is_gcd_minus a b d;
is_gcd_symmetric b a d);
let res = egcd a b 1 0 a 0 (-1) (-b) in
let _, _, d = res in
assert (is_gcd a b d);
res
) | val euclid_gcd (a b:int) : Pure (int & int & int)
(requires True)
(ensures fun (r, s, d) -> r * a + s * b = d /\ is_gcd a b d)
let euclid_gcd a b = | false | null | false | if b >= 0
then egcd a b 1 0 a 0 1 b
else
(introduce forall d . is_gcd a (- b) d ==> is_gcd a b d
with introduce _ ==> _
with _pf. (is_gcd_minus a b d;
is_gcd_symmetric b a d);
let res = egcd a b 1 0 a 0 (- 1) (- b) in
let _, _, d = res in
assert (is_gcd a b d);
res) | {
"checked_file": "FStar.Math.Euclid.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.Tactics.Effect.fsti.checked",
"FStar.Tactics.Canon.fst.checked",
"FStar.Squash.fsti.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Classical.Sugar.fsti.checked",
"FStar.Classical.fsti.checked",
"FStar.Calc.fsti.checked"
],
"interface_file": true,
"source_file": "FStar.Math.Euclid.fst"
} | [] | [
"Prims.int",
"Prims.op_GreaterThanOrEqual",
"FStar.Math.Euclid.egcd",
"Prims.bool",
"Prims.unit",
"Prims._assert",
"FStar.Math.Euclid.is_gcd",
"FStar.Pervasives.Native.tuple3",
"Prims.op_Minus",
"FStar.Classical.Sugar.forall_intro",
"Prims.l_imp",
"FStar.Classical.Sugar.implies_intro",
"Prims.squash",
"FStar.Math.Euclid.is_gcd_symmetric",
"FStar.Math.Euclid.is_gcd_minus"
] | [] | module FStar.Math.Euclid
open FStar.Mul
open FStar.Math.Lemmas
///
/// Auxiliary lemmas
///
val eq_mult_left (a b:int) : Lemma (requires a = b * a) (ensures a = 0 \/ b = 1)
let eq_mult_left a b = ()
val eq_mult_one (a b:int) : Lemma
(requires a * b = 1)
(ensures (a = 1 /\ b = 1) \/ (a = -1 /\ b = -1))
let eq_mult_one a b = ()
val opp_idempotent (a:int) : Lemma (-(-a) == a)
let opp_idempotent a = ()
val add_sub_l (a b:int) : Lemma (a - b + b = a)
let add_sub_l a b = ()
val add_sub_r (a b:int) : Lemma (a + b - b = a)
let add_sub_r a b = ()
///
/// Divides relation
///
let divides_reflexive a =
Classical.exists_intro (fun q -> a = q * a) 1
let divides_transitive a b c =
eliminate exists q1. b == q1 * a
returns a `divides` c
with _pf.
eliminate exists q2. c == q2 * b
returns _
with _pf2.
introduce exists q. c == q * a
with (q1 * q2)
and ()
let divide_antisym a b =
if a <> 0 then
Classical.exists_elim (a = b \/ a = -b) (Squash.get_proof (exists q1. b = q1 * a))
(fun q1 ->
Classical.exists_elim (a = b \/ a = -b) (Squash.get_proof (exists q2. a = q2 * b))
(fun q2 ->
assert (b = q1 * a);
assert (a = q2 * b);
assert (b = q1 * (q2 * b));
paren_mul_right q1 q2 b;
eq_mult_left b (q1 * q2);
eq_mult_one q1 q2))
let divides_0 a =
Classical.exists_intro (fun q -> 0 = q * a) 0
let divides_1 a = ()
let divides_minus a b =
Classical.exists_elim (a `divides` (-b))
(Squash.get_proof (a `divides` b))
(fun q -> Classical.exists_intro (fun q' -> -b = q' * a) (-q))
let divides_opp a b =
Classical.exists_elim ((-a) `divides` b)
(Squash.get_proof (a `divides` b))
(fun q -> Classical.exists_intro (fun q' -> b = q' * (-a)) (-q))
let divides_plus a b d =
Classical.exists_elim (d `divides` (a + b)) (Squash.get_proof (exists q1. a = q1 * d))
(fun q1 ->
Classical.exists_elim (d `divides` (a + b)) (Squash.get_proof (exists q2. b = q2 * d))
(fun q2 ->
assert (a + b = q1 * d + q2 * d);
distributivity_add_left q1 q2 d;
Classical.exists_intro (fun q -> a + b = q * d) (q1 + q2)))
let divides_sub a b d =
Classical.forall_intro_2 (Classical.move_requires_2 divides_minus);
divides_plus a (-b) d
let divides_mult_right a b d =
Classical.exists_elim (d `divides` (a * b)) (Squash.get_proof (d `divides` b))
(fun q ->
paren_mul_right a q d;
Classical.exists_intro (fun r -> a * b = r * d) (a * q))
///
/// GCD
///
let mod_divides a b =
Classical.exists_intro (fun q -> a = q * b) (a / b)
let divides_mod a b =
Classical.exists_elim (a % b = 0) (Squash.get_proof (b `divides` a))
(fun q -> cancel_mul_div q b)
let is_gcd_unique a b c d =
divide_antisym c d
let is_gcd_reflexive a = ()
let is_gcd_symmetric a b d = ()
let is_gcd_0 a = ()
let is_gcd_1 a = ()
let is_gcd_minus a b d =
Classical.forall_intro_2 (Classical.move_requires_2 divides_minus);
opp_idempotent b
let is_gcd_opp a b d =
Classical.forall_intro_2 (Classical.move_requires_2 divides_minus);
divides_opp d a;
divides_opp d b
let is_gcd_plus a b q d =
add_sub_r b (q * a);
Classical.forall_intro_3 (Classical.move_requires_3 divides_plus);
Classical.forall_intro_3 (Classical.move_requires_3 divides_mult_right);
Classical.forall_intro_3 (Classical.move_requires_3 divides_sub)
///
/// Extended Euclidean algorithm
///
val is_gcd_for_euclid (a b q d:int) : Lemma
(requires is_gcd b (a - q * b) d)
(ensures is_gcd a b d)
let is_gcd_for_euclid a b q d =
add_sub_l a (q * b);
is_gcd_plus b (a - q * b) q d
val egcd (a b u1 u2 u3 v1 v2 v3:int) : Pure (int & int & int)
(requires v3 >= 0 /\
u1 * a + u2 * b = u3 /\
v1 * a + v2 * b = v3 /\
(forall d. is_gcd u3 v3 d ==> is_gcd a b d))
(ensures (fun (u, v, d) -> u * a + v * b = d /\ is_gcd a b d))
(decreases v3)
let rec egcd a b u1 u2 u3 v1 v2 v3 =
if v3 = 0 then
begin
divides_0 u3;
(u1, u2, u3)
end
else
begin
let q = u3 / v3 in
euclidean_division_definition u3 v3;
assert (u3 - q * v3 = (q * v3 + u3 % v3) - q * v3);
assert (q * v3 - q * v3 = 0);
swap_add_plus_minus (q * v3) (u3 % v3) (q * v3);
calc (==) {
(u1 - q * v1) * a + (u2 - q * v2) * b;
== { _ by (FStar.Tactics.Canon.canon()) }
(u1 * a + u2 * b) - q * (v1 * a + v2 * b);
== { }
u3 - q * v3;
== { lemma_div_mod u3 v3 }
u3 % v3;
};
let u1, v1 = v1, u1 - q * v1 in
let u2, v2 = v2, u2 - q * v2 in
let u3' = u3 in
let v3' = v3 in
let u3, v3 = v3, u3 - q * v3 in
(* proving the implication in the precondition *)
introduce forall d. is_gcd v3' (u3' - q * v3') d ==> is_gcd u3' v3' d with
introduce _ ==> _ with _.
is_gcd_for_euclid u3' v3' q d;
let r = egcd a b u1 u2 u3 v1 v2 v3 in
r
end | false | false | FStar.Math.Euclid.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val euclid_gcd (a b:int) : Pure (int & int & int)
(requires True)
(ensures fun (r, s, d) -> r * a + s * b = d /\ is_gcd a b d) | [] | FStar.Math.Euclid.euclid_gcd | {
"file_name": "ulib/FStar.Math.Euclid.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | a: Prims.int -> b: Prims.int -> Prims.Pure ((Prims.int * Prims.int) * Prims.int) | {
"end_col": 3,
"end_line": 197,
"start_col": 2,
"start_line": 185
} |
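euclid_gcd in the row above packages Bézout's identity: it returns (r, s, d) with r * a + s * b = d and is_gcd a b d, reducing a negative b to the call egcd a b 1 0 a 0 (-1) (-b). Illustrative values (chosen here; the exact coefficients returned are not claimed): for a = 12 and b = 18, a valid triple is (-1, 1, 6), since (-1) * 12 + 1 * 18 = 6 = gcd(12, 18).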
Prims.Tot | [
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.SHA.SHA_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Arch.BufferFriend",
"short_module": "BF"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": true,
"full_module": "LowStar.BufferView",
"short_module": "BV"
},
{
"abbrev": true,
"full_module": "LowStar.ImmutableBuffer",
"short_module": "IB"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": false,
"full_module": "FStar.HyperStack.ST",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.CPU_Features_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Wrapper.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Wrapper.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let uint32_i = IB.ibuffer uint32 | let uint32_i = | false | null | false | IB.ibuffer uint32 | {
"checked_file": "Vale.Wrapper.X64.Sha.fsti.checked",
"dependencies": [
"Vale.X64.CPU_Features_s.fst.checked",
"Vale.SHA.SHA_helpers.fsti.checked",
"Vale.Arch.BufferFriend.fsti.checked",
"Spec.SHA2.Constants.fst.checked",
"prims.fst.checked",
"LowStar.ImmutableBuffer.fst.checked",
"LowStar.BufferView.fsti.checked",
"LowStar.Buffer.fst.checked",
"Lib.IntTypes.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked"
],
"interface_file": false,
"source_file": "Vale.Wrapper.X64.Sha.fsti"
} | [
"total"
] | [
"LowStar.ImmutableBuffer.ibuffer",
"Lib.IntTypes.uint32"
] | [] | module Vale.Wrapper.X64.Sha
open Vale.X64.CPU_Features_s
open FStar.HyperStack.ST
module B = LowStar.Buffer
module IB = LowStar.ImmutableBuffer
module BV = LowStar.BufferView
module HS = FStar.HyperStack
module BF = Vale.Arch.BufferFriend
open FStar.Mul
open Vale.SHA.SHA_helpers
open Lib.IntTypes
unfold
let uint32_p = B.buffer uint32 | false | true | Vale.Wrapper.X64.Sha.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val uint32_i : Type0 | [] | Vale.Wrapper.X64.Sha.uint32_i | {
"file_name": "vale/code/arch/x64/interop/Vale.Wrapper.X64.Sha.fsti",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | Type0 | {
"end_col": 32,
"end_line": 17,
"start_col": 15,
"start_line": 17
} |
|
Prims.Tot | [
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.SHA.SHA_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Arch.BufferFriend",
"short_module": "BF"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": true,
"full_module": "LowStar.BufferView",
"short_module": "BV"
},
{
"abbrev": true,
"full_module": "LowStar.ImmutableBuffer",
"short_module": "IB"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": false,
"full_module": "FStar.HyperStack.ST",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.CPU_Features_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Wrapper.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Wrapper.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let uint32_p = B.buffer uint32 | let uint32_p = | false | null | false | B.buffer uint32 | {
"checked_file": "Vale.Wrapper.X64.Sha.fsti.checked",
"dependencies": [
"Vale.X64.CPU_Features_s.fst.checked",
"Vale.SHA.SHA_helpers.fsti.checked",
"Vale.Arch.BufferFriend.fsti.checked",
"Spec.SHA2.Constants.fst.checked",
"prims.fst.checked",
"LowStar.ImmutableBuffer.fst.checked",
"LowStar.BufferView.fsti.checked",
"LowStar.Buffer.fst.checked",
"Lib.IntTypes.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked"
],
"interface_file": false,
"source_file": "Vale.Wrapper.X64.Sha.fsti"
} | [
"total"
] | [
"LowStar.Buffer.buffer",
"Lib.IntTypes.uint32"
] | [] | module Vale.Wrapper.X64.Sha
open Vale.X64.CPU_Features_s
open FStar.HyperStack.ST
module B = LowStar.Buffer
module IB = LowStar.ImmutableBuffer
module BV = LowStar.BufferView
module HS = FStar.HyperStack
module BF = Vale.Arch.BufferFriend
open FStar.Mul
open Vale.SHA.SHA_helpers
open Lib.IntTypes | false | true | Vale.Wrapper.X64.Sha.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val uint32_p : Type0 | [] | Vale.Wrapper.X64.Sha.uint32_p | {
"file_name": "vale/code/arch/x64/interop/Vale.Wrapper.X64.Sha.fsti",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | Type0 | {
"end_col": 30,
"end_line": 15,
"start_col": 15,
"start_line": 15
} |
|
Prims.Tot | [
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.SHA.SHA_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Arch.BufferFriend",
"short_module": "BF"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": true,
"full_module": "LowStar.BufferView",
"short_module": "BV"
},
{
"abbrev": true,
"full_module": "LowStar.ImmutableBuffer",
"short_module": "IB"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": false,
"full_module": "FStar.HyperStack.ST",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.CPU_Features_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Wrapper.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Wrapper.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let uint64 = uint_t U64 PUB | let uint64 = | false | null | false | uint_t U64 PUB | {
"checked_file": "Vale.Wrapper.X64.Sha.fsti.checked",
"dependencies": [
"Vale.X64.CPU_Features_s.fst.checked",
"Vale.SHA.SHA_helpers.fsti.checked",
"Vale.Arch.BufferFriend.fsti.checked",
"Spec.SHA2.Constants.fst.checked",
"prims.fst.checked",
"LowStar.ImmutableBuffer.fst.checked",
"LowStar.BufferView.fsti.checked",
"LowStar.Buffer.fst.checked",
"Lib.IntTypes.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked"
],
"interface_file": false,
"source_file": "Vale.Wrapper.X64.Sha.fsti"
} | [
"total"
] | [
"Lib.IntTypes.uint_t",
"Lib.IntTypes.U64",
"Lib.IntTypes.PUB"
] | [] | module Vale.Wrapper.X64.Sha
open Vale.X64.CPU_Features_s
open FStar.HyperStack.ST
module B = LowStar.Buffer
module IB = LowStar.ImmutableBuffer
module BV = LowStar.BufferView
module HS = FStar.HyperStack
module BF = Vale.Arch.BufferFriend
open FStar.Mul
open Vale.SHA.SHA_helpers
open Lib.IntTypes
unfold
let uint32_p = B.buffer uint32
unfold
let uint32_i = IB.ibuffer uint32
unfold
let uint8_p = B.buffer uint8 | false | true | Vale.Wrapper.X64.Sha.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val uint64 : Type0 | [] | Vale.Wrapper.X64.Sha.uint64 | {
"file_name": "vale/code/arch/x64/interop/Vale.Wrapper.X64.Sha.fsti",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | Type0 | {
"end_col": 27,
"end_line": 21,
"start_col": 13,
"start_line": 21
} |
|
Prims.Tot | [
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.SHA.SHA_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Arch.BufferFriend",
"short_module": "BF"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": true,
"full_module": "LowStar.BufferView",
"short_module": "BV"
},
{
"abbrev": true,
"full_module": "LowStar.ImmutableBuffer",
"short_module": "IB"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": false,
"full_module": "FStar.HyperStack.ST",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.CPU_Features_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Wrapper.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Wrapper.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let uint8_p = B.buffer uint8 | let uint8_p = | false | null | false | B.buffer uint8 | {
"checked_file": "Vale.Wrapper.X64.Sha.fsti.checked",
"dependencies": [
"Vale.X64.CPU_Features_s.fst.checked",
"Vale.SHA.SHA_helpers.fsti.checked",
"Vale.Arch.BufferFriend.fsti.checked",
"Spec.SHA2.Constants.fst.checked",
"prims.fst.checked",
"LowStar.ImmutableBuffer.fst.checked",
"LowStar.BufferView.fsti.checked",
"LowStar.Buffer.fst.checked",
"Lib.IntTypes.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked"
],
"interface_file": false,
"source_file": "Vale.Wrapper.X64.Sha.fsti"
} | [
"total"
] | [
"LowStar.Buffer.buffer",
"Lib.IntTypes.uint8"
] | [] | module Vale.Wrapper.X64.Sha
open Vale.X64.CPU_Features_s
open FStar.HyperStack.ST
module B = LowStar.Buffer
module IB = LowStar.ImmutableBuffer
module BV = LowStar.BufferView
module HS = FStar.HyperStack
module BF = Vale.Arch.BufferFriend
open FStar.Mul
open Vale.SHA.SHA_helpers
open Lib.IntTypes
unfold
let uint32_p = B.buffer uint32
unfold
let uint32_i = IB.ibuffer uint32 | false | true | Vale.Wrapper.X64.Sha.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val uint8_p : Type0 | [] | Vale.Wrapper.X64.Sha.uint8_p | {
"file_name": "vale/code/arch/x64/interop/Vale.Wrapper.X64.Sha.fsti",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | Type0 | {
"end_col": 28,
"end_line": 19,
"start_col": 14,
"start_line": 19
} |
|
Prims.Tot | val two_two_to_four (#a: Type) (x: two (two a)) : four a | [
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let two_two_to_four (#a:Type) (x:two (two a)) : four a =
let (Mktwo (Mktwo x0 x1) (Mktwo x2 x3)) = x in
Mkfour x0 x1 x2 x3 | val two_two_to_four (#a: Type) (x: two (two a)) : four a
let two_two_to_four (#a: Type) (x: two (two a)) : four a = | false | null | false | let Mktwo (Mktwo x0 x1) (Mktwo x2 x3) = x in
Mkfour x0 x1 x2 x3 | {
"checked_file": "Vale.Def.Words.Four_s.fsti.checked",
"dependencies": [
"Vale.Def.Words_s.fsti.checked",
"prims.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked"
],
"interface_file": false,
"source_file": "Vale.Def.Words.Four_s.fsti"
} | [
"total"
] | [
"Vale.Def.Words_s.two",
"Vale.Def.Words_s.Mkfour",
"Vale.Def.Words_s.four"
] | [] | module Vale.Def.Words.Four_s
open Vale.Def.Words_s
open FStar.Mul
unfold let four_map (#a #b:Type) (f:a -> b) (x:four a) : four b =
let Mkfour x0 x1 x2 x3 = x in
Mkfour (f x0) (f x1) (f x2) (f x3)
unfold let four_map2 (#a #b:Type) (f:a -> a -> b) (x y:four a) : four b =
let Mkfour x0 x1 x2 x3 = x in
let Mkfour y0 y1 y2 y3 = y in
Mkfour (f x0 y0) (f x1 y1) (f x2 y2) (f x3 y3) | false | false | Vale.Def.Words.Four_s.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val two_two_to_four (#a: Type) (x: two (two a)) : four a | [] | Vale.Def.Words.Four_s.two_two_to_four | {
"file_name": "vale/specs/defs/Vale.Def.Words.Four_s.fsti",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | x: Vale.Def.Words_s.two (Vale.Def.Words_s.two a) -> Vale.Def.Words_s.four a | {
"end_col": 20,
"end_line": 16,
"start_col": 56,
"start_line": 14
} |
Prims.Tot | val four_to_two_two (#a: Type) (x: four a) : two (two a) | [
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let four_to_two_two (#a:Type) (x:four a) : two (two a) =
let Mkfour x0 x1 x2 x3 = x in
Mktwo (Mktwo x0 x1) (Mktwo x2 x3) | val four_to_two_two (#a: Type) (x: four a) : two (two a)
let four_to_two_two (#a: Type) (x: four a) : two (two a) = | false | null | false | let Mkfour x0 x1 x2 x3 = x in
Mktwo (Mktwo x0 x1) (Mktwo x2 x3) | {
"checked_file": "Vale.Def.Words.Four_s.fsti.checked",
"dependencies": [
"Vale.Def.Words_s.fsti.checked",
"prims.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked"
],
"interface_file": false,
"source_file": "Vale.Def.Words.Four_s.fsti"
} | [
"total"
] | [
"Vale.Def.Words_s.four",
"Vale.Def.Words_s.Mktwo",
"Vale.Def.Words_s.two"
] | [] | module Vale.Def.Words.Four_s
open Vale.Def.Words_s
open FStar.Mul
unfold let four_map (#a #b:Type) (f:a -> b) (x:four a) : four b =
let Mkfour x0 x1 x2 x3 = x in
Mkfour (f x0) (f x1) (f x2) (f x3)
unfold let four_map2 (#a #b:Type) (f:a -> a -> b) (x y:four a) : four b =
let Mkfour x0 x1 x2 x3 = x in
let Mkfour y0 y1 y2 y3 = y in
Mkfour (f x0 y0) (f x1 y1) (f x2 y2) (f x3 y3)
let two_two_to_four (#a:Type) (x:two (two a)) : four a =
let (Mktwo (Mktwo x0 x1) (Mktwo x2 x3)) = x in
Mkfour x0 x1 x2 x3 | false | false | Vale.Def.Words.Four_s.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val four_to_two_two (#a: Type) (x: four a) : two (two a) | [] | Vale.Def.Words.Four_s.four_to_two_two | {
"file_name": "vale/specs/defs/Vale.Def.Words.Four_s.fsti",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | x: Vale.Def.Words_s.four a -> Vale.Def.Words_s.two (Vale.Def.Words_s.two a) | {
"end_col": 35,
"end_line": 20,
"start_col": 56,
"start_line": 18
} |
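The two conversions above are mutually inverse reshapings between a four-element record and a nested pair of pairs. A minimal round-trip sketch, assuming Vale.Def.Words_s and Vale.Def.Words.Four_s are opened; the names q and q' are illustrative and not part of the source:

let q : four int = Mkfour 1 2 3 4
let q' : four int = two_two_to_four (four_to_two_two q)
// four_to_two_two q == Mktwo (Mktwo 1 2) (Mktwo 3 4), so q' == Mkfour 1 2 3 4:
// the reshaping loses no information.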
Prims.Tot | val four_map2 (#a #b: Type) (f: (a -> a -> b)) (x y: four a) : four b | [
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let four_map2 (#a #b:Type) (f:a -> a -> b) (x y:four a) : four b =
let Mkfour x0 x1 x2 x3 = x in
let Mkfour y0 y1 y2 y3 = y in
Mkfour (f x0 y0) (f x1 y1) (f x2 y2) (f x3 y3) | val four_map2 (#a #b: Type) (f: (a -> a -> b)) (x y: four a) : four b
let four_map2 (#a #b: Type) (f: (a -> a -> b)) (x y: four a) : four b = | false | null | false | let Mkfour x0 x1 x2 x3 = x in
let Mkfour y0 y1 y2 y3 = y in
Mkfour (f x0 y0) (f x1 y1) (f x2 y2) (f x3 y3) | {
"checked_file": "Vale.Def.Words.Four_s.fsti.checked",
"dependencies": [
"Vale.Def.Words_s.fsti.checked",
"prims.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked"
],
"interface_file": false,
"source_file": "Vale.Def.Words.Four_s.fsti"
} | [
"total"
] | [
"Vale.Def.Words_s.four",
"Vale.Def.Words_s.Mkfour"
] | [] | module Vale.Def.Words.Four_s
open Vale.Def.Words_s
open FStar.Mul
unfold let four_map (#a #b:Type) (f:a -> b) (x:four a) : four b =
let Mkfour x0 x1 x2 x3 = x in
Mkfour (f x0) (f x1) (f x2) (f x3) | false | false | Vale.Def.Words.Four_s.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val four_map2 (#a #b: Type) (f: (a -> a -> b)) (x y: four a) : four b | [] | Vale.Def.Words.Four_s.four_map2 | {
"file_name": "vale/specs/defs/Vale.Def.Words.Four_s.fsti",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | f: (_: a -> _: a -> b) -> x: Vale.Def.Words_s.four a -> y: Vale.Def.Words_s.four a
-> Vale.Def.Words_s.four b | {
"end_col": 48,
"end_line": 12,
"start_col": 73,
"start_line": 9
} |
Prims.Tot | val four_map (#a #b: Type) (f: (a -> b)) (x: four a) : four b | [
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let four_map (#a #b:Type) (f:a -> b) (x:four a) : four b =
let Mkfour x0 x1 x2 x3 = x in
Mkfour (f x0) (f x1) (f x2) (f x3) | val four_map (#a #b: Type) (f: (a -> b)) (x: four a) : four b
let four_map (#a #b: Type) (f: (a -> b)) (x: four a) : four b = | false | null | false | let Mkfour x0 x1 x2 x3 = x in
Mkfour (f x0) (f x1) (f x2) (f x3) | {
"checked_file": "Vale.Def.Words.Four_s.fsti.checked",
"dependencies": [
"Vale.Def.Words_s.fsti.checked",
"prims.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked"
],
"interface_file": false,
"source_file": "Vale.Def.Words.Four_s.fsti"
} | [
"total"
] | [
"Vale.Def.Words_s.four",
"Vale.Def.Words_s.Mkfour"
] | [] | module Vale.Def.Words.Four_s
open Vale.Def.Words_s
open FStar.Mul | false | false | Vale.Def.Words.Four_s.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val four_map (#a #b: Type) (f: (a -> b)) (x: four a) : four b | [] | Vale.Def.Words.Four_s.four_map | {
"file_name": "vale/specs/defs/Vale.Def.Words.Four_s.fsti",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | f: (_: a -> b) -> x: Vale.Def.Words_s.four a -> Vale.Def.Words_s.four b | {
"end_col": 36,
"end_line": 7,
"start_col": 65,
"start_line": 5
} |
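Both mapping combinators above act componentwise on the four fields. A small illustrative use under the same assumptions (opened modules, hypothetical names):

let incr_all : four int = four_map (fun x -> x + 1) (Mkfour 0 1 2 3)
// incr_all == Mkfour 1 2 3 4
let sums : four int = four_map2 (fun x y -> x + y) (Mkfour 1 2 3 4) (Mkfour 10 20 30 40)
// sums == Mkfour 11 22 33 44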
Prims.Tot | val four_reverse (#a: Type) (x: four a) : four a | [
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let four_reverse (#a:Type) (x:four a) : four a =
let Mkfour x0 x1 x2 x3 = x in
Mkfour x3 x2 x1 x0 | val four_reverse (#a: Type) (x: four a) : four a
let four_reverse (#a: Type) (x: four a) : four a = | false | null | false | let Mkfour x0 x1 x2 x3 = x in
Mkfour x3 x2 x1 x0 | {
"checked_file": "Vale.Def.Words.Four_s.fsti.checked",
"dependencies": [
"Vale.Def.Words_s.fsti.checked",
"prims.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked"
],
"interface_file": false,
"source_file": "Vale.Def.Words.Four_s.fsti"
} | [
"total"
] | [
"Vale.Def.Words_s.four",
"Vale.Def.Words_s.Mkfour"
] | [] | module Vale.Def.Words.Four_s
open Vale.Def.Words_s
open FStar.Mul
unfold let four_map (#a #b:Type) (f:a -> b) (x:four a) : four b =
let Mkfour x0 x1 x2 x3 = x in
Mkfour (f x0) (f x1) (f x2) (f x3)
unfold let four_map2 (#a #b:Type) (f:a -> a -> b) (x y:four a) : four b =
let Mkfour x0 x1 x2 x3 = x in
let Mkfour y0 y1 y2 y3 = y in
Mkfour (f x0 y0) (f x1 y1) (f x2 y2) (f x3 y3)
let two_two_to_four (#a:Type) (x:two (two a)) : four a =
let (Mktwo (Mktwo x0 x1) (Mktwo x2 x3)) = x in
Mkfour x0 x1 x2 x3
let four_to_two_two (#a:Type) (x:four a) : two (two a) =
let Mkfour x0 x1 x2 x3 = x in
Mktwo (Mktwo x0 x1) (Mktwo x2 x3)
unfold
let nat_to_four_unfold (size:nat) (n:natN (pow2 (4 * size))) : four (natN (pow2 size)) =
let n1 = pow2_norm size in
let n2 = pow2_norm (2 * size) in
let n3 = pow2_norm (3 * size) in
let n4 = pow2_norm (4 * size) in
Mkfour (n % n1) ((n / n1) % n1) ((n / n2) % n1) ((n / n3) % n1)
[@"opaque_to_smt"]
let nat_to_four (size:nat) (n:natN (pow2 (4 * size))) : four (natN (pow2 size)) =
nat_to_four_unfold size n
unfold
let four_to_nat_unfold (size:nat) (x:four (natN (pow2 size))) : natN (pow2 (4 * size)) =
let n1 = pow2_norm size in
let n2 = pow2_norm (2 * size) in
let n3 = pow2_norm (3 * size) in
let n4 = pow2_norm (4 * size) in
let Mkfour x0 x1 x2 x3 = x in
int_to_natN n4 (x0 + x1 * n1 + x2 * n2 + x3 * n3)
[@"opaque_to_smt"]
let four_to_nat (size:nat) (x:four (natN (pow2 size))) : natN (pow2 (4 * size)) =
four_to_nat_unfold size x
let four_select (#a:Type) (x:four a) (selector:nat2) : a =
match selector with
| 0 -> x.lo0
| 1 -> x.lo1
| 2 -> x.hi2
| 3 -> x.hi3
let four_insert (#a:Type) (x:four a) (y:a) (selector:nat2) : four a =
match selector with
| 0 -> Mkfour y x.lo1 x.hi2 x.hi3
| 1 -> Mkfour x.lo0 y x.hi2 x.hi3
| 2 -> Mkfour x.lo0 x.lo1 y x.hi3
| 3 -> Mkfour x.lo0 x.lo1 x.hi2 y | false | false | Vale.Def.Words.Four_s.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val four_reverse (#a: Type) (x: four a) : four a | [] | Vale.Def.Words.Four_s.four_reverse | {
"file_name": "vale/specs/defs/Vale.Def.Words.Four_s.fsti",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | x: Vale.Def.Words_s.four a -> Vale.Def.Words_s.four a | {
"end_col": 20,
"end_line": 63,
"start_col": 48,
"start_line": 61
} |
Prims.Tot | val four_select (#a: Type) (x: four a) (selector: nat2) : a | [
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let four_select (#a:Type) (x:four a) (selector:nat2) : a =
match selector with
| 0 -> x.lo0
| 1 -> x.lo1
| 2 -> x.hi2
| 3 -> x.hi3 | val four_select (#a: Type) (x: four a) (selector: nat2) : a
let four_select (#a: Type) (x: four a) (selector: nat2) : a = | false | null | false | match selector with
| 0 -> x.lo0
| 1 -> x.lo1
| 2 -> x.hi2
| 3 -> x.hi3 | {
"checked_file": "Vale.Def.Words.Four_s.fsti.checked",
"dependencies": [
"Vale.Def.Words_s.fsti.checked",
"prims.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked"
],
"interface_file": false,
"source_file": "Vale.Def.Words.Four_s.fsti"
} | [
"total"
] | [
"Vale.Def.Words_s.four",
"Vale.Def.Words_s.nat2",
"Vale.Def.Words_s.__proj__Mkfour__item__lo0",
"Vale.Def.Words_s.__proj__Mkfour__item__lo1",
"Vale.Def.Words_s.__proj__Mkfour__item__hi2",
"Vale.Def.Words_s.__proj__Mkfour__item__hi3"
] | [] | module Vale.Def.Words.Four_s
open Vale.Def.Words_s
open FStar.Mul
unfold let four_map (#a #b:Type) (f:a -> b) (x:four a) : four b =
let Mkfour x0 x1 x2 x3 = x in
Mkfour (f x0) (f x1) (f x2) (f x3)
unfold let four_map2 (#a #b:Type) (f:a -> a -> b) (x y:four a) : four b =
let Mkfour x0 x1 x2 x3 = x in
let Mkfour y0 y1 y2 y3 = y in
Mkfour (f x0 y0) (f x1 y1) (f x2 y2) (f x3 y3)
let two_two_to_four (#a:Type) (x:two (two a)) : four a =
let (Mktwo (Mktwo x0 x1) (Mktwo x2 x3)) = x in
Mkfour x0 x1 x2 x3
let four_to_two_two (#a:Type) (x:four a) : two (two a) =
let Mkfour x0 x1 x2 x3 = x in
Mktwo (Mktwo x0 x1) (Mktwo x2 x3)
unfold
let nat_to_four_unfold (size:nat) (n:natN (pow2 (4 * size))) : four (natN (pow2 size)) =
let n1 = pow2_norm size in
let n2 = pow2_norm (2 * size) in
let n3 = pow2_norm (3 * size) in
let n4 = pow2_norm (4 * size) in
Mkfour (n % n1) ((n / n1) % n1) ((n / n2) % n1) ((n / n3) % n1)
[@"opaque_to_smt"]
let nat_to_four (size:nat) (n:natN (pow2 (4 * size))) : four (natN (pow2 size)) =
nat_to_four_unfold size n
unfold
let four_to_nat_unfold (size:nat) (x:four (natN (pow2 size))) : natN (pow2 (4 * size)) =
let n1 = pow2_norm size in
let n2 = pow2_norm (2 * size) in
let n3 = pow2_norm (3 * size) in
let n4 = pow2_norm (4 * size) in
let Mkfour x0 x1 x2 x3 = x in
int_to_natN n4 (x0 + x1 * n1 + x2 * n2 + x3 * n3)
[@"opaque_to_smt"]
let four_to_nat (size:nat) (x:four (natN (pow2 size))) : natN (pow2 (4 * size)) =
four_to_nat_unfold size x | false | false | Vale.Def.Words.Four_s.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val four_select (#a: Type) (x: four a) (selector: nat2) : a | [] | Vale.Def.Words.Four_s.four_select | {
"file_name": "vale/specs/defs/Vale.Def.Words.Four_s.fsti",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | x: Vale.Def.Words_s.four a -> selector: Vale.Def.Words_s.nat2 -> a | {
"end_col": 14,
"end_line": 52,
"start_col": 2,
"start_line": 48
} |
Prims.Tot | val four_insert (#a: Type) (x: four a) (y: a) (selector: nat2) : four a | [
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let four_insert (#a:Type) (x:four a) (y:a) (selector:nat2) : four a =
match selector with
| 0 -> Mkfour y x.lo1 x.hi2 x.hi3
| 1 -> Mkfour x.lo0 y x.hi2 x.hi3
| 2 -> Mkfour x.lo0 x.lo1 y x.hi3
| 3 -> Mkfour x.lo0 x.lo1 x.hi2 y | val four_insert (#a: Type) (x: four a) (y: a) (selector: nat2) : four a
let four_insert (#a: Type) (x: four a) (y: a) (selector: nat2) : four a = | false | null | false | match selector with
| 0 -> Mkfour y x.lo1 x.hi2 x.hi3
| 1 -> Mkfour x.lo0 y x.hi2 x.hi3
| 2 -> Mkfour x.lo0 x.lo1 y x.hi3
| 3 -> Mkfour x.lo0 x.lo1 x.hi2 y | {
"checked_file": "Vale.Def.Words.Four_s.fsti.checked",
"dependencies": [
"Vale.Def.Words_s.fsti.checked",
"prims.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked"
],
"interface_file": false,
"source_file": "Vale.Def.Words.Four_s.fsti"
} | [
"total"
] | [
"Vale.Def.Words_s.four",
"Vale.Def.Words_s.nat2",
"Vale.Def.Words_s.Mkfour",
"Vale.Def.Words_s.__proj__Mkfour__item__lo1",
"Vale.Def.Words_s.__proj__Mkfour__item__hi2",
"Vale.Def.Words_s.__proj__Mkfour__item__hi3",
"Vale.Def.Words_s.__proj__Mkfour__item__lo0"
] | [] | module Vale.Def.Words.Four_s
open Vale.Def.Words_s
open FStar.Mul
unfold let four_map (#a #b:Type) (f:a -> b) (x:four a) : four b =
let Mkfour x0 x1 x2 x3 = x in
Mkfour (f x0) (f x1) (f x2) (f x3)
unfold let four_map2 (#a #b:Type) (f:a -> a -> b) (x y:four a) : four b =
let Mkfour x0 x1 x2 x3 = x in
let Mkfour y0 y1 y2 y3 = y in
Mkfour (f x0 y0) (f x1 y1) (f x2 y2) (f x3 y3)
let two_two_to_four (#a:Type) (x:two (two a)) : four a =
let (Mktwo (Mktwo x0 x1) (Mktwo x2 x3)) = x in
Mkfour x0 x1 x2 x3
let four_to_two_two (#a:Type) (x:four a) : two (two a) =
let Mkfour x0 x1 x2 x3 = x in
Mktwo (Mktwo x0 x1) (Mktwo x2 x3)
unfold
let nat_to_four_unfold (size:nat) (n:natN (pow2 (4 * size))) : four (natN (pow2 size)) =
let n1 = pow2_norm size in
let n2 = pow2_norm (2 * size) in
let n3 = pow2_norm (3 * size) in
let n4 = pow2_norm (4 * size) in
Mkfour (n % n1) ((n / n1) % n1) ((n / n2) % n1) ((n / n3) % n1)
[@"opaque_to_smt"]
let nat_to_four (size:nat) (n:natN (pow2 (4 * size))) : four (natN (pow2 size)) =
nat_to_four_unfold size n
unfold
let four_to_nat_unfold (size:nat) (x:four (natN (pow2 size))) : natN (pow2 (4 * size)) =
let n1 = pow2_norm size in
let n2 = pow2_norm (2 * size) in
let n3 = pow2_norm (3 * size) in
let n4 = pow2_norm (4 * size) in
let Mkfour x0 x1 x2 x3 = x in
int_to_natN n4 (x0 + x1 * n1 + x2 * n2 + x3 * n3)
[@"opaque_to_smt"]
let four_to_nat (size:nat) (x:four (natN (pow2 size))) : natN (pow2 (4 * size)) =
four_to_nat_unfold size x
let four_select (#a:Type) (x:four a) (selector:nat2) : a =
match selector with
| 0 -> x.lo0
| 1 -> x.lo1
| 2 -> x.hi2
| 3 -> x.hi3 | false | false | Vale.Def.Words.Four_s.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val four_insert (#a: Type) (x: four a) (y: a) (selector: nat2) : four a | [] | Vale.Def.Words.Four_s.four_insert | {
"file_name": "vale/specs/defs/Vale.Def.Words.Four_s.fsti",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | x: Vale.Def.Words_s.four a -> y: a -> selector: Vale.Def.Words_s.nat2 -> Vale.Def.Words_s.four a | {
"end_col": 35,
"end_line": 59,
"start_col": 2,
"start_line": 55
} |
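four_select reads one field of a four by a 2-bit index, and four_insert overwrites exactly one field while leaving the others unchanged. An illustrative sketch (hypothetical names, modules opened as above):

let third : int = four_select (Mkfour 10 11 12 13) 2
// third == 12, i.e. the hi2 field
let replaced : four int = four_insert (Mkfour 10 11 12 13) 99 0
// replaced == Mkfour 99 11 12 13; only lo0 changes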
Prims.Tot | val four_to_nat (size: nat) (x: four (natN (pow2 size))) : natN (pow2 (4 * size)) | [
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let four_to_nat (size:nat) (x:four (natN (pow2 size))) : natN (pow2 (4 * size)) =
four_to_nat_unfold size x | val four_to_nat (size: nat) (x: four (natN (pow2 size))) : natN (pow2 (4 * size))
let four_to_nat (size: nat) (x: four (natN (pow2 size))) : natN (pow2 (4 * size)) = | false | null | false | four_to_nat_unfold size x | {
"checked_file": "Vale.Def.Words.Four_s.fsti.checked",
"dependencies": [
"Vale.Def.Words_s.fsti.checked",
"prims.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked"
],
"interface_file": false,
"source_file": "Vale.Def.Words.Four_s.fsti"
} | [
"total"
] | [
"Prims.nat",
"Vale.Def.Words_s.four",
"Vale.Def.Words_s.natN",
"Prims.pow2",
"Vale.Def.Words.Four_s.four_to_nat_unfold",
"FStar.Mul.op_Star"
] | [] | module Vale.Def.Words.Four_s
open Vale.Def.Words_s
open FStar.Mul
unfold let four_map (#a #b:Type) (f:a -> b) (x:four a) : four b =
let Mkfour x0 x1 x2 x3 = x in
Mkfour (f x0) (f x1) (f x2) (f x3)
unfold let four_map2 (#a #b:Type) (f:a -> a -> b) (x y:four a) : four b =
let Mkfour x0 x1 x2 x3 = x in
let Mkfour y0 y1 y2 y3 = y in
Mkfour (f x0 y0) (f x1 y1) (f x2 y2) (f x3 y3)
let two_two_to_four (#a:Type) (x:two (two a)) : four a =
let (Mktwo (Mktwo x0 x1) (Mktwo x2 x3)) = x in
Mkfour x0 x1 x2 x3
let four_to_two_two (#a:Type) (x:four a) : two (two a) =
let Mkfour x0 x1 x2 x3 = x in
Mktwo (Mktwo x0 x1) (Mktwo x2 x3)
unfold
let nat_to_four_unfold (size:nat) (n:natN (pow2 (4 * size))) : four (natN (pow2 size)) =
let n1 = pow2_norm size in
let n2 = pow2_norm (2 * size) in
let n3 = pow2_norm (3 * size) in
let n4 = pow2_norm (4 * size) in
Mkfour (n % n1) ((n / n1) % n1) ((n / n2) % n1) ((n / n3) % n1)
[@"opaque_to_smt"]
let nat_to_four (size:nat) (n:natN (pow2 (4 * size))) : four (natN (pow2 size)) =
nat_to_four_unfold size n
unfold
let four_to_nat_unfold (size:nat) (x:four (natN (pow2 size))) : natN (pow2 (4 * size)) =
let n1 = pow2_norm size in
let n2 = pow2_norm (2 * size) in
let n3 = pow2_norm (3 * size) in
let n4 = pow2_norm (4 * size) in
let Mkfour x0 x1 x2 x3 = x in
int_to_natN n4 (x0 + x1 * n1 + x2 * n2 + x3 * n3)
[@"opaque_to_smt"] | false | false | Vale.Def.Words.Four_s.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val four_to_nat (size: nat) (x: four (natN (pow2 size))) : natN (pow2 (4 * size)) | [] | Vale.Def.Words.Four_s.four_to_nat | {
"file_name": "vale/specs/defs/Vale.Def.Words.Four_s.fsti",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | size: Prims.nat -> x: Vale.Def.Words_s.four (Vale.Def.Words_s.natN (Prims.pow2 size))
-> Vale.Def.Words_s.natN (Prims.pow2 (4 * size)) | {
"end_col": 27,
"end_line": 45,
"start_col": 2,
"start_line": 45
} |
Prims.Tot | val nat_to_four (size: nat) (n: natN (pow2 (4 * size))) : four (natN (pow2 size)) | [
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let nat_to_four (size:nat) (n:natN (pow2 (4 * size))) : four (natN (pow2 size)) =
nat_to_four_unfold size n | val nat_to_four (size: nat) (n: natN (pow2 (4 * size))) : four (natN (pow2 size))
let nat_to_four (size: nat) (n: natN (pow2 (4 * size))) : four (natN (pow2 size)) = | false | null | false | nat_to_four_unfold size n | {
"checked_file": "Vale.Def.Words.Four_s.fsti.checked",
"dependencies": [
"Vale.Def.Words_s.fsti.checked",
"prims.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked"
],
"interface_file": false,
"source_file": "Vale.Def.Words.Four_s.fsti"
} | [
"total"
] | [
"Prims.nat",
"Vale.Def.Words_s.natN",
"Prims.pow2",
"FStar.Mul.op_Star",
"Vale.Def.Words.Four_s.nat_to_four_unfold",
"Vale.Def.Words_s.four"
] | [] | module Vale.Def.Words.Four_s
open Vale.Def.Words_s
open FStar.Mul
unfold let four_map (#a #b:Type) (f:a -> b) (x:four a) : four b =
let Mkfour x0 x1 x2 x3 = x in
Mkfour (f x0) (f x1) (f x2) (f x3)
unfold let four_map2 (#a #b:Type) (f:a -> a -> b) (x y:four a) : four b =
let Mkfour x0 x1 x2 x3 = x in
let Mkfour y0 y1 y2 y3 = y in
Mkfour (f x0 y0) (f x1 y1) (f x2 y2) (f x3 y3)
let two_two_to_four (#a:Type) (x:two (two a)) : four a =
let (Mktwo (Mktwo x0 x1) (Mktwo x2 x3)) = x in
Mkfour x0 x1 x2 x3
let four_to_two_two (#a:Type) (x:four a) : two (two a) =
let Mkfour x0 x1 x2 x3 = x in
Mktwo (Mktwo x0 x1) (Mktwo x2 x3)
unfold
let nat_to_four_unfold (size:nat) (n:natN (pow2 (4 * size))) : four (natN (pow2 size)) =
let n1 = pow2_norm size in
let n2 = pow2_norm (2 * size) in
let n3 = pow2_norm (3 * size) in
let n4 = pow2_norm (4 * size) in
Mkfour (n % n1) ((n / n1) % n1) ((n / n2) % n1) ((n / n3) % n1)
[@"opaque_to_smt"] | false | false | Vale.Def.Words.Four_s.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val nat_to_four (size: nat) (n: natN (pow2 (4 * size))) : four (natN (pow2 size)) | [] | Vale.Def.Words.Four_s.nat_to_four | {
"file_name": "vale/specs/defs/Vale.Def.Words.Four_s.fsti",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | size: Prims.nat -> n: Vale.Def.Words_s.natN (Prims.pow2 (4 * size))
-> Vale.Def.Words_s.four (Vale.Def.Words_s.natN (Prims.pow2 size)) | {
"end_col": 27,
"end_line": 32,
"start_col": 2,
"start_line": 32
} |
Prims.Tot | val four_to_nat_unfold (size: nat) (x: four (natN (pow2 size))) : natN (pow2 (4 * size)) | [
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let four_to_nat_unfold (size:nat) (x:four (natN (pow2 size))) : natN (pow2 (4 * size)) =
let n1 = pow2_norm size in
let n2 = pow2_norm (2 * size) in
let n3 = pow2_norm (3 * size) in
let n4 = pow2_norm (4 * size) in
let Mkfour x0 x1 x2 x3 = x in
int_to_natN n4 (x0 + x1 * n1 + x2 * n2 + x3 * n3) | val four_to_nat_unfold (size: nat) (x: four (natN (pow2 size))) : natN (pow2 (4 * size))
let four_to_nat_unfold (size: nat) (x: four (natN (pow2 size))) : natN (pow2 (4 * size)) = | false | null | false | let n1 = pow2_norm size in
let n2 = pow2_norm (2 * size) in
let n3 = pow2_norm (3 * size) in
let n4 = pow2_norm (4 * size) in
let Mkfour x0 x1 x2 x3 = x in
int_to_natN n4 (x0 + x1 * n1 + x2 * n2 + x3 * n3) | {
"checked_file": "Vale.Def.Words.Four_s.fsti.checked",
"dependencies": [
"Vale.Def.Words_s.fsti.checked",
"prims.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked"
],
"interface_file": false,
"source_file": "Vale.Def.Words.Four_s.fsti"
} | [
"total"
] | [
"Prims.nat",
"Vale.Def.Words_s.four",
"Vale.Def.Words_s.natN",
"Prims.pow2",
"Vale.Def.Words_s.int_to_natN",
"Prims.op_Addition",
"FStar.Mul.op_Star",
"Prims.pos",
"Vale.Def.Words_s.pow2_norm"
] | [] | module Vale.Def.Words.Four_s
open Vale.Def.Words_s
open FStar.Mul
unfold let four_map (#a #b:Type) (f:a -> b) (x:four a) : four b =
let Mkfour x0 x1 x2 x3 = x in
Mkfour (f x0) (f x1) (f x2) (f x3)
unfold let four_map2 (#a #b:Type) (f:a -> a -> b) (x y:four a) : four b =
let Mkfour x0 x1 x2 x3 = x in
let Mkfour y0 y1 y2 y3 = y in
Mkfour (f x0 y0) (f x1 y1) (f x2 y2) (f x3 y3)
let two_two_to_four (#a:Type) (x:two (two a)) : four a =
let (Mktwo (Mktwo x0 x1) (Mktwo x2 x3)) = x in
Mkfour x0 x1 x2 x3
let four_to_two_two (#a:Type) (x:four a) : two (two a) =
let Mkfour x0 x1 x2 x3 = x in
Mktwo (Mktwo x0 x1) (Mktwo x2 x3)
unfold
let nat_to_four_unfold (size:nat) (n:natN (pow2 (4 * size))) : four (natN (pow2 size)) =
let n1 = pow2_norm size in
let n2 = pow2_norm (2 * size) in
let n3 = pow2_norm (3 * size) in
let n4 = pow2_norm (4 * size) in
Mkfour (n % n1) ((n / n1) % n1) ((n / n2) % n1) ((n / n3) % n1)
[@"opaque_to_smt"]
let nat_to_four (size:nat) (n:natN (pow2 (4 * size))) : four (natN (pow2 size)) =
nat_to_four_unfold size n | false | false | Vale.Def.Words.Four_s.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val four_to_nat_unfold (size: nat) (x: four (natN (pow2 size))) : natN (pow2 (4 * size)) | [] | Vale.Def.Words.Four_s.four_to_nat_unfold | {
"file_name": "vale/specs/defs/Vale.Def.Words.Four_s.fsti",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | size: Prims.nat -> x: Vale.Def.Words_s.four (Vale.Def.Words_s.natN (Prims.pow2 size))
-> Vale.Def.Words_s.natN (Prims.pow2 (4 * size)) | {
"end_col": 51,
"end_line": 41,
"start_col": 88,
"start_line": 35
} |
Prims.Tot | val nat_to_four_unfold (size: nat) (n: natN (pow2 (4 * size))) : four (natN (pow2 size)) | [
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let nat_to_four_unfold (size:nat) (n:natN (pow2 (4 * size))) : four (natN (pow2 size)) =
let n1 = pow2_norm size in
let n2 = pow2_norm (2 * size) in
let n3 = pow2_norm (3 * size) in
let n4 = pow2_norm (4 * size) in
Mkfour (n % n1) ((n / n1) % n1) ((n / n2) % n1) ((n / n3) % n1) | val nat_to_four_unfold (size: nat) (n: natN (pow2 (4 * size))) : four (natN (pow2 size))
let nat_to_four_unfold (size: nat) (n: natN (pow2 (4 * size))) : four (natN (pow2 size)) = | false | null | false | let n1 = pow2_norm size in
let n2 = pow2_norm (2 * size) in
let n3 = pow2_norm (3 * size) in
let n4 = pow2_norm (4 * size) in
Mkfour (n % n1) ((n / n1) % n1) ((n / n2) % n1) ((n / n3) % n1) | {
"checked_file": "Vale.Def.Words.Four_s.fsti.checked",
"dependencies": [
"Vale.Def.Words_s.fsti.checked",
"prims.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked"
],
"interface_file": false,
"source_file": "Vale.Def.Words.Four_s.fsti"
} | [
"total"
] | [
"Prims.nat",
"Vale.Def.Words_s.natN",
"Prims.pow2",
"FStar.Mul.op_Star",
"Vale.Def.Words_s.Mkfour",
"Prims.op_Modulus",
"Prims.op_Division",
"Prims.pos",
"Vale.Def.Words_s.pow2_norm",
"Vale.Def.Words_s.four"
] | [] | module Vale.Def.Words.Four_s
open Vale.Def.Words_s
open FStar.Mul
unfold let four_map (#a #b:Type) (f:a -> b) (x:four a) : four b =
let Mkfour x0 x1 x2 x3 = x in
Mkfour (f x0) (f x1) (f x2) (f x3)
unfold let four_map2 (#a #b:Type) (f:a -> a -> b) (x y:four a) : four b =
let Mkfour x0 x1 x2 x3 = x in
let Mkfour y0 y1 y2 y3 = y in
Mkfour (f x0 y0) (f x1 y1) (f x2 y2) (f x3 y3)
let two_two_to_four (#a:Type) (x:two (two a)) : four a =
let (Mktwo (Mktwo x0 x1) (Mktwo x2 x3)) = x in
Mkfour x0 x1 x2 x3
let four_to_two_two (#a:Type) (x:four a) : two (two a) =
let Mkfour x0 x1 x2 x3 = x in
Mktwo (Mktwo x0 x1) (Mktwo x2 x3) | false | false | Vale.Def.Words.Four_s.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val nat_to_four_unfold (size: nat) (n: natN (pow2 (4 * size))) : four (natN (pow2 size)) | [] | Vale.Def.Words.Four_s.nat_to_four_unfold | {
"file_name": "vale/specs/defs/Vale.Def.Words.Four_s.fsti",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | size: Prims.nat -> n: Vale.Def.Words_s.natN (Prims.pow2 (4 * size))
-> Vale.Def.Words_s.four (Vale.Def.Words_s.natN (Prims.pow2 size)) | {
"end_col": 65,
"end_line": 28,
"start_col": 88,
"start_line": 23
} |
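At size = 8 these conversions split a 32-bit natural into four bytes in little-endian limb order and reassemble it. A worked sketch, assuming the module is opened; bytes_of_word and the constant are illustrative, and the assert_norm is only there to discharge the natN (pow2 32) bound on the literal:

let bytes_of_word : four (natN (pow2 8)) =
  assert_norm (0x11223344 < pow2 32);
  nat_to_four 8 0x11223344
// By the definition above this is Mkfour 0x44 0x33 0x22 0x11, and
// four_to_nat 8 (Mkfour 0x44 0x33 0x22 0x11) rebuilds
// 0x44 + 0x33 * 0x100 + 0x22 * 0x10000 + 0x11 * 0x1000000 == 0x11223344.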
Prims.Tot | [
{
"abbrev": false,
"full_module": "Vale.Lib.Seqs",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCTR_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words.Four_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words.Seq_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let bytes_to_quad_size (num_bytes:nat) =
((num_bytes + 15) / 16) | let bytes_to_quad_size (num_bytes: nat) = | false | null | false | ((num_bytes + 15) / 16) | {
"checked_file": "Vale.AES.GCM_helpers.fsti.checked",
"dependencies": [
"Vale.Lib.Seqs.fsti.checked",
"Vale.Def.Words_s.fsti.checked",
"Vale.Def.Words.Seq_s.fsti.checked",
"Vale.Def.Words.Four_s.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Arch.Types.fsti.checked",
"Vale.AES.GCTR_s.fst.checked",
"Vale.AES.AES_s.fst.checked",
"prims.fst.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked"
],
"interface_file": false,
"source_file": "Vale.AES.GCM_helpers.fsti"
} | [
"total"
] | [
"Prims.nat",
"Prims.op_Division",
"Prims.op_Addition",
"Prims.int"
] | [] | module Vale.AES.GCM_helpers
open Vale.Def.Words_s
open Vale.Def.Words.Seq_s
open Vale.Def.Words.Four_s
open Vale.Def.Types_s
open Vale.Arch.Types
open FStar.Mul
open FStar.Seq
open Vale.AES.AES_s
open Vale.AES.GCTR_s
open FStar.Math.Lemmas
open Vale.Lib.Seqs | false | true | Vale.AES.GCM_helpers.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val bytes_to_quad_size : num_bytes: Prims.nat -> Prims.int | [] | Vale.AES.GCM_helpers.bytes_to_quad_size | {
"file_name": "vale/code/crypto/aes/Vale.AES.GCM_helpers.fsti",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | num_bytes: Prims.nat -> Prims.int | {
"end_col": 25,
"end_line": 16,
"start_col": 2,
"start_line": 16
} |
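bytes_to_quad_size is the ceiling of num_bytes / 16: the number of 16-byte quadwords needed to cover num_bytes bytes. A quick illustrative check, assuming Vale.AES.GCM_helpers is opened (the anonymous lets are not part of the source):

let _ = assert_norm (bytes_to_quad_size 0 = 0)
let _ = assert_norm (bytes_to_quad_size 1 = 1)
let _ = assert_norm (bytes_to_quad_size 16 = 1)
let _ = assert_norm (bytes_to_quad_size 17 = 2)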
|
FStar.Tactics.Effect.Tac | [
{
"abbrev": false,
"full_module": "Vale.Curve25519.Fast_defs",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Tactics.CanonCommSemiring",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Tactics",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Curve25519",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Curve25519",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let int_canon = fun _ -> norm [delta; zeta; iota]; int_semiring () | let int_canon = | true | null | false | fun _ ->
norm [delta; zeta; iota];
int_semiring () | {
"checked_file": "Vale.Curve25519.FastMul_helpers.fsti.checked",
"dependencies": [
"Vale.Def.Words_s.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Curve25519.Fast_defs.fst.checked",
"prims.fst.checked",
"FStar.Tactics.CanonCommSemiring.fst.checked",
"FStar.Tactics.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked"
],
"interface_file": false,
"source_file": "Vale.Curve25519.FastMul_helpers.fsti"
} | [] | [
"FStar.Tactics.CanonCommSemiring.int_semiring",
"Prims.unit",
"FStar.Tactics.V1.Builtins.norm",
"Prims.Cons",
"FStar.Pervasives.norm_step",
"FStar.Pervasives.delta",
"FStar.Pervasives.zeta",
"FStar.Pervasives.iota",
"Prims.Nil"
] | [] | module Vale.Curve25519.FastMul_helpers
open Vale.Def.Words_s
open Vale.Def.Types_s
open FStar.Mul
open FStar.Tactics
open FStar.Tactics.CanonCommSemiring
open Vale.Curve25519.Fast_defs | false | false | Vale.Curve25519.FastMul_helpers.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val int_canon : _: _ -> FStar.Tactics.Effect.Tac Prims.unit | [] | Vale.Curve25519.FastMul_helpers.int_canon | {
"file_name": "vale/code/crypto/ecc/curve25519/Vale.Curve25519.FastMul_helpers.fsti",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | _: _ -> FStar.Tactics.Effect.Tac Prims.unit | {
"end_col": 66,
"end_line": 11,
"start_col": 16,
"start_line": 11
} |
|
Prims.Tot | val merkle_tree_conditions
(#hsz: Ghost.erased hash_size_t)
(offset: uint64_t)
(i j: uint32_t)
(hs: hash_vv hsz)
(rhs_ok: bool)
(rhs: hash_vec #hsz)
(mroot: hash #hsz)
: Tot bool | [
{
"abbrev": false,
"full_module": "MerkleTree.Low.VectorExtras",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree.Low.Hashfunctions",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree.Low.Datastructures",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": true,
"full_module": "MerkleTree.Spec",
"short_module": "MTS"
},
{
"abbrev": true,
"full_module": "MerkleTree.New.High",
"short_module": "MTH"
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "LowStar.Regional.Instances",
"short_module": "RVI"
},
{
"abbrev": true,
"full_module": "LowStar.RVector",
"short_module": "RV"
},
{
"abbrev": true,
"full_module": "LowStar.Vector",
"short_module": "V"
},
{
"abbrev": true,
"full_module": "LowStar.ConstBuffer",
"short_module": "CB"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.Monotonic.HyperHeap",
"short_module": "HH"
},
{
"abbrev": true,
"full_module": "FStar.Monotonic.HyperStack",
"short_module": "MHS"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "LowStar.Regional.Instances",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.RVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Regional",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Vector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.BufferOps",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Buffer",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Integers",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.All",
"short_module": null
},
{
"abbrev": false,
"full_module": "EverCrypt.Helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let merkle_tree_conditions (#hsz:Ghost.erased hash_size_t) (offset:uint64_t) (i j:uint32_t) (hs:hash_vv hsz) (rhs_ok:bool) (rhs:hash_vec #hsz) (mroot:hash #hsz): Tot bool =
j >= i && add64_fits offset j &&
V.size_of hs = merkle_tree_size_lg &&
V.size_of rhs = merkle_tree_size_lg | val merkle_tree_conditions
(#hsz: Ghost.erased hash_size_t)
(offset: uint64_t)
(i j: uint32_t)
(hs: hash_vv hsz)
(rhs_ok: bool)
(rhs: hash_vec #hsz)
(mroot: hash #hsz)
: Tot bool
let merkle_tree_conditions
(#hsz: Ghost.erased hash_size_t)
(offset: uint64_t)
(i j: uint32_t)
(hs: hash_vv hsz)
(rhs_ok: bool)
(rhs: hash_vec #hsz)
(mroot: hash #hsz)
: Tot bool = | false | null | false | j >= i && add64_fits offset j && V.size_of hs = merkle_tree_size_lg &&
V.size_of rhs = merkle_tree_size_lg | {
"checked_file": "MerkleTree.Low.fst.checked",
"dependencies": [
"prims.fst.checked",
"MerkleTree.Spec.fst.checked",
"MerkleTree.New.High.fst.checked",
"MerkleTree.Low.VectorExtras.fst.checked",
"MerkleTree.Low.Hashfunctions.fst.checked",
"MerkleTree.Low.Datastructures.fst.checked",
"LowStar.Vector.fst.checked",
"LowStar.RVector.fst.checked",
"LowStar.Regional.Instances.fst.checked",
"LowStar.Regional.fst.checked",
"LowStar.ConstBuffer.fsti.checked",
"LowStar.BufferOps.fst.checked",
"LowStar.Buffer.fst.checked",
"Lib.IntTypes.fsti.checked",
"Lib.ByteBuffer.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.Properties.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Monotonic.HyperStack.fsti.checked",
"FStar.Monotonic.HyperHeap.fsti.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Integers.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked",
"FStar.All.fst.checked",
"EverCrypt.Helpers.fsti.checked"
],
"interface_file": false,
"source_file": "MerkleTree.Low.fst"
} | [
"total"
] | [
"FStar.Ghost.erased",
"MerkleTree.Low.Datastructures.hash_size_t",
"EverCrypt.Helpers.uint64_t",
"LowStar.Vector.uint32_t",
"MerkleTree.Low.Datastructures.hash_vv",
"FStar.Ghost.reveal",
"Prims.bool",
"MerkleTree.Low.Datastructures.hash_vec",
"MerkleTree.Low.Datastructures.hash",
"Prims.op_AmpAmp",
"FStar.Integers.op_Greater_Equals",
"FStar.Integers.Unsigned",
"FStar.Integers.W32",
"MerkleTree.Low.add64_fits",
"Prims.op_Equality",
"LowStar.Vector.size_of",
"MerkleTree.Low.merkle_tree_size_lg"
] | [] | module MerkleTree.Low
open EverCrypt.Helpers
open FStar.All
open FStar.Integers
open FStar.Mul
open LowStar.Buffer
open LowStar.BufferOps
open LowStar.Vector
open LowStar.Regional
open LowStar.RVector
open LowStar.Regional.Instances
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
module MHS = FStar.Monotonic.HyperStack
module HH = FStar.Monotonic.HyperHeap
module B = LowStar.Buffer
module CB = LowStar.ConstBuffer
module V = LowStar.Vector
module RV = LowStar.RVector
module RVI = LowStar.Regional.Instances
module S = FStar.Seq
module U32 = FStar.UInt32
module U64 = FStar.UInt64
module MTH = MerkleTree.New.High
module MTS = MerkleTree.Spec
open Lib.IntTypes
open MerkleTree.Low.Datastructures
open MerkleTree.Low.Hashfunctions
open MerkleTree.Low.VectorExtras
#set-options "--z3rlimit 10 --initial_fuel 0 --max_fuel 0 --initial_ifuel 0 --max_ifuel 0"
type const_pointer (a:Type0) = b:CB.const_buffer a{CB.length b == 1 /\ CB.qual_of b == CB.MUTABLE}
/// Low-level Merkle tree data structure
///
// NOTE: because of a lack of 64-bit LowStar.Buffer support, currently
// we cannot change below to some other types.
type index_t = uint32_t
let uint32_32_max = 4294967295ul
inline_for_extraction
let uint32_max = 4294967295UL
let uint64_max = 18446744073709551615UL
let offset_range_limit = uint32_max
type offset_t = uint64_t
inline_for_extraction noextract unfold let u32_64 = Int.Cast.uint32_to_uint64
inline_for_extraction noextract unfold let u64_32 = Int.Cast.uint64_to_uint32
private inline_for_extraction
let offsets_connect (x:offset_t) (y:offset_t): Tot bool = y >= x && (y - x) <= offset_range_limit
private inline_for_extraction
let split_offset (tree:offset_t) (index:offset_t{offsets_connect tree index}): Tot index_t =
[@inline_let] let diff = U64.sub_mod index tree in
assert (diff <= offset_range_limit);
Int.Cast.uint64_to_uint32 diff
private inline_for_extraction
let add64_fits (x:offset_t) (i:index_t): Tot bool = uint64_max - x >= (u32_64 i)
private inline_for_extraction
let join_offset (tree:offset_t) (i:index_t{add64_fits tree i}): Tot (r:offset_t{offsets_connect tree r}) =
U64.add tree (u32_64 i)
inline_for_extraction val merkle_tree_size_lg: uint32_t
let merkle_tree_size_lg = 32ul
// A Merkle tree `MT i j hs rhs_ok rhs` stores all necessary hashes to generate
// a Merkle path for each element from the index `i` to `j-1`.
// - Parameters
// `hs`: a 2-dim store for hashes, where `hs[0]` contains leaf hash values.
// `rhs_ok`: to check the rightmost hashes are up-to-date
// `rhs`: a store for "rightmost" hashes, manipulated only when required to
// calculate some merkle paths that need the rightmost hashes
// as a part of them.
// `mroot`: during the construction of `rhs` we can also calculate the Merkle
// root of the tree. If `rhs_ok` is true then it has the up-to-date
// root value.
noeq type merkle_tree =
| MT: hash_size:hash_size_t ->
offset:offset_t ->
i:index_t -> j:index_t{i <= j /\ add64_fits offset j} ->
hs:hash_vv hash_size {V.size_of hs = merkle_tree_size_lg} ->
rhs_ok:bool ->
rhs:hash_vec #hash_size {V.size_of rhs = merkle_tree_size_lg} ->
mroot:hash #hash_size ->
hash_spec:Ghost.erased (MTS.hash_fun_t #(U32.v hash_size)) ->
hash_fun:hash_fun_t #hash_size #hash_spec ->
merkle_tree
type mt_p = B.pointer merkle_tree
type const_mt_p = const_pointer merkle_tree
inline_for_extraction | false | false | MerkleTree.Low.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 0,
"initial_ifuel": 0,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 10,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val merkle_tree_conditions
(#hsz: Ghost.erased hash_size_t)
(offset: uint64_t)
(i j: uint32_t)
(hs: hash_vv hsz)
(rhs_ok: bool)
(rhs: hash_vec #hsz)
(mroot: hash #hsz)
: Tot bool | [] | MerkleTree.Low.merkle_tree_conditions | {
"file_name": "src/MerkleTree.Low.fst",
"git_rev": "3b0f086655c145aa23f58a97259ebf4cf112a4a3",
"git_url": "https://github.com/hacl-star/merkle-tree.git",
"project_name": "merkle-tree"
} |
offset: EverCrypt.Helpers.uint64_t ->
i: LowStar.Vector.uint32_t ->
j: LowStar.Vector.uint32_t ->
hs: MerkleTree.Low.Datastructures.hash_vv (FStar.Ghost.reveal hsz) ->
rhs_ok: Prims.bool ->
rhs: MerkleTree.Low.Datastructures.hash_vec ->
mroot: MerkleTree.Low.Datastructures.hash
-> Prims.bool | {
"end_col": 37,
"end_line": 110,
"start_col": 2,
"start_line": 108
} |
Prims.Tot | [
{
"abbrev": false,
"full_module": "MerkleTree.Low.VectorExtras",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree.Low.Hashfunctions",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree.Low.Datastructures",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": true,
"full_module": "MerkleTree.Spec",
"short_module": "MTS"
},
{
"abbrev": true,
"full_module": "MerkleTree.New.High",
"short_module": "MTH"
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "LowStar.Regional.Instances",
"short_module": "RVI"
},
{
"abbrev": true,
"full_module": "LowStar.RVector",
"short_module": "RV"
},
{
"abbrev": true,
"full_module": "LowStar.Vector",
"short_module": "V"
},
{
"abbrev": true,
"full_module": "LowStar.ConstBuffer",
"short_module": "CB"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.Monotonic.HyperHeap",
"short_module": "HH"
},
{
"abbrev": true,
"full_module": "FStar.Monotonic.HyperStack",
"short_module": "MHS"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "LowStar.Regional.Instances",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.RVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Regional",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Vector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.BufferOps",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Buffer",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Integers",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.All",
"short_module": null
},
{
"abbrev": false,
"full_module": "EverCrypt.Helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let uint64_max = 18446744073709551615UL | let uint64_max = | false | null | false | 18446744073709551615uL | {
"checked_file": "MerkleTree.Low.fst.checked",
"dependencies": [
"prims.fst.checked",
"MerkleTree.Spec.fst.checked",
"MerkleTree.New.High.fst.checked",
"MerkleTree.Low.VectorExtras.fst.checked",
"MerkleTree.Low.Hashfunctions.fst.checked",
"MerkleTree.Low.Datastructures.fst.checked",
"LowStar.Vector.fst.checked",
"LowStar.RVector.fst.checked",
"LowStar.Regional.Instances.fst.checked",
"LowStar.Regional.fst.checked",
"LowStar.ConstBuffer.fsti.checked",
"LowStar.BufferOps.fst.checked",
"LowStar.Buffer.fst.checked",
"Lib.IntTypes.fsti.checked",
"Lib.ByteBuffer.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.Properties.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Monotonic.HyperStack.fsti.checked",
"FStar.Monotonic.HyperHeap.fsti.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Integers.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked",
"FStar.All.fst.checked",
"EverCrypt.Helpers.fsti.checked"
],
"interface_file": false,
"source_file": "MerkleTree.Low.fst"
} | [
"total"
] | [
"FStar.UInt64.__uint_to_t"
] | [] | module MerkleTree.Low
open EverCrypt.Helpers
open FStar.All
open FStar.Integers
open FStar.Mul
open LowStar.Buffer
open LowStar.BufferOps
open LowStar.Vector
open LowStar.Regional
open LowStar.RVector
open LowStar.Regional.Instances
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
module MHS = FStar.Monotonic.HyperStack
module HH = FStar.Monotonic.HyperHeap
module B = LowStar.Buffer
module CB = LowStar.ConstBuffer
module V = LowStar.Vector
module RV = LowStar.RVector
module RVI = LowStar.Regional.Instances
module S = FStar.Seq
module U32 = FStar.UInt32
module U64 = FStar.UInt64
module MTH = MerkleTree.New.High
module MTS = MerkleTree.Spec
open Lib.IntTypes
open MerkleTree.Low.Datastructures
open MerkleTree.Low.Hashfunctions
open MerkleTree.Low.VectorExtras
#set-options "--z3rlimit 10 --initial_fuel 0 --max_fuel 0 --initial_ifuel 0 --max_ifuel 0"
type const_pointer (a:Type0) = b:CB.const_buffer a{CB.length b == 1 /\ CB.qual_of b == CB.MUTABLE}
/// Low-level Merkle tree data structure
///
// NOTE: because of a lack of 64-bit LowStar.Buffer support, currently
// we cannot change below to some other types.
type index_t = uint32_t
let uint32_32_max = 4294967295ul
inline_for_extraction | false | true | MerkleTree.Low.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 0,
"initial_ifuel": 0,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 10,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val uint64_max : FStar.UInt64.t | [] | MerkleTree.Low.uint64_max | {
"file_name": "src/MerkleTree.Low.fst",
"git_rev": "3b0f086655c145aa23f58a97259ebf4cf112a4a3",
"git_url": "https://github.com/hacl-star/merkle-tree.git",
"project_name": "merkle-tree"
} | FStar.UInt64.t | {
"end_col": 39,
"end_line": 54,
"start_col": 17,
"start_line": 54
} |
|
Prims.GTot | val mt_loc: mt_p -> GTot loc | [
{
"abbrev": false,
"full_module": "MerkleTree.Low.VectorExtras",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree.Low.Hashfunctions",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree.Low.Datastructures",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": true,
"full_module": "MerkleTree.Spec",
"short_module": "MTS"
},
{
"abbrev": true,
"full_module": "MerkleTree.New.High",
"short_module": "MTH"
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "LowStar.Regional.Instances",
"short_module": "RVI"
},
{
"abbrev": true,
"full_module": "LowStar.RVector",
"short_module": "RV"
},
{
"abbrev": true,
"full_module": "LowStar.Vector",
"short_module": "V"
},
{
"abbrev": true,
"full_module": "LowStar.ConstBuffer",
"short_module": "CB"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.Monotonic.HyperHeap",
"short_module": "HH"
},
{
"abbrev": true,
"full_module": "FStar.Monotonic.HyperStack",
"short_module": "MHS"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "LowStar.Regional.Instances",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.RVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Regional",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Vector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.BufferOps",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Buffer",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Integers",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.All",
"short_module": null
},
{
"abbrev": false,
"full_module": "EverCrypt.Helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let mt_loc mt = B.loc_all_regions_from false (B.frameOf mt) | val mt_loc: mt_p -> GTot loc
let mt_loc mt = | false | null | false | B.loc_all_regions_from false (B.frameOf mt) | {
"checked_file": "MerkleTree.Low.fst.checked",
"dependencies": [
"prims.fst.checked",
"MerkleTree.Spec.fst.checked",
"MerkleTree.New.High.fst.checked",
"MerkleTree.Low.VectorExtras.fst.checked",
"MerkleTree.Low.Hashfunctions.fst.checked",
"MerkleTree.Low.Datastructures.fst.checked",
"LowStar.Vector.fst.checked",
"LowStar.RVector.fst.checked",
"LowStar.Regional.Instances.fst.checked",
"LowStar.Regional.fst.checked",
"LowStar.ConstBuffer.fsti.checked",
"LowStar.BufferOps.fst.checked",
"LowStar.Buffer.fst.checked",
"Lib.IntTypes.fsti.checked",
"Lib.ByteBuffer.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.Properties.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Monotonic.HyperStack.fsti.checked",
"FStar.Monotonic.HyperHeap.fsti.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Integers.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked",
"FStar.All.fst.checked",
"EverCrypt.Helpers.fsti.checked"
],
"interface_file": false,
"source_file": "MerkleTree.Low.fst"
} | [
"sometrivial"
] | [
"MerkleTree.Low.mt_p",
"LowStar.Monotonic.Buffer.loc_all_regions_from",
"LowStar.Monotonic.Buffer.frameOf",
"MerkleTree.Low.merkle_tree",
"LowStar.Buffer.trivial_preorder",
"LowStar.Monotonic.Buffer.loc"
] | [] | module MerkleTree.Low
open EverCrypt.Helpers
open FStar.All
open FStar.Integers
open FStar.Mul
open LowStar.Buffer
open LowStar.BufferOps
open LowStar.Vector
open LowStar.Regional
open LowStar.RVector
open LowStar.Regional.Instances
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
module MHS = FStar.Monotonic.HyperStack
module HH = FStar.Monotonic.HyperHeap
module B = LowStar.Buffer
module CB = LowStar.ConstBuffer
module V = LowStar.Vector
module RV = LowStar.RVector
module RVI = LowStar.Regional.Instances
module S = FStar.Seq
module U32 = FStar.UInt32
module U64 = FStar.UInt64
module MTH = MerkleTree.New.High
module MTS = MerkleTree.Spec
open Lib.IntTypes
open MerkleTree.Low.Datastructures
open MerkleTree.Low.Hashfunctions
open MerkleTree.Low.VectorExtras
#set-options "--z3rlimit 10 --initial_fuel 0 --max_fuel 0 --initial_ifuel 0 --max_ifuel 0"
type const_pointer (a:Type0) = b:CB.const_buffer a{CB.length b == 1 /\ CB.qual_of b == CB.MUTABLE}
/// Low-level Merkle tree data structure
///
// NOTE: because of a lack of 64-bit LowStar.Buffer support, currently
// we cannot change below to some other types.
type index_t = uint32_t
let uint32_32_max = 4294967295ul
inline_for_extraction
let uint32_max = 4294967295UL
let uint64_max = 18446744073709551615UL
let offset_range_limit = uint32_max
type offset_t = uint64_t
inline_for_extraction noextract unfold let u32_64 = Int.Cast.uint32_to_uint64
inline_for_extraction noextract unfold let u64_32 = Int.Cast.uint64_to_uint32
private inline_for_extraction
let offsets_connect (x:offset_t) (y:offset_t): Tot bool = y >= x && (y - x) <= offset_range_limit
private inline_for_extraction
let split_offset (tree:offset_t) (index:offset_t{offsets_connect tree index}): Tot index_t =
[@inline_let] let diff = U64.sub_mod index tree in
assert (diff <= offset_range_limit);
Int.Cast.uint64_to_uint32 diff
private inline_for_extraction
let add64_fits (x:offset_t) (i:index_t): Tot bool = uint64_max - x >= (u32_64 i)
private inline_for_extraction
let join_offset (tree:offset_t) (i:index_t{add64_fits tree i}): Tot (r:offset_t{offsets_connect tree r}) =
U64.add tree (u32_64 i)
inline_for_extraction val merkle_tree_size_lg: uint32_t
let merkle_tree_size_lg = 32ul
// A Merkle tree `MT i j hs rhs_ok rhs` stores all necessary hashes to generate
// a Merkle path for each element from the index `i` to `j-1`.
// - Parameters
// `hs`: a 2-dim store for hashes, where `hs[0]` contains leaf hash values.
// `rhs_ok`: to check the rightmost hashes are up-to-date
// `rhs`: a store for "rightmost" hashes, manipulated only when required to
// calculate some merkle paths that need the rightmost hashes
// as a part of them.
// `mroot`: during the construction of `rhs` we can also calculate the Merkle
// root of the tree. If `rhs_ok` is true then it has the up-to-date
// root value.
noeq type merkle_tree =
| MT: hash_size:hash_size_t ->
offset:offset_t ->
i:index_t -> j:index_t{i <= j /\ add64_fits offset j} ->
hs:hash_vv hash_size {V.size_of hs = merkle_tree_size_lg} ->
rhs_ok:bool ->
rhs:hash_vec #hash_size {V.size_of rhs = merkle_tree_size_lg} ->
mroot:hash #hash_size ->
hash_spec:Ghost.erased (MTS.hash_fun_t #(U32.v hash_size)) ->
hash_fun:hash_fun_t #hash_size #hash_spec ->
merkle_tree
type mt_p = B.pointer merkle_tree
type const_mt_p = const_pointer merkle_tree
inline_for_extraction
let merkle_tree_conditions (#hsz:Ghost.erased hash_size_t) (offset:uint64_t) (i j:uint32_t) (hs:hash_vv hsz) (rhs_ok:bool) (rhs:hash_vec #hsz) (mroot:hash #hsz): Tot bool =
j >= i && add64_fits offset j &&
V.size_of hs = merkle_tree_size_lg &&
V.size_of rhs = merkle_tree_size_lg
// The maximum number of currently held elements in the tree is (2^32 - 1).
// cwinter: even when using 64-bit indices, we fail if the underlying 32-bit
// vector is full; this can be fixed if necessary.
private inline_for_extraction
val mt_not_full_nst: mtv:merkle_tree -> Tot bool
let mt_not_full_nst mtv = MT?.j mtv < uint32_32_max
val mt_not_full: HS.mem -> mt_p -> GTot bool
let mt_not_full h mt = mt_not_full_nst (B.get h mt 0)
/// (Memory) Safety
val offset_of: i:index_t -> Tot index_t
let offset_of i = if i % 2ul = 0ul then i else i - 1ul
// `mt_safe_elts` says that it is safe to access an element from `i` to `j - 1`
// at level `lv` in the Merkle tree, i.e., hs[lv][k] (i <= k < j) is a valid
// element.
inline_for_extraction noextract
val mt_safe_elts:
#hsz:hash_size_t ->
h:HS.mem -> lv:uint32_t{lv <= merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
i:index_t -> j:index_t{j >= i} ->
GTot Type0 (decreases (32 - U32.v lv))
let rec mt_safe_elts #hsz h lv hs i j =
if lv = merkle_tree_size_lg then true
else (let ofs = offset_of i in
V.size_of (V.get h hs lv) == j - ofs /\
mt_safe_elts #hsz h (lv + 1ul) hs (i / 2ul) (j / 2ul))
#push-options "--initial_fuel 1 --max_fuel 1"
val mt_safe_elts_constr:
#hsz:hash_size_t ->
h:HS.mem -> lv:uint32_t{lv < merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
i:index_t -> j:index_t{j >= i} ->
Lemma (requires (V.size_of (V.get h hs lv) == j - offset_of i /\
mt_safe_elts #hsz h (lv + 1ul) hs (i / 2ul) (j / 2ul)))
(ensures (mt_safe_elts #hsz h lv hs i j))
let mt_safe_elts_constr #_ h lv hs i j = ()
val mt_safe_elts_head:
#hsz:hash_size_t ->
h:HS.mem -> lv:uint32_t{lv < merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
i:index_t -> j:index_t{j >= i} ->
Lemma (requires (mt_safe_elts #hsz h lv hs i j))
(ensures (V.size_of (V.get h hs lv) == j - offset_of i))
let mt_safe_elts_head #_ h lv hs i j = ()
val mt_safe_elts_rec:
#hsz:hash_size_t ->
h:HS.mem -> lv:uint32_t{lv < merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
i:index_t -> j:index_t{j >= i} ->
Lemma (requires (mt_safe_elts #hsz h lv hs i j))
(ensures (mt_safe_elts #hsz h (lv + 1ul) hs (i / 2ul) (j / 2ul)))
let mt_safe_elts_rec #_ h lv hs i j = ()
val mt_safe_elts_init:
#hsz:hash_size_t ->
h:HS.mem -> lv:uint32_t{lv <= merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
Lemma (requires (V.forall_ h hs lv (V.size_of hs)
(fun hv -> V.size_of hv = 0ul)))
(ensures (mt_safe_elts #hsz h lv hs 0ul 0ul))
(decreases (32 - U32.v lv))
let rec mt_safe_elts_init #hsz h lv hs =
if lv = merkle_tree_size_lg then ()
else mt_safe_elts_init #hsz h (lv + 1ul) hs
#pop-options
val mt_safe_elts_preserved:
#hsz:hash_size_t ->
lv:uint32_t{lv <= merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
i:index_t -> j:index_t{j >= i} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 hs /\
mt_safe_elts #hsz h0 lv hs i j /\
loc_disjoint p (V.loc_vector_within hs lv (V.size_of hs)) /\
modifies p h0 h1))
(ensures (mt_safe_elts #hsz h1 lv hs i j))
(decreases (32 - U32.v lv))
[SMTPat (V.live h0 hs);
SMTPat (mt_safe_elts #hsz h0 lv hs i j);
SMTPat (loc_disjoint p (RV.loc_rvector hs));
SMTPat (modifies p h0 h1)]
#push-options "--z3rlimit 100 --initial_fuel 2 --max_fuel 2"
let rec mt_safe_elts_preserved #hsz lv hs i j p h0 h1 =
if lv = merkle_tree_size_lg then ()
else (V.get_preserved hs lv p h0 h1;
mt_safe_elts_preserved #hsz (lv + 1ul) hs (i / 2ul) (j / 2ul) p h0 h1)
#pop-options
// `mt_safe` is the invariant of a Merkle tree through its lifetime.
// It includes liveness, regionality, disjointness (to each data structure),
// and valid element access (`mt_safe_elts`).
inline_for_extraction noextract
val mt_safe: HS.mem -> mt_p -> GTot Type0
let mt_safe h mt =
B.live h mt /\ B.freeable mt /\
(let mtv = B.get h mt 0 in
// Liveness & Accessibility
RV.rv_inv h (MT?.hs mtv) /\
RV.rv_inv h (MT?.rhs mtv) /\
Rgl?.r_inv (hreg (MT?.hash_size mtv)) h (MT?.mroot mtv) /\
mt_safe_elts h 0ul (MT?.hs mtv) (MT?.i mtv) (MT?.j mtv) /\
// Regionality
HH.extends (V.frameOf (MT?.hs mtv)) (B.frameOf mt) /\
HH.extends (V.frameOf (MT?.rhs mtv)) (B.frameOf mt) /\
HH.extends (B.frameOf (MT?.mroot mtv)) (B.frameOf mt) /\
HH.disjoint (V.frameOf (MT?.hs mtv)) (V.frameOf (MT?.rhs mtv)) /\
HH.disjoint (V.frameOf (MT?.hs mtv)) (B.frameOf (MT?.mroot mtv)) /\
HH.disjoint (V.frameOf (MT?.rhs mtv)) (B.frameOf (MT?.mroot mtv)))
// Since a Merkle tree satisfies regionality, it's ok to take all regions from
// a tree pointer as a location of the tree. | false | false | MerkleTree.Low.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 0,
"initial_ifuel": 0,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 10,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val mt_loc: mt_p -> GTot loc | [] | MerkleTree.Low.mt_loc | {
"file_name": "src/MerkleTree.Low.fst",
"git_rev": "3b0f086655c145aa23f58a97259ebf4cf112a4a3",
"git_url": "https://github.com/hacl-star/merkle-tree.git",
"project_name": "merkle-tree"
} | mt: MerkleTree.Low.mt_p -> Prims.GTot LowStar.Monotonic.Buffer.loc | {
"end_col": 59,
"end_line": 232,
"start_col": 16,
"start_line": 232
} |
Prims.Tot | [
{
"abbrev": false,
"full_module": "MerkleTree.Low.VectorExtras",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree.Low.Hashfunctions",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree.Low.Datastructures",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": true,
"full_module": "MerkleTree.Spec",
"short_module": "MTS"
},
{
"abbrev": true,
"full_module": "MerkleTree.New.High",
"short_module": "MTH"
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "LowStar.Regional.Instances",
"short_module": "RVI"
},
{
"abbrev": true,
"full_module": "LowStar.RVector",
"short_module": "RV"
},
{
"abbrev": true,
"full_module": "LowStar.Vector",
"short_module": "V"
},
{
"abbrev": true,
"full_module": "LowStar.ConstBuffer",
"short_module": "CB"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.Monotonic.HyperHeap",
"short_module": "HH"
},
{
"abbrev": true,
"full_module": "FStar.Monotonic.HyperStack",
"short_module": "MHS"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "LowStar.Regional.Instances",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.RVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Regional",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Vector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.BufferOps",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Buffer",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Integers",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.All",
"short_module": null
},
{
"abbrev": false,
"full_module": "EverCrypt.Helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let uint32_32_max = 4294967295ul | let uint32_32_max = | false | null | false | 4294967295ul | {
"checked_file": "MerkleTree.Low.fst.checked",
"dependencies": [
"prims.fst.checked",
"MerkleTree.Spec.fst.checked",
"MerkleTree.New.High.fst.checked",
"MerkleTree.Low.VectorExtras.fst.checked",
"MerkleTree.Low.Hashfunctions.fst.checked",
"MerkleTree.Low.Datastructures.fst.checked",
"LowStar.Vector.fst.checked",
"LowStar.RVector.fst.checked",
"LowStar.Regional.Instances.fst.checked",
"LowStar.Regional.fst.checked",
"LowStar.ConstBuffer.fsti.checked",
"LowStar.BufferOps.fst.checked",
"LowStar.Buffer.fst.checked",
"Lib.IntTypes.fsti.checked",
"Lib.ByteBuffer.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.Properties.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Monotonic.HyperStack.fsti.checked",
"FStar.Monotonic.HyperHeap.fsti.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Integers.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked",
"FStar.All.fst.checked",
"EverCrypt.Helpers.fsti.checked"
],
"interface_file": false,
"source_file": "MerkleTree.Low.fst"
} | [
"total"
] | [
"FStar.UInt32.__uint_to_t"
] | [] | module MerkleTree.Low
open EverCrypt.Helpers
open FStar.All
open FStar.Integers
open FStar.Mul
open LowStar.Buffer
open LowStar.BufferOps
open LowStar.Vector
open LowStar.Regional
open LowStar.RVector
open LowStar.Regional.Instances
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
module MHS = FStar.Monotonic.HyperStack
module HH = FStar.Monotonic.HyperHeap
module B = LowStar.Buffer
module CB = LowStar.ConstBuffer
module V = LowStar.Vector
module RV = LowStar.RVector
module RVI = LowStar.Regional.Instances
module S = FStar.Seq
module U32 = FStar.UInt32
module U64 = FStar.UInt64
module MTH = MerkleTree.New.High
module MTS = MerkleTree.Spec
open Lib.IntTypes
open MerkleTree.Low.Datastructures
open MerkleTree.Low.Hashfunctions
open MerkleTree.Low.VectorExtras
#set-options "--z3rlimit 10 --initial_fuel 0 --max_fuel 0 --initial_ifuel 0 --max_ifuel 0"
type const_pointer (a:Type0) = b:CB.const_buffer a{CB.length b == 1 /\ CB.qual_of b == CB.MUTABLE}
/// Low-level Merkle tree data structure
///
// NOTE: because of a lack of 64-bit LowStar.Buffer support, currently
// we cannot change below to some other types.
type index_t = uint32_t | false | true | MerkleTree.Low.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 0,
"initial_ifuel": 0,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 10,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val uint32_32_max : FStar.UInt32.t | [] | MerkleTree.Low.uint32_32_max | {
"file_name": "src/MerkleTree.Low.fst",
"git_rev": "3b0f086655c145aa23f58a97259ebf4cf112a4a3",
"git_url": "https://github.com/hacl-star/merkle-tree.git",
"project_name": "merkle-tree"
} | FStar.UInt32.t | {
"end_col": 32,
"end_line": 51,
"start_col": 20,
"start_line": 51
} |
|
Prims.Tot | [
{
"abbrev": false,
"full_module": "MerkleTree.Low.VectorExtras",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree.Low.Hashfunctions",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree.Low.Datastructures",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": true,
"full_module": "MerkleTree.Spec",
"short_module": "MTS"
},
{
"abbrev": true,
"full_module": "MerkleTree.New.High",
"short_module": "MTH"
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "LowStar.Regional.Instances",
"short_module": "RVI"
},
{
"abbrev": true,
"full_module": "LowStar.RVector",
"short_module": "RV"
},
{
"abbrev": true,
"full_module": "LowStar.Vector",
"short_module": "V"
},
{
"abbrev": true,
"full_module": "LowStar.ConstBuffer",
"short_module": "CB"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.Monotonic.HyperHeap",
"short_module": "HH"
},
{
"abbrev": true,
"full_module": "FStar.Monotonic.HyperStack",
"short_module": "MHS"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "LowStar.Regional.Instances",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.RVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Regional",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Vector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.BufferOps",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Buffer",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Integers",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.All",
"short_module": null
},
{
"abbrev": false,
"full_module": "EverCrypt.Helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let uint32_max = 4294967295UL | let uint32_max = | false | null | false | 4294967295uL | {
"checked_file": "MerkleTree.Low.fst.checked",
"dependencies": [
"prims.fst.checked",
"MerkleTree.Spec.fst.checked",
"MerkleTree.New.High.fst.checked",
"MerkleTree.Low.VectorExtras.fst.checked",
"MerkleTree.Low.Hashfunctions.fst.checked",
"MerkleTree.Low.Datastructures.fst.checked",
"LowStar.Vector.fst.checked",
"LowStar.RVector.fst.checked",
"LowStar.Regional.Instances.fst.checked",
"LowStar.Regional.fst.checked",
"LowStar.ConstBuffer.fsti.checked",
"LowStar.BufferOps.fst.checked",
"LowStar.Buffer.fst.checked",
"Lib.IntTypes.fsti.checked",
"Lib.ByteBuffer.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.Properties.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Monotonic.HyperStack.fsti.checked",
"FStar.Monotonic.HyperHeap.fsti.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Integers.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked",
"FStar.All.fst.checked",
"EverCrypt.Helpers.fsti.checked"
],
"interface_file": false,
"source_file": "MerkleTree.Low.fst"
} | [
"total"
] | [
"FStar.UInt64.__uint_to_t"
] | [] | module MerkleTree.Low
open EverCrypt.Helpers
open FStar.All
open FStar.Integers
open FStar.Mul
open LowStar.Buffer
open LowStar.BufferOps
open LowStar.Vector
open LowStar.Regional
open LowStar.RVector
open LowStar.Regional.Instances
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
module MHS = FStar.Monotonic.HyperStack
module HH = FStar.Monotonic.HyperHeap
module B = LowStar.Buffer
module CB = LowStar.ConstBuffer
module V = LowStar.Vector
module RV = LowStar.RVector
module RVI = LowStar.Regional.Instances
module S = FStar.Seq
module U32 = FStar.UInt32
module U64 = FStar.UInt64
module MTH = MerkleTree.New.High
module MTS = MerkleTree.Spec
open Lib.IntTypes
open MerkleTree.Low.Datastructures
open MerkleTree.Low.Hashfunctions
open MerkleTree.Low.VectorExtras
#set-options "--z3rlimit 10 --initial_fuel 0 --max_fuel 0 --initial_ifuel 0 --max_ifuel 0"
type const_pointer (a:Type0) = b:CB.const_buffer a{CB.length b == 1 /\ CB.qual_of b == CB.MUTABLE}
/// Low-level Merkle tree data structure
///
// NOTE: because of a lack of 64-bit LowStar.Buffer support, currently
// we cannot change below to some other types.
type index_t = uint32_t
let uint32_32_max = 4294967295ul | false | true | MerkleTree.Low.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 0,
"initial_ifuel": 0,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 10,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val uint32_max : FStar.UInt64.t | [] | MerkleTree.Low.uint32_max | {
"file_name": "src/MerkleTree.Low.fst",
"git_rev": "3b0f086655c145aa23f58a97259ebf4cf112a4a3",
"git_url": "https://github.com/hacl-star/merkle-tree.git",
"project_name": "merkle-tree"
} | FStar.UInt64.t | {
"end_col": 29,
"end_line": 53,
"start_col": 17,
"start_line": 53
} |
|
Prims.Tot | [
{
"abbrev": false,
"full_module": "MerkleTree.Low.VectorExtras",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree.Low.Hashfunctions",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree.Low.Datastructures",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": true,
"full_module": "MerkleTree.Spec",
"short_module": "MTS"
},
{
"abbrev": true,
"full_module": "MerkleTree.New.High",
"short_module": "MTH"
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "LowStar.Regional.Instances",
"short_module": "RVI"
},
{
"abbrev": true,
"full_module": "LowStar.RVector",
"short_module": "RV"
},
{
"abbrev": true,
"full_module": "LowStar.Vector",
"short_module": "V"
},
{
"abbrev": true,
"full_module": "LowStar.ConstBuffer",
"short_module": "CB"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.Monotonic.HyperHeap",
"short_module": "HH"
},
{
"abbrev": true,
"full_module": "FStar.Monotonic.HyperStack",
"short_module": "MHS"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "LowStar.Regional.Instances",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.RVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Regional",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Vector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.BufferOps",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Buffer",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Integers",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.All",
"short_module": null
},
{
"abbrev": false,
"full_module": "EverCrypt.Helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let u32_64 = Int.Cast.uint32_to_uint64 | let u32_64 = | false | null | false | Int.Cast.uint32_to_uint64 | {
"checked_file": "MerkleTree.Low.fst.checked",
"dependencies": [
"prims.fst.checked",
"MerkleTree.Spec.fst.checked",
"MerkleTree.New.High.fst.checked",
"MerkleTree.Low.VectorExtras.fst.checked",
"MerkleTree.Low.Hashfunctions.fst.checked",
"MerkleTree.Low.Datastructures.fst.checked",
"LowStar.Vector.fst.checked",
"LowStar.RVector.fst.checked",
"LowStar.Regional.Instances.fst.checked",
"LowStar.Regional.fst.checked",
"LowStar.ConstBuffer.fsti.checked",
"LowStar.BufferOps.fst.checked",
"LowStar.Buffer.fst.checked",
"Lib.IntTypes.fsti.checked",
"Lib.ByteBuffer.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.Properties.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Monotonic.HyperStack.fsti.checked",
"FStar.Monotonic.HyperHeap.fsti.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Integers.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked",
"FStar.All.fst.checked",
"EverCrypt.Helpers.fsti.checked"
],
"interface_file": false,
"source_file": "MerkleTree.Low.fst"
} | [
"total"
] | [
"FStar.Int.Cast.uint32_to_uint64"
] | [] | module MerkleTree.Low
open EverCrypt.Helpers
open FStar.All
open FStar.Integers
open FStar.Mul
open LowStar.Buffer
open LowStar.BufferOps
open LowStar.Vector
open LowStar.Regional
open LowStar.RVector
open LowStar.Regional.Instances
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
module MHS = FStar.Monotonic.HyperStack
module HH = FStar.Monotonic.HyperHeap
module B = LowStar.Buffer
module CB = LowStar.ConstBuffer
module V = LowStar.Vector
module RV = LowStar.RVector
module RVI = LowStar.Regional.Instances
module S = FStar.Seq
module U32 = FStar.UInt32
module U64 = FStar.UInt64
module MTH = MerkleTree.New.High
module MTS = MerkleTree.Spec
open Lib.IntTypes
open MerkleTree.Low.Datastructures
open MerkleTree.Low.Hashfunctions
open MerkleTree.Low.VectorExtras
#set-options "--z3rlimit 10 --initial_fuel 0 --max_fuel 0 --initial_ifuel 0 --max_ifuel 0"
type const_pointer (a:Type0) = b:CB.const_buffer a{CB.length b == 1 /\ CB.qual_of b == CB.MUTABLE}
/// Low-level Merkle tree data structure
///
// NOTE: because of a lack of 64-bit LowStar.Buffer support, currently
// we cannot change below to some other types.
type index_t = uint32_t
let uint32_32_max = 4294967295ul
inline_for_extraction
let uint32_max = 4294967295UL
let uint64_max = 18446744073709551615UL
let offset_range_limit = uint32_max | false | false | MerkleTree.Low.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 0,
"initial_ifuel": 0,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 10,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val u32_64 : a: FStar.UInt32.t -> b: FStar.UInt64.t{FStar.UInt64.v b = FStar.UInt32.v a} | [] | MerkleTree.Low.u32_64 | {
"file_name": "src/MerkleTree.Low.fst",
"git_rev": "3b0f086655c145aa23f58a97259ebf4cf112a4a3",
"git_url": "https://github.com/hacl-star/merkle-tree.git",
"project_name": "merkle-tree"
} | a: FStar.UInt32.t -> b: FStar.UInt64.t{FStar.UInt64.v b = FStar.UInt32.v a} | {
"end_col": 77,
"end_line": 58,
"start_col": 52,
"start_line": 58
} |
|
Prims.Tot | val mt_flush_to_pre_nst: mtv:merkle_tree -> idx:offset_t -> Tot bool | [
{
"abbrev": false,
"full_module": "MerkleTree.Low.VectorExtras",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree.Low.Hashfunctions",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree.Low.Datastructures",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": true,
"full_module": "MerkleTree.Spec",
"short_module": "MTS"
},
{
"abbrev": true,
"full_module": "MerkleTree.New.High",
"short_module": "MTH"
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "LowStar.Regional.Instances",
"short_module": "RVI"
},
{
"abbrev": true,
"full_module": "LowStar.RVector",
"short_module": "RV"
},
{
"abbrev": true,
"full_module": "LowStar.Vector",
"short_module": "V"
},
{
"abbrev": true,
"full_module": "LowStar.ConstBuffer",
"short_module": "CB"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.Monotonic.HyperHeap",
"short_module": "HH"
},
{
"abbrev": true,
"full_module": "FStar.Monotonic.HyperStack",
"short_module": "MHS"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "LowStar.Regional.Instances",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.RVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Regional",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Vector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.BufferOps",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Buffer",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Integers",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.All",
"short_module": null
},
{
"abbrev": false,
"full_module": "EverCrypt.Helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let mt_flush_to_pre_nst mtv idx =
offsets_connect (MT?.offset mtv) idx &&
([@inline_let] let idx = split_offset (MT?.offset mtv) idx in
idx >= MT?.i mtv &&
idx < MT?.j mtv) | val mt_flush_to_pre_nst: mtv:merkle_tree -> idx:offset_t -> Tot bool
let mt_flush_to_pre_nst mtv idx = | false | null | false | offsets_connect (MT?.offset mtv) idx &&
([@@ inline_let ]let idx = split_offset (MT?.offset mtv) idx in
idx >= MT?.i mtv && idx < MT?.j mtv) | {
"checked_file": "MerkleTree.Low.fst.checked",
"dependencies": [
"prims.fst.checked",
"MerkleTree.Spec.fst.checked",
"MerkleTree.New.High.fst.checked",
"MerkleTree.Low.VectorExtras.fst.checked",
"MerkleTree.Low.Hashfunctions.fst.checked",
"MerkleTree.Low.Datastructures.fst.checked",
"LowStar.Vector.fst.checked",
"LowStar.RVector.fst.checked",
"LowStar.Regional.Instances.fst.checked",
"LowStar.Regional.fst.checked",
"LowStar.ConstBuffer.fsti.checked",
"LowStar.BufferOps.fst.checked",
"LowStar.Buffer.fst.checked",
"Lib.IntTypes.fsti.checked",
"Lib.ByteBuffer.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.Properties.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Monotonic.HyperStack.fsti.checked",
"FStar.Monotonic.HyperHeap.fsti.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Integers.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked",
"FStar.All.fst.checked",
"EverCrypt.Helpers.fsti.checked"
],
"interface_file": false,
"source_file": "MerkleTree.Low.fst"
} | [
"total"
] | [
"MerkleTree.Low.merkle_tree",
"MerkleTree.Low.offset_t",
"Prims.op_AmpAmp",
"MerkleTree.Low.offsets_connect",
"MerkleTree.Low.__proj__MT__item__offset",
"FStar.Integers.op_Greater_Equals",
"FStar.Integers.Unsigned",
"FStar.Integers.W32",
"MerkleTree.Low.__proj__MT__item__i",
"FStar.Integers.op_Less",
"MerkleTree.Low.__proj__MT__item__j",
"MerkleTree.Low.index_t",
"MerkleTree.Low.split_offset",
"Prims.bool"
] | [] | module MerkleTree.Low
open EverCrypt.Helpers
open FStar.All
open FStar.Integers
open FStar.Mul
open LowStar.Buffer
open LowStar.BufferOps
open LowStar.Vector
open LowStar.Regional
open LowStar.RVector
open LowStar.Regional.Instances
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
module MHS = FStar.Monotonic.HyperStack
module HH = FStar.Monotonic.HyperHeap
module B = LowStar.Buffer
module CB = LowStar.ConstBuffer
module V = LowStar.Vector
module RV = LowStar.RVector
module RVI = LowStar.Regional.Instances
module S = FStar.Seq
module U32 = FStar.UInt32
module U64 = FStar.UInt64
module MTH = MerkleTree.New.High
module MTS = MerkleTree.Spec
open Lib.IntTypes
open MerkleTree.Low.Datastructures
open MerkleTree.Low.Hashfunctions
open MerkleTree.Low.VectorExtras
#set-options "--z3rlimit 10 --initial_fuel 0 --max_fuel 0 --initial_ifuel 0 --max_ifuel 0"
type const_pointer (a:Type0) = b:CB.const_buffer a{CB.length b == 1 /\ CB.qual_of b == CB.MUTABLE}
/// Low-level Merkle tree data structure
///
// NOTE: because of a lack of 64-bit LowStar.Buffer support, currently
// we cannot change below to some other types.
type index_t = uint32_t
let uint32_32_max = 4294967295ul
inline_for_extraction
let uint32_max = 4294967295UL
let uint64_max = 18446744073709551615UL
let offset_range_limit = uint32_max
type offset_t = uint64_t
inline_for_extraction noextract unfold let u32_64 = Int.Cast.uint32_to_uint64
inline_for_extraction noextract unfold let u64_32 = Int.Cast.uint64_to_uint32
private inline_for_extraction
let offsets_connect (x:offset_t) (y:offset_t): Tot bool = y >= x && (y - x) <= offset_range_limit
private inline_for_extraction
let split_offset (tree:offset_t) (index:offset_t{offsets_connect tree index}): Tot index_t =
[@inline_let] let diff = U64.sub_mod index tree in
assert (diff <= offset_range_limit);
Int.Cast.uint64_to_uint32 diff
private inline_for_extraction
let add64_fits (x:offset_t) (i:index_t): Tot bool = uint64_max - x >= (u32_64 i)
private inline_for_extraction
let join_offset (tree:offset_t) (i:index_t{add64_fits tree i}): Tot (r:offset_t{offsets_connect tree r}) =
U64.add tree (u32_64 i)
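// Editorial note (illustrative example, not part of the original source): the
// offset helpers split a 64-bit position into a 64-bit tree offset plus a
// 32-bit in-tree index. For instance, with tree = 4294967296UL (2^32) and
// index = 4294967301UL, the difference is 5, so `offsets_connect tree index`
// holds, `split_offset tree index` evaluates to 5ul, and `join_offset tree 5ul`
// recovers 4294967301UL.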
inline_for_extraction val merkle_tree_size_lg: uint32_t
let merkle_tree_size_lg = 32ul
// A Merkle tree `MT i j hs rhs_ok rhs` stores all necessary hashes to generate
// a Merkle path for each element from the index `i` to `j-1`.
// - Parameters
// `hs`: a 2-dim store for hashes, where `hs[0]` contains leaf hash values.
// `rhs_ok`: to check the rightmost hashes are up-to-date
// `rhs`: a store for "rightmost" hashes, manipulated only when required to
// calculate some merkle paths that need the rightmost hashes
// as a part of them.
// `mroot`: during the construction of `rhs` we can also calculate the Merkle
// root of the tree. If `rhs_ok` is true then it has the up-to-date
// root value.
noeq type merkle_tree =
| MT: hash_size:hash_size_t ->
offset:offset_t ->
i:index_t -> j:index_t{i <= j /\ add64_fits offset j} ->
hs:hash_vv hash_size {V.size_of hs = merkle_tree_size_lg} ->
rhs_ok:bool ->
rhs:hash_vec #hash_size {V.size_of rhs = merkle_tree_size_lg} ->
mroot:hash #hash_size ->
hash_spec:Ghost.erased (MTS.hash_fun_t #(U32.v hash_size)) ->
hash_fun:hash_fun_t #hash_size #hash_spec ->
merkle_tree
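// Editorial note (hedged shape sketch, not part of the original source): a tree
// that has absorbed 5 leaves since creation would have offset = 0UL, i = 0ul,
// j = 5ul, hs[0] holding 5 hashes, hs[1] holding 2, hs[2] holding 1, all higher
// levels empty, and rhs_ok = false until the rightmost hashes are (re)computed.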
type mt_p = B.pointer merkle_tree
type const_mt_p = const_pointer merkle_tree
inline_for_extraction
let merkle_tree_conditions (#hsz:Ghost.erased hash_size_t) (offset:uint64_t) (i j:uint32_t) (hs:hash_vv hsz) (rhs_ok:bool) (rhs:hash_vec #hsz) (mroot:hash #hsz): Tot bool =
j >= i && add64_fits offset j &&
V.size_of hs = merkle_tree_size_lg &&
V.size_of rhs = merkle_tree_size_lg
// The maximum number of currently held elements in the tree is (2^32 - 1).
// cwinter: even when using 64-bit indices, we fail if the underlying 32-bit
// vector is full; this can be fixed if necessary.
private inline_for_extraction
val mt_not_full_nst: mtv:merkle_tree -> Tot bool
let mt_not_full_nst mtv = MT?.j mtv < uint32_32_max
val mt_not_full: HS.mem -> mt_p -> GTot bool
let mt_not_full h mt = mt_not_full_nst (B.get h mt 0)
/// (Memory) Safety
val offset_of: i:index_t -> Tot index_t
let offset_of i = if i % 2ul = 0ul then i else i - 1ul
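// Editorial note (worked example, not part of the original source):
// offset_of 6ul = 6ul and offset_of 7ul = 6ul, i.e. an odd lower bound is
// rounded down to its even partner, so a level holding elements [i, j) stores
// exactly j - offset_of i hashes (see `mt_safe_elts` below).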
// `mt_safe_elts` says that it is safe to access an element from `i` to `j - 1`
// at level `lv` in the Merkle tree, i.e., hs[lv][k] (i <= k < j) is a valid
// element.
inline_for_extraction noextract
val mt_safe_elts:
#hsz:hash_size_t ->
h:HS.mem -> lv:uint32_t{lv <= merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
i:index_t -> j:index_t{j >= i} ->
GTot Type0 (decreases (32 - U32.v lv))
let rec mt_safe_elts #hsz h lv hs i j =
if lv = merkle_tree_size_lg then true
else (let ofs = offset_of i in
V.size_of (V.get h hs lv) == j - ofs /\
mt_safe_elts #hsz h (lv + 1ul) hs (i / 2ul) (j / 2ul))
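// Editorial note (worked example, not part of the original source): with
// i = 1ul and j = 5ul, the check requires hs[0] to hold 5 - offset_of 1ul = 5
// hashes, then recurses with i = 0ul, j = 2ul (2 hashes at level 1), then
// i = 0ul, j = 1ul (1 hash at level 2), and empty vectors at every level above.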
#push-options "--initial_fuel 1 --max_fuel 1"
val mt_safe_elts_constr:
#hsz:hash_size_t ->
h:HS.mem -> lv:uint32_t{lv < merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
i:index_t -> j:index_t{j >= i} ->
Lemma (requires (V.size_of (V.get h hs lv) == j - offset_of i /\
mt_safe_elts #hsz h (lv + 1ul) hs (i / 2ul) (j / 2ul)))
(ensures (mt_safe_elts #hsz h lv hs i j))
let mt_safe_elts_constr #_ h lv hs i j = ()
val mt_safe_elts_head:
#hsz:hash_size_t ->
h:HS.mem -> lv:uint32_t{lv < merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
i:index_t -> j:index_t{j >= i} ->
Lemma (requires (mt_safe_elts #hsz h lv hs i j))
(ensures (V.size_of (V.get h hs lv) == j - offset_of i))
let mt_safe_elts_head #_ h lv hs i j = ()
val mt_safe_elts_rec:
#hsz:hash_size_t ->
h:HS.mem -> lv:uint32_t{lv < merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
i:index_t -> j:index_t{j >= i} ->
Lemma (requires (mt_safe_elts #hsz h lv hs i j))
(ensures (mt_safe_elts #hsz h (lv + 1ul) hs (i / 2ul) (j / 2ul)))
let mt_safe_elts_rec #_ h lv hs i j = ()
val mt_safe_elts_init:
#hsz:hash_size_t ->
h:HS.mem -> lv:uint32_t{lv <= merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
Lemma (requires (V.forall_ h hs lv (V.size_of hs)
(fun hv -> V.size_of hv = 0ul)))
(ensures (mt_safe_elts #hsz h lv hs 0ul 0ul))
(decreases (32 - U32.v lv))
let rec mt_safe_elts_init #hsz h lv hs =
if lv = merkle_tree_size_lg then ()
else mt_safe_elts_init #hsz h (lv + 1ul) hs
#pop-options
val mt_safe_elts_preserved:
#hsz:hash_size_t ->
lv:uint32_t{lv <= merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
i:index_t -> j:index_t{j >= i} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 hs /\
mt_safe_elts #hsz h0 lv hs i j /\
loc_disjoint p (V.loc_vector_within hs lv (V.size_of hs)) /\
modifies p h0 h1))
(ensures (mt_safe_elts #hsz h1 lv hs i j))
(decreases (32 - U32.v lv))
[SMTPat (V.live h0 hs);
SMTPat (mt_safe_elts #hsz h0 lv hs i j);
SMTPat (loc_disjoint p (RV.loc_rvector hs));
SMTPat (modifies p h0 h1)]
#push-options "--z3rlimit 100 --initial_fuel 2 --max_fuel 2"
let rec mt_safe_elts_preserved #hsz lv hs i j p h0 h1 =
if lv = merkle_tree_size_lg then ()
else (V.get_preserved hs lv p h0 h1;
mt_safe_elts_preserved #hsz (lv + 1ul) hs (i / 2ul) (j / 2ul) p h0 h1)
#pop-options
// `mt_safe` is the invariant of a Merkle tree through its lifetime.
// It includes liveness, regionality, disjointness (to each data structure),
// and valid element access (`mt_safe_elts`).
inline_for_extraction noextract
val mt_safe: HS.mem -> mt_p -> GTot Type0
let mt_safe h mt =
B.live h mt /\ B.freeable mt /\
(let mtv = B.get h mt 0 in
// Liveness & Accessibility
RV.rv_inv h (MT?.hs mtv) /\
RV.rv_inv h (MT?.rhs mtv) /\
Rgl?.r_inv (hreg (MT?.hash_size mtv)) h (MT?.mroot mtv) /\
mt_safe_elts h 0ul (MT?.hs mtv) (MT?.i mtv) (MT?.j mtv) /\
// Regionality
HH.extends (V.frameOf (MT?.hs mtv)) (B.frameOf mt) /\
HH.extends (V.frameOf (MT?.rhs mtv)) (B.frameOf mt) /\
HH.extends (B.frameOf (MT?.mroot mtv)) (B.frameOf mt) /\
HH.disjoint (V.frameOf (MT?.hs mtv)) (V.frameOf (MT?.rhs mtv)) /\
HH.disjoint (V.frameOf (MT?.hs mtv)) (B.frameOf (MT?.mroot mtv)) /\
HH.disjoint (V.frameOf (MT?.rhs mtv)) (B.frameOf (MT?.mroot mtv)))
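// Editorial note (not part of the original source): the pairwise region
// disjointness above is what lets the proofs below frame modifications, e.g.
// updating `hs` during an insertion cannot disturb `rhs` or `mroot`, and
// recomputing `rhs`/`mroot` leaves `hs` untouched.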
// Since a Merkle tree satisfies regionality, it's ok to take all regions from
// a tree pointer as a location of the tree.
val mt_loc: mt_p -> GTot loc
let mt_loc mt = B.loc_all_regions_from false (B.frameOf mt)
val mt_safe_preserved:
mt:mt_p -> p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (mt_safe h0 mt /\
loc_disjoint p (mt_loc mt) /\
modifies p h0 h1))
(ensures (B.get h0 mt 0 == B.get h1 mt 0 /\
mt_safe h1 mt))
let mt_safe_preserved mt p h0 h1 =
assert (loc_includes (mt_loc mt) (B.loc_buffer mt));
let mtv = B.get h0 mt 0 in
assert (loc_includes (mt_loc mt) (RV.loc_rvector (MT?.hs mtv)));
assert (loc_includes (mt_loc mt) (RV.loc_rvector (MT?.rhs mtv)));
assert (loc_includes (mt_loc mt) (V.loc_vector (MT?.hs mtv)));
assert (loc_includes (mt_loc mt)
(B.loc_all_regions_from false (B.frameOf (MT?.mroot mtv))));
RV.rv_inv_preserved (MT?.hs mtv) p h0 h1;
RV.rv_inv_preserved (MT?.rhs mtv) p h0 h1;
Rgl?.r_sep (hreg (MT?.hash_size mtv)) (MT?.mroot mtv) p h0 h1;
V.loc_vector_within_included (MT?.hs mtv) 0ul (V.size_of (MT?.hs mtv));
mt_safe_elts_preserved 0ul (MT?.hs mtv) (MT?.i mtv) (MT?.j mtv) p h0 h1
/// Lifting to a high-level Merkle tree structure
val mt_safe_elts_spec:
#hsz:hash_size_t ->
h:HS.mem ->
lv:uint32_t{lv <= merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
i:index_t ->
j:index_t{j >= i} ->
Lemma (requires (RV.rv_inv h hs /\
mt_safe_elts #hsz h lv hs i j))
(ensures (MTH.hs_wf_elts #(U32.v hsz)
(U32.v lv) (RV.as_seq h hs)
(U32.v i) (U32.v j)))
(decreases (32 - U32.v lv))
#push-options "--z3rlimit 100 --initial_fuel 2 --max_fuel 2"
let rec mt_safe_elts_spec #_ h lv hs i j =
if lv = merkle_tree_size_lg then ()
else mt_safe_elts_spec h (lv + 1ul) hs (i / 2ul) (j / 2ul)
#pop-options
val merkle_tree_lift:
h:HS.mem ->
mtv:merkle_tree{
RV.rv_inv h (MT?.hs mtv) /\
RV.rv_inv h (MT?.rhs mtv) /\
Rgl?.r_inv (hreg (MT?.hash_size mtv)) h (MT?.mroot mtv) /\
mt_safe_elts #(MT?.hash_size mtv) h 0ul (MT?.hs mtv) (MT?.i mtv) (MT?.j mtv)} ->
GTot (r:MTH.merkle_tree #(U32.v (MT?.hash_size mtv)) {MTH.mt_wf_elts #_ r})
let merkle_tree_lift h mtv =
mt_safe_elts_spec h 0ul (MT?.hs mtv) (MT?.i mtv) (MT?.j mtv);
MTH.MT #(U32.v (MT?.hash_size mtv))
(U32.v (MT?.i mtv))
(U32.v (MT?.j mtv))
(RV.as_seq h (MT?.hs mtv))
(MT?.rhs_ok mtv)
(RV.as_seq h (MT?.rhs mtv))
(Rgl?.r_repr (hreg (MT?.hash_size mtv)) h (MT?.mroot mtv))
(Ghost.reveal (MT?.hash_spec mtv))
val mt_lift:
h:HS.mem -> mt:mt_p{mt_safe h mt} ->
GTot (r:MTH.merkle_tree #(U32.v (MT?.hash_size (B.get h mt 0))) {MTH.mt_wf_elts #_ r})
let mt_lift h mt =
merkle_tree_lift h (B.get h mt 0)
val mt_preserved:
mt:mt_p -> p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (mt_safe h0 mt /\
loc_disjoint p (mt_loc mt) /\
modifies p h0 h1))
(ensures (mt_safe_preserved mt p h0 h1;
mt_lift h0 mt == mt_lift h1 mt))
let mt_preserved mt p h0 h1 =
assert (loc_includes (B.loc_all_regions_from false (B.frameOf mt))
(B.loc_buffer mt));
B.modifies_buffer_elim mt p h0 h1;
assert (B.get h0 mt 0 == B.get h1 mt 0);
assert (loc_includes (B.loc_all_regions_from false (B.frameOf mt))
(RV.loc_rvector (MT?.hs (B.get h0 mt 0))));
assert (loc_includes (B.loc_all_regions_from false (B.frameOf mt))
(RV.loc_rvector (MT?.rhs (B.get h0 mt 0))));
assert (loc_includes (B.loc_all_regions_from false (B.frameOf mt))
(B.loc_buffer (MT?.mroot (B.get h0 mt 0))));
RV.as_seq_preserved (MT?.hs (B.get h0 mt 0)) p h0 h1;
RV.as_seq_preserved (MT?.rhs (B.get h0 mt 0)) p h0 h1;
B.modifies_buffer_elim (MT?.mroot (B.get h0 mt 0)) p h0 h1
/// Construction
// Note that the public function for creation is `mt_create` defined below,
// which builds a tree with an initial hash.
#push-options "--z3rlimit 100 --initial_fuel 1 --max_fuel 1 --initial_ifuel 1 --max_ifuel 1"
private
val create_empty_mt:
hash_size:hash_size_t ->
hash_spec:Ghost.erased (MTS.hash_fun_t #(U32.v hash_size)) ->
hash_fun:hash_fun_t #hash_size #hash_spec ->
r:HST.erid ->
HST.ST mt_p
(requires (fun _ -> true))
(ensures (fun h0 mt h1 ->
let dmt = B.get h1 mt 0 in
// memory safety
B.frameOf mt = r /\
modifies (mt_loc mt) h0 h1 /\
mt_safe h1 mt /\
mt_not_full h1 mt /\
// correctness
MT?.hash_size dmt = hash_size /\
MT?.offset dmt = 0UL /\
merkle_tree_lift h1 dmt == MTH.create_empty_mt #_ #(Ghost.reveal hash_spec) ()))
let create_empty_mt hsz hash_spec hash_fun r =
[@inline_let] let hrg = hreg hsz in
[@inline_let] let hvrg = hvreg hsz in
[@inline_let] let hvvrg = hvvreg hsz in
let hs_region = HST.new_region r in
let hs = RV.alloc_rid hvrg merkle_tree_size_lg hs_region in
let h0 = HST.get () in
mt_safe_elts_init #hsz h0 0ul hs;
let rhs_region = HST.new_region r in
let rhs = RV.alloc_rid hrg merkle_tree_size_lg rhs_region in
let h1 = HST.get () in
assert (RV.as_seq h1 rhs == S.create 32 (MTH.hash_init #(U32.v hsz)));
RV.rv_inv_preserved hs (V.loc_vector rhs) h0 h1;
RV.as_seq_preserved hs (V.loc_vector rhs) h0 h1;
V.loc_vector_within_included hs 0ul (V.size_of hs);
mt_safe_elts_preserved #hsz 0ul hs 0ul 0ul (V.loc_vector rhs) h0 h1;
let mroot_region = HST.new_region r in
let mroot = rg_alloc hrg mroot_region in
let h2 = HST.get () in
RV.as_seq_preserved hs loc_none h1 h2;
RV.as_seq_preserved rhs loc_none h1 h2;
mt_safe_elts_preserved #hsz 0ul hs 0ul 0ul loc_none h1 h2;
let mt = B.malloc r (MT hsz 0UL 0ul 0ul hs false rhs mroot hash_spec hash_fun) 1ul in
let h3 = HST.get () in
RV.as_seq_preserved hs loc_none h2 h3;
RV.as_seq_preserved rhs loc_none h2 h3;
Rgl?.r_sep hrg mroot loc_none h2 h3;
mt_safe_elts_preserved #hsz 0ul hs 0ul 0ul loc_none h2 h3;
mt
#pop-options
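// Editorial note (not part of the original source): the freshly allocated tree
// above starts with offset = 0UL, i = j = 0ul, 32 empty hash vectors in `hs`,
// rhs_ok = false, and dedicated subregions for `hs`, `rhs` and `mroot`, which
// establishes the disjointness required by `mt_safe`.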
/// Destruction (free)
val mt_free: mt:mt_p ->
HST.ST unit
(requires (fun h0 -> mt_safe h0 mt))
(ensures (fun h0 _ h1 -> modifies (mt_loc mt) h0 h1))
#push-options "--z3rlimit 100"
let mt_free mt =
let mtv = !*mt in
RV.free (MT?.hs mtv);
RV.free (MT?.rhs mtv);
[@inline_let] let rg = hreg (MT?.hash_size mtv) in
rg_free rg (MT?.mroot mtv);
B.free mt
#pop-options
/// Insertion
private
val as_seq_sub_upd:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector #a #rst rg ->
i:uint32_t{i < V.size_of rv} -> v:Rgl?.repr rg ->
Lemma (requires (RV.rv_inv h rv))
(ensures (S.equal (S.upd (RV.as_seq h rv) (U32.v i) v)
(S.append
(RV.as_seq_sub h rv 0ul i)
(S.cons v (RV.as_seq_sub h rv (i + 1ul) (V.size_of rv))))))
#push-options "--z3rlimit 20"
let as_seq_sub_upd #a #rst #rg h rv i v =
Seq.Properties.slice_upd (RV.as_seq h rv) 0 (U32.v i) (U32.v i) v;
Seq.Properties.slice_upd (RV.as_seq h rv) (U32.v i + 1) (U32.v (V.size_of rv)) (U32.v i) v;
RV.as_seq_seq_slice rg h (V.as_seq h rv)
0 (U32.v (V.size_of rv)) 0 (U32.v i);
assert (S.equal (S.slice (RV.as_seq h rv) 0 (U32.v i))
(RV.as_seq_sub h rv 0ul i));
RV.as_seq_seq_slice rg h (V.as_seq h rv)
0 (U32.v (V.size_of rv)) (U32.v i + 1) (U32.v (V.size_of rv));
assert (S.equal (S.slice (RV.as_seq h rv) (U32.v i + 1) (U32.v (V.size_of rv)))
(RV.as_seq_sub h rv (i + 1ul) (V.size_of rv)));
assert (S.index (S.upd (RV.as_seq h rv) (U32.v i) v) (U32.v i) == v)
#pop-options
// `hash_vv_insert_copy` inserts a hash element at a level `lv`, by copying
// and pushing its content to `hs[lv]`. For detailed insertion procedure, see
// `insert_` and `mt_insert`.
#push-options "--z3rlimit 100 --initial_fuel 1 --max_fuel 1"
private
inline_for_extraction
val hash_vv_insert_copy:
#hsz:hash_size_t ->
lv:uint32_t{lv < merkle_tree_size_lg} ->
i:Ghost.erased index_t ->
j:index_t{
Ghost.reveal i <= j &&
U32.v j < pow2 (32 - U32.v lv) - 1 &&
j < uint32_32_max} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
v:hash #hsz ->
HST.ST unit
(requires (fun h0 ->
RV.rv_inv h0 hs /\
Rgl?.r_inv (hreg hsz) h0 v /\
HH.disjoint (V.frameOf hs) (B.frameOf v) /\
mt_safe_elts #hsz h0 lv hs (Ghost.reveal i) j))
(ensures (fun h0 _ h1 ->
// memory safety
modifies (loc_union
(RV.rs_loc_elem (hvreg hsz) (V.as_seq h0 hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul)))
h0 h1 /\
RV.rv_inv h1 hs /\
Rgl?.r_inv (hreg hsz) h1 v /\
V.size_of (V.get h1 hs lv) == j + 1ul - offset_of (Ghost.reveal i) /\
V.size_of (V.get h1 hs lv) == V.size_of (V.get h0 hs lv) + 1ul /\
mt_safe_elts #hsz h1 (lv + 1ul) hs (Ghost.reveal i / 2ul) (j / 2ul) /\
RV.rv_loc_elems h0 hs (lv + 1ul) (V.size_of hs) ==
RV.rv_loc_elems h1 hs (lv + 1ul) (V.size_of hs) /\
// correctness
(mt_safe_elts_spec #hsz h0 lv hs (Ghost.reveal i) j;
S.equal (RV.as_seq h1 hs)
(MTH.hashess_insert
(U32.v lv) (U32.v (Ghost.reveal i)) (U32.v j)
(RV.as_seq h0 hs) (Rgl?.r_repr (hreg hsz) h0 v))) /\
S.equal (S.index (RV.as_seq h1 hs) (U32.v lv))
(S.snoc (S.index (RV.as_seq h0 hs) (U32.v lv))
(Rgl?.r_repr (hreg hsz) h0 v))))
let hash_vv_insert_copy #hsz lv i j hs v =
let hh0 = HST.get () in
mt_safe_elts_rec hh0 lv hs (Ghost.reveal i) j;
/// 1) Insert an element at the level `lv`, where the new vector is not yet
/// connected to `hs`.
let ihv = RV.insert_copy (hcpy hsz) (V.index hs lv) v in
let hh1 = HST.get () in
// 1-0) Basic disjointness conditions
V.forall2_forall_left hh0 hs 0ul (V.size_of hs) lv
(fun b1 b2 -> HH.disjoint (Rgl?.region_of (hvreg hsz) b1)
(Rgl?.region_of (hvreg hsz) b2));
V.forall2_forall_right hh0 hs 0ul (V.size_of hs) lv
(fun b1 b2 -> HH.disjoint (Rgl?.region_of (hvreg hsz) b1)
(Rgl?.region_of (hvreg hsz) b2));
V.loc_vector_within_included hs lv (lv + 1ul);
V.loc_vector_within_included hs (lv + 1ul) (V.size_of hs);
V.loc_vector_within_disjoint hs lv (lv + 1ul) (lv + 1ul) (V.size_of hs);
// 1-1) For the `modifies` postcondition.
assert (modifies (RV.rs_loc_elem (hvreg hsz) (V.as_seq hh0 hs) (U32.v lv)) hh0 hh1);
// 1-2) Preservation
Rgl?.r_sep (hreg hsz) v (RV.loc_rvector (V.get hh0 hs lv)) hh0 hh1;
RV.rv_loc_elems_preserved
hs (lv + 1ul) (V.size_of hs)
(RV.loc_rvector (V.get hh0 hs lv)) hh0 hh1;
// 1-3) For `mt_safe_elts`
assert (V.size_of ihv == j + 1ul - offset_of (Ghost.reveal i)); // head updated
mt_safe_elts_preserved
(lv + 1ul) hs (Ghost.reveal i / 2ul) (j / 2ul)
(RV.loc_rvector (V.get hh0 hs lv)) hh0 hh1; // tail not yet
// 1-4) For the `rv_inv` postcondition
RV.rs_loc_elems_elem_disj
(hvreg hsz) (V.as_seq hh0 hs) (V.frameOf hs)
0 (U32.v (V.size_of hs)) 0 (U32.v lv) (U32.v lv);
RV.rs_loc_elems_parent_disj
(hvreg hsz) (V.as_seq hh0 hs) (V.frameOf hs)
0 (U32.v lv);
RV.rv_elems_inv_preserved
hs 0ul lv (RV.loc_rvector (V.get hh0 hs lv))
hh0 hh1;
assert (RV.rv_elems_inv hh1 hs 0ul lv);
RV.rs_loc_elems_elem_disj
(hvreg hsz) (V.as_seq hh0 hs) (V.frameOf hs)
0 (U32.v (V.size_of hs))
(U32.v lv + 1) (U32.v (V.size_of hs))
(U32.v lv);
RV.rs_loc_elems_parent_disj
(hvreg hsz) (V.as_seq hh0 hs) (V.frameOf hs)
(U32.v lv + 1) (U32.v (V.size_of hs));
RV.rv_elems_inv_preserved
hs (lv + 1ul) (V.size_of hs) (RV.loc_rvector (V.get hh0 hs lv))
hh0 hh1;
assert (RV.rv_elems_inv hh1 hs (lv + 1ul) (V.size_of hs));
// assert (rv_itself_inv hh1 hs);
// assert (elems_reg hh1 hs);
// 1-5) Correctness
assert (S.equal (RV.as_seq hh1 ihv)
(S.snoc (RV.as_seq hh0 (V.get hh0 hs lv)) (Rgl?.r_repr (hreg hsz) hh0 v)));
/// 2) Assign the updated vector to `hs` at the level `lv`.
RV.assign hs lv ihv;
let hh2 = HST.get () in
// 2-1) For the `modifies` postcondition.
assert (modifies (V.loc_vector_within hs lv (lv + 1ul)) hh1 hh2);
assert (modifies (loc_union
(RV.rs_loc_elem (hvreg hsz) (V.as_seq hh0 hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul))) hh0 hh2);
// 2-2) Preservation
Rgl?.r_sep (hreg hsz) v (RV.loc_rvector hs) hh1 hh2;
RV.rv_loc_elems_preserved
hs (lv + 1ul) (V.size_of hs)
(V.loc_vector_within hs lv (lv + 1ul)) hh1 hh2;
// 2-3) For `mt_safe_elts`
assert (V.size_of (V.get hh2 hs lv) == j + 1ul - offset_of (Ghost.reveal i));
mt_safe_elts_preserved
(lv + 1ul) hs (Ghost.reveal i / 2ul) (j / 2ul)
(V.loc_vector_within hs lv (lv + 1ul)) hh1 hh2;
// 2-4) Correctness
RV.as_seq_sub_preserved hs 0ul lv (loc_rvector ihv) hh0 hh1;
RV.as_seq_sub_preserved hs (lv + 1ul) merkle_tree_size_lg (loc_rvector ihv) hh0 hh1;
assert (S.equal (RV.as_seq hh2 hs)
(S.append
(RV.as_seq_sub hh0 hs 0ul lv)
(S.cons (RV.as_seq hh1 ihv)
(RV.as_seq_sub hh0 hs (lv + 1ul) merkle_tree_size_lg))));
as_seq_sub_upd hh0 hs lv (RV.as_seq hh1 ihv)
#pop-options
private
val insert_index_helper_even:
lv:uint32_t{lv < merkle_tree_size_lg} ->
j:index_t{U32.v j < pow2 (32 - U32.v lv) - 1} ->
Lemma (requires (j % 2ul <> 1ul))
(ensures (U32.v j % 2 <> 1 /\ j / 2ul == (j + 1ul) / 2ul))
let insert_index_helper_even lv j = ()
#push-options "--z3rlimit 100 --initial_fuel 1 --max_fuel 1 --initial_ifuel 1 --max_ifuel 1"
private
val insert_index_helper_odd:
lv:uint32_t{lv < merkle_tree_size_lg} ->
i:index_t ->
j:index_t{i <= j && U32.v j < pow2 (32 - U32.v lv) - 1} ->
Lemma (requires (j % 2ul = 1ul /\
j < uint32_32_max))
(ensures (U32.v j % 2 = 1 /\
U32.v (j / 2ul) < pow2 (32 - U32.v (lv + 1ul)) - 1 /\
(j + 1ul) / 2ul == j / 2ul + 1ul /\
j - offset_of i > 0ul))
let insert_index_helper_odd lv i j = ()
#pop-options
private
val loc_union_assoc_4:
a:loc -> b:loc -> c:loc -> d:loc ->
Lemma (loc_union (loc_union a b) (loc_union c d) ==
loc_union (loc_union a c) (loc_union b d))
let loc_union_assoc_4 a b c d =
loc_union_assoc (loc_union a b) c d;
loc_union_assoc a b c;
loc_union_assoc a c b;
loc_union_assoc (loc_union a c) b d
private
val insert_modifies_rec_helper:
#hsz:hash_size_t ->
lv:uint32_t{lv < merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
aloc:loc ->
h:HS.mem ->
Lemma (loc_union
(loc_union
(loc_union
(RV.rs_loc_elem (hvreg hsz) (V.as_seq h hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul)))
aloc)
(loc_union
(loc_union
(RV.rv_loc_elems h hs (lv + 1ul) (V.size_of hs))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs)))
aloc) ==
loc_union
(loc_union
(RV.rv_loc_elems h hs lv (V.size_of hs))
(V.loc_vector_within hs lv (V.size_of hs)))
aloc)
#push-options "--z3rlimit 100 --initial_fuel 2 --max_fuel 2"
let insert_modifies_rec_helper #hsz lv hs aloc h =
assert (V.loc_vector_within hs lv (V.size_of hs) ==
loc_union (V.loc_vector_within hs lv (lv + 1ul))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs)));
RV.rs_loc_elems_rec_inverse (hvreg hsz) (V.as_seq h hs) (U32.v lv) (U32.v (V.size_of hs));
assert (RV.rv_loc_elems h hs lv (V.size_of hs) ==
loc_union (RV.rs_loc_elem (hvreg hsz) (V.as_seq h hs) (U32.v lv))
(RV.rv_loc_elems h hs (lv + 1ul) (V.size_of hs)));
// Applying some association rules...
loc_union_assoc
(loc_union
(RV.rs_loc_elem (hvreg hsz) (V.as_seq h hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul))) aloc
(loc_union
(loc_union
(RV.rv_loc_elems h hs (lv + 1ul) (V.size_of hs))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs)))
aloc);
loc_union_assoc
(loc_union
(RV.rv_loc_elems h hs (lv + 1ul) (V.size_of hs))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs))) aloc aloc;
loc_union_assoc
(loc_union
(RV.rs_loc_elem (hvreg hsz) (V.as_seq h hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul)))
(loc_union
(RV.rv_loc_elems h hs (lv + 1ul) (V.size_of hs))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs)))
aloc;
loc_union_assoc_4
(RV.rs_loc_elem (hvreg hsz) (V.as_seq h hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul))
(RV.rv_loc_elems h hs (lv + 1ul) (V.size_of hs))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs))
#pop-options
private
val insert_modifies_union_loc_weakening:
l1:loc -> l2:loc -> l3:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (modifies l1 h0 h1))
(ensures (modifies (loc_union (loc_union l1 l2) l3) h0 h1))
let insert_modifies_union_loc_weakening l1 l2 l3 h0 h1 =
B.loc_includes_union_l l1 l2 l1;
B.loc_includes_union_l (loc_union l1 l2) l3 (loc_union l1 l2)
private
val insert_snoc_last_helper:
#a:Type -> s:S.seq a{S.length s > 0} -> v:a ->
Lemma (S.index (S.snoc s v) (S.length s - 1) == S.last s)
let insert_snoc_last_helper #a s v = ()
private
val rv_inv_rv_elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (RV.rv_inv h rv))
(ensures (RV.rv_elems_reg h rv i j))
let rv_inv_rv_elems_reg #a #rst #rg h rv i j = ()
// `insert_` recursively inserts the proper hashes at each level `lv` by
// accumulating a compressed hash. For example, if there are three leaf elements
// in the tree, `insert_` will change `hs` as follows:
// (`hij` is a compressed hash from `hi` to `hj`)
//
// BEFORE INSERTION AFTER INSERTION
// lv
// 0 h0 h1 h2 ====> h0 h1 h2 h3
// 1 h01 h01 h23
// 2 h03
//
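// Tracing the example above, inserting `h3` runs with `j = 3` at level 0:
// `j` is odd, so after pushing h3 the accumulator becomes hash(h2, h3) = h23,
// which is pushed at level 1; there `j / 2 = 1` is odd again, so the
// accumulator becomes hash(h01, h23) = h03, which is finally pushed at level 2
// where the remaining element count is even and the recursion stops.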
private
val insert_:
#hsz:hash_size_t ->
#hash_spec:Ghost.erased (MTS.hash_fun_t #(U32.v hsz)) ->
lv:uint32_t{lv < merkle_tree_size_lg} ->
i:Ghost.erased index_t ->
j:index_t{
Ghost.reveal i <= j &&
U32.v j < pow2 (32 - U32.v lv) - 1 &&
j < uint32_32_max} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
acc:hash #hsz ->
hash_fun:hash_fun_t #hsz #hash_spec ->
HST.ST unit
(requires (fun h0 ->
RV.rv_inv h0 hs /\
Rgl?.r_inv (hreg hsz) h0 acc /\
HH.disjoint (V.frameOf hs) (B.frameOf acc) /\
mt_safe_elts h0 lv hs (Ghost.reveal i) j))
(ensures (fun h0 _ h1 ->
// memory safety
modifies (loc_union
(loc_union
(RV.rv_loc_elems h0 hs lv (V.size_of hs))
(V.loc_vector_within hs lv (V.size_of hs)))
(B.loc_all_regions_from false (B.frameOf acc)))
h0 h1 /\
RV.rv_inv h1 hs /\
Rgl?.r_inv (hreg hsz) h1 acc /\
mt_safe_elts h1 lv hs (Ghost.reveal i) (j + 1ul) /\
// correctness
(mt_safe_elts_spec h0 lv hs (Ghost.reveal i) j;
S.equal (RV.as_seq h1 hs)
(MTH.insert_ #(U32.v hsz) #hash_spec (U32.v lv) (U32.v (Ghost.reveal i)) (U32.v j)
(RV.as_seq h0 hs) (Rgl?.r_repr (hreg hsz) h0 acc)))))
(decreases (U32.v j))
#push-options "--z3rlimit 800 --initial_fuel 1 --max_fuel 1 --initial_ifuel 1 --max_ifuel 1"
let rec insert_ #hsz #hash_spec lv i j hs acc hash_fun =
let hh0 = HST.get () in
hash_vv_insert_copy lv i j hs acc;
let hh1 = HST.get () in
// Base conditions
V.loc_vector_within_included hs lv (lv + 1ul);
V.loc_vector_within_included hs (lv + 1ul) (V.size_of hs);
V.loc_vector_within_disjoint hs lv (lv + 1ul) (lv + 1ul) (V.size_of hs);
assert (V.size_of (V.get hh1 hs lv) == j + 1ul - offset_of (Ghost.reveal i));
assert (mt_safe_elts hh1 (lv + 1ul) hs (Ghost.reveal i / 2ul) (j / 2ul));
if j % 2ul = 1ul
then (insert_index_helper_odd lv (Ghost.reveal i) j;
assert (S.length (S.index (RV.as_seq hh0 hs) (U32.v lv)) > 0);
let lvhs = V.index hs lv in
assert (U32.v (V.size_of lvhs) ==
S.length (S.index (RV.as_seq hh0 hs) (U32.v lv)) + 1);
assert (V.size_of lvhs > 1ul);
/// 3) Update the accumulator `acc`.
hash_vec_rv_inv_r_inv hh1 (V.get hh1 hs lv) (V.size_of (V.get hh1 hs lv) - 2ul);
assert (Rgl?.r_inv (hreg hsz) hh1 acc);
hash_fun (V.index lvhs (V.size_of lvhs - 2ul)) acc acc;
let hh2 = HST.get () in
// 3-1) For the `modifies` postcondition
assert (modifies (B.loc_all_regions_from false (B.frameOf acc)) hh1 hh2);
assert (modifies
(loc_union
(loc_union
(RV.rs_loc_elem (hvreg hsz) (V.as_seq hh0 hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul)))
(B.loc_all_regions_from false (B.frameOf acc)))
hh0 hh2);
// 3-2) Preservation
RV.rv_inv_preserved
hs (B.loc_region_only false (B.frameOf acc)) hh1 hh2;
RV.as_seq_preserved
hs (B.loc_region_only false (B.frameOf acc)) hh1 hh2;
RV.rv_loc_elems_preserved
hs (lv + 1ul) (V.size_of hs)
(B.loc_region_only false (B.frameOf acc)) hh1 hh2;
assert (RV.rv_inv hh2 hs);
assert (Rgl?.r_inv (hreg hsz) hh2 acc);
// 3-3) For `mt_safe_elts`
V.get_preserved hs lv
(B.loc_region_only false (B.frameOf acc)) hh1 hh2; // head preserved
mt_safe_elts_preserved
(lv + 1ul) hs (Ghost.reveal i / 2ul) (j / 2ul)
(B.loc_region_only false (B.frameOf acc)) hh1 hh2; // tail preserved
// 3-4) Correctness
insert_snoc_last_helper
(RV.as_seq hh0 (V.get hh0 hs lv))
(Rgl?.r_repr (hreg hsz) hh0 acc);
assert (S.equal (Rgl?.r_repr (hreg hsz) hh2 acc) // `nacc` in `MTH.insert_`
((Ghost.reveal hash_spec)
(S.last (S.index (RV.as_seq hh0 hs) (U32.v lv)))
(Rgl?.r_repr (hreg hsz) hh0 acc)));
/// 4) Recursion
insert_ (lv + 1ul)
(Ghost.hide (Ghost.reveal i / 2ul)) (j / 2ul)
hs acc hash_fun;
let hh3 = HST.get () in
// 4-0) Memory safety brought from the postcondition of the recursion
assert (RV.rv_inv hh3 hs);
assert (Rgl?.r_inv (hreg hsz) hh3 acc);
assert (modifies (loc_union
(loc_union
(RV.rv_loc_elems hh0 hs (lv + 1ul) (V.size_of hs))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs)))
(B.loc_all_regions_from false (B.frameOf acc)))
hh2 hh3);
assert (modifies
(loc_union
(loc_union
(loc_union
(RV.rs_loc_elem (hvreg hsz) (V.as_seq hh0 hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul)))
(B.loc_all_regions_from false (B.frameOf acc)))
(loc_union
(loc_union
(RV.rv_loc_elems hh0 hs (lv + 1ul) (V.size_of hs))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs)))
(B.loc_all_regions_from false (B.frameOf acc))))
hh0 hh3);
// 4-1) For `mt_safe_elts`
rv_inv_rv_elems_reg hh2 hs (lv + 1ul) (V.size_of hs);
RV.rv_loc_elems_included hh2 hs (lv + 1ul) (V.size_of hs);
assert (loc_disjoint
(V.loc_vector_within hs lv (lv + 1ul))
(RV.rv_loc_elems hh2 hs (lv + 1ul) (V.size_of hs)));
assert (loc_disjoint
(V.loc_vector_within hs lv (lv + 1ul))
(B.loc_all_regions_from false (B.frameOf acc)));
V.get_preserved hs lv
(loc_union
(loc_union
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs))
(RV.rv_loc_elems hh2 hs (lv + 1ul) (V.size_of hs)))
(B.loc_all_regions_from false (B.frameOf acc)))
hh2 hh3;
assert (V.size_of (V.get hh3 hs lv) ==
j + 1ul - offset_of (Ghost.reveal i)); // head preserved
assert (mt_safe_elts hh3 (lv + 1ul) hs
(Ghost.reveal i / 2ul) (j / 2ul + 1ul)); // tail by recursion
mt_safe_elts_constr hh3 lv hs (Ghost.reveal i) (j + 1ul);
assert (mt_safe_elts hh3 lv hs (Ghost.reveal i) (j + 1ul));
// 4-2) Correctness
mt_safe_elts_spec hh2 (lv + 1ul) hs (Ghost.reveal i / 2ul) (j / 2ul);
assert (S.equal (RV.as_seq hh3 hs)
(MTH.insert_ #(U32.v hsz) #(Ghost.reveal hash_spec) (U32.v lv + 1) (U32.v (Ghost.reveal i) / 2) (U32.v j / 2)
(RV.as_seq hh2 hs) (Rgl?.r_repr (hreg hsz) hh2 acc)));
mt_safe_elts_spec hh0 lv hs (Ghost.reveal i) j;
MTH.insert_rec #(U32.v hsz) #(Ghost.reveal hash_spec) (U32.v lv) (U32.v (Ghost.reveal i)) (U32.v j)
(RV.as_seq hh0 hs) (Rgl?.r_repr (hreg hsz) hh0 acc);
assert (S.equal (RV.as_seq hh3 hs)
(MTH.insert_ #(U32.v hsz) #(Ghost.reveal hash_spec) (U32.v lv) (U32.v (Ghost.reveal i)) (U32.v j)
(RV.as_seq hh0 hs) (Rgl?.r_repr (hreg hsz) hh0 acc))))
else (insert_index_helper_even lv j;
// memory safety
assert (mt_safe_elts hh1 (lv + 1ul) hs (Ghost.reveal i / 2ul) ((j + 1ul) / 2ul));
mt_safe_elts_constr hh1 lv hs (Ghost.reveal i) (j + 1ul);
assert (mt_safe_elts hh1 lv hs (Ghost.reveal i) (j + 1ul));
assert (modifies
(loc_union
(RV.rs_loc_elem (hvreg hsz) (V.as_seq hh0 hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul)))
hh0 hh1);
insert_modifies_union_loc_weakening
(loc_union
(RV.rs_loc_elem (hvreg hsz) (V.as_seq hh0 hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul)))
(B.loc_all_regions_from false (B.frameOf acc))
(loc_union
(loc_union
(RV.rv_loc_elems hh0 hs (lv + 1ul) (V.size_of hs))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs)))
(B.loc_all_regions_from false (B.frameOf acc)))
hh0 hh1;
// correctness
mt_safe_elts_spec hh0 lv hs (Ghost.reveal i) j;
MTH.insert_base #(U32.v hsz) #(Ghost.reveal hash_spec) (U32.v lv) (U32.v (Ghost.reveal i)) (U32.v j)
(RV.as_seq hh0 hs) (Rgl?.r_repr (hreg hsz) hh0 acc);
assert (S.equal (RV.as_seq hh1 hs)
(MTH.insert_ #(U32.v hsz) #(Ghost.reveal hash_spec) (U32.v lv) (U32.v (Ghost.reveal i)) (U32.v j)
(RV.as_seq hh0 hs) (Rgl?.r_repr (hreg hsz) hh0 acc))));
/// 5) Proving the postcondition after recursion
let hh4 = HST.get () in
// 5-1) For the `modifies` postcondition.
assert (modifies
(loc_union
(loc_union
(loc_union
(RV.rs_loc_elem (hvreg hsz) (V.as_seq hh0 hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul)))
(B.loc_all_regions_from false (B.frameOf acc)))
(loc_union
(loc_union
(RV.rv_loc_elems hh0 hs (lv + 1ul) (V.size_of hs))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs)))
(B.loc_all_regions_from false (B.frameOf acc))))
hh0 hh4);
insert_modifies_rec_helper
lv hs (B.loc_all_regions_from false (B.frameOf acc)) hh0;
// 5-2) For `mt_safe_elts`
assert (mt_safe_elts hh4 lv hs (Ghost.reveal i) (j + 1ul));
// 5-3) Preservation
assert (RV.rv_inv hh4 hs);
assert (Rgl?.r_inv (hreg hsz) hh4 acc);
// 5-4) Correctness
mt_safe_elts_spec hh0 lv hs (Ghost.reveal i) j;
assert (S.equal (RV.as_seq hh4 hs)
(MTH.insert_ #(U32.v hsz) #hash_spec (U32.v lv) (U32.v (Ghost.reveal i)) (U32.v j)
(RV.as_seq hh0 hs) (Rgl?.r_repr (hreg hsz) hh0 acc))) // QED
#pop-options
private inline_for_extraction
val mt_insert_pre_nst: mtv:merkle_tree -> v:hash #(MT?.hash_size mtv) -> Tot bool
let mt_insert_pre_nst mtv v = mt_not_full_nst mtv && add64_fits (MT?.offset mtv) ((MT?.j mtv) + 1ul)
val mt_insert_pre: #hsz:Ghost.erased hash_size_t -> mt:const_mt_p -> v:hash #hsz -> HST.ST bool
(requires (fun h0 -> mt_safe h0 (CB.cast mt) /\ (MT?.hash_size (B.get h0 (CB.cast mt) 0)) = Ghost.reveal hsz))
(ensures (fun _ _ _ -> True))
let mt_insert_pre #hsz mt v =
let mt = !*(CB.cast mt) in
assert (MT?.hash_size mt == (MT?.hash_size mt));
mt_insert_pre_nst mt v
// `mt_insert` inserts a hash into a Merkle tree. Note that this operation
// modifies the contents of `v`, since it uses `v` as an accumulator during
// insertion.
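// A rough usage sketch (buffer names are hypothetical): given a tree `mt` and
// a hash `v` holding the new leaf, allocated in a region disjoint from the
// tree, `mt_insert hsz mt v` appends the leaf; afterwards `v` no longer holds
// the leaf value (it served as the accumulator), so callers that still need
// the leaf should keep their own copy.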
#push-options "--z3rlimit 100 --initial_fuel 1 --max_fuel 1 --initial_ifuel 1 --max_ifuel 1"
val mt_insert:
hsz:Ghost.erased hash_size_t ->
mt:mt_p -> v:hash #hsz ->
HST.ST unit
(requires (fun h0 ->
let dmt = B.get h0 mt 0 in
mt_safe h0 mt /\
Rgl?.r_inv (hreg hsz) h0 v /\
HH.disjoint (B.frameOf mt) (B.frameOf v) /\
MT?.hash_size dmt = Ghost.reveal hsz /\
mt_insert_pre_nst dmt v))
(ensures (fun h0 _ h1 ->
// memory safety
modifies (loc_union
(mt_loc mt)
(B.loc_all_regions_from false (B.frameOf v)))
h0 h1 /\
mt_safe h1 mt /\
// correctness
MT?.hash_size (B.get h1 mt 0) = Ghost.reveal hsz /\
mt_lift h1 mt == MTH.mt_insert (mt_lift h0 mt) (Rgl?.r_repr (hreg hsz) h0 v)))
#pop-options
#push-options "--z3rlimit 40"
let mt_insert hsz mt v =
let hh0 = HST.get () in
let mtv = !*mt in
let hs = MT?.hs mtv in
let hsz = MT?.hash_size mtv in
insert_ #hsz #(Ghost.reveal (MT?.hash_spec mtv)) 0ul (Ghost.hide (MT?.i mtv)) (MT?.j mtv) hs v (MT?.hash_fun mtv);
let hh1 = HST.get () in
RV.rv_loc_elems_included hh0 (MT?.hs mtv) 0ul (V.size_of hs);
V.loc_vector_within_included hs 0ul (V.size_of hs);
RV.rv_inv_preserved
(MT?.rhs mtv)
(loc_union
(loc_union
(RV.rv_loc_elems hh0 hs 0ul (V.size_of hs))
(V.loc_vector_within hs 0ul (V.size_of hs)))
(B.loc_all_regions_from false (B.frameOf v)))
hh0 hh1;
RV.as_seq_preserved
(MT?.rhs mtv)
(loc_union
(loc_union
(RV.rv_loc_elems hh0 hs 0ul (V.size_of hs))
(V.loc_vector_within hs 0ul (V.size_of hs)))
(B.loc_all_regions_from false (B.frameOf v)))
hh0 hh1;
Rgl?.r_sep (hreg hsz) (MT?.mroot mtv)
(loc_union
(loc_union
(RV.rv_loc_elems hh0 hs 0ul (V.size_of hs))
(V.loc_vector_within hs 0ul (V.size_of hs)))
(B.loc_all_regions_from false (B.frameOf v)))
hh0 hh1;
mt *= MT (MT?.hash_size mtv)
(MT?.offset mtv)
(MT?.i mtv)
(MT?.j mtv + 1ul)
(MT?.hs mtv)
           false // `rhs` is always invalidated right after an insertion.
(MT?.rhs mtv)
(MT?.mroot mtv)
(MT?.hash_spec mtv)
(MT?.hash_fun mtv);
let hh2 = HST.get () in
RV.rv_inv_preserved
(MT?.hs mtv) (B.loc_buffer mt) hh1 hh2;
RV.rv_inv_preserved
(MT?.rhs mtv) (B.loc_buffer mt) hh1 hh2;
RV.as_seq_preserved
(MT?.hs mtv) (B.loc_buffer mt) hh1 hh2;
RV.as_seq_preserved
(MT?.rhs mtv) (B.loc_buffer mt) hh1 hh2;
Rgl?.r_sep (hreg hsz) (MT?.mroot mtv) (B.loc_buffer mt) hh1 hh2;
mt_safe_elts_preserved
0ul (MT?.hs mtv) (MT?.i mtv) (MT?.j mtv + 1ul) (B.loc_buffer mt)
hh1 hh2
#pop-options
// `mt_create` initializes a Merkle tree with a given initial hash `init`.
// A valid Merkle tree must contain at least one element.
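// A rough calling sketch (region names are hypothetical): allocate an eternal
// region `r` for the tree and a hash `init` in a region disjoint from `r`,
// then `mt_create_custom hsz hash_spec r init hash_fun` builds an empty tree
// in `r` and immediately inserts `init` as its first leaf (see the body below).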
val mt_create_custom:
hsz:hash_size_t ->
hash_spec:Ghost.erased (MTS.hash_fun_t #(U32.v hsz)) ->
r:HST.erid -> init:hash #hsz -> hash_fun:hash_fun_t #hsz #hash_spec -> HST.ST mt_p
(requires (fun h0 ->
Rgl?.r_inv (hreg hsz) h0 init /\
HH.disjoint r (B.frameOf init)))
(ensures (fun h0 mt h1 ->
// memory safety
modifies (loc_union (mt_loc mt) (B.loc_all_regions_from false (B.frameOf init))) h0 h1 /\
mt_safe h1 mt /\
// correctness
MT?.hash_size (B.get h1 mt 0) = hsz /\
mt_lift h1 mt == MTH.mt_create (U32.v hsz) (Ghost.reveal hash_spec) (Rgl?.r_repr (hreg hsz) h0 init)))
#push-options "--z3rlimit 40"
let mt_create_custom hsz hash_spec r init hash_fun =
let hh0 = HST.get () in
let mt = create_empty_mt hsz hash_spec hash_fun r in
mt_insert hsz mt init;
let hh2 = HST.get () in
mt
#pop-options
/// Construction and Destruction of paths
// Since each element pointer in a `path` comes from the target Merkle tree and
// each element has a different location in `MT?.hs` (and thus a different
// region id), we cannot use the regionality property for `path`s. Hence we
// manually define the invariants and representation here.
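// Concretely, a `path` is just a sized record `Path hsz hashes` where `hashes`
// is a resizable vector of hash pointers; `path_safe` below requires every
// element to live in a region included in the tree's region `mtr`, while the
// vector itself lives in a region extending the frame of the path pointer.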
noeq type path =
| Path: hash_size:hash_size_t ->
hashes:V.vector (hash #hash_size) ->
path
type path_p = B.pointer path
type const_path_p = const_pointer path
private
let phashes (h:HS.mem) (p:path_p)
: GTot (V.vector (hash #(Path?.hash_size (B.get h p 0))))
= Path?.hashes (B.get h p 0)
// Memory safety of a path as an invariant
inline_for_extraction noextract
val path_safe:
h:HS.mem -> mtr:HH.rid -> p:path_p -> GTot Type0
let path_safe h mtr p =
B.live h p /\ B.freeable p /\
V.live h (phashes h p) /\ V.freeable (phashes h p) /\
HST.is_eternal_region (V.frameOf (phashes h p)) /\
(let hsz = Path?.hash_size (B.get h p 0) in
V.forall_all h (phashes h p)
(fun hp -> Rgl?.r_inv (hreg hsz) h hp /\
HH.includes mtr (Rgl?.region_of (hreg hsz) hp)) /\
HH.extends (V.frameOf (phashes h p)) (B.frameOf p) /\
HH.disjoint mtr (B.frameOf p))
val path_loc: path_p -> GTot loc
let path_loc p = B.loc_all_regions_from false (B.frameOf p)
val lift_path_:
#hsz:hash_size_t ->
h:HS.mem ->
hs:S.seq (hash #hsz) ->
i:nat ->
j:nat{
i <= j /\ j <= S.length hs /\
V.forall_seq hs i j (fun hp -> Rgl?.r_inv (hreg hsz) h hp)} ->
GTot (hp:MTH.path #(U32.v hsz) {S.length hp = j - i}) (decreases j)
let rec lift_path_ #hsz h hs i j =
if i = j then S.empty
else (S.snoc (lift_path_ h hs i (j - 1))
(Rgl?.r_repr (hreg hsz) h (S.index hs (j - 1))))
// Representation of a path
val lift_path:
#hsz:hash_size_t ->
h:HS.mem -> mtr:HH.rid -> p:path_p {path_safe h mtr p /\ (Path?.hash_size (B.get h p 0)) = hsz} ->
GTot (hp:MTH.path #(U32.v hsz) {S.length hp = U32.v (V.size_of (phashes h p))})
let lift_path #hsz h mtr p =
lift_path_ h (V.as_seq h (phashes h p))
0 (S.length (V.as_seq h (phashes h p)))
val lift_path_index_:
#hsz:hash_size_t ->
h:HS.mem ->
hs:S.seq (hash #hsz) ->
i:nat -> j:nat{i <= j && j <= S.length hs} ->
k:nat{i <= k && k < j} ->
Lemma (requires (V.forall_seq hs i j (fun hp -> Rgl?.r_inv (hreg hsz) h hp)))
(ensures (Rgl?.r_repr (hreg hsz) h (S.index hs k) ==
S.index (lift_path_ h hs i j) (k - i)))
(decreases j)
[SMTPat (S.index (lift_path_ h hs i j) (k - i))]
#push-options "--initial_fuel 1 --max_fuel 1 --initial_ifuel 1 --max_ifuel 1"
let rec lift_path_index_ #hsz h hs i j k =
if i = j then ()
else if k = j - 1 then ()
else lift_path_index_ #hsz h hs i (j - 1) k
#pop-options
val lift_path_index:
h:HS.mem -> mtr:HH.rid ->
p:path_p -> i:uint32_t ->
Lemma (requires (path_safe h mtr p /\
i < V.size_of (phashes h p)))
(ensures (let hsz = Path?.hash_size (B.get h p 0) in
Rgl?.r_repr (hreg hsz) h (V.get h (phashes h p) i) ==
S.index (lift_path #(hsz) h mtr p) (U32.v i)))
let lift_path_index h mtr p i =
lift_path_index_ h (V.as_seq h (phashes h p))
0 (S.length (V.as_seq h (phashes h p))) (U32.v i)
val lift_path_eq:
#hsz:hash_size_t ->
h:HS.mem ->
hs1:S.seq (hash #hsz) -> hs2:S.seq (hash #hsz) ->
i:nat -> j:nat ->
Lemma (requires (i <= j /\ j <= S.length hs1 /\ j <= S.length hs2 /\
S.equal (S.slice hs1 i j) (S.slice hs2 i j) /\
V.forall_seq hs1 i j (fun hp -> Rgl?.r_inv (hreg hsz) h hp) /\
V.forall_seq hs2 i j (fun hp -> Rgl?.r_inv (hreg hsz) h hp)))
(ensures (S.equal (lift_path_ h hs1 i j) (lift_path_ h hs2 i j)))
let lift_path_eq #hsz h hs1 hs2 i j =
assert (forall (k:nat{i <= k && k < j}).
S.index (lift_path_ h hs1 i j) (k - i) ==
Rgl?.r_repr (hreg hsz) h (S.index hs1 k));
assert (forall (k:nat{i <= k && k < j}).
S.index (lift_path_ h hs2 i j) (k - i) ==
Rgl?.r_repr (hreg hsz) h (S.index hs2 k));
assert (forall (k:nat{k < j - i}).
S.index (lift_path_ h hs1 i j) k ==
Rgl?.r_repr (hreg hsz) h (S.index hs1 (k + i)));
assert (forall (k:nat{k < j - i}).
S.index (lift_path_ h hs2 i j) k ==
Rgl?.r_repr (hreg hsz) h (S.index hs2 (k + i)));
assert (forall (k:nat{k < j - i}).
S.index (S.slice hs1 i j) k == S.index (S.slice hs2 i j) k);
assert (forall (k:nat{i <= k && k < j}).
S.index (S.slice hs1 i j) (k - i) == S.index (S.slice hs2 i j) (k - i))
private
val path_safe_preserved_:
#hsz:hash_size_t ->
mtr:HH.rid -> hs:S.seq (hash #hsz) ->
i:nat -> j:nat{i <= j && j <= S.length hs} ->
dl:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma
(requires (V.forall_seq hs i j
(fun hp ->
Rgl?.r_inv (hreg hsz) h0 hp /\
HH.includes mtr (Rgl?.region_of (hreg hsz) hp)) /\
loc_disjoint dl (B.loc_all_regions_from false mtr) /\
modifies dl h0 h1))
(ensures (V.forall_seq hs i j
(fun hp ->
Rgl?.r_inv (hreg hsz) h1 hp /\
HH.includes mtr (Rgl?.region_of (hreg hsz) hp))))
(decreases j)
let rec path_safe_preserved_ #hsz mtr hs i j dl h0 h1 =
if i = j then ()
else (assert (loc_includes
(B.loc_all_regions_from false mtr)
(B.loc_all_regions_from false
(Rgl?.region_of (hreg hsz) (S.index hs (j - 1)))));
Rgl?.r_sep (hreg hsz) (S.index hs (j - 1)) dl h0 h1;
path_safe_preserved_ mtr hs i (j - 1) dl h0 h1)
val path_safe_preserved:
mtr:HH.rid -> p:path_p ->
dl:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (path_safe h0 mtr p /\
loc_disjoint dl (path_loc p) /\
loc_disjoint dl (B.loc_all_regions_from false mtr) /\
modifies dl h0 h1))
(ensures (path_safe h1 mtr p))
let path_safe_preserved mtr p dl h0 h1 =
assert (loc_includes (path_loc p) (B.loc_buffer p));
assert (loc_includes (path_loc p) (V.loc_vector (phashes h0 p)));
path_safe_preserved_
mtr (V.as_seq h0 (phashes h0 p))
0 (S.length (V.as_seq h0 (phashes h0 p))) dl h0 h1
val path_safe_init_preserved:
mtr:HH.rid -> p:path_p ->
dl:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (path_safe h0 mtr p /\
V.size_of (phashes h0 p) = 0ul /\
B.loc_disjoint dl (path_loc p) /\
modifies dl h0 h1))
(ensures (path_safe h1 mtr p /\
V.size_of (phashes h1 p) = 0ul))
let path_safe_init_preserved mtr p dl h0 h1 =
assert (loc_includes (path_loc p) (B.loc_buffer p));
assert (loc_includes (path_loc p) (V.loc_vector (phashes h0 p)))
val path_preserved_:
#hsz:hash_size_t ->
mtr:HH.rid ->
hs:S.seq (hash #hsz) ->
i:nat -> j:nat{i <= j && j <= S.length hs} ->
dl:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.forall_seq hs i j
(fun hp -> Rgl?.r_inv (hreg hsz) h0 hp /\
HH.includes mtr (Rgl?.region_of (hreg hsz) hp)) /\
loc_disjoint dl (B.loc_all_regions_from false mtr) /\
modifies dl h0 h1))
(ensures (path_safe_preserved_ mtr hs i j dl h0 h1;
S.equal (lift_path_ h0 hs i j)
(lift_path_ h1 hs i j)))
(decreases j)
#push-options "--initial_fuel 1 --max_fuel 1 --initial_ifuel 1 --max_ifuel 1"
let rec path_preserved_ #hsz mtr hs i j dl h0 h1 =
if i = j then ()
else (path_safe_preserved_ mtr hs i (j - 1) dl h0 h1;
path_preserved_ mtr hs i (j - 1) dl h0 h1;
assert (loc_includes
(B.loc_all_regions_from false mtr)
(B.loc_all_regions_from false
(Rgl?.region_of (hreg hsz) (S.index hs (j - 1)))));
Rgl?.r_sep (hreg hsz) (S.index hs (j - 1)) dl h0 h1)
#pop-options
val path_preserved:
mtr:HH.rid -> p:path_p ->
dl:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (path_safe h0 mtr p /\
loc_disjoint dl (path_loc p) /\
loc_disjoint dl (B.loc_all_regions_from false mtr) /\
modifies dl h0 h1))
(ensures (path_safe_preserved mtr p dl h0 h1;
let hsz0 = (Path?.hash_size (B.get h0 p 0)) in
let hsz1 = (Path?.hash_size (B.get h1 p 0)) in
let b:MTH.path = lift_path #hsz0 h0 mtr p in
let a:MTH.path = lift_path #hsz1 h1 mtr p in
hsz0 = hsz1 /\ S.equal b a))
let path_preserved mtr p dl h0 h1 =
assert (loc_includes (path_loc p) (B.loc_buffer p));
assert (loc_includes (path_loc p) (V.loc_vector (phashes h0 p)));
path_preserved_ mtr (V.as_seq h0 (phashes h0 p))
0 (S.length (V.as_seq h0 (phashes h0 p)))
dl h0 h1
val init_path:
hsz:hash_size_t ->
mtr:HH.rid -> r:HST.erid ->
HST.ST path_p
(requires (fun h0 -> HH.disjoint mtr r))
(ensures (fun h0 p h1 ->
// memory safety
path_safe h1 mtr p /\
// correctness
Path?.hash_size (B.get h1 p 0) = hsz /\
S.equal (lift_path #hsz h1 mtr p) S.empty))
let init_path hsz mtr r =
let nrid = HST.new_region r in
(B.malloc r (Path hsz (rg_alloc (hvreg hsz) nrid)) 1ul)
val clear_path:
mtr:HH.rid -> p:path_p ->
HST.ST unit
(requires (fun h0 -> path_safe h0 mtr p))
(ensures (fun h0 _ h1 ->
// memory safety
path_safe h1 mtr p /\
// correctness
V.size_of (phashes h1 p) = 0ul /\
S.equal (lift_path #(Path?.hash_size (B.get h1 p 0)) h1 mtr p) S.empty))
let clear_path mtr p =
let pv = !*p in
p *= Path (Path?.hash_size pv) (V.clear (Path?.hashes pv))
val free_path:
p:path_p ->
HST.ST unit
(requires (fun h0 ->
B.live h0 p /\ B.freeable p /\
V.live h0 (phashes h0 p) /\ V.freeable (phashes h0 p) /\
HH.extends (V.frameOf (phashes h0 p)) (B.frameOf p)))
(ensures (fun h0 _ h1 ->
modifies (path_loc p) h0 h1))
let free_path p =
let pv = !*p in
V.free (Path?.hashes pv);
B.free p
/// Getting the Merkle root and path
// Construct "rightmost hashes" for a given (incomplete) Merkle tree.
// This function calculates the Merkle root as well, which is the final
// accumulator value.
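// A worked example for the three-leaf tree [h0; h1; h2] (i = 0, j = 3),
// starting with `actd = false`:
// - level 0: j is odd and `actd` is false, so `acc := h2` (no write to `rhs`);
// - level 1: j / 2 = 1 is odd and `actd` is now true, so `rhs[1] := h2` and
//   `acc := hash(h01, h2)`;
// - level 2: j = 0, so the recursion stops and `acc` holds the Merkle root.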
private
val construct_rhs:
#hsz:hash_size_t ->
#hash_spec:Ghost.erased (MTS.hash_fun_t #(U32.v hsz)) ->
lv:uint32_t{lv <= merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
rhs:hash_vec #hsz {V.size_of rhs = merkle_tree_size_lg} ->
i:index_t ->
j:index_t{i <= j && (U32.v j) < pow2 (32 - U32.v lv)} ->
acc:hash #hsz ->
actd:bool ->
hash_fun:hash_fun_t #hsz #(Ghost.reveal hash_spec) ->
HST.ST unit
(requires (fun h0 ->
RV.rv_inv h0 hs /\ RV.rv_inv h0 rhs /\
HH.disjoint (V.frameOf hs) (V.frameOf rhs) /\
Rgl?.r_inv (hreg hsz) h0 acc /\
HH.disjoint (B.frameOf acc) (V.frameOf hs) /\
HH.disjoint (B.frameOf acc) (V.frameOf rhs) /\
mt_safe_elts #hsz h0 lv hs i j))
(ensures (fun h0 _ h1 ->
// memory safety
modifies (loc_union
(RV.loc_rvector rhs)
(B.loc_all_regions_from false (B.frameOf acc)))
h0 h1 /\
RV.rv_inv h1 rhs /\
Rgl?.r_inv (hreg hsz) h1 acc /\
// correctness
(mt_safe_elts_spec #hsz h0 lv hs i j;
MTH.construct_rhs #(U32.v hsz) #(Ghost.reveal hash_spec)
(U32.v lv)
(Rgl?.r_repr (hvvreg hsz) h0 hs)
(Rgl?.r_repr (hvreg hsz) h0 rhs)
(U32.v i) (U32.v j)
(Rgl?.r_repr (hreg hsz) h0 acc) actd ==
(Rgl?.r_repr (hvreg hsz) h1 rhs, Rgl?.r_repr (hreg hsz) h1 acc)
)))
(decreases (U32.v j))
#push-options "--z3rlimit 250 --initial_fuel 1 --max_fuel 1 --initial_ifuel 1 --max_ifuel 1"
let rec construct_rhs #hsz #hash_spec lv hs rhs i j acc actd hash_fun =
let hh0 = HST.get () in
if j = 0ul then begin
assert (RV.rv_inv hh0 hs);
assert (mt_safe_elts #hsz hh0 lv hs i j);
mt_safe_elts_spec #hsz hh0 lv hs 0ul 0ul;
assert (MTH.hs_wf_elts #(U32.v hsz)
(U32.v lv) (RV.as_seq hh0 hs)
(U32.v i) (U32.v j));
let hh1 = HST.get() in
assert (MTH.construct_rhs #(U32.v hsz) #(Ghost.reveal hash_spec)
(U32.v lv)
(Rgl?.r_repr (hvvreg hsz) hh0 hs)
(Rgl?.r_repr (hvreg hsz) hh0 rhs)
(U32.v i) (U32.v j)
(Rgl?.r_repr (hreg hsz) hh0 acc) actd ==
(Rgl?.r_repr (hvreg hsz) hh1 rhs, Rgl?.r_repr (hreg hsz) hh1 acc))
end
else
let ofs = offset_of i in
begin
(if j % 2ul = 0ul
then begin
Math.Lemmas.pow2_double_mult (32 - U32.v lv - 1);
mt_safe_elts_rec #hsz hh0 lv hs i j;
construct_rhs #hsz #hash_spec (lv + 1ul) hs rhs (i / 2ul) (j / 2ul) acc actd hash_fun;
let hh1 = HST.get () in
// correctness
mt_safe_elts_spec #hsz hh0 lv hs i j;
MTH.construct_rhs_even #(U32.v hsz) #hash_spec
(U32.v lv) (Rgl?.r_repr (hvvreg hsz) hh0 hs) (Rgl?.r_repr (hvreg hsz) hh0 rhs)
(U32.v i) (U32.v j) (Rgl?.r_repr (hreg hsz) hh0 acc) actd;
assert (MTH.construct_rhs #(U32.v hsz) #hash_spec
(U32.v lv)
(Rgl?.r_repr (hvvreg hsz) hh0 hs)
(Rgl?.r_repr (hvreg hsz) hh0 rhs)
(U32.v i) (U32.v j)
(Rgl?.r_repr (hreg hsz) hh0 acc)
actd ==
(Rgl?.r_repr (hvreg hsz) hh1 rhs, Rgl?.r_repr (hreg hsz) hh1 acc))
end
else begin
if actd
then begin
RV.assign_copy (hcpy hsz) rhs lv acc;
let hh1 = HST.get () in
// memory safety
Rgl?.r_sep (hreg hsz) acc
(B.loc_all_regions_from false (V.frameOf rhs)) hh0 hh1;
RV.rv_inv_preserved
hs (B.loc_all_regions_from false (V.frameOf rhs))
hh0 hh1;
RV.as_seq_preserved
hs (B.loc_all_regions_from false (V.frameOf rhs))
hh0 hh1;
RV.rv_inv_preserved
(V.get hh0 hs lv) (B.loc_all_regions_from false (V.frameOf rhs))
hh0 hh1;
V.loc_vector_within_included hs lv (V.size_of hs);
mt_safe_elts_preserved lv hs i j
(B.loc_all_regions_from false (V.frameOf rhs))
hh0 hh1;
mt_safe_elts_head hh1 lv hs i j;
hash_vv_rv_inv_r_inv hh1 hs lv (j - 1ul - ofs);
// correctness
assert (S.equal (RV.as_seq hh1 rhs)
(S.upd (RV.as_seq hh0 rhs) (U32.v lv)
(Rgl?.r_repr (hreg hsz) hh0 acc)));
hash_fun (V.index (V.index hs lv) (j - 1ul - ofs)) acc acc;
let hh2 = HST.get () in
// memory safety
mt_safe_elts_preserved lv hs i j
(B.loc_all_regions_from false (B.frameOf acc)) hh1 hh2;
RV.rv_inv_preserved
hs (B.loc_region_only false (B.frameOf acc)) hh1 hh2;
RV.rv_inv_preserved
rhs (B.loc_region_only false (B.frameOf acc)) hh1 hh2;
RV.as_seq_preserved
hs (B.loc_region_only false (B.frameOf acc)) hh1 hh2;
RV.as_seq_preserved
rhs (B.loc_region_only false (B.frameOf acc)) hh1 hh2;
// correctness
hash_vv_as_seq_get_index hh0 hs lv (j - 1ul - ofs);
assert (Rgl?.r_repr (hreg hsz) hh2 acc ==
(Ghost.reveal hash_spec) (S.index (S.index (RV.as_seq hh0 hs) (U32.v lv))
(U32.v j - 1 - U32.v ofs))
(Rgl?.r_repr (hreg hsz) hh0 acc))
end
else begin
mt_safe_elts_head hh0 lv hs i j;
hash_vv_rv_inv_r_inv hh0 hs lv (j - 1ul - ofs);
hash_vv_rv_inv_disjoint hh0 hs lv (j - 1ul - ofs) (B.frameOf acc);
Cpy?.copy (hcpy hsz) hsz (V.index (V.index hs lv) (j - 1ul - ofs)) acc;
let hh1 = HST.get () in
// memory safety
V.loc_vector_within_included hs lv (V.size_of hs);
mt_safe_elts_preserved lv hs i j
(B.loc_all_regions_from false (B.frameOf acc)) hh0 hh1;
RV.rv_inv_preserved
hs (B.loc_all_regions_from false (B.frameOf acc)) hh0 hh1;
RV.rv_inv_preserved
rhs (B.loc_all_regions_from false (B.frameOf acc)) hh0 hh1;
RV.as_seq_preserved
hs (B.loc_all_regions_from false (B.frameOf acc)) hh0 hh1;
RV.as_seq_preserved
rhs (B.loc_all_regions_from false (B.frameOf acc)) hh0 hh1;
// correctness
hash_vv_as_seq_get_index hh0 hs lv (j - 1ul - ofs);
assert (Rgl?.r_repr (hreg hsz) hh1 acc ==
S.index (S.index (RV.as_seq hh0 hs) (U32.v lv))
(U32.v j - 1 - U32.v ofs))
end;
let hh3 = HST.get () in
assert (S.equal (RV.as_seq hh3 hs) (RV.as_seq hh0 hs));
assert (S.equal (RV.as_seq hh3 rhs)
(if actd
then S.upd (RV.as_seq hh0 rhs) (U32.v lv)
(Rgl?.r_repr (hreg hsz) hh0 acc)
else RV.as_seq hh0 rhs));
assert (Rgl?.r_repr (hreg hsz) hh3 acc ==
(if actd
then (Ghost.reveal hash_spec) (S.index (S.index (RV.as_seq hh0 hs) (U32.v lv))
(U32.v j - 1 - U32.v ofs))
(Rgl?.r_repr (hreg hsz) hh0 acc)
else S.index (S.index (RV.as_seq hh0 hs) (U32.v lv))
(U32.v j - 1 - U32.v ofs)));
mt_safe_elts_rec hh3 lv hs i j;
construct_rhs #hsz #hash_spec (lv + 1ul) hs rhs (i / 2ul) (j / 2ul) acc true hash_fun;
let hh4 = HST.get () in
mt_safe_elts_spec hh3 (lv + 1ul) hs (i / 2ul) (j / 2ul);
assert (MTH.construct_rhs #(U32.v hsz) #hash_spec
(U32.v lv + 1)
(Rgl?.r_repr (hvvreg hsz) hh3 hs)
(Rgl?.r_repr (hvreg hsz) hh3 rhs)
(U32.v i / 2) (U32.v j / 2)
(Rgl?.r_repr (hreg hsz) hh3 acc) true ==
(Rgl?.r_repr (hvreg hsz) hh4 rhs, Rgl?.r_repr (hreg hsz) hh4 acc));
mt_safe_elts_spec hh0 lv hs i j;
MTH.construct_rhs_odd #(U32.v hsz) #hash_spec
(U32.v lv) (Rgl?.r_repr (hvvreg hsz) hh0 hs) (Rgl?.r_repr (hvreg hsz) hh0 rhs)
(U32.v i) (U32.v j) (Rgl?.r_repr (hreg hsz) hh0 acc) actd;
assert (MTH.construct_rhs #(U32.v hsz) #hash_spec
(U32.v lv)
(Rgl?.r_repr (hvvreg hsz) hh0 hs)
(Rgl?.r_repr (hvreg hsz) hh0 rhs)
(U32.v i) (U32.v j)
(Rgl?.r_repr (hreg hsz) hh0 acc) actd ==
(Rgl?.r_repr (hvreg hsz) hh4 rhs, Rgl?.r_repr (hreg hsz) hh4 acc))
end)
end
#pop-options
private inline_for_extraction
val mt_get_root_pre_nst: mtv:merkle_tree -> rt:hash #(MT?.hash_size mtv) -> Tot bool
let mt_get_root_pre_nst mtv rt = true
val mt_get_root_pre:
#hsz:Ghost.erased hash_size_t ->
mt:const_mt_p ->
rt:hash #hsz ->
HST.ST bool
(requires (fun h0 ->
let mt = CB.cast mt in
MT?.hash_size (B.get h0 mt 0) = Ghost.reveal hsz /\
mt_safe h0 mt /\ Rgl?.r_inv (hreg hsz) h0 rt /\
HH.disjoint (B.frameOf mt) (B.frameOf rt)))
(ensures (fun _ _ _ -> True))
let mt_get_root_pre #hsz mt rt =
let mt = CB.cast mt in
let mt = !*mt in
let hsz = MT?.hash_size mt in
assert (MT?.hash_size mt = hsz);
mt_get_root_pre_nst mt rt
// `mt_get_root` returns the Merkle root. If it has already been calculated with
// up-to-date hashes, the root is returned immediately. Otherwise it calls
// `construct_rhs` to build the rightmost hashes and compute the Merkle root.
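// A rough usage sketch (names are hypothetical): allocate `rt` in a region
// disjoint from the tree, then call `mt_get_root mt rt`; `rt` now holds the
// root and `rhs_ok` is set, so a subsequent call only copies `mroot` into `rt`.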
val mt_get_root:
#hsz:Ghost.erased hash_size_t ->
mt:const_mt_p ->
rt:hash #hsz ->
HST.ST unit
(requires (fun h0 ->
let mt = CB.cast mt in
let dmt = B.get h0 mt 0 in
MT?.hash_size dmt = (Ghost.reveal hsz) /\
mt_get_root_pre_nst dmt rt /\
mt_safe h0 mt /\ Rgl?.r_inv (hreg hsz) h0 rt /\
HH.disjoint (B.frameOf mt) (B.frameOf rt)))
(ensures (fun h0 _ h1 ->
let mt = CB.cast mt in
// memory safety
modifies (loc_union
(mt_loc mt)
(B.loc_all_regions_from false (B.frameOf rt)))
h0 h1 /\
mt_safe h1 mt /\
(let mtv0 = B.get h0 mt 0 in
let mtv1 = B.get h1 mt 0 in
MT?.hash_size mtv0 = (Ghost.reveal hsz) /\
MT?.hash_size mtv1 = (Ghost.reveal hsz) /\
MT?.i mtv1 = MT?.i mtv0 /\ MT?.j mtv1 = MT?.j mtv0 /\
MT?.hs mtv1 == MT?.hs mtv0 /\ MT?.rhs mtv1 == MT?.rhs mtv0 /\
MT?.offset mtv1 == MT?.offset mtv0 /\
MT?.rhs_ok mtv1 = true /\
Rgl?.r_inv (hreg hsz) h1 rt /\
// correctness
MTH.mt_get_root (mt_lift h0 mt) (Rgl?.r_repr (hreg hsz) h0 rt) ==
(mt_lift h1 mt, Rgl?.r_repr (hreg hsz) h1 rt))))
#push-options "--z3rlimit 150 --initial_fuel 1 --max_fuel 1"
let mt_get_root #hsz mt rt =
let mt = CB.cast mt in
let hh0 = HST.get () in
let mtv = !*mt in
let prefix = MT?.offset mtv in
let i = MT?.i mtv in
let j = MT?.j mtv in
let hs = MT?.hs mtv in
let rhs = MT?.rhs mtv in
let mroot = MT?.mroot mtv in
let hash_size = MT?.hash_size mtv in
let hash_spec = MT?.hash_spec mtv in
let hash_fun = MT?.hash_fun mtv in
if MT?.rhs_ok mtv
then begin
Cpy?.copy (hcpy hash_size) hash_size mroot rt;
let hh1 = HST.get () in
mt_safe_preserved mt
(B.loc_all_regions_from false (Rgl?.region_of (hreg hsz) rt)) hh0 hh1;
mt_preserved mt
(B.loc_all_regions_from false (Rgl?.region_of (hreg hsz) rt)) hh0 hh1;
MTH.mt_get_root_rhs_ok_true
(mt_lift hh0 mt) (Rgl?.r_repr (hreg hsz) hh0 rt);
assert (MTH.mt_get_root (mt_lift hh0 mt) (Rgl?.r_repr (hreg hsz) hh0 rt) ==
(mt_lift hh1 mt, Rgl?.r_repr (hreg hsz) hh1 rt))
end
else begin
construct_rhs #hash_size #hash_spec 0ul hs rhs i j rt false hash_fun;
let hh1 = HST.get () in
// memory safety
assert (RV.rv_inv hh1 rhs);
assert (Rgl?.r_inv (hreg hsz) hh1 rt);
assert (B.live hh1 mt);
RV.rv_inv_preserved
hs (loc_union
(RV.loc_rvector rhs)
(B.loc_all_regions_from false (B.frameOf rt)))
hh0 hh1;
RV.as_seq_preserved
hs (loc_union
(RV.loc_rvector rhs)
(B.loc_all_regions_from false (B.frameOf rt)))
hh0 hh1;
V.loc_vector_within_included hs 0ul (V.size_of hs);
mt_safe_elts_preserved 0ul hs i j
(loc_union
(RV.loc_rvector rhs)
(B.loc_all_regions_from false (B.frameOf rt)))
hh0 hh1;
// correctness
mt_safe_elts_spec hh0 0ul hs i j;
assert (MTH.construct_rhs #(U32.v hash_size) #hash_spec 0
(Rgl?.r_repr (hvvreg hsz) hh0 hs)
(Rgl?.r_repr (hvreg hsz) hh0 rhs)
(U32.v i) (U32.v j)
(Rgl?.r_repr (hreg hsz) hh0 rt) false ==
(Rgl?.r_repr (hvreg hsz) hh1 rhs, Rgl?.r_repr (hreg hsz) hh1 rt));
Cpy?.copy (hcpy hash_size) hash_size rt mroot;
let hh2 = HST.get () in
// memory safety
RV.rv_inv_preserved
hs (B.loc_all_regions_from false (B.frameOf mroot))
hh1 hh2;
RV.rv_inv_preserved
rhs (B.loc_all_regions_from false (B.frameOf mroot))
hh1 hh2;
RV.as_seq_preserved
hs (B.loc_all_regions_from false (B.frameOf mroot))
hh1 hh2;
RV.as_seq_preserved
rhs (B.loc_all_regions_from false (B.frameOf mroot))
hh1 hh2;
B.modifies_buffer_elim
rt (B.loc_all_regions_from false (B.frameOf mroot))
hh1 hh2;
mt_safe_elts_preserved 0ul hs i j
(B.loc_all_regions_from false (B.frameOf mroot))
hh1 hh2;
// correctness
assert (Rgl?.r_repr (hreg hsz) hh2 mroot == Rgl?.r_repr (hreg hsz) hh1 rt);
mt *= MT hash_size prefix i j hs true rhs mroot hash_spec hash_fun;
let hh3 = HST.get () in
// memory safety
Rgl?.r_sep (hreg hsz) rt (B.loc_buffer mt) hh2 hh3;
RV.rv_inv_preserved hs (B.loc_buffer mt) hh2 hh3;
RV.rv_inv_preserved rhs (B.loc_buffer mt) hh2 hh3;
RV.as_seq_preserved hs (B.loc_buffer mt) hh2 hh3;
RV.as_seq_preserved rhs (B.loc_buffer mt) hh2 hh3;
Rgl?.r_sep (hreg hsz) mroot (B.loc_buffer mt) hh2 hh3;
mt_safe_elts_preserved 0ul hs i j
(B.loc_buffer mt) hh2 hh3;
assert (mt_safe hh3 mt);
// correctness
MTH.mt_get_root_rhs_ok_false
(mt_lift hh0 mt) (Rgl?.r_repr (hreg hsz) hh0 rt);
assert (MTH.mt_get_root (mt_lift hh0 mt) (Rgl?.r_repr (hreg hsz) hh0 rt) ==
(MTH.MT #(U32.v hash_size)
(U32.v i) (U32.v j)
(RV.as_seq hh0 hs)
true
(RV.as_seq hh1 rhs)
(Rgl?.r_repr (hreg hsz) hh1 rt)
hash_spec,
Rgl?.r_repr (hreg hsz) hh1 rt));
assert (MTH.mt_get_root (mt_lift hh0 mt) (Rgl?.r_repr (hreg hsz) hh0 rt) ==
(mt_lift hh3 mt, Rgl?.r_repr (hreg hsz) hh3 rt))
end
#pop-options
inline_for_extraction
val mt_path_insert:
#hsz:hash_size_t ->
mtr:HH.rid -> p:path_p -> hp:hash #hsz ->
HST.ST unit
(requires (fun h0 ->
path_safe h0 mtr p /\
not (V.is_full (phashes h0 p)) /\
Rgl?.r_inv (hreg hsz) h0 hp /\
HH.disjoint mtr (B.frameOf p) /\
HH.includes mtr (B.frameOf hp) /\
Path?.hash_size (B.get h0 p 0) = hsz))
(ensures (fun h0 _ h1 ->
// memory safety
modifies (path_loc p) h0 h1 /\
path_safe h1 mtr p /\
// correctness
(let hsz0 = Path?.hash_size (B.get h0 p 0) in
let hsz1 = Path?.hash_size (B.get h1 p 0) in
(let before:(S.seq (MTH.hash #(U32.v hsz0))) = lift_path h0 mtr p in
let after:(S.seq (MTH.hash #(U32.v hsz1))) = lift_path h1 mtr p in
V.size_of (phashes h1 p) = V.size_of (phashes h0 p) + 1ul /\
hsz = hsz0 /\ hsz = hsz1 /\
(let hspec:(S.seq (MTH.hash #(U32.v hsz))) = (MTH.path_insert #(U32.v hsz) before (Rgl?.r_repr (hreg hsz) h0 hp)) in
S.equal hspec after)))))
#push-options "--z3rlimit 20 --initial_fuel 1 --max_fuel 1"
let mt_path_insert #hsz mtr p hp =
let pth = !*p in
let pv = Path?.hashes pth in
let hh0 = HST.get () in
let ipv = V.insert pv hp in
let hh1 = HST.get () in
path_safe_preserved_
mtr (V.as_seq hh0 pv) 0 (S.length (V.as_seq hh0 pv))
(B.loc_all_regions_from false (V.frameOf ipv)) hh0 hh1;
path_preserved_
mtr (V.as_seq hh0 pv) 0 (S.length (V.as_seq hh0 pv))
(B.loc_all_regions_from false (V.frameOf ipv)) hh0 hh1;
Rgl?.r_sep (hreg hsz) hp
(B.loc_all_regions_from false (V.frameOf ipv)) hh0 hh1;
p *= Path hsz ipv;
let hh2 = HST.get () in
path_safe_preserved_
mtr (V.as_seq hh1 ipv) 0 (S.length (V.as_seq hh1 ipv))
(B.loc_region_only false (B.frameOf p)) hh1 hh2;
path_preserved_
mtr (V.as_seq hh1 ipv) 0 (S.length (V.as_seq hh1 ipv))
(B.loc_region_only false (B.frameOf p)) hh1 hh2;
Rgl?.r_sep (hreg hsz) hp
(B.loc_region_only false (B.frameOf p)) hh1 hh2;
assert (S.equal (lift_path hh2 mtr p)
(lift_path_ hh1 (S.snoc (V.as_seq hh0 pv) hp)
0 (S.length (V.as_seq hh1 ipv))));
lift_path_eq hh1 (S.snoc (V.as_seq hh0 pv) hp) (V.as_seq hh0 pv)
0 (S.length (V.as_seq hh0 pv))
#pop-options
// For a given target index `k`, the number of elements in the tree `j`, and a
// boolean flag (indicating whether rightmost hashes exist), we can calculate
// the required Merkle path length.
//
// `mt_path_length` appears in the postcondition of `mt_get_path` and in the
// precondition of `mt_verify`. For a detailed description, see `mt_get_path`
// and `mt_verify`.
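// For example (values are illustrative), with `lv = 0` and `actd = false`:
// - k = 0, j = 1: a single leaf needs no sibling hashes, so the length is 0;
// - k = 2, j = 5: the length is 1 + 1 + 1 = 3 (one hash contributed at each of
//   the first three levels).
// `mt_get_path` additionally stores the leaf itself, hence its postcondition
// `1ul + mt_path_length 0ul idx (MT?.j mtv0) false`.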
private
val mt_path_length_step:
k:index_t ->
j:index_t{k <= j} ->
actd:bool ->
Tot (sl:uint32_t{U32.v sl = MTH.mt_path_length_step (U32.v k) (U32.v j) actd})
let mt_path_length_step k j actd =
if j = 0ul then 0ul
else (if k % 2ul = 0ul
then (if j = k || (j = k + 1ul && not actd) then 0ul else 1ul)
else 1ul)
private inline_for_extraction
val mt_path_length:
lv:uint32_t{lv <= merkle_tree_size_lg} ->
k:index_t ->
j:index_t{k <= j && U32.v j < pow2 (32 - U32.v lv)} ->
actd:bool ->
Tot (l:uint32_t{
U32.v l = MTH.mt_path_length (U32.v k) (U32.v j) actd &&
l <= 32ul - lv})
(decreases (U32.v j))
#push-options "--z3rlimit 10 --initial_fuel 1 --max_fuel 1"
let rec mt_path_length lv k j actd =
if j = 0ul then 0ul
else (let nactd = actd || (j % 2ul = 1ul) in
mt_path_length_step k j actd +
mt_path_length (lv + 1ul) (k / 2ul) (j / 2ul) nactd)
#pop-options
val mt_get_path_length:
mtr:HH.rid ->
p:const_path_p ->
HST.ST uint32_t
(requires (fun h0 -> path_safe h0 mtr (CB.cast p)))
(ensures (fun h0 _ h1 -> True))
let mt_get_path_length mtr p =
let pd = !*(CB.cast p) in
V.size_of (Path?.hashes pd)
private inline_for_extraction
val mt_make_path_step:
#hsz:hash_size_t ->
lv:uint32_t{lv <= merkle_tree_size_lg} ->
mtr:HH.rid ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
rhs:hash_vec #hsz {V.size_of rhs = merkle_tree_size_lg} ->
i:index_t ->
j:index_t{j <> 0ul /\ i <= j /\ U32.v j < pow2 (32 - U32.v lv)} ->
k:index_t{i <= k && k <= j} ->
p:path_p ->
actd:bool ->
HST.ST unit
(requires (fun h0 ->
HH.includes mtr (V.frameOf hs) /\
HH.includes mtr (V.frameOf rhs) /\
RV.rv_inv h0 hs /\ RV.rv_inv h0 rhs /\
mt_safe_elts h0 lv hs i j /\
path_safe h0 mtr p /\
Path?.hash_size (B.get h0 p 0) = hsz /\
V.size_of (phashes h0 p) <= lv + 1ul))
(ensures (fun h0 _ h1 ->
// memory safety
modifies (path_loc p) h0 h1 /\
path_safe h1 mtr p /\
V.size_of (phashes h1 p) == V.size_of (phashes h0 p) + mt_path_length_step k j actd /\
V.size_of (phashes h1 p) <= lv + 2ul /\
// correctness
(mt_safe_elts_spec h0 lv hs i j;
(let hsz0 = Path?.hash_size (B.get h0 p 0) in
let hsz1 = Path?.hash_size (B.get h1 p 0) in
let before:(S.seq (MTH.hash #(U32.v hsz0))) = lift_path h0 mtr p in
let after:(S.seq (MTH.hash #(U32.v hsz1))) = lift_path h1 mtr p in
hsz = hsz0 /\ hsz = hsz1 /\
S.equal after
(MTH.mt_make_path_step
(U32.v lv) (RV.as_seq h0 hs) (RV.as_seq h0 rhs)
(U32.v i) (U32.v j) (U32.v k) before actd)))))
#push-options "--z3rlimit 100 --initial_fuel 1 --max_fuel 1 --initial_ifuel 2 --max_ifuel 2"
let mt_make_path_step #hsz lv mtr hs rhs i j k p actd =
let pth = !*p in
let hh0 = HST.get () in
let ofs = offset_of i in
if k % 2ul = 1ul
then begin
hash_vv_rv_inv_includes hh0 hs lv (k - 1ul - ofs);
assert (HH.includes mtr
(B.frameOf (V.get hh0 (V.get hh0 hs lv) (k - 1ul - ofs))));
assert(Path?.hash_size pth = hsz);
mt_path_insert #hsz mtr p (V.index (V.index hs lv) (k - 1ul - ofs))
end
else begin
if k = j then ()
else if k + 1ul = j
then (if actd
then (assert (HH.includes mtr (B.frameOf (V.get hh0 rhs lv)));
mt_path_insert mtr p (V.index rhs lv)))
else (hash_vv_rv_inv_includes hh0 hs lv (k + 1ul - ofs);
assert (HH.includes mtr
(B.frameOf (V.get hh0 (V.get hh0 hs lv) (k + 1ul - ofs))));
mt_path_insert mtr p (V.index (V.index hs lv) (k + 1ul - ofs)))
end
#pop-options
private inline_for_extraction
val mt_get_path_step_pre_nst:
#hsz:Ghost.erased hash_size_t ->
mtr:HH.rid ->
p:path ->
i:uint32_t ->
Tot bool
let mt_get_path_step_pre_nst #hsz mtr p i =
i < V.size_of (Path?.hashes p)
val mt_get_path_step_pre:
#hsz:Ghost.erased hash_size_t ->
mtr:HH.rid ->
p:const_path_p ->
i:uint32_t ->
HST.ST bool
(requires (fun h0 ->
path_safe h0 mtr (CB.cast p) /\
(let pv = B.get h0 (CB.cast p) 0 in
Path?.hash_size pv = Ghost.reveal hsz /\
live h0 (Path?.hashes pv) /\
mt_get_path_step_pre_nst #hsz mtr pv i)))
(ensures (fun _ _ _ -> True))
let mt_get_path_step_pre #hsz mtr p i =
let p = CB.cast p in
mt_get_path_step_pre_nst #hsz mtr !*p i
val mt_get_path_step:
#hsz:Ghost.erased hash_size_t ->
mtr:HH.rid ->
p:const_path_p ->
i:uint32_t ->
HST.ST (hash #hsz)
(requires (fun h0 ->
path_safe h0 mtr (CB.cast p) /\
(let pv = B.get h0 (CB.cast p) 0 in
Path?.hash_size pv = Ghost.reveal hsz /\
live h0 (Path?.hashes pv) /\
i < V.size_of (Path?.hashes pv))))
(ensures (fun h0 r h1 -> True ))
let mt_get_path_step #hsz mtr p i =
let pd = !*(CB.cast p) in
V.index #(hash #(Path?.hash_size pd)) (Path?.hashes pd) i
private
val mt_get_path_:
#hsz:hash_size_t ->
lv:uint32_t{lv <= merkle_tree_size_lg} ->
mtr:HH.rid ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
rhs:hash_vec #hsz {V.size_of rhs = merkle_tree_size_lg} ->
i:index_t -> j:index_t{i <= j /\ U32.v j < pow2 (32 - U32.v lv)} ->
k:index_t{i <= k && k <= j} ->
p:path_p ->
actd:bool ->
HST.ST unit
(requires (fun h0 ->
HH.includes mtr (V.frameOf hs) /\
HH.includes mtr (V.frameOf rhs) /\
RV.rv_inv h0 hs /\ RV.rv_inv h0 rhs /\
mt_safe_elts h0 lv hs i j /\
path_safe h0 mtr p /\
Path?.hash_size (B.get h0 p 0) = hsz /\
V.size_of (phashes h0 p) <= lv + 1ul))
(ensures (fun h0 _ h1 ->
// memory safety
modifies (path_loc p) h0 h1 /\
path_safe h1 mtr p /\
V.size_of (phashes h1 p) ==
V.size_of (phashes h0 p) + mt_path_length lv k j actd /\
// correctness
(mt_safe_elts_spec h0 lv hs i j;
(let hsz0 = Path?.hash_size (B.get h0 p 0) in
let hsz1 = Path?.hash_size (B.get h1 p 0) in
let before:(S.seq (MTH.hash #(U32.v hsz0))) = lift_path h0 mtr p in
let after:(S.seq (MTH.hash #(U32.v hsz1))) = lift_path h1 mtr p in
hsz = hsz0 /\ hsz = hsz1 /\
S.equal after
(MTH.mt_get_path_ (U32.v lv) (RV.as_seq h0 hs) (RV.as_seq h0 rhs)
(U32.v i) (U32.v j) (U32.v k) before actd)))))
(decreases (32 - U32.v lv))
#push-options "--z3rlimit 300 --initial_fuel 1 --max_fuel 1 --max_ifuel 2 --initial_ifuel 2"
let rec mt_get_path_ #hsz lv mtr hs rhs i j k p actd =
let hh0 = HST.get () in
mt_safe_elts_spec hh0 lv hs i j;
let ofs = offset_of i in
if j = 0ul then ()
else
(mt_make_path_step lv mtr hs rhs i j k p actd;
let hh1 = HST.get () in
mt_safe_elts_spec hh0 lv hs i j;
assert (S.equal (lift_path hh1 mtr p)
(MTH.mt_make_path_step
(U32.v lv) (RV.as_seq hh0 hs) (RV.as_seq hh0 rhs)
(U32.v i) (U32.v j) (U32.v k)
(lift_path hh0 mtr p) actd));
RV.rv_inv_preserved hs (path_loc p) hh0 hh1;
RV.rv_inv_preserved rhs (path_loc p) hh0 hh1;
RV.as_seq_preserved hs (path_loc p) hh0 hh1;
RV.as_seq_preserved rhs (path_loc p) hh0 hh1;
V.loc_vector_within_included hs lv (V.size_of hs);
mt_safe_elts_preserved lv hs i j (path_loc p) hh0 hh1;
assert (mt_safe_elts hh1 lv hs i j);
mt_safe_elts_rec hh1 lv hs i j;
mt_safe_elts_spec hh1 (lv + 1ul) hs (i / 2ul) (j / 2ul);
mt_get_path_ (lv + 1ul) mtr hs rhs (i / 2ul) (j / 2ul) (k / 2ul) p
(if j % 2ul = 0ul then actd else true);
let hh2 = HST.get () in
assert (S.equal (lift_path hh2 mtr p)
(MTH.mt_get_path_ (U32.v lv + 1)
(RV.as_seq hh1 hs) (RV.as_seq hh1 rhs)
(U32.v i / 2) (U32.v j / 2) (U32.v k / 2)
(lift_path hh1 mtr p)
(if U32.v j % 2 = 0 then actd else true)));
assert (S.equal (lift_path hh2 mtr p)
(MTH.mt_get_path_ (U32.v lv)
(RV.as_seq hh0 hs) (RV.as_seq hh0 rhs)
(U32.v i) (U32.v j) (U32.v k)
(lift_path hh0 mtr p) actd)))
#pop-options
private inline_for_extraction
val mt_get_path_pre_nst:
mtv:merkle_tree ->
idx:offset_t ->
p:path ->
root:(hash #(MT?.hash_size mtv)) ->
Tot bool
let mt_get_path_pre_nst mtv idx p root =
offsets_connect (MT?.offset mtv) idx &&
Path?.hash_size p = MT?.hash_size mtv &&
([@inline_let] let idx = split_offset (MT?.offset mtv) idx in
MT?.i mtv <= idx && idx < MT?.j mtv &&
V.size_of (Path?.hashes p) = 0ul)
val mt_get_path_pre:
#hsz:Ghost.erased hash_size_t ->
mt:const_mt_p ->
idx:offset_t ->
p:const_path_p ->
root:hash #hsz ->
HST.ST bool
(requires (fun h0 ->
let mt = CB.cast mt in
let p = CB.cast p in
let dmt = B.get h0 mt 0 in
let dp = B.get h0 p 0 in
MT?.hash_size dmt = (Ghost.reveal hsz) /\
Path?.hash_size dp = (Ghost.reveal hsz) /\
mt_safe h0 mt /\
path_safe h0 (B.frameOf mt) p /\
Rgl?.r_inv (hreg hsz) h0 root /\
HH.disjoint (B.frameOf root) (B.frameOf mt) /\
HH.disjoint (B.frameOf root) (B.frameOf p)))
(ensures (fun _ _ _ -> True))
let mt_get_path_pre #_ mt idx p root =
let mt = CB.cast mt in
let p = CB.cast p in
let mtv = !*mt in
mt_get_path_pre_nst mtv idx !*p root
val mt_get_path_loc_union_helper:
l1:loc -> l2:loc ->
Lemma (loc_union (loc_union l1 l2) l2 == loc_union l1 l2)
let mt_get_path_loc_union_helper l1 l2 = ()
// Constructs a Merkle path for a given index `idx` from the hashes `mt.hs` and
// the rightmost hashes `mt.rhs`. Note that this operation copies "pointers" to
// hashes inside the Merkle tree into the output path.
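// A worked example for the three-leaf tree [h0; h1; h2] with idx = 1: the
// resulting path is [h1; h0; rhs[1]] (where rhs[1] = h2 after `construct_rhs`),
// i.e. the leaf itself, its sibling at level 0, and the rightmost hash at
// level 1; a verifier recomputes hash(h0, h1) = h01 and hash(h01, h2) = root.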
#push-options "--z3rlimit 60"
val mt_get_path:
#hsz:Ghost.erased hash_size_t ->
mt:const_mt_p ->
idx:offset_t ->
p:path_p ->
root:hash #hsz ->
HST.ST index_t
(requires (fun h0 ->
let mt = CB.cast mt in
let dmt = B.get h0 mt 0 in
MT?.hash_size dmt = Ghost.reveal hsz /\
Path?.hash_size (B.get h0 p 0) = Ghost.reveal hsz /\
mt_get_path_pre_nst (B.get h0 mt 0) idx (B.get h0 p 0) root /\
mt_safe h0 mt /\
path_safe h0 (B.frameOf mt) p /\
Rgl?.r_inv (hreg hsz) h0 root /\
HH.disjoint (B.frameOf root) (B.frameOf mt) /\
HH.disjoint (B.frameOf root) (B.frameOf p)))
(ensures (fun h0 _ h1 ->
let mt = CB.cast mt in
let mtv0 = B.get h0 mt 0 in
let mtv1 = B.get h1 mt 0 in
let idx = split_offset (MT?.offset mtv0) idx in
MT?.hash_size mtv0 = Ghost.reveal hsz /\
MT?.hash_size mtv1 = Ghost.reveal hsz /\
Path?.hash_size (B.get h0 p 0) = Ghost.reveal hsz /\
Path?.hash_size (B.get h1 p 0) = Ghost.reveal hsz /\
// memory safety
modifies (loc_union
(loc_union
(mt_loc mt)
(B.loc_all_regions_from false (B.frameOf root)))
(path_loc p))
h0 h1 /\
mt_safe h1 mt /\
path_safe h1 (B.frameOf mt) p /\
Rgl?.r_inv (hreg hsz) h1 root /\
V.size_of (phashes h1 p) ==
1ul + mt_path_length 0ul idx (MT?.j mtv0) false /\
// correctness
(let sj, sp, srt =
MTH.mt_get_path
(mt_lift h0 mt) (U32.v idx) (Rgl?.r_repr (hreg hsz) h0 root) in
sj == U32.v (MT?.j mtv1) /\
S.equal sp (lift_path #hsz h1 (B.frameOf mt) p) /\
srt == Rgl?.r_repr (hreg hsz) h1 root)))
#pop-options
#push-options "--z3rlimit 300 --initial_fuel 1 --max_fuel 1"
let mt_get_path #hsz mt idx p root =
let ncmt = CB.cast mt in
let mtframe = B.frameOf ncmt in
let hh0 = HST.get () in
mt_get_root mt root;
let mtv = !*ncmt in
let hsz = MT?.hash_size mtv in
let hh1 = HST.get () in
path_safe_init_preserved mtframe p
(B.loc_union (mt_loc ncmt)
(B.loc_all_regions_from false (B.frameOf root)))
hh0 hh1;
assert (MTH.mt_get_root (mt_lift hh0 ncmt) (Rgl?.r_repr (hreg hsz) hh0 root) ==
(mt_lift hh1 ncmt, Rgl?.r_repr (hreg hsz) hh1 root));
assert (S.equal (lift_path #hsz hh1 mtframe p) S.empty);
let idx = split_offset (MT?.offset mtv) idx in
let i = MT?.i mtv in
let ofs = offset_of (MT?.i mtv) in
let j = MT?.j mtv in
let hs = MT?.hs mtv in
let rhs = MT?.rhs mtv in
assert (mt_safe_elts hh1 0ul hs i j);
assert (V.size_of (V.get hh1 hs 0ul) == j - ofs);
assert (idx < j);
hash_vv_rv_inv_includes hh1 hs 0ul (idx - ofs);
hash_vv_rv_inv_r_inv hh1 hs 0ul (idx - ofs);
hash_vv_as_seq_get_index hh1 hs 0ul (idx - ofs);
let ih = V.index (V.index hs 0ul) (idx - ofs) in
mt_path_insert #hsz mtframe p ih;
let hh2 = HST.get () in
assert (S.equal (lift_path hh2 mtframe p)
(MTH.path_insert
(lift_path hh1 mtframe p)
(S.index (S.index (RV.as_seq hh1 hs) 0) (U32.v idx - U32.v ofs))));
Rgl?.r_sep (hreg hsz) root (path_loc p) hh1 hh2;
mt_safe_preserved ncmt (path_loc p) hh1 hh2;
mt_preserved ncmt (path_loc p) hh1 hh2;
assert (V.size_of (phashes hh2 p) == 1ul);
mt_get_path_ 0ul mtframe hs rhs i j idx p false;
let hh3 = HST.get () in
// memory safety
mt_get_path_loc_union_helper
(loc_union (mt_loc ncmt)
(B.loc_all_regions_from false (B.frameOf root)))
(path_loc p);
Rgl?.r_sep (hreg hsz) root (path_loc p) hh2 hh3;
mt_safe_preserved ncmt (path_loc p) hh2 hh3;
mt_preserved ncmt (path_loc p) hh2 hh3;
assert (V.size_of (phashes hh3 p) ==
1ul + mt_path_length 0ul idx (MT?.j (B.get hh0 ncmt 0)) false);
assert (S.length (lift_path #hsz hh3 mtframe p) ==
S.length (lift_path #hsz hh2 mtframe p) +
MTH.mt_path_length (U32.v idx) (U32.v (MT?.j (B.get hh0 ncmt 0))) false);
assert (modifies (loc_union
(loc_union
(mt_loc ncmt)
(B.loc_all_regions_from false (B.frameOf root)))
(path_loc p))
hh0 hh3);
assert (mt_safe hh3 ncmt);
assert (path_safe hh3 mtframe p);
assert (Rgl?.r_inv (hreg hsz) hh3 root);
assert (V.size_of (phashes hh3 p) ==
1ul + mt_path_length 0ul idx (MT?.j (B.get hh0 ncmt 0)) false);
// correctness
mt_safe_elts_spec hh2 0ul hs i j;
assert (S.equal (lift_path hh3 mtframe p)
(MTH.mt_get_path_ 0 (RV.as_seq hh2 hs) (RV.as_seq hh2 rhs)
(U32.v i) (U32.v j) (U32.v idx)
(lift_path hh2 mtframe p) false));
assert (MTH.mt_get_path
(mt_lift hh0 ncmt) (U32.v idx) (Rgl?.r_repr (hreg hsz) hh0 root) ==
(U32.v (MT?.j (B.get hh3 ncmt 0)),
lift_path hh3 mtframe p,
Rgl?.r_repr (hreg hsz) hh3 root));
j
#pop-options
/// Flushing
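// Flushing removes hashes for leaves below index `i` that are no longer needed.
// A rough sketch (assuming the recursion follows the spec `MTH.mt_flush_to_`):
// flushing from `pi = 0` to `i = 4` drops hs[0][0..3] and hs[1][0..1] while
// keeping hs[2][0] = h03; at each level the first
// `offset_of i - offset_of pi` hashes (with `i`, `pi` shifted to that level)
// are removed.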
private val
mt_flush_to_modifies_rec_helper:
#hsz:hash_size_t ->
lv:uint32_t{lv < merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
h:HS.mem ->
Lemma (loc_union
(loc_union
(RV.rs_loc_elem (hvreg hsz) (V.as_seq h hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul)))
(loc_union
(RV.rv_loc_elems h hs (lv + 1ul) (V.size_of hs))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs))) ==
loc_union
(RV.rv_loc_elems h hs lv (V.size_of hs))
(V.loc_vector_within hs lv (V.size_of hs)))
#push-options "--initial_fuel 2 --max_fuel 2"
let mt_flush_to_modifies_rec_helper #hsz lv hs h =
assert (V.loc_vector_within hs lv (V.size_of hs) ==
loc_union (V.loc_vector_within hs lv (lv + 1ul))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs)));
RV.rs_loc_elems_rec_inverse (hvreg hsz) (V.as_seq h hs) (U32.v lv) (U32.v (V.size_of hs));
assert (RV.rv_loc_elems h hs lv (V.size_of hs) ==
loc_union (RV.rs_loc_elem (hvreg hsz) (V.as_seq h hs) (U32.v lv))
(RV.rv_loc_elems h hs (lv + 1ul) (V.size_of hs)));
loc_union_assoc_4
(RV.rs_loc_elem (hvreg hsz) (V.as_seq h hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul))
(RV.rv_loc_elems h hs (lv + 1ul) (V.size_of hs))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs))
#pop-options
private
val mt_flush_to_:
hsz:hash_size_t ->
lv:uint32_t{lv < merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
pi:index_t ->
i:index_t{i >= pi} ->
j:Ghost.erased index_t{
Ghost.reveal j >= i &&
U32.v (Ghost.reveal j) < pow2 (32 - U32.v lv)} ->
HST.ST unit
(requires (fun h0 ->
RV.rv_inv h0 hs /\
mt_safe_elts h0 lv hs pi (Ghost.reveal j)))
(ensures (fun h0 _ h1 ->
// memory safety
modifies (loc_union
(RV.rv_loc_elems h0 hs lv (V.size_of hs))
(V.loc_vector_within hs lv (V.size_of hs)))
h0 h1 /\
RV.rv_inv h1 hs /\
mt_safe_elts h1 lv hs i (Ghost.reveal j) /\
// correctness
(mt_safe_elts_spec h0 lv hs pi (Ghost.reveal j);
S.equal (RV.as_seq h1 hs)
(MTH.mt_flush_to_
(U32.v lv) (RV.as_seq h0 hs) (U32.v pi)
(U32.v i) (U32.v (Ghost.reveal j))))))
(decreases (U32.v i))
#restart-solver
#push-options "--z3rlimit 1500 --fuel 1 --ifuel 0"
let rec mt_flush_to_ hsz lv hs pi i j =
let hh0 = HST.get () in
// Base conditions
mt_safe_elts_rec hh0 lv hs pi (Ghost.reveal j);
V.loc_vector_within_included hs 0ul lv;
V.loc_vector_within_included hs lv (lv + 1ul);
V.loc_vector_within_included hs (lv + 1ul) (V.size_of hs);
V.loc_vector_within_disjoint hs lv (lv + 1ul) (lv + 1ul) (V.size_of hs);
let oi = offset_of i in
let opi = offset_of pi in
if oi = opi then mt_safe_elts_spec hh0 lv hs pi (Ghost.reveal j)
else begin
/// 1) Flush hashes at the level `lv`, where the new vector is
/// not yet connected to `hs`.
let ofs = oi - opi in
let hvec = V.index hs lv in
let flushed:(rvector (hreg hsz)) = rv_flush_inplace hvec ofs in
let hh1 = HST.get () in
// 1-0) Basic disjointness conditions for `RV.assign`
V.forall2_forall_left hh0 hs 0ul (V.size_of hs) lv
(fun b1 b2 -> HH.disjoint (Rgl?.region_of (hvreg hsz) b1)
(Rgl?.region_of (hvreg hsz) b2));
V.forall2_forall_right hh0 hs 0ul (V.size_of hs) lv
(fun b1 b2 -> HH.disjoint (Rgl?.region_of (hvreg hsz) b1)
(Rgl?.region_of (hvreg hsz) b2));
V.forall_preserved
hs 0ul lv
(fun b -> HH.disjoint (Rgl?.region_of (hvreg hsz) hvec)
(Rgl?.region_of (hvreg hsz) b))
(RV.loc_rvector hvec)
hh0 hh1;
V.forall_preserved
hs (lv + 1ul) (V.size_of hs)
(fun b -> HH.disjoint (Rgl?.region_of (hvreg hsz) hvec)
(Rgl?.region_of (hvreg hsz) b))
(RV.loc_rvector hvec)
hh0 hh1;
assert (Rgl?.region_of (hvreg hsz) hvec == Rgl?.region_of (hvreg hsz) flushed);
// 1-1) For the `modifies` postcondition.
assert (modifies (RV.rs_loc_elem (hvreg hsz) (V.as_seq hh0 hs) (U32.v lv)) hh0 hh1);
// 1-2) Preservation
RV.rv_loc_elems_preserved
hs (lv + 1ul) (V.size_of hs)
(RV.loc_rvector (V.get hh0 hs lv)) hh0 hh1;
// 1-3) For `mt_safe_elts`
assert (V.size_of flushed == Ghost.reveal j - offset_of i); // head updated
mt_safe_elts_preserved
(lv + 1ul) hs (pi / 2ul) (Ghost.reveal j / 2ul)
(RV.loc_rvector (V.get hh0 hs lv)) hh0 hh1; // tail not yet
// 1-4) For the `rv_inv` postcondition
RV.rs_loc_elems_elem_disj
(hvreg hsz) (V.as_seq hh0 hs) (V.frameOf hs)
0 (U32.v (V.size_of hs)) 0 (U32.v lv) (U32.v lv);
RV.rs_loc_elems_parent_disj
(hvreg hsz) (V.as_seq hh0 hs) (V.frameOf hs)
0 (U32.v lv);
RV.rv_elems_inv_preserved
hs 0ul lv (RV.loc_rvector (V.get hh0 hs lv))
hh0 hh1;
assert (RV.rv_elems_inv hh1 hs 0ul lv);
RV.rs_loc_elems_elem_disj
(hvreg hsz) (V.as_seq hh0 hs) (V.frameOf hs)
0 (U32.v (V.size_of hs))
(U32.v lv + 1) (U32.v (V.size_of hs))
(U32.v lv);
RV.rs_loc_elems_parent_disj
(hvreg hsz) (V.as_seq hh0 hs) (V.frameOf hs)
(U32.v lv + 1) (U32.v (V.size_of hs));
RV.rv_elems_inv_preserved
hs (lv + 1ul) (V.size_of hs) (RV.loc_rvector (V.get hh0 hs lv))
hh0 hh1;
assert (RV.rv_elems_inv hh1 hs (lv + 1ul) (V.size_of hs));
assert (rv_itself_inv hh1 hs);
assert (elems_reg hh1 hs);
// 1-5) Correctness
assert (S.equal (RV.as_seq hh1 flushed)
(S.slice (RV.as_seq hh0 (V.get hh0 hs lv)) (U32.v ofs)
(S.length (RV.as_seq hh0 (V.get hh0 hs lv)))));
/// 2) Assign the flushed vector to `hs` at the level `lv`.
RV.assign hs lv flushed;
let hh2 = HST.get () in
// 2-1) For the `modifies` postcondition.
assert (modifies (V.loc_vector_within hs lv (lv + 1ul)) hh1 hh2);
assert (modifies (loc_union
(RV.rs_loc_elem (hvreg hsz) (V.as_seq hh0 hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul))) hh0 hh2);
// 2-2) Preservation
V.loc_vector_within_disjoint hs lv (lv + 1ul) (lv + 1ul) (V.size_of hs);
RV.rv_loc_elems_preserved
hs (lv + 1ul) (V.size_of hs)
(V.loc_vector_within hs lv (lv + 1ul)) hh1 hh2;
// 2-3) For `mt_safe_elts`
assert (V.size_of (V.get hh2 hs lv) ==
Ghost.reveal j - offset_of i);
mt_safe_elts_preserved
(lv + 1ul) hs (pi / 2ul) (Ghost.reveal j / 2ul)
(V.loc_vector_within hs lv (lv + 1ul)) hh1 hh2;
// 2-4) Correctness
RV.as_seq_sub_preserved hs 0ul lv (loc_rvector flushed) hh0 hh1;
RV.as_seq_sub_preserved hs (lv + 1ul) merkle_tree_size_lg (loc_rvector flushed) hh0 hh1;
assert (S.equal (RV.as_seq hh2 hs)
(S.append
(RV.as_seq_sub hh0 hs 0ul lv)
(S.cons (RV.as_seq hh1 flushed)
(RV.as_seq_sub hh0 hs (lv + 1ul) merkle_tree_size_lg))));
as_seq_sub_upd hh0 hs lv (RV.as_seq hh1 flushed);
// if `lv = 31` then `pi <= i <= j < 2` thus `oi = opi`,
// contradicting the branch.
assert (lv + 1ul < merkle_tree_size_lg);
assert (U32.v (Ghost.reveal j / 2ul) < pow2 (32 - U32.v (lv + 1ul)));
assert (RV.rv_inv hh2 hs);
assert (mt_safe_elts hh2 (lv + 1ul) hs (pi / 2ul) (Ghost.reveal j / 2ul));
/// 3) Recursion
mt_flush_to_ hsz (lv + 1ul) hs (pi / 2ul) (i / 2ul)
(Ghost.hide (Ghost.reveal j / 2ul));
let hh3 = HST.get () in
// 3-0) Memory safety brought from the postcondition of the recursion
assert (modifies
(loc_union
(loc_union
(RV.rs_loc_elem (hvreg hsz) (V.as_seq hh0 hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul)))
(loc_union
(RV.rv_loc_elems hh0 hs (lv + 1ul) (V.size_of hs))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs))))
hh0 hh3);
mt_flush_to_modifies_rec_helper lv hs hh0;
V.loc_vector_within_disjoint hs lv (lv + 1ul) (lv + 1ul) (V.size_of hs);
V.loc_vector_within_included hs lv (lv + 1ul);
RV.rv_loc_elems_included hh2 hs (lv + 1ul) (V.size_of hs);
assert (loc_disjoint
(V.loc_vector_within hs lv (lv + 1ul))
(RV.rv_loc_elems hh2 hs (lv + 1ul) (V.size_of hs)));
V.get_preserved hs lv
(loc_union
(RV.rv_loc_elems hh2 hs (lv + 1ul) (V.size_of hs))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs)))
hh2 hh3;
assert (V.size_of (V.get hh3 hs lv) ==
Ghost.reveal j - offset_of i);
assert (RV.rv_inv hh3 hs);
mt_safe_elts_constr hh3 lv hs i (Ghost.reveal j);
assert (mt_safe_elts hh3 lv hs i (Ghost.reveal j));
// 3-1) Correctness
mt_safe_elts_spec hh2 (lv + 1ul) hs (pi / 2ul) (Ghost.reveal j / 2ul);
assert (S.equal (RV.as_seq hh3 hs)
(MTH.mt_flush_to_ (U32.v lv + 1) (RV.as_seq hh2 hs)
(U32.v pi / 2) (U32.v i / 2) (U32.v (Ghost.reveal j) / 2)));
mt_safe_elts_spec hh0 lv hs pi (Ghost.reveal j);
MTH.mt_flush_to_rec
(U32.v lv) (RV.as_seq hh0 hs)
(U32.v pi) (U32.v i) (U32.v (Ghost.reveal j));
assert (S.equal (RV.as_seq hh3 hs)
(MTH.mt_flush_to_ (U32.v lv) (RV.as_seq hh0 hs)
(U32.v pi) (U32.v i) (U32.v (Ghost.reveal j))))
end
#pop-options
// `mt_flush_to` flushes old hashes in the Merkle tree. It removes hash elements
// from `MT?.i` to **`offset_of (idx - 1)`**, but maintains the tree structure,
// i.e., the tree still holds some old internal hashes (compressed from old
// hashes) which are required to generate Merkle paths for remaining hashes.
//
// Note that `mt_flush_to` (and `mt_flush`) always retain at least one base hash
// element. If there are `MT?.j` elements in the tree, then because of the
// precondition `MT?.i <= idx < MT?.j` we still have the `idx`-th element after
// flushing.
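// Hedged worked example (added for illustration; the numbers are hypothetical):
// with leaves h0..h3 (MT?.i = 0ul, MT?.j = 4ul) and idx = 3ul, the precondition
// 0 <= 3 < 4 holds, so leaf h3 survives the flush; since offset_of 3ul = 2ul its
// sibling h2 is kept as well, which is what still allows a Merkle path for h3.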
private inline_for_extraction
val mt_flush_to_pre_nst: mtv:merkle_tree -> idx:offset_t -> Tot bool | false | true | MerkleTree.Low.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 0,
"initial_ifuel": 0,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 10,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val mt_flush_to_pre_nst: mtv:merkle_tree -> idx:offset_t -> Tot bool | [] | MerkleTree.Low.mt_flush_to_pre_nst | {
"file_name": "src/MerkleTree.Low.fst",
"git_rev": "3b0f086655c145aa23f58a97259ebf4cf112a4a3",
"git_url": "https://github.com/hacl-star/merkle-tree.git",
"project_name": "merkle-tree"
} | mtv: MerkleTree.Low.merkle_tree -> idx: MerkleTree.Low.offset_t -> Prims.bool | {
"end_col": 19,
"end_line": 2423,
"start_col": 2,
"start_line": 2420
} |
Prims.GTot | val path_loc: path_p -> GTot loc | [
{
"abbrev": false,
"full_module": "MerkleTree.Low.VectorExtras",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree.Low.Hashfunctions",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree.Low.Datastructures",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": true,
"full_module": "MerkleTree.Spec",
"short_module": "MTS"
},
{
"abbrev": true,
"full_module": "MerkleTree.New.High",
"short_module": "MTH"
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "LowStar.Regional.Instances",
"short_module": "RVI"
},
{
"abbrev": true,
"full_module": "LowStar.RVector",
"short_module": "RV"
},
{
"abbrev": true,
"full_module": "LowStar.Vector",
"short_module": "V"
},
{
"abbrev": true,
"full_module": "LowStar.ConstBuffer",
"short_module": "CB"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.Monotonic.HyperHeap",
"short_module": "HH"
},
{
"abbrev": true,
"full_module": "FStar.Monotonic.HyperStack",
"short_module": "MHS"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "LowStar.Regional.Instances",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.RVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Regional",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Vector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.BufferOps",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Buffer",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Integers",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.All",
"short_module": null
},
{
"abbrev": false,
"full_module": "EverCrypt.Helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let path_loc p = B.loc_all_regions_from false (B.frameOf p) | val path_loc: path_p -> GTot loc
let path_loc p = | false | null | false | B.loc_all_regions_from false (B.frameOf p) | {
"checked_file": "MerkleTree.Low.fst.checked",
"dependencies": [
"prims.fst.checked",
"MerkleTree.Spec.fst.checked",
"MerkleTree.New.High.fst.checked",
"MerkleTree.Low.VectorExtras.fst.checked",
"MerkleTree.Low.Hashfunctions.fst.checked",
"MerkleTree.Low.Datastructures.fst.checked",
"LowStar.Vector.fst.checked",
"LowStar.RVector.fst.checked",
"LowStar.Regional.Instances.fst.checked",
"LowStar.Regional.fst.checked",
"LowStar.ConstBuffer.fsti.checked",
"LowStar.BufferOps.fst.checked",
"LowStar.Buffer.fst.checked",
"Lib.IntTypes.fsti.checked",
"Lib.ByteBuffer.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.Properties.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Monotonic.HyperStack.fsti.checked",
"FStar.Monotonic.HyperHeap.fsti.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Integers.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked",
"FStar.All.fst.checked",
"EverCrypt.Helpers.fsti.checked"
],
"interface_file": false,
"source_file": "MerkleTree.Low.fst"
} | [
"sometrivial"
] | [
"MerkleTree.Low.path_p",
"LowStar.Monotonic.Buffer.loc_all_regions_from",
"LowStar.Monotonic.Buffer.frameOf",
"MerkleTree.Low.path",
"LowStar.Buffer.trivial_preorder",
"LowStar.Monotonic.Buffer.loc"
] | [] | module MerkleTree.Low
open EverCrypt.Helpers
open FStar.All
open FStar.Integers
open FStar.Mul
open LowStar.Buffer
open LowStar.BufferOps
open LowStar.Vector
open LowStar.Regional
open LowStar.RVector
open LowStar.Regional.Instances
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
module MHS = FStar.Monotonic.HyperStack
module HH = FStar.Monotonic.HyperHeap
module B = LowStar.Buffer
module CB = LowStar.ConstBuffer
module V = LowStar.Vector
module RV = LowStar.RVector
module RVI = LowStar.Regional.Instances
module S = FStar.Seq
module U32 = FStar.UInt32
module U64 = FStar.UInt64
module MTH = MerkleTree.New.High
module MTS = MerkleTree.Spec
open Lib.IntTypes
open MerkleTree.Low.Datastructures
open MerkleTree.Low.Hashfunctions
open MerkleTree.Low.VectorExtras
#set-options "--z3rlimit 10 --initial_fuel 0 --max_fuel 0 --initial_ifuel 0 --max_ifuel 0"
type const_pointer (a:Type0) = b:CB.const_buffer a{CB.length b == 1 /\ CB.qual_of b == CB.MUTABLE}
/// Low-level Merkle tree data structure
///
// NOTE: because of a lack of 64-bit LowStar.Buffer support, we currently
// cannot change the types below to anything else.
type index_t = uint32_t
let uint32_32_max = 4294967295ul
inline_for_extraction
let uint32_max = 4294967295UL
let uint64_max = 18446744073709551615UL
let offset_range_limit = uint32_max
type offset_t = uint64_t
inline_for_extraction noextract unfold let u32_64 = Int.Cast.uint32_to_uint64
inline_for_extraction noextract unfold let u64_32 = Int.Cast.uint64_to_uint32
private inline_for_extraction
let offsets_connect (x:offset_t) (y:offset_t): Tot bool = y >= x && (y - x) <= offset_range_limit
private inline_for_extraction
let split_offset (tree:offset_t) (index:offset_t{offsets_connect tree index}): Tot index_t =
[@inline_let] let diff = U64.sub_mod index tree in
assert (diff <= offset_range_limit);
Int.Cast.uint64_to_uint32 diff
private inline_for_extraction
let add64_fits (x:offset_t) (i:index_t): Tot bool = uint64_max - x >= (u32_64 i)
private inline_for_extraction
let join_offset (tree:offset_t) (i:index_t{add64_fits tree i}): Tot (r:offset_t{offsets_connect tree r}) =
U64.add tree (u32_64 i)
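// Hedged round-trip example (added comment, not from the original source):
// with tree = 100UL and i = 5ul, join_offset tree i = 105UL; offsets_connect
// 100UL 105UL holds because the difference fits in 32 bits, and
// split_offset 100UL 105UL recovers 5ul.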
inline_for_extraction val merkle_tree_size_lg: uint32_t
let merkle_tree_size_lg = 32ul
// A Merkle tree `MT i j hs rhs_ok rhs` stores all necessary hashes to generate
// a Merkle path for each element from the index `i` to `j-1`.
// - Parameters
// `hs`: a 2-dim store for hashes, where `hs[0]` contains leaf hash values.
// `rhs_ok`: to check the rightmost hashes are up-to-date
// `rhs`: a store for "rightmost" hashes, manipulated only when required to
// calculate some merkle paths that need the rightmost hashes
// as a part of them.
// `mroot`: during the construction of `rhs` we can also calculate the Merkle
// root of the tree. If `rhs_ok` is true then it has the up-to-date
// root value.
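// Hedged example (illustrative only, not part of the original source): a tree
// holding three leaves h0 h1 h2 at offset 0UL has i = 0ul, j = 3ul,
// hs[0] = [h0; h1; h2] and hs[1] = [h01]; rhs_ok stays false until the rightmost
// hashes and mroot are (re)computed, e.g. when a root or a path is requested.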
noeq type merkle_tree =
| MT: hash_size:hash_size_t ->
offset:offset_t ->
i:index_t -> j:index_t{i <= j /\ add64_fits offset j} ->
hs:hash_vv hash_size {V.size_of hs = merkle_tree_size_lg} ->
rhs_ok:bool ->
rhs:hash_vec #hash_size {V.size_of rhs = merkle_tree_size_lg} ->
mroot:hash #hash_size ->
hash_spec:Ghost.erased (MTS.hash_fun_t #(U32.v hash_size)) ->
hash_fun:hash_fun_t #hash_size #hash_spec ->
merkle_tree
type mt_p = B.pointer merkle_tree
type const_mt_p = const_pointer merkle_tree
inline_for_extraction
let merkle_tree_conditions (#hsz:Ghost.erased hash_size_t) (offset:uint64_t) (i j:uint32_t) (hs:hash_vv hsz) (rhs_ok:bool) (rhs:hash_vec #hsz) (mroot:hash #hsz): Tot bool =
j >= i && add64_fits offset j &&
V.size_of hs = merkle_tree_size_lg &&
V.size_of rhs = merkle_tree_size_lg
// The maximum number of currently held elements in the tree is (2^32 - 1).
// cwinter: even when using 64-bit indices, we fail if the underlying 32-bit
// vector is full; this can be fixed if necessary.
private inline_for_extraction
val mt_not_full_nst: mtv:merkle_tree -> Tot bool
let mt_not_full_nst mtv = MT?.j mtv < uint32_32_max
val mt_not_full: HS.mem -> mt_p -> GTot bool
let mt_not_full h mt = mt_not_full_nst (B.get h mt 0)
/// (Memory) Safety
val offset_of: i:index_t -> Tot index_t
let offset_of i = if i % 2ul = 0ul then i else i - 1ul
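// Hedged sanity check (added comment): offset_of 6ul = 6ul and offset_of 7ul = 6ul,
// i.e. an odd index is mapped down to its even left sibling.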
// `mt_safe_elts` says that it is safe to access an element from `i` to `j - 1`
// at level `lv` in the Merkle tree, i.e., hs[lv][k] (i <= k < j) is a valid
// element.
inline_for_extraction noextract
val mt_safe_elts:
#hsz:hash_size_t ->
h:HS.mem -> lv:uint32_t{lv <= merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
i:index_t -> j:index_t{j >= i} ->
GTot Type0 (decreases (32 - U32.v lv))
let rec mt_safe_elts #hsz h lv hs i j =
if lv = merkle_tree_size_lg then true
else (let ofs = offset_of i in
V.size_of (V.get h hs lv) == j - ofs /\
mt_safe_elts #hsz h (lv + 1ul) hs (i / 2ul) (j / 2ul))
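// Hedged unfolding example (added comment): for i = 2ul, j = 5ul the predicate
// asks for V.size_of (V.get h hs lv) == 5 - offset_of 2 = 3 at this level, then
// 2 - offset_of 1 = 2 at level lv+1 (i = 1ul, j = 2ul), then 1, and finally 0 for
// all remaining levels up to merkle_tree_size_lg.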
#push-options "--initial_fuel 1 --max_fuel 1"
val mt_safe_elts_constr:
#hsz:hash_size_t ->
h:HS.mem -> lv:uint32_t{lv < merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
i:index_t -> j:index_t{j >= i} ->
Lemma (requires (V.size_of (V.get h hs lv) == j - offset_of i /\
mt_safe_elts #hsz h (lv + 1ul) hs (i / 2ul) (j / 2ul)))
(ensures (mt_safe_elts #hsz h lv hs i j))
let mt_safe_elts_constr #_ h lv hs i j = ()
val mt_safe_elts_head:
#hsz:hash_size_t ->
h:HS.mem -> lv:uint32_t{lv < merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
i:index_t -> j:index_t{j >= i} ->
Lemma (requires (mt_safe_elts #hsz h lv hs i j))
(ensures (V.size_of (V.get h hs lv) == j - offset_of i))
let mt_safe_elts_head #_ h lv hs i j = ()
val mt_safe_elts_rec:
#hsz:hash_size_t ->
h:HS.mem -> lv:uint32_t{lv < merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
i:index_t -> j:index_t{j >= i} ->
Lemma (requires (mt_safe_elts #hsz h lv hs i j))
(ensures (mt_safe_elts #hsz h (lv + 1ul) hs (i / 2ul) (j / 2ul)))
let mt_safe_elts_rec #_ h lv hs i j = ()
val mt_safe_elts_init:
#hsz:hash_size_t ->
h:HS.mem -> lv:uint32_t{lv <= merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
Lemma (requires (V.forall_ h hs lv (V.size_of hs)
(fun hv -> V.size_of hv = 0ul)))
(ensures (mt_safe_elts #hsz h lv hs 0ul 0ul))
(decreases (32 - U32.v lv))
let rec mt_safe_elts_init #hsz h lv hs =
if lv = merkle_tree_size_lg then ()
else mt_safe_elts_init #hsz h (lv + 1ul) hs
#pop-options
val mt_safe_elts_preserved:
#hsz:hash_size_t ->
lv:uint32_t{lv <= merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
i:index_t -> j:index_t{j >= i} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 hs /\
mt_safe_elts #hsz h0 lv hs i j /\
loc_disjoint p (V.loc_vector_within hs lv (V.size_of hs)) /\
modifies p h0 h1))
(ensures (mt_safe_elts #hsz h1 lv hs i j))
(decreases (32 - U32.v lv))
[SMTPat (V.live h0 hs);
SMTPat (mt_safe_elts #hsz h0 lv hs i j);
SMTPat (loc_disjoint p (RV.loc_rvector hs));
SMTPat (modifies p h0 h1)]
#push-options "--z3rlimit 100 --initial_fuel 2 --max_fuel 2"
let rec mt_safe_elts_preserved #hsz lv hs i j p h0 h1 =
if lv = merkle_tree_size_lg then ()
else (V.get_preserved hs lv p h0 h1;
mt_safe_elts_preserved #hsz (lv + 1ul) hs (i / 2ul) (j / 2ul) p h0 h1)
#pop-options
// `mt_safe` is the invariant of a Merkle tree through its lifetime.
// It includes liveness, regionality, disjointness (to each data structure),
// and valid element access (`mt_safe_elts`).
inline_for_extraction noextract
val mt_safe: HS.mem -> mt_p -> GTot Type0
let mt_safe h mt =
B.live h mt /\ B.freeable mt /\
(let mtv = B.get h mt 0 in
// Liveness & Accessibility
RV.rv_inv h (MT?.hs mtv) /\
RV.rv_inv h (MT?.rhs mtv) /\
Rgl?.r_inv (hreg (MT?.hash_size mtv)) h (MT?.mroot mtv) /\
mt_safe_elts h 0ul (MT?.hs mtv) (MT?.i mtv) (MT?.j mtv) /\
// Regionality
HH.extends (V.frameOf (MT?.hs mtv)) (B.frameOf mt) /\
HH.extends (V.frameOf (MT?.rhs mtv)) (B.frameOf mt) /\
HH.extends (B.frameOf (MT?.mroot mtv)) (B.frameOf mt) /\
HH.disjoint (V.frameOf (MT?.hs mtv)) (V.frameOf (MT?.rhs mtv)) /\
HH.disjoint (V.frameOf (MT?.hs mtv)) (B.frameOf (MT?.mroot mtv)) /\
HH.disjoint (V.frameOf (MT?.rhs mtv)) (B.frameOf (MT?.mroot mtv)))
// Since a Merkle tree satisfies regionality, it's ok to take all regions from
// a tree pointer as a location of the tree.
val mt_loc: mt_p -> GTot loc
let mt_loc mt = B.loc_all_regions_from false (B.frameOf mt)
val mt_safe_preserved:
mt:mt_p -> p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (mt_safe h0 mt /\
loc_disjoint p (mt_loc mt) /\
modifies p h0 h1))
(ensures (B.get h0 mt 0 == B.get h1 mt 0 /\
mt_safe h1 mt))
let mt_safe_preserved mt p h0 h1 =
assert (loc_includes (mt_loc mt) (B.loc_buffer mt));
let mtv = B.get h0 mt 0 in
assert (loc_includes (mt_loc mt) (RV.loc_rvector (MT?.hs mtv)));
assert (loc_includes (mt_loc mt) (RV.loc_rvector (MT?.rhs mtv)));
assert (loc_includes (mt_loc mt) (V.loc_vector (MT?.hs mtv)));
assert (loc_includes (mt_loc mt)
(B.loc_all_regions_from false (B.frameOf (MT?.mroot mtv))));
RV.rv_inv_preserved (MT?.hs mtv) p h0 h1;
RV.rv_inv_preserved (MT?.rhs mtv) p h0 h1;
Rgl?.r_sep (hreg (MT?.hash_size mtv)) (MT?.mroot mtv) p h0 h1;
V.loc_vector_within_included (MT?.hs mtv) 0ul (V.size_of (MT?.hs mtv));
mt_safe_elts_preserved 0ul (MT?.hs mtv) (MT?.i mtv) (MT?.j mtv) p h0 h1
/// Lifting to a high-level Merkle tree structure
val mt_safe_elts_spec:
#hsz:hash_size_t ->
h:HS.mem ->
lv:uint32_t{lv <= merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
i:index_t ->
j:index_t{j >= i} ->
Lemma (requires (RV.rv_inv h hs /\
mt_safe_elts #hsz h lv hs i j))
(ensures (MTH.hs_wf_elts #(U32.v hsz)
(U32.v lv) (RV.as_seq h hs)
(U32.v i) (U32.v j)))
(decreases (32 - U32.v lv))
#push-options "--z3rlimit 100 --initial_fuel 2 --max_fuel 2"
let rec mt_safe_elts_spec #_ h lv hs i j =
if lv = merkle_tree_size_lg then ()
else mt_safe_elts_spec h (lv + 1ul) hs (i / 2ul) (j / 2ul)
#pop-options
val merkle_tree_lift:
h:HS.mem ->
mtv:merkle_tree{
RV.rv_inv h (MT?.hs mtv) /\
RV.rv_inv h (MT?.rhs mtv) /\
Rgl?.r_inv (hreg (MT?.hash_size mtv)) h (MT?.mroot mtv) /\
mt_safe_elts #(MT?.hash_size mtv) h 0ul (MT?.hs mtv) (MT?.i mtv) (MT?.j mtv)} ->
GTot (r:MTH.merkle_tree #(U32.v (MT?.hash_size mtv)) {MTH.mt_wf_elts #_ r})
let merkle_tree_lift h mtv =
mt_safe_elts_spec h 0ul (MT?.hs mtv) (MT?.i mtv) (MT?.j mtv);
MTH.MT #(U32.v (MT?.hash_size mtv))
(U32.v (MT?.i mtv))
(U32.v (MT?.j mtv))
(RV.as_seq h (MT?.hs mtv))
(MT?.rhs_ok mtv)
(RV.as_seq h (MT?.rhs mtv))
(Rgl?.r_repr (hreg (MT?.hash_size mtv)) h (MT?.mroot mtv))
(Ghost.reveal (MT?.hash_spec mtv))
val mt_lift:
h:HS.mem -> mt:mt_p{mt_safe h mt} ->
GTot (r:MTH.merkle_tree #(U32.v (MT?.hash_size (B.get h mt 0))) {MTH.mt_wf_elts #_ r})
let mt_lift h mt =
merkle_tree_lift h (B.get h mt 0)
val mt_preserved:
mt:mt_p -> p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (mt_safe h0 mt /\
loc_disjoint p (mt_loc mt) /\
modifies p h0 h1))
(ensures (mt_safe_preserved mt p h0 h1;
mt_lift h0 mt == mt_lift h1 mt))
let mt_preserved mt p h0 h1 =
assert (loc_includes (B.loc_all_regions_from false (B.frameOf mt))
(B.loc_buffer mt));
B.modifies_buffer_elim mt p h0 h1;
assert (B.get h0 mt 0 == B.get h1 mt 0);
assert (loc_includes (B.loc_all_regions_from false (B.frameOf mt))
(RV.loc_rvector (MT?.hs (B.get h0 mt 0))));
assert (loc_includes (B.loc_all_regions_from false (B.frameOf mt))
(RV.loc_rvector (MT?.rhs (B.get h0 mt 0))));
assert (loc_includes (B.loc_all_regions_from false (B.frameOf mt))
(B.loc_buffer (MT?.mroot (B.get h0 mt 0))));
RV.as_seq_preserved (MT?.hs (B.get h0 mt 0)) p h0 h1;
RV.as_seq_preserved (MT?.rhs (B.get h0 mt 0)) p h0 h1;
B.modifies_buffer_elim (MT?.mroot (B.get h0 mt 0)) p h0 h1
/// Construction
// Note that the public function for creation is `mt_create` defined below,
// which builds a tree with an initial hash.
#push-options "--z3rlimit 100 --initial_fuel 1 --max_fuel 1 --initial_ifuel 1 --max_ifuel 1"
private
val create_empty_mt:
hash_size:hash_size_t ->
hash_spec:Ghost.erased (MTS.hash_fun_t #(U32.v hash_size)) ->
hash_fun:hash_fun_t #hash_size #hash_spec ->
r:HST.erid ->
HST.ST mt_p
(requires (fun _ -> true))
(ensures (fun h0 mt h1 ->
let dmt = B.get h1 mt 0 in
// memory safety
B.frameOf mt = r /\
modifies (mt_loc mt) h0 h1 /\
mt_safe h1 mt /\
mt_not_full h1 mt /\
// correctness
MT?.hash_size dmt = hash_size /\
MT?.offset dmt = 0UL /\
merkle_tree_lift h1 dmt == MTH.create_empty_mt #_ #(Ghost.reveal hash_spec) ()))
let create_empty_mt hsz hash_spec hash_fun r =
[@inline_let] let hrg = hreg hsz in
[@inline_let] let hvrg = hvreg hsz in
[@inline_let] let hvvrg = hvvreg hsz in
let hs_region = HST.new_region r in
let hs = RV.alloc_rid hvrg merkle_tree_size_lg hs_region in
let h0 = HST.get () in
mt_safe_elts_init #hsz h0 0ul hs;
let rhs_region = HST.new_region r in
let rhs = RV.alloc_rid hrg merkle_tree_size_lg rhs_region in
let h1 = HST.get () in
assert (RV.as_seq h1 rhs == S.create 32 (MTH.hash_init #(U32.v hsz)));
RV.rv_inv_preserved hs (V.loc_vector rhs) h0 h1;
RV.as_seq_preserved hs (V.loc_vector rhs) h0 h1;
V.loc_vector_within_included hs 0ul (V.size_of hs);
mt_safe_elts_preserved #hsz 0ul hs 0ul 0ul (V.loc_vector rhs) h0 h1;
let mroot_region = HST.new_region r in
let mroot = rg_alloc hrg mroot_region in
let h2 = HST.get () in
RV.as_seq_preserved hs loc_none h1 h2;
RV.as_seq_preserved rhs loc_none h1 h2;
mt_safe_elts_preserved #hsz 0ul hs 0ul 0ul loc_none h1 h2;
let mt = B.malloc r (MT hsz 0UL 0ul 0ul hs false rhs mroot hash_spec hash_fun) 1ul in
let h3 = HST.get () in
RV.as_seq_preserved hs loc_none h2 h3;
RV.as_seq_preserved rhs loc_none h2 h3;
Rgl?.r_sep hrg mroot loc_none h2 h3;
mt_safe_elts_preserved #hsz 0ul hs 0ul 0ul loc_none h2 h3;
mt
#pop-options
/// Destruction (free)
val mt_free: mt:mt_p ->
HST.ST unit
(requires (fun h0 -> mt_safe h0 mt))
(ensures (fun h0 _ h1 -> modifies (mt_loc mt) h0 h1))
#push-options "--z3rlimit 100"
let mt_free mt =
let mtv = !*mt in
RV.free (MT?.hs mtv);
RV.free (MT?.rhs mtv);
[@inline_let] let rg = hreg (MT?.hash_size mtv) in
rg_free rg (MT?.mroot mtv);
B.free mt
#pop-options
/// Insertion
private
val as_seq_sub_upd:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector #a #rst rg ->
i:uint32_t{i < V.size_of rv} -> v:Rgl?.repr rg ->
Lemma (requires (RV.rv_inv h rv))
(ensures (S.equal (S.upd (RV.as_seq h rv) (U32.v i) v)
(S.append
(RV.as_seq_sub h rv 0ul i)
(S.cons v (RV.as_seq_sub h rv (i + 1ul) (V.size_of rv))))))
#push-options "--z3rlimit 20"
let as_seq_sub_upd #a #rst #rg h rv i v =
Seq.Properties.slice_upd (RV.as_seq h rv) 0 (U32.v i) (U32.v i) v;
Seq.Properties.slice_upd (RV.as_seq h rv) (U32.v i + 1) (U32.v (V.size_of rv)) (U32.v i) v;
RV.as_seq_seq_slice rg h (V.as_seq h rv)
0 (U32.v (V.size_of rv)) 0 (U32.v i);
assert (S.equal (S.slice (RV.as_seq h rv) 0 (U32.v i))
(RV.as_seq_sub h rv 0ul i));
RV.as_seq_seq_slice rg h (V.as_seq h rv)
0 (U32.v (V.size_of rv)) (U32.v i + 1) (U32.v (V.size_of rv));
assert (S.equal (S.slice (RV.as_seq h rv) (U32.v i + 1) (U32.v (V.size_of rv)))
(RV.as_seq_sub h rv (i + 1ul) (V.size_of rv)));
assert (S.index (S.upd (RV.as_seq h rv) (U32.v i) v) (U32.v i) == v)
#pop-options
// `hash_vv_insert_copy` inserts a hash element at level `lv` by copying it and
// pushing the copy onto `hs[lv]`. For the detailed insertion procedure, see
// `insert_` and `mt_insert`.
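// Hedged note (added): RV.insert_copy used below deep-copies `v` into the region
// of `hs[lv]`, so the vector owns its element; e.g. a level holding [h0; h1]
// becomes [h0; h1; copy-of-v], growing its size by exactly 1ul as stated in the
// postcondition.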
#push-options "--z3rlimit 100 --initial_fuel 1 --max_fuel 1"
private
inline_for_extraction
val hash_vv_insert_copy:
#hsz:hash_size_t ->
lv:uint32_t{lv < merkle_tree_size_lg} ->
i:Ghost.erased index_t ->
j:index_t{
Ghost.reveal i <= j &&
U32.v j < pow2 (32 - U32.v lv) - 1 &&
j < uint32_32_max} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
v:hash #hsz ->
HST.ST unit
(requires (fun h0 ->
RV.rv_inv h0 hs /\
Rgl?.r_inv (hreg hsz) h0 v /\
HH.disjoint (V.frameOf hs) (B.frameOf v) /\
mt_safe_elts #hsz h0 lv hs (Ghost.reveal i) j))
(ensures (fun h0 _ h1 ->
// memory safety
modifies (loc_union
(RV.rs_loc_elem (hvreg hsz) (V.as_seq h0 hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul)))
h0 h1 /\
RV.rv_inv h1 hs /\
Rgl?.r_inv (hreg hsz) h1 v /\
V.size_of (V.get h1 hs lv) == j + 1ul - offset_of (Ghost.reveal i) /\
V.size_of (V.get h1 hs lv) == V.size_of (V.get h0 hs lv) + 1ul /\
mt_safe_elts #hsz h1 (lv + 1ul) hs (Ghost.reveal i / 2ul) (j / 2ul) /\
RV.rv_loc_elems h0 hs (lv + 1ul) (V.size_of hs) ==
RV.rv_loc_elems h1 hs (lv + 1ul) (V.size_of hs) /\
// correctness
(mt_safe_elts_spec #hsz h0 lv hs (Ghost.reveal i) j;
S.equal (RV.as_seq h1 hs)
(MTH.hashess_insert
(U32.v lv) (U32.v (Ghost.reveal i)) (U32.v j)
(RV.as_seq h0 hs) (Rgl?.r_repr (hreg hsz) h0 v))) /\
S.equal (S.index (RV.as_seq h1 hs) (U32.v lv))
(S.snoc (S.index (RV.as_seq h0 hs) (U32.v lv))
(Rgl?.r_repr (hreg hsz) h0 v))))
let hash_vv_insert_copy #hsz lv i j hs v =
let hh0 = HST.get () in
mt_safe_elts_rec hh0 lv hs (Ghost.reveal i) j;
/// 1) Insert an element at the level `lv`, where the new vector is not yet
/// connected to `hs`.
let ihv = RV.insert_copy (hcpy hsz) (V.index hs lv) v in
let hh1 = HST.get () in
// 1-0) Basic disjointness conditions
V.forall2_forall_left hh0 hs 0ul (V.size_of hs) lv
(fun b1 b2 -> HH.disjoint (Rgl?.region_of (hvreg hsz) b1)
(Rgl?.region_of (hvreg hsz) b2));
V.forall2_forall_right hh0 hs 0ul (V.size_of hs) lv
(fun b1 b2 -> HH.disjoint (Rgl?.region_of (hvreg hsz) b1)
(Rgl?.region_of (hvreg hsz) b2));
V.loc_vector_within_included hs lv (lv + 1ul);
V.loc_vector_within_included hs (lv + 1ul) (V.size_of hs);
V.loc_vector_within_disjoint hs lv (lv + 1ul) (lv + 1ul) (V.size_of hs);
// 1-1) For the `modifies` postcondition.
assert (modifies (RV.rs_loc_elem (hvreg hsz) (V.as_seq hh0 hs) (U32.v lv)) hh0 hh1);
// 1-2) Preservation
Rgl?.r_sep (hreg hsz) v (RV.loc_rvector (V.get hh0 hs lv)) hh0 hh1;
RV.rv_loc_elems_preserved
hs (lv + 1ul) (V.size_of hs)
(RV.loc_rvector (V.get hh0 hs lv)) hh0 hh1;
// 1-3) For `mt_safe_elts`
assert (V.size_of ihv == j + 1ul - offset_of (Ghost.reveal i)); // head updated
mt_safe_elts_preserved
(lv + 1ul) hs (Ghost.reveal i / 2ul) (j / 2ul)
(RV.loc_rvector (V.get hh0 hs lv)) hh0 hh1; // tail not yet
// 1-4) For the `rv_inv` postcondition
RV.rs_loc_elems_elem_disj
(hvreg hsz) (V.as_seq hh0 hs) (V.frameOf hs)
0 (U32.v (V.size_of hs)) 0 (U32.v lv) (U32.v lv);
RV.rs_loc_elems_parent_disj
(hvreg hsz) (V.as_seq hh0 hs) (V.frameOf hs)
0 (U32.v lv);
RV.rv_elems_inv_preserved
hs 0ul lv (RV.loc_rvector (V.get hh0 hs lv))
hh0 hh1;
assert (RV.rv_elems_inv hh1 hs 0ul lv);
RV.rs_loc_elems_elem_disj
(hvreg hsz) (V.as_seq hh0 hs) (V.frameOf hs)
0 (U32.v (V.size_of hs))
(U32.v lv + 1) (U32.v (V.size_of hs))
(U32.v lv);
RV.rs_loc_elems_parent_disj
(hvreg hsz) (V.as_seq hh0 hs) (V.frameOf hs)
(U32.v lv + 1) (U32.v (V.size_of hs));
RV.rv_elems_inv_preserved
hs (lv + 1ul) (V.size_of hs) (RV.loc_rvector (V.get hh0 hs lv))
hh0 hh1;
assert (RV.rv_elems_inv hh1 hs (lv + 1ul) (V.size_of hs));
// assert (rv_itself_inv hh1 hs);
// assert (elems_reg hh1 hs);
// 1-5) Correctness
assert (S.equal (RV.as_seq hh1 ihv)
(S.snoc (RV.as_seq hh0 (V.get hh0 hs lv)) (Rgl?.r_repr (hreg hsz) hh0 v)));
/// 2) Assign the updated vector to `hs` at the level `lv`.
RV.assign hs lv ihv;
let hh2 = HST.get () in
// 2-1) For the `modifies` postcondition.
assert (modifies (V.loc_vector_within hs lv (lv + 1ul)) hh1 hh2);
assert (modifies (loc_union
(RV.rs_loc_elem (hvreg hsz) (V.as_seq hh0 hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul))) hh0 hh2);
// 2-2) Preservation
Rgl?.r_sep (hreg hsz) v (RV.loc_rvector hs) hh1 hh2;
RV.rv_loc_elems_preserved
hs (lv + 1ul) (V.size_of hs)
(V.loc_vector_within hs lv (lv + 1ul)) hh1 hh2;
// 2-3) For `mt_safe_elts`
assert (V.size_of (V.get hh2 hs lv) == j + 1ul - offset_of (Ghost.reveal i));
mt_safe_elts_preserved
(lv + 1ul) hs (Ghost.reveal i / 2ul) (j / 2ul)
(V.loc_vector_within hs lv (lv + 1ul)) hh1 hh2;
// 2-4) Correctness
RV.as_seq_sub_preserved hs 0ul lv (loc_rvector ihv) hh0 hh1;
RV.as_seq_sub_preserved hs (lv + 1ul) merkle_tree_size_lg (loc_rvector ihv) hh0 hh1;
assert (S.equal (RV.as_seq hh2 hs)
(S.append
(RV.as_seq_sub hh0 hs 0ul lv)
(S.cons (RV.as_seq hh1 ihv)
(RV.as_seq_sub hh0 hs (lv + 1ul) merkle_tree_size_lg))));
as_seq_sub_upd hh0 hs lv (RV.as_seq hh1 ihv)
#pop-options
private
val insert_index_helper_even:
lv:uint32_t{lv < merkle_tree_size_lg} ->
j:index_t{U32.v j < pow2 (32 - U32.v lv) - 1} ->
Lemma (requires (j % 2ul <> 1ul))
(ensures (U32.v j % 2 <> 1 /\ j / 2ul == (j + 1ul) / 2ul))
let insert_index_helper_even lv j = ()
#push-options "--z3rlimit 100 --initial_fuel 1 --max_fuel 1 --initial_ifuel 1 --max_ifuel 1"
private
val insert_index_helper_odd:
lv:uint32_t{lv < merkle_tree_size_lg} ->
i:index_t ->
j:index_t{i <= j && U32.v j < pow2 (32 - U32.v lv) - 1} ->
Lemma (requires (j % 2ul = 1ul /\
j < uint32_32_max))
(ensures (U32.v j % 2 = 1 /\
U32.v (j / 2ul) < pow2 (32 - U32.v (lv + 1ul)) - 1 /\
(j + 1ul) / 2ul == j / 2ul + 1ul /\
j - offset_of i > 0ul))
let insert_index_helper_odd lv i j = ()
#pop-options
private
val loc_union_assoc_4:
a:loc -> b:loc -> c:loc -> d:loc ->
Lemma (loc_union (loc_union a b) (loc_union c d) ==
loc_union (loc_union a c) (loc_union b d))
let loc_union_assoc_4 a b c d =
loc_union_assoc (loc_union a b) c d;
loc_union_assoc a b c;
loc_union_assoc a c b;
loc_union_assoc (loc_union a c) b d
private
val insert_modifies_rec_helper:
#hsz:hash_size_t ->
lv:uint32_t{lv < merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
aloc:loc ->
h:HS.mem ->
Lemma (loc_union
(loc_union
(loc_union
(RV.rs_loc_elem (hvreg hsz) (V.as_seq h hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul)))
aloc)
(loc_union
(loc_union
(RV.rv_loc_elems h hs (lv + 1ul) (V.size_of hs))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs)))
aloc) ==
loc_union
(loc_union
(RV.rv_loc_elems h hs lv (V.size_of hs))
(V.loc_vector_within hs lv (V.size_of hs)))
aloc)
#push-options "--z3rlimit 100 --initial_fuel 2 --max_fuel 2"
let insert_modifies_rec_helper #hsz lv hs aloc h =
assert (V.loc_vector_within hs lv (V.size_of hs) ==
loc_union (V.loc_vector_within hs lv (lv + 1ul))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs)));
RV.rs_loc_elems_rec_inverse (hvreg hsz) (V.as_seq h hs) (U32.v lv) (U32.v (V.size_of hs));
assert (RV.rv_loc_elems h hs lv (V.size_of hs) ==
loc_union (RV.rs_loc_elem (hvreg hsz) (V.as_seq h hs) (U32.v lv))
(RV.rv_loc_elems h hs (lv + 1ul) (V.size_of hs)));
// Applying some association rules...
loc_union_assoc
(loc_union
(RV.rs_loc_elem (hvreg hsz) (V.as_seq h hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul))) aloc
(loc_union
(loc_union
(RV.rv_loc_elems h hs (lv + 1ul) (V.size_of hs))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs)))
aloc);
loc_union_assoc
(loc_union
(RV.rv_loc_elems h hs (lv + 1ul) (V.size_of hs))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs))) aloc aloc;
loc_union_assoc
(loc_union
(RV.rs_loc_elem (hvreg hsz) (V.as_seq h hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul)))
(loc_union
(RV.rv_loc_elems h hs (lv + 1ul) (V.size_of hs))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs)))
aloc;
loc_union_assoc_4
(RV.rs_loc_elem (hvreg hsz) (V.as_seq h hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul))
(RV.rv_loc_elems h hs (lv + 1ul) (V.size_of hs))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs))
#pop-options
private
val insert_modifies_union_loc_weakening:
l1:loc -> l2:loc -> l3:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (modifies l1 h0 h1))
(ensures (modifies (loc_union (loc_union l1 l2) l3) h0 h1))
let insert_modifies_union_loc_weakening l1 l2 l3 h0 h1 =
B.loc_includes_union_l l1 l2 l1;
B.loc_includes_union_l (loc_union l1 l2) l3 (loc_union l1 l2)
private
val insert_snoc_last_helper:
#a:Type -> s:S.seq a{S.length s > 0} -> v:a ->
Lemma (S.index (S.snoc s v) (S.length s - 1) == S.last s)
let insert_snoc_last_helper #a s v = ()
private
val rv_inv_rv_elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (RV.rv_inv h rv))
(ensures (RV.rv_elems_reg h rv i j))
let rv_inv_rv_elems_reg #a #rst #rg h rv i j = ()
// `insert_` recursively inserts the proper hashes into each level `lv` by
// accumulating a compressed hash. For example, if there are three leaf elements
// in the tree, `insert_` will change `hs` as follows:
// (`hij` is a compressed hash from `hi` to `hj`)
//
// BEFORE INSERTION AFTER INSERTION
// lv
// 0 h0 h1 h2 ====> h0 h1 h2 h3
// 1 h01 h01 h23
// 2 h03
//
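// Hedged trace of the picture above (added comment): inserting h3 starts at
// lv = 0 with j = 3 (odd), so h3 is pushed and acc becomes h23 = hash h2 h3;
// at lv = 1, j = 1 is odd again, so h23 is pushed next to h01 and acc becomes
// h03 = hash h01 h23; at lv = 2, j = 0 is even, so h03 is simply pushed and the
// recursion stops.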
private
val insert_:
#hsz:hash_size_t ->
#hash_spec:Ghost.erased (MTS.hash_fun_t #(U32.v hsz)) ->
lv:uint32_t{lv < merkle_tree_size_lg} ->
i:Ghost.erased index_t ->
j:index_t{
Ghost.reveal i <= j &&
U32.v j < pow2 (32 - U32.v lv) - 1 &&
j < uint32_32_max} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
acc:hash #hsz ->
hash_fun:hash_fun_t #hsz #hash_spec ->
HST.ST unit
(requires (fun h0 ->
RV.rv_inv h0 hs /\
Rgl?.r_inv (hreg hsz) h0 acc /\
HH.disjoint (V.frameOf hs) (B.frameOf acc) /\
mt_safe_elts h0 lv hs (Ghost.reveal i) j))
(ensures (fun h0 _ h1 ->
// memory safety
modifies (loc_union
(loc_union
(RV.rv_loc_elems h0 hs lv (V.size_of hs))
(V.loc_vector_within hs lv (V.size_of hs)))
(B.loc_all_regions_from false (B.frameOf acc)))
h0 h1 /\
RV.rv_inv h1 hs /\
Rgl?.r_inv (hreg hsz) h1 acc /\
mt_safe_elts h1 lv hs (Ghost.reveal i) (j + 1ul) /\
// correctness
(mt_safe_elts_spec h0 lv hs (Ghost.reveal i) j;
S.equal (RV.as_seq h1 hs)
(MTH.insert_ #(U32.v hsz) #hash_spec (U32.v lv) (U32.v (Ghost.reveal i)) (U32.v j)
(RV.as_seq h0 hs) (Rgl?.r_repr (hreg hsz) h0 acc)))))
(decreases (U32.v j))
#push-options "--z3rlimit 800 --initial_fuel 1 --max_fuel 1 --initial_ifuel 1 --max_ifuel 1"
let rec insert_ #hsz #hash_spec lv i j hs acc hash_fun =
let hh0 = HST.get () in
hash_vv_insert_copy lv i j hs acc;
let hh1 = HST.get () in
// Base conditions
V.loc_vector_within_included hs lv (lv + 1ul);
V.loc_vector_within_included hs (lv + 1ul) (V.size_of hs);
V.loc_vector_within_disjoint hs lv (lv + 1ul) (lv + 1ul) (V.size_of hs);
assert (V.size_of (V.get hh1 hs lv) == j + 1ul - offset_of (Ghost.reveal i));
assert (mt_safe_elts hh1 (lv + 1ul) hs (Ghost.reveal i / 2ul) (j / 2ul));
if j % 2ul = 1ul
then (insert_index_helper_odd lv (Ghost.reveal i) j;
assert (S.length (S.index (RV.as_seq hh0 hs) (U32.v lv)) > 0);
let lvhs = V.index hs lv in
assert (U32.v (V.size_of lvhs) ==
S.length (S.index (RV.as_seq hh0 hs) (U32.v lv)) + 1);
assert (V.size_of lvhs > 1ul);
/// 3) Update the accumulator `acc`.
hash_vec_rv_inv_r_inv hh1 (V.get hh1 hs lv) (V.size_of (V.get hh1 hs lv) - 2ul);
assert (Rgl?.r_inv (hreg hsz) hh1 acc);
hash_fun (V.index lvhs (V.size_of lvhs - 2ul)) acc acc;
let hh2 = HST.get () in
// 3-1) For the `modifies` postcondition
assert (modifies (B.loc_all_regions_from false (B.frameOf acc)) hh1 hh2);
assert (modifies
(loc_union
(loc_union
(RV.rs_loc_elem (hvreg hsz) (V.as_seq hh0 hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul)))
(B.loc_all_regions_from false (B.frameOf acc)))
hh0 hh2);
// 3-2) Preservation
RV.rv_inv_preserved
hs (B.loc_region_only false (B.frameOf acc)) hh1 hh2;
RV.as_seq_preserved
hs (B.loc_region_only false (B.frameOf acc)) hh1 hh2;
RV.rv_loc_elems_preserved
hs (lv + 1ul) (V.size_of hs)
(B.loc_region_only false (B.frameOf acc)) hh1 hh2;
assert (RV.rv_inv hh2 hs);
assert (Rgl?.r_inv (hreg hsz) hh2 acc);
// 3-3) For `mt_safe_elts`
V.get_preserved hs lv
(B.loc_region_only false (B.frameOf acc)) hh1 hh2; // head preserved
mt_safe_elts_preserved
(lv + 1ul) hs (Ghost.reveal i / 2ul) (j / 2ul)
(B.loc_region_only false (B.frameOf acc)) hh1 hh2; // tail preserved
// 3-4) Correctness
insert_snoc_last_helper
(RV.as_seq hh0 (V.get hh0 hs lv))
(Rgl?.r_repr (hreg hsz) hh0 acc);
assert (S.equal (Rgl?.r_repr (hreg hsz) hh2 acc) // `nacc` in `MTH.insert_`
((Ghost.reveal hash_spec)
(S.last (S.index (RV.as_seq hh0 hs) (U32.v lv)))
(Rgl?.r_repr (hreg hsz) hh0 acc)));
/// 4) Recursion
insert_ (lv + 1ul)
(Ghost.hide (Ghost.reveal i / 2ul)) (j / 2ul)
hs acc hash_fun;
let hh3 = HST.get () in
// 4-0) Memory safety brought from the postcondition of the recursion
assert (RV.rv_inv hh3 hs);
assert (Rgl?.r_inv (hreg hsz) hh3 acc);
assert (modifies (loc_union
(loc_union
(RV.rv_loc_elems hh0 hs (lv + 1ul) (V.size_of hs))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs)))
(B.loc_all_regions_from false (B.frameOf acc)))
hh2 hh3);
assert (modifies
(loc_union
(loc_union
(loc_union
(RV.rs_loc_elem (hvreg hsz) (V.as_seq hh0 hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul)))
(B.loc_all_regions_from false (B.frameOf acc)))
(loc_union
(loc_union
(RV.rv_loc_elems hh0 hs (lv + 1ul) (V.size_of hs))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs)))
(B.loc_all_regions_from false (B.frameOf acc))))
hh0 hh3);
// 4-1) For `mt_safe_elts`
rv_inv_rv_elems_reg hh2 hs (lv + 1ul) (V.size_of hs);
RV.rv_loc_elems_included hh2 hs (lv + 1ul) (V.size_of hs);
assert (loc_disjoint
(V.loc_vector_within hs lv (lv + 1ul))
(RV.rv_loc_elems hh2 hs (lv + 1ul) (V.size_of hs)));
assert (loc_disjoint
(V.loc_vector_within hs lv (lv + 1ul))
(B.loc_all_regions_from false (B.frameOf acc)));
V.get_preserved hs lv
(loc_union
(loc_union
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs))
(RV.rv_loc_elems hh2 hs (lv + 1ul) (V.size_of hs)))
(B.loc_all_regions_from false (B.frameOf acc)))
hh2 hh3;
assert (V.size_of (V.get hh3 hs lv) ==
j + 1ul - offset_of (Ghost.reveal i)); // head preserved
assert (mt_safe_elts hh3 (lv + 1ul) hs
(Ghost.reveal i / 2ul) (j / 2ul + 1ul)); // tail by recursion
mt_safe_elts_constr hh3 lv hs (Ghost.reveal i) (j + 1ul);
assert (mt_safe_elts hh3 lv hs (Ghost.reveal i) (j + 1ul));
// 4-2) Correctness
mt_safe_elts_spec hh2 (lv + 1ul) hs (Ghost.reveal i / 2ul) (j / 2ul);
assert (S.equal (RV.as_seq hh3 hs)
(MTH.insert_ #(U32.v hsz) #(Ghost.reveal hash_spec) (U32.v lv + 1) (U32.v (Ghost.reveal i) / 2) (U32.v j / 2)
(RV.as_seq hh2 hs) (Rgl?.r_repr (hreg hsz) hh2 acc)));
mt_safe_elts_spec hh0 lv hs (Ghost.reveal i) j;
MTH.insert_rec #(U32.v hsz) #(Ghost.reveal hash_spec) (U32.v lv) (U32.v (Ghost.reveal i)) (U32.v j)
(RV.as_seq hh0 hs) (Rgl?.r_repr (hreg hsz) hh0 acc);
assert (S.equal (RV.as_seq hh3 hs)
(MTH.insert_ #(U32.v hsz) #(Ghost.reveal hash_spec) (U32.v lv) (U32.v (Ghost.reveal i)) (U32.v j)
(RV.as_seq hh0 hs) (Rgl?.r_repr (hreg hsz) hh0 acc))))
else (insert_index_helper_even lv j;
// memory safety
assert (mt_safe_elts hh1 (lv + 1ul) hs (Ghost.reveal i / 2ul) ((j + 1ul) / 2ul));
mt_safe_elts_constr hh1 lv hs (Ghost.reveal i) (j + 1ul);
assert (mt_safe_elts hh1 lv hs (Ghost.reveal i) (j + 1ul));
assert (modifies
(loc_union
(RV.rs_loc_elem (hvreg hsz) (V.as_seq hh0 hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul)))
hh0 hh1);
insert_modifies_union_loc_weakening
(loc_union
(RV.rs_loc_elem (hvreg hsz) (V.as_seq hh0 hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul)))
(B.loc_all_regions_from false (B.frameOf acc))
(loc_union
(loc_union
(RV.rv_loc_elems hh0 hs (lv + 1ul) (V.size_of hs))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs)))
(B.loc_all_regions_from false (B.frameOf acc)))
hh0 hh1;
// correctness
mt_safe_elts_spec hh0 lv hs (Ghost.reveal i) j;
MTH.insert_base #(U32.v hsz) #(Ghost.reveal hash_spec) (U32.v lv) (U32.v (Ghost.reveal i)) (U32.v j)
(RV.as_seq hh0 hs) (Rgl?.r_repr (hreg hsz) hh0 acc);
assert (S.equal (RV.as_seq hh1 hs)
(MTH.insert_ #(U32.v hsz) #(Ghost.reveal hash_spec) (U32.v lv) (U32.v (Ghost.reveal i)) (U32.v j)
(RV.as_seq hh0 hs) (Rgl?.r_repr (hreg hsz) hh0 acc))));
/// 5) Proving the postcondition after recursion
let hh4 = HST.get () in
// 5-1) For the `modifies` postcondition.
assert (modifies
(loc_union
(loc_union
(loc_union
(RV.rs_loc_elem (hvreg hsz) (V.as_seq hh0 hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul)))
(B.loc_all_regions_from false (B.frameOf acc)))
(loc_union
(loc_union
(RV.rv_loc_elems hh0 hs (lv + 1ul) (V.size_of hs))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs)))
(B.loc_all_regions_from false (B.frameOf acc))))
hh0 hh4);
insert_modifies_rec_helper
lv hs (B.loc_all_regions_from false (B.frameOf acc)) hh0;
// 5-2) For `mt_safe_elts`
assert (mt_safe_elts hh4 lv hs (Ghost.reveal i) (j + 1ul));
// 5-3) Preservation
assert (RV.rv_inv hh4 hs);
assert (Rgl?.r_inv (hreg hsz) hh4 acc);
// 5-4) Correctness
mt_safe_elts_spec hh0 lv hs (Ghost.reveal i) j;
assert (S.equal (RV.as_seq hh4 hs)
(MTH.insert_ #(U32.v hsz) #hash_spec (U32.v lv) (U32.v (Ghost.reveal i)) (U32.v j)
(RV.as_seq hh0 hs) (Rgl?.r_repr (hreg hsz) hh0 acc))) // QED
#pop-options
private inline_for_extraction
val mt_insert_pre_nst: mtv:merkle_tree -> v:hash #(MT?.hash_size mtv) -> Tot bool
let mt_insert_pre_nst mtv v = mt_not_full_nst mtv && add64_fits (MT?.offset mtv) ((MT?.j mtv) + 1ul)
val mt_insert_pre: #hsz:Ghost.erased hash_size_t -> mt:const_mt_p -> v:hash #hsz -> HST.ST bool
(requires (fun h0 -> mt_safe h0 (CB.cast mt) /\ (MT?.hash_size (B.get h0 (CB.cast mt) 0)) = Ghost.reveal hsz))
(ensures (fun _ _ _ -> True))
let mt_insert_pre #hsz mt v =
let mt = !*(CB.cast mt) in
assert (MT?.hash_size mt == (MT?.hash_size mt));
mt_insert_pre_nst mt v
// `mt_insert` inserts a hash into a Merkle tree. Note that this operation
// manipulates the content in `v`, since it uses `v` as an accumulator during
// insertion.
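// Hedged usage sketch (hypothetical caller, not from the source): a client that
// needs to keep its leaf value must first copy it into a fresh regional hash
// (e.g. with the copy operation of the hash regional) before calling mt_insert,
// because `v` is overwritten with intermediate compressed hashes.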
#push-options "--z3rlimit 100 --initial_fuel 1 --max_fuel 1 --initial_ifuel 1 --max_ifuel 1"
val mt_insert:
hsz:Ghost.erased hash_size_t ->
mt:mt_p -> v:hash #hsz ->
HST.ST unit
(requires (fun h0 ->
let dmt = B.get h0 mt 0 in
mt_safe h0 mt /\
Rgl?.r_inv (hreg hsz) h0 v /\
HH.disjoint (B.frameOf mt) (B.frameOf v) /\
MT?.hash_size dmt = Ghost.reveal hsz /\
mt_insert_pre_nst dmt v))
(ensures (fun h0 _ h1 ->
// memory safety
modifies (loc_union
(mt_loc mt)
(B.loc_all_regions_from false (B.frameOf v)))
h0 h1 /\
mt_safe h1 mt /\
// correctness
MT?.hash_size (B.get h1 mt 0) = Ghost.reveal hsz /\
mt_lift h1 mt == MTH.mt_insert (mt_lift h0 mt) (Rgl?.r_repr (hreg hsz) h0 v)))
#pop-options
#push-options "--z3rlimit 40"
let mt_insert hsz mt v =
let hh0 = HST.get () in
let mtv = !*mt in
let hs = MT?.hs mtv in
let hsz = MT?.hash_size mtv in
insert_ #hsz #(Ghost.reveal (MT?.hash_spec mtv)) 0ul (Ghost.hide (MT?.i mtv)) (MT?.j mtv) hs v (MT?.hash_fun mtv);
let hh1 = HST.get () in
RV.rv_loc_elems_included hh0 (MT?.hs mtv) 0ul (V.size_of hs);
V.loc_vector_within_included hs 0ul (V.size_of hs);
RV.rv_inv_preserved
(MT?.rhs mtv)
(loc_union
(loc_union
(RV.rv_loc_elems hh0 hs 0ul (V.size_of hs))
(V.loc_vector_within hs 0ul (V.size_of hs)))
(B.loc_all_regions_from false (B.frameOf v)))
hh0 hh1;
RV.as_seq_preserved
(MT?.rhs mtv)
(loc_union
(loc_union
(RV.rv_loc_elems hh0 hs 0ul (V.size_of hs))
(V.loc_vector_within hs 0ul (V.size_of hs)))
(B.loc_all_regions_from false (B.frameOf v)))
hh0 hh1;
Rgl?.r_sep (hreg hsz) (MT?.mroot mtv)
(loc_union
(loc_union
(RV.rv_loc_elems hh0 hs 0ul (V.size_of hs))
(V.loc_vector_within hs 0ul (V.size_of hs)))
(B.loc_all_regions_from false (B.frameOf v)))
hh0 hh1;
mt *= MT (MT?.hash_size mtv)
(MT?.offset mtv)
(MT?.i mtv)
(MT?.j mtv + 1ul)
(MT?.hs mtv)
false // `rhs` is always invalidated right after an insertion.
(MT?.rhs mtv)
(MT?.mroot mtv)
(MT?.hash_spec mtv)
(MT?.hash_fun mtv);
let hh2 = HST.get () in
RV.rv_inv_preserved
(MT?.hs mtv) (B.loc_buffer mt) hh1 hh2;
RV.rv_inv_preserved
(MT?.rhs mtv) (B.loc_buffer mt) hh1 hh2;
RV.as_seq_preserved
(MT?.hs mtv) (B.loc_buffer mt) hh1 hh2;
RV.as_seq_preserved
(MT?.rhs mtv) (B.loc_buffer mt) hh1 hh2;
Rgl?.r_sep (hreg hsz) (MT?.mroot mtv) (B.loc_buffer mt) hh1 hh2;
mt_safe_elts_preserved
0ul (MT?.hs mtv) (MT?.i mtv) (MT?.j mtv + 1ul) (B.loc_buffer mt)
hh1 hh2
#pop-options
// `mt_create` initializes a Merkle tree with a given initial hash `init`.
// A valid Merkle tree should contain at least one element.
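// Hedged usage sketch (hypothetical call sequence, not from the source): allocate
// an eternal region r, allocate and fill an initial leaf hash in a region disjoint
// from r, then call mt_create_custom hsz hash_spec r init hash_fun; the init hash
// is also the first inserted leaf, so the resulting tree has j = 1ul.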
val mt_create_custom:
hsz:hash_size_t ->
hash_spec:Ghost.erased (MTS.hash_fun_t #(U32.v hsz)) ->
r:HST.erid -> init:hash #hsz -> hash_fun:hash_fun_t #hsz #hash_spec -> HST.ST mt_p
(requires (fun h0 ->
Rgl?.r_inv (hreg hsz) h0 init /\
HH.disjoint r (B.frameOf init)))
(ensures (fun h0 mt h1 ->
// memory safety
modifies (loc_union (mt_loc mt) (B.loc_all_regions_from false (B.frameOf init))) h0 h1 /\
mt_safe h1 mt /\
// correctness
MT?.hash_size (B.get h1 mt 0) = hsz /\
mt_lift h1 mt == MTH.mt_create (U32.v hsz) (Ghost.reveal hash_spec) (Rgl?.r_repr (hreg hsz) h0 init)))
#push-options "--z3rlimit 40"
let mt_create_custom hsz hash_spec r init hash_fun =
let hh0 = HST.get () in
let mt = create_empty_mt hsz hash_spec hash_fun r in
mt_insert hsz mt init;
let hh2 = HST.get () in
mt
#pop-options
/// Construction and Destruction of paths
// Since each element pointer in `path` is from the target Merkle tree and
// each element has a different location in `MT?.hs` (and thus a different region id),
// we cannot use the regionality property for `path`s. Hence here we manually
// define invariants and representation.
noeq type path =
| Path: hash_size:hash_size_t ->
hashes:V.vector (hash #hash_size) ->
path
type path_p = B.pointer path
type const_path_p = const_pointer path
private
let phashes (h:HS.mem) (p:path_p)
: GTot (V.vector (hash #(Path?.hash_size (B.get h p 0))))
= Path?.hashes (B.get h p 0)
// Memory safety of a path as an invariant
inline_for_extraction noextract
val path_safe:
h:HS.mem -> mtr:HH.rid -> p:path_p -> GTot Type0
let path_safe h mtr p =
B.live h p /\ B.freeable p /\
V.live h (phashes h p) /\ V.freeable (phashes h p) /\
HST.is_eternal_region (V.frameOf (phashes h p)) /\
(let hsz = Path?.hash_size (B.get h p 0) in
V.forall_all h (phashes h p)
(fun hp -> Rgl?.r_inv (hreg hsz) h hp /\
HH.includes mtr (Rgl?.region_of (hreg hsz) hp)) /\
HH.extends (V.frameOf (phashes h p)) (B.frameOf p) /\
HH.disjoint mtr (B.frameOf p)) | false | false | MerkleTree.Low.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 0,
"initial_ifuel": 0,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 10,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val path_loc: path_p -> GTot loc | [] | MerkleTree.Low.path_loc | {
"file_name": "src/MerkleTree.Low.fst",
"git_rev": "3b0f086655c145aa23f58a97259ebf4cf112a4a3",
"git_url": "https://github.com/hacl-star/merkle-tree.git",
"project_name": "merkle-tree"
} | p: MerkleTree.Low.path_p -> Prims.GTot LowStar.Monotonic.Buffer.loc | {
"end_col": 59,
"end_line": 1080,
"start_col": 17,
"start_line": 1080
} |
Prims.Tot | val mt_flush_pre_nst: mt:merkle_tree -> Tot bool | [
{
"abbrev": false,
"full_module": "MerkleTree.Low.VectorExtras",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree.Low.Hashfunctions",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree.Low.Datastructures",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": true,
"full_module": "MerkleTree.Spec",
"short_module": "MTS"
},
{
"abbrev": true,
"full_module": "MerkleTree.New.High",
"short_module": "MTH"
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "LowStar.Regional.Instances",
"short_module": "RVI"
},
{
"abbrev": true,
"full_module": "LowStar.RVector",
"short_module": "RV"
},
{
"abbrev": true,
"full_module": "LowStar.Vector",
"short_module": "V"
},
{
"abbrev": true,
"full_module": "LowStar.ConstBuffer",
"short_module": "CB"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.Monotonic.HyperHeap",
"short_module": "HH"
},
{
"abbrev": true,
"full_module": "FStar.Monotonic.HyperStack",
"short_module": "MHS"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "LowStar.Regional.Instances",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.RVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Regional",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Vector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.BufferOps",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Buffer",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Integers",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.All",
"short_module": null
},
{
"abbrev": false,
"full_module": "EverCrypt.Helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let mt_flush_pre_nst mt = MT?.j mt > MT?.i mt | val mt_flush_pre_nst: mt:merkle_tree -> Tot bool
let mt_flush_pre_nst mt = | false | null | false | MT?.j mt > MT?.i mt | {
"checked_file": "MerkleTree.Low.fst.checked",
"dependencies": [
"prims.fst.checked",
"MerkleTree.Spec.fst.checked",
"MerkleTree.New.High.fst.checked",
"MerkleTree.Low.VectorExtras.fst.checked",
"MerkleTree.Low.Hashfunctions.fst.checked",
"MerkleTree.Low.Datastructures.fst.checked",
"LowStar.Vector.fst.checked",
"LowStar.RVector.fst.checked",
"LowStar.Regional.Instances.fst.checked",
"LowStar.Regional.fst.checked",
"LowStar.ConstBuffer.fsti.checked",
"LowStar.BufferOps.fst.checked",
"LowStar.Buffer.fst.checked",
"Lib.IntTypes.fsti.checked",
"Lib.ByteBuffer.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.Properties.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Monotonic.HyperStack.fsti.checked",
"FStar.Monotonic.HyperHeap.fsti.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Integers.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked",
"FStar.All.fst.checked",
"EverCrypt.Helpers.fsti.checked"
],
"interface_file": false,
"source_file": "MerkleTree.Low.fst"
} | [
"total"
] | [
"MerkleTree.Low.merkle_tree",
"FStar.Integers.op_Greater",
"FStar.Integers.Unsigned",
"FStar.Integers.W32",
"MerkleTree.Low.__proj__MT__item__j",
"MerkleTree.Low.__proj__MT__item__i",
"Prims.bool"
] | [] | module MerkleTree.Low
open EverCrypt.Helpers
open FStar.All
open FStar.Integers
open FStar.Mul
open LowStar.Buffer
open LowStar.BufferOps
open LowStar.Vector
open LowStar.Regional
open LowStar.RVector
open LowStar.Regional.Instances
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
module MHS = FStar.Monotonic.HyperStack
module HH = FStar.Monotonic.HyperHeap
module B = LowStar.Buffer
module CB = LowStar.ConstBuffer
module V = LowStar.Vector
module RV = LowStar.RVector
module RVI = LowStar.Regional.Instances
module S = FStar.Seq
module U32 = FStar.UInt32
module U64 = FStar.UInt64
module MTH = MerkleTree.New.High
module MTS = MerkleTree.Spec
open Lib.IntTypes
open MerkleTree.Low.Datastructures
open MerkleTree.Low.Hashfunctions
open MerkleTree.Low.VectorExtras
#set-options "--z3rlimit 10 --initial_fuel 0 --max_fuel 0 --initial_ifuel 0 --max_ifuel 0"
type const_pointer (a:Type0) = b:CB.const_buffer a{CB.length b == 1 /\ CB.qual_of b == CB.MUTABLE}
/// Low-level Merkle tree data structure
///
// NOTE: because of a lack of 64-bit LowStar.Buffer support, we currently
// cannot change the types below to anything else.
type index_t = uint32_t
let uint32_32_max = 4294967295ul
inline_for_extraction
let uint32_max = 4294967295UL
let uint64_max = 18446744073709551615UL
let offset_range_limit = uint32_max
type offset_t = uint64_t
inline_for_extraction noextract unfold let u32_64 = Int.Cast.uint32_to_uint64
inline_for_extraction noextract unfold let u64_32 = Int.Cast.uint64_to_uint32
private inline_for_extraction
let offsets_connect (x:offset_t) (y:offset_t): Tot bool = y >= x && (y - x) <= offset_range_limit
private inline_for_extraction
let split_offset (tree:offset_t) (index:offset_t{offsets_connect tree index}): Tot index_t =
[@inline_let] let diff = U64.sub_mod index tree in
assert (diff <= offset_range_limit);
Int.Cast.uint64_to_uint32 diff
private inline_for_extraction
let add64_fits (x:offset_t) (i:index_t): Tot bool = uint64_max - x >= (u32_64 i)
private inline_for_extraction
let join_offset (tree:offset_t) (i:index_t{add64_fits tree i}): Tot (r:offset_t{offsets_connect tree r}) =
U64.add tree (u32_64 i)
inline_for_extraction val merkle_tree_size_lg: uint32_t
let merkle_tree_size_lg = 32ul
// A Merkle tree `MT i j hs rhs_ok rhs` stores all necessary hashes to generate
// a Merkle path for each element from the index `i` to `j-1`.
// - Parameters
// `hs`: a 2-dim store for hashes, where `hs[0]` contains leaf hash values.
// `rhs_ok`: indicates whether the rightmost hashes are up-to-date
// `rhs`: a store for the "rightmost" hashes, manipulated only when required
//        to calculate Merkle paths that include some of the rightmost hashes
//        as part of them.
// `mroot`: during the construction of `rhs` we can also calculate the Merkle
// root of the tree. If `rhs_ok` is true then it has the up-to-date
// root value.
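//
// Illustrative layout (informal, not machine-checked): for a tree holding
// three leaves h0, h1, h2 with `i = 0ul` and `j = 3ul` (writing `hij` for the
// compressed hash covering leaves i..j):
//   hs[0] = [h0; h1; h2]       -- leaf hashes
//   hs[1] = [h01]              -- internal hash of h0 and h1
//   hs[2], ..., hs[31] = []    -- empty until more leaves arrive
// Since h2 has no right sibling yet, producing the root additionally needs
// the "rightmost" hashes in `rhs`; see `construct_rhs` below.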
noeq type merkle_tree =
| MT: hash_size:hash_size_t ->
offset:offset_t ->
i:index_t -> j:index_t{i <= j /\ add64_fits offset j} ->
hs:hash_vv hash_size {V.size_of hs = merkle_tree_size_lg} ->
rhs_ok:bool ->
rhs:hash_vec #hash_size {V.size_of rhs = merkle_tree_size_lg} ->
mroot:hash #hash_size ->
hash_spec:Ghost.erased (MTS.hash_fun_t #(U32.v hash_size)) ->
hash_fun:hash_fun_t #hash_size #hash_spec ->
merkle_tree
type mt_p = B.pointer merkle_tree
type const_mt_p = const_pointer merkle_tree
inline_for_extraction
let merkle_tree_conditions (#hsz:Ghost.erased hash_size_t) (offset:uint64_t) (i j:uint32_t) (hs:hash_vv hsz) (rhs_ok:bool) (rhs:hash_vec #hsz) (mroot:hash #hsz): Tot bool =
j >= i && add64_fits offset j &&
V.size_of hs = merkle_tree_size_lg &&
V.size_of rhs = merkle_tree_size_lg
// The maximum number of currently held elements in the tree is (2^32 - 1).
// cwinter: even when using 64-bit indices, we fail if the underlying 32-bit
// vector is full; this can be fixed if necessary.
private inline_for_extraction
val mt_not_full_nst: mtv:merkle_tree -> Tot bool
let mt_not_full_nst mtv = MT?.j mtv < uint32_32_max
val mt_not_full: HS.mem -> mt_p -> GTot bool
let mt_not_full h mt = mt_not_full_nst (B.get h mt 0)
/// (Memory) Safety
val offset_of: i:index_t -> Tot index_t
let offset_of i = if i % 2ul = 0ul then i else i - 1ul
// `mt_safe_elts` says that it is safe to access an element from `i` to `j - 1`
// at level `lv` in the Merkle tree, i.e., hs[lv][k] (i <= k < j) is a valid
// element.
inline_for_extraction noextract
val mt_safe_elts:
#hsz:hash_size_t ->
h:HS.mem -> lv:uint32_t{lv <= merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
i:index_t -> j:index_t{j >= i} ->
GTot Type0 (decreases (32 - U32.v lv))
let rec mt_safe_elts #hsz h lv hs i j =
if lv = merkle_tree_size_lg then true
else (let ofs = offset_of i in
V.size_of (V.get h hs lv) == j - ofs /\
mt_safe_elts #hsz h (lv + 1ul) hs (i / 2ul) (j / 2ul))
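// Illustrative example (informal, not machine-checked): `offset_of` rounds an
// index down to the nearest even value, e.g. `offset_of 4ul = 4ul` and
// `offset_of 5ul = 4ul`. For `i = 0ul` and `j = 5ul`, `mt_safe_elts` thus
// requires the following vector sizes:
//   level 0: 5 - offset_of 0 = 5
//   level 1: 2 - offset_of 0 = 2    (i = 0ul / 2ul, j = 5ul / 2ul)
//   level 2: 1 - offset_of 0 = 1
//   level 3 and above: 0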
#push-options "--initial_fuel 1 --max_fuel 1"
val mt_safe_elts_constr:
#hsz:hash_size_t ->
h:HS.mem -> lv:uint32_t{lv < merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
i:index_t -> j:index_t{j >= i} ->
Lemma (requires (V.size_of (V.get h hs lv) == j - offset_of i /\
mt_safe_elts #hsz h (lv + 1ul) hs (i / 2ul) (j / 2ul)))
(ensures (mt_safe_elts #hsz h lv hs i j))
let mt_safe_elts_constr #_ h lv hs i j = ()
val mt_safe_elts_head:
#hsz:hash_size_t ->
h:HS.mem -> lv:uint32_t{lv < merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
i:index_t -> j:index_t{j >= i} ->
Lemma (requires (mt_safe_elts #hsz h lv hs i j))
(ensures (V.size_of (V.get h hs lv) == j - offset_of i))
let mt_safe_elts_head #_ h lv hs i j = ()
val mt_safe_elts_rec:
#hsz:hash_size_t ->
h:HS.mem -> lv:uint32_t{lv < merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
i:index_t -> j:index_t{j >= i} ->
Lemma (requires (mt_safe_elts #hsz h lv hs i j))
(ensures (mt_safe_elts #hsz h (lv + 1ul) hs (i / 2ul) (j / 2ul)))
let mt_safe_elts_rec #_ h lv hs i j = ()
val mt_safe_elts_init:
#hsz:hash_size_t ->
h:HS.mem -> lv:uint32_t{lv <= merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
Lemma (requires (V.forall_ h hs lv (V.size_of hs)
(fun hv -> V.size_of hv = 0ul)))
(ensures (mt_safe_elts #hsz h lv hs 0ul 0ul))
(decreases (32 - U32.v lv))
let rec mt_safe_elts_init #hsz h lv hs =
if lv = merkle_tree_size_lg then ()
else mt_safe_elts_init #hsz h (lv + 1ul) hs
#pop-options
val mt_safe_elts_preserved:
#hsz:hash_size_t ->
lv:uint32_t{lv <= merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
i:index_t -> j:index_t{j >= i} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 hs /\
mt_safe_elts #hsz h0 lv hs i j /\
loc_disjoint p (V.loc_vector_within hs lv (V.size_of hs)) /\
modifies p h0 h1))
(ensures (mt_safe_elts #hsz h1 lv hs i j))
(decreases (32 - U32.v lv))
[SMTPat (V.live h0 hs);
SMTPat (mt_safe_elts #hsz h0 lv hs i j);
SMTPat (loc_disjoint p (RV.loc_rvector hs));
SMTPat (modifies p h0 h1)]
#push-options "--z3rlimit 100 --initial_fuel 2 --max_fuel 2"
let rec mt_safe_elts_preserved #hsz lv hs i j p h0 h1 =
if lv = merkle_tree_size_lg then ()
else (V.get_preserved hs lv p h0 h1;
mt_safe_elts_preserved #hsz (lv + 1ul) hs (i / 2ul) (j / 2ul) p h0 h1)
#pop-options
// `mt_safe` is the invariant of a Merkle tree through its lifetime.
// It includes liveness, regionality, disjointness (to each data structure),
// and valid element access (`mt_safe_elts`).
inline_for_extraction noextract
val mt_safe: HS.mem -> mt_p -> GTot Type0
let mt_safe h mt =
B.live h mt /\ B.freeable mt /\
(let mtv = B.get h mt 0 in
// Liveness & Accessibility
RV.rv_inv h (MT?.hs mtv) /\
RV.rv_inv h (MT?.rhs mtv) /\
Rgl?.r_inv (hreg (MT?.hash_size mtv)) h (MT?.mroot mtv) /\
mt_safe_elts h 0ul (MT?.hs mtv) (MT?.i mtv) (MT?.j mtv) /\
// Regionality
HH.extends (V.frameOf (MT?.hs mtv)) (B.frameOf mt) /\
HH.extends (V.frameOf (MT?.rhs mtv)) (B.frameOf mt) /\
HH.extends (B.frameOf (MT?.mroot mtv)) (B.frameOf mt) /\
HH.disjoint (V.frameOf (MT?.hs mtv)) (V.frameOf (MT?.rhs mtv)) /\
HH.disjoint (V.frameOf (MT?.hs mtv)) (B.frameOf (MT?.mroot mtv)) /\
HH.disjoint (V.frameOf (MT?.rhs mtv)) (B.frameOf (MT?.mroot mtv)))
// Since a Merkle tree satisfies regionality, it is fine to take all regions
// reachable from the tree pointer's frame as the location of the tree.
val mt_loc: mt_p -> GTot loc
let mt_loc mt = B.loc_all_regions_from false (B.frameOf mt)
val mt_safe_preserved:
mt:mt_p -> p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (mt_safe h0 mt /\
loc_disjoint p (mt_loc mt) /\
modifies p h0 h1))
(ensures (B.get h0 mt 0 == B.get h1 mt 0 /\
mt_safe h1 mt))
let mt_safe_preserved mt p h0 h1 =
assert (loc_includes (mt_loc mt) (B.loc_buffer mt));
let mtv = B.get h0 mt 0 in
assert (loc_includes (mt_loc mt) (RV.loc_rvector (MT?.hs mtv)));
assert (loc_includes (mt_loc mt) (RV.loc_rvector (MT?.rhs mtv)));
assert (loc_includes (mt_loc mt) (V.loc_vector (MT?.hs mtv)));
assert (loc_includes (mt_loc mt)
(B.loc_all_regions_from false (B.frameOf (MT?.mroot mtv))));
RV.rv_inv_preserved (MT?.hs mtv) p h0 h1;
RV.rv_inv_preserved (MT?.rhs mtv) p h0 h1;
Rgl?.r_sep (hreg (MT?.hash_size mtv)) (MT?.mroot mtv) p h0 h1;
V.loc_vector_within_included (MT?.hs mtv) 0ul (V.size_of (MT?.hs mtv));
mt_safe_elts_preserved 0ul (MT?.hs mtv) (MT?.i mtv) (MT?.j mtv) p h0 h1
/// Lifting to a high-level Merkle tree structure
val mt_safe_elts_spec:
#hsz:hash_size_t ->
h:HS.mem ->
lv:uint32_t{lv <= merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
i:index_t ->
j:index_t{j >= i} ->
Lemma (requires (RV.rv_inv h hs /\
mt_safe_elts #hsz h lv hs i j))
(ensures (MTH.hs_wf_elts #(U32.v hsz)
(U32.v lv) (RV.as_seq h hs)
(U32.v i) (U32.v j)))
(decreases (32 - U32.v lv))
#push-options "--z3rlimit 100 --initial_fuel 2 --max_fuel 2"
let rec mt_safe_elts_spec #_ h lv hs i j =
if lv = merkle_tree_size_lg then ()
else mt_safe_elts_spec h (lv + 1ul) hs (i / 2ul) (j / 2ul)
#pop-options
val merkle_tree_lift:
h:HS.mem ->
mtv:merkle_tree{
RV.rv_inv h (MT?.hs mtv) /\
RV.rv_inv h (MT?.rhs mtv) /\
Rgl?.r_inv (hreg (MT?.hash_size mtv)) h (MT?.mroot mtv) /\
mt_safe_elts #(MT?.hash_size mtv) h 0ul (MT?.hs mtv) (MT?.i mtv) (MT?.j mtv)} ->
GTot (r:MTH.merkle_tree #(U32.v (MT?.hash_size mtv)) {MTH.mt_wf_elts #_ r})
let merkle_tree_lift h mtv =
mt_safe_elts_spec h 0ul (MT?.hs mtv) (MT?.i mtv) (MT?.j mtv);
MTH.MT #(U32.v (MT?.hash_size mtv))
(U32.v (MT?.i mtv))
(U32.v (MT?.j mtv))
(RV.as_seq h (MT?.hs mtv))
(MT?.rhs_ok mtv)
(RV.as_seq h (MT?.rhs mtv))
(Rgl?.r_repr (hreg (MT?.hash_size mtv)) h (MT?.mroot mtv))
(Ghost.reveal (MT?.hash_spec mtv))
val mt_lift:
h:HS.mem -> mt:mt_p{mt_safe h mt} ->
GTot (r:MTH.merkle_tree #(U32.v (MT?.hash_size (B.get h mt 0))) {MTH.mt_wf_elts #_ r})
let mt_lift h mt =
merkle_tree_lift h (B.get h mt 0)
val mt_preserved:
mt:mt_p -> p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (mt_safe h0 mt /\
loc_disjoint p (mt_loc mt) /\
modifies p h0 h1))
(ensures (mt_safe_preserved mt p h0 h1;
mt_lift h0 mt == mt_lift h1 mt))
let mt_preserved mt p h0 h1 =
assert (loc_includes (B.loc_all_regions_from false (B.frameOf mt))
(B.loc_buffer mt));
B.modifies_buffer_elim mt p h0 h1;
assert (B.get h0 mt 0 == B.get h1 mt 0);
assert (loc_includes (B.loc_all_regions_from false (B.frameOf mt))
(RV.loc_rvector (MT?.hs (B.get h0 mt 0))));
assert (loc_includes (B.loc_all_regions_from false (B.frameOf mt))
(RV.loc_rvector (MT?.rhs (B.get h0 mt 0))));
assert (loc_includes (B.loc_all_regions_from false (B.frameOf mt))
(B.loc_buffer (MT?.mroot (B.get h0 mt 0))));
RV.as_seq_preserved (MT?.hs (B.get h0 mt 0)) p h0 h1;
RV.as_seq_preserved (MT?.rhs (B.get h0 mt 0)) p h0 h1;
B.modifies_buffer_elim (MT?.mroot (B.get h0 mt 0)) p h0 h1
/// Construction
// Note that the public function for creation is `mt_create` defined below,
// which builds a tree with an initial hash.
#push-options "--z3rlimit 100 --initial_fuel 1 --max_fuel 1 --initial_ifuel 1 --max_ifuel 1"
private
val create_empty_mt:
hash_size:hash_size_t ->
hash_spec:Ghost.erased (MTS.hash_fun_t #(U32.v hash_size)) ->
hash_fun:hash_fun_t #hash_size #hash_spec ->
r:HST.erid ->
HST.ST mt_p
(requires (fun _ -> true))
(ensures (fun h0 mt h1 ->
let dmt = B.get h1 mt 0 in
// memory safety
B.frameOf mt = r /\
modifies (mt_loc mt) h0 h1 /\
mt_safe h1 mt /\
mt_not_full h1 mt /\
// correctness
MT?.hash_size dmt = hash_size /\
MT?.offset dmt = 0UL /\
merkle_tree_lift h1 dmt == MTH.create_empty_mt #_ #(Ghost.reveal hash_spec) ()))
let create_empty_mt hsz hash_spec hash_fun r =
[@inline_let] let hrg = hreg hsz in
[@inline_let] let hvrg = hvreg hsz in
[@inline_let] let hvvrg = hvvreg hsz in
let hs_region = HST.new_region r in
let hs = RV.alloc_rid hvrg merkle_tree_size_lg hs_region in
let h0 = HST.get () in
mt_safe_elts_init #hsz h0 0ul hs;
let rhs_region = HST.new_region r in
let rhs = RV.alloc_rid hrg merkle_tree_size_lg rhs_region in
let h1 = HST.get () in
assert (RV.as_seq h1 rhs == S.create 32 (MTH.hash_init #(U32.v hsz)));
RV.rv_inv_preserved hs (V.loc_vector rhs) h0 h1;
RV.as_seq_preserved hs (V.loc_vector rhs) h0 h1;
V.loc_vector_within_included hs 0ul (V.size_of hs);
mt_safe_elts_preserved #hsz 0ul hs 0ul 0ul (V.loc_vector rhs) h0 h1;
let mroot_region = HST.new_region r in
let mroot = rg_alloc hrg mroot_region in
let h2 = HST.get () in
RV.as_seq_preserved hs loc_none h1 h2;
RV.as_seq_preserved rhs loc_none h1 h2;
mt_safe_elts_preserved #hsz 0ul hs 0ul 0ul loc_none h1 h2;
let mt = B.malloc r (MT hsz 0UL 0ul 0ul hs false rhs mroot hash_spec hash_fun) 1ul in
let h3 = HST.get () in
RV.as_seq_preserved hs loc_none h2 h3;
RV.as_seq_preserved rhs loc_none h2 h3;
Rgl?.r_sep hrg mroot loc_none h2 h3;
mt_safe_elts_preserved #hsz 0ul hs 0ul 0ul loc_none h2 h3;
mt
#pop-options
/// Destruction (free)
val mt_free: mt:mt_p ->
HST.ST unit
(requires (fun h0 -> mt_safe h0 mt))
(ensures (fun h0 _ h1 -> modifies (mt_loc mt) h0 h1))
#push-options "--z3rlimit 100"
let mt_free mt =
let mtv = !*mt in
RV.free (MT?.hs mtv);
RV.free (MT?.rhs mtv);
[@inline_let] let rg = hreg (MT?.hash_size mtv) in
rg_free rg (MT?.mroot mtv);
B.free mt
#pop-options
/// Insertion
private
val as_seq_sub_upd:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector #a #rst rg ->
i:uint32_t{i < V.size_of rv} -> v:Rgl?.repr rg ->
Lemma (requires (RV.rv_inv h rv))
(ensures (S.equal (S.upd (RV.as_seq h rv) (U32.v i) v)
(S.append
(RV.as_seq_sub h rv 0ul i)
(S.cons v (RV.as_seq_sub h rv (i + 1ul) (V.size_of rv))))))
#push-options "--z3rlimit 20"
let as_seq_sub_upd #a #rst #rg h rv i v =
Seq.Properties.slice_upd (RV.as_seq h rv) 0 (U32.v i) (U32.v i) v;
Seq.Properties.slice_upd (RV.as_seq h rv) (U32.v i + 1) (U32.v (V.size_of rv)) (U32.v i) v;
RV.as_seq_seq_slice rg h (V.as_seq h rv)
0 (U32.v (V.size_of rv)) 0 (U32.v i);
assert (S.equal (S.slice (RV.as_seq h rv) 0 (U32.v i))
(RV.as_seq_sub h rv 0ul i));
RV.as_seq_seq_slice rg h (V.as_seq h rv)
0 (U32.v (V.size_of rv)) (U32.v i + 1) (U32.v (V.size_of rv));
assert (S.equal (S.slice (RV.as_seq h rv) (U32.v i + 1) (U32.v (V.size_of rv)))
(RV.as_seq_sub h rv (i + 1ul) (V.size_of rv)));
assert (S.index (S.upd (RV.as_seq h rv) (U32.v i) v) (U32.v i) == v)
#pop-options
// `hash_vv_insert_copy` inserts a hash element at level `lv` by copying its
// content and pushing it onto `hs[lv]`. For the detailed insertion procedure,
// see `insert_` and `mt_insert`.
#push-options "--z3rlimit 100 --initial_fuel 1 --max_fuel 1"
private
inline_for_extraction
val hash_vv_insert_copy:
#hsz:hash_size_t ->
lv:uint32_t{lv < merkle_tree_size_lg} ->
i:Ghost.erased index_t ->
j:index_t{
Ghost.reveal i <= j &&
U32.v j < pow2 (32 - U32.v lv) - 1 &&
j < uint32_32_max} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
v:hash #hsz ->
HST.ST unit
(requires (fun h0 ->
RV.rv_inv h0 hs /\
Rgl?.r_inv (hreg hsz) h0 v /\
HH.disjoint (V.frameOf hs) (B.frameOf v) /\
mt_safe_elts #hsz h0 lv hs (Ghost.reveal i) j))
(ensures (fun h0 _ h1 ->
// memory safety
modifies (loc_union
(RV.rs_loc_elem (hvreg hsz) (V.as_seq h0 hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul)))
h0 h1 /\
RV.rv_inv h1 hs /\
Rgl?.r_inv (hreg hsz) h1 v /\
V.size_of (V.get h1 hs lv) == j + 1ul - offset_of (Ghost.reveal i) /\
V.size_of (V.get h1 hs lv) == V.size_of (V.get h0 hs lv) + 1ul /\
mt_safe_elts #hsz h1 (lv + 1ul) hs (Ghost.reveal i / 2ul) (j / 2ul) /\
RV.rv_loc_elems h0 hs (lv + 1ul) (V.size_of hs) ==
RV.rv_loc_elems h1 hs (lv + 1ul) (V.size_of hs) /\
// correctness
(mt_safe_elts_spec #hsz h0 lv hs (Ghost.reveal i) j;
S.equal (RV.as_seq h1 hs)
(MTH.hashess_insert
(U32.v lv) (U32.v (Ghost.reveal i)) (U32.v j)
(RV.as_seq h0 hs) (Rgl?.r_repr (hreg hsz) h0 v))) /\
S.equal (S.index (RV.as_seq h1 hs) (U32.v lv))
(S.snoc (S.index (RV.as_seq h0 hs) (U32.v lv))
(Rgl?.r_repr (hreg hsz) h0 v))))
let hash_vv_insert_copy #hsz lv i j hs v =
let hh0 = HST.get () in
mt_safe_elts_rec hh0 lv hs (Ghost.reveal i) j;
/// 1) Insert an element at the level `lv`, where the new vector is not yet
/// connected to `hs`.
let ihv = RV.insert_copy (hcpy hsz) (V.index hs lv) v in
let hh1 = HST.get () in
// 1-0) Basic disjointness conditions
V.forall2_forall_left hh0 hs 0ul (V.size_of hs) lv
(fun b1 b2 -> HH.disjoint (Rgl?.region_of (hvreg hsz) b1)
(Rgl?.region_of (hvreg hsz) b2));
V.forall2_forall_right hh0 hs 0ul (V.size_of hs) lv
(fun b1 b2 -> HH.disjoint (Rgl?.region_of (hvreg hsz) b1)
(Rgl?.region_of (hvreg hsz) b2));
V.loc_vector_within_included hs lv (lv + 1ul);
V.loc_vector_within_included hs (lv + 1ul) (V.size_of hs);
V.loc_vector_within_disjoint hs lv (lv + 1ul) (lv + 1ul) (V.size_of hs);
// 1-1) For the `modifies` postcondition.
assert (modifies (RV.rs_loc_elem (hvreg hsz) (V.as_seq hh0 hs) (U32.v lv)) hh0 hh1);
// 1-2) Preservation
Rgl?.r_sep (hreg hsz) v (RV.loc_rvector (V.get hh0 hs lv)) hh0 hh1;
RV.rv_loc_elems_preserved
hs (lv + 1ul) (V.size_of hs)
(RV.loc_rvector (V.get hh0 hs lv)) hh0 hh1;
// 1-3) For `mt_safe_elts`
assert (V.size_of ihv == j + 1ul - offset_of (Ghost.reveal i)); // head updated
mt_safe_elts_preserved
(lv + 1ul) hs (Ghost.reveal i / 2ul) (j / 2ul)
(RV.loc_rvector (V.get hh0 hs lv)) hh0 hh1; // tail not yet
// 1-4) For the `rv_inv` postcondition
RV.rs_loc_elems_elem_disj
(hvreg hsz) (V.as_seq hh0 hs) (V.frameOf hs)
0 (U32.v (V.size_of hs)) 0 (U32.v lv) (U32.v lv);
RV.rs_loc_elems_parent_disj
(hvreg hsz) (V.as_seq hh0 hs) (V.frameOf hs)
0 (U32.v lv);
RV.rv_elems_inv_preserved
hs 0ul lv (RV.loc_rvector (V.get hh0 hs lv))
hh0 hh1;
assert (RV.rv_elems_inv hh1 hs 0ul lv);
RV.rs_loc_elems_elem_disj
(hvreg hsz) (V.as_seq hh0 hs) (V.frameOf hs)
0 (U32.v (V.size_of hs))
(U32.v lv + 1) (U32.v (V.size_of hs))
(U32.v lv);
RV.rs_loc_elems_parent_disj
(hvreg hsz) (V.as_seq hh0 hs) (V.frameOf hs)
(U32.v lv + 1) (U32.v (V.size_of hs));
RV.rv_elems_inv_preserved
hs (lv + 1ul) (V.size_of hs) (RV.loc_rvector (V.get hh0 hs lv))
hh0 hh1;
assert (RV.rv_elems_inv hh1 hs (lv + 1ul) (V.size_of hs));
// assert (rv_itself_inv hh1 hs);
// assert (elems_reg hh1 hs);
// 1-5) Correctness
assert (S.equal (RV.as_seq hh1 ihv)
(S.snoc (RV.as_seq hh0 (V.get hh0 hs lv)) (Rgl?.r_repr (hreg hsz) hh0 v)));
/// 2) Assign the updated vector to `hs` at the level `lv`.
RV.assign hs lv ihv;
let hh2 = HST.get () in
// 2-1) For the `modifies` postcondition.
assert (modifies (V.loc_vector_within hs lv (lv + 1ul)) hh1 hh2);
assert (modifies (loc_union
(RV.rs_loc_elem (hvreg hsz) (V.as_seq hh0 hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul))) hh0 hh2);
// 2-2) Preservation
Rgl?.r_sep (hreg hsz) v (RV.loc_rvector hs) hh1 hh2;
RV.rv_loc_elems_preserved
hs (lv + 1ul) (V.size_of hs)
(V.loc_vector_within hs lv (lv + 1ul)) hh1 hh2;
// 2-3) For `mt_safe_elts`
assert (V.size_of (V.get hh2 hs lv) == j + 1ul - offset_of (Ghost.reveal i));
mt_safe_elts_preserved
(lv + 1ul) hs (Ghost.reveal i / 2ul) (j / 2ul)
(V.loc_vector_within hs lv (lv + 1ul)) hh1 hh2;
// 2-4) Correctness
RV.as_seq_sub_preserved hs 0ul lv (loc_rvector ihv) hh0 hh1;
RV.as_seq_sub_preserved hs (lv + 1ul) merkle_tree_size_lg (loc_rvector ihv) hh0 hh1;
assert (S.equal (RV.as_seq hh2 hs)
(S.append
(RV.as_seq_sub hh0 hs 0ul lv)
(S.cons (RV.as_seq hh1 ihv)
(RV.as_seq_sub hh0 hs (lv + 1ul) merkle_tree_size_lg))));
as_seq_sub_upd hh0 hs lv (RV.as_seq hh1 ihv)
#pop-options
private
val insert_index_helper_even:
lv:uint32_t{lv < merkle_tree_size_lg} ->
j:index_t{U32.v j < pow2 (32 - U32.v lv) - 1} ->
Lemma (requires (j % 2ul <> 1ul))
(ensures (U32.v j % 2 <> 1 /\ j / 2ul == (j + 1ul) / 2ul))
let insert_index_helper_even lv j = ()
#push-options "--z3rlimit 100 --initial_fuel 1 --max_fuel 1 --initial_ifuel 1 --max_ifuel 1"
private
val insert_index_helper_odd:
lv:uint32_t{lv < merkle_tree_size_lg} ->
i:index_t ->
j:index_t{i <= j && U32.v j < pow2 (32 - U32.v lv) - 1} ->
Lemma (requires (j % 2ul = 1ul /\
j < uint32_32_max))
(ensures (U32.v j % 2 = 1 /\
U32.v (j / 2ul) < pow2 (32 - U32.v (lv + 1ul)) - 1 /\
(j + 1ul) / 2ul == j / 2ul + 1ul /\
j - offset_of i > 0ul))
let insert_index_helper_odd lv i j = ()
#pop-options
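// Arithmetic sanity check for the two index helpers above (informal, not
// machine-checked): for an even number of elements, say j = 4ul, we get
// (j + 1ul) / 2ul = 2ul = j / 2ul, so the parent level keeps its size;
// for an odd j, say j = 5ul, we get (j + 1ul) / 2ul = 3ul = j / 2ul + 1ul,
// so the parent level gains exactly one element.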
private
val loc_union_assoc_4:
a:loc -> b:loc -> c:loc -> d:loc ->
Lemma (loc_union (loc_union a b) (loc_union c d) ==
loc_union (loc_union a c) (loc_union b d))
let loc_union_assoc_4 a b c d =
loc_union_assoc (loc_union a b) c d;
loc_union_assoc a b c;
loc_union_assoc a c b;
loc_union_assoc (loc_union a c) b d
private
val insert_modifies_rec_helper:
#hsz:hash_size_t ->
lv:uint32_t{lv < merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
aloc:loc ->
h:HS.mem ->
Lemma (loc_union
(loc_union
(loc_union
(RV.rs_loc_elem (hvreg hsz) (V.as_seq h hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul)))
aloc)
(loc_union
(loc_union
(RV.rv_loc_elems h hs (lv + 1ul) (V.size_of hs))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs)))
aloc) ==
loc_union
(loc_union
(RV.rv_loc_elems h hs lv (V.size_of hs))
(V.loc_vector_within hs lv (V.size_of hs)))
aloc)
#push-options "--z3rlimit 100 --initial_fuel 2 --max_fuel 2"
let insert_modifies_rec_helper #hsz lv hs aloc h =
assert (V.loc_vector_within hs lv (V.size_of hs) ==
loc_union (V.loc_vector_within hs lv (lv + 1ul))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs)));
RV.rs_loc_elems_rec_inverse (hvreg hsz) (V.as_seq h hs) (U32.v lv) (U32.v (V.size_of hs));
assert (RV.rv_loc_elems h hs lv (V.size_of hs) ==
loc_union (RV.rs_loc_elem (hvreg hsz) (V.as_seq h hs) (U32.v lv))
(RV.rv_loc_elems h hs (lv + 1ul) (V.size_of hs)));
// Applying some association rules...
loc_union_assoc
(loc_union
(RV.rs_loc_elem (hvreg hsz) (V.as_seq h hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul))) aloc
(loc_union
(loc_union
(RV.rv_loc_elems h hs (lv + 1ul) (V.size_of hs))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs)))
aloc);
loc_union_assoc
(loc_union
(RV.rv_loc_elems h hs (lv + 1ul) (V.size_of hs))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs))) aloc aloc;
loc_union_assoc
(loc_union
(RV.rs_loc_elem (hvreg hsz) (V.as_seq h hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul)))
(loc_union
(RV.rv_loc_elems h hs (lv + 1ul) (V.size_of hs))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs)))
aloc;
loc_union_assoc_4
(RV.rs_loc_elem (hvreg hsz) (V.as_seq h hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul))
(RV.rv_loc_elems h hs (lv + 1ul) (V.size_of hs))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs))
#pop-options
private
val insert_modifies_union_loc_weakening:
l1:loc -> l2:loc -> l3:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (modifies l1 h0 h1))
(ensures (modifies (loc_union (loc_union l1 l2) l3) h0 h1))
let insert_modifies_union_loc_weakening l1 l2 l3 h0 h1 =
B.loc_includes_union_l l1 l2 l1;
B.loc_includes_union_l (loc_union l1 l2) l3 (loc_union l1 l2)
private
val insert_snoc_last_helper:
#a:Type -> s:S.seq a{S.length s > 0} -> v:a ->
Lemma (S.index (S.snoc s v) (S.length s - 1) == S.last s)
let insert_snoc_last_helper #a s v = ()
private
val rv_inv_rv_elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (RV.rv_inv h rv))
(ensures (RV.rv_elems_reg h rv i j))
let rv_inv_rv_elems_reg #a #rst #rg h rv i j = ()
// `insert_` recursively inserts the proper hashes at each level `lv` by
// accumulating a compressed hash. For example, if there are three leaf elements
// in the tree, `insert_` will change `hs` as follows:
// (`hij` is a compressed hash from `hi` to `hj`)
//
//      BEFORE INSERTION          AFTER INSERTION
// lv
// 0    h0 h1 h2           ====>  h0 h1 h2 h3
// 1    h01                       h01 h23
// 2                              h03
//
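// Concretely, inserting `h3` (so `j = 3ul` at level 0 and `acc = h3`) proceeds
// as follows (informal trace, not machine-checked):
//   lv 0: push acc onto hs[0]              -> hs[0] = [h0; h1; h2; h3]
//         j is odd, so acc := hash h2 acc   = h23; recurse with j := 1ul
//   lv 1: push acc onto hs[1]              -> hs[1] = [h01; h23]
//         j is odd, so acc := hash h01 acc  = h03; recurse with j := 0ul
//   lv 2: push acc onto hs[2]              -> hs[2] = [h03]
//         j is even, so the recursion stops here.
//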
private
val insert_:
#hsz:hash_size_t ->
#hash_spec:Ghost.erased (MTS.hash_fun_t #(U32.v hsz)) ->
lv:uint32_t{lv < merkle_tree_size_lg} ->
i:Ghost.erased index_t ->
j:index_t{
Ghost.reveal i <= j &&
U32.v j < pow2 (32 - U32.v lv) - 1 &&
j < uint32_32_max} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
acc:hash #hsz ->
hash_fun:hash_fun_t #hsz #hash_spec ->
HST.ST unit
(requires (fun h0 ->
RV.rv_inv h0 hs /\
Rgl?.r_inv (hreg hsz) h0 acc /\
HH.disjoint (V.frameOf hs) (B.frameOf acc) /\
mt_safe_elts h0 lv hs (Ghost.reveal i) j))
(ensures (fun h0 _ h1 ->
// memory safety
modifies (loc_union
(loc_union
(RV.rv_loc_elems h0 hs lv (V.size_of hs))
(V.loc_vector_within hs lv (V.size_of hs)))
(B.loc_all_regions_from false (B.frameOf acc)))
h0 h1 /\
RV.rv_inv h1 hs /\
Rgl?.r_inv (hreg hsz) h1 acc /\
mt_safe_elts h1 lv hs (Ghost.reveal i) (j + 1ul) /\
// correctness
(mt_safe_elts_spec h0 lv hs (Ghost.reveal i) j;
S.equal (RV.as_seq h1 hs)
(MTH.insert_ #(U32.v hsz) #hash_spec (U32.v lv) (U32.v (Ghost.reveal i)) (U32.v j)
(RV.as_seq h0 hs) (Rgl?.r_repr (hreg hsz) h0 acc)))))
(decreases (U32.v j))
#push-options "--z3rlimit 800 --initial_fuel 1 --max_fuel 1 --initial_ifuel 1 --max_ifuel 1"
let rec insert_ #hsz #hash_spec lv i j hs acc hash_fun =
let hh0 = HST.get () in
hash_vv_insert_copy lv i j hs acc;
let hh1 = HST.get () in
// Base conditions
V.loc_vector_within_included hs lv (lv + 1ul);
V.loc_vector_within_included hs (lv + 1ul) (V.size_of hs);
V.loc_vector_within_disjoint hs lv (lv + 1ul) (lv + 1ul) (V.size_of hs);
assert (V.size_of (V.get hh1 hs lv) == j + 1ul - offset_of (Ghost.reveal i));
assert (mt_safe_elts hh1 (lv + 1ul) hs (Ghost.reveal i / 2ul) (j / 2ul));
if j % 2ul = 1ul
then (insert_index_helper_odd lv (Ghost.reveal i) j;
assert (S.length (S.index (RV.as_seq hh0 hs) (U32.v lv)) > 0);
let lvhs = V.index hs lv in
assert (U32.v (V.size_of lvhs) ==
S.length (S.index (RV.as_seq hh0 hs) (U32.v lv)) + 1);
assert (V.size_of lvhs > 1ul);
/// 3) Update the accumulator `acc`.
hash_vec_rv_inv_r_inv hh1 (V.get hh1 hs lv) (V.size_of (V.get hh1 hs lv) - 2ul);
assert (Rgl?.r_inv (hreg hsz) hh1 acc);
hash_fun (V.index lvhs (V.size_of lvhs - 2ul)) acc acc;
let hh2 = HST.get () in
// 3-1) For the `modifies` postcondition
assert (modifies (B.loc_all_regions_from false (B.frameOf acc)) hh1 hh2);
assert (modifies
(loc_union
(loc_union
(RV.rs_loc_elem (hvreg hsz) (V.as_seq hh0 hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul)))
(B.loc_all_regions_from false (B.frameOf acc)))
hh0 hh2);
// 3-2) Preservation
RV.rv_inv_preserved
hs (B.loc_region_only false (B.frameOf acc)) hh1 hh2;
RV.as_seq_preserved
hs (B.loc_region_only false (B.frameOf acc)) hh1 hh2;
RV.rv_loc_elems_preserved
hs (lv + 1ul) (V.size_of hs)
(B.loc_region_only false (B.frameOf acc)) hh1 hh2;
assert (RV.rv_inv hh2 hs);
assert (Rgl?.r_inv (hreg hsz) hh2 acc);
// 3-3) For `mt_safe_elts`
V.get_preserved hs lv
(B.loc_region_only false (B.frameOf acc)) hh1 hh2; // head preserved
mt_safe_elts_preserved
(lv + 1ul) hs (Ghost.reveal i / 2ul) (j / 2ul)
(B.loc_region_only false (B.frameOf acc)) hh1 hh2; // tail preserved
// 3-4) Correctness
insert_snoc_last_helper
(RV.as_seq hh0 (V.get hh0 hs lv))
(Rgl?.r_repr (hreg hsz) hh0 acc);
assert (S.equal (Rgl?.r_repr (hreg hsz) hh2 acc) // `nacc` in `MTH.insert_`
((Ghost.reveal hash_spec)
(S.last (S.index (RV.as_seq hh0 hs) (U32.v lv)))
(Rgl?.r_repr (hreg hsz) hh0 acc)));
/// 4) Recursion
insert_ (lv + 1ul)
(Ghost.hide (Ghost.reveal i / 2ul)) (j / 2ul)
hs acc hash_fun;
let hh3 = HST.get () in
// 4-0) Memory safety brought from the postcondition of the recursion
assert (RV.rv_inv hh3 hs);
assert (Rgl?.r_inv (hreg hsz) hh3 acc);
assert (modifies (loc_union
(loc_union
(RV.rv_loc_elems hh0 hs (lv + 1ul) (V.size_of hs))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs)))
(B.loc_all_regions_from false (B.frameOf acc)))
hh2 hh3);
assert (modifies
(loc_union
(loc_union
(loc_union
(RV.rs_loc_elem (hvreg hsz) (V.as_seq hh0 hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul)))
(B.loc_all_regions_from false (B.frameOf acc)))
(loc_union
(loc_union
(RV.rv_loc_elems hh0 hs (lv + 1ul) (V.size_of hs))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs)))
(B.loc_all_regions_from false (B.frameOf acc))))
hh0 hh3);
// 4-1) For `mt_safe_elts`
rv_inv_rv_elems_reg hh2 hs (lv + 1ul) (V.size_of hs);
RV.rv_loc_elems_included hh2 hs (lv + 1ul) (V.size_of hs);
assert (loc_disjoint
(V.loc_vector_within hs lv (lv + 1ul))
(RV.rv_loc_elems hh2 hs (lv + 1ul) (V.size_of hs)));
assert (loc_disjoint
(V.loc_vector_within hs lv (lv + 1ul))
(B.loc_all_regions_from false (B.frameOf acc)));
V.get_preserved hs lv
(loc_union
(loc_union
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs))
(RV.rv_loc_elems hh2 hs (lv + 1ul) (V.size_of hs)))
(B.loc_all_regions_from false (B.frameOf acc)))
hh2 hh3;
assert (V.size_of (V.get hh3 hs lv) ==
j + 1ul - offset_of (Ghost.reveal i)); // head preserved
assert (mt_safe_elts hh3 (lv + 1ul) hs
(Ghost.reveal i / 2ul) (j / 2ul + 1ul)); // tail by recursion
mt_safe_elts_constr hh3 lv hs (Ghost.reveal i) (j + 1ul);
assert (mt_safe_elts hh3 lv hs (Ghost.reveal i) (j + 1ul));
// 4-2) Correctness
mt_safe_elts_spec hh2 (lv + 1ul) hs (Ghost.reveal i / 2ul) (j / 2ul);
assert (S.equal (RV.as_seq hh3 hs)
(MTH.insert_ #(U32.v hsz) #(Ghost.reveal hash_spec) (U32.v lv + 1) (U32.v (Ghost.reveal i) / 2) (U32.v j / 2)
(RV.as_seq hh2 hs) (Rgl?.r_repr (hreg hsz) hh2 acc)));
mt_safe_elts_spec hh0 lv hs (Ghost.reveal i) j;
MTH.insert_rec #(U32.v hsz) #(Ghost.reveal hash_spec) (U32.v lv) (U32.v (Ghost.reveal i)) (U32.v j)
(RV.as_seq hh0 hs) (Rgl?.r_repr (hreg hsz) hh0 acc);
assert (S.equal (RV.as_seq hh3 hs)
(MTH.insert_ #(U32.v hsz) #(Ghost.reveal hash_spec) (U32.v lv) (U32.v (Ghost.reveal i)) (U32.v j)
(RV.as_seq hh0 hs) (Rgl?.r_repr (hreg hsz) hh0 acc))))
else (insert_index_helper_even lv j;
// memory safety
assert (mt_safe_elts hh1 (lv + 1ul) hs (Ghost.reveal i / 2ul) ((j + 1ul) / 2ul));
mt_safe_elts_constr hh1 lv hs (Ghost.reveal i) (j + 1ul);
assert (mt_safe_elts hh1 lv hs (Ghost.reveal i) (j + 1ul));
assert (modifies
(loc_union
(RV.rs_loc_elem (hvreg hsz) (V.as_seq hh0 hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul)))
hh0 hh1);
insert_modifies_union_loc_weakening
(loc_union
(RV.rs_loc_elem (hvreg hsz) (V.as_seq hh0 hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul)))
(B.loc_all_regions_from false (B.frameOf acc))
(loc_union
(loc_union
(RV.rv_loc_elems hh0 hs (lv + 1ul) (V.size_of hs))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs)))
(B.loc_all_regions_from false (B.frameOf acc)))
hh0 hh1;
// correctness
mt_safe_elts_spec hh0 lv hs (Ghost.reveal i) j;
MTH.insert_base #(U32.v hsz) #(Ghost.reveal hash_spec) (U32.v lv) (U32.v (Ghost.reveal i)) (U32.v j)
(RV.as_seq hh0 hs) (Rgl?.r_repr (hreg hsz) hh0 acc);
assert (S.equal (RV.as_seq hh1 hs)
(MTH.insert_ #(U32.v hsz) #(Ghost.reveal hash_spec) (U32.v lv) (U32.v (Ghost.reveal i)) (U32.v j)
(RV.as_seq hh0 hs) (Rgl?.r_repr (hreg hsz) hh0 acc))));
/// 5) Proving the postcondition after recursion
let hh4 = HST.get () in
// 5-1) For the `modifies` postcondition.
assert (modifies
(loc_union
(loc_union
(loc_union
(RV.rs_loc_elem (hvreg hsz) (V.as_seq hh0 hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul)))
(B.loc_all_regions_from false (B.frameOf acc)))
(loc_union
(loc_union
(RV.rv_loc_elems hh0 hs (lv + 1ul) (V.size_of hs))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs)))
(B.loc_all_regions_from false (B.frameOf acc))))
hh0 hh4);
insert_modifies_rec_helper
lv hs (B.loc_all_regions_from false (B.frameOf acc)) hh0;
// 5-2) For `mt_safe_elts`
assert (mt_safe_elts hh4 lv hs (Ghost.reveal i) (j + 1ul));
// 5-3) Preservation
assert (RV.rv_inv hh4 hs);
assert (Rgl?.r_inv (hreg hsz) hh4 acc);
// 5-4) Correctness
mt_safe_elts_spec hh0 lv hs (Ghost.reveal i) j;
assert (S.equal (RV.as_seq hh4 hs)
(MTH.insert_ #(U32.v hsz) #hash_spec (U32.v lv) (U32.v (Ghost.reveal i)) (U32.v j)
(RV.as_seq hh0 hs) (Rgl?.r_repr (hreg hsz) hh0 acc))) // QED
#pop-options
private inline_for_extraction
val mt_insert_pre_nst: mtv:merkle_tree -> v:hash #(MT?.hash_size mtv) -> Tot bool
let mt_insert_pre_nst mtv v = mt_not_full_nst mtv && add64_fits (MT?.offset mtv) ((MT?.j mtv) + 1ul)
val mt_insert_pre: #hsz:Ghost.erased hash_size_t -> mt:const_mt_p -> v:hash #hsz -> HST.ST bool
(requires (fun h0 -> mt_safe h0 (CB.cast mt) /\ (MT?.hash_size (B.get h0 (CB.cast mt) 0)) = Ghost.reveal hsz))
(ensures (fun _ _ _ -> True))
let mt_insert_pre #hsz mt v =
let mt = !*(CB.cast mt) in
assert (MT?.hash_size mt == (MT?.hash_size mt));
mt_insert_pre_nst mt v
// `mt_insert` inserts a hash into a Merkle tree. Note that this operation
// modifies the contents of `v`, since it uses `v` as an accumulator during
// insertion.
#push-options "--z3rlimit 100 --initial_fuel 1 --max_fuel 1 --initial_ifuel 1 --max_ifuel 1"
val mt_insert:
hsz:Ghost.erased hash_size_t ->
mt:mt_p -> v:hash #hsz ->
HST.ST unit
(requires (fun h0 ->
let dmt = B.get h0 mt 0 in
mt_safe h0 mt /\
Rgl?.r_inv (hreg hsz) h0 v /\
HH.disjoint (B.frameOf mt) (B.frameOf v) /\
MT?.hash_size dmt = Ghost.reveal hsz /\
mt_insert_pre_nst dmt v))
(ensures (fun h0 _ h1 ->
// memory safety
modifies (loc_union
(mt_loc mt)
(B.loc_all_regions_from false (B.frameOf v)))
h0 h1 /\
mt_safe h1 mt /\
// correctness
MT?.hash_size (B.get h1 mt 0) = Ghost.reveal hsz /\
mt_lift h1 mt == MTH.mt_insert (mt_lift h0 mt) (Rgl?.r_repr (hreg hsz) h0 v)))
#pop-options
#push-options "--z3rlimit 40"
let mt_insert hsz mt v =
let hh0 = HST.get () in
let mtv = !*mt in
let hs = MT?.hs mtv in
let hsz = MT?.hash_size mtv in
insert_ #hsz #(Ghost.reveal (MT?.hash_spec mtv)) 0ul (Ghost.hide (MT?.i mtv)) (MT?.j mtv) hs v (MT?.hash_fun mtv);
let hh1 = HST.get () in
RV.rv_loc_elems_included hh0 (MT?.hs mtv) 0ul (V.size_of hs);
V.loc_vector_within_included hs 0ul (V.size_of hs);
RV.rv_inv_preserved
(MT?.rhs mtv)
(loc_union
(loc_union
(RV.rv_loc_elems hh0 hs 0ul (V.size_of hs))
(V.loc_vector_within hs 0ul (V.size_of hs)))
(B.loc_all_regions_from false (B.frameOf v)))
hh0 hh1;
RV.as_seq_preserved
(MT?.rhs mtv)
(loc_union
(loc_union
(RV.rv_loc_elems hh0 hs 0ul (V.size_of hs))
(V.loc_vector_within hs 0ul (V.size_of hs)))
(B.loc_all_regions_from false (B.frameOf v)))
hh0 hh1;
Rgl?.r_sep (hreg hsz) (MT?.mroot mtv)
(loc_union
(loc_union
(RV.rv_loc_elems hh0 hs 0ul (V.size_of hs))
(V.loc_vector_within hs 0ul (V.size_of hs)))
(B.loc_all_regions_from false (B.frameOf v)))
hh0 hh1;
mt *= MT (MT?.hash_size mtv)
(MT?.offset mtv)
(MT?.i mtv)
(MT?.j mtv + 1ul)
(MT?.hs mtv)
false // `rhs` is always deprecated right after an insertion.
(MT?.rhs mtv)
(MT?.mroot mtv)
(MT?.hash_spec mtv)
(MT?.hash_fun mtv);
let hh2 = HST.get () in
RV.rv_inv_preserved
(MT?.hs mtv) (B.loc_buffer mt) hh1 hh2;
RV.rv_inv_preserved
(MT?.rhs mtv) (B.loc_buffer mt) hh1 hh2;
RV.as_seq_preserved
(MT?.hs mtv) (B.loc_buffer mt) hh1 hh2;
RV.as_seq_preserved
(MT?.rhs mtv) (B.loc_buffer mt) hh1 hh2;
Rgl?.r_sep (hreg hsz) (MT?.mroot mtv) (B.loc_buffer mt) hh1 hh2;
mt_safe_elts_preserved
0ul (MT?.hs mtv) (MT?.i mtv) (MT?.j mtv + 1ul) (B.loc_buffer mt)
hh1 hh2
#pop-options
// `mt_create` initializes a Merkle tree with a given initial hash `init`.
// A valid Merkle tree should contain at least one element.
val mt_create_custom:
hsz:hash_size_t ->
hash_spec:Ghost.erased (MTS.hash_fun_t #(U32.v hsz)) ->
r:HST.erid -> init:hash #hsz -> hash_fun:hash_fun_t #hsz #hash_spec -> HST.ST mt_p
(requires (fun h0 ->
Rgl?.r_inv (hreg hsz) h0 init /\
HH.disjoint r (B.frameOf init)))
(ensures (fun h0 mt h1 ->
// memory safety
modifies (loc_union (mt_loc mt) (B.loc_all_regions_from false (B.frameOf init))) h0 h1 /\
mt_safe h1 mt /\
// correctness
MT?.hash_size (B.get h1 mt 0) = hsz /\
mt_lift h1 mt == MTH.mt_create (U32.v hsz) (Ghost.reveal hash_spec) (Rgl?.r_repr (hreg hsz) h0 init)))
#push-options "--z3rlimit 40"
let mt_create_custom hsz hash_spec r init hash_fun =
let hh0 = HST.get () in
let mt = create_empty_mt hsz hash_spec hash_fun r in
mt_insert hsz mt init;
let hh2 = HST.get () in
mt
#pop-options
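// A minimal usage sketch, kept in a comment because it is illustrative only
// (the names `r`, `init`, `v1` and `root` are hypothetical, and the coercion
// to a const pointer is shown informally):
//
//   let mt = mt_create_custom hsz hash_spec r init hash_fun in
//   mt_insert hsz mt v1;        (* v1 is used as an accumulator and modified *)
//   mt_get_root (CB.of_buffer mt (* const-pointer view of mt *)) root;
//   mt_free mt
//
// `mt_get_root` expects a const pointer to the tree, hence the coercion.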
/// Construction and Destruction of paths
// Since each element pointer in `path` comes from the target Merkle tree and
// each element has a different location in `MT?.hs` (thus a different region id),
// we cannot use the regionality property for `path`s. Hence we manually
// define the invariants and the representation here.
noeq type path =
| Path: hash_size:hash_size_t ->
hashes:V.vector (hash #hash_size) ->
path
type path_p = B.pointer path
type const_path_p = const_pointer path
private
let phashes (h:HS.mem) (p:path_p)
: GTot (V.vector (hash #(Path?.hash_size (B.get h p 0))))
= Path?.hashes (B.get h p 0)
// Memory safety of a path as an invariant
inline_for_extraction noextract
val path_safe:
h:HS.mem -> mtr:HH.rid -> p:path_p -> GTot Type0
let path_safe h mtr p =
B.live h p /\ B.freeable p /\
V.live h (phashes h p) /\ V.freeable (phashes h p) /\
HST.is_eternal_region (V.frameOf (phashes h p)) /\
(let hsz = Path?.hash_size (B.get h p 0) in
V.forall_all h (phashes h p)
(fun hp -> Rgl?.r_inv (hreg hsz) h hp /\
HH.includes mtr (Rgl?.region_of (hreg hsz) hp)) /\
HH.extends (V.frameOf (phashes h p)) (B.frameOf p) /\
HH.disjoint mtr (B.frameOf p))
val path_loc: path_p -> GTot loc
let path_loc p = B.loc_all_regions_from false (B.frameOf p)
val lift_path_:
#hsz:hash_size_t ->
h:HS.mem ->
hs:S.seq (hash #hsz) ->
i:nat ->
j:nat{
i <= j /\ j <= S.length hs /\
V.forall_seq hs i j (fun hp -> Rgl?.r_inv (hreg hsz) h hp)} ->
GTot (hp:MTH.path #(U32.v hsz) {S.length hp = j - i}) (decreases j)
let rec lift_path_ #hsz h hs i j =
if i = j then S.empty
else (S.snoc (lift_path_ h hs i (j - 1))
(Rgl?.r_repr (hreg hsz) h (S.index hs (j - 1))))
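// Informal reading (not machine-checked): `lift_path_ h hs i j` recurses on
// `j` and snocs, so it returns the high-level representations of the hashes
// hs[i], ..., hs[j-1] in order. For instance, writing `repr` for
// `Rgl?.r_repr (hreg hsz) h`:
//   lift_path_ h hs 0 3 = [repr hs.[0]; repr hs.[1]; repr hs.[2]]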
// Representation of a path
val lift_path:
#hsz:hash_size_t ->
h:HS.mem -> mtr:HH.rid -> p:path_p {path_safe h mtr p /\ (Path?.hash_size (B.get h p 0)) = hsz} ->
GTot (hp:MTH.path #(U32.v hsz) {S.length hp = U32.v (V.size_of (phashes h p))})
let lift_path #hsz h mtr p =
lift_path_ h (V.as_seq h (phashes h p))
0 (S.length (V.as_seq h (phashes h p)))
val lift_path_index_:
#hsz:hash_size_t ->
h:HS.mem ->
hs:S.seq (hash #hsz) ->
i:nat -> j:nat{i <= j && j <= S.length hs} ->
k:nat{i <= k && k < j} ->
Lemma (requires (V.forall_seq hs i j (fun hp -> Rgl?.r_inv (hreg hsz) h hp)))
(ensures (Rgl?.r_repr (hreg hsz) h (S.index hs k) ==
S.index (lift_path_ h hs i j) (k - i)))
(decreases j)
[SMTPat (S.index (lift_path_ h hs i j) (k - i))]
#push-options "--initial_fuel 1 --max_fuel 1 --initial_ifuel 1 --max_ifuel 1"
let rec lift_path_index_ #hsz h hs i j k =
if i = j then ()
else if k = j - 1 then ()
else lift_path_index_ #hsz h hs i (j - 1) k
#pop-options
val lift_path_index:
h:HS.mem -> mtr:HH.rid ->
p:path_p -> i:uint32_t ->
Lemma (requires (path_safe h mtr p /\
i < V.size_of (phashes h p)))
(ensures (let hsz = Path?.hash_size (B.get h p 0) in
Rgl?.r_repr (hreg hsz) h (V.get h (phashes h p) i) ==
S.index (lift_path #(hsz) h mtr p) (U32.v i)))
let lift_path_index h mtr p i =
lift_path_index_ h (V.as_seq h (phashes h p))
0 (S.length (V.as_seq h (phashes h p))) (U32.v i)
val lift_path_eq:
#hsz:hash_size_t ->
h:HS.mem ->
hs1:S.seq (hash #hsz) -> hs2:S.seq (hash #hsz) ->
i:nat -> j:nat ->
Lemma (requires (i <= j /\ j <= S.length hs1 /\ j <= S.length hs2 /\
S.equal (S.slice hs1 i j) (S.slice hs2 i j) /\
V.forall_seq hs1 i j (fun hp -> Rgl?.r_inv (hreg hsz) h hp) /\
V.forall_seq hs2 i j (fun hp -> Rgl?.r_inv (hreg hsz) h hp)))
(ensures (S.equal (lift_path_ h hs1 i j) (lift_path_ h hs2 i j)))
let lift_path_eq #hsz h hs1 hs2 i j =
assert (forall (k:nat{i <= k && k < j}).
S.index (lift_path_ h hs1 i j) (k - i) ==
Rgl?.r_repr (hreg hsz) h (S.index hs1 k));
assert (forall (k:nat{i <= k && k < j}).
S.index (lift_path_ h hs2 i j) (k - i) ==
Rgl?.r_repr (hreg hsz) h (S.index hs2 k));
assert (forall (k:nat{k < j - i}).
S.index (lift_path_ h hs1 i j) k ==
Rgl?.r_repr (hreg hsz) h (S.index hs1 (k + i)));
assert (forall (k:nat{k < j - i}).
S.index (lift_path_ h hs2 i j) k ==
Rgl?.r_repr (hreg hsz) h (S.index hs2 (k + i)));
assert (forall (k:nat{k < j - i}).
S.index (S.slice hs1 i j) k == S.index (S.slice hs2 i j) k);
assert (forall (k:nat{i <= k && k < j}).
S.index (S.slice hs1 i j) (k - i) == S.index (S.slice hs2 i j) (k - i))
private
val path_safe_preserved_:
#hsz:hash_size_t ->
mtr:HH.rid -> hs:S.seq (hash #hsz) ->
i:nat -> j:nat{i <= j && j <= S.length hs} ->
dl:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma
(requires (V.forall_seq hs i j
(fun hp ->
Rgl?.r_inv (hreg hsz) h0 hp /\
HH.includes mtr (Rgl?.region_of (hreg hsz) hp)) /\
loc_disjoint dl (B.loc_all_regions_from false mtr) /\
modifies dl h0 h1))
(ensures (V.forall_seq hs i j
(fun hp ->
Rgl?.r_inv (hreg hsz) h1 hp /\
HH.includes mtr (Rgl?.region_of (hreg hsz) hp))))
(decreases j)
let rec path_safe_preserved_ #hsz mtr hs i j dl h0 h1 =
if i = j then ()
else (assert (loc_includes
(B.loc_all_regions_from false mtr)
(B.loc_all_regions_from false
(Rgl?.region_of (hreg hsz) (S.index hs (j - 1)))));
Rgl?.r_sep (hreg hsz) (S.index hs (j - 1)) dl h0 h1;
path_safe_preserved_ mtr hs i (j - 1) dl h0 h1)
val path_safe_preserved:
mtr:HH.rid -> p:path_p ->
dl:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (path_safe h0 mtr p /\
loc_disjoint dl (path_loc p) /\
loc_disjoint dl (B.loc_all_regions_from false mtr) /\
modifies dl h0 h1))
(ensures (path_safe h1 mtr p))
let path_safe_preserved mtr p dl h0 h1 =
assert (loc_includes (path_loc p) (B.loc_buffer p));
assert (loc_includes (path_loc p) (V.loc_vector (phashes h0 p)));
path_safe_preserved_
mtr (V.as_seq h0 (phashes h0 p))
0 (S.length (V.as_seq h0 (phashes h0 p))) dl h0 h1
val path_safe_init_preserved:
mtr:HH.rid -> p:path_p ->
dl:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (path_safe h0 mtr p /\
V.size_of (phashes h0 p) = 0ul /\
B.loc_disjoint dl (path_loc p) /\
modifies dl h0 h1))
(ensures (path_safe h1 mtr p /\
V.size_of (phashes h1 p) = 0ul))
let path_safe_init_preserved mtr p dl h0 h1 =
assert (loc_includes (path_loc p) (B.loc_buffer p));
assert (loc_includes (path_loc p) (V.loc_vector (phashes h0 p)))
val path_preserved_:
#hsz:hash_size_t ->
mtr:HH.rid ->
hs:S.seq (hash #hsz) ->
i:nat -> j:nat{i <= j && j <= S.length hs} ->
dl:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.forall_seq hs i j
(fun hp -> Rgl?.r_inv (hreg hsz) h0 hp /\
HH.includes mtr (Rgl?.region_of (hreg hsz) hp)) /\
loc_disjoint dl (B.loc_all_regions_from false mtr) /\
modifies dl h0 h1))
(ensures (path_safe_preserved_ mtr hs i j dl h0 h1;
S.equal (lift_path_ h0 hs i j)
(lift_path_ h1 hs i j)))
(decreases j)
#push-options "--initial_fuel 1 --max_fuel 1 --initial_ifuel 1 --max_ifuel 1"
let rec path_preserved_ #hsz mtr hs i j dl h0 h1 =
if i = j then ()
else (path_safe_preserved_ mtr hs i (j - 1) dl h0 h1;
path_preserved_ mtr hs i (j - 1) dl h0 h1;
assert (loc_includes
(B.loc_all_regions_from false mtr)
(B.loc_all_regions_from false
(Rgl?.region_of (hreg hsz) (S.index hs (j - 1)))));
Rgl?.r_sep (hreg hsz) (S.index hs (j - 1)) dl h0 h1)
#pop-options
val path_preserved:
mtr:HH.rid -> p:path_p ->
dl:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (path_safe h0 mtr p /\
loc_disjoint dl (path_loc p) /\
loc_disjoint dl (B.loc_all_regions_from false mtr) /\
modifies dl h0 h1))
(ensures (path_safe_preserved mtr p dl h0 h1;
let hsz0 = (Path?.hash_size (B.get h0 p 0)) in
let hsz1 = (Path?.hash_size (B.get h1 p 0)) in
let b:MTH.path = lift_path #hsz0 h0 mtr p in
let a:MTH.path = lift_path #hsz1 h1 mtr p in
hsz0 = hsz1 /\ S.equal b a))
let path_preserved mtr p dl h0 h1 =
assert (loc_includes (path_loc p) (B.loc_buffer p));
assert (loc_includes (path_loc p) (V.loc_vector (phashes h0 p)));
path_preserved_ mtr (V.as_seq h0 (phashes h0 p))
0 (S.length (V.as_seq h0 (phashes h0 p)))
dl h0 h1
val init_path:
hsz:hash_size_t ->
mtr:HH.rid -> r:HST.erid ->
HST.ST path_p
(requires (fun h0 -> HH.disjoint mtr r))
(ensures (fun h0 p h1 ->
// memory safety
path_safe h1 mtr p /\
// correctness
Path?.hash_size (B.get h1 p 0) = hsz /\
S.equal (lift_path #hsz h1 mtr p) S.empty))
let init_path hsz mtr r =
let nrid = HST.new_region r in
(B.malloc r (Path hsz (rg_alloc (hvreg hsz) nrid)) 1ul)
val clear_path:
mtr:HH.rid -> p:path_p ->
HST.ST unit
(requires (fun h0 -> path_safe h0 mtr p))
(ensures (fun h0 _ h1 ->
// memory safety
path_safe h1 mtr p /\
// correctness
V.size_of (phashes h1 p) = 0ul /\
S.equal (lift_path #(Path?.hash_size (B.get h1 p 0)) h1 mtr p) S.empty))
let clear_path mtr p =
let pv = !*p in
p *= Path (Path?.hash_size pv) (V.clear (Path?.hashes pv))
val free_path:
p:path_p ->
HST.ST unit
(requires (fun h0 ->
B.live h0 p /\ B.freeable p /\
V.live h0 (phashes h0 p) /\ V.freeable (phashes h0 p) /\
HH.extends (V.frameOf (phashes h0 p)) (B.frameOf p)))
(ensures (fun h0 _ h1 ->
modifies (path_loc p) h0 h1))
let free_path p =
let pv = !*p in
V.free (Path?.hashes pv);
B.free p
/// Getting the Merkle root and path
// Construct "rightmost hashes" for a given (incomplete) Merkle tree.
// This function calculates the Merkle root as well, which is the final
// accumulator value.
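//
// Illustrative trace (informal, not machine-checked) for a three-leaf tree
// (hs[0] = [h0; h1; h2], hs[1] = [h01], i = 0ul, j = 3ul), starting with
// actd = false:
//   lv 0: j = 3 is odd and actd is false, so acc := hs[0][2] = h2 (a copy;
//         rhs[0] stays untouched); recurse with j := 1ul and actd := true.
//   lv 1: j = 1 is odd and actd is true, so rhs[1] := acc = h2 and then
//         acc := hash hs[1][0] acc = hash h01 h2; recurse with j := 0ul.
//   lv 2: j = 0, so the recursion stops; the final accumulator value
//         hash h01 h2 is the Merkle root.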
private
val construct_rhs:
#hsz:hash_size_t ->
#hash_spec:Ghost.erased (MTS.hash_fun_t #(U32.v hsz)) ->
lv:uint32_t{lv <= merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
rhs:hash_vec #hsz {V.size_of rhs = merkle_tree_size_lg} ->
i:index_t ->
j:index_t{i <= j && (U32.v j) < pow2 (32 - U32.v lv)} ->
acc:hash #hsz ->
actd:bool ->
hash_fun:hash_fun_t #hsz #(Ghost.reveal hash_spec) ->
HST.ST unit
(requires (fun h0 ->
RV.rv_inv h0 hs /\ RV.rv_inv h0 rhs /\
HH.disjoint (V.frameOf hs) (V.frameOf rhs) /\
Rgl?.r_inv (hreg hsz) h0 acc /\
HH.disjoint (B.frameOf acc) (V.frameOf hs) /\
HH.disjoint (B.frameOf acc) (V.frameOf rhs) /\
mt_safe_elts #hsz h0 lv hs i j))
(ensures (fun h0 _ h1 ->
// memory safety
modifies (loc_union
(RV.loc_rvector rhs)
(B.loc_all_regions_from false (B.frameOf acc)))
h0 h1 /\
RV.rv_inv h1 rhs /\
Rgl?.r_inv (hreg hsz) h1 acc /\
// correctness
(mt_safe_elts_spec #hsz h0 lv hs i j;
MTH.construct_rhs #(U32.v hsz) #(Ghost.reveal hash_spec)
(U32.v lv)
(Rgl?.r_repr (hvvreg hsz) h0 hs)
(Rgl?.r_repr (hvreg hsz) h0 rhs)
(U32.v i) (U32.v j)
(Rgl?.r_repr (hreg hsz) h0 acc) actd ==
(Rgl?.r_repr (hvreg hsz) h1 rhs, Rgl?.r_repr (hreg hsz) h1 acc)
)))
(decreases (U32.v j))
#push-options "--z3rlimit 250 --initial_fuel 1 --max_fuel 1 --initial_ifuel 1 --max_ifuel 1"
let rec construct_rhs #hsz #hash_spec lv hs rhs i j acc actd hash_fun =
let hh0 = HST.get () in
if j = 0ul then begin
assert (RV.rv_inv hh0 hs);
assert (mt_safe_elts #hsz hh0 lv hs i j);
mt_safe_elts_spec #hsz hh0 lv hs 0ul 0ul;
assert (MTH.hs_wf_elts #(U32.v hsz)
(U32.v lv) (RV.as_seq hh0 hs)
(U32.v i) (U32.v j));
let hh1 = HST.get() in
assert (MTH.construct_rhs #(U32.v hsz) #(Ghost.reveal hash_spec)
(U32.v lv)
(Rgl?.r_repr (hvvreg hsz) hh0 hs)
(Rgl?.r_repr (hvreg hsz) hh0 rhs)
(U32.v i) (U32.v j)
(Rgl?.r_repr (hreg hsz) hh0 acc) actd ==
(Rgl?.r_repr (hvreg hsz) hh1 rhs, Rgl?.r_repr (hreg hsz) hh1 acc))
end
else
let ofs = offset_of i in
begin
(if j % 2ul = 0ul
then begin
Math.Lemmas.pow2_double_mult (32 - U32.v lv - 1);
mt_safe_elts_rec #hsz hh0 lv hs i j;
construct_rhs #hsz #hash_spec (lv + 1ul) hs rhs (i / 2ul) (j / 2ul) acc actd hash_fun;
let hh1 = HST.get () in
// correctness
mt_safe_elts_spec #hsz hh0 lv hs i j;
MTH.construct_rhs_even #(U32.v hsz) #hash_spec
(U32.v lv) (Rgl?.r_repr (hvvreg hsz) hh0 hs) (Rgl?.r_repr (hvreg hsz) hh0 rhs)
(U32.v i) (U32.v j) (Rgl?.r_repr (hreg hsz) hh0 acc) actd;
assert (MTH.construct_rhs #(U32.v hsz) #hash_spec
(U32.v lv)
(Rgl?.r_repr (hvvreg hsz) hh0 hs)
(Rgl?.r_repr (hvreg hsz) hh0 rhs)
(U32.v i) (U32.v j)
(Rgl?.r_repr (hreg hsz) hh0 acc)
actd ==
(Rgl?.r_repr (hvreg hsz) hh1 rhs, Rgl?.r_repr (hreg hsz) hh1 acc))
end
else begin
if actd
then begin
RV.assign_copy (hcpy hsz) rhs lv acc;
let hh1 = HST.get () in
// memory safety
Rgl?.r_sep (hreg hsz) acc
(B.loc_all_regions_from false (V.frameOf rhs)) hh0 hh1;
RV.rv_inv_preserved
hs (B.loc_all_regions_from false (V.frameOf rhs))
hh0 hh1;
RV.as_seq_preserved
hs (B.loc_all_regions_from false (V.frameOf rhs))
hh0 hh1;
RV.rv_inv_preserved
(V.get hh0 hs lv) (B.loc_all_regions_from false (V.frameOf rhs))
hh0 hh1;
V.loc_vector_within_included hs lv (V.size_of hs);
mt_safe_elts_preserved lv hs i j
(B.loc_all_regions_from false (V.frameOf rhs))
hh0 hh1;
mt_safe_elts_head hh1 lv hs i j;
hash_vv_rv_inv_r_inv hh1 hs lv (j - 1ul - ofs);
// correctness
assert (S.equal (RV.as_seq hh1 rhs)
(S.upd (RV.as_seq hh0 rhs) (U32.v lv)
(Rgl?.r_repr (hreg hsz) hh0 acc)));
hash_fun (V.index (V.index hs lv) (j - 1ul - ofs)) acc acc;
let hh2 = HST.get () in
// memory safety
mt_safe_elts_preserved lv hs i j
(B.loc_all_regions_from false (B.frameOf acc)) hh1 hh2;
RV.rv_inv_preserved
hs (B.loc_region_only false (B.frameOf acc)) hh1 hh2;
RV.rv_inv_preserved
rhs (B.loc_region_only false (B.frameOf acc)) hh1 hh2;
RV.as_seq_preserved
hs (B.loc_region_only false (B.frameOf acc)) hh1 hh2;
RV.as_seq_preserved
rhs (B.loc_region_only false (B.frameOf acc)) hh1 hh2;
// correctness
hash_vv_as_seq_get_index hh0 hs lv (j - 1ul - ofs);
assert (Rgl?.r_repr (hreg hsz) hh2 acc ==
(Ghost.reveal hash_spec) (S.index (S.index (RV.as_seq hh0 hs) (U32.v lv))
(U32.v j - 1 - U32.v ofs))
(Rgl?.r_repr (hreg hsz) hh0 acc))
end
else begin
mt_safe_elts_head hh0 lv hs i j;
hash_vv_rv_inv_r_inv hh0 hs lv (j - 1ul - ofs);
hash_vv_rv_inv_disjoint hh0 hs lv (j - 1ul - ofs) (B.frameOf acc);
Cpy?.copy (hcpy hsz) hsz (V.index (V.index hs lv) (j - 1ul - ofs)) acc;
let hh1 = HST.get () in
// memory safety
V.loc_vector_within_included hs lv (V.size_of hs);
mt_safe_elts_preserved lv hs i j
(B.loc_all_regions_from false (B.frameOf acc)) hh0 hh1;
RV.rv_inv_preserved
hs (B.loc_all_regions_from false (B.frameOf acc)) hh0 hh1;
RV.rv_inv_preserved
rhs (B.loc_all_regions_from false (B.frameOf acc)) hh0 hh1;
RV.as_seq_preserved
hs (B.loc_all_regions_from false (B.frameOf acc)) hh0 hh1;
RV.as_seq_preserved
rhs (B.loc_all_regions_from false (B.frameOf acc)) hh0 hh1;
// correctness
hash_vv_as_seq_get_index hh0 hs lv (j - 1ul - ofs);
assert (Rgl?.r_repr (hreg hsz) hh1 acc ==
S.index (S.index (RV.as_seq hh0 hs) (U32.v lv))
(U32.v j - 1 - U32.v ofs))
end;
let hh3 = HST.get () in
assert (S.equal (RV.as_seq hh3 hs) (RV.as_seq hh0 hs));
assert (S.equal (RV.as_seq hh3 rhs)
(if actd
then S.upd (RV.as_seq hh0 rhs) (U32.v lv)
(Rgl?.r_repr (hreg hsz) hh0 acc)
else RV.as_seq hh0 rhs));
assert (Rgl?.r_repr (hreg hsz) hh3 acc ==
(if actd
then (Ghost.reveal hash_spec) (S.index (S.index (RV.as_seq hh0 hs) (U32.v lv))
(U32.v j - 1 - U32.v ofs))
(Rgl?.r_repr (hreg hsz) hh0 acc)
else S.index (S.index (RV.as_seq hh0 hs) (U32.v lv))
(U32.v j - 1 - U32.v ofs)));
mt_safe_elts_rec hh3 lv hs i j;
construct_rhs #hsz #hash_spec (lv + 1ul) hs rhs (i / 2ul) (j / 2ul) acc true hash_fun;
let hh4 = HST.get () in
mt_safe_elts_spec hh3 (lv + 1ul) hs (i / 2ul) (j / 2ul);
assert (MTH.construct_rhs #(U32.v hsz) #hash_spec
(U32.v lv + 1)
(Rgl?.r_repr (hvvreg hsz) hh3 hs)
(Rgl?.r_repr (hvreg hsz) hh3 rhs)
(U32.v i / 2) (U32.v j / 2)
(Rgl?.r_repr (hreg hsz) hh3 acc) true ==
(Rgl?.r_repr (hvreg hsz) hh4 rhs, Rgl?.r_repr (hreg hsz) hh4 acc));
mt_safe_elts_spec hh0 lv hs i j;
MTH.construct_rhs_odd #(U32.v hsz) #hash_spec
(U32.v lv) (Rgl?.r_repr (hvvreg hsz) hh0 hs) (Rgl?.r_repr (hvreg hsz) hh0 rhs)
(U32.v i) (U32.v j) (Rgl?.r_repr (hreg hsz) hh0 acc) actd;
assert (MTH.construct_rhs #(U32.v hsz) #hash_spec
(U32.v lv)
(Rgl?.r_repr (hvvreg hsz) hh0 hs)
(Rgl?.r_repr (hvreg hsz) hh0 rhs)
(U32.v i) (U32.v j)
(Rgl?.r_repr (hreg hsz) hh0 acc) actd ==
(Rgl?.r_repr (hvreg hsz) hh4 rhs, Rgl?.r_repr (hreg hsz) hh4 acc))
end)
end
#pop-options
private inline_for_extraction
val mt_get_root_pre_nst: mtv:merkle_tree -> rt:hash #(MT?.hash_size mtv) -> Tot bool
let mt_get_root_pre_nst mtv rt = true
val mt_get_root_pre:
#hsz:Ghost.erased hash_size_t ->
mt:const_mt_p ->
rt:hash #hsz ->
HST.ST bool
(requires (fun h0 ->
let mt = CB.cast mt in
MT?.hash_size (B.get h0 mt 0) = Ghost.reveal hsz /\
mt_safe h0 mt /\ Rgl?.r_inv (hreg hsz) h0 rt /\
HH.disjoint (B.frameOf mt) (B.frameOf rt)))
(ensures (fun _ _ _ -> True))
let mt_get_root_pre #hsz mt rt =
let mt = CB.cast mt in
let mt = !*mt in
let hsz = MT?.hash_size mt in
assert (MT?.hash_size mt = hsz);
mt_get_root_pre_nst mt rt
// `mt_get_root` returns the Merkle root. If it has already been calculated
// from up-to-date hashes, the cached root is returned immediately. Otherwise
// it calls `construct_rhs` to build the rightmost hashes and to calculate the
// Merkle root as well.
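// For the three-leaf example traced under `construct_rhs`, the first call
// computes the root hash h01 and h2 combined (informally, hash h01 h2),
// copies it into `mroot`, and sets `rhs_ok`; later calls on an unchanged tree
// simply copy `mroot` into `rt`.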
val mt_get_root:
#hsz:Ghost.erased hash_size_t ->
mt:const_mt_p ->
rt:hash #hsz ->
HST.ST unit
(requires (fun h0 ->
let mt = CB.cast mt in
let dmt = B.get h0 mt 0 in
MT?.hash_size dmt = (Ghost.reveal hsz) /\
mt_get_root_pre_nst dmt rt /\
mt_safe h0 mt /\ Rgl?.r_inv (hreg hsz) h0 rt /\
HH.disjoint (B.frameOf mt) (B.frameOf rt)))
(ensures (fun h0 _ h1 ->
let mt = CB.cast mt in
// memory safety
modifies (loc_union
(mt_loc mt)
(B.loc_all_regions_from false (B.frameOf rt)))
h0 h1 /\
mt_safe h1 mt /\
(let mtv0 = B.get h0 mt 0 in
let mtv1 = B.get h1 mt 0 in
MT?.hash_size mtv0 = (Ghost.reveal hsz) /\
MT?.hash_size mtv1 = (Ghost.reveal hsz) /\
MT?.i mtv1 = MT?.i mtv0 /\ MT?.j mtv1 = MT?.j mtv0 /\
MT?.hs mtv1 == MT?.hs mtv0 /\ MT?.rhs mtv1 == MT?.rhs mtv0 /\
MT?.offset mtv1 == MT?.offset mtv0 /\
MT?.rhs_ok mtv1 = true /\
Rgl?.r_inv (hreg hsz) h1 rt /\
// correctness
MTH.mt_get_root (mt_lift h0 mt) (Rgl?.r_repr (hreg hsz) h0 rt) ==
(mt_lift h1 mt, Rgl?.r_repr (hreg hsz) h1 rt))))
#push-options "--z3rlimit 150 --initial_fuel 1 --max_fuel 1"
let mt_get_root #hsz mt rt =
let mt = CB.cast mt in
let hh0 = HST.get () in
let mtv = !*mt in
let prefix = MT?.offset mtv in
let i = MT?.i mtv in
let j = MT?.j mtv in
let hs = MT?.hs mtv in
let rhs = MT?.rhs mtv in
let mroot = MT?.mroot mtv in
let hash_size = MT?.hash_size mtv in
let hash_spec = MT?.hash_spec mtv in
let hash_fun = MT?.hash_fun mtv in
if MT?.rhs_ok mtv
then begin
Cpy?.copy (hcpy hash_size) hash_size mroot rt;
let hh1 = HST.get () in
mt_safe_preserved mt
(B.loc_all_regions_from false (Rgl?.region_of (hreg hsz) rt)) hh0 hh1;
mt_preserved mt
(B.loc_all_regions_from false (Rgl?.region_of (hreg hsz) rt)) hh0 hh1;
MTH.mt_get_root_rhs_ok_true
(mt_lift hh0 mt) (Rgl?.r_repr (hreg hsz) hh0 rt);
assert (MTH.mt_get_root (mt_lift hh0 mt) (Rgl?.r_repr (hreg hsz) hh0 rt) ==
(mt_lift hh1 mt, Rgl?.r_repr (hreg hsz) hh1 rt))
end
else begin
construct_rhs #hash_size #hash_spec 0ul hs rhs i j rt false hash_fun;
let hh1 = HST.get () in
// memory safety
assert (RV.rv_inv hh1 rhs);
assert (Rgl?.r_inv (hreg hsz) hh1 rt);
assert (B.live hh1 mt);
RV.rv_inv_preserved
hs (loc_union
(RV.loc_rvector rhs)
(B.loc_all_regions_from false (B.frameOf rt)))
hh0 hh1;
RV.as_seq_preserved
hs (loc_union
(RV.loc_rvector rhs)
(B.loc_all_regions_from false (B.frameOf rt)))
hh0 hh1;
V.loc_vector_within_included hs 0ul (V.size_of hs);
mt_safe_elts_preserved 0ul hs i j
(loc_union
(RV.loc_rvector rhs)
(B.loc_all_regions_from false (B.frameOf rt)))
hh0 hh1;
// correctness
mt_safe_elts_spec hh0 0ul hs i j;
assert (MTH.construct_rhs #(U32.v hash_size) #hash_spec 0
(Rgl?.r_repr (hvvreg hsz) hh0 hs)
(Rgl?.r_repr (hvreg hsz) hh0 rhs)
(U32.v i) (U32.v j)
(Rgl?.r_repr (hreg hsz) hh0 rt) false ==
(Rgl?.r_repr (hvreg hsz) hh1 rhs, Rgl?.r_repr (hreg hsz) hh1 rt));
Cpy?.copy (hcpy hash_size) hash_size rt mroot;
let hh2 = HST.get () in
// memory safety
RV.rv_inv_preserved
hs (B.loc_all_regions_from false (B.frameOf mroot))
hh1 hh2;
RV.rv_inv_preserved
rhs (B.loc_all_regions_from false (B.frameOf mroot))
hh1 hh2;
RV.as_seq_preserved
hs (B.loc_all_regions_from false (B.frameOf mroot))
hh1 hh2;
RV.as_seq_preserved
rhs (B.loc_all_regions_from false (B.frameOf mroot))
hh1 hh2;
B.modifies_buffer_elim
rt (B.loc_all_regions_from false (B.frameOf mroot))
hh1 hh2;
mt_safe_elts_preserved 0ul hs i j
(B.loc_all_regions_from false (B.frameOf mroot))
hh1 hh2;
// correctness
assert (Rgl?.r_repr (hreg hsz) hh2 mroot == Rgl?.r_repr (hreg hsz) hh1 rt);
mt *= MT hash_size prefix i j hs true rhs mroot hash_spec hash_fun;
let hh3 = HST.get () in
// memory safety
Rgl?.r_sep (hreg hsz) rt (B.loc_buffer mt) hh2 hh3;
RV.rv_inv_preserved hs (B.loc_buffer mt) hh2 hh3;
RV.rv_inv_preserved rhs (B.loc_buffer mt) hh2 hh3;
RV.as_seq_preserved hs (B.loc_buffer mt) hh2 hh3;
RV.as_seq_preserved rhs (B.loc_buffer mt) hh2 hh3;
Rgl?.r_sep (hreg hsz) mroot (B.loc_buffer mt) hh2 hh3;
mt_safe_elts_preserved 0ul hs i j
(B.loc_buffer mt) hh2 hh3;
assert (mt_safe hh3 mt);
// correctness
MTH.mt_get_root_rhs_ok_false
(mt_lift hh0 mt) (Rgl?.r_repr (hreg hsz) hh0 rt);
assert (MTH.mt_get_root (mt_lift hh0 mt) (Rgl?.r_repr (hreg hsz) hh0 rt) ==
(MTH.MT #(U32.v hash_size)
(U32.v i) (U32.v j)
(RV.as_seq hh0 hs)
true
(RV.as_seq hh1 rhs)
(Rgl?.r_repr (hreg hsz) hh1 rt)
hash_spec,
Rgl?.r_repr (hreg hsz) hh1 rt));
assert (MTH.mt_get_root (mt_lift hh0 mt) (Rgl?.r_repr (hreg hsz) hh0 rt) ==
(mt_lift hh3 mt, Rgl?.r_repr (hreg hsz) hh3 rt))
end
#pop-options
inline_for_extraction
val mt_path_insert:
#hsz:hash_size_t ->
mtr:HH.rid -> p:path_p -> hp:hash #hsz ->
HST.ST unit
(requires (fun h0 ->
path_safe h0 mtr p /\
not (V.is_full (phashes h0 p)) /\
Rgl?.r_inv (hreg hsz) h0 hp /\
HH.disjoint mtr (B.frameOf p) /\
HH.includes mtr (B.frameOf hp) /\
Path?.hash_size (B.get h0 p 0) = hsz))
(ensures (fun h0 _ h1 ->
// memory safety
modifies (path_loc p) h0 h1 /\
path_safe h1 mtr p /\
// correctness
(let hsz0 = Path?.hash_size (B.get h0 p 0) in
let hsz1 = Path?.hash_size (B.get h1 p 0) in
(let before:(S.seq (MTH.hash #(U32.v hsz0))) = lift_path h0 mtr p in
let after:(S.seq (MTH.hash #(U32.v hsz1))) = lift_path h1 mtr p in
V.size_of (phashes h1 p) = V.size_of (phashes h0 p) + 1ul /\
hsz = hsz0 /\ hsz = hsz1 /\
(let hspec:(S.seq (MTH.hash #(U32.v hsz))) = (MTH.path_insert #(U32.v hsz) before (Rgl?.r_repr (hreg hsz) h0 hp)) in
S.equal hspec after)))))
#push-options "--z3rlimit 20 --initial_fuel 1 --max_fuel 1"
let mt_path_insert #hsz mtr p hp =
let pth = !*p in
let pv = Path?.hashes pth in
let hh0 = HST.get () in
let ipv = V.insert pv hp in
let hh1 = HST.get () in
path_safe_preserved_
mtr (V.as_seq hh0 pv) 0 (S.length (V.as_seq hh0 pv))
(B.loc_all_regions_from false (V.frameOf ipv)) hh0 hh1;
path_preserved_
mtr (V.as_seq hh0 pv) 0 (S.length (V.as_seq hh0 pv))
(B.loc_all_regions_from false (V.frameOf ipv)) hh0 hh1;
Rgl?.r_sep (hreg hsz) hp
(B.loc_all_regions_from false (V.frameOf ipv)) hh0 hh1;
p *= Path hsz ipv;
let hh2 = HST.get () in
path_safe_preserved_
mtr (V.as_seq hh1 ipv) 0 (S.length (V.as_seq hh1 ipv))
(B.loc_region_only false (B.frameOf p)) hh1 hh2;
path_preserved_
mtr (V.as_seq hh1 ipv) 0 (S.length (V.as_seq hh1 ipv))
(B.loc_region_only false (B.frameOf p)) hh1 hh2;
Rgl?.r_sep (hreg hsz) hp
(B.loc_region_only false (B.frameOf p)) hh1 hh2;
assert (S.equal (lift_path hh2 mtr p)
(lift_path_ hh1 (S.snoc (V.as_seq hh0 pv) hp)
0 (S.length (V.as_seq hh1 ipv))));
lift_path_eq hh1 (S.snoc (V.as_seq hh0 pv) hp) (V.as_seq hh0 pv)
0 (S.length (V.as_seq hh0 pv))
#pop-options
// Given a target index `k`, the number of elements in the tree `j`, and a
// boolean flag (indicating whether rightmost hashes exist), we can calculate
// the required Merkle path length.
//
// `mt_path_length` appears in a postcondition of `mt_get_path` and in a
// precondition of `mt_verify`. For a detailed description, see `mt_get_path`
// and `mt_verify`.
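//
// Illustrative sketch (not part of the original development): unfolding the
// definitions below by hand for a tree with j = 5 elements, target index
// k = 2, and actd = false gives
//
//   mt_path_length 0ul 2ul 5ul false
//   = mt_path_length_step 2ul 5ul false       // k even, j <> k, j <> k + 1ul: 1
//     + mt_path_length 1ul 1ul 2ul true       // nactd = false || (5ul % 2ul = 1ul)
//   = 1 + 1 + mt_path_length 2ul 0ul 1ul true // k odd at level 1: 1
//   = 1 + 1 + 1 + 0                           // level 2 adds 1, then j = 0ul
//   = 3
//
// i.e., a Merkle path for element 2 out of 5 consists of three sibling hashes.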
private
val mt_path_length_step:
k:index_t ->
j:index_t{k <= j} ->
actd:bool ->
Tot (sl:uint32_t{U32.v sl = MTH.mt_path_length_step (U32.v k) (U32.v j) actd})
let mt_path_length_step k j actd =
if j = 0ul then 0ul
else (if k % 2ul = 0ul
then (if j = k || (j = k + 1ul && not actd) then 0ul else 1ul)
else 1ul)
private inline_for_extraction
val mt_path_length:
lv:uint32_t{lv <= merkle_tree_size_lg} ->
k:index_t ->
j:index_t{k <= j && U32.v j < pow2 (32 - U32.v lv)} ->
actd:bool ->
Tot (l:uint32_t{
U32.v l = MTH.mt_path_length (U32.v k) (U32.v j) actd &&
l <= 32ul - lv})
(decreases (U32.v j))
#push-options "--z3rlimit 10 --initial_fuel 1 --max_fuel 1"
let rec mt_path_length lv k j actd =
if j = 0ul then 0ul
else (let nactd = actd || (j % 2ul = 1ul) in
mt_path_length_step k j actd +
mt_path_length (lv + 1ul) (k / 2ul) (j / 2ul) nactd)
#pop-options
val mt_get_path_length:
mtr:HH.rid ->
p:const_path_p ->
HST.ST uint32_t
(requires (fun h0 -> path_safe h0 mtr (CB.cast p)))
(ensures (fun h0 _ h1 -> True))
let mt_get_path_length mtr p =
let pd = !*(CB.cast p) in
V.size_of (Path?.hashes pd)
private inline_for_extraction
val mt_make_path_step:
#hsz:hash_size_t ->
lv:uint32_t{lv <= merkle_tree_size_lg} ->
mtr:HH.rid ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
rhs:hash_vec #hsz {V.size_of rhs = merkle_tree_size_lg} ->
i:index_t ->
j:index_t{j <> 0ul /\ i <= j /\ U32.v j < pow2 (32 - U32.v lv)} ->
k:index_t{i <= k && k <= j} ->
p:path_p ->
actd:bool ->
HST.ST unit
(requires (fun h0 ->
HH.includes mtr (V.frameOf hs) /\
HH.includes mtr (V.frameOf rhs) /\
RV.rv_inv h0 hs /\ RV.rv_inv h0 rhs /\
mt_safe_elts h0 lv hs i j /\
path_safe h0 mtr p /\
Path?.hash_size (B.get h0 p 0) = hsz /\
V.size_of (phashes h0 p) <= lv + 1ul))
(ensures (fun h0 _ h1 ->
// memory safety
modifies (path_loc p) h0 h1 /\
path_safe h1 mtr p /\
V.size_of (phashes h1 p) == V.size_of (phashes h0 p) + mt_path_length_step k j actd /\
V.size_of (phashes h1 p) <= lv + 2ul /\
// correctness
(mt_safe_elts_spec h0 lv hs i j;
(let hsz0 = Path?.hash_size (B.get h0 p 0) in
let hsz1 = Path?.hash_size (B.get h1 p 0) in
let before:(S.seq (MTH.hash #(U32.v hsz0))) = lift_path h0 mtr p in
let after:(S.seq (MTH.hash #(U32.v hsz1))) = lift_path h1 mtr p in
hsz = hsz0 /\ hsz = hsz1 /\
S.equal after
(MTH.mt_make_path_step
(U32.v lv) (RV.as_seq h0 hs) (RV.as_seq h0 rhs)
(U32.v i) (U32.v j) (U32.v k) before actd)))))
#push-options "--z3rlimit 100 --initial_fuel 1 --max_fuel 1 --initial_ifuel 2 --max_ifuel 2"
let mt_make_path_step #hsz lv mtr hs rhs i j k p actd =
let pth = !*p in
let hh0 = HST.get () in
let ofs = offset_of i in
if k % 2ul = 1ul
then begin
hash_vv_rv_inv_includes hh0 hs lv (k - 1ul - ofs);
assert (HH.includes mtr
(B.frameOf (V.get hh0 (V.get hh0 hs lv) (k - 1ul - ofs))));
assert(Path?.hash_size pth = hsz);
mt_path_insert #hsz mtr p (V.index (V.index hs lv) (k - 1ul - ofs))
end
else begin
if k = j then ()
else if k + 1ul = j
then (if actd
then (assert (HH.includes mtr (B.frameOf (V.get hh0 rhs lv)));
mt_path_insert mtr p (V.index rhs lv)))
else (hash_vv_rv_inv_includes hh0 hs lv (k + 1ul - ofs);
assert (HH.includes mtr
(B.frameOf (V.get hh0 (V.get hh0 hs lv) (k + 1ul - ofs))));
mt_path_insert mtr p (V.index (V.index hs lv) (k + 1ul - ofs)))
end
#pop-options
private inline_for_extraction
val mt_get_path_step_pre_nst:
#hsz:Ghost.erased hash_size_t ->
mtr:HH.rid ->
p:path ->
i:uint32_t ->
Tot bool
let mt_get_path_step_pre_nst #hsz mtr p i =
i < V.size_of (Path?.hashes p)
val mt_get_path_step_pre:
#hsz:Ghost.erased hash_size_t ->
mtr:HH.rid ->
p:const_path_p ->
i:uint32_t ->
HST.ST bool
(requires (fun h0 ->
path_safe h0 mtr (CB.cast p) /\
(let pv = B.get h0 (CB.cast p) 0 in
Path?.hash_size pv = Ghost.reveal hsz /\
live h0 (Path?.hashes pv) /\
mt_get_path_step_pre_nst #hsz mtr pv i)))
(ensures (fun _ _ _ -> True))
let mt_get_path_step_pre #hsz mtr p i =
let p = CB.cast p in
mt_get_path_step_pre_nst #hsz mtr !*p i
val mt_get_path_step:
#hsz:Ghost.erased hash_size_t ->
mtr:HH.rid ->
p:const_path_p ->
i:uint32_t ->
HST.ST (hash #hsz)
(requires (fun h0 ->
path_safe h0 mtr (CB.cast p) /\
(let pv = B.get h0 (CB.cast p) 0 in
Path?.hash_size pv = Ghost.reveal hsz /\
live h0 (Path?.hashes pv) /\
i < V.size_of (Path?.hashes pv))))
(ensures (fun h0 r h1 -> True ))
let mt_get_path_step #hsz mtr p i =
let pd = !*(CB.cast p) in
V.index #(hash #(Path?.hash_size pd)) (Path?.hashes pd) i
private
val mt_get_path_:
#hsz:hash_size_t ->
lv:uint32_t{lv <= merkle_tree_size_lg} ->
mtr:HH.rid ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
rhs:hash_vec #hsz {V.size_of rhs = merkle_tree_size_lg} ->
i:index_t -> j:index_t{i <= j /\ U32.v j < pow2 (32 - U32.v lv)} ->
k:index_t{i <= k && k <= j} ->
p:path_p ->
actd:bool ->
HST.ST unit
(requires (fun h0 ->
HH.includes mtr (V.frameOf hs) /\
HH.includes mtr (V.frameOf rhs) /\
RV.rv_inv h0 hs /\ RV.rv_inv h0 rhs /\
mt_safe_elts h0 lv hs i j /\
path_safe h0 mtr p /\
Path?.hash_size (B.get h0 p 0) = hsz /\
V.size_of (phashes h0 p) <= lv + 1ul))
(ensures (fun h0 _ h1 ->
// memory safety
modifies (path_loc p) h0 h1 /\
path_safe h1 mtr p /\
V.size_of (phashes h1 p) ==
V.size_of (phashes h0 p) + mt_path_length lv k j actd /\
// correctness
(mt_safe_elts_spec h0 lv hs i j;
(let hsz0 = Path?.hash_size (B.get h0 p 0) in
let hsz1 = Path?.hash_size (B.get h1 p 0) in
let before:(S.seq (MTH.hash #(U32.v hsz0))) = lift_path h0 mtr p in
let after:(S.seq (MTH.hash #(U32.v hsz1))) = lift_path h1 mtr p in
hsz = hsz0 /\ hsz = hsz1 /\
S.equal after
(MTH.mt_get_path_ (U32.v lv) (RV.as_seq h0 hs) (RV.as_seq h0 rhs)
(U32.v i) (U32.v j) (U32.v k) before actd)))))
(decreases (32 - U32.v lv))
#push-options "--z3rlimit 300 --initial_fuel 1 --max_fuel 1 --max_ifuel 2 --initial_ifuel 2"
let rec mt_get_path_ #hsz lv mtr hs rhs i j k p actd =
let hh0 = HST.get () in
mt_safe_elts_spec hh0 lv hs i j;
let ofs = offset_of i in
if j = 0ul then ()
else
(mt_make_path_step lv mtr hs rhs i j k p actd;
let hh1 = HST.get () in
mt_safe_elts_spec hh0 lv hs i j;
assert (S.equal (lift_path hh1 mtr p)
(MTH.mt_make_path_step
(U32.v lv) (RV.as_seq hh0 hs) (RV.as_seq hh0 rhs)
(U32.v i) (U32.v j) (U32.v k)
(lift_path hh0 mtr p) actd));
RV.rv_inv_preserved hs (path_loc p) hh0 hh1;
RV.rv_inv_preserved rhs (path_loc p) hh0 hh1;
RV.as_seq_preserved hs (path_loc p) hh0 hh1;
RV.as_seq_preserved rhs (path_loc p) hh0 hh1;
V.loc_vector_within_included hs lv (V.size_of hs);
mt_safe_elts_preserved lv hs i j (path_loc p) hh0 hh1;
assert (mt_safe_elts hh1 lv hs i j);
mt_safe_elts_rec hh1 lv hs i j;
mt_safe_elts_spec hh1 (lv + 1ul) hs (i / 2ul) (j / 2ul);
mt_get_path_ (lv + 1ul) mtr hs rhs (i / 2ul) (j / 2ul) (k / 2ul) p
(if j % 2ul = 0ul then actd else true);
let hh2 = HST.get () in
assert (S.equal (lift_path hh2 mtr p)
(MTH.mt_get_path_ (U32.v lv + 1)
(RV.as_seq hh1 hs) (RV.as_seq hh1 rhs)
(U32.v i / 2) (U32.v j / 2) (U32.v k / 2)
(lift_path hh1 mtr p)
(if U32.v j % 2 = 0 then actd else true)));
assert (S.equal (lift_path hh2 mtr p)
(MTH.mt_get_path_ (U32.v lv)
(RV.as_seq hh0 hs) (RV.as_seq hh0 rhs)
(U32.v i) (U32.v j) (U32.v k)
(lift_path hh0 mtr p) actd)))
#pop-options
private inline_for_extraction
val mt_get_path_pre_nst:
mtv:merkle_tree ->
idx:offset_t ->
p:path ->
root:(hash #(MT?.hash_size mtv)) ->
Tot bool
let mt_get_path_pre_nst mtv idx p root =
offsets_connect (MT?.offset mtv) idx &&
Path?.hash_size p = MT?.hash_size mtv &&
([@inline_let] let idx = split_offset (MT?.offset mtv) idx in
MT?.i mtv <= idx && idx < MT?.j mtv &&
V.size_of (Path?.hashes p) = 0ul)
val mt_get_path_pre:
#hsz:Ghost.erased hash_size_t ->
mt:const_mt_p ->
idx:offset_t ->
p:const_path_p ->
root:hash #hsz ->
HST.ST bool
(requires (fun h0 ->
let mt = CB.cast mt in
let p = CB.cast p in
let dmt = B.get h0 mt 0 in
let dp = B.get h0 p 0 in
MT?.hash_size dmt = (Ghost.reveal hsz) /\
Path?.hash_size dp = (Ghost.reveal hsz) /\
mt_safe h0 mt /\
path_safe h0 (B.frameOf mt) p /\
Rgl?.r_inv (hreg hsz) h0 root /\
HH.disjoint (B.frameOf root) (B.frameOf mt) /\
HH.disjoint (B.frameOf root) (B.frameOf p)))
(ensures (fun _ _ _ -> True))
let mt_get_path_pre #_ mt idx p root =
let mt = CB.cast mt in
let p = CB.cast p in
let mtv = !*mt in
mt_get_path_pre_nst mtv idx !*p root
val mt_get_path_loc_union_helper:
l1:loc -> l2:loc ->
Lemma (loc_union (loc_union l1 l2) l2 == loc_union l1 l2)
let mt_get_path_loc_union_helper l1 l2 = ()
// Construct a Merkle path for a given index `idx`, the hashes `mt.hs`, and the
// rightmost hashes `mt.rhs`. Note that this operation copies "pointers" to hashes
// inside the Merkle tree into the output path, not the hash values themselves.
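//
// Informal illustration (an added note, not from the original comments): for a
// tree with i = 0 and j = 5 elements, requesting a path for idx = 2 first
// inserts the leaf hash itself and then mt_path_length 0ul 2ul 5ul false = 3
// sibling hashes, so the postcondition below yields a path of 1 + 3 = 4 hashes,
// and the function returns the element count j against which the path was built.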
#push-options "--z3rlimit 60"
val mt_get_path:
#hsz:Ghost.erased hash_size_t ->
mt:const_mt_p ->
idx:offset_t ->
p:path_p ->
root:hash #hsz ->
HST.ST index_t
(requires (fun h0 ->
let mt = CB.cast mt in
let dmt = B.get h0 mt 0 in
MT?.hash_size dmt = Ghost.reveal hsz /\
Path?.hash_size (B.get h0 p 0) = Ghost.reveal hsz /\
mt_get_path_pre_nst (B.get h0 mt 0) idx (B.get h0 p 0) root /\
mt_safe h0 mt /\
path_safe h0 (B.frameOf mt) p /\
Rgl?.r_inv (hreg hsz) h0 root /\
HH.disjoint (B.frameOf root) (B.frameOf mt) /\
HH.disjoint (B.frameOf root) (B.frameOf p)))
(ensures (fun h0 _ h1 ->
let mt = CB.cast mt in
let mtv0 = B.get h0 mt 0 in
let mtv1 = B.get h1 mt 0 in
let idx = split_offset (MT?.offset mtv0) idx in
MT?.hash_size mtv0 = Ghost.reveal hsz /\
MT?.hash_size mtv1 = Ghost.reveal hsz /\
Path?.hash_size (B.get h0 p 0) = Ghost.reveal hsz /\
Path?.hash_size (B.get h1 p 0) = Ghost.reveal hsz /\
// memory safety
modifies (loc_union
(loc_union
(mt_loc mt)
(B.loc_all_regions_from false (B.frameOf root)))
(path_loc p))
h0 h1 /\
mt_safe h1 mt /\
path_safe h1 (B.frameOf mt) p /\
Rgl?.r_inv (hreg hsz) h1 root /\
V.size_of (phashes h1 p) ==
1ul + mt_path_length 0ul idx (MT?.j mtv0) false /\
// correctness
(let sj, sp, srt =
MTH.mt_get_path
(mt_lift h0 mt) (U32.v idx) (Rgl?.r_repr (hreg hsz) h0 root) in
sj == U32.v (MT?.j mtv1) /\
S.equal sp (lift_path #hsz h1 (B.frameOf mt) p) /\
srt == Rgl?.r_repr (hreg hsz) h1 root)))
#pop-options
#push-options "--z3rlimit 300 --initial_fuel 1 --max_fuel 1"
let mt_get_path #hsz mt idx p root =
let ncmt = CB.cast mt in
let mtframe = B.frameOf ncmt in
let hh0 = HST.get () in
mt_get_root mt root;
let mtv = !*ncmt in
let hsz = MT?.hash_size mtv in
let hh1 = HST.get () in
path_safe_init_preserved mtframe p
(B.loc_union (mt_loc ncmt)
(B.loc_all_regions_from false (B.frameOf root)))
hh0 hh1;
assert (MTH.mt_get_root (mt_lift hh0 ncmt) (Rgl?.r_repr (hreg hsz) hh0 root) ==
(mt_lift hh1 ncmt, Rgl?.r_repr (hreg hsz) hh1 root));
assert (S.equal (lift_path #hsz hh1 mtframe p) S.empty);
let idx = split_offset (MT?.offset mtv) idx in
let i = MT?.i mtv in
let ofs = offset_of (MT?.i mtv) in
let j = MT?.j mtv in
let hs = MT?.hs mtv in
let rhs = MT?.rhs mtv in
assert (mt_safe_elts hh1 0ul hs i j);
assert (V.size_of (V.get hh1 hs 0ul) == j - ofs);
assert (idx < j);
hash_vv_rv_inv_includes hh1 hs 0ul (idx - ofs);
hash_vv_rv_inv_r_inv hh1 hs 0ul (idx - ofs);
hash_vv_as_seq_get_index hh1 hs 0ul (idx - ofs);
let ih = V.index (V.index hs 0ul) (idx - ofs) in
mt_path_insert #hsz mtframe p ih;
let hh2 = HST.get () in
assert (S.equal (lift_path hh2 mtframe p)
(MTH.path_insert
(lift_path hh1 mtframe p)
(S.index (S.index (RV.as_seq hh1 hs) 0) (U32.v idx - U32.v ofs))));
Rgl?.r_sep (hreg hsz) root (path_loc p) hh1 hh2;
mt_safe_preserved ncmt (path_loc p) hh1 hh2;
mt_preserved ncmt (path_loc p) hh1 hh2;
assert (V.size_of (phashes hh2 p) == 1ul);
mt_get_path_ 0ul mtframe hs rhs i j idx p false;
let hh3 = HST.get () in
// memory safety
mt_get_path_loc_union_helper
(loc_union (mt_loc ncmt)
(B.loc_all_regions_from false (B.frameOf root)))
(path_loc p);
Rgl?.r_sep (hreg hsz) root (path_loc p) hh2 hh3;
mt_safe_preserved ncmt (path_loc p) hh2 hh3;
mt_preserved ncmt (path_loc p) hh2 hh3;
assert (V.size_of (phashes hh3 p) ==
1ul + mt_path_length 0ul idx (MT?.j (B.get hh0 ncmt 0)) false);
assert (S.length (lift_path #hsz hh3 mtframe p) ==
S.length (lift_path #hsz hh2 mtframe p) +
MTH.mt_path_length (U32.v idx) (U32.v (MT?.j (B.get hh0 ncmt 0))) false);
assert (modifies (loc_union
(loc_union
(mt_loc ncmt)
(B.loc_all_regions_from false (B.frameOf root)))
(path_loc p))
hh0 hh3);
assert (mt_safe hh3 ncmt);
assert (path_safe hh3 mtframe p);
assert (Rgl?.r_inv (hreg hsz) hh3 root);
assert (V.size_of (phashes hh3 p) ==
1ul + mt_path_length 0ul idx (MT?.j (B.get hh0 ncmt 0)) false);
// correctness
mt_safe_elts_spec hh2 0ul hs i j;
assert (S.equal (lift_path hh3 mtframe p)
(MTH.mt_get_path_ 0 (RV.as_seq hh2 hs) (RV.as_seq hh2 rhs)
(U32.v i) (U32.v j) (U32.v idx)
(lift_path hh2 mtframe p) false));
assert (MTH.mt_get_path
(mt_lift hh0 ncmt) (U32.v idx) (Rgl?.r_repr (hreg hsz) hh0 root) ==
(U32.v (MT?.j (B.get hh3 ncmt 0)),
lift_path hh3 mtframe p,
Rgl?.r_repr (hreg hsz) hh3 root));
j
#pop-options
/// Flushing
private val
mt_flush_to_modifies_rec_helper:
#hsz:hash_size_t ->
lv:uint32_t{lv < merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
h:HS.mem ->
Lemma (loc_union
(loc_union
(RV.rs_loc_elem (hvreg hsz) (V.as_seq h hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul)))
(loc_union
(RV.rv_loc_elems h hs (lv + 1ul) (V.size_of hs))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs))) ==
loc_union
(RV.rv_loc_elems h hs lv (V.size_of hs))
(V.loc_vector_within hs lv (V.size_of hs)))
#push-options "--initial_fuel 2 --max_fuel 2"
let mt_flush_to_modifies_rec_helper #hsz lv hs h =
assert (V.loc_vector_within hs lv (V.size_of hs) ==
loc_union (V.loc_vector_within hs lv (lv + 1ul))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs)));
RV.rs_loc_elems_rec_inverse (hvreg hsz) (V.as_seq h hs) (U32.v lv) (U32.v (V.size_of hs));
assert (RV.rv_loc_elems h hs lv (V.size_of hs) ==
loc_union (RV.rs_loc_elem (hvreg hsz) (V.as_seq h hs) (U32.v lv))
(RV.rv_loc_elems h hs (lv + 1ul) (V.size_of hs)));
loc_union_assoc_4
(RV.rs_loc_elem (hvreg hsz) (V.as_seq h hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul))
(RV.rv_loc_elems h hs (lv + 1ul) (V.size_of hs))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs))
#pop-options
private
val mt_flush_to_:
hsz:hash_size_t ->
lv:uint32_t{lv < merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
pi:index_t ->
i:index_t{i >= pi} ->
j:Ghost.erased index_t{
Ghost.reveal j >= i &&
U32.v (Ghost.reveal j) < pow2 (32 - U32.v lv)} ->
HST.ST unit
(requires (fun h0 ->
RV.rv_inv h0 hs /\
mt_safe_elts h0 lv hs pi (Ghost.reveal j)))
(ensures (fun h0 _ h1 ->
// memory safety
modifies (loc_union
(RV.rv_loc_elems h0 hs lv (V.size_of hs))
(V.loc_vector_within hs lv (V.size_of hs)))
h0 h1 /\
RV.rv_inv h1 hs /\
mt_safe_elts h1 lv hs i (Ghost.reveal j) /\
// correctness
(mt_safe_elts_spec h0 lv hs pi (Ghost.reveal j);
S.equal (RV.as_seq h1 hs)
(MTH.mt_flush_to_
(U32.v lv) (RV.as_seq h0 hs) (U32.v pi)
(U32.v i) (U32.v (Ghost.reveal j))))))
(decreases (U32.v i))
#restart-solver
#push-options "--z3rlimit 1500 --fuel 1 --ifuel 0"
let rec mt_flush_to_ hsz lv hs pi i j =
let hh0 = HST.get () in
// Base conditions
mt_safe_elts_rec hh0 lv hs pi (Ghost.reveal j);
V.loc_vector_within_included hs 0ul lv;
V.loc_vector_within_included hs lv (lv + 1ul);
V.loc_vector_within_included hs (lv + 1ul) (V.size_of hs);
V.loc_vector_within_disjoint hs lv (lv + 1ul) (lv + 1ul) (V.size_of hs);
let oi = offset_of i in
let opi = offset_of pi in
if oi = opi then mt_safe_elts_spec hh0 lv hs pi (Ghost.reveal j)
else begin
/// 1) Flush hashes at the level `lv`, where the new vector is
/// not yet connected to `hs`.
let ofs = oi - opi in
let hvec = V.index hs lv in
let flushed:(rvector (hreg hsz)) = rv_flush_inplace hvec ofs in
let hh1 = HST.get () in
// 1-0) Basic disjointness conditions for `RV.assign`
V.forall2_forall_left hh0 hs 0ul (V.size_of hs) lv
(fun b1 b2 -> HH.disjoint (Rgl?.region_of (hvreg hsz) b1)
(Rgl?.region_of (hvreg hsz) b2));
V.forall2_forall_right hh0 hs 0ul (V.size_of hs) lv
(fun b1 b2 -> HH.disjoint (Rgl?.region_of (hvreg hsz) b1)
(Rgl?.region_of (hvreg hsz) b2));
V.forall_preserved
hs 0ul lv
(fun b -> HH.disjoint (Rgl?.region_of (hvreg hsz) hvec)
(Rgl?.region_of (hvreg hsz) b))
(RV.loc_rvector hvec)
hh0 hh1;
V.forall_preserved
hs (lv + 1ul) (V.size_of hs)
(fun b -> HH.disjoint (Rgl?.region_of (hvreg hsz) hvec)
(Rgl?.region_of (hvreg hsz) b))
(RV.loc_rvector hvec)
hh0 hh1;
assert (Rgl?.region_of (hvreg hsz) hvec == Rgl?.region_of (hvreg hsz) flushed);
// 1-1) For the `modifies` postcondition.
assert (modifies (RV.rs_loc_elem (hvreg hsz) (V.as_seq hh0 hs) (U32.v lv)) hh0 hh1);
// 1-2) Preservation
RV.rv_loc_elems_preserved
hs (lv + 1ul) (V.size_of hs)
(RV.loc_rvector (V.get hh0 hs lv)) hh0 hh1;
// 1-3) For `mt_safe_elts`
assert (V.size_of flushed == Ghost.reveal j - offset_of i); // head updated
mt_safe_elts_preserved
(lv + 1ul) hs (pi / 2ul) (Ghost.reveal j / 2ul)
(RV.loc_rvector (V.get hh0 hs lv)) hh0 hh1; // tail not yet
// 1-4) For the `rv_inv` postcondition
RV.rs_loc_elems_elem_disj
(hvreg hsz) (V.as_seq hh0 hs) (V.frameOf hs)
0 (U32.v (V.size_of hs)) 0 (U32.v lv) (U32.v lv);
RV.rs_loc_elems_parent_disj
(hvreg hsz) (V.as_seq hh0 hs) (V.frameOf hs)
0 (U32.v lv);
RV.rv_elems_inv_preserved
hs 0ul lv (RV.loc_rvector (V.get hh0 hs lv))
hh0 hh1;
assert (RV.rv_elems_inv hh1 hs 0ul lv);
RV.rs_loc_elems_elem_disj
(hvreg hsz) (V.as_seq hh0 hs) (V.frameOf hs)
0 (U32.v (V.size_of hs))
(U32.v lv + 1) (U32.v (V.size_of hs))
(U32.v lv);
RV.rs_loc_elems_parent_disj
(hvreg hsz) (V.as_seq hh0 hs) (V.frameOf hs)
(U32.v lv + 1) (U32.v (V.size_of hs));
RV.rv_elems_inv_preserved
hs (lv + 1ul) (V.size_of hs) (RV.loc_rvector (V.get hh0 hs lv))
hh0 hh1;
assert (RV.rv_elems_inv hh1 hs (lv + 1ul) (V.size_of hs));
assert (rv_itself_inv hh1 hs);
assert (elems_reg hh1 hs);
// 1-5) Correctness
assert (S.equal (RV.as_seq hh1 flushed)
(S.slice (RV.as_seq hh0 (V.get hh0 hs lv)) (U32.v ofs)
(S.length (RV.as_seq hh0 (V.get hh0 hs lv)))));
/// 2) Assign the flushed vector to `hs` at the level `lv`.
RV.assign hs lv flushed;
let hh2 = HST.get () in
// 2-1) For the `modifies` postcondition.
assert (modifies (V.loc_vector_within hs lv (lv + 1ul)) hh1 hh2);
assert (modifies (loc_union
(RV.rs_loc_elem (hvreg hsz) (V.as_seq hh0 hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul))) hh0 hh2);
// 2-2) Preservation
V.loc_vector_within_disjoint hs lv (lv + 1ul) (lv + 1ul) (V.size_of hs);
RV.rv_loc_elems_preserved
hs (lv + 1ul) (V.size_of hs)
(V.loc_vector_within hs lv (lv + 1ul)) hh1 hh2;
// 2-3) For `mt_safe_elts`
assert (V.size_of (V.get hh2 hs lv) ==
Ghost.reveal j - offset_of i);
mt_safe_elts_preserved
(lv + 1ul) hs (pi / 2ul) (Ghost.reveal j / 2ul)
(V.loc_vector_within hs lv (lv + 1ul)) hh1 hh2;
// 2-4) Correctness
RV.as_seq_sub_preserved hs 0ul lv (loc_rvector flushed) hh0 hh1;
RV.as_seq_sub_preserved hs (lv + 1ul) merkle_tree_size_lg (loc_rvector flushed) hh0 hh1;
assert (S.equal (RV.as_seq hh2 hs)
(S.append
(RV.as_seq_sub hh0 hs 0ul lv)
(S.cons (RV.as_seq hh1 flushed)
(RV.as_seq_sub hh0 hs (lv + 1ul) merkle_tree_size_lg))));
as_seq_sub_upd hh0 hs lv (RV.as_seq hh1 flushed);
// if `lv = 31` then `pi <= i <= j < 2` thus `oi = opi`,
// contradicting the branch.
assert (lv + 1ul < merkle_tree_size_lg);
assert (U32.v (Ghost.reveal j / 2ul) < pow2 (32 - U32.v (lv + 1ul)));
assert (RV.rv_inv hh2 hs);
assert (mt_safe_elts hh2 (lv + 1ul) hs (pi / 2ul) (Ghost.reveal j / 2ul));
/// 3) Recursion
mt_flush_to_ hsz (lv + 1ul) hs (pi / 2ul) (i / 2ul)
(Ghost.hide (Ghost.reveal j / 2ul));
let hh3 = HST.get () in
// 3-0) Memory safety brought from the postcondition of the recursion
assert (modifies
(loc_union
(loc_union
(RV.rs_loc_elem (hvreg hsz) (V.as_seq hh0 hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul)))
(loc_union
(RV.rv_loc_elems hh0 hs (lv + 1ul) (V.size_of hs))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs))))
hh0 hh3);
mt_flush_to_modifies_rec_helper lv hs hh0;
V.loc_vector_within_disjoint hs lv (lv + 1ul) (lv + 1ul) (V.size_of hs);
V.loc_vector_within_included hs lv (lv + 1ul);
RV.rv_loc_elems_included hh2 hs (lv + 1ul) (V.size_of hs);
assert (loc_disjoint
(V.loc_vector_within hs lv (lv + 1ul))
(RV.rv_loc_elems hh2 hs (lv + 1ul) (V.size_of hs)));
V.get_preserved hs lv
(loc_union
(RV.rv_loc_elems hh2 hs (lv + 1ul) (V.size_of hs))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs)))
hh2 hh3;
assert (V.size_of (V.get hh3 hs lv) ==
Ghost.reveal j - offset_of i);
assert (RV.rv_inv hh3 hs);
mt_safe_elts_constr hh3 lv hs i (Ghost.reveal j);
assert (mt_safe_elts hh3 lv hs i (Ghost.reveal j));
// 3-1) Correctness
mt_safe_elts_spec hh2 (lv + 1ul) hs (pi / 2ul) (Ghost.reveal j / 2ul);
assert (S.equal (RV.as_seq hh3 hs)
(MTH.mt_flush_to_ (U32.v lv + 1) (RV.as_seq hh2 hs)
(U32.v pi / 2) (U32.v i / 2) (U32.v (Ghost.reveal j) / 2)));
mt_safe_elts_spec hh0 lv hs pi (Ghost.reveal j);
MTH.mt_flush_to_rec
(U32.v lv) (RV.as_seq hh0 hs)
(U32.v pi) (U32.v i) (U32.v (Ghost.reveal j));
assert (S.equal (RV.as_seq hh3 hs)
(MTH.mt_flush_to_ (U32.v lv) (RV.as_seq hh0 hs)
(U32.v pi) (U32.v i) (U32.v (Ghost.reveal j))))
end
#pop-options
// `mt_flush_to` flushes old hashes in the Merkle tree. It removes hash elements
// from `MT?.i` to **`offset_of (idx - 1)`**, but maintains the tree structure,
// i.e., the tree still holds some old internal hashes (compressed from old
// hashes) which are required to generate Merkle paths for the remaining hashes.
//
// Note that `mt_flush_to` (and `mt_flush`) always retain at least one base hash
// element. If there are `MT?.j` elements in the tree, then because of the
// precondition `MT?.i <= idx < MT?.j` the `idx`-th element is still present
// after flushing.
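//
// Informal example (an added note, not from the original comments): with
// `MT?.offset = 0UL`, `MT?.i = 0ul` and `MT?.j = 10ul`, calling
// `mt_flush_to mt 4UL` yields a tree with `MT?.i = 4ul` and `MT?.j = 10ul`,
// so Merkle paths can still be generated for elements 4 through 9, while the
// leaf-level storage below `offset_of 4ul` has been released.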
private inline_for_extraction
val mt_flush_to_pre_nst: mtv:merkle_tree -> idx:offset_t -> Tot bool
let mt_flush_to_pre_nst mtv idx =
offsets_connect (MT?.offset mtv) idx &&
([@inline_let] let idx = split_offset (MT?.offset mtv) idx in
idx >= MT?.i mtv &&
idx < MT?.j mtv)
val mt_flush_to_pre: mt:const_mt_p -> idx:offset_t -> HST.ST bool
(requires (fun h0 -> mt_safe h0 (CB.cast mt)))
(ensures (fun _ _ _ -> True))
let mt_flush_to_pre mt idx =
let mt = CB.cast mt in
let h0 = HST.get() in
let mtv = !*mt in
mt_flush_to_pre_nst mtv idx
#push-options "--z3rlimit 100 --initial_fuel 1 --max_fuel 1"
val mt_flush_to:
mt:mt_p ->
idx:offset_t ->
HST.ST unit
(requires (fun h0 -> mt_safe h0 mt /\ mt_flush_to_pre_nst (B.get h0 mt 0) idx))
(ensures (fun h0 _ h1 ->
// memory safety
modifies (mt_loc mt) h0 h1 /\
mt_safe h1 mt /\
// correctness
(let mtv0 = B.get h0 mt 0 in
let mtv1 = B.get h1 mt 0 in
let off = MT?.offset mtv0 in
let idx = split_offset off idx in
MT?.hash_size mtv0 = MT?.hash_size mtv1 /\
MTH.mt_flush_to (mt_lift h0 mt) (U32.v idx) == mt_lift h1 mt)))
let mt_flush_to mt idx =
let hh0 = HST.get () in
let mtv = !*mt in
let offset = MT?.offset mtv in
let j = MT?.j mtv in
let hsz = MT?.hash_size mtv in
let idx = split_offset offset idx in
let hs = MT?.hs mtv in
mt_flush_to_ hsz 0ul hs (MT?.i mtv) idx (Ghost.hide (MT?.j mtv));
let hh1 = HST.get () in
RV.rv_loc_elems_included hh0 hs 0ul (V.size_of hs);
V.loc_vector_within_included hs 0ul (V.size_of hs);
RV.rv_inv_preserved
(MT?.rhs mtv)
(loc_union
(RV.rv_loc_elems hh0 hs 0ul (V.size_of hs))
(V.loc_vector_within hs 0ul (V.size_of hs)))
hh0 hh1;
RV.as_seq_preserved
(MT?.rhs mtv)
(loc_union
(RV.rv_loc_elems hh0 hs 0ul (V.size_of hs))
(V.loc_vector_within hs 0ul (V.size_of hs)))
hh0 hh1;
Rgl?.r_sep (hreg (MT?.hash_size mtv)) (MT?.mroot mtv)
(loc_union
(RV.rv_loc_elems hh0 hs 0ul (V.size_of hs))
(V.loc_vector_within hs 0ul (V.size_of hs)))
hh0 hh1;
mt *= MT (MT?.hash_size mtv)
(MT?.offset mtv) idx (MT?.j mtv)
hs
(MT?.rhs_ok mtv) (MT?.rhs mtv)
(MT?.mroot mtv)
(MT?.hash_spec mtv) (MT?.hash_fun mtv);
let hh2 = HST.get () in
RV.rv_inv_preserved (MT?.hs mtv) (B.loc_buffer mt) hh1 hh2;
RV.rv_inv_preserved (MT?.rhs mtv) (B.loc_buffer mt) hh1 hh2;
RV.as_seq_preserved (MT?.hs mtv) (B.loc_buffer mt) hh1 hh2;
RV.as_seq_preserved (MT?.rhs mtv) (B.loc_buffer mt) hh1 hh2;
Rgl?.r_sep (hreg (MT?.hash_size mtv)) (MT?.mroot mtv) (B.loc_buffer mt) hh1 hh2;
mt_safe_elts_preserved 0ul hs idx (MT?.j mtv) (B.loc_buffer mt) hh1 hh2
#pop-options
private inline_for_extraction | false | true | MerkleTree.Low.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 0,
"initial_ifuel": 0,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 10,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val mt_flush_pre_nst: mt:merkle_tree -> Tot bool | [] | MerkleTree.Low.mt_flush_pre_nst | {
"file_name": "src/MerkleTree.Low.fst",
"git_rev": "3b0f086655c145aa23f58a97259ebf4cf112a4a3",
"git_url": "https://github.com/hacl-star/merkle-tree.git",
"project_name": "merkle-tree"
} | mt: MerkleTree.Low.merkle_tree -> Prims.bool | {
"end_col": 45,
"end_line": 2497,
"start_col": 26,
"start_line": 2497
} |
Prims.Tot | val mt_get_path_step_pre_nst:
#hsz:Ghost.erased hash_size_t ->
mtr:HH.rid ->
p:path ->
i:uint32_t ->
Tot bool | [
{
"abbrev": false,
"full_module": "MerkleTree.Low.VectorExtras",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree.Low.Hashfunctions",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree.Low.Datastructures",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": true,
"full_module": "MerkleTree.Spec",
"short_module": "MTS"
},
{
"abbrev": true,
"full_module": "MerkleTree.New.High",
"short_module": "MTH"
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "LowStar.Regional.Instances",
"short_module": "RVI"
},
{
"abbrev": true,
"full_module": "LowStar.RVector",
"short_module": "RV"
},
{
"abbrev": true,
"full_module": "LowStar.Vector",
"short_module": "V"
},
{
"abbrev": true,
"full_module": "LowStar.ConstBuffer",
"short_module": "CB"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.Monotonic.HyperHeap",
"short_module": "HH"
},
{
"abbrev": true,
"full_module": "FStar.Monotonic.HyperStack",
"short_module": "MHS"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "LowStar.Regional.Instances",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.RVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Regional",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Vector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.BufferOps",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Buffer",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Integers",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.All",
"short_module": null
},
{
"abbrev": false,
"full_module": "EverCrypt.Helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let mt_get_path_step_pre_nst #hsz mtr p i =
i < V.size_of (Path?.hashes p) | val mt_get_path_step_pre_nst:
#hsz:Ghost.erased hash_size_t ->
mtr:HH.rid ->
p:path ->
i:uint32_t ->
Tot bool
let mt_get_path_step_pre_nst #hsz mtr p i = | false | null | false | i < V.size_of (Path?.hashes p) | {
"checked_file": "MerkleTree.Low.fst.checked",
"dependencies": [
"prims.fst.checked",
"MerkleTree.Spec.fst.checked",
"MerkleTree.New.High.fst.checked",
"MerkleTree.Low.VectorExtras.fst.checked",
"MerkleTree.Low.Hashfunctions.fst.checked",
"MerkleTree.Low.Datastructures.fst.checked",
"LowStar.Vector.fst.checked",
"LowStar.RVector.fst.checked",
"LowStar.Regional.Instances.fst.checked",
"LowStar.Regional.fst.checked",
"LowStar.ConstBuffer.fsti.checked",
"LowStar.BufferOps.fst.checked",
"LowStar.Buffer.fst.checked",
"Lib.IntTypes.fsti.checked",
"Lib.ByteBuffer.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.Properties.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Monotonic.HyperStack.fsti.checked",
"FStar.Monotonic.HyperHeap.fsti.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Integers.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked",
"FStar.All.fst.checked",
"EverCrypt.Helpers.fsti.checked"
],
"interface_file": false,
"source_file": "MerkleTree.Low.fst"
} | [
"total"
] | [
"FStar.Ghost.erased",
"MerkleTree.Low.Datastructures.hash_size_t",
"FStar.Monotonic.HyperHeap.rid",
"MerkleTree.Low.path",
"LowStar.Vector.uint32_t",
"FStar.Integers.op_Less",
"FStar.Integers.Unsigned",
"FStar.Integers.W32",
"LowStar.Vector.size_of",
"MerkleTree.Low.Datastructures.hash",
"MerkleTree.Low.__proj__Path__item__hash_size",
"MerkleTree.Low.__proj__Path__item__hashes",
"Prims.bool"
] | [] | module MerkleTree.Low
open EverCrypt.Helpers
open FStar.All
open FStar.Integers
open FStar.Mul
open LowStar.Buffer
open LowStar.BufferOps
open LowStar.Vector
open LowStar.Regional
open LowStar.RVector
open LowStar.Regional.Instances
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
module MHS = FStar.Monotonic.HyperStack
module HH = FStar.Monotonic.HyperHeap
module B = LowStar.Buffer
module CB = LowStar.ConstBuffer
module V = LowStar.Vector
module RV = LowStar.RVector
module RVI = LowStar.Regional.Instances
module S = FStar.Seq
module U32 = FStar.UInt32
module U64 = FStar.UInt64
module MTH = MerkleTree.New.High
module MTS = MerkleTree.Spec
open Lib.IntTypes
open MerkleTree.Low.Datastructures
open MerkleTree.Low.Hashfunctions
open MerkleTree.Low.VectorExtras
#set-options "--z3rlimit 10 --initial_fuel 0 --max_fuel 0 --initial_ifuel 0 --max_ifuel 0"
type const_pointer (a:Type0) = b:CB.const_buffer a{CB.length b == 1 /\ CB.qual_of b == CB.MUTABLE}
/// Low-level Merkle tree data structure
///
// NOTE: because of the lack of 64-bit LowStar.Buffer support, the types below
// currently cannot be changed to wider ones.
type index_t = uint32_t
let uint32_32_max = 4294967295ul
inline_for_extraction
let uint32_max = 4294967295UL
let uint64_max = 18446744073709551615UL
let offset_range_limit = uint32_max
type offset_t = uint64_t
inline_for_extraction noextract unfold let u32_64 = Int.Cast.uint32_to_uint64
inline_for_extraction noextract unfold let u64_32 = Int.Cast.uint64_to_uint32
private inline_for_extraction
let offsets_connect (x:offset_t) (y:offset_t): Tot bool = y >= x && (y - x) <= offset_range_limit
private inline_for_extraction
let split_offset (tree:offset_t) (index:offset_t{offsets_connect tree index}): Tot index_t =
[@inline_let] let diff = U64.sub_mod index tree in
assert (diff <= offset_range_limit);
Int.Cast.uint64_to_uint32 diff
private inline_for_extraction
let add64_fits (x:offset_t) (i:index_t): Tot bool = uint64_max - x >= (u32_64 i)
private inline_for_extraction
let join_offset (tree:offset_t) (i:index_t{add64_fits tree i}): Tot (r:offset_t{offsets_connect tree r}) =
U64.add tree (u32_64 i)
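// Illustrative note (added, not part of the original file): with a tree offset
// of 100UL, `offsets_connect 100UL 150UL` holds (150 >= 100 and the difference
// 50 fits within `offset_range_limit`), `split_offset 100UL 150UL = 50ul`, and
// conversely `join_offset 100UL 50ul = 150UL`.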
inline_for_extraction val merkle_tree_size_lg: uint32_t
let merkle_tree_size_lg = 32ul
// A Merkle tree `MT i j hs rhs_ok rhs` stores all hashes necessary to generate
// a Merkle path for each element from index `i` to `j-1`.
// - Parameters
// `hs`: a 2-dim store for hashes, where `hs[0]` contains the leaf hash values.
// `rhs_ok`: indicates whether the rightmost hashes are up-to-date
// `rhs`: a store for "rightmost" hashes, manipulated only when required to
// calculate Merkle paths that need the rightmost hashes
// as part of them.
// `mroot`: during the construction of `rhs` we can also calculate the Merkle
// root of the tree. If `rhs_ok` is true then it holds the up-to-date
// root value.
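// Illustrative example (added, assuming the hash naming of the `insert_`
// comment below): for a tree holding three leaves h0, h1, h2 (i = 0, j = 3),
// `hs[0]` stores [h0; h1; h2] and `hs[1]` stores [h01] where h01 compresses h0
// and h1; the unpaired h2 has no parent yet, so computing a root additionally
// requires the rightmost hashes kept in `rhs`.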
noeq type merkle_tree =
| MT: hash_size:hash_size_t ->
offset:offset_t ->
i:index_t -> j:index_t{i <= j /\ add64_fits offset j} ->
hs:hash_vv hash_size {V.size_of hs = merkle_tree_size_lg} ->
rhs_ok:bool ->
rhs:hash_vec #hash_size {V.size_of rhs = merkle_tree_size_lg} ->
mroot:hash #hash_size ->
hash_spec:Ghost.erased (MTS.hash_fun_t #(U32.v hash_size)) ->
hash_fun:hash_fun_t #hash_size #hash_spec ->
merkle_tree
type mt_p = B.pointer merkle_tree
type const_mt_p = const_pointer merkle_tree
inline_for_extraction
let merkle_tree_conditions (#hsz:Ghost.erased hash_size_t) (offset:uint64_t) (i j:uint32_t) (hs:hash_vv hsz) (rhs_ok:bool) (rhs:hash_vec #hsz) (mroot:hash #hsz): Tot bool =
j >= i && add64_fits offset j &&
V.size_of hs = merkle_tree_size_lg &&
V.size_of rhs = merkle_tree_size_lg
// The maximum number of currently held elements in the tree is (2^32 - 1).
// cwinter: even when using 64-bit indices, we fail if the underlying 32-bit
// vector is full; this can be fixed if necessary.
private inline_for_extraction
val mt_not_full_nst: mtv:merkle_tree -> Tot bool
let mt_not_full_nst mtv = MT?.j mtv < uint32_32_max
val mt_not_full: HS.mem -> mt_p -> GTot bool
let mt_not_full h mt = mt_not_full_nst (B.get h mt 0)
/// (Memory) Safety
val offset_of: i:index_t -> Tot index_t
let offset_of i = if i % 2ul = 0ul then i else i - 1ul
// `mt_safe_elts` says that it is safe to access an element from `i` to `j - 1`
// at level `lv` in the Merkle tree, i.e., hs[lv][k] (i <= k < j) is a valid
// element.
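// For instance (an informal reading, not from the original comments): with
// i = 0ul and j = 3ul, the definition below requires the level-0 vector
// `V.get h hs 0ul` to have size 3ul (= j - offset_of i), the level-1 vector to
// have size 1ul (for i / 2ul = 0ul and j / 2ul = 1ul), the level-2 vector to
// have size 0ul, and so on up to `merkle_tree_size_lg`.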
inline_for_extraction noextract
val mt_safe_elts:
#hsz:hash_size_t ->
h:HS.mem -> lv:uint32_t{lv <= merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
i:index_t -> j:index_t{j >= i} ->
GTot Type0 (decreases (32 - U32.v lv))
let rec mt_safe_elts #hsz h lv hs i j =
if lv = merkle_tree_size_lg then true
else (let ofs = offset_of i in
V.size_of (V.get h hs lv) == j - ofs /\
mt_safe_elts #hsz h (lv + 1ul) hs (i / 2ul) (j / 2ul))
#push-options "--initial_fuel 1 --max_fuel 1"
val mt_safe_elts_constr:
#hsz:hash_size_t ->
h:HS.mem -> lv:uint32_t{lv < merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
i:index_t -> j:index_t{j >= i} ->
Lemma (requires (V.size_of (V.get h hs lv) == j - offset_of i /\
mt_safe_elts #hsz h (lv + 1ul) hs (i / 2ul) (j / 2ul)))
(ensures (mt_safe_elts #hsz h lv hs i j))
let mt_safe_elts_constr #_ h lv hs i j = ()
val mt_safe_elts_head:
#hsz:hash_size_t ->
h:HS.mem -> lv:uint32_t{lv < merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
i:index_t -> j:index_t{j >= i} ->
Lemma (requires (mt_safe_elts #hsz h lv hs i j))
(ensures (V.size_of (V.get h hs lv) == j - offset_of i))
let mt_safe_elts_head #_ h lv hs i j = ()
val mt_safe_elts_rec:
#hsz:hash_size_t ->
h:HS.mem -> lv:uint32_t{lv < merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
i:index_t -> j:index_t{j >= i} ->
Lemma (requires (mt_safe_elts #hsz h lv hs i j))
(ensures (mt_safe_elts #hsz h (lv + 1ul) hs (i / 2ul) (j / 2ul)))
let mt_safe_elts_rec #_ h lv hs i j = ()
val mt_safe_elts_init:
#hsz:hash_size_t ->
h:HS.mem -> lv:uint32_t{lv <= merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
Lemma (requires (V.forall_ h hs lv (V.size_of hs)
(fun hv -> V.size_of hv = 0ul)))
(ensures (mt_safe_elts #hsz h lv hs 0ul 0ul))
(decreases (32 - U32.v lv))
let rec mt_safe_elts_init #hsz h lv hs =
if lv = merkle_tree_size_lg then ()
else mt_safe_elts_init #hsz h (lv + 1ul) hs
#pop-options
val mt_safe_elts_preserved:
#hsz:hash_size_t ->
lv:uint32_t{lv <= merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
i:index_t -> j:index_t{j >= i} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 hs /\
mt_safe_elts #hsz h0 lv hs i j /\
loc_disjoint p (V.loc_vector_within hs lv (V.size_of hs)) /\
modifies p h0 h1))
(ensures (mt_safe_elts #hsz h1 lv hs i j))
(decreases (32 - U32.v lv))
[SMTPat (V.live h0 hs);
SMTPat (mt_safe_elts #hsz h0 lv hs i j);
SMTPat (loc_disjoint p (RV.loc_rvector hs));
SMTPat (modifies p h0 h1)]
#push-options "--z3rlimit 100 --initial_fuel 2 --max_fuel 2"
let rec mt_safe_elts_preserved #hsz lv hs i j p h0 h1 =
if lv = merkle_tree_size_lg then ()
else (V.get_preserved hs lv p h0 h1;
mt_safe_elts_preserved #hsz (lv + 1ul) hs (i / 2ul) (j / 2ul) p h0 h1)
#pop-options
// `mt_safe` is the invariant of a Merkle tree throughout its lifetime.
// It includes liveness, regionality, disjointness (among the component data
// structures), and valid element access (`mt_safe_elts`).
inline_for_extraction noextract
val mt_safe: HS.mem -> mt_p -> GTot Type0
let mt_safe h mt =
B.live h mt /\ B.freeable mt /\
(let mtv = B.get h mt 0 in
// Liveness & Accessibility
RV.rv_inv h (MT?.hs mtv) /\
RV.rv_inv h (MT?.rhs mtv) /\
Rgl?.r_inv (hreg (MT?.hash_size mtv)) h (MT?.mroot mtv) /\
mt_safe_elts h 0ul (MT?.hs mtv) (MT?.i mtv) (MT?.j mtv) /\
// Regionality
HH.extends (V.frameOf (MT?.hs mtv)) (B.frameOf mt) /\
HH.extends (V.frameOf (MT?.rhs mtv)) (B.frameOf mt) /\
HH.extends (B.frameOf (MT?.mroot mtv)) (B.frameOf mt) /\
HH.disjoint (V.frameOf (MT?.hs mtv)) (V.frameOf (MT?.rhs mtv)) /\
HH.disjoint (V.frameOf (MT?.hs mtv)) (B.frameOf (MT?.mroot mtv)) /\
HH.disjoint (V.frameOf (MT?.rhs mtv)) (B.frameOf (MT?.mroot mtv)))
// Since a Merkle tree satisfies regionality, it's ok to take all regions from
// a tree pointer as a location of the tree.
val mt_loc: mt_p -> GTot loc
let mt_loc mt = B.loc_all_regions_from false (B.frameOf mt)
val mt_safe_preserved:
mt:mt_p -> p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (mt_safe h0 mt /\
loc_disjoint p (mt_loc mt) /\
modifies p h0 h1))
(ensures (B.get h0 mt 0 == B.get h1 mt 0 /\
mt_safe h1 mt))
let mt_safe_preserved mt p h0 h1 =
assert (loc_includes (mt_loc mt) (B.loc_buffer mt));
let mtv = B.get h0 mt 0 in
assert (loc_includes (mt_loc mt) (RV.loc_rvector (MT?.hs mtv)));
assert (loc_includes (mt_loc mt) (RV.loc_rvector (MT?.rhs mtv)));
assert (loc_includes (mt_loc mt) (V.loc_vector (MT?.hs mtv)));
assert (loc_includes (mt_loc mt)
(B.loc_all_regions_from false (B.frameOf (MT?.mroot mtv))));
RV.rv_inv_preserved (MT?.hs mtv) p h0 h1;
RV.rv_inv_preserved (MT?.rhs mtv) p h0 h1;
Rgl?.r_sep (hreg (MT?.hash_size mtv)) (MT?.mroot mtv) p h0 h1;
V.loc_vector_within_included (MT?.hs mtv) 0ul (V.size_of (MT?.hs mtv));
mt_safe_elts_preserved 0ul (MT?.hs mtv) (MT?.i mtv) (MT?.j mtv) p h0 h1
/// Lifting to a high-level Merkle tree structure
val mt_safe_elts_spec:
#hsz:hash_size_t ->
h:HS.mem ->
lv:uint32_t{lv <= merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
i:index_t ->
j:index_t{j >= i} ->
Lemma (requires (RV.rv_inv h hs /\
mt_safe_elts #hsz h lv hs i j))
(ensures (MTH.hs_wf_elts #(U32.v hsz)
(U32.v lv) (RV.as_seq h hs)
(U32.v i) (U32.v j)))
(decreases (32 - U32.v lv))
#push-options "--z3rlimit 100 --initial_fuel 2 --max_fuel 2"
let rec mt_safe_elts_spec #_ h lv hs i j =
if lv = merkle_tree_size_lg then ()
else mt_safe_elts_spec h (lv + 1ul) hs (i / 2ul) (j / 2ul)
#pop-options
val merkle_tree_lift:
h:HS.mem ->
mtv:merkle_tree{
RV.rv_inv h (MT?.hs mtv) /\
RV.rv_inv h (MT?.rhs mtv) /\
Rgl?.r_inv (hreg (MT?.hash_size mtv)) h (MT?.mroot mtv) /\
mt_safe_elts #(MT?.hash_size mtv) h 0ul (MT?.hs mtv) (MT?.i mtv) (MT?.j mtv)} ->
GTot (r:MTH.merkle_tree #(U32.v (MT?.hash_size mtv)) {MTH.mt_wf_elts #_ r})
let merkle_tree_lift h mtv =
mt_safe_elts_spec h 0ul (MT?.hs mtv) (MT?.i mtv) (MT?.j mtv);
MTH.MT #(U32.v (MT?.hash_size mtv))
(U32.v (MT?.i mtv))
(U32.v (MT?.j mtv))
(RV.as_seq h (MT?.hs mtv))
(MT?.rhs_ok mtv)
(RV.as_seq h (MT?.rhs mtv))
(Rgl?.r_repr (hreg (MT?.hash_size mtv)) h (MT?.mroot mtv))
(Ghost.reveal (MT?.hash_spec mtv))
val mt_lift:
h:HS.mem -> mt:mt_p{mt_safe h mt} ->
GTot (r:MTH.merkle_tree #(U32.v (MT?.hash_size (B.get h mt 0))) {MTH.mt_wf_elts #_ r})
let mt_lift h mt =
merkle_tree_lift h (B.get h mt 0)
val mt_preserved:
mt:mt_p -> p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (mt_safe h0 mt /\
loc_disjoint p (mt_loc mt) /\
modifies p h0 h1))
(ensures (mt_safe_preserved mt p h0 h1;
mt_lift h0 mt == mt_lift h1 mt))
let mt_preserved mt p h0 h1 =
assert (loc_includes (B.loc_all_regions_from false (B.frameOf mt))
(B.loc_buffer mt));
B.modifies_buffer_elim mt p h0 h1;
assert (B.get h0 mt 0 == B.get h1 mt 0);
assert (loc_includes (B.loc_all_regions_from false (B.frameOf mt))
(RV.loc_rvector (MT?.hs (B.get h0 mt 0))));
assert (loc_includes (B.loc_all_regions_from false (B.frameOf mt))
(RV.loc_rvector (MT?.rhs (B.get h0 mt 0))));
assert (loc_includes (B.loc_all_regions_from false (B.frameOf mt))
(B.loc_buffer (MT?.mroot (B.get h0 mt 0))));
RV.as_seq_preserved (MT?.hs (B.get h0 mt 0)) p h0 h1;
RV.as_seq_preserved (MT?.rhs (B.get h0 mt 0)) p h0 h1;
B.modifies_buffer_elim (MT?.mroot (B.get h0 mt 0)) p h0 h1
/// Construction
// Note that the public function for creation is `mt_create` defined below,
// which builds a tree with an initial hash.
#push-options "--z3rlimit 100 --initial_fuel 1 --max_fuel 1 --initial_ifuel 1 --max_ifuel 1"
private
val create_empty_mt:
hash_size:hash_size_t ->
hash_spec:Ghost.erased (MTS.hash_fun_t #(U32.v hash_size)) ->
hash_fun:hash_fun_t #hash_size #hash_spec ->
r:HST.erid ->
HST.ST mt_p
(requires (fun _ -> true))
(ensures (fun h0 mt h1 ->
let dmt = B.get h1 mt 0 in
// memory safety
B.frameOf mt = r /\
modifies (mt_loc mt) h0 h1 /\
mt_safe h1 mt /\
mt_not_full h1 mt /\
// correctness
MT?.hash_size dmt = hash_size /\
MT?.offset dmt = 0UL /\
merkle_tree_lift h1 dmt == MTH.create_empty_mt #_ #(Ghost.reveal hash_spec) ()))
let create_empty_mt hsz hash_spec hash_fun r =
[@inline_let] let hrg = hreg hsz in
[@inline_let] let hvrg = hvreg hsz in
[@inline_let] let hvvrg = hvvreg hsz in
let hs_region = HST.new_region r in
let hs = RV.alloc_rid hvrg merkle_tree_size_lg hs_region in
let h0 = HST.get () in
mt_safe_elts_init #hsz h0 0ul hs;
let rhs_region = HST.new_region r in
let rhs = RV.alloc_rid hrg merkle_tree_size_lg rhs_region in
let h1 = HST.get () in
assert (RV.as_seq h1 rhs == S.create 32 (MTH.hash_init #(U32.v hsz)));
RV.rv_inv_preserved hs (V.loc_vector rhs) h0 h1;
RV.as_seq_preserved hs (V.loc_vector rhs) h0 h1;
V.loc_vector_within_included hs 0ul (V.size_of hs);
mt_safe_elts_preserved #hsz 0ul hs 0ul 0ul (V.loc_vector rhs) h0 h1;
let mroot_region = HST.new_region r in
let mroot = rg_alloc hrg mroot_region in
let h2 = HST.get () in
RV.as_seq_preserved hs loc_none h1 h2;
RV.as_seq_preserved rhs loc_none h1 h2;
mt_safe_elts_preserved #hsz 0ul hs 0ul 0ul loc_none h1 h2;
let mt = B.malloc r (MT hsz 0UL 0ul 0ul hs false rhs mroot hash_spec hash_fun) 1ul in
let h3 = HST.get () in
RV.as_seq_preserved hs loc_none h2 h3;
RV.as_seq_preserved rhs loc_none h2 h3;
Rgl?.r_sep hrg mroot loc_none h2 h3;
mt_safe_elts_preserved #hsz 0ul hs 0ul 0ul loc_none h2 h3;
mt
#pop-options
/// Destruction (free)
val mt_free: mt:mt_p ->
HST.ST unit
(requires (fun h0 -> mt_safe h0 mt))
(ensures (fun h0 _ h1 -> modifies (mt_loc mt) h0 h1))
#push-options "--z3rlimit 100"
let mt_free mt =
let mtv = !*mt in
RV.free (MT?.hs mtv);
RV.free (MT?.rhs mtv);
[@inline_let] let rg = hreg (MT?.hash_size mtv) in
rg_free rg (MT?.mroot mtv);
B.free mt
#pop-options
/// Insertion
private
val as_seq_sub_upd:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector #a #rst rg ->
i:uint32_t{i < V.size_of rv} -> v:Rgl?.repr rg ->
Lemma (requires (RV.rv_inv h rv))
(ensures (S.equal (S.upd (RV.as_seq h rv) (U32.v i) v)
(S.append
(RV.as_seq_sub h rv 0ul i)
(S.cons v (RV.as_seq_sub h rv (i + 1ul) (V.size_of rv))))))
#push-options "--z3rlimit 20"
let as_seq_sub_upd #a #rst #rg h rv i v =
Seq.Properties.slice_upd (RV.as_seq h rv) 0 (U32.v i) (U32.v i) v;
Seq.Properties.slice_upd (RV.as_seq h rv) (U32.v i + 1) (U32.v (V.size_of rv)) (U32.v i) v;
RV.as_seq_seq_slice rg h (V.as_seq h rv)
0 (U32.v (V.size_of rv)) 0 (U32.v i);
assert (S.equal (S.slice (RV.as_seq h rv) 0 (U32.v i))
(RV.as_seq_sub h rv 0ul i));
RV.as_seq_seq_slice rg h (V.as_seq h rv)
0 (U32.v (V.size_of rv)) (U32.v i + 1) (U32.v (V.size_of rv));
assert (S.equal (S.slice (RV.as_seq h rv) (U32.v i + 1) (U32.v (V.size_of rv)))
(RV.as_seq_sub h rv (i + 1ul) (V.size_of rv)));
assert (S.index (S.upd (RV.as_seq h rv) (U32.v i) v) (U32.v i) == v)
#pop-options
// `hash_vv_insert_copy` inserts a hash element at level `lv` by copying its
// content and pushing the copy onto `hs[lv]`. For the detailed insertion
// procedure, see `insert_` and `mt_insert`.
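// For example (illustrative only): if `hs[lv]` currently holds [h0; h1], then
// after `hash_vv_insert_copy lv i j hs v` it holds [h0; h1; v'] where v' is a
// copy of `v` allocated alongside the existing elements; this is the `S.snoc`
// in the postcondition below.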
#push-options "--z3rlimit 100 --initial_fuel 1 --max_fuel 1"
private
inline_for_extraction
val hash_vv_insert_copy:
#hsz:hash_size_t ->
lv:uint32_t{lv < merkle_tree_size_lg} ->
i:Ghost.erased index_t ->
j:index_t{
Ghost.reveal i <= j &&
U32.v j < pow2 (32 - U32.v lv) - 1 &&
j < uint32_32_max} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
v:hash #hsz ->
HST.ST unit
(requires (fun h0 ->
RV.rv_inv h0 hs /\
Rgl?.r_inv (hreg hsz) h0 v /\
HH.disjoint (V.frameOf hs) (B.frameOf v) /\
mt_safe_elts #hsz h0 lv hs (Ghost.reveal i) j))
(ensures (fun h0 _ h1 ->
// memory safety
modifies (loc_union
(RV.rs_loc_elem (hvreg hsz) (V.as_seq h0 hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul)))
h0 h1 /\
RV.rv_inv h1 hs /\
Rgl?.r_inv (hreg hsz) h1 v /\
V.size_of (V.get h1 hs lv) == j + 1ul - offset_of (Ghost.reveal i) /\
V.size_of (V.get h1 hs lv) == V.size_of (V.get h0 hs lv) + 1ul /\
mt_safe_elts #hsz h1 (lv + 1ul) hs (Ghost.reveal i / 2ul) (j / 2ul) /\
RV.rv_loc_elems h0 hs (lv + 1ul) (V.size_of hs) ==
RV.rv_loc_elems h1 hs (lv + 1ul) (V.size_of hs) /\
// correctness
(mt_safe_elts_spec #hsz h0 lv hs (Ghost.reveal i) j;
S.equal (RV.as_seq h1 hs)
(MTH.hashess_insert
(U32.v lv) (U32.v (Ghost.reveal i)) (U32.v j)
(RV.as_seq h0 hs) (Rgl?.r_repr (hreg hsz) h0 v))) /\
S.equal (S.index (RV.as_seq h1 hs) (U32.v lv))
(S.snoc (S.index (RV.as_seq h0 hs) (U32.v lv))
(Rgl?.r_repr (hreg hsz) h0 v))))
let hash_vv_insert_copy #hsz lv i j hs v =
let hh0 = HST.get () in
mt_safe_elts_rec hh0 lv hs (Ghost.reveal i) j;
/// 1) Insert an element at the level `lv`, where the new vector is not yet
/// connected to `hs`.
let ihv = RV.insert_copy (hcpy hsz) (V.index hs lv) v in
let hh1 = HST.get () in
// 1-0) Basic disjointness conditions
V.forall2_forall_left hh0 hs 0ul (V.size_of hs) lv
(fun b1 b2 -> HH.disjoint (Rgl?.region_of (hvreg hsz) b1)
(Rgl?.region_of (hvreg hsz) b2));
V.forall2_forall_right hh0 hs 0ul (V.size_of hs) lv
(fun b1 b2 -> HH.disjoint (Rgl?.region_of (hvreg hsz) b1)
(Rgl?.region_of (hvreg hsz) b2));
V.loc_vector_within_included hs lv (lv + 1ul);
V.loc_vector_within_included hs (lv + 1ul) (V.size_of hs);
V.loc_vector_within_disjoint hs lv (lv + 1ul) (lv + 1ul) (V.size_of hs);
// 1-1) For the `modifies` postcondition.
assert (modifies (RV.rs_loc_elem (hvreg hsz) (V.as_seq hh0 hs) (U32.v lv)) hh0 hh1);
// 1-2) Preservation
Rgl?.r_sep (hreg hsz) v (RV.loc_rvector (V.get hh0 hs lv)) hh0 hh1;
RV.rv_loc_elems_preserved
hs (lv + 1ul) (V.size_of hs)
(RV.loc_rvector (V.get hh0 hs lv)) hh0 hh1;
// 1-3) For `mt_safe_elts`
assert (V.size_of ihv == j + 1ul - offset_of (Ghost.reveal i)); // head updated
mt_safe_elts_preserved
(lv + 1ul) hs (Ghost.reveal i / 2ul) (j / 2ul)
(RV.loc_rvector (V.get hh0 hs lv)) hh0 hh1; // tail not yet
// 1-4) For the `rv_inv` postcondition
RV.rs_loc_elems_elem_disj
(hvreg hsz) (V.as_seq hh0 hs) (V.frameOf hs)
0 (U32.v (V.size_of hs)) 0 (U32.v lv) (U32.v lv);
RV.rs_loc_elems_parent_disj
(hvreg hsz) (V.as_seq hh0 hs) (V.frameOf hs)
0 (U32.v lv);
RV.rv_elems_inv_preserved
hs 0ul lv (RV.loc_rvector (V.get hh0 hs lv))
hh0 hh1;
assert (RV.rv_elems_inv hh1 hs 0ul lv);
RV.rs_loc_elems_elem_disj
(hvreg hsz) (V.as_seq hh0 hs) (V.frameOf hs)
0 (U32.v (V.size_of hs))
(U32.v lv + 1) (U32.v (V.size_of hs))
(U32.v lv);
RV.rs_loc_elems_parent_disj
(hvreg hsz) (V.as_seq hh0 hs) (V.frameOf hs)
(U32.v lv + 1) (U32.v (V.size_of hs));
RV.rv_elems_inv_preserved
hs (lv + 1ul) (V.size_of hs) (RV.loc_rvector (V.get hh0 hs lv))
hh0 hh1;
assert (RV.rv_elems_inv hh1 hs (lv + 1ul) (V.size_of hs));
// assert (rv_itself_inv hh1 hs);
// assert (elems_reg hh1 hs);
// 1-5) Correctness
assert (S.equal (RV.as_seq hh1 ihv)
(S.snoc (RV.as_seq hh0 (V.get hh0 hs lv)) (Rgl?.r_repr (hreg hsz) hh0 v)));
/// 2) Assign the updated vector to `hs` at the level `lv`.
RV.assign hs lv ihv;
let hh2 = HST.get () in
// 2-1) For the `modifies` postcondition.
assert (modifies (V.loc_vector_within hs lv (lv + 1ul)) hh1 hh2);
assert (modifies (loc_union
(RV.rs_loc_elem (hvreg hsz) (V.as_seq hh0 hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul))) hh0 hh2);
// 2-2) Preservation
Rgl?.r_sep (hreg hsz) v (RV.loc_rvector hs) hh1 hh2;
RV.rv_loc_elems_preserved
hs (lv + 1ul) (V.size_of hs)
(V.loc_vector_within hs lv (lv + 1ul)) hh1 hh2;
// 2-3) For `mt_safe_elts`
assert (V.size_of (V.get hh2 hs lv) == j + 1ul - offset_of (Ghost.reveal i));
mt_safe_elts_preserved
(lv + 1ul) hs (Ghost.reveal i / 2ul) (j / 2ul)
(V.loc_vector_within hs lv (lv + 1ul)) hh1 hh2;
// 2-4) Correctness
RV.as_seq_sub_preserved hs 0ul lv (loc_rvector ihv) hh0 hh1;
RV.as_seq_sub_preserved hs (lv + 1ul) merkle_tree_size_lg (loc_rvector ihv) hh0 hh1;
assert (S.equal (RV.as_seq hh2 hs)
(S.append
(RV.as_seq_sub hh0 hs 0ul lv)
(S.cons (RV.as_seq hh1 ihv)
(RV.as_seq_sub hh0 hs (lv + 1ul) merkle_tree_size_lg))));
as_seq_sub_upd hh0 hs lv (RV.as_seq hh1 ihv)
#pop-options
private
val insert_index_helper_even:
lv:uint32_t{lv < merkle_tree_size_lg} ->
j:index_t{U32.v j < pow2 (32 - U32.v lv) - 1} ->
Lemma (requires (j % 2ul <> 1ul))
(ensures (U32.v j % 2 <> 1 /\ j / 2ul == (j + 1ul) / 2ul))
let insert_index_helper_even lv j = ()
#push-options "--z3rlimit 100 --initial_fuel 1 --max_fuel 1 --initial_ifuel 1 --max_ifuel 1"
private
val insert_index_helper_odd:
lv:uint32_t{lv < merkle_tree_size_lg} ->
i:index_t ->
j:index_t{i <= j && U32.v j < pow2 (32 - U32.v lv) - 1} ->
Lemma (requires (j % 2ul = 1ul /\
j < uint32_32_max))
(ensures (U32.v j % 2 = 1 /\
U32.v (j / 2ul) < pow2 (32 - U32.v (lv + 1ul)) - 1 /\
(j + 1ul) / 2ul == j / 2ul + 1ul /\
j - offset_of i > 0ul))
let insert_index_helper_odd lv i j = ()
#pop-options
private
val loc_union_assoc_4:
a:loc -> b:loc -> c:loc -> d:loc ->
Lemma (loc_union (loc_union a b) (loc_union c d) ==
loc_union (loc_union a c) (loc_union b d))
let loc_union_assoc_4 a b c d =
loc_union_assoc (loc_union a b) c d;
loc_union_assoc a b c;
loc_union_assoc a c b;
loc_union_assoc (loc_union a c) b d
private
val insert_modifies_rec_helper:
#hsz:hash_size_t ->
lv:uint32_t{lv < merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
aloc:loc ->
h:HS.mem ->
Lemma (loc_union
(loc_union
(loc_union
(RV.rs_loc_elem (hvreg hsz) (V.as_seq h hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul)))
aloc)
(loc_union
(loc_union
(RV.rv_loc_elems h hs (lv + 1ul) (V.size_of hs))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs)))
aloc) ==
loc_union
(loc_union
(RV.rv_loc_elems h hs lv (V.size_of hs))
(V.loc_vector_within hs lv (V.size_of hs)))
aloc)
#push-options "--z3rlimit 100 --initial_fuel 2 --max_fuel 2"
let insert_modifies_rec_helper #hsz lv hs aloc h =
assert (V.loc_vector_within hs lv (V.size_of hs) ==
loc_union (V.loc_vector_within hs lv (lv + 1ul))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs)));
RV.rs_loc_elems_rec_inverse (hvreg hsz) (V.as_seq h hs) (U32.v lv) (U32.v (V.size_of hs));
assert (RV.rv_loc_elems h hs lv (V.size_of hs) ==
loc_union (RV.rs_loc_elem (hvreg hsz) (V.as_seq h hs) (U32.v lv))
(RV.rv_loc_elems h hs (lv + 1ul) (V.size_of hs)));
// Applying some association rules...
loc_union_assoc
(loc_union
(RV.rs_loc_elem (hvreg hsz) (V.as_seq h hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul))) aloc
(loc_union
(loc_union
(RV.rv_loc_elems h hs (lv + 1ul) (V.size_of hs))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs)))
aloc);
loc_union_assoc
(loc_union
(RV.rv_loc_elems h hs (lv + 1ul) (V.size_of hs))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs))) aloc aloc;
loc_union_assoc
(loc_union
(RV.rs_loc_elem (hvreg hsz) (V.as_seq h hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul)))
(loc_union
(RV.rv_loc_elems h hs (lv + 1ul) (V.size_of hs))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs)))
aloc;
loc_union_assoc_4
(RV.rs_loc_elem (hvreg hsz) (V.as_seq h hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul))
(RV.rv_loc_elems h hs (lv + 1ul) (V.size_of hs))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs))
#pop-options
private
val insert_modifies_union_loc_weakening:
l1:loc -> l2:loc -> l3:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (modifies l1 h0 h1))
(ensures (modifies (loc_union (loc_union l1 l2) l3) h0 h1))
let insert_modifies_union_loc_weakening l1 l2 l3 h0 h1 =
B.loc_includes_union_l l1 l2 l1;
B.loc_includes_union_l (loc_union l1 l2) l3 (loc_union l1 l2)
private
val insert_snoc_last_helper:
#a:Type -> s:S.seq a{S.length s > 0} -> v:a ->
Lemma (S.index (S.snoc s v) (S.length s - 1) == S.last s)
let insert_snoc_last_helper #a s v = ()
private
val rv_inv_rv_elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (RV.rv_inv h rv))
(ensures (RV.rv_elems_reg h rv i j))
let rv_inv_rv_elems_reg #a #rst #rg h rv i j = ()
// `insert_` recursively inserts the proper hashes at each level `lv` by
// accumulating a compressed hash. For example, if there are three leaf elements
// in the tree, `insert_` will change `hs` as follows:
// (`hij` is a compressed hash from `hi` to `hj`)
//
// BEFORE INSERTION AFTER INSERTION
// lv
// 0 h0 h1 h2 ====> h0 h1 h2 h3
// 1 h01 h01 h23
// 2 h03
//
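// Walking through this diagram step by step (an informal sketch, with the new
// leaf h3 passed in as the accumulator `acc`):
// - level 0 (j = 3): push h3 onto hs[0]; j is odd, so acc := hash h2 h3 = h23;
// - level 1 (j = 1): push h23 onto hs[1]; j is odd, so acc := hash h01 h23 = h03;
// - level 2 (j = 0): push h03 onto hs[2]; j is even, so the recursion stops.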
private
val insert_:
#hsz:hash_size_t ->
#hash_spec:Ghost.erased (MTS.hash_fun_t #(U32.v hsz)) ->
lv:uint32_t{lv < merkle_tree_size_lg} ->
i:Ghost.erased index_t ->
j:index_t{
Ghost.reveal i <= j &&
U32.v j < pow2 (32 - U32.v lv) - 1 &&
j < uint32_32_max} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
acc:hash #hsz ->
hash_fun:hash_fun_t #hsz #hash_spec ->
HST.ST unit
(requires (fun h0 ->
RV.rv_inv h0 hs /\
Rgl?.r_inv (hreg hsz) h0 acc /\
HH.disjoint (V.frameOf hs) (B.frameOf acc) /\
mt_safe_elts h0 lv hs (Ghost.reveal i) j))
(ensures (fun h0 _ h1 ->
// memory safety
modifies (loc_union
(loc_union
(RV.rv_loc_elems h0 hs lv (V.size_of hs))
(V.loc_vector_within hs lv (V.size_of hs)))
(B.loc_all_regions_from false (B.frameOf acc)))
h0 h1 /\
RV.rv_inv h1 hs /\
Rgl?.r_inv (hreg hsz) h1 acc /\
mt_safe_elts h1 lv hs (Ghost.reveal i) (j + 1ul) /\
// correctness
(mt_safe_elts_spec h0 lv hs (Ghost.reveal i) j;
S.equal (RV.as_seq h1 hs)
(MTH.insert_ #(U32.v hsz) #hash_spec (U32.v lv) (U32.v (Ghost.reveal i)) (U32.v j)
(RV.as_seq h0 hs) (Rgl?.r_repr (hreg hsz) h0 acc)))))
(decreases (U32.v j))
#push-options "--z3rlimit 800 --initial_fuel 1 --max_fuel 1 --initial_ifuel 1 --max_ifuel 1"
let rec insert_ #hsz #hash_spec lv i j hs acc hash_fun =
let hh0 = HST.get () in
hash_vv_insert_copy lv i j hs acc;
let hh1 = HST.get () in
// Base conditions
V.loc_vector_within_included hs lv (lv + 1ul);
V.loc_vector_within_included hs (lv + 1ul) (V.size_of hs);
V.loc_vector_within_disjoint hs lv (lv + 1ul) (lv + 1ul) (V.size_of hs);
assert (V.size_of (V.get hh1 hs lv) == j + 1ul - offset_of (Ghost.reveal i));
assert (mt_safe_elts hh1 (lv + 1ul) hs (Ghost.reveal i / 2ul) (j / 2ul));
if j % 2ul = 1ul
then (insert_index_helper_odd lv (Ghost.reveal i) j;
assert (S.length (S.index (RV.as_seq hh0 hs) (U32.v lv)) > 0);
let lvhs = V.index hs lv in
assert (U32.v (V.size_of lvhs) ==
S.length (S.index (RV.as_seq hh0 hs) (U32.v lv)) + 1);
assert (V.size_of lvhs > 1ul);
/// 3) Update the accumulator `acc`.
hash_vec_rv_inv_r_inv hh1 (V.get hh1 hs lv) (V.size_of (V.get hh1 hs lv) - 2ul);
assert (Rgl?.r_inv (hreg hsz) hh1 acc);
hash_fun (V.index lvhs (V.size_of lvhs - 2ul)) acc acc;
let hh2 = HST.get () in
// 3-1) For the `modifies` postcondition
assert (modifies (B.loc_all_regions_from false (B.frameOf acc)) hh1 hh2);
assert (modifies
(loc_union
(loc_union
(RV.rs_loc_elem (hvreg hsz) (V.as_seq hh0 hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul)))
(B.loc_all_regions_from false (B.frameOf acc)))
hh0 hh2);
// 3-2) Preservation
RV.rv_inv_preserved
hs (B.loc_region_only false (B.frameOf acc)) hh1 hh2;
RV.as_seq_preserved
hs (B.loc_region_only false (B.frameOf acc)) hh1 hh2;
RV.rv_loc_elems_preserved
hs (lv + 1ul) (V.size_of hs)
(B.loc_region_only false (B.frameOf acc)) hh1 hh2;
assert (RV.rv_inv hh2 hs);
assert (Rgl?.r_inv (hreg hsz) hh2 acc);
// 3-3) For `mt_safe_elts`
V.get_preserved hs lv
(B.loc_region_only false (B.frameOf acc)) hh1 hh2; // head preserved
mt_safe_elts_preserved
(lv + 1ul) hs (Ghost.reveal i / 2ul) (j / 2ul)
(B.loc_region_only false (B.frameOf acc)) hh1 hh2; // tail preserved
// 3-4) Correctness
insert_snoc_last_helper
(RV.as_seq hh0 (V.get hh0 hs lv))
(Rgl?.r_repr (hreg hsz) hh0 acc);
assert (S.equal (Rgl?.r_repr (hreg hsz) hh2 acc) // `nacc` in `MTH.insert_`
((Ghost.reveal hash_spec)
(S.last (S.index (RV.as_seq hh0 hs) (U32.v lv)))
(Rgl?.r_repr (hreg hsz) hh0 acc)));
/// 4) Recursion
insert_ (lv + 1ul)
(Ghost.hide (Ghost.reveal i / 2ul)) (j / 2ul)
hs acc hash_fun;
let hh3 = HST.get () in
// 4-0) Memory safety brought from the postcondition of the recursion
assert (RV.rv_inv hh3 hs);
assert (Rgl?.r_inv (hreg hsz) hh3 acc);
assert (modifies (loc_union
(loc_union
(RV.rv_loc_elems hh0 hs (lv + 1ul) (V.size_of hs))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs)))
(B.loc_all_regions_from false (B.frameOf acc)))
hh2 hh3);
assert (modifies
(loc_union
(loc_union
(loc_union
(RV.rs_loc_elem (hvreg hsz) (V.as_seq hh0 hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul)))
(B.loc_all_regions_from false (B.frameOf acc)))
(loc_union
(loc_union
(RV.rv_loc_elems hh0 hs (lv + 1ul) (V.size_of hs))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs)))
(B.loc_all_regions_from false (B.frameOf acc))))
hh0 hh3);
// 4-1) For `mt_safe_elts`
rv_inv_rv_elems_reg hh2 hs (lv + 1ul) (V.size_of hs);
RV.rv_loc_elems_included hh2 hs (lv + 1ul) (V.size_of hs);
assert (loc_disjoint
(V.loc_vector_within hs lv (lv + 1ul))
(RV.rv_loc_elems hh2 hs (lv + 1ul) (V.size_of hs)));
assert (loc_disjoint
(V.loc_vector_within hs lv (lv + 1ul))
(B.loc_all_regions_from false (B.frameOf acc)));
V.get_preserved hs lv
(loc_union
(loc_union
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs))
(RV.rv_loc_elems hh2 hs (lv + 1ul) (V.size_of hs)))
(B.loc_all_regions_from false (B.frameOf acc)))
hh2 hh3;
assert (V.size_of (V.get hh3 hs lv) ==
j + 1ul - offset_of (Ghost.reveal i)); // head preserved
assert (mt_safe_elts hh3 (lv + 1ul) hs
(Ghost.reveal i / 2ul) (j / 2ul + 1ul)); // tail by recursion
mt_safe_elts_constr hh3 lv hs (Ghost.reveal i) (j + 1ul);
assert (mt_safe_elts hh3 lv hs (Ghost.reveal i) (j + 1ul));
// 4-2) Correctness
mt_safe_elts_spec hh2 (lv + 1ul) hs (Ghost.reveal i / 2ul) (j / 2ul);
assert (S.equal (RV.as_seq hh3 hs)
(MTH.insert_ #(U32.v hsz) #(Ghost.reveal hash_spec) (U32.v lv + 1) (U32.v (Ghost.reveal i) / 2) (U32.v j / 2)
(RV.as_seq hh2 hs) (Rgl?.r_repr (hreg hsz) hh2 acc)));
mt_safe_elts_spec hh0 lv hs (Ghost.reveal i) j;
MTH.insert_rec #(U32.v hsz) #(Ghost.reveal hash_spec) (U32.v lv) (U32.v (Ghost.reveal i)) (U32.v j)
(RV.as_seq hh0 hs) (Rgl?.r_repr (hreg hsz) hh0 acc);
assert (S.equal (RV.as_seq hh3 hs)
(MTH.insert_ #(U32.v hsz) #(Ghost.reveal hash_spec) (U32.v lv) (U32.v (Ghost.reveal i)) (U32.v j)
(RV.as_seq hh0 hs) (Rgl?.r_repr (hreg hsz) hh0 acc))))
else (insert_index_helper_even lv j;
// memory safety
assert (mt_safe_elts hh1 (lv + 1ul) hs (Ghost.reveal i / 2ul) ((j + 1ul) / 2ul));
mt_safe_elts_constr hh1 lv hs (Ghost.reveal i) (j + 1ul);
assert (mt_safe_elts hh1 lv hs (Ghost.reveal i) (j + 1ul));
assert (modifies
(loc_union
(RV.rs_loc_elem (hvreg hsz) (V.as_seq hh0 hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul)))
hh0 hh1);
insert_modifies_union_loc_weakening
(loc_union
(RV.rs_loc_elem (hvreg hsz) (V.as_seq hh0 hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul)))
(B.loc_all_regions_from false (B.frameOf acc))
(loc_union
(loc_union
(RV.rv_loc_elems hh0 hs (lv + 1ul) (V.size_of hs))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs)))
(B.loc_all_regions_from false (B.frameOf acc)))
hh0 hh1;
// correctness
mt_safe_elts_spec hh0 lv hs (Ghost.reveal i) j;
MTH.insert_base #(U32.v hsz) #(Ghost.reveal hash_spec) (U32.v lv) (U32.v (Ghost.reveal i)) (U32.v j)
(RV.as_seq hh0 hs) (Rgl?.r_repr (hreg hsz) hh0 acc);
assert (S.equal (RV.as_seq hh1 hs)
(MTH.insert_ #(U32.v hsz) #(Ghost.reveal hash_spec) (U32.v lv) (U32.v (Ghost.reveal i)) (U32.v j)
(RV.as_seq hh0 hs) (Rgl?.r_repr (hreg hsz) hh0 acc))));
/// 5) Proving the postcondition after recursion
let hh4 = HST.get () in
// 5-1) For the `modifies` postcondition.
assert (modifies
(loc_union
(loc_union
(loc_union
(RV.rs_loc_elem (hvreg hsz) (V.as_seq hh0 hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul)))
(B.loc_all_regions_from false (B.frameOf acc)))
(loc_union
(loc_union
(RV.rv_loc_elems hh0 hs (lv + 1ul) (V.size_of hs))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs)))
(B.loc_all_regions_from false (B.frameOf acc))))
hh0 hh4);
insert_modifies_rec_helper
lv hs (B.loc_all_regions_from false (B.frameOf acc)) hh0;
// 5-2) For `mt_safe_elts`
assert (mt_safe_elts hh4 lv hs (Ghost.reveal i) (j + 1ul));
// 5-3) Preservation
assert (RV.rv_inv hh4 hs);
assert (Rgl?.r_inv (hreg hsz) hh4 acc);
// 5-4) Correctness
mt_safe_elts_spec hh0 lv hs (Ghost.reveal i) j;
assert (S.equal (RV.as_seq hh4 hs)
(MTH.insert_ #(U32.v hsz) #hash_spec (U32.v lv) (U32.v (Ghost.reveal i)) (U32.v j)
(RV.as_seq hh0 hs) (Rgl?.r_repr (hreg hsz) hh0 acc))) // QED
#pop-options
private inline_for_extraction
val mt_insert_pre_nst: mtv:merkle_tree -> v:hash #(MT?.hash_size mtv) -> Tot bool
let mt_insert_pre_nst mtv v = mt_not_full_nst mtv && add64_fits (MT?.offset mtv) ((MT?.j mtv) + 1ul)
val mt_insert_pre: #hsz:Ghost.erased hash_size_t -> mt:const_mt_p -> v:hash #hsz -> HST.ST bool
(requires (fun h0 -> mt_safe h0 (CB.cast mt) /\ (MT?.hash_size (B.get h0 (CB.cast mt) 0)) = Ghost.reveal hsz))
(ensures (fun _ _ _ -> True))
let mt_insert_pre #hsz mt v =
let mt = !*(CB.cast mt) in
assert (MT?.hash_size mt == (MT?.hash_size mt));
mt_insert_pre_nst mt v
// `mt_insert` inserts a hash into a Merkle tree. Note that this operation
// modifies the contents of `v`, since it uses `v` as an accumulator during
// insertion.
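// Informal usage note (not part of the specification): because `v` is
// consumed as the accumulator, a caller that still needs the original leaf
// hash after insertion should pass a copy of it to `mt_insert`.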
#push-options "--z3rlimit 100 --initial_fuel 1 --max_fuel 1 --initial_ifuel 1 --max_ifuel 1"
val mt_insert:
hsz:Ghost.erased hash_size_t ->
mt:mt_p -> v:hash #hsz ->
HST.ST unit
(requires (fun h0 ->
let dmt = B.get h0 mt 0 in
mt_safe h0 mt /\
Rgl?.r_inv (hreg hsz) h0 v /\
HH.disjoint (B.frameOf mt) (B.frameOf v) /\
MT?.hash_size dmt = Ghost.reveal hsz /\
mt_insert_pre_nst dmt v))
(ensures (fun h0 _ h1 ->
// memory safety
modifies (loc_union
(mt_loc mt)
(B.loc_all_regions_from false (B.frameOf v)))
h0 h1 /\
mt_safe h1 mt /\
// correctness
MT?.hash_size (B.get h1 mt 0) = Ghost.reveal hsz /\
mt_lift h1 mt == MTH.mt_insert (mt_lift h0 mt) (Rgl?.r_repr (hreg hsz) h0 v)))
#pop-options
#push-options "--z3rlimit 40"
let mt_insert hsz mt v =
let hh0 = HST.get () in
let mtv = !*mt in
let hs = MT?.hs mtv in
let hsz = MT?.hash_size mtv in
insert_ #hsz #(Ghost.reveal (MT?.hash_spec mtv)) 0ul (Ghost.hide (MT?.i mtv)) (MT?.j mtv) hs v (MT?.hash_fun mtv);
let hh1 = HST.get () in
RV.rv_loc_elems_included hh0 (MT?.hs mtv) 0ul (V.size_of hs);
V.loc_vector_within_included hs 0ul (V.size_of hs);
RV.rv_inv_preserved
(MT?.rhs mtv)
(loc_union
(loc_union
(RV.rv_loc_elems hh0 hs 0ul (V.size_of hs))
(V.loc_vector_within hs 0ul (V.size_of hs)))
(B.loc_all_regions_from false (B.frameOf v)))
hh0 hh1;
RV.as_seq_preserved
(MT?.rhs mtv)
(loc_union
(loc_union
(RV.rv_loc_elems hh0 hs 0ul (V.size_of hs))
(V.loc_vector_within hs 0ul (V.size_of hs)))
(B.loc_all_regions_from false (B.frameOf v)))
hh0 hh1;
Rgl?.r_sep (hreg hsz) (MT?.mroot mtv)
(loc_union
(loc_union
(RV.rv_loc_elems hh0 hs 0ul (V.size_of hs))
(V.loc_vector_within hs 0ul (V.size_of hs)))
(B.loc_all_regions_from false (B.frameOf v)))
hh0 hh1;
mt *= MT (MT?.hash_size mtv)
(MT?.offset mtv)
(MT?.i mtv)
(MT?.j mtv + 1ul)
(MT?.hs mtv)
false // `rhs` is always deprecated right after an insertion.
(MT?.rhs mtv)
(MT?.mroot mtv)
(MT?.hash_spec mtv)
(MT?.hash_fun mtv);
let hh2 = HST.get () in
RV.rv_inv_preserved
(MT?.hs mtv) (B.loc_buffer mt) hh1 hh2;
RV.rv_inv_preserved
(MT?.rhs mtv) (B.loc_buffer mt) hh1 hh2;
RV.as_seq_preserved
(MT?.hs mtv) (B.loc_buffer mt) hh1 hh2;
RV.as_seq_preserved
(MT?.rhs mtv) (B.loc_buffer mt) hh1 hh2;
Rgl?.r_sep (hreg hsz) (MT?.mroot mtv) (B.loc_buffer mt) hh1 hh2;
mt_safe_elts_preserved
0ul (MT?.hs mtv) (MT?.i mtv) (MT?.j mtv + 1ul) (B.loc_buffer mt)
hh1 hh2
#pop-options
// `mt_create` initializes a Merkle tree with a given initial hash `init`.
// A valid Merkle tree must contain at least one element.
val mt_create_custom:
hsz:hash_size_t ->
hash_spec:Ghost.erased (MTS.hash_fun_t #(U32.v hsz)) ->
r:HST.erid -> init:hash #hsz -> hash_fun:hash_fun_t #hsz #hash_spec -> HST.ST mt_p
(requires (fun h0 ->
Rgl?.r_inv (hreg hsz) h0 init /\
HH.disjoint r (B.frameOf init)))
(ensures (fun h0 mt h1 ->
// memory safety
modifies (loc_union (mt_loc mt) (B.loc_all_regions_from false (B.frameOf init))) h0 h1 /\
mt_safe h1 mt /\
// correctness
MT?.hash_size (B.get h1 mt 0) = hsz /\
mt_lift h1 mt == MTH.mt_create (U32.v hsz) (Ghost.reveal hash_spec) (Rgl?.r_repr (hreg hsz) h0 init)))
#push-options "--z3rlimit 40"
let mt_create_custom hsz hash_spec r init hash_fun =
let hh0 = HST.get () in
let mt = create_empty_mt hsz hash_spec hash_fun r in
mt_insert hsz mt init;
let hh2 = HST.get () in
mt
#pop-options
/// Construction and Destruction of paths
// Since each element pointer in `path` comes from the target Merkle tree and
// each element has a different location in `MT?.hs` (and thus a different region id),
// we cannot use the regionality property for `path`s. Hence we manually
// define the invariants and the representation here.
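// Concretely, a `path` (defined below) is a hash size paired with a vector of
// pointers to hashes that live inside the tree's regions; `lift_path` further
// down maps it to the high-level `MTH.path` sequence used in the
// specifications.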
noeq type path =
| Path: hash_size:hash_size_t ->
hashes:V.vector (hash #hash_size) ->
path
type path_p = B.pointer path
type const_path_p = const_pointer path
private
let phashes (h:HS.mem) (p:path_p)
: GTot (V.vector (hash #(Path?.hash_size (B.get h p 0))))
= Path?.hashes (B.get h p 0)
// Memory safety of a path as an invariant
inline_for_extraction noextract
val path_safe:
h:HS.mem -> mtr:HH.rid -> p:path_p -> GTot Type0
let path_safe h mtr p =
B.live h p /\ B.freeable p /\
V.live h (phashes h p) /\ V.freeable (phashes h p) /\
HST.is_eternal_region (V.frameOf (phashes h p)) /\
(let hsz = Path?.hash_size (B.get h p 0) in
V.forall_all h (phashes h p)
(fun hp -> Rgl?.r_inv (hreg hsz) h hp /\
HH.includes mtr (Rgl?.region_of (hreg hsz) hp)) /\
HH.extends (V.frameOf (phashes h p)) (B.frameOf p) /\
HH.disjoint mtr (B.frameOf p))
val path_loc: path_p -> GTot loc
let path_loc p = B.loc_all_regions_from false (B.frameOf p)
val lift_path_:
#hsz:hash_size_t ->
h:HS.mem ->
hs:S.seq (hash #hsz) ->
i:nat ->
j:nat{
i <= j /\ j <= S.length hs /\
V.forall_seq hs i j (fun hp -> Rgl?.r_inv (hreg hsz) h hp)} ->
GTot (hp:MTH.path #(U32.v hsz) {S.length hp = j - i}) (decreases j)
let rec lift_path_ #hsz h hs i j =
if i = j then S.empty
else (S.snoc (lift_path_ h hs i (j - 1))
(Rgl?.r_repr (hreg hsz) h (S.index hs (j - 1))))
// Representation of a path
val lift_path:
#hsz:hash_size_t ->
h:HS.mem -> mtr:HH.rid -> p:path_p {path_safe h mtr p /\ (Path?.hash_size (B.get h p 0)) = hsz} ->
GTot (hp:MTH.path #(U32.v hsz) {S.length hp = U32.v (V.size_of (phashes h p))})
let lift_path #hsz h mtr p =
lift_path_ h (V.as_seq h (phashes h p))
0 (S.length (V.as_seq h (phashes h p)))
val lift_path_index_:
#hsz:hash_size_t ->
h:HS.mem ->
hs:S.seq (hash #hsz) ->
i:nat -> j:nat{i <= j && j <= S.length hs} ->
k:nat{i <= k && k < j} ->
Lemma (requires (V.forall_seq hs i j (fun hp -> Rgl?.r_inv (hreg hsz) h hp)))
(ensures (Rgl?.r_repr (hreg hsz) h (S.index hs k) ==
S.index (lift_path_ h hs i j) (k - i)))
(decreases j)
[SMTPat (S.index (lift_path_ h hs i j) (k - i))]
#push-options "--initial_fuel 1 --max_fuel 1 --initial_ifuel 1 --max_ifuel 1"
let rec lift_path_index_ #hsz h hs i j k =
if i = j then ()
else if k = j - 1 then ()
else lift_path_index_ #hsz h hs i (j - 1) k
#pop-options
val lift_path_index:
h:HS.mem -> mtr:HH.rid ->
p:path_p -> i:uint32_t ->
Lemma (requires (path_safe h mtr p /\
i < V.size_of (phashes h p)))
(ensures (let hsz = Path?.hash_size (B.get h p 0) in
Rgl?.r_repr (hreg hsz) h (V.get h (phashes h p) i) ==
S.index (lift_path #(hsz) h mtr p) (U32.v i)))
let lift_path_index h mtr p i =
lift_path_index_ h (V.as_seq h (phashes h p))
0 (S.length (V.as_seq h (phashes h p))) (U32.v i)
val lift_path_eq:
#hsz:hash_size_t ->
h:HS.mem ->
hs1:S.seq (hash #hsz) -> hs2:S.seq (hash #hsz) ->
i:nat -> j:nat ->
Lemma (requires (i <= j /\ j <= S.length hs1 /\ j <= S.length hs2 /\
S.equal (S.slice hs1 i j) (S.slice hs2 i j) /\
V.forall_seq hs1 i j (fun hp -> Rgl?.r_inv (hreg hsz) h hp) /\
V.forall_seq hs2 i j (fun hp -> Rgl?.r_inv (hreg hsz) h hp)))
(ensures (S.equal (lift_path_ h hs1 i j) (lift_path_ h hs2 i j)))
let lift_path_eq #hsz h hs1 hs2 i j =
assert (forall (k:nat{i <= k && k < j}).
S.index (lift_path_ h hs1 i j) (k - i) ==
Rgl?.r_repr (hreg hsz) h (S.index hs1 k));
assert (forall (k:nat{i <= k && k < j}).
S.index (lift_path_ h hs2 i j) (k - i) ==
Rgl?.r_repr (hreg hsz) h (S.index hs2 k));
assert (forall (k:nat{k < j - i}).
S.index (lift_path_ h hs1 i j) k ==
Rgl?.r_repr (hreg hsz) h (S.index hs1 (k + i)));
assert (forall (k:nat{k < j - i}).
S.index (lift_path_ h hs2 i j) k ==
Rgl?.r_repr (hreg hsz) h (S.index hs2 (k + i)));
assert (forall (k:nat{k < j - i}).
S.index (S.slice hs1 i j) k == S.index (S.slice hs2 i j) k);
assert (forall (k:nat{i <= k && k < j}).
S.index (S.slice hs1 i j) (k - i) == S.index (S.slice hs2 i j) (k - i))
private
val path_safe_preserved_:
#hsz:hash_size_t ->
mtr:HH.rid -> hs:S.seq (hash #hsz) ->
i:nat -> j:nat{i <= j && j <= S.length hs} ->
dl:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma
(requires (V.forall_seq hs i j
(fun hp ->
Rgl?.r_inv (hreg hsz) h0 hp /\
HH.includes mtr (Rgl?.region_of (hreg hsz) hp)) /\
loc_disjoint dl (B.loc_all_regions_from false mtr) /\
modifies dl h0 h1))
(ensures (V.forall_seq hs i j
(fun hp ->
Rgl?.r_inv (hreg hsz) h1 hp /\
HH.includes mtr (Rgl?.region_of (hreg hsz) hp))))
(decreases j)
let rec path_safe_preserved_ #hsz mtr hs i j dl h0 h1 =
if i = j then ()
else (assert (loc_includes
(B.loc_all_regions_from false mtr)
(B.loc_all_regions_from false
(Rgl?.region_of (hreg hsz) (S.index hs (j - 1)))));
Rgl?.r_sep (hreg hsz) (S.index hs (j - 1)) dl h0 h1;
path_safe_preserved_ mtr hs i (j - 1) dl h0 h1)
val path_safe_preserved:
mtr:HH.rid -> p:path_p ->
dl:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (path_safe h0 mtr p /\
loc_disjoint dl (path_loc p) /\
loc_disjoint dl (B.loc_all_regions_from false mtr) /\
modifies dl h0 h1))
(ensures (path_safe h1 mtr p))
let path_safe_preserved mtr p dl h0 h1 =
assert (loc_includes (path_loc p) (B.loc_buffer p));
assert (loc_includes (path_loc p) (V.loc_vector (phashes h0 p)));
path_safe_preserved_
mtr (V.as_seq h0 (phashes h0 p))
0 (S.length (V.as_seq h0 (phashes h0 p))) dl h0 h1
val path_safe_init_preserved:
mtr:HH.rid -> p:path_p ->
dl:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (path_safe h0 mtr p /\
V.size_of (phashes h0 p) = 0ul /\
B.loc_disjoint dl (path_loc p) /\
modifies dl h0 h1))
(ensures (path_safe h1 mtr p /\
V.size_of (phashes h1 p) = 0ul))
let path_safe_init_preserved mtr p dl h0 h1 =
assert (loc_includes (path_loc p) (B.loc_buffer p));
assert (loc_includes (path_loc p) (V.loc_vector (phashes h0 p)))
val path_preserved_:
#hsz:hash_size_t ->
mtr:HH.rid ->
hs:S.seq (hash #hsz) ->
i:nat -> j:nat{i <= j && j <= S.length hs} ->
dl:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.forall_seq hs i j
(fun hp -> Rgl?.r_inv (hreg hsz) h0 hp /\
HH.includes mtr (Rgl?.region_of (hreg hsz) hp)) /\
loc_disjoint dl (B.loc_all_regions_from false mtr) /\
modifies dl h0 h1))
(ensures (path_safe_preserved_ mtr hs i j dl h0 h1;
S.equal (lift_path_ h0 hs i j)
(lift_path_ h1 hs i j)))
(decreases j)
#push-options "--initial_fuel 1 --max_fuel 1 --initial_ifuel 1 --max_ifuel 1"
let rec path_preserved_ #hsz mtr hs i j dl h0 h1 =
if i = j then ()
else (path_safe_preserved_ mtr hs i (j - 1) dl h0 h1;
path_preserved_ mtr hs i (j - 1) dl h0 h1;
assert (loc_includes
(B.loc_all_regions_from false mtr)
(B.loc_all_regions_from false
(Rgl?.region_of (hreg hsz) (S.index hs (j - 1)))));
Rgl?.r_sep (hreg hsz) (S.index hs (j - 1)) dl h0 h1)
#pop-options
val path_preserved:
mtr:HH.rid -> p:path_p ->
dl:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (path_safe h0 mtr p /\
loc_disjoint dl (path_loc p) /\
loc_disjoint dl (B.loc_all_regions_from false mtr) /\
modifies dl h0 h1))
(ensures (path_safe_preserved mtr p dl h0 h1;
let hsz0 = (Path?.hash_size (B.get h0 p 0)) in
let hsz1 = (Path?.hash_size (B.get h1 p 0)) in
let b:MTH.path = lift_path #hsz0 h0 mtr p in
let a:MTH.path = lift_path #hsz1 h1 mtr p in
hsz0 = hsz1 /\ S.equal b a))
let path_preserved mtr p dl h0 h1 =
assert (loc_includes (path_loc p) (B.loc_buffer p));
assert (loc_includes (path_loc p) (V.loc_vector (phashes h0 p)));
path_preserved_ mtr (V.as_seq h0 (phashes h0 p))
0 (S.length (V.as_seq h0 (phashes h0 p)))
dl h0 h1
val init_path:
hsz:hash_size_t ->
mtr:HH.rid -> r:HST.erid ->
HST.ST path_p
(requires (fun h0 -> HH.disjoint mtr r))
(ensures (fun h0 p h1 ->
// memory safety
path_safe h1 mtr p /\
// correctness
Path?.hash_size (B.get h1 p 0) = hsz /\
S.equal (lift_path #hsz h1 mtr p) S.empty))
let init_path hsz mtr r =
let nrid = HST.new_region r in
(B.malloc r (Path hsz (rg_alloc (hvreg hsz) nrid)) 1ul)
val clear_path:
mtr:HH.rid -> p:path_p ->
HST.ST unit
(requires (fun h0 -> path_safe h0 mtr p))
(ensures (fun h0 _ h1 ->
// memory safety
path_safe h1 mtr p /\
// correctness
V.size_of (phashes h1 p) = 0ul /\
S.equal (lift_path #(Path?.hash_size (B.get h1 p 0)) h1 mtr p) S.empty))
let clear_path mtr p =
let pv = !*p in
p *= Path (Path?.hash_size pv) (V.clear (Path?.hashes pv))
val free_path:
p:path_p ->
HST.ST unit
(requires (fun h0 ->
B.live h0 p /\ B.freeable p /\
V.live h0 (phashes h0 p) /\ V.freeable (phashes h0 p) /\
HH.extends (V.frameOf (phashes h0 p)) (B.frameOf p)))
(ensures (fun h0 _ h1 ->
modifies (path_loc p) h0 h1))
let free_path p =
let pv = !*p in
V.free (Path?.hashes pv);
B.free p
/// Getting the Merkle root and path
// Construct "rightmost hashes" for a given (incomplete) Merkle tree.
// This function calculates the Merkle root as well, which is the final
// accumulator value.
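// A small worked example (illustration only): for a tree with five leaves
// (i = 0, j = 5, actd = false), the call at lv = 0 sees an odd `j`, copies the
// dangling leaf `h4` into `acc`, and recurses with j = 2 and actd = true;
// lv = 1 has an even `j` and simply recurses with j = 1; at lv = 2 the `j` is
// odd again, so `acc` (= h4) is stored into `rhs` at level 2 and the
// accumulator becomes `hash(h03, h4)`, the Merkle root left in `acc` when the
// recursion bottoms out at j = 0.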
private
val construct_rhs:
#hsz:hash_size_t ->
#hash_spec:Ghost.erased (MTS.hash_fun_t #(U32.v hsz)) ->
lv:uint32_t{lv <= merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
rhs:hash_vec #hsz {V.size_of rhs = merkle_tree_size_lg} ->
i:index_t ->
j:index_t{i <= j && (U32.v j) < pow2 (32 - U32.v lv)} ->
acc:hash #hsz ->
actd:bool ->
hash_fun:hash_fun_t #hsz #(Ghost.reveal hash_spec) ->
HST.ST unit
(requires (fun h0 ->
RV.rv_inv h0 hs /\ RV.rv_inv h0 rhs /\
HH.disjoint (V.frameOf hs) (V.frameOf rhs) /\
Rgl?.r_inv (hreg hsz) h0 acc /\
HH.disjoint (B.frameOf acc) (V.frameOf hs) /\
HH.disjoint (B.frameOf acc) (V.frameOf rhs) /\
mt_safe_elts #hsz h0 lv hs i j))
(ensures (fun h0 _ h1 ->
// memory safety
modifies (loc_union
(RV.loc_rvector rhs)
(B.loc_all_regions_from false (B.frameOf acc)))
h0 h1 /\
RV.rv_inv h1 rhs /\
Rgl?.r_inv (hreg hsz) h1 acc /\
// correctness
(mt_safe_elts_spec #hsz h0 lv hs i j;
MTH.construct_rhs #(U32.v hsz) #(Ghost.reveal hash_spec)
(U32.v lv)
(Rgl?.r_repr (hvvreg hsz) h0 hs)
(Rgl?.r_repr (hvreg hsz) h0 rhs)
(U32.v i) (U32.v j)
(Rgl?.r_repr (hreg hsz) h0 acc) actd ==
(Rgl?.r_repr (hvreg hsz) h1 rhs, Rgl?.r_repr (hreg hsz) h1 acc)
)))
(decreases (U32.v j))
#push-options "--z3rlimit 250 --initial_fuel 1 --max_fuel 1 --initial_ifuel 1 --max_ifuel 1"
let rec construct_rhs #hsz #hash_spec lv hs rhs i j acc actd hash_fun =
let hh0 = HST.get () in
if j = 0ul then begin
assert (RV.rv_inv hh0 hs);
assert (mt_safe_elts #hsz hh0 lv hs i j);
mt_safe_elts_spec #hsz hh0 lv hs 0ul 0ul;
assert (MTH.hs_wf_elts #(U32.v hsz)
(U32.v lv) (RV.as_seq hh0 hs)
(U32.v i) (U32.v j));
let hh1 = HST.get() in
assert (MTH.construct_rhs #(U32.v hsz) #(Ghost.reveal hash_spec)
(U32.v lv)
(Rgl?.r_repr (hvvreg hsz) hh0 hs)
(Rgl?.r_repr (hvreg hsz) hh0 rhs)
(U32.v i) (U32.v j)
(Rgl?.r_repr (hreg hsz) hh0 acc) actd ==
(Rgl?.r_repr (hvreg hsz) hh1 rhs, Rgl?.r_repr (hreg hsz) hh1 acc))
end
else
let ofs = offset_of i in
begin
(if j % 2ul = 0ul
then begin
Math.Lemmas.pow2_double_mult (32 - U32.v lv - 1);
mt_safe_elts_rec #hsz hh0 lv hs i j;
construct_rhs #hsz #hash_spec (lv + 1ul) hs rhs (i / 2ul) (j / 2ul) acc actd hash_fun;
let hh1 = HST.get () in
// correctness
mt_safe_elts_spec #hsz hh0 lv hs i j;
MTH.construct_rhs_even #(U32.v hsz) #hash_spec
(U32.v lv) (Rgl?.r_repr (hvvreg hsz) hh0 hs) (Rgl?.r_repr (hvreg hsz) hh0 rhs)
(U32.v i) (U32.v j) (Rgl?.r_repr (hreg hsz) hh0 acc) actd;
assert (MTH.construct_rhs #(U32.v hsz) #hash_spec
(U32.v lv)
(Rgl?.r_repr (hvvreg hsz) hh0 hs)
(Rgl?.r_repr (hvreg hsz) hh0 rhs)
(U32.v i) (U32.v j)
(Rgl?.r_repr (hreg hsz) hh0 acc)
actd ==
(Rgl?.r_repr (hvreg hsz) hh1 rhs, Rgl?.r_repr (hreg hsz) hh1 acc))
end
else begin
if actd
then begin
RV.assign_copy (hcpy hsz) rhs lv acc;
let hh1 = HST.get () in
// memory safety
Rgl?.r_sep (hreg hsz) acc
(B.loc_all_regions_from false (V.frameOf rhs)) hh0 hh1;
RV.rv_inv_preserved
hs (B.loc_all_regions_from false (V.frameOf rhs))
hh0 hh1;
RV.as_seq_preserved
hs (B.loc_all_regions_from false (V.frameOf rhs))
hh0 hh1;
RV.rv_inv_preserved
(V.get hh0 hs lv) (B.loc_all_regions_from false (V.frameOf rhs))
hh0 hh1;
V.loc_vector_within_included hs lv (V.size_of hs);
mt_safe_elts_preserved lv hs i j
(B.loc_all_regions_from false (V.frameOf rhs))
hh0 hh1;
mt_safe_elts_head hh1 lv hs i j;
hash_vv_rv_inv_r_inv hh1 hs lv (j - 1ul - ofs);
// correctness
assert (S.equal (RV.as_seq hh1 rhs)
(S.upd (RV.as_seq hh0 rhs) (U32.v lv)
(Rgl?.r_repr (hreg hsz) hh0 acc)));
hash_fun (V.index (V.index hs lv) (j - 1ul - ofs)) acc acc;
let hh2 = HST.get () in
// memory safety
mt_safe_elts_preserved lv hs i j
(B.loc_all_regions_from false (B.frameOf acc)) hh1 hh2;
RV.rv_inv_preserved
hs (B.loc_region_only false (B.frameOf acc)) hh1 hh2;
RV.rv_inv_preserved
rhs (B.loc_region_only false (B.frameOf acc)) hh1 hh2;
RV.as_seq_preserved
hs (B.loc_region_only false (B.frameOf acc)) hh1 hh2;
RV.as_seq_preserved
rhs (B.loc_region_only false (B.frameOf acc)) hh1 hh2;
// correctness
hash_vv_as_seq_get_index hh0 hs lv (j - 1ul - ofs);
assert (Rgl?.r_repr (hreg hsz) hh2 acc ==
(Ghost.reveal hash_spec) (S.index (S.index (RV.as_seq hh0 hs) (U32.v lv))
(U32.v j - 1 - U32.v ofs))
(Rgl?.r_repr (hreg hsz) hh0 acc))
end
else begin
mt_safe_elts_head hh0 lv hs i j;
hash_vv_rv_inv_r_inv hh0 hs lv (j - 1ul - ofs);
hash_vv_rv_inv_disjoint hh0 hs lv (j - 1ul - ofs) (B.frameOf acc);
Cpy?.copy (hcpy hsz) hsz (V.index (V.index hs lv) (j - 1ul - ofs)) acc;
let hh1 = HST.get () in
// memory safety
V.loc_vector_within_included hs lv (V.size_of hs);
mt_safe_elts_preserved lv hs i j
(B.loc_all_regions_from false (B.frameOf acc)) hh0 hh1;
RV.rv_inv_preserved
hs (B.loc_all_regions_from false (B.frameOf acc)) hh0 hh1;
RV.rv_inv_preserved
rhs (B.loc_all_regions_from false (B.frameOf acc)) hh0 hh1;
RV.as_seq_preserved
hs (B.loc_all_regions_from false (B.frameOf acc)) hh0 hh1;
RV.as_seq_preserved
rhs (B.loc_all_regions_from false (B.frameOf acc)) hh0 hh1;
// correctness
hash_vv_as_seq_get_index hh0 hs lv (j - 1ul - ofs);
assert (Rgl?.r_repr (hreg hsz) hh1 acc ==
S.index (S.index (RV.as_seq hh0 hs) (U32.v lv))
(U32.v j - 1 - U32.v ofs))
end;
let hh3 = HST.get () in
assert (S.equal (RV.as_seq hh3 hs) (RV.as_seq hh0 hs));
assert (S.equal (RV.as_seq hh3 rhs)
(if actd
then S.upd (RV.as_seq hh0 rhs) (U32.v lv)
(Rgl?.r_repr (hreg hsz) hh0 acc)
else RV.as_seq hh0 rhs));
assert (Rgl?.r_repr (hreg hsz) hh3 acc ==
(if actd
then (Ghost.reveal hash_spec) (S.index (S.index (RV.as_seq hh0 hs) (U32.v lv))
(U32.v j - 1 - U32.v ofs))
(Rgl?.r_repr (hreg hsz) hh0 acc)
else S.index (S.index (RV.as_seq hh0 hs) (U32.v lv))
(U32.v j - 1 - U32.v ofs)));
mt_safe_elts_rec hh3 lv hs i j;
construct_rhs #hsz #hash_spec (lv + 1ul) hs rhs (i / 2ul) (j / 2ul) acc true hash_fun;
let hh4 = HST.get () in
mt_safe_elts_spec hh3 (lv + 1ul) hs (i / 2ul) (j / 2ul);
assert (MTH.construct_rhs #(U32.v hsz) #hash_spec
(U32.v lv + 1)
(Rgl?.r_repr (hvvreg hsz) hh3 hs)
(Rgl?.r_repr (hvreg hsz) hh3 rhs)
(U32.v i / 2) (U32.v j / 2)
(Rgl?.r_repr (hreg hsz) hh3 acc) true ==
(Rgl?.r_repr (hvreg hsz) hh4 rhs, Rgl?.r_repr (hreg hsz) hh4 acc));
mt_safe_elts_spec hh0 lv hs i j;
MTH.construct_rhs_odd #(U32.v hsz) #hash_spec
(U32.v lv) (Rgl?.r_repr (hvvreg hsz) hh0 hs) (Rgl?.r_repr (hvreg hsz) hh0 rhs)
(U32.v i) (U32.v j) (Rgl?.r_repr (hreg hsz) hh0 acc) actd;
assert (MTH.construct_rhs #(U32.v hsz) #hash_spec
(U32.v lv)
(Rgl?.r_repr (hvvreg hsz) hh0 hs)
(Rgl?.r_repr (hvreg hsz) hh0 rhs)
(U32.v i) (U32.v j)
(Rgl?.r_repr (hreg hsz) hh0 acc) actd ==
(Rgl?.r_repr (hvreg hsz) hh4 rhs, Rgl?.r_repr (hreg hsz) hh4 acc))
end)
end
#pop-options
private inline_for_extraction
val mt_get_root_pre_nst: mtv:merkle_tree -> rt:hash #(MT?.hash_size mtv) -> Tot bool
let mt_get_root_pre_nst mtv rt = true
val mt_get_root_pre:
#hsz:Ghost.erased hash_size_t ->
mt:const_mt_p ->
rt:hash #hsz ->
HST.ST bool
(requires (fun h0 ->
let mt = CB.cast mt in
MT?.hash_size (B.get h0 mt 0) = Ghost.reveal hsz /\
mt_safe h0 mt /\ Rgl?.r_inv (hreg hsz) h0 rt /\
HH.disjoint (B.frameOf mt) (B.frameOf rt)))
(ensures (fun _ _ _ -> True))
let mt_get_root_pre #hsz mt rt =
let mt = CB.cast mt in
let mt = !*mt in
let hsz = MT?.hash_size mt in
assert (MT?.hash_size mt = hsz);
mt_get_root_pre_nst mt rt
// `mt_get_root` returns the Merkle root. If it has already been calculated
// from up-to-date hashes, the root is returned immediately. Otherwise it calls
// `construct_rhs` to build the rightmost hashes and to calculate the Merkle
// root as well.
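// Informal summary of the two branches below: when `MT?.rhs_ok` is set, the
// cached `mroot` is simply copied into `rt`; otherwise `construct_rhs`
// recomputes `rhs` and the root into `rt`, the result is copied back into
// `mroot`, and `rhs_ok` is set to true for subsequent calls.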
val mt_get_root:
#hsz:Ghost.erased hash_size_t ->
mt:const_mt_p ->
rt:hash #hsz ->
HST.ST unit
(requires (fun h0 ->
let mt = CB.cast mt in
let dmt = B.get h0 mt 0 in
MT?.hash_size dmt = (Ghost.reveal hsz) /\
mt_get_root_pre_nst dmt rt /\
mt_safe h0 mt /\ Rgl?.r_inv (hreg hsz) h0 rt /\
HH.disjoint (B.frameOf mt) (B.frameOf rt)))
(ensures (fun h0 _ h1 ->
let mt = CB.cast mt in
// memory safety
modifies (loc_union
(mt_loc mt)
(B.loc_all_regions_from false (B.frameOf rt)))
h0 h1 /\
mt_safe h1 mt /\
(let mtv0 = B.get h0 mt 0 in
let mtv1 = B.get h1 mt 0 in
MT?.hash_size mtv0 = (Ghost.reveal hsz) /\
MT?.hash_size mtv1 = (Ghost.reveal hsz) /\
MT?.i mtv1 = MT?.i mtv0 /\ MT?.j mtv1 = MT?.j mtv0 /\
MT?.hs mtv1 == MT?.hs mtv0 /\ MT?.rhs mtv1 == MT?.rhs mtv0 /\
MT?.offset mtv1 == MT?.offset mtv0 /\
MT?.rhs_ok mtv1 = true /\
Rgl?.r_inv (hreg hsz) h1 rt /\
// correctness
MTH.mt_get_root (mt_lift h0 mt) (Rgl?.r_repr (hreg hsz) h0 rt) ==
(mt_lift h1 mt, Rgl?.r_repr (hreg hsz) h1 rt))))
#push-options "--z3rlimit 150 --initial_fuel 1 --max_fuel 1"
let mt_get_root #hsz mt rt =
let mt = CB.cast mt in
let hh0 = HST.get () in
let mtv = !*mt in
let prefix = MT?.offset mtv in
let i = MT?.i mtv in
let j = MT?.j mtv in
let hs = MT?.hs mtv in
let rhs = MT?.rhs mtv in
let mroot = MT?.mroot mtv in
let hash_size = MT?.hash_size mtv in
let hash_spec = MT?.hash_spec mtv in
let hash_fun = MT?.hash_fun mtv in
if MT?.rhs_ok mtv
then begin
Cpy?.copy (hcpy hash_size) hash_size mroot rt;
let hh1 = HST.get () in
mt_safe_preserved mt
(B.loc_all_regions_from false (Rgl?.region_of (hreg hsz) rt)) hh0 hh1;
mt_preserved mt
(B.loc_all_regions_from false (Rgl?.region_of (hreg hsz) rt)) hh0 hh1;
MTH.mt_get_root_rhs_ok_true
(mt_lift hh0 mt) (Rgl?.r_repr (hreg hsz) hh0 rt);
assert (MTH.mt_get_root (mt_lift hh0 mt) (Rgl?.r_repr (hreg hsz) hh0 rt) ==
(mt_lift hh1 mt, Rgl?.r_repr (hreg hsz) hh1 rt))
end
else begin
construct_rhs #hash_size #hash_spec 0ul hs rhs i j rt false hash_fun;
let hh1 = HST.get () in
// memory safety
assert (RV.rv_inv hh1 rhs);
assert (Rgl?.r_inv (hreg hsz) hh1 rt);
assert (B.live hh1 mt);
RV.rv_inv_preserved
hs (loc_union
(RV.loc_rvector rhs)
(B.loc_all_regions_from false (B.frameOf rt)))
hh0 hh1;
RV.as_seq_preserved
hs (loc_union
(RV.loc_rvector rhs)
(B.loc_all_regions_from false (B.frameOf rt)))
hh0 hh1;
V.loc_vector_within_included hs 0ul (V.size_of hs);
mt_safe_elts_preserved 0ul hs i j
(loc_union
(RV.loc_rvector rhs)
(B.loc_all_regions_from false (B.frameOf rt)))
hh0 hh1;
// correctness
mt_safe_elts_spec hh0 0ul hs i j;
assert (MTH.construct_rhs #(U32.v hash_size) #hash_spec 0
(Rgl?.r_repr (hvvreg hsz) hh0 hs)
(Rgl?.r_repr (hvreg hsz) hh0 rhs)
(U32.v i) (U32.v j)
(Rgl?.r_repr (hreg hsz) hh0 rt) false ==
(Rgl?.r_repr (hvreg hsz) hh1 rhs, Rgl?.r_repr (hreg hsz) hh1 rt));
Cpy?.copy (hcpy hash_size) hash_size rt mroot;
let hh2 = HST.get () in
// memory safety
RV.rv_inv_preserved
hs (B.loc_all_regions_from false (B.frameOf mroot))
hh1 hh2;
RV.rv_inv_preserved
rhs (B.loc_all_regions_from false (B.frameOf mroot))
hh1 hh2;
RV.as_seq_preserved
hs (B.loc_all_regions_from false (B.frameOf mroot))
hh1 hh2;
RV.as_seq_preserved
rhs (B.loc_all_regions_from false (B.frameOf mroot))
hh1 hh2;
B.modifies_buffer_elim
rt (B.loc_all_regions_from false (B.frameOf mroot))
hh1 hh2;
mt_safe_elts_preserved 0ul hs i j
(B.loc_all_regions_from false (B.frameOf mroot))
hh1 hh2;
// correctness
assert (Rgl?.r_repr (hreg hsz) hh2 mroot == Rgl?.r_repr (hreg hsz) hh1 rt);
mt *= MT hash_size prefix i j hs true rhs mroot hash_spec hash_fun;
let hh3 = HST.get () in
// memory safety
Rgl?.r_sep (hreg hsz) rt (B.loc_buffer mt) hh2 hh3;
RV.rv_inv_preserved hs (B.loc_buffer mt) hh2 hh3;
RV.rv_inv_preserved rhs (B.loc_buffer mt) hh2 hh3;
RV.as_seq_preserved hs (B.loc_buffer mt) hh2 hh3;
RV.as_seq_preserved rhs (B.loc_buffer mt) hh2 hh3;
Rgl?.r_sep (hreg hsz) mroot (B.loc_buffer mt) hh2 hh3;
mt_safe_elts_preserved 0ul hs i j
(B.loc_buffer mt) hh2 hh3;
assert (mt_safe hh3 mt);
// correctness
MTH.mt_get_root_rhs_ok_false
(mt_lift hh0 mt) (Rgl?.r_repr (hreg hsz) hh0 rt);
assert (MTH.mt_get_root (mt_lift hh0 mt) (Rgl?.r_repr (hreg hsz) hh0 rt) ==
(MTH.MT #(U32.v hash_size)
(U32.v i) (U32.v j)
(RV.as_seq hh0 hs)
true
(RV.as_seq hh1 rhs)
(Rgl?.r_repr (hreg hsz) hh1 rt)
hash_spec,
Rgl?.r_repr (hreg hsz) hh1 rt));
assert (MTH.mt_get_root (mt_lift hh0 mt) (Rgl?.r_repr (hreg hsz) hh0 rt) ==
(mt_lift hh3 mt, Rgl?.r_repr (hreg hsz) hh3 rt))
end
#pop-options
inline_for_extraction
val mt_path_insert:
#hsz:hash_size_t ->
mtr:HH.rid -> p:path_p -> hp:hash #hsz ->
HST.ST unit
(requires (fun h0 ->
path_safe h0 mtr p /\
not (V.is_full (phashes h0 p)) /\
Rgl?.r_inv (hreg hsz) h0 hp /\
HH.disjoint mtr (B.frameOf p) /\
HH.includes mtr (B.frameOf hp) /\
Path?.hash_size (B.get h0 p 0) = hsz))
(ensures (fun h0 _ h1 ->
// memory safety
modifies (path_loc p) h0 h1 /\
path_safe h1 mtr p /\
// correctness
(let hsz0 = Path?.hash_size (B.get h0 p 0) in
let hsz1 = Path?.hash_size (B.get h1 p 0) in
(let before:(S.seq (MTH.hash #(U32.v hsz0))) = lift_path h0 mtr p in
let after:(S.seq (MTH.hash #(U32.v hsz1))) = lift_path h1 mtr p in
V.size_of (phashes h1 p) = V.size_of (phashes h0 p) + 1ul /\
hsz = hsz0 /\ hsz = hsz1 /\
(let hspec:(S.seq (MTH.hash #(U32.v hsz))) = (MTH.path_insert #(U32.v hsz) before (Rgl?.r_repr (hreg hsz) h0 hp)) in
S.equal hspec after)))))
#push-options "--z3rlimit 20 --initial_fuel 1 --max_fuel 1"
let mt_path_insert #hsz mtr p hp =
let pth = !*p in
let pv = Path?.hashes pth in
let hh0 = HST.get () in
let ipv = V.insert pv hp in
let hh1 = HST.get () in
path_safe_preserved_
mtr (V.as_seq hh0 pv) 0 (S.length (V.as_seq hh0 pv))
(B.loc_all_regions_from false (V.frameOf ipv)) hh0 hh1;
path_preserved_
mtr (V.as_seq hh0 pv) 0 (S.length (V.as_seq hh0 pv))
(B.loc_all_regions_from false (V.frameOf ipv)) hh0 hh1;
Rgl?.r_sep (hreg hsz) hp
(B.loc_all_regions_from false (V.frameOf ipv)) hh0 hh1;
p *= Path hsz ipv;
let hh2 = HST.get () in
path_safe_preserved_
mtr (V.as_seq hh1 ipv) 0 (S.length (V.as_seq hh1 ipv))
(B.loc_region_only false (B.frameOf p)) hh1 hh2;
path_preserved_
mtr (V.as_seq hh1 ipv) 0 (S.length (V.as_seq hh1 ipv))
(B.loc_region_only false (B.frameOf p)) hh1 hh2;
Rgl?.r_sep (hreg hsz) hp
(B.loc_region_only false (B.frameOf p)) hh1 hh2;
assert (S.equal (lift_path hh2 mtr p)
(lift_path_ hh1 (S.snoc (V.as_seq hh0 pv) hp)
0 (S.length (V.as_seq hh1 ipv))));
lift_path_eq hh1 (S.snoc (V.as_seq hh0 pv) hp) (V.as_seq hh0 pv)
0 (S.length (V.as_seq hh0 pv))
#pop-options
// Given a target index `k`, the number of elements in the tree `j`, and a
// boolean flag (indicating whether rightmost hashes exist), we can calculate
// the required Merkle path length.
//
// `mt_path_length` appears in the postcondition of `mt_get_path` and in the
// precondition of `mt_verify`. For a detailed description, see `mt_get_path`
// and `mt_verify`.
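// A worked example (illustration only): for k = 2 and j = 7 with
// actd = false, the path needs the sibling leaf at level 0 (k is even and
// k + 1 < j, step = 1), the sibling node at level 1 (k = 1 is odd, step = 1)
// and, since the recursion has set actd by then, the accumulated right-hand
// hash at level 2 (step = 1), so `mt_path_length 0ul 2ul 7ul false = 3ul`.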
private
val mt_path_length_step:
k:index_t ->
j:index_t{k <= j} ->
actd:bool ->
Tot (sl:uint32_t{U32.v sl = MTH.mt_path_length_step (U32.v k) (U32.v j) actd})
let mt_path_length_step k j actd =
if j = 0ul then 0ul
else (if k % 2ul = 0ul
then (if j = k || (j = k + 1ul && not actd) then 0ul else 1ul)
else 1ul)
private inline_for_extraction
val mt_path_length:
lv:uint32_t{lv <= merkle_tree_size_lg} ->
k:index_t ->
j:index_t{k <= j && U32.v j < pow2 (32 - U32.v lv)} ->
actd:bool ->
Tot (l:uint32_t{
U32.v l = MTH.mt_path_length (U32.v k) (U32.v j) actd &&
l <= 32ul - lv})
(decreases (U32.v j))
#push-options "--z3rlimit 10 --initial_fuel 1 --max_fuel 1"
let rec mt_path_length lv k j actd =
if j = 0ul then 0ul
else (let nactd = actd || (j % 2ul = 1ul) in
mt_path_length_step k j actd +
mt_path_length (lv + 1ul) (k / 2ul) (j / 2ul) nactd)
#pop-options
val mt_get_path_length:
mtr:HH.rid ->
p:const_path_p ->
HST.ST uint32_t
(requires (fun h0 -> path_safe h0 mtr (CB.cast p)))
(ensures (fun h0 _ h1 -> True))
let mt_get_path_length mtr p =
let pd = !*(CB.cast p) in
V.size_of (Path?.hashes pd)
private inline_for_extraction
val mt_make_path_step:
#hsz:hash_size_t ->
lv:uint32_t{lv <= merkle_tree_size_lg} ->
mtr:HH.rid ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
rhs:hash_vec #hsz {V.size_of rhs = merkle_tree_size_lg} ->
i:index_t ->
j:index_t{j <> 0ul /\ i <= j /\ U32.v j < pow2 (32 - U32.v lv)} ->
k:index_t{i <= k && k <= j} ->
p:path_p ->
actd:bool ->
HST.ST unit
(requires (fun h0 ->
HH.includes mtr (V.frameOf hs) /\
HH.includes mtr (V.frameOf rhs) /\
RV.rv_inv h0 hs /\ RV.rv_inv h0 rhs /\
mt_safe_elts h0 lv hs i j /\
path_safe h0 mtr p /\
Path?.hash_size (B.get h0 p 0) = hsz /\
V.size_of (phashes h0 p) <= lv + 1ul))
(ensures (fun h0 _ h1 ->
// memory safety
modifies (path_loc p) h0 h1 /\
path_safe h1 mtr p /\
V.size_of (phashes h1 p) == V.size_of (phashes h0 p) + mt_path_length_step k j actd /\
V.size_of (phashes h1 p) <= lv + 2ul /\
// correctness
(mt_safe_elts_spec h0 lv hs i j;
(let hsz0 = Path?.hash_size (B.get h0 p 0) in
let hsz1 = Path?.hash_size (B.get h1 p 0) in
let before:(S.seq (MTH.hash #(U32.v hsz0))) = lift_path h0 mtr p in
let after:(S.seq (MTH.hash #(U32.v hsz1))) = lift_path h1 mtr p in
hsz = hsz0 /\ hsz = hsz1 /\
S.equal after
(MTH.mt_make_path_step
(U32.v lv) (RV.as_seq h0 hs) (RV.as_seq h0 rhs)
(U32.v i) (U32.v j) (U32.v k) before actd)))))
#push-options "--z3rlimit 100 --initial_fuel 1 --max_fuel 1 --initial_ifuel 2 --max_ifuel 2"
let mt_make_path_step #hsz lv mtr hs rhs i j k p actd =
let pth = !*p in
let hh0 = HST.get () in
let ofs = offset_of i in
if k % 2ul = 1ul
then begin
hash_vv_rv_inv_includes hh0 hs lv (k - 1ul - ofs);
assert (HH.includes mtr
(B.frameOf (V.get hh0 (V.get hh0 hs lv) (k - 1ul - ofs))));
assert(Path?.hash_size pth = hsz);
mt_path_insert #hsz mtr p (V.index (V.index hs lv) (k - 1ul - ofs))
end
else begin
if k = j then ()
else if k + 1ul = j
then (if actd
then (assert (HH.includes mtr (B.frameOf (V.get hh0 rhs lv)));
mt_path_insert mtr p (V.index rhs lv)))
else (hash_vv_rv_inv_includes hh0 hs lv (k + 1ul - ofs);
assert (HH.includes mtr
(B.frameOf (V.get hh0 (V.get hh0 hs lv) (k + 1ul - ofs))));
mt_path_insert mtr p (V.index (V.index hs lv) (k + 1ul - ofs)))
end
#pop-options
private inline_for_extraction
val mt_get_path_step_pre_nst:
#hsz:Ghost.erased hash_size_t ->
mtr:HH.rid ->
p:path ->
i:uint32_t ->
Tot bool | false | true | MerkleTree.Low.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 0,
"initial_ifuel": 0,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 10,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val mt_get_path_step_pre_nst:
#hsz:Ghost.erased hash_size_t ->
mtr:HH.rid ->
p:path ->
i:uint32_t ->
Tot bool | [] | MerkleTree.Low.mt_get_path_step_pre_nst | {
"file_name": "src/MerkleTree.Low.fst",
"git_rev": "3b0f086655c145aa23f58a97259ebf4cf112a4a3",
"git_url": "https://github.com/hacl-star/merkle-tree.git",
"project_name": "merkle-tree"
} | mtr: FStar.Monotonic.HyperHeap.rid -> p: MerkleTree.Low.path -> i: LowStar.Vector.uint32_t
-> Prims.bool | {
"end_col": 32,
"end_line": 1860,
"start_col": 2,
"start_line": 1860
} |
Prims.Tot | [
{
"abbrev": false,
"full_module": "MerkleTree.Low.VectorExtras",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree.Low.Hashfunctions",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree.Low.Datastructures",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": true,
"full_module": "MerkleTree.Spec",
"short_module": "MTS"
},
{
"abbrev": true,
"full_module": "MerkleTree.New.High",
"short_module": "MTH"
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "LowStar.Regional.Instances",
"short_module": "RVI"
},
{
"abbrev": true,
"full_module": "LowStar.RVector",
"short_module": "RV"
},
{
"abbrev": true,
"full_module": "LowStar.Vector",
"short_module": "V"
},
{
"abbrev": true,
"full_module": "LowStar.ConstBuffer",
"short_module": "CB"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.Monotonic.HyperHeap",
"short_module": "HH"
},
{
"abbrev": true,
"full_module": "FStar.Monotonic.HyperStack",
"short_module": "MHS"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "LowStar.Regional.Instances",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.RVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Regional",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Vector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.BufferOps",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Buffer",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Integers",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.All",
"short_module": null
},
{
"abbrev": false,
"full_module": "EverCrypt.Helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let offset_range_limit = uint32_max | let offset_range_limit = | false | null | false | uint32_max | {
"checked_file": "MerkleTree.Low.fst.checked",
"dependencies": [
"prims.fst.checked",
"MerkleTree.Spec.fst.checked",
"MerkleTree.New.High.fst.checked",
"MerkleTree.Low.VectorExtras.fst.checked",
"MerkleTree.Low.Hashfunctions.fst.checked",
"MerkleTree.Low.Datastructures.fst.checked",
"LowStar.Vector.fst.checked",
"LowStar.RVector.fst.checked",
"LowStar.Regional.Instances.fst.checked",
"LowStar.Regional.fst.checked",
"LowStar.ConstBuffer.fsti.checked",
"LowStar.BufferOps.fst.checked",
"LowStar.Buffer.fst.checked",
"Lib.IntTypes.fsti.checked",
"Lib.ByteBuffer.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.Properties.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Monotonic.HyperStack.fsti.checked",
"FStar.Monotonic.HyperHeap.fsti.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Integers.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked",
"FStar.All.fst.checked",
"EverCrypt.Helpers.fsti.checked"
],
"interface_file": false,
"source_file": "MerkleTree.Low.fst"
} | [
"total"
] | [
"MerkleTree.Low.uint32_max"
] | [] | module MerkleTree.Low
open EverCrypt.Helpers
open FStar.All
open FStar.Integers
open FStar.Mul
open LowStar.Buffer
open LowStar.BufferOps
open LowStar.Vector
open LowStar.Regional
open LowStar.RVector
open LowStar.Regional.Instances
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
module MHS = FStar.Monotonic.HyperStack
module HH = FStar.Monotonic.HyperHeap
module B = LowStar.Buffer
module CB = LowStar.ConstBuffer
module V = LowStar.Vector
module RV = LowStar.RVector
module RVI = LowStar.Regional.Instances
module S = FStar.Seq
module U32 = FStar.UInt32
module U64 = FStar.UInt64
module MTH = MerkleTree.New.High
module MTS = MerkleTree.Spec
open Lib.IntTypes
open MerkleTree.Low.Datastructures
open MerkleTree.Low.Hashfunctions
open MerkleTree.Low.VectorExtras
#set-options "--z3rlimit 10 --initial_fuel 0 --max_fuel 0 --initial_ifuel 0 --max_ifuel 0"
type const_pointer (a:Type0) = b:CB.const_buffer a{CB.length b == 1 /\ CB.qual_of b == CB.MUTABLE}
/// Low-level Merkle tree data structure
///
// NOTE: because of a lack of 64-bit LowStar.Buffer support, currently
// we cannot change below to some other types.
type index_t = uint32_t
let uint32_32_max = 4294967295ul
inline_for_extraction
let uint32_max = 4294967295UL | false | true | MerkleTree.Low.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 0,
"initial_ifuel": 0,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 10,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val offset_range_limit : FStar.UInt64.t | [] | MerkleTree.Low.offset_range_limit | {
"file_name": "src/MerkleTree.Low.fst",
"git_rev": "3b0f086655c145aa23f58a97259ebf4cf112a4a3",
"git_url": "https://github.com/hacl-star/merkle-tree.git",
"project_name": "merkle-tree"
} | FStar.UInt64.t | {
"end_col": 35,
"end_line": 55,
"start_col": 25,
"start_line": 55
} |
|
Prims.Tot | [
{
"abbrev": false,
"full_module": "MerkleTree.Low.VectorExtras",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree.Low.Hashfunctions",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree.Low.Datastructures",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": true,
"full_module": "MerkleTree.Spec",
"short_module": "MTS"
},
{
"abbrev": true,
"full_module": "MerkleTree.New.High",
"short_module": "MTH"
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "LowStar.Regional.Instances",
"short_module": "RVI"
},
{
"abbrev": true,
"full_module": "LowStar.RVector",
"short_module": "RV"
},
{
"abbrev": true,
"full_module": "LowStar.Vector",
"short_module": "V"
},
{
"abbrev": true,
"full_module": "LowStar.ConstBuffer",
"short_module": "CB"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.Monotonic.HyperHeap",
"short_module": "HH"
},
{
"abbrev": true,
"full_module": "FStar.Monotonic.HyperStack",
"short_module": "MHS"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "LowStar.Regional.Instances",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.RVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Regional",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Vector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.BufferOps",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Buffer",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Integers",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.All",
"short_module": null
},
{
"abbrev": false,
"full_module": "EverCrypt.Helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let u64_32 = Int.Cast.uint64_to_uint32 | let u64_32 = | false | null | false | Int.Cast.uint64_to_uint32 | {
"checked_file": "MerkleTree.Low.fst.checked",
"dependencies": [
"prims.fst.checked",
"MerkleTree.Spec.fst.checked",
"MerkleTree.New.High.fst.checked",
"MerkleTree.Low.VectorExtras.fst.checked",
"MerkleTree.Low.Hashfunctions.fst.checked",
"MerkleTree.Low.Datastructures.fst.checked",
"LowStar.Vector.fst.checked",
"LowStar.RVector.fst.checked",
"LowStar.Regional.Instances.fst.checked",
"LowStar.Regional.fst.checked",
"LowStar.ConstBuffer.fsti.checked",
"LowStar.BufferOps.fst.checked",
"LowStar.Buffer.fst.checked",
"Lib.IntTypes.fsti.checked",
"Lib.ByteBuffer.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.Properties.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Monotonic.HyperStack.fsti.checked",
"FStar.Monotonic.HyperHeap.fsti.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Integers.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked",
"FStar.All.fst.checked",
"EverCrypt.Helpers.fsti.checked"
],
"interface_file": false,
"source_file": "MerkleTree.Low.fst"
} | [
"total"
] | [
"FStar.Int.Cast.uint64_to_uint32"
] | [] | module MerkleTree.Low
open EverCrypt.Helpers
open FStar.All
open FStar.Integers
open FStar.Mul
open LowStar.Buffer
open LowStar.BufferOps
open LowStar.Vector
open LowStar.Regional
open LowStar.RVector
open LowStar.Regional.Instances
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
module MHS = FStar.Monotonic.HyperStack
module HH = FStar.Monotonic.HyperHeap
module B = LowStar.Buffer
module CB = LowStar.ConstBuffer
module V = LowStar.Vector
module RV = LowStar.RVector
module RVI = LowStar.Regional.Instances
module S = FStar.Seq
module U32 = FStar.UInt32
module U64 = FStar.UInt64
module MTH = MerkleTree.New.High
module MTS = MerkleTree.Spec
open Lib.IntTypes
open MerkleTree.Low.Datastructures
open MerkleTree.Low.Hashfunctions
open MerkleTree.Low.VectorExtras
#set-options "--z3rlimit 10 --initial_fuel 0 --max_fuel 0 --initial_ifuel 0 --max_ifuel 0"
type const_pointer (a:Type0) = b:CB.const_buffer a{CB.length b == 1 /\ CB.qual_of b == CB.MUTABLE}
/// Low-level Merkle tree data structure
///
// NOTE: because of a lack of 64-bit LowStar.Buffer support, currently
// we cannot change below to some other types.
type index_t = uint32_t
let uint32_32_max = 4294967295ul
inline_for_extraction
let uint32_max = 4294967295UL
let uint64_max = 18446744073709551615UL
let offset_range_limit = uint32_max
type offset_t = uint64_t | false | false | MerkleTree.Low.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 0,
"initial_ifuel": 0,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 10,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val u64_32 : a: FStar.UInt64.t -> b: FStar.UInt32.t{FStar.UInt32.v b = FStar.UInt64.v a % Prims.pow2 32} | [] | MerkleTree.Low.u64_32 | {
"file_name": "src/MerkleTree.Low.fst",
"git_rev": "3b0f086655c145aa23f58a97259ebf4cf112a4a3",
"git_url": "https://github.com/hacl-star/merkle-tree.git",
"project_name": "merkle-tree"
} | a: FStar.UInt64.t -> b: FStar.UInt32.t{FStar.UInt32.v b = FStar.UInt64.v a % Prims.pow2 32} | {
"end_col": 77,
"end_line": 59,
"start_col": 52,
"start_line": 59
} |
|
Prims.Tot | val merkle_tree_size_lg: uint32_t | [
{
"abbrev": false,
"full_module": "MerkleTree.Low.VectorExtras",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree.Low.Hashfunctions",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree.Low.Datastructures",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": true,
"full_module": "MerkleTree.Spec",
"short_module": "MTS"
},
{
"abbrev": true,
"full_module": "MerkleTree.New.High",
"short_module": "MTH"
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "LowStar.Regional.Instances",
"short_module": "RVI"
},
{
"abbrev": true,
"full_module": "LowStar.RVector",
"short_module": "RV"
},
{
"abbrev": true,
"full_module": "LowStar.Vector",
"short_module": "V"
},
{
"abbrev": true,
"full_module": "LowStar.ConstBuffer",
"short_module": "CB"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.Monotonic.HyperHeap",
"short_module": "HH"
},
{
"abbrev": true,
"full_module": "FStar.Monotonic.HyperStack",
"short_module": "MHS"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "LowStar.Regional.Instances",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.RVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Regional",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Vector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.BufferOps",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Buffer",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Integers",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.All",
"short_module": null
},
{
"abbrev": false,
"full_module": "EverCrypt.Helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let merkle_tree_size_lg = 32ul | val merkle_tree_size_lg: uint32_t
let merkle_tree_size_lg = | false | null | false | 32ul | {
"checked_file": "MerkleTree.Low.fst.checked",
"dependencies": [
"prims.fst.checked",
"MerkleTree.Spec.fst.checked",
"MerkleTree.New.High.fst.checked",
"MerkleTree.Low.VectorExtras.fst.checked",
"MerkleTree.Low.Hashfunctions.fst.checked",
"MerkleTree.Low.Datastructures.fst.checked",
"LowStar.Vector.fst.checked",
"LowStar.RVector.fst.checked",
"LowStar.Regional.Instances.fst.checked",
"LowStar.Regional.fst.checked",
"LowStar.ConstBuffer.fsti.checked",
"LowStar.BufferOps.fst.checked",
"LowStar.Buffer.fst.checked",
"Lib.IntTypes.fsti.checked",
"Lib.ByteBuffer.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.Properties.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Monotonic.HyperStack.fsti.checked",
"FStar.Monotonic.HyperHeap.fsti.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Integers.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked",
"FStar.All.fst.checked",
"EverCrypt.Helpers.fsti.checked"
],
"interface_file": false,
"source_file": "MerkleTree.Low.fst"
} | [
"total"
] | [
"FStar.UInt32.__uint_to_t"
] | [] | module MerkleTree.Low
open EverCrypt.Helpers
open FStar.All
open FStar.Integers
open FStar.Mul
open LowStar.Buffer
open LowStar.BufferOps
open LowStar.Vector
open LowStar.Regional
open LowStar.RVector
open LowStar.Regional.Instances
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
module MHS = FStar.Monotonic.HyperStack
module HH = FStar.Monotonic.HyperHeap
module B = LowStar.Buffer
module CB = LowStar.ConstBuffer
module V = LowStar.Vector
module RV = LowStar.RVector
module RVI = LowStar.Regional.Instances
module S = FStar.Seq
module U32 = FStar.UInt32
module U64 = FStar.UInt64
module MTH = MerkleTree.New.High
module MTS = MerkleTree.Spec
open Lib.IntTypes
open MerkleTree.Low.Datastructures
open MerkleTree.Low.Hashfunctions
open MerkleTree.Low.VectorExtras
#set-options "--z3rlimit 10 --initial_fuel 0 --max_fuel 0 --initial_ifuel 0 --max_ifuel 0"
type const_pointer (a:Type0) = b:CB.const_buffer a{CB.length b == 1 /\ CB.qual_of b == CB.MUTABLE}
/// Low-level Merkle tree data structure
///
// NOTE: because of a lack of 64-bit LowStar.Buffer support, currently
// we cannot change below to some other types.
type index_t = uint32_t
let uint32_32_max = 4294967295ul
inline_for_extraction
let uint32_max = 4294967295UL
let uint64_max = 18446744073709551615UL
let offset_range_limit = uint32_max
type offset_t = uint64_t
inline_for_extraction noextract unfold let u32_64 = Int.Cast.uint32_to_uint64
inline_for_extraction noextract unfold let u64_32 = Int.Cast.uint64_to_uint32
private inline_for_extraction
let offsets_connect (x:offset_t) (y:offset_t): Tot bool = y >= x && (y - x) <= offset_range_limit
private inline_for_extraction
let split_offset (tree:offset_t) (index:offset_t{offsets_connect tree index}): Tot index_t =
[@inline_let] let diff = U64.sub_mod index tree in
assert (diff <= offset_range_limit);
Int.Cast.uint64_to_uint32 diff
private inline_for_extraction
let add64_fits (x:offset_t) (i:index_t): Tot bool = uint64_max - x >= (u32_64 i)
private inline_for_extraction
let join_offset (tree:offset_t) (i:index_t{add64_fits tree i}): Tot (r:offset_t{offsets_connect tree r}) =
U64.add tree (u32_64 i) | false | true | MerkleTree.Low.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 0,
"initial_ifuel": 0,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 10,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val merkle_tree_size_lg: uint32_t | [] | MerkleTree.Low.merkle_tree_size_lg | {
"file_name": "src/MerkleTree.Low.fst",
"git_rev": "3b0f086655c145aa23f58a97259ebf4cf112a4a3",
"git_url": "https://github.com/hacl-star/merkle-tree.git",
"project_name": "merkle-tree"
} | LowStar.Vector.uint32_t | {
"end_col": 30,
"end_line": 78,
"start_col": 26,
"start_line": 78
} |
Prims.Tot | val mt_not_full_nst: mtv:merkle_tree -> Tot bool | [
{
"abbrev": false,
"full_module": "MerkleTree.Low.VectorExtras",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree.Low.Hashfunctions",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree.Low.Datastructures",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": true,
"full_module": "MerkleTree.Spec",
"short_module": "MTS"
},
{
"abbrev": true,
"full_module": "MerkleTree.New.High",
"short_module": "MTH"
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "LowStar.Regional.Instances",
"short_module": "RVI"
},
{
"abbrev": true,
"full_module": "LowStar.RVector",
"short_module": "RV"
},
{
"abbrev": true,
"full_module": "LowStar.Vector",
"short_module": "V"
},
{
"abbrev": true,
"full_module": "LowStar.ConstBuffer",
"short_module": "CB"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.Monotonic.HyperHeap",
"short_module": "HH"
},
{
"abbrev": true,
"full_module": "FStar.Monotonic.HyperStack",
"short_module": "MHS"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "LowStar.Regional.Instances",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.RVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Regional",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Vector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.BufferOps",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Buffer",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Integers",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.All",
"short_module": null
},
{
"abbrev": false,
"full_module": "EverCrypt.Helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let mt_not_full_nst mtv = MT?.j mtv < uint32_32_max | val mt_not_full_nst: mtv:merkle_tree -> Tot bool
let mt_not_full_nst mtv = | false | null | false | MT?.j mtv < uint32_32_max | {
"checked_file": "MerkleTree.Low.fst.checked",
"dependencies": [
"prims.fst.checked",
"MerkleTree.Spec.fst.checked",
"MerkleTree.New.High.fst.checked",
"MerkleTree.Low.VectorExtras.fst.checked",
"MerkleTree.Low.Hashfunctions.fst.checked",
"MerkleTree.Low.Datastructures.fst.checked",
"LowStar.Vector.fst.checked",
"LowStar.RVector.fst.checked",
"LowStar.Regional.Instances.fst.checked",
"LowStar.Regional.fst.checked",
"LowStar.ConstBuffer.fsti.checked",
"LowStar.BufferOps.fst.checked",
"LowStar.Buffer.fst.checked",
"Lib.IntTypes.fsti.checked",
"Lib.ByteBuffer.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.Properties.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Monotonic.HyperStack.fsti.checked",
"FStar.Monotonic.HyperHeap.fsti.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Integers.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked",
"FStar.All.fst.checked",
"EverCrypt.Helpers.fsti.checked"
],
"interface_file": false,
"source_file": "MerkleTree.Low.fst"
} | [
"total"
] | [
"MerkleTree.Low.merkle_tree",
"FStar.Integers.op_Less",
"FStar.Integers.Unsigned",
"FStar.Integers.W32",
"MerkleTree.Low.__proj__MT__item__j",
"MerkleTree.Low.uint32_32_max",
"Prims.bool"
] | [] | module MerkleTree.Low
open EverCrypt.Helpers
open FStar.All
open FStar.Integers
open FStar.Mul
open LowStar.Buffer
open LowStar.BufferOps
open LowStar.Vector
open LowStar.Regional
open LowStar.RVector
open LowStar.Regional.Instances
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
module MHS = FStar.Monotonic.HyperStack
module HH = FStar.Monotonic.HyperHeap
module B = LowStar.Buffer
module CB = LowStar.ConstBuffer
module V = LowStar.Vector
module RV = LowStar.RVector
module RVI = LowStar.Regional.Instances
module S = FStar.Seq
module U32 = FStar.UInt32
module U64 = FStar.UInt64
module MTH = MerkleTree.New.High
module MTS = MerkleTree.Spec
open Lib.IntTypes
open MerkleTree.Low.Datastructures
open MerkleTree.Low.Hashfunctions
open MerkleTree.Low.VectorExtras
#set-options "--z3rlimit 10 --initial_fuel 0 --max_fuel 0 --initial_ifuel 0 --max_ifuel 0"
type const_pointer (a:Type0) = b:CB.const_buffer a{CB.length b == 1 /\ CB.qual_of b == CB.MUTABLE}
/// Low-level Merkle tree data structure
///
// NOTE: because of a lack of 64-bit LowStar.Buffer support, currently
// we cannot change below to some other types.
type index_t = uint32_t
let uint32_32_max = 4294967295ul
inline_for_extraction
let uint32_max = 4294967295UL
let uint64_max = 18446744073709551615UL
let offset_range_limit = uint32_max
type offset_t = uint64_t
inline_for_extraction noextract unfold let u32_64 = Int.Cast.uint32_to_uint64
inline_for_extraction noextract unfold let u64_32 = Int.Cast.uint64_to_uint32
private inline_for_extraction
let offsets_connect (x:offset_t) (y:offset_t): Tot bool = y >= x && (y - x) <= offset_range_limit
private inline_for_extraction
let split_offset (tree:offset_t) (index:offset_t{offsets_connect tree index}): Tot index_t =
[@inline_let] let diff = U64.sub_mod index tree in
assert (diff <= offset_range_limit);
Int.Cast.uint64_to_uint32 diff
private inline_for_extraction
let add64_fits (x:offset_t) (i:index_t): Tot bool = uint64_max - x >= (u32_64 i)
private inline_for_extraction
let join_offset (tree:offset_t) (i:index_t{add64_fits tree i}): Tot (r:offset_t{offsets_connect tree r}) =
U64.add tree (u32_64 i)
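// For intuition: joining a tree offset with a 32-bit index and then splitting
// it back is the identity, e.g. join_offset 10UL 5ul = 15UL and
// split_offset 10UL 15UL = 5ul.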
inline_for_extraction val merkle_tree_size_lg: uint32_t
let merkle_tree_size_lg = 32ul
// A Merkle tree `MT i j hs rhs_ok rhs` stores all necessary hashes to generate
// a Merkle path for each element from the index `i` to `j-1`.
// - Parameters
// `hs`: a 2-dim store for hashes, where `hs[0]` contains leaf hash values.
// `rhs_ok`: indicates whether the rightmost hashes are up-to-date
// `rhs`: a store for "rightmost" hashes, manipulated only when required to
// calculate some merkle paths that need the rightmost hashes
// as a part of them.
// `mroot`: during the construction of `rhs` we can also calculate the Merkle
// root of the tree. If `rhs_ok` is true then it has the up-to-date
// root value.
noeq type merkle_tree =
| MT: hash_size:hash_size_t ->
offset:offset_t ->
i:index_t -> j:index_t{i <= j /\ add64_fits offset j} ->
hs:hash_vv hash_size {V.size_of hs = merkle_tree_size_lg} ->
rhs_ok:bool ->
rhs:hash_vec #hash_size {V.size_of rhs = merkle_tree_size_lg} ->
mroot:hash #hash_size ->
hash_spec:Ghost.erased (MTS.hash_fun_t #(U32.v hash_size)) ->
hash_fun:hash_fun_t #hash_size #hash_spec ->
merkle_tree
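// For intuition: with i = 0ul and j = 5ul, hs[0] holds the 5 leaf hashes,
// hs[1] holds 2 internal hashes, hs[2] holds 1, and all higher levels are
// empty; the exact per-level sizes are pinned down by `mt_safe_elts`.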
type mt_p = B.pointer merkle_tree
type const_mt_p = const_pointer merkle_tree
inline_for_extraction
let merkle_tree_conditions (#hsz:Ghost.erased hash_size_t) (offset:uint64_t) (i j:uint32_t) (hs:hash_vv hsz) (rhs_ok:bool) (rhs:hash_vec #hsz) (mroot:hash #hsz): Tot bool =
j >= i && add64_fits offset j &&
V.size_of hs = merkle_tree_size_lg &&
V.size_of rhs = merkle_tree_size_lg
// The maximum number of currently held elements in the tree is (2^32 - 1).
// cwinter: even when using 64-bit indices, we fail if the underlying 32-bit
// vector is full; this can be fixed if necessary.
private inline_for_extraction | false | true | MerkleTree.Low.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 0,
"initial_ifuel": 0,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 10,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val mt_not_full_nst: mtv:merkle_tree -> Tot bool | [] | MerkleTree.Low.mt_not_full_nst | {
"file_name": "src/MerkleTree.Low.fst",
"git_rev": "3b0f086655c145aa23f58a97259ebf4cf112a4a3",
"git_url": "https://github.com/hacl-star/merkle-tree.git",
"project_name": "merkle-tree"
} | mtv: MerkleTree.Low.merkle_tree -> Prims.bool | {
"end_col": 51,
"end_line": 117,
"start_col": 26,
"start_line": 117
} |
FStar.Pervasives.Lemma | val loc_union_assoc_4:
a:loc -> b:loc -> c:loc -> d:loc ->
Lemma (loc_union (loc_union a b) (loc_union c d) ==
loc_union (loc_union a c) (loc_union b d)) | [
{
"abbrev": false,
"full_module": "MerkleTree.Low.VectorExtras",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree.Low.Hashfunctions",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree.Low.Datastructures",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": true,
"full_module": "MerkleTree.Spec",
"short_module": "MTS"
},
{
"abbrev": true,
"full_module": "MerkleTree.New.High",
"short_module": "MTH"
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "LowStar.Regional.Instances",
"short_module": "RVI"
},
{
"abbrev": true,
"full_module": "LowStar.RVector",
"short_module": "RV"
},
{
"abbrev": true,
"full_module": "LowStar.Vector",
"short_module": "V"
},
{
"abbrev": true,
"full_module": "LowStar.ConstBuffer",
"short_module": "CB"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.Monotonic.HyperHeap",
"short_module": "HH"
},
{
"abbrev": true,
"full_module": "FStar.Monotonic.HyperStack",
"short_module": "MHS"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "LowStar.Regional.Instances",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.RVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Regional",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Vector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.BufferOps",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Buffer",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Integers",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.All",
"short_module": null
},
{
"abbrev": false,
"full_module": "EverCrypt.Helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let loc_union_assoc_4 a b c d =
loc_union_assoc (loc_union a b) c d;
loc_union_assoc a b c;
loc_union_assoc a c b;
loc_union_assoc (loc_union a c) b d | val loc_union_assoc_4:
a:loc -> b:loc -> c:loc -> d:loc ->
Lemma (loc_union (loc_union a b) (loc_union c d) ==
loc_union (loc_union a c) (loc_union b d))
let loc_union_assoc_4 a b c d = | false | null | true | loc_union_assoc (loc_union a b) c d;
loc_union_assoc a b c;
loc_union_assoc a c b;
loc_union_assoc (loc_union a c) b d | {
"checked_file": "MerkleTree.Low.fst.checked",
"dependencies": [
"prims.fst.checked",
"MerkleTree.Spec.fst.checked",
"MerkleTree.New.High.fst.checked",
"MerkleTree.Low.VectorExtras.fst.checked",
"MerkleTree.Low.Hashfunctions.fst.checked",
"MerkleTree.Low.Datastructures.fst.checked",
"LowStar.Vector.fst.checked",
"LowStar.RVector.fst.checked",
"LowStar.Regional.Instances.fst.checked",
"LowStar.Regional.fst.checked",
"LowStar.ConstBuffer.fsti.checked",
"LowStar.BufferOps.fst.checked",
"LowStar.Buffer.fst.checked",
"Lib.IntTypes.fsti.checked",
"Lib.ByteBuffer.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.Properties.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Monotonic.HyperStack.fsti.checked",
"FStar.Monotonic.HyperHeap.fsti.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Integers.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked",
"FStar.All.fst.checked",
"EverCrypt.Helpers.fsti.checked"
],
"interface_file": false,
"source_file": "MerkleTree.Low.fst"
} | [
"lemma"
] | [
"LowStar.Monotonic.Buffer.loc",
"LowStar.Monotonic.Buffer.loc_union_assoc",
"LowStar.Monotonic.Buffer.loc_union",
"Prims.unit"
] | [] | module MerkleTree.Low
open EverCrypt.Helpers
open FStar.All
open FStar.Integers
open FStar.Mul
open LowStar.Buffer
open LowStar.BufferOps
open LowStar.Vector
open LowStar.Regional
open LowStar.RVector
open LowStar.Regional.Instances
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
module MHS = FStar.Monotonic.HyperStack
module HH = FStar.Monotonic.HyperHeap
module B = LowStar.Buffer
module CB = LowStar.ConstBuffer
module V = LowStar.Vector
module RV = LowStar.RVector
module RVI = LowStar.Regional.Instances
module S = FStar.Seq
module U32 = FStar.UInt32
module U64 = FStar.UInt64
module MTH = MerkleTree.New.High
module MTS = MerkleTree.Spec
open Lib.IntTypes
open MerkleTree.Low.Datastructures
open MerkleTree.Low.Hashfunctions
open MerkleTree.Low.VectorExtras
#set-options "--z3rlimit 10 --initial_fuel 0 --max_fuel 0 --initial_ifuel 0 --max_ifuel 0"
type const_pointer (a:Type0) = b:CB.const_buffer a{CB.length b == 1 /\ CB.qual_of b == CB.MUTABLE}
/// Low-level Merkle tree data structure
///
// NOTE: because of a lack of 64-bit LowStar.Buffer support, currently
// we cannot change below to some other types.
type index_t = uint32_t
let uint32_32_max = 4294967295ul
inline_for_extraction
let uint32_max = 4294967295UL
let uint64_max = 18446744073709551615UL
let offset_range_limit = uint32_max
type offset_t = uint64_t
inline_for_extraction noextract unfold let u32_64 = Int.Cast.uint32_to_uint64
inline_for_extraction noextract unfold let u64_32 = Int.Cast.uint64_to_uint32
private inline_for_extraction
let offsets_connect (x:offset_t) (y:offset_t): Tot bool = y >= x && (y - x) <= offset_range_limit
private inline_for_extraction
let split_offset (tree:offset_t) (index:offset_t{offsets_connect tree index}): Tot index_t =
[@inline_let] let diff = U64.sub_mod index tree in
assert (diff <= offset_range_limit);
Int.Cast.uint64_to_uint32 diff
private inline_for_extraction
let add64_fits (x:offset_t) (i:index_t): Tot bool = uint64_max - x >= (u32_64 i)
private inline_for_extraction
let join_offset (tree:offset_t) (i:index_t{add64_fits tree i}): Tot (r:offset_t{offsets_connect tree r}) =
U64.add tree (u32_64 i)
inline_for_extraction val merkle_tree_size_lg: uint32_t
let merkle_tree_size_lg = 32ul
// A Merkle tree `MT i j hs rhs_ok rhs` stores all necessary hashes to generate
// a Merkle path for each element from the index `i` to `j-1`.
// - Parameters
// `hs`: a 2-dim store for hashes, where `hs[0]` contains leaf hash values.
// `rhs_ok`: indicates whether the rightmost hashes are up-to-date
// `rhs`: a store for "rightmost" hashes, manipulated only when required to
// calculate some merkle paths that need the rightmost hashes
// as a part of them.
// `mroot`: during the construction of `rhs` we can also calculate the Merkle
// root of the tree. If `rhs_ok` is true then it has the up-to-date
// root value.
noeq type merkle_tree =
| MT: hash_size:hash_size_t ->
offset:offset_t ->
i:index_t -> j:index_t{i <= j /\ add64_fits offset j} ->
hs:hash_vv hash_size {V.size_of hs = merkle_tree_size_lg} ->
rhs_ok:bool ->
rhs:hash_vec #hash_size {V.size_of rhs = merkle_tree_size_lg} ->
mroot:hash #hash_size ->
hash_spec:Ghost.erased (MTS.hash_fun_t #(U32.v hash_size)) ->
hash_fun:hash_fun_t #hash_size #hash_spec ->
merkle_tree
type mt_p = B.pointer merkle_tree
type const_mt_p = const_pointer merkle_tree
inline_for_extraction
let merkle_tree_conditions (#hsz:Ghost.erased hash_size_t) (offset:uint64_t) (i j:uint32_t) (hs:hash_vv hsz) (rhs_ok:bool) (rhs:hash_vec #hsz) (mroot:hash #hsz): Tot bool =
j >= i && add64_fits offset j &&
V.size_of hs = merkle_tree_size_lg &&
V.size_of rhs = merkle_tree_size_lg
// The maximum number of currently held elements in the tree is (2^32 - 1).
// cwinter: even when using 64-bit indices, we fail if the underlying 32-bit
// vector is full; this can be fixed if necessary.
private inline_for_extraction
val mt_not_full_nst: mtv:merkle_tree -> Tot bool
let mt_not_full_nst mtv = MT?.j mtv < uint32_32_max
val mt_not_full: HS.mem -> mt_p -> GTot bool
let mt_not_full h mt = mt_not_full_nst (B.get h mt 0)
/// (Memory) Safety
val offset_of: i:index_t -> Tot index_t
let offset_of i = if i % 2ul = 0ul then i else i - 1ul
// `mt_safe_elts` says that it is safe to access an element from `i` to `j - 1`
// at level `lv` in the Merkle tree, i.e., hs[lv][k] (i <= k < j) is a valid
// element.
inline_for_extraction noextract
val mt_safe_elts:
#hsz:hash_size_t ->
h:HS.mem -> lv:uint32_t{lv <= merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
i:index_t -> j:index_t{j >= i} ->
GTot Type0 (decreases (32 - U32.v lv))
let rec mt_safe_elts #hsz h lv hs i j =
if lv = merkle_tree_size_lg then true
else (let ofs = offset_of i in
V.size_of (V.get h hs lv) == j - ofs /\
mt_safe_elts #hsz h (lv + 1ul) hs (i / 2ul) (j / 2ul))
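// Worked example: with i = 2ul and j = 7ul, offset_of i = 2ul, so hs[lv] must
// hold 5 hashes; one level up the indices become 1ul and 3ul with offset 0ul,
// so hs[lv+1] must hold 3 hashes, then 1, then 0 at all remaining levels.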
#push-options "--initial_fuel 1 --max_fuel 1"
val mt_safe_elts_constr:
#hsz:hash_size_t ->
h:HS.mem -> lv:uint32_t{lv < merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
i:index_t -> j:index_t{j >= i} ->
Lemma (requires (V.size_of (V.get h hs lv) == j - offset_of i /\
mt_safe_elts #hsz h (lv + 1ul) hs (i / 2ul) (j / 2ul)))
(ensures (mt_safe_elts #hsz h lv hs i j))
let mt_safe_elts_constr #_ h lv hs i j = ()
val mt_safe_elts_head:
#hsz:hash_size_t ->
h:HS.mem -> lv:uint32_t{lv < merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
i:index_t -> j:index_t{j >= i} ->
Lemma (requires (mt_safe_elts #hsz h lv hs i j))
(ensures (V.size_of (V.get h hs lv) == j - offset_of i))
let mt_safe_elts_head #_ h lv hs i j = ()
val mt_safe_elts_rec:
#hsz:hash_size_t ->
h:HS.mem -> lv:uint32_t{lv < merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
i:index_t -> j:index_t{j >= i} ->
Lemma (requires (mt_safe_elts #hsz h lv hs i j))
(ensures (mt_safe_elts #hsz h (lv + 1ul) hs (i / 2ul) (j / 2ul)))
let mt_safe_elts_rec #_ h lv hs i j = ()
val mt_safe_elts_init:
#hsz:hash_size_t ->
h:HS.mem -> lv:uint32_t{lv <= merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
Lemma (requires (V.forall_ h hs lv (V.size_of hs)
(fun hv -> V.size_of hv = 0ul)))
(ensures (mt_safe_elts #hsz h lv hs 0ul 0ul))
(decreases (32 - U32.v lv))
let rec mt_safe_elts_init #hsz h lv hs =
if lv = merkle_tree_size_lg then ()
else mt_safe_elts_init #hsz h (lv + 1ul) hs
#pop-options
val mt_safe_elts_preserved:
#hsz:hash_size_t ->
lv:uint32_t{lv <= merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
i:index_t -> j:index_t{j >= i} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 hs /\
mt_safe_elts #hsz h0 lv hs i j /\
loc_disjoint p (V.loc_vector_within hs lv (V.size_of hs)) /\
modifies p h0 h1))
(ensures (mt_safe_elts #hsz h1 lv hs i j))
(decreases (32 - U32.v lv))
[SMTPat (V.live h0 hs);
SMTPat (mt_safe_elts #hsz h0 lv hs i j);
SMTPat (loc_disjoint p (RV.loc_rvector hs));
SMTPat (modifies p h0 h1)]
#push-options "--z3rlimit 100 --initial_fuel 2 --max_fuel 2"
let rec mt_safe_elts_preserved #hsz lv hs i j p h0 h1 =
if lv = merkle_tree_size_lg then ()
else (V.get_preserved hs lv p h0 h1;
mt_safe_elts_preserved #hsz (lv + 1ul) hs (i / 2ul) (j / 2ul) p h0 h1)
#pop-options
// `mt_safe` is the invariant of a Merkle tree through its lifetime.
// It includes liveness, regionality, disjointness (to each data structure),
// and valid element access (`mt_safe_elts`).
inline_for_extraction noextract
val mt_safe: HS.mem -> mt_p -> GTot Type0
let mt_safe h mt =
B.live h mt /\ B.freeable mt /\
(let mtv = B.get h mt 0 in
// Liveness & Accessibility
RV.rv_inv h (MT?.hs mtv) /\
RV.rv_inv h (MT?.rhs mtv) /\
Rgl?.r_inv (hreg (MT?.hash_size mtv)) h (MT?.mroot mtv) /\
mt_safe_elts h 0ul (MT?.hs mtv) (MT?.i mtv) (MT?.j mtv) /\
// Regionality
HH.extends (V.frameOf (MT?.hs mtv)) (B.frameOf mt) /\
HH.extends (V.frameOf (MT?.rhs mtv)) (B.frameOf mt) /\
HH.extends (B.frameOf (MT?.mroot mtv)) (B.frameOf mt) /\
HH.disjoint (V.frameOf (MT?.hs mtv)) (V.frameOf (MT?.rhs mtv)) /\
HH.disjoint (V.frameOf (MT?.hs mtv)) (B.frameOf (MT?.mroot mtv)) /\
HH.disjoint (V.frameOf (MT?.rhs mtv)) (B.frameOf (MT?.mroot mtv)))
// Since a Merkle tree satisfies regionality, it's ok to take all regions from
// a tree pointer as a location of the tree.
val mt_loc: mt_p -> GTot loc
let mt_loc mt = B.loc_all_regions_from false (B.frameOf mt)
val mt_safe_preserved:
mt:mt_p -> p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (mt_safe h0 mt /\
loc_disjoint p (mt_loc mt) /\
modifies p h0 h1))
(ensures (B.get h0 mt 0 == B.get h1 mt 0 /\
mt_safe h1 mt))
let mt_safe_preserved mt p h0 h1 =
assert (loc_includes (mt_loc mt) (B.loc_buffer mt));
let mtv = B.get h0 mt 0 in
assert (loc_includes (mt_loc mt) (RV.loc_rvector (MT?.hs mtv)));
assert (loc_includes (mt_loc mt) (RV.loc_rvector (MT?.rhs mtv)));
assert (loc_includes (mt_loc mt) (V.loc_vector (MT?.hs mtv)));
assert (loc_includes (mt_loc mt)
(B.loc_all_regions_from false (B.frameOf (MT?.mroot mtv))));
RV.rv_inv_preserved (MT?.hs mtv) p h0 h1;
RV.rv_inv_preserved (MT?.rhs mtv) p h0 h1;
Rgl?.r_sep (hreg (MT?.hash_size mtv)) (MT?.mroot mtv) p h0 h1;
V.loc_vector_within_included (MT?.hs mtv) 0ul (V.size_of (MT?.hs mtv));
mt_safe_elts_preserved 0ul (MT?.hs mtv) (MT?.i mtv) (MT?.j mtv) p h0 h1
/// Lifting to a high-level Merkle tree structure
val mt_safe_elts_spec:
#hsz:hash_size_t ->
h:HS.mem ->
lv:uint32_t{lv <= merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
i:index_t ->
j:index_t{j >= i} ->
Lemma (requires (RV.rv_inv h hs /\
mt_safe_elts #hsz h lv hs i j))
(ensures (MTH.hs_wf_elts #(U32.v hsz)
(U32.v lv) (RV.as_seq h hs)
(U32.v i) (U32.v j)))
(decreases (32 - U32.v lv))
#push-options "--z3rlimit 100 --initial_fuel 2 --max_fuel 2"
let rec mt_safe_elts_spec #_ h lv hs i j =
if lv = merkle_tree_size_lg then ()
else mt_safe_elts_spec h (lv + 1ul) hs (i / 2ul) (j / 2ul)
#pop-options
val merkle_tree_lift:
h:HS.mem ->
mtv:merkle_tree{
RV.rv_inv h (MT?.hs mtv) /\
RV.rv_inv h (MT?.rhs mtv) /\
Rgl?.r_inv (hreg (MT?.hash_size mtv)) h (MT?.mroot mtv) /\
mt_safe_elts #(MT?.hash_size mtv) h 0ul (MT?.hs mtv) (MT?.i mtv) (MT?.j mtv)} ->
GTot (r:MTH.merkle_tree #(U32.v (MT?.hash_size mtv)) {MTH.mt_wf_elts #_ r})
let merkle_tree_lift h mtv =
mt_safe_elts_spec h 0ul (MT?.hs mtv) (MT?.i mtv) (MT?.j mtv);
MTH.MT #(U32.v (MT?.hash_size mtv))
(U32.v (MT?.i mtv))
(U32.v (MT?.j mtv))
(RV.as_seq h (MT?.hs mtv))
(MT?.rhs_ok mtv)
(RV.as_seq h (MT?.rhs mtv))
(Rgl?.r_repr (hreg (MT?.hash_size mtv)) h (MT?.mroot mtv))
(Ghost.reveal (MT?.hash_spec mtv))
val mt_lift:
h:HS.mem -> mt:mt_p{mt_safe h mt} ->
GTot (r:MTH.merkle_tree #(U32.v (MT?.hash_size (B.get h mt 0))) {MTH.mt_wf_elts #_ r})
let mt_lift h mt =
merkle_tree_lift h (B.get h mt 0)
val mt_preserved:
mt:mt_p -> p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (mt_safe h0 mt /\
loc_disjoint p (mt_loc mt) /\
modifies p h0 h1))
(ensures (mt_safe_preserved mt p h0 h1;
mt_lift h0 mt == mt_lift h1 mt))
let mt_preserved mt p h0 h1 =
assert (loc_includes (B.loc_all_regions_from false (B.frameOf mt))
(B.loc_buffer mt));
B.modifies_buffer_elim mt p h0 h1;
assert (B.get h0 mt 0 == B.get h1 mt 0);
assert (loc_includes (B.loc_all_regions_from false (B.frameOf mt))
(RV.loc_rvector (MT?.hs (B.get h0 mt 0))));
assert (loc_includes (B.loc_all_regions_from false (B.frameOf mt))
(RV.loc_rvector (MT?.rhs (B.get h0 mt 0))));
assert (loc_includes (B.loc_all_regions_from false (B.frameOf mt))
(B.loc_buffer (MT?.mroot (B.get h0 mt 0))));
RV.as_seq_preserved (MT?.hs (B.get h0 mt 0)) p h0 h1;
RV.as_seq_preserved (MT?.rhs (B.get h0 mt 0)) p h0 h1;
B.modifies_buffer_elim (MT?.mroot (B.get h0 mt 0)) p h0 h1
/// Construction
// Note that the public function for creation is `mt_create` defined below,
// which builds a tree with an initial hash.
#push-options "--z3rlimit 100 --initial_fuel 1 --max_fuel 1 --initial_ifuel 1 --max_ifuel 1"
private
val create_empty_mt:
hash_size:hash_size_t ->
hash_spec:Ghost.erased (MTS.hash_fun_t #(U32.v hash_size)) ->
hash_fun:hash_fun_t #hash_size #hash_spec ->
r:HST.erid ->
HST.ST mt_p
(requires (fun _ -> true))
(ensures (fun h0 mt h1 ->
let dmt = B.get h1 mt 0 in
// memory safety
B.frameOf mt = r /\
modifies (mt_loc mt) h0 h1 /\
mt_safe h1 mt /\
mt_not_full h1 mt /\
// correctness
MT?.hash_size dmt = hash_size /\
MT?.offset dmt = 0UL /\
merkle_tree_lift h1 dmt == MTH.create_empty_mt #_ #(Ghost.reveal hash_spec) ()))
let create_empty_mt hsz hash_spec hash_fun r =
[@inline_let] let hrg = hreg hsz in
[@inline_let] let hvrg = hvreg hsz in
[@inline_let] let hvvrg = hvvreg hsz in
let hs_region = HST.new_region r in
let hs = RV.alloc_rid hvrg merkle_tree_size_lg hs_region in
let h0 = HST.get () in
mt_safe_elts_init #hsz h0 0ul hs;
let rhs_region = HST.new_region r in
let rhs = RV.alloc_rid hrg merkle_tree_size_lg rhs_region in
let h1 = HST.get () in
assert (RV.as_seq h1 rhs == S.create 32 (MTH.hash_init #(U32.v hsz)));
RV.rv_inv_preserved hs (V.loc_vector rhs) h0 h1;
RV.as_seq_preserved hs (V.loc_vector rhs) h0 h1;
V.loc_vector_within_included hs 0ul (V.size_of hs);
mt_safe_elts_preserved #hsz 0ul hs 0ul 0ul (V.loc_vector rhs) h0 h1;
let mroot_region = HST.new_region r in
let mroot = rg_alloc hrg mroot_region in
let h2 = HST.get () in
RV.as_seq_preserved hs loc_none h1 h2;
RV.as_seq_preserved rhs loc_none h1 h2;
mt_safe_elts_preserved #hsz 0ul hs 0ul 0ul loc_none h1 h2;
let mt = B.malloc r (MT hsz 0UL 0ul 0ul hs false rhs mroot hash_spec hash_fun) 1ul in
let h3 = HST.get () in
RV.as_seq_preserved hs loc_none h2 h3;
RV.as_seq_preserved rhs loc_none h2 h3;
Rgl?.r_sep hrg mroot loc_none h2 h3;
mt_safe_elts_preserved #hsz 0ul hs 0ul 0ul loc_none h2 h3;
mt
#pop-options
/// Destruction (free)
val mt_free: mt:mt_p ->
HST.ST unit
(requires (fun h0 -> mt_safe h0 mt))
(ensures (fun h0 _ h1 -> modifies (mt_loc mt) h0 h1))
#push-options "--z3rlimit 100"
let mt_free mt =
let mtv = !*mt in
RV.free (MT?.hs mtv);
RV.free (MT?.rhs mtv);
[@inline_let] let rg = hreg (MT?.hash_size mtv) in
rg_free rg (MT?.mroot mtv);
B.free mt
#pop-options
/// Insertion
private
val as_seq_sub_upd:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector #a #rst rg ->
i:uint32_t{i < V.size_of rv} -> v:Rgl?.repr rg ->
Lemma (requires (RV.rv_inv h rv))
(ensures (S.equal (S.upd (RV.as_seq h rv) (U32.v i) v)
(S.append
(RV.as_seq_sub h rv 0ul i)
(S.cons v (RV.as_seq_sub h rv (i + 1ul) (V.size_of rv))))))
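// On a small instance: updating index 1 of a three-element sequence
// [x0; x1; x2] with v is the same as appending [x0] to (v :: [x2]).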
#push-options "--z3rlimit 20"
let as_seq_sub_upd #a #rst #rg h rv i v =
Seq.Properties.slice_upd (RV.as_seq h rv) 0 (U32.v i) (U32.v i) v;
Seq.Properties.slice_upd (RV.as_seq h rv) (U32.v i + 1) (U32.v (V.size_of rv)) (U32.v i) v;
RV.as_seq_seq_slice rg h (V.as_seq h rv)
0 (U32.v (V.size_of rv)) 0 (U32.v i);
assert (S.equal (S.slice (RV.as_seq h rv) 0 (U32.v i))
(RV.as_seq_sub h rv 0ul i));
RV.as_seq_seq_slice rg h (V.as_seq h rv)
0 (U32.v (V.size_of rv)) (U32.v i + 1) (U32.v (V.size_of rv));
assert (S.equal (S.slice (RV.as_seq h rv) (U32.v i + 1) (U32.v (V.size_of rv)))
(RV.as_seq_sub h rv (i + 1ul) (V.size_of rv)));
assert (S.index (S.upd (RV.as_seq h rv) (U32.v i) v) (U32.v i) == v)
#pop-options
// `hash_vv_insert_copy` inserts a hash element at a level `lv`, by copying
// and pushing its content to `hs[lv]`. For detailed insertion procedure, see
// `insert_` and `mt_insert`.
#push-options "--z3rlimit 100 --initial_fuel 1 --max_fuel 1"
private
inline_for_extraction
val hash_vv_insert_copy:
#hsz:hash_size_t ->
lv:uint32_t{lv < merkle_tree_size_lg} ->
i:Ghost.erased index_t ->
j:index_t{
Ghost.reveal i <= j &&
U32.v j < pow2 (32 - U32.v lv) - 1 &&
j < uint32_32_max} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
v:hash #hsz ->
HST.ST unit
(requires (fun h0 ->
RV.rv_inv h0 hs /\
Rgl?.r_inv (hreg hsz) h0 v /\
HH.disjoint (V.frameOf hs) (B.frameOf v) /\
mt_safe_elts #hsz h0 lv hs (Ghost.reveal i) j))
(ensures (fun h0 _ h1 ->
// memory safety
modifies (loc_union
(RV.rs_loc_elem (hvreg hsz) (V.as_seq h0 hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul)))
h0 h1 /\
RV.rv_inv h1 hs /\
Rgl?.r_inv (hreg hsz) h1 v /\
V.size_of (V.get h1 hs lv) == j + 1ul - offset_of (Ghost.reveal i) /\
V.size_of (V.get h1 hs lv) == V.size_of (V.get h0 hs lv) + 1ul /\
mt_safe_elts #hsz h1 (lv + 1ul) hs (Ghost.reveal i / 2ul) (j / 2ul) /\
RV.rv_loc_elems h0 hs (lv + 1ul) (V.size_of hs) ==
RV.rv_loc_elems h1 hs (lv + 1ul) (V.size_of hs) /\
// correctness
(mt_safe_elts_spec #hsz h0 lv hs (Ghost.reveal i) j;
S.equal (RV.as_seq h1 hs)
(MTH.hashess_insert
(U32.v lv) (U32.v (Ghost.reveal i)) (U32.v j)
(RV.as_seq h0 hs) (Rgl?.r_repr (hreg hsz) h0 v))) /\
S.equal (S.index (RV.as_seq h1 hs) (U32.v lv))
(S.snoc (S.index (RV.as_seq h0 hs) (U32.v lv))
(Rgl?.r_repr (hreg hsz) h0 v))))
let hash_vv_insert_copy #hsz lv i j hs v =
let hh0 = HST.get () in
mt_safe_elts_rec hh0 lv hs (Ghost.reveal i) j;
/// 1) Insert an element at the level `lv`, where the new vector is not yet
/// connected to `hs`.
let ihv = RV.insert_copy (hcpy hsz) (V.index hs lv) v in
let hh1 = HST.get () in
// 1-0) Basic disjointness conditions
V.forall2_forall_left hh0 hs 0ul (V.size_of hs) lv
(fun b1 b2 -> HH.disjoint (Rgl?.region_of (hvreg hsz) b1)
(Rgl?.region_of (hvreg hsz) b2));
V.forall2_forall_right hh0 hs 0ul (V.size_of hs) lv
(fun b1 b2 -> HH.disjoint (Rgl?.region_of (hvreg hsz) b1)
(Rgl?.region_of (hvreg hsz) b2));
V.loc_vector_within_included hs lv (lv + 1ul);
V.loc_vector_within_included hs (lv + 1ul) (V.size_of hs);
V.loc_vector_within_disjoint hs lv (lv + 1ul) (lv + 1ul) (V.size_of hs);
// 1-1) For the `modifies` postcondition.
assert (modifies (RV.rs_loc_elem (hvreg hsz) (V.as_seq hh0 hs) (U32.v lv)) hh0 hh1);
// 1-2) Preservation
Rgl?.r_sep (hreg hsz) v (RV.loc_rvector (V.get hh0 hs lv)) hh0 hh1;
RV.rv_loc_elems_preserved
hs (lv + 1ul) (V.size_of hs)
(RV.loc_rvector (V.get hh0 hs lv)) hh0 hh1;
// 1-3) For `mt_safe_elts`
assert (V.size_of ihv == j + 1ul - offset_of (Ghost.reveal i)); // head updated
mt_safe_elts_preserved
(lv + 1ul) hs (Ghost.reveal i / 2ul) (j / 2ul)
(RV.loc_rvector (V.get hh0 hs lv)) hh0 hh1; // tail not yet
// 1-4) For the `rv_inv` postcondition
RV.rs_loc_elems_elem_disj
(hvreg hsz) (V.as_seq hh0 hs) (V.frameOf hs)
0 (U32.v (V.size_of hs)) 0 (U32.v lv) (U32.v lv);
RV.rs_loc_elems_parent_disj
(hvreg hsz) (V.as_seq hh0 hs) (V.frameOf hs)
0 (U32.v lv);
RV.rv_elems_inv_preserved
hs 0ul lv (RV.loc_rvector (V.get hh0 hs lv))
hh0 hh1;
assert (RV.rv_elems_inv hh1 hs 0ul lv);
RV.rs_loc_elems_elem_disj
(hvreg hsz) (V.as_seq hh0 hs) (V.frameOf hs)
0 (U32.v (V.size_of hs))
(U32.v lv + 1) (U32.v (V.size_of hs))
(U32.v lv);
RV.rs_loc_elems_parent_disj
(hvreg hsz) (V.as_seq hh0 hs) (V.frameOf hs)
(U32.v lv + 1) (U32.v (V.size_of hs));
RV.rv_elems_inv_preserved
hs (lv + 1ul) (V.size_of hs) (RV.loc_rvector (V.get hh0 hs lv))
hh0 hh1;
assert (RV.rv_elems_inv hh1 hs (lv + 1ul) (V.size_of hs));
// assert (rv_itself_inv hh1 hs);
// assert (elems_reg hh1 hs);
// 1-5) Correctness
assert (S.equal (RV.as_seq hh1 ihv)
(S.snoc (RV.as_seq hh0 (V.get hh0 hs lv)) (Rgl?.r_repr (hreg hsz) hh0 v)));
/// 2) Assign the updated vector to `hs` at the level `lv`.
RV.assign hs lv ihv;
let hh2 = HST.get () in
// 2-1) For the `modifies` postcondition.
assert (modifies (V.loc_vector_within hs lv (lv + 1ul)) hh1 hh2);
assert (modifies (loc_union
(RV.rs_loc_elem (hvreg hsz) (V.as_seq hh0 hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul))) hh0 hh2);
// 2-2) Preservation
Rgl?.r_sep (hreg hsz) v (RV.loc_rvector hs) hh1 hh2;
RV.rv_loc_elems_preserved
hs (lv + 1ul) (V.size_of hs)
(V.loc_vector_within hs lv (lv + 1ul)) hh1 hh2;
// 2-3) For `mt_safe_elts`
assert (V.size_of (V.get hh2 hs lv) == j + 1ul - offset_of (Ghost.reveal i));
mt_safe_elts_preserved
(lv + 1ul) hs (Ghost.reveal i / 2ul) (j / 2ul)
(V.loc_vector_within hs lv (lv + 1ul)) hh1 hh2;
// 2-4) Correctness
RV.as_seq_sub_preserved hs 0ul lv (loc_rvector ihv) hh0 hh1;
RV.as_seq_sub_preserved hs (lv + 1ul) merkle_tree_size_lg (loc_rvector ihv) hh0 hh1;
assert (S.equal (RV.as_seq hh2 hs)
(S.append
(RV.as_seq_sub hh0 hs 0ul lv)
(S.cons (RV.as_seq hh1 ihv)
(RV.as_seq_sub hh0 hs (lv + 1ul) merkle_tree_size_lg))));
as_seq_sub_upd hh0 hs lv (RV.as_seq hh1 ihv)
#pop-options
private
val insert_index_helper_even:
lv:uint32_t{lv < merkle_tree_size_lg} ->
j:index_t{U32.v j < pow2 (32 - U32.v lv) - 1} ->
Lemma (requires (j % 2ul <> 1ul))
(ensures (U32.v j % 2 <> 1 /\ j / 2ul == (j + 1ul) / 2ul))
let insert_index_helper_even lv j = ()
#push-options "--z3rlimit 100 --initial_fuel 1 --max_fuel 1 --initial_ifuel 1 --max_ifuel 1"
private
val insert_index_helper_odd:
lv:uint32_t{lv < merkle_tree_size_lg} ->
i:index_t ->
j:index_t{i <= j && U32.v j < pow2 (32 - U32.v lv) - 1} ->
Lemma (requires (j % 2ul = 1ul /\
j < uint32_32_max))
(ensures (U32.v j % 2 = 1 /\
U32.v (j / 2ul) < pow2 (32 - U32.v (lv + 1ul)) - 1 /\
(j + 1ul) / 2ul == j / 2ul + 1ul /\
j - offset_of i > 0ul))
let insert_index_helper_odd lv i j = ()
#pop-options
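// Arithmetic sanity check for the two helpers above: for an even j such as 6ul,
// j / 2ul = (j + 1ul) / 2ul = 3ul; for an odd j such as 7ul,
// (j + 1ul) / 2ul = 4ul = j / 2ul + 1ul.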
private
val loc_union_assoc_4:
a:loc -> b:loc -> c:loc -> d:loc ->
Lemma (loc_union (loc_union a b) (loc_union c d) ==
loc_union (loc_union a c) (loc_union b d)) | false | false | MerkleTree.Low.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 0,
"initial_ifuel": 0,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 10,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val loc_union_assoc_4:
a:loc -> b:loc -> c:loc -> d:loc ->
Lemma (loc_union (loc_union a b) (loc_union c d) ==
loc_union (loc_union a c) (loc_union b d)) | [] | MerkleTree.Low.loc_union_assoc_4 | {
"file_name": "src/MerkleTree.Low.fst",
"git_rev": "3b0f086655c145aa23f58a97259ebf4cf112a4a3",
"git_url": "https://github.com/hacl-star/merkle-tree.git",
"project_name": "merkle-tree"
} |
a: LowStar.Monotonic.Buffer.loc ->
b: LowStar.Monotonic.Buffer.loc ->
c: LowStar.Monotonic.Buffer.loc ->
d: LowStar.Monotonic.Buffer.loc
-> FStar.Pervasives.Lemma
(ensures
LowStar.Monotonic.Buffer.loc_union (LowStar.Monotonic.Buffer.loc_union a b)
(LowStar.Monotonic.Buffer.loc_union c d) ==
LowStar.Monotonic.Buffer.loc_union (LowStar.Monotonic.Buffer.loc_union a c)
(LowStar.Monotonic.Buffer.loc_union b d)) | {
"end_col": 37,
"end_line": 598,
"start_col": 2,
"start_line": 595
} |
FStar.Pervasives.Lemma | val insert_modifies_union_loc_weakening:
l1:loc -> l2:loc -> l3:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (modifies l1 h0 h1))
(ensures (modifies (loc_union (loc_union l1 l2) l3) h0 h1)) | [
{
"abbrev": false,
"full_module": "MerkleTree.Low.VectorExtras",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree.Low.Hashfunctions",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree.Low.Datastructures",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": true,
"full_module": "MerkleTree.Spec",
"short_module": "MTS"
},
{
"abbrev": true,
"full_module": "MerkleTree.New.High",
"short_module": "MTH"
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "LowStar.Regional.Instances",
"short_module": "RVI"
},
{
"abbrev": true,
"full_module": "LowStar.RVector",
"short_module": "RV"
},
{
"abbrev": true,
"full_module": "LowStar.Vector",
"short_module": "V"
},
{
"abbrev": true,
"full_module": "LowStar.ConstBuffer",
"short_module": "CB"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.Monotonic.HyperHeap",
"short_module": "HH"
},
{
"abbrev": true,
"full_module": "FStar.Monotonic.HyperStack",
"short_module": "MHS"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "LowStar.Regional.Instances",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.RVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Regional",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Vector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.BufferOps",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Buffer",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Integers",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.All",
"short_module": null
},
{
"abbrev": false,
"full_module": "EverCrypt.Helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let insert_modifies_union_loc_weakening l1 l2 l3 h0 h1 =
B.loc_includes_union_l l1 l2 l1;
B.loc_includes_union_l (loc_union l1 l2) l3 (loc_union l1 l2) | val insert_modifies_union_loc_weakening:
l1:loc -> l2:loc -> l3:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (modifies l1 h0 h1))
(ensures (modifies (loc_union (loc_union l1 l2) l3) h0 h1))
let insert_modifies_union_loc_weakening l1 l2 l3 h0 h1 = | false | null | true | B.loc_includes_union_l l1 l2 l1;
B.loc_includes_union_l (loc_union l1 l2) l3 (loc_union l1 l2) | {
"checked_file": "MerkleTree.Low.fst.checked",
"dependencies": [
"prims.fst.checked",
"MerkleTree.Spec.fst.checked",
"MerkleTree.New.High.fst.checked",
"MerkleTree.Low.VectorExtras.fst.checked",
"MerkleTree.Low.Hashfunctions.fst.checked",
"MerkleTree.Low.Datastructures.fst.checked",
"LowStar.Vector.fst.checked",
"LowStar.RVector.fst.checked",
"LowStar.Regional.Instances.fst.checked",
"LowStar.Regional.fst.checked",
"LowStar.ConstBuffer.fsti.checked",
"LowStar.BufferOps.fst.checked",
"LowStar.Buffer.fst.checked",
"Lib.IntTypes.fsti.checked",
"Lib.ByteBuffer.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.Properties.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Monotonic.HyperStack.fsti.checked",
"FStar.Monotonic.HyperHeap.fsti.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Integers.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked",
"FStar.All.fst.checked",
"EverCrypt.Helpers.fsti.checked"
],
"interface_file": false,
"source_file": "MerkleTree.Low.fst"
} | [
"lemma"
] | [
"LowStar.Monotonic.Buffer.loc",
"FStar.Monotonic.HyperStack.mem",
"LowStar.Monotonic.Buffer.loc_includes_union_l",
"LowStar.Monotonic.Buffer.loc_union",
"Prims.unit"
] | [] | module MerkleTree.Low
open EverCrypt.Helpers
open FStar.All
open FStar.Integers
open FStar.Mul
open LowStar.Buffer
open LowStar.BufferOps
open LowStar.Vector
open LowStar.Regional
open LowStar.RVector
open LowStar.Regional.Instances
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
module MHS = FStar.Monotonic.HyperStack
module HH = FStar.Monotonic.HyperHeap
module B = LowStar.Buffer
module CB = LowStar.ConstBuffer
module V = LowStar.Vector
module RV = LowStar.RVector
module RVI = LowStar.Regional.Instances
module S = FStar.Seq
module U32 = FStar.UInt32
module U64 = FStar.UInt64
module MTH = MerkleTree.New.High
module MTS = MerkleTree.Spec
open Lib.IntTypes
open MerkleTree.Low.Datastructures
open MerkleTree.Low.Hashfunctions
open MerkleTree.Low.VectorExtras
#set-options "--z3rlimit 10 --initial_fuel 0 --max_fuel 0 --initial_ifuel 0 --max_ifuel 0"
type const_pointer (a:Type0) = b:CB.const_buffer a{CB.length b == 1 /\ CB.qual_of b == CB.MUTABLE}
/// Low-level Merkle tree data structure
///
// NOTE: because of a lack of 64-bit LowStar.Buffer support, currently
// we cannot change below to some other types.
type index_t = uint32_t
let uint32_32_max = 4294967295ul
inline_for_extraction
let uint32_max = 4294967295UL
let uint64_max = 18446744073709551615UL
let offset_range_limit = uint32_max
type offset_t = uint64_t
inline_for_extraction noextract unfold let u32_64 = Int.Cast.uint32_to_uint64
inline_for_extraction noextract unfold let u64_32 = Int.Cast.uint64_to_uint32
private inline_for_extraction
let offsets_connect (x:offset_t) (y:offset_t): Tot bool = y >= x && (y - x) <= offset_range_limit
private inline_for_extraction
let split_offset (tree:offset_t) (index:offset_t{offsets_connect tree index}): Tot index_t =
[@inline_let] let diff = U64.sub_mod index tree in
assert (diff <= offset_range_limit);
Int.Cast.uint64_to_uint32 diff
private inline_for_extraction
let add64_fits (x:offset_t) (i:index_t): Tot bool = uint64_max - x >= (u32_64 i)
private inline_for_extraction
let join_offset (tree:offset_t) (i:index_t{add64_fits tree i}): Tot (r:offset_t{offsets_connect tree r}) =
U64.add tree (u32_64 i)
inline_for_extraction val merkle_tree_size_lg: uint32_t
let merkle_tree_size_lg = 32ul
// A Merkle tree `MT i j hs rhs_ok rhs` stores all necessary hashes to generate
// a Merkle path for each element from the index `i` to `j-1`.
// - Parameters
// `hs`: a 2-dim store for hashes, where `hs[0]` contains leaf hash values.
// `rhs_ok`: indicates whether the rightmost hashes are up-to-date
// `rhs`: a store for "rightmost" hashes, manipulated only when required to
// calculate some merkle paths that need the rightmost hashes
// as a part of them.
// `mroot`: during the construction of `rhs` we can also calculate the Merkle
// root of the tree. If `rhs_ok` is true then it has the up-to-date
// root value.
noeq type merkle_tree =
| MT: hash_size:hash_size_t ->
offset:offset_t ->
i:index_t -> j:index_t{i <= j /\ add64_fits offset j} ->
hs:hash_vv hash_size {V.size_of hs = merkle_tree_size_lg} ->
rhs_ok:bool ->
rhs:hash_vec #hash_size {V.size_of rhs = merkle_tree_size_lg} ->
mroot:hash #hash_size ->
hash_spec:Ghost.erased (MTS.hash_fun_t #(U32.v hash_size)) ->
hash_fun:hash_fun_t #hash_size #hash_spec ->
merkle_tree
type mt_p = B.pointer merkle_tree
type const_mt_p = const_pointer merkle_tree
inline_for_extraction
let merkle_tree_conditions (#hsz:Ghost.erased hash_size_t) (offset:uint64_t) (i j:uint32_t) (hs:hash_vv hsz) (rhs_ok:bool) (rhs:hash_vec #hsz) (mroot:hash #hsz): Tot bool =
j >= i && add64_fits offset j &&
V.size_of hs = merkle_tree_size_lg &&
V.size_of rhs = merkle_tree_size_lg
// The maximum number of currently held elements in the tree is (2^32 - 1).
// cwinter: even when using 64-bit indices, we fail if the underlying 32-bit
// vector is full; this can be fixed if necessary.
private inline_for_extraction
val mt_not_full_nst: mtv:merkle_tree -> Tot bool
let mt_not_full_nst mtv = MT?.j mtv < uint32_32_max
val mt_not_full: HS.mem -> mt_p -> GTot bool
let mt_not_full h mt = mt_not_full_nst (B.get h mt 0)
/// (Memory) Safety
val offset_of: i:index_t -> Tot index_t
let offset_of i = if i % 2ul = 0ul then i else i - 1ul
// `mt_safe_elts` says that it is safe to access an element from `i` to `j - 1`
// at level `lv` in the Merkle tree, i.e., hs[lv][k] (i <= k < j) is a valid
// element.
inline_for_extraction noextract
val mt_safe_elts:
#hsz:hash_size_t ->
h:HS.mem -> lv:uint32_t{lv <= merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
i:index_t -> j:index_t{j >= i} ->
GTot Type0 (decreases (32 - U32.v lv))
let rec mt_safe_elts #hsz h lv hs i j =
if lv = merkle_tree_size_lg then true
else (let ofs = offset_of i in
V.size_of (V.get h hs lv) == j - ofs /\
mt_safe_elts #hsz h (lv + 1ul) hs (i / 2ul) (j / 2ul))
#push-options "--initial_fuel 1 --max_fuel 1"
val mt_safe_elts_constr:
#hsz:hash_size_t ->
h:HS.mem -> lv:uint32_t{lv < merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
i:index_t -> j:index_t{j >= i} ->
Lemma (requires (V.size_of (V.get h hs lv) == j - offset_of i /\
mt_safe_elts #hsz h (lv + 1ul) hs (i / 2ul) (j / 2ul)))
(ensures (mt_safe_elts #hsz h lv hs i j))
let mt_safe_elts_constr #_ h lv hs i j = ()
val mt_safe_elts_head:
#hsz:hash_size_t ->
h:HS.mem -> lv:uint32_t{lv < merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
i:index_t -> j:index_t{j >= i} ->
Lemma (requires (mt_safe_elts #hsz h lv hs i j))
(ensures (V.size_of (V.get h hs lv) == j - offset_of i))
let mt_safe_elts_head #_ h lv hs i j = ()
val mt_safe_elts_rec:
#hsz:hash_size_t ->
h:HS.mem -> lv:uint32_t{lv < merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
i:index_t -> j:index_t{j >= i} ->
Lemma (requires (mt_safe_elts #hsz h lv hs i j))
(ensures (mt_safe_elts #hsz h (lv + 1ul) hs (i / 2ul) (j / 2ul)))
let mt_safe_elts_rec #_ h lv hs i j = ()
val mt_safe_elts_init:
#hsz:hash_size_t ->
h:HS.mem -> lv:uint32_t{lv <= merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
Lemma (requires (V.forall_ h hs lv (V.size_of hs)
(fun hv -> V.size_of hv = 0ul)))
(ensures (mt_safe_elts #hsz h lv hs 0ul 0ul))
(decreases (32 - U32.v lv))
let rec mt_safe_elts_init #hsz h lv hs =
if lv = merkle_tree_size_lg then ()
else mt_safe_elts_init #hsz h (lv + 1ul) hs
#pop-options
val mt_safe_elts_preserved:
#hsz:hash_size_t ->
lv:uint32_t{lv <= merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
i:index_t -> j:index_t{j >= i} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 hs /\
mt_safe_elts #hsz h0 lv hs i j /\
loc_disjoint p (V.loc_vector_within hs lv (V.size_of hs)) /\
modifies p h0 h1))
(ensures (mt_safe_elts #hsz h1 lv hs i j))
(decreases (32 - U32.v lv))
[SMTPat (V.live h0 hs);
SMTPat (mt_safe_elts #hsz h0 lv hs i j);
SMTPat (loc_disjoint p (RV.loc_rvector hs));
SMTPat (modifies p h0 h1)]
#push-options "--z3rlimit 100 --initial_fuel 2 --max_fuel 2"
let rec mt_safe_elts_preserved #hsz lv hs i j p h0 h1 =
if lv = merkle_tree_size_lg then ()
else (V.get_preserved hs lv p h0 h1;
mt_safe_elts_preserved #hsz (lv + 1ul) hs (i / 2ul) (j / 2ul) p h0 h1)
#pop-options
// `mt_safe` is the invariant of a Merkle tree through its lifetime.
// It includes liveness, regionality, disjointness (to each data structure),
// and valid element access (`mt_safe_elts`).
inline_for_extraction noextract
val mt_safe: HS.mem -> mt_p -> GTot Type0
let mt_safe h mt =
B.live h mt /\ B.freeable mt /\
(let mtv = B.get h mt 0 in
// Liveness & Accessibility
RV.rv_inv h (MT?.hs mtv) /\
RV.rv_inv h (MT?.rhs mtv) /\
Rgl?.r_inv (hreg (MT?.hash_size mtv)) h (MT?.mroot mtv) /\
mt_safe_elts h 0ul (MT?.hs mtv) (MT?.i mtv) (MT?.j mtv) /\
// Regionality
HH.extends (V.frameOf (MT?.hs mtv)) (B.frameOf mt) /\
HH.extends (V.frameOf (MT?.rhs mtv)) (B.frameOf mt) /\
HH.extends (B.frameOf (MT?.mroot mtv)) (B.frameOf mt) /\
HH.disjoint (V.frameOf (MT?.hs mtv)) (V.frameOf (MT?.rhs mtv)) /\
HH.disjoint (V.frameOf (MT?.hs mtv)) (B.frameOf (MT?.mroot mtv)) /\
HH.disjoint (V.frameOf (MT?.rhs mtv)) (B.frameOf (MT?.mroot mtv)))
// Since a Merkle tree satisfies regionality, it's ok to take all regions from
// a tree pointer as a location of the tree.
val mt_loc: mt_p -> GTot loc
let mt_loc mt = B.loc_all_regions_from false (B.frameOf mt)
val mt_safe_preserved:
mt:mt_p -> p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (mt_safe h0 mt /\
loc_disjoint p (mt_loc mt) /\
modifies p h0 h1))
(ensures (B.get h0 mt 0 == B.get h1 mt 0 /\
mt_safe h1 mt))
let mt_safe_preserved mt p h0 h1 =
assert (loc_includes (mt_loc mt) (B.loc_buffer mt));
let mtv = B.get h0 mt 0 in
assert (loc_includes (mt_loc mt) (RV.loc_rvector (MT?.hs mtv)));
assert (loc_includes (mt_loc mt) (RV.loc_rvector (MT?.rhs mtv)));
assert (loc_includes (mt_loc mt) (V.loc_vector (MT?.hs mtv)));
assert (loc_includes (mt_loc mt)
(B.loc_all_regions_from false (B.frameOf (MT?.mroot mtv))));
RV.rv_inv_preserved (MT?.hs mtv) p h0 h1;
RV.rv_inv_preserved (MT?.rhs mtv) p h0 h1;
Rgl?.r_sep (hreg (MT?.hash_size mtv)) (MT?.mroot mtv) p h0 h1;
V.loc_vector_within_included (MT?.hs mtv) 0ul (V.size_of (MT?.hs mtv));
mt_safe_elts_preserved 0ul (MT?.hs mtv) (MT?.i mtv) (MT?.j mtv) p h0 h1
/// Lifting to a high-level Merkle tree structure
val mt_safe_elts_spec:
#hsz:hash_size_t ->
h:HS.mem ->
lv:uint32_t{lv <= merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
i:index_t ->
j:index_t{j >= i} ->
Lemma (requires (RV.rv_inv h hs /\
mt_safe_elts #hsz h lv hs i j))
(ensures (MTH.hs_wf_elts #(U32.v hsz)
(U32.v lv) (RV.as_seq h hs)
(U32.v i) (U32.v j)))
(decreases (32 - U32.v lv))
#push-options "--z3rlimit 100 --initial_fuel 2 --max_fuel 2"
let rec mt_safe_elts_spec #_ h lv hs i j =
if lv = merkle_tree_size_lg then ()
else mt_safe_elts_spec h (lv + 1ul) hs (i / 2ul) (j / 2ul)
#pop-options
val merkle_tree_lift:
h:HS.mem ->
mtv:merkle_tree{
RV.rv_inv h (MT?.hs mtv) /\
RV.rv_inv h (MT?.rhs mtv) /\
Rgl?.r_inv (hreg (MT?.hash_size mtv)) h (MT?.mroot mtv) /\
mt_safe_elts #(MT?.hash_size mtv) h 0ul (MT?.hs mtv) (MT?.i mtv) (MT?.j mtv)} ->
GTot (r:MTH.merkle_tree #(U32.v (MT?.hash_size mtv)) {MTH.mt_wf_elts #_ r})
let merkle_tree_lift h mtv =
mt_safe_elts_spec h 0ul (MT?.hs mtv) (MT?.i mtv) (MT?.j mtv);
MTH.MT #(U32.v (MT?.hash_size mtv))
(U32.v (MT?.i mtv))
(U32.v (MT?.j mtv))
(RV.as_seq h (MT?.hs mtv))
(MT?.rhs_ok mtv)
(RV.as_seq h (MT?.rhs mtv))
(Rgl?.r_repr (hreg (MT?.hash_size mtv)) h (MT?.mroot mtv))
(Ghost.reveal (MT?.hash_spec mtv))
val mt_lift:
h:HS.mem -> mt:mt_p{mt_safe h mt} ->
GTot (r:MTH.merkle_tree #(U32.v (MT?.hash_size (B.get h mt 0))) {MTH.mt_wf_elts #_ r})
let mt_lift h mt =
merkle_tree_lift h (B.get h mt 0)
val mt_preserved:
mt:mt_p -> p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (mt_safe h0 mt /\
loc_disjoint p (mt_loc mt) /\
modifies p h0 h1))
(ensures (mt_safe_preserved mt p h0 h1;
mt_lift h0 mt == mt_lift h1 mt))
let mt_preserved mt p h0 h1 =
assert (loc_includes (B.loc_all_regions_from false (B.frameOf mt))
(B.loc_buffer mt));
B.modifies_buffer_elim mt p h0 h1;
assert (B.get h0 mt 0 == B.get h1 mt 0);
assert (loc_includes (B.loc_all_regions_from false (B.frameOf mt))
(RV.loc_rvector (MT?.hs (B.get h0 mt 0))));
assert (loc_includes (B.loc_all_regions_from false (B.frameOf mt))
(RV.loc_rvector (MT?.rhs (B.get h0 mt 0))));
assert (loc_includes (B.loc_all_regions_from false (B.frameOf mt))
(B.loc_buffer (MT?.mroot (B.get h0 mt 0))));
RV.as_seq_preserved (MT?.hs (B.get h0 mt 0)) p h0 h1;
RV.as_seq_preserved (MT?.rhs (B.get h0 mt 0)) p h0 h1;
B.modifies_buffer_elim (MT?.mroot (B.get h0 mt 0)) p h0 h1
/// Construction
// Note that the public function for creation is `mt_create` defined below,
// which builds a tree with an initial hash.
#push-options "--z3rlimit 100 --initial_fuel 1 --max_fuel 1 --initial_ifuel 1 --max_ifuel 1"
private
val create_empty_mt:
hash_size:hash_size_t ->
hash_spec:Ghost.erased (MTS.hash_fun_t #(U32.v hash_size)) ->
hash_fun:hash_fun_t #hash_size #hash_spec ->
r:HST.erid ->
HST.ST mt_p
(requires (fun _ -> true))
(ensures (fun h0 mt h1 ->
let dmt = B.get h1 mt 0 in
// memory safety
B.frameOf mt = r /\
modifies (mt_loc mt) h0 h1 /\
mt_safe h1 mt /\
mt_not_full h1 mt /\
// correctness
MT?.hash_size dmt = hash_size /\
MT?.offset dmt = 0UL /\
merkle_tree_lift h1 dmt == MTH.create_empty_mt #_ #(Ghost.reveal hash_spec) ()))
let create_empty_mt hsz hash_spec hash_fun r =
[@inline_let] let hrg = hreg hsz in
[@inline_let] let hvrg = hvreg hsz in
[@inline_let] let hvvrg = hvvreg hsz in
let hs_region = HST.new_region r in
let hs = RV.alloc_rid hvrg merkle_tree_size_lg hs_region in
let h0 = HST.get () in
mt_safe_elts_init #hsz h0 0ul hs;
let rhs_region = HST.new_region r in
let rhs = RV.alloc_rid hrg merkle_tree_size_lg rhs_region in
let h1 = HST.get () in
assert (RV.as_seq h1 rhs == S.create 32 (MTH.hash_init #(U32.v hsz)));
RV.rv_inv_preserved hs (V.loc_vector rhs) h0 h1;
RV.as_seq_preserved hs (V.loc_vector rhs) h0 h1;
V.loc_vector_within_included hs 0ul (V.size_of hs);
mt_safe_elts_preserved #hsz 0ul hs 0ul 0ul (V.loc_vector rhs) h0 h1;
let mroot_region = HST.new_region r in
let mroot = rg_alloc hrg mroot_region in
let h2 = HST.get () in
RV.as_seq_preserved hs loc_none h1 h2;
RV.as_seq_preserved rhs loc_none h1 h2;
mt_safe_elts_preserved #hsz 0ul hs 0ul 0ul loc_none h1 h2;
let mt = B.malloc r (MT hsz 0UL 0ul 0ul hs false rhs mroot hash_spec hash_fun) 1ul in
let h3 = HST.get () in
RV.as_seq_preserved hs loc_none h2 h3;
RV.as_seq_preserved rhs loc_none h2 h3;
Rgl?.r_sep hrg mroot loc_none h2 h3;
mt_safe_elts_preserved #hsz 0ul hs 0ul 0ul loc_none h2 h3;
mt
#pop-options
/// Destruction (free)
val mt_free: mt:mt_p ->
HST.ST unit
(requires (fun h0 -> mt_safe h0 mt))
(ensures (fun h0 _ h1 -> modifies (mt_loc mt) h0 h1))
#push-options "--z3rlimit 100"
let mt_free mt =
let mtv = !*mt in
RV.free (MT?.hs mtv);
RV.free (MT?.rhs mtv);
[@inline_let] let rg = hreg (MT?.hash_size mtv) in
rg_free rg (MT?.mroot mtv);
B.free mt
#pop-options
/// Insertion
private
val as_seq_sub_upd:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector #a #rst rg ->
i:uint32_t{i < V.size_of rv} -> v:Rgl?.repr rg ->
Lemma (requires (RV.rv_inv h rv))
(ensures (S.equal (S.upd (RV.as_seq h rv) (U32.v i) v)
(S.append
(RV.as_seq_sub h rv 0ul i)
(S.cons v (RV.as_seq_sub h rv (i + 1ul) (V.size_of rv))))))
#push-options "--z3rlimit 20"
let as_seq_sub_upd #a #rst #rg h rv i v =
Seq.Properties.slice_upd (RV.as_seq h rv) 0 (U32.v i) (U32.v i) v;
Seq.Properties.slice_upd (RV.as_seq h rv) (U32.v i + 1) (U32.v (V.size_of rv)) (U32.v i) v;
RV.as_seq_seq_slice rg h (V.as_seq h rv)
0 (U32.v (V.size_of rv)) 0 (U32.v i);
assert (S.equal (S.slice (RV.as_seq h rv) 0 (U32.v i))
(RV.as_seq_sub h rv 0ul i));
RV.as_seq_seq_slice rg h (V.as_seq h rv)
0 (U32.v (V.size_of rv)) (U32.v i + 1) (U32.v (V.size_of rv));
assert (S.equal (S.slice (RV.as_seq h rv) (U32.v i + 1) (U32.v (V.size_of rv)))
(RV.as_seq_sub h rv (i + 1ul) (V.size_of rv)));
assert (S.index (S.upd (RV.as_seq h rv) (U32.v i) v) (U32.v i) == v)
#pop-options
// `hash_vv_insert_copy` inserts a hash element at level `lv`, by copying it and
// pushing the copy onto `hs[lv]`. For the detailed insertion procedure, see
// `insert_` and `mt_insert`.
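// For illustration: with lv = 0ul, i = 0, j = 3ul and hs[0] = [h0; h1; h2], the call
// extends hs[0] to [h0; h1; h2; v'], where v' has the same contents as `v` (the element
// is copied rather than aliased), and leaves every other level of `hs` untouched --
// matching `MTH.hashess_insert 0 0 3` in the postcondition.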
#push-options "--z3rlimit 100 --initial_fuel 1 --max_fuel 1"
private
inline_for_extraction
val hash_vv_insert_copy:
#hsz:hash_size_t ->
lv:uint32_t{lv < merkle_tree_size_lg} ->
i:Ghost.erased index_t ->
j:index_t{
Ghost.reveal i <= j &&
U32.v j < pow2 (32 - U32.v lv) - 1 &&
j < uint32_32_max} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
v:hash #hsz ->
HST.ST unit
(requires (fun h0 ->
RV.rv_inv h0 hs /\
Rgl?.r_inv (hreg hsz) h0 v /\
HH.disjoint (V.frameOf hs) (B.frameOf v) /\
mt_safe_elts #hsz h0 lv hs (Ghost.reveal i) j))
(ensures (fun h0 _ h1 ->
// memory safety
modifies (loc_union
(RV.rs_loc_elem (hvreg hsz) (V.as_seq h0 hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul)))
h0 h1 /\
RV.rv_inv h1 hs /\
Rgl?.r_inv (hreg hsz) h1 v /\
V.size_of (V.get h1 hs lv) == j + 1ul - offset_of (Ghost.reveal i) /\
V.size_of (V.get h1 hs lv) == V.size_of (V.get h0 hs lv) + 1ul /\
mt_safe_elts #hsz h1 (lv + 1ul) hs (Ghost.reveal i / 2ul) (j / 2ul) /\
RV.rv_loc_elems h0 hs (lv + 1ul) (V.size_of hs) ==
RV.rv_loc_elems h1 hs (lv + 1ul) (V.size_of hs) /\
// correctness
(mt_safe_elts_spec #hsz h0 lv hs (Ghost.reveal i) j;
S.equal (RV.as_seq h1 hs)
(MTH.hashess_insert
(U32.v lv) (U32.v (Ghost.reveal i)) (U32.v j)
(RV.as_seq h0 hs) (Rgl?.r_repr (hreg hsz) h0 v))) /\
S.equal (S.index (RV.as_seq h1 hs) (U32.v lv))
(S.snoc (S.index (RV.as_seq h0 hs) (U32.v lv))
(Rgl?.r_repr (hreg hsz) h0 v))))
let hash_vv_insert_copy #hsz lv i j hs v =
let hh0 = HST.get () in
mt_safe_elts_rec hh0 lv hs (Ghost.reveal i) j;
/// 1) Insert an element at the level `lv`, where the new vector is not yet
/// connected to `hs`.
let ihv = RV.insert_copy (hcpy hsz) (V.index hs lv) v in
let hh1 = HST.get () in
// 1-0) Basic disjointness conditions
V.forall2_forall_left hh0 hs 0ul (V.size_of hs) lv
(fun b1 b2 -> HH.disjoint (Rgl?.region_of (hvreg hsz) b1)
(Rgl?.region_of (hvreg hsz) b2));
V.forall2_forall_right hh0 hs 0ul (V.size_of hs) lv
(fun b1 b2 -> HH.disjoint (Rgl?.region_of (hvreg hsz) b1)
(Rgl?.region_of (hvreg hsz) b2));
V.loc_vector_within_included hs lv (lv + 1ul);
V.loc_vector_within_included hs (lv + 1ul) (V.size_of hs);
V.loc_vector_within_disjoint hs lv (lv + 1ul) (lv + 1ul) (V.size_of hs);
// 1-1) For the `modifies` postcondition.
assert (modifies (RV.rs_loc_elem (hvreg hsz) (V.as_seq hh0 hs) (U32.v lv)) hh0 hh1);
// 1-2) Preservation
Rgl?.r_sep (hreg hsz) v (RV.loc_rvector (V.get hh0 hs lv)) hh0 hh1;
RV.rv_loc_elems_preserved
hs (lv + 1ul) (V.size_of hs)
(RV.loc_rvector (V.get hh0 hs lv)) hh0 hh1;
// 1-3) For `mt_safe_elts`
assert (V.size_of ihv == j + 1ul - offset_of (Ghost.reveal i)); // head updated
mt_safe_elts_preserved
(lv + 1ul) hs (Ghost.reveal i / 2ul) (j / 2ul)
(RV.loc_rvector (V.get hh0 hs lv)) hh0 hh1; // tail not yet
// 1-4) For the `rv_inv` postcondition
RV.rs_loc_elems_elem_disj
(hvreg hsz) (V.as_seq hh0 hs) (V.frameOf hs)
0 (U32.v (V.size_of hs)) 0 (U32.v lv) (U32.v lv);
RV.rs_loc_elems_parent_disj
(hvreg hsz) (V.as_seq hh0 hs) (V.frameOf hs)
0 (U32.v lv);
RV.rv_elems_inv_preserved
hs 0ul lv (RV.loc_rvector (V.get hh0 hs lv))
hh0 hh1;
assert (RV.rv_elems_inv hh1 hs 0ul lv);
RV.rs_loc_elems_elem_disj
(hvreg hsz) (V.as_seq hh0 hs) (V.frameOf hs)
0 (U32.v (V.size_of hs))
(U32.v lv + 1) (U32.v (V.size_of hs))
(U32.v lv);
RV.rs_loc_elems_parent_disj
(hvreg hsz) (V.as_seq hh0 hs) (V.frameOf hs)
(U32.v lv + 1) (U32.v (V.size_of hs));
RV.rv_elems_inv_preserved
hs (lv + 1ul) (V.size_of hs) (RV.loc_rvector (V.get hh0 hs lv))
hh0 hh1;
assert (RV.rv_elems_inv hh1 hs (lv + 1ul) (V.size_of hs));
// assert (rv_itself_inv hh1 hs);
// assert (elems_reg hh1 hs);
// 1-5) Correctness
assert (S.equal (RV.as_seq hh1 ihv)
(S.snoc (RV.as_seq hh0 (V.get hh0 hs lv)) (Rgl?.r_repr (hreg hsz) hh0 v)));
/// 2) Assign the updated vector to `hs` at the level `lv`.
RV.assign hs lv ihv;
let hh2 = HST.get () in
// 2-1) For the `modifies` postcondition.
assert (modifies (V.loc_vector_within hs lv (lv + 1ul)) hh1 hh2);
assert (modifies (loc_union
(RV.rs_loc_elem (hvreg hsz) (V.as_seq hh0 hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul))) hh0 hh2);
// 2-2) Preservation
Rgl?.r_sep (hreg hsz) v (RV.loc_rvector hs) hh1 hh2;
RV.rv_loc_elems_preserved
hs (lv + 1ul) (V.size_of hs)
(V.loc_vector_within hs lv (lv + 1ul)) hh1 hh2;
// 2-3) For `mt_safe_elts`
assert (V.size_of (V.get hh2 hs lv) == j + 1ul - offset_of (Ghost.reveal i));
mt_safe_elts_preserved
(lv + 1ul) hs (Ghost.reveal i / 2ul) (j / 2ul)
(V.loc_vector_within hs lv (lv + 1ul)) hh1 hh2;
// 2-4) Correctness
RV.as_seq_sub_preserved hs 0ul lv (loc_rvector ihv) hh0 hh1;
RV.as_seq_sub_preserved hs (lv + 1ul) merkle_tree_size_lg (loc_rvector ihv) hh0 hh1;
assert (S.equal (RV.as_seq hh2 hs)
(S.append
(RV.as_seq_sub hh0 hs 0ul lv)
(S.cons (RV.as_seq hh1 ihv)
(RV.as_seq_sub hh0 hs (lv + 1ul) merkle_tree_size_lg))));
as_seq_sub_upd hh0 hs lv (RV.as_seq hh1 ihv)
#pop-options
private
val insert_index_helper_even:
lv:uint32_t{lv < merkle_tree_size_lg} ->
j:index_t{U32.v j < pow2 (32 - U32.v lv) - 1} ->
Lemma (requires (j % 2ul <> 1ul))
(ensures (U32.v j % 2 <> 1 /\ j / 2ul == (j + 1ul) / 2ul))
let insert_index_helper_even lv j = ()
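// For illustration: for an even j such as 4ul, 4ul / 2ul = 2ul = 5ul / 2ul, so the index
// j / 2ul seen by the next level is unchanged by the insertion -- this is why the even
// case of `insert_` below needs no recursive call.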
#push-options "--z3rlimit 100 --initial_fuel 1 --max_fuel 1 --initial_ifuel 1 --max_ifuel 1"
private
val insert_index_helper_odd:
lv:uint32_t{lv < merkle_tree_size_lg} ->
i:index_t ->
j:index_t{i <= j && U32.v j < pow2 (32 - U32.v lv) - 1} ->
Lemma (requires (j % 2ul = 1ul /\
j < uint32_32_max))
(ensures (U32.v j % 2 = 1 /\
U32.v (j / 2ul) < pow2 (32 - U32.v (lv + 1ul)) - 1 /\
(j + 1ul) / 2ul == j / 2ul + 1ul /\
j - offset_of i > 0ul))
let insert_index_helper_odd lv i j = ()
#pop-options
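// For illustration: with lv = 0ul, i = 4ul and j = 5ul the requirements hold, and indeed
// (5ul + 1ul) / 2ul = 3ul = 5ul / 2ul + 1ul, so the next level gains exactly one element;
// moreover j - offset_of i = 5ul - 4ul = 1ul > 0ul, so hs[lv] is non-empty and the odd
// case of `insert_` can safely read its second-to-last element after the copy-insert.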
private
val loc_union_assoc_4:
a:loc -> b:loc -> c:loc -> d:loc ->
Lemma (loc_union (loc_union a b) (loc_union c d) ==
loc_union (loc_union a c) (loc_union b d))
let loc_union_assoc_4 a b c d =
loc_union_assoc (loc_union a b) c d;
loc_union_assoc a b c;
loc_union_assoc a c b;
loc_union_assoc (loc_union a c) b d
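// Spelled out, the four associativity calls rebracket the union as
//   (a U b) U (c U d) == ((a U b) U c) U d
//                     == (a U (b U c)) U d
//                     == (a U (c U b)) U d   (commutativity of loc_union, left to the solver)
//                     == ((a U c) U b) U d
//                     == (a U c) U (b U d)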
private
val insert_modifies_rec_helper:
#hsz:hash_size_t ->
lv:uint32_t{lv < merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
aloc:loc ->
h:HS.mem ->
Lemma (loc_union
(loc_union
(loc_union
(RV.rs_loc_elem (hvreg hsz) (V.as_seq h hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul)))
aloc)
(loc_union
(loc_union
(RV.rv_loc_elems h hs (lv + 1ul) (V.size_of hs))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs)))
aloc) ==
loc_union
(loc_union
(RV.rv_loc_elems h hs lv (V.size_of hs))
(V.loc_vector_within hs lv (V.size_of hs)))
aloc)
#push-options "--z3rlimit 100 --initial_fuel 2 --max_fuel 2"
let insert_modifies_rec_helper #hsz lv hs aloc h =
assert (V.loc_vector_within hs lv (V.size_of hs) ==
loc_union (V.loc_vector_within hs lv (lv + 1ul))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs)));
RV.rs_loc_elems_rec_inverse (hvreg hsz) (V.as_seq h hs) (U32.v lv) (U32.v (V.size_of hs));
assert (RV.rv_loc_elems h hs lv (V.size_of hs) ==
loc_union (RV.rs_loc_elem (hvreg hsz) (V.as_seq h hs) (U32.v lv))
(RV.rv_loc_elems h hs (lv + 1ul) (V.size_of hs)));
// Applying some association rules...
loc_union_assoc
(loc_union
(RV.rs_loc_elem (hvreg hsz) (V.as_seq h hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul))) aloc
(loc_union
(loc_union
(RV.rv_loc_elems h hs (lv + 1ul) (V.size_of hs))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs)))
aloc);
loc_union_assoc
(loc_union
(RV.rv_loc_elems h hs (lv + 1ul) (V.size_of hs))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs))) aloc aloc;
loc_union_assoc
(loc_union
(RV.rs_loc_elem (hvreg hsz) (V.as_seq h hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul)))
(loc_union
(RV.rv_loc_elems h hs (lv + 1ul) (V.size_of hs))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs)))
aloc;
loc_union_assoc_4
(RV.rs_loc_elem (hvreg hsz) (V.as_seq h hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul))
(RV.rv_loc_elems h hs (lv + 1ul) (V.size_of hs))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs))
#pop-options
private
val insert_modifies_union_loc_weakening:
l1:loc -> l2:loc -> l3:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (modifies l1 h0 h1))
(ensures (modifies (loc_union (loc_union l1 l2) l3) h0 h1)) | false | false | MerkleTree.Low.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 0,
"initial_ifuel": 0,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 10,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val insert_modifies_union_loc_weakening:
l1:loc -> l2:loc -> l3:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (modifies l1 h0 h1))
(ensures (modifies (loc_union (loc_union l1 l2) l3) h0 h1)) | [] | MerkleTree.Low.insert_modifies_union_loc_weakening | {
"file_name": "src/MerkleTree.Low.fst",
"git_rev": "3b0f086655c145aa23f58a97259ebf4cf112a4a3",
"git_url": "https://github.com/hacl-star/merkle-tree.git",
"project_name": "merkle-tree"
} |
l1: LowStar.Monotonic.Buffer.loc ->
l2: LowStar.Monotonic.Buffer.loc ->
l3: LowStar.Monotonic.Buffer.loc ->
h0: FStar.Monotonic.HyperStack.mem ->
h1: FStar.Monotonic.HyperStack.mem
-> FStar.Pervasives.Lemma (requires LowStar.Monotonic.Buffer.modifies l1 h0 h1)
(ensures
LowStar.Monotonic.Buffer.modifies (LowStar.Monotonic.Buffer.loc_union (LowStar.Monotonic.Buffer.loc_union
l1
l2)
l3)
h0
h1) | {
"end_col": 63,
"end_line": 669,
"start_col": 2,
"start_line": 668
} |
Prims.Tot | val mt_insert_pre_nst: mtv:merkle_tree -> v:hash #(MT?.hash_size mtv) -> Tot bool | [
{
"abbrev": false,
"full_module": "MerkleTree.Low.VectorExtras",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree.Low.Hashfunctions",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree.Low.Datastructures",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": true,
"full_module": "MerkleTree.Spec",
"short_module": "MTS"
},
{
"abbrev": true,
"full_module": "MerkleTree.New.High",
"short_module": "MTH"
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "LowStar.Regional.Instances",
"short_module": "RVI"
},
{
"abbrev": true,
"full_module": "LowStar.RVector",
"short_module": "RV"
},
{
"abbrev": true,
"full_module": "LowStar.Vector",
"short_module": "V"
},
{
"abbrev": true,
"full_module": "LowStar.ConstBuffer",
"short_module": "CB"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.Monotonic.HyperHeap",
"short_module": "HH"
},
{
"abbrev": true,
"full_module": "FStar.Monotonic.HyperStack",
"short_module": "MHS"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "LowStar.Regional.Instances",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.RVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Regional",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Vector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.BufferOps",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Buffer",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Integers",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.All",
"short_module": null
},
{
"abbrev": false,
"full_module": "EverCrypt.Helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let mt_insert_pre_nst mtv v = mt_not_full_nst mtv && add64_fits (MT?.offset mtv) ((MT?.j mtv) + 1ul) | val mt_insert_pre_nst: mtv:merkle_tree -> v:hash #(MT?.hash_size mtv) -> Tot bool
let mt_insert_pre_nst mtv v = | false | null | false | mt_not_full_nst mtv && add64_fits (MT?.offset mtv) ((MT?.j mtv) + 1ul) | {
"checked_file": "MerkleTree.Low.fst.checked",
"dependencies": [
"prims.fst.checked",
"MerkleTree.Spec.fst.checked",
"MerkleTree.New.High.fst.checked",
"MerkleTree.Low.VectorExtras.fst.checked",
"MerkleTree.Low.Hashfunctions.fst.checked",
"MerkleTree.Low.Datastructures.fst.checked",
"LowStar.Vector.fst.checked",
"LowStar.RVector.fst.checked",
"LowStar.Regional.Instances.fst.checked",
"LowStar.Regional.fst.checked",
"LowStar.ConstBuffer.fsti.checked",
"LowStar.BufferOps.fst.checked",
"LowStar.Buffer.fst.checked",
"Lib.IntTypes.fsti.checked",
"Lib.ByteBuffer.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.Properties.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Monotonic.HyperStack.fsti.checked",
"FStar.Monotonic.HyperHeap.fsti.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Integers.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked",
"FStar.All.fst.checked",
"EverCrypt.Helpers.fsti.checked"
],
"interface_file": false,
"source_file": "MerkleTree.Low.fst"
} | [
"total"
] | [
"MerkleTree.Low.merkle_tree",
"MerkleTree.Low.Datastructures.hash",
"MerkleTree.Low.__proj__MT__item__hash_size",
"Prims.op_AmpAmp",
"MerkleTree.Low.mt_not_full_nst",
"MerkleTree.Low.add64_fits",
"MerkleTree.Low.__proj__MT__item__offset",
"FStar.Integers.op_Plus",
"FStar.Integers.Unsigned",
"FStar.Integers.W32",
"MerkleTree.Low.__proj__MT__item__j",
"FStar.UInt32.__uint_to_t",
"Prims.bool"
] | [] | module MerkleTree.Low
open EverCrypt.Helpers
open FStar.All
open FStar.Integers
open FStar.Mul
open LowStar.Buffer
open LowStar.BufferOps
open LowStar.Vector
open LowStar.Regional
open LowStar.RVector
open LowStar.Regional.Instances
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
module MHS = FStar.Monotonic.HyperStack
module HH = FStar.Monotonic.HyperHeap
module B = LowStar.Buffer
module CB = LowStar.ConstBuffer
module V = LowStar.Vector
module RV = LowStar.RVector
module RVI = LowStar.Regional.Instances
module S = FStar.Seq
module U32 = FStar.UInt32
module U64 = FStar.UInt64
module MTH = MerkleTree.New.High
module MTS = MerkleTree.Spec
open Lib.IntTypes
open MerkleTree.Low.Datastructures
open MerkleTree.Low.Hashfunctions
open MerkleTree.Low.VectorExtras
#set-options "--z3rlimit 10 --initial_fuel 0 --max_fuel 0 --initial_ifuel 0 --max_ifuel 0"
type const_pointer (a:Type0) = b:CB.const_buffer a{CB.length b == 1 /\ CB.qual_of b == CB.MUTABLE}
/// Low-level Merkle tree data structure
///
// NOTE: because of a lack of 64-bit LowStar.Buffer support, currently
// we cannot change below to some other types.
type index_t = uint32_t
let uint32_32_max = 4294967295ul
inline_for_extraction
let uint32_max = 4294967295UL
let uint64_max = 18446744073709551615UL
let offset_range_limit = uint32_max
type offset_t = uint64_t
inline_for_extraction noextract unfold let u32_64 = Int.Cast.uint32_to_uint64
inline_for_extraction noextract unfold let u64_32 = Int.Cast.uint64_to_uint32
private inline_for_extraction
let offsets_connect (x:offset_t) (y:offset_t): Tot bool = y >= x && (y - x) <= offset_range_limit
private inline_for_extraction
let split_offset (tree:offset_t) (index:offset_t{offsets_connect tree index}): Tot index_t =
[@inline_let] let diff = U64.sub_mod index tree in
assert (diff <= offset_range_limit);
Int.Cast.uint64_to_uint32 diff
private inline_for_extraction
let add64_fits (x:offset_t) (i:index_t): Tot bool = uint64_max - x >= (u32_64 i)
private inline_for_extraction
let join_offset (tree:offset_t) (i:index_t{add64_fits tree i}): Tot (r:offset_t{offsets_connect tree r}) =
U64.add tree (u32_64 i)
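// For illustration: a tree whose `offset` is 100UL can address external indices from
// 100UL up to 100UL + (2^32 - 1); for index 103UL, `offsets_connect 100UL 103UL` holds,
// `split_offset 100UL 103UL` is 3ul, and `join_offset 100UL 3ul` recovers 103UL.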
inline_for_extraction val merkle_tree_size_lg: uint32_t
let merkle_tree_size_lg = 32ul
// A Merkle tree `MT i j hs rhs_ok rhs` stores all necessary hashes to generate
// a Merkle path for each element from the index `i` to `j-1`.
// - Parameters
// `hs`: a 2-dim store for hashes, where `hs[0]` contains leaf hash values.
// `rhs_ok`: whether the rightmost hashes are currently up to date
// `rhs`: a store for the "rightmost" hashes, maintained lazily: it is only
//        recomputed when a Merkle path that needs some of the rightmost
//        hashes as part of it has to be produced.
// `mroot`: during the construction of `rhs` we can also calculate the Merkle
// root of the tree. If `rhs_ok` is true then it has the up-to-date
// root value.
noeq type merkle_tree =
| MT: hash_size:hash_size_t ->
offset:offset_t ->
i:index_t -> j:index_t{i <= j /\ add64_fits offset j} ->
hs:hash_vv hash_size {V.size_of hs = merkle_tree_size_lg} ->
rhs_ok:bool ->
rhs:hash_vec #hash_size {V.size_of rhs = merkle_tree_size_lg} ->
mroot:hash #hash_size ->
hash_spec:Ghost.erased (MTS.hash_fun_t #(U32.v hash_size)) ->
hash_fun:hash_fun_t #hash_size #hash_spec ->
merkle_tree
type mt_p = B.pointer merkle_tree
type const_mt_p = const_pointer merkle_tree
inline_for_extraction
let merkle_tree_conditions (#hsz:Ghost.erased hash_size_t) (offset:uint64_t) (i j:uint32_t) (hs:hash_vv hsz) (rhs_ok:bool) (rhs:hash_vec #hsz) (mroot:hash #hsz): Tot bool =
j >= i && add64_fits offset j &&
V.size_of hs = merkle_tree_size_lg &&
V.size_of rhs = merkle_tree_size_lg
// The maximum number of currently held elements in the tree is (2^32 - 1).
// cwinter: even when using 64-bit indices, we fail if the underlying 32-bit
// vector is full; this can be fixed if necessary.
private inline_for_extraction
val mt_not_full_nst: mtv:merkle_tree -> Tot bool
let mt_not_full_nst mtv = MT?.j mtv < uint32_32_max
val mt_not_full: HS.mem -> mt_p -> GTot bool
let mt_not_full h mt = mt_not_full_nst (B.get h mt 0)
/// (Memory) Safety
val offset_of: i:index_t -> Tot index_t
let offset_of i = if i % 2ul = 0ul then i else i - 1ul
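// For illustration: offset_of 6ul = 6ul and offset_of 7ul = 6ul; the function rounds an
// index down to the nearest even number, i.e. to the start of its 2-element block.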
// `mt_safe_elts` says that it is safe to access an element from `i` to `j - 1`
// at level `lv` in the Merkle tree, i.e., hs[lv][k] (i <= k < j) is a valid
// element.
inline_for_extraction noextract
val mt_safe_elts:
#hsz:hash_size_t ->
h:HS.mem -> lv:uint32_t{lv <= merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
i:index_t -> j:index_t{j >= i} ->
GTot Type0 (decreases (32 - U32.v lv))
let rec mt_safe_elts #hsz h lv hs i j =
if lv = merkle_tree_size_lg then true
else (let ofs = offset_of i in
V.size_of (V.get h hs lv) == j - ofs /\
mt_safe_elts #hsz h (lv + 1ul) hs (i / 2ul) (j / 2ul))
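// For illustration: mt_safe_elts h lv hs 1ul 3ul requires V.size_of (V.get h hs lv) to be
// 3 - offset_of 1ul = 3, then V.size_of (V.get h hs (lv + 1ul)) = 1 (the indices halve to
// i = 0ul, j = 1ul), and size 0 at every remaining level, since the indices become (0, 0)
// from lv + 2 onwards.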
#push-options "--initial_fuel 1 --max_fuel 1"
val mt_safe_elts_constr:
#hsz:hash_size_t ->
h:HS.mem -> lv:uint32_t{lv < merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
i:index_t -> j:index_t{j >= i} ->
Lemma (requires (V.size_of (V.get h hs lv) == j - offset_of i /\
mt_safe_elts #hsz h (lv + 1ul) hs (i / 2ul) (j / 2ul)))
(ensures (mt_safe_elts #hsz h lv hs i j))
let mt_safe_elts_constr #_ h lv hs i j = ()
val mt_safe_elts_head:
#hsz:hash_size_t ->
h:HS.mem -> lv:uint32_t{lv < merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
i:index_t -> j:index_t{j >= i} ->
Lemma (requires (mt_safe_elts #hsz h lv hs i j))
(ensures (V.size_of (V.get h hs lv) == j - offset_of i))
let mt_safe_elts_head #_ h lv hs i j = ()
val mt_safe_elts_rec:
#hsz:hash_size_t ->
h:HS.mem -> lv:uint32_t{lv < merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
i:index_t -> j:index_t{j >= i} ->
Lemma (requires (mt_safe_elts #hsz h lv hs i j))
(ensures (mt_safe_elts #hsz h (lv + 1ul) hs (i / 2ul) (j / 2ul)))
let mt_safe_elts_rec #_ h lv hs i j = ()
val mt_safe_elts_init:
#hsz:hash_size_t ->
h:HS.mem -> lv:uint32_t{lv <= merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
Lemma (requires (V.forall_ h hs lv (V.size_of hs)
(fun hv -> V.size_of hv = 0ul)))
(ensures (mt_safe_elts #hsz h lv hs 0ul 0ul))
(decreases (32 - U32.v lv))
let rec mt_safe_elts_init #hsz h lv hs =
if lv = merkle_tree_size_lg then ()
else mt_safe_elts_init #hsz h (lv + 1ul) hs
#pop-options
val mt_safe_elts_preserved:
#hsz:hash_size_t ->
lv:uint32_t{lv <= merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
i:index_t -> j:index_t{j >= i} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 hs /\
mt_safe_elts #hsz h0 lv hs i j /\
loc_disjoint p (V.loc_vector_within hs lv (V.size_of hs)) /\
modifies p h0 h1))
(ensures (mt_safe_elts #hsz h1 lv hs i j))
(decreases (32 - U32.v lv))
[SMTPat (V.live h0 hs);
SMTPat (mt_safe_elts #hsz h0 lv hs i j);
SMTPat (loc_disjoint p (RV.loc_rvector hs));
SMTPat (modifies p h0 h1)]
#push-options "--z3rlimit 100 --initial_fuel 2 --max_fuel 2"
let rec mt_safe_elts_preserved #hsz lv hs i j p h0 h1 =
if lv = merkle_tree_size_lg then ()
else (V.get_preserved hs lv p h0 h1;
mt_safe_elts_preserved #hsz (lv + 1ul) hs (i / 2ul) (j / 2ul) p h0 h1)
#pop-options
// `mt_safe` is the invariant of a Merkle tree through its lifetime.
// It includes liveness, regionality, pairwise disjointness of the underlying data structures,
// and valid element access (`mt_safe_elts`).
inline_for_extraction noextract
val mt_safe: HS.mem -> mt_p -> GTot Type0
let mt_safe h mt =
B.live h mt /\ B.freeable mt /\
(let mtv = B.get h mt 0 in
// Liveness & Accessibility
RV.rv_inv h (MT?.hs mtv) /\
RV.rv_inv h (MT?.rhs mtv) /\
Rgl?.r_inv (hreg (MT?.hash_size mtv)) h (MT?.mroot mtv) /\
mt_safe_elts h 0ul (MT?.hs mtv) (MT?.i mtv) (MT?.j mtv) /\
// Regionality
HH.extends (V.frameOf (MT?.hs mtv)) (B.frameOf mt) /\
HH.extends (V.frameOf (MT?.rhs mtv)) (B.frameOf mt) /\
HH.extends (B.frameOf (MT?.mroot mtv)) (B.frameOf mt) /\
HH.disjoint (V.frameOf (MT?.hs mtv)) (V.frameOf (MT?.rhs mtv)) /\
HH.disjoint (V.frameOf (MT?.hs mtv)) (B.frameOf (MT?.mroot mtv)) /\
HH.disjoint (V.frameOf (MT?.rhs mtv)) (B.frameOf (MT?.mroot mtv)))
// Since a Merkle tree satisfies regionality, it is sound to take all regions
// reachable from the tree pointer's frame as the location of the tree.
val mt_loc: mt_p -> GTot loc
let mt_loc mt = B.loc_all_regions_from false (B.frameOf mt)
val mt_safe_preserved:
mt:mt_p -> p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (mt_safe h0 mt /\
loc_disjoint p (mt_loc mt) /\
modifies p h0 h1))
(ensures (B.get h0 mt 0 == B.get h1 mt 0 /\
mt_safe h1 mt))
let mt_safe_preserved mt p h0 h1 =
assert (loc_includes (mt_loc mt) (B.loc_buffer mt));
let mtv = B.get h0 mt 0 in
assert (loc_includes (mt_loc mt) (RV.loc_rvector (MT?.hs mtv)));
assert (loc_includes (mt_loc mt) (RV.loc_rvector (MT?.rhs mtv)));
assert (loc_includes (mt_loc mt) (V.loc_vector (MT?.hs mtv)));
assert (loc_includes (mt_loc mt)
(B.loc_all_regions_from false (B.frameOf (MT?.mroot mtv))));
RV.rv_inv_preserved (MT?.hs mtv) p h0 h1;
RV.rv_inv_preserved (MT?.rhs mtv) p h0 h1;
Rgl?.r_sep (hreg (MT?.hash_size mtv)) (MT?.mroot mtv) p h0 h1;
V.loc_vector_within_included (MT?.hs mtv) 0ul (V.size_of (MT?.hs mtv));
mt_safe_elts_preserved 0ul (MT?.hs mtv) (MT?.i mtv) (MT?.j mtv) p h0 h1
/// Lifting to a high-level Merkle tree structure
val mt_safe_elts_spec:
#hsz:hash_size_t ->
h:HS.mem ->
lv:uint32_t{lv <= merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
i:index_t ->
j:index_t{j >= i} ->
Lemma (requires (RV.rv_inv h hs /\
mt_safe_elts #hsz h lv hs i j))
(ensures (MTH.hs_wf_elts #(U32.v hsz)
(U32.v lv) (RV.as_seq h hs)
(U32.v i) (U32.v j)))
(decreases (32 - U32.v lv))
#push-options "--z3rlimit 100 --initial_fuel 2 --max_fuel 2"
let rec mt_safe_elts_spec #_ h lv hs i j =
if lv = merkle_tree_size_lg then ()
else mt_safe_elts_spec h (lv + 1ul) hs (i / 2ul) (j / 2ul)
#pop-options
val merkle_tree_lift:
h:HS.mem ->
mtv:merkle_tree{
RV.rv_inv h (MT?.hs mtv) /\
RV.rv_inv h (MT?.rhs mtv) /\
Rgl?.r_inv (hreg (MT?.hash_size mtv)) h (MT?.mroot mtv) /\
mt_safe_elts #(MT?.hash_size mtv) h 0ul (MT?.hs mtv) (MT?.i mtv) (MT?.j mtv)} ->
GTot (r:MTH.merkle_tree #(U32.v (MT?.hash_size mtv)) {MTH.mt_wf_elts #_ r})
let merkle_tree_lift h mtv =
mt_safe_elts_spec h 0ul (MT?.hs mtv) (MT?.i mtv) (MT?.j mtv);
MTH.MT #(U32.v (MT?.hash_size mtv))
(U32.v (MT?.i mtv))
(U32.v (MT?.j mtv))
(RV.as_seq h (MT?.hs mtv))
(MT?.rhs_ok mtv)
(RV.as_seq h (MT?.rhs mtv))
(Rgl?.r_repr (hreg (MT?.hash_size mtv)) h (MT?.mroot mtv))
(Ghost.reveal (MT?.hash_spec mtv))
val mt_lift:
h:HS.mem -> mt:mt_p{mt_safe h mt} ->
GTot (r:MTH.merkle_tree #(U32.v (MT?.hash_size (B.get h mt 0))) {MTH.mt_wf_elts #_ r})
let mt_lift h mt =
merkle_tree_lift h (B.get h mt 0)
val mt_preserved:
mt:mt_p -> p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (mt_safe h0 mt /\
loc_disjoint p (mt_loc mt) /\
modifies p h0 h1))
(ensures (mt_safe_preserved mt p h0 h1;
mt_lift h0 mt == mt_lift h1 mt))
let mt_preserved mt p h0 h1 =
assert (loc_includes (B.loc_all_regions_from false (B.frameOf mt))
(B.loc_buffer mt));
B.modifies_buffer_elim mt p h0 h1;
assert (B.get h0 mt 0 == B.get h1 mt 0);
assert (loc_includes (B.loc_all_regions_from false (B.frameOf mt))
(RV.loc_rvector (MT?.hs (B.get h0 mt 0))));
assert (loc_includes (B.loc_all_regions_from false (B.frameOf mt))
(RV.loc_rvector (MT?.rhs (B.get h0 mt 0))));
assert (loc_includes (B.loc_all_regions_from false (B.frameOf mt))
(B.loc_buffer (MT?.mroot (B.get h0 mt 0))));
RV.as_seq_preserved (MT?.hs (B.get h0 mt 0)) p h0 h1;
RV.as_seq_preserved (MT?.rhs (B.get h0 mt 0)) p h0 h1;
B.modifies_buffer_elim (MT?.mroot (B.get h0 mt 0)) p h0 h1
/// Construction
// Note that the public function for creation is `mt_create` defined below,
// which builds a tree with an initial hash.
#push-options "--z3rlimit 100 --initial_fuel 1 --max_fuel 1 --initial_ifuel 1 --max_ifuel 1"
private
val create_empty_mt:
hash_size:hash_size_t ->
hash_spec:Ghost.erased (MTS.hash_fun_t #(U32.v hash_size)) ->
hash_fun:hash_fun_t #hash_size #hash_spec ->
r:HST.erid ->
HST.ST mt_p
(requires (fun _ -> true))
(ensures (fun h0 mt h1 ->
let dmt = B.get h1 mt 0 in
// memory safety
B.frameOf mt = r /\
modifies (mt_loc mt) h0 h1 /\
mt_safe h1 mt /\
mt_not_full h1 mt /\
// correctness
MT?.hash_size dmt = hash_size /\
MT?.offset dmt = 0UL /\
merkle_tree_lift h1 dmt == MTH.create_empty_mt #_ #(Ghost.reveal hash_spec) ()))
let create_empty_mt hsz hash_spec hash_fun r =
[@inline_let] let hrg = hreg hsz in
[@inline_let] let hvrg = hvreg hsz in
[@inline_let] let hvvrg = hvvreg hsz in
let hs_region = HST.new_region r in
let hs = RV.alloc_rid hvrg merkle_tree_size_lg hs_region in
let h0 = HST.get () in
mt_safe_elts_init #hsz h0 0ul hs;
let rhs_region = HST.new_region r in
let rhs = RV.alloc_rid hrg merkle_tree_size_lg rhs_region in
let h1 = HST.get () in
assert (RV.as_seq h1 rhs == S.create 32 (MTH.hash_init #(U32.v hsz)));
RV.rv_inv_preserved hs (V.loc_vector rhs) h0 h1;
RV.as_seq_preserved hs (V.loc_vector rhs) h0 h1;
V.loc_vector_within_included hs 0ul (V.size_of hs);
mt_safe_elts_preserved #hsz 0ul hs 0ul 0ul (V.loc_vector rhs) h0 h1;
let mroot_region = HST.new_region r in
let mroot = rg_alloc hrg mroot_region in
let h2 = HST.get () in
RV.as_seq_preserved hs loc_none h1 h2;
RV.as_seq_preserved rhs loc_none h1 h2;
mt_safe_elts_preserved #hsz 0ul hs 0ul 0ul loc_none h1 h2;
let mt = B.malloc r (MT hsz 0UL 0ul 0ul hs false rhs mroot hash_spec hash_fun) 1ul in
let h3 = HST.get () in
RV.as_seq_preserved hs loc_none h2 h3;
RV.as_seq_preserved rhs loc_none h2 h3;
Rgl?.r_sep hrg mroot loc_none h2 h3;
mt_safe_elts_preserved #hsz 0ul hs 0ul 0ul loc_none h2 h3;
mt
#pop-options
/// Destruction (free)
val mt_free: mt:mt_p ->
HST.ST unit
(requires (fun h0 -> mt_safe h0 mt))
(ensures (fun h0 _ h1 -> modifies (mt_loc mt) h0 h1))
#push-options "--z3rlimit 100"
let mt_free mt =
let mtv = !*mt in
RV.free (MT?.hs mtv);
RV.free (MT?.rhs mtv);
[@inline_let] let rg = hreg (MT?.hash_size mtv) in
rg_free rg (MT?.mroot mtv);
B.free mt
#pop-options
/// Insertion
private
val as_seq_sub_upd:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector #a #rst rg ->
i:uint32_t{i < V.size_of rv} -> v:Rgl?.repr rg ->
Lemma (requires (RV.rv_inv h rv))
(ensures (S.equal (S.upd (RV.as_seq h rv) (U32.v i) v)
(S.append
(RV.as_seq_sub h rv 0ul i)
(S.cons v (RV.as_seq_sub h rv (i + 1ul) (V.size_of rv))))))
#push-options "--z3rlimit 20"
let as_seq_sub_upd #a #rst #rg h rv i v =
Seq.Properties.slice_upd (RV.as_seq h rv) 0 (U32.v i) (U32.v i) v;
Seq.Properties.slice_upd (RV.as_seq h rv) (U32.v i + 1) (U32.v (V.size_of rv)) (U32.v i) v;
RV.as_seq_seq_slice rg h (V.as_seq h rv)
0 (U32.v (V.size_of rv)) 0 (U32.v i);
assert (S.equal (S.slice (RV.as_seq h rv) 0 (U32.v i))
(RV.as_seq_sub h rv 0ul i));
RV.as_seq_seq_slice rg h (V.as_seq h rv)
0 (U32.v (V.size_of rv)) (U32.v i + 1) (U32.v (V.size_of rv));
assert (S.equal (S.slice (RV.as_seq h rv) (U32.v i + 1) (U32.v (V.size_of rv)))
(RV.as_seq_sub h rv (i + 1ul) (V.size_of rv)));
assert (S.index (S.upd (RV.as_seq h rv) (U32.v i) v) (U32.v i) == v)
#pop-options
// `hash_vv_insert_copy` inserts a hash element at level `lv`, by copying it and
// pushing the copy onto `hs[lv]`. For the detailed insertion procedure, see
// `insert_` and `mt_insert`.
#push-options "--z3rlimit 100 --initial_fuel 1 --max_fuel 1"
private
inline_for_extraction
val hash_vv_insert_copy:
#hsz:hash_size_t ->
lv:uint32_t{lv < merkle_tree_size_lg} ->
i:Ghost.erased index_t ->
j:index_t{
Ghost.reveal i <= j &&
U32.v j < pow2 (32 - U32.v lv) - 1 &&
j < uint32_32_max} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
v:hash #hsz ->
HST.ST unit
(requires (fun h0 ->
RV.rv_inv h0 hs /\
Rgl?.r_inv (hreg hsz) h0 v /\
HH.disjoint (V.frameOf hs) (B.frameOf v) /\
mt_safe_elts #hsz h0 lv hs (Ghost.reveal i) j))
(ensures (fun h0 _ h1 ->
// memory safety
modifies (loc_union
(RV.rs_loc_elem (hvreg hsz) (V.as_seq h0 hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul)))
h0 h1 /\
RV.rv_inv h1 hs /\
Rgl?.r_inv (hreg hsz) h1 v /\
V.size_of (V.get h1 hs lv) == j + 1ul - offset_of (Ghost.reveal i) /\
V.size_of (V.get h1 hs lv) == V.size_of (V.get h0 hs lv) + 1ul /\
mt_safe_elts #hsz h1 (lv + 1ul) hs (Ghost.reveal i / 2ul) (j / 2ul) /\
RV.rv_loc_elems h0 hs (lv + 1ul) (V.size_of hs) ==
RV.rv_loc_elems h1 hs (lv + 1ul) (V.size_of hs) /\
// correctness
(mt_safe_elts_spec #hsz h0 lv hs (Ghost.reveal i) j;
S.equal (RV.as_seq h1 hs)
(MTH.hashess_insert
(U32.v lv) (U32.v (Ghost.reveal i)) (U32.v j)
(RV.as_seq h0 hs) (Rgl?.r_repr (hreg hsz) h0 v))) /\
S.equal (S.index (RV.as_seq h1 hs) (U32.v lv))
(S.snoc (S.index (RV.as_seq h0 hs) (U32.v lv))
(Rgl?.r_repr (hreg hsz) h0 v))))
let hash_vv_insert_copy #hsz lv i j hs v =
let hh0 = HST.get () in
mt_safe_elts_rec hh0 lv hs (Ghost.reveal i) j;
/// 1) Insert an element at the level `lv`, where the new vector is not yet
/// connected to `hs`.
let ihv = RV.insert_copy (hcpy hsz) (V.index hs lv) v in
let hh1 = HST.get () in
// 1-0) Basic disjointness conditions
V.forall2_forall_left hh0 hs 0ul (V.size_of hs) lv
(fun b1 b2 -> HH.disjoint (Rgl?.region_of (hvreg hsz) b1)
(Rgl?.region_of (hvreg hsz) b2));
V.forall2_forall_right hh0 hs 0ul (V.size_of hs) lv
(fun b1 b2 -> HH.disjoint (Rgl?.region_of (hvreg hsz) b1)
(Rgl?.region_of (hvreg hsz) b2));
V.loc_vector_within_included hs lv (lv + 1ul);
V.loc_vector_within_included hs (lv + 1ul) (V.size_of hs);
V.loc_vector_within_disjoint hs lv (lv + 1ul) (lv + 1ul) (V.size_of hs);
// 1-1) For the `modifies` postcondition.
assert (modifies (RV.rs_loc_elem (hvreg hsz) (V.as_seq hh0 hs) (U32.v lv)) hh0 hh1);
// 1-2) Preservation
Rgl?.r_sep (hreg hsz) v (RV.loc_rvector (V.get hh0 hs lv)) hh0 hh1;
RV.rv_loc_elems_preserved
hs (lv + 1ul) (V.size_of hs)
(RV.loc_rvector (V.get hh0 hs lv)) hh0 hh1;
// 1-3) For `mt_safe_elts`
assert (V.size_of ihv == j + 1ul - offset_of (Ghost.reveal i)); // head updated
mt_safe_elts_preserved
(lv + 1ul) hs (Ghost.reveal i / 2ul) (j / 2ul)
(RV.loc_rvector (V.get hh0 hs lv)) hh0 hh1; // tail not yet
// 1-4) For the `rv_inv` postcondition
RV.rs_loc_elems_elem_disj
(hvreg hsz) (V.as_seq hh0 hs) (V.frameOf hs)
0 (U32.v (V.size_of hs)) 0 (U32.v lv) (U32.v lv);
RV.rs_loc_elems_parent_disj
(hvreg hsz) (V.as_seq hh0 hs) (V.frameOf hs)
0 (U32.v lv);
RV.rv_elems_inv_preserved
hs 0ul lv (RV.loc_rvector (V.get hh0 hs lv))
hh0 hh1;
assert (RV.rv_elems_inv hh1 hs 0ul lv);
RV.rs_loc_elems_elem_disj
(hvreg hsz) (V.as_seq hh0 hs) (V.frameOf hs)
0 (U32.v (V.size_of hs))
(U32.v lv + 1) (U32.v (V.size_of hs))
(U32.v lv);
RV.rs_loc_elems_parent_disj
(hvreg hsz) (V.as_seq hh0 hs) (V.frameOf hs)
(U32.v lv + 1) (U32.v (V.size_of hs));
RV.rv_elems_inv_preserved
hs (lv + 1ul) (V.size_of hs) (RV.loc_rvector (V.get hh0 hs lv))
hh0 hh1;
assert (RV.rv_elems_inv hh1 hs (lv + 1ul) (V.size_of hs));
// assert (rv_itself_inv hh1 hs);
// assert (elems_reg hh1 hs);
// 1-5) Correctness
assert (S.equal (RV.as_seq hh1 ihv)
(S.snoc (RV.as_seq hh0 (V.get hh0 hs lv)) (Rgl?.r_repr (hreg hsz) hh0 v)));
/// 2) Assign the updated vector to `hs` at the level `lv`.
RV.assign hs lv ihv;
let hh2 = HST.get () in
// 2-1) For the `modifies` postcondition.
assert (modifies (V.loc_vector_within hs lv (lv + 1ul)) hh1 hh2);
assert (modifies (loc_union
(RV.rs_loc_elem (hvreg hsz) (V.as_seq hh0 hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul))) hh0 hh2);
// 2-2) Preservation
Rgl?.r_sep (hreg hsz) v (RV.loc_rvector hs) hh1 hh2;
RV.rv_loc_elems_preserved
hs (lv + 1ul) (V.size_of hs)
(V.loc_vector_within hs lv (lv + 1ul)) hh1 hh2;
// 2-3) For `mt_safe_elts`
assert (V.size_of (V.get hh2 hs lv) == j + 1ul - offset_of (Ghost.reveal i));
mt_safe_elts_preserved
(lv + 1ul) hs (Ghost.reveal i / 2ul) (j / 2ul)
(V.loc_vector_within hs lv (lv + 1ul)) hh1 hh2;
// 2-4) Correctness
RV.as_seq_sub_preserved hs 0ul lv (loc_rvector ihv) hh0 hh1;
RV.as_seq_sub_preserved hs (lv + 1ul) merkle_tree_size_lg (loc_rvector ihv) hh0 hh1;
assert (S.equal (RV.as_seq hh2 hs)
(S.append
(RV.as_seq_sub hh0 hs 0ul lv)
(S.cons (RV.as_seq hh1 ihv)
(RV.as_seq_sub hh0 hs (lv + 1ul) merkle_tree_size_lg))));
as_seq_sub_upd hh0 hs lv (RV.as_seq hh1 ihv)
#pop-options
private
val insert_index_helper_even:
lv:uint32_t{lv < merkle_tree_size_lg} ->
j:index_t{U32.v j < pow2 (32 - U32.v lv) - 1} ->
Lemma (requires (j % 2ul <> 1ul))
(ensures (U32.v j % 2 <> 1 /\ j / 2ul == (j + 1ul) / 2ul))
let insert_index_helper_even lv j = ()
#push-options "--z3rlimit 100 --initial_fuel 1 --max_fuel 1 --initial_ifuel 1 --max_ifuel 1"
private
val insert_index_helper_odd:
lv:uint32_t{lv < merkle_tree_size_lg} ->
i:index_t ->
j:index_t{i <= j && U32.v j < pow2 (32 - U32.v lv) - 1} ->
Lemma (requires (j % 2ul = 1ul /\
j < uint32_32_max))
(ensures (U32.v j % 2 = 1 /\
U32.v (j / 2ul) < pow2 (32 - U32.v (lv + 1ul)) - 1 /\
(j + 1ul) / 2ul == j / 2ul + 1ul /\
j - offset_of i > 0ul))
let insert_index_helper_odd lv i j = ()
#pop-options
private
val loc_union_assoc_4:
a:loc -> b:loc -> c:loc -> d:loc ->
Lemma (loc_union (loc_union a b) (loc_union c d) ==
loc_union (loc_union a c) (loc_union b d))
let loc_union_assoc_4 a b c d =
loc_union_assoc (loc_union a b) c d;
loc_union_assoc a b c;
loc_union_assoc a c b;
loc_union_assoc (loc_union a c) b d
private
val insert_modifies_rec_helper:
#hsz:hash_size_t ->
lv:uint32_t{lv < merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
aloc:loc ->
h:HS.mem ->
Lemma (loc_union
(loc_union
(loc_union
(RV.rs_loc_elem (hvreg hsz) (V.as_seq h hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul)))
aloc)
(loc_union
(loc_union
(RV.rv_loc_elems h hs (lv + 1ul) (V.size_of hs))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs)))
aloc) ==
loc_union
(loc_union
(RV.rv_loc_elems h hs lv (V.size_of hs))
(V.loc_vector_within hs lv (V.size_of hs)))
aloc)
#push-options "--z3rlimit 100 --initial_fuel 2 --max_fuel 2"
let insert_modifies_rec_helper #hsz lv hs aloc h =
assert (V.loc_vector_within hs lv (V.size_of hs) ==
loc_union (V.loc_vector_within hs lv (lv + 1ul))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs)));
RV.rs_loc_elems_rec_inverse (hvreg hsz) (V.as_seq h hs) (U32.v lv) (U32.v (V.size_of hs));
assert (RV.rv_loc_elems h hs lv (V.size_of hs) ==
loc_union (RV.rs_loc_elem (hvreg hsz) (V.as_seq h hs) (U32.v lv))
(RV.rv_loc_elems h hs (lv + 1ul) (V.size_of hs)));
// Applying some association rules...
loc_union_assoc
(loc_union
(RV.rs_loc_elem (hvreg hsz) (V.as_seq h hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul))) aloc
(loc_union
(loc_union
(RV.rv_loc_elems h hs (lv + 1ul) (V.size_of hs))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs)))
aloc);
loc_union_assoc
(loc_union
(RV.rv_loc_elems h hs (lv + 1ul) (V.size_of hs))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs))) aloc aloc;
loc_union_assoc
(loc_union
(RV.rs_loc_elem (hvreg hsz) (V.as_seq h hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul)))
(loc_union
(RV.rv_loc_elems h hs (lv + 1ul) (V.size_of hs))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs)))
aloc;
loc_union_assoc_4
(RV.rs_loc_elem (hvreg hsz) (V.as_seq h hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul))
(RV.rv_loc_elems h hs (lv + 1ul) (V.size_of hs))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs))
#pop-options
private
val insert_modifies_union_loc_weakening:
l1:loc -> l2:loc -> l3:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (modifies l1 h0 h1))
(ensures (modifies (loc_union (loc_union l1 l2) l3) h0 h1))
let insert_modifies_union_loc_weakening l1 l2 l3 h0 h1 =
B.loc_includes_union_l l1 l2 l1;
B.loc_includes_union_l (loc_union l1 l2) l3 (loc_union l1 l2)
private
val insert_snoc_last_helper:
#a:Type -> s:S.seq a{S.length s > 0} -> v:a ->
Lemma (S.index (S.snoc s v) (S.length s - 1) == S.last s)
let insert_snoc_last_helper #a s v = ()
private
val rv_inv_rv_elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (RV.rv_inv h rv))
(ensures (RV.rv_elems_reg h rv i j))
let rv_inv_rv_elems_reg #a #rst #rg h rv i j = ()
// `insert_` recursively inserts the proper hash at each level `lv`, threading a
// compressed hash through as an accumulator. For example, if there are three leaf
// elements in the tree, `insert_` changes `hs` as follows:
// (`hij` is a compressed hash from `hi` to `hj`)
//
//       BEFORE INSERTION                AFTER INSERTION
// lv
// 0     h0 h1 h2              ====>     h0 h1 h2 h3
// 1     h01                             h01 h23
// 2                                     h03
//
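// For illustration, inserting the 4th leaf (i = 0, j = 3, with acc initially the new
// leaf hash h3) runs as follows:
//   lv = 0: hs[0] becomes [h0; h1; h2; h3]; j odd, so acc := hash h2 h3 (= h23), recurse with j = 1;
//   lv = 1: hs[1] becomes [h01; h23];       j odd, so acc := hash h01 h23 (= h03), recurse with j = 0;
//   lv = 2: hs[2] becomes [h03];            j even, so the recursion stops.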
private
val insert_:
#hsz:hash_size_t ->
#hash_spec:Ghost.erased (MTS.hash_fun_t #(U32.v hsz)) ->
lv:uint32_t{lv < merkle_tree_size_lg} ->
i:Ghost.erased index_t ->
j:index_t{
Ghost.reveal i <= j &&
U32.v j < pow2 (32 - U32.v lv) - 1 &&
j < uint32_32_max} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
acc:hash #hsz ->
hash_fun:hash_fun_t #hsz #hash_spec ->
HST.ST unit
(requires (fun h0 ->
RV.rv_inv h0 hs /\
Rgl?.r_inv (hreg hsz) h0 acc /\
HH.disjoint (V.frameOf hs) (B.frameOf acc) /\
mt_safe_elts h0 lv hs (Ghost.reveal i) j))
(ensures (fun h0 _ h1 ->
// memory safety
modifies (loc_union
(loc_union
(RV.rv_loc_elems h0 hs lv (V.size_of hs))
(V.loc_vector_within hs lv (V.size_of hs)))
(B.loc_all_regions_from false (B.frameOf acc)))
h0 h1 /\
RV.rv_inv h1 hs /\
Rgl?.r_inv (hreg hsz) h1 acc /\
mt_safe_elts h1 lv hs (Ghost.reveal i) (j + 1ul) /\
// correctness
(mt_safe_elts_spec h0 lv hs (Ghost.reveal i) j;
S.equal (RV.as_seq h1 hs)
(MTH.insert_ #(U32.v hsz) #hash_spec (U32.v lv) (U32.v (Ghost.reveal i)) (U32.v j)
(RV.as_seq h0 hs) (Rgl?.r_repr (hreg hsz) h0 acc)))))
(decreases (U32.v j))
#push-options "--z3rlimit 800 --initial_fuel 1 --max_fuel 1 --initial_ifuel 1 --max_ifuel 1"
let rec insert_ #hsz #hash_spec lv i j hs acc hash_fun =
let hh0 = HST.get () in
hash_vv_insert_copy lv i j hs acc;
let hh1 = HST.get () in
// Base conditions
V.loc_vector_within_included hs lv (lv + 1ul);
V.loc_vector_within_included hs (lv + 1ul) (V.size_of hs);
V.loc_vector_within_disjoint hs lv (lv + 1ul) (lv + 1ul) (V.size_of hs);
assert (V.size_of (V.get hh1 hs lv) == j + 1ul - offset_of (Ghost.reveal i));
assert (mt_safe_elts hh1 (lv + 1ul) hs (Ghost.reveal i / 2ul) (j / 2ul));
if j % 2ul = 1ul
then (insert_index_helper_odd lv (Ghost.reveal i) j;
assert (S.length (S.index (RV.as_seq hh0 hs) (U32.v lv)) > 0);
let lvhs = V.index hs lv in
assert (U32.v (V.size_of lvhs) ==
S.length (S.index (RV.as_seq hh0 hs) (U32.v lv)) + 1);
assert (V.size_of lvhs > 1ul);
/// 3) Update the accumulator `acc`.
hash_vec_rv_inv_r_inv hh1 (V.get hh1 hs lv) (V.size_of (V.get hh1 hs lv) - 2ul);
assert (Rgl?.r_inv (hreg hsz) hh1 acc);
hash_fun (V.index lvhs (V.size_of lvhs - 2ul)) acc acc;
let hh2 = HST.get () in
// 3-1) For the `modifies` postcondition
assert (modifies (B.loc_all_regions_from false (B.frameOf acc)) hh1 hh2);
assert (modifies
(loc_union
(loc_union
(RV.rs_loc_elem (hvreg hsz) (V.as_seq hh0 hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul)))
(B.loc_all_regions_from false (B.frameOf acc)))
hh0 hh2);
// 3-2) Preservation
RV.rv_inv_preserved
hs (B.loc_region_only false (B.frameOf acc)) hh1 hh2;
RV.as_seq_preserved
hs (B.loc_region_only false (B.frameOf acc)) hh1 hh2;
RV.rv_loc_elems_preserved
hs (lv + 1ul) (V.size_of hs)
(B.loc_region_only false (B.frameOf acc)) hh1 hh2;
assert (RV.rv_inv hh2 hs);
assert (Rgl?.r_inv (hreg hsz) hh2 acc);
// 3-3) For `mt_safe_elts`
V.get_preserved hs lv
(B.loc_region_only false (B.frameOf acc)) hh1 hh2; // head preserved
mt_safe_elts_preserved
(lv + 1ul) hs (Ghost.reveal i / 2ul) (j / 2ul)
(B.loc_region_only false (B.frameOf acc)) hh1 hh2; // tail preserved
// 3-4) Correctness
insert_snoc_last_helper
(RV.as_seq hh0 (V.get hh0 hs lv))
(Rgl?.r_repr (hreg hsz) hh0 acc);
assert (S.equal (Rgl?.r_repr (hreg hsz) hh2 acc) // `nacc` in `MTH.insert_`
((Ghost.reveal hash_spec)
(S.last (S.index (RV.as_seq hh0 hs) (U32.v lv)))
(Rgl?.r_repr (hreg hsz) hh0 acc)));
/// 4) Recursion
insert_ (lv + 1ul)
(Ghost.hide (Ghost.reveal i / 2ul)) (j / 2ul)
hs acc hash_fun;
let hh3 = HST.get () in
// 4-0) Memory safety brought from the postcondition of the recursion
assert (RV.rv_inv hh3 hs);
assert (Rgl?.r_inv (hreg hsz) hh3 acc);
assert (modifies (loc_union
(loc_union
(RV.rv_loc_elems hh0 hs (lv + 1ul) (V.size_of hs))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs)))
(B.loc_all_regions_from false (B.frameOf acc)))
hh2 hh3);
assert (modifies
(loc_union
(loc_union
(loc_union
(RV.rs_loc_elem (hvreg hsz) (V.as_seq hh0 hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul)))
(B.loc_all_regions_from false (B.frameOf acc)))
(loc_union
(loc_union
(RV.rv_loc_elems hh0 hs (lv + 1ul) (V.size_of hs))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs)))
(B.loc_all_regions_from false (B.frameOf acc))))
hh0 hh3);
// 4-1) For `mt_safe_elts`
rv_inv_rv_elems_reg hh2 hs (lv + 1ul) (V.size_of hs);
RV.rv_loc_elems_included hh2 hs (lv + 1ul) (V.size_of hs);
assert (loc_disjoint
(V.loc_vector_within hs lv (lv + 1ul))
(RV.rv_loc_elems hh2 hs (lv + 1ul) (V.size_of hs)));
assert (loc_disjoint
(V.loc_vector_within hs lv (lv + 1ul))
(B.loc_all_regions_from false (B.frameOf acc)));
V.get_preserved hs lv
(loc_union
(loc_union
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs))
(RV.rv_loc_elems hh2 hs (lv + 1ul) (V.size_of hs)))
(B.loc_all_regions_from false (B.frameOf acc)))
hh2 hh3;
assert (V.size_of (V.get hh3 hs lv) ==
j + 1ul - offset_of (Ghost.reveal i)); // head preserved
assert (mt_safe_elts hh3 (lv + 1ul) hs
(Ghost.reveal i / 2ul) (j / 2ul + 1ul)); // tail by recursion
mt_safe_elts_constr hh3 lv hs (Ghost.reveal i) (j + 1ul);
assert (mt_safe_elts hh3 lv hs (Ghost.reveal i) (j + 1ul));
// 4-2) Correctness
mt_safe_elts_spec hh2 (lv + 1ul) hs (Ghost.reveal i / 2ul) (j / 2ul);
assert (S.equal (RV.as_seq hh3 hs)
(MTH.insert_ #(U32.v hsz) #(Ghost.reveal hash_spec) (U32.v lv + 1) (U32.v (Ghost.reveal i) / 2) (U32.v j / 2)
(RV.as_seq hh2 hs) (Rgl?.r_repr (hreg hsz) hh2 acc)));
mt_safe_elts_spec hh0 lv hs (Ghost.reveal i) j;
MTH.insert_rec #(U32.v hsz) #(Ghost.reveal hash_spec) (U32.v lv) (U32.v (Ghost.reveal i)) (U32.v j)
(RV.as_seq hh0 hs) (Rgl?.r_repr (hreg hsz) hh0 acc);
assert (S.equal (RV.as_seq hh3 hs)
(MTH.insert_ #(U32.v hsz) #(Ghost.reveal hash_spec) (U32.v lv) (U32.v (Ghost.reveal i)) (U32.v j)
(RV.as_seq hh0 hs) (Rgl?.r_repr (hreg hsz) hh0 acc))))
else (insert_index_helper_even lv j;
// memory safety
assert (mt_safe_elts hh1 (lv + 1ul) hs (Ghost.reveal i / 2ul) ((j + 1ul) / 2ul));
mt_safe_elts_constr hh1 lv hs (Ghost.reveal i) (j + 1ul);
assert (mt_safe_elts hh1 lv hs (Ghost.reveal i) (j + 1ul));
assert (modifies
(loc_union
(RV.rs_loc_elem (hvreg hsz) (V.as_seq hh0 hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul)))
hh0 hh1);
insert_modifies_union_loc_weakening
(loc_union
(RV.rs_loc_elem (hvreg hsz) (V.as_seq hh0 hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul)))
(B.loc_all_regions_from false (B.frameOf acc))
(loc_union
(loc_union
(RV.rv_loc_elems hh0 hs (lv + 1ul) (V.size_of hs))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs)))
(B.loc_all_regions_from false (B.frameOf acc)))
hh0 hh1;
// correctness
mt_safe_elts_spec hh0 lv hs (Ghost.reveal i) j;
MTH.insert_base #(U32.v hsz) #(Ghost.reveal hash_spec) (U32.v lv) (U32.v (Ghost.reveal i)) (U32.v j)
(RV.as_seq hh0 hs) (Rgl?.r_repr (hreg hsz) hh0 acc);
assert (S.equal (RV.as_seq hh1 hs)
(MTH.insert_ #(U32.v hsz) #(Ghost.reveal hash_spec) (U32.v lv) (U32.v (Ghost.reveal i)) (U32.v j)
(RV.as_seq hh0 hs) (Rgl?.r_repr (hreg hsz) hh0 acc))));
/// 5) Proving the postcondition after recursion
let hh4 = HST.get () in
// 5-1) For the `modifies` postcondition.
assert (modifies
(loc_union
(loc_union
(loc_union
(RV.rs_loc_elem (hvreg hsz) (V.as_seq hh0 hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul)))
(B.loc_all_regions_from false (B.frameOf acc)))
(loc_union
(loc_union
(RV.rv_loc_elems hh0 hs (lv + 1ul) (V.size_of hs))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs)))
(B.loc_all_regions_from false (B.frameOf acc))))
hh0 hh4);
insert_modifies_rec_helper
lv hs (B.loc_all_regions_from false (B.frameOf acc)) hh0;
// 5-2) For `mt_safe_elts`
assert (mt_safe_elts hh4 lv hs (Ghost.reveal i) (j + 1ul));
// 5-3) Preservation
assert (RV.rv_inv hh4 hs);
assert (Rgl?.r_inv (hreg hsz) hh4 acc);
// 5-4) Correctness
mt_safe_elts_spec hh0 lv hs (Ghost.reveal i) j;
assert (S.equal (RV.as_seq hh4 hs)
(MTH.insert_ #(U32.v hsz) #hash_spec (U32.v lv) (U32.v (Ghost.reveal i)) (U32.v j)
(RV.as_seq hh0 hs) (Rgl?.r_repr (hreg hsz) hh0 acc))) // QED
#pop-options
private inline_for_extraction | false | false | MerkleTree.Low.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 0,
"initial_ifuel": 0,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 10,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val mt_insert_pre_nst: mtv:merkle_tree -> v:hash #(MT?.hash_size mtv) -> Tot bool | [] | MerkleTree.Low.mt_insert_pre_nst | {
"file_name": "src/MerkleTree.Low.fst",
"git_rev": "3b0f086655c145aa23f58a97259ebf4cf112a4a3",
"git_url": "https://github.com/hacl-star/merkle-tree.git",
"project_name": "merkle-tree"
} | mtv: MerkleTree.Low.merkle_tree -> v: MerkleTree.Low.Datastructures.hash -> Prims.bool | {
"end_col": 100,
"end_line": 927,
"start_col": 30,
"start_line": 927
} |
Prims.Tot | val add64_fits (x: offset_t) (i: index_t) : Tot bool | [
{
"abbrev": false,
"full_module": "MerkleTree.Low.VectorExtras",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree.Low.Hashfunctions",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree.Low.Datastructures",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": true,
"full_module": "MerkleTree.Spec",
"short_module": "MTS"
},
{
"abbrev": true,
"full_module": "MerkleTree.New.High",
"short_module": "MTH"
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "LowStar.Regional.Instances",
"short_module": "RVI"
},
{
"abbrev": true,
"full_module": "LowStar.RVector",
"short_module": "RV"
},
{
"abbrev": true,
"full_module": "LowStar.Vector",
"short_module": "V"
},
{
"abbrev": true,
"full_module": "LowStar.ConstBuffer",
"short_module": "CB"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.Monotonic.HyperHeap",
"short_module": "HH"
},
{
"abbrev": true,
"full_module": "FStar.Monotonic.HyperStack",
"short_module": "MHS"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "LowStar.Regional.Instances",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.RVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Regional",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Vector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.BufferOps",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Buffer",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Integers",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.All",
"short_module": null
},
{
"abbrev": false,
"full_module": "EverCrypt.Helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let add64_fits (x:offset_t) (i:index_t): Tot bool = uint64_max - x >= (u32_64 i) | val add64_fits (x: offset_t) (i: index_t) : Tot bool
let add64_fits (x: offset_t) (i: index_t) : Tot bool = | false | null | false | uint64_max - x >= (u32_64 i) | {
"checked_file": "MerkleTree.Low.fst.checked",
"dependencies": [
"prims.fst.checked",
"MerkleTree.Spec.fst.checked",
"MerkleTree.New.High.fst.checked",
"MerkleTree.Low.VectorExtras.fst.checked",
"MerkleTree.Low.Hashfunctions.fst.checked",
"MerkleTree.Low.Datastructures.fst.checked",
"LowStar.Vector.fst.checked",
"LowStar.RVector.fst.checked",
"LowStar.Regional.Instances.fst.checked",
"LowStar.Regional.fst.checked",
"LowStar.ConstBuffer.fsti.checked",
"LowStar.BufferOps.fst.checked",
"LowStar.Buffer.fst.checked",
"Lib.IntTypes.fsti.checked",
"Lib.ByteBuffer.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.Properties.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Monotonic.HyperStack.fsti.checked",
"FStar.Monotonic.HyperHeap.fsti.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Integers.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked",
"FStar.All.fst.checked",
"EverCrypt.Helpers.fsti.checked"
],
"interface_file": false,
"source_file": "MerkleTree.Low.fst"
} | [
"total"
] | [
"MerkleTree.Low.offset_t",
"MerkleTree.Low.index_t",
"FStar.Integers.op_Greater_Equals",
"FStar.Integers.Unsigned",
"FStar.Integers.W64",
"FStar.Integers.op_Subtraction",
"MerkleTree.Low.uint64_max",
"MerkleTree.Low.u32_64",
"Prims.bool"
] | [] | module MerkleTree.Low
open EverCrypt.Helpers
open FStar.All
open FStar.Integers
open FStar.Mul
open LowStar.Buffer
open LowStar.BufferOps
open LowStar.Vector
open LowStar.Regional
open LowStar.RVector
open LowStar.Regional.Instances
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
module MHS = FStar.Monotonic.HyperStack
module HH = FStar.Monotonic.HyperHeap
module B = LowStar.Buffer
module CB = LowStar.ConstBuffer
module V = LowStar.Vector
module RV = LowStar.RVector
module RVI = LowStar.Regional.Instances
module S = FStar.Seq
module U32 = FStar.UInt32
module U64 = FStar.UInt64
module MTH = MerkleTree.New.High
module MTS = MerkleTree.Spec
open Lib.IntTypes
open MerkleTree.Low.Datastructures
open MerkleTree.Low.Hashfunctions
open MerkleTree.Low.VectorExtras
#set-options "--z3rlimit 10 --initial_fuel 0 --max_fuel 0 --initial_ifuel 0 --max_ifuel 0"
type const_pointer (a:Type0) = b:CB.const_buffer a{CB.length b == 1 /\ CB.qual_of b == CB.MUTABLE}
/// Low-level Merkle tree data structure
///
// NOTE: because of a lack of 64-bit LowStar.Buffer support, currently
// we cannot change below to some other types.
type index_t = uint32_t
let uint32_32_max = 4294967295ul
inline_for_extraction
let uint32_max = 4294967295UL
let uint64_max = 18446744073709551615UL
let offset_range_limit = uint32_max
type offset_t = uint64_t
inline_for_extraction noextract unfold let u32_64 = Int.Cast.uint32_to_uint64
inline_for_extraction noextract unfold let u64_32 = Int.Cast.uint64_to_uint32
private inline_for_extraction
let offsets_connect (x:offset_t) (y:offset_t): Tot bool = y >= x && (y - x) <= offset_range_limit
private inline_for_extraction
let split_offset (tree:offset_t) (index:offset_t{offsets_connect tree index}): Tot index_t =
[@inline_let] let diff = U64.sub_mod index tree in
assert (diff <= offset_range_limit);
Int.Cast.uint64_to_uint32 diff | false | true | MerkleTree.Low.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 0,
"initial_ifuel": 0,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 10,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val add64_fits (x: offset_t) (i: index_t) : Tot bool | [] | MerkleTree.Low.add64_fits | {
"file_name": "src/MerkleTree.Low.fst",
"git_rev": "3b0f086655c145aa23f58a97259ebf4cf112a4a3",
"git_url": "https://github.com/hacl-star/merkle-tree.git",
"project_name": "merkle-tree"
} | x: MerkleTree.Low.offset_t -> i: MerkleTree.Low.index_t -> Prims.bool | {
"end_col": 80,
"end_line": 71,
"start_col": 52,
"start_line": 71
} |
Prims.Tot | val offset_of: i:index_t -> Tot index_t | [
{
"abbrev": false,
"full_module": "MerkleTree.Low.VectorExtras",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree.Low.Hashfunctions",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree.Low.Datastructures",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": true,
"full_module": "MerkleTree.Spec",
"short_module": "MTS"
},
{
"abbrev": true,
"full_module": "MerkleTree.New.High",
"short_module": "MTH"
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "LowStar.Regional.Instances",
"short_module": "RVI"
},
{
"abbrev": true,
"full_module": "LowStar.RVector",
"short_module": "RV"
},
{
"abbrev": true,
"full_module": "LowStar.Vector",
"short_module": "V"
},
{
"abbrev": true,
"full_module": "LowStar.ConstBuffer",
"short_module": "CB"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.Monotonic.HyperHeap",
"short_module": "HH"
},
{
"abbrev": true,
"full_module": "FStar.Monotonic.HyperStack",
"short_module": "MHS"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "LowStar.Regional.Instances",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.RVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Regional",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Vector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.BufferOps",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Buffer",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Integers",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.All",
"short_module": null
},
{
"abbrev": false,
"full_module": "EverCrypt.Helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let offset_of i = if i % 2ul = 0ul then i else i - 1ul | val offset_of: i:index_t -> Tot index_t
let offset_of i = | false | null | false | if i % 2ul = 0ul then i else i - 1ul | {
"checked_file": "MerkleTree.Low.fst.checked",
"dependencies": [
"prims.fst.checked",
"MerkleTree.Spec.fst.checked",
"MerkleTree.New.High.fst.checked",
"MerkleTree.Low.VectorExtras.fst.checked",
"MerkleTree.Low.Hashfunctions.fst.checked",
"MerkleTree.Low.Datastructures.fst.checked",
"LowStar.Vector.fst.checked",
"LowStar.RVector.fst.checked",
"LowStar.Regional.Instances.fst.checked",
"LowStar.Regional.fst.checked",
"LowStar.ConstBuffer.fsti.checked",
"LowStar.BufferOps.fst.checked",
"LowStar.Buffer.fst.checked",
"Lib.IntTypes.fsti.checked",
"Lib.ByteBuffer.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.Properties.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Monotonic.HyperStack.fsti.checked",
"FStar.Monotonic.HyperHeap.fsti.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Integers.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked",
"FStar.All.fst.checked",
"EverCrypt.Helpers.fsti.checked"
],
"interface_file": false,
"source_file": "MerkleTree.Low.fst"
} | [
"total"
] | [
"MerkleTree.Low.index_t",
"Prims.op_Equality",
"FStar.UInt32.t",
"FStar.Integers.op_Percent",
"FStar.Integers.Unsigned",
"FStar.Integers.W32",
"FStar.UInt32.__uint_to_t",
"Prims.bool",
"FStar.Integers.op_Subtraction"
] | [] | module MerkleTree.Low
open EverCrypt.Helpers
open FStar.All
open FStar.Integers
open FStar.Mul
open LowStar.Buffer
open LowStar.BufferOps
open LowStar.Vector
open LowStar.Regional
open LowStar.RVector
open LowStar.Regional.Instances
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
module MHS = FStar.Monotonic.HyperStack
module HH = FStar.Monotonic.HyperHeap
module B = LowStar.Buffer
module CB = LowStar.ConstBuffer
module V = LowStar.Vector
module RV = LowStar.RVector
module RVI = LowStar.Regional.Instances
module S = FStar.Seq
module U32 = FStar.UInt32
module U64 = FStar.UInt64
module MTH = MerkleTree.New.High
module MTS = MerkleTree.Spec
open Lib.IntTypes
open MerkleTree.Low.Datastructures
open MerkleTree.Low.Hashfunctions
open MerkleTree.Low.VectorExtras
#set-options "--z3rlimit 10 --initial_fuel 0 --max_fuel 0 --initial_ifuel 0 --max_ifuel 0"
type const_pointer (a:Type0) = b:CB.const_buffer a{CB.length b == 1 /\ CB.qual_of b == CB.MUTABLE}
/// Low-level Merkle tree data structure
///
// NOTE: because of the lack of 64-bit LowStar.Buffer support, we currently
// cannot change the types below.
type index_t = uint32_t
let uint32_32_max = 4294967295ul
inline_for_extraction
let uint32_max = 4294967295UL
let uint64_max = 18446744073709551615UL
let offset_range_limit = uint32_max
type offset_t = uint64_t
inline_for_extraction noextract unfold let u32_64 = Int.Cast.uint32_to_uint64
inline_for_extraction noextract unfold let u64_32 = Int.Cast.uint64_to_uint32
private inline_for_extraction
let offsets_connect (x:offset_t) (y:offset_t): Tot bool = y >= x && (y - x) <= offset_range_limit
private inline_for_extraction
let split_offset (tree:offset_t) (index:offset_t{offsets_connect tree index}): Tot index_t =
[@inline_let] let diff = U64.sub_mod index tree in
assert (diff <= offset_range_limit);
Int.Cast.uint64_to_uint32 diff
private inline_for_extraction
let add64_fits (x:offset_t) (i:index_t): Tot bool = uint64_max - x >= (u32_64 i)
private inline_for_extraction
let join_offset (tree:offset_t) (i:index_t{add64_fits tree i}): Tot (r:offset_t{offsets_connect tree r}) =
U64.add tree (u32_64 i)
inline_for_extraction val merkle_tree_size_lg: uint32_t
let merkle_tree_size_lg = 32ul
// A Merkle tree `MT i j hs rhs_ok rhs` stores all necessary hashes to generate
// a Merkle path for each element from the index `i` to `j-1`.
// - Parameters
// `hs`: a 2-dim store for hashes, where `hs[0]` contains leaf hash values.
// `rhs_ok`: to check the rightmost hashes are up-to-date
// `rhs`: a store for "rightmost" hashes, manipulated only when required to
// calculate some merkle paths that need the rightmost hashes
// as a part of them.
// `mroot`: during the construction of `rhs` we can also calculate the Merkle
// root of the tree. If `rhs_ok` is true then it has the up-to-date
// root value.
noeq type merkle_tree =
| MT: hash_size:hash_size_t ->
offset:offset_t ->
i:index_t -> j:index_t{i <= j /\ add64_fits offset j} ->
hs:hash_vv hash_size {V.size_of hs = merkle_tree_size_lg} ->
rhs_ok:bool ->
rhs:hash_vec #hash_size {V.size_of rhs = merkle_tree_size_lg} ->
mroot:hash #hash_size ->
hash_spec:Ghost.erased (MTS.hash_fun_t #(U32.v hash_size)) ->
hash_fun:hash_fun_t #hash_size #hash_spec ->
merkle_tree
type mt_p = B.pointer merkle_tree
type const_mt_p = const_pointer merkle_tree
inline_for_extraction
let merkle_tree_conditions (#hsz:Ghost.erased hash_size_t) (offset:uint64_t) (i j:uint32_t) (hs:hash_vv hsz) (rhs_ok:bool) (rhs:hash_vec #hsz) (mroot:hash #hsz): Tot bool =
j >= i && add64_fits offset j &&
V.size_of hs = merkle_tree_size_lg &&
V.size_of rhs = merkle_tree_size_lg
// The maximum number of currently held elements in the tree is (2^32 - 1).
// cwinter: even when using 64-bit indices, we fail if the underlying 32-bit
// vector is full; this can be fixed if necessary.
private inline_for_extraction
val mt_not_full_nst: mtv:merkle_tree -> Tot bool
let mt_not_full_nst mtv = MT?.j mtv < uint32_32_max
val mt_not_full: HS.mem -> mt_p -> GTot bool
let mt_not_full h mt = mt_not_full_nst (B.get h mt 0)
/// (Memory) Safety | false | true | MerkleTree.Low.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 0,
"initial_ifuel": 0,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 10,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val offset_of: i:index_t -> Tot index_t | [] | MerkleTree.Low.offset_of | {
"file_name": "src/MerkleTree.Low.fst",
"git_rev": "3b0f086655c145aa23f58a97259ebf4cf112a4a3",
"git_url": "https://github.com/hacl-star/merkle-tree.git",
"project_name": "merkle-tree"
} | i: MerkleTree.Low.index_t -> MerkleTree.Low.index_t | {
"end_col": 54,
"end_line": 125,
"start_col": 18,
"start_line": 125
} |
Prims.GTot | val mt_safe_elts:
#hsz:hash_size_t ->
h:HS.mem -> lv:uint32_t{lv <= merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
i:index_t -> j:index_t{j >= i} ->
GTot Type0 (decreases (32 - U32.v lv)) | [
{
"abbrev": false,
"full_module": "MerkleTree.Low.VectorExtras",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree.Low.Hashfunctions",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree.Low.Datastructures",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": true,
"full_module": "MerkleTree.Spec",
"short_module": "MTS"
},
{
"abbrev": true,
"full_module": "MerkleTree.New.High",
"short_module": "MTH"
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "LowStar.Regional.Instances",
"short_module": "RVI"
},
{
"abbrev": true,
"full_module": "LowStar.RVector",
"short_module": "RV"
},
{
"abbrev": true,
"full_module": "LowStar.Vector",
"short_module": "V"
},
{
"abbrev": true,
"full_module": "LowStar.ConstBuffer",
"short_module": "CB"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.Monotonic.HyperHeap",
"short_module": "HH"
},
{
"abbrev": true,
"full_module": "FStar.Monotonic.HyperStack",
"short_module": "MHS"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "LowStar.Regional.Instances",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.RVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Regional",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Vector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.BufferOps",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Buffer",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Integers",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.All",
"short_module": null
},
{
"abbrev": false,
"full_module": "EverCrypt.Helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let rec mt_safe_elts #hsz h lv hs i j =
if lv = merkle_tree_size_lg then true
else (let ofs = offset_of i in
V.size_of (V.get h hs lv) == j - ofs /\
mt_safe_elts #hsz h (lv + 1ul) hs (i / 2ul) (j / 2ul)) | val mt_safe_elts:
#hsz:hash_size_t ->
h:HS.mem -> lv:uint32_t{lv <= merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
i:index_t -> j:index_t{j >= i} ->
GTot Type0 (decreases (32 - U32.v lv))
let rec mt_safe_elts #hsz h lv hs i j = | false | null | false | if lv = merkle_tree_size_lg
then true
else
(let ofs = offset_of i in
V.size_of (V.get h hs lv) == j - ofs /\ mt_safe_elts #hsz h (lv + 1ul) hs (i / 2ul) (j / 2ul)) | {
"checked_file": "MerkleTree.Low.fst.checked",
"dependencies": [
"prims.fst.checked",
"MerkleTree.Spec.fst.checked",
"MerkleTree.New.High.fst.checked",
"MerkleTree.Low.VectorExtras.fst.checked",
"MerkleTree.Low.Hashfunctions.fst.checked",
"MerkleTree.Low.Datastructures.fst.checked",
"LowStar.Vector.fst.checked",
"LowStar.RVector.fst.checked",
"LowStar.Regional.Instances.fst.checked",
"LowStar.Regional.fst.checked",
"LowStar.ConstBuffer.fsti.checked",
"LowStar.BufferOps.fst.checked",
"LowStar.Buffer.fst.checked",
"Lib.IntTypes.fsti.checked",
"Lib.ByteBuffer.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.Properties.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Monotonic.HyperStack.fsti.checked",
"FStar.Monotonic.HyperHeap.fsti.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Integers.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked",
"FStar.All.fst.checked",
"EverCrypt.Helpers.fsti.checked"
],
"interface_file": false,
"source_file": "MerkleTree.Low.fst"
} | [
"sometrivial",
""
] | [
"MerkleTree.Low.Datastructures.hash_size_t",
"FStar.Monotonic.HyperStack.mem",
"LowStar.Vector.uint32_t",
"Prims.b2t",
"FStar.Integers.op_Less_Equals",
"FStar.Integers.Unsigned",
"FStar.Integers.W32",
"MerkleTree.Low.merkle_tree_size_lg",
"MerkleTree.Low.Datastructures.hash_vv",
"Prims.op_Equality",
"LowStar.Vector.size_of",
"MerkleTree.Low.Datastructures.hash_vec",
"MerkleTree.Low.index_t",
"FStar.Integers.op_Greater_Equals",
"Prims.bool",
"Prims.l_and",
"Prims.eq2",
"FStar.UInt32.t",
"MerkleTree.Low.Datastructures.hash",
"LowStar.Vector.get",
"FStar.Integers.op_Subtraction",
"MerkleTree.Low.mt_safe_elts",
"FStar.Integers.op_Plus",
"FStar.UInt32.__uint_to_t",
"FStar.Integers.op_Slash",
"MerkleTree.Low.offset_of"
] | [] | module MerkleTree.Low
open EverCrypt.Helpers
open FStar.All
open FStar.Integers
open FStar.Mul
open LowStar.Buffer
open LowStar.BufferOps
open LowStar.Vector
open LowStar.Regional
open LowStar.RVector
open LowStar.Regional.Instances
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
module MHS = FStar.Monotonic.HyperStack
module HH = FStar.Monotonic.HyperHeap
module B = LowStar.Buffer
module CB = LowStar.ConstBuffer
module V = LowStar.Vector
module RV = LowStar.RVector
module RVI = LowStar.Regional.Instances
module S = FStar.Seq
module U32 = FStar.UInt32
module U64 = FStar.UInt64
module MTH = MerkleTree.New.High
module MTS = MerkleTree.Spec
open Lib.IntTypes
open MerkleTree.Low.Datastructures
open MerkleTree.Low.Hashfunctions
open MerkleTree.Low.VectorExtras
#set-options "--z3rlimit 10 --initial_fuel 0 --max_fuel 0 --initial_ifuel 0 --max_ifuel 0"
type const_pointer (a:Type0) = b:CB.const_buffer a{CB.length b == 1 /\ CB.qual_of b == CB.MUTABLE}
/// Low-level Merkle tree data structure
///
// NOTE: because of the lack of 64-bit LowStar.Buffer support, we currently
// cannot change the types below.
type index_t = uint32_t
let uint32_32_max = 4294967295ul
inline_for_extraction
let uint32_max = 4294967295UL
let uint64_max = 18446744073709551615UL
let offset_range_limit = uint32_max
type offset_t = uint64_t
inline_for_extraction noextract unfold let u32_64 = Int.Cast.uint32_to_uint64
inline_for_extraction noextract unfold let u64_32 = Int.Cast.uint64_to_uint32
private inline_for_extraction
let offsets_connect (x:offset_t) (y:offset_t): Tot bool = y >= x && (y - x) <= offset_range_limit
private inline_for_extraction
let split_offset (tree:offset_t) (index:offset_t{offsets_connect tree index}): Tot index_t =
[@inline_let] let diff = U64.sub_mod index tree in
assert (diff <= offset_range_limit);
Int.Cast.uint64_to_uint32 diff
private inline_for_extraction
let add64_fits (x:offset_t) (i:index_t): Tot bool = uint64_max - x >= (u32_64 i)
private inline_for_extraction
let join_offset (tree:offset_t) (i:index_t{add64_fits tree i}): Tot (r:offset_t{offsets_connect tree r}) =
U64.add tree (u32_64 i)
inline_for_extraction val merkle_tree_size_lg: uint32_t
let merkle_tree_size_lg = 32ul
// A Merkle tree `MT i j hs rhs_ok rhs` stores all necessary hashes to generate
// a Merkle path for each element from the index `i` to `j-1`.
// - Parameters
// `hs`: a 2-dim store for hashes, where `hs[0]` contains leaf hash values.
// `rhs_ok`: to check the rightmost hashes are up-to-date
// `rhs`: a store for "rightmost" hashes, manipulated only when required to
// calculate some merkle paths that need the rightmost hashes
// as a part of them.
// `mroot`: during the construction of `rhs` we can also calculate the Merkle
// root of the tree. If `rhs_ok` is true then it has the up-to-date
// root value.
noeq type merkle_tree =
| MT: hash_size:hash_size_t ->
offset:offset_t ->
i:index_t -> j:index_t{i <= j /\ add64_fits offset j} ->
hs:hash_vv hash_size {V.size_of hs = merkle_tree_size_lg} ->
rhs_ok:bool ->
rhs:hash_vec #hash_size {V.size_of rhs = merkle_tree_size_lg} ->
mroot:hash #hash_size ->
hash_spec:Ghost.erased (MTS.hash_fun_t #(U32.v hash_size)) ->
hash_fun:hash_fun_t #hash_size #hash_spec ->
merkle_tree
type mt_p = B.pointer merkle_tree
type const_mt_p = const_pointer merkle_tree
inline_for_extraction
let merkle_tree_conditions (#hsz:Ghost.erased hash_size_t) (offset:uint64_t) (i j:uint32_t) (hs:hash_vv hsz) (rhs_ok:bool) (rhs:hash_vec #hsz) (mroot:hash #hsz): Tot bool =
j >= i && add64_fits offset j &&
V.size_of hs = merkle_tree_size_lg &&
V.size_of rhs = merkle_tree_size_lg
// The maximum number of currently held elements in the tree is (2^32 - 1).
// cwinter: even when using 64-bit indices, we fail if the underlying 32-bit
// vector is full; this can be fixed if necessary.
private inline_for_extraction
val mt_not_full_nst: mtv:merkle_tree -> Tot bool
let mt_not_full_nst mtv = MT?.j mtv < uint32_32_max
val mt_not_full: HS.mem -> mt_p -> GTot bool
let mt_not_full h mt = mt_not_full_nst (B.get h mt 0)
/// (Memory) Safety
val offset_of: i:index_t -> Tot index_t
let offset_of i = if i % 2ul = 0ul then i else i - 1ul
// `mt_safe_elts` says that it is safe to access an element from `i` to `j - 1`
// at level `lv` in the Merkle tree, i.e., hs[lv][k] (i <= k < j) is a valid
// element.
inline_for_extraction noextract
val mt_safe_elts:
#hsz:hash_size_t ->
h:HS.mem -> lv:uint32_t{lv <= merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
i:index_t -> j:index_t{j >= i} ->
GTot Type0 (decreases (32 - U32.v lv)) | false | false | MerkleTree.Low.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 0,
"initial_ifuel": 0,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 10,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val mt_safe_elts:
#hsz:hash_size_t ->
h:HS.mem -> lv:uint32_t{lv <= merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
i:index_t -> j:index_t{j >= i} ->
GTot Type0 (decreases (32 - U32.v lv)) | [
"recursion"
] | MerkleTree.Low.mt_safe_elts | {
"file_name": "src/MerkleTree.Low.fst",
"git_rev": "3b0f086655c145aa23f58a97259ebf4cf112a4a3",
"git_url": "https://github.com/hacl-star/merkle-tree.git",
"project_name": "merkle-tree"
} |
h: FStar.Monotonic.HyperStack.mem ->
lv: LowStar.Vector.uint32_t{lv <= MerkleTree.Low.merkle_tree_size_lg} ->
hs:
MerkleTree.Low.Datastructures.hash_vv hsz
{LowStar.Vector.size_of hs = MerkleTree.Low.merkle_tree_size_lg} ->
i: MerkleTree.Low.index_t ->
j: MerkleTree.Low.index_t{j >= i}
-> Prims.GTot Type0 | {
"end_col": 61,
"end_line": 141,
"start_col": 2,
"start_line": 138
} |
Prims.Tot | val join_offset (tree: offset_t) (i: index_t{add64_fits tree i})
: Tot (r: offset_t{offsets_connect tree r}) | [
{
"abbrev": false,
"full_module": "MerkleTree.Low.VectorExtras",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree.Low.Hashfunctions",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree.Low.Datastructures",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": true,
"full_module": "MerkleTree.Spec",
"short_module": "MTS"
},
{
"abbrev": true,
"full_module": "MerkleTree.New.High",
"short_module": "MTH"
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "LowStar.Regional.Instances",
"short_module": "RVI"
},
{
"abbrev": true,
"full_module": "LowStar.RVector",
"short_module": "RV"
},
{
"abbrev": true,
"full_module": "LowStar.Vector",
"short_module": "V"
},
{
"abbrev": true,
"full_module": "LowStar.ConstBuffer",
"short_module": "CB"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.Monotonic.HyperHeap",
"short_module": "HH"
},
{
"abbrev": true,
"full_module": "FStar.Monotonic.HyperStack",
"short_module": "MHS"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "LowStar.Regional.Instances",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.RVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Regional",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Vector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.BufferOps",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Buffer",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Integers",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.All",
"short_module": null
},
{
"abbrev": false,
"full_module": "EverCrypt.Helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let join_offset (tree:offset_t) (i:index_t{add64_fits tree i}): Tot (r:offset_t{offsets_connect tree r}) =
U64.add tree (u32_64 i) | val join_offset (tree: offset_t) (i: index_t{add64_fits tree i})
: Tot (r: offset_t{offsets_connect tree r})
let join_offset (tree: offset_t) (i: index_t{add64_fits tree i})
: Tot (r: offset_t{offsets_connect tree r}) = | false | null | false | U64.add tree (u32_64 i) | {
"checked_file": "MerkleTree.Low.fst.checked",
"dependencies": [
"prims.fst.checked",
"MerkleTree.Spec.fst.checked",
"MerkleTree.New.High.fst.checked",
"MerkleTree.Low.VectorExtras.fst.checked",
"MerkleTree.Low.Hashfunctions.fst.checked",
"MerkleTree.Low.Datastructures.fst.checked",
"LowStar.Vector.fst.checked",
"LowStar.RVector.fst.checked",
"LowStar.Regional.Instances.fst.checked",
"LowStar.Regional.fst.checked",
"LowStar.ConstBuffer.fsti.checked",
"LowStar.BufferOps.fst.checked",
"LowStar.Buffer.fst.checked",
"Lib.IntTypes.fsti.checked",
"Lib.ByteBuffer.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.Properties.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Monotonic.HyperStack.fsti.checked",
"FStar.Monotonic.HyperHeap.fsti.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Integers.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked",
"FStar.All.fst.checked",
"EverCrypt.Helpers.fsti.checked"
],
"interface_file": false,
"source_file": "MerkleTree.Low.fst"
} | [
"total"
] | [
"MerkleTree.Low.offset_t",
"MerkleTree.Low.index_t",
"Prims.b2t",
"MerkleTree.Low.add64_fits",
"FStar.UInt64.add",
"MerkleTree.Low.u32_64",
"MerkleTree.Low.offsets_connect"
] | [] | module MerkleTree.Low
open EverCrypt.Helpers
open FStar.All
open FStar.Integers
open FStar.Mul
open LowStar.Buffer
open LowStar.BufferOps
open LowStar.Vector
open LowStar.Regional
open LowStar.RVector
open LowStar.Regional.Instances
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
module MHS = FStar.Monotonic.HyperStack
module HH = FStar.Monotonic.HyperHeap
module B = LowStar.Buffer
module CB = LowStar.ConstBuffer
module V = LowStar.Vector
module RV = LowStar.RVector
module RVI = LowStar.Regional.Instances
module S = FStar.Seq
module U32 = FStar.UInt32
module U64 = FStar.UInt64
module MTH = MerkleTree.New.High
module MTS = MerkleTree.Spec
open Lib.IntTypes
open MerkleTree.Low.Datastructures
open MerkleTree.Low.Hashfunctions
open MerkleTree.Low.VectorExtras
#set-options "--z3rlimit 10 --initial_fuel 0 --max_fuel 0 --initial_ifuel 0 --max_ifuel 0"
type const_pointer (a:Type0) = b:CB.const_buffer a{CB.length b == 1 /\ CB.qual_of b == CB.MUTABLE}
/// Low-level Merkle tree data structure
///
// NOTE: because of the lack of 64-bit LowStar.Buffer support, we currently
// cannot change the types below.
type index_t = uint32_t
let uint32_32_max = 4294967295ul
inline_for_extraction
let uint32_max = 4294967295UL
let uint64_max = 18446744073709551615UL
let offset_range_limit = uint32_max
type offset_t = uint64_t
inline_for_extraction noextract unfold let u32_64 = Int.Cast.uint32_to_uint64
inline_for_extraction noextract unfold let u64_32 = Int.Cast.uint64_to_uint32
private inline_for_extraction
let offsets_connect (x:offset_t) (y:offset_t): Tot bool = y >= x && (y - x) <= offset_range_limit
private inline_for_extraction
let split_offset (tree:offset_t) (index:offset_t{offsets_connect tree index}): Tot index_t =
[@inline_let] let diff = U64.sub_mod index tree in
assert (diff <= offset_range_limit);
Int.Cast.uint64_to_uint32 diff
private inline_for_extraction
let add64_fits (x:offset_t) (i:index_t): Tot bool = uint64_max - x >= (u32_64 i)
private inline_for_extraction | false | false | MerkleTree.Low.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 0,
"initial_ifuel": 0,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 10,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val join_offset (tree: offset_t) (i: index_t{add64_fits tree i})
: Tot (r: offset_t{offsets_connect tree r}) | [] | MerkleTree.Low.join_offset | {
"file_name": "src/MerkleTree.Low.fst",
"git_rev": "3b0f086655c145aa23f58a97259ebf4cf112a4a3",
"git_url": "https://github.com/hacl-star/merkle-tree.git",
"project_name": "merkle-tree"
} | tree: MerkleTree.Low.offset_t -> i: MerkleTree.Low.index_t{MerkleTree.Low.add64_fits tree i}
-> r: MerkleTree.Low.offset_t{MerkleTree.Low.offsets_connect tree r} | {
"end_col": 25,
"end_line": 75,
"start_col": 2,
"start_line": 75
} |
Prims.Tot | val split_offset (tree: offset_t) (index: offset_t{offsets_connect tree index}) : Tot index_t | [
{
"abbrev": false,
"full_module": "MerkleTree.Low.VectorExtras",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree.Low.Hashfunctions",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree.Low.Datastructures",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": true,
"full_module": "MerkleTree.Spec",
"short_module": "MTS"
},
{
"abbrev": true,
"full_module": "MerkleTree.New.High",
"short_module": "MTH"
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "LowStar.Regional.Instances",
"short_module": "RVI"
},
{
"abbrev": true,
"full_module": "LowStar.RVector",
"short_module": "RV"
},
{
"abbrev": true,
"full_module": "LowStar.Vector",
"short_module": "V"
},
{
"abbrev": true,
"full_module": "LowStar.ConstBuffer",
"short_module": "CB"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.Monotonic.HyperHeap",
"short_module": "HH"
},
{
"abbrev": true,
"full_module": "FStar.Monotonic.HyperStack",
"short_module": "MHS"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "LowStar.Regional.Instances",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.RVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Regional",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Vector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.BufferOps",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Buffer",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Integers",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.All",
"short_module": null
},
{
"abbrev": false,
"full_module": "EverCrypt.Helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let split_offset (tree:offset_t) (index:offset_t{offsets_connect tree index}): Tot index_t =
[@inline_let] let diff = U64.sub_mod index tree in
assert (diff <= offset_range_limit);
Int.Cast.uint64_to_uint32 diff | val split_offset (tree: offset_t) (index: offset_t{offsets_connect tree index}) : Tot index_t
let split_offset (tree: offset_t) (index: offset_t{offsets_connect tree index}) : Tot index_t = | false | null | false | [@@ inline_let ]let diff = U64.sub_mod index tree in
assert (diff <= offset_range_limit);
Int.Cast.uint64_to_uint32 diff | {
"checked_file": "MerkleTree.Low.fst.checked",
"dependencies": [
"prims.fst.checked",
"MerkleTree.Spec.fst.checked",
"MerkleTree.New.High.fst.checked",
"MerkleTree.Low.VectorExtras.fst.checked",
"MerkleTree.Low.Hashfunctions.fst.checked",
"MerkleTree.Low.Datastructures.fst.checked",
"LowStar.Vector.fst.checked",
"LowStar.RVector.fst.checked",
"LowStar.Regional.Instances.fst.checked",
"LowStar.Regional.fst.checked",
"LowStar.ConstBuffer.fsti.checked",
"LowStar.BufferOps.fst.checked",
"LowStar.Buffer.fst.checked",
"Lib.IntTypes.fsti.checked",
"Lib.ByteBuffer.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.Properties.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Monotonic.HyperStack.fsti.checked",
"FStar.Monotonic.HyperHeap.fsti.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Integers.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked",
"FStar.All.fst.checked",
"EverCrypt.Helpers.fsti.checked"
],
"interface_file": false,
"source_file": "MerkleTree.Low.fst"
} | [
"total"
] | [
"MerkleTree.Low.offset_t",
"Prims.b2t",
"MerkleTree.Low.offsets_connect",
"FStar.Int.Cast.uint64_to_uint32",
"Prims.unit",
"Prims._assert",
"FStar.Integers.op_Less_Equals",
"FStar.Integers.Unsigned",
"FStar.Integers.W64",
"MerkleTree.Low.offset_range_limit",
"FStar.UInt64.t",
"FStar.UInt64.sub_mod",
"MerkleTree.Low.index_t"
] | [] | module MerkleTree.Low
open EverCrypt.Helpers
open FStar.All
open FStar.Integers
open FStar.Mul
open LowStar.Buffer
open LowStar.BufferOps
open LowStar.Vector
open LowStar.Regional
open LowStar.RVector
open LowStar.Regional.Instances
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
module MHS = FStar.Monotonic.HyperStack
module HH = FStar.Monotonic.HyperHeap
module B = LowStar.Buffer
module CB = LowStar.ConstBuffer
module V = LowStar.Vector
module RV = LowStar.RVector
module RVI = LowStar.Regional.Instances
module S = FStar.Seq
module U32 = FStar.UInt32
module U64 = FStar.UInt64
module MTH = MerkleTree.New.High
module MTS = MerkleTree.Spec
open Lib.IntTypes
open MerkleTree.Low.Datastructures
open MerkleTree.Low.Hashfunctions
open MerkleTree.Low.VectorExtras
#set-options "--z3rlimit 10 --initial_fuel 0 --max_fuel 0 --initial_ifuel 0 --max_ifuel 0"
type const_pointer (a:Type0) = b:CB.const_buffer a{CB.length b == 1 /\ CB.qual_of b == CB.MUTABLE}
/// Low-level Merkle tree data structure
///
// NOTE: because of the lack of 64-bit LowStar.Buffer support, we currently
// cannot change the types below.
type index_t = uint32_t
let uint32_32_max = 4294967295ul
inline_for_extraction
let uint32_max = 4294967295UL
let uint64_max = 18446744073709551615UL
let offset_range_limit = uint32_max
type offset_t = uint64_t
inline_for_extraction noextract unfold let u32_64 = Int.Cast.uint32_to_uint64
inline_for_extraction noextract unfold let u64_32 = Int.Cast.uint64_to_uint32
private inline_for_extraction
let offsets_connect (x:offset_t) (y:offset_t): Tot bool = y >= x && (y - x) <= offset_range_limit
private inline_for_extraction | false | false | MerkleTree.Low.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 0,
"initial_ifuel": 0,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 10,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val split_offset (tree: offset_t) (index: offset_t{offsets_connect tree index}) : Tot index_t | [] | MerkleTree.Low.split_offset | {
"file_name": "src/MerkleTree.Low.fst",
"git_rev": "3b0f086655c145aa23f58a97259ebf4cf112a4a3",
"git_url": "https://github.com/hacl-star/merkle-tree.git",
"project_name": "merkle-tree"
} |
tree: MerkleTree.Low.offset_t ->
index: MerkleTree.Low.offset_t{MerkleTree.Low.offsets_connect tree index}
-> MerkleTree.Low.index_t | {
"end_col": 32,
"end_line": 68,
"start_col": 2,
"start_line": 66
} |
Prims.GTot | val phashes (h: HS.mem) (p: path_p) : GTot (V.vector (hash #(Path?.hash_size (B.get h p 0)))) | [
{
"abbrev": false,
"full_module": "MerkleTree.Low.VectorExtras",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree.Low.Hashfunctions",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree.Low.Datastructures",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": true,
"full_module": "MerkleTree.Spec",
"short_module": "MTS"
},
{
"abbrev": true,
"full_module": "MerkleTree.New.High",
"short_module": "MTH"
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "LowStar.Regional.Instances",
"short_module": "RVI"
},
{
"abbrev": true,
"full_module": "LowStar.RVector",
"short_module": "RV"
},
{
"abbrev": true,
"full_module": "LowStar.Vector",
"short_module": "V"
},
{
"abbrev": true,
"full_module": "LowStar.ConstBuffer",
"short_module": "CB"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.Monotonic.HyperHeap",
"short_module": "HH"
},
{
"abbrev": true,
"full_module": "FStar.Monotonic.HyperStack",
"short_module": "MHS"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "LowStar.Regional.Instances",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.RVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Regional",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Vector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.BufferOps",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Buffer",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Integers",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.All",
"short_module": null
},
{
"abbrev": false,
"full_module": "EverCrypt.Helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let phashes (h:HS.mem) (p:path_p)
: GTot (V.vector (hash #(Path?.hash_size (B.get h p 0))))
= Path?.hashes (B.get h p 0) | val phashes (h: HS.mem) (p: path_p) : GTot (V.vector (hash #(Path?.hash_size (B.get h p 0))))
let phashes (h: HS.mem) (p: path_p) : GTot (V.vector (hash #(Path?.hash_size (B.get h p 0)))) = | false | null | false | Path?.hashes (B.get h p 0) | {
"checked_file": "MerkleTree.Low.fst.checked",
"dependencies": [
"prims.fst.checked",
"MerkleTree.Spec.fst.checked",
"MerkleTree.New.High.fst.checked",
"MerkleTree.Low.VectorExtras.fst.checked",
"MerkleTree.Low.Hashfunctions.fst.checked",
"MerkleTree.Low.Datastructures.fst.checked",
"LowStar.Vector.fst.checked",
"LowStar.RVector.fst.checked",
"LowStar.Regional.Instances.fst.checked",
"LowStar.Regional.fst.checked",
"LowStar.ConstBuffer.fsti.checked",
"LowStar.BufferOps.fst.checked",
"LowStar.Buffer.fst.checked",
"Lib.IntTypes.fsti.checked",
"Lib.ByteBuffer.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.Properties.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Monotonic.HyperStack.fsti.checked",
"FStar.Monotonic.HyperHeap.fsti.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Integers.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked",
"FStar.All.fst.checked",
"EverCrypt.Helpers.fsti.checked"
],
"interface_file": false,
"source_file": "MerkleTree.Low.fst"
} | [
"sometrivial"
] | [
"FStar.Monotonic.HyperStack.mem",
"MerkleTree.Low.path_p",
"MerkleTree.Low.__proj__Path__item__hashes",
"LowStar.Monotonic.Buffer.get",
"MerkleTree.Low.path",
"LowStar.Buffer.trivial_preorder",
"LowStar.Vector.vector",
"MerkleTree.Low.Datastructures.hash",
"MerkleTree.Low.__proj__Path__item__hash_size"
] | [] | module MerkleTree.Low
open EverCrypt.Helpers
open FStar.All
open FStar.Integers
open FStar.Mul
open LowStar.Buffer
open LowStar.BufferOps
open LowStar.Vector
open LowStar.Regional
open LowStar.RVector
open LowStar.Regional.Instances
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
module MHS = FStar.Monotonic.HyperStack
module HH = FStar.Monotonic.HyperHeap
module B = LowStar.Buffer
module CB = LowStar.ConstBuffer
module V = LowStar.Vector
module RV = LowStar.RVector
module RVI = LowStar.Regional.Instances
module S = FStar.Seq
module U32 = FStar.UInt32
module U64 = FStar.UInt64
module MTH = MerkleTree.New.High
module MTS = MerkleTree.Spec
open Lib.IntTypes
open MerkleTree.Low.Datastructures
open MerkleTree.Low.Hashfunctions
open MerkleTree.Low.VectorExtras
#set-options "--z3rlimit 10 --initial_fuel 0 --max_fuel 0 --initial_ifuel 0 --max_ifuel 0"
type const_pointer (a:Type0) = b:CB.const_buffer a{CB.length b == 1 /\ CB.qual_of b == CB.MUTABLE}
/// Low-level Merkle tree data structure
///
// NOTE: because of the lack of 64-bit LowStar.Buffer support, we currently
// cannot change the types below.
type index_t = uint32_t
let uint32_32_max = 4294967295ul
inline_for_extraction
let uint32_max = 4294967295UL
let uint64_max = 18446744073709551615UL
let offset_range_limit = uint32_max
type offset_t = uint64_t
inline_for_extraction noextract unfold let u32_64 = Int.Cast.uint32_to_uint64
inline_for_extraction noextract unfold let u64_32 = Int.Cast.uint64_to_uint32
private inline_for_extraction
let offsets_connect (x:offset_t) (y:offset_t): Tot bool = y >= x && (y - x) <= offset_range_limit
private inline_for_extraction
let split_offset (tree:offset_t) (index:offset_t{offsets_connect tree index}): Tot index_t =
[@inline_let] let diff = U64.sub_mod index tree in
assert (diff <= offset_range_limit);
Int.Cast.uint64_to_uint32 diff
private inline_for_extraction
let add64_fits (x:offset_t) (i:index_t): Tot bool = uint64_max - x >= (u32_64 i)
private inline_for_extraction
let join_offset (tree:offset_t) (i:index_t{add64_fits tree i}): Tot (r:offset_t{offsets_connect tree r}) =
U64.add tree (u32_64 i)
inline_for_extraction val merkle_tree_size_lg: uint32_t
let merkle_tree_size_lg = 32ul
// A Merkle tree `MT i j hs rhs_ok rhs` stores all necessary hashes to generate
// a Merkle path for each element from the index `i` to `j-1`.
// - Parameters
// `hs`: a 2-dim store for hashes, where `hs[0]` contains leaf hash values.
// `rhs_ok`: to check the rightmost hashes are up-to-date
// `rhs`: a store for "rightmost" hashes, manipulated only when required to
// calculate some merkle paths that need the rightmost hashes
// as a part of them.
// `mroot`: during the construction of `rhs` we can also calculate the Merkle
// root of the tree. If `rhs_ok` is true then it has the up-to-date
// root value.
noeq type merkle_tree =
| MT: hash_size:hash_size_t ->
offset:offset_t ->
i:index_t -> j:index_t{i <= j /\ add64_fits offset j} ->
hs:hash_vv hash_size {V.size_of hs = merkle_tree_size_lg} ->
rhs_ok:bool ->
rhs:hash_vec #hash_size {V.size_of rhs = merkle_tree_size_lg} ->
mroot:hash #hash_size ->
hash_spec:Ghost.erased (MTS.hash_fun_t #(U32.v hash_size)) ->
hash_fun:hash_fun_t #hash_size #hash_spec ->
merkle_tree
type mt_p = B.pointer merkle_tree
type const_mt_p = const_pointer merkle_tree
inline_for_extraction
let merkle_tree_conditions (#hsz:Ghost.erased hash_size_t) (offset:uint64_t) (i j:uint32_t) (hs:hash_vv hsz) (rhs_ok:bool) (rhs:hash_vec #hsz) (mroot:hash #hsz): Tot bool =
j >= i && add64_fits offset j &&
V.size_of hs = merkle_tree_size_lg &&
V.size_of rhs = merkle_tree_size_lg
// The maximum number of currently held elements in the tree is (2^32 - 1).
// cwinter: even when using 64-bit indices, we fail if the underlying 32-bit
// vector is full; this can be fixed if necessary.
private inline_for_extraction
val mt_not_full_nst: mtv:merkle_tree -> Tot bool
let mt_not_full_nst mtv = MT?.j mtv < uint32_32_max
val mt_not_full: HS.mem -> mt_p -> GTot bool
let mt_not_full h mt = mt_not_full_nst (B.get h mt 0)
/// (Memory) Safety
val offset_of: i:index_t -> Tot index_t
let offset_of i = if i % 2ul = 0ul then i else i - 1ul
// `mt_safe_elts` says that it is safe to access an element from `i` to `j - 1`
// at level `lv` in the Merkle tree, i.e., hs[lv][k] (i <= k < j) is a valid
// element.
inline_for_extraction noextract
val mt_safe_elts:
#hsz:hash_size_t ->
h:HS.mem -> lv:uint32_t{lv <= merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
i:index_t -> j:index_t{j >= i} ->
GTot Type0 (decreases (32 - U32.v lv))
let rec mt_safe_elts #hsz h lv hs i j =
if lv = merkle_tree_size_lg then true
else (let ofs = offset_of i in
V.size_of (V.get h hs lv) == j - ofs /\
mt_safe_elts #hsz h (lv + 1ul) hs (i / 2ul) (j / 2ul))
#push-options "--initial_fuel 1 --max_fuel 1"
val mt_safe_elts_constr:
#hsz:hash_size_t ->
h:HS.mem -> lv:uint32_t{lv < merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
i:index_t -> j:index_t{j >= i} ->
Lemma (requires (V.size_of (V.get h hs lv) == j - offset_of i /\
mt_safe_elts #hsz h (lv + 1ul) hs (i / 2ul) (j / 2ul)))
(ensures (mt_safe_elts #hsz h lv hs i j))
let mt_safe_elts_constr #_ h lv hs i j = ()
val mt_safe_elts_head:
#hsz:hash_size_t ->
h:HS.mem -> lv:uint32_t{lv < merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
i:index_t -> j:index_t{j >= i} ->
Lemma (requires (mt_safe_elts #hsz h lv hs i j))
(ensures (V.size_of (V.get h hs lv) == j - offset_of i))
let mt_safe_elts_head #_ h lv hs i j = ()
val mt_safe_elts_rec:
#hsz:hash_size_t ->
h:HS.mem -> lv:uint32_t{lv < merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
i:index_t -> j:index_t{j >= i} ->
Lemma (requires (mt_safe_elts #hsz h lv hs i j))
(ensures (mt_safe_elts #hsz h (lv + 1ul) hs (i / 2ul) (j / 2ul)))
let mt_safe_elts_rec #_ h lv hs i j = ()
val mt_safe_elts_init:
#hsz:hash_size_t ->
h:HS.mem -> lv:uint32_t{lv <= merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
Lemma (requires (V.forall_ h hs lv (V.size_of hs)
(fun hv -> V.size_of hv = 0ul)))
(ensures (mt_safe_elts #hsz h lv hs 0ul 0ul))
(decreases (32 - U32.v lv))
let rec mt_safe_elts_init #hsz h lv hs =
if lv = merkle_tree_size_lg then ()
else mt_safe_elts_init #hsz h (lv + 1ul) hs
#pop-options
val mt_safe_elts_preserved:
#hsz:hash_size_t ->
lv:uint32_t{lv <= merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
i:index_t -> j:index_t{j >= i} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 hs /\
mt_safe_elts #hsz h0 lv hs i j /\
loc_disjoint p (V.loc_vector_within hs lv (V.size_of hs)) /\
modifies p h0 h1))
(ensures (mt_safe_elts #hsz h1 lv hs i j))
(decreases (32 - U32.v lv))
[SMTPat (V.live h0 hs);
SMTPat (mt_safe_elts #hsz h0 lv hs i j);
SMTPat (loc_disjoint p (RV.loc_rvector hs));
SMTPat (modifies p h0 h1)]
#push-options "--z3rlimit 100 --initial_fuel 2 --max_fuel 2"
let rec mt_safe_elts_preserved #hsz lv hs i j p h0 h1 =
if lv = merkle_tree_size_lg then ()
else (V.get_preserved hs lv p h0 h1;
mt_safe_elts_preserved #hsz (lv + 1ul) hs (i / 2ul) (j / 2ul) p h0 h1)
#pop-options
// `mt_safe` is the invariant of a Merkle tree through its lifetime.
// It includes liveness, regionality, disjointness (to each data structure),
// and valid element access (`mt_safe_elts`).
inline_for_extraction noextract
val mt_safe: HS.mem -> mt_p -> GTot Type0
let mt_safe h mt =
B.live h mt /\ B.freeable mt /\
(let mtv = B.get h mt 0 in
// Liveness & Accessibility
RV.rv_inv h (MT?.hs mtv) /\
RV.rv_inv h (MT?.rhs mtv) /\
Rgl?.r_inv (hreg (MT?.hash_size mtv)) h (MT?.mroot mtv) /\
mt_safe_elts h 0ul (MT?.hs mtv) (MT?.i mtv) (MT?.j mtv) /\
// Regionality
HH.extends (V.frameOf (MT?.hs mtv)) (B.frameOf mt) /\
HH.extends (V.frameOf (MT?.rhs mtv)) (B.frameOf mt) /\
HH.extends (B.frameOf (MT?.mroot mtv)) (B.frameOf mt) /\
HH.disjoint (V.frameOf (MT?.hs mtv)) (V.frameOf (MT?.rhs mtv)) /\
HH.disjoint (V.frameOf (MT?.hs mtv)) (B.frameOf (MT?.mroot mtv)) /\
HH.disjoint (V.frameOf (MT?.rhs mtv)) (B.frameOf (MT?.mroot mtv)))
// Since a Merkle tree satisfies regionality, it's ok to take all regions from
// a tree pointer as a location of the tree.
val mt_loc: mt_p -> GTot loc
let mt_loc mt = B.loc_all_regions_from false (B.frameOf mt)
val mt_safe_preserved:
mt:mt_p -> p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (mt_safe h0 mt /\
loc_disjoint p (mt_loc mt) /\
modifies p h0 h1))
(ensures (B.get h0 mt 0 == B.get h1 mt 0 /\
mt_safe h1 mt))
let mt_safe_preserved mt p h0 h1 =
assert (loc_includes (mt_loc mt) (B.loc_buffer mt));
let mtv = B.get h0 mt 0 in
assert (loc_includes (mt_loc mt) (RV.loc_rvector (MT?.hs mtv)));
assert (loc_includes (mt_loc mt) (RV.loc_rvector (MT?.rhs mtv)));
assert (loc_includes (mt_loc mt) (V.loc_vector (MT?.hs mtv)));
assert (loc_includes (mt_loc mt)
(B.loc_all_regions_from false (B.frameOf (MT?.mroot mtv))));
RV.rv_inv_preserved (MT?.hs mtv) p h0 h1;
RV.rv_inv_preserved (MT?.rhs mtv) p h0 h1;
Rgl?.r_sep (hreg (MT?.hash_size mtv)) (MT?.mroot mtv) p h0 h1;
V.loc_vector_within_included (MT?.hs mtv) 0ul (V.size_of (MT?.hs mtv));
mt_safe_elts_preserved 0ul (MT?.hs mtv) (MT?.i mtv) (MT?.j mtv) p h0 h1
/// Lifting to a high-level Merkle tree structure
val mt_safe_elts_spec:
#hsz:hash_size_t ->
h:HS.mem ->
lv:uint32_t{lv <= merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
i:index_t ->
j:index_t{j >= i} ->
Lemma (requires (RV.rv_inv h hs /\
mt_safe_elts #hsz h lv hs i j))
(ensures (MTH.hs_wf_elts #(U32.v hsz)
(U32.v lv) (RV.as_seq h hs)
(U32.v i) (U32.v j)))
(decreases (32 - U32.v lv))
#push-options "--z3rlimit 100 --initial_fuel 2 --max_fuel 2"
let rec mt_safe_elts_spec #_ h lv hs i j =
if lv = merkle_tree_size_lg then ()
else mt_safe_elts_spec h (lv + 1ul) hs (i / 2ul) (j / 2ul)
#pop-options
val merkle_tree_lift:
h:HS.mem ->
mtv:merkle_tree{
RV.rv_inv h (MT?.hs mtv) /\
RV.rv_inv h (MT?.rhs mtv) /\
Rgl?.r_inv (hreg (MT?.hash_size mtv)) h (MT?.mroot mtv) /\
mt_safe_elts #(MT?.hash_size mtv) h 0ul (MT?.hs mtv) (MT?.i mtv) (MT?.j mtv)} ->
GTot (r:MTH.merkle_tree #(U32.v (MT?.hash_size mtv)) {MTH.mt_wf_elts #_ r})
let merkle_tree_lift h mtv =
mt_safe_elts_spec h 0ul (MT?.hs mtv) (MT?.i mtv) (MT?.j mtv);
MTH.MT #(U32.v (MT?.hash_size mtv))
(U32.v (MT?.i mtv))
(U32.v (MT?.j mtv))
(RV.as_seq h (MT?.hs mtv))
(MT?.rhs_ok mtv)
(RV.as_seq h (MT?.rhs mtv))
(Rgl?.r_repr (hreg (MT?.hash_size mtv)) h (MT?.mroot mtv))
(Ghost.reveal (MT?.hash_spec mtv))
val mt_lift:
h:HS.mem -> mt:mt_p{mt_safe h mt} ->
GTot (r:MTH.merkle_tree #(U32.v (MT?.hash_size (B.get h mt 0))) {MTH.mt_wf_elts #_ r})
let mt_lift h mt =
merkle_tree_lift h (B.get h mt 0)
val mt_preserved:
mt:mt_p -> p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (mt_safe h0 mt /\
loc_disjoint p (mt_loc mt) /\
modifies p h0 h1))
(ensures (mt_safe_preserved mt p h0 h1;
mt_lift h0 mt == mt_lift h1 mt))
let mt_preserved mt p h0 h1 =
assert (loc_includes (B.loc_all_regions_from false (B.frameOf mt))
(B.loc_buffer mt));
B.modifies_buffer_elim mt p h0 h1;
assert (B.get h0 mt 0 == B.get h1 mt 0);
assert (loc_includes (B.loc_all_regions_from false (B.frameOf mt))
(RV.loc_rvector (MT?.hs (B.get h0 mt 0))));
assert (loc_includes (B.loc_all_regions_from false (B.frameOf mt))
(RV.loc_rvector (MT?.rhs (B.get h0 mt 0))));
assert (loc_includes (B.loc_all_regions_from false (B.frameOf mt))
(B.loc_buffer (MT?.mroot (B.get h0 mt 0))));
RV.as_seq_preserved (MT?.hs (B.get h0 mt 0)) p h0 h1;
RV.as_seq_preserved (MT?.rhs (B.get h0 mt 0)) p h0 h1;
B.modifies_buffer_elim (MT?.mroot (B.get h0 mt 0)) p h0 h1
/// Construction
// Note that the public function for creation is `mt_create` defined below,
// which builds a tree with an initial hash.
#push-options "--z3rlimit 100 --initial_fuel 1 --max_fuel 1 --initial_ifuel 1 --max_ifuel 1"
private
val create_empty_mt:
hash_size:hash_size_t ->
hash_spec:Ghost.erased (MTS.hash_fun_t #(U32.v hash_size)) ->
hash_fun:hash_fun_t #hash_size #hash_spec ->
r:HST.erid ->
HST.ST mt_p
(requires (fun _ -> true))
(ensures (fun h0 mt h1 ->
let dmt = B.get h1 mt 0 in
// memory safety
B.frameOf mt = r /\
modifies (mt_loc mt) h0 h1 /\
mt_safe h1 mt /\
mt_not_full h1 mt /\
// correctness
MT?.hash_size dmt = hash_size /\
MT?.offset dmt = 0UL /\
merkle_tree_lift h1 dmt == MTH.create_empty_mt #_ #(Ghost.reveal hash_spec) ()))
let create_empty_mt hsz hash_spec hash_fun r =
[@inline_let] let hrg = hreg hsz in
[@inline_let] let hvrg = hvreg hsz in
[@inline_let] let hvvrg = hvvreg hsz in
let hs_region = HST.new_region r in
let hs = RV.alloc_rid hvrg merkle_tree_size_lg hs_region in
let h0 = HST.get () in
mt_safe_elts_init #hsz h0 0ul hs;
let rhs_region = HST.new_region r in
let rhs = RV.alloc_rid hrg merkle_tree_size_lg rhs_region in
let h1 = HST.get () in
assert (RV.as_seq h1 rhs == S.create 32 (MTH.hash_init #(U32.v hsz)));
RV.rv_inv_preserved hs (V.loc_vector rhs) h0 h1;
RV.as_seq_preserved hs (V.loc_vector rhs) h0 h1;
V.loc_vector_within_included hs 0ul (V.size_of hs);
mt_safe_elts_preserved #hsz 0ul hs 0ul 0ul (V.loc_vector rhs) h0 h1;
let mroot_region = HST.new_region r in
let mroot = rg_alloc hrg mroot_region in
let h2 = HST.get () in
RV.as_seq_preserved hs loc_none h1 h2;
RV.as_seq_preserved rhs loc_none h1 h2;
mt_safe_elts_preserved #hsz 0ul hs 0ul 0ul loc_none h1 h2;
let mt = B.malloc r (MT hsz 0UL 0ul 0ul hs false rhs mroot hash_spec hash_fun) 1ul in
let h3 = HST.get () in
RV.as_seq_preserved hs loc_none h2 h3;
RV.as_seq_preserved rhs loc_none h2 h3;
Rgl?.r_sep hrg mroot loc_none h2 h3;
mt_safe_elts_preserved #hsz 0ul hs 0ul 0ul loc_none h2 h3;
mt
#pop-options
/// Destruction (free)
val mt_free: mt:mt_p ->
HST.ST unit
(requires (fun h0 -> mt_safe h0 mt))
(ensures (fun h0 _ h1 -> modifies (mt_loc mt) h0 h1))
#push-options "--z3rlimit 100"
let mt_free mt =
let mtv = !*mt in
RV.free (MT?.hs mtv);
RV.free (MT?.rhs mtv);
[@inline_let] let rg = hreg (MT?.hash_size mtv) in
rg_free rg (MT?.mroot mtv);
B.free mt
#pop-options
/// Insertion
private
val as_seq_sub_upd:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector #a #rst rg ->
i:uint32_t{i < V.size_of rv} -> v:Rgl?.repr rg ->
Lemma (requires (RV.rv_inv h rv))
(ensures (S.equal (S.upd (RV.as_seq h rv) (U32.v i) v)
(S.append
(RV.as_seq_sub h rv 0ul i)
(S.cons v (RV.as_seq_sub h rv (i + 1ul) (V.size_of rv))))))
#push-options "--z3rlimit 20"
let as_seq_sub_upd #a #rst #rg h rv i v =
Seq.Properties.slice_upd (RV.as_seq h rv) 0 (U32.v i) (U32.v i) v;
Seq.Properties.slice_upd (RV.as_seq h rv) (U32.v i + 1) (U32.v (V.size_of rv)) (U32.v i) v;
RV.as_seq_seq_slice rg h (V.as_seq h rv)
0 (U32.v (V.size_of rv)) 0 (U32.v i);
assert (S.equal (S.slice (RV.as_seq h rv) 0 (U32.v i))
(RV.as_seq_sub h rv 0ul i));
RV.as_seq_seq_slice rg h (V.as_seq h rv)
0 (U32.v (V.size_of rv)) (U32.v i + 1) (U32.v (V.size_of rv));
assert (S.equal (S.slice (RV.as_seq h rv) (U32.v i + 1) (U32.v (V.size_of rv)))
(RV.as_seq_sub h rv (i + 1ul) (V.size_of rv)));
assert (S.index (S.upd (RV.as_seq h rv) (U32.v i) v) (U32.v i) == v)
#pop-options
// `hash_vv_insert_copy` inserts a hash element at a level `lv`, by copying
// and pushing its content to `hs[lv]`. For detailed insertion procedure, see
// `insert_` and `mt_insert`.
#push-options "--z3rlimit 100 --initial_fuel 1 --max_fuel 1"
private
inline_for_extraction
val hash_vv_insert_copy:
#hsz:hash_size_t ->
lv:uint32_t{lv < merkle_tree_size_lg} ->
i:Ghost.erased index_t ->
j:index_t{
Ghost.reveal i <= j &&
U32.v j < pow2 (32 - U32.v lv) - 1 &&
j < uint32_32_max} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
v:hash #hsz ->
HST.ST unit
(requires (fun h0 ->
RV.rv_inv h0 hs /\
Rgl?.r_inv (hreg hsz) h0 v /\
HH.disjoint (V.frameOf hs) (B.frameOf v) /\
mt_safe_elts #hsz h0 lv hs (Ghost.reveal i) j))
(ensures (fun h0 _ h1 ->
// memory safety
modifies (loc_union
(RV.rs_loc_elem (hvreg hsz) (V.as_seq h0 hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul)))
h0 h1 /\
RV.rv_inv h1 hs /\
Rgl?.r_inv (hreg hsz) h1 v /\
V.size_of (V.get h1 hs lv) == j + 1ul - offset_of (Ghost.reveal i) /\
V.size_of (V.get h1 hs lv) == V.size_of (V.get h0 hs lv) + 1ul /\
mt_safe_elts #hsz h1 (lv + 1ul) hs (Ghost.reveal i / 2ul) (j / 2ul) /\
RV.rv_loc_elems h0 hs (lv + 1ul) (V.size_of hs) ==
RV.rv_loc_elems h1 hs (lv + 1ul) (V.size_of hs) /\
// correctness
(mt_safe_elts_spec #hsz h0 lv hs (Ghost.reveal i) j;
S.equal (RV.as_seq h1 hs)
(MTH.hashess_insert
(U32.v lv) (U32.v (Ghost.reveal i)) (U32.v j)
(RV.as_seq h0 hs) (Rgl?.r_repr (hreg hsz) h0 v))) /\
S.equal (S.index (RV.as_seq h1 hs) (U32.v lv))
(S.snoc (S.index (RV.as_seq h0 hs) (U32.v lv))
(Rgl?.r_repr (hreg hsz) h0 v))))
let hash_vv_insert_copy #hsz lv i j hs v =
let hh0 = HST.get () in
mt_safe_elts_rec hh0 lv hs (Ghost.reveal i) j;
/// 1) Insert an element at the level `lv`, where the new vector is not yet
/// connected to `hs`.
let ihv = RV.insert_copy (hcpy hsz) (V.index hs lv) v in
let hh1 = HST.get () in
// 1-0) Basic disjointness conditions
V.forall2_forall_left hh0 hs 0ul (V.size_of hs) lv
(fun b1 b2 -> HH.disjoint (Rgl?.region_of (hvreg hsz) b1)
(Rgl?.region_of (hvreg hsz) b2));
V.forall2_forall_right hh0 hs 0ul (V.size_of hs) lv
(fun b1 b2 -> HH.disjoint (Rgl?.region_of (hvreg hsz) b1)
(Rgl?.region_of (hvreg hsz) b2));
V.loc_vector_within_included hs lv (lv + 1ul);
V.loc_vector_within_included hs (lv + 1ul) (V.size_of hs);
V.loc_vector_within_disjoint hs lv (lv + 1ul) (lv + 1ul) (V.size_of hs);
// 1-1) For the `modifies` postcondition.
assert (modifies (RV.rs_loc_elem (hvreg hsz) (V.as_seq hh0 hs) (U32.v lv)) hh0 hh1);
// 1-2) Preservation
Rgl?.r_sep (hreg hsz) v (RV.loc_rvector (V.get hh0 hs lv)) hh0 hh1;
RV.rv_loc_elems_preserved
hs (lv + 1ul) (V.size_of hs)
(RV.loc_rvector (V.get hh0 hs lv)) hh0 hh1;
// 1-3) For `mt_safe_elts`
assert (V.size_of ihv == j + 1ul - offset_of (Ghost.reveal i)); // head updated
mt_safe_elts_preserved
(lv + 1ul) hs (Ghost.reveal i / 2ul) (j / 2ul)
(RV.loc_rvector (V.get hh0 hs lv)) hh0 hh1; // tail not yet
// 1-4) For the `rv_inv` postcondition
RV.rs_loc_elems_elem_disj
(hvreg hsz) (V.as_seq hh0 hs) (V.frameOf hs)
0 (U32.v (V.size_of hs)) 0 (U32.v lv) (U32.v lv);
RV.rs_loc_elems_parent_disj
(hvreg hsz) (V.as_seq hh0 hs) (V.frameOf hs)
0 (U32.v lv);
RV.rv_elems_inv_preserved
hs 0ul lv (RV.loc_rvector (V.get hh0 hs lv))
hh0 hh1;
assert (RV.rv_elems_inv hh1 hs 0ul lv);
RV.rs_loc_elems_elem_disj
(hvreg hsz) (V.as_seq hh0 hs) (V.frameOf hs)
0 (U32.v (V.size_of hs))
(U32.v lv + 1) (U32.v (V.size_of hs))
(U32.v lv);
RV.rs_loc_elems_parent_disj
(hvreg hsz) (V.as_seq hh0 hs) (V.frameOf hs)
(U32.v lv + 1) (U32.v (V.size_of hs));
RV.rv_elems_inv_preserved
hs (lv + 1ul) (V.size_of hs) (RV.loc_rvector (V.get hh0 hs lv))
hh0 hh1;
assert (RV.rv_elems_inv hh1 hs (lv + 1ul) (V.size_of hs));
// assert (rv_itself_inv hh1 hs);
// assert (elems_reg hh1 hs);
// 1-5) Correctness
assert (S.equal (RV.as_seq hh1 ihv)
(S.snoc (RV.as_seq hh0 (V.get hh0 hs lv)) (Rgl?.r_repr (hreg hsz) hh0 v)));
/// 2) Assign the updated vector to `hs` at the level `lv`.
RV.assign hs lv ihv;
let hh2 = HST.get () in
// 2-1) For the `modifies` postcondition.
assert (modifies (V.loc_vector_within hs lv (lv + 1ul)) hh1 hh2);
assert (modifies (loc_union
(RV.rs_loc_elem (hvreg hsz) (V.as_seq hh0 hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul))) hh0 hh2);
// 2-2) Preservation
Rgl?.r_sep (hreg hsz) v (RV.loc_rvector hs) hh1 hh2;
RV.rv_loc_elems_preserved
hs (lv + 1ul) (V.size_of hs)
(V.loc_vector_within hs lv (lv + 1ul)) hh1 hh2;
// 2-3) For `mt_safe_elts`
assert (V.size_of (V.get hh2 hs lv) == j + 1ul - offset_of (Ghost.reveal i));
mt_safe_elts_preserved
(lv + 1ul) hs (Ghost.reveal i / 2ul) (j / 2ul)
(V.loc_vector_within hs lv (lv + 1ul)) hh1 hh2;
// 2-4) Correctness
RV.as_seq_sub_preserved hs 0ul lv (loc_rvector ihv) hh0 hh1;
RV.as_seq_sub_preserved hs (lv + 1ul) merkle_tree_size_lg (loc_rvector ihv) hh0 hh1;
assert (S.equal (RV.as_seq hh2 hs)
(S.append
(RV.as_seq_sub hh0 hs 0ul lv)
(S.cons (RV.as_seq hh1 ihv)
(RV.as_seq_sub hh0 hs (lv + 1ul) merkle_tree_size_lg))));
as_seq_sub_upd hh0 hs lv (RV.as_seq hh1 ihv)
#pop-options
private
val insert_index_helper_even:
lv:uint32_t{lv < merkle_tree_size_lg} ->
j:index_t{U32.v j < pow2 (32 - U32.v lv) - 1} ->
Lemma (requires (j % 2ul <> 1ul))
(ensures (U32.v j % 2 <> 1 /\ j / 2ul == (j + 1ul) / 2ul))
let insert_index_helper_even lv j = ()
#push-options "--z3rlimit 100 --initial_fuel 1 --max_fuel 1 --initial_ifuel 1 --max_ifuel 1"
private
val insert_index_helper_odd:
lv:uint32_t{lv < merkle_tree_size_lg} ->
i:index_t ->
j:index_t{i <= j && U32.v j < pow2 (32 - U32.v lv) - 1} ->
Lemma (requires (j % 2ul = 1ul /\
j < uint32_32_max))
(ensures (U32.v j % 2 = 1 /\
U32.v (j / 2ul) < pow2 (32 - U32.v (lv + 1ul)) - 1 /\
(j + 1ul) / 2ul == j / 2ul + 1ul /\
j - offset_of i > 0ul))
let insert_index_helper_odd lv i j = ()
#pop-options
private
val loc_union_assoc_4:
a:loc -> b:loc -> c:loc -> d:loc ->
Lemma (loc_union (loc_union a b) (loc_union c d) ==
loc_union (loc_union a c) (loc_union b d))
let loc_union_assoc_4 a b c d =
loc_union_assoc (loc_union a b) c d;
loc_union_assoc a b c;
loc_union_assoc a c b;
loc_union_assoc (loc_union a c) b d
private
val insert_modifies_rec_helper:
#hsz:hash_size_t ->
lv:uint32_t{lv < merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
aloc:loc ->
h:HS.mem ->
Lemma (loc_union
(loc_union
(loc_union
(RV.rs_loc_elem (hvreg hsz) (V.as_seq h hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul)))
aloc)
(loc_union
(loc_union
(RV.rv_loc_elems h hs (lv + 1ul) (V.size_of hs))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs)))
aloc) ==
loc_union
(loc_union
(RV.rv_loc_elems h hs lv (V.size_of hs))
(V.loc_vector_within hs lv (V.size_of hs)))
aloc)
#push-options "--z3rlimit 100 --initial_fuel 2 --max_fuel 2"
let insert_modifies_rec_helper #hsz lv hs aloc h =
assert (V.loc_vector_within hs lv (V.size_of hs) ==
loc_union (V.loc_vector_within hs lv (lv + 1ul))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs)));
RV.rs_loc_elems_rec_inverse (hvreg hsz) (V.as_seq h hs) (U32.v lv) (U32.v (V.size_of hs));
assert (RV.rv_loc_elems h hs lv (V.size_of hs) ==
loc_union (RV.rs_loc_elem (hvreg hsz) (V.as_seq h hs) (U32.v lv))
(RV.rv_loc_elems h hs (lv + 1ul) (V.size_of hs)));
// Applying some association rules...
loc_union_assoc
(loc_union
(RV.rs_loc_elem (hvreg hsz) (V.as_seq h hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul))) aloc
(loc_union
(loc_union
(RV.rv_loc_elems h hs (lv + 1ul) (V.size_of hs))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs)))
aloc);
loc_union_assoc
(loc_union
(RV.rv_loc_elems h hs (lv + 1ul) (V.size_of hs))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs))) aloc aloc;
loc_union_assoc
(loc_union
(RV.rs_loc_elem (hvreg hsz) (V.as_seq h hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul)))
(loc_union
(RV.rv_loc_elems h hs (lv + 1ul) (V.size_of hs))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs)))
aloc;
loc_union_assoc_4
(RV.rs_loc_elem (hvreg hsz) (V.as_seq h hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul))
(RV.rv_loc_elems h hs (lv + 1ul) (V.size_of hs))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs))
#pop-options
private
val insert_modifies_union_loc_weakening:
l1:loc -> l2:loc -> l3:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (modifies l1 h0 h1))
(ensures (modifies (loc_union (loc_union l1 l2) l3) h0 h1))
let insert_modifies_union_loc_weakening l1 l2 l3 h0 h1 =
B.loc_includes_union_l l1 l2 l1;
B.loc_includes_union_l (loc_union l1 l2) l3 (loc_union l1 l2)
private
val insert_snoc_last_helper:
#a:Type -> s:S.seq a{S.length s > 0} -> v:a ->
Lemma (S.index (S.snoc s v) (S.length s - 1) == S.last s)
let insert_snoc_last_helper #a s v = ()
private
val rv_inv_rv_elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (RV.rv_inv h rv))
(ensures (RV.rv_elems_reg h rv i j))
let rv_inv_rv_elems_reg #a #rst #rg h rv i j = ()
// `insert_` recursively inserts the proper hashes into each level `lv` by
// accumulating a compressed hash. For example, if there are three leaf elements
// in the tree, `insert_` will change `hs` as follows:
// (`hij` is a compressed hash from `hi` to `hj`)
//
// BEFORE INSERTION AFTER INSERTION
// lv
// 0 h0 h1 h2 ====> h0 h1 h2 h3
// 1 h01 h01 h23
// 2 h03
//
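// A step-by-step reading of the diagram above (informal, mirroring the code of
// `insert_` below): inserting `h3` with `lv = 0ul`, `i = 0ul`, `j = 3ul`:
// - level 0: `hash_vv_insert_copy` appends the accumulator (here `h3`) to
//   `hs[0]`; since `j % 2ul = 1ul`, `hash_fun` compresses `h2` with the
//   accumulator, which becomes `h23`, and we recurse with `lv = 1ul`, `j = 1ul`;
// - level 1: `h23` is appended to `hs[1]`; `j % 2ul = 1ul` again, so the
//   accumulator becomes the compression of `h01` and `h23`, i.e. `h03`, and we
//   recurse with `lv = 2ul`, `j = 0ul`;
// - level 2: `h03` is appended to `hs[2]`; `j % 2ul = 0ul`, so the recursion
//   stops at this level.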
private
val insert_:
#hsz:hash_size_t ->
#hash_spec:Ghost.erased (MTS.hash_fun_t #(U32.v hsz)) ->
lv:uint32_t{lv < merkle_tree_size_lg} ->
i:Ghost.erased index_t ->
j:index_t{
Ghost.reveal i <= j &&
U32.v j < pow2 (32 - U32.v lv) - 1 &&
j < uint32_32_max} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
acc:hash #hsz ->
hash_fun:hash_fun_t #hsz #hash_spec ->
HST.ST unit
(requires (fun h0 ->
RV.rv_inv h0 hs /\
Rgl?.r_inv (hreg hsz) h0 acc /\
HH.disjoint (V.frameOf hs) (B.frameOf acc) /\
mt_safe_elts h0 lv hs (Ghost.reveal i) j))
(ensures (fun h0 _ h1 ->
// memory safety
modifies (loc_union
(loc_union
(RV.rv_loc_elems h0 hs lv (V.size_of hs))
(V.loc_vector_within hs lv (V.size_of hs)))
(B.loc_all_regions_from false (B.frameOf acc)))
h0 h1 /\
RV.rv_inv h1 hs /\
Rgl?.r_inv (hreg hsz) h1 acc /\
mt_safe_elts h1 lv hs (Ghost.reveal i) (j + 1ul) /\
// correctness
(mt_safe_elts_spec h0 lv hs (Ghost.reveal i) j;
S.equal (RV.as_seq h1 hs)
(MTH.insert_ #(U32.v hsz) #hash_spec (U32.v lv) (U32.v (Ghost.reveal i)) (U32.v j)
(RV.as_seq h0 hs) (Rgl?.r_repr (hreg hsz) h0 acc)))))
(decreases (U32.v j))
#push-options "--z3rlimit 800 --initial_fuel 1 --max_fuel 1 --initial_ifuel 1 --max_ifuel 1"
let rec insert_ #hsz #hash_spec lv i j hs acc hash_fun =
let hh0 = HST.get () in
hash_vv_insert_copy lv i j hs acc;
let hh1 = HST.get () in
// Base conditions
V.loc_vector_within_included hs lv (lv + 1ul);
V.loc_vector_within_included hs (lv + 1ul) (V.size_of hs);
V.loc_vector_within_disjoint hs lv (lv + 1ul) (lv + 1ul) (V.size_of hs);
assert (V.size_of (V.get hh1 hs lv) == j + 1ul - offset_of (Ghost.reveal i));
assert (mt_safe_elts hh1 (lv + 1ul) hs (Ghost.reveal i / 2ul) (j / 2ul));
if j % 2ul = 1ul
then (insert_index_helper_odd lv (Ghost.reveal i) j;
assert (S.length (S.index (RV.as_seq hh0 hs) (U32.v lv)) > 0);
let lvhs = V.index hs lv in
assert (U32.v (V.size_of lvhs) ==
S.length (S.index (RV.as_seq hh0 hs) (U32.v lv)) + 1);
assert (V.size_of lvhs > 1ul);
/// 3) Update the accumulator `acc`.
hash_vec_rv_inv_r_inv hh1 (V.get hh1 hs lv) (V.size_of (V.get hh1 hs lv) - 2ul);
assert (Rgl?.r_inv (hreg hsz) hh1 acc);
hash_fun (V.index lvhs (V.size_of lvhs - 2ul)) acc acc;
let hh2 = HST.get () in
// 3-1) For the `modifies` postcondition
assert (modifies (B.loc_all_regions_from false (B.frameOf acc)) hh1 hh2);
assert (modifies
(loc_union
(loc_union
(RV.rs_loc_elem (hvreg hsz) (V.as_seq hh0 hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul)))
(B.loc_all_regions_from false (B.frameOf acc)))
hh0 hh2);
// 3-2) Preservation
RV.rv_inv_preserved
hs (B.loc_region_only false (B.frameOf acc)) hh1 hh2;
RV.as_seq_preserved
hs (B.loc_region_only false (B.frameOf acc)) hh1 hh2;
RV.rv_loc_elems_preserved
hs (lv + 1ul) (V.size_of hs)
(B.loc_region_only false (B.frameOf acc)) hh1 hh2;
assert (RV.rv_inv hh2 hs);
assert (Rgl?.r_inv (hreg hsz) hh2 acc);
// 3-3) For `mt_safe_elts`
V.get_preserved hs lv
(B.loc_region_only false (B.frameOf acc)) hh1 hh2; // head preserved
mt_safe_elts_preserved
(lv + 1ul) hs (Ghost.reveal i / 2ul) (j / 2ul)
(B.loc_region_only false (B.frameOf acc)) hh1 hh2; // tail preserved
// 3-4) Correctness
insert_snoc_last_helper
(RV.as_seq hh0 (V.get hh0 hs lv))
(Rgl?.r_repr (hreg hsz) hh0 acc);
assert (S.equal (Rgl?.r_repr (hreg hsz) hh2 acc) // `nacc` in `MTH.insert_`
((Ghost.reveal hash_spec)
(S.last (S.index (RV.as_seq hh0 hs) (U32.v lv)))
(Rgl?.r_repr (hreg hsz) hh0 acc)));
/// 4) Recursion
insert_ (lv + 1ul)
(Ghost.hide (Ghost.reveal i / 2ul)) (j / 2ul)
hs acc hash_fun;
let hh3 = HST.get () in
// 4-0) Memory safety brought from the postcondition of the recursion
assert (RV.rv_inv hh3 hs);
assert (Rgl?.r_inv (hreg hsz) hh3 acc);
assert (modifies (loc_union
(loc_union
(RV.rv_loc_elems hh0 hs (lv + 1ul) (V.size_of hs))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs)))
(B.loc_all_regions_from false (B.frameOf acc)))
hh2 hh3);
assert (modifies
(loc_union
(loc_union
(loc_union
(RV.rs_loc_elem (hvreg hsz) (V.as_seq hh0 hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul)))
(B.loc_all_regions_from false (B.frameOf acc)))
(loc_union
(loc_union
(RV.rv_loc_elems hh0 hs (lv + 1ul) (V.size_of hs))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs)))
(B.loc_all_regions_from false (B.frameOf acc))))
hh0 hh3);
// 4-1) For `mt_safe_elts`
rv_inv_rv_elems_reg hh2 hs (lv + 1ul) (V.size_of hs);
RV.rv_loc_elems_included hh2 hs (lv + 1ul) (V.size_of hs);
assert (loc_disjoint
(V.loc_vector_within hs lv (lv + 1ul))
(RV.rv_loc_elems hh2 hs (lv + 1ul) (V.size_of hs)));
assert (loc_disjoint
(V.loc_vector_within hs lv (lv + 1ul))
(B.loc_all_regions_from false (B.frameOf acc)));
V.get_preserved hs lv
(loc_union
(loc_union
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs))
(RV.rv_loc_elems hh2 hs (lv + 1ul) (V.size_of hs)))
(B.loc_all_regions_from false (B.frameOf acc)))
hh2 hh3;
assert (V.size_of (V.get hh3 hs lv) ==
j + 1ul - offset_of (Ghost.reveal i)); // head preserved
assert (mt_safe_elts hh3 (lv + 1ul) hs
(Ghost.reveal i / 2ul) (j / 2ul + 1ul)); // tail by recursion
mt_safe_elts_constr hh3 lv hs (Ghost.reveal i) (j + 1ul);
assert (mt_safe_elts hh3 lv hs (Ghost.reveal i) (j + 1ul));
// 4-2) Correctness
mt_safe_elts_spec hh2 (lv + 1ul) hs (Ghost.reveal i / 2ul) (j / 2ul);
assert (S.equal (RV.as_seq hh3 hs)
(MTH.insert_ #(U32.v hsz) #(Ghost.reveal hash_spec) (U32.v lv + 1) (U32.v (Ghost.reveal i) / 2) (U32.v j / 2)
(RV.as_seq hh2 hs) (Rgl?.r_repr (hreg hsz) hh2 acc)));
mt_safe_elts_spec hh0 lv hs (Ghost.reveal i) j;
MTH.insert_rec #(U32.v hsz) #(Ghost.reveal hash_spec) (U32.v lv) (U32.v (Ghost.reveal i)) (U32.v j)
(RV.as_seq hh0 hs) (Rgl?.r_repr (hreg hsz) hh0 acc);
assert (S.equal (RV.as_seq hh3 hs)
(MTH.insert_ #(U32.v hsz) #(Ghost.reveal hash_spec) (U32.v lv) (U32.v (Ghost.reveal i)) (U32.v j)
(RV.as_seq hh0 hs) (Rgl?.r_repr (hreg hsz) hh0 acc))))
else (insert_index_helper_even lv j;
// memory safety
assert (mt_safe_elts hh1 (lv + 1ul) hs (Ghost.reveal i / 2ul) ((j + 1ul) / 2ul));
mt_safe_elts_constr hh1 lv hs (Ghost.reveal i) (j + 1ul);
assert (mt_safe_elts hh1 lv hs (Ghost.reveal i) (j + 1ul));
assert (modifies
(loc_union
(RV.rs_loc_elem (hvreg hsz) (V.as_seq hh0 hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul)))
hh0 hh1);
insert_modifies_union_loc_weakening
(loc_union
(RV.rs_loc_elem (hvreg hsz) (V.as_seq hh0 hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul)))
(B.loc_all_regions_from false (B.frameOf acc))
(loc_union
(loc_union
(RV.rv_loc_elems hh0 hs (lv + 1ul) (V.size_of hs))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs)))
(B.loc_all_regions_from false (B.frameOf acc)))
hh0 hh1;
// correctness
mt_safe_elts_spec hh0 lv hs (Ghost.reveal i) j;
MTH.insert_base #(U32.v hsz) #(Ghost.reveal hash_spec) (U32.v lv) (U32.v (Ghost.reveal i)) (U32.v j)
(RV.as_seq hh0 hs) (Rgl?.r_repr (hreg hsz) hh0 acc);
assert (S.equal (RV.as_seq hh1 hs)
(MTH.insert_ #(U32.v hsz) #(Ghost.reveal hash_spec) (U32.v lv) (U32.v (Ghost.reveal i)) (U32.v j)
(RV.as_seq hh0 hs) (Rgl?.r_repr (hreg hsz) hh0 acc))));
/// 5) Proving the postcondition after recursion
let hh4 = HST.get () in
// 5-1) For the `modifies` postcondition.
assert (modifies
(loc_union
(loc_union
(loc_union
(RV.rs_loc_elem (hvreg hsz) (V.as_seq hh0 hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul)))
(B.loc_all_regions_from false (B.frameOf acc)))
(loc_union
(loc_union
(RV.rv_loc_elems hh0 hs (lv + 1ul) (V.size_of hs))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs)))
(B.loc_all_regions_from false (B.frameOf acc))))
hh0 hh4);
insert_modifies_rec_helper
lv hs (B.loc_all_regions_from false (B.frameOf acc)) hh0;
// 5-2) For `mt_safe_elts`
assert (mt_safe_elts hh4 lv hs (Ghost.reveal i) (j + 1ul));
// 5-3) Preservation
assert (RV.rv_inv hh4 hs);
assert (Rgl?.r_inv (hreg hsz) hh4 acc);
// 5-4) Correctness
mt_safe_elts_spec hh0 lv hs (Ghost.reveal i) j;
assert (S.equal (RV.as_seq hh4 hs)
(MTH.insert_ #(U32.v hsz) #hash_spec (U32.v lv) (U32.v (Ghost.reveal i)) (U32.v j)
(RV.as_seq hh0 hs) (Rgl?.r_repr (hreg hsz) hh0 acc))) // QED
#pop-options
private inline_for_extraction
val mt_insert_pre_nst: mtv:merkle_tree -> v:hash #(MT?.hash_size mtv) -> Tot bool
let mt_insert_pre_nst mtv v = mt_not_full_nst mtv && add64_fits (MT?.offset mtv) ((MT?.j mtv) + 1ul)
val mt_insert_pre: #hsz:Ghost.erased hash_size_t -> mt:const_mt_p -> v:hash #hsz -> HST.ST bool
(requires (fun h0 -> mt_safe h0 (CB.cast mt) /\ (MT?.hash_size (B.get h0 (CB.cast mt) 0)) = Ghost.reveal hsz))
(ensures (fun _ _ _ -> True))
let mt_insert_pre #hsz mt v =
let mt = !*(CB.cast mt) in
assert (MT?.hash_size mt == (MT?.hash_size mt));
mt_insert_pre_nst mt v
// `mt_insert` inserts a hash into a Merkle tree. Note that this operation
// manipulates the content of `v`, since it uses `v` as an accumulator during
// insertion.
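// Informally (a summary of the specification below, not an additional claim):
// after `mt_insert hsz mt v` the field `MT?.j` has grown by one, `rhs_ok` has
// been reset to false (the rightmost hashes are deprecated until recomputed),
// and the contents of `v` may have been overwritten by the repeated hashing in
// `insert_`.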
#push-options "--z3rlimit 100 --initial_fuel 1 --max_fuel 1 --initial_ifuel 1 --max_ifuel 1"
val mt_insert:
hsz:Ghost.erased hash_size_t ->
mt:mt_p -> v:hash #hsz ->
HST.ST unit
(requires (fun h0 ->
let dmt = B.get h0 mt 0 in
mt_safe h0 mt /\
Rgl?.r_inv (hreg hsz) h0 v /\
HH.disjoint (B.frameOf mt) (B.frameOf v) /\
MT?.hash_size dmt = Ghost.reveal hsz /\
mt_insert_pre_nst dmt v))
(ensures (fun h0 _ h1 ->
// memory safety
modifies (loc_union
(mt_loc mt)
(B.loc_all_regions_from false (B.frameOf v)))
h0 h1 /\
mt_safe h1 mt /\
// correctness
MT?.hash_size (B.get h1 mt 0) = Ghost.reveal hsz /\
mt_lift h1 mt == MTH.mt_insert (mt_lift h0 mt) (Rgl?.r_repr (hreg hsz) h0 v)))
#pop-options
#push-options "--z3rlimit 40"
let mt_insert hsz mt v =
let hh0 = HST.get () in
let mtv = !*mt in
let hs = MT?.hs mtv in
let hsz = MT?.hash_size mtv in
insert_ #hsz #(Ghost.reveal (MT?.hash_spec mtv)) 0ul (Ghost.hide (MT?.i mtv)) (MT?.j mtv) hs v (MT?.hash_fun mtv);
let hh1 = HST.get () in
RV.rv_loc_elems_included hh0 (MT?.hs mtv) 0ul (V.size_of hs);
V.loc_vector_within_included hs 0ul (V.size_of hs);
RV.rv_inv_preserved
(MT?.rhs mtv)
(loc_union
(loc_union
(RV.rv_loc_elems hh0 hs 0ul (V.size_of hs))
(V.loc_vector_within hs 0ul (V.size_of hs)))
(B.loc_all_regions_from false (B.frameOf v)))
hh0 hh1;
RV.as_seq_preserved
(MT?.rhs mtv)
(loc_union
(loc_union
(RV.rv_loc_elems hh0 hs 0ul (V.size_of hs))
(V.loc_vector_within hs 0ul (V.size_of hs)))
(B.loc_all_regions_from false (B.frameOf v)))
hh0 hh1;
Rgl?.r_sep (hreg hsz) (MT?.mroot mtv)
(loc_union
(loc_union
(RV.rv_loc_elems hh0 hs 0ul (V.size_of hs))
(V.loc_vector_within hs 0ul (V.size_of hs)))
(B.loc_all_regions_from false (B.frameOf v)))
hh0 hh1;
mt *= MT (MT?.hash_size mtv)
(MT?.offset mtv)
(MT?.i mtv)
(MT?.j mtv + 1ul)
(MT?.hs mtv)
false // `rhs` is always deprecated right after an insertion.
(MT?.rhs mtv)
(MT?.mroot mtv)
(MT?.hash_spec mtv)
(MT?.hash_fun mtv);
let hh2 = HST.get () in
RV.rv_inv_preserved
(MT?.hs mtv) (B.loc_buffer mt) hh1 hh2;
RV.rv_inv_preserved
(MT?.rhs mtv) (B.loc_buffer mt) hh1 hh2;
RV.as_seq_preserved
(MT?.hs mtv) (B.loc_buffer mt) hh1 hh2;
RV.as_seq_preserved
(MT?.rhs mtv) (B.loc_buffer mt) hh1 hh2;
Rgl?.r_sep (hreg hsz) (MT?.mroot mtv) (B.loc_buffer mt) hh1 hh2;
mt_safe_elts_preserved
0ul (MT?.hs mtv) (MT?.i mtv) (MT?.j mtv + 1ul) (B.loc_buffer mt)
hh1 hh2
#pop-options
// `mt_create` initializes a Merkle tree with a given initial hash `init`.
// A valid Merkle tree should contain at least one element.
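// For example (informally, following `mt_create_custom` below): the returned
// tree is `create_empty_mt` followed by a single `mt_insert` of `init`, so it
// starts with `i = 0ul`, `j = 1ul`, and `hs[0]` holding a copy of `init`.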
val mt_create_custom:
hsz:hash_size_t ->
hash_spec:Ghost.erased (MTS.hash_fun_t #(U32.v hsz)) ->
r:HST.erid -> init:hash #hsz -> hash_fun:hash_fun_t #hsz #hash_spec -> HST.ST mt_p
(requires (fun h0 ->
Rgl?.r_inv (hreg hsz) h0 init /\
HH.disjoint r (B.frameOf init)))
(ensures (fun h0 mt h1 ->
// memory safety
modifies (loc_union (mt_loc mt) (B.loc_all_regions_from false (B.frameOf init))) h0 h1 /\
mt_safe h1 mt /\
// correctness
MT?.hash_size (B.get h1 mt 0) = hsz /\
mt_lift h1 mt == MTH.mt_create (U32.v hsz) (Ghost.reveal hash_spec) (Rgl?.r_repr (hreg hsz) h0 init)))
#push-options "--z3rlimit 40"
let mt_create_custom hsz hash_spec r init hash_fun =
let hh0 = HST.get () in
let mt = create_empty_mt hsz hash_spec hash_fun r in
mt_insert hsz mt init;
let hh2 = HST.get () in
mt
#pop-options
/// Construction and Destruction of paths
// Since each element pointer in `path` comes from the target Merkle tree and
// each element has a different location in `MT?.hs` (and thus a different
// region id), we cannot use the regionality property for `path`s. Hence we
// manually define the invariants and the representation here.
noeq type path =
| Path: hash_size:hash_size_t ->
hashes:V.vector (hash #hash_size) ->
path
type path_p = B.pointer path
type const_path_p = const_pointer path
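// For illustration (an informal note mirroring `mt_verify_pre_nst` later in
// this file, not a new invariant): a verification path for leaf index `k` in a
// tree with `j` leaves is expected to hold `1ul + mt_path_length 0ul k j false`
// hashes in `Path?.hashes`, the first one being the hash of the leaf itself.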
private
let phashes (h:HS.mem) (p:path_p) | false | false | MerkleTree.Low.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 0,
"initial_ifuel": 0,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 10,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val phashes (h: HS.mem) (p: path_p) : GTot (V.vector (hash #(Path?.hash_size (B.get h p 0)))) | [] | MerkleTree.Low.phashes | {
"file_name": "src/MerkleTree.Low.fst",
"git_rev": "3b0f086655c145aa23f58a97259ebf4cf112a4a3",
"git_url": "https://github.com/hacl-star/merkle-tree.git",
"project_name": "merkle-tree"
} | h: FStar.Monotonic.HyperStack.mem -> p: MerkleTree.Low.path_p
-> Prims.GTot (LowStar.Vector.vector MerkleTree.Low.Datastructures.hash) | {
"end_col": 28,
"end_line": 1062,
"start_col": 2,
"start_line": 1062
} |
Prims.Tot | val mt_verify_pre_nst: mt:merkle_tree -> k:offset_t -> j:offset_t -> p:path -> rt:(hash #(MT?.hash_size mt)) -> Tot bool | [
{
"abbrev": false,
"full_module": "MerkleTree.Low.VectorExtras",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree.Low.Hashfunctions",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree.Low.Datastructures",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": true,
"full_module": "MerkleTree.Spec",
"short_module": "MTS"
},
{
"abbrev": true,
"full_module": "MerkleTree.New.High",
"short_module": "MTH"
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "LowStar.Regional.Instances",
"short_module": "RVI"
},
{
"abbrev": true,
"full_module": "LowStar.RVector",
"short_module": "RV"
},
{
"abbrev": true,
"full_module": "LowStar.Vector",
"short_module": "V"
},
{
"abbrev": true,
"full_module": "LowStar.ConstBuffer",
"short_module": "CB"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.Monotonic.HyperHeap",
"short_module": "HH"
},
{
"abbrev": true,
"full_module": "FStar.Monotonic.HyperStack",
"short_module": "MHS"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": false,
"full_module": "LowStar.Regional.Instances",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.RVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Regional",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Vector",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.BufferOps",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowStar.Buffer",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Integers",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.All",
"short_module": null
},
{
"abbrev": false,
"full_module": "EverCrypt.Helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree",
"short_module": null
},
{
"abbrev": false,
"full_module": "MerkleTree",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let mt_verify_pre_nst mt k j p rt =
k < j &&
offsets_connect (MT?.offset mt) k &&
offsets_connect (MT?.offset mt) j &&
MT?.hash_size mt = Path?.hash_size p &&
([@inline_let] let k = split_offset (MT?.offset mt) k in
[@inline_let] let j = split_offset (MT?.offset mt) j in
// We need to add one since the first element is the hash to verify.
V.size_of (Path?.hashes p) = 1ul + mt_path_length 0ul k j false) | val mt_verify_pre_nst: mt:merkle_tree -> k:offset_t -> j:offset_t -> p:path -> rt:(hash #(MT?.hash_size mt)) -> Tot bool
let mt_verify_pre_nst mt k j p rt = | false | null | false | k < j && offsets_connect (MT?.offset mt) k && offsets_connect (MT?.offset mt) j &&
MT?.hash_size mt = Path?.hash_size p &&
([@@ inline_let ]let k = split_offset (MT?.offset mt) k in
[@@ inline_let ]let j = split_offset (MT?.offset mt) j in
V.size_of (Path?.hashes p) = 1ul + mt_path_length 0ul k j false) | {
"checked_file": "MerkleTree.Low.fst.checked",
"dependencies": [
"prims.fst.checked",
"MerkleTree.Spec.fst.checked",
"MerkleTree.New.High.fst.checked",
"MerkleTree.Low.VectorExtras.fst.checked",
"MerkleTree.Low.Hashfunctions.fst.checked",
"MerkleTree.Low.Datastructures.fst.checked",
"LowStar.Vector.fst.checked",
"LowStar.RVector.fst.checked",
"LowStar.Regional.Instances.fst.checked",
"LowStar.Regional.fst.checked",
"LowStar.ConstBuffer.fsti.checked",
"LowStar.BufferOps.fst.checked",
"LowStar.Buffer.fst.checked",
"Lib.IntTypes.fsti.checked",
"Lib.ByteBuffer.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.Properties.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Monotonic.HyperStack.fsti.checked",
"FStar.Monotonic.HyperHeap.fsti.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Integers.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked",
"FStar.All.fst.checked",
"EverCrypt.Helpers.fsti.checked"
],
"interface_file": false,
"source_file": "MerkleTree.Low.fst"
} | [
"total"
] | [
"MerkleTree.Low.merkle_tree",
"MerkleTree.Low.offset_t",
"MerkleTree.Low.path",
"MerkleTree.Low.Datastructures.hash",
"MerkleTree.Low.__proj__MT__item__hash_size",
"Prims.op_AmpAmp",
"FStar.Integers.op_Less",
"FStar.Integers.Unsigned",
"FStar.Integers.W64",
"MerkleTree.Low.offsets_connect",
"MerkleTree.Low.__proj__MT__item__offset",
"Prims.op_Equality",
"MerkleTree.Low.Datastructures.hash_size_t",
"MerkleTree.Low.__proj__Path__item__hash_size",
"FStar.UInt32.t",
"LowStar.Vector.size_of",
"MerkleTree.Low.__proj__Path__item__hashes",
"FStar.Integers.op_Plus",
"FStar.Integers.W32",
"FStar.UInt32.__uint_to_t",
"MerkleTree.Low.mt_path_length",
"MerkleTree.Low.index_t",
"MerkleTree.Low.split_offset",
"Prims.bool"
] | [] | module MerkleTree.Low
open EverCrypt.Helpers
open FStar.All
open FStar.Integers
open FStar.Mul
open LowStar.Buffer
open LowStar.BufferOps
open LowStar.Vector
open LowStar.Regional
open LowStar.RVector
open LowStar.Regional.Instances
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
module MHS = FStar.Monotonic.HyperStack
module HH = FStar.Monotonic.HyperHeap
module B = LowStar.Buffer
module CB = LowStar.ConstBuffer
module V = LowStar.Vector
module RV = LowStar.RVector
module RVI = LowStar.Regional.Instances
module S = FStar.Seq
module U32 = FStar.UInt32
module U64 = FStar.UInt64
module MTH = MerkleTree.New.High
module MTS = MerkleTree.Spec
open Lib.IntTypes
open MerkleTree.Low.Datastructures
open MerkleTree.Low.Hashfunctions
open MerkleTree.Low.VectorExtras
#set-options "--z3rlimit 10 --initial_fuel 0 --max_fuel 0 --initial_ifuel 0 --max_ifuel 0"
type const_pointer (a:Type0) = b:CB.const_buffer a{CB.length b == 1 /\ CB.qual_of b == CB.MUTABLE}
/// Low-level Merkle tree data structure
///
// NOTE: because of a lack of 64-bit LowStar.Buffer support, we currently
// cannot change the index types below to 64-bit ones.
type index_t = uint32_t
let uint32_32_max = 4294967295ul
inline_for_extraction
let uint32_max = 4294967295UL
let uint64_max = 18446744073709551615UL
let offset_range_limit = uint32_max
type offset_t = uint64_t
inline_for_extraction noextract unfold let u32_64 = Int.Cast.uint32_to_uint64
inline_for_extraction noextract unfold let u64_32 = Int.Cast.uint64_to_uint32
private inline_for_extraction
let offsets_connect (x:offset_t) (y:offset_t): Tot bool = y >= x && (y - x) <= offset_range_limit
private inline_for_extraction
let split_offset (tree:offset_t) (index:offset_t{offsets_connect tree index}): Tot index_t =
[@inline_let] let diff = U64.sub_mod index tree in
assert (diff <= offset_range_limit);
Int.Cast.uint64_to_uint32 diff
private inline_for_extraction
let add64_fits (x:offset_t) (i:index_t): Tot bool = uint64_max - x >= (u32_64 i)
private inline_for_extraction
let join_offset (tree:offset_t) (i:index_t{add64_fits tree i}): Tot (r:offset_t{offsets_connect tree r}) =
U64.add tree (u32_64 i)
inline_for_extraction val merkle_tree_size_lg: uint32_t
let merkle_tree_size_lg = 32ul
// A Merkle tree `MT i j hs rhs_ok rhs` stores all necessary hashes to generate
// a Merkle path for each element from the index `i` to `j-1`.
// - Parameters
// `hs`: a 2-dim store for hashes, where `hs[0]` contains leaf hash values.
// `rhs_ok`: indicates whether the rightmost hashes are up-to-date.
// `rhs`: a store for "rightmost" hashes, manipulated only when required to
//        calculate some Merkle paths that need the rightmost hashes
//        as a part of them.
// `mroot`: during the construction of `rhs` we can also calculate the Merkle
// root of the tree. If `rhs_ok` is true then it has the up-to-date
// root value.
noeq type merkle_tree =
| MT: hash_size:hash_size_t ->
offset:offset_t ->
i:index_t -> j:index_t{i <= j /\ add64_fits offset j} ->
hs:hash_vv hash_size {V.size_of hs = merkle_tree_size_lg} ->
rhs_ok:bool ->
rhs:hash_vec #hash_size {V.size_of rhs = merkle_tree_size_lg} ->
mroot:hash #hash_size ->
hash_spec:Ghost.erased (MTS.hash_fun_t #(U32.v hash_size)) ->
hash_fun:hash_fun_t #hash_size #hash_spec ->
merkle_tree
type mt_p = B.pointer merkle_tree
type const_mt_p = const_pointer merkle_tree
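// For example (informal, mirroring the code in this file): `create_empty_mt`
// below builds a tree with `i = j = 0ul`, every level of `hs` empty and
// `rhs_ok = false`, and `mt_insert` resets `rhs_ok` to false after every
// insertion, so `rhs` and `mroot` are only meaningful once the rightmost
// hashes have been (re)computed.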
inline_for_extraction
let merkle_tree_conditions (#hsz:Ghost.erased hash_size_t) (offset:uint64_t) (i j:uint32_t) (hs:hash_vv hsz) (rhs_ok:bool) (rhs:hash_vec #hsz) (mroot:hash #hsz): Tot bool =
j >= i && add64_fits offset j &&
V.size_of hs = merkle_tree_size_lg &&
V.size_of rhs = merkle_tree_size_lg
// The maximum number of currently held elements in the tree is (2^32 - 1).
// cwinter: even when using 64-bit indices, we fail if the underlying 32-bit
// vector is full; this can be fixed if necessary.
private inline_for_extraction
val mt_not_full_nst: mtv:merkle_tree -> Tot bool
let mt_not_full_nst mtv = MT?.j mtv < uint32_32_max
val mt_not_full: HS.mem -> mt_p -> GTot bool
let mt_not_full h mt = mt_not_full_nst (B.get h mt 0)
/// (Memory) Safety
val offset_of: i:index_t -> Tot index_t
let offset_of i = if i % 2ul = 0ul then i else i - 1ul
// `mt_safe_elts` says that it is safe to access an element from `i` to `j - 1`
// at level `lv` in the Merkle tree, i.e., hs[lv][k] (i <= k < j) is a valid
// element.
inline_for_extraction noextract
val mt_safe_elts:
#hsz:hash_size_t ->
h:HS.mem -> lv:uint32_t{lv <= merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
i:index_t -> j:index_t{j >= i} ->
GTot Type0 (decreases (32 - U32.v lv))
let rec mt_safe_elts #hsz h lv hs i j =
if lv = merkle_tree_size_lg then true
else (let ofs = offset_of i in
V.size_of (V.get h hs lv) == j - ofs /\
mt_safe_elts #hsz h (lv + 1ul) hs (i / 2ul) (j / 2ul))
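// A concrete instance (informal, obtained by unfolding the definition above):
// `mt_safe_elts h 0ul hs 0ul 5ul` says that `hs[0]` has 5 elements, `hs[1]`
// has 2, `hs[2]` has 1, and every higher level is empty.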
#push-options "--initial_fuel 1 --max_fuel 1"
val mt_safe_elts_constr:
#hsz:hash_size_t ->
h:HS.mem -> lv:uint32_t{lv < merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
i:index_t -> j:index_t{j >= i} ->
Lemma (requires (V.size_of (V.get h hs lv) == j - offset_of i /\
mt_safe_elts #hsz h (lv + 1ul) hs (i / 2ul) (j / 2ul)))
(ensures (mt_safe_elts #hsz h lv hs i j))
let mt_safe_elts_constr #_ h lv hs i j = ()
val mt_safe_elts_head:
#hsz:hash_size_t ->
h:HS.mem -> lv:uint32_t{lv < merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
i:index_t -> j:index_t{j >= i} ->
Lemma (requires (mt_safe_elts #hsz h lv hs i j))
(ensures (V.size_of (V.get h hs lv) == j - offset_of i))
let mt_safe_elts_head #_ h lv hs i j = ()
val mt_safe_elts_rec:
#hsz:hash_size_t ->
h:HS.mem -> lv:uint32_t{lv < merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
i:index_t -> j:index_t{j >= i} ->
Lemma (requires (mt_safe_elts #hsz h lv hs i j))
(ensures (mt_safe_elts #hsz h (lv + 1ul) hs (i / 2ul) (j / 2ul)))
let mt_safe_elts_rec #_ h lv hs i j = ()
val mt_safe_elts_init:
#hsz:hash_size_t ->
h:HS.mem -> lv:uint32_t{lv <= merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
Lemma (requires (V.forall_ h hs lv (V.size_of hs)
(fun hv -> V.size_of hv = 0ul)))
(ensures (mt_safe_elts #hsz h lv hs 0ul 0ul))
(decreases (32 - U32.v lv))
let rec mt_safe_elts_init #hsz h lv hs =
if lv = merkle_tree_size_lg then ()
else mt_safe_elts_init #hsz h (lv + 1ul) hs
#pop-options
val mt_safe_elts_preserved:
#hsz:hash_size_t ->
lv:uint32_t{lv <= merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
i:index_t -> j:index_t{j >= i} ->
p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.live h0 hs /\
mt_safe_elts #hsz h0 lv hs i j /\
loc_disjoint p (V.loc_vector_within hs lv (V.size_of hs)) /\
modifies p h0 h1))
(ensures (mt_safe_elts #hsz h1 lv hs i j))
(decreases (32 - U32.v lv))
[SMTPat (V.live h0 hs);
SMTPat (mt_safe_elts #hsz h0 lv hs i j);
SMTPat (loc_disjoint p (RV.loc_rvector hs));
SMTPat (modifies p h0 h1)]
#push-options "--z3rlimit 100 --initial_fuel 2 --max_fuel 2"
let rec mt_safe_elts_preserved #hsz lv hs i j p h0 h1 =
if lv = merkle_tree_size_lg then ()
else (V.get_preserved hs lv p h0 h1;
mt_safe_elts_preserved #hsz (lv + 1ul) hs (i / 2ul) (j / 2ul) p h0 h1)
#pop-options
// `mt_safe` is the invariant of a Merkle tree through its lifetime.
// It includes liveness, regionality, disjointness (between its data structures),
// and valid element access (`mt_safe_elts`).
inline_for_extraction noextract
val mt_safe: HS.mem -> mt_p -> GTot Type0
let mt_safe h mt =
B.live h mt /\ B.freeable mt /\
(let mtv = B.get h mt 0 in
// Liveness & Accessibility
RV.rv_inv h (MT?.hs mtv) /\
RV.rv_inv h (MT?.rhs mtv) /\
Rgl?.r_inv (hreg (MT?.hash_size mtv)) h (MT?.mroot mtv) /\
mt_safe_elts h 0ul (MT?.hs mtv) (MT?.i mtv) (MT?.j mtv) /\
// Regionality
HH.extends (V.frameOf (MT?.hs mtv)) (B.frameOf mt) /\
HH.extends (V.frameOf (MT?.rhs mtv)) (B.frameOf mt) /\
HH.extends (B.frameOf (MT?.mroot mtv)) (B.frameOf mt) /\
HH.disjoint (V.frameOf (MT?.hs mtv)) (V.frameOf (MT?.rhs mtv)) /\
HH.disjoint (V.frameOf (MT?.hs mtv)) (B.frameOf (MT?.mroot mtv)) /\
HH.disjoint (V.frameOf (MT?.rhs mtv)) (B.frameOf (MT?.mroot mtv)))
// Since a Merkle tree satisfies regionality, it's ok to take all regions from
// a tree pointer as a location of the tree.
val mt_loc: mt_p -> GTot loc
let mt_loc mt = B.loc_all_regions_from false (B.frameOf mt)
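// For example (informal): `mt_free` below is specified with
// `modifies (mt_loc mt)`, and `mt_insert` with `mt_loc mt` joined with the
// regions of the auxiliary hash.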
val mt_safe_preserved:
mt:mt_p -> p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (mt_safe h0 mt /\
loc_disjoint p (mt_loc mt) /\
modifies p h0 h1))
(ensures (B.get h0 mt 0 == B.get h1 mt 0 /\
mt_safe h1 mt))
let mt_safe_preserved mt p h0 h1 =
assert (loc_includes (mt_loc mt) (B.loc_buffer mt));
let mtv = B.get h0 mt 0 in
assert (loc_includes (mt_loc mt) (RV.loc_rvector (MT?.hs mtv)));
assert (loc_includes (mt_loc mt) (RV.loc_rvector (MT?.rhs mtv)));
assert (loc_includes (mt_loc mt) (V.loc_vector (MT?.hs mtv)));
assert (loc_includes (mt_loc mt)
(B.loc_all_regions_from false (B.frameOf (MT?.mroot mtv))));
RV.rv_inv_preserved (MT?.hs mtv) p h0 h1;
RV.rv_inv_preserved (MT?.rhs mtv) p h0 h1;
Rgl?.r_sep (hreg (MT?.hash_size mtv)) (MT?.mroot mtv) p h0 h1;
V.loc_vector_within_included (MT?.hs mtv) 0ul (V.size_of (MT?.hs mtv));
mt_safe_elts_preserved 0ul (MT?.hs mtv) (MT?.i mtv) (MT?.j mtv) p h0 h1
/// Lifting to a high-level Merkle tree structure
val mt_safe_elts_spec:
#hsz:hash_size_t ->
h:HS.mem ->
lv:uint32_t{lv <= merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
i:index_t ->
j:index_t{j >= i} ->
Lemma (requires (RV.rv_inv h hs /\
mt_safe_elts #hsz h lv hs i j))
(ensures (MTH.hs_wf_elts #(U32.v hsz)
(U32.v lv) (RV.as_seq h hs)
(U32.v i) (U32.v j)))
(decreases (32 - U32.v lv))
#push-options "--z3rlimit 100 --initial_fuel 2 --max_fuel 2"
let rec mt_safe_elts_spec #_ h lv hs i j =
if lv = merkle_tree_size_lg then ()
else mt_safe_elts_spec h (lv + 1ul) hs (i / 2ul) (j / 2ul)
#pop-options
val merkle_tree_lift:
h:HS.mem ->
mtv:merkle_tree{
RV.rv_inv h (MT?.hs mtv) /\
RV.rv_inv h (MT?.rhs mtv) /\
Rgl?.r_inv (hreg (MT?.hash_size mtv)) h (MT?.mroot mtv) /\
mt_safe_elts #(MT?.hash_size mtv) h 0ul (MT?.hs mtv) (MT?.i mtv) (MT?.j mtv)} ->
GTot (r:MTH.merkle_tree #(U32.v (MT?.hash_size mtv)) {MTH.mt_wf_elts #_ r})
let merkle_tree_lift h mtv =
mt_safe_elts_spec h 0ul (MT?.hs mtv) (MT?.i mtv) (MT?.j mtv);
MTH.MT #(U32.v (MT?.hash_size mtv))
(U32.v (MT?.i mtv))
(U32.v (MT?.j mtv))
(RV.as_seq h (MT?.hs mtv))
(MT?.rhs_ok mtv)
(RV.as_seq h (MT?.rhs mtv))
(Rgl?.r_repr (hreg (MT?.hash_size mtv)) h (MT?.mroot mtv))
(Ghost.reveal (MT?.hash_spec mtv))
val mt_lift:
h:HS.mem -> mt:mt_p{mt_safe h mt} ->
GTot (r:MTH.merkle_tree #(U32.v (MT?.hash_size (B.get h mt 0))) {MTH.mt_wf_elts #_ r})
let mt_lift h mt =
merkle_tree_lift h (B.get h mt 0)
val mt_preserved:
mt:mt_p -> p:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (mt_safe h0 mt /\
loc_disjoint p (mt_loc mt) /\
modifies p h0 h1))
(ensures (mt_safe_preserved mt p h0 h1;
mt_lift h0 mt == mt_lift h1 mt))
let mt_preserved mt p h0 h1 =
assert (loc_includes (B.loc_all_regions_from false (B.frameOf mt))
(B.loc_buffer mt));
B.modifies_buffer_elim mt p h0 h1;
assert (B.get h0 mt 0 == B.get h1 mt 0);
assert (loc_includes (B.loc_all_regions_from false (B.frameOf mt))
(RV.loc_rvector (MT?.hs (B.get h0 mt 0))));
assert (loc_includes (B.loc_all_regions_from false (B.frameOf mt))
(RV.loc_rvector (MT?.rhs (B.get h0 mt 0))));
assert (loc_includes (B.loc_all_regions_from false (B.frameOf mt))
(B.loc_buffer (MT?.mroot (B.get h0 mt 0))));
RV.as_seq_preserved (MT?.hs (B.get h0 mt 0)) p h0 h1;
RV.as_seq_preserved (MT?.rhs (B.get h0 mt 0)) p h0 h1;
B.modifies_buffer_elim (MT?.mroot (B.get h0 mt 0)) p h0 h1
/// Construction
// Note that the public function for creation is `mt_create` defined below,
// which builds a tree with an initial hash.
#push-options "--z3rlimit 100 --initial_fuel 1 --max_fuel 1 --initial_ifuel 1 --max_ifuel 1"
private
val create_empty_mt:
hash_size:hash_size_t ->
hash_spec:Ghost.erased (MTS.hash_fun_t #(U32.v hash_size)) ->
hash_fun:hash_fun_t #hash_size #hash_spec ->
r:HST.erid ->
HST.ST mt_p
(requires (fun _ -> true))
(ensures (fun h0 mt h1 ->
let dmt = B.get h1 mt 0 in
// memory safety
B.frameOf mt = r /\
modifies (mt_loc mt) h0 h1 /\
mt_safe h1 mt /\
mt_not_full h1 mt /\
// correctness
MT?.hash_size dmt = hash_size /\
MT?.offset dmt = 0UL /\
merkle_tree_lift h1 dmt == MTH.create_empty_mt #_ #(Ghost.reveal hash_spec) ()))
let create_empty_mt hsz hash_spec hash_fun r =
[@inline_let] let hrg = hreg hsz in
[@inline_let] let hvrg = hvreg hsz in
[@inline_let] let hvvrg = hvvreg hsz in
let hs_region = HST.new_region r in
let hs = RV.alloc_rid hvrg merkle_tree_size_lg hs_region in
let h0 = HST.get () in
mt_safe_elts_init #hsz h0 0ul hs;
let rhs_region = HST.new_region r in
let rhs = RV.alloc_rid hrg merkle_tree_size_lg rhs_region in
let h1 = HST.get () in
assert (RV.as_seq h1 rhs == S.create 32 (MTH.hash_init #(U32.v hsz)));
RV.rv_inv_preserved hs (V.loc_vector rhs) h0 h1;
RV.as_seq_preserved hs (V.loc_vector rhs) h0 h1;
V.loc_vector_within_included hs 0ul (V.size_of hs);
mt_safe_elts_preserved #hsz 0ul hs 0ul 0ul (V.loc_vector rhs) h0 h1;
let mroot_region = HST.new_region r in
let mroot = rg_alloc hrg mroot_region in
let h2 = HST.get () in
RV.as_seq_preserved hs loc_none h1 h2;
RV.as_seq_preserved rhs loc_none h1 h2;
mt_safe_elts_preserved #hsz 0ul hs 0ul 0ul loc_none h1 h2;
let mt = B.malloc r (MT hsz 0UL 0ul 0ul hs false rhs mroot hash_spec hash_fun) 1ul in
let h3 = HST.get () in
RV.as_seq_preserved hs loc_none h2 h3;
RV.as_seq_preserved rhs loc_none h2 h3;
Rgl?.r_sep hrg mroot loc_none h2 h3;
mt_safe_elts_preserved #hsz 0ul hs 0ul 0ul loc_none h2 h3;
mt
#pop-options
/// Destruction (free)
val mt_free: mt:mt_p ->
HST.ST unit
(requires (fun h0 -> mt_safe h0 mt))
(ensures (fun h0 _ h1 -> modifies (mt_loc mt) h0 h1))
#push-options "--z3rlimit 100"
let mt_free mt =
let mtv = !*mt in
RV.free (MT?.hs mtv);
RV.free (MT?.rhs mtv);
[@inline_let] let rg = hreg (MT?.hash_size mtv) in
rg_free rg (MT?.mroot mtv);
B.free mt
#pop-options
/// Insertion
private
val as_seq_sub_upd:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector #a #rst rg ->
i:uint32_t{i < V.size_of rv} -> v:Rgl?.repr rg ->
Lemma (requires (RV.rv_inv h rv))
(ensures (S.equal (S.upd (RV.as_seq h rv) (U32.v i) v)
(S.append
(RV.as_seq_sub h rv 0ul i)
(S.cons v (RV.as_seq_sub h rv (i + 1ul) (V.size_of rv))))))
#push-options "--z3rlimit 20"
let as_seq_sub_upd #a #rst #rg h rv i v =
Seq.Properties.slice_upd (RV.as_seq h rv) 0 (U32.v i) (U32.v i) v;
Seq.Properties.slice_upd (RV.as_seq h rv) (U32.v i + 1) (U32.v (V.size_of rv)) (U32.v i) v;
RV.as_seq_seq_slice rg h (V.as_seq h rv)
0 (U32.v (V.size_of rv)) 0 (U32.v i);
assert (S.equal (S.slice (RV.as_seq h rv) 0 (U32.v i))
(RV.as_seq_sub h rv 0ul i));
RV.as_seq_seq_slice rg h (V.as_seq h rv)
0 (U32.v (V.size_of rv)) (U32.v i + 1) (U32.v (V.size_of rv));
assert (S.equal (S.slice (RV.as_seq h rv) (U32.v i + 1) (U32.v (V.size_of rv)))
(RV.as_seq_sub h rv (i + 1ul) (V.size_of rv)));
assert (S.index (S.upd (RV.as_seq h rv) (U32.v i) v) (U32.v i) == v)
#pop-options
// `hash_vv_insert_copy` inserts a hash element at a level `lv`, by copying it
// and pushing the copy onto `hs[lv]`. For the detailed insertion procedure, see
// `insert_` and `mt_insert`.
#push-options "--z3rlimit 100 --initial_fuel 1 --max_fuel 1"
private
inline_for_extraction
val hash_vv_insert_copy:
#hsz:hash_size_t ->
lv:uint32_t{lv < merkle_tree_size_lg} ->
i:Ghost.erased index_t ->
j:index_t{
Ghost.reveal i <= j &&
U32.v j < pow2 (32 - U32.v lv) - 1 &&
j < uint32_32_max} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
v:hash #hsz ->
HST.ST unit
(requires (fun h0 ->
RV.rv_inv h0 hs /\
Rgl?.r_inv (hreg hsz) h0 v /\
HH.disjoint (V.frameOf hs) (B.frameOf v) /\
mt_safe_elts #hsz h0 lv hs (Ghost.reveal i) j))
(ensures (fun h0 _ h1 ->
// memory safety
modifies (loc_union
(RV.rs_loc_elem (hvreg hsz) (V.as_seq h0 hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul)))
h0 h1 /\
RV.rv_inv h1 hs /\
Rgl?.r_inv (hreg hsz) h1 v /\
V.size_of (V.get h1 hs lv) == j + 1ul - offset_of (Ghost.reveal i) /\
V.size_of (V.get h1 hs lv) == V.size_of (V.get h0 hs lv) + 1ul /\
mt_safe_elts #hsz h1 (lv + 1ul) hs (Ghost.reveal i / 2ul) (j / 2ul) /\
RV.rv_loc_elems h0 hs (lv + 1ul) (V.size_of hs) ==
RV.rv_loc_elems h1 hs (lv + 1ul) (V.size_of hs) /\
// correctness
(mt_safe_elts_spec #hsz h0 lv hs (Ghost.reveal i) j;
S.equal (RV.as_seq h1 hs)
(MTH.hashess_insert
(U32.v lv) (U32.v (Ghost.reveal i)) (U32.v j)
(RV.as_seq h0 hs) (Rgl?.r_repr (hreg hsz) h0 v))) /\
S.equal (S.index (RV.as_seq h1 hs) (U32.v lv))
(S.snoc (S.index (RV.as_seq h0 hs) (U32.v lv))
(Rgl?.r_repr (hreg hsz) h0 v))))
let hash_vv_insert_copy #hsz lv i j hs v =
let hh0 = HST.get () in
mt_safe_elts_rec hh0 lv hs (Ghost.reveal i) j;
/// 1) Insert an element at the level `lv`, where the new vector is not yet
/// connected to `hs`.
let ihv = RV.insert_copy (hcpy hsz) (V.index hs lv) v in
let hh1 = HST.get () in
// 1-0) Basic disjointness conditions
V.forall2_forall_left hh0 hs 0ul (V.size_of hs) lv
(fun b1 b2 -> HH.disjoint (Rgl?.region_of (hvreg hsz) b1)
(Rgl?.region_of (hvreg hsz) b2));
V.forall2_forall_right hh0 hs 0ul (V.size_of hs) lv
(fun b1 b2 -> HH.disjoint (Rgl?.region_of (hvreg hsz) b1)
(Rgl?.region_of (hvreg hsz) b2));
V.loc_vector_within_included hs lv (lv + 1ul);
V.loc_vector_within_included hs (lv + 1ul) (V.size_of hs);
V.loc_vector_within_disjoint hs lv (lv + 1ul) (lv + 1ul) (V.size_of hs);
// 1-1) For the `modifies` postcondition.
assert (modifies (RV.rs_loc_elem (hvreg hsz) (V.as_seq hh0 hs) (U32.v lv)) hh0 hh1);
// 1-2) Preservation
Rgl?.r_sep (hreg hsz) v (RV.loc_rvector (V.get hh0 hs lv)) hh0 hh1;
RV.rv_loc_elems_preserved
hs (lv + 1ul) (V.size_of hs)
(RV.loc_rvector (V.get hh0 hs lv)) hh0 hh1;
// 1-3) For `mt_safe_elts`
assert (V.size_of ihv == j + 1ul - offset_of (Ghost.reveal i)); // head updated
mt_safe_elts_preserved
(lv + 1ul) hs (Ghost.reveal i / 2ul) (j / 2ul)
(RV.loc_rvector (V.get hh0 hs lv)) hh0 hh1; // tail not yet
// 1-4) For the `rv_inv` postcondition
RV.rs_loc_elems_elem_disj
(hvreg hsz) (V.as_seq hh0 hs) (V.frameOf hs)
0 (U32.v (V.size_of hs)) 0 (U32.v lv) (U32.v lv);
RV.rs_loc_elems_parent_disj
(hvreg hsz) (V.as_seq hh0 hs) (V.frameOf hs)
0 (U32.v lv);
RV.rv_elems_inv_preserved
hs 0ul lv (RV.loc_rvector (V.get hh0 hs lv))
hh0 hh1;
assert (RV.rv_elems_inv hh1 hs 0ul lv);
RV.rs_loc_elems_elem_disj
(hvreg hsz) (V.as_seq hh0 hs) (V.frameOf hs)
0 (U32.v (V.size_of hs))
(U32.v lv + 1) (U32.v (V.size_of hs))
(U32.v lv);
RV.rs_loc_elems_parent_disj
(hvreg hsz) (V.as_seq hh0 hs) (V.frameOf hs)
(U32.v lv + 1) (U32.v (V.size_of hs));
RV.rv_elems_inv_preserved
hs (lv + 1ul) (V.size_of hs) (RV.loc_rvector (V.get hh0 hs lv))
hh0 hh1;
assert (RV.rv_elems_inv hh1 hs (lv + 1ul) (V.size_of hs));
// assert (rv_itself_inv hh1 hs);
// assert (elems_reg hh1 hs);
// 1-5) Correctness
assert (S.equal (RV.as_seq hh1 ihv)
(S.snoc (RV.as_seq hh0 (V.get hh0 hs lv)) (Rgl?.r_repr (hreg hsz) hh0 v)));
/// 2) Assign the updated vector to `hs` at the level `lv`.
RV.assign hs lv ihv;
let hh2 = HST.get () in
// 2-1) For the `modifies` postcondition.
assert (modifies (V.loc_vector_within hs lv (lv + 1ul)) hh1 hh2);
assert (modifies (loc_union
(RV.rs_loc_elem (hvreg hsz) (V.as_seq hh0 hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul))) hh0 hh2);
// 2-2) Preservation
Rgl?.r_sep (hreg hsz) v (RV.loc_rvector hs) hh1 hh2;
RV.rv_loc_elems_preserved
hs (lv + 1ul) (V.size_of hs)
(V.loc_vector_within hs lv (lv + 1ul)) hh1 hh2;
// 2-3) For `mt_safe_elts`
assert (V.size_of (V.get hh2 hs lv) == j + 1ul - offset_of (Ghost.reveal i));
mt_safe_elts_preserved
(lv + 1ul) hs (Ghost.reveal i / 2ul) (j / 2ul)
(V.loc_vector_within hs lv (lv + 1ul)) hh1 hh2;
// 2-4) Correctness
RV.as_seq_sub_preserved hs 0ul lv (loc_rvector ihv) hh0 hh1;
RV.as_seq_sub_preserved hs (lv + 1ul) merkle_tree_size_lg (loc_rvector ihv) hh0 hh1;
assert (S.equal (RV.as_seq hh2 hs)
(S.append
(RV.as_seq_sub hh0 hs 0ul lv)
(S.cons (RV.as_seq hh1 ihv)
(RV.as_seq_sub hh0 hs (lv + 1ul) merkle_tree_size_lg))));
as_seq_sub_upd hh0 hs lv (RV.as_seq hh1 ihv)
#pop-options
private
val insert_index_helper_even:
lv:uint32_t{lv < merkle_tree_size_lg} ->
j:index_t{U32.v j < pow2 (32 - U32.v lv) - 1} ->
Lemma (requires (j % 2ul <> 1ul))
(ensures (U32.v j % 2 <> 1 /\ j / 2ul == (j + 1ul) / 2ul))
let insert_index_helper_even lv j = ()
#push-options "--z3rlimit 100 --initial_fuel 1 --max_fuel 1 --initial_ifuel 1 --max_ifuel 1"
private
val insert_index_helper_odd:
lv:uint32_t{lv < merkle_tree_size_lg} ->
i:index_t ->
j:index_t{i <= j && U32.v j < pow2 (32 - U32.v lv) - 1} ->
Lemma (requires (j % 2ul = 1ul /\
j < uint32_32_max))
(ensures (U32.v j % 2 = 1 /\
U32.v (j / 2ul) < pow2 (32 - U32.v (lv + 1ul)) - 1 /\
(j + 1ul) / 2ul == j / 2ul + 1ul /\
j - offset_of i > 0ul))
let insert_index_helper_odd lv i j = ()
#pop-options
private
val loc_union_assoc_4:
a:loc -> b:loc -> c:loc -> d:loc ->
Lemma (loc_union (loc_union a b) (loc_union c d) ==
loc_union (loc_union a c) (loc_union b d))
let loc_union_assoc_4 a b c d =
loc_union_assoc (loc_union a b) c d;
loc_union_assoc a b c;
loc_union_assoc a c b;
loc_union_assoc (loc_union a c) b d
private
val insert_modifies_rec_helper:
#hsz:hash_size_t ->
lv:uint32_t{lv < merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
aloc:loc ->
h:HS.mem ->
Lemma (loc_union
(loc_union
(loc_union
(RV.rs_loc_elem (hvreg hsz) (V.as_seq h hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul)))
aloc)
(loc_union
(loc_union
(RV.rv_loc_elems h hs (lv + 1ul) (V.size_of hs))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs)))
aloc) ==
loc_union
(loc_union
(RV.rv_loc_elems h hs lv (V.size_of hs))
(V.loc_vector_within hs lv (V.size_of hs)))
aloc)
#push-options "--z3rlimit 100 --initial_fuel 2 --max_fuel 2"
let insert_modifies_rec_helper #hsz lv hs aloc h =
assert (V.loc_vector_within hs lv (V.size_of hs) ==
loc_union (V.loc_vector_within hs lv (lv + 1ul))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs)));
RV.rs_loc_elems_rec_inverse (hvreg hsz) (V.as_seq h hs) (U32.v lv) (U32.v (V.size_of hs));
assert (RV.rv_loc_elems h hs lv (V.size_of hs) ==
loc_union (RV.rs_loc_elem (hvreg hsz) (V.as_seq h hs) (U32.v lv))
(RV.rv_loc_elems h hs (lv + 1ul) (V.size_of hs)));
// Applying some association rules...
loc_union_assoc
(loc_union
(RV.rs_loc_elem (hvreg hsz) (V.as_seq h hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul))) aloc
(loc_union
(loc_union
(RV.rv_loc_elems h hs (lv + 1ul) (V.size_of hs))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs)))
aloc);
loc_union_assoc
(loc_union
(RV.rv_loc_elems h hs (lv + 1ul) (V.size_of hs))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs))) aloc aloc;
loc_union_assoc
(loc_union
(RV.rs_loc_elem (hvreg hsz) (V.as_seq h hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul)))
(loc_union
(RV.rv_loc_elems h hs (lv + 1ul) (V.size_of hs))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs)))
aloc;
loc_union_assoc_4
(RV.rs_loc_elem (hvreg hsz) (V.as_seq h hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul))
(RV.rv_loc_elems h hs (lv + 1ul) (V.size_of hs))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs))
#pop-options
private
val insert_modifies_union_loc_weakening:
l1:loc -> l2:loc -> l3:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (modifies l1 h0 h1))
(ensures (modifies (loc_union (loc_union l1 l2) l3) h0 h1))
let insert_modifies_union_loc_weakening l1 l2 l3 h0 h1 =
B.loc_includes_union_l l1 l2 l1;
B.loc_includes_union_l (loc_union l1 l2) l3 (loc_union l1 l2)
private
val insert_snoc_last_helper:
#a:Type -> s:S.seq a{S.length s > 0} -> v:a ->
Lemma (S.index (S.snoc s v) (S.length s - 1) == S.last s)
let insert_snoc_last_helper #a s v = ()
private
val rv_inv_rv_elems_reg:
#a:Type0 -> #rst:Type -> #rg:regional rst a ->
h:HS.mem -> rv:rvector rg ->
i:uint32_t -> j:uint32_t{i <= j && j <= V.size_of rv} ->
Lemma (requires (RV.rv_inv h rv))
(ensures (RV.rv_elems_reg h rv i j))
let rv_inv_rv_elems_reg #a #rst #rg h rv i j = ()
// `insert_` recursively inserts the proper hashes into each level `lv` by
// accumulating a compressed hash. For example, if there are three leaf elements
// in the tree, `insert_` will change `hs` as follows:
// (`hij` is a compressed hash from `hi` to `hj`)
//
// BEFORE INSERTION AFTER INSERTION
// lv
// 0 h0 h1 h2 ====> h0 h1 h2 h3
// 1 h01 h01 h23
// 2 h03
//
private
val insert_:
#hsz:hash_size_t ->
#hash_spec:Ghost.erased (MTS.hash_fun_t #(U32.v hsz)) ->
lv:uint32_t{lv < merkle_tree_size_lg} ->
i:Ghost.erased index_t ->
j:index_t{
Ghost.reveal i <= j &&
U32.v j < pow2 (32 - U32.v lv) - 1 &&
j < uint32_32_max} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
acc:hash #hsz ->
hash_fun:hash_fun_t #hsz #hash_spec ->
HST.ST unit
(requires (fun h0 ->
RV.rv_inv h0 hs /\
Rgl?.r_inv (hreg hsz) h0 acc /\
HH.disjoint (V.frameOf hs) (B.frameOf acc) /\
mt_safe_elts h0 lv hs (Ghost.reveal i) j))
(ensures (fun h0 _ h1 ->
// memory safety
modifies (loc_union
(loc_union
(RV.rv_loc_elems h0 hs lv (V.size_of hs))
(V.loc_vector_within hs lv (V.size_of hs)))
(B.loc_all_regions_from false (B.frameOf acc)))
h0 h1 /\
RV.rv_inv h1 hs /\
Rgl?.r_inv (hreg hsz) h1 acc /\
mt_safe_elts h1 lv hs (Ghost.reveal i) (j + 1ul) /\
// correctness
(mt_safe_elts_spec h0 lv hs (Ghost.reveal i) j;
S.equal (RV.as_seq h1 hs)
(MTH.insert_ #(U32.v hsz) #hash_spec (U32.v lv) (U32.v (Ghost.reveal i)) (U32.v j)
(RV.as_seq h0 hs) (Rgl?.r_repr (hreg hsz) h0 acc)))))
(decreases (U32.v j))
#push-options "--z3rlimit 800 --initial_fuel 1 --max_fuel 1 --initial_ifuel 1 --max_ifuel 1"
let rec insert_ #hsz #hash_spec lv i j hs acc hash_fun =
let hh0 = HST.get () in
hash_vv_insert_copy lv i j hs acc;
let hh1 = HST.get () in
// Base conditions
V.loc_vector_within_included hs lv (lv + 1ul);
V.loc_vector_within_included hs (lv + 1ul) (V.size_of hs);
V.loc_vector_within_disjoint hs lv (lv + 1ul) (lv + 1ul) (V.size_of hs);
assert (V.size_of (V.get hh1 hs lv) == j + 1ul - offset_of (Ghost.reveal i));
assert (mt_safe_elts hh1 (lv + 1ul) hs (Ghost.reveal i / 2ul) (j / 2ul));
if j % 2ul = 1ul
then (insert_index_helper_odd lv (Ghost.reveal i) j;
assert (S.length (S.index (RV.as_seq hh0 hs) (U32.v lv)) > 0);
let lvhs = V.index hs lv in
assert (U32.v (V.size_of lvhs) ==
S.length (S.index (RV.as_seq hh0 hs) (U32.v lv)) + 1);
assert (V.size_of lvhs > 1ul);
/// 3) Update the accumulator `acc`.
hash_vec_rv_inv_r_inv hh1 (V.get hh1 hs lv) (V.size_of (V.get hh1 hs lv) - 2ul);
assert (Rgl?.r_inv (hreg hsz) hh1 acc);
hash_fun (V.index lvhs (V.size_of lvhs - 2ul)) acc acc;
let hh2 = HST.get () in
// 3-1) For the `modifies` postcondition
assert (modifies (B.loc_all_regions_from false (B.frameOf acc)) hh1 hh2);
assert (modifies
(loc_union
(loc_union
(RV.rs_loc_elem (hvreg hsz) (V.as_seq hh0 hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul)))
(B.loc_all_regions_from false (B.frameOf acc)))
hh0 hh2);
// 3-2) Preservation
RV.rv_inv_preserved
hs (B.loc_region_only false (B.frameOf acc)) hh1 hh2;
RV.as_seq_preserved
hs (B.loc_region_only false (B.frameOf acc)) hh1 hh2;
RV.rv_loc_elems_preserved
hs (lv + 1ul) (V.size_of hs)
(B.loc_region_only false (B.frameOf acc)) hh1 hh2;
assert (RV.rv_inv hh2 hs);
assert (Rgl?.r_inv (hreg hsz) hh2 acc);
// 3-3) For `mt_safe_elts`
V.get_preserved hs lv
(B.loc_region_only false (B.frameOf acc)) hh1 hh2; // head preserved
mt_safe_elts_preserved
(lv + 1ul) hs (Ghost.reveal i / 2ul) (j / 2ul)
(B.loc_region_only false (B.frameOf acc)) hh1 hh2; // tail preserved
// 3-4) Correctness
insert_snoc_last_helper
(RV.as_seq hh0 (V.get hh0 hs lv))
(Rgl?.r_repr (hreg hsz) hh0 acc);
assert (S.equal (Rgl?.r_repr (hreg hsz) hh2 acc) // `nacc` in `MTH.insert_`
((Ghost.reveal hash_spec)
(S.last (S.index (RV.as_seq hh0 hs) (U32.v lv)))
(Rgl?.r_repr (hreg hsz) hh0 acc)));
/// 4) Recursion
insert_ (lv + 1ul)
(Ghost.hide (Ghost.reveal i / 2ul)) (j / 2ul)
hs acc hash_fun;
let hh3 = HST.get () in
// 4-0) Memory safety brought from the postcondition of the recursion
assert (RV.rv_inv hh3 hs);
assert (Rgl?.r_inv (hreg hsz) hh3 acc);
assert (modifies (loc_union
(loc_union
(RV.rv_loc_elems hh0 hs (lv + 1ul) (V.size_of hs))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs)))
(B.loc_all_regions_from false (B.frameOf acc)))
hh2 hh3);
assert (modifies
(loc_union
(loc_union
(loc_union
(RV.rs_loc_elem (hvreg hsz) (V.as_seq hh0 hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul)))
(B.loc_all_regions_from false (B.frameOf acc)))
(loc_union
(loc_union
(RV.rv_loc_elems hh0 hs (lv + 1ul) (V.size_of hs))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs)))
(B.loc_all_regions_from false (B.frameOf acc))))
hh0 hh3);
// 4-1) For `mt_safe_elts`
rv_inv_rv_elems_reg hh2 hs (lv + 1ul) (V.size_of hs);
RV.rv_loc_elems_included hh2 hs (lv + 1ul) (V.size_of hs);
assert (loc_disjoint
(V.loc_vector_within hs lv (lv + 1ul))
(RV.rv_loc_elems hh2 hs (lv + 1ul) (V.size_of hs)));
assert (loc_disjoint
(V.loc_vector_within hs lv (lv + 1ul))
(B.loc_all_regions_from false (B.frameOf acc)));
V.get_preserved hs lv
(loc_union
(loc_union
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs))
(RV.rv_loc_elems hh2 hs (lv + 1ul) (V.size_of hs)))
(B.loc_all_regions_from false (B.frameOf acc)))
hh2 hh3;
assert (V.size_of (V.get hh3 hs lv) ==
j + 1ul - offset_of (Ghost.reveal i)); // head preserved
assert (mt_safe_elts hh3 (lv + 1ul) hs
(Ghost.reveal i / 2ul) (j / 2ul + 1ul)); // tail by recursion
mt_safe_elts_constr hh3 lv hs (Ghost.reveal i) (j + 1ul);
assert (mt_safe_elts hh3 lv hs (Ghost.reveal i) (j + 1ul));
// 4-2) Correctness
mt_safe_elts_spec hh2 (lv + 1ul) hs (Ghost.reveal i / 2ul) (j / 2ul);
assert (S.equal (RV.as_seq hh3 hs)
(MTH.insert_ #(U32.v hsz) #(Ghost.reveal hash_spec) (U32.v lv + 1) (U32.v (Ghost.reveal i) / 2) (U32.v j / 2)
(RV.as_seq hh2 hs) (Rgl?.r_repr (hreg hsz) hh2 acc)));
mt_safe_elts_spec hh0 lv hs (Ghost.reveal i) j;
MTH.insert_rec #(U32.v hsz) #(Ghost.reveal hash_spec) (U32.v lv) (U32.v (Ghost.reveal i)) (U32.v j)
(RV.as_seq hh0 hs) (Rgl?.r_repr (hreg hsz) hh0 acc);
assert (S.equal (RV.as_seq hh3 hs)
(MTH.insert_ #(U32.v hsz) #(Ghost.reveal hash_spec) (U32.v lv) (U32.v (Ghost.reveal i)) (U32.v j)
(RV.as_seq hh0 hs) (Rgl?.r_repr (hreg hsz) hh0 acc))))
else (insert_index_helper_even lv j;
// memory safety
assert (mt_safe_elts hh1 (lv + 1ul) hs (Ghost.reveal i / 2ul) ((j + 1ul) / 2ul));
mt_safe_elts_constr hh1 lv hs (Ghost.reveal i) (j + 1ul);
assert (mt_safe_elts hh1 lv hs (Ghost.reveal i) (j + 1ul));
assert (modifies
(loc_union
(RV.rs_loc_elem (hvreg hsz) (V.as_seq hh0 hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul)))
hh0 hh1);
insert_modifies_union_loc_weakening
(loc_union
(RV.rs_loc_elem (hvreg hsz) (V.as_seq hh0 hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul)))
(B.loc_all_regions_from false (B.frameOf acc))
(loc_union
(loc_union
(RV.rv_loc_elems hh0 hs (lv + 1ul) (V.size_of hs))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs)))
(B.loc_all_regions_from false (B.frameOf acc)))
hh0 hh1;
// correctness
mt_safe_elts_spec hh0 lv hs (Ghost.reveal i) j;
MTH.insert_base #(U32.v hsz) #(Ghost.reveal hash_spec) (U32.v lv) (U32.v (Ghost.reveal i)) (U32.v j)
(RV.as_seq hh0 hs) (Rgl?.r_repr (hreg hsz) hh0 acc);
assert (S.equal (RV.as_seq hh1 hs)
(MTH.insert_ #(U32.v hsz) #(Ghost.reveal hash_spec) (U32.v lv) (U32.v (Ghost.reveal i)) (U32.v j)
(RV.as_seq hh0 hs) (Rgl?.r_repr (hreg hsz) hh0 acc))));
/// 5) Proving the postcondition after recursion
let hh4 = HST.get () in
// 5-1) For the `modifies` postcondition.
assert (modifies
(loc_union
(loc_union
(loc_union
(RV.rs_loc_elem (hvreg hsz) (V.as_seq hh0 hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul)))
(B.loc_all_regions_from false (B.frameOf acc)))
(loc_union
(loc_union
(RV.rv_loc_elems hh0 hs (lv + 1ul) (V.size_of hs))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs)))
(B.loc_all_regions_from false (B.frameOf acc))))
hh0 hh4);
insert_modifies_rec_helper
lv hs (B.loc_all_regions_from false (B.frameOf acc)) hh0;
// 5-2) For `mt_safe_elts`
assert (mt_safe_elts hh4 lv hs (Ghost.reveal i) (j + 1ul));
// 5-3) Preservation
assert (RV.rv_inv hh4 hs);
assert (Rgl?.r_inv (hreg hsz) hh4 acc);
// 5-4) Correctness
mt_safe_elts_spec hh0 lv hs (Ghost.reveal i) j;
assert (S.equal (RV.as_seq hh4 hs)
(MTH.insert_ #(U32.v hsz) #hash_spec (U32.v lv) (U32.v (Ghost.reveal i)) (U32.v j)
(RV.as_seq hh0 hs) (Rgl?.r_repr (hreg hsz) hh0 acc))) // QED
#pop-options
private inline_for_extraction
val mt_insert_pre_nst: mtv:merkle_tree -> v:hash #(MT?.hash_size mtv) -> Tot bool
let mt_insert_pre_nst mtv v = mt_not_full_nst mtv && add64_fits (MT?.offset mtv) ((MT?.j mtv) + 1ul)
val mt_insert_pre: #hsz:Ghost.erased hash_size_t -> mt:const_mt_p -> v:hash #hsz -> HST.ST bool
(requires (fun h0 -> mt_safe h0 (CB.cast mt) /\ (MT?.hash_size (B.get h0 (CB.cast mt) 0)) = Ghost.reveal hsz))
(ensures (fun _ _ _ -> True))
let mt_insert_pre #hsz mt v =
let mt = !*(CB.cast mt) in
assert (MT?.hash_size mt == (MT?.hash_size mt));
mt_insert_pre_nst mt v
// `mt_insert` inserts a hash into a Merkle tree. Note that this operation
// modifies the contents of `v`, since it uses `v` as an accumulator during
// insertion.
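// A rough trace of the cascading behaviour of `insert_` (an informal sketch,
// not part of the specification): with `i = 0` and `j = 3`, inserting the
// fourth leaf appends it to level 0; since `j` is odd there, the accumulator
// becomes `hash(leaf 2, leaf 3)` and is inserted at level 1, where `j / 2 = 1`
// is odd again, so `hash(hash(leaf 0, leaf 1), hash(leaf 2, leaf 3))` is
// finally inserted at level 2. The cascade stops once `j` is even at the
// current level.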
#push-options "--z3rlimit 100 --initial_fuel 1 --max_fuel 1 --initial_ifuel 1 --max_ifuel 1"
val mt_insert:
hsz:Ghost.erased hash_size_t ->
mt:mt_p -> v:hash #hsz ->
HST.ST unit
(requires (fun h0 ->
let dmt = B.get h0 mt 0 in
mt_safe h0 mt /\
Rgl?.r_inv (hreg hsz) h0 v /\
HH.disjoint (B.frameOf mt) (B.frameOf v) /\
MT?.hash_size dmt = Ghost.reveal hsz /\
mt_insert_pre_nst dmt v))
(ensures (fun h0 _ h1 ->
// memory safety
modifies (loc_union
(mt_loc mt)
(B.loc_all_regions_from false (B.frameOf v)))
h0 h1 /\
mt_safe h1 mt /\
// correctness
MT?.hash_size (B.get h1 mt 0) = Ghost.reveal hsz /\
mt_lift h1 mt == MTH.mt_insert (mt_lift h0 mt) (Rgl?.r_repr (hreg hsz) h0 v)))
#pop-options
#push-options "--z3rlimit 40"
let mt_insert hsz mt v =
let hh0 = HST.get () in
let mtv = !*mt in
let hs = MT?.hs mtv in
let hsz = MT?.hash_size mtv in
insert_ #hsz #(Ghost.reveal (MT?.hash_spec mtv)) 0ul (Ghost.hide (MT?.i mtv)) (MT?.j mtv) hs v (MT?.hash_fun mtv);
let hh1 = HST.get () in
RV.rv_loc_elems_included hh0 (MT?.hs mtv) 0ul (V.size_of hs);
V.loc_vector_within_included hs 0ul (V.size_of hs);
RV.rv_inv_preserved
(MT?.rhs mtv)
(loc_union
(loc_union
(RV.rv_loc_elems hh0 hs 0ul (V.size_of hs))
(V.loc_vector_within hs 0ul (V.size_of hs)))
(B.loc_all_regions_from false (B.frameOf v)))
hh0 hh1;
RV.as_seq_preserved
(MT?.rhs mtv)
(loc_union
(loc_union
(RV.rv_loc_elems hh0 hs 0ul (V.size_of hs))
(V.loc_vector_within hs 0ul (V.size_of hs)))
(B.loc_all_regions_from false (B.frameOf v)))
hh0 hh1;
Rgl?.r_sep (hreg hsz) (MT?.mroot mtv)
(loc_union
(loc_union
(RV.rv_loc_elems hh0 hs 0ul (V.size_of hs))
(V.loc_vector_within hs 0ul (V.size_of hs)))
(B.loc_all_regions_from false (B.frameOf v)))
hh0 hh1;
mt *= MT (MT?.hash_size mtv)
(MT?.offset mtv)
(MT?.i mtv)
(MT?.j mtv + 1ul)
(MT?.hs mtv)
             false // `rhs` is invalidated (no longer up to date) right after an insertion.
(MT?.rhs mtv)
(MT?.mroot mtv)
(MT?.hash_spec mtv)
(MT?.hash_fun mtv);
let hh2 = HST.get () in
RV.rv_inv_preserved
(MT?.hs mtv) (B.loc_buffer mt) hh1 hh2;
RV.rv_inv_preserved
(MT?.rhs mtv) (B.loc_buffer mt) hh1 hh2;
RV.as_seq_preserved
(MT?.hs mtv) (B.loc_buffer mt) hh1 hh2;
RV.as_seq_preserved
(MT?.rhs mtv) (B.loc_buffer mt) hh1 hh2;
Rgl?.r_sep (hreg hsz) (MT?.mroot mtv) (B.loc_buffer mt) hh1 hh2;
mt_safe_elts_preserved
0ul (MT?.hs mtv) (MT?.i mtv) (MT?.j mtv + 1ul) (B.loc_buffer mt)
hh1 hh2
#pop-options
// `mt_create` initializes a Merkle tree with a given initial hash `init`.
// A valid Merkle tree must contain at least one element.
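// A typical usage sketch (informal): create the tree with
// `mt_create_custom hsz hash_spec r init hash_fun`, then repeatedly call
// `mt_insert hsz mt h` whenever `mt_insert_pre` holds, and finally read the
// root back with `mt_get_root` (which takes a const pointer to the tree).
// Here `init`, `h`, and `root` stand for hashes allocated by the caller.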
val mt_create_custom:
hsz:hash_size_t ->
hash_spec:Ghost.erased (MTS.hash_fun_t #(U32.v hsz)) ->
r:HST.erid -> init:hash #hsz -> hash_fun:hash_fun_t #hsz #hash_spec -> HST.ST mt_p
(requires (fun h0 ->
Rgl?.r_inv (hreg hsz) h0 init /\
HH.disjoint r (B.frameOf init)))
(ensures (fun h0 mt h1 ->
// memory safety
modifies (loc_union (mt_loc mt) (B.loc_all_regions_from false (B.frameOf init))) h0 h1 /\
mt_safe h1 mt /\
// correctness
MT?.hash_size (B.get h1 mt 0) = hsz /\
mt_lift h1 mt == MTH.mt_create (U32.v hsz) (Ghost.reveal hash_spec) (Rgl?.r_repr (hreg hsz) h0 init)))
#push-options "--z3rlimit 40"
let mt_create_custom hsz hash_spec r init hash_fun =
let hh0 = HST.get () in
let mt = create_empty_mt hsz hash_spec hash_fun r in
mt_insert hsz mt init;
let hh2 = HST.get () in
mt
#pop-options
/// Construction and Destruction of paths
// Since each element pointer in a `path` comes from the target Merkle tree and
// each element has a different location in `MT?.hs` (and thus a different
// region id), we cannot use the regionality property for `path`s. Hence we
// manually define the invariants and the representation here.
noeq type path =
| Path: hash_size:hash_size_t ->
hashes:V.vector (hash #hash_size) ->
path
type path_p = B.pointer path
type const_path_p = const_pointer path
private
let phashes (h:HS.mem) (p:path_p)
: GTot (V.vector (hash #(Path?.hash_size (B.get h p 0))))
= Path?.hashes (B.get h p 0)
// Memory safety of a path as an invariant
inline_for_extraction noextract
val path_safe:
h:HS.mem -> mtr:HH.rid -> p:path_p -> GTot Type0
let path_safe h mtr p =
B.live h p /\ B.freeable p /\
V.live h (phashes h p) /\ V.freeable (phashes h p) /\
HST.is_eternal_region (V.frameOf (phashes h p)) /\
(let hsz = Path?.hash_size (B.get h p 0) in
V.forall_all h (phashes h p)
(fun hp -> Rgl?.r_inv (hreg hsz) h hp /\
HH.includes mtr (Rgl?.region_of (hreg hsz) hp)) /\
HH.extends (V.frameOf (phashes h p)) (B.frameOf p) /\
HH.disjoint mtr (B.frameOf p))
val path_loc: path_p -> GTot loc
let path_loc p = B.loc_all_regions_from false (B.frameOf p)
val lift_path_:
#hsz:hash_size_t ->
h:HS.mem ->
hs:S.seq (hash #hsz) ->
i:nat ->
j:nat{
i <= j /\ j <= S.length hs /\
V.forall_seq hs i j (fun hp -> Rgl?.r_inv (hreg hsz) h hp)} ->
GTot (hp:MTH.path #(U32.v hsz) {S.length hp = j - i}) (decreases j)
let rec lift_path_ #hsz h hs i j =
if i = j then S.empty
else (S.snoc (lift_path_ h hs i (j - 1))
(Rgl?.r_repr (hreg hsz) h (S.index hs (j - 1))))
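// Unfolding the recursion, e.g. for `i = 0` and `j = 3` (informally):
// `lift_path_ h hs 0 3` is the sequence
//   [Rgl?.r_repr (hreg hsz) h (S.index hs 0);
//    Rgl?.r_repr (hreg hsz) h (S.index hs 1);
//    Rgl?.r_repr (hreg hsz) h (S.index hs 2)]
// i.e. the representations appear in increasing index order.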
// Representation of a path
val lift_path:
#hsz:hash_size_t ->
h:HS.mem -> mtr:HH.rid -> p:path_p {path_safe h mtr p /\ (Path?.hash_size (B.get h p 0)) = hsz} ->
GTot (hp:MTH.path #(U32.v hsz) {S.length hp = U32.v (V.size_of (phashes h p))})
let lift_path #hsz h mtr p =
lift_path_ h (V.as_seq h (phashes h p))
0 (S.length (V.as_seq h (phashes h p)))
val lift_path_index_:
#hsz:hash_size_t ->
h:HS.mem ->
hs:S.seq (hash #hsz) ->
i:nat -> j:nat{i <= j && j <= S.length hs} ->
k:nat{i <= k && k < j} ->
Lemma (requires (V.forall_seq hs i j (fun hp -> Rgl?.r_inv (hreg hsz) h hp)))
(ensures (Rgl?.r_repr (hreg hsz) h (S.index hs k) ==
S.index (lift_path_ h hs i j) (k - i)))
(decreases j)
[SMTPat (S.index (lift_path_ h hs i j) (k - i))]
#push-options "--initial_fuel 1 --max_fuel 1 --initial_ifuel 1 --max_ifuel 1"
let rec lift_path_index_ #hsz h hs i j k =
if i = j then ()
else if k = j - 1 then ()
else lift_path_index_ #hsz h hs i (j - 1) k
#pop-options
val lift_path_index:
h:HS.mem -> mtr:HH.rid ->
p:path_p -> i:uint32_t ->
Lemma (requires (path_safe h mtr p /\
i < V.size_of (phashes h p)))
(ensures (let hsz = Path?.hash_size (B.get h p 0) in
Rgl?.r_repr (hreg hsz) h (V.get h (phashes h p) i) ==
S.index (lift_path #(hsz) h mtr p) (U32.v i)))
let lift_path_index h mtr p i =
lift_path_index_ h (V.as_seq h (phashes h p))
0 (S.length (V.as_seq h (phashes h p))) (U32.v i)
val lift_path_eq:
#hsz:hash_size_t ->
h:HS.mem ->
hs1:S.seq (hash #hsz) -> hs2:S.seq (hash #hsz) ->
i:nat -> j:nat ->
Lemma (requires (i <= j /\ j <= S.length hs1 /\ j <= S.length hs2 /\
S.equal (S.slice hs1 i j) (S.slice hs2 i j) /\
V.forall_seq hs1 i j (fun hp -> Rgl?.r_inv (hreg hsz) h hp) /\
V.forall_seq hs2 i j (fun hp -> Rgl?.r_inv (hreg hsz) h hp)))
(ensures (S.equal (lift_path_ h hs1 i j) (lift_path_ h hs2 i j)))
let lift_path_eq #hsz h hs1 hs2 i j =
assert (forall (k:nat{i <= k && k < j}).
S.index (lift_path_ h hs1 i j) (k - i) ==
Rgl?.r_repr (hreg hsz) h (S.index hs1 k));
assert (forall (k:nat{i <= k && k < j}).
S.index (lift_path_ h hs2 i j) (k - i) ==
Rgl?.r_repr (hreg hsz) h (S.index hs2 k));
assert (forall (k:nat{k < j - i}).
S.index (lift_path_ h hs1 i j) k ==
Rgl?.r_repr (hreg hsz) h (S.index hs1 (k + i)));
assert (forall (k:nat{k < j - i}).
S.index (lift_path_ h hs2 i j) k ==
Rgl?.r_repr (hreg hsz) h (S.index hs2 (k + i)));
assert (forall (k:nat{k < j - i}).
S.index (S.slice hs1 i j) k == S.index (S.slice hs2 i j) k);
assert (forall (k:nat{i <= k && k < j}).
S.index (S.slice hs1 i j) (k - i) == S.index (S.slice hs2 i j) (k - i))
private
val path_safe_preserved_:
#hsz:hash_size_t ->
mtr:HH.rid -> hs:S.seq (hash #hsz) ->
i:nat -> j:nat{i <= j && j <= S.length hs} ->
dl:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma
(requires (V.forall_seq hs i j
(fun hp ->
Rgl?.r_inv (hreg hsz) h0 hp /\
HH.includes mtr (Rgl?.region_of (hreg hsz) hp)) /\
loc_disjoint dl (B.loc_all_regions_from false mtr) /\
modifies dl h0 h1))
(ensures (V.forall_seq hs i j
(fun hp ->
Rgl?.r_inv (hreg hsz) h1 hp /\
HH.includes mtr (Rgl?.region_of (hreg hsz) hp))))
(decreases j)
let rec path_safe_preserved_ #hsz mtr hs i j dl h0 h1 =
if i = j then ()
else (assert (loc_includes
(B.loc_all_regions_from false mtr)
(B.loc_all_regions_from false
(Rgl?.region_of (hreg hsz) (S.index hs (j - 1)))));
Rgl?.r_sep (hreg hsz) (S.index hs (j - 1)) dl h0 h1;
path_safe_preserved_ mtr hs i (j - 1) dl h0 h1)
val path_safe_preserved:
mtr:HH.rid -> p:path_p ->
dl:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (path_safe h0 mtr p /\
loc_disjoint dl (path_loc p) /\
loc_disjoint dl (B.loc_all_regions_from false mtr) /\
modifies dl h0 h1))
(ensures (path_safe h1 mtr p))
let path_safe_preserved mtr p dl h0 h1 =
assert (loc_includes (path_loc p) (B.loc_buffer p));
assert (loc_includes (path_loc p) (V.loc_vector (phashes h0 p)));
path_safe_preserved_
mtr (V.as_seq h0 (phashes h0 p))
0 (S.length (V.as_seq h0 (phashes h0 p))) dl h0 h1
val path_safe_init_preserved:
mtr:HH.rid -> p:path_p ->
dl:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (path_safe h0 mtr p /\
V.size_of (phashes h0 p) = 0ul /\
B.loc_disjoint dl (path_loc p) /\
modifies dl h0 h1))
(ensures (path_safe h1 mtr p /\
V.size_of (phashes h1 p) = 0ul))
let path_safe_init_preserved mtr p dl h0 h1 =
assert (loc_includes (path_loc p) (B.loc_buffer p));
assert (loc_includes (path_loc p) (V.loc_vector (phashes h0 p)))
val path_preserved_:
#hsz:hash_size_t ->
mtr:HH.rid ->
hs:S.seq (hash #hsz) ->
i:nat -> j:nat{i <= j && j <= S.length hs} ->
dl:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (V.forall_seq hs i j
(fun hp -> Rgl?.r_inv (hreg hsz) h0 hp /\
HH.includes mtr (Rgl?.region_of (hreg hsz) hp)) /\
loc_disjoint dl (B.loc_all_regions_from false mtr) /\
modifies dl h0 h1))
(ensures (path_safe_preserved_ mtr hs i j dl h0 h1;
S.equal (lift_path_ h0 hs i j)
(lift_path_ h1 hs i j)))
(decreases j)
#push-options "--initial_fuel 1 --max_fuel 1 --initial_ifuel 1 --max_ifuel 1"
let rec path_preserved_ #hsz mtr hs i j dl h0 h1 =
if i = j then ()
else (path_safe_preserved_ mtr hs i (j - 1) dl h0 h1;
path_preserved_ mtr hs i (j - 1) dl h0 h1;
assert (loc_includes
(B.loc_all_regions_from false mtr)
(B.loc_all_regions_from false
(Rgl?.region_of (hreg hsz) (S.index hs (j - 1)))));
Rgl?.r_sep (hreg hsz) (S.index hs (j - 1)) dl h0 h1)
#pop-options
val path_preserved:
mtr:HH.rid -> p:path_p ->
dl:loc -> h0:HS.mem -> h1:HS.mem ->
Lemma (requires (path_safe h0 mtr p /\
loc_disjoint dl (path_loc p) /\
loc_disjoint dl (B.loc_all_regions_from false mtr) /\
modifies dl h0 h1))
(ensures (path_safe_preserved mtr p dl h0 h1;
let hsz0 = (Path?.hash_size (B.get h0 p 0)) in
let hsz1 = (Path?.hash_size (B.get h1 p 0)) in
let b:MTH.path = lift_path #hsz0 h0 mtr p in
let a:MTH.path = lift_path #hsz1 h1 mtr p in
hsz0 = hsz1 /\ S.equal b a))
let path_preserved mtr p dl h0 h1 =
assert (loc_includes (path_loc p) (B.loc_buffer p));
assert (loc_includes (path_loc p) (V.loc_vector (phashes h0 p)));
path_preserved_ mtr (V.as_seq h0 (phashes h0 p))
0 (S.length (V.as_seq h0 (phashes h0 p)))
dl h0 h1
val init_path:
hsz:hash_size_t ->
mtr:HH.rid -> r:HST.erid ->
HST.ST path_p
(requires (fun h0 -> HH.disjoint mtr r))
(ensures (fun h0 p h1 ->
// memory safety
path_safe h1 mtr p /\
// correctness
Path?.hash_size (B.get h1 p 0) = hsz /\
S.equal (lift_path #hsz h1 mtr p) S.empty))
let init_path hsz mtr r =
let nrid = HST.new_region r in
(B.malloc r (Path hsz (rg_alloc (hvreg hsz) nrid)) 1ul)
val clear_path:
mtr:HH.rid -> p:path_p ->
HST.ST unit
(requires (fun h0 -> path_safe h0 mtr p))
(ensures (fun h0 _ h1 ->
// memory safety
path_safe h1 mtr p /\
// correctness
V.size_of (phashes h1 p) = 0ul /\
S.equal (lift_path #(Path?.hash_size (B.get h1 p 0)) h1 mtr p) S.empty))
let clear_path mtr p =
let pv = !*p in
p *= Path (Path?.hash_size pv) (V.clear (Path?.hashes pv))
val free_path:
p:path_p ->
HST.ST unit
(requires (fun h0 ->
B.live h0 p /\ B.freeable p /\
V.live h0 (phashes h0 p) /\ V.freeable (phashes h0 p) /\
HH.extends (V.frameOf (phashes h0 p)) (B.frameOf p)))
(ensures (fun h0 _ h1 ->
modifies (path_loc p) h0 h1))
let free_path p =
let pv = !*p in
V.free (Path?.hashes pv);
B.free p
/// Getting the Merkle root and path
// Construct "rightmost hashes" for a given (incomplete) Merkle tree.
// This function calculates the Merkle root as well, which is the final
// accumulator value.
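// An informal trace for a tree with 5 leaves (`lv = 0`, `i = 0`, `j = 5`,
// `actd = false`): at level 0, `j` is odd and `actd` is false, so `acc`
// becomes a copy of leaf 4; at level 1, `j = 2` is even and we just recurse;
// at level 2, `j = 1` is odd and `actd` is now true, so `rhs[2] := acc` and
// `acc := hash hs[2][0] acc`, where `hs[2][0]` covers leaves 0..3; the
// recursion stops at `j = 0`, leaving `acc` holding the Merkle root.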
private
val construct_rhs:
#hsz:hash_size_t ->
#hash_spec:Ghost.erased (MTS.hash_fun_t #(U32.v hsz)) ->
lv:uint32_t{lv <= merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
rhs:hash_vec #hsz {V.size_of rhs = merkle_tree_size_lg} ->
i:index_t ->
j:index_t{i <= j && (U32.v j) < pow2 (32 - U32.v lv)} ->
acc:hash #hsz ->
actd:bool ->
hash_fun:hash_fun_t #hsz #(Ghost.reveal hash_spec) ->
HST.ST unit
(requires (fun h0 ->
RV.rv_inv h0 hs /\ RV.rv_inv h0 rhs /\
HH.disjoint (V.frameOf hs) (V.frameOf rhs) /\
Rgl?.r_inv (hreg hsz) h0 acc /\
HH.disjoint (B.frameOf acc) (V.frameOf hs) /\
HH.disjoint (B.frameOf acc) (V.frameOf rhs) /\
mt_safe_elts #hsz h0 lv hs i j))
(ensures (fun h0 _ h1 ->
// memory safety
modifies (loc_union
(RV.loc_rvector rhs)
(B.loc_all_regions_from false (B.frameOf acc)))
h0 h1 /\
RV.rv_inv h1 rhs /\
Rgl?.r_inv (hreg hsz) h1 acc /\
// correctness
(mt_safe_elts_spec #hsz h0 lv hs i j;
MTH.construct_rhs #(U32.v hsz) #(Ghost.reveal hash_spec)
(U32.v lv)
(Rgl?.r_repr (hvvreg hsz) h0 hs)
(Rgl?.r_repr (hvreg hsz) h0 rhs)
(U32.v i) (U32.v j)
(Rgl?.r_repr (hreg hsz) h0 acc) actd ==
(Rgl?.r_repr (hvreg hsz) h1 rhs, Rgl?.r_repr (hreg hsz) h1 acc)
)))
(decreases (U32.v j))
#push-options "--z3rlimit 250 --initial_fuel 1 --max_fuel 1 --initial_ifuel 1 --max_ifuel 1"
let rec construct_rhs #hsz #hash_spec lv hs rhs i j acc actd hash_fun =
let hh0 = HST.get () in
if j = 0ul then begin
assert (RV.rv_inv hh0 hs);
assert (mt_safe_elts #hsz hh0 lv hs i j);
mt_safe_elts_spec #hsz hh0 lv hs 0ul 0ul;
assert (MTH.hs_wf_elts #(U32.v hsz)
(U32.v lv) (RV.as_seq hh0 hs)
(U32.v i) (U32.v j));
let hh1 = HST.get() in
assert (MTH.construct_rhs #(U32.v hsz) #(Ghost.reveal hash_spec)
(U32.v lv)
(Rgl?.r_repr (hvvreg hsz) hh0 hs)
(Rgl?.r_repr (hvreg hsz) hh0 rhs)
(U32.v i) (U32.v j)
(Rgl?.r_repr (hreg hsz) hh0 acc) actd ==
(Rgl?.r_repr (hvreg hsz) hh1 rhs, Rgl?.r_repr (hreg hsz) hh1 acc))
end
else
let ofs = offset_of i in
begin
(if j % 2ul = 0ul
then begin
Math.Lemmas.pow2_double_mult (32 - U32.v lv - 1);
mt_safe_elts_rec #hsz hh0 lv hs i j;
construct_rhs #hsz #hash_spec (lv + 1ul) hs rhs (i / 2ul) (j / 2ul) acc actd hash_fun;
let hh1 = HST.get () in
// correctness
mt_safe_elts_spec #hsz hh0 lv hs i j;
MTH.construct_rhs_even #(U32.v hsz) #hash_spec
(U32.v lv) (Rgl?.r_repr (hvvreg hsz) hh0 hs) (Rgl?.r_repr (hvreg hsz) hh0 rhs)
(U32.v i) (U32.v j) (Rgl?.r_repr (hreg hsz) hh0 acc) actd;
assert (MTH.construct_rhs #(U32.v hsz) #hash_spec
(U32.v lv)
(Rgl?.r_repr (hvvreg hsz) hh0 hs)
(Rgl?.r_repr (hvreg hsz) hh0 rhs)
(U32.v i) (U32.v j)
(Rgl?.r_repr (hreg hsz) hh0 acc)
actd ==
(Rgl?.r_repr (hvreg hsz) hh1 rhs, Rgl?.r_repr (hreg hsz) hh1 acc))
end
else begin
if actd
then begin
RV.assign_copy (hcpy hsz) rhs lv acc;
let hh1 = HST.get () in
// memory safety
Rgl?.r_sep (hreg hsz) acc
(B.loc_all_regions_from false (V.frameOf rhs)) hh0 hh1;
RV.rv_inv_preserved
hs (B.loc_all_regions_from false (V.frameOf rhs))
hh0 hh1;
RV.as_seq_preserved
hs (B.loc_all_regions_from false (V.frameOf rhs))
hh0 hh1;
RV.rv_inv_preserved
(V.get hh0 hs lv) (B.loc_all_regions_from false (V.frameOf rhs))
hh0 hh1;
V.loc_vector_within_included hs lv (V.size_of hs);
mt_safe_elts_preserved lv hs i j
(B.loc_all_regions_from false (V.frameOf rhs))
hh0 hh1;
mt_safe_elts_head hh1 lv hs i j;
hash_vv_rv_inv_r_inv hh1 hs lv (j - 1ul - ofs);
// correctness
assert (S.equal (RV.as_seq hh1 rhs)
(S.upd (RV.as_seq hh0 rhs) (U32.v lv)
(Rgl?.r_repr (hreg hsz) hh0 acc)));
hash_fun (V.index (V.index hs lv) (j - 1ul - ofs)) acc acc;
let hh2 = HST.get () in
// memory safety
mt_safe_elts_preserved lv hs i j
(B.loc_all_regions_from false (B.frameOf acc)) hh1 hh2;
RV.rv_inv_preserved
hs (B.loc_region_only false (B.frameOf acc)) hh1 hh2;
RV.rv_inv_preserved
rhs (B.loc_region_only false (B.frameOf acc)) hh1 hh2;
RV.as_seq_preserved
hs (B.loc_region_only false (B.frameOf acc)) hh1 hh2;
RV.as_seq_preserved
rhs (B.loc_region_only false (B.frameOf acc)) hh1 hh2;
// correctness
hash_vv_as_seq_get_index hh0 hs lv (j - 1ul - ofs);
assert (Rgl?.r_repr (hreg hsz) hh2 acc ==
(Ghost.reveal hash_spec) (S.index (S.index (RV.as_seq hh0 hs) (U32.v lv))
(U32.v j - 1 - U32.v ofs))
(Rgl?.r_repr (hreg hsz) hh0 acc))
end
else begin
mt_safe_elts_head hh0 lv hs i j;
hash_vv_rv_inv_r_inv hh0 hs lv (j - 1ul - ofs);
hash_vv_rv_inv_disjoint hh0 hs lv (j - 1ul - ofs) (B.frameOf acc);
Cpy?.copy (hcpy hsz) hsz (V.index (V.index hs lv) (j - 1ul - ofs)) acc;
let hh1 = HST.get () in
// memory safety
V.loc_vector_within_included hs lv (V.size_of hs);
mt_safe_elts_preserved lv hs i j
(B.loc_all_regions_from false (B.frameOf acc)) hh0 hh1;
RV.rv_inv_preserved
hs (B.loc_all_regions_from false (B.frameOf acc)) hh0 hh1;
RV.rv_inv_preserved
rhs (B.loc_all_regions_from false (B.frameOf acc)) hh0 hh1;
RV.as_seq_preserved
hs (B.loc_all_regions_from false (B.frameOf acc)) hh0 hh1;
RV.as_seq_preserved
rhs (B.loc_all_regions_from false (B.frameOf acc)) hh0 hh1;
// correctness
hash_vv_as_seq_get_index hh0 hs lv (j - 1ul - ofs);
assert (Rgl?.r_repr (hreg hsz) hh1 acc ==
S.index (S.index (RV.as_seq hh0 hs) (U32.v lv))
(U32.v j - 1 - U32.v ofs))
end;
let hh3 = HST.get () in
assert (S.equal (RV.as_seq hh3 hs) (RV.as_seq hh0 hs));
assert (S.equal (RV.as_seq hh3 rhs)
(if actd
then S.upd (RV.as_seq hh0 rhs) (U32.v lv)
(Rgl?.r_repr (hreg hsz) hh0 acc)
else RV.as_seq hh0 rhs));
assert (Rgl?.r_repr (hreg hsz) hh3 acc ==
(if actd
then (Ghost.reveal hash_spec) (S.index (S.index (RV.as_seq hh0 hs) (U32.v lv))
(U32.v j - 1 - U32.v ofs))
(Rgl?.r_repr (hreg hsz) hh0 acc)
else S.index (S.index (RV.as_seq hh0 hs) (U32.v lv))
(U32.v j - 1 - U32.v ofs)));
mt_safe_elts_rec hh3 lv hs i j;
construct_rhs #hsz #hash_spec (lv + 1ul) hs rhs (i / 2ul) (j / 2ul) acc true hash_fun;
let hh4 = HST.get () in
mt_safe_elts_spec hh3 (lv + 1ul) hs (i / 2ul) (j / 2ul);
assert (MTH.construct_rhs #(U32.v hsz) #hash_spec
(U32.v lv + 1)
(Rgl?.r_repr (hvvreg hsz) hh3 hs)
(Rgl?.r_repr (hvreg hsz) hh3 rhs)
(U32.v i / 2) (U32.v j / 2)
(Rgl?.r_repr (hreg hsz) hh3 acc) true ==
(Rgl?.r_repr (hvreg hsz) hh4 rhs, Rgl?.r_repr (hreg hsz) hh4 acc));
mt_safe_elts_spec hh0 lv hs i j;
MTH.construct_rhs_odd #(U32.v hsz) #hash_spec
(U32.v lv) (Rgl?.r_repr (hvvreg hsz) hh0 hs) (Rgl?.r_repr (hvreg hsz) hh0 rhs)
(U32.v i) (U32.v j) (Rgl?.r_repr (hreg hsz) hh0 acc) actd;
assert (MTH.construct_rhs #(U32.v hsz) #hash_spec
(U32.v lv)
(Rgl?.r_repr (hvvreg hsz) hh0 hs)
(Rgl?.r_repr (hvreg hsz) hh0 rhs)
(U32.v i) (U32.v j)
(Rgl?.r_repr (hreg hsz) hh0 acc) actd ==
(Rgl?.r_repr (hvreg hsz) hh4 rhs, Rgl?.r_repr (hreg hsz) hh4 acc))
end)
end
#pop-options
private inline_for_extraction
val mt_get_root_pre_nst: mtv:merkle_tree -> rt:hash #(MT?.hash_size mtv) -> Tot bool
let mt_get_root_pre_nst mtv rt = true
val mt_get_root_pre:
#hsz:Ghost.erased hash_size_t ->
mt:const_mt_p ->
rt:hash #hsz ->
HST.ST bool
(requires (fun h0 ->
let mt = CB.cast mt in
MT?.hash_size (B.get h0 mt 0) = Ghost.reveal hsz /\
mt_safe h0 mt /\ Rgl?.r_inv (hreg hsz) h0 rt /\
HH.disjoint (B.frameOf mt) (B.frameOf rt)))
(ensures (fun _ _ _ -> True))
let mt_get_root_pre #hsz mt rt =
let mt = CB.cast mt in
let mt = !*mt in
let hsz = MT?.hash_size mt in
assert (MT?.hash_size mt = hsz);
mt_get_root_pre_nst mt rt
// `mt_get_root` returns the Merkle root. If it has already been computed and
// the rightmost hashes are up to date, the cached root is returned
// immediately. Otherwise it calls `construct_rhs` to build the rightmost
// hashes and to compute the Merkle root along the way.
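// For example (informal): calling `mt_get_root` twice in a row with no
// insertion in between runs `construct_rhs` only once; the second call takes
// the `MT?.rhs_ok` branch and merely copies `MT?.mroot` into `rt`. Any
// `mt_insert` resets `rhs_ok` to false, which forces a rebuild of the
// rightmost hashes on the next call.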
val mt_get_root:
#hsz:Ghost.erased hash_size_t ->
mt:const_mt_p ->
rt:hash #hsz ->
HST.ST unit
(requires (fun h0 ->
let mt = CB.cast mt in
let dmt = B.get h0 mt 0 in
MT?.hash_size dmt = (Ghost.reveal hsz) /\
mt_get_root_pre_nst dmt rt /\
mt_safe h0 mt /\ Rgl?.r_inv (hreg hsz) h0 rt /\
HH.disjoint (B.frameOf mt) (B.frameOf rt)))
(ensures (fun h0 _ h1 ->
let mt = CB.cast mt in
// memory safety
modifies (loc_union
(mt_loc mt)
(B.loc_all_regions_from false (B.frameOf rt)))
h0 h1 /\
mt_safe h1 mt /\
(let mtv0 = B.get h0 mt 0 in
let mtv1 = B.get h1 mt 0 in
MT?.hash_size mtv0 = (Ghost.reveal hsz) /\
MT?.hash_size mtv1 = (Ghost.reveal hsz) /\
MT?.i mtv1 = MT?.i mtv0 /\ MT?.j mtv1 = MT?.j mtv0 /\
MT?.hs mtv1 == MT?.hs mtv0 /\ MT?.rhs mtv1 == MT?.rhs mtv0 /\
MT?.offset mtv1 == MT?.offset mtv0 /\
MT?.rhs_ok mtv1 = true /\
Rgl?.r_inv (hreg hsz) h1 rt /\
// correctness
MTH.mt_get_root (mt_lift h0 mt) (Rgl?.r_repr (hreg hsz) h0 rt) ==
(mt_lift h1 mt, Rgl?.r_repr (hreg hsz) h1 rt))))
#push-options "--z3rlimit 150 --initial_fuel 1 --max_fuel 1"
let mt_get_root #hsz mt rt =
let mt = CB.cast mt in
let hh0 = HST.get () in
let mtv = !*mt in
let prefix = MT?.offset mtv in
let i = MT?.i mtv in
let j = MT?.j mtv in
let hs = MT?.hs mtv in
let rhs = MT?.rhs mtv in
let mroot = MT?.mroot mtv in
let hash_size = MT?.hash_size mtv in
let hash_spec = MT?.hash_spec mtv in
let hash_fun = MT?.hash_fun mtv in
if MT?.rhs_ok mtv
then begin
Cpy?.copy (hcpy hash_size) hash_size mroot rt;
let hh1 = HST.get () in
mt_safe_preserved mt
(B.loc_all_regions_from false (Rgl?.region_of (hreg hsz) rt)) hh0 hh1;
mt_preserved mt
(B.loc_all_regions_from false (Rgl?.region_of (hreg hsz) rt)) hh0 hh1;
MTH.mt_get_root_rhs_ok_true
(mt_lift hh0 mt) (Rgl?.r_repr (hreg hsz) hh0 rt);
assert (MTH.mt_get_root (mt_lift hh0 mt) (Rgl?.r_repr (hreg hsz) hh0 rt) ==
(mt_lift hh1 mt, Rgl?.r_repr (hreg hsz) hh1 rt))
end
else begin
construct_rhs #hash_size #hash_spec 0ul hs rhs i j rt false hash_fun;
let hh1 = HST.get () in
// memory safety
assert (RV.rv_inv hh1 rhs);
assert (Rgl?.r_inv (hreg hsz) hh1 rt);
assert (B.live hh1 mt);
RV.rv_inv_preserved
hs (loc_union
(RV.loc_rvector rhs)
(B.loc_all_regions_from false (B.frameOf rt)))
hh0 hh1;
RV.as_seq_preserved
hs (loc_union
(RV.loc_rvector rhs)
(B.loc_all_regions_from false (B.frameOf rt)))
hh0 hh1;
V.loc_vector_within_included hs 0ul (V.size_of hs);
mt_safe_elts_preserved 0ul hs i j
(loc_union
(RV.loc_rvector rhs)
(B.loc_all_regions_from false (B.frameOf rt)))
hh0 hh1;
// correctness
mt_safe_elts_spec hh0 0ul hs i j;
assert (MTH.construct_rhs #(U32.v hash_size) #hash_spec 0
(Rgl?.r_repr (hvvreg hsz) hh0 hs)
(Rgl?.r_repr (hvreg hsz) hh0 rhs)
(U32.v i) (U32.v j)
(Rgl?.r_repr (hreg hsz) hh0 rt) false ==
(Rgl?.r_repr (hvreg hsz) hh1 rhs, Rgl?.r_repr (hreg hsz) hh1 rt));
Cpy?.copy (hcpy hash_size) hash_size rt mroot;
let hh2 = HST.get () in
// memory safety
RV.rv_inv_preserved
hs (B.loc_all_regions_from false (B.frameOf mroot))
hh1 hh2;
RV.rv_inv_preserved
rhs (B.loc_all_regions_from false (B.frameOf mroot))
hh1 hh2;
RV.as_seq_preserved
hs (B.loc_all_regions_from false (B.frameOf mroot))
hh1 hh2;
RV.as_seq_preserved
rhs (B.loc_all_regions_from false (B.frameOf mroot))
hh1 hh2;
B.modifies_buffer_elim
rt (B.loc_all_regions_from false (B.frameOf mroot))
hh1 hh2;
mt_safe_elts_preserved 0ul hs i j
(B.loc_all_regions_from false (B.frameOf mroot))
hh1 hh2;
// correctness
assert (Rgl?.r_repr (hreg hsz) hh2 mroot == Rgl?.r_repr (hreg hsz) hh1 rt);
mt *= MT hash_size prefix i j hs true rhs mroot hash_spec hash_fun;
let hh3 = HST.get () in
// memory safety
Rgl?.r_sep (hreg hsz) rt (B.loc_buffer mt) hh2 hh3;
RV.rv_inv_preserved hs (B.loc_buffer mt) hh2 hh3;
RV.rv_inv_preserved rhs (B.loc_buffer mt) hh2 hh3;
RV.as_seq_preserved hs (B.loc_buffer mt) hh2 hh3;
RV.as_seq_preserved rhs (B.loc_buffer mt) hh2 hh3;
Rgl?.r_sep (hreg hsz) mroot (B.loc_buffer mt) hh2 hh3;
mt_safe_elts_preserved 0ul hs i j
(B.loc_buffer mt) hh2 hh3;
assert (mt_safe hh3 mt);
// correctness
MTH.mt_get_root_rhs_ok_false
(mt_lift hh0 mt) (Rgl?.r_repr (hreg hsz) hh0 rt);
assert (MTH.mt_get_root (mt_lift hh0 mt) (Rgl?.r_repr (hreg hsz) hh0 rt) ==
(MTH.MT #(U32.v hash_size)
(U32.v i) (U32.v j)
(RV.as_seq hh0 hs)
true
(RV.as_seq hh1 rhs)
(Rgl?.r_repr (hreg hsz) hh1 rt)
hash_spec,
Rgl?.r_repr (hreg hsz) hh1 rt));
assert (MTH.mt_get_root (mt_lift hh0 mt) (Rgl?.r_repr (hreg hsz) hh0 rt) ==
(mt_lift hh3 mt, Rgl?.r_repr (hreg hsz) hh3 rt))
end
#pop-options
inline_for_extraction
val mt_path_insert:
#hsz:hash_size_t ->
mtr:HH.rid -> p:path_p -> hp:hash #hsz ->
HST.ST unit
(requires (fun h0 ->
path_safe h0 mtr p /\
not (V.is_full (phashes h0 p)) /\
Rgl?.r_inv (hreg hsz) h0 hp /\
HH.disjoint mtr (B.frameOf p) /\
HH.includes mtr (B.frameOf hp) /\
Path?.hash_size (B.get h0 p 0) = hsz))
(ensures (fun h0 _ h1 ->
// memory safety
modifies (path_loc p) h0 h1 /\
path_safe h1 mtr p /\
// correctness
(let hsz0 = Path?.hash_size (B.get h0 p 0) in
let hsz1 = Path?.hash_size (B.get h1 p 0) in
(let before:(S.seq (MTH.hash #(U32.v hsz0))) = lift_path h0 mtr p in
let after:(S.seq (MTH.hash #(U32.v hsz1))) = lift_path h1 mtr p in
V.size_of (phashes h1 p) = V.size_of (phashes h0 p) + 1ul /\
hsz = hsz0 /\ hsz = hsz1 /\
(let hspec:(S.seq (MTH.hash #(U32.v hsz))) = (MTH.path_insert #(U32.v hsz) before (Rgl?.r_repr (hreg hsz) h0 hp)) in
S.equal hspec after)))))
#push-options "--z3rlimit 20 --initial_fuel 1 --max_fuel 1"
let mt_path_insert #hsz mtr p hp =
let pth = !*p in
let pv = Path?.hashes pth in
let hh0 = HST.get () in
let ipv = V.insert pv hp in
let hh1 = HST.get () in
path_safe_preserved_
mtr (V.as_seq hh0 pv) 0 (S.length (V.as_seq hh0 pv))
(B.loc_all_regions_from false (V.frameOf ipv)) hh0 hh1;
path_preserved_
mtr (V.as_seq hh0 pv) 0 (S.length (V.as_seq hh0 pv))
(B.loc_all_regions_from false (V.frameOf ipv)) hh0 hh1;
Rgl?.r_sep (hreg hsz) hp
(B.loc_all_regions_from false (V.frameOf ipv)) hh0 hh1;
p *= Path hsz ipv;
let hh2 = HST.get () in
path_safe_preserved_
mtr (V.as_seq hh1 ipv) 0 (S.length (V.as_seq hh1 ipv))
(B.loc_region_only false (B.frameOf p)) hh1 hh2;
path_preserved_
mtr (V.as_seq hh1 ipv) 0 (S.length (V.as_seq hh1 ipv))
(B.loc_region_only false (B.frameOf p)) hh1 hh2;
Rgl?.r_sep (hreg hsz) hp
(B.loc_region_only false (B.frameOf p)) hh1 hh2;
assert (S.equal (lift_path hh2 mtr p)
(lift_path_ hh1 (S.snoc (V.as_seq hh0 pv) hp)
0 (S.length (V.as_seq hh1 ipv))));
lift_path_eq hh1 (S.snoc (V.as_seq hh0 pv) hp) (V.as_seq hh0 pv)
0 (S.length (V.as_seq hh0 pv))
#pop-options
// Given a target index `k`, the number of elements in the tree `j`, and a
// boolean flag (indicating whether a rightmost hash has been accumulated so
// far), we can calculate the required Merkle path length.
//
// `mt_path_length` appears in the postcondition of `mt_get_path` and in the
// precondition of `mt_verify`. For a detailed description, see `mt_get_path`
// and `mt_verify`.
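// Worked example (informal): for `k = 2`, `j = 5`, `actd = false`,
//   level 0: k even, j <> k and j <> k + 1          -> step 1, actd becomes true
//   level 1: k = 1 is odd                           -> step 1
//   level 2: k = 0, j = 1 = k + 1 but actd is true  -> step 1
//   level 3: j = 0                                  -> stop
// so `mt_path_length 0ul 2ul 5ul false = 3ul`.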
private
val mt_path_length_step:
k:index_t ->
j:index_t{k <= j} ->
actd:bool ->
Tot (sl:uint32_t{U32.v sl = MTH.mt_path_length_step (U32.v k) (U32.v j) actd})
let mt_path_length_step k j actd =
if j = 0ul then 0ul
else (if k % 2ul = 0ul
then (if j = k || (j = k + 1ul && not actd) then 0ul else 1ul)
else 1ul)
private inline_for_extraction
val mt_path_length:
lv:uint32_t{lv <= merkle_tree_size_lg} ->
k:index_t ->
j:index_t{k <= j && U32.v j < pow2 (32 - U32.v lv)} ->
actd:bool ->
Tot (l:uint32_t{
U32.v l = MTH.mt_path_length (U32.v k) (U32.v j) actd &&
l <= 32ul - lv})
(decreases (U32.v j))
#push-options "--z3rlimit 10 --initial_fuel 1 --max_fuel 1"
let rec mt_path_length lv k j actd =
if j = 0ul then 0ul
else (let nactd = actd || (j % 2ul = 1ul) in
mt_path_length_step k j actd +
mt_path_length (lv + 1ul) (k / 2ul) (j / 2ul) nactd)
#pop-options
val mt_get_path_length:
mtr:HH.rid ->
p:const_path_p ->
HST.ST uint32_t
(requires (fun h0 -> path_safe h0 mtr (CB.cast p)))
(ensures (fun h0 _ h1 -> True))
let mt_get_path_length mtr p =
let pd = !*(CB.cast p) in
V.size_of (Path?.hashes pd)
private inline_for_extraction
val mt_make_path_step:
#hsz:hash_size_t ->
lv:uint32_t{lv <= merkle_tree_size_lg} ->
mtr:HH.rid ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
rhs:hash_vec #hsz {V.size_of rhs = merkle_tree_size_lg} ->
i:index_t ->
j:index_t{j <> 0ul /\ i <= j /\ U32.v j < pow2 (32 - U32.v lv)} ->
k:index_t{i <= k && k <= j} ->
p:path_p ->
actd:bool ->
HST.ST unit
(requires (fun h0 ->
HH.includes mtr (V.frameOf hs) /\
HH.includes mtr (V.frameOf rhs) /\
RV.rv_inv h0 hs /\ RV.rv_inv h0 rhs /\
mt_safe_elts h0 lv hs i j /\
path_safe h0 mtr p /\
Path?.hash_size (B.get h0 p 0) = hsz /\
V.size_of (phashes h0 p) <= lv + 1ul))
(ensures (fun h0 _ h1 ->
// memory safety
modifies (path_loc p) h0 h1 /\
path_safe h1 mtr p /\
V.size_of (phashes h1 p) == V.size_of (phashes h0 p) + mt_path_length_step k j actd /\
V.size_of (phashes h1 p) <= lv + 2ul /\
// correctness
(mt_safe_elts_spec h0 lv hs i j;
(let hsz0 = Path?.hash_size (B.get h0 p 0) in
let hsz1 = Path?.hash_size (B.get h1 p 0) in
let before:(S.seq (MTH.hash #(U32.v hsz0))) = lift_path h0 mtr p in
let after:(S.seq (MTH.hash #(U32.v hsz1))) = lift_path h1 mtr p in
hsz = hsz0 /\ hsz = hsz1 /\
S.equal after
(MTH.mt_make_path_step
(U32.v lv) (RV.as_seq h0 hs) (RV.as_seq h0 rhs)
(U32.v i) (U32.v j) (U32.v k) before actd)))))
#push-options "--z3rlimit 100 --initial_fuel 1 --max_fuel 1 --initial_ifuel 2 --max_ifuel 2"
let mt_make_path_step #hsz lv mtr hs rhs i j k p actd =
let pth = !*p in
let hh0 = HST.get () in
let ofs = offset_of i in
if k % 2ul = 1ul
then begin
hash_vv_rv_inv_includes hh0 hs lv (k - 1ul - ofs);
assert (HH.includes mtr
(B.frameOf (V.get hh0 (V.get hh0 hs lv) (k - 1ul - ofs))));
assert(Path?.hash_size pth = hsz);
mt_path_insert #hsz mtr p (V.index (V.index hs lv) (k - 1ul - ofs))
end
else begin
if k = j then ()
else if k + 1ul = j
then (if actd
then (assert (HH.includes mtr (B.frameOf (V.get hh0 rhs lv)));
mt_path_insert mtr p (V.index rhs lv)))
else (hash_vv_rv_inv_includes hh0 hs lv (k + 1ul - ofs);
assert (HH.includes mtr
(B.frameOf (V.get hh0 (V.get hh0 hs lv) (k + 1ul - ofs))));
mt_path_insert mtr p (V.index (V.index hs lv) (k + 1ul - ofs)))
end
#pop-options
private inline_for_extraction
val mt_get_path_step_pre_nst:
#hsz:Ghost.erased hash_size_t ->
mtr:HH.rid ->
p:path ->
i:uint32_t ->
Tot bool
let mt_get_path_step_pre_nst #hsz mtr p i =
i < V.size_of (Path?.hashes p)
val mt_get_path_step_pre:
#hsz:Ghost.erased hash_size_t ->
mtr:HH.rid ->
p:const_path_p ->
i:uint32_t ->
HST.ST bool
(requires (fun h0 ->
path_safe h0 mtr (CB.cast p) /\
(let pv = B.get h0 (CB.cast p) 0 in
Path?.hash_size pv = Ghost.reveal hsz /\
live h0 (Path?.hashes pv) /\
mt_get_path_step_pre_nst #hsz mtr pv i)))
(ensures (fun _ _ _ -> True))
let mt_get_path_step_pre #hsz mtr p i =
let p = CB.cast p in
mt_get_path_step_pre_nst #hsz mtr !*p i
val mt_get_path_step:
#hsz:Ghost.erased hash_size_t ->
mtr:HH.rid ->
p:const_path_p ->
i:uint32_t ->
HST.ST (hash #hsz)
(requires (fun h0 ->
path_safe h0 mtr (CB.cast p) /\
(let pv = B.get h0 (CB.cast p) 0 in
Path?.hash_size pv = Ghost.reveal hsz /\
live h0 (Path?.hashes pv) /\
i < V.size_of (Path?.hashes pv))))
(ensures (fun h0 r h1 -> True ))
let mt_get_path_step #hsz mtr p i =
let pd = !*(CB.cast p) in
V.index #(hash #(Path?.hash_size pd)) (Path?.hashes pd) i
private
val mt_get_path_:
#hsz:hash_size_t ->
lv:uint32_t{lv <= merkle_tree_size_lg} ->
mtr:HH.rid ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
rhs:hash_vec #hsz {V.size_of rhs = merkle_tree_size_lg} ->
i:index_t -> j:index_t{i <= j /\ U32.v j < pow2 (32 - U32.v lv)} ->
k:index_t{i <= k && k <= j} ->
p:path_p ->
actd:bool ->
HST.ST unit
(requires (fun h0 ->
HH.includes mtr (V.frameOf hs) /\
HH.includes mtr (V.frameOf rhs) /\
RV.rv_inv h0 hs /\ RV.rv_inv h0 rhs /\
mt_safe_elts h0 lv hs i j /\
path_safe h0 mtr p /\
Path?.hash_size (B.get h0 p 0) = hsz /\
V.size_of (phashes h0 p) <= lv + 1ul))
(ensures (fun h0 _ h1 ->
// memory safety
modifies (path_loc p) h0 h1 /\
path_safe h1 mtr p /\
V.size_of (phashes h1 p) ==
V.size_of (phashes h0 p) + mt_path_length lv k j actd /\
// correctness
(mt_safe_elts_spec h0 lv hs i j;
(let hsz0 = Path?.hash_size (B.get h0 p 0) in
let hsz1 = Path?.hash_size (B.get h1 p 0) in
let before:(S.seq (MTH.hash #(U32.v hsz0))) = lift_path h0 mtr p in
let after:(S.seq (MTH.hash #(U32.v hsz1))) = lift_path h1 mtr p in
hsz = hsz0 /\ hsz = hsz1 /\
S.equal after
(MTH.mt_get_path_ (U32.v lv) (RV.as_seq h0 hs) (RV.as_seq h0 rhs)
(U32.v i) (U32.v j) (U32.v k) before actd)))))
(decreases (32 - U32.v lv))
#push-options "--z3rlimit 300 --initial_fuel 1 --max_fuel 1 --max_ifuel 2 --initial_ifuel 2"
let rec mt_get_path_ #hsz lv mtr hs rhs i j k p actd =
let hh0 = HST.get () in
mt_safe_elts_spec hh0 lv hs i j;
let ofs = offset_of i in
if j = 0ul then ()
else
(mt_make_path_step lv mtr hs rhs i j k p actd;
let hh1 = HST.get () in
mt_safe_elts_spec hh0 lv hs i j;
assert (S.equal (lift_path hh1 mtr p)
(MTH.mt_make_path_step
(U32.v lv) (RV.as_seq hh0 hs) (RV.as_seq hh0 rhs)
(U32.v i) (U32.v j) (U32.v k)
(lift_path hh0 mtr p) actd));
RV.rv_inv_preserved hs (path_loc p) hh0 hh1;
RV.rv_inv_preserved rhs (path_loc p) hh0 hh1;
RV.as_seq_preserved hs (path_loc p) hh0 hh1;
RV.as_seq_preserved rhs (path_loc p) hh0 hh1;
V.loc_vector_within_included hs lv (V.size_of hs);
mt_safe_elts_preserved lv hs i j (path_loc p) hh0 hh1;
assert (mt_safe_elts hh1 lv hs i j);
mt_safe_elts_rec hh1 lv hs i j;
mt_safe_elts_spec hh1 (lv + 1ul) hs (i / 2ul) (j / 2ul);
mt_get_path_ (lv + 1ul) mtr hs rhs (i / 2ul) (j / 2ul) (k / 2ul) p
(if j % 2ul = 0ul then actd else true);
let hh2 = HST.get () in
assert (S.equal (lift_path hh2 mtr p)
(MTH.mt_get_path_ (U32.v lv + 1)
(RV.as_seq hh1 hs) (RV.as_seq hh1 rhs)
(U32.v i / 2) (U32.v j / 2) (U32.v k / 2)
(lift_path hh1 mtr p)
(if U32.v j % 2 = 0 then actd else true)));
assert (S.equal (lift_path hh2 mtr p)
(MTH.mt_get_path_ (U32.v lv)
(RV.as_seq hh0 hs) (RV.as_seq hh0 rhs)
(U32.v i) (U32.v j) (U32.v k)
(lift_path hh0 mtr p) actd)))
#pop-options
private inline_for_extraction
val mt_get_path_pre_nst:
mtv:merkle_tree ->
idx:offset_t ->
p:path ->
root:(hash #(MT?.hash_size mtv)) ->
Tot bool
let mt_get_path_pre_nst mtv idx p root =
offsets_connect (MT?.offset mtv) idx &&
Path?.hash_size p = MT?.hash_size mtv &&
([@inline_let] let idx = split_offset (MT?.offset mtv) idx in
MT?.i mtv <= idx && idx < MT?.j mtv &&
V.size_of (Path?.hashes p) = 0ul)
val mt_get_path_pre:
#hsz:Ghost.erased hash_size_t ->
mt:const_mt_p ->
idx:offset_t ->
p:const_path_p ->
root:hash #hsz ->
HST.ST bool
(requires (fun h0 ->
let mt = CB.cast mt in
let p = CB.cast p in
let dmt = B.get h0 mt 0 in
let dp = B.get h0 p 0 in
MT?.hash_size dmt = (Ghost.reveal hsz) /\
Path?.hash_size dp = (Ghost.reveal hsz) /\
mt_safe h0 mt /\
path_safe h0 (B.frameOf mt) p /\
Rgl?.r_inv (hreg hsz) h0 root /\
HH.disjoint (B.frameOf root) (B.frameOf mt) /\
HH.disjoint (B.frameOf root) (B.frameOf p)))
(ensures (fun _ _ _ -> True))
let mt_get_path_pre #_ mt idx p root =
let mt = CB.cast mt in
let p = CB.cast p in
let mtv = !*mt in
mt_get_path_pre_nst mtv idx !*p root
val mt_get_path_loc_union_helper:
l1:loc -> l2:loc ->
Lemma (loc_union (loc_union l1 l2) l2 == loc_union l1 l2)
let mt_get_path_loc_union_helper l1 l2 = ()
// Construct a Merkle path for a given index `idx`, hashes `mt.hs`, and
// rightmost hashes `mt.rhs`. Note that this operation copies "pointers" to
// hashes in the Merkle tree into the output path.
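// Worked example (informal), matching the `mt_path_length` example above: for
// a tree with leaves 0..4 (`i = 0`, `j = 5`) and `idx = 2`, the resulting path
// holds the target leaf 2 itself, followed by its level-0 sibling (leaf 3),
// the level-1 sibling `hash(leaf 0, leaf 1)`, and the rightmost hash at
// level 2 (here a copy of leaf 4's hash), i.e.
// `1ul + mt_path_length 0ul 2ul 5ul false = 4ul` hashes in total.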
#push-options "--z3rlimit 60"
val mt_get_path:
#hsz:Ghost.erased hash_size_t ->
mt:const_mt_p ->
idx:offset_t ->
p:path_p ->
root:hash #hsz ->
HST.ST index_t
(requires (fun h0 ->
let mt = CB.cast mt in
let dmt = B.get h0 mt 0 in
MT?.hash_size dmt = Ghost.reveal hsz /\
Path?.hash_size (B.get h0 p 0) = Ghost.reveal hsz /\
mt_get_path_pre_nst (B.get h0 mt 0) idx (B.get h0 p 0) root /\
mt_safe h0 mt /\
path_safe h0 (B.frameOf mt) p /\
Rgl?.r_inv (hreg hsz) h0 root /\
HH.disjoint (B.frameOf root) (B.frameOf mt) /\
HH.disjoint (B.frameOf root) (B.frameOf p)))
(ensures (fun h0 _ h1 ->
let mt = CB.cast mt in
let mtv0 = B.get h0 mt 0 in
let mtv1 = B.get h1 mt 0 in
let idx = split_offset (MT?.offset mtv0) idx in
MT?.hash_size mtv0 = Ghost.reveal hsz /\
MT?.hash_size mtv1 = Ghost.reveal hsz /\
Path?.hash_size (B.get h0 p 0) = Ghost.reveal hsz /\
Path?.hash_size (B.get h1 p 0) = Ghost.reveal hsz /\
// memory safety
modifies (loc_union
(loc_union
(mt_loc mt)
(B.loc_all_regions_from false (B.frameOf root)))
(path_loc p))
h0 h1 /\
mt_safe h1 mt /\
path_safe h1 (B.frameOf mt) p /\
Rgl?.r_inv (hreg hsz) h1 root /\
V.size_of (phashes h1 p) ==
1ul + mt_path_length 0ul idx (MT?.j mtv0) false /\
// correctness
(let sj, sp, srt =
MTH.mt_get_path
(mt_lift h0 mt) (U32.v idx) (Rgl?.r_repr (hreg hsz) h0 root) in
sj == U32.v (MT?.j mtv1) /\
S.equal sp (lift_path #hsz h1 (B.frameOf mt) p) /\
srt == Rgl?.r_repr (hreg hsz) h1 root)))
#pop-options
#push-options "--z3rlimit 300 --initial_fuel 1 --max_fuel 1"
let mt_get_path #hsz mt idx p root =
let ncmt = CB.cast mt in
let mtframe = B.frameOf ncmt in
let hh0 = HST.get () in
mt_get_root mt root;
let mtv = !*ncmt in
let hsz = MT?.hash_size mtv in
let hh1 = HST.get () in
path_safe_init_preserved mtframe p
(B.loc_union (mt_loc ncmt)
(B.loc_all_regions_from false (B.frameOf root)))
hh0 hh1;
assert (MTH.mt_get_root (mt_lift hh0 ncmt) (Rgl?.r_repr (hreg hsz) hh0 root) ==
(mt_lift hh1 ncmt, Rgl?.r_repr (hreg hsz) hh1 root));
assert (S.equal (lift_path #hsz hh1 mtframe p) S.empty);
let idx = split_offset (MT?.offset mtv) idx in
let i = MT?.i mtv in
let ofs = offset_of (MT?.i mtv) in
let j = MT?.j mtv in
let hs = MT?.hs mtv in
let rhs = MT?.rhs mtv in
assert (mt_safe_elts hh1 0ul hs i j);
assert (V.size_of (V.get hh1 hs 0ul) == j - ofs);
assert (idx < j);
hash_vv_rv_inv_includes hh1 hs 0ul (idx - ofs);
hash_vv_rv_inv_r_inv hh1 hs 0ul (idx - ofs);
hash_vv_as_seq_get_index hh1 hs 0ul (idx - ofs);
let ih = V.index (V.index hs 0ul) (idx - ofs) in
mt_path_insert #hsz mtframe p ih;
let hh2 = HST.get () in
assert (S.equal (lift_path hh2 mtframe p)
(MTH.path_insert
(lift_path hh1 mtframe p)
(S.index (S.index (RV.as_seq hh1 hs) 0) (U32.v idx - U32.v ofs))));
Rgl?.r_sep (hreg hsz) root (path_loc p) hh1 hh2;
mt_safe_preserved ncmt (path_loc p) hh1 hh2;
mt_preserved ncmt (path_loc p) hh1 hh2;
assert (V.size_of (phashes hh2 p) == 1ul);
mt_get_path_ 0ul mtframe hs rhs i j idx p false;
let hh3 = HST.get () in
// memory safety
mt_get_path_loc_union_helper
(loc_union (mt_loc ncmt)
(B.loc_all_regions_from false (B.frameOf root)))
(path_loc p);
Rgl?.r_sep (hreg hsz) root (path_loc p) hh2 hh3;
mt_safe_preserved ncmt (path_loc p) hh2 hh3;
mt_preserved ncmt (path_loc p) hh2 hh3;
assert (V.size_of (phashes hh3 p) ==
1ul + mt_path_length 0ul idx (MT?.j (B.get hh0 ncmt 0)) false);
assert (S.length (lift_path #hsz hh3 mtframe p) ==
S.length (lift_path #hsz hh2 mtframe p) +
MTH.mt_path_length (U32.v idx) (U32.v (MT?.j (B.get hh0 ncmt 0))) false);
assert (modifies (loc_union
(loc_union
(mt_loc ncmt)
(B.loc_all_regions_from false (B.frameOf root)))
(path_loc p))
hh0 hh3);
assert (mt_safe hh3 ncmt);
assert (path_safe hh3 mtframe p);
assert (Rgl?.r_inv (hreg hsz) hh3 root);
assert (V.size_of (phashes hh3 p) ==
1ul + mt_path_length 0ul idx (MT?.j (B.get hh0 ncmt 0)) false);
// correctness
mt_safe_elts_spec hh2 0ul hs i j;
assert (S.equal (lift_path hh3 mtframe p)
(MTH.mt_get_path_ 0 (RV.as_seq hh2 hs) (RV.as_seq hh2 rhs)
(U32.v i) (U32.v j) (U32.v idx)
(lift_path hh2 mtframe p) false));
assert (MTH.mt_get_path
(mt_lift hh0 ncmt) (U32.v idx) (Rgl?.r_repr (hreg hsz) hh0 root) ==
(U32.v (MT?.j (B.get hh3 ncmt 0)),
lift_path hh3 mtframe p,
Rgl?.r_repr (hreg hsz) hh3 root));
j
#pop-options
/// Flushing
private
val mt_flush_to_modifies_rec_helper:
#hsz:hash_size_t ->
lv:uint32_t{lv < merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
h:HS.mem ->
Lemma (loc_union
(loc_union
(RV.rs_loc_elem (hvreg hsz) (V.as_seq h hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul)))
(loc_union
(RV.rv_loc_elems h hs (lv + 1ul) (V.size_of hs))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs))) ==
loc_union
(RV.rv_loc_elems h hs lv (V.size_of hs))
(V.loc_vector_within hs lv (V.size_of hs)))
#push-options "--initial_fuel 2 --max_fuel 2"
let mt_flush_to_modifies_rec_helper #hsz lv hs h =
assert (V.loc_vector_within hs lv (V.size_of hs) ==
loc_union (V.loc_vector_within hs lv (lv + 1ul))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs)));
RV.rs_loc_elems_rec_inverse (hvreg hsz) (V.as_seq h hs) (U32.v lv) (U32.v (V.size_of hs));
assert (RV.rv_loc_elems h hs lv (V.size_of hs) ==
loc_union (RV.rs_loc_elem (hvreg hsz) (V.as_seq h hs) (U32.v lv))
(RV.rv_loc_elems h hs (lv + 1ul) (V.size_of hs)));
loc_union_assoc_4
(RV.rs_loc_elem (hvreg hsz) (V.as_seq h hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul))
(RV.rv_loc_elems h hs (lv + 1ul) (V.size_of hs))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs))
#pop-options
private
val mt_flush_to_:
hsz:hash_size_t ->
lv:uint32_t{lv < merkle_tree_size_lg} ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
pi:index_t ->
i:index_t{i >= pi} ->
j:Ghost.erased index_t{
Ghost.reveal j >= i &&
U32.v (Ghost.reveal j) < pow2 (32 - U32.v lv)} ->
HST.ST unit
(requires (fun h0 ->
RV.rv_inv h0 hs /\
mt_safe_elts h0 lv hs pi (Ghost.reveal j)))
(ensures (fun h0 _ h1 ->
// memory safety
modifies (loc_union
(RV.rv_loc_elems h0 hs lv (V.size_of hs))
(V.loc_vector_within hs lv (V.size_of hs)))
h0 h1 /\
RV.rv_inv h1 hs /\
mt_safe_elts h1 lv hs i (Ghost.reveal j) /\
// correctness
(mt_safe_elts_spec h0 lv hs pi (Ghost.reveal j);
S.equal (RV.as_seq h1 hs)
(MTH.mt_flush_to_
(U32.v lv) (RV.as_seq h0 hs) (U32.v pi)
(U32.v i) (U32.v (Ghost.reveal j))))))
(decreases (U32.v i))
#restart-solver
#push-options "--z3rlimit 1500 --fuel 1 --ifuel 0"
let rec mt_flush_to_ hsz lv hs pi i j =
let hh0 = HST.get () in
// Base conditions
mt_safe_elts_rec hh0 lv hs pi (Ghost.reveal j);
V.loc_vector_within_included hs 0ul lv;
V.loc_vector_within_included hs lv (lv + 1ul);
V.loc_vector_within_included hs (lv + 1ul) (V.size_of hs);
V.loc_vector_within_disjoint hs lv (lv + 1ul) (lv + 1ul) (V.size_of hs);
let oi = offset_of i in
let opi = offset_of pi in
if oi = opi then mt_safe_elts_spec hh0 lv hs pi (Ghost.reveal j)
else begin
/// 1) Flush hashes at the level `lv`, where the new vector is
/// not yet connected to `hs`.
let ofs = oi - opi in
let hvec = V.index hs lv in
let flushed:(rvector (hreg hsz)) = rv_flush_inplace hvec ofs in
let hh1 = HST.get () in
// 1-0) Basic disjointness conditions for `RV.assign`
V.forall2_forall_left hh0 hs 0ul (V.size_of hs) lv
(fun b1 b2 -> HH.disjoint (Rgl?.region_of (hvreg hsz) b1)
(Rgl?.region_of (hvreg hsz) b2));
V.forall2_forall_right hh0 hs 0ul (V.size_of hs) lv
(fun b1 b2 -> HH.disjoint (Rgl?.region_of (hvreg hsz) b1)
(Rgl?.region_of (hvreg hsz) b2));
V.forall_preserved
hs 0ul lv
(fun b -> HH.disjoint (Rgl?.region_of (hvreg hsz) hvec)
(Rgl?.region_of (hvreg hsz) b))
(RV.loc_rvector hvec)
hh0 hh1;
V.forall_preserved
hs (lv + 1ul) (V.size_of hs)
(fun b -> HH.disjoint (Rgl?.region_of (hvreg hsz) hvec)
(Rgl?.region_of (hvreg hsz) b))
(RV.loc_rvector hvec)
hh0 hh1;
assert (Rgl?.region_of (hvreg hsz) hvec == Rgl?.region_of (hvreg hsz) flushed);
// 1-1) For the `modifies` postcondition.
assert (modifies (RV.rs_loc_elem (hvreg hsz) (V.as_seq hh0 hs) (U32.v lv)) hh0 hh1);
// 1-2) Preservation
RV.rv_loc_elems_preserved
hs (lv + 1ul) (V.size_of hs)
(RV.loc_rvector (V.get hh0 hs lv)) hh0 hh1;
// 1-3) For `mt_safe_elts`
assert (V.size_of flushed == Ghost.reveal j - offset_of i); // head updated
mt_safe_elts_preserved
(lv + 1ul) hs (pi / 2ul) (Ghost.reveal j / 2ul)
(RV.loc_rvector (V.get hh0 hs lv)) hh0 hh1; // tail not yet
// 1-4) For the `rv_inv` postcondition
RV.rs_loc_elems_elem_disj
(hvreg hsz) (V.as_seq hh0 hs) (V.frameOf hs)
0 (U32.v (V.size_of hs)) 0 (U32.v lv) (U32.v lv);
RV.rs_loc_elems_parent_disj
(hvreg hsz) (V.as_seq hh0 hs) (V.frameOf hs)
0 (U32.v lv);
RV.rv_elems_inv_preserved
hs 0ul lv (RV.loc_rvector (V.get hh0 hs lv))
hh0 hh1;
assert (RV.rv_elems_inv hh1 hs 0ul lv);
RV.rs_loc_elems_elem_disj
(hvreg hsz) (V.as_seq hh0 hs) (V.frameOf hs)
0 (U32.v (V.size_of hs))
(U32.v lv + 1) (U32.v (V.size_of hs))
(U32.v lv);
RV.rs_loc_elems_parent_disj
(hvreg hsz) (V.as_seq hh0 hs) (V.frameOf hs)
(U32.v lv + 1) (U32.v (V.size_of hs));
RV.rv_elems_inv_preserved
hs (lv + 1ul) (V.size_of hs) (RV.loc_rvector (V.get hh0 hs lv))
hh0 hh1;
assert (RV.rv_elems_inv hh1 hs (lv + 1ul) (V.size_of hs));
assert (rv_itself_inv hh1 hs);
assert (elems_reg hh1 hs);
// 1-5) Correctness
assert (S.equal (RV.as_seq hh1 flushed)
(S.slice (RV.as_seq hh0 (V.get hh0 hs lv)) (U32.v ofs)
(S.length (RV.as_seq hh0 (V.get hh0 hs lv)))));
/// 2) Assign the flushed vector to `hs` at the level `lv`.
RV.assign hs lv flushed;
let hh2 = HST.get () in
// 2-1) For the `modifies` postcondition.
assert (modifies (V.loc_vector_within hs lv (lv + 1ul)) hh1 hh2);
assert (modifies (loc_union
(RV.rs_loc_elem (hvreg hsz) (V.as_seq hh0 hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul))) hh0 hh2);
// 2-2) Preservation
V.loc_vector_within_disjoint hs lv (lv + 1ul) (lv + 1ul) (V.size_of hs);
RV.rv_loc_elems_preserved
hs (lv + 1ul) (V.size_of hs)
(V.loc_vector_within hs lv (lv + 1ul)) hh1 hh2;
// 2-3) For `mt_safe_elts`
assert (V.size_of (V.get hh2 hs lv) ==
Ghost.reveal j - offset_of i);
mt_safe_elts_preserved
(lv + 1ul) hs (pi / 2ul) (Ghost.reveal j / 2ul)
(V.loc_vector_within hs lv (lv + 1ul)) hh1 hh2;
// 2-4) Correctness
RV.as_seq_sub_preserved hs 0ul lv (loc_rvector flushed) hh0 hh1;
RV.as_seq_sub_preserved hs (lv + 1ul) merkle_tree_size_lg (loc_rvector flushed) hh0 hh1;
assert (S.equal (RV.as_seq hh2 hs)
(S.append
(RV.as_seq_sub hh0 hs 0ul lv)
(S.cons (RV.as_seq hh1 flushed)
(RV.as_seq_sub hh0 hs (lv + 1ul) merkle_tree_size_lg))));
as_seq_sub_upd hh0 hs lv (RV.as_seq hh1 flushed);
// if `lv = 31` then `pi <= i <= j < 2` thus `oi = opi`,
// contradicting the branch.
assert (lv + 1ul < merkle_tree_size_lg);
assert (U32.v (Ghost.reveal j / 2ul) < pow2 (32 - U32.v (lv + 1ul)));
assert (RV.rv_inv hh2 hs);
assert (mt_safe_elts hh2 (lv + 1ul) hs (pi / 2ul) (Ghost.reveal j / 2ul));
/// 3) Recursion
mt_flush_to_ hsz (lv + 1ul) hs (pi / 2ul) (i / 2ul)
(Ghost.hide (Ghost.reveal j / 2ul));
let hh3 = HST.get () in
// 3-0) Memory safety brought from the postcondition of the recursion
assert (modifies
(loc_union
(loc_union
(RV.rs_loc_elem (hvreg hsz) (V.as_seq hh0 hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul)))
(loc_union
(RV.rv_loc_elems hh0 hs (lv + 1ul) (V.size_of hs))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs))))
hh0 hh3);
mt_flush_to_modifies_rec_helper lv hs hh0;
V.loc_vector_within_disjoint hs lv (lv + 1ul) (lv + 1ul) (V.size_of hs);
V.loc_vector_within_included hs lv (lv + 1ul);
RV.rv_loc_elems_included hh2 hs (lv + 1ul) (V.size_of hs);
assert (loc_disjoint
(V.loc_vector_within hs lv (lv + 1ul))
(RV.rv_loc_elems hh2 hs (lv + 1ul) (V.size_of hs)));
V.get_preserved hs lv
(loc_union
(RV.rv_loc_elems hh2 hs (lv + 1ul) (V.size_of hs))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs)))
hh2 hh3;
assert (V.size_of (V.get hh3 hs lv) ==
Ghost.reveal j - offset_of i);
assert (RV.rv_inv hh3 hs);
mt_safe_elts_constr hh3 lv hs i (Ghost.reveal j);
assert (mt_safe_elts hh3 lv hs i (Ghost.reveal j));
// 3-1) Correctness
mt_safe_elts_spec hh2 (lv + 1ul) hs (pi / 2ul) (Ghost.reveal j / 2ul);
assert (S.equal (RV.as_seq hh3 hs)
(MTH.mt_flush_to_ (U32.v lv + 1) (RV.as_seq hh2 hs)
(U32.v pi / 2) (U32.v i / 2) (U32.v (Ghost.reveal j) / 2)));
mt_safe_elts_spec hh0 lv hs pi (Ghost.reveal j);
MTH.mt_flush_to_rec
(U32.v lv) (RV.as_seq hh0 hs)
(U32.v pi) (U32.v i) (U32.v (Ghost.reveal j));
assert (S.equal (RV.as_seq hh3 hs)
(MTH.mt_flush_to_ (U32.v lv) (RV.as_seq hh0 hs)
(U32.v pi) (U32.v i) (U32.v (Ghost.reveal j))))
end
#pop-options
// `mt_flush_to` flushes old hashes in the Merkle tree. It removes hash elements
// from `MT?.i` to **`offset_of (idx - 1)`**, but maintains the tree structure,
// i.e., the tree still holds some old internal hashes (compressed from old
// hashes) which are required to generate Merkle paths for the remaining hashes.
//
// Note that `mt_flush_to` (and `mt_flush`) always retains at least one base
// hash element. If there are `MT?.j` elements in the tree, then because of the
// precondition `MT?.i <= idx < MT?.j` the `idx`-th element is still present
// after flushing.
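// Worked example (informal), assuming `offset_of i` rounds `i` down to an even
// number as defined earlier in this module: with `MT?.i = 0`, `MT?.j = 7` and
// `idx = 5`, flushing drops the level-0 hashes at indices 0..3
// (`offset_of 5ul = 4ul`) and the level-1 hashes at indices 0..1, while
// level 2 keeps the hash covering leaves 0..3, which is still needed to
// produce Merkle paths for the remaining leaves 4..6.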
private inline_for_extraction
val mt_flush_to_pre_nst: mtv:merkle_tree -> idx:offset_t -> Tot bool
let mt_flush_to_pre_nst mtv idx =
offsets_connect (MT?.offset mtv) idx &&
([@inline_let] let idx = split_offset (MT?.offset mtv) idx in
idx >= MT?.i mtv &&
idx < MT?.j mtv)
val mt_flush_to_pre: mt:const_mt_p -> idx:offset_t -> HST.ST bool
(requires (fun h0 -> mt_safe h0 (CB.cast mt)))
(ensures (fun _ _ _ -> True))
let mt_flush_to_pre mt idx =
let mt = CB.cast mt in
let h0 = HST.get() in
let mtv = !*mt in
mt_flush_to_pre_nst mtv idx
#push-options "--z3rlimit 100 --initial_fuel 1 --max_fuel 1"
val mt_flush_to:
mt:mt_p ->
idx:offset_t ->
HST.ST unit
(requires (fun h0 -> mt_safe h0 mt /\ mt_flush_to_pre_nst (B.get h0 mt 0) idx))
(ensures (fun h0 _ h1 ->
// memory safety
modifies (mt_loc mt) h0 h1 /\
mt_safe h1 mt /\
// correctness
(let mtv0 = B.get h0 mt 0 in
let mtv1 = B.get h1 mt 0 in
let off = MT?.offset mtv0 in
let idx = split_offset off idx in
MT?.hash_size mtv0 = MT?.hash_size mtv1 /\
MTH.mt_flush_to (mt_lift h0 mt) (U32.v idx) == mt_lift h1 mt)))
let mt_flush_to mt idx =
let hh0 = HST.get () in
let mtv = !*mt in
let offset = MT?.offset mtv in
let j = MT?.j mtv in
let hsz = MT?.hash_size mtv in
let idx = split_offset offset idx in
let hs = MT?.hs mtv in
mt_flush_to_ hsz 0ul hs (MT?.i mtv) idx (Ghost.hide (MT?.j mtv));
let hh1 = HST.get () in
RV.rv_loc_elems_included hh0 hs 0ul (V.size_of hs);
V.loc_vector_within_included hs 0ul (V.size_of hs);
RV.rv_inv_preserved
(MT?.rhs mtv)
(loc_union
(RV.rv_loc_elems hh0 hs 0ul (V.size_of hs))
(V.loc_vector_within hs 0ul (V.size_of hs)))
hh0 hh1;
RV.as_seq_preserved
(MT?.rhs mtv)
(loc_union
(RV.rv_loc_elems hh0 hs 0ul (V.size_of hs))
(V.loc_vector_within hs 0ul (V.size_of hs)))
hh0 hh1;
Rgl?.r_sep (hreg (MT?.hash_size mtv)) (MT?.mroot mtv)
(loc_union
(RV.rv_loc_elems hh0 hs 0ul (V.size_of hs))
(V.loc_vector_within hs 0ul (V.size_of hs)))
hh0 hh1;
mt *= MT (MT?.hash_size mtv)
(MT?.offset mtv) idx (MT?.j mtv)
hs
(MT?.rhs_ok mtv) (MT?.rhs mtv)
(MT?.mroot mtv)
(MT?.hash_spec mtv) (MT?.hash_fun mtv);
let hh2 = HST.get () in
RV.rv_inv_preserved (MT?.hs mtv) (B.loc_buffer mt) hh1 hh2;
RV.rv_inv_preserved (MT?.rhs mtv) (B.loc_buffer mt) hh1 hh2;
RV.as_seq_preserved (MT?.hs mtv) (B.loc_buffer mt) hh1 hh2;
RV.as_seq_preserved (MT?.rhs mtv) (B.loc_buffer mt) hh1 hh2;
Rgl?.r_sep (hreg (MT?.hash_size mtv)) (MT?.mroot mtv) (B.loc_buffer mt) hh1 hh2;
mt_safe_elts_preserved 0ul hs idx (MT?.j mtv) (B.loc_buffer mt) hh1 hh2
#pop-options
private inline_for_extraction
val mt_flush_pre_nst: mt:merkle_tree -> Tot bool
let mt_flush_pre_nst mt = MT?.j mt > MT?.i mt
val mt_flush_pre: mt:const_mt_p -> HST.ST bool (requires (fun h0 -> mt_safe h0 (CB.cast mt))) (ensures (fun _ _ _ -> True))
let mt_flush_pre mt = mt_flush_pre_nst !*(CB.cast mt)
val mt_flush:
mt:mt_p ->
HST.ST unit
(requires (fun h0 -> mt_safe h0 mt /\ mt_flush_pre_nst (B.get h0 mt 0)))
(ensures (fun h0 _ h1 ->
let mtv0 = B.get h0 mt 0 in
let mtv1 = B.get h1 mt 0 in
// memory safety
modifies (mt_loc mt) h0 h1 /\
mt_safe h1 mt /\
// correctness
MT?.hash_size mtv0 = MT?.hash_size mtv1 /\
MTH.mt_flush (mt_lift h0 mt) == mt_lift h1 mt))
#push-options "--z3rlimit 200 --initial_fuel 1 --max_fuel 1"
let mt_flush mt =
let mtv = !*mt in
let off = MT?.offset mtv in
let j = MT?.j mtv in
let j1 = j - 1ul in
assert (j1 < uint32_32_max);
assert (off < uint64_max);
assert (UInt.fits (U64.v off + U32.v j1) 64);
let jo = join_offset off j1 in
mt_flush_to mt jo
#pop-options
/// Retraction
private
val mt_retract_to_:
#hsz:hash_size_t ->
hs:hash_vv hsz {V.size_of hs = merkle_tree_size_lg} ->
lv:uint32_t{lv < V.size_of hs} ->
i:index_t ->
s:index_t ->
j:index_t{i <= s && s <= j && v j < pow2 (U32.v (V.size_of hs) - v lv)}
-> HST.ST unit
(requires (fun h0 ->
RV.rv_inv h0 hs /\
mt_safe_elts h0 lv hs i j))
(ensures (fun h0 _ h1 ->
// memory safety
(modifies (loc_union
(RV.rv_loc_elems h0 hs lv (V.size_of hs))
(V.loc_vector_within hs lv (V.size_of hs)))
h0 h1) /\
RV.rv_inv h1 hs /\
mt_safe_elts h1 lv hs i s /\
// correctness
(mt_safe_elts_spec h0 lv hs i j;
S.equal (RV.as_seq h1 hs)
(MTH.mt_retract_to_
(RV.as_seq h0 hs) (U32.v lv)
(U32.v i) (U32.v s) (U32.v j)))
))
(decreases (U32.v merkle_tree_size_lg - U32.v lv))
#push-options "--z3rlimit 300 --initial_fuel 1 --max_fuel 1"
private
let rec mt_retract_to_ #hsz hs lv i s j =
let hh0 = HST.get () in
// Base conditions
mt_safe_elts_rec hh0 lv hs i j;
V.loc_vector_within_included hs 0ul lv;
V.loc_vector_within_included hs lv (lv + 1ul);
V.loc_vector_within_included hs (lv + 1ul) (V.size_of hs);
V.loc_vector_within_disjoint hs lv (lv + 1ul) (lv + 1ul) (V.size_of hs);
if lv >= V.size_of hs then ()
else begin
// 1) Retract hashes at level `lv`.
let hvec = V.index hs lv in
let old_len = j - offset_of i in
let new_len = s - offset_of i in
let retracted = RV.shrink hvec new_len in
let hh1 = HST.get () in
// 1-0) Basic disjointness conditions for `RV.assign`
V.forall2_forall_left hh0 hs 0ul (V.size_of hs) lv
(fun b1 b2 -> HH.disjoint (Rgl?.region_of (hvreg hsz) b1)
(Rgl?.region_of (hvreg hsz) b2));
V.forall2_forall_right hh0 hs 0ul (V.size_of hs) lv
(fun b1 b2 -> HH.disjoint (Rgl?.region_of (hvreg hsz) b1)
(Rgl?.region_of (hvreg hsz) b2));
V.forall_preserved
hs 0ul lv
(fun b -> HH.disjoint (Rgl?.region_of (hvreg hsz) hvec)
(Rgl?.region_of (hvreg hsz) b))
(RV.loc_rvector hvec)
hh0 hh1;
V.forall_preserved
hs (lv + 1ul) (V.size_of hs)
(fun b -> HH.disjoint (Rgl?.region_of (hvreg hsz) hvec)
(Rgl?.region_of (hvreg hsz) b))
(RV.loc_rvector hvec)
hh0 hh1;
assert (Rgl?.region_of (hvreg hsz) hvec == Rgl?.region_of (hvreg hsz) retracted);
// 1-1) For the `modifies` postcondition.
assert (modifies (RV.rs_loc_elem (hvreg hsz) (V.as_seq hh0 hs) (U32.v lv)) hh0 hh1);
// 1-2) Preservation
RV.rv_loc_elems_preserved
hs (lv + 1ul) (V.size_of hs)
(RV.loc_rvector (V.get hh0 hs lv)) hh0 hh1;
// 1-3) For `mt_safe_elts`
assert (V.size_of retracted == new_len);
mt_safe_elts_preserved
(lv + 1ul) hs (i / 2ul) (j / 2ul)
(RV.loc_rvector (V.get hh0 hs lv)) hh0 hh1;
// 1-4) For the `rv_inv` postcondition
RV.rs_loc_elems_elem_disj
(hvreg hsz) (V.as_seq hh0 hs) (V.frameOf hs)
0 (U32.v (V.size_of hs)) 0 (U32.v lv) (U32.v lv);
RV.rs_loc_elems_parent_disj
(hvreg hsz) (V.as_seq hh0 hs) (V.frameOf hs)
0 (U32.v lv);
RV.rv_elems_inv_preserved
hs 0ul lv (RV.loc_rvector (V.get hh0 hs lv))
hh0 hh1;
assert (RV.rv_elems_inv hh1 hs 0ul lv);
RV.rs_loc_elems_elem_disj
(hvreg hsz) (V.as_seq hh0 hs) (V.frameOf hs)
0 (U32.v (V.size_of hs))
(U32.v lv + 1) (U32.v (V.size_of hs))
(U32.v lv);
RV.rs_loc_elems_parent_disj
(hvreg hsz) (V.as_seq hh0 hs) (V.frameOf hs)
(U32.v lv + 1) (U32.v (V.size_of hs));
RV.rv_elems_inv_preserved
hs (lv + 1ul) (V.size_of hs) (RV.loc_rvector (V.get hh0 hs lv))
hh0 hh1;
assert (RV.rv_elems_inv hh1 hs (lv + 1ul) (V.size_of hs));
assert (rv_itself_inv hh1 hs);
assert (elems_reg hh1 hs);
// 1-5) Correctness
assert (S.equal (RV.as_seq hh1 retracted)
(S.slice (RV.as_seq hh0 (V.get hh0 hs lv)) 0 (U32.v new_len)));
RV.assign hs lv retracted;
let hh2 = HST.get() in
// 2-1) For the `modifies` postcondition.
assert (modifies (V.loc_vector_within hs lv (lv + 1ul)) hh1 hh2);
assert (modifies (loc_union
(RV.rs_loc_elem (hvreg hsz) (V.as_seq hh0 hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul))) hh0 hh2);
// 2-2) Preservation
V.loc_vector_within_disjoint hs lv (lv + 1ul) (lv + 1ul) (V.size_of hs);
RV.rv_loc_elems_preserved
hs (lv + 1ul) (V.size_of hs)
(V.loc_vector_within hs lv (lv + 1ul)) hh1 hh2;
// 2-3) For `mt_safe_elts`
assert (V.size_of (V.get hh2 hs lv) == s - offset_of i);
mt_safe_elts_preserved
(lv + 1ul) hs (i / 2ul) (j / 2ul)
(V.loc_vector_within hs lv (lv + 1ul)) hh1 hh2;
// 2-4) Correctness
RV.as_seq_sub_preserved hs 0ul lv (loc_rvector retracted) hh0 hh1;
RV.as_seq_sub_preserved hs (lv + 1ul) merkle_tree_size_lg (loc_rvector retracted) hh0 hh1;
assert (S.equal (RV.as_seq hh2 hs)
(S.append
(RV.as_seq_sub hh0 hs 0ul lv)
(S.cons (RV.as_seq hh1 retracted)
(RV.as_seq_sub hh0 hs (lv + 1ul) merkle_tree_size_lg))));
as_seq_sub_upd hh0 hs lv (RV.as_seq hh1 retracted);
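    // If a level exists above `lv`, recurse with the indices halved: each
    // node at level lv + 1 covers a pair of nodes at level lv.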
if lv + 1ul < V.size_of hs then
begin
assert (mt_safe_elts hh2 (lv + 1ul) hs (i / 2ul) (j / 2ul));
mt_safe_elts_spec hh2 (lv + 1ul) hs (i / 2ul) (j / 2ul);
mt_retract_to_ hs (lv + 1ul) (i / 2ul) (s / 2ul) (j / 2ul);
// 3-0) Memory safety brought from the postcondition of the recursion
let hh3 = HST.get () in
assert (modifies
(loc_union
(loc_union
(RV.rs_loc_elem (hvreg hsz) (V.as_seq hh0 hs) (U32.v lv))
(V.loc_vector_within hs lv (lv + 1ul)))
(loc_union
(RV.rv_loc_elems hh0 hs (lv + 1ul) (V.size_of hs))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs))))
hh0 hh3);
mt_flush_to_modifies_rec_helper lv hs hh0;
V.loc_vector_within_disjoint hs lv (lv + 1ul) (lv + 1ul) (V.size_of hs);
V.loc_vector_within_included hs lv (lv + 1ul);
RV.rv_loc_elems_included hh2 hs (lv + 1ul) (V.size_of hs);
assert (loc_disjoint
(V.loc_vector_within hs lv (lv + 1ul))
(RV.rv_loc_elems hh2 hs (lv + 1ul) (V.size_of hs)));
V.get_preserved hs lv
(loc_union
(RV.rv_loc_elems hh2 hs (lv + 1ul) (V.size_of hs))
(V.loc_vector_within hs (lv + 1ul) (V.size_of hs)))
hh2 hh3;
assert (V.size_of (V.get hh3 hs lv) == s - offset_of i);
assert (RV.rv_inv hh3 hs);
mt_safe_elts_constr hh3 lv hs i s;
assert (mt_safe_elts hh3 lv hs i s);
// 3-1) Correctness
mt_safe_elts_spec hh2 (lv + 1ul) hs (i / 2ul) (j / 2ul);
assert (U32.v lv + 1 < S.length (RV.as_seq hh3 hs) ==>
S.equal (RV.as_seq hh3 hs)
(MTH.mt_retract_to_ (RV.as_seq hh2 hs) (U32.v lv + 1)
(U32.v i / 2) (U32.v s / 2) (U32.v j / 2)));
assert (RV.rv_inv hh0 hs);
assert (mt_safe_elts hh0 lv hs i j);
mt_safe_elts_spec hh0 lv hs i j;
assert (S.equal (RV.as_seq hh3 hs)
(MTH.mt_retract_to_ (RV.as_seq hh0 hs) (U32.v lv)
(U32.v i) (U32.v s) (U32.v j)))
end
else begin
let hh3 = HST.get() in
assert ((modifies (loc_union
(RV.rv_loc_elems hh0 hs lv (V.size_of hs))
(V.loc_vector_within hs lv (V.size_of hs)))
hh0 hh3));
assert (RV.rv_inv hh3 hs /\ mt_safe_elts hh3 lv hs i s);
mt_safe_elts_spec hh0 lv hs i j;
assert (S.equal (RV.as_seq hh3 hs)
(MTH.mt_retract_to_
(RV.as_seq hh0 hs) (U32.v lv)
(U32.v i) (U32.v s) (U32.v j)))
end
end
#pop-options
private inline_for_extraction
val mt_retract_to_pre_nst: mtv:merkle_tree -> r:offset_t -> Tot bool
let mt_retract_to_pre_nst mtv r =
offsets_connect (MT?.offset mtv) r &&
([@inline_let] let r = split_offset (MT?.offset mtv) r in
MT?.i mtv <= r && r < MT?.j mtv)
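// A retraction target is accepted only when its offset part matches the
// tree's own 64-bit offset (`offsets_connect`) and its 32-bit remainder
// falls inside the live leaf range [MT?.i, MT?.j).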
val mt_retract_to_pre: mt:const_mt_p -> r:offset_t -> HST.ST bool
(requires (fun h0 -> mt_safe h0 (CB.cast mt)))
(ensures (fun _ _ _ -> True))
let mt_retract_to_pre mt r =
let mt = CB.cast mt in
let h0 = HST.get() in
let mtv = !*mt in
mt_retract_to_pre_nst mtv r
#push-options "--z3rlimit 100"
val mt_retract_to:
mt:mt_p ->
r:offset_t ->
HST.ST unit
(requires (fun h0 -> mt_safe h0 mt /\ mt_retract_to_pre_nst (B.get h0 mt 0) r))
(ensures (fun h0 _ h1 ->
// memory safety
modifies (mt_loc mt) h0 h1 /\
mt_safe h1 mt /\
// correctness
(let mtv0 = B.get h0 mt 0 in
let mtv1 = B.get h1 mt 0 in
let off = MT?.offset mtv0 in
let r = split_offset off r in
MT?.hash_size mtv0 = MT?.hash_size mtv1 /\
MTH.mt_retract_to (mt_lift h0 mt) (U32.v r) == mt_lift h1 mt)))
let mt_retract_to mt r =
let hh0 = HST.get () in
let mtv = !*mt in
let offset = MT?.offset mtv in
let r = split_offset offset r in
let hs = MT?.hs mtv in
mt_retract_to_ hs 0ul (MT?.i mtv) (r + 1ul) (MT?.j mtv);
let hh1 = HST.get () in
RV.rv_loc_elems_included hh0 hs 0ul (V.size_of hs);
V.loc_vector_within_included hs 0ul (V.size_of hs);
RV.rv_inv_preserved
(MT?.rhs mtv)
(loc_union
(RV.rv_loc_elems hh0 hs 0ul (V.size_of hs))
(V.loc_vector_within hs 0ul (V.size_of hs)))
hh0 hh1;
RV.as_seq_preserved
(MT?.rhs mtv)
(loc_union
(RV.rv_loc_elems hh0 hs 0ul (V.size_of hs))
(V.loc_vector_within hs 0ul (V.size_of hs)))
hh0 hh1;
Rgl?.r_sep (hreg (MT?.hash_size mtv)) (MT?.mroot mtv)
(loc_union
(RV.rv_loc_elems hh0 hs 0ul (V.size_of hs))
(V.loc_vector_within hs 0ul (V.size_of hs)))
hh0 hh1;
mt *= MT (MT?.hash_size mtv) (MT?.offset mtv) (MT?.i mtv) (r+1ul) hs false (MT?.rhs mtv) (MT?.mroot mtv) (MT?.hash_spec mtv) (MT?.hash_fun mtv);
let hh2 = HST.get () in
RV.rv_inv_preserved (MT?.hs mtv) (B.loc_buffer mt) hh1 hh2;
RV.rv_inv_preserved (MT?.rhs mtv) (B.loc_buffer mt) hh1 hh2;
RV.as_seq_preserved (MT?.hs mtv) (B.loc_buffer mt) hh1 hh2;
RV.as_seq_preserved (MT?.rhs mtv) (B.loc_buffer mt) hh1 hh2;
Rgl?.r_sep (hreg (MT?.hash_size mtv)) (MT?.mroot mtv) (B.loc_buffer mt) hh1 hh2;
mt_safe_elts_preserved 0ul hs (MT?.i mtv) (r+1ul) (B.loc_buffer mt) hh1 hh2
#pop-options
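// Hypothetical usage sketch (the const pointer `cmt` aliasing `mt` is an
// assumption of this sketch):
//
//   if mt_retract_to_pre cmt target then mt_retract_to mt target
//
// Afterwards the tree exposes only the leaves up to `target`; internally
// MT?.j becomes `target + 1` relative to the tree's offset.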
/// Client-side verification
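// Informal flow: a prover obtains an authentication path for a leaf (via the
// library's path-generation API, e.g. `mt_get_path`) and sends the leaf
// index, the path and the root; the verifier recomputes the root with the
// functions below and compares it against the received one.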
private
val mt_verify_:
#hsz:hash_size_t ->
#hash_spec:MTS.hash_fun_t #(U32.v hsz) ->
k:index_t ->
j:index_t{k <= j} ->
mtr:HH.rid ->
p:const_path_p ->
ppos:uint32_t ->
acc:hash #hsz ->
actd:bool ->
hash_fun:hash_fun_t #hsz #hash_spec ->
HST.ST unit
(requires (fun h0 ->
let p = CB.cast p in
path_safe h0 mtr p /\ Rgl?.r_inv (hreg hsz) h0 acc /\
Path?.hash_size (B.get h0 p 0) = hsz /\
HH.disjoint (B.frameOf p) (B.frameOf acc) /\
HH.disjoint mtr (B.frameOf acc) /\
// Below is a very relaxed condition,
// but sufficient to ensure (+) for uint32_t is sound.
ppos <= 64ul - mt_path_length 0ul k j actd /\
ppos + mt_path_length 0ul k j actd <= V.size_of (phashes h0 p)))
(ensures (fun h0 _ h1 ->
let p = CB.cast p in
// memory safety
modifies (B.loc_all_regions_from false (B.frameOf acc)) h0 h1 /\
Rgl?.r_inv (hreg hsz) h1 acc /\
// correctness
Rgl?.r_repr (hreg hsz) h1 acc ==
MTH.mt_verify_ #(U32.v hsz) #hash_spec (U32.v k) (U32.v j) (lift_path h0 mtr p)
(U32.v ppos) (Rgl?.r_repr (hreg hsz) h0 acc) actd))
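// The recursion below consumes one tree level per call: when `k` is even the
// accumulator is the left child and the stored path hash the right one
// (`hash_fun acc phash acc`); when `k` is odd the roles are swapped
// (`hash_fun phash acc acc`).  `k` and `j` are halved on the way up, `ppos`
// advances past each consumed path hash, and `actd` records whether `acc`
// already carries a meaningful intermediate hash.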
#push-options "--z3rlimit 200 --initial_fuel 1 --max_fuel 1"
let rec mt_verify_ #hsz #hash_spec k j mtr p ppos acc actd hash_fun =
let ncp:path_p = CB.cast p in
let hh0 = HST.get () in
if j = 0ul then ()
else (let nactd = actd || (j % 2ul = 1ul) in
if k % 2ul = 0ul then begin
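      // `k` even: the accumulator (when active) acts as a left child.  If
      // there is no stored sibling to consume at this level (j = k, or
      // j = k + 1 with an inactive accumulator), `acc` is passed up unchanged.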
if j = k || (j = k + 1ul && not actd) then
mt_verify_ (k / 2ul) (j / 2ul) mtr p ppos acc nactd hash_fun
else begin
let ncpd = !*ncp in
let phash = V.index (Path?.hashes ncpd) ppos in
hash_fun acc phash acc;
let hh1 = HST.get () in
path_preserved mtr ncp
(B.loc_all_regions_from false (B.frameOf acc)) hh0 hh1;
lift_path_index hh0 mtr ncp ppos;
assert (Rgl?.r_repr (hreg hsz) hh1 acc ==
hash_spec (Rgl?.r_repr (hreg hsz) hh0 acc)
(S.index (lift_path #hsz hh0 mtr ncp) (U32.v ppos)));
mt_verify_ (k / 2ul) (j / 2ul) mtr p (ppos + 1ul) acc nactd hash_fun
end
end
else begin
let ncpd = !*ncp in
let phash = V.index (Path?.hashes ncpd) ppos in
hash_fun phash acc acc;
let hh1 = HST.get () in
path_preserved mtr ncp
(B.loc_all_regions_from false (B.frameOf acc)) hh0 hh1;
lift_path_index hh0 mtr ncp ppos;
assert (Rgl?.r_repr (hreg hsz) hh1 acc ==
hash_spec (S.index (lift_path #hsz hh0 mtr ncp) (U32.v ppos))
(Rgl?.r_repr (hreg hsz) hh0 acc));
mt_verify_ (k / 2ul) (j / 2ul) mtr p (ppos + 1ul) acc nactd hash_fun
end)
#pop-options
private inline_for_extraction
val mt_verify_pre_nst: mt:merkle_tree -> k:offset_t -> j:offset_t -> p:path -> rt:(hash #(MT?.hash_size mt)) -> Tot bool | false | false | MerkleTree.Low.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 0,
"initial_ifuel": 0,
"max_fuel": 0,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 10,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val mt_verify_pre_nst: mt:merkle_tree -> k:offset_t -> j:offset_t -> p:path -> rt:(hash #(MT?.hash_size mt)) -> Tot bool | [] | MerkleTree.Low.mt_verify_pre_nst | {
"file_name": "src/MerkleTree.Low.fst",
"git_rev": "3b0f086655c145aa23f58a97259ebf4cf112a4a3",
"git_url": "https://github.com/hacl-star/merkle-tree.git",
"project_name": "merkle-tree"
} |
mt: MerkleTree.Low.merkle_tree ->
k: MerkleTree.Low.offset_t ->
j: MerkleTree.Low.offset_t ->
p: MerkleTree.Low.path ->
rt: MerkleTree.Low.Datastructures.hash
-> Prims.bool | {
"end_col": 67,
"end_line": 2896,
"start_col": 2,
"start_line": 2889
} |