effect (stringclasses, 48 values) | original_source_type (stringlengths, 0–23k) | opens_and_abbrevs (listlengths, 2–92) | isa_cross_project_example (bool, 1 class) | source_definition (stringlengths, 9–57.9k) | partial_definition (stringlengths, 7–23.3k) | is_div (bool, 2 classes) | is_type (null) | is_proof (bool, 2 classes) | completed_definiton (stringlengths, 1–250k) | dependencies (dict) | effect_flags (sequencelengths, 0–2) | ideal_premises (sequencelengths, 0–236) | mutual_with (sequencelengths, 0–11) | file_context (stringlengths, 0–407k) | interleaved (bool, 1 class) | is_simply_typed (bool, 2 classes) | file_name (stringlengths, 5–48) | vconfig (dict) | is_simple_lemma (null) | source_type (stringlengths, 10–23k) | proof_features (sequencelengths, 0–1) | name (stringlengths, 8–95) | source (dict) | verbose_type (stringlengths, 1–7.42k) | source_range (dict) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
FStar.Pervasives.Lemma | val fadd_mul_r5_eval_lemma:
#w:lanes
-> acc:felem5 w{felem_fits5 acc (2, 2, 2, 2, 2)}
-> f1:felem5 w{felem_fits5 f1 (1, 1, 1, 1, 1)}
-> r:felem5 w{felem_fits5 r (1, 1, 1, 1, 1)}
-> r5:felem5 w{felem_fits5 r5 (5, 5, 5, 5, 5) /\ r5 == precomp_r5 r} ->
Lemma (feval5 (fadd_mul_r5 acc f1 r r5) ==
map2 (Vec.pfmul) (map2 (Vec.pfadd) (feval5 acc) (feval5 f1)) (feval5 r))
[SMTPat (fadd_mul_r5 acc f1 r r5)] | [
{
"abbrev": true,
"full_module": "Hacl.Spec.Poly1305.Vec",
"short_module": "Vec"
},
{
"abbrev": false,
"full_module": "Hacl.Poly1305.Field32xN.Lemmas2",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Poly1305.Field32xN.Lemmas1",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Poly1305.Field32xN.Lemmas0",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305.Field32xN",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305.Field32xN",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305.Field32xN",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let fadd_mul_r5_eval_lemma #w acc f1 r r5 =
let acc1 = fadd5 acc f1 in
fadd5_eval_lemma #w acc f1;
let tmp = mul_felem5 acc1 r r5 in
mul_felem5_eval_lemma #w acc1 r r5;
let res = carry_wide_felem5 tmp in
carry_wide_felem5_eval_lemma #w tmp | val fadd_mul_r5_eval_lemma:
#w:lanes
-> acc:felem5 w{felem_fits5 acc (2, 2, 2, 2, 2)}
-> f1:felem5 w{felem_fits5 f1 (1, 1, 1, 1, 1)}
-> r:felem5 w{felem_fits5 r (1, 1, 1, 1, 1)}
-> r5:felem5 w{felem_fits5 r5 (5, 5, 5, 5, 5) /\ r5 == precomp_r5 r} ->
Lemma (feval5 (fadd_mul_r5 acc f1 r r5) ==
map2 (Vec.pfmul) (map2 (Vec.pfadd) (feval5 acc) (feval5 f1)) (feval5 r))
[SMTPat (fadd_mul_r5 acc f1 r r5)]
let fadd_mul_r5_eval_lemma #w acc f1 r r5 = | false | null | true | let acc1 = fadd5 acc f1 in
fadd5_eval_lemma #w acc f1;
let tmp = mul_felem5 acc1 r r5 in
mul_felem5_eval_lemma #w acc1 r r5;
let res = carry_wide_felem5 tmp in
carry_wide_felem5_eval_lemma #w tmp | {
"checked_file": "Hacl.Spec.Poly1305.Field32xN.Lemmas.fst.checked",
"dependencies": [
"prims.fst.checked",
"Lib.Sequence.fsti.checked",
"Lib.IntVector.fsti.checked",
"Lib.IntTypes.fsti.checked",
"Hacl.Spec.Poly1305.Vec.fst.checked",
"Hacl.Spec.Poly1305.Field32xN.fst.checked",
"Hacl.Poly1305.Field32xN.Lemmas2.fst.checked",
"Hacl.Poly1305.Field32xN.Lemmas1.fst.checked",
"Hacl.Poly1305.Field32xN.Lemmas0.fst.checked",
"Hacl.Impl.Poly1305.Lemmas.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Classical.fsti.checked",
"FStar.Calc.fsti.checked"
],
"interface_file": false,
"source_file": "Hacl.Spec.Poly1305.Field32xN.Lemmas.fst"
} | [
"lemma"
] | [
"Hacl.Spec.Poly1305.Field32xN.lanes",
"Hacl.Spec.Poly1305.Field32xN.felem5",
"Hacl.Spec.Poly1305.Field32xN.felem_fits5",
"FStar.Pervasives.Native.Mktuple5",
"Prims.nat",
"Prims.l_and",
"Prims.eq2",
"Hacl.Spec.Poly1305.Field32xN.precomp_r5",
"Hacl.Poly1305.Field32xN.Lemmas1.carry_wide_felem5_eval_lemma",
"Hacl.Spec.Poly1305.Field32xN.carry_wide_felem5",
"Prims.unit",
"Hacl.Spec.Poly1305.Field32xN.Lemmas.mul_felem5_eval_lemma",
"Hacl.Spec.Poly1305.Field32xN.felem_wide5",
"Hacl.Spec.Poly1305.Field32xN.mul_felem5",
"Hacl.Spec.Poly1305.Field32xN.Lemmas.fadd5_eval_lemma",
"Hacl.Spec.Poly1305.Field32xN.fadd5"
] | [] | module Hacl.Spec.Poly1305.Field32xN.Lemmas
open Lib.IntTypes
open Lib.IntVector
open Lib.Sequence
open FStar.Mul
open Hacl.Spec.Poly1305.Field32xN
open Hacl.Poly1305.Field32xN.Lemmas0
open Hacl.Poly1305.Field32xN.Lemmas1
open Hacl.Poly1305.Field32xN.Lemmas2
module Vec = Hacl.Spec.Poly1305.Vec
#set-options "--z3rlimit 100 --max_fuel 0 --initial_ifuel 1 --max_ifuel 1 --using_facts_from '* -FStar.Seq'"
val lemma_feval_is_fas_nat_i:
#w:lanes
-> f:felem5 w{felem_less5 f (pow2 128)}
-> i:size_nat{i < w} ->
Lemma ((feval5 f).[i] == (fas_nat5 f).[i])
let lemma_feval_is_fas_nat_i #w f i =
assert_norm (pow2 128 < Vec.prime);
assert ((feval5 f).[i] == (as_nat5 (transpose f).[i]) % Vec.prime);
FStar.Math.Lemmas.modulo_lemma (as_nat5 (transpose f).[i]) Vec.prime
val lemma_feval_is_fas_nat:
#w:lanes
-> f:felem5 w{felem_less5 f (pow2 128)} ->
Lemma (forall (i:nat). i < w ==> (fas_nat5 f).[i] == (feval5 f).[i])
let lemma_feval_is_fas_nat #w f =
FStar.Classical.forall_intro (lemma_feval_is_fas_nat_i #w f)
val precomp_r5_fits_lemma:
#w:lanes
-> r:felem5 w{felem_fits5 r (1, 1, 1, 1, 1)} ->
Lemma (felem_fits5 (precomp_r5 #w r) (5, 5, 5, 5, 5))
[SMTPat (precomp_r5 #w r)]
let precomp_r5_fits_lemma #w r =
FStar.Classical.forall_intro (precomp_r5_as_tup64 #w r)
val precomp_r5_fits_lemma2:
#w:lanes
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)} ->
Lemma (felem_fits5 (precomp_r5 #w r) (10, 10, 10, 10, 10))
[SMTPat (precomp_r5 #w r)]
let precomp_r5_fits_lemma2 #w r =
FStar.Classical.forall_intro (precomp_r5_as_tup64 #w r)
val precomp_r5_zeros: w:lanes -> Lemma
(let r = (zero w, zero w, zero w, zero w, zero w) in
precomp_r5 r == (zero w, zero w, zero w, zero w, zero w))
let precomp_r5_zeros w =
let r = (zero w, zero w, zero w, zero w, zero w) in
let (r0, r1, r2, r3, r4) = precomp_r5 r in
let aux (i:nat{i < w}) : Lemma ((vec_v (vec_smul_mod (zero w) (u64 5))).[i] == u64 0) = () in
Classical.forall_intro aux;
eq_intro (vec_v (vec_smul_mod (zero w) (u64 5))) (vec_v (zero w));
vecv_extensionality (vec_smul_mod (zero w) (u64 5)) (zero w)
val fadd5_fits_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (2,2,2,2,2)}
-> f2:felem5 w{felem_fits5 f2 (1,1,1,1,1)} ->
Lemma (felem_fits5 (fadd5 f1 f2) (3,3,3,3,3))
[SMTPat (fadd5 f1 f2)]
let fadd5_fits_lemma #w f1 f2 =
let (f10, f11, f12, f13, f14) = f1 in
let (f20, f21, f22, f23, f24) = f2 in
let o = fadd5 f1 f2 in
vec_add_mod_lemma f10 f20;
vec_add_mod_lemma f11 f21;
vec_add_mod_lemma f12 f22;
vec_add_mod_lemma f13 f23;
vec_add_mod_lemma f14 f24
val fadd5_eval_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (2,2,2,2,2)}
-> f2:felem5 w{felem_fits5 f2 (1,1,1,1,1)} ->
Lemma (feval5 (fadd5 f1 f2) == map2 Vec.pfadd (feval5 f1) (feval5 f2))
[SMTPat (fadd5 f1 f2)]
let fadd5_eval_lemma #w f1 f2 =
let o = fadd5 f1 f2 in
FStar.Classical.forall_intro (fadd5_eval_lemma_i f1 f2);
eq_intro (feval5 o) (map2 Vec.pfadd (feval5 f1) (feval5 f2))
val mul_felem5_fits_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10)} ->
Lemma (felem_wide_fits5 (mul_felem5 #w f1 r r5) (126, 102, 78, 54, 30))
[SMTPat (mul_felem5 #w f1 r r5)]
let mul_felem5_fits_lemma #w f1 r r5 =
let (r0, r1, r2, r3, r4) = r in
let (f10, f11, f12, f13, f14) = f1 in
let (r50, r51, r52, r53, r54) = r5 in
let (a0,a1,a2,a3,a4) = smul_felem5 #w f10 (r0,r1,r2,r3,r4) in
smul_felem5_fits_lemma #w #3 #(2,2,2,2,2) f10 (r0,r1,r2,r3,r4);
let (a10,a11,a12,a13,a14) = smul_add_felem5 #w f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4) in
smul_add_felem5_fits_lemma #w #3 #(10,2,2,2,2) #(6,6,6,6,6) f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4);
let (a20,a21,a22,a23,a24) = smul_add_felem5 #w f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14) in
smul_add_felem5_fits_lemma #w #3 #(10,10,2,2,2) #(36,12,12,12,12) f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14);
let (a30,a31,a32,a33,a34) = smul_add_felem5 #w f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24) in
smul_add_felem5_fits_lemma #w #3 #(10,10,10,2,2) #(66,42,18,18,18) f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24);
let (a40,a41,a42,a43,a44) = smul_add_felem5 #w f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34) in
smul_add_felem5_fits_lemma #w #3 #(10,10,10,10,2) #(96,72,48,24,24) f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34)
val mul_felem5_eval_lemma_i:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10) /\ r5 == precomp_r5 r}
-> i:nat{i < w} ->
Lemma ((feval5 (mul_felem5 #w f1 r r5)).[i] == (feval5 f1).[i] `Vec.pfmul` (feval5 r).[i])
let mul_felem5_eval_lemma_i #w f1 r r5 i =
let (r0, r1, r2, r3, r4) = r in
let (f10, f11, f12, f13, f14) = f1 in
let (r50, r51, r52, r53, r54) = r5 in
let (a0,a1,a2,a3,a4) = smul_felem5 #w f10 (r0,r1,r2,r3,r4) in
smul_felem5_eval_lemma #w #3 #(2,2,2,2,2) f10 (r0,r1,r2,r3,r4);
smul_felem5_fits_lemma #w #3 #(2,2,2,2,2) f10 (r0,r1,r2,r3,r4);
assert ((fas_nat5 (a0,a1,a2,a3,a4)).[i] == (uint64xN_v f10).[i] * (fas_nat5 (r0,r1,r2,r3,r4)).[i]);
let (a10,a11,a12,a13,a14) = smul_add_felem5 #w f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4) in
smul_add_felem5_eval_lemma #w #3 #(10,2,2,2,2) #(6,6,6,6,6) f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4);
smul_add_felem5_fits_lemma #w #3 #(10,2,2,2,2) #(6,6,6,6,6) f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4);
assert ((fas_nat5 (a10,a11,a12,a13,a14)).[i] == (fas_nat5 (a0,a1,a2,a3,a4)).[i] + (uint64xN_v f11).[i] * (fas_nat5 (r54,r0,r1,r2,r3)).[i]);
let (a20,a21,a22,a23,a24) = smul_add_felem5 #w f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14) in
smul_add_felem5_eval_lemma #w #3 #(10,10,2,2,2) #(36,12,12,12,12) f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14);
smul_add_felem5_fits_lemma #w #3 #(10,10,2,2,2) #(36,12,12,12,12) f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14);
assert ((fas_nat5 (a20,a21,a22,a23,a24)).[i] == (fas_nat5 (a10,a11,a12,a13,a14)).[i] + (uint64xN_v f12).[i] * (fas_nat5 (r53,r54,r0,r1,r2)).[i]);
let (a30,a31,a32,a33,a34) = smul_add_felem5 #w f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24) in
smul_add_felem5_eval_lemma #w #3 #(10,10,10,2,2) #(66,42,18,18,18) f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24);
smul_add_felem5_fits_lemma #w #3 #(10,10,10,2,2) #(66,42,18,18,18) f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24);
assert ((fas_nat5 (a30,a31,a32,a33,a34)).[i] == (fas_nat5 (a20,a21,a22,a23,a24)).[i] + (uint64xN_v f13).[i] * (fas_nat5 (r52,r53,r54,r0,r1)).[i]);
let (a40,a41,a42,a43,a44) = smul_add_felem5 #w f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34) in
smul_add_felem5_eval_lemma #w #3 #(10,10,10,10,2) #(96,72,48,24,24) f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34);
smul_add_felem5_fits_lemma #w #3 #(10,10,10,10,2) #(96,72,48,24,24) f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34);
assert ((fas_nat5 (a40,a41,a42,a43,a44)).[i] == (fas_nat5 (a30,a31,a32,a33,a34)).[i] + (uint64xN_v f14).[i] * (fas_nat5 (r51,r52,r53,r54,r0)).[i]);
assert ((fas_nat5 (a40,a41,a42,a43,a44)).[i] ==
(uint64xN_v f10).[i] * (fas_nat5 (r0,r1,r2,r3,r4)).[i] +
(uint64xN_v f11).[i] * (fas_nat5 (r54,r0,r1,r2,r3)).[i] +
(uint64xN_v f12).[i] * (fas_nat5 (r53,r54,r0,r1,r2)).[i] +
(uint64xN_v f13).[i] * (fas_nat5 (r52,r53,r54,r0,r1)).[i] +
(uint64xN_v f14).[i] * (fas_nat5 (r51,r52,r53,r54,r0)).[i]);
mul_felem5_eval_as_tup64 #w f1 r r5 i;
mul_felem5_lemma (as_tup64_i f1 i) (as_tup64_i r i)
val mul_felem5_eval_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10) /\ r5 == precomp_r5 r} ->
Lemma (feval5 (mul_felem5 #w f1 r r5) == map2 (Vec.pfmul) (feval5 f1) (feval5 r))
[SMTPat (mul_felem5 #w f1 r r5)]
let mul_felem5_eval_lemma #w f1 r r5 =
let tmp = map2 (Vec.pfmul) (feval5 f1) (feval5 r) in
FStar.Classical.forall_intro (mul_felem5_eval_lemma_i #w f1 r r5);
eq_intro (feval5 (mul_felem5 #w f1 r r5)) tmp
val fmul_r5_fits_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10)} ->
Lemma (felem_fits5 (fmul_r5 #w f1 r r5) (1, 2, 1, 1, 2))
[SMTPat (fmul_r5 #w f1 r r5)]
let fmul_r5_fits_lemma #w f1 r r5 =
let tmp = mul_felem5 f1 r r5 in
mul_felem5_fits_lemma #w f1 r r5;
let res = carry_wide_felem5 tmp in
carry_wide_felem5_fits_lemma #w tmp
val fmul_r5_eval_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10) /\ r5 == precomp_r5 r} ->
Lemma (feval5 (fmul_r5 #w f1 r r5) == map2 (Vec.pfmul) (feval5 f1) (feval5 r))
[SMTPat (fmul_r5 #w f1 r r5)]
let fmul_r5_eval_lemma #w f1 r r5 =
let tmp = mul_felem5 f1 r r5 in
mul_felem5_eval_lemma #w f1 r r5;
let res = carry_wide_felem5 tmp in
carry_wide_felem5_eval_lemma #w tmp
val fadd_mul_r5_fits_lemma:
#w:lanes
-> acc:felem5 w{felem_fits5 acc (2, 2, 2, 2, 2)}
-> f1:felem5 w{felem_fits5 f1 (1, 1, 1, 1, 1)}
-> r:felem5 w{felem_fits5 r (1, 1, 1, 1, 1)}
-> r5:felem5 w{felem_fits5 r5 (5, 5, 5, 5, 5)} ->
Lemma (felem_fits5 (fadd_mul_r5 acc f1 r r5) (1, 2, 1, 1, 2))
[SMTPat (fadd_mul_r5 acc f1 r r5)]
let fadd_mul_r5_fits_lemma #w acc f1 r r5 =
let acc1 = fadd5 acc f1 in
fadd5_fits_lemma #w acc f1;
let tmp = mul_felem5 acc1 r r5 in
mul_felem5_fits_lemma #w acc1 r r5;
let res = carry_wide_felem5 tmp in
carry_wide_felem5_fits_lemma #w tmp
val fadd_mul_r5_eval_lemma:
#w:lanes
-> acc:felem5 w{felem_fits5 acc (2, 2, 2, 2, 2)}
-> f1:felem5 w{felem_fits5 f1 (1, 1, 1, 1, 1)}
-> r:felem5 w{felem_fits5 r (1, 1, 1, 1, 1)}
-> r5:felem5 w{felem_fits5 r5 (5, 5, 5, 5, 5) /\ r5 == precomp_r5 r} ->
Lemma (feval5 (fadd_mul_r5 acc f1 r r5) ==
map2 (Vec.pfmul) (map2 (Vec.pfadd) (feval5 acc) (feval5 f1)) (feval5 r))
[SMTPat (fadd_mul_r5 acc f1 r r5)] | false | false | Hacl.Spec.Poly1305.Field32xN.Lemmas.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 100,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val fadd_mul_r5_eval_lemma:
#w:lanes
-> acc:felem5 w{felem_fits5 acc (2, 2, 2, 2, 2)}
-> f1:felem5 w{felem_fits5 f1 (1, 1, 1, 1, 1)}
-> r:felem5 w{felem_fits5 r (1, 1, 1, 1, 1)}
-> r5:felem5 w{felem_fits5 r5 (5, 5, 5, 5, 5) /\ r5 == precomp_r5 r} ->
Lemma (feval5 (fadd_mul_r5 acc f1 r r5) ==
map2 (Vec.pfmul) (map2 (Vec.pfadd) (feval5 acc) (feval5 f1)) (feval5 r))
[SMTPat (fadd_mul_r5 acc f1 r r5)] | [] | Hacl.Spec.Poly1305.Field32xN.Lemmas.fadd_mul_r5_eval_lemma | {
"file_name": "code/poly1305/Hacl.Spec.Poly1305.Field32xN.Lemmas.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} |
acc:
Hacl.Spec.Poly1305.Field32xN.felem5 w
{Hacl.Spec.Poly1305.Field32xN.felem_fits5 acc (2, 2, 2, 2, 2)} ->
f1:
Hacl.Spec.Poly1305.Field32xN.felem5 w
{Hacl.Spec.Poly1305.Field32xN.felem_fits5 f1 (1, 1, 1, 1, 1)} ->
r:
Hacl.Spec.Poly1305.Field32xN.felem5 w
{Hacl.Spec.Poly1305.Field32xN.felem_fits5 r (1, 1, 1, 1, 1)} ->
r5:
Hacl.Spec.Poly1305.Field32xN.felem5 w
{ Hacl.Spec.Poly1305.Field32xN.felem_fits5 r5 (5, 5, 5, 5, 5) /\
r5 == Hacl.Spec.Poly1305.Field32xN.precomp_r5 r }
-> FStar.Pervasives.Lemma
(ensures
Hacl.Spec.Poly1305.Field32xN.feval5 (Hacl.Spec.Poly1305.Field32xN.fadd_mul_r5 acc f1 r r5) ==
Lib.Sequence.map2 Hacl.Spec.Poly1305.Vec.pfmul
(Lib.Sequence.map2 Hacl.Spec.Poly1305.Vec.pfadd
(Hacl.Spec.Poly1305.Field32xN.feval5 acc)
(Hacl.Spec.Poly1305.Field32xN.feval5 f1))
(Hacl.Spec.Poly1305.Field32xN.feval5 r))
[SMTPat (Hacl.Spec.Poly1305.Field32xN.fadd_mul_r5 acc f1 r r5)] | {
"end_col": 37,
"end_line": 250,
"start_col": 43,
"start_line": 244
} |
FStar.Pervasives.Lemma | val fadd_mul_r5_fits_lemma:
#w:lanes
-> acc:felem5 w{felem_fits5 acc (2, 2, 2, 2, 2)}
-> f1:felem5 w{felem_fits5 f1 (1, 1, 1, 1, 1)}
-> r:felem5 w{felem_fits5 r (1, 1, 1, 1, 1)}
-> r5:felem5 w{felem_fits5 r5 (5, 5, 5, 5, 5)} ->
Lemma (felem_fits5 (fadd_mul_r5 acc f1 r r5) (1, 2, 1, 1, 2))
[SMTPat (fadd_mul_r5 acc f1 r r5)] | [
{
"abbrev": true,
"full_module": "Hacl.Spec.Poly1305.Vec",
"short_module": "Vec"
},
{
"abbrev": false,
"full_module": "Hacl.Poly1305.Field32xN.Lemmas2",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Poly1305.Field32xN.Lemmas1",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Poly1305.Field32xN.Lemmas0",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305.Field32xN",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305.Field32xN",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305.Field32xN",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let fadd_mul_r5_fits_lemma #w acc f1 r r5 =
let acc1 = fadd5 acc f1 in
fadd5_fits_lemma #w acc f1;
let tmp = mul_felem5 acc1 r r5 in
mul_felem5_fits_lemma #w acc1 r r5;
let res = carry_wide_felem5 tmp in
carry_wide_felem5_fits_lemma #w tmp | val fadd_mul_r5_fits_lemma:
#w:lanes
-> acc:felem5 w{felem_fits5 acc (2, 2, 2, 2, 2)}
-> f1:felem5 w{felem_fits5 f1 (1, 1, 1, 1, 1)}
-> r:felem5 w{felem_fits5 r (1, 1, 1, 1, 1)}
-> r5:felem5 w{felem_fits5 r5 (5, 5, 5, 5, 5)} ->
Lemma (felem_fits5 (fadd_mul_r5 acc f1 r r5) (1, 2, 1, 1, 2))
[SMTPat (fadd_mul_r5 acc f1 r r5)]
let fadd_mul_r5_fits_lemma #w acc f1 r r5 = | false | null | true | let acc1 = fadd5 acc f1 in
fadd5_fits_lemma #w acc f1;
let tmp = mul_felem5 acc1 r r5 in
mul_felem5_fits_lemma #w acc1 r r5;
let res = carry_wide_felem5 tmp in
carry_wide_felem5_fits_lemma #w tmp | {
"checked_file": "Hacl.Spec.Poly1305.Field32xN.Lemmas.fst.checked",
"dependencies": [
"prims.fst.checked",
"Lib.Sequence.fsti.checked",
"Lib.IntVector.fsti.checked",
"Lib.IntTypes.fsti.checked",
"Hacl.Spec.Poly1305.Vec.fst.checked",
"Hacl.Spec.Poly1305.Field32xN.fst.checked",
"Hacl.Poly1305.Field32xN.Lemmas2.fst.checked",
"Hacl.Poly1305.Field32xN.Lemmas1.fst.checked",
"Hacl.Poly1305.Field32xN.Lemmas0.fst.checked",
"Hacl.Impl.Poly1305.Lemmas.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Classical.fsti.checked",
"FStar.Calc.fsti.checked"
],
"interface_file": false,
"source_file": "Hacl.Spec.Poly1305.Field32xN.Lemmas.fst"
} | [
"lemma"
] | [
"Hacl.Spec.Poly1305.Field32xN.lanes",
"Hacl.Spec.Poly1305.Field32xN.felem5",
"Hacl.Spec.Poly1305.Field32xN.felem_fits5",
"FStar.Pervasives.Native.Mktuple5",
"Prims.nat",
"Hacl.Poly1305.Field32xN.Lemmas1.carry_wide_felem5_fits_lemma",
"Hacl.Spec.Poly1305.Field32xN.carry_wide_felem5",
"Prims.unit",
"Hacl.Spec.Poly1305.Field32xN.Lemmas.mul_felem5_fits_lemma",
"Hacl.Spec.Poly1305.Field32xN.felem_wide5",
"Hacl.Spec.Poly1305.Field32xN.mul_felem5",
"Hacl.Spec.Poly1305.Field32xN.Lemmas.fadd5_fits_lemma",
"Hacl.Spec.Poly1305.Field32xN.fadd5"
] | [] | module Hacl.Spec.Poly1305.Field32xN.Lemmas
open Lib.IntTypes
open Lib.IntVector
open Lib.Sequence
open FStar.Mul
open Hacl.Spec.Poly1305.Field32xN
open Hacl.Poly1305.Field32xN.Lemmas0
open Hacl.Poly1305.Field32xN.Lemmas1
open Hacl.Poly1305.Field32xN.Lemmas2
module Vec = Hacl.Spec.Poly1305.Vec
#set-options "--z3rlimit 100 --max_fuel 0 --initial_ifuel 1 --max_ifuel 1 --using_facts_from '* -FStar.Seq'"
val lemma_feval_is_fas_nat_i:
#w:lanes
-> f:felem5 w{felem_less5 f (pow2 128)}
-> i:size_nat{i < w} ->
Lemma ((feval5 f).[i] == (fas_nat5 f).[i])
let lemma_feval_is_fas_nat_i #w f i =
assert_norm (pow2 128 < Vec.prime);
assert ((feval5 f).[i] == (as_nat5 (transpose f).[i]) % Vec.prime);
FStar.Math.Lemmas.modulo_lemma (as_nat5 (transpose f).[i]) Vec.prime
val lemma_feval_is_fas_nat:
#w:lanes
-> f:felem5 w{felem_less5 f (pow2 128)} ->
Lemma (forall (i:nat). i < w ==> (fas_nat5 f).[i] == (feval5 f).[i])
let lemma_feval_is_fas_nat #w f =
FStar.Classical.forall_intro (lemma_feval_is_fas_nat_i #w f)
val precomp_r5_fits_lemma:
#w:lanes
-> r:felem5 w{felem_fits5 r (1, 1, 1, 1, 1)} ->
Lemma (felem_fits5 (precomp_r5 #w r) (5, 5, 5, 5, 5))
[SMTPat (precomp_r5 #w r)]
let precomp_r5_fits_lemma #w r =
FStar.Classical.forall_intro (precomp_r5_as_tup64 #w r)
val precomp_r5_fits_lemma2:
#w:lanes
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)} ->
Lemma (felem_fits5 (precomp_r5 #w r) (10, 10, 10, 10, 10))
[SMTPat (precomp_r5 #w r)]
let precomp_r5_fits_lemma2 #w r =
FStar.Classical.forall_intro (precomp_r5_as_tup64 #w r)
val precomp_r5_zeros: w:lanes -> Lemma
(let r = (zero w, zero w, zero w, zero w, zero w) in
precomp_r5 r == (zero w, zero w, zero w, zero w, zero w))
let precomp_r5_zeros w =
let r = (zero w, zero w, zero w, zero w, zero w) in
let (r0, r1, r2, r3, r4) = precomp_r5 r in
let aux (i:nat{i < w}) : Lemma ((vec_v (vec_smul_mod (zero w) (u64 5))).[i] == u64 0) = () in
Classical.forall_intro aux;
eq_intro (vec_v (vec_smul_mod (zero w) (u64 5))) (vec_v (zero w));
vecv_extensionality (vec_smul_mod (zero w) (u64 5)) (zero w)
val fadd5_fits_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (2,2,2,2,2)}
-> f2:felem5 w{felem_fits5 f2 (1,1,1,1,1)} ->
Lemma (felem_fits5 (fadd5 f1 f2) (3,3,3,3,3))
[SMTPat (fadd5 f1 f2)]
let fadd5_fits_lemma #w f1 f2 =
let (f10, f11, f12, f13, f14) = f1 in
let (f20, f21, f22, f23, f24) = f2 in
let o = fadd5 f1 f2 in
vec_add_mod_lemma f10 f20;
vec_add_mod_lemma f11 f21;
vec_add_mod_lemma f12 f22;
vec_add_mod_lemma f13 f23;
vec_add_mod_lemma f14 f24
val fadd5_eval_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (2,2,2,2,2)}
-> f2:felem5 w{felem_fits5 f2 (1,1,1,1,1)} ->
Lemma (feval5 (fadd5 f1 f2) == map2 Vec.pfadd (feval5 f1) (feval5 f2))
[SMTPat (fadd5 f1 f2)]
let fadd5_eval_lemma #w f1 f2 =
let o = fadd5 f1 f2 in
FStar.Classical.forall_intro (fadd5_eval_lemma_i f1 f2);
eq_intro (feval5 o) (map2 Vec.pfadd (feval5 f1) (feval5 f2))
val mul_felem5_fits_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10)} ->
Lemma (felem_wide_fits5 (mul_felem5 #w f1 r r5) (126, 102, 78, 54, 30))
[SMTPat (mul_felem5 #w f1 r r5)]
let mul_felem5_fits_lemma #w f1 r r5 =
let (r0, r1, r2, r3, r4) = r in
let (f10, f11, f12, f13, f14) = f1 in
let (r50, r51, r52, r53, r54) = r5 in
let (a0,a1,a2,a3,a4) = smul_felem5 #w f10 (r0,r1,r2,r3,r4) in
smul_felem5_fits_lemma #w #3 #(2,2,2,2,2) f10 (r0,r1,r2,r3,r4);
let (a10,a11,a12,a13,a14) = smul_add_felem5 #w f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4) in
smul_add_felem5_fits_lemma #w #3 #(10,2,2,2,2) #(6,6,6,6,6) f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4);
let (a20,a21,a22,a23,a24) = smul_add_felem5 #w f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14) in
smul_add_felem5_fits_lemma #w #3 #(10,10,2,2,2) #(36,12,12,12,12) f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14);
let (a30,a31,a32,a33,a34) = smul_add_felem5 #w f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24) in
smul_add_felem5_fits_lemma #w #3 #(10,10,10,2,2) #(66,42,18,18,18) f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24);
let (a40,a41,a42,a43,a44) = smul_add_felem5 #w f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34) in
smul_add_felem5_fits_lemma #w #3 #(10,10,10,10,2) #(96,72,48,24,24) f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34)
val mul_felem5_eval_lemma_i:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10) /\ r5 == precomp_r5 r}
-> i:nat{i < w} ->
Lemma ((feval5 (mul_felem5 #w f1 r r5)).[i] == (feval5 f1).[i] `Vec.pfmul` (feval5 r).[i])
let mul_felem5_eval_lemma_i #w f1 r r5 i =
let (r0, r1, r2, r3, r4) = r in
let (f10, f11, f12, f13, f14) = f1 in
let (r50, r51, r52, r53, r54) = r5 in
let (a0,a1,a2,a3,a4) = smul_felem5 #w f10 (r0,r1,r2,r3,r4) in
smul_felem5_eval_lemma #w #3 #(2,2,2,2,2) f10 (r0,r1,r2,r3,r4);
smul_felem5_fits_lemma #w #3 #(2,2,2,2,2) f10 (r0,r1,r2,r3,r4);
assert ((fas_nat5 (a0,a1,a2,a3,a4)).[i] == (uint64xN_v f10).[i] * (fas_nat5 (r0,r1,r2,r3,r4)).[i]);
let (a10,a11,a12,a13,a14) = smul_add_felem5 #w f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4) in
smul_add_felem5_eval_lemma #w #3 #(10,2,2,2,2) #(6,6,6,6,6) f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4);
smul_add_felem5_fits_lemma #w #3 #(10,2,2,2,2) #(6,6,6,6,6) f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4);
assert ((fas_nat5 (a10,a11,a12,a13,a14)).[i] == (fas_nat5 (a0,a1,a2,a3,a4)).[i] + (uint64xN_v f11).[i] * (fas_nat5 (r54,r0,r1,r2,r3)).[i]);
let (a20,a21,a22,a23,a24) = smul_add_felem5 #w f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14) in
smul_add_felem5_eval_lemma #w #3 #(10,10,2,2,2) #(36,12,12,12,12) f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14);
smul_add_felem5_fits_lemma #w #3 #(10,10,2,2,2) #(36,12,12,12,12) f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14);
assert ((fas_nat5 (a20,a21,a22,a23,a24)).[i] == (fas_nat5 (a10,a11,a12,a13,a14)).[i] + (uint64xN_v f12).[i] * (fas_nat5 (r53,r54,r0,r1,r2)).[i]);
let (a30,a31,a32,a33,a34) = smul_add_felem5 #w f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24) in
smul_add_felem5_eval_lemma #w #3 #(10,10,10,2,2) #(66,42,18,18,18) f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24);
smul_add_felem5_fits_lemma #w #3 #(10,10,10,2,2) #(66,42,18,18,18) f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24);
assert ((fas_nat5 (a30,a31,a32,a33,a34)).[i] == (fas_nat5 (a20,a21,a22,a23,a24)).[i] + (uint64xN_v f13).[i] * (fas_nat5 (r52,r53,r54,r0,r1)).[i]);
let (a40,a41,a42,a43,a44) = smul_add_felem5 #w f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34) in
smul_add_felem5_eval_lemma #w #3 #(10,10,10,10,2) #(96,72,48,24,24) f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34);
smul_add_felem5_fits_lemma #w #3 #(10,10,10,10,2) #(96,72,48,24,24) f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34);
assert ((fas_nat5 (a40,a41,a42,a43,a44)).[i] == (fas_nat5 (a30,a31,a32,a33,a34)).[i] + (uint64xN_v f14).[i] * (fas_nat5 (r51,r52,r53,r54,r0)).[i]);
assert ((fas_nat5 (a40,a41,a42,a43,a44)).[i] ==
(uint64xN_v f10).[i] * (fas_nat5 (r0,r1,r2,r3,r4)).[i] +
(uint64xN_v f11).[i] * (fas_nat5 (r54,r0,r1,r2,r3)).[i] +
(uint64xN_v f12).[i] * (fas_nat5 (r53,r54,r0,r1,r2)).[i] +
(uint64xN_v f13).[i] * (fas_nat5 (r52,r53,r54,r0,r1)).[i] +
(uint64xN_v f14).[i] * (fas_nat5 (r51,r52,r53,r54,r0)).[i]);
mul_felem5_eval_as_tup64 #w f1 r r5 i;
mul_felem5_lemma (as_tup64_i f1 i) (as_tup64_i r i)
val mul_felem5_eval_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10) /\ r5 == precomp_r5 r} ->
Lemma (feval5 (mul_felem5 #w f1 r r5) == map2 (Vec.pfmul) (feval5 f1) (feval5 r))
[SMTPat (mul_felem5 #w f1 r r5)]
let mul_felem5_eval_lemma #w f1 r r5 =
let tmp = map2 (Vec.pfmul) (feval5 f1) (feval5 r) in
FStar.Classical.forall_intro (mul_felem5_eval_lemma_i #w f1 r r5);
eq_intro (feval5 (mul_felem5 #w f1 r r5)) tmp
val fmul_r5_fits_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10)} ->
Lemma (felem_fits5 (fmul_r5 #w f1 r r5) (1, 2, 1, 1, 2))
[SMTPat (fmul_r5 #w f1 r r5)]
let fmul_r5_fits_lemma #w f1 r r5 =
let tmp = mul_felem5 f1 r r5 in
mul_felem5_fits_lemma #w f1 r r5;
let res = carry_wide_felem5 tmp in
carry_wide_felem5_fits_lemma #w tmp
val fmul_r5_eval_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10) /\ r5 == precomp_r5 r} ->
Lemma (feval5 (fmul_r5 #w f1 r r5) == map2 (Vec.pfmul) (feval5 f1) (feval5 r))
[SMTPat (fmul_r5 #w f1 r r5)]
let fmul_r5_eval_lemma #w f1 r r5 =
let tmp = mul_felem5 f1 r r5 in
mul_felem5_eval_lemma #w f1 r r5;
let res = carry_wide_felem5 tmp in
carry_wide_felem5_eval_lemma #w tmp
val fadd_mul_r5_fits_lemma:
#w:lanes
-> acc:felem5 w{felem_fits5 acc (2, 2, 2, 2, 2)}
-> f1:felem5 w{felem_fits5 f1 (1, 1, 1, 1, 1)}
-> r:felem5 w{felem_fits5 r (1, 1, 1, 1, 1)}
-> r5:felem5 w{felem_fits5 r5 (5, 5, 5, 5, 5)} ->
Lemma (felem_fits5 (fadd_mul_r5 acc f1 r r5) (1, 2, 1, 1, 2))
[SMTPat (fadd_mul_r5 acc f1 r r5)] | false | false | Hacl.Spec.Poly1305.Field32xN.Lemmas.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 100,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val fadd_mul_r5_fits_lemma:
#w:lanes
-> acc:felem5 w{felem_fits5 acc (2, 2, 2, 2, 2)}
-> f1:felem5 w{felem_fits5 f1 (1, 1, 1, 1, 1)}
-> r:felem5 w{felem_fits5 r (1, 1, 1, 1, 1)}
-> r5:felem5 w{felem_fits5 r5 (5, 5, 5, 5, 5)} ->
Lemma (felem_fits5 (fadd_mul_r5 acc f1 r r5) (1, 2, 1, 1, 2))
[SMTPat (fadd_mul_r5 acc f1 r r5)] | [] | Hacl.Spec.Poly1305.Field32xN.Lemmas.fadd_mul_r5_fits_lemma | {
"file_name": "code/poly1305/Hacl.Spec.Poly1305.Field32xN.Lemmas.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} |
acc:
Hacl.Spec.Poly1305.Field32xN.felem5 w
{Hacl.Spec.Poly1305.Field32xN.felem_fits5 acc (2, 2, 2, 2, 2)} ->
f1:
Hacl.Spec.Poly1305.Field32xN.felem5 w
{Hacl.Spec.Poly1305.Field32xN.felem_fits5 f1 (1, 1, 1, 1, 1)} ->
r:
Hacl.Spec.Poly1305.Field32xN.felem5 w
{Hacl.Spec.Poly1305.Field32xN.felem_fits5 r (1, 1, 1, 1, 1)} ->
r5:
Hacl.Spec.Poly1305.Field32xN.felem5 w
{Hacl.Spec.Poly1305.Field32xN.felem_fits5 r5 (5, 5, 5, 5, 5)}
-> FStar.Pervasives.Lemma
(ensures
Hacl.Spec.Poly1305.Field32xN.felem_fits5 (Hacl.Spec.Poly1305.Field32xN.fadd_mul_r5 acc
f1
r
r5)
(1, 2, 1, 1, 2)) [SMTPat (Hacl.Spec.Poly1305.Field32xN.fadd_mul_r5 acc f1 r r5)] | {
"end_col": 37,
"end_line": 231,
"start_col": 43,
"start_line": 225
} |
Prims.Pure | val fmul_r4_normalize51: a:felem5 4 ->
Pure (felem5 4)
(requires felem_fits5 a (1, 2, 1, 1, 2))
(ensures fun res ->
felem_fits5 res (4, 8, 4, 4, 8) /\
(feval5 res).[0] == Vec.pfadd (Vec.pfadd (Vec.pfadd (feval5 a).[0] (feval5 a).[1]) (feval5 a).[2]) (feval5 a).[3]) | [
{
"abbrev": true,
"full_module": "Hacl.Spec.Poly1305.Vec",
"short_module": "Vec"
},
{
"abbrev": false,
"full_module": "Hacl.Poly1305.Field32xN.Lemmas2",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Poly1305.Field32xN.Lemmas1",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Poly1305.Field32xN.Lemmas0",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305.Field32xN",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305.Field32xN",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305.Field32xN",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let fmul_r4_normalize51 fa =
let (o0, o1, o2, o3, o4) = fa in
let v00 = vec_interleave_high_n 2 o0 o0 in
let v10 = vec_add_mod o0 v00 in
let v10h = vec_interleave_high v10 v10 in
let v20 = vec_add_mod v10 v10h in
lemma_fmul_r4_normalize51 #1 o0;
let v01 = vec_interleave_high_n 2 o1 o1 in
let v11 = vec_add_mod o1 v01 in
let v11h = vec_interleave_high v11 v11 in
let v21 = vec_add_mod v11 v11h in
lemma_fmul_r4_normalize51 #2 o1;
let v02 = vec_interleave_high_n 2 o2 o2 in
let v12 = vec_add_mod o2 v02 in
let v12h = vec_interleave_high v12 v12 in
let v22 = vec_add_mod v12 v12h in
lemma_fmul_r4_normalize51 #1 o2;
let v03 = vec_interleave_high_n 2 o3 o3 in
let v13 = vec_add_mod o3 v03 in
let v13h = vec_interleave_high v13 v13 in
let v23 = vec_add_mod v13 v13h in
lemma_fmul_r4_normalize51 #1 o3;
let v04 = vec_interleave_high_n 2 o4 o4 in
let v14 = vec_add_mod o4 v04 in
let v14h = vec_interleave_high v14 v14 in
let v24 = vec_add_mod v14 v14h in
lemma_fmul_r4_normalize51 #2 o4;
let res = (v20, v21, v22, v23, v24) in
lemma_fmul_r4_normalize51_expand res fa;
res | val fmul_r4_normalize51: a:felem5 4 ->
Pure (felem5 4)
(requires felem_fits5 a (1, 2, 1, 1, 2))
(ensures fun res ->
felem_fits5 res (4, 8, 4, 4, 8) /\
(feval5 res).[0] == Vec.pfadd (Vec.pfadd (Vec.pfadd (feval5 a).[0] (feval5 a).[1]) (feval5 a).[2]) (feval5 a).[3])
let fmul_r4_normalize51 fa = | false | null | false | let o0, o1, o2, o3, o4 = fa in
let v00 = vec_interleave_high_n 2 o0 o0 in
let v10 = vec_add_mod o0 v00 in
let v10h = vec_interleave_high v10 v10 in
let v20 = vec_add_mod v10 v10h in
lemma_fmul_r4_normalize51 #1 o0;
let v01 = vec_interleave_high_n 2 o1 o1 in
let v11 = vec_add_mod o1 v01 in
let v11h = vec_interleave_high v11 v11 in
let v21 = vec_add_mod v11 v11h in
lemma_fmul_r4_normalize51 #2 o1;
let v02 = vec_interleave_high_n 2 o2 o2 in
let v12 = vec_add_mod o2 v02 in
let v12h = vec_interleave_high v12 v12 in
let v22 = vec_add_mod v12 v12h in
lemma_fmul_r4_normalize51 #1 o2;
let v03 = vec_interleave_high_n 2 o3 o3 in
let v13 = vec_add_mod o3 v03 in
let v13h = vec_interleave_high v13 v13 in
let v23 = vec_add_mod v13 v13h in
lemma_fmul_r4_normalize51 #1 o3;
let v04 = vec_interleave_high_n 2 o4 o4 in
let v14 = vec_add_mod o4 v04 in
let v14h = vec_interleave_high v14 v14 in
let v24 = vec_add_mod v14 v14h in
lemma_fmul_r4_normalize51 #2 o4;
let res = (v20, v21, v22, v23, v24) in
lemma_fmul_r4_normalize51_expand res fa;
res | {
"checked_file": "Hacl.Spec.Poly1305.Field32xN.Lemmas.fst.checked",
"dependencies": [
"prims.fst.checked",
"Lib.Sequence.fsti.checked",
"Lib.IntVector.fsti.checked",
"Lib.IntTypes.fsti.checked",
"Hacl.Spec.Poly1305.Vec.fst.checked",
"Hacl.Spec.Poly1305.Field32xN.fst.checked",
"Hacl.Poly1305.Field32xN.Lemmas2.fst.checked",
"Hacl.Poly1305.Field32xN.Lemmas1.fst.checked",
"Hacl.Poly1305.Field32xN.Lemmas0.fst.checked",
"Hacl.Impl.Poly1305.Lemmas.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Classical.fsti.checked",
"FStar.Calc.fsti.checked"
],
"interface_file": false,
"source_file": "Hacl.Spec.Poly1305.Field32xN.Lemmas.fst"
} | [] | [
"Hacl.Spec.Poly1305.Field32xN.felem5",
"Hacl.Spec.Poly1305.Field32xN.uint64xN",
"Prims.unit",
"Hacl.Spec.Poly1305.Field32xN.Lemmas.lemma_fmul_r4_normalize51_expand",
"FStar.Pervasives.Native.tuple5",
"FStar.Pervasives.Native.Mktuple5",
"Hacl.Spec.Poly1305.Field32xN.Lemmas.lemma_fmul_r4_normalize51",
"Lib.IntVector.vec_t",
"Lib.IntTypes.U64",
"Lib.IntVector.vec_add_mod",
"Lib.IntVector.vec_interleave_high",
"Lib.IntVector.vec_interleave_high_n"
] | [] | module Hacl.Spec.Poly1305.Field32xN.Lemmas
open Lib.IntTypes
open Lib.IntVector
open Lib.Sequence
open FStar.Mul
open Hacl.Spec.Poly1305.Field32xN
open Hacl.Poly1305.Field32xN.Lemmas0
open Hacl.Poly1305.Field32xN.Lemmas1
open Hacl.Poly1305.Field32xN.Lemmas2
module Vec = Hacl.Spec.Poly1305.Vec
#set-options "--z3rlimit 100 --max_fuel 0 --initial_ifuel 1 --max_ifuel 1 --using_facts_from '* -FStar.Seq'"
val lemma_feval_is_fas_nat_i:
#w:lanes
-> f:felem5 w{felem_less5 f (pow2 128)}
-> i:size_nat{i < w} ->
Lemma ((feval5 f).[i] == (fas_nat5 f).[i])
let lemma_feval_is_fas_nat_i #w f i =
assert_norm (pow2 128 < Vec.prime);
assert ((feval5 f).[i] == (as_nat5 (transpose f).[i]) % Vec.prime);
FStar.Math.Lemmas.modulo_lemma (as_nat5 (transpose f).[i]) Vec.prime
val lemma_feval_is_fas_nat:
#w:lanes
-> f:felem5 w{felem_less5 f (pow2 128)} ->
Lemma (forall (i:nat). i < w ==> (fas_nat5 f).[i] == (feval5 f).[i])
let lemma_feval_is_fas_nat #w f =
FStar.Classical.forall_intro (lemma_feval_is_fas_nat_i #w f)
val precomp_r5_fits_lemma:
#w:lanes
-> r:felem5 w{felem_fits5 r (1, 1, 1, 1, 1)} ->
Lemma (felem_fits5 (precomp_r5 #w r) (5, 5, 5, 5, 5))
[SMTPat (precomp_r5 #w r)]
let precomp_r5_fits_lemma #w r =
FStar.Classical.forall_intro (precomp_r5_as_tup64 #w r)
val precomp_r5_fits_lemma2:
#w:lanes
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)} ->
Lemma (felem_fits5 (precomp_r5 #w r) (10, 10, 10, 10, 10))
[SMTPat (precomp_r5 #w r)]
let precomp_r5_fits_lemma2 #w r =
FStar.Classical.forall_intro (precomp_r5_as_tup64 #w r)
val precomp_r5_zeros: w:lanes -> Lemma
(let r = (zero w, zero w, zero w, zero w, zero w) in
precomp_r5 r == (zero w, zero w, zero w, zero w, zero w))
let precomp_r5_zeros w =
let r = (zero w, zero w, zero w, zero w, zero w) in
let (r0, r1, r2, r3, r4) = precomp_r5 r in
let aux (i:nat{i < w}) : Lemma ((vec_v (vec_smul_mod (zero w) (u64 5))).[i] == u64 0) = () in
Classical.forall_intro aux;
eq_intro (vec_v (vec_smul_mod (zero w) (u64 5))) (vec_v (zero w));
vecv_extensionality (vec_smul_mod (zero w) (u64 5)) (zero w)
val fadd5_fits_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (2,2,2,2,2)}
-> f2:felem5 w{felem_fits5 f2 (1,1,1,1,1)} ->
Lemma (felem_fits5 (fadd5 f1 f2) (3,3,3,3,3))
[SMTPat (fadd5 f1 f2)]
let fadd5_fits_lemma #w f1 f2 =
let (f10, f11, f12, f13, f14) = f1 in
let (f20, f21, f22, f23, f24) = f2 in
let o = fadd5 f1 f2 in
vec_add_mod_lemma f10 f20;
vec_add_mod_lemma f11 f21;
vec_add_mod_lemma f12 f22;
vec_add_mod_lemma f13 f23;
vec_add_mod_lemma f14 f24
val fadd5_eval_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (2,2,2,2,2)}
-> f2:felem5 w{felem_fits5 f2 (1,1,1,1,1)} ->
Lemma (feval5 (fadd5 f1 f2) == map2 Vec.pfadd (feval5 f1) (feval5 f2))
[SMTPat (fadd5 f1 f2)]
let fadd5_eval_lemma #w f1 f2 =
let o = fadd5 f1 f2 in
FStar.Classical.forall_intro (fadd5_eval_lemma_i f1 f2);
eq_intro (feval5 o) (map2 Vec.pfadd (feval5 f1) (feval5 f2))
val mul_felem5_fits_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10)} ->
Lemma (felem_wide_fits5 (mul_felem5 #w f1 r r5) (126, 102, 78, 54, 30))
[SMTPat (mul_felem5 #w f1 r r5)]
let mul_felem5_fits_lemma #w f1 r r5 =
let (r0, r1, r2, r3, r4) = r in
let (f10, f11, f12, f13, f14) = f1 in
let (r50, r51, r52, r53, r54) = r5 in
let (a0,a1,a2,a3,a4) = smul_felem5 #w f10 (r0,r1,r2,r3,r4) in
smul_felem5_fits_lemma #w #3 #(2,2,2,2,2) f10 (r0,r1,r2,r3,r4);
let (a10,a11,a12,a13,a14) = smul_add_felem5 #w f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4) in
smul_add_felem5_fits_lemma #w #3 #(10,2,2,2,2) #(6,6,6,6,6) f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4);
let (a20,a21,a22,a23,a24) = smul_add_felem5 #w f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14) in
smul_add_felem5_fits_lemma #w #3 #(10,10,2,2,2) #(36,12,12,12,12) f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14);
let (a30,a31,a32,a33,a34) = smul_add_felem5 #w f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24) in
smul_add_felem5_fits_lemma #w #3 #(10,10,10,2,2) #(66,42,18,18,18) f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24);
let (a40,a41,a42,a43,a44) = smul_add_felem5 #w f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34) in
smul_add_felem5_fits_lemma #w #3 #(10,10,10,10,2) #(96,72,48,24,24) f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34)
val mul_felem5_eval_lemma_i:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10) /\ r5 == precomp_r5 r}
-> i:nat{i < w} ->
Lemma ((feval5 (mul_felem5 #w f1 r r5)).[i] == (feval5 f1).[i] `Vec.pfmul` (feval5 r).[i])
let mul_felem5_eval_lemma_i #w f1 r r5 i =
let (r0, r1, r2, r3, r4) = r in
let (f10, f11, f12, f13, f14) = f1 in
let (r50, r51, r52, r53, r54) = r5 in
let (a0,a1,a2,a3,a4) = smul_felem5 #w f10 (r0,r1,r2,r3,r4) in
smul_felem5_eval_lemma #w #3 #(2,2,2,2,2) f10 (r0,r1,r2,r3,r4);
smul_felem5_fits_lemma #w #3 #(2,2,2,2,2) f10 (r0,r1,r2,r3,r4);
assert ((fas_nat5 (a0,a1,a2,a3,a4)).[i] == (uint64xN_v f10).[i] * (fas_nat5 (r0,r1,r2,r3,r4)).[i]);
let (a10,a11,a12,a13,a14) = smul_add_felem5 #w f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4) in
smul_add_felem5_eval_lemma #w #3 #(10,2,2,2,2) #(6,6,6,6,6) f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4);
smul_add_felem5_fits_lemma #w #3 #(10,2,2,2,2) #(6,6,6,6,6) f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4);
assert ((fas_nat5 (a10,a11,a12,a13,a14)).[i] == (fas_nat5 (a0,a1,a2,a3,a4)).[i] + (uint64xN_v f11).[i] * (fas_nat5 (r54,r0,r1,r2,r3)).[i]);
let (a20,a21,a22,a23,a24) = smul_add_felem5 #w f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14) in
smul_add_felem5_eval_lemma #w #3 #(10,10,2,2,2) #(36,12,12,12,12) f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14);
smul_add_felem5_fits_lemma #w #3 #(10,10,2,2,2) #(36,12,12,12,12) f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14);
assert ((fas_nat5 (a20,a21,a22,a23,a24)).[i] == (fas_nat5 (a10,a11,a12,a13,a14)).[i] + (uint64xN_v f12).[i] * (fas_nat5 (r53,r54,r0,r1,r2)).[i]);
let (a30,a31,a32,a33,a34) = smul_add_felem5 #w f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24) in
smul_add_felem5_eval_lemma #w #3 #(10,10,10,2,2) #(66,42,18,18,18) f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24);
smul_add_felem5_fits_lemma #w #3 #(10,10,10,2,2) #(66,42,18,18,18) f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24);
assert ((fas_nat5 (a30,a31,a32,a33,a34)).[i] == (fas_nat5 (a20,a21,a22,a23,a24)).[i] + (uint64xN_v f13).[i] * (fas_nat5 (r52,r53,r54,r0,r1)).[i]);
let (a40,a41,a42,a43,a44) = smul_add_felem5 #w f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34) in
smul_add_felem5_eval_lemma #w #3 #(10,10,10,10,2) #(96,72,48,24,24) f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34);
smul_add_felem5_fits_lemma #w #3 #(10,10,10,10,2) #(96,72,48,24,24) f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34);
assert ((fas_nat5 (a40,a41,a42,a43,a44)).[i] == (fas_nat5 (a30,a31,a32,a33,a34)).[i] + (uint64xN_v f14).[i] * (fas_nat5 (r51,r52,r53,r54,r0)).[i]);
assert ((fas_nat5 (a40,a41,a42,a43,a44)).[i] ==
(uint64xN_v f10).[i] * (fas_nat5 (r0,r1,r2,r3,r4)).[i] +
(uint64xN_v f11).[i] * (fas_nat5 (r54,r0,r1,r2,r3)).[i] +
(uint64xN_v f12).[i] * (fas_nat5 (r53,r54,r0,r1,r2)).[i] +
(uint64xN_v f13).[i] * (fas_nat5 (r52,r53,r54,r0,r1)).[i] +
(uint64xN_v f14).[i] * (fas_nat5 (r51,r52,r53,r54,r0)).[i]);
mul_felem5_eval_as_tup64 #w f1 r r5 i;
mul_felem5_lemma (as_tup64_i f1 i) (as_tup64_i r i)
val mul_felem5_eval_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10) /\ r5 == precomp_r5 r} ->
Lemma (feval5 (mul_felem5 #w f1 r r5) == map2 (Vec.pfmul) (feval5 f1) (feval5 r))
[SMTPat (mul_felem5 #w f1 r r5)]
let mul_felem5_eval_lemma #w f1 r r5 =
let tmp = map2 (Vec.pfmul) (feval5 f1) (feval5 r) in
FStar.Classical.forall_intro (mul_felem5_eval_lemma_i #w f1 r r5);
eq_intro (feval5 (mul_felem5 #w f1 r r5)) tmp
val fmul_r5_fits_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10)} ->
Lemma (felem_fits5 (fmul_r5 #w f1 r r5) (1, 2, 1, 1, 2))
[SMTPat (fmul_r5 #w f1 r r5)]
let fmul_r5_fits_lemma #w f1 r r5 =
let tmp = mul_felem5 f1 r r5 in
mul_felem5_fits_lemma #w f1 r r5;
let res = carry_wide_felem5 tmp in
carry_wide_felem5_fits_lemma #w tmp
val fmul_r5_eval_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10) /\ r5 == precomp_r5 r} ->
Lemma (feval5 (fmul_r5 #w f1 r r5) == map2 (Vec.pfmul) (feval5 f1) (feval5 r))
[SMTPat (fmul_r5 #w f1 r r5)]
let fmul_r5_eval_lemma #w f1 r r5 =
let tmp = mul_felem5 f1 r r5 in
mul_felem5_eval_lemma #w f1 r r5;
let res = carry_wide_felem5 tmp in
carry_wide_felem5_eval_lemma #w tmp
val fadd_mul_r5_fits_lemma:
#w:lanes
-> acc:felem5 w{felem_fits5 acc (2, 2, 2, 2, 2)}
-> f1:felem5 w{felem_fits5 f1 (1, 1, 1, 1, 1)}
-> r:felem5 w{felem_fits5 r (1, 1, 1, 1, 1)}
-> r5:felem5 w{felem_fits5 r5 (5, 5, 5, 5, 5)} ->
Lemma (felem_fits5 (fadd_mul_r5 acc f1 r r5) (1, 2, 1, 1, 2))
[SMTPat (fadd_mul_r5 acc f1 r r5)]
let fadd_mul_r5_fits_lemma #w acc f1 r r5 =
let acc1 = fadd5 acc f1 in
fadd5_fits_lemma #w acc f1;
let tmp = mul_felem5 acc1 r r5 in
mul_felem5_fits_lemma #w acc1 r r5;
let res = carry_wide_felem5 tmp in
carry_wide_felem5_fits_lemma #w tmp
val fadd_mul_r5_eval_lemma:
#w:lanes
-> acc:felem5 w{felem_fits5 acc (2, 2, 2, 2, 2)}
-> f1:felem5 w{felem_fits5 f1 (1, 1, 1, 1, 1)}
-> r:felem5 w{felem_fits5 r (1, 1, 1, 1, 1)}
-> r5:felem5 w{felem_fits5 r5 (5, 5, 5, 5, 5) /\ r5 == precomp_r5 r} ->
Lemma (feval5 (fadd_mul_r5 acc f1 r r5) ==
map2 (Vec.pfmul) (map2 (Vec.pfadd) (feval5 acc) (feval5 f1)) (feval5 r))
[SMTPat (fadd_mul_r5 acc f1 r r5)]
let fadd_mul_r5_eval_lemma #w acc f1 r r5 =
let acc1 = fadd5 acc f1 in
fadd5_eval_lemma #w acc f1;
let tmp = mul_felem5 acc1 r r5 in
mul_felem5_eval_lemma #w acc1 r r5;
let res = carry_wide_felem5 tmp in
carry_wide_felem5_eval_lemma #w tmp
val reduce_felem5_eval_lemma:
#w:lanes
-> f:felem5 w{felem_fits5 f (2, 2, 2, 2, 2)} ->
Lemma
(felem_fits5 (reduce_felem5 f) (1, 1, 1, 1, 1) /\
(feval5 f).[0] == (fas_nat5 (reduce_felem5 f)).[0])
[SMTPat (reduce_felem5 f)]
let reduce_felem5_eval_lemma #w f =
carry_full_felem5_eval_lemma f;
carry_full_felem5_fits_lemma f;
let f = carry_full_felem5 f in
carry_reduce_felem5_lemma #w f;
subtract_p5_felem5_lemma #w (carry_full_felem5 f)
val fmul_r2_normalize50:
acc:felem5 2
-> r:felem5 2
-> r2:felem5 2 ->
Pure (felem5 2)
(requires
felem_fits5 acc (3, 3, 3, 3, 3) /\
felem_fits5 r (1, 1, 1, 1, 1) /\
felem_fits5 r2 (2, 2, 2, 2, 2) /\
feval5 r2 == Vec.compute_r2 (feval5 r).[0])
(ensures fun a ->
let fr21 = create2 (feval5 r2).[0] (feval5 r).[0] in
feval5 a == Vec.fmul (feval5 acc) fr21 /\
felem_fits5 a (1, 2, 1, 1, 2))
let fmul_r2_normalize50 (a0, a1, a2, a3, a4) (r0, r1, r2, r3, r4) (r20, r21, r22, r23, r24) =
let r210 = vec_interleave_low r20 r0 in
vec_interleave_low_lemma2 r20 r0;
let r211 = vec_interleave_low r21 r1 in
vec_interleave_low_lemma2 r21 r1;
let r212 = vec_interleave_low r22 r2 in
vec_interleave_low_lemma2 r22 r2;
let r213 = vec_interleave_low r23 r3 in
vec_interleave_low_lemma2 r23 r3;
let r214 = vec_interleave_low r24 r4 in
vec_interleave_low_lemma2 r24 r4;
let acc = (a0, a1, a2, a3, a4) in
let fr = (r0, r1, r2, r3, r4) in
let fr2 = (r20, r21, r22, r23, r24) in
assert ((feval5 fr2).[0] == Vec.pfmul ((feval5 fr).[0]) ((feval5 fr).[0]));
let fr21 = (r210, r211, r212, r213, r214) in
eq_intro (feval5 fr21) (create2 (feval5 fr2).[0] (feval5 fr).[0]);
assert (feval5 fr21 == create2 (feval5 fr2).[0] (feval5 fr).[0]);
assert (felem_fits5 fr21 (2, 2, 2, 2, 2));
let fr215 = precomp_r5 #2 fr21 in
let a = fmul_r5 #2 acc fr21 fr215 in
fmul_r5_eval_lemma acc fr21 fr215;
fmul_r5_fits_lemma acc fr21 fr215;
assert (feval5 a == Vec.fmul (feval5 acc) (feval5 fr21));
assert (felem_fits5 a (1, 2, 1, 1, 2));
a
#push-options "--z3rlimit 150"
val fmul_r2_normalize51:
a:felem5 2
-> fa1:felem5 2 ->
Pure (felem5 2)
(requires
felem_fits5 a (1, 2, 1, 1, 2) /\
felem_fits5 fa1 (1, 2, 1, 1, 2) /\
feval5 fa1 == create2 (feval5 a).[1] (feval5 a).[1])
(ensures fun out ->
(feval5 out).[0] == Vec.pfadd (feval5 a).[0] (feval5 a).[1] /\
felem_fits5 out (2, 4, 2, 2, 4))
let fmul_r2_normalize51 a fa1 =
let (a0, a1, a2, a3, a4) = a in
let (a10, a11, a12, a13, a14) = fa1 in
let o0 = vec_add_mod a0 a10 in
let o1 = vec_add_mod a1 a11 in
let o2 = vec_add_mod a2 a12 in
let o3 = vec_add_mod a3 a13 in
let o4 = vec_add_mod a4 a14 in
let out = (o0, o1, o2, o3, o4) in
let (a0, a1, a2, a3, a4) = as_tup64_i a 0 in
let (a10, a11, a12, a13, a14) = as_tup64_i fa1 0 in
let (o0, o1, o2, o3, o4) = as_tup64_i out 0 in
FStar.Math.Lemmas.modulo_lemma (v a0 + v a10) (pow2 64);
FStar.Math.Lemmas.modulo_lemma (v a1 + v a11) (pow2 64);
FStar.Math.Lemmas.modulo_lemma (v a2 + v a12) (pow2 64);
FStar.Math.Lemmas.modulo_lemma (v a3 + v a13) (pow2 64);
FStar.Math.Lemmas.modulo_lemma (v a4 + v a14) (pow2 64);
assert (felem_fits5 out (2, 4, 2, 2, 4));
calc (==) {
((feval5 a).[0] + (feval5 a).[1]) % Vec.prime;
(==) { }
(as_nat5 (a0, a1, a2, a3, a4) % Vec.prime + as_nat5 (a10, a11, a12, a13, a14) % Vec.prime) % Vec.prime;
(==) { FStar.Math.Lemmas.lemma_mod_plus_distr_l
(as_nat5 (a0, a1, a2, a3, a4)) (as_nat5 (a10, a11, a12, a13, a14)) Vec.prime;
FStar.Math.Lemmas.lemma_mod_plus_distr_r
(as_nat5 (a0, a1, a2, a3, a4) % Vec.prime) (as_nat5 (a10, a11, a12, a13, a14)) Vec.prime }
(as_nat5 (a0, a1, a2, a3, a4) + as_nat5 (a10, a11, a12, a13, a14)) % Vec.prime;
(==) { }
(feval5 out).[0];
};
out
#pop-options
val fmul_r2_normalize5_lemma:
acc:felem5 2
-> r:felem5 2
-> r2:felem5 2 ->
Lemma
(requires
felem_fits5 acc (3, 3, 3, 3, 3) /\
felem_fits5 r (1, 1, 1, 1, 1) /\
felem_fits5 r2 (2, 2, 2, 2, 2) /\
feval5 r2 == Vec.compute_r2 (feval5 r).[0])
(ensures
(let out = fmul_r2_normalize5 acc r r2 in
felem_fits5 out (2, 1, 1, 1, 1) /\
(feval5 out).[0] == Vec.normalize_2 (feval5 r).[0] (feval5 acc)))
[SMTPat (fmul_r2_normalize5 acc r r2)]
let fmul_r2_normalize5_lemma acc r r2 =
let a = fmul_r2_normalize50 acc r r2 in
let (a0, a1, a2, a3, a4) = a in
let a10 = vec_interleave_high a0 a0 in
vec_interleave_high_lemma2 a0 a0;
let a11 = vec_interleave_high a1 a1 in
vec_interleave_high_lemma2 a1 a1;
let a12 = vec_interleave_high a2 a2 in
vec_interleave_high_lemma2 a2 a2;
let a13 = vec_interleave_high a3 a3 in
vec_interleave_high_lemma2 a3 a3;
let a14 = vec_interleave_high a4 a4 in
vec_interleave_high_lemma2 a4 a4;
let fa1 = (a10, a11, a12, a13, a14) in
eq_intro (feval5 fa1) (create2 (feval5 a).[1] (feval5 a).[1]);
assert (feval5 fa1 == create2 (feval5 a).[1] (feval5 a).[1]);
assert (felem_fits5 fa1 (1, 2, 1, 1, 2));
let out = fmul_r2_normalize51 a fa1 in
assert ((feval5 out).[0] == Vec.pfadd (feval5 a).[0] (feval5 a).[1]);
let res = carry_full_felem5 out in
carry_full_felem5_lemma out
val fmul_r4_normalize50:
acc:felem5 4
-> r:felem5 4
-> r2:felem5 4
-> r3:felem5 4
-> r4:felem5 4 ->
Pure (felem5 4)
(requires
felem_fits5 acc (3, 3, 3, 3, 3) /\
felem_fits5 r (1, 1, 1, 1, 1) /\
felem_fits5 r2 (2, 2, 2, 2, 2) /\
felem_fits5 r3 (2, 2, 2, 2, 2) /\
felem_fits5 r4 (2, 2, 2, 2, 2) /\
feval5 r2 == Vec.fmul (feval5 r) (feval5 r) /\
feval5 r3 == Vec.fmul (feval5 r2) (feval5 r) /\
feval5 r4 == Vec.compute_r4 (feval5 r).[0])
(ensures fun out ->
let fr4321 = create4 (feval5 r4).[0] (feval5 r3).[0] (feval5 r2).[0] (feval5 r).[0] in
feval5 out == Vec.fmul (feval5 acc) fr4321 /\
felem_fits5 out (1, 2, 1, 1, 2))
let fmul_r4_normalize50 acc fr fr2 fr3 fr4 =
let (r10, r11, r12, r13, r14) = fr in
let (r20, r21, r22, r23, r24) = fr2 in
let (r30, r31, r32, r33, r34) = fr3 in
let (r40, r41, r42, r43, r44) = fr4 in
let (a0, a1, a2, a3, a4) = acc in
let v12120 = vec_interleave_low r20 r10 in
vec_interleave_low_lemma_uint64_4 r20 r10;
let v34340 = vec_interleave_low r40 r30 in
vec_interleave_low_lemma_uint64_4 r40 r30;
let r12340 = vec_interleave_low_n 2 v34340 v12120 in
vec_interleave_low_n_lemma_uint64_4_2 v34340 v12120;
let v12121 = vec_interleave_low r21 r11 in
vec_interleave_low_lemma_uint64_4 r21 r11;
let v34341 = vec_interleave_low r41 r31 in
vec_interleave_low_lemma_uint64_4 r41 r31;
let r12341 = vec_interleave_low_n 2 v34341 v12121 in
vec_interleave_low_n_lemma_uint64_4_2 v34341 v12121;
let v12122 = vec_interleave_low r22 r12 in
vec_interleave_low_lemma_uint64_4 r22 r12;
let v34342 = vec_interleave_low r42 r32 in
vec_interleave_low_lemma_uint64_4 r42 r32;
let r12342 = vec_interleave_low_n 2 v34342 v12122 in
vec_interleave_low_n_lemma_uint64_4_2 v34342 v12122;
let v12123 = vec_interleave_low r23 r13 in
vec_interleave_low_lemma_uint64_4 r23 r13;
let v34343 = vec_interleave_low r43 r33 in
vec_interleave_low_lemma_uint64_4 r43 r33;
let r12343 = vec_interleave_low_n 2 v34343 v12123 in
vec_interleave_low_n_lemma_uint64_4_2 v34343 v12123;
let v12124 = vec_interleave_low r24 r14 in
vec_interleave_low_lemma_uint64_4 r24 r14;
let v34344 = vec_interleave_low r44 r34 in
vec_interleave_low_lemma_uint64_4 r44 r34;
let r12344 = vec_interleave_low_n 2 v34344 v12124 in
vec_interleave_low_n_lemma_uint64_4_2 v34344 v12124;
let fr1234 = (r12340, r12341, r12342, r12343, r12344) in
eq_intro (feval5 fr1234) (create4 (feval5 fr4).[0] (feval5 fr3).[0] (feval5 fr2).[0] (feval5 fr).[0]);
let fr12345 = precomp_r5 #4 fr1234 in
let out = fmul_r5 #4 acc fr1234 fr12345 in
fmul_r5_eval_lemma acc fr1234 fr12345;
fmul_r5_fits_lemma acc fr1234 fr12345;
out
val lemma_fmul_r4_normalize51:
#m:scale32{m <= 2}
-> o:uint64xN 4{felem_fits1 o m} ->
Lemma
(let v00 = vec_interleave_high_n 2 o o in
let v10 = vec_add_mod o v00 in
let v10h = vec_interleave_high v10 v10 in
let v20 = vec_add_mod v10 v10h in
felem_fits1 v20 (4 * m) /\
(uint64xN_v v20).[0] == (uint64xN_v o).[0] + (uint64xN_v o).[1] + (uint64xN_v o).[2] + (uint64xN_v o).[3])
let lemma_fmul_r4_normalize51 #m o =
let v00 = vec_interleave_high_n 2 o o in
vec_interleave_high_n_lemma_uint64_4_2 o o;
let (o0, o1, o2, o3) = ((vec_v o).[0], (vec_v o).[1], (vec_v o).[2], (vec_v o).[3]) in
assert (vec_v v00 == create4 o2 o3 o2 o3);
let v10 = vec_add_mod o v00 in
FStar.Math.Lemmas.modulo_lemma (v o0 + v o2) (pow2 64);
FStar.Math.Lemmas.modulo_lemma (v o1 + v o3) (pow2 64);
assert (v (vec_v v10).[0] == v o0 + v o2);
assert (v (vec_v v10).[1] == v o1 + v o3);
let v10h = vec_interleave_high v10 v10 in
vec_interleave_high_lemma_uint64_4 v10 v10;
assert (v (vec_v v10h).[0] == v (vec_v v10).[1]);
let v20 = vec_add_mod v10 v10h in
FStar.Math.Lemmas.modulo_lemma (v o0 + v o2 + v o1 + v o3) (pow2 64)
val lemma_fmul_r4_normalize51_expand:
v2:felem5 4
-> out:felem5 4 ->
Lemma
(requires
(let (v20, v21, v22, v23, v24) = v2 in
let (o0, o1, o2, o3, o4) = out in
(uint64xN_v v20).[0] == (uint64xN_v o0).[0] + (uint64xN_v o0).[1] + (uint64xN_v o0).[2] + (uint64xN_v o0).[3] /\
(uint64xN_v v21).[0] == (uint64xN_v o1).[0] + (uint64xN_v o1).[1] + (uint64xN_v o1).[2] + (uint64xN_v o1).[3] /\
(uint64xN_v v22).[0] == (uint64xN_v o2).[0] + (uint64xN_v o2).[1] + (uint64xN_v o2).[2] + (uint64xN_v o2).[3] /\
(uint64xN_v v23).[0] == (uint64xN_v o3).[0] + (uint64xN_v o3).[1] + (uint64xN_v o3).[2] + (uint64xN_v o3).[3] /\
(uint64xN_v v24).[0] == (uint64xN_v o4).[0] + (uint64xN_v o4).[1] + (uint64xN_v o4).[2] + (uint64xN_v o4).[3]))
(ensures
(let (v20, v21, v22, v23, v24) = v2 in
let (o0, o1, o2, o3, o4) = out in
(feval5 v2).[0] == Vec.pfadd (Vec.pfadd (Vec.pfadd (feval5 out).[0] (feval5 out).[1]) (feval5 out).[2]) (feval5 out).[3]))
let lemma_fmul_r4_normalize51_expand v2 out =
let (v20, v21, v22, v23, v24) = as_tup64_i v2 0 in
let (o0, o1, o2, o3, o4) = out in
calc (==) {
as_nat5 (v20, v21, v22, v23, v24) % Vec.prime;
(==) { }
(as_nat5 (as_tup64_i out 0) + as_nat5 (as_tup64_i out 1) + as_nat5 (as_tup64_i out 2) + as_nat5 (as_tup64_i out 3)) % Vec.prime;
(==) { FStar.Math.Lemmas.lemma_mod_plus_distr_l (as_nat5 (as_tup64_i out 0) + as_nat5 (as_tup64_i out 1))
(as_nat5 (as_tup64_i out 2) + as_nat5 (as_tup64_i out 3)) Vec.prime }
((as_nat5 (as_tup64_i out 0) + as_nat5 (as_tup64_i out 1)) % Vec.prime + as_nat5 (as_tup64_i out 2) + as_nat5 (as_tup64_i out 3)) % Vec.prime;
(==) { FStar.Math.Lemmas.modulo_distributivity (as_nat5 (as_tup64_i out 0)) (as_nat5 (as_tup64_i out 1)) Vec.prime }
(((feval5 out).[0] + (feval5 out).[1]) % Vec.prime + as_nat5 (as_tup64_i out 2) + as_nat5 (as_tup64_i out 3)) % Vec.prime;
(==) { FStar.Math.Lemmas.lemma_mod_plus_distr_l (((feval5 out).[0] + (feval5 out).[1]) % Vec.prime + as_nat5 (as_tup64_i out 2))
(as_nat5 (as_tup64_i out 3)) Vec.prime }
((((feval5 out).[0] + (feval5 out).[1]) % Vec.prime + as_nat5 (as_tup64_i out 2)) % Vec.prime + as_nat5 (as_tup64_i out 3)) % Vec.prime;
(==) { FStar.Math.Lemmas.lemma_mod_plus_distr_r (((feval5 out).[0] + (feval5 out).[1]) % Vec.prime) (as_nat5 (as_tup64_i out 2)) Vec.prime }
((((feval5 out).[0] + (feval5 out).[1]) % Vec.prime + (feval5 out).[2]) % Vec.prime + as_nat5 (as_tup64_i out 3)) % Vec.prime;
(==) { FStar.Math.Lemmas.lemma_mod_plus_distr_r ((((feval5 out).[0] + (feval5 out).[1]) % Vec.prime + (feval5 out).[2]) % Vec.prime)
(as_nat5 (as_tup64_i out 3)) Vec.prime }
((((feval5 out).[0] + (feval5 out).[1]) % Vec.prime + (feval5 out).[2]) % Vec.prime + (feval5 out).[3]) % Vec.prime;
};
assert ((feval5 v2).[0] ==
((((feval5 out).[0] + (feval5 out).[1]) % Vec.prime + (feval5 out).[2]) % Vec.prime + (feval5 out).[3]) % Vec.prime)
val fmul_r4_normalize51: a:felem5 4 ->
Pure (felem5 4)
(requires felem_fits5 a (1, 2, 1, 1, 2))
(ensures fun res ->
felem_fits5 res (4, 8, 4, 4, 8) /\
(feval5 res).[0] == Vec.pfadd (Vec.pfadd (Vec.pfadd (feval5 a).[0] (feval5 a).[1]) (feval5 a).[2]) (feval5 a).[3]) | false | false | Hacl.Spec.Poly1305.Field32xN.Lemmas.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 100,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val fmul_r4_normalize51: a:felem5 4 ->
Pure (felem5 4)
(requires felem_fits5 a (1, 2, 1, 1, 2))
(ensures fun res ->
felem_fits5 res (4, 8, 4, 4, 8) /\
(feval5 res).[0] == Vec.pfadd (Vec.pfadd (Vec.pfadd (feval5 a).[0] (feval5 a).[1]) (feval5 a).[2]) (feval5 a).[3]) | [] | Hacl.Spec.Poly1305.Field32xN.Lemmas.fmul_r4_normalize51 | {
"file_name": "code/poly1305/Hacl.Spec.Poly1305.Field32xN.Lemmas.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | a: Hacl.Spec.Poly1305.Field32xN.felem5 4 -> Prims.Pure (Hacl.Spec.Poly1305.Field32xN.felem5 4) | {
"end_col": 5,
"end_line": 591,
"start_col": 28,
"start_line": 558
} |
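
The interleave/add pattern proved by `lemma_fmul_r4_normalize51` in the row above is a horizontal lane sum: for each limb vector `o`, lane 0 of the result accumulates all four lanes modulo 2^64, which is what `fmul_r4_normalize51` needs to fold the four parallel accumulators into one. A minimal Python sketch of that reduction, modelling the `uint64xN 4` vectors as plain 4-element lists (illustrative only, not part of the verified F* development):

```python
# Horizontal sum of a 4-lane vector, mirroring the interleave/add pattern
# proved by lemma_fmul_r4_normalize51 (scalar model, not F* code).
MASK64 = (1 << 64) - 1

def interleave_high_n2(a, b):
    # vec_interleave_high_n 2: high 128-bit halves of a and b
    return [a[2], a[3], b[2], b[3]]

def interleave_high(a, b):
    # vec_interleave_high on 64-bit lanes: pairs (a1, b1) and (a3, b3)
    return [a[1], b[1], a[3], b[3]]

def vec_add_mod(a, b):
    return [(x + y) & MASK64 for x, y in zip(a, b)]

def horizontal_sum_lane0(o):
    v00 = interleave_high_n2(o, o)    # lanes [o2, o3, o2, o3]
    v10 = vec_add_mod(o, v00)         # lane 0 = o0 + o2, lane 1 = o1 + o3
    v10h = interleave_high(v10, v10)  # lane 0 = v10[1] = o1 + o3
    v20 = vec_add_mod(v10, v10h)      # lane 0 = o0 + o2 + o1 + o3
    return v20[0]

o = [3, 5, 7, 11]
assert horizontal_sum_lane0(o) == sum(o)  # no wrap-around for small limbs
```
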
FStar.Pervasives.Lemma | val precomp_r5_zeros: w:lanes -> Lemma
(let r = (zero w, zero w, zero w, zero w, zero w) in
precomp_r5 r == (zero w, zero w, zero w, zero w, zero w)) | [
{
"abbrev": true,
"full_module": "Hacl.Spec.Poly1305.Vec",
"short_module": "Vec"
},
{
"abbrev": false,
"full_module": "Hacl.Poly1305.Field32xN.Lemmas2",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Poly1305.Field32xN.Lemmas1",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Poly1305.Field32xN.Lemmas0",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305.Field32xN",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305.Field32xN",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305.Field32xN",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let precomp_r5_zeros w =
let r = (zero w, zero w, zero w, zero w, zero w) in
let (r0, r1, r2, r3, r4) = precomp_r5 r in
let aux (i:nat{i < w}) : Lemma ((vec_v (vec_smul_mod (zero w) (u64 5))).[i] == u64 0) = () in
Classical.forall_intro aux;
eq_intro (vec_v (vec_smul_mod (zero w) (u64 5))) (vec_v (zero w));
vecv_extensionality (vec_smul_mod (zero w) (u64 5)) (zero w) | val precomp_r5_zeros: w:lanes -> Lemma
(let r = (zero w, zero w, zero w, zero w, zero w) in
precomp_r5 r == (zero w, zero w, zero w, zero w, zero w))
let precomp_r5_zeros w = | false | null | true | let r = (zero w, zero w, zero w, zero w, zero w) in
let r0, r1, r2, r3, r4 = precomp_r5 r in
let aux (i: nat{i < w}) : Lemma ((vec_v (vec_smul_mod (zero w) (u64 5))).[ i ] == u64 0) = () in
Classical.forall_intro aux;
eq_intro (vec_v (vec_smul_mod (zero w) (u64 5))) (vec_v (zero w));
vecv_extensionality (vec_smul_mod (zero w) (u64 5)) (zero w) | {
"checked_file": "Hacl.Spec.Poly1305.Field32xN.Lemmas.fst.checked",
"dependencies": [
"prims.fst.checked",
"Lib.Sequence.fsti.checked",
"Lib.IntVector.fsti.checked",
"Lib.IntTypes.fsti.checked",
"Hacl.Spec.Poly1305.Vec.fst.checked",
"Hacl.Spec.Poly1305.Field32xN.fst.checked",
"Hacl.Poly1305.Field32xN.Lemmas2.fst.checked",
"Hacl.Poly1305.Field32xN.Lemmas1.fst.checked",
"Hacl.Poly1305.Field32xN.Lemmas0.fst.checked",
"Hacl.Impl.Poly1305.Lemmas.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Classical.fsti.checked",
"FStar.Calc.fsti.checked"
],
"interface_file": false,
"source_file": "Hacl.Spec.Poly1305.Field32xN.Lemmas.fst"
} | [
"lemma"
] | [
"Hacl.Spec.Poly1305.Field32xN.lanes",
"Hacl.Spec.Poly1305.Field32xN.uint64xN",
"Lib.IntVector.vecv_extensionality",
"Lib.IntTypes.U64",
"Lib.IntVector.vec_smul_mod",
"Hacl.Spec.Poly1305.Field32xN.zero",
"Lib.IntTypes.u64",
"Prims.unit",
"Lib.Sequence.eq_intro",
"Lib.IntTypes.uint_t",
"Lib.IntTypes.SEC",
"Lib.IntVector.vec_v",
"FStar.Classical.forall_intro",
"Prims.nat",
"Prims.b2t",
"Prims.op_LessThan",
"Prims.eq2",
"Prims.l_or",
"FStar.Seq.Base.index",
"Lib.Sequence.to_seq",
"Lib.IntTypes.range_t",
"Lib.IntTypes.v",
"Lib.Sequence.op_String_Access",
"Prims.l_True",
"Prims.squash",
"Lib.IntTypes.int_t",
"Lib.IntTypes.mk_int",
"Prims.int",
"Lib.IntTypes.range",
"Lib.Sequence.index",
"Prims.Nil",
"FStar.Pervasives.pattern",
"Hacl.Spec.Poly1305.Field32xN.felem5",
"Hacl.Spec.Poly1305.Field32xN.precomp_r5",
"FStar.Pervasives.Native.tuple5",
"FStar.Pervasives.Native.Mktuple5"
] | [] | module Hacl.Spec.Poly1305.Field32xN.Lemmas
open Lib.IntTypes
open Lib.IntVector
open Lib.Sequence
open FStar.Mul
open Hacl.Spec.Poly1305.Field32xN
open Hacl.Poly1305.Field32xN.Lemmas0
open Hacl.Poly1305.Field32xN.Lemmas1
open Hacl.Poly1305.Field32xN.Lemmas2
module Vec = Hacl.Spec.Poly1305.Vec
#set-options "--z3rlimit 100 --max_fuel 0 --initial_ifuel 1 --max_ifuel 1 --using_facts_from '* -FStar.Seq'"
val lemma_feval_is_fas_nat_i:
#w:lanes
-> f:felem5 w{felem_less5 f (pow2 128)}
-> i:size_nat{i < w} ->
Lemma ((feval5 f).[i] == (fas_nat5 f).[i])
let lemma_feval_is_fas_nat_i #w f i =
assert_norm (pow2 128 < Vec.prime);
assert ((feval5 f).[i] == (as_nat5 (transpose f).[i]) % Vec.prime);
FStar.Math.Lemmas.modulo_lemma (as_nat5 (transpose f).[i]) Vec.prime
val lemma_feval_is_fas_nat:
#w:lanes
-> f:felem5 w{felem_less5 f (pow2 128)} ->
Lemma (forall (i:nat). i < w ==> (fas_nat5 f).[i] == (feval5 f).[i])
let lemma_feval_is_fas_nat #w f =
FStar.Classical.forall_intro (lemma_feval_is_fas_nat_i #w f)
val precomp_r5_fits_lemma:
#w:lanes
-> r:felem5 w{felem_fits5 r (1, 1, 1, 1, 1)} ->
Lemma (felem_fits5 (precomp_r5 #w r) (5, 5, 5, 5, 5))
[SMTPat (precomp_r5 #w r)]
let precomp_r5_fits_lemma #w r =
FStar.Classical.forall_intro (precomp_r5_as_tup64 #w r)
val precomp_r5_fits_lemma2:
#w:lanes
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)} ->
Lemma (felem_fits5 (precomp_r5 #w r) (10, 10, 10, 10, 10))
[SMTPat (precomp_r5 #w r)]
let precomp_r5_fits_lemma2 #w r =
FStar.Classical.forall_intro (precomp_r5_as_tup64 #w r)
val precomp_r5_zeros: w:lanes -> Lemma
(let r = (zero w, zero w, zero w, zero w, zero w) in
precomp_r5 r == (zero w, zero w, zero w, zero w, zero w)) | false | false | Hacl.Spec.Poly1305.Field32xN.Lemmas.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 100,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val precomp_r5_zeros: w:lanes -> Lemma
(let r = (zero w, zero w, zero w, zero w, zero w) in
precomp_r5 r == (zero w, zero w, zero w, zero w, zero w)) | [] | Hacl.Spec.Poly1305.Field32xN.Lemmas.precomp_r5_zeros | {
"file_name": "code/poly1305/Hacl.Spec.Poly1305.Field32xN.Lemmas.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | w: Hacl.Spec.Poly1305.Field32xN.lanes
-> FStar.Pervasives.Lemma
(ensures
(let r =
Hacl.Spec.Poly1305.Field32xN.zero w,
Hacl.Spec.Poly1305.Field32xN.zero w,
Hacl.Spec.Poly1305.Field32xN.zero w,
Hacl.Spec.Poly1305.Field32xN.zero w,
Hacl.Spec.Poly1305.Field32xN.zero w
in
Hacl.Spec.Poly1305.Field32xN.precomp_r5 r ==
FStar.Pervasives.Native.Mktuple5 (Hacl.Spec.Poly1305.Field32xN.zero w)
(Hacl.Spec.Poly1305.Field32xN.zero w)
(Hacl.Spec.Poly1305.Field32xN.zero w)
(Hacl.Spec.Poly1305.Field32xN.zero w)
(Hacl.Spec.Poly1305.Field32xN.zero w))) | {
"end_col": 62,
"end_line": 69,
"start_col": 24,
"start_line": 62
} |
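
The row above records that `precomp_r5` applied to the all-zero element is again all zeros. Judging from the `vec_smul_mod (zero w) (u64 5)` step in the proof body, `precomp_r5` scales each limb by 5 modulo 2^64 — the usual Poly1305 precomputation, exploiting 2^130 ≡ 5 (mod 2^130 − 5) during reduction. A short sanity check of the zero case in Python (a sketch assuming that limb-wise definition):

```python
# precomp_r5 scales each limb by 5 mod 2^64 (inferred from the use of
# vec_smul_mod ... (u64 5) in the proof above); zero is a fixed point.
MASK64 = (1 << 64) - 1

def precomp_r5(limbs):
    return [(5 * x) & MASK64 for x in limbs]

zero = [0, 0, 0, 0, 0]
assert precomp_r5(zero) == zero
```
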
Prims.Tot | val load_felem5_4_compact: lo:uint64xN 4 -> hi:uint64xN 4 -> felem5 4 | [
{
"abbrev": true,
"full_module": "Hacl.Spec.Poly1305.Vec",
"short_module": "Vec"
},
{
"abbrev": false,
"full_module": "Hacl.Poly1305.Field32xN.Lemmas2",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Poly1305.Field32xN.Lemmas1",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Poly1305.Field32xN.Lemmas0",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305.Field32xN",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305.Field32xN",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305.Field32xN",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let load_felem5_4_compact lo hi =
let mask26 = mask26 4 in
let t3 = vec_or (vec_shift_right lo 48ul) (vec_shift_left hi 16ul) in
let o0 = vec_and lo mask26 in
let o1 = vec_and (vec_shift_right lo 26ul) mask26 in
let o2 = vec_and (vec_shift_right t3 4ul) mask26 in
let o3 = vec_and (vec_shift_right t3 30ul) mask26 in
let o4 = vec_shift_right hi 40ul in
(o0, o1, o2, o3, o4) | val load_felem5_4_compact: lo:uint64xN 4 -> hi:uint64xN 4 -> felem5 4
let load_felem5_4_compact lo hi = | false | null | false | let mask26 = mask26 4 in
let t3 = vec_or (vec_shift_right lo 48ul) (vec_shift_left hi 16ul) in
let o0 = vec_and lo mask26 in
let o1 = vec_and (vec_shift_right lo 26ul) mask26 in
let o2 = vec_and (vec_shift_right t3 4ul) mask26 in
let o3 = vec_and (vec_shift_right t3 30ul) mask26 in
let o4 = vec_shift_right hi 40ul in
(o0, o1, o2, o3, o4) | {
"checked_file": "Hacl.Spec.Poly1305.Field32xN.Lemmas.fst.checked",
"dependencies": [
"prims.fst.checked",
"Lib.Sequence.fsti.checked",
"Lib.IntVector.fsti.checked",
"Lib.IntTypes.fsti.checked",
"Hacl.Spec.Poly1305.Vec.fst.checked",
"Hacl.Spec.Poly1305.Field32xN.fst.checked",
"Hacl.Poly1305.Field32xN.Lemmas2.fst.checked",
"Hacl.Poly1305.Field32xN.Lemmas1.fst.checked",
"Hacl.Poly1305.Field32xN.Lemmas0.fst.checked",
"Hacl.Impl.Poly1305.Lemmas.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Classical.fsti.checked",
"FStar.Calc.fsti.checked"
],
"interface_file": false,
"source_file": "Hacl.Spec.Poly1305.Field32xN.Lemmas.fst"
} | [
"total"
] | [
"Hacl.Spec.Poly1305.Field32xN.uint64xN",
"FStar.Pervasives.Native.Mktuple5",
"Lib.IntVector.vec_t",
"Lib.IntTypes.U64",
"Prims.eq2",
"Lib.Sequence.lseq",
"Lib.IntTypes.int_t",
"Lib.IntTypes.SEC",
"Lib.IntVector.vec_v",
"Lib.Sequence.map",
"Lib.IntTypes.shift_right_i",
"FStar.UInt32.uint_to_t",
"FStar.UInt32.t",
"Lib.IntVector.vec_shift_right",
"FStar.UInt32.__uint_to_t",
"Lib.IntVector.vec_and",
"Lib.Sequence.map2",
"Lib.IntTypes.logor",
"Lib.IntVector.vec_shift_left",
"Lib.IntVector.vec_or",
"Lib.Sequence.create",
"Lib.IntTypes.mk_int",
"Hacl.Spec.Poly1305.Field32xN.mask26",
"Hacl.Spec.Poly1305.Field32xN.felem5"
] | [] | module Hacl.Spec.Poly1305.Field32xN.Lemmas
open Lib.IntTypes
open Lib.IntVector
open Lib.Sequence
open FStar.Mul
open Hacl.Spec.Poly1305.Field32xN
open Hacl.Poly1305.Field32xN.Lemmas0
open Hacl.Poly1305.Field32xN.Lemmas1
open Hacl.Poly1305.Field32xN.Lemmas2
module Vec = Hacl.Spec.Poly1305.Vec
#set-options "--z3rlimit 100 --max_fuel 0 --initial_ifuel 1 --max_ifuel 1 --using_facts_from '* -FStar.Seq'"
val lemma_feval_is_fas_nat_i:
#w:lanes
-> f:felem5 w{felem_less5 f (pow2 128)}
-> i:size_nat{i < w} ->
Lemma ((feval5 f).[i] == (fas_nat5 f).[i])
let lemma_feval_is_fas_nat_i #w f i =
assert_norm (pow2 128 < Vec.prime);
assert ((feval5 f).[i] == (as_nat5 (transpose f).[i]) % Vec.prime);
FStar.Math.Lemmas.modulo_lemma (as_nat5 (transpose f).[i]) Vec.prime
val lemma_feval_is_fas_nat:
#w:lanes
-> f:felem5 w{felem_less5 f (pow2 128)} ->
Lemma (forall (i:nat). i < w ==> (fas_nat5 f).[i] == (feval5 f).[i])
let lemma_feval_is_fas_nat #w f =
FStar.Classical.forall_intro (lemma_feval_is_fas_nat_i #w f)
val precomp_r5_fits_lemma:
#w:lanes
-> r:felem5 w{felem_fits5 r (1, 1, 1, 1, 1)} ->
Lemma (felem_fits5 (precomp_r5 #w r) (5, 5, 5, 5, 5))
[SMTPat (precomp_r5 #w r)]
let precomp_r5_fits_lemma #w r =
FStar.Classical.forall_intro (precomp_r5_as_tup64 #w r)
val precomp_r5_fits_lemma2:
#w:lanes
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)} ->
Lemma (felem_fits5 (precomp_r5 #w r) (10, 10, 10, 10, 10))
[SMTPat (precomp_r5 #w r)]
let precomp_r5_fits_lemma2 #w r =
FStar.Classical.forall_intro (precomp_r5_as_tup64 #w r)
val precomp_r5_zeros: w:lanes -> Lemma
(let r = (zero w, zero w, zero w, zero w, zero w) in
precomp_r5 r == (zero w, zero w, zero w, zero w, zero w))
let precomp_r5_zeros w =
let r = (zero w, zero w, zero w, zero w, zero w) in
let (r0, r1, r2, r3, r4) = precomp_r5 r in
let aux (i:nat{i < w}) : Lemma ((vec_v (vec_smul_mod (zero w) (u64 5))).[i] == u64 0) = () in
Classical.forall_intro aux;
eq_intro (vec_v (vec_smul_mod (zero w) (u64 5))) (vec_v (zero w));
vecv_extensionality (vec_smul_mod (zero w) (u64 5)) (zero w)
val fadd5_fits_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (2,2,2,2,2)}
-> f2:felem5 w{felem_fits5 f2 (1,1,1,1,1)} ->
Lemma (felem_fits5 (fadd5 f1 f2) (3,3,3,3,3))
[SMTPat (fadd5 f1 f2)]
let fadd5_fits_lemma #w f1 f2 =
let (f10, f11, f12, f13, f14) = f1 in
let (f20, f21, f22, f23, f24) = f2 in
let o = fadd5 f1 f2 in
vec_add_mod_lemma f10 f20;
vec_add_mod_lemma f11 f21;
vec_add_mod_lemma f12 f22;
vec_add_mod_lemma f13 f23;
vec_add_mod_lemma f14 f24
val fadd5_eval_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (2,2,2,2,2)}
-> f2:felem5 w{felem_fits5 f2 (1,1,1,1,1)} ->
Lemma (feval5 (fadd5 f1 f2) == map2 Vec.pfadd (feval5 f1) (feval5 f2))
[SMTPat (fadd5 f1 f2)]
let fadd5_eval_lemma #w f1 f2 =
let o = fadd5 f1 f2 in
FStar.Classical.forall_intro (fadd5_eval_lemma_i f1 f2);
eq_intro (feval5 o) (map2 Vec.pfadd (feval5 f1) (feval5 f2))
val mul_felem5_fits_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10)} ->
Lemma (felem_wide_fits5 (mul_felem5 #w f1 r r5) (126, 102, 78, 54, 30))
[SMTPat (mul_felem5 #w f1 r r5)]
let mul_felem5_fits_lemma #w f1 r r5 =
let (r0, r1, r2, r3, r4) = r in
let (f10, f11, f12, f13, f14) = f1 in
let (r50, r51, r52, r53, r54) = r5 in
let (a0,a1,a2,a3,a4) = smul_felem5 #w f10 (r0,r1,r2,r3,r4) in
smul_felem5_fits_lemma #w #3 #(2,2,2,2,2) f10 (r0,r1,r2,r3,r4);
let (a10,a11,a12,a13,a14) = smul_add_felem5 #w f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4) in
smul_add_felem5_fits_lemma #w #3 #(10,2,2,2,2) #(6,6,6,6,6) f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4);
let (a20,a21,a22,a23,a24) = smul_add_felem5 #w f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14) in
smul_add_felem5_fits_lemma #w #3 #(10,10,2,2,2) #(36,12,12,12,12) f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14);
let (a30,a31,a32,a33,a34) = smul_add_felem5 #w f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24) in
smul_add_felem5_fits_lemma #w #3 #(10,10,10,2,2) #(66,42,18,18,18) f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24);
let (a40,a41,a42,a43,a44) = smul_add_felem5 #w f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34) in
smul_add_felem5_fits_lemma #w #3 #(10,10,10,10,2) #(96,72,48,24,24) f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34)
val mul_felem5_eval_lemma_i:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10) /\ r5 == precomp_r5 r}
-> i:nat{i < w} ->
Lemma ((feval5 (mul_felem5 #w f1 r r5)).[i] == (feval5 f1).[i] `Vec.pfmul` (feval5 r).[i])
let mul_felem5_eval_lemma_i #w f1 r r5 i =
let (r0, r1, r2, r3, r4) = r in
let (f10, f11, f12, f13, f14) = f1 in
let (r50, r51, r52, r53, r54) = r5 in
let (a0,a1,a2,a3,a4) = smul_felem5 #w f10 (r0,r1,r2,r3,r4) in
smul_felem5_eval_lemma #w #3 #(2,2,2,2,2) f10 (r0,r1,r2,r3,r4);
smul_felem5_fits_lemma #w #3 #(2,2,2,2,2) f10 (r0,r1,r2,r3,r4);
assert ((fas_nat5 (a0,a1,a2,a3,a4)).[i] == (uint64xN_v f10).[i] * (fas_nat5 (r0,r1,r2,r3,r4)).[i]);
let (a10,a11,a12,a13,a14) = smul_add_felem5 #w f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4) in
smul_add_felem5_eval_lemma #w #3 #(10,2,2,2,2) #(6,6,6,6,6) f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4);
smul_add_felem5_fits_lemma #w #3 #(10,2,2,2,2) #(6,6,6,6,6) f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4);
assert ((fas_nat5 (a10,a11,a12,a13,a14)).[i] == (fas_nat5 (a0,a1,a2,a3,a4)).[i] + (uint64xN_v f11).[i] * (fas_nat5 (r54,r0,r1,r2,r3)).[i]);
let (a20,a21,a22,a23,a24) = smul_add_felem5 #w f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14) in
smul_add_felem5_eval_lemma #w #3 #(10,10,2,2,2) #(36,12,12,12,12) f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14);
smul_add_felem5_fits_lemma #w #3 #(10,10,2,2,2) #(36,12,12,12,12) f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14);
assert ((fas_nat5 (a20,a21,a22,a23,a24)).[i] == (fas_nat5 (a10,a11,a12,a13,a14)).[i] + (uint64xN_v f12).[i] * (fas_nat5 (r53,r54,r0,r1,r2)).[i]);
let (a30,a31,a32,a33,a34) = smul_add_felem5 #w f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24) in
smul_add_felem5_eval_lemma #w #3 #(10,10,10,2,2) #(66,42,18,18,18) f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24);
smul_add_felem5_fits_lemma #w #3 #(10,10,10,2,2) #(66,42,18,18,18) f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24);
assert ((fas_nat5 (a30,a31,a32,a33,a34)).[i] == (fas_nat5 (a20,a21,a22,a23,a24)).[i] + (uint64xN_v f13).[i] * (fas_nat5 (r52,r53,r54,r0,r1)).[i]);
let (a40,a41,a42,a43,a44) = smul_add_felem5 #w f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34) in
smul_add_felem5_eval_lemma #w #3 #(10,10,10,10,2) #(96,72,48,24,24) f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34);
smul_add_felem5_fits_lemma #w #3 #(10,10,10,10,2) #(96,72,48,24,24) f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34);
assert ((fas_nat5 (a40,a41,a42,a43,a44)).[i] == (fas_nat5 (a30,a31,a32,a33,a34)).[i] + (uint64xN_v f14).[i] * (fas_nat5 (r51,r52,r53,r54,r0)).[i]);
assert ((fas_nat5 (a40,a41,a42,a43,a44)).[i] ==
(uint64xN_v f10).[i] * (fas_nat5 (r0,r1,r2,r3,r4)).[i] +
(uint64xN_v f11).[i] * (fas_nat5 (r54,r0,r1,r2,r3)).[i] +
(uint64xN_v f12).[i] * (fas_nat5 (r53,r54,r0,r1,r2)).[i] +
(uint64xN_v f13).[i] * (fas_nat5 (r52,r53,r54,r0,r1)).[i] +
(uint64xN_v f14).[i] * (fas_nat5 (r51,r52,r53,r54,r0)).[i]);
mul_felem5_eval_as_tup64 #w f1 r r5 i;
mul_felem5_lemma (as_tup64_i f1 i) (as_tup64_i r i)
val mul_felem5_eval_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10) /\ r5 == precomp_r5 r} ->
Lemma (feval5 (mul_felem5 #w f1 r r5) == map2 (Vec.pfmul) (feval5 f1) (feval5 r))
[SMTPat (mul_felem5 #w f1 r r5)]
let mul_felem5_eval_lemma #w f1 r r5 =
let tmp = map2 (Vec.pfmul) (feval5 f1) (feval5 r) in
FStar.Classical.forall_intro (mul_felem5_eval_lemma_i #w f1 r r5);
eq_intro (feval5 (mul_felem5 #w f1 r r5)) tmp
val fmul_r5_fits_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10)} ->
Lemma (felem_fits5 (fmul_r5 #w f1 r r5) (1, 2, 1, 1, 2))
[SMTPat (fmul_r5 #w f1 r r5)]
let fmul_r5_fits_lemma #w f1 r r5 =
let tmp = mul_felem5 f1 r r5 in
mul_felem5_fits_lemma #w f1 r r5;
let res = carry_wide_felem5 tmp in
carry_wide_felem5_fits_lemma #w tmp
val fmul_r5_eval_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10) /\ r5 == precomp_r5 r} ->
Lemma (feval5 (fmul_r5 #w f1 r r5) == map2 (Vec.pfmul) (feval5 f1) (feval5 r))
[SMTPat (fmul_r5 #w f1 r r5)]
let fmul_r5_eval_lemma #w f1 r r5 =
let tmp = mul_felem5 f1 r r5 in
mul_felem5_eval_lemma #w f1 r r5;
let res = carry_wide_felem5 tmp in
carry_wide_felem5_eval_lemma #w tmp
val fadd_mul_r5_fits_lemma:
#w:lanes
-> acc:felem5 w{felem_fits5 acc (2, 2, 2, 2, 2)}
-> f1:felem5 w{felem_fits5 f1 (1, 1, 1, 1, 1)}
-> r:felem5 w{felem_fits5 r (1, 1, 1, 1, 1)}
-> r5:felem5 w{felem_fits5 r5 (5, 5, 5, 5, 5)} ->
Lemma (felem_fits5 (fadd_mul_r5 acc f1 r r5) (1, 2, 1, 1, 2))
[SMTPat (fadd_mul_r5 acc f1 r r5)]
let fadd_mul_r5_fits_lemma #w acc f1 r r5 =
let acc1 = fadd5 acc f1 in
fadd5_fits_lemma #w acc f1;
let tmp = mul_felem5 acc1 r r5 in
mul_felem5_fits_lemma #w acc1 r r5;
let res = carry_wide_felem5 tmp in
carry_wide_felem5_fits_lemma #w tmp
val fadd_mul_r5_eval_lemma:
#w:lanes
-> acc:felem5 w{felem_fits5 acc (2, 2, 2, 2, 2)}
-> f1:felem5 w{felem_fits5 f1 (1, 1, 1, 1, 1)}
-> r:felem5 w{felem_fits5 r (1, 1, 1, 1, 1)}
-> r5:felem5 w{felem_fits5 r5 (5, 5, 5, 5, 5) /\ r5 == precomp_r5 r} ->
Lemma (feval5 (fadd_mul_r5 acc f1 r r5) ==
map2 (Vec.pfmul) (map2 (Vec.pfadd) (feval5 acc) (feval5 f1)) (feval5 r))
[SMTPat (fadd_mul_r5 acc f1 r r5)]
let fadd_mul_r5_eval_lemma #w acc f1 r r5 =
let acc1 = fadd5 acc f1 in
fadd5_eval_lemma #w acc f1;
let tmp = mul_felem5 acc1 r r5 in
mul_felem5_eval_lemma #w acc1 r r5;
let res = carry_wide_felem5 tmp in
carry_wide_felem5_eval_lemma #w tmp
val reduce_felem5_eval_lemma:
#w:lanes
-> f:felem5 w{felem_fits5 f (2, 2, 2, 2, 2)} ->
Lemma
(felem_fits5 (reduce_felem5 f) (1, 1, 1, 1, 1) /\
(feval5 f).[0] == (fas_nat5 (reduce_felem5 f)).[0])
[SMTPat (reduce_felem5 f)]
let reduce_felem5_eval_lemma #w f =
carry_full_felem5_eval_lemma f;
carry_full_felem5_fits_lemma f;
let f = carry_full_felem5 f in
carry_reduce_felem5_lemma #w f;
subtract_p5_felem5_lemma #w (carry_full_felem5 f)
val fmul_r2_normalize50:
acc:felem5 2
-> r:felem5 2
-> r2:felem5 2 ->
Pure (felem5 2)
(requires
felem_fits5 acc (3, 3, 3, 3, 3) /\
felem_fits5 r (1, 1, 1, 1, 1) /\
felem_fits5 r2 (2, 2, 2, 2, 2) /\
feval5 r2 == Vec.compute_r2 (feval5 r).[0])
(ensures fun a ->
let fr21 = create2 (feval5 r2).[0] (feval5 r).[0] in
feval5 a == Vec.fmul (feval5 acc) fr21 /\
felem_fits5 a (1, 2, 1, 1, 2))
let fmul_r2_normalize50 (a0, a1, a2, a3, a4) (r0, r1, r2, r3, r4) (r20, r21, r22, r23, r24) =
let r210 = vec_interleave_low r20 r0 in
vec_interleave_low_lemma2 r20 r0;
let r211 = vec_interleave_low r21 r1 in
vec_interleave_low_lemma2 r21 r1;
let r212 = vec_interleave_low r22 r2 in
vec_interleave_low_lemma2 r22 r2;
let r213 = vec_interleave_low r23 r3 in
vec_interleave_low_lemma2 r23 r3;
let r214 = vec_interleave_low r24 r4 in
vec_interleave_low_lemma2 r24 r4;
let acc = (a0, a1, a2, a3, a4) in
let fr = (r0, r1, r2, r3, r4) in
let fr2 = (r20, r21, r22, r23, r24) in
assert ((feval5 fr2).[0] == Vec.pfmul ((feval5 fr).[0]) ((feval5 fr).[0]));
let fr21 = (r210, r211, r212, r213, r214) in
eq_intro (feval5 fr21) (create2 (feval5 fr2).[0] (feval5 fr).[0]);
assert (feval5 fr21 == create2 (feval5 fr2).[0] (feval5 fr).[0]);
assert (felem_fits5 fr21 (2, 2, 2, 2, 2));
let fr215 = precomp_r5 #2 fr21 in
let a = fmul_r5 #2 acc fr21 fr215 in
fmul_r5_eval_lemma acc fr21 fr215;
fmul_r5_fits_lemma acc fr21 fr215;
assert (feval5 a == Vec.fmul (feval5 acc) (feval5 fr21));
assert (felem_fits5 a (1, 2, 1, 1, 2));
a
#push-options "--z3rlimit 150"
val fmul_r2_normalize51:
a:felem5 2
-> fa1:felem5 2 ->
Pure (felem5 2)
(requires
felem_fits5 a (1, 2, 1, 1, 2) /\
felem_fits5 fa1 (1, 2, 1, 1, 2) /\
feval5 fa1 == create2 (feval5 a).[1] (feval5 a).[1])
(ensures fun out ->
(feval5 out).[0] == Vec.pfadd (feval5 a).[0] (feval5 a).[1] /\
felem_fits5 out (2, 4, 2, 2, 4))
let fmul_r2_normalize51 a fa1 =
let (a0, a1, a2, a3, a4) = a in
let (a10, a11, a12, a13, a14) = fa1 in
let o0 = vec_add_mod a0 a10 in
let o1 = vec_add_mod a1 a11 in
let o2 = vec_add_mod a2 a12 in
let o3 = vec_add_mod a3 a13 in
let o4 = vec_add_mod a4 a14 in
let out = (o0, o1, o2, o3, o4) in
let (a0, a1, a2, a3, a4) = as_tup64_i a 0 in
let (a10, a11, a12, a13, a14) = as_tup64_i fa1 0 in
let (o0, o1, o2, o3, o4) = as_tup64_i out 0 in
FStar.Math.Lemmas.modulo_lemma (v a0 + v a10) (pow2 64);
FStar.Math.Lemmas.modulo_lemma (v a1 + v a11) (pow2 64);
FStar.Math.Lemmas.modulo_lemma (v a2 + v a12) (pow2 64);
FStar.Math.Lemmas.modulo_lemma (v a3 + v a13) (pow2 64);
FStar.Math.Lemmas.modulo_lemma (v a4 + v a14) (pow2 64);
assert (felem_fits5 out (2, 4, 2, 2, 4));
calc (==) {
((feval5 a).[0] + (feval5 a).[1]) % Vec.prime;
(==) { }
(as_nat5 (a0, a1, a2, a3, a4) % Vec.prime + as_nat5 (a10, a11, a12, a13, a14) % Vec.prime) % Vec.prime;
(==) { FStar.Math.Lemmas.lemma_mod_plus_distr_l
(as_nat5 (a0, a1, a2, a3, a4)) (as_nat5 (a10, a11, a12, a13, a14)) Vec.prime;
FStar.Math.Lemmas.lemma_mod_plus_distr_r
(as_nat5 (a0, a1, a2, a3, a4) % Vec.prime) (as_nat5 (a10, a11, a12, a13, a14)) Vec.prime }
(as_nat5 (a0, a1, a2, a3, a4) + as_nat5 (a10, a11, a12, a13, a14)) % Vec.prime;
(==) { }
(feval5 out).[0];
};
out
#pop-options
val fmul_r2_normalize5_lemma:
acc:felem5 2
-> r:felem5 2
-> r2:felem5 2 ->
Lemma
(requires
felem_fits5 acc (3, 3, 3, 3, 3) /\
felem_fits5 r (1, 1, 1, 1, 1) /\
felem_fits5 r2 (2, 2, 2, 2, 2) /\
feval5 r2 == Vec.compute_r2 (feval5 r).[0])
(ensures
(let out = fmul_r2_normalize5 acc r r2 in
felem_fits5 out (2, 1, 1, 1, 1) /\
(feval5 out).[0] == Vec.normalize_2 (feval5 r).[0] (feval5 acc)))
[SMTPat (fmul_r2_normalize5 acc r r2)]
let fmul_r2_normalize5_lemma acc r r2 =
let a = fmul_r2_normalize50 acc r r2 in
let (a0, a1, a2, a3, a4) = a in
let a10 = vec_interleave_high a0 a0 in
vec_interleave_high_lemma2 a0 a0;
let a11 = vec_interleave_high a1 a1 in
vec_interleave_high_lemma2 a1 a1;
let a12 = vec_interleave_high a2 a2 in
vec_interleave_high_lemma2 a2 a2;
let a13 = vec_interleave_high a3 a3 in
vec_interleave_high_lemma2 a3 a3;
let a14 = vec_interleave_high a4 a4 in
vec_interleave_high_lemma2 a4 a4;
let fa1 = (a10, a11, a12, a13, a14) in
eq_intro (feval5 fa1) (create2 (feval5 a).[1] (feval5 a).[1]);
assert (feval5 fa1 == create2 (feval5 a).[1] (feval5 a).[1]);
assert (felem_fits5 fa1 (1, 2, 1, 1, 2));
let out = fmul_r2_normalize51 a fa1 in
assert ((feval5 out).[0] == Vec.pfadd (feval5 a).[0] (feval5 a).[1]);
let res = carry_full_felem5 out in
carry_full_felem5_lemma out
val fmul_r4_normalize50:
acc:felem5 4
-> r:felem5 4
-> r2:felem5 4
-> r3:felem5 4
-> r4:felem5 4 ->
Pure (felem5 4)
(requires
felem_fits5 acc (3, 3, 3, 3, 3) /\
felem_fits5 r (1, 1, 1, 1, 1) /\
felem_fits5 r2 (2, 2, 2, 2, 2) /\
felem_fits5 r3 (2, 2, 2, 2, 2) /\
felem_fits5 r4 (2, 2, 2, 2, 2) /\
feval5 r2 == Vec.fmul (feval5 r) (feval5 r) /\
feval5 r3 == Vec.fmul (feval5 r2) (feval5 r) /\
feval5 r4 == Vec.compute_r4 (feval5 r).[0])
(ensures fun out ->
let fr4321 = create4 (feval5 r4).[0] (feval5 r3).[0] (feval5 r2).[0] (feval5 r).[0] in
feval5 out == Vec.fmul (feval5 acc) fr4321 /\
felem_fits5 out (1, 2, 1, 1, 2))
let fmul_r4_normalize50 acc fr fr2 fr3 fr4 =
let (r10, r11, r12, r13, r14) = fr in
let (r20, r21, r22, r23, r24) = fr2 in
let (r30, r31, r32, r33, r34) = fr3 in
let (r40, r41, r42, r43, r44) = fr4 in
let (a0, a1, a2, a3, a4) = acc in
let v12120 = vec_interleave_low r20 r10 in
vec_interleave_low_lemma_uint64_4 r20 r10;
let v34340 = vec_interleave_low r40 r30 in
vec_interleave_low_lemma_uint64_4 r40 r30;
let r12340 = vec_interleave_low_n 2 v34340 v12120 in
vec_interleave_low_n_lemma_uint64_4_2 v34340 v12120;
let v12121 = vec_interleave_low r21 r11 in
vec_interleave_low_lemma_uint64_4 r21 r11;
let v34341 = vec_interleave_low r41 r31 in
vec_interleave_low_lemma_uint64_4 r41 r31;
let r12341 = vec_interleave_low_n 2 v34341 v12121 in
vec_interleave_low_n_lemma_uint64_4_2 v34341 v12121;
let v12122 = vec_interleave_low r22 r12 in
vec_interleave_low_lemma_uint64_4 r22 r12;
let v34342 = vec_interleave_low r42 r32 in
vec_interleave_low_lemma_uint64_4 r42 r32;
let r12342 = vec_interleave_low_n 2 v34342 v12122 in
vec_interleave_low_n_lemma_uint64_4_2 v34342 v12122;
let v12123 = vec_interleave_low r23 r13 in
vec_interleave_low_lemma_uint64_4 r23 r13;
let v34343 = vec_interleave_low r43 r33 in
vec_interleave_low_lemma_uint64_4 r43 r33;
let r12343 = vec_interleave_low_n 2 v34343 v12123 in
vec_interleave_low_n_lemma_uint64_4_2 v34343 v12123;
let v12124 = vec_interleave_low r24 r14 in
vec_interleave_low_lemma_uint64_4 r24 r14;
let v34344 = vec_interleave_low r44 r34 in
vec_interleave_low_lemma_uint64_4 r44 r34;
let r12344 = vec_interleave_low_n 2 v34344 v12124 in
vec_interleave_low_n_lemma_uint64_4_2 v34344 v12124;
let fr1234 = (r12340, r12341, r12342, r12343, r12344) in
eq_intro (feval5 fr1234) (create4 (feval5 fr4).[0] (feval5 fr3).[0] (feval5 fr2).[0] (feval5 fr).[0]);
let fr12345 = precomp_r5 #4 fr1234 in
let out = fmul_r5 #4 acc fr1234 fr12345 in
fmul_r5_eval_lemma acc fr1234 fr12345;
fmul_r5_fits_lemma acc fr1234 fr12345;
out
val lemma_fmul_r4_normalize51:
#m:scale32{m <= 2}
-> o:uint64xN 4{felem_fits1 o m} ->
Lemma
(let v00 = vec_interleave_high_n 2 o o in
let v10 = vec_add_mod o v00 in
let v10h = vec_interleave_high v10 v10 in
let v20 = vec_add_mod v10 v10h in
felem_fits1 v20 (4 * m) /\
(uint64xN_v v20).[0] == (uint64xN_v o).[0] + (uint64xN_v o).[1] + (uint64xN_v o).[2] + (uint64xN_v o).[3])
let lemma_fmul_r4_normalize51 #m o =
let v00 = vec_interleave_high_n 2 o o in
vec_interleave_high_n_lemma_uint64_4_2 o o;
let (o0, o1, o2, o3) = ((vec_v o).[0], (vec_v o).[1], (vec_v o).[2], (vec_v o).[3]) in
assert (vec_v v00 == create4 o2 o3 o2 o3);
let v10 = vec_add_mod o v00 in
FStar.Math.Lemmas.modulo_lemma (v o0 + v o2) (pow2 64);
FStar.Math.Lemmas.modulo_lemma (v o1 + v o3) (pow2 64);
assert (v (vec_v v10).[0] == v o0 + v o2);
assert (v (vec_v v10).[1] == v o1 + v o3);
let v10h = vec_interleave_high v10 v10 in
vec_interleave_high_lemma_uint64_4 v10 v10;
assert (v (vec_v v10h).[0] == v (vec_v v10).[1]);
let v20 = vec_add_mod v10 v10h in
FStar.Math.Lemmas.modulo_lemma (v o0 + v o2 + v o1 + v o3) (pow2 64)
val lemma_fmul_r4_normalize51_expand:
v2:felem5 4
-> out:felem5 4 ->
Lemma
(requires
(let (v20, v21, v22, v23, v24) = v2 in
let (o0, o1, o2, o3, o4) = out in
(uint64xN_v v20).[0] == (uint64xN_v o0).[0] + (uint64xN_v o0).[1] + (uint64xN_v o0).[2] + (uint64xN_v o0).[3] /\
(uint64xN_v v21).[0] == (uint64xN_v o1).[0] + (uint64xN_v o1).[1] + (uint64xN_v o1).[2] + (uint64xN_v o1).[3] /\
(uint64xN_v v22).[0] == (uint64xN_v o2).[0] + (uint64xN_v o2).[1] + (uint64xN_v o2).[2] + (uint64xN_v o2).[3] /\
(uint64xN_v v23).[0] == (uint64xN_v o3).[0] + (uint64xN_v o3).[1] + (uint64xN_v o3).[2] + (uint64xN_v o3).[3] /\
(uint64xN_v v24).[0] == (uint64xN_v o4).[0] + (uint64xN_v o4).[1] + (uint64xN_v o4).[2] + (uint64xN_v o4).[3]))
(ensures
(let (v20, v21, v22, v23, v24) = v2 in
let (o0, o1, o2, o3, o4) = out in
(feval5 v2).[0] == Vec.pfadd (Vec.pfadd (Vec.pfadd (feval5 out).[0] (feval5 out).[1]) (feval5 out).[2]) (feval5 out).[3]))
let lemma_fmul_r4_normalize51_expand v2 out =
let (v20, v21, v22, v23, v24) = as_tup64_i v2 0 in
let (o0, o1, o2, o3, o4) = out in
calc (==) {
as_nat5 (v20, v21, v22, v23, v24) % Vec.prime;
(==) { }
(as_nat5 (as_tup64_i out 0) + as_nat5 (as_tup64_i out 1) + as_nat5 (as_tup64_i out 2) + as_nat5 (as_tup64_i out 3)) % Vec.prime;
(==) { FStar.Math.Lemmas.lemma_mod_plus_distr_l (as_nat5 (as_tup64_i out 0) + as_nat5 (as_tup64_i out 1))
(as_nat5 (as_tup64_i out 2) + as_nat5 (as_tup64_i out 3)) Vec.prime }
((as_nat5 (as_tup64_i out 0) + as_nat5 (as_tup64_i out 1)) % Vec.prime + as_nat5 (as_tup64_i out 2) + as_nat5 (as_tup64_i out 3)) % Vec.prime;
(==) { FStar.Math.Lemmas.modulo_distributivity (as_nat5 (as_tup64_i out 0)) (as_nat5 (as_tup64_i out 1)) Vec.prime }
(((feval5 out).[0] + (feval5 out).[1]) % Vec.prime + as_nat5 (as_tup64_i out 2) + as_nat5 (as_tup64_i out 3)) % Vec.prime;
(==) { FStar.Math.Lemmas.lemma_mod_plus_distr_l (((feval5 out).[0] + (feval5 out).[1]) % Vec.prime + as_nat5 (as_tup64_i out 2))
(as_nat5 (as_tup64_i out 3)) Vec.prime }
((((feval5 out).[0] + (feval5 out).[1]) % Vec.prime + as_nat5 (as_tup64_i out 2)) % Vec.prime + as_nat5 (as_tup64_i out 3)) % Vec.prime;
(==) { FStar.Math.Lemmas.lemma_mod_plus_distr_r (((feval5 out).[0] + (feval5 out).[1]) % Vec.prime) (as_nat5 (as_tup64_i out 2)) Vec.prime }
((((feval5 out).[0] + (feval5 out).[1]) % Vec.prime + (feval5 out).[2]) % Vec.prime + as_nat5 (as_tup64_i out 3)) % Vec.prime;
(==) { FStar.Math.Lemmas.lemma_mod_plus_distr_r ((((feval5 out).[0] + (feval5 out).[1]) % Vec.prime + (feval5 out).[2]) % Vec.prime)
(as_nat5 (as_tup64_i out 3)) Vec.prime }
((((feval5 out).[0] + (feval5 out).[1]) % Vec.prime + (feval5 out).[2]) % Vec.prime + (feval5 out).[3]) % Vec.prime;
};
assert ((feval5 v2).[0] ==
((((feval5 out).[0] + (feval5 out).[1]) % Vec.prime + (feval5 out).[2]) % Vec.prime + (feval5 out).[3]) % Vec.prime)
val fmul_r4_normalize51: a:felem5 4 ->
Pure (felem5 4)
(requires felem_fits5 a (1, 2, 1, 1, 2))
(ensures fun res ->
felem_fits5 res (4, 8, 4, 4, 8) /\
(feval5 res).[0] == Vec.pfadd (Vec.pfadd (Vec.pfadd (feval5 a).[0] (feval5 a).[1]) (feval5 a).[2]) (feval5 a).[3])
let fmul_r4_normalize51 fa =
let (o0, o1, o2, o3, o4) = fa in
let v00 = vec_interleave_high_n 2 o0 o0 in
let v10 = vec_add_mod o0 v00 in
let v10h = vec_interleave_high v10 v10 in
let v20 = vec_add_mod v10 v10h in
lemma_fmul_r4_normalize51 #1 o0;
let v01 = vec_interleave_high_n 2 o1 o1 in
let v11 = vec_add_mod o1 v01 in
let v11h = vec_interleave_high v11 v11 in
let v21 = vec_add_mod v11 v11h in
lemma_fmul_r4_normalize51 #2 o1;
let v02 = vec_interleave_high_n 2 o2 o2 in
let v12 = vec_add_mod o2 v02 in
let v12h = vec_interleave_high v12 v12 in
let v22 = vec_add_mod v12 v12h in
lemma_fmul_r4_normalize51 #1 o2;
let v03 = vec_interleave_high_n 2 o3 o3 in
let v13 = vec_add_mod o3 v03 in
let v13h = vec_interleave_high v13 v13 in
let v23 = vec_add_mod v13 v13h in
lemma_fmul_r4_normalize51 #1 o3;
let v04 = vec_interleave_high_n 2 o4 o4 in
let v14 = vec_add_mod o4 v04 in
let v14h = vec_interleave_high v14 v14 in
let v24 = vec_add_mod v14 v14h in
lemma_fmul_r4_normalize51 #2 o4;
let res = (v20, v21, v22, v23, v24) in
lemma_fmul_r4_normalize51_expand res fa;
res
val fmul_r4_normalize5_lemma:
acc:felem5 4
-> r:felem5 4
-> r_5:felem5 4
-> r4:felem5 4 ->
Lemma
(requires
felem_fits5 acc (3, 3, 3, 3, 3) /\
felem_fits5 r (1, 1, 1, 1, 1) /\
felem_fits5 r4 (2, 2, 2, 2, 2) /\
r_5 == precomp_r5 r /\
feval5 r4 == Vec.compute_r4 (feval5 r).[0])
(ensures
(let out = fmul_r4_normalize5 acc r r_5 r4 in
felem_fits5 out (2, 1, 1, 1, 1) /\
(feval5 out).[0] == Vec.normalize_4 (feval5 r).[0] (feval5 acc)))
[SMTPat (fmul_r4_normalize5 acc r r_5 r4)]
#restart-solver
#push-options "--z3rlimit 500"
let fmul_r4_normalize5_lemma acc fr fr_5 fr4 =
let fr2 = fmul_r5 #4 fr fr fr_5 in
let fr3 = fmul_r5 #4 fr2 fr fr_5 in
let out = fmul_r4_normalize50 acc fr fr2 fr3 fr4 in
let v2 = fmul_r4_normalize51 out in
let res = carry_full_felem5 v2 in
carry_full_felem5_lemma v2
#pop-options
val load_felem5_lemma:
#w:lanes
-> lo:uint64xN w
-> hi:uint64xN w ->
Lemma
(let f = load_felem5 #w lo hi in
felem_fits5 f (1, 1, 1, 1, 1) /\
felem_less5 f (pow2 128) /\
feval5 f == createi #Vec.pfelem w
(fun i -> (uint64xN_v hi).[i] * pow2 64 + (uint64xN_v lo).[i]))
let load_felem5_lemma #w lo hi =
let f = load_felem5 #w lo hi in
assert_norm (pow2 64 * pow2 64 = pow2 128);
assert_norm (pow2 128 < Vec.prime);
let res = createi #Vec.pfelem w
(fun i -> (uint64xN_v hi).[i] * pow2 64 + (uint64xN_v lo).[i]) in
match w with
| 1 ->
load_felem5_lemma_i #w lo hi 0;
eq_intro (feval5 f) res
| 2 ->
load_felem5_lemma_i #w lo hi 0;
load_felem5_lemma_i #w lo hi 1;
eq_intro (feval5 f) res
| 4 ->
load_felem5_lemma_i #w lo hi 0;
load_felem5_lemma_i #w lo hi 1;
load_felem5_lemma_i #w lo hi 2;
load_felem5_lemma_i #w lo hi 3;
eq_intro (feval5 f) res
val load_felem5_4_interleave: lo:uint64xN 4 -> hi:uint64xN 4 -> Lemma
(let m0 = vec_interleave_low_n 2 lo hi in
let m1 = vec_interleave_high_n 2 lo hi in
let m2 = cast U64 4 (vec_shift_right (cast U128 2 m0) 48ul) in
let m3 = cast U64 4 (vec_shift_right (cast U128 2 m1) 48ul) in
let m4 = vec_interleave_high m0 m1 in
let t0 = vec_interleave_low m0 m1 in
let t3 = vec_interleave_low m2 m3 in
vec_v m4 == create4 (vec_v lo).[1] (vec_v lo).[3] (vec_v hi).[1] (vec_v hi).[3] /\
vec_v t0 == create4 (vec_v lo).[0] (vec_v lo).[2] (vec_v hi).[0] (vec_v hi).[2] /\
t3 == vec_or (vec_shift_right t0 48ul) (vec_shift_left m4 16ul))
let load_felem5_4_interleave lo hi =
let m0 = vec_interleave_low_n 2 lo hi in
vec_interleave_low_n_lemma_uint64_4_2 lo hi;
//assert (vec_v m0 == create4 (vec_v lo).[0] (vec_v lo).[1] (vec_v hi).[0] (vec_v hi).[1]);
let m1 = vec_interleave_high_n 2 lo hi in
vec_interleave_high_n_lemma_uint64_4_2 lo hi;
//assert (vec_v m1 == create4 (vec_v lo).[2] (vec_v lo).[3] (vec_v hi).[2] (vec_v hi).[3]);
let m4 = vec_interleave_high m0 m1 in
vec_interleave_high_lemma_uint64_4 m0 m1;
//assert (vec_v m4 == create4 (vec_v m0).[1] (vec_v m1).[1] (vec_v m0).[3] (vec_v m1).[3]);
assert (vec_v m4 == create4 (vec_v lo).[1] (vec_v lo).[3] (vec_v hi).[1] (vec_v hi).[3]);
let t0 = vec_interleave_low m0 m1 in
vec_interleave_low_lemma_uint64_4 m0 m1;
//assert (vec_v t0 == create4 (vec_v m0).[0] (vec_v m1).[0] (vec_v m0).[2] (vec_v m1).[2]);
assert (vec_v t0 == create4 (vec_v lo).[0] (vec_v lo).[2] (vec_v hi).[0] (vec_v hi).[2]);
let m2 = cast U64 4 (vec_shift_right (cast U128 2 m0) 48ul) in
vec_shift_right_uint128_small2 m0 48ul;
assert ((vec_v m2).[0] == (((vec_v lo).[0] >>. 48ul) |. ((vec_v lo).[1] <<. 16ul)));
assert ((vec_v m2).[2] == (((vec_v hi).[0] >>. 48ul) |. ((vec_v hi).[1] <<. 16ul)));
let m3 = cast U64 4 (vec_shift_right (cast U128 2 m1) 48ul) in
vec_shift_right_uint128_small2 m1 48ul;
assert ((vec_v m3).[0] == (((vec_v lo).[2] >>. 48ul) |. ((vec_v lo).[3] <<. 16ul)));
assert ((vec_v m3).[2] == (((vec_v hi).[2] >>. 48ul) |. ((vec_v hi).[3] <<. 16ul)));
let t3 = vec_interleave_low m2 m3 in
vec_interleave_low_lemma_uint64_4 m2 m3;
eq_intro (vec_v t3) (vec_v (vec_or (vec_shift_right t0 48ul) (vec_shift_left m4 16ul)));
vecv_extensionality t3 (vec_or (vec_shift_right t0 48ul) (vec_shift_left m4 16ul))
noextract | false | false | Hacl.Spec.Poly1305.Field32xN.Lemmas.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 100,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val load_felem5_4_compact: lo:uint64xN 4 -> hi:uint64xN 4 -> felem5 4 | [] | Hacl.Spec.Poly1305.Field32xN.Lemmas.load_felem5_4_compact | {
"file_name": "code/poly1305/Hacl.Spec.Poly1305.Field32xN.Lemmas.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | lo: Hacl.Spec.Poly1305.Field32xN.uint64xN 4 -> hi: Hacl.Spec.Poly1305.Field32xN.uint64xN 4
-> Hacl.Spec.Poly1305.Field32xN.felem5 4 | {
"end_col": 22,
"end_line": 709,
"start_col": 33,
"start_line": 701
} |
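
`load_felem5_4_compact` in the row above unpacks each 128-bit value hi·2^64 + lo into five limbs in radix 2^26 (bit ranges 0–25, 26–51, 52–77, 78–103, 104–127), with the auxiliary word `t3` holding bits 48–111. A scalar Python model of the same shift/mask schedule — one (lo, hi) pair, vector lanes dropped — showing that the split is exact (editorial sketch, not the library code):

```python
# Scalar model of the limb split used by load_felem5_4_compact:
# limbs cover bits 0..25, 26..51, 52..77, 78..103, 104..127 of hi*2^64 + lo.
MASK26 = (1 << 26) - 1
MASK64 = (1 << 64) - 1

def load_felem_compact(lo, hi):
    t3 = ((lo >> 48) | (hi << 16)) & MASK64  # bits 48..111 of the full value
    o0 = lo & MASK26
    o1 = (lo >> 26) & MASK26
    o2 = (t3 >> 4) & MASK26
    o3 = (t3 >> 30) & MASK26
    o4 = hi >> 40
    return (o0, o1, o2, o3, o4)

lo, hi = 0x0123456789abcdef, 0xfedcba9876543210
limbs = load_felem_compact(lo, hi)
value = sum(l << (26 * i) for i, l in enumerate(limbs))
assert value == hi * 2**64 + lo  # reconstruction is exact below 2^128
```
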
FStar.Pervasives.Lemma | val load_felem5_4_compact_lemma_i: lo:uint64xN 4 -> hi:uint64xN 4 -> i:nat{i < 4} ->
Lemma
(let f = as_tup64_i (load_felem5_4_compact lo hi) i in
tup64_fits5 f (1, 1, 1, 1, 1) /\
as_nat5 f < pow2 128 /\
as_nat5 f % Vec.prime == (uint64xN_v hi).[i] * pow2 64 + (uint64xN_v lo).[i]) | [
{
"abbrev": true,
"full_module": "Hacl.Spec.Poly1305.Vec",
"short_module": "Vec"
},
{
"abbrev": false,
"full_module": "Hacl.Poly1305.Field32xN.Lemmas2",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Poly1305.Field32xN.Lemmas1",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Poly1305.Field32xN.Lemmas0",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305.Field32xN",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305.Field32xN",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305.Field32xN",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let load_felem5_4_compact_lemma_i lo hi i =
assert (as_tup64_i (load_felem5_4_compact lo hi) i == load_tup64_4_compact (vec_v lo).[i] (vec_v hi).[i]);
load_tup64_4_compact_lemma (vec_v lo).[i] (vec_v hi).[i] | val load_felem5_4_compact_lemma_i: lo:uint64xN 4 -> hi:uint64xN 4 -> i:nat{i < 4} ->
Lemma
(let f = as_tup64_i (load_felem5_4_compact lo hi) i in
tup64_fits5 f (1, 1, 1, 1, 1) /\
as_nat5 f < pow2 128 /\
as_nat5 f % Vec.prime == (uint64xN_v hi).[i] * pow2 64 + (uint64xN_v lo).[i])
let load_felem5_4_compact_lemma_i lo hi i = | false | null | true | assert (as_tup64_i (load_felem5_4_compact lo hi) i ==
load_tup64_4_compact (vec_v lo).[ i ] (vec_v hi).[ i ]);
load_tup64_4_compact_lemma (vec_v lo).[ i ] (vec_v hi).[ i ] | {
"checked_file": "Hacl.Spec.Poly1305.Field32xN.Lemmas.fst.checked",
"dependencies": [
"prims.fst.checked",
"Lib.Sequence.fsti.checked",
"Lib.IntVector.fsti.checked",
"Lib.IntTypes.fsti.checked",
"Hacl.Spec.Poly1305.Vec.fst.checked",
"Hacl.Spec.Poly1305.Field32xN.fst.checked",
"Hacl.Poly1305.Field32xN.Lemmas2.fst.checked",
"Hacl.Poly1305.Field32xN.Lemmas1.fst.checked",
"Hacl.Poly1305.Field32xN.Lemmas0.fst.checked",
"Hacl.Impl.Poly1305.Lemmas.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Classical.fsti.checked",
"FStar.Calc.fsti.checked"
],
"interface_file": false,
"source_file": "Hacl.Spec.Poly1305.Field32xN.Lemmas.fst"
} | [
"lemma"
] | [
"Hacl.Spec.Poly1305.Field32xN.uint64xN",
"Prims.nat",
"Prims.b2t",
"Prims.op_LessThan",
"Hacl.Poly1305.Field32xN.Lemmas2.load_tup64_4_compact_lemma",
"Lib.Sequence.op_String_Access",
"Lib.IntTypes.uint_t",
"Lib.IntTypes.U64",
"Lib.IntTypes.SEC",
"Lib.IntVector.vec_v",
"Prims.unit",
"Prims._assert",
"Prims.eq2",
"Hacl.Spec.Poly1305.Field32xN.tup64_5",
"Hacl.Spec.Poly1305.Field32xN.as_tup64_i",
"Hacl.Spec.Poly1305.Field32xN.Lemmas.load_felem5_4_compact",
"Hacl.Poly1305.Field32xN.Lemmas2.load_tup64_4_compact"
] | [] | module Hacl.Spec.Poly1305.Field32xN.Lemmas
open Lib.IntTypes
open Lib.IntVector
open Lib.Sequence
open FStar.Mul
open Hacl.Spec.Poly1305.Field32xN
open Hacl.Poly1305.Field32xN.Lemmas0
open Hacl.Poly1305.Field32xN.Lemmas1
open Hacl.Poly1305.Field32xN.Lemmas2
module Vec = Hacl.Spec.Poly1305.Vec
#set-options "--z3rlimit 100 --max_fuel 0 --initial_ifuel 1 --max_ifuel 1 --using_facts_from '* -FStar.Seq'"
val lemma_feval_is_fas_nat_i:
#w:lanes
-> f:felem5 w{felem_less5 f (pow2 128)}
-> i:size_nat{i < w} ->
Lemma ((feval5 f).[i] == (fas_nat5 f).[i])
let lemma_feval_is_fas_nat_i #w f i =
assert_norm (pow2 128 < Vec.prime);
assert ((feval5 f).[i] == (as_nat5 (transpose f).[i]) % Vec.prime);
FStar.Math.Lemmas.modulo_lemma (as_nat5 (transpose f).[i]) Vec.prime
val lemma_feval_is_fas_nat:
#w:lanes
-> f:felem5 w{felem_less5 f (pow2 128)} ->
Lemma (forall (i:nat). i < w ==> (fas_nat5 f).[i] == (feval5 f).[i])
let lemma_feval_is_fas_nat #w f =
FStar.Classical.forall_intro (lemma_feval_is_fas_nat_i #w f)
val precomp_r5_fits_lemma:
#w:lanes
-> r:felem5 w{felem_fits5 r (1, 1, 1, 1, 1)} ->
Lemma (felem_fits5 (precomp_r5 #w r) (5, 5, 5, 5, 5))
[SMTPat (precomp_r5 #w r)]
let precomp_r5_fits_lemma #w r =
FStar.Classical.forall_intro (precomp_r5_as_tup64 #w r)
val precomp_r5_fits_lemma2:
#w:lanes
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)} ->
Lemma (felem_fits5 (precomp_r5 #w r) (10, 10, 10, 10, 10))
[SMTPat (precomp_r5 #w r)]
let precomp_r5_fits_lemma2 #w r =
FStar.Classical.forall_intro (precomp_r5_as_tup64 #w r)
val precomp_r5_zeros: w:lanes -> Lemma
(let r = (zero w, zero w, zero w, zero w, zero w) in
precomp_r5 r == (zero w, zero w, zero w, zero w, zero w))
let precomp_r5_zeros w =
let r = (zero w, zero w, zero w, zero w, zero w) in
let (r0, r1, r2, r3, r4) = precomp_r5 r in
let aux (i:nat{i < w}) : Lemma ((vec_v (vec_smul_mod (zero w) (u64 5))).[i] == u64 0) = () in
Classical.forall_intro aux;
eq_intro (vec_v (vec_smul_mod (zero w) (u64 5))) (vec_v (zero w));
vecv_extensionality (vec_smul_mod (zero w) (u64 5)) (zero w)
val fadd5_fits_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (2,2,2,2,2)}
-> f2:felem5 w{felem_fits5 f2 (1,1,1,1,1)} ->
Lemma (felem_fits5 (fadd5 f1 f2) (3,3,3,3,3))
[SMTPat (fadd5 f1 f2)]
let fadd5_fits_lemma #w f1 f2 =
let (f10, f11, f12, f13, f14) = f1 in
let (f20, f21, f22, f23, f24) = f2 in
let o = fadd5 f1 f2 in
vec_add_mod_lemma f10 f20;
vec_add_mod_lemma f11 f21;
vec_add_mod_lemma f12 f22;
vec_add_mod_lemma f13 f23;
vec_add_mod_lemma f14 f24
val fadd5_eval_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (2,2,2,2,2)}
-> f2:felem5 w{felem_fits5 f2 (1,1,1,1,1)} ->
Lemma (feval5 (fadd5 f1 f2) == map2 Vec.pfadd (feval5 f1) (feval5 f2))
[SMTPat (fadd5 f1 f2)]
let fadd5_eval_lemma #w f1 f2 =
let o = fadd5 f1 f2 in
FStar.Classical.forall_intro (fadd5_eval_lemma_i f1 f2);
eq_intro (feval5 o) (map2 Vec.pfadd (feval5 f1) (feval5 f2))
val mul_felem5_fits_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10)} ->
Lemma (felem_wide_fits5 (mul_felem5 #w f1 r r5) (126, 102, 78, 54, 30))
[SMTPat (mul_felem5 #w f1 r r5)]
let mul_felem5_fits_lemma #w f1 r r5 =
let (r0, r1, r2, r3, r4) = r in
let (f10, f11, f12, f13, f14) = f1 in
let (r50, r51, r52, r53, r54) = r5 in
let (a0,a1,a2,a3,a4) = smul_felem5 #w f10 (r0,r1,r2,r3,r4) in
smul_felem5_fits_lemma #w #3 #(2,2,2,2,2) f10 (r0,r1,r2,r3,r4);
let (a10,a11,a12,a13,a14) = smul_add_felem5 #w f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4) in
smul_add_felem5_fits_lemma #w #3 #(10,2,2,2,2) #(6,6,6,6,6) f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4);
let (a20,a21,a22,a23,a24) = smul_add_felem5 #w f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14) in
smul_add_felem5_fits_lemma #w #3 #(10,10,2,2,2) #(36,12,12,12,12) f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14);
let (a30,a31,a32,a33,a34) = smul_add_felem5 #w f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24) in
smul_add_felem5_fits_lemma #w #3 #(10,10,10,2,2) #(66,42,18,18,18) f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24);
let (a40,a41,a42,a43,a44) = smul_add_felem5 #w f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34) in
smul_add_felem5_fits_lemma #w #3 #(10,10,10,10,2) #(96,72,48,24,24) f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34)
val mul_felem5_eval_lemma_i:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10) /\ r5 == precomp_r5 r}
-> i:nat{i < w} ->
Lemma ((feval5 (mul_felem5 #w f1 r r5)).[i] == (feval5 f1).[i] `Vec.pfmul` (feval5 r).[i])
let mul_felem5_eval_lemma_i #w f1 r r5 i =
let (r0, r1, r2, r3, r4) = r in
let (f10, f11, f12, f13, f14) = f1 in
let (r50, r51, r52, r53, r54) = r5 in
let (a0,a1,a2,a3,a4) = smul_felem5 #w f10 (r0,r1,r2,r3,r4) in
smul_felem5_eval_lemma #w #3 #(2,2,2,2,2) f10 (r0,r1,r2,r3,r4);
smul_felem5_fits_lemma #w #3 #(2,2,2,2,2) f10 (r0,r1,r2,r3,r4);
assert ((fas_nat5 (a0,a1,a2,a3,a4)).[i] == (uint64xN_v f10).[i] * (fas_nat5 (r0,r1,r2,r3,r4)).[i]);
let (a10,a11,a12,a13,a14) = smul_add_felem5 #w f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4) in
smul_add_felem5_eval_lemma #w #3 #(10,2,2,2,2) #(6,6,6,6,6) f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4);
smul_add_felem5_fits_lemma #w #3 #(10,2,2,2,2) #(6,6,6,6,6) f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4);
assert ((fas_nat5 (a10,a11,a12,a13,a14)).[i] == (fas_nat5 (a0,a1,a2,a3,a4)).[i] + (uint64xN_v f11).[i] * (fas_nat5 (r54,r0,r1,r2,r3)).[i]);
let (a20,a21,a22,a23,a24) = smul_add_felem5 #w f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14) in
smul_add_felem5_eval_lemma #w #3 #(10,10,2,2,2) #(36,12,12,12,12) f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14);
smul_add_felem5_fits_lemma #w #3 #(10,10,2,2,2) #(36,12,12,12,12) f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14);
assert ((fas_nat5 (a20,a21,a22,a23,a24)).[i] == (fas_nat5 (a10,a11,a12,a13,a14)).[i] + (uint64xN_v f12).[i] * (fas_nat5 (r53,r54,r0,r1,r2)).[i]);
let (a30,a31,a32,a33,a34) = smul_add_felem5 #w f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24) in
smul_add_felem5_eval_lemma #w #3 #(10,10,10,2,2) #(66,42,18,18,18) f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24);
smul_add_felem5_fits_lemma #w #3 #(10,10,10,2,2) #(66,42,18,18,18) f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24);
assert ((fas_nat5 (a30,a31,a32,a33,a34)).[i] == (fas_nat5 (a20,a21,a22,a23,a24)).[i] + (uint64xN_v f13).[i] * (fas_nat5 (r52,r53,r54,r0,r1)).[i]);
let (a40,a41,a42,a43,a44) = smul_add_felem5 #w f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34) in
smul_add_felem5_eval_lemma #w #3 #(10,10,10,10,2) #(96,72,48,24,24) f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34);
smul_add_felem5_fits_lemma #w #3 #(10,10,10,10,2) #(96,72,48,24,24) f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34);
assert ((fas_nat5 (a40,a41,a42,a43,a44)).[i] == (fas_nat5 (a30,a31,a32,a33,a34)).[i] + (uint64xN_v f14).[i] * (fas_nat5 (r51,r52,r53,r54,r0)).[i]);
assert ((fas_nat5 (a40,a41,a42,a43,a44)).[i] ==
(uint64xN_v f10).[i] * (fas_nat5 (r0,r1,r2,r3,r4)).[i] +
(uint64xN_v f11).[i] * (fas_nat5 (r54,r0,r1,r2,r3)).[i] +
(uint64xN_v f12).[i] * (fas_nat5 (r53,r54,r0,r1,r2)).[i] +
(uint64xN_v f13).[i] * (fas_nat5 (r52,r53,r54,r0,r1)).[i] +
(uint64xN_v f14).[i] * (fas_nat5 (r51,r52,r53,r54,r0)).[i]);
mul_felem5_eval_as_tup64 #w f1 r r5 i;
mul_felem5_lemma (as_tup64_i f1 i) (as_tup64_i r i)
val mul_felem5_eval_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10) /\ r5 == precomp_r5 r} ->
Lemma (feval5 (mul_felem5 #w f1 r r5) == map2 (Vec.pfmul) (feval5 f1) (feval5 r))
[SMTPat (mul_felem5 #w f1 r r5)]
let mul_felem5_eval_lemma #w f1 r r5 =
let tmp = map2 (Vec.pfmul) (feval5 f1) (feval5 r) in
FStar.Classical.forall_intro (mul_felem5_eval_lemma_i #w f1 r r5);
eq_intro (feval5 (mul_felem5 #w f1 r r5)) tmp
val fmul_r5_fits_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10)} ->
Lemma (felem_fits5 (fmul_r5 #w f1 r r5) (1, 2, 1, 1, 2))
[SMTPat (fmul_r5 #w f1 r r5)]
let fmul_r5_fits_lemma #w f1 r r5 =
let tmp = mul_felem5 f1 r r5 in
mul_felem5_fits_lemma #w f1 r r5;
let res = carry_wide_felem5 tmp in
carry_wide_felem5_fits_lemma #w tmp
val fmul_r5_eval_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10) /\ r5 == precomp_r5 r} ->
Lemma (feval5 (fmul_r5 #w f1 r r5) == map2 (Vec.pfmul) (feval5 f1) (feval5 r))
[SMTPat (fmul_r5 #w f1 r r5)]
let fmul_r5_eval_lemma #w f1 r r5 =
let tmp = mul_felem5 f1 r r5 in
mul_felem5_eval_lemma #w f1 r r5;
let res = carry_wide_felem5 tmp in
carry_wide_felem5_eval_lemma #w tmp
val fadd_mul_r5_fits_lemma:
#w:lanes
-> acc:felem5 w{felem_fits5 acc (2, 2, 2, 2, 2)}
-> f1:felem5 w{felem_fits5 f1 (1, 1, 1, 1, 1)}
-> r:felem5 w{felem_fits5 r (1, 1, 1, 1, 1)}
-> r5:felem5 w{felem_fits5 r5 (5, 5, 5, 5, 5)} ->
Lemma (felem_fits5 (fadd_mul_r5 acc f1 r r5) (1, 2, 1, 1, 2))
[SMTPat (fadd_mul_r5 acc f1 r r5)]
let fadd_mul_r5_fits_lemma #w acc f1 r r5 =
let acc1 = fadd5 acc f1 in
fadd5_fits_lemma #w acc f1;
let tmp = mul_felem5 acc1 r r5 in
mul_felem5_fits_lemma #w acc1 r r5;
let res = carry_wide_felem5 tmp in
carry_wide_felem5_fits_lemma #w tmp
val fadd_mul_r5_eval_lemma:
#w:lanes
-> acc:felem5 w{felem_fits5 acc (2, 2, 2, 2, 2)}
-> f1:felem5 w{felem_fits5 f1 (1, 1, 1, 1, 1)}
-> r:felem5 w{felem_fits5 r (1, 1, 1, 1, 1)}
-> r5:felem5 w{felem_fits5 r5 (5, 5, 5, 5, 5) /\ r5 == precomp_r5 r} ->
Lemma (feval5 (fadd_mul_r5 acc f1 r r5) ==
map2 (Vec.pfmul) (map2 (Vec.pfadd) (feval5 acc) (feval5 f1)) (feval5 r))
[SMTPat (fadd_mul_r5 acc f1 r r5)]
let fadd_mul_r5_eval_lemma #w acc f1 r r5 =
let acc1 = fadd5 acc f1 in
fadd5_eval_lemma #w acc f1;
let tmp = mul_felem5 acc1 r r5 in
mul_felem5_eval_lemma #w acc1 r r5;
let res = carry_wide_felem5 tmp in
carry_wide_felem5_eval_lemma #w tmp
val reduce_felem5_eval_lemma:
#w:lanes
-> f:felem5 w{felem_fits5 f (2, 2, 2, 2, 2)} ->
Lemma
(felem_fits5 (reduce_felem5 f) (1, 1, 1, 1, 1) /\
(feval5 f).[0] == (fas_nat5 (reduce_felem5 f)).[0])
[SMTPat (reduce_felem5 f)]
let reduce_felem5_eval_lemma #w f =
carry_full_felem5_eval_lemma f;
carry_full_felem5_fits_lemma f;
let f = carry_full_felem5 f in
carry_reduce_felem5_lemma #w f;
subtract_p5_felem5_lemma #w (carry_full_felem5 f)
val fmul_r2_normalize50:
acc:felem5 2
-> r:felem5 2
-> r2:felem5 2 ->
Pure (felem5 2)
(requires
felem_fits5 acc (3, 3, 3, 3, 3) /\
felem_fits5 r (1, 1, 1, 1, 1) /\
felem_fits5 r2 (2, 2, 2, 2, 2) /\
feval5 r2 == Vec.compute_r2 (feval5 r).[0])
(ensures fun a ->
let fr21 = create2 (feval5 r2).[0] (feval5 r).[0] in
feval5 a == Vec.fmul (feval5 acc) fr21 /\
felem_fits5 a (1, 2, 1, 1, 2))
let fmul_r2_normalize50 (a0, a1, a2, a3, a4) (r0, r1, r2, r3, r4) (r20, r21, r22, r23, r24) =
let r210 = vec_interleave_low r20 r0 in
vec_interleave_low_lemma2 r20 r0;
let r211 = vec_interleave_low r21 r1 in
vec_interleave_low_lemma2 r21 r1;
let r212 = vec_interleave_low r22 r2 in
vec_interleave_low_lemma2 r22 r2;
let r213 = vec_interleave_low r23 r3 in
vec_interleave_low_lemma2 r23 r3;
let r214 = vec_interleave_low r24 r4 in
vec_interleave_low_lemma2 r24 r4;
let acc = (a0, a1, a2, a3, a4) in
let fr = (r0, r1, r2, r3, r4) in
let fr2 = (r20, r21, r22, r23, r24) in
assert ((feval5 fr2).[0] == Vec.pfmul ((feval5 fr).[0]) ((feval5 fr).[0]));
let fr21 = (r210, r211, r212, r213, r214) in
eq_intro (feval5 fr21) (create2 (feval5 fr2).[0] (feval5 fr).[0]);
assert (feval5 fr21 == create2 (feval5 fr2).[0] (feval5 fr).[0]);
assert (felem_fits5 fr21 (2, 2, 2, 2, 2));
let fr215 = precomp_r5 #2 fr21 in
let a = fmul_r5 #2 acc fr21 fr215 in
fmul_r5_eval_lemma acc fr21 fr215;
fmul_r5_fits_lemma acc fr21 fr215;
assert (feval5 a == Vec.fmul (feval5 acc) (feval5 fr21));
assert (felem_fits5 a (1, 2, 1, 1, 2));
a
#push-options "--z3rlimit 150"
val fmul_r2_normalize51:
a:felem5 2
-> fa1:felem5 2 ->
Pure (felem5 2)
(requires
felem_fits5 a (1, 2, 1, 1, 2) /\
felem_fits5 fa1 (1, 2, 1, 1, 2) /\
feval5 fa1 == create2 (feval5 a).[1] (feval5 a).[1])
(ensures fun out ->
(feval5 out).[0] == Vec.pfadd (feval5 a).[0] (feval5 a).[1] /\
felem_fits5 out (2, 4, 2, 2, 4))
let fmul_r2_normalize51 a fa1 =
let (a0, a1, a2, a3, a4) = a in
let (a10, a11, a12, a13, a14) = fa1 in
let o0 = vec_add_mod a0 a10 in
let o1 = vec_add_mod a1 a11 in
let o2 = vec_add_mod a2 a12 in
let o3 = vec_add_mod a3 a13 in
let o4 = vec_add_mod a4 a14 in
let out = (o0, o1, o2, o3, o4) in
let (a0, a1, a2, a3, a4) = as_tup64_i a 0 in
let (a10, a11, a12, a13, a14) = as_tup64_i fa1 0 in
let (o0, o1, o2, o3, o4) = as_tup64_i out 0 in
FStar.Math.Lemmas.modulo_lemma (v a0 + v a10) (pow2 64);
FStar.Math.Lemmas.modulo_lemma (v a1 + v a11) (pow2 64);
FStar.Math.Lemmas.modulo_lemma (v a2 + v a12) (pow2 64);
FStar.Math.Lemmas.modulo_lemma (v a3 + v a13) (pow2 64);
FStar.Math.Lemmas.modulo_lemma (v a4 + v a14) (pow2 64);
assert (felem_fits5 out (2, 4, 2, 2, 4));
calc (==) {
((feval5 a).[0] + (feval5 a).[1]) % Vec.prime;
(==) { }
(as_nat5 (a0, a1, a2, a3, a4) % Vec.prime + as_nat5 (a10, a11, a12, a13, a14) % Vec.prime) % Vec.prime;
(==) { FStar.Math.Lemmas.lemma_mod_plus_distr_l
(as_nat5 (a0, a1, a2, a3, a4)) (as_nat5 (a10, a11, a12, a13, a14)) Vec.prime;
FStar.Math.Lemmas.lemma_mod_plus_distr_r
(as_nat5 (a0, a1, a2, a3, a4) % Vec.prime) (as_nat5 (a10, a11, a12, a13, a14)) Vec.prime }
(as_nat5 (a0, a1, a2, a3, a4) + as_nat5 (a10, a11, a12, a13, a14)) % Vec.prime;
(==) { }
(feval5 out).[0];
};
out
#pop-options
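(* The calc block in fmul_r2_normalize51 only uses the standard identity
     ((x mod p) + (y mod p)) mod p = (x + y) mod p
   (via lemma_mod_plus_distr_l/_r), after the modulo_lemma calls have checked that
   the limb-wise vec_add_mod cannot wrap at 64 bits for the stated bounds, so the
   lane-0 result is exactly (feval5 a).[0] + (feval5 a).[1] reduced mod p. *)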
val fmul_r2_normalize5_lemma:
acc:felem5 2
-> r:felem5 2
-> r2:felem5 2 ->
Lemma
(requires
felem_fits5 acc (3, 3, 3, 3, 3) /\
felem_fits5 r (1, 1, 1, 1, 1) /\
felem_fits5 r2 (2, 2, 2, 2, 2) /\
feval5 r2 == Vec.compute_r2 (feval5 r).[0])
(ensures
(let out = fmul_r2_normalize5 acc r r2 in
felem_fits5 out (2, 1, 1, 1, 1) /\
(feval5 out).[0] == Vec.normalize_2 (feval5 r).[0] (feval5 acc)))
[SMTPat (fmul_r2_normalize5 acc r r2)]
let fmul_r2_normalize5_lemma acc r r2 =
let a = fmul_r2_normalize50 acc r r2 in
let (a0, a1, a2, a3, a4) = a in
let a10 = vec_interleave_high a0 a0 in
vec_interleave_high_lemma2 a0 a0;
let a11 = vec_interleave_high a1 a1 in
vec_interleave_high_lemma2 a1 a1;
let a12 = vec_interleave_high a2 a2 in
vec_interleave_high_lemma2 a2 a2;
let a13 = vec_interleave_high a3 a3 in
vec_interleave_high_lemma2 a3 a3;
let a14 = vec_interleave_high a4 a4 in
vec_interleave_high_lemma2 a4 a4;
let fa1 = (a10, a11, a12, a13, a14) in
eq_intro (feval5 fa1) (create2 (feval5 a).[1] (feval5 a).[1]);
assert (feval5 fa1 == create2 (feval5 a).[1] (feval5 a).[1]);
assert (felem_fits5 fa1 (1, 2, 1, 1, 2));
let out = fmul_r2_normalize51 a fa1 in
assert ((feval5 out).[0] == Vec.pfadd (feval5 a).[0] (feval5 a).[1]);
let res = carry_full_felem5 out in
carry_full_felem5_lemma out
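(* Reading the two contracts above together: fmul_r2_normalize50 multiplies lane 0
   of the accumulator by r^2 and lane 1 by r, and fmul_r2_normalize51 adds the two
   lanes, so after the final carry the scalar value left in lane 0 is
     acc_0 * r^2 + acc_1 * r (mod p),
   which is what the 2-way re-normalization step is expected to compute. *)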
val fmul_r4_normalize50:
acc:felem5 4
-> r:felem5 4
-> r2:felem5 4
-> r3:felem5 4
-> r4:felem5 4 ->
Pure (felem5 4)
(requires
felem_fits5 acc (3, 3, 3, 3, 3) /\
felem_fits5 r (1, 1, 1, 1, 1) /\
felem_fits5 r2 (2, 2, 2, 2, 2) /\
felem_fits5 r3 (2, 2, 2, 2, 2) /\
felem_fits5 r4 (2, 2, 2, 2, 2) /\
feval5 r2 == Vec.fmul (feval5 r) (feval5 r) /\
feval5 r3 == Vec.fmul (feval5 r2) (feval5 r) /\
feval5 r4 == Vec.compute_r4 (feval5 r).[0])
(ensures fun out ->
let fr4321 = create4 (feval5 r4).[0] (feval5 r3).[0] (feval5 r2).[0] (feval5 r).[0] in
feval5 out == Vec.fmul (feval5 acc) fr4321 /\
felem_fits5 out (1, 2, 1, 1, 2))
let fmul_r4_normalize50 acc fr fr2 fr3 fr4 =
let (r10, r11, r12, r13, r14) = fr in
let (r20, r21, r22, r23, r24) = fr2 in
let (r30, r31, r32, r33, r34) = fr3 in
let (r40, r41, r42, r43, r44) = fr4 in
let (a0, a1, a2, a3, a4) = acc in
let v12120 = vec_interleave_low r20 r10 in
vec_interleave_low_lemma_uint64_4 r20 r10;
let v34340 = vec_interleave_low r40 r30 in
vec_interleave_low_lemma_uint64_4 r40 r30;
let r12340 = vec_interleave_low_n 2 v34340 v12120 in
vec_interleave_low_n_lemma_uint64_4_2 v34340 v12120;
let v12121 = vec_interleave_low r21 r11 in
vec_interleave_low_lemma_uint64_4 r21 r11;
let v34341 = vec_interleave_low r41 r31 in
vec_interleave_low_lemma_uint64_4 r41 r31;
let r12341 = vec_interleave_low_n 2 v34341 v12121 in
vec_interleave_low_n_lemma_uint64_4_2 v34341 v12121;
let v12122 = vec_interleave_low r22 r12 in
vec_interleave_low_lemma_uint64_4 r22 r12;
let v34342 = vec_interleave_low r42 r32 in
vec_interleave_low_lemma_uint64_4 r42 r32;
let r12342 = vec_interleave_low_n 2 v34342 v12122 in
vec_interleave_low_n_lemma_uint64_4_2 v34342 v12122;
let v12123 = vec_interleave_low r23 r13 in
vec_interleave_low_lemma_uint64_4 r23 r13;
let v34343 = vec_interleave_low r43 r33 in
vec_interleave_low_lemma_uint64_4 r43 r33;
let r12343 = vec_interleave_low_n 2 v34343 v12123 in
vec_interleave_low_n_lemma_uint64_4_2 v34343 v12123;
let v12124 = vec_interleave_low r24 r14 in
vec_interleave_low_lemma_uint64_4 r24 r14;
let v34344 = vec_interleave_low r44 r34 in
vec_interleave_low_lemma_uint64_4 r44 r34;
let r12344 = vec_interleave_low_n 2 v34344 v12124 in
vec_interleave_low_n_lemma_uint64_4_2 v34344 v12124;
let fr1234 = (r12340, r12341, r12342, r12343, r12344) in
eq_intro (feval5 fr1234) (create4 (feval5 fr4).[0] (feval5 fr3).[0] (feval5 fr2).[0] (feval5 fr).[0]);
let fr12345 = precomp_r5 #4 fr1234 in
let out = fmul_r5 #4 acc fr1234 fr12345 in
fmul_r5_eval_lemma acc fr1234 fr12345;
fmul_r5_fits_lemma acc fr1234 fr12345;
out
val lemma_fmul_r4_normalize51:
#m:scale32{m <= 2}
-> o:uint64xN 4{felem_fits1 o m} ->
Lemma
(let v00 = vec_interleave_high_n 2 o o in
let v10 = vec_add_mod o v00 in
let v10h = vec_interleave_high v10 v10 in
let v20 = vec_add_mod v10 v10h in
felem_fits1 v20 (4 * m) /\
(uint64xN_v v20).[0] == (uint64xN_v o).[0] + (uint64xN_v o).[1] + (uint64xN_v o).[2] + (uint64xN_v o).[3])
let lemma_fmul_r4_normalize51 #m o =
let v00 = vec_interleave_high_n 2 o o in
vec_interleave_high_n_lemma_uint64_4_2 o o;
let (o0, o1, o2, o3) = ((vec_v o).[0], (vec_v o).[1], (vec_v o).[2], (vec_v o).[3]) in
assert (vec_v v00 == create4 o2 o3 o2 o3);
let v10 = vec_add_mod o v00 in
FStar.Math.Lemmas.modulo_lemma (v o0 + v o2) (pow2 64);
FStar.Math.Lemmas.modulo_lemma (v o1 + v o3) (pow2 64);
assert (v (vec_v v10).[0] == v o0 + v o2);
assert (v (vec_v v10).[1] == v o1 + v o3);
let v10h = vec_interleave_high v10 v10 in
vec_interleave_high_lemma_uint64_4 v10 v10;
assert (v (vec_v v10h).[0] == v (vec_v v10).[1]);
let v20 = vec_add_mod v10 v10h in
FStar.Math.Lemmas.modulo_lemma (v o0 + v o2 + v o1 + v o3) (pow2 64)
val lemma_fmul_r4_normalize51_expand:
v2:felem5 4
-> out:felem5 4 ->
Lemma
(requires
(let (v20, v21, v22, v23, v24) = v2 in
let (o0, o1, o2, o3, o4) = out in
(uint64xN_v v20).[0] == (uint64xN_v o0).[0] + (uint64xN_v o0).[1] + (uint64xN_v o0).[2] + (uint64xN_v o0).[3] /\
(uint64xN_v v21).[0] == (uint64xN_v o1).[0] + (uint64xN_v o1).[1] + (uint64xN_v o1).[2] + (uint64xN_v o1).[3] /\
(uint64xN_v v22).[0] == (uint64xN_v o2).[0] + (uint64xN_v o2).[1] + (uint64xN_v o2).[2] + (uint64xN_v o2).[3] /\
(uint64xN_v v23).[0] == (uint64xN_v o3).[0] + (uint64xN_v o3).[1] + (uint64xN_v o3).[2] + (uint64xN_v o3).[3] /\
(uint64xN_v v24).[0] == (uint64xN_v o4).[0] + (uint64xN_v o4).[1] + (uint64xN_v o4).[2] + (uint64xN_v o4).[3]))
(ensures
(let (v20, v21, v22, v23, v24) = v2 in
let (o0, o1, o2, o3, o4) = out in
(feval5 v2).[0] == Vec.pfadd (Vec.pfadd (Vec.pfadd (feval5 out).[0] (feval5 out).[1]) (feval5 out).[2]) (feval5 out).[3]))
let lemma_fmul_r4_normalize51_expand v2 out =
let (v20, v21, v22, v23, v24) = as_tup64_i v2 0 in
let (o0, o1, o2, o3, o4) = out in
calc (==) {
as_nat5 (v20, v21, v22, v23, v24) % Vec.prime;
(==) { }
(as_nat5 (as_tup64_i out 0) + as_nat5 (as_tup64_i out 1) + as_nat5 (as_tup64_i out 2) + as_nat5 (as_tup64_i out 3)) % Vec.prime;
(==) { FStar.Math.Lemmas.lemma_mod_plus_distr_l (as_nat5 (as_tup64_i out 0) + as_nat5 (as_tup64_i out 1))
(as_nat5 (as_tup64_i out 2) + as_nat5 (as_tup64_i out 3)) Vec.prime }
((as_nat5 (as_tup64_i out 0) + as_nat5 (as_tup64_i out 1)) % Vec.prime + as_nat5 (as_tup64_i out 2) + as_nat5 (as_tup64_i out 3)) % Vec.prime;
(==) { FStar.Math.Lemmas.modulo_distributivity (as_nat5 (as_tup64_i out 0)) (as_nat5 (as_tup64_i out 1)) Vec.prime }
(((feval5 out).[0] + (feval5 out).[1]) % Vec.prime + as_nat5 (as_tup64_i out 2) + as_nat5 (as_tup64_i out 3)) % Vec.prime;
(==) { FStar.Math.Lemmas.lemma_mod_plus_distr_l (((feval5 out).[0] + (feval5 out).[1]) % Vec.prime + as_nat5 (as_tup64_i out 2))
(as_nat5 (as_tup64_i out 3)) Vec.prime }
((((feval5 out).[0] + (feval5 out).[1]) % Vec.prime + as_nat5 (as_tup64_i out 2)) % Vec.prime + as_nat5 (as_tup64_i out 3)) % Vec.prime;
(==) { FStar.Math.Lemmas.lemma_mod_plus_distr_r (((feval5 out).[0] + (feval5 out).[1]) % Vec.prime) (as_nat5 (as_tup64_i out 2)) Vec.prime }
((((feval5 out).[0] + (feval5 out).[1]) % Vec.prime + (feval5 out).[2]) % Vec.prime + as_nat5 (as_tup64_i out 3)) % Vec.prime;
(==) { FStar.Math.Lemmas.lemma_mod_plus_distr_r ((((feval5 out).[0] + (feval5 out).[1]) % Vec.prime + (feval5 out).[2]) % Vec.prime)
(as_nat5 (as_tup64_i out 3)) Vec.prime }
((((feval5 out).[0] + (feval5 out).[1]) % Vec.prime + (feval5 out).[2]) % Vec.prime + (feval5 out).[3]) % Vec.prime;
};
assert ((feval5 v2).[0] ==
((((feval5 out).[0] + (feval5 out).[1]) % Vec.prime + (feval5 out).[2]) % Vec.prime + (feval5 out).[3]) % Vec.prime)
val fmul_r4_normalize51: a:felem5 4 ->
Pure (felem5 4)
(requires felem_fits5 a (1, 2, 1, 1, 2))
(ensures fun res ->
felem_fits5 res (4, 8, 4, 4, 8) /\
(feval5 res).[0] == Vec.pfadd (Vec.pfadd (Vec.pfadd (feval5 a).[0] (feval5 a).[1]) (feval5 a).[2]) (feval5 a).[3])
let fmul_r4_normalize51 fa =
let (o0, o1, o2, o3, o4) = fa in
let v00 = vec_interleave_high_n 2 o0 o0 in
let v10 = vec_add_mod o0 v00 in
let v10h = vec_interleave_high v10 v10 in
let v20 = vec_add_mod v10 v10h in
lemma_fmul_r4_normalize51 #1 o0;
let v01 = vec_interleave_high_n 2 o1 o1 in
let v11 = vec_add_mod o1 v01 in
let v11h = vec_interleave_high v11 v11 in
let v21 = vec_add_mod v11 v11h in
lemma_fmul_r4_normalize51 #2 o1;
let v02 = vec_interleave_high_n 2 o2 o2 in
let v12 = vec_add_mod o2 v02 in
let v12h = vec_interleave_high v12 v12 in
let v22 = vec_add_mod v12 v12h in
lemma_fmul_r4_normalize51 #1 o2;
let v03 = vec_interleave_high_n 2 o3 o3 in
let v13 = vec_add_mod o3 v03 in
let v13h = vec_interleave_high v13 v13 in
let v23 = vec_add_mod v13 v13h in
lemma_fmul_r4_normalize51 #1 o3;
let v04 = vec_interleave_high_n 2 o4 o4 in
let v14 = vec_add_mod o4 v04 in
let v14h = vec_interleave_high v14 v14 in
let v24 = vec_add_mod v14 v14h in
lemma_fmul_r4_normalize51 #2 o4;
let res = (v20, v21, v22, v23, v24) in
lemma_fmul_r4_normalize51_expand res fa;
res
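(* Each limb of fa is summed across the four lanes with two interleave-and-add
   steps (lemma_fmul_r4_normalize51), and lemma_fmul_r4_normalize51_expand then
   folds the per-limb sums into
     (feval5 res).[0] = (a_0 + a_1 + a_2 + a_3) mod p,
   i.e. the horizontal sum of the four accumulator lanes. *)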
val fmul_r4_normalize5_lemma:
acc:felem5 4
-> r:felem5 4
-> r_5:felem5 4
-> r4:felem5 4 ->
Lemma
(requires
felem_fits5 acc (3, 3, 3, 3, 3) /\
felem_fits5 r (1, 1, 1, 1, 1) /\
felem_fits5 r4 (2, 2, 2, 2, 2) /\
r_5 == precomp_r5 r /\
feval5 r4 == Vec.compute_r4 (feval5 r).[0])
(ensures
(let out = fmul_r4_normalize5 acc r r_5 r4 in
felem_fits5 out (2, 1, 1, 1, 1) /\
(feval5 out).[0] == Vec.normalize_4 (feval5 r).[0] (feval5 acc)))
[SMTPat (fmul_r4_normalize5 acc r r_5 r4)]
#restart-solver
#push-options "--z3rlimit 500"
let fmul_r4_normalize5_lemma acc fr fr_5 fr4 =
let fr2 = fmul_r5 #4 fr fr fr_5 in
let fr3 = fmul_r5 #4 fr2 fr fr_5 in
let out = fmul_r4_normalize50 acc fr fr2 fr3 fr4 in
let v2 = fmul_r4_normalize51 out in
let res = carry_full_felem5 v2 in
carry_full_felem5_lemma v2
#pop-options
val load_felem5_lemma:
#w:lanes
-> lo:uint64xN w
-> hi:uint64xN w ->
Lemma
(let f = load_felem5 #w lo hi in
felem_fits5 f (1, 1, 1, 1, 1) /\
felem_less5 f (pow2 128) /\
feval5 f == createi #Vec.pfelem w
(fun i -> (uint64xN_v hi).[i] * pow2 64 + (uint64xN_v lo).[i]))
let load_felem5_lemma #w lo hi =
let f = load_felem5 #w lo hi in
assert_norm (pow2 64 * pow2 64 = pow2 128);
assert_norm (pow2 128 < Vec.prime);
let res = createi #Vec.pfelem w
(fun i -> (uint64xN_v hi).[i] * pow2 64 + (uint64xN_v lo).[i]) in
match w with
| 1 ->
load_felem5_lemma_i #w lo hi 0;
eq_intro (feval5 f) res
| 2 ->
load_felem5_lemma_i #w lo hi 0;
load_felem5_lemma_i #w lo hi 1;
eq_intro (feval5 f) res
| 4 ->
load_felem5_lemma_i #w lo hi 0;
load_felem5_lemma_i #w lo hi 1;
load_felem5_lemma_i #w lo hi 2;
load_felem5_lemma_i #w lo hi 3;
eq_intro (feval5 f) res
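(* Since 2^128 < p, the load is exact: lane i of the result evaluates to
     (feval5 f).[i] = hi_i * 2^64 + lo_i,
   and no modular reduction actually takes place at this point. *)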
val load_felem5_4_interleave: lo:uint64xN 4 -> hi:uint64xN 4 -> Lemma
(let m0 = vec_interleave_low_n 2 lo hi in
let m1 = vec_interleave_high_n 2 lo hi in
let m2 = cast U64 4 (vec_shift_right (cast U128 2 m0) 48ul) in
let m3 = cast U64 4 (vec_shift_right (cast U128 2 m1) 48ul) in
let m4 = vec_interleave_high m0 m1 in
let t0 = vec_interleave_low m0 m1 in
let t3 = vec_interleave_low m2 m3 in
vec_v m4 == create4 (vec_v lo).[1] (vec_v lo).[3] (vec_v hi).[1] (vec_v hi).[3] /\
vec_v t0 == create4 (vec_v lo).[0] (vec_v lo).[2] (vec_v hi).[0] (vec_v hi).[2] /\
t3 == vec_or (vec_shift_right t0 48ul) (vec_shift_left m4 16ul))
let load_felem5_4_interleave lo hi =
let m0 = vec_interleave_low_n 2 lo hi in
vec_interleave_low_n_lemma_uint64_4_2 lo hi;
//assert (vec_v m0 == create4 (vec_v lo).[0] (vec_v lo).[1] (vec_v hi).[0] (vec_v hi).[1]);
let m1 = vec_interleave_high_n 2 lo hi in
vec_interleave_high_n_lemma_uint64_4_2 lo hi;
//assert (vec_v m1 == create4 (vec_v lo).[2] (vec_v lo).[3] (vec_v hi).[2] (vec_v hi).[3]);
let m4 = vec_interleave_high m0 m1 in
vec_interleave_high_lemma_uint64_4 m0 m1;
//assert (vec_v m4 == create4 (vec_v m0).[1] (vec_v m1).[1] (vec_v m0).[3] (vec_v m1).[3]);
assert (vec_v m4 == create4 (vec_v lo).[1] (vec_v lo).[3] (vec_v hi).[1] (vec_v hi).[3]);
let t0 = vec_interleave_low m0 m1 in
vec_interleave_low_lemma_uint64_4 m0 m1;
//assert (vec_v t0 == create4 (vec_v m0).[0] (vec_v m1).[0] (vec_v m0).[2] (vec_v m1).[2]);
assert (vec_v t0 == create4 (vec_v lo).[0] (vec_v lo).[2] (vec_v hi).[0] (vec_v hi).[2]);
let m2 = cast U64 4 (vec_shift_right (cast U128 2 m0) 48ul) in
vec_shift_right_uint128_small2 m0 48ul;
assert ((vec_v m2).[0] == (((vec_v lo).[0] >>. 48ul) |. ((vec_v lo).[1] <<. 16ul)));
assert ((vec_v m2).[2] == (((vec_v hi).[0] >>. 48ul) |. ((vec_v hi).[1] <<. 16ul)));
let m3 = cast U64 4 (vec_shift_right (cast U128 2 m1) 48ul) in
vec_shift_right_uint128_small2 m1 48ul;
assert ((vec_v m3).[0] == (((vec_v lo).[2] >>. 48ul) |. ((vec_v lo).[3] <<. 16ul)));
assert ((vec_v m3).[2] == (((vec_v hi).[2] >>. 48ul) |. ((vec_v hi).[3] <<. 16ul)));
let t3 = vec_interleave_low m2 m3 in
vec_interleave_low_lemma_uint64_4 m2 m3;
eq_intro (vec_v t3) (vec_v (vec_or (vec_shift_right t0 48ul) (vec_shift_left m4 16ul)));
vecv_extensionality t3 (vec_or (vec_shift_right t0 48ul) (vec_shift_left m4 16ul))
noextract
val load_felem5_4_compact: lo:uint64xN 4 -> hi:uint64xN 4 -> felem5 4
let load_felem5_4_compact lo hi =
let mask26 = mask26 4 in
let t3 = vec_or (vec_shift_right lo 48ul) (vec_shift_left hi 16ul) in
let o0 = vec_and lo mask26 in
let o1 = vec_and (vec_shift_right lo 26ul) mask26 in
let o2 = vec_and (vec_shift_right t3 4ul) mask26 in
let o3 = vec_and (vec_shift_right t3 30ul) mask26 in
let o4 = vec_shift_right hi 40ul in
(o0, o1, o2, o3, o4)
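(* Per-lane bit layout behind load_felem5_4_compact, writing the 128-bit input as
   hi * 2^64 + lo:
     o0 = lo &. mask26                        bits   0..25
     o1 = (lo >>. 26ul) &. mask26             bits  26..51
     t3 = (lo >>. 48ul) |. (hi <<. 16ul)      bits  48..111
     o2 = (t3 >>. 4ul) &. mask26              bits  52..77
     o3 = (t3 >>. 30ul) &. mask26             bits  78..103
     o4 = hi >>. 40ul                         bits 104..127
   Only bits 0..55 of t3 are consumed, so truncating hi <<. 16ul to 64 bits loses
   nothing, and recombining the limbs gives
     o0 + o1 * 2^26 + o2 * 2^52 + o3 * 2^78 + o4 * 2^104 = hi * 2^64 + lo. *)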
val load_felem5_4_compact_lemma_i: lo:uint64xN 4 -> hi:uint64xN 4 -> i:nat{i < 4} ->
Lemma
(let f = as_tup64_i (load_felem5_4_compact lo hi) i in
tup64_fits5 f (1, 1, 1, 1, 1) /\
as_nat5 f < pow2 128 /\
as_nat5 f % Vec.prime == (uint64xN_v hi).[i] * pow2 64 + (uint64xN_v lo).[i]) | false | false | Hacl.Spec.Poly1305.Field32xN.Lemmas.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 100,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val load_felem5_4_compact_lemma_i: lo:uint64xN 4 -> hi:uint64xN 4 -> i:nat{i < 4} ->
Lemma
(let f = as_tup64_i (load_felem5_4_compact lo hi) i in
tup64_fits5 f (1, 1, 1, 1, 1) /\
as_nat5 f < pow2 128 /\
as_nat5 f % Vec.prime == (uint64xN_v hi).[i] * pow2 64 + (uint64xN_v lo).[i]) | [] | Hacl.Spec.Poly1305.Field32xN.Lemmas.load_felem5_4_compact_lemma_i | {
"file_name": "code/poly1305/Hacl.Spec.Poly1305.Field32xN.Lemmas.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} |
lo: Hacl.Spec.Poly1305.Field32xN.uint64xN 4 ->
hi: Hacl.Spec.Poly1305.Field32xN.uint64xN 4 ->
i: Prims.nat{i < 4}
-> FStar.Pervasives.Lemma
(ensures
(let f =
Hacl.Spec.Poly1305.Field32xN.as_tup64_i (Hacl.Spec.Poly1305.Field32xN.Lemmas.load_felem5_4_compact
lo
hi)
i
in
Hacl.Spec.Poly1305.Field32xN.tup64_fits5 f (1, 1, 1, 1, 1) /\
Hacl.Spec.Poly1305.Field32xN.as_nat5 f < Prims.pow2 128 /\
Hacl.Spec.Poly1305.Field32xN.as_nat5 f % Hacl.Spec.Poly1305.Vec.prime ==
(Hacl.Spec.Poly1305.Field32xN.uint64xN_v hi).[ i ] * Prims.pow2 64 +
(Hacl.Spec.Poly1305.Field32xN.uint64xN_v lo).[ i ])) | {
"end_col": 58,
"end_line": 720,
"start_col": 2,
"start_line": 719
} |
FStar.Pervasives.Lemma | val fadd5_fits_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (2,2,2,2,2)}
-> f2:felem5 w{felem_fits5 f2 (1,1,1,1,1)} ->
Lemma (felem_fits5 (fadd5 f1 f2) (3,3,3,3,3))
[SMTPat (fadd5 f1 f2)] | [
{
"abbrev": true,
"full_module": "Hacl.Spec.Poly1305.Vec",
"short_module": "Vec"
},
{
"abbrev": false,
"full_module": "Hacl.Poly1305.Field32xN.Lemmas2",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Poly1305.Field32xN.Lemmas1",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Poly1305.Field32xN.Lemmas0",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305.Field32xN",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305.Field32xN",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305.Field32xN",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let fadd5_fits_lemma #w f1 f2 =
let (f10, f11, f12, f13, f14) = f1 in
let (f20, f21, f22, f23, f24) = f2 in
let o = fadd5 f1 f2 in
vec_add_mod_lemma f10 f20;
vec_add_mod_lemma f11 f21;
vec_add_mod_lemma f12 f22;
vec_add_mod_lemma f13 f23;
vec_add_mod_lemma f14 f24 | val fadd5_fits_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (2,2,2,2,2)}
-> f2:felem5 w{felem_fits5 f2 (1,1,1,1,1)} ->
Lemma (felem_fits5 (fadd5 f1 f2) (3,3,3,3,3))
[SMTPat (fadd5 f1 f2)]
let fadd5_fits_lemma #w f1 f2 = | false | null | true | let f10, f11, f12, f13, f14 = f1 in
let f20, f21, f22, f23, f24 = f2 in
let o = fadd5 f1 f2 in
vec_add_mod_lemma f10 f20;
vec_add_mod_lemma f11 f21;
vec_add_mod_lemma f12 f22;
vec_add_mod_lemma f13 f23;
vec_add_mod_lemma f14 f24 | {
"checked_file": "Hacl.Spec.Poly1305.Field32xN.Lemmas.fst.checked",
"dependencies": [
"prims.fst.checked",
"Lib.Sequence.fsti.checked",
"Lib.IntVector.fsti.checked",
"Lib.IntTypes.fsti.checked",
"Hacl.Spec.Poly1305.Vec.fst.checked",
"Hacl.Spec.Poly1305.Field32xN.fst.checked",
"Hacl.Poly1305.Field32xN.Lemmas2.fst.checked",
"Hacl.Poly1305.Field32xN.Lemmas1.fst.checked",
"Hacl.Poly1305.Field32xN.Lemmas0.fst.checked",
"Hacl.Impl.Poly1305.Lemmas.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Classical.fsti.checked",
"FStar.Calc.fsti.checked"
],
"interface_file": false,
"source_file": "Hacl.Spec.Poly1305.Field32xN.Lemmas.fst"
} | [
"lemma"
] | [
"Hacl.Spec.Poly1305.Field32xN.lanes",
"Hacl.Spec.Poly1305.Field32xN.felem5",
"Hacl.Spec.Poly1305.Field32xN.felem_fits5",
"FStar.Pervasives.Native.Mktuple5",
"Prims.nat",
"Hacl.Spec.Poly1305.Field32xN.uint64xN",
"Lib.IntVector.vec_add_mod_lemma",
"Lib.IntTypes.U64",
"Prims.unit",
"Hacl.Spec.Poly1305.Field32xN.fadd5"
] | [] | module Hacl.Spec.Poly1305.Field32xN.Lemmas
open Lib.IntTypes
open Lib.IntVector
open Lib.Sequence
open FStar.Mul
open Hacl.Spec.Poly1305.Field32xN
open Hacl.Poly1305.Field32xN.Lemmas0
open Hacl.Poly1305.Field32xN.Lemmas1
open Hacl.Poly1305.Field32xN.Lemmas2
module Vec = Hacl.Spec.Poly1305.Vec
#set-options "--z3rlimit 100 --max_fuel 0 --initial_ifuel 1 --max_ifuel 1 --using_facts_from '* -FStar.Seq'"
val lemma_feval_is_fas_nat_i:
#w:lanes
-> f:felem5 w{felem_less5 f (pow2 128)}
-> i:size_nat{i < w} ->
Lemma ((feval5 f).[i] == (fas_nat5 f).[i])
let lemma_feval_is_fas_nat_i #w f i =
assert_norm (pow2 128 < Vec.prime);
assert ((feval5 f).[i] == (as_nat5 (transpose f).[i]) % Vec.prime);
FStar.Math.Lemmas.modulo_lemma (as_nat5 (transpose f).[i]) Vec.prime
val lemma_feval_is_fas_nat:
#w:lanes
-> f:felem5 w{felem_less5 f (pow2 128)} ->
Lemma (forall (i:nat). i < w ==> (fas_nat5 f).[i] == (feval5 f).[i])
let lemma_feval_is_fas_nat #w f =
FStar.Classical.forall_intro (lemma_feval_is_fas_nat_i #w f)
val precomp_r5_fits_lemma:
#w:lanes
-> r:felem5 w{felem_fits5 r (1, 1, 1, 1, 1)} ->
Lemma (felem_fits5 (precomp_r5 #w r) (5, 5, 5, 5, 5))
[SMTPat (precomp_r5 #w r)]
let precomp_r5_fits_lemma #w r =
FStar.Classical.forall_intro (precomp_r5_as_tup64 #w r)
val precomp_r5_fits_lemma2:
#w:lanes
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)} ->
Lemma (felem_fits5 (precomp_r5 #w r) (10, 10, 10, 10, 10))
[SMTPat (precomp_r5 #w r)]
let precomp_r5_fits_lemma2 #w r =
FStar.Classical.forall_intro (precomp_r5_as_tup64 #w r)
val precomp_r5_zeros: w:lanes -> Lemma
(let r = (zero w, zero w, zero w, zero w, zero w) in
precomp_r5 r == (zero w, zero w, zero w, zero w, zero w))
let precomp_r5_zeros w =
let r = (zero w, zero w, zero w, zero w, zero w) in
let (r0, r1, r2, r3, r4) = precomp_r5 r in
let aux (i:nat{i < w}) : Lemma ((vec_v (vec_smul_mod (zero w) (u64 5))).[i] == u64 0) = () in
Classical.forall_intro aux;
eq_intro (vec_v (vec_smul_mod (zero w) (u64 5))) (vec_v (zero w));
vecv_extensionality (vec_smul_mod (zero w) (u64 5)) (zero w)
val fadd5_fits_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (2,2,2,2,2)}
-> f2:felem5 w{felem_fits5 f2 (1,1,1,1,1)} ->
Lemma (felem_fits5 (fadd5 f1 f2) (3,3,3,3,3))
[SMTPat (fadd5 f1 f2)] | false | false | Hacl.Spec.Poly1305.Field32xN.Lemmas.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 100,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val fadd5_fits_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (2,2,2,2,2)}
-> f2:felem5 w{felem_fits5 f2 (1,1,1,1,1)} ->
Lemma (felem_fits5 (fadd5 f1 f2) (3,3,3,3,3))
[SMTPat (fadd5 f1 f2)] | [] | Hacl.Spec.Poly1305.Field32xN.Lemmas.fadd5_fits_lemma | {
"file_name": "code/poly1305/Hacl.Spec.Poly1305.Field32xN.Lemmas.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} |
f1:
Hacl.Spec.Poly1305.Field32xN.felem5 w
{Hacl.Spec.Poly1305.Field32xN.felem_fits5 f1 (2, 2, 2, 2, 2)} ->
f2:
Hacl.Spec.Poly1305.Field32xN.felem5 w
{Hacl.Spec.Poly1305.Field32xN.felem_fits5 f2 (1, 1, 1, 1, 1)}
-> FStar.Pervasives.Lemma
(ensures
Hacl.Spec.Poly1305.Field32xN.felem_fits5 (Hacl.Spec.Poly1305.Field32xN.fadd5 f1 f2)
(3, 3, 3, 3, 3)) [SMTPat (Hacl.Spec.Poly1305.Field32xN.fadd5 f1 f2)] | {
"end_col": 27,
"end_line": 87,
"start_col": 31,
"start_line": 79
} |
FStar.Pervasives.Lemma | val load_acc5_2_lemma:
f:felem5 2{felem_fits5 f (2, 2, 2, 2, 2)}
-> e:felem5 2{felem_fits5 e (1, 1, 1, 1, 1)} ->
Lemma
(let res = load_acc5_2 f e in
felem_fits5 res (3, 3, 3, 3, 3) /\
feval5 res == Vec.fadd (create2 (feval5 f).[0] 0) (feval5 e))
[SMTPat (load_acc5_2 f e)] | [
{
"abbrev": true,
"full_module": "Hacl.Spec.Poly1305.Vec",
"short_module": "Vec"
},
{
"abbrev": false,
"full_module": "Hacl.Poly1305.Field32xN.Lemmas2",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Poly1305.Field32xN.Lemmas1",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Poly1305.Field32xN.Lemmas0",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305.Field32xN",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305.Field32xN",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305.Field32xN",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let load_acc5_2_lemma f e =
let (f0, f1, f2, f3, f4) = f in
let r0 = vec_set f0 1ul (u64 0) in
let r1 = vec_set f1 1ul (u64 0) in
let r2 = vec_set f2 1ul (u64 0) in
let r3 = vec_set f3 1ul (u64 0) in
let r4 = vec_set f4 1ul (u64 0) in
let r = (r0, r1, r2, r3, r4) in
//assert ((feval5 r).[0] == (feval5 f).[0]);
assert ((feval5 r).[1] == 0);
eq_intro (feval5 r) (create2 (feval5 f).[0] 0) | val load_acc5_2_lemma:
f:felem5 2{felem_fits5 f (2, 2, 2, 2, 2)}
-> e:felem5 2{felem_fits5 e (1, 1, 1, 1, 1)} ->
Lemma
(let res = load_acc5_2 f e in
felem_fits5 res (3, 3, 3, 3, 3) /\
feval5 res == Vec.fadd (create2 (feval5 f).[0] 0) (feval5 e))
[SMTPat (load_acc5_2 f e)]
let load_acc5_2_lemma f e = | false | null | true | let f0, f1, f2, f3, f4 = f in
let r0 = vec_set f0 1ul (u64 0) in
let r1 = vec_set f1 1ul (u64 0) in
let r2 = vec_set f2 1ul (u64 0) in
let r3 = vec_set f3 1ul (u64 0) in
let r4 = vec_set f4 1ul (u64 0) in
let r = (r0, r1, r2, r3, r4) in
assert ((feval5 r).[ 1 ] == 0);
eq_intro (feval5 r) (create2 (feval5 f).[ 0 ] 0) | {
"checked_file": "Hacl.Spec.Poly1305.Field32xN.Lemmas.fst.checked",
"dependencies": [
"prims.fst.checked",
"Lib.Sequence.fsti.checked",
"Lib.IntVector.fsti.checked",
"Lib.IntTypes.fsti.checked",
"Hacl.Spec.Poly1305.Vec.fst.checked",
"Hacl.Spec.Poly1305.Field32xN.fst.checked",
"Hacl.Poly1305.Field32xN.Lemmas2.fst.checked",
"Hacl.Poly1305.Field32xN.Lemmas1.fst.checked",
"Hacl.Poly1305.Field32xN.Lemmas0.fst.checked",
"Hacl.Impl.Poly1305.Lemmas.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Classical.fsti.checked",
"FStar.Calc.fsti.checked"
],
"interface_file": false,
"source_file": "Hacl.Spec.Poly1305.Field32xN.Lemmas.fst"
} | [
"lemma"
] | [
"Hacl.Spec.Poly1305.Field32xN.felem5",
"Hacl.Spec.Poly1305.Field32xN.felem_fits5",
"FStar.Pervasives.Native.Mktuple5",
"Prims.nat",
"Hacl.Spec.Poly1305.Field32xN.uint64xN",
"Lib.Sequence.eq_intro",
"Hacl.Spec.Poly1305.Vec.pfelem",
"Hacl.Spec.Poly1305.Field32xN.feval5",
"Lib.Sequence.create2",
"Lib.Sequence.op_String_Access",
"Prims.unit",
"Prims._assert",
"Prims.eq2",
"Prims.int",
"FStar.Pervasives.Native.tuple5",
"Lib.IntVector.vec_t",
"Lib.IntTypes.U64",
"Lib.Sequence.lseq",
"Lib.IntTypes.int_t",
"Lib.IntTypes.SEC",
"Lib.IntVector.vec_v",
"Lib.Sequence.upd",
"Lib.IntTypes.v",
"Lib.IntTypes.U32",
"Lib.IntTypes.PUB",
"FStar.UInt32.uint_to_t",
"FStar.UInt32.t",
"Lib.IntTypes.mk_int",
"Lib.IntVector.vec_set",
"FStar.UInt32.__uint_to_t",
"Lib.IntTypes.u64"
] | [] | module Hacl.Spec.Poly1305.Field32xN.Lemmas
open Lib.IntTypes
open Lib.IntVector
open Lib.Sequence
open FStar.Mul
open Hacl.Spec.Poly1305.Field32xN
open Hacl.Poly1305.Field32xN.Lemmas0
open Hacl.Poly1305.Field32xN.Lemmas1
open Hacl.Poly1305.Field32xN.Lemmas2
module Vec = Hacl.Spec.Poly1305.Vec
#set-options "--z3rlimit 100 --max_fuel 0 --initial_ifuel 1 --max_ifuel 1 --using_facts_from '* -FStar.Seq'"
val lemma_feval_is_fas_nat_i:
#w:lanes
-> f:felem5 w{felem_less5 f (pow2 128)}
-> i:size_nat{i < w} ->
Lemma ((feval5 f).[i] == (fas_nat5 f).[i])
let lemma_feval_is_fas_nat_i #w f i =
assert_norm (pow2 128 < Vec.prime);
assert ((feval5 f).[i] == (as_nat5 (transpose f).[i]) % Vec.prime);
FStar.Math.Lemmas.modulo_lemma (as_nat5 (transpose f).[i]) Vec.prime
val lemma_feval_is_fas_nat:
#w:lanes
-> f:felem5 w{felem_less5 f (pow2 128)} ->
Lemma (forall (i:nat). i < w ==> (fas_nat5 f).[i] == (feval5 f).[i])
let lemma_feval_is_fas_nat #w f =
FStar.Classical.forall_intro (lemma_feval_is_fas_nat_i #w f)
val precomp_r5_fits_lemma:
#w:lanes
-> r:felem5 w{felem_fits5 r (1, 1, 1, 1, 1)} ->
Lemma (felem_fits5 (precomp_r5 #w r) (5, 5, 5, 5, 5))
[SMTPat (precomp_r5 #w r)]
let precomp_r5_fits_lemma #w r =
FStar.Classical.forall_intro (precomp_r5_as_tup64 #w r)
val precomp_r5_fits_lemma2:
#w:lanes
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)} ->
Lemma (felem_fits5 (precomp_r5 #w r) (10, 10, 10, 10, 10))
[SMTPat (precomp_r5 #w r)]
let precomp_r5_fits_lemma2 #w r =
FStar.Classical.forall_intro (precomp_r5_as_tup64 #w r)
val precomp_r5_zeros: w:lanes -> Lemma
(let r = (zero w, zero w, zero w, zero w, zero w) in
precomp_r5 r == (zero w, zero w, zero w, zero w, zero w))
let precomp_r5_zeros w =
let r = (zero w, zero w, zero w, zero w, zero w) in
let (r0, r1, r2, r3, r4) = precomp_r5 r in
let aux (i:nat{i < w}) : Lemma ((vec_v (vec_smul_mod (zero w) (u64 5))).[i] == u64 0) = () in
Classical.forall_intro aux;
eq_intro (vec_v (vec_smul_mod (zero w) (u64 5))) (vec_v (zero w));
vecv_extensionality (vec_smul_mod (zero w) (u64 5)) (zero w)
val fadd5_fits_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (2,2,2,2,2)}
-> f2:felem5 w{felem_fits5 f2 (1,1,1,1,1)} ->
Lemma (felem_fits5 (fadd5 f1 f2) (3,3,3,3,3))
[SMTPat (fadd5 f1 f2)]
let fadd5_fits_lemma #w f1 f2 =
let (f10, f11, f12, f13, f14) = f1 in
let (f20, f21, f22, f23, f24) = f2 in
let o = fadd5 f1 f2 in
vec_add_mod_lemma f10 f20;
vec_add_mod_lemma f11 f21;
vec_add_mod_lemma f12 f22;
vec_add_mod_lemma f13 f23;
vec_add_mod_lemma f14 f24
val fadd5_eval_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (2,2,2,2,2)}
-> f2:felem5 w{felem_fits5 f2 (1,1,1,1,1)} ->
Lemma (feval5 (fadd5 f1 f2) == map2 Vec.pfadd (feval5 f1) (feval5 f2))
[SMTPat (fadd5 f1 f2)]
let fadd5_eval_lemma #w f1 f2 =
let o = fadd5 f1 f2 in
FStar.Classical.forall_intro (fadd5_eval_lemma_i f1 f2);
eq_intro (feval5 o) (map2 Vec.pfadd (feval5 f1) (feval5 f2))
val mul_felem5_fits_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10)} ->
Lemma (felem_wide_fits5 (mul_felem5 #w f1 r r5) (126, 102, 78, 54, 30))
[SMTPat (mul_felem5 #w f1 r r5)]
let mul_felem5_fits_lemma #w f1 r r5 =
let (r0, r1, r2, r3, r4) = r in
let (f10, f11, f12, f13, f14) = f1 in
let (r50, r51, r52, r53, r54) = r5 in
let (a0,a1,a2,a3,a4) = smul_felem5 #w f10 (r0,r1,r2,r3,r4) in
smul_felem5_fits_lemma #w #3 #(2,2,2,2,2) f10 (r0,r1,r2,r3,r4);
let (a10,a11,a12,a13,a14) = smul_add_felem5 #w f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4) in
smul_add_felem5_fits_lemma #w #3 #(10,2,2,2,2) #(6,6,6,6,6) f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4);
let (a20,a21,a22,a23,a24) = smul_add_felem5 #w f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14) in
smul_add_felem5_fits_lemma #w #3 #(10,10,2,2,2) #(36,12,12,12,12) f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14);
let (a30,a31,a32,a33,a34) = smul_add_felem5 #w f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24) in
smul_add_felem5_fits_lemma #w #3 #(10,10,10,2,2) #(66,42,18,18,18) f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24);
let (a40,a41,a42,a43,a44) = smul_add_felem5 #w f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34) in
smul_add_felem5_fits_lemma #w #3 #(10,10,10,10,2) #(96,72,48,24,24) f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34)
val mul_felem5_eval_lemma_i:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10) /\ r5 == precomp_r5 r}
-> i:nat{i < w} ->
Lemma ((feval5 (mul_felem5 #w f1 r r5)).[i] == (feval5 f1).[i] `Vec.pfmul` (feval5 r).[i])
let mul_felem5_eval_lemma_i #w f1 r r5 i =
let (r0, r1, r2, r3, r4) = r in
let (f10, f11, f12, f13, f14) = f1 in
let (r50, r51, r52, r53, r54) = r5 in
let (a0,a1,a2,a3,a4) = smul_felem5 #w f10 (r0,r1,r2,r3,r4) in
smul_felem5_eval_lemma #w #3 #(2,2,2,2,2) f10 (r0,r1,r2,r3,r4);
smul_felem5_fits_lemma #w #3 #(2,2,2,2,2) f10 (r0,r1,r2,r3,r4);
assert ((fas_nat5 (a0,a1,a2,a3,a4)).[i] == (uint64xN_v f10).[i] * (fas_nat5 (r0,r1,r2,r3,r4)).[i]);
let (a10,a11,a12,a13,a14) = smul_add_felem5 #w f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4) in
smul_add_felem5_eval_lemma #w #3 #(10,2,2,2,2) #(6,6,6,6,6) f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4);
smul_add_felem5_fits_lemma #w #3 #(10,2,2,2,2) #(6,6,6,6,6) f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4);
assert ((fas_nat5 (a10,a11,a12,a13,a14)).[i] == (fas_nat5 (a0,a1,a2,a3,a4)).[i] + (uint64xN_v f11).[i] * (fas_nat5 (r54,r0,r1,r2,r3)).[i]);
let (a20,a21,a22,a23,a24) = smul_add_felem5 #w f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14) in
smul_add_felem5_eval_lemma #w #3 #(10,10,2,2,2) #(36,12,12,12,12) f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14);
smul_add_felem5_fits_lemma #w #3 #(10,10,2,2,2) #(36,12,12,12,12) f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14);
assert ((fas_nat5 (a20,a21,a22,a23,a24)).[i] == (fas_nat5 (a10,a11,a12,a13,a14)).[i] + (uint64xN_v f12).[i] * (fas_nat5 (r53,r54,r0,r1,r2)).[i]);
let (a30,a31,a32,a33,a34) = smul_add_felem5 #w f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24) in
smul_add_felem5_eval_lemma #w #3 #(10,10,10,2,2) #(66,42,18,18,18) f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24);
smul_add_felem5_fits_lemma #w #3 #(10,10,10,2,2) #(66,42,18,18,18) f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24);
assert ((fas_nat5 (a30,a31,a32,a33,a34)).[i] == (fas_nat5 (a20,a21,a22,a23,a24)).[i] + (uint64xN_v f13).[i] * (fas_nat5 (r52,r53,r54,r0,r1)).[i]);
let (a40,a41,a42,a43,a44) = smul_add_felem5 #w f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34) in
smul_add_felem5_eval_lemma #w #3 #(10,10,10,10,2) #(96,72,48,24,24) f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34);
smul_add_felem5_fits_lemma #w #3 #(10,10,10,10,2) #(96,72,48,24,24) f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34);
assert ((fas_nat5 (a40,a41,a42,a43,a44)).[i] == (fas_nat5 (a30,a31,a32,a33,a34)).[i] + (uint64xN_v f14).[i] * (fas_nat5 (r51,r52,r53,r54,r0)).[i]);
assert ((fas_nat5 (a40,a41,a42,a43,a44)).[i] ==
(uint64xN_v f10).[i] * (fas_nat5 (r0,r1,r2,r3,r4)).[i] +
(uint64xN_v f11).[i] * (fas_nat5 (r54,r0,r1,r2,r3)).[i] +
(uint64xN_v f12).[i] * (fas_nat5 (r53,r54,r0,r1,r2)).[i] +
(uint64xN_v f13).[i] * (fas_nat5 (r52,r53,r54,r0,r1)).[i] +
(uint64xN_v f14).[i] * (fas_nat5 (r51,r52,r53,r54,r0)).[i]);
mul_felem5_eval_as_tup64 #w f1 r r5 i;
mul_felem5_lemma (as_tup64_i f1 i) (as_tup64_i r i)
val mul_felem5_eval_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10) /\ r5 == precomp_r5 r} ->
Lemma (feval5 (mul_felem5 #w f1 r r5) == map2 (Vec.pfmul) (feval5 f1) (feval5 r))
[SMTPat (mul_felem5 #w f1 r r5)]
let mul_felem5_eval_lemma #w f1 r r5 =
let tmp = map2 (Vec.pfmul) (feval5 f1) (feval5 r) in
FStar.Classical.forall_intro (mul_felem5_eval_lemma_i #w f1 r r5);
eq_intro (feval5 (mul_felem5 #w f1 r r5)) tmp
val fmul_r5_fits_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10)} ->
Lemma (felem_fits5 (fmul_r5 #w f1 r r5) (1, 2, 1, 1, 2))
[SMTPat (fmul_r5 #w f1 r r5)]
let fmul_r5_fits_lemma #w f1 r r5 =
let tmp = mul_felem5 f1 r r5 in
mul_felem5_fits_lemma #w f1 r r5;
let res = carry_wide_felem5 tmp in
carry_wide_felem5_fits_lemma #w tmp
val fmul_r5_eval_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10) /\ r5 == precomp_r5 r} ->
Lemma (feval5 (fmul_r5 #w f1 r r5) == map2 (Vec.pfmul) (feval5 f1) (feval5 r))
[SMTPat (fmul_r5 #w f1 r r5)]
let fmul_r5_eval_lemma #w f1 r r5 =
let tmp = mul_felem5 f1 r r5 in
mul_felem5_eval_lemma #w f1 r r5;
let res = carry_wide_felem5 tmp in
carry_wide_felem5_eval_lemma #w tmp
val fadd_mul_r5_fits_lemma:
#w:lanes
-> acc:felem5 w{felem_fits5 acc (2, 2, 2, 2, 2)}
-> f1:felem5 w{felem_fits5 f1 (1, 1, 1, 1, 1)}
-> r:felem5 w{felem_fits5 r (1, 1, 1, 1, 1)}
-> r5:felem5 w{felem_fits5 r5 (5, 5, 5, 5, 5)} ->
Lemma (felem_fits5 (fadd_mul_r5 acc f1 r r5) (1, 2, 1, 1, 2))
[SMTPat (fadd_mul_r5 acc f1 r r5)]
let fadd_mul_r5_fits_lemma #w acc f1 r r5 =
let acc1 = fadd5 acc f1 in
fadd5_fits_lemma #w acc f1;
let tmp = mul_felem5 acc1 r r5 in
mul_felem5_fits_lemma #w acc1 r r5;
let res = carry_wide_felem5 tmp in
carry_wide_felem5_fits_lemma #w tmp
val fadd_mul_r5_eval_lemma:
#w:lanes
-> acc:felem5 w{felem_fits5 acc (2, 2, 2, 2, 2)}
-> f1:felem5 w{felem_fits5 f1 (1, 1, 1, 1, 1)}
-> r:felem5 w{felem_fits5 r (1, 1, 1, 1, 1)}
-> r5:felem5 w{felem_fits5 r5 (5, 5, 5, 5, 5) /\ r5 == precomp_r5 r} ->
Lemma (feval5 (fadd_mul_r5 acc f1 r r5) ==
map2 (Vec.pfmul) (map2 (Vec.pfadd) (feval5 acc) (feval5 f1)) (feval5 r))
[SMTPat (fadd_mul_r5 acc f1 r r5)]
let fadd_mul_r5_eval_lemma #w acc f1 r r5 =
let acc1 = fadd5 acc f1 in
fadd5_eval_lemma #w acc f1;
let tmp = mul_felem5 acc1 r r5 in
mul_felem5_eval_lemma #w acc1 r r5;
let res = carry_wide_felem5 tmp in
carry_wide_felem5_eval_lemma #w tmp
val reduce_felem5_eval_lemma:
#w:lanes
-> f:felem5 w{felem_fits5 f (2, 2, 2, 2, 2)} ->
Lemma
(felem_fits5 (reduce_felem5 f) (1, 1, 1, 1, 1) /\
(feval5 f).[0] == (fas_nat5 (reduce_felem5 f)).[0])
[SMTPat (reduce_felem5 f)]
let reduce_felem5_eval_lemma #w f =
carry_full_felem5_eval_lemma f;
carry_full_felem5_fits_lemma f;
let f = carry_full_felem5 f in
carry_reduce_felem5_lemma #w f;
subtract_p5_felem5_lemma #w (carry_full_felem5 f)
val fmul_r2_normalize50:
acc:felem5 2
-> r:felem5 2
-> r2:felem5 2 ->
Pure (felem5 2)
(requires
felem_fits5 acc (3, 3, 3, 3, 3) /\
felem_fits5 r (1, 1, 1, 1, 1) /\
felem_fits5 r2 (2, 2, 2, 2, 2) /\
feval5 r2 == Vec.compute_r2 (feval5 r).[0])
(ensures fun a ->
let fr21 = create2 (feval5 r2).[0] (feval5 r).[0] in
feval5 a == Vec.fmul (feval5 acc) fr21 /\
felem_fits5 a (1, 2, 1, 1, 2))
let fmul_r2_normalize50 (a0, a1, a2, a3, a4) (r0, r1, r2, r3, r4) (r20, r21, r22, r23, r24) =
let r210 = vec_interleave_low r20 r0 in
vec_interleave_low_lemma2 r20 r0;
let r211 = vec_interleave_low r21 r1 in
vec_interleave_low_lemma2 r21 r1;
let r212 = vec_interleave_low r22 r2 in
vec_interleave_low_lemma2 r22 r2;
let r213 = vec_interleave_low r23 r3 in
vec_interleave_low_lemma2 r23 r3;
let r214 = vec_interleave_low r24 r4 in
vec_interleave_low_lemma2 r24 r4;
let acc = (a0, a1, a2, a3, a4) in
let fr = (r0, r1, r2, r3, r4) in
let fr2 = (r20, r21, r22, r23, r24) in
assert ((feval5 fr2).[0] == Vec.pfmul ((feval5 fr).[0]) ((feval5 fr).[0]));
let fr21 = (r210, r211, r212, r213, r214) in
eq_intro (feval5 fr21) (create2 (feval5 fr2).[0] (feval5 fr).[0]);
assert (feval5 fr21 == create2 (feval5 fr2).[0] (feval5 fr).[0]);
assert (felem_fits5 fr21 (2, 2, 2, 2, 2));
let fr215 = precomp_r5 #2 fr21 in
let a = fmul_r5 #2 acc fr21 fr215 in
fmul_r5_eval_lemma acc fr21 fr215;
fmul_r5_fits_lemma acc fr21 fr215;
assert (feval5 a == Vec.fmul (feval5 acc) (feval5 fr21));
assert (felem_fits5 a (1, 2, 1, 1, 2));
a
#push-options "--z3rlimit 150"
val fmul_r2_normalize51:
a:felem5 2
-> fa1:felem5 2 ->
Pure (felem5 2)
(requires
felem_fits5 a (1, 2, 1, 1, 2) /\
felem_fits5 fa1 (1, 2, 1, 1, 2) /\
feval5 fa1 == create2 (feval5 a).[1] (feval5 a).[1])
(ensures fun out ->
(feval5 out).[0] == Vec.pfadd (feval5 a).[0] (feval5 a).[1] /\
felem_fits5 out (2, 4, 2, 2, 4))
let fmul_r2_normalize51 a fa1 =
let (a0, a1, a2, a3, a4) = a in
let (a10, a11, a12, a13, a14) = fa1 in
let o0 = vec_add_mod a0 a10 in
let o1 = vec_add_mod a1 a11 in
let o2 = vec_add_mod a2 a12 in
let o3 = vec_add_mod a3 a13 in
let o4 = vec_add_mod a4 a14 in
let out = (o0, o1, o2, o3, o4) in
let (a0, a1, a2, a3, a4) = as_tup64_i a 0 in
let (a10, a11, a12, a13, a14) = as_tup64_i fa1 0 in
let (o0, o1, o2, o3, o4) = as_tup64_i out 0 in
FStar.Math.Lemmas.modulo_lemma (v a0 + v a10) (pow2 64);
FStar.Math.Lemmas.modulo_lemma (v a1 + v a11) (pow2 64);
FStar.Math.Lemmas.modulo_lemma (v a2 + v a12) (pow2 64);
FStar.Math.Lemmas.modulo_lemma (v a3 + v a13) (pow2 64);
FStar.Math.Lemmas.modulo_lemma (v a4 + v a14) (pow2 64);
assert (felem_fits5 out (2, 4, 2, 2, 4));
calc (==) {
((feval5 a).[0] + (feval5 a).[1]) % Vec.prime;
(==) { }
(as_nat5 (a0, a1, a2, a3, a4) % Vec.prime + as_nat5 (a10, a11, a12, a13, a14) % Vec.prime) % Vec.prime;
(==) { FStar.Math.Lemmas.lemma_mod_plus_distr_l
(as_nat5 (a0, a1, a2, a3, a4)) (as_nat5 (a10, a11, a12, a13, a14)) Vec.prime;
FStar.Math.Lemmas.lemma_mod_plus_distr_r
(as_nat5 (a0, a1, a2, a3, a4) % Vec.prime) (as_nat5 (a10, a11, a12, a13, a14)) Vec.prime }
(as_nat5 (a0, a1, a2, a3, a4) + as_nat5 (a10, a11, a12, a13, a14)) % Vec.prime;
(==) { }
(feval5 out).[0];
};
out
#pop-options
val fmul_r2_normalize5_lemma:
acc:felem5 2
-> r:felem5 2
-> r2:felem5 2 ->
Lemma
(requires
felem_fits5 acc (3, 3, 3, 3, 3) /\
felem_fits5 r (1, 1, 1, 1, 1) /\
felem_fits5 r2 (2, 2, 2, 2, 2) /\
feval5 r2 == Vec.compute_r2 (feval5 r).[0])
(ensures
(let out = fmul_r2_normalize5 acc r r2 in
felem_fits5 out (2, 1, 1, 1, 1) /\
(feval5 out).[0] == Vec.normalize_2 (feval5 r).[0] (feval5 acc)))
[SMTPat (fmul_r2_normalize5 acc r r2)]
let fmul_r2_normalize5_lemma acc r r2 =
let a = fmul_r2_normalize50 acc r r2 in
let (a0, a1, a2, a3, a4) = a in
let a10 = vec_interleave_high a0 a0 in
vec_interleave_high_lemma2 a0 a0;
let a11 = vec_interleave_high a1 a1 in
vec_interleave_high_lemma2 a1 a1;
let a12 = vec_interleave_high a2 a2 in
vec_interleave_high_lemma2 a2 a2;
let a13 = vec_interleave_high a3 a3 in
vec_interleave_high_lemma2 a3 a3;
let a14 = vec_interleave_high a4 a4 in
vec_interleave_high_lemma2 a4 a4;
let fa1 = (a10, a11, a12, a13, a14) in
eq_intro (feval5 fa1) (create2 (feval5 a).[1] (feval5 a).[1]);
assert (feval5 fa1 == create2 (feval5 a).[1] (feval5 a).[1]);
assert (felem_fits5 fa1 (1, 2, 1, 1, 2));
let out = fmul_r2_normalize51 a fa1 in
assert ((feval5 out).[0] == Vec.pfadd (feval5 a).[0] (feval5 a).[1]);
let res = carry_full_felem5 out in
carry_full_felem5_lemma out
val fmul_r4_normalize50:
acc:felem5 4
-> r:felem5 4
-> r2:felem5 4
-> r3:felem5 4
-> r4:felem5 4 ->
Pure (felem5 4)
(requires
felem_fits5 acc (3, 3, 3, 3, 3) /\
felem_fits5 r (1, 1, 1, 1, 1) /\
felem_fits5 r2 (2, 2, 2, 2, 2) /\
felem_fits5 r3 (2, 2, 2, 2, 2) /\
felem_fits5 r4 (2, 2, 2, 2, 2) /\
feval5 r2 == Vec.fmul (feval5 r) (feval5 r) /\
feval5 r3 == Vec.fmul (feval5 r2) (feval5 r) /\
feval5 r4 == Vec.compute_r4 (feval5 r).[0])
(ensures fun out ->
let fr4321 = create4 (feval5 r4).[0] (feval5 r3).[0] (feval5 r2).[0] (feval5 r).[0] in
feval5 out == Vec.fmul (feval5 acc) fr4321 /\
felem_fits5 out (1, 2, 1, 1, 2))
let fmul_r4_normalize50 acc fr fr2 fr3 fr4 =
let (r10, r11, r12, r13, r14) = fr in
let (r20, r21, r22, r23, r24) = fr2 in
let (r30, r31, r32, r33, r34) = fr3 in
let (r40, r41, r42, r43, r44) = fr4 in
let (a0, a1, a2, a3, a4) = acc in
let v12120 = vec_interleave_low r20 r10 in
vec_interleave_low_lemma_uint64_4 r20 r10;
let v34340 = vec_interleave_low r40 r30 in
vec_interleave_low_lemma_uint64_4 r40 r30;
let r12340 = vec_interleave_low_n 2 v34340 v12120 in
vec_interleave_low_n_lemma_uint64_4_2 v34340 v12120;
let v12121 = vec_interleave_low r21 r11 in
vec_interleave_low_lemma_uint64_4 r21 r11;
let v34341 = vec_interleave_low r41 r31 in
vec_interleave_low_lemma_uint64_4 r41 r31;
let r12341 = vec_interleave_low_n 2 v34341 v12121 in
vec_interleave_low_n_lemma_uint64_4_2 v34341 v12121;
let v12122 = vec_interleave_low r22 r12 in
vec_interleave_low_lemma_uint64_4 r22 r12;
let v34342 = vec_interleave_low r42 r32 in
vec_interleave_low_lemma_uint64_4 r42 r32;
let r12342 = vec_interleave_low_n 2 v34342 v12122 in
vec_interleave_low_n_lemma_uint64_4_2 v34342 v12122;
let v12123 = vec_interleave_low r23 r13 in
vec_interleave_low_lemma_uint64_4 r23 r13;
let v34343 = vec_interleave_low r43 r33 in
vec_interleave_low_lemma_uint64_4 r43 r33;
let r12343 = vec_interleave_low_n 2 v34343 v12123 in
vec_interleave_low_n_lemma_uint64_4_2 v34343 v12123;
let v12124 = vec_interleave_low r24 r14 in
vec_interleave_low_lemma_uint64_4 r24 r14;
let v34344 = vec_interleave_low r44 r34 in
vec_interleave_low_lemma_uint64_4 r44 r34;
let r12344 = vec_interleave_low_n 2 v34344 v12124 in
vec_interleave_low_n_lemma_uint64_4_2 v34344 v12124;
let fr1234 = (r12340, r12341, r12342, r12343, r12344) in
eq_intro (feval5 fr1234) (create4 (feval5 fr4).[0] (feval5 fr3).[0] (feval5 fr2).[0] (feval5 fr).[0]);
let fr12345 = precomp_r5 #4 fr1234 in
let out = fmul_r5 #4 acc fr1234 fr12345 in
fmul_r5_eval_lemma acc fr1234 fr12345;
fmul_r5_fits_lemma acc fr1234 fr12345;
out
val lemma_fmul_r4_normalize51:
#m:scale32{m <= 2}
-> o:uint64xN 4{felem_fits1 o m} ->
Lemma
(let v00 = vec_interleave_high_n 2 o o in
let v10 = vec_add_mod o v00 in
let v10h = vec_interleave_high v10 v10 in
let v20 = vec_add_mod v10 v10h in
felem_fits1 v20 (4 * m) /\
(uint64xN_v v20).[0] == (uint64xN_v o).[0] + (uint64xN_v o).[1] + (uint64xN_v o).[2] + (uint64xN_v o).[3])
let lemma_fmul_r4_normalize51 #m o =
let v00 = vec_interleave_high_n 2 o o in
vec_interleave_high_n_lemma_uint64_4_2 o o;
let (o0, o1, o2, o3) = ((vec_v o).[0], (vec_v o).[1], (vec_v o).[2], (vec_v o).[3]) in
assert (vec_v v00 == create4 o2 o3 o2 o3);
let v10 = vec_add_mod o v00 in
FStar.Math.Lemmas.modulo_lemma (v o0 + v o2) (pow2 64);
FStar.Math.Lemmas.modulo_lemma (v o1 + v o3) (pow2 64);
assert (v (vec_v v10).[0] == v o0 + v o2);
assert (v (vec_v v10).[1] == v o1 + v o3);
let v10h = vec_interleave_high v10 v10 in
vec_interleave_high_lemma_uint64_4 v10 v10;
assert (v (vec_v v10h).[0] == v (vec_v v10).[1]);
let v20 = vec_add_mod v10 v10h in
FStar.Math.Lemmas.modulo_lemma (v o0 + v o2 + v o1 + v o3) (pow2 64)
val lemma_fmul_r4_normalize51_expand:
v2:felem5 4
-> out:felem5 4 ->
Lemma
(requires
(let (v20, v21, v22, v23, v24) = v2 in
let (o0, o1, o2, o3, o4) = out in
(uint64xN_v v20).[0] == (uint64xN_v o0).[0] + (uint64xN_v o0).[1] + (uint64xN_v o0).[2] + (uint64xN_v o0).[3] /\
(uint64xN_v v21).[0] == (uint64xN_v o1).[0] + (uint64xN_v o1).[1] + (uint64xN_v o1).[2] + (uint64xN_v o1).[3] /\
(uint64xN_v v22).[0] == (uint64xN_v o2).[0] + (uint64xN_v o2).[1] + (uint64xN_v o2).[2] + (uint64xN_v o2).[3] /\
(uint64xN_v v23).[0] == (uint64xN_v o3).[0] + (uint64xN_v o3).[1] + (uint64xN_v o3).[2] + (uint64xN_v o3).[3] /\
(uint64xN_v v24).[0] == (uint64xN_v o4).[0] + (uint64xN_v o4).[1] + (uint64xN_v o4).[2] + (uint64xN_v o4).[3]))
(ensures
(let (v20, v21, v22, v23, v24) = v2 in
let (o0, o1, o2, o3, o4) = out in
(feval5 v2).[0] == Vec.pfadd (Vec.pfadd (Vec.pfadd (feval5 out).[0] (feval5 out).[1]) (feval5 out).[2]) (feval5 out).[3]))
let lemma_fmul_r4_normalize51_expand v2 out =
let (v20, v21, v22, v23, v24) = as_tup64_i v2 0 in
let (o0, o1, o2, o3, o4) = out in
calc (==) {
as_nat5 (v20, v21, v22, v23, v24) % Vec.prime;
(==) { }
(as_nat5 (as_tup64_i out 0) + as_nat5 (as_tup64_i out 1) + as_nat5 (as_tup64_i out 2) + as_nat5 (as_tup64_i out 3)) % Vec.prime;
(==) { FStar.Math.Lemmas.lemma_mod_plus_distr_l (as_nat5 (as_tup64_i out 0) + as_nat5 (as_tup64_i out 1))
(as_nat5 (as_tup64_i out 2) + as_nat5 (as_tup64_i out 3)) Vec.prime }
((as_nat5 (as_tup64_i out 0) + as_nat5 (as_tup64_i out 1)) % Vec.prime + as_nat5 (as_tup64_i out 2) + as_nat5 (as_tup64_i out 3)) % Vec.prime;
(==) { FStar.Math.Lemmas.modulo_distributivity (as_nat5 (as_tup64_i out 0)) (as_nat5 (as_tup64_i out 1)) Vec.prime }
(((feval5 out).[0] + (feval5 out).[1]) % Vec.prime + as_nat5 (as_tup64_i out 2) + as_nat5 (as_tup64_i out 3)) % Vec.prime;
(==) { FStar.Math.Lemmas.lemma_mod_plus_distr_l (((feval5 out).[0] + (feval5 out).[1]) % Vec.prime + as_nat5 (as_tup64_i out 2))
(as_nat5 (as_tup64_i out 3)) Vec.prime }
((((feval5 out).[0] + (feval5 out).[1]) % Vec.prime + as_nat5 (as_tup64_i out 2)) % Vec.prime + as_nat5 (as_tup64_i out 3)) % Vec.prime;
(==) { FStar.Math.Lemmas.lemma_mod_plus_distr_r (((feval5 out).[0] + (feval5 out).[1]) % Vec.prime) (as_nat5 (as_tup64_i out 2)) Vec.prime }
((((feval5 out).[0] + (feval5 out).[1]) % Vec.prime + (feval5 out).[2]) % Vec.prime + as_nat5 (as_tup64_i out 3)) % Vec.prime;
(==) { FStar.Math.Lemmas.lemma_mod_plus_distr_r ((((feval5 out).[0] + (feval5 out).[1]) % Vec.prime + (feval5 out).[2]) % Vec.prime)
(as_nat5 (as_tup64_i out 3)) Vec.prime }
((((feval5 out).[0] + (feval5 out).[1]) % Vec.prime + (feval5 out).[2]) % Vec.prime + (feval5 out).[3]) % Vec.prime;
};
assert ((feval5 v2).[0] ==
((((feval5 out).[0] + (feval5 out).[1]) % Vec.prime + (feval5 out).[2]) % Vec.prime + (feval5 out).[3]) % Vec.prime)
val fmul_r4_normalize51: a:felem5 4 ->
Pure (felem5 4)
(requires felem_fits5 a (1, 2, 1, 1, 2))
(ensures fun res ->
felem_fits5 res (4, 8, 4, 4, 8) /\
(feval5 res).[0] == Vec.pfadd (Vec.pfadd (Vec.pfadd (feval5 a).[0] (feval5 a).[1]) (feval5 a).[2]) (feval5 a).[3])
let fmul_r4_normalize51 fa =
let (o0, o1, o2, o3, o4) = fa in
let v00 = vec_interleave_high_n 2 o0 o0 in
let v10 = vec_add_mod o0 v00 in
let v10h = vec_interleave_high v10 v10 in
let v20 = vec_add_mod v10 v10h in
lemma_fmul_r4_normalize51 #1 o0;
let v01 = vec_interleave_high_n 2 o1 o1 in
let v11 = vec_add_mod o1 v01 in
let v11h = vec_interleave_high v11 v11 in
let v21 = vec_add_mod v11 v11h in
lemma_fmul_r4_normalize51 #2 o1;
let v02 = vec_interleave_high_n 2 o2 o2 in
let v12 = vec_add_mod o2 v02 in
let v12h = vec_interleave_high v12 v12 in
let v22 = vec_add_mod v12 v12h in
lemma_fmul_r4_normalize51 #1 o2;
let v03 = vec_interleave_high_n 2 o3 o3 in
let v13 = vec_add_mod o3 v03 in
let v13h = vec_interleave_high v13 v13 in
let v23 = vec_add_mod v13 v13h in
lemma_fmul_r4_normalize51 #1 o3;
let v04 = vec_interleave_high_n 2 o4 o4 in
let v14 = vec_add_mod o4 v04 in
let v14h = vec_interleave_high v14 v14 in
let v24 = vec_add_mod v14 v14h in
lemma_fmul_r4_normalize51 #2 o4;
let res = (v20, v21, v22, v23, v24) in
lemma_fmul_r4_normalize51_expand res fa;
res
val fmul_r4_normalize5_lemma:
acc:felem5 4
-> r:felem5 4
-> r_5:felem5 4
-> r4:felem5 4 ->
Lemma
(requires
felem_fits5 acc (3, 3, 3, 3, 3) /\
felem_fits5 r (1, 1, 1, 1, 1) /\
felem_fits5 r4 (2, 2, 2, 2, 2) /\
r_5 == precomp_r5 r /\
feval5 r4 == Vec.compute_r4 (feval5 r).[0])
(ensures
(let out = fmul_r4_normalize5 acc r r_5 r4 in
felem_fits5 out (2, 1, 1, 1, 1) /\
(feval5 out).[0] == Vec.normalize_4 (feval5 r).[0] (feval5 acc)))
[SMTPat (fmul_r4_normalize5 acc r r_5 r4)]
#restart-solver
#push-options "--z3rlimit 500"
let fmul_r4_normalize5_lemma acc fr fr_5 fr4 =
let fr2 = fmul_r5 #4 fr fr fr_5 in
let fr3 = fmul_r5 #4 fr2 fr fr_5 in
let out = fmul_r4_normalize50 acc fr fr2 fr3 fr4 in
let v2 = fmul_r4_normalize51 out in
let res = carry_full_felem5 v2 in
carry_full_felem5_lemma v2
#pop-options
val load_felem5_lemma:
#w:lanes
-> lo:uint64xN w
-> hi:uint64xN w ->
Lemma
(let f = load_felem5 #w lo hi in
felem_fits5 f (1, 1, 1, 1, 1) /\
felem_less5 f (pow2 128) /\
feval5 f == createi #Vec.pfelem w
(fun i -> (uint64xN_v hi).[i] * pow2 64 + (uint64xN_v lo).[i]))
let load_felem5_lemma #w lo hi =
let f = load_felem5 #w lo hi in
assert_norm (pow2 64 * pow2 64 = pow2 128);
assert_norm (pow2 128 < Vec.prime);
let res = createi #Vec.pfelem w
(fun i -> (uint64xN_v hi).[i] * pow2 64 + (uint64xN_v lo).[i]) in
match w with
| 1 ->
load_felem5_lemma_i #w lo hi 0;
eq_intro (feval5 f) res
| 2 ->
load_felem5_lemma_i #w lo hi 0;
load_felem5_lemma_i #w lo hi 1;
eq_intro (feval5 f) res
| 4 ->
load_felem5_lemma_i #w lo hi 0;
load_felem5_lemma_i #w lo hi 1;
load_felem5_lemma_i #w lo hi 2;
load_felem5_lemma_i #w lo hi 3;
eq_intro (feval5 f) res
val load_felem5_4_interleave: lo:uint64xN 4 -> hi:uint64xN 4 -> Lemma
(let m0 = vec_interleave_low_n 2 lo hi in
let m1 = vec_interleave_high_n 2 lo hi in
let m2 = cast U64 4 (vec_shift_right (cast U128 2 m0) 48ul) in
let m3 = cast U64 4 (vec_shift_right (cast U128 2 m1) 48ul) in
let m4 = vec_interleave_high m0 m1 in
let t0 = vec_interleave_low m0 m1 in
let t3 = vec_interleave_low m2 m3 in
vec_v m4 == create4 (vec_v lo).[1] (vec_v lo).[3] (vec_v hi).[1] (vec_v hi).[3] /\
vec_v t0 == create4 (vec_v lo).[0] (vec_v lo).[2] (vec_v hi).[0] (vec_v hi).[2] /\
t3 == vec_or (vec_shift_right t0 48ul) (vec_shift_left m4 16ul))
let load_felem5_4_interleave lo hi =
let m0 = vec_interleave_low_n 2 lo hi in
vec_interleave_low_n_lemma_uint64_4_2 lo hi;
//assert (vec_v m0 == create4 (vec_v lo).[0] (vec_v lo).[1] (vec_v hi).[0] (vec_v hi).[1]);
let m1 = vec_interleave_high_n 2 lo hi in
vec_interleave_high_n_lemma_uint64_4_2 lo hi;
//assert (vec_v m1 == create4 (vec_v lo).[2] (vec_v lo).[3] (vec_v hi).[2] (vec_v hi).[3]);
let m4 = vec_interleave_high m0 m1 in
vec_interleave_high_lemma_uint64_4 m0 m1;
//assert (vec_v m4 == create4 (vec_v m0).[1] (vec_v m1).[1] (vec_v m0).[3] (vec_v m1).[3]);
assert (vec_v m4 == create4 (vec_v lo).[1] (vec_v lo).[3] (vec_v hi).[1] (vec_v hi).[3]);
let t0 = vec_interleave_low m0 m1 in
vec_interleave_low_lemma_uint64_4 m0 m1;
//assert (vec_v t0 == create4 (vec_v m0).[0] (vec_v m1).[0] (vec_v m0).[2] (vec_v m1).[2]);
assert (vec_v t0 == create4 (vec_v lo).[0] (vec_v lo).[2] (vec_v hi).[0] (vec_v hi).[2]);
let m2 = cast U64 4 (vec_shift_right (cast U128 2 m0) 48ul) in
vec_shift_right_uint128_small2 m0 48ul;
assert ((vec_v m2).[0] == (((vec_v lo).[0] >>. 48ul) |. ((vec_v lo).[1] <<. 16ul)));
assert ((vec_v m2).[2] == (((vec_v hi).[0] >>. 48ul) |. ((vec_v hi).[1] <<. 16ul)));
let m3 = cast U64 4 (vec_shift_right (cast U128 2 m1) 48ul) in
vec_shift_right_uint128_small2 m1 48ul;
assert ((vec_v m3).[0] == (((vec_v lo).[2] >>. 48ul) |. ((vec_v lo).[3] <<. 16ul)));
assert ((vec_v m3).[2] == (((vec_v hi).[2] >>. 48ul) |. ((vec_v hi).[3] <<. 16ul)));
let t3 = vec_interleave_low m2 m3 in
vec_interleave_low_lemma_uint64_4 m2 m3;
eq_intro (vec_v t3) (vec_v (vec_or (vec_shift_right t0 48ul) (vec_shift_left m4 16ul)));
vecv_extensionality t3 (vec_or (vec_shift_right t0 48ul) (vec_shift_left m4 16ul))
noextract
val load_felem5_4_compact: lo:uint64xN 4 -> hi:uint64xN 4 -> felem5 4
let load_felem5_4_compact lo hi =
let mask26 = mask26 4 in
let t3 = vec_or (vec_shift_right lo 48ul) (vec_shift_left hi 16ul) in
let o0 = vec_and lo mask26 in
let o1 = vec_and (vec_shift_right lo 26ul) mask26 in
let o2 = vec_and (vec_shift_right t3 4ul) mask26 in
let o3 = vec_and (vec_shift_right t3 30ul) mask26 in
let o4 = vec_shift_right hi 40ul in
(o0, o1, o2, o3, o4)
val load_felem5_4_compact_lemma_i: lo:uint64xN 4 -> hi:uint64xN 4 -> i:nat{i < 4} ->
Lemma
(let f = as_tup64_i (load_felem5_4_compact lo hi) i in
tup64_fits5 f (1, 1, 1, 1, 1) /\
as_nat5 f < pow2 128 /\
as_nat5 f % Vec.prime == (uint64xN_v hi).[i] * pow2 64 + (uint64xN_v lo).[i])
let load_felem5_4_compact_lemma_i lo hi i =
assert (as_tup64_i (load_felem5_4_compact lo hi) i == load_tup64_4_compact (vec_v lo).[i] (vec_v hi).[i]);
load_tup64_4_compact_lemma (vec_v lo).[i] (vec_v hi).[i]
val load_felem5_4_lemma: lo:uint64xN 4 -> hi:uint64xN 4 ->
Lemma
(let f = load_felem5_4_compact lo hi in
felem_fits5 f (1, 1, 1, 1, 1) /\
felem_less5 f (pow2 128) /\
feval5 f == createi #Vec.pfelem 4 (fun i -> (uint64xN_v hi).[i] * pow2 64 + (uint64xN_v lo).[i]))
let load_felem5_4_lemma lo hi =
let f = load_felem5_4_compact lo hi in
assert_norm (pow2 64 * pow2 64 = pow2 128);
assert_norm (pow2 128 < Vec.prime);
let res = createi #Vec.pfelem 4
(fun i -> (uint64xN_v hi).[i] * pow2 64 + (uint64xN_v lo).[i]) in
load_felem5_4_compact_lemma_i lo hi 0;
load_felem5_4_compact_lemma_i lo hi 1;
load_felem5_4_compact_lemma_i lo hi 2;
load_felem5_4_compact_lemma_i lo hi 3;
eq_intro (feval5 f) res
val load_felem5_le: b:lseq uint8 64 -> Lemma
(let lo0 = vec_from_bytes_le U64 4 (sub b 0 32) in
let hi0 = vec_from_bytes_le U64 4 (sub b 32 32) in
let f = load_felem5_4 lo0 hi0 in
felem_fits5 f (1, 1, 1, 1, 1) /\
felem_less5 f (pow2 128) /\
feval5 f == Vec.load_elem4 b)
let load_felem5_le b =
let lo0 = vec_from_bytes_le U64 4 (sub b 0 32) in
let hi0 = vec_from_bytes_le U64 4 (sub b 32 32) in
let lo1 = vec_interleave_low_n 2 lo0 hi0 in
let hi1 = vec_interleave_high_n 2 lo0 hi0 in
let lo = vec_interleave_low lo1 hi1 in
let hi = vec_interleave_high lo1 hi1 in
let out = load_felem5_4_compact lo hi in
load_felem5_4_interleave lo0 hi0;
assert (out == load_felem5_4 lo0 hi0);
load_felem5_4_lemma lo hi;
Hacl.Impl.Poly1305.Lemmas.uints_from_bytes_le_lemma64_4 b;
eq_intro (feval5 out) (Vec.load_elem4 b)
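(* Informally: the interleave steps above permute the eight 64-bit words of b so that,
   for every lane i, lo holds the low word and hi the high word of the i-th 16-byte
   block.  load_felem5_4_lemma then gives
     feval5 out == createi 4 (fun i -> hi_i * pow2 64 + lo_i),
   which uints_from_bytes_le_lemma64_4 identifies with Vec.load_elem4 b, as stated. *)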
val load_acc5_2_lemma:
f:felem5 2{felem_fits5 f (2, 2, 2, 2, 2)}
-> e:felem5 2{felem_fits5 e (1, 1, 1, 1, 1)} ->
Lemma
(let res = load_acc5_2 f e in
felem_fits5 res (3, 3, 3, 3, 3) /\
feval5 res == Vec.fadd (create2 (feval5 f).[0] 0) (feval5 e))
[SMTPat (load_acc5_2 f e)] | false | false | Hacl.Spec.Poly1305.Field32xN.Lemmas.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 100,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val load_acc5_2_lemma:
f:felem5 2{felem_fits5 f (2, 2, 2, 2, 2)}
-> e:felem5 2{felem_fits5 e (1, 1, 1, 1, 1)} ->
Lemma
(let res = load_acc5_2 f e in
felem_fits5 res (3, 3, 3, 3, 3) /\
feval5 res == Vec.fadd (create2 (feval5 f).[0] 0) (feval5 e))
[SMTPat (load_acc5_2 f e)] | [] | Hacl.Spec.Poly1305.Field32xN.Lemmas.load_acc5_2_lemma | {
"file_name": "code/poly1305/Hacl.Spec.Poly1305.Field32xN.Lemmas.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} |
f:
Hacl.Spec.Poly1305.Field32xN.felem5 2
{Hacl.Spec.Poly1305.Field32xN.felem_fits5 f (2, 2, 2, 2, 2)} ->
e:
Hacl.Spec.Poly1305.Field32xN.felem5 2
{Hacl.Spec.Poly1305.Field32xN.felem_fits5 e (1, 1, 1, 1, 1)}
-> FStar.Pervasives.Lemma
(ensures
(let res = Hacl.Spec.Poly1305.Field32xN.load_acc5_2 f e in
Hacl.Spec.Poly1305.Field32xN.felem_fits5 res (3, 3, 3, 3, 3) /\
Hacl.Spec.Poly1305.Field32xN.feval5 res ==
Hacl.Spec.Poly1305.Vec.fadd (Lib.Sequence.create2 (Hacl.Spec.Poly1305.Field32xN.feval5 f).[ 0 ] 0)
(Hacl.Spec.Poly1305.Field32xN.feval5 e)))
[SMTPat (Hacl.Spec.Poly1305.Field32xN.load_acc5_2 f e)] | {
"end_col": 48,
"end_line": 786,
"start_col": 27,
"start_line": 776
} |
FStar.Pervasives.Lemma | val precomp_r5_fits_lemma2:
#w:lanes
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)} ->
Lemma (felem_fits5 (precomp_r5 #w r) (10, 10, 10, 10, 10))
[SMTPat (precomp_r5 #w r)] | [
{
"abbrev": true,
"full_module": "Hacl.Spec.Poly1305.Vec",
"short_module": "Vec"
},
{
"abbrev": false,
"full_module": "Hacl.Poly1305.Field32xN.Lemmas2",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Poly1305.Field32xN.Lemmas1",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Poly1305.Field32xN.Lemmas0",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305.Field32xN",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305.Field32xN",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305.Field32xN",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let precomp_r5_fits_lemma2 #w r =
FStar.Classical.forall_intro (precomp_r5_as_tup64 #w r) | val precomp_r5_fits_lemma2:
#w:lanes
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)} ->
Lemma (felem_fits5 (precomp_r5 #w r) (10, 10, 10, 10, 10))
[SMTPat (precomp_r5 #w r)]
let precomp_r5_fits_lemma2 #w r = | false | null | true | FStar.Classical.forall_intro (precomp_r5_as_tup64 #w r) | {
"checked_file": "Hacl.Spec.Poly1305.Field32xN.Lemmas.fst.checked",
"dependencies": [
"prims.fst.checked",
"Lib.Sequence.fsti.checked",
"Lib.IntVector.fsti.checked",
"Lib.IntTypes.fsti.checked",
"Hacl.Spec.Poly1305.Vec.fst.checked",
"Hacl.Spec.Poly1305.Field32xN.fst.checked",
"Hacl.Poly1305.Field32xN.Lemmas2.fst.checked",
"Hacl.Poly1305.Field32xN.Lemmas1.fst.checked",
"Hacl.Poly1305.Field32xN.Lemmas0.fst.checked",
"Hacl.Impl.Poly1305.Lemmas.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Classical.fsti.checked",
"FStar.Calc.fsti.checked"
],
"interface_file": false,
"source_file": "Hacl.Spec.Poly1305.Field32xN.Lemmas.fst"
} | [
"lemma"
] | [
"Hacl.Spec.Poly1305.Field32xN.lanes",
"Hacl.Spec.Poly1305.Field32xN.felem5",
"Hacl.Spec.Poly1305.Field32xN.felem_fits5",
"FStar.Pervasives.Native.Mktuple5",
"Prims.nat",
"FStar.Classical.forall_intro",
"Prims.b2t",
"Prims.op_LessThan",
"Hacl.Spec.Poly1305.Field32xN.as_tup64_i",
"Hacl.Spec.Poly1305.Field32xN.precomp_r5",
"Lib.IntTypes.uint64",
"Prims.l_and",
"Prims.eq2",
"Lib.IntTypes.int_t",
"Lib.IntTypes.U64",
"Lib.IntTypes.SEC",
"Lib.IntTypes.op_Star_Bang",
"Lib.IntTypes.u64",
"Hacl.Poly1305.Field32xN.Lemmas0.precomp_r5_as_tup64",
"Prims.unit"
] | [] | module Hacl.Spec.Poly1305.Field32xN.Lemmas
open Lib.IntTypes
open Lib.IntVector
open Lib.Sequence
open FStar.Mul
open Hacl.Spec.Poly1305.Field32xN
open Hacl.Poly1305.Field32xN.Lemmas0
open Hacl.Poly1305.Field32xN.Lemmas1
open Hacl.Poly1305.Field32xN.Lemmas2
module Vec = Hacl.Spec.Poly1305.Vec
#set-options "--z3rlimit 100 --max_fuel 0 --initial_ifuel 1 --max_ifuel 1 --using_facts_from '* -FStar.Seq'"
val lemma_feval_is_fas_nat_i:
#w:lanes
-> f:felem5 w{felem_less5 f (pow2 128)}
-> i:size_nat{i < w} ->
Lemma ((feval5 f).[i] == (fas_nat5 f).[i])
let lemma_feval_is_fas_nat_i #w f i =
assert_norm (pow2 128 < Vec.prime);
assert ((feval5 f).[i] == (as_nat5 (transpose f).[i]) % Vec.prime);
FStar.Math.Lemmas.modulo_lemma (as_nat5 (transpose f).[i]) Vec.prime
val lemma_feval_is_fas_nat:
#w:lanes
-> f:felem5 w{felem_less5 f (pow2 128)} ->
Lemma (forall (i:nat). i < w ==> (fas_nat5 f).[i] == (feval5 f).[i])
let lemma_feval_is_fas_nat #w f =
FStar.Classical.forall_intro (lemma_feval_is_fas_nat_i #w f)
val precomp_r5_fits_lemma:
#w:lanes
-> r:felem5 w{felem_fits5 r (1, 1, 1, 1, 1)} ->
Lemma (felem_fits5 (precomp_r5 #w r) (5, 5, 5, 5, 5))
[SMTPat (precomp_r5 #w r)]
let precomp_r5_fits_lemma #w r =
FStar.Classical.forall_intro (precomp_r5_as_tup64 #w r)
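(* Informal reading of the two fits lemmas: precomp_r5 scales each limb of r by 5
   (cf. the vec_smul_mod (... (u64 5)) steps visible in precomp_r5_zeros later in this
   module), so limbs bounded by k * max26 become bounded by 5*k * max26; this is why
   bounds (1,1,1,1,1) turn into (5,5,5,5,5) here and (2,2,2,2,2) into (10,10,10,10,10)
   in the next lemma. *)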
val precomp_r5_fits_lemma2:
#w:lanes
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)} ->
Lemma (felem_fits5 (precomp_r5 #w r) (10, 10, 10, 10, 10))
[SMTPat (precomp_r5 #w r)] | false | false | Hacl.Spec.Poly1305.Field32xN.Lemmas.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 100,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val precomp_r5_fits_lemma2:
#w:lanes
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)} ->
Lemma (felem_fits5 (precomp_r5 #w r) (10, 10, 10, 10, 10))
[SMTPat (precomp_r5 #w r)] | [] | Hacl.Spec.Poly1305.Field32xN.Lemmas.precomp_r5_fits_lemma2 | {
"file_name": "code/poly1305/Hacl.Spec.Poly1305.Field32xN.Lemmas.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} |
r:
Hacl.Spec.Poly1305.Field32xN.felem5 w
{Hacl.Spec.Poly1305.Field32xN.felem_fits5 r (2, 2, 2, 2, 2)}
-> FStar.Pervasives.Lemma
(ensures
Hacl.Spec.Poly1305.Field32xN.felem_fits5 (Hacl.Spec.Poly1305.Field32xN.precomp_r5 r)
(10, 10, 10, 10, 10)) [SMTPat (Hacl.Spec.Poly1305.Field32xN.precomp_r5 r)] | {
"end_col": 57,
"end_line": 55,
"start_col": 2,
"start_line": 55
} |
Prims.Pure | val fmul_r2_normalize50:
acc:felem5 2
-> r:felem5 2
-> r2:felem5 2 ->
Pure (felem5 2)
(requires
felem_fits5 acc (3, 3, 3, 3, 3) /\
felem_fits5 r (1, 1, 1, 1, 1) /\
felem_fits5 r2 (2, 2, 2, 2, 2) /\
feval5 r2 == Vec.compute_r2 (feval5 r).[0])
(ensures fun a ->
let fr21 = create2 (feval5 r2).[0] (feval5 r).[0] in
feval5 a == Vec.fmul (feval5 acc) fr21 /\
felem_fits5 a (1, 2, 1, 1, 2)) | [
{
"abbrev": true,
"full_module": "Hacl.Spec.Poly1305.Vec",
"short_module": "Vec"
},
{
"abbrev": false,
"full_module": "Hacl.Poly1305.Field32xN.Lemmas2",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Poly1305.Field32xN.Lemmas1",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Poly1305.Field32xN.Lemmas0",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305.Field32xN",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305.Field32xN",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305.Field32xN",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let fmul_r2_normalize50 (a0, a1, a2, a3, a4) (r0, r1, r2, r3, r4) (r20, r21, r22, r23, r24) =
let r210 = vec_interleave_low r20 r0 in
vec_interleave_low_lemma2 r20 r0;
let r211 = vec_interleave_low r21 r1 in
vec_interleave_low_lemma2 r21 r1;
let r212 = vec_interleave_low r22 r2 in
vec_interleave_low_lemma2 r22 r2;
let r213 = vec_interleave_low r23 r3 in
vec_interleave_low_lemma2 r23 r3;
let r214 = vec_interleave_low r24 r4 in
vec_interleave_low_lemma2 r24 r4;
let acc = (a0, a1, a2, a3, a4) in
let fr = (r0, r1, r2, r3, r4) in
let fr2 = (r20, r21, r22, r23, r24) in
assert ((feval5 fr2).[0] == Vec.pfmul ((feval5 fr).[0]) ((feval5 fr).[0]));
let fr21 = (r210, r211, r212, r213, r214) in
eq_intro (feval5 fr21) (create2 (feval5 fr2).[0] (feval5 fr).[0]);
assert (feval5 fr21 == create2 (feval5 fr2).[0] (feval5 fr).[0]);
assert (felem_fits5 fr21 (2, 2, 2, 2, 2));
let fr215 = precomp_r5 #2 fr21 in
let a = fmul_r5 #2 acc fr21 fr215 in
fmul_r5_eval_lemma acc fr21 fr215;
fmul_r5_fits_lemma acc fr21 fr215;
assert (feval5 a == Vec.fmul (feval5 acc) (feval5 fr21));
assert (felem_fits5 a (1, 2, 1, 1, 2));
a | val fmul_r2_normalize50:
acc:felem5 2
-> r:felem5 2
-> r2:felem5 2 ->
Pure (felem5 2)
(requires
felem_fits5 acc (3, 3, 3, 3, 3) /\
felem_fits5 r (1, 1, 1, 1, 1) /\
felem_fits5 r2 (2, 2, 2, 2, 2) /\
feval5 r2 == Vec.compute_r2 (feval5 r).[0])
(ensures fun a ->
let fr21 = create2 (feval5 r2).[0] (feval5 r).[0] in
feval5 a == Vec.fmul (feval5 acc) fr21 /\
felem_fits5 a (1, 2, 1, 1, 2))
let fmul_r2_normalize50 (a0, a1, a2, a3, a4) (r0, r1, r2, r3, r4) (r20, r21, r22, r23, r24) = | false | null | false | let r210 = vec_interleave_low r20 r0 in
vec_interleave_low_lemma2 r20 r0;
let r211 = vec_interleave_low r21 r1 in
vec_interleave_low_lemma2 r21 r1;
let r212 = vec_interleave_low r22 r2 in
vec_interleave_low_lemma2 r22 r2;
let r213 = vec_interleave_low r23 r3 in
vec_interleave_low_lemma2 r23 r3;
let r214 = vec_interleave_low r24 r4 in
vec_interleave_low_lemma2 r24 r4;
let acc = (a0, a1, a2, a3, a4) in
let fr = (r0, r1, r2, r3, r4) in
let fr2 = (r20, r21, r22, r23, r24) in
assert ((feval5 fr2).[ 0 ] == Vec.pfmul ((feval5 fr).[ 0 ]) ((feval5 fr).[ 0 ]));
let fr21 = (r210, r211, r212, r213, r214) in
eq_intro (feval5 fr21) (create2 (feval5 fr2).[ 0 ] (feval5 fr).[ 0 ]);
assert (feval5 fr21 == create2 (feval5 fr2).[ 0 ] (feval5 fr).[ 0 ]);
assert (felem_fits5 fr21 (2, 2, 2, 2, 2));
let fr215 = precomp_r5 #2 fr21 in
let a = fmul_r5 #2 acc fr21 fr215 in
fmul_r5_eval_lemma acc fr21 fr215;
fmul_r5_fits_lemma acc fr21 fr215;
assert (feval5 a == Vec.fmul (feval5 acc) (feval5 fr21));
assert (felem_fits5 a (1, 2, 1, 1, 2));
a | {
"checked_file": "Hacl.Spec.Poly1305.Field32xN.Lemmas.fst.checked",
"dependencies": [
"prims.fst.checked",
"Lib.Sequence.fsti.checked",
"Lib.IntVector.fsti.checked",
"Lib.IntTypes.fsti.checked",
"Hacl.Spec.Poly1305.Vec.fst.checked",
"Hacl.Spec.Poly1305.Field32xN.fst.checked",
"Hacl.Poly1305.Field32xN.Lemmas2.fst.checked",
"Hacl.Poly1305.Field32xN.Lemmas1.fst.checked",
"Hacl.Poly1305.Field32xN.Lemmas0.fst.checked",
"Hacl.Impl.Poly1305.Lemmas.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Classical.fsti.checked",
"FStar.Calc.fsti.checked"
],
"interface_file": false,
"source_file": "Hacl.Spec.Poly1305.Field32xN.Lemmas.fst"
} | [] | [
"Hacl.Spec.Poly1305.Field32xN.felem5",
"FStar.Pervasives.Native.Mktuple3",
"FStar.Pervasives.Native.tuple5",
"Hacl.Spec.Poly1305.Field32xN.uint64xN",
"Prims.unit",
"Prims._assert",
"Hacl.Spec.Poly1305.Field32xN.felem_fits5",
"FStar.Pervasives.Native.Mktuple5",
"Prims.nat",
"Prims.eq2",
"Lib.Sequence.lseq",
"Hacl.Spec.Poly1305.Vec.pfelem",
"Hacl.Spec.Poly1305.Field32xN.feval5",
"Hacl.Spec.Poly1305.Vec.fmul",
"Hacl.Spec.Poly1305.Field32xN.Lemmas.fmul_r5_fits_lemma",
"Hacl.Spec.Poly1305.Field32xN.Lemmas.fmul_r5_eval_lemma",
"Hacl.Spec.Poly1305.Field32xN.fmul_r5",
"Hacl.Spec.Poly1305.Field32xN.precomp_r5",
"Lib.Sequence.create2",
"Lib.Sequence.op_String_Access",
"Lib.Sequence.eq_intro",
"Hacl.Spec.Poly1305.Vec.pfmul",
"Lib.IntVector.vec_interleave_low_lemma2",
"Lib.IntTypes.U64",
"Lib.IntVector.vec_t",
"Lib.IntVector.vec_interleave_low"
] | [] | module Hacl.Spec.Poly1305.Field32xN.Lemmas
open Lib.IntTypes
open Lib.IntVector
open Lib.Sequence
open FStar.Mul
open Hacl.Spec.Poly1305.Field32xN
open Hacl.Poly1305.Field32xN.Lemmas0
open Hacl.Poly1305.Field32xN.Lemmas1
open Hacl.Poly1305.Field32xN.Lemmas2
module Vec = Hacl.Spec.Poly1305.Vec
#set-options "--z3rlimit 100 --max_fuel 0 --initial_ifuel 1 --max_ifuel 1 --using_facts_from '* -FStar.Seq'"
val lemma_feval_is_fas_nat_i:
#w:lanes
-> f:felem5 w{felem_less5 f (pow2 128)}
-> i:size_nat{i < w} ->
Lemma ((feval5 f).[i] == (fas_nat5 f).[i])
let lemma_feval_is_fas_nat_i #w f i =
assert_norm (pow2 128 < Vec.prime);
assert ((feval5 f).[i] == (as_nat5 (transpose f).[i]) % Vec.prime);
FStar.Math.Lemmas.modulo_lemma (as_nat5 (transpose f).[i]) Vec.prime
val lemma_feval_is_fas_nat:
#w:lanes
-> f:felem5 w{felem_less5 f (pow2 128)} ->
Lemma (forall (i:nat). i < w ==> (fas_nat5 f).[i] == (feval5 f).[i])
let lemma_feval_is_fas_nat #w f =
FStar.Classical.forall_intro (lemma_feval_is_fas_nat_i #w f)
val precomp_r5_fits_lemma:
#w:lanes
-> r:felem5 w{felem_fits5 r (1, 1, 1, 1, 1)} ->
Lemma (felem_fits5 (precomp_r5 #w r) (5, 5, 5, 5, 5))
[SMTPat (precomp_r5 #w r)]
let precomp_r5_fits_lemma #w r =
FStar.Classical.forall_intro (precomp_r5_as_tup64 #w r)
val precomp_r5_fits_lemma2:
#w:lanes
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)} ->
Lemma (felem_fits5 (precomp_r5 #w r) (10, 10, 10, 10, 10))
[SMTPat (precomp_r5 #w r)]
let precomp_r5_fits_lemma2 #w r =
FStar.Classical.forall_intro (precomp_r5_as_tup64 #w r)
val precomp_r5_zeros: w:lanes -> Lemma
(let r = (zero w, zero w, zero w, zero w, zero w) in
precomp_r5 r == (zero w, zero w, zero w, zero w, zero w))
let precomp_r5_zeros w =
let r = (zero w, zero w, zero w, zero w, zero w) in
let (r0, r1, r2, r3, r4) = precomp_r5 r in
let aux (i:nat{i < w}) : Lemma ((vec_v (vec_smul_mod (zero w) (u64 5))).[i] == u64 0) = () in
Classical.forall_intro aux;
eq_intro (vec_v (vec_smul_mod (zero w) (u64 5))) (vec_v (zero w));
vecv_extensionality (vec_smul_mod (zero w) (u64 5)) (zero w)
val fadd5_fits_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (2,2,2,2,2)}
-> f2:felem5 w{felem_fits5 f2 (1,1,1,1,1)} ->
Lemma (felem_fits5 (fadd5 f1 f2) (3,3,3,3,3))
[SMTPat (fadd5 f1 f2)]
let fadd5_fits_lemma #w f1 f2 =
let (f10, f11, f12, f13, f14) = f1 in
let (f20, f21, f22, f23, f24) = f2 in
let o = fadd5 f1 f2 in
vec_add_mod_lemma f10 f20;
vec_add_mod_lemma f11 f21;
vec_add_mod_lemma f12 f22;
vec_add_mod_lemma f13 f23;
vec_add_mod_lemma f14 f24
val fadd5_eval_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (2,2,2,2,2)}
-> f2:felem5 w{felem_fits5 f2 (1,1,1,1,1)} ->
Lemma (feval5 (fadd5 f1 f2) == map2 Vec.pfadd (feval5 f1) (feval5 f2))
[SMTPat (fadd5 f1 f2)]
let fadd5_eval_lemma #w f1 f2 =
let o = fadd5 f1 f2 in
FStar.Classical.forall_intro (fadd5_eval_lemma_i f1 f2);
eq_intro (feval5 o) (map2 Vec.pfadd (feval5 f1) (feval5 f2))
val mul_felem5_fits_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10)} ->
Lemma (felem_wide_fits5 (mul_felem5 #w f1 r r5) (126, 102, 78, 54, 30))
[SMTPat (mul_felem5 #w f1 r r5)]
let mul_felem5_fits_lemma #w f1 r r5 =
let (r0, r1, r2, r3, r4) = r in
let (f10, f11, f12, f13, f14) = f1 in
let (r50, r51, r52, r53, r54) = r5 in
let (a0,a1,a2,a3,a4) = smul_felem5 #w f10 (r0,r1,r2,r3,r4) in
smul_felem5_fits_lemma #w #3 #(2,2,2,2,2) f10 (r0,r1,r2,r3,r4);
let (a10,a11,a12,a13,a14) = smul_add_felem5 #w f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4) in
smul_add_felem5_fits_lemma #w #3 #(10,2,2,2,2) #(6,6,6,6,6) f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4);
let (a20,a21,a22,a23,a24) = smul_add_felem5 #w f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14) in
smul_add_felem5_fits_lemma #w #3 #(10,10,2,2,2) #(36,12,12,12,12) f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14);
let (a30,a31,a32,a33,a34) = smul_add_felem5 #w f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24) in
smul_add_felem5_fits_lemma #w #3 #(10,10,10,2,2) #(66,42,18,18,18) f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24);
let (a40,a41,a42,a43,a44) = smul_add_felem5 #w f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34) in
smul_add_felem5_fits_lemma #w #3 #(10,10,10,10,2) #(96,72,48,24,24) f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34)
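(* Informally, mul_felem5 is schoolbook multiplication in radix 2^26: the j-th row
   multiplies limb f1_j by r rotated j positions, and the limbs that would overflow
   past 2^130 come back scaled by 5 (the r5j entries), since the Poly1305 prime is
   2^130 - 5 and hence 2^130 == 5 (mod Vec.prime).  The eval lemma below turns this
   limb-level accounting into (feval5 f1).[i] `Vec.pfmul` (feval5 r).[i] per lane. *)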
val mul_felem5_eval_lemma_i:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10) /\ r5 == precomp_r5 r}
-> i:nat{i < w} ->
Lemma ((feval5 (mul_felem5 #w f1 r r5)).[i] == (feval5 f1).[i] `Vec.pfmul` (feval5 r).[i])
let mul_felem5_eval_lemma_i #w f1 r r5 i =
let (r0, r1, r2, r3, r4) = r in
let (f10, f11, f12, f13, f14) = f1 in
let (r50, r51, r52, r53, r54) = r5 in
let (a0,a1,a2,a3,a4) = smul_felem5 #w f10 (r0,r1,r2,r3,r4) in
smul_felem5_eval_lemma #w #3 #(2,2,2,2,2) f10 (r0,r1,r2,r3,r4);
smul_felem5_fits_lemma #w #3 #(2,2,2,2,2) f10 (r0,r1,r2,r3,r4);
assert ((fas_nat5 (a0,a1,a2,a3,a4)).[i] == (uint64xN_v f10).[i] * (fas_nat5 (r0,r1,r2,r3,r4)).[i]);
let (a10,a11,a12,a13,a14) = smul_add_felem5 #w f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4) in
smul_add_felem5_eval_lemma #w #3 #(10,2,2,2,2) #(6,6,6,6,6) f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4);
smul_add_felem5_fits_lemma #w #3 #(10,2,2,2,2) #(6,6,6,6,6) f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4);
assert ((fas_nat5 (a10,a11,a12,a13,a14)).[i] == (fas_nat5 (a0,a1,a2,a3,a4)).[i] + (uint64xN_v f11).[i] * (fas_nat5 (r54,r0,r1,r2,r3)).[i]);
let (a20,a21,a22,a23,a24) = smul_add_felem5 #w f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14) in
smul_add_felem5_eval_lemma #w #3 #(10,10,2,2,2) #(36,12,12,12,12) f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14);
smul_add_felem5_fits_lemma #w #3 #(10,10,2,2,2) #(36,12,12,12,12) f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14);
assert ((fas_nat5 (a20,a21,a22,a23,a24)).[i] == (fas_nat5 (a10,a11,a12,a13,a14)).[i] + (uint64xN_v f12).[i] * (fas_nat5 (r53,r54,r0,r1,r2)).[i]);
let (a30,a31,a32,a33,a34) = smul_add_felem5 #w f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24) in
smul_add_felem5_eval_lemma #w #3 #(10,10,10,2,2) #(66,42,18,18,18) f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24);
smul_add_felem5_fits_lemma #w #3 #(10,10,10,2,2) #(66,42,18,18,18) f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24);
assert ((fas_nat5 (a30,a31,a32,a33,a34)).[i] == (fas_nat5 (a20,a21,a22,a23,a24)).[i] + (uint64xN_v f13).[i] * (fas_nat5 (r52,r53,r54,r0,r1)).[i]);
let (a40,a41,a42,a43,a44) = smul_add_felem5 #w f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34) in
smul_add_felem5_eval_lemma #w #3 #(10,10,10,10,2) #(96,72,48,24,24) f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34);
smul_add_felem5_fits_lemma #w #3 #(10,10,10,10,2) #(96,72,48,24,24) f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34);
assert ((fas_nat5 (a40,a41,a42,a43,a44)).[i] == (fas_nat5 (a30,a31,a32,a33,a34)).[i] + (uint64xN_v f14).[i] * (fas_nat5 (r51,r52,r53,r54,r0)).[i]);
assert ((fas_nat5 (a40,a41,a42,a43,a44)).[i] ==
(uint64xN_v f10).[i] * (fas_nat5 (r0,r1,r2,r3,r4)).[i] +
(uint64xN_v f11).[i] * (fas_nat5 (r54,r0,r1,r2,r3)).[i] +
(uint64xN_v f12).[i] * (fas_nat5 (r53,r54,r0,r1,r2)).[i] +
(uint64xN_v f13).[i] * (fas_nat5 (r52,r53,r54,r0,r1)).[i] +
(uint64xN_v f14).[i] * (fas_nat5 (r51,r52,r53,r54,r0)).[i]);
mul_felem5_eval_as_tup64 #w f1 r r5 i;
mul_felem5_lemma (as_tup64_i f1 i) (as_tup64_i r i)
val mul_felem5_eval_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10) /\ r5 == precomp_r5 r} ->
Lemma (feval5 (mul_felem5 #w f1 r r5) == map2 (Vec.pfmul) (feval5 f1) (feval5 r))
[SMTPat (mul_felem5 #w f1 r r5)]
let mul_felem5_eval_lemma #w f1 r r5 =
let tmp = map2 (Vec.pfmul) (feval5 f1) (feval5 r) in
FStar.Classical.forall_intro (mul_felem5_eval_lemma_i #w f1 r r5);
eq_intro (feval5 (mul_felem5 #w f1 r r5)) tmp
val fmul_r5_fits_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10)} ->
Lemma (felem_fits5 (fmul_r5 #w f1 r r5) (1, 2, 1, 1, 2))
[SMTPat (fmul_r5 #w f1 r r5)]
let fmul_r5_fits_lemma #w f1 r r5 =
let tmp = mul_felem5 f1 r r5 in
mul_felem5_fits_lemma #w f1 r r5;
let res = carry_wide_felem5 tmp in
carry_wide_felem5_fits_lemma #w tmp
val fmul_r5_eval_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10) /\ r5 == precomp_r5 r} ->
Lemma (feval5 (fmul_r5 #w f1 r r5) == map2 (Vec.pfmul) (feval5 f1) (feval5 r))
[SMTPat (fmul_r5 #w f1 r r5)]
let fmul_r5_eval_lemma #w f1 r r5 =
let tmp = mul_felem5 f1 r r5 in
mul_felem5_eval_lemma #w f1 r r5;
let res = carry_wide_felem5 tmp in
carry_wide_felem5_eval_lemma #w tmp
val fadd_mul_r5_fits_lemma:
#w:lanes
-> acc:felem5 w{felem_fits5 acc (2, 2, 2, 2, 2)}
-> f1:felem5 w{felem_fits5 f1 (1, 1, 1, 1, 1)}
-> r:felem5 w{felem_fits5 r (1, 1, 1, 1, 1)}
-> r5:felem5 w{felem_fits5 r5 (5, 5, 5, 5, 5)} ->
Lemma (felem_fits5 (fadd_mul_r5 acc f1 r r5) (1, 2, 1, 1, 2))
[SMTPat (fadd_mul_r5 acc f1 r r5)]
let fadd_mul_r5_fits_lemma #w acc f1 r r5 =
let acc1 = fadd5 acc f1 in
fadd5_fits_lemma #w acc f1;
let tmp = mul_felem5 acc1 r r5 in
mul_felem5_fits_lemma #w acc1 r r5;
let res = carry_wide_felem5 tmp in
carry_wide_felem5_fits_lemma #w tmp
val fadd_mul_r5_eval_lemma:
#w:lanes
-> acc:felem5 w{felem_fits5 acc (2, 2, 2, 2, 2)}
-> f1:felem5 w{felem_fits5 f1 (1, 1, 1, 1, 1)}
-> r:felem5 w{felem_fits5 r (1, 1, 1, 1, 1)}
-> r5:felem5 w{felem_fits5 r5 (5, 5, 5, 5, 5) /\ r5 == precomp_r5 r} ->
Lemma (feval5 (fadd_mul_r5 acc f1 r r5) ==
map2 (Vec.pfmul) (map2 (Vec.pfadd) (feval5 acc) (feval5 f1)) (feval5 r))
[SMTPat (fadd_mul_r5 acc f1 r r5)]
let fadd_mul_r5_eval_lemma #w acc f1 r r5 =
let acc1 = fadd5 acc f1 in
fadd5_eval_lemma #w acc f1;
let tmp = mul_felem5 acc1 r r5 in
mul_felem5_eval_lemma #w acc1 r r5;
let res = carry_wide_felem5 tmp in
carry_wide_felem5_eval_lemma #w tmp
val reduce_felem5_eval_lemma:
#w:lanes
-> f:felem5 w{felem_fits5 f (2, 2, 2, 2, 2)} ->
Lemma
(felem_fits5 (reduce_felem5 f) (1, 1, 1, 1, 1) /\
(feval5 f).[0] == (fas_nat5 (reduce_felem5 f)).[0])
[SMTPat (reduce_felem5 f)]
let reduce_felem5_eval_lemma #w f =
carry_full_felem5_eval_lemma f;
carry_full_felem5_fits_lemma f;
let f = carry_full_felem5 f in
carry_reduce_felem5_lemma #w f;
subtract_p5_felem5_lemma #w (carry_full_felem5 f)
val fmul_r2_normalize50:
acc:felem5 2
-> r:felem5 2
-> r2:felem5 2 ->
Pure (felem5 2)
(requires
felem_fits5 acc (3, 3, 3, 3, 3) /\
felem_fits5 r (1, 1, 1, 1, 1) /\
felem_fits5 r2 (2, 2, 2, 2, 2) /\
feval5 r2 == Vec.compute_r2 (feval5 r).[0])
(ensures fun a ->
let fr21 = create2 (feval5 r2).[0] (feval5 r).[0] in
feval5 a == Vec.fmul (feval5 acc) fr21 /\
felem_fits5 a (1, 2, 1, 1, 2)) | false | false | Hacl.Spec.Poly1305.Field32xN.Lemmas.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 100,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val fmul_r2_normalize50:
acc:felem5 2
-> r:felem5 2
-> r2:felem5 2 ->
Pure (felem5 2)
(requires
felem_fits5 acc (3, 3, 3, 3, 3) /\
felem_fits5 r (1, 1, 1, 1, 1) /\
felem_fits5 r2 (2, 2, 2, 2, 2) /\
feval5 r2 == Vec.compute_r2 (feval5 r).[0])
(ensures fun a ->
let fr21 = create2 (feval5 r2).[0] (feval5 r).[0] in
feval5 a == Vec.fmul (feval5 acc) fr21 /\
felem_fits5 a (1, 2, 1, 1, 2)) | [] | Hacl.Spec.Poly1305.Field32xN.Lemmas.fmul_r2_normalize50 | {
"file_name": "code/poly1305/Hacl.Spec.Poly1305.Field32xN.Lemmas.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} |
acc: Hacl.Spec.Poly1305.Field32xN.felem5 2 ->
r: Hacl.Spec.Poly1305.Field32xN.felem5 2 ->
r2: Hacl.Spec.Poly1305.Field32xN.felem5 2
-> Prims.Pure (Hacl.Spec.Poly1305.Field32xN.felem5 2) | {
"end_col": 3,
"end_line": 312,
"start_col": 93,
"start_line": 284
} |
FStar.Pervasives.Lemma | val precomp_r5_fits_lemma:
#w:lanes
-> r:felem5 w{felem_fits5 r (1, 1, 1, 1, 1)} ->
Lemma (felem_fits5 (precomp_r5 #w r) (5, 5, 5, 5, 5))
[SMTPat (precomp_r5 #w r)] | [
{
"abbrev": true,
"full_module": "Hacl.Spec.Poly1305.Vec",
"short_module": "Vec"
},
{
"abbrev": false,
"full_module": "Hacl.Poly1305.Field32xN.Lemmas2",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Poly1305.Field32xN.Lemmas1",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Poly1305.Field32xN.Lemmas0",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305.Field32xN",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305.Field32xN",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305.Field32xN",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let precomp_r5_fits_lemma #w r =
FStar.Classical.forall_intro (precomp_r5_as_tup64 #w r) | val precomp_r5_fits_lemma:
#w:lanes
-> r:felem5 w{felem_fits5 r (1, 1, 1, 1, 1)} ->
Lemma (felem_fits5 (precomp_r5 #w r) (5, 5, 5, 5, 5))
[SMTPat (precomp_r5 #w r)]
let precomp_r5_fits_lemma #w r = | false | null | true | FStar.Classical.forall_intro (precomp_r5_as_tup64 #w r) | {
"checked_file": "Hacl.Spec.Poly1305.Field32xN.Lemmas.fst.checked",
"dependencies": [
"prims.fst.checked",
"Lib.Sequence.fsti.checked",
"Lib.IntVector.fsti.checked",
"Lib.IntTypes.fsti.checked",
"Hacl.Spec.Poly1305.Vec.fst.checked",
"Hacl.Spec.Poly1305.Field32xN.fst.checked",
"Hacl.Poly1305.Field32xN.Lemmas2.fst.checked",
"Hacl.Poly1305.Field32xN.Lemmas1.fst.checked",
"Hacl.Poly1305.Field32xN.Lemmas0.fst.checked",
"Hacl.Impl.Poly1305.Lemmas.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Classical.fsti.checked",
"FStar.Calc.fsti.checked"
],
"interface_file": false,
"source_file": "Hacl.Spec.Poly1305.Field32xN.Lemmas.fst"
} | [
"lemma"
] | [
"Hacl.Spec.Poly1305.Field32xN.lanes",
"Hacl.Spec.Poly1305.Field32xN.felem5",
"Hacl.Spec.Poly1305.Field32xN.felem_fits5",
"FStar.Pervasives.Native.Mktuple5",
"Prims.nat",
"FStar.Classical.forall_intro",
"Prims.b2t",
"Prims.op_LessThan",
"Hacl.Spec.Poly1305.Field32xN.as_tup64_i",
"Hacl.Spec.Poly1305.Field32xN.precomp_r5",
"Lib.IntTypes.uint64",
"Prims.l_and",
"Prims.eq2",
"Lib.IntTypes.int_t",
"Lib.IntTypes.U64",
"Lib.IntTypes.SEC",
"Lib.IntTypes.op_Star_Bang",
"Lib.IntTypes.u64",
"Hacl.Poly1305.Field32xN.Lemmas0.precomp_r5_as_tup64",
"Prims.unit"
] | [] | module Hacl.Spec.Poly1305.Field32xN.Lemmas
open Lib.IntTypes
open Lib.IntVector
open Lib.Sequence
open FStar.Mul
open Hacl.Spec.Poly1305.Field32xN
open Hacl.Poly1305.Field32xN.Lemmas0
open Hacl.Poly1305.Field32xN.Lemmas1
open Hacl.Poly1305.Field32xN.Lemmas2
module Vec = Hacl.Spec.Poly1305.Vec
#set-options "--z3rlimit 100 --max_fuel 0 --initial_ifuel 1 --max_ifuel 1 --using_facts_from '* -FStar.Seq'"
val lemma_feval_is_fas_nat_i:
#w:lanes
-> f:felem5 w{felem_less5 f (pow2 128)}
-> i:size_nat{i < w} ->
Lemma ((feval5 f).[i] == (fas_nat5 f).[i])
let lemma_feval_is_fas_nat_i #w f i =
assert_norm (pow2 128 < Vec.prime);
assert ((feval5 f).[i] == (as_nat5 (transpose f).[i]) % Vec.prime);
FStar.Math.Lemmas.modulo_lemma (as_nat5 (transpose f).[i]) Vec.prime
val lemma_feval_is_fas_nat:
#w:lanes
-> f:felem5 w{felem_less5 f (pow2 128)} ->
Lemma (forall (i:nat). i < w ==> (fas_nat5 f).[i] == (feval5 f).[i])
let lemma_feval_is_fas_nat #w f =
FStar.Classical.forall_intro (lemma_feval_is_fas_nat_i #w f)
val precomp_r5_fits_lemma:
#w:lanes
-> r:felem5 w{felem_fits5 r (1, 1, 1, 1, 1)} ->
Lemma (felem_fits5 (precomp_r5 #w r) (5, 5, 5, 5, 5))
[SMTPat (precomp_r5 #w r)] | false | false | Hacl.Spec.Poly1305.Field32xN.Lemmas.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 100,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val precomp_r5_fits_lemma:
#w:lanes
-> r:felem5 w{felem_fits5 r (1, 1, 1, 1, 1)} ->
Lemma (felem_fits5 (precomp_r5 #w r) (5, 5, 5, 5, 5))
[SMTPat (precomp_r5 #w r)] | [] | Hacl.Spec.Poly1305.Field32xN.Lemmas.precomp_r5_fits_lemma | {
"file_name": "code/poly1305/Hacl.Spec.Poly1305.Field32xN.Lemmas.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} |
r:
Hacl.Spec.Poly1305.Field32xN.felem5 w
{Hacl.Spec.Poly1305.Field32xN.felem_fits5 r (1, 1, 1, 1, 1)}
-> FStar.Pervasives.Lemma
(ensures
Hacl.Spec.Poly1305.Field32xN.felem_fits5 (Hacl.Spec.Poly1305.Field32xN.precomp_r5 r)
(5, 5, 5, 5, 5)) [SMTPat (Hacl.Spec.Poly1305.Field32xN.precomp_r5 r)] | {
"end_col": 57,
"end_line": 45,
"start_col": 2,
"start_line": 45
} |
FStar.Pervasives.Lemma | val mul_felem5_eval_lemma_i:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10) /\ r5 == precomp_r5 r}
-> i:nat{i < w} ->
Lemma ((feval5 (mul_felem5 #w f1 r r5)).[i] == (feval5 f1).[i] `Vec.pfmul` (feval5 r).[i]) | [
{
"abbrev": true,
"full_module": "Hacl.Spec.Poly1305.Vec",
"short_module": "Vec"
},
{
"abbrev": false,
"full_module": "Hacl.Poly1305.Field32xN.Lemmas2",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Poly1305.Field32xN.Lemmas1",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Poly1305.Field32xN.Lemmas0",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305.Field32xN",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305.Field32xN",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305.Field32xN",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let mul_felem5_eval_lemma_i #w f1 r r5 i =
let (r0, r1, r2, r3, r4) = r in
let (f10, f11, f12, f13, f14) = f1 in
let (r50, r51, r52, r53, r54) = r5 in
let (a0,a1,a2,a3,a4) = smul_felem5 #w f10 (r0,r1,r2,r3,r4) in
smul_felem5_eval_lemma #w #3 #(2,2,2,2,2) f10 (r0,r1,r2,r3,r4);
smul_felem5_fits_lemma #w #3 #(2,2,2,2,2) f10 (r0,r1,r2,r3,r4);
assert ((fas_nat5 (a0,a1,a2,a3,a4)).[i] == (uint64xN_v f10).[i] * (fas_nat5 (r0,r1,r2,r3,r4)).[i]);
let (a10,a11,a12,a13,a14) = smul_add_felem5 #w f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4) in
smul_add_felem5_eval_lemma #w #3 #(10,2,2,2,2) #(6,6,6,6,6) f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4);
smul_add_felem5_fits_lemma #w #3 #(10,2,2,2,2) #(6,6,6,6,6) f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4);
assert ((fas_nat5 (a10,a11,a12,a13,a14)).[i] == (fas_nat5 (a0,a1,a2,a3,a4)).[i] + (uint64xN_v f11).[i] * (fas_nat5 (r54,r0,r1,r2,r3)).[i]);
let (a20,a21,a22,a23,a24) = smul_add_felem5 #w f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14) in
smul_add_felem5_eval_lemma #w #3 #(10,10,2,2,2) #(36,12,12,12,12) f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14);
smul_add_felem5_fits_lemma #w #3 #(10,10,2,2,2) #(36,12,12,12,12) f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14);
assert ((fas_nat5 (a20,a21,a22,a23,a24)).[i] == (fas_nat5 (a10,a11,a12,a13,a14)).[i] + (uint64xN_v f12).[i] * (fas_nat5 (r53,r54,r0,r1,r2)).[i]);
let (a30,a31,a32,a33,a34) = smul_add_felem5 #w f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24) in
smul_add_felem5_eval_lemma #w #3 #(10,10,10,2,2) #(66,42,18,18,18) f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24);
smul_add_felem5_fits_lemma #w #3 #(10,10,10,2,2) #(66,42,18,18,18) f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24);
assert ((fas_nat5 (a30,a31,a32,a33,a34)).[i] == (fas_nat5 (a20,a21,a22,a23,a24)).[i] + (uint64xN_v f13).[i] * (fas_nat5 (r52,r53,r54,r0,r1)).[i]);
let (a40,a41,a42,a43,a44) = smul_add_felem5 #w f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34) in
smul_add_felem5_eval_lemma #w #3 #(10,10,10,10,2) #(96,72,48,24,24) f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34);
smul_add_felem5_fits_lemma #w #3 #(10,10,10,10,2) #(96,72,48,24,24) f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34);
assert ((fas_nat5 (a40,a41,a42,a43,a44)).[i] == (fas_nat5 (a30,a31,a32,a33,a34)).[i] + (uint64xN_v f14).[i] * (fas_nat5 (r51,r52,r53,r54,r0)).[i]);
assert ((fas_nat5 (a40,a41,a42,a43,a44)).[i] ==
(uint64xN_v f10).[i] * (fas_nat5 (r0,r1,r2,r3,r4)).[i] +
(uint64xN_v f11).[i] * (fas_nat5 (r54,r0,r1,r2,r3)).[i] +
(uint64xN_v f12).[i] * (fas_nat5 (r53,r54,r0,r1,r2)).[i] +
(uint64xN_v f13).[i] * (fas_nat5 (r52,r53,r54,r0,r1)).[i] +
(uint64xN_v f14).[i] * (fas_nat5 (r51,r52,r53,r54,r0)).[i]);
mul_felem5_eval_as_tup64 #w f1 r r5 i;
mul_felem5_lemma (as_tup64_i f1 i) (as_tup64_i r i) | val mul_felem5_eval_lemma_i:
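(* Informal summary of the proof above: the chained smul_felem5 / smul_add_felem5 eval
   lemmas accumulate, per lane, the sum
     f1_0*(r0..r4) + f1_1*(r54,r0..r3) + f1_2*(r53,r54,r0..r2)
       + f1_3*(r52..r54,r0,r1) + f1_4*(r51..r54,r0)
   as a fas_nat5 value, and mul_felem5_eval_as_tup64 together with mul_felem5_lemma
   reduce that sum modulo Vec.prime to the stated pfmul of (feval5 f1).[i] and
   (feval5 r).[i]. *)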
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10) /\ r5 == precomp_r5 r}
-> i:nat{i < w} ->
Lemma ((feval5 (mul_felem5 #w f1 r r5)).[i] == (feval5 f1).[i] `Vec.pfmul` (feval5 r).[i])
let mul_felem5_eval_lemma_i #w f1 r r5 i = | false | null | true | let r0, r1, r2, r3, r4 = r in
let f10, f11, f12, f13, f14 = f1 in
let r50, r51, r52, r53, r54 = r5 in
let a0, a1, a2, a3, a4 = smul_felem5 #w f10 (r0, r1, r2, r3, r4) in
smul_felem5_eval_lemma #w #3 #(2, 2, 2, 2, 2) f10 (r0, r1, r2, r3, r4);
smul_felem5_fits_lemma #w #3 #(2, 2, 2, 2, 2) f10 (r0, r1, r2, r3, r4);
assert ((fas_nat5 (a0, a1, a2, a3, a4)).[ i ] ==
(uint64xN_v f10).[ i ] * (fas_nat5 (r0, r1, r2, r3, r4)).[ i ]);
let a10, a11, a12, a13, a14 = smul_add_felem5 #w f11 (r54, r0, r1, r2, r3) (a0, a1, a2, a3, a4) in
smul_add_felem5_eval_lemma #w
#3
#(10, 2, 2, 2, 2)
#(6, 6, 6, 6, 6)
f11
(r54, r0, r1, r2, r3)
(a0, a1, a2, a3, a4);
smul_add_felem5_fits_lemma #w
#3
#(10, 2, 2, 2, 2)
#(6, 6, 6, 6, 6)
f11
(r54, r0, r1, r2, r3)
(a0, a1, a2, a3, a4);
assert ((fas_nat5 (a10, a11, a12, a13, a14)).[ i ] ==
(fas_nat5 (a0, a1, a2, a3, a4)).[ i ] +
(uint64xN_v f11).[ i ] * (fas_nat5 (r54, r0, r1, r2, r3)).[ i ]);
let a20, a21, a22, a23, a24 =
smul_add_felem5 #w f12 (r53, r54, r0, r1, r2) (a10, a11, a12, a13, a14)
in
smul_add_felem5_eval_lemma #w
#3
#(10, 10, 2, 2, 2)
#(36, 12, 12, 12, 12)
f12
(r53, r54, r0, r1, r2)
(a10, a11, a12, a13, a14);
smul_add_felem5_fits_lemma #w
#3
#(10, 10, 2, 2, 2)
#(36, 12, 12, 12, 12)
f12
(r53, r54, r0, r1, r2)
(a10, a11, a12, a13, a14);
assert ((fas_nat5 (a20, a21, a22, a23, a24)).[ i ] ==
(fas_nat5 (a10, a11, a12, a13, a14)).[ i ] +
(uint64xN_v f12).[ i ] * (fas_nat5 (r53, r54, r0, r1, r2)).[ i ]);
let a30, a31, a32, a33, a34 =
smul_add_felem5 #w f13 (r52, r53, r54, r0, r1) (a20, a21, a22, a23, a24)
in
smul_add_felem5_eval_lemma #w
#3
#(10, 10, 10, 2, 2)
#(66, 42, 18, 18, 18)
f13
(r52, r53, r54, r0, r1)
(a20, a21, a22, a23, a24);
smul_add_felem5_fits_lemma #w
#3
#(10, 10, 10, 2, 2)
#(66, 42, 18, 18, 18)
f13
(r52, r53, r54, r0, r1)
(a20, a21, a22, a23, a24);
assert ((fas_nat5 (a30, a31, a32, a33, a34)).[ i ] ==
(fas_nat5 (a20, a21, a22, a23, a24)).[ i ] +
(uint64xN_v f13).[ i ] * (fas_nat5 (r52, r53, r54, r0, r1)).[ i ]);
let a40, a41, a42, a43, a44 =
smul_add_felem5 #w f14 (r51, r52, r53, r54, r0) (a30, a31, a32, a33, a34)
in
smul_add_felem5_eval_lemma #w
#3
#(10, 10, 10, 10, 2)
#(96, 72, 48, 24, 24)
f14
(r51, r52, r53, r54, r0)
(a30, a31, a32, a33, a34);
smul_add_felem5_fits_lemma #w
#3
#(10, 10, 10, 10, 2)
#(96, 72, 48, 24, 24)
f14
(r51, r52, r53, r54, r0)
(a30, a31, a32, a33, a34);
assert ((fas_nat5 (a40, a41, a42, a43, a44)).[ i ] ==
(fas_nat5 (a30, a31, a32, a33, a34)).[ i ] +
(uint64xN_v f14).[ i ] * (fas_nat5 (r51, r52, r53, r54, r0)).[ i ]);
assert ((fas_nat5 (a40, a41, a42, a43, a44)).[ i ] ==
(uint64xN_v f10).[ i ] * (fas_nat5 (r0, r1, r2, r3, r4)).[ i ] +
(uint64xN_v f11).[ i ] * (fas_nat5 (r54, r0, r1, r2, r3)).[ i ] +
(uint64xN_v f12).[ i ] * (fas_nat5 (r53, r54, r0, r1, r2)).[ i ] +
(uint64xN_v f13).[ i ] * (fas_nat5 (r52, r53, r54, r0, r1)).[ i ] +
(uint64xN_v f14).[ i ] * (fas_nat5 (r51, r52, r53, r54, r0)).[ i ]);
mul_felem5_eval_as_tup64 #w f1 r r5 i;
mul_felem5_lemma (as_tup64_i f1 i) (as_tup64_i r i) | {
"checked_file": "Hacl.Spec.Poly1305.Field32xN.Lemmas.fst.checked",
"dependencies": [
"prims.fst.checked",
"Lib.Sequence.fsti.checked",
"Lib.IntVector.fsti.checked",
"Lib.IntTypes.fsti.checked",
"Hacl.Spec.Poly1305.Vec.fst.checked",
"Hacl.Spec.Poly1305.Field32xN.fst.checked",
"Hacl.Poly1305.Field32xN.Lemmas2.fst.checked",
"Hacl.Poly1305.Field32xN.Lemmas1.fst.checked",
"Hacl.Poly1305.Field32xN.Lemmas0.fst.checked",
"Hacl.Impl.Poly1305.Lemmas.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Classical.fsti.checked",
"FStar.Calc.fsti.checked"
],
"interface_file": false,
"source_file": "Hacl.Spec.Poly1305.Field32xN.Lemmas.fst"
} | [
"lemma"
] | [
"Hacl.Spec.Poly1305.Field32xN.lanes",
"Hacl.Spec.Poly1305.Field32xN.felem5",
"Hacl.Spec.Poly1305.Field32xN.felem_fits5",
"FStar.Pervasives.Native.Mktuple5",
"Prims.nat",
"Prims.l_and",
"Prims.eq2",
"Hacl.Spec.Poly1305.Field32xN.precomp_r5",
"Prims.b2t",
"Prims.op_LessThan",
"Hacl.Spec.Poly1305.Field32xN.uint64xN",
"Hacl.Poly1305.Field32xN.Lemmas0.mul_felem5_lemma",
"Hacl.Spec.Poly1305.Field32xN.as_tup64_i",
"Prims.unit",
"Hacl.Poly1305.Field32xN.Lemmas0.mul_felem5_eval_as_tup64",
"Prims._assert",
"Prims.int",
"Lib.Sequence.op_String_Access",
"Hacl.Spec.Poly1305.Field32xN.fas_nat5",
"Prims.op_Addition",
"FStar.Mul.op_Star",
"Hacl.Spec.Poly1305.Field32xN.uint64xN_v",
"Hacl.Poly1305.Field32xN.Lemmas0.smul_add_felem5_fits_lemma",
"Hacl.Poly1305.Field32xN.Lemmas0.smul_add_felem5_eval_lemma",
"Hacl.Spec.Poly1305.Field32xN.felem_wide5",
"Hacl.Spec.Poly1305.Field32xN.smul_add_felem5",
"Hacl.Poly1305.Field32xN.Lemmas0.smul_felem5_fits_lemma",
"Hacl.Poly1305.Field32xN.Lemmas0.smul_felem5_eval_lemma",
"Hacl.Spec.Poly1305.Field32xN.smul_felem5"
] | [] | module Hacl.Spec.Poly1305.Field32xN.Lemmas
open Lib.IntTypes
open Lib.IntVector
open Lib.Sequence
open FStar.Mul
open Hacl.Spec.Poly1305.Field32xN
open Hacl.Poly1305.Field32xN.Lemmas0
open Hacl.Poly1305.Field32xN.Lemmas1
open Hacl.Poly1305.Field32xN.Lemmas2
module Vec = Hacl.Spec.Poly1305.Vec
#set-options "--z3rlimit 100 --max_fuel 0 --initial_ifuel 1 --max_ifuel 1 --using_facts_from '* -FStar.Seq'"
val lemma_feval_is_fas_nat_i:
#w:lanes
-> f:felem5 w{felem_less5 f (pow2 128)}
-> i:size_nat{i < w} ->
Lemma ((feval5 f).[i] == (fas_nat5 f).[i])
let lemma_feval_is_fas_nat_i #w f i =
assert_norm (pow2 128 < Vec.prime);
assert ((feval5 f).[i] == (as_nat5 (transpose f).[i]) % Vec.prime);
FStar.Math.Lemmas.modulo_lemma (as_nat5 (transpose f).[i]) Vec.prime
val lemma_feval_is_fas_nat:
#w:lanes
-> f:felem5 w{felem_less5 f (pow2 128)} ->
Lemma (forall (i:nat). i < w ==> (fas_nat5 f).[i] == (feval5 f).[i])
let lemma_feval_is_fas_nat #w f =
FStar.Classical.forall_intro (lemma_feval_is_fas_nat_i #w f)
val precomp_r5_fits_lemma:
#w:lanes
-> r:felem5 w{felem_fits5 r (1, 1, 1, 1, 1)} ->
Lemma (felem_fits5 (precomp_r5 #w r) (5, 5, 5, 5, 5))
[SMTPat (precomp_r5 #w r)]
let precomp_r5_fits_lemma #w r =
FStar.Classical.forall_intro (precomp_r5_as_tup64 #w r)
val precomp_r5_fits_lemma2:
#w:lanes
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)} ->
Lemma (felem_fits5 (precomp_r5 #w r) (10, 10, 10, 10, 10))
[SMTPat (precomp_r5 #w r)]
let precomp_r5_fits_lemma2 #w r =
FStar.Classical.forall_intro (precomp_r5_as_tup64 #w r)
val precomp_r5_zeros: w:lanes -> Lemma
(let r = (zero w, zero w, zero w, zero w, zero w) in
precomp_r5 r == (zero w, zero w, zero w, zero w, zero w))
let precomp_r5_zeros w =
let r = (zero w, zero w, zero w, zero w, zero w) in
let (r0, r1, r2, r3, r4) = precomp_r5 r in
let aux (i:nat{i < w}) : Lemma ((vec_v (vec_smul_mod (zero w) (u64 5))).[i] == u64 0) = () in
Classical.forall_intro aux;
eq_intro (vec_v (vec_smul_mod (zero w) (u64 5))) (vec_v (zero w));
vecv_extensionality (vec_smul_mod (zero w) (u64 5)) (zero w)
val fadd5_fits_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (2,2,2,2,2)}
-> f2:felem5 w{felem_fits5 f2 (1,1,1,1,1)} ->
Lemma (felem_fits5 (fadd5 f1 f2) (3,3,3,3,3))
[SMTPat (fadd5 f1 f2)]
let fadd5_fits_lemma #w f1 f2 =
let (f10, f11, f12, f13, f14) = f1 in
let (f20, f21, f22, f23, f24) = f2 in
let o = fadd5 f1 f2 in
vec_add_mod_lemma f10 f20;
vec_add_mod_lemma f11 f21;
vec_add_mod_lemma f12 f22;
vec_add_mod_lemma f13 f23;
vec_add_mod_lemma f14 f24
val fadd5_eval_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (2,2,2,2,2)}
-> f2:felem5 w{felem_fits5 f2 (1,1,1,1,1)} ->
Lemma (feval5 (fadd5 f1 f2) == map2 Vec.pfadd (feval5 f1) (feval5 f2))
[SMTPat (fadd5 f1 f2)]
let fadd5_eval_lemma #w f1 f2 =
let o = fadd5 f1 f2 in
FStar.Classical.forall_intro (fadd5_eval_lemma_i f1 f2);
eq_intro (feval5 o) (map2 Vec.pfadd (feval5 f1) (feval5 f2))
val mul_felem5_fits_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10)} ->
Lemma (felem_wide_fits5 (mul_felem5 #w f1 r r5) (126, 102, 78, 54, 30))
[SMTPat (mul_felem5 #w f1 r r5)]
let mul_felem5_fits_lemma #w f1 r r5 =
let (r0, r1, r2, r3, r4) = r in
let (f10, f11, f12, f13, f14) = f1 in
let (r50, r51, r52, r53, r54) = r5 in
let (a0,a1,a2,a3,a4) = smul_felem5 #w f10 (r0,r1,r2,r3,r4) in
smul_felem5_fits_lemma #w #3 #(2,2,2,2,2) f10 (r0,r1,r2,r3,r4);
let (a10,a11,a12,a13,a14) = smul_add_felem5 #w f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4) in
smul_add_felem5_fits_lemma #w #3 #(10,2,2,2,2) #(6,6,6,6,6) f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4);
let (a20,a21,a22,a23,a24) = smul_add_felem5 #w f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14) in
smul_add_felem5_fits_lemma #w #3 #(10,10,2,2,2) #(36,12,12,12,12) f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14);
let (a30,a31,a32,a33,a34) = smul_add_felem5 #w f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24) in
smul_add_felem5_fits_lemma #w #3 #(10,10,10,2,2) #(66,42,18,18,18) f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24);
let (a40,a41,a42,a43,a44) = smul_add_felem5 #w f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34) in
smul_add_felem5_fits_lemma #w #3 #(10,10,10,10,2) #(96,72,48,24,24) f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34)
val mul_felem5_eval_lemma_i:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10) /\ r5 == precomp_r5 r}
-> i:nat{i < w} ->
Lemma ((feval5 (mul_felem5 #w f1 r r5)).[i] == (feval5 f1).[i] `Vec.pfmul` (feval5 r).[i]) | false | false | Hacl.Spec.Poly1305.Field32xN.Lemmas.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 100,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val mul_felem5_eval_lemma_i:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10) /\ r5 == precomp_r5 r}
-> i:nat{i < w} ->
Lemma ((feval5 (mul_felem5 #w f1 r r5)).[i] == (feval5 f1).[i] `Vec.pfmul` (feval5 r).[i]) | [] | Hacl.Spec.Poly1305.Field32xN.Lemmas.mul_felem5_eval_lemma_i | {
"file_name": "code/poly1305/Hacl.Spec.Poly1305.Field32xN.Lemmas.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} |
f1:
Hacl.Spec.Poly1305.Field32xN.felem5 w
{Hacl.Spec.Poly1305.Field32xN.felem_fits5 f1 (3, 3, 3, 3, 3)} ->
r:
Hacl.Spec.Poly1305.Field32xN.felem5 w
{Hacl.Spec.Poly1305.Field32xN.felem_fits5 r (2, 2, 2, 2, 2)} ->
r5:
Hacl.Spec.Poly1305.Field32xN.felem5 w
{ Hacl.Spec.Poly1305.Field32xN.felem_fits5 r5 (10, 10, 10, 10, 10) /\
r5 == Hacl.Spec.Poly1305.Field32xN.precomp_r5 r } ->
i: Prims.nat{i < w}
-> FStar.Pervasives.Lemma
(ensures
(Hacl.Spec.Poly1305.Field32xN.feval5 (Hacl.Spec.Poly1305.Field32xN.mul_felem5 f1 r r5)).[ i ] ==
Hacl.Spec.Poly1305.Vec.pfmul (Hacl.Spec.Poly1305.Field32xN.feval5 f1).[ i ]
(Hacl.Spec.Poly1305.Field32xN.feval5 r).[ i ]) | {
"end_col": 53,
"end_line": 169,
"start_col": 42,
"start_line": 136
} |
FStar.Pervasives.Lemma | val load_acc5_4_lemma:
f:felem5 4{felem_fits5 f (2, 2, 2, 2, 2)}
-> e:felem5 4{felem_fits5 e (1, 1, 1, 1, 1)} ->
Lemma
(let res = load_acc5_4 f e in
felem_fits5 res (3, 3, 3, 3, 3) /\
feval5 res == Vec.fadd (create4 (feval5 f).[0] 0 0 0) (feval5 e))
[SMTPat (load_acc5_4 f e)] | [
{
"abbrev": true,
"full_module": "Hacl.Spec.Poly1305.Vec",
"short_module": "Vec"
},
{
"abbrev": false,
"full_module": "Hacl.Poly1305.Field32xN.Lemmas2",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Poly1305.Field32xN.Lemmas1",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Poly1305.Field32xN.Lemmas0",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305.Field32xN",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305.Field32xN",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305.Field32xN",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let load_acc5_4_lemma f e =
let (f0, f1, f2, f3, f4) = f in
let (r0, r1, r2, r3, r4) = (zero 4, zero 4, zero 4, zero 4, zero 4) in
let r = (r0, r1, r2, r3, r4) in
assert ((feval5 r).[1] == 0);
assert ((feval5 r).[2] == 0);
assert ((feval5 r).[3] == 0);
let r0 = vec_set r0 0ul (vec_get f0 0ul) in
let r1 = vec_set r1 0ul (vec_get f1 0ul) in
let r2 = vec_set r2 0ul (vec_get f2 0ul) in
let r3 = vec_set r3 0ul (vec_get f3 0ul) in
let r4 = vec_set r4 0ul (vec_get f4 0ul) in
let r = (r0, r1, r2, r3, r4) in
assert ((feval5 r).[0] == (feval5 f).[0]);
assert ((feval5 r).[1] == 0);
assert ((feval5 r).[2] == 0);
assert ((feval5 r).[3] == 0);
eq_intro (feval5 r) (create4 (feval5 f).[0] 0 0 0) | val load_acc5_4_lemma:
f:felem5 4{felem_fits5 f (2, 2, 2, 2, 2)}
-> e:felem5 4{felem_fits5 e (1, 1, 1, 1, 1)} ->
Lemma
(let res = load_acc5_4 f e in
felem_fits5 res (3, 3, 3, 3, 3) /\
feval5 res == Vec.fadd (create4 (feval5 f).[0] 0 0 0) (feval5 e))
[SMTPat (load_acc5_4 f e)]
let load_acc5_4_lemma f e = | false | null | true | let f0, f1, f2, f3, f4 = f in
let r0, r1, r2, r3, r4 = (zero 4, zero 4, zero 4, zero 4, zero 4) in
let r = (r0, r1, r2, r3, r4) in
assert ((feval5 r).[ 1 ] == 0);
assert ((feval5 r).[ 2 ] == 0);
assert ((feval5 r).[ 3 ] == 0);
let r0 = vec_set r0 0ul (vec_get f0 0ul) in
let r1 = vec_set r1 0ul (vec_get f1 0ul) in
let r2 = vec_set r2 0ul (vec_get f2 0ul) in
let r3 = vec_set r3 0ul (vec_get f3 0ul) in
let r4 = vec_set r4 0ul (vec_get f4 0ul) in
let r = (r0, r1, r2, r3, r4) in
assert ((feval5 r).[ 0 ] == (feval5 f).[ 0 ]);
assert ((feval5 r).[ 1 ] == 0);
assert ((feval5 r).[ 2 ] == 0);
assert ((feval5 r).[ 3 ] == 0);
eq_intro (feval5 r) (create4 (feval5 f).[ 0 ] 0 0 0) | {
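(* Informally: r starts as the all-zero limbs and only lane 0 of every limb is
   overwritten with lane 0 of f, so feval5 r == create4 (feval5 f).[0] 0 0 0; the
   SMT-patterned statement above then says load_acc5_4 f e evaluates to Vec.fadd of
   that vector with feval5 e. *)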
"checked_file": "Hacl.Spec.Poly1305.Field32xN.Lemmas.fst.checked",
"dependencies": [
"prims.fst.checked",
"Lib.Sequence.fsti.checked",
"Lib.IntVector.fsti.checked",
"Lib.IntTypes.fsti.checked",
"Hacl.Spec.Poly1305.Vec.fst.checked",
"Hacl.Spec.Poly1305.Field32xN.fst.checked",
"Hacl.Poly1305.Field32xN.Lemmas2.fst.checked",
"Hacl.Poly1305.Field32xN.Lemmas1.fst.checked",
"Hacl.Poly1305.Field32xN.Lemmas0.fst.checked",
"Hacl.Impl.Poly1305.Lemmas.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Classical.fsti.checked",
"FStar.Calc.fsti.checked"
],
"interface_file": false,
"source_file": "Hacl.Spec.Poly1305.Field32xN.Lemmas.fst"
} | [
"lemma"
] | [
"Hacl.Spec.Poly1305.Field32xN.felem5",
"Hacl.Spec.Poly1305.Field32xN.felem_fits5",
"FStar.Pervasives.Native.Mktuple5",
"Prims.nat",
"Hacl.Spec.Poly1305.Field32xN.uint64xN",
"Lib.IntVector.vec_t",
"Lib.IntTypes.U64",
"Lib.Sequence.eq_intro",
"Hacl.Spec.Poly1305.Vec.pfelem",
"Hacl.Spec.Poly1305.Field32xN.feval5",
"Lib.Sequence.create4",
"Lib.Sequence.op_String_Access",
"Prims.unit",
"Prims._assert",
"Prims.eq2",
"Prims.int",
"Prims.l_or",
"FStar.Seq.Base.index",
"Lib.Sequence.to_seq",
"FStar.Pervasives.Native.tuple5",
"Lib.Sequence.lseq",
"Lib.IntTypes.int_t",
"Lib.IntTypes.SEC",
"Lib.IntVector.vec_v",
"Lib.Sequence.upd",
"Lib.IntTypes.v",
"Lib.IntTypes.U32",
"Lib.IntTypes.PUB",
"FStar.UInt32.uint_to_t",
"FStar.UInt32.t",
"Lib.IntVector.vec_get",
"Lib.IntVector.vec_set",
"FStar.UInt32.__uint_to_t",
"Hacl.Spec.Poly1305.Field32xN.zero"
] | [] | module Hacl.Spec.Poly1305.Field32xN.Lemmas
open Lib.IntTypes
open Lib.IntVector
open Lib.Sequence
open FStar.Mul
open Hacl.Spec.Poly1305.Field32xN
open Hacl.Poly1305.Field32xN.Lemmas0
open Hacl.Poly1305.Field32xN.Lemmas1
open Hacl.Poly1305.Field32xN.Lemmas2
module Vec = Hacl.Spec.Poly1305.Vec
#set-options "--z3rlimit 100 --max_fuel 0 --initial_ifuel 1 --max_ifuel 1 --using_facts_from '* -FStar.Seq'"
val lemma_feval_is_fas_nat_i:
#w:lanes
-> f:felem5 w{felem_less5 f (pow2 128)}
-> i:size_nat{i < w} ->
Lemma ((feval5 f).[i] == (fas_nat5 f).[i])
let lemma_feval_is_fas_nat_i #w f i =
assert_norm (pow2 128 < Vec.prime);
assert ((feval5 f).[i] == (as_nat5 (transpose f).[i]) % Vec.prime);
FStar.Math.Lemmas.modulo_lemma (as_nat5 (transpose f).[i]) Vec.prime
val lemma_feval_is_fas_nat:
#w:lanes
-> f:felem5 w{felem_less5 f (pow2 128)} ->
Lemma (forall (i:nat). i < w ==> (fas_nat5 f).[i] == (feval5 f).[i])
let lemma_feval_is_fas_nat #w f =
FStar.Classical.forall_intro (lemma_feval_is_fas_nat_i #w f)
val precomp_r5_fits_lemma:
#w:lanes
-> r:felem5 w{felem_fits5 r (1, 1, 1, 1, 1)} ->
Lemma (felem_fits5 (precomp_r5 #w r) (5, 5, 5, 5, 5))
[SMTPat (precomp_r5 #w r)]
let precomp_r5_fits_lemma #w r =
FStar.Classical.forall_intro (precomp_r5_as_tup64 #w r)
val precomp_r5_fits_lemma2:
#w:lanes
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)} ->
Lemma (felem_fits5 (precomp_r5 #w r) (10, 10, 10, 10, 10))
[SMTPat (precomp_r5 #w r)]
let precomp_r5_fits_lemma2 #w r =
FStar.Classical.forall_intro (precomp_r5_as_tup64 #w r)
val precomp_r5_zeros: w:lanes -> Lemma
(let r = (zero w, zero w, zero w, zero w, zero w) in
precomp_r5 r == (zero w, zero w, zero w, zero w, zero w))
let precomp_r5_zeros w =
let r = (zero w, zero w, zero w, zero w, zero w) in
let (r0, r1, r2, r3, r4) = precomp_r5 r in
let aux (i:nat{i < w}) : Lemma ((vec_v (vec_smul_mod (zero w) (u64 5))).[i] == u64 0) = () in
Classical.forall_intro aux;
eq_intro (vec_v (vec_smul_mod (zero w) (u64 5))) (vec_v (zero w));
vecv_extensionality (vec_smul_mod (zero w) (u64 5)) (zero w)
val fadd5_fits_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (2,2,2,2,2)}
-> f2:felem5 w{felem_fits5 f2 (1,1,1,1,1)} ->
Lemma (felem_fits5 (fadd5 f1 f2) (3,3,3,3,3))
[SMTPat (fadd5 f1 f2)]
let fadd5_fits_lemma #w f1 f2 =
let (f10, f11, f12, f13, f14) = f1 in
let (f20, f21, f22, f23, f24) = f2 in
let o = fadd5 f1 f2 in
vec_add_mod_lemma f10 f20;
vec_add_mod_lemma f11 f21;
vec_add_mod_lemma f12 f22;
vec_add_mod_lemma f13 f23;
vec_add_mod_lemma f14 f24
val fadd5_eval_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (2,2,2,2,2)}
-> f2:felem5 w{felem_fits5 f2 (1,1,1,1,1)} ->
Lemma (feval5 (fadd5 f1 f2) == map2 Vec.pfadd (feval5 f1) (feval5 f2))
[SMTPat (fadd5 f1 f2)]
let fadd5_eval_lemma #w f1 f2 =
let o = fadd5 f1 f2 in
FStar.Classical.forall_intro (fadd5_eval_lemma_i f1 f2);
eq_intro (feval5 o) (map2 Vec.pfadd (feval5 f1) (feval5 f2))
val mul_felem5_fits_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10)} ->
Lemma (felem_wide_fits5 (mul_felem5 #w f1 r r5) (126, 102, 78, 54, 30))
[SMTPat (mul_felem5 #w f1 r r5)]
let mul_felem5_fits_lemma #w f1 r r5 =
let (r0, r1, r2, r3, r4) = r in
let (f10, f11, f12, f13, f14) = f1 in
let (r50, r51, r52, r53, r54) = r5 in
let (a0,a1,a2,a3,a4) = smul_felem5 #w f10 (r0,r1,r2,r3,r4) in
smul_felem5_fits_lemma #w #3 #(2,2,2,2,2) f10 (r0,r1,r2,r3,r4);
let (a10,a11,a12,a13,a14) = smul_add_felem5 #w f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4) in
smul_add_felem5_fits_lemma #w #3 #(10,2,2,2,2) #(6,6,6,6,6) f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4);
let (a20,a21,a22,a23,a24) = smul_add_felem5 #w f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14) in
smul_add_felem5_fits_lemma #w #3 #(10,10,2,2,2) #(36,12,12,12,12) f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14);
let (a30,a31,a32,a33,a34) = smul_add_felem5 #w f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24) in
smul_add_felem5_fits_lemma #w #3 #(10,10,10,2,2) #(66,42,18,18,18) f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24);
let (a40,a41,a42,a43,a44) = smul_add_felem5 #w f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34) in
smul_add_felem5_fits_lemma #w #3 #(10,10,10,10,2) #(96,72,48,24,24) f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34)
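(* The wide bounds (126, 102, 78, 54, 30) arise from accumulating the five
   scalar-times-vector products, with the per-step bounds supplied explicitly to
   smul_felem5_fits_lemma and smul_add_felem5_fits_lemma above. *)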
val mul_felem5_eval_lemma_i:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10) /\ r5 == precomp_r5 r}
-> i:nat{i < w} ->
Lemma ((feval5 (mul_felem5 #w f1 r r5)).[i] == (feval5 f1).[i] `Vec.pfmul` (feval5 r).[i])
let mul_felem5_eval_lemma_i #w f1 r r5 i =
let (r0, r1, r2, r3, r4) = r in
let (f10, f11, f12, f13, f14) = f1 in
let (r50, r51, r52, r53, r54) = r5 in
let (a0,a1,a2,a3,a4) = smul_felem5 #w f10 (r0,r1,r2,r3,r4) in
smul_felem5_eval_lemma #w #3 #(2,2,2,2,2) f10 (r0,r1,r2,r3,r4);
smul_felem5_fits_lemma #w #3 #(2,2,2,2,2) f10 (r0,r1,r2,r3,r4);
assert ((fas_nat5 (a0,a1,a2,a3,a4)).[i] == (uint64xN_v f10).[i] * (fas_nat5 (r0,r1,r2,r3,r4)).[i]);
let (a10,a11,a12,a13,a14) = smul_add_felem5 #w f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4) in
smul_add_felem5_eval_lemma #w #3 #(10,2,2,2,2) #(6,6,6,6,6) f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4);
smul_add_felem5_fits_lemma #w #3 #(10,2,2,2,2) #(6,6,6,6,6) f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4);
assert ((fas_nat5 (a10,a11,a12,a13,a14)).[i] == (fas_nat5 (a0,a1,a2,a3,a4)).[i] + (uint64xN_v f11).[i] * (fas_nat5 (r54,r0,r1,r2,r3)).[i]);
let (a20,a21,a22,a23,a24) = smul_add_felem5 #w f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14) in
smul_add_felem5_eval_lemma #w #3 #(10,10,2,2,2) #(36,12,12,12,12) f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14);
smul_add_felem5_fits_lemma #w #3 #(10,10,2,2,2) #(36,12,12,12,12) f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14);
assert ((fas_nat5 (a20,a21,a22,a23,a24)).[i] == (fas_nat5 (a10,a11,a12,a13,a14)).[i] + (uint64xN_v f12).[i] * (fas_nat5 (r53,r54,r0,r1,r2)).[i]);
let (a30,a31,a32,a33,a34) = smul_add_felem5 #w f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24) in
smul_add_felem5_eval_lemma #w #3 #(10,10,10,2,2) #(66,42,18,18,18) f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24);
smul_add_felem5_fits_lemma #w #3 #(10,10,10,2,2) #(66,42,18,18,18) f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24);
assert ((fas_nat5 (a30,a31,a32,a33,a34)).[i] == (fas_nat5 (a20,a21,a22,a23,a24)).[i] + (uint64xN_v f13).[i] * (fas_nat5 (r52,r53,r54,r0,r1)).[i]);
let (a40,a41,a42,a43,a44) = smul_add_felem5 #w f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34) in
smul_add_felem5_eval_lemma #w #3 #(10,10,10,10,2) #(96,72,48,24,24) f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34);
smul_add_felem5_fits_lemma #w #3 #(10,10,10,10,2) #(96,72,48,24,24) f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34);
assert ((fas_nat5 (a40,a41,a42,a43,a44)).[i] == (fas_nat5 (a30,a31,a32,a33,a34)).[i] + (uint64xN_v f14).[i] * (fas_nat5 (r51,r52,r53,r54,r0)).[i]);
assert ((fas_nat5 (a40,a41,a42,a43,a44)).[i] ==
(uint64xN_v f10).[i] * (fas_nat5 (r0,r1,r2,r3,r4)).[i] +
(uint64xN_v f11).[i] * (fas_nat5 (r54,r0,r1,r2,r3)).[i] +
(uint64xN_v f12).[i] * (fas_nat5 (r53,r54,r0,r1,r2)).[i] +
(uint64xN_v f13).[i] * (fas_nat5 (r52,r53,r54,r0,r1)).[i] +
(uint64xN_v f14).[i] * (fas_nat5 (r51,r52,r53,r54,r0)).[i]);
mul_felem5_eval_as_tup64 #w f1 r r5 i;
mul_felem5_lemma (as_tup64_i f1 i) (as_tup64_i r i)
val mul_felem5_eval_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10) /\ r5 == precomp_r5 r} ->
Lemma (feval5 (mul_felem5 #w f1 r r5) == map2 (Vec.pfmul) (feval5 f1) (feval5 r))
[SMTPat (mul_felem5 #w f1 r r5)]
let mul_felem5_eval_lemma #w f1 r r5 =
let tmp = map2 (Vec.pfmul) (feval5 f1) (feval5 r) in
FStar.Classical.forall_intro (mul_felem5_eval_lemma_i #w f1 r r5);
eq_intro (feval5 (mul_felem5 #w f1 r r5)) tmp
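(* Together with the fits lemma, this establishes that mul_felem5 computes
   lane-wise multiplication in the Poly1305 field (Vec.pfmul), provided
   r5 == precomp_r5 r. *)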
val fmul_r5_fits_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10)} ->
Lemma (felem_fits5 (fmul_r5 #w f1 r r5) (1, 2, 1, 1, 2))
[SMTPat (fmul_r5 #w f1 r r5)]
let fmul_r5_fits_lemma #w f1 r r5 =
let tmp = mul_felem5 f1 r r5 in
mul_felem5_fits_lemma #w f1 r r5;
let res = carry_wide_felem5 tmp in
carry_wide_felem5_fits_lemma #w tmp
val fmul_r5_eval_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10) /\ r5 == precomp_r5 r} ->
Lemma (feval5 (fmul_r5 #w f1 r r5) == map2 (Vec.pfmul) (feval5 f1) (feval5 r))
[SMTPat (fmul_r5 #w f1 r r5)]
let fmul_r5_eval_lemma #w f1 r r5 =
let tmp = mul_felem5 f1 r r5 in
mul_felem5_eval_lemma #w f1 r r5;
let res = carry_wide_felem5 tmp in
carry_wide_felem5_eval_lemma #w tmp
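(* fmul_r5 is mul_felem5 followed by carry_wide_felem5; the two lemmas above give
   its output bounds (1, 2, 1, 1, 2) and its evaluation as map2 Vec.pfmul. *)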
val fadd_mul_r5_fits_lemma:
#w:lanes
-> acc:felem5 w{felem_fits5 acc (2, 2, 2, 2, 2)}
-> f1:felem5 w{felem_fits5 f1 (1, 1, 1, 1, 1)}
-> r:felem5 w{felem_fits5 r (1, 1, 1, 1, 1)}
-> r5:felem5 w{felem_fits5 r5 (5, 5, 5, 5, 5)} ->
Lemma (felem_fits5 (fadd_mul_r5 acc f1 r r5) (1, 2, 1, 1, 2))
[SMTPat (fadd_mul_r5 acc f1 r r5)]
let fadd_mul_r5_fits_lemma #w acc f1 r r5 =
let acc1 = fadd5 acc f1 in
fadd5_fits_lemma #w acc f1;
let tmp = mul_felem5 acc1 r r5 in
mul_felem5_fits_lemma #w acc1 r r5;
let res = carry_wide_felem5 tmp in
carry_wide_felem5_fits_lemma #w tmp
val fadd_mul_r5_eval_lemma:
#w:lanes
-> acc:felem5 w{felem_fits5 acc (2, 2, 2, 2, 2)}
-> f1:felem5 w{felem_fits5 f1 (1, 1, 1, 1, 1)}
-> r:felem5 w{felem_fits5 r (1, 1, 1, 1, 1)}
-> r5:felem5 w{felem_fits5 r5 (5, 5, 5, 5, 5) /\ r5 == precomp_r5 r} ->
Lemma (feval5 (fadd_mul_r5 acc f1 r r5) ==
map2 (Vec.pfmul) (map2 (Vec.pfadd) (feval5 acc) (feval5 f1)) (feval5 r))
[SMTPat (fadd_mul_r5 acc f1 r r5)]
let fadd_mul_r5_eval_lemma #w acc f1 r r5 =
let acc1 = fadd5 acc f1 in
fadd5_eval_lemma #w acc f1;
let tmp = mul_felem5 acc1 r r5 in
mul_felem5_eval_lemma #w acc1 r r5;
let res = carry_wide_felem5 tmp in
carry_wide_felem5_eval_lemma #w tmp
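(* fadd_mul_r5 is the fused Poly1305 accumulation step: lane-wise it evaluates to
   (acc + f1) * r in the field, i.e. map2 pfmul (map2 pfadd acc f1) r, with the
   same (1, 2, 1, 1, 2) output bounds as fmul_r5. *)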
val reduce_felem5_eval_lemma:
#w:lanes
-> f:felem5 w{felem_fits5 f (2, 2, 2, 2, 2)} ->
Lemma
(felem_fits5 (reduce_felem5 f) (1, 1, 1, 1, 1) /\
(feval5 f).[0] == (fas_nat5 (reduce_felem5 f)).[0])
[SMTPat (reduce_felem5 f)]
let reduce_felem5_eval_lemma #w f =
carry_full_felem5_eval_lemma f;
carry_full_felem5_fits_lemma f;
let f = carry_full_felem5 f in
carry_reduce_felem5_lemma #w f;
subtract_p5_felem5_lemma #w (carry_full_felem5 f)
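(* reduce_felem5 carries the element fully and conditionally subtracts the prime,
   so lane 0 of the result is a natural number equal to the field evaluation
   (feval5 f).[0]. *)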
val fmul_r2_normalize50:
acc:felem5 2
-> r:felem5 2
-> r2:felem5 2 ->
Pure (felem5 2)
(requires
felem_fits5 acc (3, 3, 3, 3, 3) /\
felem_fits5 r (1, 1, 1, 1, 1) /\
felem_fits5 r2 (2, 2, 2, 2, 2) /\
feval5 r2 == Vec.compute_r2 (feval5 r).[0])
(ensures fun a ->
let fr21 = create2 (feval5 r2).[0] (feval5 r).[0] in
feval5 a == Vec.fmul (feval5 acc) fr21 /\
felem_fits5 a (1, 2, 1, 1, 2))
let fmul_r2_normalize50 (a0, a1, a2, a3, a4) (r0, r1, r2, r3, r4) (r20, r21, r22, r23, r24) =
let r210 = vec_interleave_low r20 r0 in
vec_interleave_low_lemma2 r20 r0;
let r211 = vec_interleave_low r21 r1 in
vec_interleave_low_lemma2 r21 r1;
let r212 = vec_interleave_low r22 r2 in
vec_interleave_low_lemma2 r22 r2;
let r213 = vec_interleave_low r23 r3 in
vec_interleave_low_lemma2 r23 r3;
let r214 = vec_interleave_low r24 r4 in
vec_interleave_low_lemma2 r24 r4;
let acc = (a0, a1, a2, a3, a4) in
let fr = (r0, r1, r2, r3, r4) in
let fr2 = (r20, r21, r22, r23, r24) in
assert ((feval5 fr2).[0] == Vec.pfmul ((feval5 fr).[0]) ((feval5 fr).[0]));
let fr21 = (r210, r211, r212, r213, r214) in
eq_intro (feval5 fr21) (create2 (feval5 fr2).[0] (feval5 fr).[0]);
assert (feval5 fr21 == create2 (feval5 fr2).[0] (feval5 fr).[0]);
assert (felem_fits5 fr21 (2, 2, 2, 2, 2));
let fr215 = precomp_r5 #2 fr21 in
let a = fmul_r5 #2 acc fr21 fr215 in
fmul_r5_eval_lemma acc fr21 fr215;
fmul_r5_fits_lemma acc fr21 fr215;
assert (feval5 a == Vec.fmul (feval5 acc) (feval5 fr21));
assert (felem_fits5 a (1, 2, 1, 1, 2));
a
#push-options "--z3rlimit 150"
val fmul_r2_normalize51:
a:felem5 2
-> fa1:felem5 2 ->
Pure (felem5 2)
(requires
felem_fits5 a (1, 2, 1, 1, 2) /\
felem_fits5 fa1 (1, 2, 1, 1, 2) /\
feval5 fa1 == create2 (feval5 a).[1] (feval5 a).[1])
(ensures fun out ->
(feval5 out).[0] == Vec.pfadd (feval5 a).[0] (feval5 a).[1] /\
felem_fits5 out (2, 4, 2, 2, 4))
let fmul_r2_normalize51 a fa1 =
let (a0, a1, a2, a3, a4) = a in
let (a10, a11, a12, a13, a14) = fa1 in
let o0 = vec_add_mod a0 a10 in
let o1 = vec_add_mod a1 a11 in
let o2 = vec_add_mod a2 a12 in
let o3 = vec_add_mod a3 a13 in
let o4 = vec_add_mod a4 a14 in
let out = (o0, o1, o2, o3, o4) in
let (a0, a1, a2, a3, a4) = as_tup64_i a 0 in
let (a10, a11, a12, a13, a14) = as_tup64_i fa1 0 in
let (o0, o1, o2, o3, o4) = as_tup64_i out 0 in
FStar.Math.Lemmas.modulo_lemma (v a0 + v a10) (pow2 64);
FStar.Math.Lemmas.modulo_lemma (v a1 + v a11) (pow2 64);
FStar.Math.Lemmas.modulo_lemma (v a2 + v a12) (pow2 64);
FStar.Math.Lemmas.modulo_lemma (v a3 + v a13) (pow2 64);
FStar.Math.Lemmas.modulo_lemma (v a4 + v a14) (pow2 64);
assert (felem_fits5 out (2, 4, 2, 2, 4));
calc (==) {
((feval5 a).[0] + (feval5 a).[1]) % Vec.prime;
(==) { }
(as_nat5 (a0, a1, a2, a3, a4) % Vec.prime + as_nat5 (a10, a11, a12, a13, a14) % Vec.prime) % Vec.prime;
(==) { FStar.Math.Lemmas.lemma_mod_plus_distr_l
(as_nat5 (a0, a1, a2, a3, a4)) (as_nat5 (a10, a11, a12, a13, a14)) Vec.prime;
FStar.Math.Lemmas.lemma_mod_plus_distr_r
(as_nat5 (a0, a1, a2, a3, a4) % Vec.prime) (as_nat5 (a10, a11, a12, a13, a14)) Vec.prime }
(as_nat5 (a0, a1, a2, a3, a4) + as_nat5 (a10, a11, a12, a13, a14)) % Vec.prime;
(==) { }
(feval5 out).[0];
};
out
#pop-options
val fmul_r2_normalize5_lemma:
acc:felem5 2
-> r:felem5 2
-> r2:felem5 2 ->
Lemma
(requires
felem_fits5 acc (3, 3, 3, 3, 3) /\
felem_fits5 r (1, 1, 1, 1, 1) /\
felem_fits5 r2 (2, 2, 2, 2, 2) /\
feval5 r2 == Vec.compute_r2 (feval5 r).[0])
(ensures
(let out = fmul_r2_normalize5 acc r r2 in
felem_fits5 out (2, 1, 1, 1, 1) /\
(feval5 out).[0] == Vec.normalize_2 (feval5 r).[0] (feval5 acc)))
[SMTPat (fmul_r2_normalize5 acc r r2)]
let fmul_r2_normalize5_lemma acc r r2 =
let a = fmul_r2_normalize50 acc r r2 in
let (a0, a1, a2, a3, a4) = a in
let a10 = vec_interleave_high a0 a0 in
vec_interleave_high_lemma2 a0 a0;
let a11 = vec_interleave_high a1 a1 in
vec_interleave_high_lemma2 a1 a1;
let a12 = vec_interleave_high a2 a2 in
vec_interleave_high_lemma2 a2 a2;
let a13 = vec_interleave_high a3 a3 in
vec_interleave_high_lemma2 a3 a3;
let a14 = vec_interleave_high a4 a4 in
vec_interleave_high_lemma2 a4 a4;
let fa1 = (a10, a11, a12, a13, a14) in
eq_intro (feval5 fa1) (create2 (feval5 a).[1] (feval5 a).[1]);
assert (feval5 fa1 == create2 (feval5 a).[1] (feval5 a).[1]);
assert (felem_fits5 fa1 (1, 2, 1, 1, 2));
let out = fmul_r2_normalize51 a fa1 in
assert ((feval5 out).[0] == Vec.pfadd (feval5 a).[0] (feval5 a).[1]);
let res = carry_full_felem5 out in
carry_full_felem5_lemma out
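(* 2-way normalization: fmul_r2_normalize50 multiplies the accumulator lanes by
   (r^2, r), fmul_r2_normalize51 folds lane 1 into lane 0, and a final carry gives
   lane 0 == Vec.normalize_2 (feval5 r).[0] (feval5 acc). *)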
val fmul_r4_normalize50:
acc:felem5 4
-> r:felem5 4
-> r2:felem5 4
-> r3:felem5 4
-> r4:felem5 4 ->
Pure (felem5 4)
(requires
felem_fits5 acc (3, 3, 3, 3, 3) /\
felem_fits5 r (1, 1, 1, 1, 1) /\
felem_fits5 r2 (2, 2, 2, 2, 2) /\
felem_fits5 r3 (2, 2, 2, 2, 2) /\
felem_fits5 r4 (2, 2, 2, 2, 2) /\
feval5 r2 == Vec.fmul (feval5 r) (feval5 r) /\
feval5 r3 == Vec.fmul (feval5 r2) (feval5 r) /\
feval5 r4 == Vec.compute_r4 (feval5 r).[0])
(ensures fun out ->
let fr4321 = create4 (feval5 r4).[0] (feval5 r3).[0] (feval5 r2).[0] (feval5 r).[0] in
feval5 out == Vec.fmul (feval5 acc) fr4321 /\
felem_fits5 out (1, 2, 1, 1, 2))
let fmul_r4_normalize50 acc fr fr2 fr3 fr4 =
let (r10, r11, r12, r13, r14) = fr in
let (r20, r21, r22, r23, r24) = fr2 in
let (r30, r31, r32, r33, r34) = fr3 in
let (r40, r41, r42, r43, r44) = fr4 in
let (a0, a1, a2, a3, a4) = acc in
let v12120 = vec_interleave_low r20 r10 in
vec_interleave_low_lemma_uint64_4 r20 r10;
let v34340 = vec_interleave_low r40 r30 in
vec_interleave_low_lemma_uint64_4 r40 r30;
let r12340 = vec_interleave_low_n 2 v34340 v12120 in
vec_interleave_low_n_lemma_uint64_4_2 v34340 v12120;
let v12121 = vec_interleave_low r21 r11 in
vec_interleave_low_lemma_uint64_4 r21 r11;
let v34341 = vec_interleave_low r41 r31 in
vec_interleave_low_lemma_uint64_4 r41 r31;
let r12341 = vec_interleave_low_n 2 v34341 v12121 in
vec_interleave_low_n_lemma_uint64_4_2 v34341 v12121;
let v12122 = vec_interleave_low r22 r12 in
vec_interleave_low_lemma_uint64_4 r22 r12;
let v34342 = vec_interleave_low r42 r32 in
vec_interleave_low_lemma_uint64_4 r42 r32;
let r12342 = vec_interleave_low_n 2 v34342 v12122 in
vec_interleave_low_n_lemma_uint64_4_2 v34342 v12122;
let v12123 = vec_interleave_low r23 r13 in
vec_interleave_low_lemma_uint64_4 r23 r13;
let v34343 = vec_interleave_low r43 r33 in
vec_interleave_low_lemma_uint64_4 r43 r33;
let r12343 = vec_interleave_low_n 2 v34343 v12123 in
vec_interleave_low_n_lemma_uint64_4_2 v34343 v12123;
let v12124 = vec_interleave_low r24 r14 in
vec_interleave_low_lemma_uint64_4 r24 r14;
let v34344 = vec_interleave_low r44 r34 in
vec_interleave_low_lemma_uint64_4 r44 r34;
let r12344 = vec_interleave_low_n 2 v34344 v12124 in
vec_interleave_low_n_lemma_uint64_4_2 v34344 v12124;
let fr1234 = (r12340, r12341, r12342, r12343, r12344) in
eq_intro (feval5 fr1234) (create4 (feval5 fr4).[0] (feval5 fr3).[0] (feval5 fr2).[0] (feval5 fr).[0]);
let fr12345 = precomp_r5 #4 fr1234 in
let out = fmul_r5 #4 acc fr1234 fr12345 in
fmul_r5_eval_lemma acc fr1234 fr12345;
fmul_r5_fits_lemma acc fr1234 fr12345;
out
val lemma_fmul_r4_normalize51:
#m:scale32{m <= 2}
-> o:uint64xN 4{felem_fits1 o m} ->
Lemma
(let v00 = vec_interleave_high_n 2 o o in
let v10 = vec_add_mod o v00 in
let v10h = vec_interleave_high v10 v10 in
let v20 = vec_add_mod v10 v10h in
felem_fits1 v20 (4 * m) /\
(uint64xN_v v20).[0] == (uint64xN_v o).[0] + (uint64xN_v o).[1] + (uint64xN_v o).[2] + (uint64xN_v o).[3])
let lemma_fmul_r4_normalize51 #m o =
let v00 = vec_interleave_high_n 2 o o in
vec_interleave_high_n_lemma_uint64_4_2 o o;
let (o0, o1, o2, o3) = ((vec_v o).[0], (vec_v o).[1], (vec_v o).[2], (vec_v o).[3]) in
assert (vec_v v00 == create4 o2 o3 o2 o3);
let v10 = vec_add_mod o v00 in
FStar.Math.Lemmas.modulo_lemma (v o0 + v o2) (pow2 64);
FStar.Math.Lemmas.modulo_lemma (v o1 + v o3) (pow2 64);
assert (v (vec_v v10).[0] == v o0 + v o2);
assert (v (vec_v v10).[1] == v o1 + v o3);
let v10h = vec_interleave_high v10 v10 in
vec_interleave_high_lemma_uint64_4 v10 v10;
assert (v (vec_v v10h).[0] == v (vec_v v10).[1]);
let v20 = vec_add_mod v10 v10h in
FStar.Math.Lemmas.modulo_lemma (v o0 + v o2 + v o1 + v o3) (pow2 64)
val lemma_fmul_r4_normalize51_expand:
v2:felem5 4
-> out:felem5 4 ->
Lemma
(requires
(let (v20, v21, v22, v23, v24) = v2 in
let (o0, o1, o2, o3, o4) = out in
(uint64xN_v v20).[0] == (uint64xN_v o0).[0] + (uint64xN_v o0).[1] + (uint64xN_v o0).[2] + (uint64xN_v o0).[3] /\
(uint64xN_v v21).[0] == (uint64xN_v o1).[0] + (uint64xN_v o1).[1] + (uint64xN_v o1).[2] + (uint64xN_v o1).[3] /\
(uint64xN_v v22).[0] == (uint64xN_v o2).[0] + (uint64xN_v o2).[1] + (uint64xN_v o2).[2] + (uint64xN_v o2).[3] /\
(uint64xN_v v23).[0] == (uint64xN_v o3).[0] + (uint64xN_v o3).[1] + (uint64xN_v o3).[2] + (uint64xN_v o3).[3] /\
(uint64xN_v v24).[0] == (uint64xN_v o4).[0] + (uint64xN_v o4).[1] + (uint64xN_v o4).[2] + (uint64xN_v o4).[3]))
(ensures
(let (v20, v21, v22, v23, v24) = v2 in
let (o0, o1, o2, o3, o4) = out in
(feval5 v2).[0] == Vec.pfadd (Vec.pfadd (Vec.pfadd (feval5 out).[0] (feval5 out).[1]) (feval5 out).[2]) (feval5 out).[3]))
let lemma_fmul_r4_normalize51_expand v2 out =
let (v20, v21, v22, v23, v24) = as_tup64_i v2 0 in
let (o0, o1, o2, o3, o4) = out in
calc (==) {
as_nat5 (v20, v21, v22, v23, v24) % Vec.prime;
(==) { }
(as_nat5 (as_tup64_i out 0) + as_nat5 (as_tup64_i out 1) + as_nat5 (as_tup64_i out 2) + as_nat5 (as_tup64_i out 3)) % Vec.prime;
(==) { FStar.Math.Lemmas.lemma_mod_plus_distr_l (as_nat5 (as_tup64_i out 0) + as_nat5 (as_tup64_i out 1))
(as_nat5 (as_tup64_i out 2) + as_nat5 (as_tup64_i out 3)) Vec.prime }
((as_nat5 (as_tup64_i out 0) + as_nat5 (as_tup64_i out 1)) % Vec.prime + as_nat5 (as_tup64_i out 2) + as_nat5 (as_tup64_i out 3)) % Vec.prime;
(==) { FStar.Math.Lemmas.modulo_distributivity (as_nat5 (as_tup64_i out 0)) (as_nat5 (as_tup64_i out 1)) Vec.prime }
(((feval5 out).[0] + (feval5 out).[1]) % Vec.prime + as_nat5 (as_tup64_i out 2) + as_nat5 (as_tup64_i out 3)) % Vec.prime;
(==) { FStar.Math.Lemmas.lemma_mod_plus_distr_l (((feval5 out).[0] + (feval5 out).[1]) % Vec.prime + as_nat5 (as_tup64_i out 2))
(as_nat5 (as_tup64_i out 3)) Vec.prime }
((((feval5 out).[0] + (feval5 out).[1]) % Vec.prime + as_nat5 (as_tup64_i out 2)) % Vec.prime + as_nat5 (as_tup64_i out 3)) % Vec.prime;
(==) { FStar.Math.Lemmas.lemma_mod_plus_distr_r (((feval5 out).[0] + (feval5 out).[1]) % Vec.prime) (as_nat5 (as_tup64_i out 2)) Vec.prime }
((((feval5 out).[0] + (feval5 out).[1]) % Vec.prime + (feval5 out).[2]) % Vec.prime + as_nat5 (as_tup64_i out 3)) % Vec.prime;
(==) { FStar.Math.Lemmas.lemma_mod_plus_distr_r ((((feval5 out).[0] + (feval5 out).[1]) % Vec.prime + (feval5 out).[2]) % Vec.prime)
(as_nat5 (as_tup64_i out 3)) Vec.prime }
((((feval5 out).[0] + (feval5 out).[1]) % Vec.prime + (feval5 out).[2]) % Vec.prime + (feval5 out).[3]) % Vec.prime;
};
assert ((feval5 v2).[0] ==
((((feval5 out).[0] + (feval5 out).[1]) % Vec.prime + (feval5 out).[2]) % Vec.prime + (feval5 out).[3]) % Vec.prime)
val fmul_r4_normalize51: a:felem5 4 ->
Pure (felem5 4)
(requires felem_fits5 a (1, 2, 1, 1, 2))
(ensures fun res ->
felem_fits5 res (4, 8, 4, 4, 8) /\
(feval5 res).[0] == Vec.pfadd (Vec.pfadd (Vec.pfadd (feval5 a).[0] (feval5 a).[1]) (feval5 a).[2]) (feval5 a).[3])
let fmul_r4_normalize51 fa =
let (o0, o1, o2, o3, o4) = fa in
let v00 = vec_interleave_high_n 2 o0 o0 in
let v10 = vec_add_mod o0 v00 in
let v10h = vec_interleave_high v10 v10 in
let v20 = vec_add_mod v10 v10h in
lemma_fmul_r4_normalize51 #1 o0;
let v01 = vec_interleave_high_n 2 o1 o1 in
let v11 = vec_add_mod o1 v01 in
let v11h = vec_interleave_high v11 v11 in
let v21 = vec_add_mod v11 v11h in
lemma_fmul_r4_normalize51 #2 o1;
let v02 = vec_interleave_high_n 2 o2 o2 in
let v12 = vec_add_mod o2 v02 in
let v12h = vec_interleave_high v12 v12 in
let v22 = vec_add_mod v12 v12h in
lemma_fmul_r4_normalize51 #1 o2;
let v03 = vec_interleave_high_n 2 o3 o3 in
let v13 = vec_add_mod o3 v03 in
let v13h = vec_interleave_high v13 v13 in
let v23 = vec_add_mod v13 v13h in
lemma_fmul_r4_normalize51 #1 o3;
let v04 = vec_interleave_high_n 2 o4 o4 in
let v14 = vec_add_mod o4 v04 in
let v14h = vec_interleave_high v14 v14 in
let v24 = vec_add_mod v14 v14h in
lemma_fmul_r4_normalize51 #2 o4;
let res = (v20, v21, v22, v23, v24) in
lemma_fmul_r4_normalize51_expand res fa;
res
val fmul_r4_normalize5_lemma:
acc:felem5 4
-> r:felem5 4
-> r_5:felem5 4
-> r4:felem5 4 ->
Lemma
(requires
felem_fits5 acc (3, 3, 3, 3, 3) /\
felem_fits5 r (1, 1, 1, 1, 1) /\
felem_fits5 r4 (2, 2, 2, 2, 2) /\
r_5 == precomp_r5 r /\
feval5 r4 == Vec.compute_r4 (feval5 r).[0])
(ensures
(let out = fmul_r4_normalize5 acc r r_5 r4 in
felem_fits5 out (2, 1, 1, 1, 1) /\
(feval5 out).[0] == Vec.normalize_4 (feval5 r).[0] (feval5 acc)))
[SMTPat (fmul_r4_normalize5 acc r r_5 r4)]
#restart-solver
#push-options "--z3rlimit 500"
let fmul_r4_normalize5_lemma acc fr fr_5 fr4 =
let fr2 = fmul_r5 #4 fr fr fr_5 in
let fr3 = fmul_r5 #4 fr2 fr fr_5 in
let out = fmul_r4_normalize50 acc fr fr2 fr3 fr4 in
let v2 = fmul_r4_normalize51 out in
let res = carry_full_felem5 v2 in
carry_full_felem5_lemma v2
#pop-options
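(* 4-way normalization follows the same pattern: multiply the four accumulator
   lanes by (r^4, r^3, r^2, r), sum the lanes horizontally with
   fmul_r4_normalize51, and carry; lane 0 then matches Vec.normalize_4. *)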
val load_felem5_lemma:
#w:lanes
-> lo:uint64xN w
-> hi:uint64xN w ->
Lemma
(let f = load_felem5 #w lo hi in
felem_fits5 f (1, 1, 1, 1, 1) /\
felem_less5 f (pow2 128) /\
feval5 f == createi #Vec.pfelem w
(fun i -> (uint64xN_v hi).[i] * pow2 64 + (uint64xN_v lo).[i]))
let load_felem5_lemma #w lo hi =
let f = load_felem5 #w lo hi in
assert_norm (pow2 64 * pow2 64 = pow2 128);
assert_norm (pow2 128 < Vec.prime);
let res = createi #Vec.pfelem w
(fun i -> (uint64xN_v hi).[i] * pow2 64 + (uint64xN_v lo).[i]) in
match w with
| 1 ->
load_felem5_lemma_i #w lo hi 0;
eq_intro (feval5 f) res
| 2 ->
load_felem5_lemma_i #w lo hi 0;
load_felem5_lemma_i #w lo hi 1;
eq_intro (feval5 f) res
| 4 ->
load_felem5_lemma_i #w lo hi 0;
load_felem5_lemma_i #w lo hi 1;
load_felem5_lemma_i #w lo hi 2;
load_felem5_lemma_i #w lo hi 3;
eq_intro (feval5 f) res
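(* Loading two 64-bit words per lane yields hi * 2^64 + lo in every lane; since
   2^128 < Vec.prime, the evaluation involves no modular reduction. *)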
val load_felem5_4_interleave: lo:uint64xN 4 -> hi:uint64xN 4 -> Lemma
(let m0 = vec_interleave_low_n 2 lo hi in
let m1 = vec_interleave_high_n 2 lo hi in
let m2 = cast U64 4 (vec_shift_right (cast U128 2 m0) 48ul) in
let m3 = cast U64 4 (vec_shift_right (cast U128 2 m1) 48ul) in
let m4 = vec_interleave_high m0 m1 in
let t0 = vec_interleave_low m0 m1 in
let t3 = vec_interleave_low m2 m3 in
vec_v m4 == create4 (vec_v lo).[1] (vec_v lo).[3] (vec_v hi).[1] (vec_v hi).[3] /\
vec_v t0 == create4 (vec_v lo).[0] (vec_v lo).[2] (vec_v hi).[0] (vec_v hi).[2] /\
t3 == vec_or (vec_shift_right t0 48ul) (vec_shift_left m4 16ul))
let load_felem5_4_interleave lo hi =
let m0 = vec_interleave_low_n 2 lo hi in
vec_interleave_low_n_lemma_uint64_4_2 lo hi;
//assert (vec_v m0 == create4 (vec_v lo).[0] (vec_v lo).[1] (vec_v hi).[0] (vec_v hi).[1]);
let m1 = vec_interleave_high_n 2 lo hi in
vec_interleave_high_n_lemma_uint64_4_2 lo hi;
//assert (vec_v m1 == create4 (vec_v lo).[2] (vec_v lo).[3] (vec_v hi).[2] (vec_v hi).[3]);
let m4 = vec_interleave_high m0 m1 in
vec_interleave_high_lemma_uint64_4 m0 m1;
//assert (vec_v m4 == create4 (vec_v m0).[1] (vec_v m1).[1] (vec_v m0).[3] (vec_v m1).[3]);
assert (vec_v m4 == create4 (vec_v lo).[1] (vec_v lo).[3] (vec_v hi).[1] (vec_v hi).[3]);
let t0 = vec_interleave_low m0 m1 in
vec_interleave_low_lemma_uint64_4 m0 m1;
//assert (vec_v t0 == create4 (vec_v m0).[0] (vec_v m1).[0] (vec_v m0).[2] (vec_v m1).[2]);
assert (vec_v t0 == create4 (vec_v lo).[0] (vec_v lo).[2] (vec_v hi).[0] (vec_v hi).[2]);
let m2 = cast U64 4 (vec_shift_right (cast U128 2 m0) 48ul) in
vec_shift_right_uint128_small2 m0 48ul;
assert ((vec_v m2).[0] == (((vec_v lo).[0] >>. 48ul) |. ((vec_v lo).[1] <<. 16ul)));
assert ((vec_v m2).[2] == (((vec_v hi).[0] >>. 48ul) |. ((vec_v hi).[1] <<. 16ul)));
let m3 = cast U64 4 (vec_shift_right (cast U128 2 m1) 48ul) in
vec_shift_right_uint128_small2 m1 48ul;
assert ((vec_v m3).[0] == (((vec_v lo).[2] >>. 48ul) |. ((vec_v lo).[3] <<. 16ul)));
assert ((vec_v m3).[2] == (((vec_v hi).[2] >>. 48ul) |. ((vec_v hi).[3] <<. 16ul)));
let t3 = vec_interleave_low m2 m3 in
vec_interleave_low_lemma_uint64_4 m2 m3;
eq_intro (vec_v t3) (vec_v (vec_or (vec_shift_right t0 48ul) (vec_shift_left m4 16ul)));
vecv_extensionality t3 (vec_or (vec_shift_right t0 48ul) (vec_shift_left m4 16ul))
noextract
val load_felem5_4_compact: lo:uint64xN 4 -> hi:uint64xN 4 -> felem5 4
let load_felem5_4_compact lo hi =
let mask26 = mask26 4 in
let t3 = vec_or (vec_shift_right lo 48ul) (vec_shift_left hi 16ul) in
let o0 = vec_and lo mask26 in
let o1 = vec_and (vec_shift_right lo 26ul) mask26 in
let o2 = vec_and (vec_shift_right t3 4ul) mask26 in
let o3 = vec_and (vec_shift_right t3 30ul) mask26 in
let o4 = vec_shift_right hi 40ul in
(o0, o1, o2, o3, o4)
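(* Spec-level (noextract) version of the 4-way load: t3 packs
   (lo >> 48) | (hi << 16); limbs o2 and o3 are extracted from t3, o0 and o1 from
   lo (masked to 26 bits), and o4 is hi >> 40. *)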
val load_felem5_4_compact_lemma_i: lo:uint64xN 4 -> hi:uint64xN 4 -> i:nat{i < 4} ->
Lemma
(let f = as_tup64_i (load_felem5_4_compact lo hi) i in
tup64_fits5 f (1, 1, 1, 1, 1) /\
as_nat5 f < pow2 128 /\
as_nat5 f % Vec.prime == (uint64xN_v hi).[i] * pow2 64 + (uint64xN_v lo).[i])
let load_felem5_4_compact_lemma_i lo hi i =
assert (as_tup64_i (load_felem5_4_compact lo hi) i == load_tup64_4_compact (vec_v lo).[i] (vec_v hi).[i]);
load_tup64_4_compact_lemma (vec_v lo).[i] (vec_v hi).[i]
val load_felem5_4_lemma: lo:uint64xN 4 -> hi:uint64xN 4 ->
Lemma
(let f = load_felem5_4_compact lo hi in
felem_fits5 f (1, 1, 1, 1, 1) /\
felem_less5 f (pow2 128) /\
feval5 f == createi #Vec.pfelem 4 (fun i -> (uint64xN_v hi).[i] * pow2 64 + (uint64xN_v lo).[i]))
let load_felem5_4_lemma lo hi =
let f = load_felem5_4_compact lo hi in
assert_norm (pow2 64 * pow2 64 = pow2 128);
assert_norm (pow2 128 < Vec.prime);
let res = createi #Vec.pfelem 4
(fun i -> (uint64xN_v hi).[i] * pow2 64 + (uint64xN_v lo).[i]) in
load_felem5_4_compact_lemma_i lo hi 0;
load_felem5_4_compact_lemma_i lo hi 1;
load_felem5_4_compact_lemma_i lo hi 2;
load_felem5_4_compact_lemma_i lo hi 3;
eq_intro (feval5 f) res
val load_felem5_le: b:lseq uint8 64 -> Lemma
(let lo0 = vec_from_bytes_le U64 4 (sub b 0 32) in
let hi0 = vec_from_bytes_le U64 4 (sub b 32 32) in
let f = load_felem5_4 lo0 hi0 in
felem_fits5 f (1, 1, 1, 1, 1) /\
felem_less5 f (pow2 128) /\
feval5 f == Vec.load_elem4 b)
let load_felem5_le b =
let lo0 = vec_from_bytes_le U64 4 (sub b 0 32) in
let hi0 = vec_from_bytes_le U64 4 (sub b 32 32) in
let lo1 = vec_interleave_low_n 2 lo0 hi0 in
let hi1 = vec_interleave_high_n 2 lo0 hi0 in
let lo = vec_interleave_low lo1 hi1 in
let hi = vec_interleave_high lo1 hi1 in
let out = load_felem5_4_compact lo hi in
load_felem5_4_interleave lo0 hi0;
assert (out == load_felem5_4 lo0 hi0);
load_felem5_4_lemma lo hi;
Hacl.Impl.Poly1305.Lemmas.uints_from_bytes_le_lemma64_4 b;
eq_intro (feval5 out) (Vec.load_elem4 b)
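(* load_felem5_le ties the pieces together: the interleave lemma identifies the
   optimized load_felem5_4 with load_felem5_4_compact, and the compact-load and
   byte-decoding lemmas give feval5 out == Vec.load_elem4 b. *)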
val load_acc5_2_lemma:
f:felem5 2{felem_fits5 f (2, 2, 2, 2, 2)}
-> e:felem5 2{felem_fits5 e (1, 1, 1, 1, 1)} ->
Lemma
(let res = load_acc5_2 f e in
felem_fits5 res (3, 3, 3, 3, 3) /\
feval5 res == Vec.fadd (create2 (feval5 f).[0] 0) (feval5 e))
[SMTPat (load_acc5_2 f e)]
let load_acc5_2_lemma f e =
let (f0, f1, f2, f3, f4) = f in
let r0 = vec_set f0 1ul (u64 0) in
let r1 = vec_set f1 1ul (u64 0) in
let r2 = vec_set f2 1ul (u64 0) in
let r3 = vec_set f3 1ul (u64 0) in
let r4 = vec_set f4 1ul (u64 0) in
let r = (r0, r1, r2, r3, r4) in
//assert ((feval5 r).[0] == (feval5 f).[0]);
assert ((feval5 r).[1] == 0);
eq_intro (feval5 r) (create2 (feval5 f).[0] 0)
val load_acc5_4_lemma:
f:felem5 4{felem_fits5 f (2, 2, 2, 2, 2)}
-> e:felem5 4{felem_fits5 e (1, 1, 1, 1, 1)} ->
Lemma
(let res = load_acc5_4 f e in
felem_fits5 res (3, 3, 3, 3, 3) /\
feval5 res == Vec.fadd (create4 (feval5 f).[0] 0 0 0) (feval5 e))
[SMTPat (load_acc5_4 f e)] | false | false | Hacl.Spec.Poly1305.Field32xN.Lemmas.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 100,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val load_acc5_4_lemma:
f:felem5 4{felem_fits5 f (2, 2, 2, 2, 2)}
-> e:felem5 4{felem_fits5 e (1, 1, 1, 1, 1)} ->
Lemma
(let res = load_acc5_4 f e in
felem_fits5 res (3, 3, 3, 3, 3) /\
feval5 res == Vec.fadd (create4 (feval5 f).[0] 0 0 0) (feval5 e))
[SMTPat (load_acc5_4 f e)] | [] | Hacl.Spec.Poly1305.Field32xN.Lemmas.load_acc5_4_lemma | {
"file_name": "code/poly1305/Hacl.Spec.Poly1305.Field32xN.Lemmas.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} |
f:
Hacl.Spec.Poly1305.Field32xN.felem5 4
{Hacl.Spec.Poly1305.Field32xN.felem_fits5 f (2, 2, 2, 2, 2)} ->
e:
Hacl.Spec.Poly1305.Field32xN.felem5 4
{Hacl.Spec.Poly1305.Field32xN.felem_fits5 e (1, 1, 1, 1, 1)}
-> FStar.Pervasives.Lemma
(ensures
(let res = Hacl.Spec.Poly1305.Field32xN.load_acc5_4 f e in
Hacl.Spec.Poly1305.Field32xN.felem_fits5 res (3, 3, 3, 3, 3) /\
Hacl.Spec.Poly1305.Field32xN.feval5 res ==
Hacl.Spec.Poly1305.Vec.fadd (Lib.Sequence.create4 (Hacl.Spec.Poly1305.Field32xN.feval5 f).[
0 ]
0
0
0)
(Hacl.Spec.Poly1305.Field32xN.feval5 e)))
[SMTPat (Hacl.Spec.Poly1305.Field32xN.load_acc5_4 f e)] | {
"end_col": 52,
"end_line": 815,
"start_col": 27,
"start_line": 798
} |
FStar.Pervasives.Lemma | val load_felem5_4_interleave: lo:uint64xN 4 -> hi:uint64xN 4 -> Lemma
(let m0 = vec_interleave_low_n 2 lo hi in
let m1 = vec_interleave_high_n 2 lo hi in
let m2 = cast U64 4 (vec_shift_right (cast U128 2 m0) 48ul) in
let m3 = cast U64 4 (vec_shift_right (cast U128 2 m1) 48ul) in
let m4 = vec_interleave_high m0 m1 in
let t0 = vec_interleave_low m0 m1 in
let t3 = vec_interleave_low m2 m3 in
vec_v m4 == create4 (vec_v lo).[1] (vec_v lo).[3] (vec_v hi).[1] (vec_v hi).[3] /\
vec_v t0 == create4 (vec_v lo).[0] (vec_v lo).[2] (vec_v hi).[0] (vec_v hi).[2] /\
t3 == vec_or (vec_shift_right t0 48ul) (vec_shift_left m4 16ul)) | [
{
"abbrev": true,
"full_module": "Hacl.Spec.Poly1305.Vec",
"short_module": "Vec"
},
{
"abbrev": false,
"full_module": "Hacl.Poly1305.Field32xN.Lemmas2",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Poly1305.Field32xN.Lemmas1",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Poly1305.Field32xN.Lemmas0",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305.Field32xN",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305.Field32xN",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305.Field32xN",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let load_felem5_4_interleave lo hi =
let m0 = vec_interleave_low_n 2 lo hi in
vec_interleave_low_n_lemma_uint64_4_2 lo hi;
//assert (vec_v m0 == create4 (vec_v lo).[0] (vec_v lo).[1] (vec_v hi).[0] (vec_v hi).[1]);
let m1 = vec_interleave_high_n 2 lo hi in
vec_interleave_high_n_lemma_uint64_4_2 lo hi;
//assert (vec_v m1 == create4 (vec_v lo).[2] (vec_v lo).[3] (vec_v hi).[2] (vec_v hi).[3]);
let m4 = vec_interleave_high m0 m1 in
vec_interleave_high_lemma_uint64_4 m0 m1;
//assert (vec_v m4 == create4 (vec_v m0).[1] (vec_v m1).[1] (vec_v m0).[3] (vec_v m1).[3]);
assert (vec_v m4 == create4 (vec_v lo).[1] (vec_v lo).[3] (vec_v hi).[1] (vec_v hi).[3]);
let t0 = vec_interleave_low m0 m1 in
vec_interleave_low_lemma_uint64_4 m0 m1;
//assert (vec_v t0 == create4 (vec_v m0).[0] (vec_v m1).[0] (vec_v m0).[2] (vec_v m1).[2]);
assert (vec_v t0 == create4 (vec_v lo).[0] (vec_v lo).[2] (vec_v hi).[0] (vec_v hi).[2]);
let m2 = cast U64 4 (vec_shift_right (cast U128 2 m0) 48ul) in
vec_shift_right_uint128_small2 m0 48ul;
assert ((vec_v m2).[0] == (((vec_v lo).[0] >>. 48ul) |. ((vec_v lo).[1] <<. 16ul)));
assert ((vec_v m2).[2] == (((vec_v hi).[0] >>. 48ul) |. ((vec_v hi).[1] <<. 16ul)));
let m3 = cast U64 4 (vec_shift_right (cast U128 2 m1) 48ul) in
vec_shift_right_uint128_small2 m1 48ul;
assert ((vec_v m3).[0] == (((vec_v lo).[2] >>. 48ul) |. ((vec_v lo).[3] <<. 16ul)));
assert ((vec_v m3).[2] == (((vec_v hi).[2] >>. 48ul) |. ((vec_v hi).[3] <<. 16ul)));
let t3 = vec_interleave_low m2 m3 in
vec_interleave_low_lemma_uint64_4 m2 m3;
eq_intro (vec_v t3) (vec_v (vec_or (vec_shift_right t0 48ul) (vec_shift_left m4 16ul)));
vecv_extensionality t3 (vec_or (vec_shift_right t0 48ul) (vec_shift_left m4 16ul)) | val load_felem5_4_interleave: lo:uint64xN 4 -> hi:uint64xN 4 -> Lemma
(let m0 = vec_interleave_low_n 2 lo hi in
let m1 = vec_interleave_high_n 2 lo hi in
let m2 = cast U64 4 (vec_shift_right (cast U128 2 m0) 48ul) in
let m3 = cast U64 4 (vec_shift_right (cast U128 2 m1) 48ul) in
let m4 = vec_interleave_high m0 m1 in
let t0 = vec_interleave_low m0 m1 in
let t3 = vec_interleave_low m2 m3 in
vec_v m4 == create4 (vec_v lo).[1] (vec_v lo).[3] (vec_v hi).[1] (vec_v hi).[3] /\
vec_v t0 == create4 (vec_v lo).[0] (vec_v lo).[2] (vec_v hi).[0] (vec_v hi).[2] /\
t3 == vec_or (vec_shift_right t0 48ul) (vec_shift_left m4 16ul))
let load_felem5_4_interleave lo hi = | false | null | true | let m0 = vec_interleave_low_n 2 lo hi in
vec_interleave_low_n_lemma_uint64_4_2 lo hi;
let m1 = vec_interleave_high_n 2 lo hi in
vec_interleave_high_n_lemma_uint64_4_2 lo hi;
let m4 = vec_interleave_high m0 m1 in
vec_interleave_high_lemma_uint64_4 m0 m1;
assert (vec_v m4 == create4 (vec_v lo).[ 1 ] (vec_v lo).[ 3 ] (vec_v hi).[ 1 ] (vec_v hi).[ 3 ]);
let t0 = vec_interleave_low m0 m1 in
vec_interleave_low_lemma_uint64_4 m0 m1;
assert (vec_v t0 == create4 (vec_v lo).[ 0 ] (vec_v lo).[ 2 ] (vec_v hi).[ 0 ] (vec_v hi).[ 2 ]);
let m2 = cast U64 4 (vec_shift_right (cast U128 2 m0) 48ul) in
vec_shift_right_uint128_small2 m0 48ul;
assert ((vec_v m2).[ 0 ] == (((vec_v lo).[ 0 ] >>. 48ul) |. ((vec_v lo).[ 1 ] <<. 16ul)));
assert ((vec_v m2).[ 2 ] == (((vec_v hi).[ 0 ] >>. 48ul) |. ((vec_v hi).[ 1 ] <<. 16ul)));
let m3 = cast U64 4 (vec_shift_right (cast U128 2 m1) 48ul) in
vec_shift_right_uint128_small2 m1 48ul;
assert ((vec_v m3).[ 0 ] == (((vec_v lo).[ 2 ] >>. 48ul) |. ((vec_v lo).[ 3 ] <<. 16ul)));
assert ((vec_v m3).[ 2 ] == (((vec_v hi).[ 2 ] >>. 48ul) |. ((vec_v hi).[ 3 ] <<. 16ul)));
let t3 = vec_interleave_low m2 m3 in
vec_interleave_low_lemma_uint64_4 m2 m3;
eq_intro (vec_v t3) (vec_v (vec_or (vec_shift_right t0 48ul) (vec_shift_left m4 16ul)));
vecv_extensionality t3 (vec_or (vec_shift_right t0 48ul) (vec_shift_left m4 16ul)) | {
"checked_file": "Hacl.Spec.Poly1305.Field32xN.Lemmas.fst.checked",
"dependencies": [
"prims.fst.checked",
"Lib.Sequence.fsti.checked",
"Lib.IntVector.fsti.checked",
"Lib.IntTypes.fsti.checked",
"Hacl.Spec.Poly1305.Vec.fst.checked",
"Hacl.Spec.Poly1305.Field32xN.fst.checked",
"Hacl.Poly1305.Field32xN.Lemmas2.fst.checked",
"Hacl.Poly1305.Field32xN.Lemmas1.fst.checked",
"Hacl.Poly1305.Field32xN.Lemmas0.fst.checked",
"Hacl.Impl.Poly1305.Lemmas.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Classical.fsti.checked",
"FStar.Calc.fsti.checked"
],
"interface_file": false,
"source_file": "Hacl.Spec.Poly1305.Field32xN.Lemmas.fst"
} | [
"lemma"
] | [
"Hacl.Spec.Poly1305.Field32xN.uint64xN",
"Lib.IntVector.vecv_extensionality",
"Lib.IntTypes.U64",
"Lib.IntVector.vec_or",
"Lib.IntVector.vec_shift_right",
"FStar.UInt32.__uint_to_t",
"Lib.IntVector.vec_shift_left",
"Prims.unit",
"Lib.Sequence.eq_intro",
"Lib.IntTypes.uint_t",
"Lib.IntTypes.SEC",
"Lib.IntVector.vec_v",
"Lib.IntVector.vec_interleave_low_lemma_uint64_4",
"Lib.IntVector.vec_t",
"Lib.IntVector.vec_interleave_low",
"Prims._assert",
"Prims.eq2",
"Lib.IntTypes.int_t",
"Lib.Sequence.op_String_Access",
"Lib.IntTypes.op_Bar_Dot",
"Lib.IntTypes.op_Greater_Greater_Dot",
"Lib.IntTypes.op_Less_Less_Dot",
"Lib.IntVector.vec_shift_right_uint128_small2",
"Lib.IntVector.cast",
"Lib.IntTypes.U128",
"Lib.Sequence.lseq",
"Lib.Sequence.create4",
"Lib.IntVector.vec_interleave_high_lemma_uint64_4",
"Lib.IntVector.vec_interleave_high",
"Lib.IntVector.vec_interleave_high_n_lemma_uint64_4_2",
"Lib.IntVector.vec_interleave_high_n",
"Lib.IntVector.vec_interleave_low_n_lemma_uint64_4_2",
"Lib.IntVector.vec_interleave_low_n"
] | [] | module Hacl.Spec.Poly1305.Field32xN.Lemmas
open Lib.IntTypes
open Lib.IntVector
open Lib.Sequence
open FStar.Mul
open Hacl.Spec.Poly1305.Field32xN
open Hacl.Poly1305.Field32xN.Lemmas0
open Hacl.Poly1305.Field32xN.Lemmas1
open Hacl.Poly1305.Field32xN.Lemmas2
module Vec = Hacl.Spec.Poly1305.Vec
#set-options "--z3rlimit 100 --max_fuel 0 --initial_ifuel 1 --max_ifuel 1 --using_facts_from '* -FStar.Seq'"
val lemma_feval_is_fas_nat_i:
#w:lanes
-> f:felem5 w{felem_less5 f (pow2 128)}
-> i:size_nat{i < w} ->
Lemma ((feval5 f).[i] == (fas_nat5 f).[i])
let lemma_feval_is_fas_nat_i #w f i =
assert_norm (pow2 128 < Vec.prime);
assert ((feval5 f).[i] == (as_nat5 (transpose f).[i]) % Vec.prime);
FStar.Math.Lemmas.modulo_lemma (as_nat5 (transpose f).[i]) Vec.prime
val lemma_feval_is_fas_nat:
#w:lanes
-> f:felem5 w{felem_less5 f (pow2 128)} ->
Lemma (forall (i:nat). i < w ==> (fas_nat5 f).[i] == (feval5 f).[i])
let lemma_feval_is_fas_nat #w f =
FStar.Classical.forall_intro (lemma_feval_is_fas_nat_i #w f)
val precomp_r5_fits_lemma:
#w:lanes
-> r:felem5 w{felem_fits5 r (1, 1, 1, 1, 1)} ->
Lemma (felem_fits5 (precomp_r5 #w r) (5, 5, 5, 5, 5))
[SMTPat (precomp_r5 #w r)]
let precomp_r5_fits_lemma #w r =
FStar.Classical.forall_intro (precomp_r5_as_tup64 #w r)
val precomp_r5_fits_lemma2:
#w:lanes
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)} ->
Lemma (felem_fits5 (precomp_r5 #w r) (10, 10, 10, 10, 10))
[SMTPat (precomp_r5 #w r)]
let precomp_r5_fits_lemma2 #w r =
FStar.Classical.forall_intro (precomp_r5_as_tup64 #w r)
val precomp_r5_zeros: w:lanes -> Lemma
(let r = (zero w, zero w, zero w, zero w, zero w) in
precomp_r5 r == (zero w, zero w, zero w, zero w, zero w))
let precomp_r5_zeros w =
let r = (zero w, zero w, zero w, zero w, zero w) in
let (r0, r1, r2, r3, r4) = precomp_r5 r in
let aux (i:nat{i < w}) : Lemma ((vec_v (vec_smul_mod (zero w) (u64 5))).[i] == u64 0) = () in
Classical.forall_intro aux;
eq_intro (vec_v (vec_smul_mod (zero w) (u64 5))) (vec_v (zero w));
vecv_extensionality (vec_smul_mod (zero w) (u64 5)) (zero w)
val fadd5_fits_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (2,2,2,2,2)}
-> f2:felem5 w{felem_fits5 f2 (1,1,1,1,1)} ->
Lemma (felem_fits5 (fadd5 f1 f2) (3,3,3,3,3))
[SMTPat (fadd5 f1 f2)]
let fadd5_fits_lemma #w f1 f2 =
let (f10, f11, f12, f13, f14) = f1 in
let (f20, f21, f22, f23, f24) = f2 in
let o = fadd5 f1 f2 in
vec_add_mod_lemma f10 f20;
vec_add_mod_lemma f11 f21;
vec_add_mod_lemma f12 f22;
vec_add_mod_lemma f13 f23;
vec_add_mod_lemma f14 f24
val fadd5_eval_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (2,2,2,2,2)}
-> f2:felem5 w{felem_fits5 f2 (1,1,1,1,1)} ->
Lemma (feval5 (fadd5 f1 f2) == map2 Vec.pfadd (feval5 f1) (feval5 f2))
[SMTPat (fadd5 f1 f2)]
let fadd5_eval_lemma #w f1 f2 =
let o = fadd5 f1 f2 in
FStar.Classical.forall_intro (fadd5_eval_lemma_i f1 f2);
eq_intro (feval5 o) (map2 Vec.pfadd (feval5 f1) (feval5 f2))
val mul_felem5_fits_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10)} ->
Lemma (felem_wide_fits5 (mul_felem5 #w f1 r r5) (126, 102, 78, 54, 30))
[SMTPat (mul_felem5 #w f1 r r5)]
let mul_felem5_fits_lemma #w f1 r r5 =
let (r0, r1, r2, r3, r4) = r in
let (f10, f11, f12, f13, f14) = f1 in
let (r50, r51, r52, r53, r54) = r5 in
let (a0,a1,a2,a3,a4) = smul_felem5 #w f10 (r0,r1,r2,r3,r4) in
smul_felem5_fits_lemma #w #3 #(2,2,2,2,2) f10 (r0,r1,r2,r3,r4);
let (a10,a11,a12,a13,a14) = smul_add_felem5 #w f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4) in
smul_add_felem5_fits_lemma #w #3 #(10,2,2,2,2) #(6,6,6,6,6) f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4);
let (a20,a21,a22,a23,a24) = smul_add_felem5 #w f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14) in
smul_add_felem5_fits_lemma #w #3 #(10,10,2,2,2) #(36,12,12,12,12) f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14);
let (a30,a31,a32,a33,a34) = smul_add_felem5 #w f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24) in
smul_add_felem5_fits_lemma #w #3 #(10,10,10,2,2) #(66,42,18,18,18) f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24);
let (a40,a41,a42,a43,a44) = smul_add_felem5 #w f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34) in
smul_add_felem5_fits_lemma #w #3 #(10,10,10,10,2) #(96,72,48,24,24) f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34)
val mul_felem5_eval_lemma_i:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10) /\ r5 == precomp_r5 r}
-> i:nat{i < w} ->
Lemma ((feval5 (mul_felem5 #w f1 r r5)).[i] == (feval5 f1).[i] `Vec.pfmul` (feval5 r).[i])
let mul_felem5_eval_lemma_i #w f1 r r5 i =
let (r0, r1, r2, r3, r4) = r in
let (f10, f11, f12, f13, f14) = f1 in
let (r50, r51, r52, r53, r54) = r5 in
let (a0,a1,a2,a3,a4) = smul_felem5 #w f10 (r0,r1,r2,r3,r4) in
smul_felem5_eval_lemma #w #3 #(2,2,2,2,2) f10 (r0,r1,r2,r3,r4);
smul_felem5_fits_lemma #w #3 #(2,2,2,2,2) f10 (r0,r1,r2,r3,r4);
assert ((fas_nat5 (a0,a1,a2,a3,a4)).[i] == (uint64xN_v f10).[i] * (fas_nat5 (r0,r1,r2,r3,r4)).[i]);
let (a10,a11,a12,a13,a14) = smul_add_felem5 #w f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4) in
smul_add_felem5_eval_lemma #w #3 #(10,2,2,2,2) #(6,6,6,6,6) f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4);
smul_add_felem5_fits_lemma #w #3 #(10,2,2,2,2) #(6,6,6,6,6) f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4);
assert ((fas_nat5 (a10,a11,a12,a13,a14)).[i] == (fas_nat5 (a0,a1,a2,a3,a4)).[i] + (uint64xN_v f11).[i] * (fas_nat5 (r54,r0,r1,r2,r3)).[i]);
let (a20,a21,a22,a23,a24) = smul_add_felem5 #w f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14) in
smul_add_felem5_eval_lemma #w #3 #(10,10,2,2,2) #(36,12,12,12,12) f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14);
smul_add_felem5_fits_lemma #w #3 #(10,10,2,2,2) #(36,12,12,12,12) f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14);
assert ((fas_nat5 (a20,a21,a22,a23,a24)).[i] == (fas_nat5 (a10,a11,a12,a13,a14)).[i] + (uint64xN_v f12).[i] * (fas_nat5 (r53,r54,r0,r1,r2)).[i]);
let (a30,a31,a32,a33,a34) = smul_add_felem5 #w f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24) in
smul_add_felem5_eval_lemma #w #3 #(10,10,10,2,2) #(66,42,18,18,18) f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24);
smul_add_felem5_fits_lemma #w #3 #(10,10,10,2,2) #(66,42,18,18,18) f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24);
assert ((fas_nat5 (a30,a31,a32,a33,a34)).[i] == (fas_nat5 (a20,a21,a22,a23,a24)).[i] + (uint64xN_v f13).[i] * (fas_nat5 (r52,r53,r54,r0,r1)).[i]);
let (a40,a41,a42,a43,a44) = smul_add_felem5 #w f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34) in
smul_add_felem5_eval_lemma #w #3 #(10,10,10,10,2) #(96,72,48,24,24) f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34);
smul_add_felem5_fits_lemma #w #3 #(10,10,10,10,2) #(96,72,48,24,24) f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34);
assert ((fas_nat5 (a40,a41,a42,a43,a44)).[i] == (fas_nat5 (a30,a31,a32,a33,a34)).[i] + (uint64xN_v f14).[i] * (fas_nat5 (r51,r52,r53,r54,r0)).[i]);
assert ((fas_nat5 (a40,a41,a42,a43,a44)).[i] ==
(uint64xN_v f10).[i] * (fas_nat5 (r0,r1,r2,r3,r4)).[i] +
(uint64xN_v f11).[i] * (fas_nat5 (r54,r0,r1,r2,r3)).[i] +
(uint64xN_v f12).[i] * (fas_nat5 (r53,r54,r0,r1,r2)).[i] +
(uint64xN_v f13).[i] * (fas_nat5 (r52,r53,r54,r0,r1)).[i] +
(uint64xN_v f14).[i] * (fas_nat5 (r51,r52,r53,r54,r0)).[i]);
mul_felem5_eval_as_tup64 #w f1 r r5 i;
mul_felem5_lemma (as_tup64_i f1 i) (as_tup64_i r i)
val mul_felem5_eval_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10) /\ r5 == precomp_r5 r} ->
Lemma (feval5 (mul_felem5 #w f1 r r5) == map2 (Vec.pfmul) (feval5 f1) (feval5 r))
[SMTPat (mul_felem5 #w f1 r r5)]
let mul_felem5_eval_lemma #w f1 r r5 =
let tmp = map2 (Vec.pfmul) (feval5 f1) (feval5 r) in
FStar.Classical.forall_intro (mul_felem5_eval_lemma_i #w f1 r r5);
eq_intro (feval5 (mul_felem5 #w f1 r r5)) tmp
val fmul_r5_fits_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10)} ->
Lemma (felem_fits5 (fmul_r5 #w f1 r r5) (1, 2, 1, 1, 2))
[SMTPat (fmul_r5 #w f1 r r5)]
let fmul_r5_fits_lemma #w f1 r r5 =
let tmp = mul_felem5 f1 r r5 in
mul_felem5_fits_lemma #w f1 r r5;
let res = carry_wide_felem5 tmp in
carry_wide_felem5_fits_lemma #w tmp
val fmul_r5_eval_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10) /\ r5 == precomp_r5 r} ->
Lemma (feval5 (fmul_r5 #w f1 r r5) == map2 (Vec.pfmul) (feval5 f1) (feval5 r))
[SMTPat (fmul_r5 #w f1 r r5)]
let fmul_r5_eval_lemma #w f1 r r5 =
let tmp = mul_felem5 f1 r r5 in
mul_felem5_eval_lemma #w f1 r r5;
let res = carry_wide_felem5 tmp in
carry_wide_felem5_eval_lemma #w tmp
val fadd_mul_r5_fits_lemma:
#w:lanes
-> acc:felem5 w{felem_fits5 acc (2, 2, 2, 2, 2)}
-> f1:felem5 w{felem_fits5 f1 (1, 1, 1, 1, 1)}
-> r:felem5 w{felem_fits5 r (1, 1, 1, 1, 1)}
-> r5:felem5 w{felem_fits5 r5 (5, 5, 5, 5, 5)} ->
Lemma (felem_fits5 (fadd_mul_r5 acc f1 r r5) (1, 2, 1, 1, 2))
[SMTPat (fadd_mul_r5 acc f1 r r5)]
let fadd_mul_r5_fits_lemma #w acc f1 r r5 =
let acc1 = fadd5 acc f1 in
fadd5_fits_lemma #w acc f1;
let tmp = mul_felem5 acc1 r r5 in
mul_felem5_fits_lemma #w acc1 r r5;
let res = carry_wide_felem5 tmp in
carry_wide_felem5_fits_lemma #w tmp
val fadd_mul_r5_eval_lemma:
#w:lanes
-> acc:felem5 w{felem_fits5 acc (2, 2, 2, 2, 2)}
-> f1:felem5 w{felem_fits5 f1 (1, 1, 1, 1, 1)}
-> r:felem5 w{felem_fits5 r (1, 1, 1, 1, 1)}
-> r5:felem5 w{felem_fits5 r5 (5, 5, 5, 5, 5) /\ r5 == precomp_r5 r} ->
Lemma (feval5 (fadd_mul_r5 acc f1 r r5) ==
map2 (Vec.pfmul) (map2 (Vec.pfadd) (feval5 acc) (feval5 f1)) (feval5 r))
[SMTPat (fadd_mul_r5 acc f1 r r5)]
let fadd_mul_r5_eval_lemma #w acc f1 r r5 =
let acc1 = fadd5 acc f1 in
fadd5_eval_lemma #w acc f1;
let tmp = mul_felem5 acc1 r r5 in
mul_felem5_eval_lemma #w acc1 r r5;
let res = carry_wide_felem5 tmp in
carry_wide_felem5_eval_lemma #w tmp
val reduce_felem5_eval_lemma:
#w:lanes
-> f:felem5 w{felem_fits5 f (2, 2, 2, 2, 2)} ->
Lemma
(felem_fits5 (reduce_felem5 f) (1, 1, 1, 1, 1) /\
(feval5 f).[0] == (fas_nat5 (reduce_felem5 f)).[0])
[SMTPat (reduce_felem5 f)]
let reduce_felem5_eval_lemma #w f =
carry_full_felem5_eval_lemma f;
carry_full_felem5_fits_lemma f;
let f = carry_full_felem5 f in
carry_reduce_felem5_lemma #w f;
subtract_p5_felem5_lemma #w (carry_full_felem5 f)
val fmul_r2_normalize50:
acc:felem5 2
-> r:felem5 2
-> r2:felem5 2 ->
Pure (felem5 2)
(requires
felem_fits5 acc (3, 3, 3, 3, 3) /\
felem_fits5 r (1, 1, 1, 1, 1) /\
felem_fits5 r2 (2, 2, 2, 2, 2) /\
feval5 r2 == Vec.compute_r2 (feval5 r).[0])
(ensures fun a ->
let fr21 = create2 (feval5 r2).[0] (feval5 r).[0] in
feval5 a == Vec.fmul (feval5 acc) fr21 /\
felem_fits5 a (1, 2, 1, 1, 2))
let fmul_r2_normalize50 (a0, a1, a2, a3, a4) (r0, r1, r2, r3, r4) (r20, r21, r22, r23, r24) =
let r210 = vec_interleave_low r20 r0 in
vec_interleave_low_lemma2 r20 r0;
let r211 = vec_interleave_low r21 r1 in
vec_interleave_low_lemma2 r21 r1;
let r212 = vec_interleave_low r22 r2 in
vec_interleave_low_lemma2 r22 r2;
let r213 = vec_interleave_low r23 r3 in
vec_interleave_low_lemma2 r23 r3;
let r214 = vec_interleave_low r24 r4 in
vec_interleave_low_lemma2 r24 r4;
let acc = (a0, a1, a2, a3, a4) in
let fr = (r0, r1, r2, r3, r4) in
let fr2 = (r20, r21, r22, r23, r24) in
assert ((feval5 fr2).[0] == Vec.pfmul ((feval5 fr).[0]) ((feval5 fr).[0]));
let fr21 = (r210, r211, r212, r213, r214) in
eq_intro (feval5 fr21) (create2 (feval5 fr2).[0] (feval5 fr).[0]);
assert (feval5 fr21 == create2 (feval5 fr2).[0] (feval5 fr).[0]);
assert (felem_fits5 fr21 (2, 2, 2, 2, 2));
let fr215 = precomp_r5 #2 fr21 in
let a = fmul_r5 #2 acc fr21 fr215 in
fmul_r5_eval_lemma acc fr21 fr215;
fmul_r5_fits_lemma acc fr21 fr215;
assert (feval5 a == Vec.fmul (feval5 acc) (feval5 fr21));
assert (felem_fits5 a (1, 2, 1, 1, 2));
a
#push-options "--z3rlimit 150"
val fmul_r2_normalize51:
a:felem5 2
-> fa1:felem5 2 ->
Pure (felem5 2)
(requires
felem_fits5 a (1, 2, 1, 1, 2) /\
felem_fits5 fa1 (1, 2, 1, 1, 2) /\
feval5 fa1 == create2 (feval5 a).[1] (feval5 a).[1])
(ensures fun out ->
(feval5 out).[0] == Vec.pfadd (feval5 a).[0] (feval5 a).[1] /\
felem_fits5 out (2, 4, 2, 2, 4))
let fmul_r2_normalize51 a fa1 =
let (a0, a1, a2, a3, a4) = a in
let (a10, a11, a12, a13, a14) = fa1 in
let o0 = vec_add_mod a0 a10 in
let o1 = vec_add_mod a1 a11 in
let o2 = vec_add_mod a2 a12 in
let o3 = vec_add_mod a3 a13 in
let o4 = vec_add_mod a4 a14 in
let out = (o0, o1, o2, o3, o4) in
let (a0, a1, a2, a3, a4) = as_tup64_i a 0 in
let (a10, a11, a12, a13, a14) = as_tup64_i fa1 0 in
let (o0, o1, o2, o3, o4) = as_tup64_i out 0 in
FStar.Math.Lemmas.modulo_lemma (v a0 + v a10) (pow2 64);
FStar.Math.Lemmas.modulo_lemma (v a1 + v a11) (pow2 64);
FStar.Math.Lemmas.modulo_lemma (v a2 + v a12) (pow2 64);
FStar.Math.Lemmas.modulo_lemma (v a3 + v a13) (pow2 64);
FStar.Math.Lemmas.modulo_lemma (v a4 + v a14) (pow2 64);
assert (felem_fits5 out (2, 4, 2, 2, 4));
calc (==) {
((feval5 a).[0] + (feval5 a).[1]) % Vec.prime;
(==) { }
(as_nat5 (a0, a1, a2, a3, a4) % Vec.prime + as_nat5 (a10, a11, a12, a13, a14) % Vec.prime) % Vec.prime;
(==) { FStar.Math.Lemmas.lemma_mod_plus_distr_l
(as_nat5 (a0, a1, a2, a3, a4)) (as_nat5 (a10, a11, a12, a13, a14)) Vec.prime;
FStar.Math.Lemmas.lemma_mod_plus_distr_r
(as_nat5 (a0, a1, a2, a3, a4) % Vec.prime) (as_nat5 (a10, a11, a12, a13, a14)) Vec.prime }
(as_nat5 (a0, a1, a2, a3, a4) + as_nat5 (a10, a11, a12, a13, a14)) % Vec.prime;
(==) { }
(feval5 out).[0];
};
out
#pop-options
val fmul_r2_normalize5_lemma:
acc:felem5 2
-> r:felem5 2
-> r2:felem5 2 ->
Lemma
(requires
felem_fits5 acc (3, 3, 3, 3, 3) /\
felem_fits5 r (1, 1, 1, 1, 1) /\
felem_fits5 r2 (2, 2, 2, 2, 2) /\
feval5 r2 == Vec.compute_r2 (feval5 r).[0])
(ensures
(let out = fmul_r2_normalize5 acc r r2 in
felem_fits5 out (2, 1, 1, 1, 1) /\
(feval5 out).[0] == Vec.normalize_2 (feval5 r).[0] (feval5 acc)))
[SMTPat (fmul_r2_normalize5 acc r r2)]
let fmul_r2_normalize5_lemma acc r r2 =
let a = fmul_r2_normalize50 acc r r2 in
let (a0, a1, a2, a3, a4) = a in
let a10 = vec_interleave_high a0 a0 in
vec_interleave_high_lemma2 a0 a0;
let a11 = vec_interleave_high a1 a1 in
vec_interleave_high_lemma2 a1 a1;
let a12 = vec_interleave_high a2 a2 in
vec_interleave_high_lemma2 a2 a2;
let a13 = vec_interleave_high a3 a3 in
vec_interleave_high_lemma2 a3 a3;
let a14 = vec_interleave_high a4 a4 in
vec_interleave_high_lemma2 a4 a4;
let fa1 = (a10, a11, a12, a13, a14) in
eq_intro (feval5 fa1) (create2 (feval5 a).[1] (feval5 a).[1]);
assert (feval5 fa1 == create2 (feval5 a).[1] (feval5 a).[1]);
assert (felem_fits5 fa1 (1, 2, 1, 1, 2));
let out = fmul_r2_normalize51 a fa1 in
assert ((feval5 out).[0] == Vec.pfadd (feval5 a).[0] (feval5 a).[1]);
let res = carry_full_felem5 out in
carry_full_felem5_lemma out
val fmul_r4_normalize50:
acc:felem5 4
-> r:felem5 4
-> r2:felem5 4
-> r3:felem5 4
-> r4:felem5 4 ->
Pure (felem5 4)
(requires
felem_fits5 acc (3, 3, 3, 3, 3) /\
felem_fits5 r (1, 1, 1, 1, 1) /\
felem_fits5 r2 (2, 2, 2, 2, 2) /\
felem_fits5 r3 (2, 2, 2, 2, 2) /\
felem_fits5 r4 (2, 2, 2, 2, 2) /\
feval5 r2 == Vec.fmul (feval5 r) (feval5 r) /\
feval5 r3 == Vec.fmul (feval5 r2) (feval5 r) /\
feval5 r4 == Vec.compute_r4 (feval5 r).[0])
(ensures fun out ->
let fr4321 = create4 (feval5 r4).[0] (feval5 r3).[0] (feval5 r2).[0] (feval5 r).[0] in
feval5 out == Vec.fmul (feval5 acc) fr4321 /\
felem_fits5 out (1, 2, 1, 1, 2))
let fmul_r4_normalize50 acc fr fr2 fr3 fr4 =
let (r10, r11, r12, r13, r14) = fr in
let (r20, r21, r22, r23, r24) = fr2 in
let (r30, r31, r32, r33, r34) = fr3 in
let (r40, r41, r42, r43, r44) = fr4 in
let (a0, a1, a2, a3, a4) = acc in
let v12120 = vec_interleave_low r20 r10 in
vec_interleave_low_lemma_uint64_4 r20 r10;
let v34340 = vec_interleave_low r40 r30 in
vec_interleave_low_lemma_uint64_4 r40 r30;
let r12340 = vec_interleave_low_n 2 v34340 v12120 in
vec_interleave_low_n_lemma_uint64_4_2 v34340 v12120;
let v12121 = vec_interleave_low r21 r11 in
vec_interleave_low_lemma_uint64_4 r21 r11;
let v34341 = vec_interleave_low r41 r31 in
vec_interleave_low_lemma_uint64_4 r41 r31;
let r12341 = vec_interleave_low_n 2 v34341 v12121 in
vec_interleave_low_n_lemma_uint64_4_2 v34341 v12121;
let v12122 = vec_interleave_low r22 r12 in
vec_interleave_low_lemma_uint64_4 r22 r12;
let v34342 = vec_interleave_low r42 r32 in
vec_interleave_low_lemma_uint64_4 r42 r32;
let r12342 = vec_interleave_low_n 2 v34342 v12122 in
vec_interleave_low_n_lemma_uint64_4_2 v34342 v12122;
let v12123 = vec_interleave_low r23 r13 in
vec_interleave_low_lemma_uint64_4 r23 r13;
let v34343 = vec_interleave_low r43 r33 in
vec_interleave_low_lemma_uint64_4 r43 r33;
let r12343 = vec_interleave_low_n 2 v34343 v12123 in
vec_interleave_low_n_lemma_uint64_4_2 v34343 v12123;
let v12124 = vec_interleave_low r24 r14 in
vec_interleave_low_lemma_uint64_4 r24 r14;
let v34344 = vec_interleave_low r44 r34 in
vec_interleave_low_lemma_uint64_4 r44 r34;
let r12344 = vec_interleave_low_n 2 v34344 v12124 in
vec_interleave_low_n_lemma_uint64_4_2 v34344 v12124;
let fr1234 = (r12340, r12341, r12342, r12343, r12344) in
eq_intro (feval5 fr1234) (create4 (feval5 fr4).[0] (feval5 fr3).[0] (feval5 fr2).[0] (feval5 fr).[0]);
let fr12345 = precomp_r5 #4 fr1234 in
let out = fmul_r5 #4 acc fr1234 fr12345 in
fmul_r5_eval_lemma acc fr1234 fr12345;
fmul_r5_fits_lemma acc fr1234 fr12345;
out
val lemma_fmul_r4_normalize51:
#m:scale32{m <= 2}
-> o:uint64xN 4{felem_fits1 o m} ->
Lemma
(let v00 = vec_interleave_high_n 2 o o in
let v10 = vec_add_mod o v00 in
let v10h = vec_interleave_high v10 v10 in
let v20 = vec_add_mod v10 v10h in
felem_fits1 v20 (4 * m) /\
(uint64xN_v v20).[0] == (uint64xN_v o).[0] + (uint64xN_v o).[1] + (uint64xN_v o).[2] + (uint64xN_v o).[3])
let lemma_fmul_r4_normalize51 #m o =
let v00 = vec_interleave_high_n 2 o o in
vec_interleave_high_n_lemma_uint64_4_2 o o;
let (o0, o1, o2, o3) = ((vec_v o).[0], (vec_v o).[1], (vec_v o).[2], (vec_v o).[3]) in
assert (vec_v v00 == create4 o2 o3 o2 o3);
let v10 = vec_add_mod o v00 in
FStar.Math.Lemmas.modulo_lemma (v o0 + v o2) (pow2 64);
FStar.Math.Lemmas.modulo_lemma (v o1 + v o3) (pow2 64);
assert (v (vec_v v10).[0] == v o0 + v o2);
assert (v (vec_v v10).[1] == v o1 + v o3);
let v10h = vec_interleave_high v10 v10 in
vec_interleave_high_lemma_uint64_4 v10 v10;
assert (v (vec_v v10h).[0] == v (vec_v v10).[1]);
let v20 = vec_add_mod v10 v10h in
FStar.Math.Lemmas.modulo_lemma (v o0 + v o2 + v o1 + v o3) (pow2 64)
val lemma_fmul_r4_normalize51_expand:
v2:felem5 4
-> out:felem5 4 ->
Lemma
(requires
(let (v20, v21, v22, v23, v24) = v2 in
let (o0, o1, o2, o3, o4) = out in
(uint64xN_v v20).[0] == (uint64xN_v o0).[0] + (uint64xN_v o0).[1] + (uint64xN_v o0).[2] + (uint64xN_v o0).[3] /\
(uint64xN_v v21).[0] == (uint64xN_v o1).[0] + (uint64xN_v o1).[1] + (uint64xN_v o1).[2] + (uint64xN_v o1).[3] /\
(uint64xN_v v22).[0] == (uint64xN_v o2).[0] + (uint64xN_v o2).[1] + (uint64xN_v o2).[2] + (uint64xN_v o2).[3] /\
(uint64xN_v v23).[0] == (uint64xN_v o3).[0] + (uint64xN_v o3).[1] + (uint64xN_v o3).[2] + (uint64xN_v o3).[3] /\
(uint64xN_v v24).[0] == (uint64xN_v o4).[0] + (uint64xN_v o4).[1] + (uint64xN_v o4).[2] + (uint64xN_v o4).[3]))
(ensures
(let (v20, v21, v22, v23, v24) = v2 in
let (o0, o1, o2, o3, o4) = out in
(feval5 v2).[0] == Vec.pfadd (Vec.pfadd (Vec.pfadd (feval5 out).[0] (feval5 out).[1]) (feval5 out).[2]) (feval5 out).[3]))
let lemma_fmul_r4_normalize51_expand v2 out =
let (v20, v21, v22, v23, v24) = as_tup64_i v2 0 in
let (o0, o1, o2, o3, o4) = out in
calc (==) {
as_nat5 (v20, v21, v22, v23, v24) % Vec.prime;
(==) { }
(as_nat5 (as_tup64_i out 0) + as_nat5 (as_tup64_i out 1) + as_nat5 (as_tup64_i out 2) + as_nat5 (as_tup64_i out 3)) % Vec.prime;
(==) { FStar.Math.Lemmas.lemma_mod_plus_distr_l (as_nat5 (as_tup64_i out 0) + as_nat5 (as_tup64_i out 1))
(as_nat5 (as_tup64_i out 2) + as_nat5 (as_tup64_i out 3)) Vec.prime }
((as_nat5 (as_tup64_i out 0) + as_nat5 (as_tup64_i out 1)) % Vec.prime + as_nat5 (as_tup64_i out 2) + as_nat5 (as_tup64_i out 3)) % Vec.prime;
(==) { FStar.Math.Lemmas.modulo_distributivity (as_nat5 (as_tup64_i out 0)) (as_nat5 (as_tup64_i out 1)) Vec.prime }
(((feval5 out).[0] + (feval5 out).[1]) % Vec.prime + as_nat5 (as_tup64_i out 2) + as_nat5 (as_tup64_i out 3)) % Vec.prime;
(==) { FStar.Math.Lemmas.lemma_mod_plus_distr_l (((feval5 out).[0] + (feval5 out).[1]) % Vec.prime + as_nat5 (as_tup64_i out 2))
(as_nat5 (as_tup64_i out 3)) Vec.prime }
((((feval5 out).[0] + (feval5 out).[1]) % Vec.prime + as_nat5 (as_tup64_i out 2)) % Vec.prime + as_nat5 (as_tup64_i out 3)) % Vec.prime;
(==) { FStar.Math.Lemmas.lemma_mod_plus_distr_r (((feval5 out).[0] + (feval5 out).[1]) % Vec.prime) (as_nat5 (as_tup64_i out 2)) Vec.prime }
((((feval5 out).[0] + (feval5 out).[1]) % Vec.prime + (feval5 out).[2]) % Vec.prime + as_nat5 (as_tup64_i out 3)) % Vec.prime;
(==) { FStar.Math.Lemmas.lemma_mod_plus_distr_r ((((feval5 out).[0] + (feval5 out).[1]) % Vec.prime + (feval5 out).[2]) % Vec.prime)
(as_nat5 (as_tup64_i out 3)) Vec.prime }
((((feval5 out).[0] + (feval5 out).[1]) % Vec.prime + (feval5 out).[2]) % Vec.prime + (feval5 out).[3]) % Vec.prime;
};
assert ((feval5 v2).[0] ==
((((feval5 out).[0] + (feval5 out).[1]) % Vec.prime + (feval5 out).[2]) % Vec.prime + (feval5 out).[3]) % Vec.prime)
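(* Editor's summary of the calc above, writing p for Vec.prime and n_i for
   as_nat5 (as_tup64_i out i): the hypothesis makes every limb of lane 0 of v2
   the sum of the corresponding limbs of the four lanes of out, so
     as_nat5 (as_tup64_i v2 0) = n0 + n1 + n2 + n3        (no reduction yet).
   Repeated use of ((a % p) + b) % p = (a + b) % p (the lemma_mod_plus_distr
   lemmas) then rewrites
     (n0 + n1 + n2 + n3) % p
   into the left-nested fold
     (((n0 % p + n1 % p) % p + n2 % p) % p + n3 % p) % p,
   which is exactly pfadd (pfadd (pfadd (feval5 out).[0] (feval5 out).[1])
   (feval5 out).[2]) (feval5 out).[3]. *)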
val fmul_r4_normalize51: a:felem5 4 ->
Pure (felem5 4)
(requires felem_fits5 a (1, 2, 1, 1, 2))
(ensures fun res ->
felem_fits5 res (4, 8, 4, 4, 8) /\
(feval5 res).[0] == Vec.pfadd (Vec.pfadd (Vec.pfadd (feval5 a).[0] (feval5 a).[1]) (feval5 a).[2]) (feval5 a).[3])
let fmul_r4_normalize51 fa =
let (o0, o1, o2, o3, o4) = fa in
let v00 = vec_interleave_high_n 2 o0 o0 in
let v10 = vec_add_mod o0 v00 in
let v10h = vec_interleave_high v10 v10 in
let v20 = vec_add_mod v10 v10h in
lemma_fmul_r4_normalize51 #1 o0;
let v01 = vec_interleave_high_n 2 o1 o1 in
let v11 = vec_add_mod o1 v01 in
let v11h = vec_interleave_high v11 v11 in
let v21 = vec_add_mod v11 v11h in
lemma_fmul_r4_normalize51 #2 o1;
let v02 = vec_interleave_high_n 2 o2 o2 in
let v12 = vec_add_mod o2 v02 in
let v12h = vec_interleave_high v12 v12 in
let v22 = vec_add_mod v12 v12h in
lemma_fmul_r4_normalize51 #1 o2;
let v03 = vec_interleave_high_n 2 o3 o3 in
let v13 = vec_add_mod o3 v03 in
let v13h = vec_interleave_high v13 v13 in
let v23 = vec_add_mod v13 v13h in
lemma_fmul_r4_normalize51 #1 o3;
let v04 = vec_interleave_high_n 2 o4 o4 in
let v14 = vec_add_mod o4 v04 in
let v14h = vec_interleave_high v14 v14 in
let v24 = vec_add_mod v14 v14h in
lemma_fmul_r4_normalize51 #2 o4;
let res = (v20, v21, v22, v23, v24) in
lemma_fmul_r4_normalize51_expand res fa;
res
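(* Editor's note: each limb of fa is folded across the four lanes by the
   lemma_fmul_r4_normalize51 steps above, so the per-limb bounds scale with the
   number of lanes: (1, 2, 1, 1, 2) becomes (4, 8, 4, 4, 8).  Lane 0 of the
   result then holds, limb by limb, the sum of the four field elements, and
   lemma_fmul_r4_normalize51_expand rephrases that sum as the pfadd fold in the
   postcondition. *)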
val fmul_r4_normalize5_lemma:
acc:felem5 4
-> r:felem5 4
-> r_5:felem5 4
-> r4:felem5 4 ->
Lemma
(requires
felem_fits5 acc (3, 3, 3, 3, 3) /\
felem_fits5 r (1, 1, 1, 1, 1) /\
felem_fits5 r4 (2, 2, 2, 2, 2) /\
r_5 == precomp_r5 r /\
feval5 r4 == Vec.compute_r4 (feval5 r).[0])
(ensures
(let out = fmul_r4_normalize5 acc r r_5 r4 in
felem_fits5 out (2, 1, 1, 1, 1) /\
(feval5 out).[0] == Vec.normalize_4 (feval5 r).[0] (feval5 acc)))
[SMTPat (fmul_r4_normalize5 acc r r_5 r4)]
#restart-solver
#push-options "--z3rlimit 500"
let fmul_r4_normalize5_lemma acc fr fr_5 fr4 =
let fr2 = fmul_r5 #4 fr fr fr_5 in
let fr3 = fmul_r5 #4 fr2 fr fr_5 in
let out = fmul_r4_normalize50 acc fr fr2 fr3 fr4 in
let v2 = fmul_r4_normalize51 out in
let res = carry_full_felem5 v2 in
carry_full_felem5_lemma v2
#pop-options
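(* Editor's sketch of the proof shape, not part of the verified source.  With r
   the per-block key power, the 4-way normalization informally computes
     normalize_4 r [a0; a1; a2; a3] = (a0*r^4 + a1*r^3 + a2*r^2 + a3*r) mod p
   (the authoritative definition is Vec.normalize_4 in Hacl.Spec.Poly1305.Vec).
   The proof above follows that shape: fmul_r4_normalize50 multiplies the lanes
   by [r^4; r^3; r^2; r], fmul_r4_normalize51 sums the four lanes limb-wise,
   lemma_fmul_r4_normalize51_expand moves that sum into the field, and
   carry_full_felem5 restores the (2, 1, 1, 1, 1) limb bounds. *)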
val load_felem5_lemma:
#w:lanes
-> lo:uint64xN w
-> hi:uint64xN w ->
Lemma
(let f = load_felem5 #w lo hi in
felem_fits5 f (1, 1, 1, 1, 1) /\
felem_less5 f (pow2 128) /\
feval5 f == createi #Vec.pfelem w
(fun i -> (uint64xN_v hi).[i] * pow2 64 + (uint64xN_v lo).[i]))
let load_felem5_lemma #w lo hi =
let f = load_felem5 #w lo hi in
assert_norm (pow2 64 * pow2 64 = pow2 128);
assert_norm (pow2 128 < Vec.prime);
let res = createi #Vec.pfelem w
(fun i -> (uint64xN_v hi).[i] * pow2 64 + (uint64xN_v lo).[i]) in
match w with
| 1 ->
load_felem5_lemma_i #w lo hi 0;
eq_intro (feval5 f) res
| 2 ->
load_felem5_lemma_i #w lo hi 0;
load_felem5_lemma_i #w lo hi 1;
eq_intro (feval5 f) res
| 4 ->
load_felem5_lemma_i #w lo hi 0;
load_felem5_lemma_i #w lo hi 1;
load_felem5_lemma_i #w lo hi 2;
load_felem5_lemma_i #w lo hi 3;
eq_intro (feval5 f) res
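(* Editor's note: each lane of f decodes the 128-bit value hi * 2^64 + lo,
   which is below 2^128, and 2^128 < Vec.prime = 2^130 - 5 (the assert_norm
   above), so reducing modulo the prime is a no-op.  That is why feval5 f can
   be stated directly as the raw value hi * 2^64 + lo, lane by lane. *)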
val load_felem5_4_interleave: lo:uint64xN 4 -> hi:uint64xN 4 -> Lemma
(let m0 = vec_interleave_low_n 2 lo hi in
let m1 = vec_interleave_high_n 2 lo hi in
let m2 = cast U64 4 (vec_shift_right (cast U128 2 m0) 48ul) in
let m3 = cast U64 4 (vec_shift_right (cast U128 2 m1) 48ul) in
let m4 = vec_interleave_high m0 m1 in
let t0 = vec_interleave_low m0 m1 in
let t3 = vec_interleave_low m2 m3 in
vec_v m4 == create4 (vec_v lo).[1] (vec_v lo).[3] (vec_v hi).[1] (vec_v hi).[3] /\
vec_v t0 == create4 (vec_v lo).[0] (vec_v lo).[2] (vec_v hi).[0] (vec_v hi).[2] /\
t3 == vec_or (vec_shift_right t0 48ul) (vec_shift_left m4 16ul)) | false | false | Hacl.Spec.Poly1305.Field32xN.Lemmas.fst | {
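(* Editor's reading of the statement above, for concrete lanes
   lo = [l0; l1; l2; l3] and hi = [h0; h1; h2; h3]:
     m0 = vec_interleave_low_n 2 lo hi  = [l0; l1; h0; h1]
     m1 = vec_interleave_high_n 2 lo hi = [l2; l3; h2; h3]
     t0 = vec_interleave_low m0 m1      = [l0; l2; h0; h2]
     m4 = vec_interleave_high m0 m1     = [l1; l3; h1; h3]
   so t0 gathers the low 64-bit word and m4 the high 64-bit word of each
   128-bit input, while t3 is the 64-bit window starting at bit 48: the top 16
   bits of each low word followed by the low 48 bits of the matching high word. *)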
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 100,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val load_felem5_4_interleave: lo:uint64xN 4 -> hi:uint64xN 4 -> Lemma
(let m0 = vec_interleave_low_n 2 lo hi in
let m1 = vec_interleave_high_n 2 lo hi in
let m2 = cast U64 4 (vec_shift_right (cast U128 2 m0) 48ul) in
let m3 = cast U64 4 (vec_shift_right (cast U128 2 m1) 48ul) in
let m4 = vec_interleave_high m0 m1 in
let t0 = vec_interleave_low m0 m1 in
let t3 = vec_interleave_low m2 m3 in
vec_v m4 == create4 (vec_v lo).[1] (vec_v lo).[3] (vec_v hi).[1] (vec_v hi).[3] /\
vec_v t0 == create4 (vec_v lo).[0] (vec_v lo).[2] (vec_v hi).[0] (vec_v hi).[2] /\
t3 == vec_or (vec_shift_right t0 48ul) (vec_shift_left m4 16ul)) | [] | Hacl.Spec.Poly1305.Field32xN.Lemmas.load_felem5_4_interleave | {
"file_name": "code/poly1305/Hacl.Spec.Poly1305.Field32xN.Lemmas.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | lo: Hacl.Spec.Poly1305.Field32xN.uint64xN 4 -> hi: Hacl.Spec.Poly1305.Field32xN.uint64xN 4
-> FStar.Pervasives.Lemma
(ensures
(let m0 = Lib.IntVector.vec_interleave_low_n 2 lo hi in
let m1 = Lib.IntVector.vec_interleave_high_n 2 lo hi in
let m2 =
Lib.IntVector.cast Lib.IntTypes.U64
4
(Lib.IntVector.vec_shift_right (Lib.IntVector.cast Lib.IntTypes.U128 2 m0) 48ul)
in
let m3 =
Lib.IntVector.cast Lib.IntTypes.U64
4
(Lib.IntVector.vec_shift_right (Lib.IntVector.cast Lib.IntTypes.U128 2 m1) 48ul)
in
let m4 = Lib.IntVector.vec_interleave_high m0 m1 in
let t0 = Lib.IntVector.vec_interleave_low m0 m1 in
let t3 = Lib.IntVector.vec_interleave_low m2 m3 in
Lib.IntVector.vec_v m4 ==
Lib.Sequence.create4 (Lib.IntVector.vec_v lo).[ 1 ]
(Lib.IntVector.vec_v lo).[ 3 ]
(Lib.IntVector.vec_v hi).[ 1 ]
(Lib.IntVector.vec_v hi).[ 3 ] /\
Lib.IntVector.vec_v t0 ==
Lib.Sequence.create4 (Lib.IntVector.vec_v lo).[ 0 ]
(Lib.IntVector.vec_v lo).[ 2 ]
(Lib.IntVector.vec_v hi).[ 0 ]
(Lib.IntVector.vec_v hi).[ 2 ] /\
t3 ==
Lib.IntVector.vec_or (Lib.IntVector.vec_shift_right t0 48ul)
(Lib.IntVector.vec_shift_left m4 16ul))) | {
"end_col": 84,
"end_line": 697,
"start_col": 36,
"start_line": 668
} |
FStar.Pervasives.Lemma | val lemma_fmul_r4_normalize51:
#m:scale32{m <= 2}
-> o:uint64xN 4{felem_fits1 o m} ->
Lemma
(let v00 = vec_interleave_high_n 2 o o in
let v10 = vec_add_mod o v00 in
let v10h = vec_interleave_high v10 v10 in
let v20 = vec_add_mod v10 v10h in
felem_fits1 v20 (4 * m) /\
(uint64xN_v v20).[0] == (uint64xN_v o).[0] + (uint64xN_v o).[1] + (uint64xN_v o).[2] + (uint64xN_v o).[3]) | [
{
"abbrev": true,
"full_module": "Hacl.Spec.Poly1305.Vec",
"short_module": "Vec"
},
{
"abbrev": false,
"full_module": "Hacl.Poly1305.Field32xN.Lemmas2",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Poly1305.Field32xN.Lemmas1",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Poly1305.Field32xN.Lemmas0",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305.Field32xN",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305.Field32xN",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305.Field32xN",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let lemma_fmul_r4_normalize51 #m o =
let v00 = vec_interleave_high_n 2 o o in
vec_interleave_high_n_lemma_uint64_4_2 o o;
let (o0, o1, o2, o3) = ((vec_v o).[0], (vec_v o).[1], (vec_v o).[2], (vec_v o).[3]) in
assert (vec_v v00 == create4 o2 o3 o2 o3);
let v10 = vec_add_mod o v00 in
FStar.Math.Lemmas.modulo_lemma (v o0 + v o2) (pow2 64);
FStar.Math.Lemmas.modulo_lemma (v o1 + v o3) (pow2 64);
assert (v (vec_v v10).[0] == v o0 + v o2);
assert (v (vec_v v10).[1] == v o1 + v o3);
let v10h = vec_interleave_high v10 v10 in
vec_interleave_high_lemma_uint64_4 v10 v10;
assert (v (vec_v v10h).[0] == v (vec_v v10).[1]);
let v20 = vec_add_mod v10 v10h in
FStar.Math.Lemmas.modulo_lemma (v o0 + v o2 + v o1 + v o3) (pow2 64) | val lemma_fmul_r4_normalize51:
#m:scale32{m <= 2}
-> o:uint64xN 4{felem_fits1 o m} ->
Lemma
(let v00 = vec_interleave_high_n 2 o o in
let v10 = vec_add_mod o v00 in
let v10h = vec_interleave_high v10 v10 in
let v20 = vec_add_mod v10 v10h in
felem_fits1 v20 (4 * m) /\
(uint64xN_v v20).[0] == (uint64xN_v o).[0] + (uint64xN_v o).[1] + (uint64xN_v o).[2] + (uint64xN_v o).[3])
let lemma_fmul_r4_normalize51 #m o = | false | null | true | let v00 = vec_interleave_high_n 2 o o in
vec_interleave_high_n_lemma_uint64_4_2 o o;
let o0, o1, o2, o3 = ((vec_v o).[ 0 ], (vec_v o).[ 1 ], (vec_v o).[ 2 ], (vec_v o).[ 3 ]) in
assert (vec_v v00 == create4 o2 o3 o2 o3);
let v10 = vec_add_mod o v00 in
FStar.Math.Lemmas.modulo_lemma (v o0 + v o2) (pow2 64);
FStar.Math.Lemmas.modulo_lemma (v o1 + v o3) (pow2 64);
assert (v (vec_v v10).[ 0 ] == v o0 + v o2);
assert (v (vec_v v10).[ 1 ] == v o1 + v o3);
let v10h = vec_interleave_high v10 v10 in
vec_interleave_high_lemma_uint64_4 v10 v10;
assert (v (vec_v v10h).[ 0 ] == v (vec_v v10).[ 1 ]);
let v20 = vec_add_mod v10 v10h in
FStar.Math.Lemmas.modulo_lemma (v o0 + v o2 + v o1 + v o3) (pow2 64) | {
"checked_file": "Hacl.Spec.Poly1305.Field32xN.Lemmas.fst.checked",
"dependencies": [
"prims.fst.checked",
"Lib.Sequence.fsti.checked",
"Lib.IntVector.fsti.checked",
"Lib.IntTypes.fsti.checked",
"Hacl.Spec.Poly1305.Vec.fst.checked",
"Hacl.Spec.Poly1305.Field32xN.fst.checked",
"Hacl.Poly1305.Field32xN.Lemmas2.fst.checked",
"Hacl.Poly1305.Field32xN.Lemmas1.fst.checked",
"Hacl.Poly1305.Field32xN.Lemmas0.fst.checked",
"Hacl.Impl.Poly1305.Lemmas.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Classical.fsti.checked",
"FStar.Calc.fsti.checked"
],
"interface_file": false,
"source_file": "Hacl.Spec.Poly1305.Field32xN.Lemmas.fst"
} | [
"lemma"
] | [
"Hacl.Spec.Poly1305.Field32xN.scale32",
"Prims.b2t",
"Prims.op_LessThanOrEqual",
"Hacl.Spec.Poly1305.Field32xN.uint64xN",
"Hacl.Spec.Poly1305.Field32xN.felem_fits1",
"Lib.IntTypes.int_t",
"Lib.IntTypes.U64",
"Lib.IntTypes.SEC",
"FStar.Math.Lemmas.modulo_lemma",
"Prims.op_Addition",
"Lib.IntTypes.v",
"Prims.pow2",
"Lib.IntVector.vec_t",
"Lib.IntVector.vec_add_mod",
"Prims.unit",
"Prims._assert",
"Prims.eq2",
"Lib.IntTypes.range_t",
"Lib.Sequence.op_String_Access",
"Lib.IntTypes.uint_t",
"Lib.IntVector.vec_v",
"Lib.IntVector.vec_interleave_high_lemma_uint64_4",
"Lib.IntVector.vec_interleave_high",
"Prims.int",
"Lib.Sequence.lseq",
"Lib.Sequence.create4",
"FStar.Pervasives.Native.tuple4",
"FStar.Pervasives.Native.Mktuple4",
"Lib.IntVector.vec_interleave_high_n_lemma_uint64_4_2",
"Lib.IntVector.vec_interleave_high_n"
] | [] | module Hacl.Spec.Poly1305.Field32xN.Lemmas
open Lib.IntTypes
open Lib.IntVector
open Lib.Sequence
open FStar.Mul
open Hacl.Spec.Poly1305.Field32xN
open Hacl.Poly1305.Field32xN.Lemmas0
open Hacl.Poly1305.Field32xN.Lemmas1
open Hacl.Poly1305.Field32xN.Lemmas2
module Vec = Hacl.Spec.Poly1305.Vec
#set-options "--z3rlimit 100 --max_fuel 0 --initial_ifuel 1 --max_ifuel 1 --using_facts_from '* -FStar.Seq'"
val lemma_feval_is_fas_nat_i:
#w:lanes
-> f:felem5 w{felem_less5 f (pow2 128)}
-> i:size_nat{i < w} ->
Lemma ((feval5 f).[i] == (fas_nat5 f).[i])
let lemma_feval_is_fas_nat_i #w f i =
assert_norm (pow2 128 < Vec.prime);
assert ((feval5 f).[i] == (as_nat5 (transpose f).[i]) % Vec.prime);
FStar.Math.Lemmas.modulo_lemma (as_nat5 (transpose f).[i]) Vec.prime
val lemma_feval_is_fas_nat:
#w:lanes
-> f:felem5 w{felem_less5 f (pow2 128)} ->
Lemma (forall (i:nat). i < w ==> (fas_nat5 f).[i] == (feval5 f).[i])
let lemma_feval_is_fas_nat #w f =
FStar.Classical.forall_intro (lemma_feval_is_fas_nat_i #w f)
val precomp_r5_fits_lemma:
#w:lanes
-> r:felem5 w{felem_fits5 r (1, 1, 1, 1, 1)} ->
Lemma (felem_fits5 (precomp_r5 #w r) (5, 5, 5, 5, 5))
[SMTPat (precomp_r5 #w r)]
let precomp_r5_fits_lemma #w r =
FStar.Classical.forall_intro (precomp_r5_as_tup64 #w r)
val precomp_r5_fits_lemma2:
#w:lanes
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)} ->
Lemma (felem_fits5 (precomp_r5 #w r) (10, 10, 10, 10, 10))
[SMTPat (precomp_r5 #w r)]
let precomp_r5_fits_lemma2 #w r =
FStar.Classical.forall_intro (precomp_r5_as_tup64 #w r)
val precomp_r5_zeros: w:lanes -> Lemma
(let r = (zero w, zero w, zero w, zero w, zero w) in
precomp_r5 r == (zero w, zero w, zero w, zero w, zero w))
let precomp_r5_zeros w =
let r = (zero w, zero w, zero w, zero w, zero w) in
let (r0, r1, r2, r3, r4) = precomp_r5 r in
let aux (i:nat{i < w}) : Lemma ((vec_v (vec_smul_mod (zero w) (u64 5))).[i] == u64 0) = () in
Classical.forall_intro aux;
eq_intro (vec_v (vec_smul_mod (zero w) (u64 5))) (vec_v (zero w));
vecv_extensionality (vec_smul_mod (zero w) (u64 5)) (zero w)
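(* Editor's note: precomp_r5 scales every limb by 5 (vec_smul_mod _ (u64 5)).
   The factor 5 comes from 2^130 mod (2^130 - 5) = 5, which is how high limbs
   are folded back during multiplication.  On the all-zero element the scaling
   is again zero, which is what the per-lane aux step and vecv_extensionality
   establish here. *)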
val fadd5_fits_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (2,2,2,2,2)}
-> f2:felem5 w{felem_fits5 f2 (1,1,1,1,1)} ->
Lemma (felem_fits5 (fadd5 f1 f2) (3,3,3,3,3))
[SMTPat (fadd5 f1 f2)]
let fadd5_fits_lemma #w f1 f2 =
let (f10, f11, f12, f13, f14) = f1 in
let (f20, f21, f22, f23, f24) = f2 in
let o = fadd5 f1 f2 in
vec_add_mod_lemma f10 f20;
vec_add_mod_lemma f11 f21;
vec_add_mod_lemma f12 f22;
vec_add_mod_lemma f13 f23;
vec_add_mod_lemma f14 f24
val fadd5_eval_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (2,2,2,2,2)}
-> f2:felem5 w{felem_fits5 f2 (1,1,1,1,1)} ->
Lemma (feval5 (fadd5 f1 f2) == map2 Vec.pfadd (feval5 f1) (feval5 f2))
[SMTPat (fadd5 f1 f2)]
let fadd5_eval_lemma #w f1 f2 =
let o = fadd5 f1 f2 in
FStar.Classical.forall_intro (fadd5_eval_lemma_i f1 f2);
eq_intro (feval5 o) (map2 Vec.pfadd (feval5 f1) (feval5 f2))
val mul_felem5_fits_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10)} ->
Lemma (felem_wide_fits5 (mul_felem5 #w f1 r r5) (126, 102, 78, 54, 30))
[SMTPat (mul_felem5 #w f1 r r5)]
let mul_felem5_fits_lemma #w f1 r r5 =
let (r0, r1, r2, r3, r4) = r in
let (f10, f11, f12, f13, f14) = f1 in
let (r50, r51, r52, r53, r54) = r5 in
let (a0,a1,a2,a3,a4) = smul_felem5 #w f10 (r0,r1,r2,r3,r4) in
smul_felem5_fits_lemma #w #3 #(2,2,2,2,2) f10 (r0,r1,r2,r3,r4);
let (a10,a11,a12,a13,a14) = smul_add_felem5 #w f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4) in
smul_add_felem5_fits_lemma #w #3 #(10,2,2,2,2) #(6,6,6,6,6) f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4);
let (a20,a21,a22,a23,a24) = smul_add_felem5 #w f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14) in
smul_add_felem5_fits_lemma #w #3 #(10,10,2,2,2) #(36,12,12,12,12) f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14);
let (a30,a31,a32,a33,a34) = smul_add_felem5 #w f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24) in
smul_add_felem5_fits_lemma #w #3 #(10,10,10,2,2) #(66,42,18,18,18) f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24);
let (a40,a41,a42,a43,a44) = smul_add_felem5 #w f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34) in
smul_add_felem5_fits_lemma #w #3 #(10,10,10,10,2) #(96,72,48,24,24) f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34)
val mul_felem5_eval_lemma_i:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10) /\ r5 == precomp_r5 r}
-> i:nat{i < w} ->
Lemma ((feval5 (mul_felem5 #w f1 r r5)).[i] == (feval5 f1).[i] `Vec.pfmul` (feval5 r).[i])
let mul_felem5_eval_lemma_i #w f1 r r5 i =
let (r0, r1, r2, r3, r4) = r in
let (f10, f11, f12, f13, f14) = f1 in
let (r50, r51, r52, r53, r54) = r5 in
let (a0,a1,a2,a3,a4) = smul_felem5 #w f10 (r0,r1,r2,r3,r4) in
smul_felem5_eval_lemma #w #3 #(2,2,2,2,2) f10 (r0,r1,r2,r3,r4);
smul_felem5_fits_lemma #w #3 #(2,2,2,2,2) f10 (r0,r1,r2,r3,r4);
assert ((fas_nat5 (a0,a1,a2,a3,a4)).[i] == (uint64xN_v f10).[i] * (fas_nat5 (r0,r1,r2,r3,r4)).[i]);
let (a10,a11,a12,a13,a14) = smul_add_felem5 #w f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4) in
smul_add_felem5_eval_lemma #w #3 #(10,2,2,2,2) #(6,6,6,6,6) f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4);
smul_add_felem5_fits_lemma #w #3 #(10,2,2,2,2) #(6,6,6,6,6) f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4);
assert ((fas_nat5 (a10,a11,a12,a13,a14)).[i] == (fas_nat5 (a0,a1,a2,a3,a4)).[i] + (uint64xN_v f11).[i] * (fas_nat5 (r54,r0,r1,r2,r3)).[i]);
let (a20,a21,a22,a23,a24) = smul_add_felem5 #w f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14) in
smul_add_felem5_eval_lemma #w #3 #(10,10,2,2,2) #(36,12,12,12,12) f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14);
smul_add_felem5_fits_lemma #w #3 #(10,10,2,2,2) #(36,12,12,12,12) f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14);
assert ((fas_nat5 (a20,a21,a22,a23,a24)).[i] == (fas_nat5 (a10,a11,a12,a13,a14)).[i] + (uint64xN_v f12).[i] * (fas_nat5 (r53,r54,r0,r1,r2)).[i]);
let (a30,a31,a32,a33,a34) = smul_add_felem5 #w f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24) in
smul_add_felem5_eval_lemma #w #3 #(10,10,10,2,2) #(66,42,18,18,18) f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24);
smul_add_felem5_fits_lemma #w #3 #(10,10,10,2,2) #(66,42,18,18,18) f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24);
assert ((fas_nat5 (a30,a31,a32,a33,a34)).[i] == (fas_nat5 (a20,a21,a22,a23,a24)).[i] + (uint64xN_v f13).[i] * (fas_nat5 (r52,r53,r54,r0,r1)).[i]);
let (a40,a41,a42,a43,a44) = smul_add_felem5 #w f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34) in
smul_add_felem5_eval_lemma #w #3 #(10,10,10,10,2) #(96,72,48,24,24) f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34);
smul_add_felem5_fits_lemma #w #3 #(10,10,10,10,2) #(96,72,48,24,24) f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34);
assert ((fas_nat5 (a40,a41,a42,a43,a44)).[i] == (fas_nat5 (a30,a31,a32,a33,a34)).[i] + (uint64xN_v f14).[i] * (fas_nat5 (r51,r52,r53,r54,r0)).[i]);
assert ((fas_nat5 (a40,a41,a42,a43,a44)).[i] ==
(uint64xN_v f10).[i] * (fas_nat5 (r0,r1,r2,r3,r4)).[i] +
(uint64xN_v f11).[i] * (fas_nat5 (r54,r0,r1,r2,r3)).[i] +
(uint64xN_v f12).[i] * (fas_nat5 (r53,r54,r0,r1,r2)).[i] +
(uint64xN_v f13).[i] * (fas_nat5 (r52,r53,r54,r0,r1)).[i] +
(uint64xN_v f14).[i] * (fas_nat5 (r51,r52,r53,r54,r0)).[i]);
mul_felem5_eval_as_tup64 #w f1 r r5 i;
mul_felem5_lemma (as_tup64_i f1 i) (as_tup64_i r i)
val mul_felem5_eval_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10) /\ r5 == precomp_r5 r} ->
Lemma (feval5 (mul_felem5 #w f1 r r5) == map2 (Vec.pfmul) (feval5 f1) (feval5 r))
[SMTPat (mul_felem5 #w f1 r r5)]
let mul_felem5_eval_lemma #w f1 r r5 =
let tmp = map2 (Vec.pfmul) (feval5 f1) (feval5 r) in
FStar.Classical.forall_intro (mul_felem5_eval_lemma_i #w f1 r r5);
eq_intro (feval5 (mul_felem5 #w f1 r r5)) tmp
val fmul_r5_fits_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10)} ->
Lemma (felem_fits5 (fmul_r5 #w f1 r r5) (1, 2, 1, 1, 2))
[SMTPat (fmul_r5 #w f1 r r5)]
let fmul_r5_fits_lemma #w f1 r r5 =
let tmp = mul_felem5 f1 r r5 in
mul_felem5_fits_lemma #w f1 r r5;
let res = carry_wide_felem5 tmp in
carry_wide_felem5_fits_lemma #w tmp
val fmul_r5_eval_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10) /\ r5 == precomp_r5 r} ->
Lemma (feval5 (fmul_r5 #w f1 r r5) == map2 (Vec.pfmul) (feval5 f1) (feval5 r))
[SMTPat (fmul_r5 #w f1 r r5)]
let fmul_r5_eval_lemma #w f1 r r5 =
let tmp = mul_felem5 f1 r r5 in
mul_felem5_eval_lemma #w f1 r r5;
let res = carry_wide_felem5 tmp in
carry_wide_felem5_eval_lemma #w tmp
val fadd_mul_r5_fits_lemma:
#w:lanes
-> acc:felem5 w{felem_fits5 acc (2, 2, 2, 2, 2)}
-> f1:felem5 w{felem_fits5 f1 (1, 1, 1, 1, 1)}
-> r:felem5 w{felem_fits5 r (1, 1, 1, 1, 1)}
-> r5:felem5 w{felem_fits5 r5 (5, 5, 5, 5, 5)} ->
Lemma (felem_fits5 (fadd_mul_r5 acc f1 r r5) (1, 2, 1, 1, 2))
[SMTPat (fadd_mul_r5 acc f1 r r5)]
let fadd_mul_r5_fits_lemma #w acc f1 r r5 =
let acc1 = fadd5 acc f1 in
fadd5_fits_lemma #w acc f1;
let tmp = mul_felem5 acc1 r r5 in
mul_felem5_fits_lemma #w acc1 r r5;
let res = carry_wide_felem5 tmp in
carry_wide_felem5_fits_lemma #w tmp
val fadd_mul_r5_eval_lemma:
#w:lanes
-> acc:felem5 w{felem_fits5 acc (2, 2, 2, 2, 2)}
-> f1:felem5 w{felem_fits5 f1 (1, 1, 1, 1, 1)}
-> r:felem5 w{felem_fits5 r (1, 1, 1, 1, 1)}
-> r5:felem5 w{felem_fits5 r5 (5, 5, 5, 5, 5) /\ r5 == precomp_r5 r} ->
Lemma (feval5 (fadd_mul_r5 acc f1 r r5) ==
map2 (Vec.pfmul) (map2 (Vec.pfadd) (feval5 acc) (feval5 f1)) (feval5 r))
[SMTPat (fadd_mul_r5 acc f1 r r5)]
let fadd_mul_r5_eval_lemma #w acc f1 r r5 =
let acc1 = fadd5 acc f1 in
fadd5_eval_lemma #w acc f1;
let tmp = mul_felem5 acc1 r r5 in
mul_felem5_eval_lemma #w acc1 r r5;
let res = carry_wide_felem5 tmp in
carry_wide_felem5_eval_lemma #w tmp
val reduce_felem5_eval_lemma:
#w:lanes
-> f:felem5 w{felem_fits5 f (2, 2, 2, 2, 2)} ->
Lemma
(felem_fits5 (reduce_felem5 f) (1, 1, 1, 1, 1) /\
(feval5 f).[0] == (fas_nat5 (reduce_felem5 f)).[0])
[SMTPat (reduce_felem5 f)]
let reduce_felem5_eval_lemma #w f =
carry_full_felem5_eval_lemma f;
carry_full_felem5_fits_lemma f;
let f = carry_full_felem5 f in
carry_reduce_felem5_lemma #w f;
subtract_p5_felem5_lemma #w (carry_full_felem5 f)
val fmul_r2_normalize50:
acc:felem5 2
-> r:felem5 2
-> r2:felem5 2 ->
Pure (felem5 2)
(requires
felem_fits5 acc (3, 3, 3, 3, 3) /\
felem_fits5 r (1, 1, 1, 1, 1) /\
felem_fits5 r2 (2, 2, 2, 2, 2) /\
feval5 r2 == Vec.compute_r2 (feval5 r).[0])
(ensures fun a ->
let fr21 = create2 (feval5 r2).[0] (feval5 r).[0] in
feval5 a == Vec.fmul (feval5 acc) fr21 /\
felem_fits5 a (1, 2, 1, 1, 2))
let fmul_r2_normalize50 (a0, a1, a2, a3, a4) (r0, r1, r2, r3, r4) (r20, r21, r22, r23, r24) =
let r210 = vec_interleave_low r20 r0 in
vec_interleave_low_lemma2 r20 r0;
let r211 = vec_interleave_low r21 r1 in
vec_interleave_low_lemma2 r21 r1;
let r212 = vec_interleave_low r22 r2 in
vec_interleave_low_lemma2 r22 r2;
let r213 = vec_interleave_low r23 r3 in
vec_interleave_low_lemma2 r23 r3;
let r214 = vec_interleave_low r24 r4 in
vec_interleave_low_lemma2 r24 r4;
let acc = (a0, a1, a2, a3, a4) in
let fr = (r0, r1, r2, r3, r4) in
let fr2 = (r20, r21, r22, r23, r24) in
assert ((feval5 fr2).[0] == Vec.pfmul ((feval5 fr).[0]) ((feval5 fr).[0]));
let fr21 = (r210, r211, r212, r213, r214) in
eq_intro (feval5 fr21) (create2 (feval5 fr2).[0] (feval5 fr).[0]);
assert (feval5 fr21 == create2 (feval5 fr2).[0] (feval5 fr).[0]);
assert (felem_fits5 fr21 (2, 2, 2, 2, 2));
let fr215 = precomp_r5 #2 fr21 in
let a = fmul_r5 #2 acc fr21 fr215 in
fmul_r5_eval_lemma acc fr21 fr215;
fmul_r5_fits_lemma acc fr21 fr215;
assert (feval5 a == Vec.fmul (feval5 acc) (feval5 fr21));
assert (felem_fits5 a (1, 2, 1, 1, 2));
a
#push-options "--z3rlimit 150"
val fmul_r2_normalize51:
a:felem5 2
-> fa1:felem5 2 ->
Pure (felem5 2)
(requires
felem_fits5 a (1, 2, 1, 1, 2) /\
felem_fits5 fa1 (1, 2, 1, 1, 2) /\
feval5 fa1 == create2 (feval5 a).[1] (feval5 a).[1])
(ensures fun out ->
(feval5 out).[0] == Vec.pfadd (feval5 a).[0] (feval5 a).[1] /\
felem_fits5 out (2, 4, 2, 2, 4))
let fmul_r2_normalize51 a fa1 =
let (a0, a1, a2, a3, a4) = a in
let (a10, a11, a12, a13, a14) = fa1 in
let o0 = vec_add_mod a0 a10 in
let o1 = vec_add_mod a1 a11 in
let o2 = vec_add_mod a2 a12 in
let o3 = vec_add_mod a3 a13 in
let o4 = vec_add_mod a4 a14 in
let out = (o0, o1, o2, o3, o4) in
let (a0, a1, a2, a3, a4) = as_tup64_i a 0 in
let (a10, a11, a12, a13, a14) = as_tup64_i fa1 0 in
let (o0, o1, o2, o3, o4) = as_tup64_i out 0 in
FStar.Math.Lemmas.modulo_lemma (v a0 + v a10) (pow2 64);
FStar.Math.Lemmas.modulo_lemma (v a1 + v a11) (pow2 64);
FStar.Math.Lemmas.modulo_lemma (v a2 + v a12) (pow2 64);
FStar.Math.Lemmas.modulo_lemma (v a3 + v a13) (pow2 64);
FStar.Math.Lemmas.modulo_lemma (v a4 + v a14) (pow2 64);
assert (felem_fits5 out (2, 4, 2, 2, 4));
calc (==) {
((feval5 a).[0] + (feval5 a).[1]) % Vec.prime;
(==) { }
(as_nat5 (a0, a1, a2, a3, a4) % Vec.prime + as_nat5 (a10, a11, a12, a13, a14) % Vec.prime) % Vec.prime;
(==) { FStar.Math.Lemmas.lemma_mod_plus_distr_l
(as_nat5 (a0, a1, a2, a3, a4)) (as_nat5 (a10, a11, a12, a13, a14)) Vec.prime;
FStar.Math.Lemmas.lemma_mod_plus_distr_r
(as_nat5 (a0, a1, a2, a3, a4) % Vec.prime) (as_nat5 (a10, a11, a12, a13, a14)) Vec.prime }
(as_nat5 (a0, a1, a2, a3, a4) + as_nat5 (a10, a11, a12, a13, a14)) % Vec.prime;
(==) { }
(feval5 out).[0];
};
out
#pop-options
val fmul_r2_normalize5_lemma:
acc:felem5 2
-> r:felem5 2
-> r2:felem5 2 ->
Lemma
(requires
felem_fits5 acc (3, 3, 3, 3, 3) /\
felem_fits5 r (1, 1, 1, 1, 1) /\
felem_fits5 r2 (2, 2, 2, 2, 2) /\
feval5 r2 == Vec.compute_r2 (feval5 r).[0])
(ensures
(let out = fmul_r2_normalize5 acc r r2 in
felem_fits5 out (2, 1, 1, 1, 1) /\
(feval5 out).[0] == Vec.normalize_2 (feval5 r).[0] (feval5 acc)))
[SMTPat (fmul_r2_normalize5 acc r r2)]
let fmul_r2_normalize5_lemma acc r r2 =
let a = fmul_r2_normalize50 acc r r2 in
let (a0, a1, a2, a3, a4) = a in
let a10 = vec_interleave_high a0 a0 in
vec_interleave_high_lemma2 a0 a0;
let a11 = vec_interleave_high a1 a1 in
vec_interleave_high_lemma2 a1 a1;
let a12 = vec_interleave_high a2 a2 in
vec_interleave_high_lemma2 a2 a2;
let a13 = vec_interleave_high a3 a3 in
vec_interleave_high_lemma2 a3 a3;
let a14 = vec_interleave_high a4 a4 in
vec_interleave_high_lemma2 a4 a4;
let fa1 = (a10, a11, a12, a13, a14) in
eq_intro (feval5 fa1) (create2 (feval5 a).[1] (feval5 a).[1]);
assert (feval5 fa1 == create2 (feval5 a).[1] (feval5 a).[1]);
assert (felem_fits5 fa1 (1, 2, 1, 1, 2));
let out = fmul_r2_normalize51 a fa1 in
assert ((feval5 out).[0] == Vec.pfadd (feval5 a).[0] (feval5 a).[1]);
let res = carry_full_felem5 out in
carry_full_felem5_lemma out
val fmul_r4_normalize50:
acc:felem5 4
-> r:felem5 4
-> r2:felem5 4
-> r3:felem5 4
-> r4:felem5 4 ->
Pure (felem5 4)
(requires
felem_fits5 acc (3, 3, 3, 3, 3) /\
felem_fits5 r (1, 1, 1, 1, 1) /\
felem_fits5 r2 (2, 2, 2, 2, 2) /\
felem_fits5 r3 (2, 2, 2, 2, 2) /\
felem_fits5 r4 (2, 2, 2, 2, 2) /\
feval5 r2 == Vec.fmul (feval5 r) (feval5 r) /\
feval5 r3 == Vec.fmul (feval5 r2) (feval5 r) /\
feval5 r4 == Vec.compute_r4 (feval5 r).[0])
(ensures fun out ->
let fr4321 = create4 (feval5 r4).[0] (feval5 r3).[0] (feval5 r2).[0] (feval5 r).[0] in
feval5 out == Vec.fmul (feval5 acc) fr4321 /\
felem_fits5 out (1, 2, 1, 1, 2))
let fmul_r4_normalize50 acc fr fr2 fr3 fr4 =
let (r10, r11, r12, r13, r14) = fr in
let (r20, r21, r22, r23, r24) = fr2 in
let (r30, r31, r32, r33, r34) = fr3 in
let (r40, r41, r42, r43, r44) = fr4 in
let (a0, a1, a2, a3, a4) = acc in
let v12120 = vec_interleave_low r20 r10 in
vec_interleave_low_lemma_uint64_4 r20 r10;
let v34340 = vec_interleave_low r40 r30 in
vec_interleave_low_lemma_uint64_4 r40 r30;
let r12340 = vec_interleave_low_n 2 v34340 v12120 in
vec_interleave_low_n_lemma_uint64_4_2 v34340 v12120;
let v12121 = vec_interleave_low r21 r11 in
vec_interleave_low_lemma_uint64_4 r21 r11;
let v34341 = vec_interleave_low r41 r31 in
vec_interleave_low_lemma_uint64_4 r41 r31;
let r12341 = vec_interleave_low_n 2 v34341 v12121 in
vec_interleave_low_n_lemma_uint64_4_2 v34341 v12121;
let v12122 = vec_interleave_low r22 r12 in
vec_interleave_low_lemma_uint64_4 r22 r12;
let v34342 = vec_interleave_low r42 r32 in
vec_interleave_low_lemma_uint64_4 r42 r32;
let r12342 = vec_interleave_low_n 2 v34342 v12122 in
vec_interleave_low_n_lemma_uint64_4_2 v34342 v12122;
let v12123 = vec_interleave_low r23 r13 in
vec_interleave_low_lemma_uint64_4 r23 r13;
let v34343 = vec_interleave_low r43 r33 in
vec_interleave_low_lemma_uint64_4 r43 r33;
let r12343 = vec_interleave_low_n 2 v34343 v12123 in
vec_interleave_low_n_lemma_uint64_4_2 v34343 v12123;
let v12124 = vec_interleave_low r24 r14 in
vec_interleave_low_lemma_uint64_4 r24 r14;
let v34344 = vec_interleave_low r44 r34 in
vec_interleave_low_lemma_uint64_4 r44 r34;
let r12344 = vec_interleave_low_n 2 v34344 v12124 in
vec_interleave_low_n_lemma_uint64_4_2 v34344 v12124;
let fr1234 = (r12340, r12341, r12342, r12343, r12344) in
eq_intro (feval5 fr1234) (create4 (feval5 fr4).[0] (feval5 fr3).[0] (feval5 fr2).[0] (feval5 fr).[0]);
let fr12345 = precomp_r5 #4 fr1234 in
let out = fmul_r5 #4 acc fr1234 fr12345 in
fmul_r5_eval_lemma acc fr1234 fr12345;
fmul_r5_fits_lemma acc fr1234 fr12345;
out
val lemma_fmul_r4_normalize51:
#m:scale32{m <= 2}
-> o:uint64xN 4{felem_fits1 o m} ->
Lemma
(let v00 = vec_interleave_high_n 2 o o in
let v10 = vec_add_mod o v00 in
let v10h = vec_interleave_high v10 v10 in
let v20 = vec_add_mod v10 v10h in
felem_fits1 v20 (4 * m) /\
(uint64xN_v v20).[0] == (uint64xN_v o).[0] + (uint64xN_v o).[1] + (uint64xN_v o).[2] + (uint64xN_v o).[3]) | false | false | Hacl.Spec.Poly1305.Field32xN.Lemmas.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 100,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val lemma_fmul_r4_normalize51:
#m:scale32{m <= 2}
-> o:uint64xN 4{felem_fits1 o m} ->
Lemma
(let v00 = vec_interleave_high_n 2 o o in
let v10 = vec_add_mod o v00 in
let v10h = vec_interleave_high v10 v10 in
let v20 = vec_add_mod v10 v10h in
felem_fits1 v20 (4 * m) /\
(uint64xN_v v20).[0] == (uint64xN_v o).[0] + (uint64xN_v o).[1] + (uint64xN_v o).[2] + (uint64xN_v o).[3]) | [] | Hacl.Spec.Poly1305.Field32xN.Lemmas.lemma_fmul_r4_normalize51 | {
"file_name": "code/poly1305/Hacl.Spec.Poly1305.Field32xN.Lemmas.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | o: Hacl.Spec.Poly1305.Field32xN.uint64xN 4 {Hacl.Spec.Poly1305.Field32xN.felem_fits1 o m}
-> FStar.Pervasives.Lemma
(ensures
(let v00 = Lib.IntVector.vec_interleave_high_n 2 o o in
let v10 = Lib.IntVector.vec_add_mod o v00 in
let v10h = Lib.IntVector.vec_interleave_high v10 v10 in
let v20 = Lib.IntVector.vec_add_mod v10 v10h in
Hacl.Spec.Poly1305.Field32xN.felem_fits1 v20 (4 * m) /\
(Hacl.Spec.Poly1305.Field32xN.uint64xN_v v20).[ 0 ] ==
(Hacl.Spec.Poly1305.Field32xN.uint64xN_v o).[ 0 ] +
(Hacl.Spec.Poly1305.Field32xN.uint64xN_v o).[ 1 ] +
(Hacl.Spec.Poly1305.Field32xN.uint64xN_v o).[ 2 ] +
(Hacl.Spec.Poly1305.Field32xN.uint64xN_v o).[ 3 ])) | {
"end_col": 70,
"end_line": 505,
"start_col": 36,
"start_line": 491
} |
Prims.Pure | val fmul_r4_normalize50:
acc:felem5 4
-> r:felem5 4
-> r2:felem5 4
-> r3:felem5 4
-> r4:felem5 4 ->
Pure (felem5 4)
(requires
felem_fits5 acc (3, 3, 3, 3, 3) /\
felem_fits5 r (1, 1, 1, 1, 1) /\
felem_fits5 r2 (2, 2, 2, 2, 2) /\
felem_fits5 r3 (2, 2, 2, 2, 2) /\
felem_fits5 r4 (2, 2, 2, 2, 2) /\
feval5 r2 == Vec.fmul (feval5 r) (feval5 r) /\
feval5 r3 == Vec.fmul (feval5 r2) (feval5 r) /\
feval5 r4 == Vec.compute_r4 (feval5 r).[0])
(ensures fun out ->
let fr4321 = create4 (feval5 r4).[0] (feval5 r3).[0] (feval5 r2).[0] (feval5 r).[0] in
feval5 out == Vec.fmul (feval5 acc) fr4321 /\
felem_fits5 out (1, 2, 1, 1, 2)) | [
{
"abbrev": true,
"full_module": "Hacl.Spec.Poly1305.Vec",
"short_module": "Vec"
},
{
"abbrev": false,
"full_module": "Hacl.Poly1305.Field32xN.Lemmas2",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Poly1305.Field32xN.Lemmas1",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Poly1305.Field32xN.Lemmas0",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305.Field32xN",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305.Field32xN",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305.Field32xN",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let fmul_r4_normalize50 acc fr fr2 fr3 fr4 =
let (r10, r11, r12, r13, r14) = fr in
let (r20, r21, r22, r23, r24) = fr2 in
let (r30, r31, r32, r33, r34) = fr3 in
let (r40, r41, r42, r43, r44) = fr4 in
let (a0, a1, a2, a3, a4) = acc in
let v12120 = vec_interleave_low r20 r10 in
vec_interleave_low_lemma_uint64_4 r20 r10;
let v34340 = vec_interleave_low r40 r30 in
vec_interleave_low_lemma_uint64_4 r40 r30;
let r12340 = vec_interleave_low_n 2 v34340 v12120 in
vec_interleave_low_n_lemma_uint64_4_2 v34340 v12120;
let v12121 = vec_interleave_low r21 r11 in
vec_interleave_low_lemma_uint64_4 r21 r11;
let v34341 = vec_interleave_low r41 r31 in
vec_interleave_low_lemma_uint64_4 r41 r31;
let r12341 = vec_interleave_low_n 2 v34341 v12121 in
vec_interleave_low_n_lemma_uint64_4_2 v34341 v12121;
let v12122 = vec_interleave_low r22 r12 in
vec_interleave_low_lemma_uint64_4 r22 r12;
let v34342 = vec_interleave_low r42 r32 in
vec_interleave_low_lemma_uint64_4 r42 r32;
let r12342 = vec_interleave_low_n 2 v34342 v12122 in
vec_interleave_low_n_lemma_uint64_4_2 v34342 v12122;
let v12123 = vec_interleave_low r23 r13 in
vec_interleave_low_lemma_uint64_4 r23 r13;
let v34343 = vec_interleave_low r43 r33 in
vec_interleave_low_lemma_uint64_4 r43 r33;
let r12343 = vec_interleave_low_n 2 v34343 v12123 in
vec_interleave_low_n_lemma_uint64_4_2 v34343 v12123;
let v12124 = vec_interleave_low r24 r14 in
vec_interleave_low_lemma_uint64_4 r24 r14;
let v34344 = vec_interleave_low r44 r34 in
vec_interleave_low_lemma_uint64_4 r44 r34;
let r12344 = vec_interleave_low_n 2 v34344 v12124 in
vec_interleave_low_n_lemma_uint64_4_2 v34344 v12124;
let fr1234 = (r12340, r12341, r12342, r12343, r12344) in
eq_intro (feval5 fr1234) (create4 (feval5 fr4).[0] (feval5 fr3).[0] (feval5 fr2).[0] (feval5 fr).[0]);
let fr12345 = precomp_r5 #4 fr1234 in
let out = fmul_r5 #4 acc fr1234 fr12345 in
fmul_r5_eval_lemma acc fr1234 fr12345;
fmul_r5_fits_lemma acc fr1234 fr12345;
out | val fmul_r4_normalize50:
acc:felem5 4
-> r:felem5 4
-> r2:felem5 4
-> r3:felem5 4
-> r4:felem5 4 ->
Pure (felem5 4)
(requires
felem_fits5 acc (3, 3, 3, 3, 3) /\
felem_fits5 r (1, 1, 1, 1, 1) /\
felem_fits5 r2 (2, 2, 2, 2, 2) /\
felem_fits5 r3 (2, 2, 2, 2, 2) /\
felem_fits5 r4 (2, 2, 2, 2, 2) /\
feval5 r2 == Vec.fmul (feval5 r) (feval5 r) /\
feval5 r3 == Vec.fmul (feval5 r2) (feval5 r) /\
feval5 r4 == Vec.compute_r4 (feval5 r).[0])
(ensures fun out ->
let fr4321 = create4 (feval5 r4).[0] (feval5 r3).[0] (feval5 r2).[0] (feval5 r).[0] in
feval5 out == Vec.fmul (feval5 acc) fr4321 /\
felem_fits5 out (1, 2, 1, 1, 2))
let fmul_r4_normalize50 acc fr fr2 fr3 fr4 = | false | null | false | let r10, r11, r12, r13, r14 = fr in
let r20, r21, r22, r23, r24 = fr2 in
let r30, r31, r32, r33, r34 = fr3 in
let r40, r41, r42, r43, r44 = fr4 in
let a0, a1, a2, a3, a4 = acc in
let v12120 = vec_interleave_low r20 r10 in
vec_interleave_low_lemma_uint64_4 r20 r10;
let v34340 = vec_interleave_low r40 r30 in
vec_interleave_low_lemma_uint64_4 r40 r30;
let r12340 = vec_interleave_low_n 2 v34340 v12120 in
vec_interleave_low_n_lemma_uint64_4_2 v34340 v12120;
let v12121 = vec_interleave_low r21 r11 in
vec_interleave_low_lemma_uint64_4 r21 r11;
let v34341 = vec_interleave_low r41 r31 in
vec_interleave_low_lemma_uint64_4 r41 r31;
let r12341 = vec_interleave_low_n 2 v34341 v12121 in
vec_interleave_low_n_lemma_uint64_4_2 v34341 v12121;
let v12122 = vec_interleave_low r22 r12 in
vec_interleave_low_lemma_uint64_4 r22 r12;
let v34342 = vec_interleave_low r42 r32 in
vec_interleave_low_lemma_uint64_4 r42 r32;
let r12342 = vec_interleave_low_n 2 v34342 v12122 in
vec_interleave_low_n_lemma_uint64_4_2 v34342 v12122;
let v12123 = vec_interleave_low r23 r13 in
vec_interleave_low_lemma_uint64_4 r23 r13;
let v34343 = vec_interleave_low r43 r33 in
vec_interleave_low_lemma_uint64_4 r43 r33;
let r12343 = vec_interleave_low_n 2 v34343 v12123 in
vec_interleave_low_n_lemma_uint64_4_2 v34343 v12123;
let v12124 = vec_interleave_low r24 r14 in
vec_interleave_low_lemma_uint64_4 r24 r14;
let v34344 = vec_interleave_low r44 r34 in
vec_interleave_low_lemma_uint64_4 r44 r34;
let r12344 = vec_interleave_low_n 2 v34344 v12124 in
vec_interleave_low_n_lemma_uint64_4_2 v34344 v12124;
let fr1234 = (r12340, r12341, r12342, r12343, r12344) in
eq_intro (feval5 fr1234)
(create4 (feval5 fr4).[ 0 ] (feval5 fr3).[ 0 ] (feval5 fr2).[ 0 ] (feval5 fr).[ 0 ]);
let fr12345 = precomp_r5 #4 fr1234 in
let out = fmul_r5 #4 acc fr1234 fr12345 in
fmul_r5_eval_lemma acc fr1234 fr12345;
fmul_r5_fits_lemma acc fr1234 fr12345;
out | {
"checked_file": "Hacl.Spec.Poly1305.Field32xN.Lemmas.fst.checked",
"dependencies": [
"prims.fst.checked",
"Lib.Sequence.fsti.checked",
"Lib.IntVector.fsti.checked",
"Lib.IntTypes.fsti.checked",
"Hacl.Spec.Poly1305.Vec.fst.checked",
"Hacl.Spec.Poly1305.Field32xN.fst.checked",
"Hacl.Poly1305.Field32xN.Lemmas2.fst.checked",
"Hacl.Poly1305.Field32xN.Lemmas1.fst.checked",
"Hacl.Poly1305.Field32xN.Lemmas0.fst.checked",
"Hacl.Impl.Poly1305.Lemmas.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Classical.fsti.checked",
"FStar.Calc.fsti.checked"
],
"interface_file": false,
"source_file": "Hacl.Spec.Poly1305.Field32xN.Lemmas.fst"
} | [] | [
"Hacl.Spec.Poly1305.Field32xN.felem5",
"Hacl.Spec.Poly1305.Field32xN.uint64xN",
"Prims.unit",
"Hacl.Spec.Poly1305.Field32xN.Lemmas.fmul_r5_fits_lemma",
"Hacl.Spec.Poly1305.Field32xN.Lemmas.fmul_r5_eval_lemma",
"Hacl.Spec.Poly1305.Field32xN.fmul_r5",
"Hacl.Spec.Poly1305.Field32xN.precomp_r5",
"Lib.Sequence.eq_intro",
"Hacl.Spec.Poly1305.Vec.pfelem",
"Hacl.Spec.Poly1305.Field32xN.feval5",
"Lib.Sequence.create4",
"Lib.Sequence.op_String_Access",
"FStar.Pervasives.Native.tuple5",
"FStar.Pervasives.Native.Mktuple5",
"Lib.IntVector.vec_interleave_low_n_lemma_uint64_4_2",
"Lib.IntVector.vec_t",
"Lib.IntTypes.U64",
"Lib.IntVector.vec_interleave_low_n",
"Lib.IntVector.vec_interleave_low_lemma_uint64_4",
"Lib.IntVector.vec_interleave_low"
] | [] | module Hacl.Spec.Poly1305.Field32xN.Lemmas
open Lib.IntTypes
open Lib.IntVector
open Lib.Sequence
open FStar.Mul
open Hacl.Spec.Poly1305.Field32xN
open Hacl.Poly1305.Field32xN.Lemmas0
open Hacl.Poly1305.Field32xN.Lemmas1
open Hacl.Poly1305.Field32xN.Lemmas2
module Vec = Hacl.Spec.Poly1305.Vec
#set-options "--z3rlimit 100 --max_fuel 0 --initial_ifuel 1 --max_ifuel 1 --using_facts_from '* -FStar.Seq'"
val lemma_feval_is_fas_nat_i:
#w:lanes
-> f:felem5 w{felem_less5 f (pow2 128)}
-> i:size_nat{i < w} ->
Lemma ((feval5 f).[i] == (fas_nat5 f).[i])
let lemma_feval_is_fas_nat_i #w f i =
assert_norm (pow2 128 < Vec.prime);
assert ((feval5 f).[i] == (as_nat5 (transpose f).[i]) % Vec.prime);
FStar.Math.Lemmas.modulo_lemma (as_nat5 (transpose f).[i]) Vec.prime
val lemma_feval_is_fas_nat:
#w:lanes
-> f:felem5 w{felem_less5 f (pow2 128)} ->
Lemma (forall (i:nat). i < w ==> (fas_nat5 f).[i] == (feval5 f).[i])
let lemma_feval_is_fas_nat #w f =
FStar.Classical.forall_intro (lemma_feval_is_fas_nat_i #w f)
val precomp_r5_fits_lemma:
#w:lanes
-> r:felem5 w{felem_fits5 r (1, 1, 1, 1, 1)} ->
Lemma (felem_fits5 (precomp_r5 #w r) (5, 5, 5, 5, 5))
[SMTPat (precomp_r5 #w r)]
let precomp_r5_fits_lemma #w r =
FStar.Classical.forall_intro (precomp_r5_as_tup64 #w r)
val precomp_r5_fits_lemma2:
#w:lanes
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)} ->
Lemma (felem_fits5 (precomp_r5 #w r) (10, 10, 10, 10, 10))
[SMTPat (precomp_r5 #w r)]
let precomp_r5_fits_lemma2 #w r =
FStar.Classical.forall_intro (precomp_r5_as_tup64 #w r)
val precomp_r5_zeros: w:lanes -> Lemma
(let r = (zero w, zero w, zero w, zero w, zero w) in
precomp_r5 r == (zero w, zero w, zero w, zero w, zero w))
let precomp_r5_zeros w =
let r = (zero w, zero w, zero w, zero w, zero w) in
let (r0, r1, r2, r3, r4) = precomp_r5 r in
let aux (i:nat{i < w}) : Lemma ((vec_v (vec_smul_mod (zero w) (u64 5))).[i] == u64 0) = () in
Classical.forall_intro aux;
eq_intro (vec_v (vec_smul_mod (zero w) (u64 5))) (vec_v (zero w));
vecv_extensionality (vec_smul_mod (zero w) (u64 5)) (zero w)
val fadd5_fits_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (2,2,2,2,2)}
-> f2:felem5 w{felem_fits5 f2 (1,1,1,1,1)} ->
Lemma (felem_fits5 (fadd5 f1 f2) (3,3,3,3,3))
[SMTPat (fadd5 f1 f2)]
let fadd5_fits_lemma #w f1 f2 =
let (f10, f11, f12, f13, f14) = f1 in
let (f20, f21, f22, f23, f24) = f2 in
let o = fadd5 f1 f2 in
vec_add_mod_lemma f10 f20;
vec_add_mod_lemma f11 f21;
vec_add_mod_lemma f12 f22;
vec_add_mod_lemma f13 f23;
vec_add_mod_lemma f14 f24
val fadd5_eval_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (2,2,2,2,2)}
-> f2:felem5 w{felem_fits5 f2 (1,1,1,1,1)} ->
Lemma (feval5 (fadd5 f1 f2) == map2 Vec.pfadd (feval5 f1) (feval5 f2))
[SMTPat (fadd5 f1 f2)]
let fadd5_eval_lemma #w f1 f2 =
let o = fadd5 f1 f2 in
FStar.Classical.forall_intro (fadd5_eval_lemma_i f1 f2);
eq_intro (feval5 o) (map2 Vec.pfadd (feval5 f1) (feval5 f2))
val mul_felem5_fits_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10)} ->
Lemma (felem_wide_fits5 (mul_felem5 #w f1 r r5) (126, 102, 78, 54, 30))
[SMTPat (mul_felem5 #w f1 r r5)]
let mul_felem5_fits_lemma #w f1 r r5 =
let (r0, r1, r2, r3, r4) = r in
let (f10, f11, f12, f13, f14) = f1 in
let (r50, r51, r52, r53, r54) = r5 in
let (a0,a1,a2,a3,a4) = smul_felem5 #w f10 (r0,r1,r2,r3,r4) in
smul_felem5_fits_lemma #w #3 #(2,2,2,2,2) f10 (r0,r1,r2,r3,r4);
let (a10,a11,a12,a13,a14) = smul_add_felem5 #w f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4) in
smul_add_felem5_fits_lemma #w #3 #(10,2,2,2,2) #(6,6,6,6,6) f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4);
let (a20,a21,a22,a23,a24) = smul_add_felem5 #w f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14) in
smul_add_felem5_fits_lemma #w #3 #(10,10,2,2,2) #(36,12,12,12,12) f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14);
let (a30,a31,a32,a33,a34) = smul_add_felem5 #w f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24) in
smul_add_felem5_fits_lemma #w #3 #(10,10,10,2,2) #(66,42,18,18,18) f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24);
let (a40,a41,a42,a43,a44) = smul_add_felem5 #w f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34) in
smul_add_felem5_fits_lemma #w #3 #(10,10,10,10,2) #(96,72,48,24,24) f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34)
val mul_felem5_eval_lemma_i:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10) /\ r5 == precomp_r5 r}
-> i:nat{i < w} ->
Lemma ((feval5 (mul_felem5 #w f1 r r5)).[i] == (feval5 f1).[i] `Vec.pfmul` (feval5 r).[i])
let mul_felem5_eval_lemma_i #w f1 r r5 i =
let (r0, r1, r2, r3, r4) = r in
let (f10, f11, f12, f13, f14) = f1 in
let (r50, r51, r52, r53, r54) = r5 in
let (a0,a1,a2,a3,a4) = smul_felem5 #w f10 (r0,r1,r2,r3,r4) in
smul_felem5_eval_lemma #w #3 #(2,2,2,2,2) f10 (r0,r1,r2,r3,r4);
smul_felem5_fits_lemma #w #3 #(2,2,2,2,2) f10 (r0,r1,r2,r3,r4);
assert ((fas_nat5 (a0,a1,a2,a3,a4)).[i] == (uint64xN_v f10).[i] * (fas_nat5 (r0,r1,r2,r3,r4)).[i]);
let (a10,a11,a12,a13,a14) = smul_add_felem5 #w f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4) in
smul_add_felem5_eval_lemma #w #3 #(10,2,2,2,2) #(6,6,6,6,6) f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4);
smul_add_felem5_fits_lemma #w #3 #(10,2,2,2,2) #(6,6,6,6,6) f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4);
assert ((fas_nat5 (a10,a11,a12,a13,a14)).[i] == (fas_nat5 (a0,a1,a2,a3,a4)).[i] + (uint64xN_v f11).[i] * (fas_nat5 (r54,r0,r1,r2,r3)).[i]);
let (a20,a21,a22,a23,a24) = smul_add_felem5 #w f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14) in
smul_add_felem5_eval_lemma #w #3 #(10,10,2,2,2) #(36,12,12,12,12) f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14);
smul_add_felem5_fits_lemma #w #3 #(10,10,2,2,2) #(36,12,12,12,12) f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14);
assert ((fas_nat5 (a20,a21,a22,a23,a24)).[i] == (fas_nat5 (a10,a11,a12,a13,a14)).[i] + (uint64xN_v f12).[i] * (fas_nat5 (r53,r54,r0,r1,r2)).[i]);
let (a30,a31,a32,a33,a34) = smul_add_felem5 #w f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24) in
smul_add_felem5_eval_lemma #w #3 #(10,10,10,2,2) #(66,42,18,18,18) f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24);
smul_add_felem5_fits_lemma #w #3 #(10,10,10,2,2) #(66,42,18,18,18) f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24);
assert ((fas_nat5 (a30,a31,a32,a33,a34)).[i] == (fas_nat5 (a20,a21,a22,a23,a24)).[i] + (uint64xN_v f13).[i] * (fas_nat5 (r52,r53,r54,r0,r1)).[i]);
let (a40,a41,a42,a43,a44) = smul_add_felem5 #w f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34) in
smul_add_felem5_eval_lemma #w #3 #(10,10,10,10,2) #(96,72,48,24,24) f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34);
smul_add_felem5_fits_lemma #w #3 #(10,10,10,10,2) #(96,72,48,24,24) f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34);
assert ((fas_nat5 (a40,a41,a42,a43,a44)).[i] == (fas_nat5 (a30,a31,a32,a33,a34)).[i] + (uint64xN_v f14).[i] * (fas_nat5 (r51,r52,r53,r54,r0)).[i]);
assert ((fas_nat5 (a40,a41,a42,a43,a44)).[i] ==
(uint64xN_v f10).[i] * (fas_nat5 (r0,r1,r2,r3,r4)).[i] +
(uint64xN_v f11).[i] * (fas_nat5 (r54,r0,r1,r2,r3)).[i] +
(uint64xN_v f12).[i] * (fas_nat5 (r53,r54,r0,r1,r2)).[i] +
(uint64xN_v f13).[i] * (fas_nat5 (r52,r53,r54,r0,r1)).[i] +
(uint64xN_v f14).[i] * (fas_nat5 (r51,r52,r53,r54,r0)).[i]);
mul_felem5_eval_as_tup64 #w f1 r r5 i;
mul_felem5_lemma (as_tup64_i f1 i) (as_tup64_i r i)
val mul_felem5_eval_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10) /\ r5 == precomp_r5 r} ->
Lemma (feval5 (mul_felem5 #w f1 r r5) == map2 (Vec.pfmul) (feval5 f1) (feval5 r))
[SMTPat (mul_felem5 #w f1 r r5)]
let mul_felem5_eval_lemma #w f1 r r5 =
let tmp = map2 (Vec.pfmul) (feval5 f1) (feval5 r) in
FStar.Classical.forall_intro (mul_felem5_eval_lemma_i #w f1 r r5);
eq_intro (feval5 (mul_felem5 #w f1 r r5)) tmp
val fmul_r5_fits_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10)} ->
Lemma (felem_fits5 (fmul_r5 #w f1 r r5) (1, 2, 1, 1, 2))
[SMTPat (fmul_r5 #w f1 r r5)]
let fmul_r5_fits_lemma #w f1 r r5 =
let tmp = mul_felem5 f1 r r5 in
mul_felem5_fits_lemma #w f1 r r5;
let res = carry_wide_felem5 tmp in
carry_wide_felem5_fits_lemma #w tmp
val fmul_r5_eval_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10) /\ r5 == precomp_r5 r} ->
Lemma (feval5 (fmul_r5 #w f1 r r5) == map2 (Vec.pfmul) (feval5 f1) (feval5 r))
[SMTPat (fmul_r5 #w f1 r r5)]
let fmul_r5_eval_lemma #w f1 r r5 =
let tmp = mul_felem5 f1 r r5 in
mul_felem5_eval_lemma #w f1 r r5;
let res = carry_wide_felem5 tmp in
carry_wide_felem5_eval_lemma #w tmp
val fadd_mul_r5_fits_lemma:
#w:lanes
-> acc:felem5 w{felem_fits5 acc (2, 2, 2, 2, 2)}
-> f1:felem5 w{felem_fits5 f1 (1, 1, 1, 1, 1)}
-> r:felem5 w{felem_fits5 r (1, 1, 1, 1, 1)}
-> r5:felem5 w{felem_fits5 r5 (5, 5, 5, 5, 5)} ->
Lemma (felem_fits5 (fadd_mul_r5 acc f1 r r5) (1, 2, 1, 1, 2))
[SMTPat (fadd_mul_r5 acc f1 r r5)]
let fadd_mul_r5_fits_lemma #w acc f1 r r5 =
let acc1 = fadd5 acc f1 in
fadd5_fits_lemma #w acc f1;
let tmp = mul_felem5 acc1 r r5 in
mul_felem5_fits_lemma #w acc1 r r5;
let res = carry_wide_felem5 tmp in
carry_wide_felem5_fits_lemma #w tmp
val fadd_mul_r5_eval_lemma:
#w:lanes
-> acc:felem5 w{felem_fits5 acc (2, 2, 2, 2, 2)}
-> f1:felem5 w{felem_fits5 f1 (1, 1, 1, 1, 1)}
-> r:felem5 w{felem_fits5 r (1, 1, 1, 1, 1)}
-> r5:felem5 w{felem_fits5 r5 (5, 5, 5, 5, 5) /\ r5 == precomp_r5 r} ->
Lemma (feval5 (fadd_mul_r5 acc f1 r r5) ==
map2 (Vec.pfmul) (map2 (Vec.pfadd) (feval5 acc) (feval5 f1)) (feval5 r))
[SMTPat (fadd_mul_r5 acc f1 r r5)]
let fadd_mul_r5_eval_lemma #w acc f1 r r5 =
let acc1 = fadd5 acc f1 in
fadd5_eval_lemma #w acc f1;
let tmp = mul_felem5 acc1 r r5 in
mul_felem5_eval_lemma #w acc1 r r5;
let res = carry_wide_felem5 tmp in
carry_wide_felem5_eval_lemma #w tmp
val reduce_felem5_eval_lemma:
#w:lanes
-> f:felem5 w{felem_fits5 f (2, 2, 2, 2, 2)} ->
Lemma
(felem_fits5 (reduce_felem5 f) (1, 1, 1, 1, 1) /\
(feval5 f).[0] == (fas_nat5 (reduce_felem5 f)).[0])
[SMTPat (reduce_felem5 f)]
let reduce_felem5_eval_lemma #w f =
carry_full_felem5_eval_lemma f;
carry_full_felem5_fits_lemma f;
let f = carry_full_felem5 f in
carry_reduce_felem5_lemma #w f;
subtract_p5_felem5_lemma #w (carry_full_felem5 f)
val fmul_r2_normalize50:
acc:felem5 2
-> r:felem5 2
-> r2:felem5 2 ->
Pure (felem5 2)
(requires
felem_fits5 acc (3, 3, 3, 3, 3) /\
felem_fits5 r (1, 1, 1, 1, 1) /\
felem_fits5 r2 (2, 2, 2, 2, 2) /\
feval5 r2 == Vec.compute_r2 (feval5 r).[0])
(ensures fun a ->
let fr21 = create2 (feval5 r2).[0] (feval5 r).[0] in
feval5 a == Vec.fmul (feval5 acc) fr21 /\
felem_fits5 a (1, 2, 1, 1, 2))
let fmul_r2_normalize50 (a0, a1, a2, a3, a4) (r0, r1, r2, r3, r4) (r20, r21, r22, r23, r24) =
let r210 = vec_interleave_low r20 r0 in
vec_interleave_low_lemma2 r20 r0;
let r211 = vec_interleave_low r21 r1 in
vec_interleave_low_lemma2 r21 r1;
let r212 = vec_interleave_low r22 r2 in
vec_interleave_low_lemma2 r22 r2;
let r213 = vec_interleave_low r23 r3 in
vec_interleave_low_lemma2 r23 r3;
let r214 = vec_interleave_low r24 r4 in
vec_interleave_low_lemma2 r24 r4;
let acc = (a0, a1, a2, a3, a4) in
let fr = (r0, r1, r2, r3, r4) in
let fr2 = (r20, r21, r22, r23, r24) in
assert ((feval5 fr2).[0] == Vec.pfmul ((feval5 fr).[0]) ((feval5 fr).[0]));
let fr21 = (r210, r211, r212, r213, r214) in
eq_intro (feval5 fr21) (create2 (feval5 fr2).[0] (feval5 fr).[0]);
assert (feval5 fr21 == create2 (feval5 fr2).[0] (feval5 fr).[0]);
assert (felem_fits5 fr21 (2, 2, 2, 2, 2));
let fr215 = precomp_r5 #2 fr21 in
let a = fmul_r5 #2 acc fr21 fr215 in
fmul_r5_eval_lemma acc fr21 fr215;
fmul_r5_fits_lemma acc fr21 fr215;
assert (feval5 a == Vec.fmul (feval5 acc) (feval5 fr21));
assert (felem_fits5 a (1, 2, 1, 1, 2));
a
#push-options "--z3rlimit 150"
val fmul_r2_normalize51:
a:felem5 2
-> fa1:felem5 2 ->
Pure (felem5 2)
(requires
felem_fits5 a (1, 2, 1, 1, 2) /\
felem_fits5 fa1 (1, 2, 1, 1, 2) /\
feval5 fa1 == create2 (feval5 a).[1] (feval5 a).[1])
(ensures fun out ->
(feval5 out).[0] == Vec.pfadd (feval5 a).[0] (feval5 a).[1] /\
felem_fits5 out (2, 4, 2, 2, 4))
let fmul_r2_normalize51 a fa1 =
let (a0, a1, a2, a3, a4) = a in
let (a10, a11, a12, a13, a14) = fa1 in
let o0 = vec_add_mod a0 a10 in
let o1 = vec_add_mod a1 a11 in
let o2 = vec_add_mod a2 a12 in
let o3 = vec_add_mod a3 a13 in
let o4 = vec_add_mod a4 a14 in
let out = (o0, o1, o2, o3, o4) in
let (a0, a1, a2, a3, a4) = as_tup64_i a 0 in
let (a10, a11, a12, a13, a14) = as_tup64_i fa1 0 in
let (o0, o1, o2, o3, o4) = as_tup64_i out 0 in
FStar.Math.Lemmas.modulo_lemma (v a0 + v a10) (pow2 64);
FStar.Math.Lemmas.modulo_lemma (v a1 + v a11) (pow2 64);
FStar.Math.Lemmas.modulo_lemma (v a2 + v a12) (pow2 64);
FStar.Math.Lemmas.modulo_lemma (v a3 + v a13) (pow2 64);
FStar.Math.Lemmas.modulo_lemma (v a4 + v a14) (pow2 64);
assert (felem_fits5 out (2, 4, 2, 2, 4));
calc (==) {
((feval5 a).[0] + (feval5 a).[1]) % Vec.prime;
(==) { }
(as_nat5 (a0, a1, a2, a3, a4) % Vec.prime + as_nat5 (a10, a11, a12, a13, a14) % Vec.prime) % Vec.prime;
(==) { FStar.Math.Lemmas.lemma_mod_plus_distr_l
(as_nat5 (a0, a1, a2, a3, a4)) (as_nat5 (a10, a11, a12, a13, a14)) Vec.prime;
FStar.Math.Lemmas.lemma_mod_plus_distr_r
(as_nat5 (a0, a1, a2, a3, a4) % Vec.prime) (as_nat5 (a10, a11, a12, a13, a14)) Vec.prime }
(as_nat5 (a0, a1, a2, a3, a4) + as_nat5 (a10, a11, a12, a13, a14)) % Vec.prime;
(==) { }
(feval5 out).[0];
};
out
#pop-options
val fmul_r2_normalize5_lemma:
acc:felem5 2
-> r:felem5 2
-> r2:felem5 2 ->
Lemma
(requires
felem_fits5 acc (3, 3, 3, 3, 3) /\
felem_fits5 r (1, 1, 1, 1, 1) /\
felem_fits5 r2 (2, 2, 2, 2, 2) /\
feval5 r2 == Vec.compute_r2 (feval5 r).[0])
(ensures
(let out = fmul_r2_normalize5 acc r r2 in
felem_fits5 out (2, 1, 1, 1, 1) /\
(feval5 out).[0] == Vec.normalize_2 (feval5 r).[0] (feval5 acc)))
[SMTPat (fmul_r2_normalize5 acc r r2)]
let fmul_r2_normalize5_lemma acc r r2 =
let a = fmul_r2_normalize50 acc r r2 in
let (a0, a1, a2, a3, a4) = a in
let a10 = vec_interleave_high a0 a0 in
vec_interleave_high_lemma2 a0 a0;
let a11 = vec_interleave_high a1 a1 in
vec_interleave_high_lemma2 a1 a1;
let a12 = vec_interleave_high a2 a2 in
vec_interleave_high_lemma2 a2 a2;
let a13 = vec_interleave_high a3 a3 in
vec_interleave_high_lemma2 a3 a3;
let a14 = vec_interleave_high a4 a4 in
vec_interleave_high_lemma2 a4 a4;
let fa1 = (a10, a11, a12, a13, a14) in
eq_intro (feval5 fa1) (create2 (feval5 a).[1] (feval5 a).[1]);
assert (feval5 fa1 == create2 (feval5 a).[1] (feval5 a).[1]);
assert (felem_fits5 fa1 (1, 2, 1, 1, 2));
let out = fmul_r2_normalize51 a fa1 in
assert ((feval5 out).[0] == Vec.pfadd (feval5 a).[0] (feval5 a).[1]);
let res = carry_full_felem5 out in
carry_full_felem5_lemma out
val fmul_r4_normalize50:
acc:felem5 4
-> r:felem5 4
-> r2:felem5 4
-> r3:felem5 4
-> r4:felem5 4 ->
Pure (felem5 4)
(requires
felem_fits5 acc (3, 3, 3, 3, 3) /\
felem_fits5 r (1, 1, 1, 1, 1) /\
felem_fits5 r2 (2, 2, 2, 2, 2) /\
felem_fits5 r3 (2, 2, 2, 2, 2) /\
felem_fits5 r4 (2, 2, 2, 2, 2) /\
feval5 r2 == Vec.fmul (feval5 r) (feval5 r) /\
feval5 r3 == Vec.fmul (feval5 r2) (feval5 r) /\
feval5 r4 == Vec.compute_r4 (feval5 r).[0])
(ensures fun out ->
let fr4321 = create4 (feval5 r4).[0] (feval5 r3).[0] (feval5 r2).[0] (feval5 r).[0] in
feval5 out == Vec.fmul (feval5 acc) fr4321 /\
felem_fits5 out (1, 2, 1, 1, 2)) | false | false | Hacl.Spec.Poly1305.Field32xN.Lemmas.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 100,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val fmul_r4_normalize50:
acc:felem5 4
-> r:felem5 4
-> r2:felem5 4
-> r3:felem5 4
-> r4:felem5 4 ->
Pure (felem5 4)
(requires
felem_fits5 acc (3, 3, 3, 3, 3) /\
felem_fits5 r (1, 1, 1, 1, 1) /\
felem_fits5 r2 (2, 2, 2, 2, 2) /\
felem_fits5 r3 (2, 2, 2, 2, 2) /\
felem_fits5 r4 (2, 2, 2, 2, 2) /\
feval5 r2 == Vec.fmul (feval5 r) (feval5 r) /\
feval5 r3 == Vec.fmul (feval5 r2) (feval5 r) /\
feval5 r4 == Vec.compute_r4 (feval5 r).[0])
(ensures fun out ->
let fr4321 = create4 (feval5 r4).[0] (feval5 r3).[0] (feval5 r2).[0] (feval5 r).[0] in
feval5 out == Vec.fmul (feval5 acc) fr4321 /\
felem_fits5 out (1, 2, 1, 1, 2)) | [] | Hacl.Spec.Poly1305.Field32xN.Lemmas.fmul_r4_normalize50 | {
"file_name": "code/poly1305/Hacl.Spec.Poly1305.Field32xN.Lemmas.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} |
acc: Hacl.Spec.Poly1305.Field32xN.felem5 4 ->
r: Hacl.Spec.Poly1305.Field32xN.felem5 4 ->
r2: Hacl.Spec.Poly1305.Field32xN.felem5 4 ->
r3: Hacl.Spec.Poly1305.Field32xN.felem5 4 ->
r4: Hacl.Spec.Poly1305.Field32xN.felem5 4
-> Prims.Pure (Hacl.Spec.Poly1305.Field32xN.felem5 4) | {
"end_col": 5,
"end_line": 477,
"start_col": 44,
"start_line": 428
} |
FStar.Pervasives.Lemma | val load_felem5_le: b:lseq uint8 64 -> Lemma
(let lo0 = vec_from_bytes_le U64 4 (sub b 0 32) in
let hi0 = vec_from_bytes_le U64 4 (sub b 32 32) in
let f = load_felem5_4 lo0 hi0 in
felem_fits5 f (1, 1, 1, 1, 1) /\
felem_less5 f (pow2 128) /\
feval5 f == Vec.load_elem4 b) | [
{
"abbrev": true,
"full_module": "Hacl.Spec.Poly1305.Vec",
"short_module": "Vec"
},
{
"abbrev": false,
"full_module": "Hacl.Poly1305.Field32xN.Lemmas2",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Poly1305.Field32xN.Lemmas1",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Poly1305.Field32xN.Lemmas0",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305.Field32xN",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305.Field32xN",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305.Field32xN",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let load_felem5_le b =
let lo0 = vec_from_bytes_le U64 4 (sub b 0 32) in
let hi0 = vec_from_bytes_le U64 4 (sub b 32 32) in
let lo1 = vec_interleave_low_n 2 lo0 hi0 in
let hi1 = vec_interleave_high_n 2 lo0 hi0 in
let lo = vec_interleave_low lo1 hi1 in
let hi = vec_interleave_high lo1 hi1 in
let out = load_felem5_4_compact lo hi in
load_felem5_4_interleave lo0 hi0;
assert (out == load_felem5_4 lo0 hi0);
load_felem5_4_lemma lo hi;
Hacl.Impl.Poly1305.Lemmas.uints_from_bytes_le_lemma64_4 b;
eq_intro (feval5 out) (Vec.load_elem4 b) | val load_felem5_le: b:lseq uint8 64 -> Lemma
(let lo0 = vec_from_bytes_le U64 4 (sub b 0 32) in
let hi0 = vec_from_bytes_le U64 4 (sub b 32 32) in
let f = load_felem5_4 lo0 hi0 in
felem_fits5 f (1, 1, 1, 1, 1) /\
felem_less5 f (pow2 128) /\
feval5 f == Vec.load_elem4 b)
let load_felem5_le b = | false | null | true | let lo0 = vec_from_bytes_le U64 4 (sub b 0 32) in
let hi0 = vec_from_bytes_le U64 4 (sub b 32 32) in
let lo1 = vec_interleave_low_n 2 lo0 hi0 in
let hi1 = vec_interleave_high_n 2 lo0 hi0 in
let lo = vec_interleave_low lo1 hi1 in
let hi = vec_interleave_high lo1 hi1 in
let out = load_felem5_4_compact lo hi in
load_felem5_4_interleave lo0 hi0;
assert (out == load_felem5_4 lo0 hi0);
load_felem5_4_lemma lo hi;
Hacl.Impl.Poly1305.Lemmas.uints_from_bytes_le_lemma64_4 b;
eq_intro (feval5 out) (Vec.load_elem4 b) | {
"checked_file": "Hacl.Spec.Poly1305.Field32xN.Lemmas.fst.checked",
"dependencies": [
"prims.fst.checked",
"Lib.Sequence.fsti.checked",
"Lib.IntVector.fsti.checked",
"Lib.IntTypes.fsti.checked",
"Hacl.Spec.Poly1305.Vec.fst.checked",
"Hacl.Spec.Poly1305.Field32xN.fst.checked",
"Hacl.Poly1305.Field32xN.Lemmas2.fst.checked",
"Hacl.Poly1305.Field32xN.Lemmas1.fst.checked",
"Hacl.Poly1305.Field32xN.Lemmas0.fst.checked",
"Hacl.Impl.Poly1305.Lemmas.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Classical.fsti.checked",
"FStar.Calc.fsti.checked"
],
"interface_file": false,
"source_file": "Hacl.Spec.Poly1305.Field32xN.Lemmas.fst"
} | [
"lemma"
] | [
"Lib.Sequence.lseq",
"Lib.IntTypes.uint8",
"Lib.Sequence.eq_intro",
"Hacl.Spec.Poly1305.Vec.pfelem",
"Hacl.Spec.Poly1305.Field32xN.feval5",
"Hacl.Spec.Poly1305.Vec.load_elem4",
"Prims.unit",
"Hacl.Impl.Poly1305.Lemmas.uints_from_bytes_le_lemma64_4",
"Hacl.Spec.Poly1305.Field32xN.Lemmas.load_felem5_4_lemma",
"Prims._assert",
"Prims.eq2",
"Hacl.Spec.Poly1305.Field32xN.felem5",
"Hacl.Spec.Poly1305.Field32xN.load_felem5_4",
"Hacl.Spec.Poly1305.Field32xN.Lemmas.load_felem5_4_interleave",
"Hacl.Spec.Poly1305.Field32xN.Lemmas.load_felem5_4_compact",
"Lib.IntVector.vec_t",
"Lib.IntTypes.U64",
"Lib.IntVector.vec_interleave_high",
"Lib.IntVector.vec_interleave_low",
"Lib.IntVector.vec_interleave_high_n",
"Lib.IntVector.vec_interleave_low_n",
"Lib.IntVector.vec_from_bytes_le",
"Lib.Sequence.sub"
] | [] | module Hacl.Spec.Poly1305.Field32xN.Lemmas
open Lib.IntTypes
open Lib.IntVector
open Lib.Sequence
open FStar.Mul
open Hacl.Spec.Poly1305.Field32xN
open Hacl.Poly1305.Field32xN.Lemmas0
open Hacl.Poly1305.Field32xN.Lemmas1
open Hacl.Poly1305.Field32xN.Lemmas2
module Vec = Hacl.Spec.Poly1305.Vec
#set-options "--z3rlimit 100 --max_fuel 0 --initial_ifuel 1 --max_ifuel 1 --using_facts_from '* -FStar.Seq'"
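(* Informally: a felem5 value (f0, f1, f2, f3, f4) stores one Poly1305 field
   element per SIMD lane in five radix-2^26 limbs, i.e. it stands for
   f0 + f1 * 2^26 + f2 * 2^52 + f3 * 2^78 + f4 * 2^104 (mod 2^130 - 5),
   with each limb held in a 64-bit lane.  The tuples given to felem_fits5
   bound every limb by a small multiple of the 26-bit limb size; that headroom
   is what the lemmas below track to rule out overflow between carry passes. *)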
val lemma_feval_is_fas_nat_i:
#w:lanes
-> f:felem5 w{felem_less5 f (pow2 128)}
-> i:size_nat{i < w} ->
Lemma ((feval5 f).[i] == (fas_nat5 f).[i])
let lemma_feval_is_fas_nat_i #w f i =
assert_norm (pow2 128 < Vec.prime);
assert ((feval5 f).[i] == (as_nat5 (transpose f).[i]) % Vec.prime);
FStar.Math.Lemmas.modulo_lemma (as_nat5 (transpose f).[i]) Vec.prime
val lemma_feval_is_fas_nat:
#w:lanes
-> f:felem5 w{felem_less5 f (pow2 128)} ->
Lemma (forall (i:nat). i < w ==> (fas_nat5 f).[i] == (feval5 f).[i])
let lemma_feval_is_fas_nat #w f =
FStar.Classical.forall_intro (lemma_feval_is_fas_nat_i #w f)
val precomp_r5_fits_lemma:
#w:lanes
-> r:felem5 w{felem_fits5 r (1, 1, 1, 1, 1)} ->
Lemma (felem_fits5 (precomp_r5 #w r) (5, 5, 5, 5, 5))
[SMTPat (precomp_r5 #w r)]
let precomp_r5_fits_lemma #w r =
FStar.Classical.forall_intro (precomp_r5_as_tup64 #w r)
val precomp_r5_fits_lemma2:
#w:lanes
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)} ->
Lemma (felem_fits5 (precomp_r5 #w r) (10, 10, 10, 10, 10))
[SMTPat (precomp_r5 #w r)]
let precomp_r5_fits_lemma2 #w r =
FStar.Classical.forall_intro (precomp_r5_as_tup64 #w r)
val precomp_r5_zeros: w:lanes -> Lemma
(let r = (zero w, zero w, zero w, zero w, zero w) in
precomp_r5 r == (zero w, zero w, zero w, zero w, zero w))
let precomp_r5_zeros w =
let r = (zero w, zero w, zero w, zero w, zero w) in
let (r0, r1, r2, r3, r4) = precomp_r5 r in
let aux (i:nat{i < w}) : Lemma ((vec_v (vec_smul_mod (zero w) (u64 5))).[i] == u64 0) = () in
Classical.forall_intro aux;
eq_intro (vec_v (vec_smul_mod (zero w) (u64 5))) (vec_v (zero w));
vecv_extensionality (vec_smul_mod (zero w) (u64 5)) (zero w)
val fadd5_fits_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (2,2,2,2,2)}
-> f2:felem5 w{felem_fits5 f2 (1,1,1,1,1)} ->
Lemma (felem_fits5 (fadd5 f1 f2) (3,3,3,3,3))
[SMTPat (fadd5 f1 f2)]
let fadd5_fits_lemma #w f1 f2 =
let (f10, f11, f12, f13, f14) = f1 in
let (f20, f21, f22, f23, f24) = f2 in
let o = fadd5 f1 f2 in
vec_add_mod_lemma f10 f20;
vec_add_mod_lemma f11 f21;
vec_add_mod_lemma f12 f22;
vec_add_mod_lemma f13 f23;
vec_add_mod_lemma f14 f24
val fadd5_eval_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (2,2,2,2,2)}
-> f2:felem5 w{felem_fits5 f2 (1,1,1,1,1)} ->
Lemma (feval5 (fadd5 f1 f2) == map2 Vec.pfadd (feval5 f1) (feval5 f2))
[SMTPat (fadd5 f1 f2)]
let fadd5_eval_lemma #w f1 f2 =
let o = fadd5 f1 f2 in
FStar.Classical.forall_intro (fadd5_eval_lemma_i f1 f2);
eq_intro (feval5 o) (map2 Vec.pfadd (feval5 f1) (feval5 f2))
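(* mul_felem5 below is the 5x5 schoolbook product with the standard Poly1305
   reduction folded in: limb products that would land at weight 2^130 or above
   are wrapped around using 2^130 == 5 (mod 2^130 - 5), and the precomputed
   r5 = 5 * r supplies exactly those wrapped terms, which is why the rows are
   the rotations (r54, r0, r1, r2, r3), (r53, r54, r0, r1, r2), and so on. *)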
val mul_felem5_fits_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10)} ->
Lemma (felem_wide_fits5 (mul_felem5 #w f1 r r5) (126, 102, 78, 54, 30))
[SMTPat (mul_felem5 #w f1 r r5)]
let mul_felem5_fits_lemma #w f1 r r5 =
let (r0, r1, r2, r3, r4) = r in
let (f10, f11, f12, f13, f14) = f1 in
let (r50, r51, r52, r53, r54) = r5 in
let (a0,a1,a2,a3,a4) = smul_felem5 #w f10 (r0,r1,r2,r3,r4) in
smul_felem5_fits_lemma #w #3 #(2,2,2,2,2) f10 (r0,r1,r2,r3,r4);
let (a10,a11,a12,a13,a14) = smul_add_felem5 #w f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4) in
smul_add_felem5_fits_lemma #w #3 #(10,2,2,2,2) #(6,6,6,6,6) f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4);
let (a20,a21,a22,a23,a24) = smul_add_felem5 #w f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14) in
smul_add_felem5_fits_lemma #w #3 #(10,10,2,2,2) #(36,12,12,12,12) f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14);
let (a30,a31,a32,a33,a34) = smul_add_felem5 #w f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24) in
smul_add_felem5_fits_lemma #w #3 #(10,10,10,2,2) #(66,42,18,18,18) f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24);
let (a40,a41,a42,a43,a44) = smul_add_felem5 #w f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34) in
smul_add_felem5_fits_lemma #w #3 #(10,10,10,10,2) #(96,72,48,24,24) f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34)
val mul_felem5_eval_lemma_i:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10) /\ r5 == precomp_r5 r}
-> i:nat{i < w} ->
Lemma ((feval5 (mul_felem5 #w f1 r r5)).[i] == (feval5 f1).[i] `Vec.pfmul` (feval5 r).[i])
let mul_felem5_eval_lemma_i #w f1 r r5 i =
let (r0, r1, r2, r3, r4) = r in
let (f10, f11, f12, f13, f14) = f1 in
let (r50, r51, r52, r53, r54) = r5 in
let (a0,a1,a2,a3,a4) = smul_felem5 #w f10 (r0,r1,r2,r3,r4) in
smul_felem5_eval_lemma #w #3 #(2,2,2,2,2) f10 (r0,r1,r2,r3,r4);
smul_felem5_fits_lemma #w #3 #(2,2,2,2,2) f10 (r0,r1,r2,r3,r4);
assert ((fas_nat5 (a0,a1,a2,a3,a4)).[i] == (uint64xN_v f10).[i] * (fas_nat5 (r0,r1,r2,r3,r4)).[i]);
let (a10,a11,a12,a13,a14) = smul_add_felem5 #w f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4) in
smul_add_felem5_eval_lemma #w #3 #(10,2,2,2,2) #(6,6,6,6,6) f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4);
smul_add_felem5_fits_lemma #w #3 #(10,2,2,2,2) #(6,6,6,6,6) f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4);
assert ((fas_nat5 (a10,a11,a12,a13,a14)).[i] == (fas_nat5 (a0,a1,a2,a3,a4)).[i] + (uint64xN_v f11).[i] * (fas_nat5 (r54,r0,r1,r2,r3)).[i]);
let (a20,a21,a22,a23,a24) = smul_add_felem5 #w f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14) in
smul_add_felem5_eval_lemma #w #3 #(10,10,2,2,2) #(36,12,12,12,12) f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14);
smul_add_felem5_fits_lemma #w #3 #(10,10,2,2,2) #(36,12,12,12,12) f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14);
assert ((fas_nat5 (a20,a21,a22,a23,a24)).[i] == (fas_nat5 (a10,a11,a12,a13,a14)).[i] + (uint64xN_v f12).[i] * (fas_nat5 (r53,r54,r0,r1,r2)).[i]);
let (a30,a31,a32,a33,a34) = smul_add_felem5 #w f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24) in
smul_add_felem5_eval_lemma #w #3 #(10,10,10,2,2) #(66,42,18,18,18) f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24);
smul_add_felem5_fits_lemma #w #3 #(10,10,10,2,2) #(66,42,18,18,18) f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24);
assert ((fas_nat5 (a30,a31,a32,a33,a34)).[i] == (fas_nat5 (a20,a21,a22,a23,a24)).[i] + (uint64xN_v f13).[i] * (fas_nat5 (r52,r53,r54,r0,r1)).[i]);
let (a40,a41,a42,a43,a44) = smul_add_felem5 #w f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34) in
smul_add_felem5_eval_lemma #w #3 #(10,10,10,10,2) #(96,72,48,24,24) f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34);
smul_add_felem5_fits_lemma #w #3 #(10,10,10,10,2) #(96,72,48,24,24) f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34);
assert ((fas_nat5 (a40,a41,a42,a43,a44)).[i] == (fas_nat5 (a30,a31,a32,a33,a34)).[i] + (uint64xN_v f14).[i] * (fas_nat5 (r51,r52,r53,r54,r0)).[i]);
assert ((fas_nat5 (a40,a41,a42,a43,a44)).[i] ==
(uint64xN_v f10).[i] * (fas_nat5 (r0,r1,r2,r3,r4)).[i] +
(uint64xN_v f11).[i] * (fas_nat5 (r54,r0,r1,r2,r3)).[i] +
(uint64xN_v f12).[i] * (fas_nat5 (r53,r54,r0,r1,r2)).[i] +
(uint64xN_v f13).[i] * (fas_nat5 (r52,r53,r54,r0,r1)).[i] +
(uint64xN_v f14).[i] * (fas_nat5 (r51,r52,r53,r54,r0)).[i]);
mul_felem5_eval_as_tup64 #w f1 r r5 i;
mul_felem5_lemma (as_tup64_i f1 i) (as_tup64_i r i)
val mul_felem5_eval_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10) /\ r5 == precomp_r5 r} ->
Lemma (feval5 (mul_felem5 #w f1 r r5) == map2 (Vec.pfmul) (feval5 f1) (feval5 r))
[SMTPat (mul_felem5 #w f1 r r5)]
let mul_felem5_eval_lemma #w f1 r r5 =
let tmp = map2 (Vec.pfmul) (feval5 f1) (feval5 r) in
FStar.Classical.forall_intro (mul_felem5_eval_lemma_i #w f1 r r5);
eq_intro (feval5 (mul_felem5 #w f1 r r5)) tmp
val fmul_r5_fits_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10)} ->
Lemma (felem_fits5 (fmul_r5 #w f1 r r5) (1, 2, 1, 1, 2))
[SMTPat (fmul_r5 #w f1 r r5)]
let fmul_r5_fits_lemma #w f1 r r5 =
let tmp = mul_felem5 f1 r r5 in
mul_felem5_fits_lemma #w f1 r r5;
let res = carry_wide_felem5 tmp in
carry_wide_felem5_fits_lemma #w tmp
val fmul_r5_eval_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10) /\ r5 == precomp_r5 r} ->
Lemma (feval5 (fmul_r5 #w f1 r r5) == map2 (Vec.pfmul) (feval5 f1) (feval5 r))
[SMTPat (fmul_r5 #w f1 r r5)]
let fmul_r5_eval_lemma #w f1 r r5 =
let tmp = mul_felem5 f1 r r5 in
mul_felem5_eval_lemma #w f1 r r5;
let res = carry_wide_felem5 tmp in
carry_wide_felem5_eval_lemma #w tmp
val fadd_mul_r5_fits_lemma:
#w:lanes
-> acc:felem5 w{felem_fits5 acc (2, 2, 2, 2, 2)}
-> f1:felem5 w{felem_fits5 f1 (1, 1, 1, 1, 1)}
-> r:felem5 w{felem_fits5 r (1, 1, 1, 1, 1)}
-> r5:felem5 w{felem_fits5 r5 (5, 5, 5, 5, 5)} ->
Lemma (felem_fits5 (fadd_mul_r5 acc f1 r r5) (1, 2, 1, 1, 2))
[SMTPat (fadd_mul_r5 acc f1 r r5)]
let fadd_mul_r5_fits_lemma #w acc f1 r r5 =
let acc1 = fadd5 acc f1 in
fadd5_fits_lemma #w acc f1;
let tmp = mul_felem5 acc1 r r5 in
mul_felem5_fits_lemma #w acc1 r r5;
let res = carry_wide_felem5 tmp in
carry_wide_felem5_fits_lemma #w tmp
val fadd_mul_r5_eval_lemma:
#w:lanes
-> acc:felem5 w{felem_fits5 acc (2, 2, 2, 2, 2)}
-> f1:felem5 w{felem_fits5 f1 (1, 1, 1, 1, 1)}
-> r:felem5 w{felem_fits5 r (1, 1, 1, 1, 1)}
-> r5:felem5 w{felem_fits5 r5 (5, 5, 5, 5, 5) /\ r5 == precomp_r5 r} ->
Lemma (feval5 (fadd_mul_r5 acc f1 r r5) ==
map2 (Vec.pfmul) (map2 (Vec.pfadd) (feval5 acc) (feval5 f1)) (feval5 r))
[SMTPat (fadd_mul_r5 acc f1 r r5)]
let fadd_mul_r5_eval_lemma #w acc f1 r r5 =
let acc1 = fadd5 acc f1 in
fadd5_eval_lemma #w acc f1;
let tmp = mul_felem5 acc1 r r5 in
mul_felem5_eval_lemma #w acc1 r r5;
let res = carry_wide_felem5 tmp in
carry_wide_felem5_eval_lemma #w tmp
val reduce_felem5_eval_lemma:
#w:lanes
-> f:felem5 w{felem_fits5 f (2, 2, 2, 2, 2)} ->
Lemma
(felem_fits5 (reduce_felem5 f) (1, 1, 1, 1, 1) /\
(feval5 f).[0] == (fas_nat5 (reduce_felem5 f)).[0])
[SMTPat (reduce_felem5 f)]
let reduce_felem5_eval_lemma #w f =
carry_full_felem5_eval_lemma f;
carry_full_felem5_fits_lemma f;
let f = carry_full_felem5 f in
carry_reduce_felem5_lemma #w f;
subtract_p5_felem5_lemma #w (carry_full_felem5 f)
val fmul_r2_normalize50:
acc:felem5 2
-> r:felem5 2
-> r2:felem5 2 ->
Pure (felem5 2)
(requires
felem_fits5 acc (3, 3, 3, 3, 3) /\
felem_fits5 r (1, 1, 1, 1, 1) /\
felem_fits5 r2 (2, 2, 2, 2, 2) /\
feval5 r2 == Vec.compute_r2 (feval5 r).[0])
(ensures fun a ->
let fr21 = create2 (feval5 r2).[0] (feval5 r).[0] in
feval5 a == Vec.fmul (feval5 acc) fr21 /\
felem_fits5 a (1, 2, 1, 1, 2))
let fmul_r2_normalize50 (a0, a1, a2, a3, a4) (r0, r1, r2, r3, r4) (r20, r21, r22, r23, r24) =
let r210 = vec_interleave_low r20 r0 in
vec_interleave_low_lemma2 r20 r0;
let r211 = vec_interleave_low r21 r1 in
vec_interleave_low_lemma2 r21 r1;
let r212 = vec_interleave_low r22 r2 in
vec_interleave_low_lemma2 r22 r2;
let r213 = vec_interleave_low r23 r3 in
vec_interleave_low_lemma2 r23 r3;
let r214 = vec_interleave_low r24 r4 in
vec_interleave_low_lemma2 r24 r4;
let acc = (a0, a1, a2, a3, a4) in
let fr = (r0, r1, r2, r3, r4) in
let fr2 = (r20, r21, r22, r23, r24) in
assert ((feval5 fr2).[0] == Vec.pfmul ((feval5 fr).[0]) ((feval5 fr).[0]));
let fr21 = (r210, r211, r212, r213, r214) in
eq_intro (feval5 fr21) (create2 (feval5 fr2).[0] (feval5 fr).[0]);
assert (feval5 fr21 == create2 (feval5 fr2).[0] (feval5 fr).[0]);
assert (felem_fits5 fr21 (2, 2, 2, 2, 2));
let fr215 = precomp_r5 #2 fr21 in
let a = fmul_r5 #2 acc fr21 fr215 in
fmul_r5_eval_lemma acc fr21 fr215;
fmul_r5_fits_lemma acc fr21 fr215;
assert (feval5 a == Vec.fmul (feval5 acc) (feval5 fr21));
assert (felem_fits5 a (1, 2, 1, 1, 2));
a
#push-options "--z3rlimit 150"
val fmul_r2_normalize51:
a:felem5 2
-> fa1:felem5 2 ->
Pure (felem5 2)
(requires
felem_fits5 a (1, 2, 1, 1, 2) /\
felem_fits5 fa1 (1, 2, 1, 1, 2) /\
feval5 fa1 == create2 (feval5 a).[1] (feval5 a).[1])
(ensures fun out ->
(feval5 out).[0] == Vec.pfadd (feval5 a).[0] (feval5 a).[1] /\
felem_fits5 out (2, 4, 2, 2, 4))
let fmul_r2_normalize51 a fa1 =
let (a0, a1, a2, a3, a4) = a in
let (a10, a11, a12, a13, a14) = fa1 in
let o0 = vec_add_mod a0 a10 in
let o1 = vec_add_mod a1 a11 in
let o2 = vec_add_mod a2 a12 in
let o3 = vec_add_mod a3 a13 in
let o4 = vec_add_mod a4 a14 in
let out = (o0, o1, o2, o3, o4) in
let (a0, a1, a2, a3, a4) = as_tup64_i a 0 in
let (a10, a11, a12, a13, a14) = as_tup64_i fa1 0 in
let (o0, o1, o2, o3, o4) = as_tup64_i out 0 in
FStar.Math.Lemmas.modulo_lemma (v a0 + v a10) (pow2 64);
FStar.Math.Lemmas.modulo_lemma (v a1 + v a11) (pow2 64);
FStar.Math.Lemmas.modulo_lemma (v a2 + v a12) (pow2 64);
FStar.Math.Lemmas.modulo_lemma (v a3 + v a13) (pow2 64);
FStar.Math.Lemmas.modulo_lemma (v a4 + v a14) (pow2 64);
assert (felem_fits5 out (2, 4, 2, 2, 4));
calc (==) {
((feval5 a).[0] + (feval5 a).[1]) % Vec.prime;
(==) { }
(as_nat5 (a0, a1, a2, a3, a4) % Vec.prime + as_nat5 (a10, a11, a12, a13, a14) % Vec.prime) % Vec.prime;
(==) { FStar.Math.Lemmas.lemma_mod_plus_distr_l
(as_nat5 (a0, a1, a2, a3, a4)) (as_nat5 (a10, a11, a12, a13, a14)) Vec.prime;
FStar.Math.Lemmas.lemma_mod_plus_distr_r
(as_nat5 (a0, a1, a2, a3, a4) % Vec.prime) (as_nat5 (a10, a11, a12, a13, a14)) Vec.prime }
(as_nat5 (a0, a1, a2, a3, a4) + as_nat5 (a10, a11, a12, a13, a14)) % Vec.prime;
(==) { }
(feval5 out).[0];
};
out
#pop-options
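(* Lane normalization for w = 2: fmul_r2_normalize50 multiplies the two
   accumulator lanes point-wise by (r^2, r), fmul_r2_normalize51 then folds
   lane 1 into lane 0 with a plain limb-wise addition, and a final
   carry_full_felem5 restores the (2, 1, 1, 1, 1) shape.  The lemma below
   packages these three steps as Vec.normalize_2. *)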
val fmul_r2_normalize5_lemma:
acc:felem5 2
-> r:felem5 2
-> r2:felem5 2 ->
Lemma
(requires
felem_fits5 acc (3, 3, 3, 3, 3) /\
felem_fits5 r (1, 1, 1, 1, 1) /\
felem_fits5 r2 (2, 2, 2, 2, 2) /\
feval5 r2 == Vec.compute_r2 (feval5 r).[0])
(ensures
(let out = fmul_r2_normalize5 acc r r2 in
felem_fits5 out (2, 1, 1, 1, 1) /\
(feval5 out).[0] == Vec.normalize_2 (feval5 r).[0] (feval5 acc)))
[SMTPat (fmul_r2_normalize5 acc r r2)]
let fmul_r2_normalize5_lemma acc r r2 =
let a = fmul_r2_normalize50 acc r r2 in
let (a0, a1, a2, a3, a4) = a in
let a10 = vec_interleave_high a0 a0 in
vec_interleave_high_lemma2 a0 a0;
let a11 = vec_interleave_high a1 a1 in
vec_interleave_high_lemma2 a1 a1;
let a12 = vec_interleave_high a2 a2 in
vec_interleave_high_lemma2 a2 a2;
let a13 = vec_interleave_high a3 a3 in
vec_interleave_high_lemma2 a3 a3;
let a14 = vec_interleave_high a4 a4 in
vec_interleave_high_lemma2 a4 a4;
let fa1 = (a10, a11, a12, a13, a14) in
eq_intro (feval5 fa1) (create2 (feval5 a).[1] (feval5 a).[1]);
assert (feval5 fa1 == create2 (feval5 a).[1] (feval5 a).[1]);
assert (felem_fits5 fa1 (1, 2, 1, 1, 2));
let out = fmul_r2_normalize51 a fa1 in
assert ((feval5 out).[0] == Vec.pfadd (feval5 a).[0] (feval5 a).[1]);
let res = carry_full_felem5 out in
carry_full_felem5_lemma out
val fmul_r4_normalize50:
acc:felem5 4
-> r:felem5 4
-> r2:felem5 4
-> r3:felem5 4
-> r4:felem5 4 ->
Pure (felem5 4)
(requires
felem_fits5 acc (3, 3, 3, 3, 3) /\
felem_fits5 r (1, 1, 1, 1, 1) /\
felem_fits5 r2 (2, 2, 2, 2, 2) /\
felem_fits5 r3 (2, 2, 2, 2, 2) /\
felem_fits5 r4 (2, 2, 2, 2, 2) /\
feval5 r2 == Vec.fmul (feval5 r) (feval5 r) /\
feval5 r3 == Vec.fmul (feval5 r2) (feval5 r) /\
feval5 r4 == Vec.compute_r4 (feval5 r).[0])
(ensures fun out ->
let fr4321 = create4 (feval5 r4).[0] (feval5 r3).[0] (feval5 r2).[0] (feval5 r).[0] in
feval5 out == Vec.fmul (feval5 acc) fr4321 /\
felem_fits5 out (1, 2, 1, 1, 2))
let fmul_r4_normalize50 acc fr fr2 fr3 fr4 =
let (r10, r11, r12, r13, r14) = fr in
let (r20, r21, r22, r23, r24) = fr2 in
let (r30, r31, r32, r33, r34) = fr3 in
let (r40, r41, r42, r43, r44) = fr4 in
let (a0, a1, a2, a3, a4) = acc in
let v12120 = vec_interleave_low r20 r10 in
vec_interleave_low_lemma_uint64_4 r20 r10;
let v34340 = vec_interleave_low r40 r30 in
vec_interleave_low_lemma_uint64_4 r40 r30;
let r12340 = vec_interleave_low_n 2 v34340 v12120 in
vec_interleave_low_n_lemma_uint64_4_2 v34340 v12120;
let v12121 = vec_interleave_low r21 r11 in
vec_interleave_low_lemma_uint64_4 r21 r11;
let v34341 = vec_interleave_low r41 r31 in
vec_interleave_low_lemma_uint64_4 r41 r31;
let r12341 = vec_interleave_low_n 2 v34341 v12121 in
vec_interleave_low_n_lemma_uint64_4_2 v34341 v12121;
let v12122 = vec_interleave_low r22 r12 in
vec_interleave_low_lemma_uint64_4 r22 r12;
let v34342 = vec_interleave_low r42 r32 in
vec_interleave_low_lemma_uint64_4 r42 r32;
let r12342 = vec_interleave_low_n 2 v34342 v12122 in
vec_interleave_low_n_lemma_uint64_4_2 v34342 v12122;
let v12123 = vec_interleave_low r23 r13 in
vec_interleave_low_lemma_uint64_4 r23 r13;
let v34343 = vec_interleave_low r43 r33 in
vec_interleave_low_lemma_uint64_4 r43 r33;
let r12343 = vec_interleave_low_n 2 v34343 v12123 in
vec_interleave_low_n_lemma_uint64_4_2 v34343 v12123;
let v12124 = vec_interleave_low r24 r14 in
vec_interleave_low_lemma_uint64_4 r24 r14;
let v34344 = vec_interleave_low r44 r34 in
vec_interleave_low_lemma_uint64_4 r44 r34;
let r12344 = vec_interleave_low_n 2 v34344 v12124 in
vec_interleave_low_n_lemma_uint64_4_2 v34344 v12124;
let fr1234 = (r12340, r12341, r12342, r12343, r12344) in
eq_intro (feval5 fr1234) (create4 (feval5 fr4).[0] (feval5 fr3).[0] (feval5 fr2).[0] (feval5 fr).[0]);
let fr12345 = precomp_r5 #4 fr1234 in
let out = fmul_r5 #4 acc fr1234 fr12345 in
fmul_r5_eval_lemma acc fr1234 fr12345;
fmul_r5_fits_lemma acc fr1234 fr12345;
out
val lemma_fmul_r4_normalize51:
#m:scale32{m <= 2}
-> o:uint64xN 4{felem_fits1 o m} ->
Lemma
(let v00 = vec_interleave_high_n 2 o o in
let v10 = vec_add_mod o v00 in
let v10h = vec_interleave_high v10 v10 in
let v20 = vec_add_mod v10 v10h in
felem_fits1 v20 (4 * m) /\
(uint64xN_v v20).[0] == (uint64xN_v o).[0] + (uint64xN_v o).[1] + (uint64xN_v o).[2] + (uint64xN_v o).[3])
let lemma_fmul_r4_normalize51 #m o =
let v00 = vec_interleave_high_n 2 o o in
vec_interleave_high_n_lemma_uint64_4_2 o o;
let (o0, o1, o2, o3) = ((vec_v o).[0], (vec_v o).[1], (vec_v o).[2], (vec_v o).[3]) in
assert (vec_v v00 == create4 o2 o3 o2 o3);
let v10 = vec_add_mod o v00 in
FStar.Math.Lemmas.modulo_lemma (v o0 + v o2) (pow2 64);
FStar.Math.Lemmas.modulo_lemma (v o1 + v o3) (pow2 64);
assert (v (vec_v v10).[0] == v o0 + v o2);
assert (v (vec_v v10).[1] == v o1 + v o3);
let v10h = vec_interleave_high v10 v10 in
vec_interleave_high_lemma_uint64_4 v10 v10;
assert (v (vec_v v10h).[0] == v (vec_v v10).[1]);
let v20 = vec_add_mod v10 v10h in
FStar.Math.Lemmas.modulo_lemma (v o0 + v o2 + v o1 + v o3) (pow2 64)
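(* This is the usual SIMD horizontal sum: interleaving a vector with itself and
   adding, twice, leaves the sum of all four lanes in lane 0.  For example, on
   (1, 2, 3, 4) the two rounds give (1+3, 2+4, _, _) and then lane 0 becomes
   1+3+2+4 = 10. *)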
val lemma_fmul_r4_normalize51_expand:
v2:felem5 4
-> out:felem5 4 ->
Lemma
(requires
(let (v20, v21, v22, v23, v24) = v2 in
let (o0, o1, o2, o3, o4) = out in
(uint64xN_v v20).[0] == (uint64xN_v o0).[0] + (uint64xN_v o0).[1] + (uint64xN_v o0).[2] + (uint64xN_v o0).[3] /\
(uint64xN_v v21).[0] == (uint64xN_v o1).[0] + (uint64xN_v o1).[1] + (uint64xN_v o1).[2] + (uint64xN_v o1).[3] /\
(uint64xN_v v22).[0] == (uint64xN_v o2).[0] + (uint64xN_v o2).[1] + (uint64xN_v o2).[2] + (uint64xN_v o2).[3] /\
(uint64xN_v v23).[0] == (uint64xN_v o3).[0] + (uint64xN_v o3).[1] + (uint64xN_v o3).[2] + (uint64xN_v o3).[3] /\
(uint64xN_v v24).[0] == (uint64xN_v o4).[0] + (uint64xN_v o4).[1] + (uint64xN_v o4).[2] + (uint64xN_v o4).[3]))
(ensures
(let (v20, v21, v22, v23, v24) = v2 in
let (o0, o1, o2, o3, o4) = out in
(feval5 v2).[0] == Vec.pfadd (Vec.pfadd (Vec.pfadd (feval5 out).[0] (feval5 out).[1]) (feval5 out).[2]) (feval5 out).[3]))
let lemma_fmul_r4_normalize51_expand v2 out =
let (v20, v21, v22, v23, v24) = as_tup64_i v2 0 in
let (o0, o1, o2, o3, o4) = out in
calc (==) {
as_nat5 (v20, v21, v22, v23, v24) % Vec.prime;
(==) { }
(as_nat5 (as_tup64_i out 0) + as_nat5 (as_tup64_i out 1) + as_nat5 (as_tup64_i out 2) + as_nat5 (as_tup64_i out 3)) % Vec.prime;
(==) { FStar.Math.Lemmas.lemma_mod_plus_distr_l (as_nat5 (as_tup64_i out 0) + as_nat5 (as_tup64_i out 1))
(as_nat5 (as_tup64_i out 2) + as_nat5 (as_tup64_i out 3)) Vec.prime }
((as_nat5 (as_tup64_i out 0) + as_nat5 (as_tup64_i out 1)) % Vec.prime + as_nat5 (as_tup64_i out 2) + as_nat5 (as_tup64_i out 3)) % Vec.prime;
(==) { FStar.Math.Lemmas.modulo_distributivity (as_nat5 (as_tup64_i out 0)) (as_nat5 (as_tup64_i out 1)) Vec.prime }
(((feval5 out).[0] + (feval5 out).[1]) % Vec.prime + as_nat5 (as_tup64_i out 2) + as_nat5 (as_tup64_i out 3)) % Vec.prime;
(==) { FStar.Math.Lemmas.lemma_mod_plus_distr_l (((feval5 out).[0] + (feval5 out).[1]) % Vec.prime + as_nat5 (as_tup64_i out 2))
(as_nat5 (as_tup64_i out 3)) Vec.prime }
((((feval5 out).[0] + (feval5 out).[1]) % Vec.prime + as_nat5 (as_tup64_i out 2)) % Vec.prime + as_nat5 (as_tup64_i out 3)) % Vec.prime;
(==) { FStar.Math.Lemmas.lemma_mod_plus_distr_r (((feval5 out).[0] + (feval5 out).[1]) % Vec.prime) (as_nat5 (as_tup64_i out 2)) Vec.prime }
((((feval5 out).[0] + (feval5 out).[1]) % Vec.prime + (feval5 out).[2]) % Vec.prime + as_nat5 (as_tup64_i out 3)) % Vec.prime;
(==) { FStar.Math.Lemmas.lemma_mod_plus_distr_r ((((feval5 out).[0] + (feval5 out).[1]) % Vec.prime + (feval5 out).[2]) % Vec.prime)
(as_nat5 (as_tup64_i out 3)) Vec.prime }
((((feval5 out).[0] + (feval5 out).[1]) % Vec.prime + (feval5 out).[2]) % Vec.prime + (feval5 out).[3]) % Vec.prime;
};
assert ((feval5 v2).[0] ==
((((feval5 out).[0] + (feval5 out).[1]) % Vec.prime + (feval5 out).[2]) % Vec.prime + (feval5 out).[3]) % Vec.prime)
val fmul_r4_normalize51: a:felem5 4 ->
Pure (felem5 4)
(requires felem_fits5 a (1, 2, 1, 1, 2))
(ensures fun res ->
felem_fits5 res (4, 8, 4, 4, 8) /\
(feval5 res).[0] == Vec.pfadd (Vec.pfadd (Vec.pfadd (feval5 a).[0] (feval5 a).[1]) (feval5 a).[2]) (feval5 a).[3])
let fmul_r4_normalize51 fa =
let (o0, o1, o2, o3, o4) = fa in
let v00 = vec_interleave_high_n 2 o0 o0 in
let v10 = vec_add_mod o0 v00 in
let v10h = vec_interleave_high v10 v10 in
let v20 = vec_add_mod v10 v10h in
lemma_fmul_r4_normalize51 #1 o0;
let v01 = vec_interleave_high_n 2 o1 o1 in
let v11 = vec_add_mod o1 v01 in
let v11h = vec_interleave_high v11 v11 in
let v21 = vec_add_mod v11 v11h in
lemma_fmul_r4_normalize51 #2 o1;
let v02 = vec_interleave_high_n 2 o2 o2 in
let v12 = vec_add_mod o2 v02 in
let v12h = vec_interleave_high v12 v12 in
let v22 = vec_add_mod v12 v12h in
lemma_fmul_r4_normalize51 #1 o2;
let v03 = vec_interleave_high_n 2 o3 o3 in
let v13 = vec_add_mod o3 v03 in
let v13h = vec_interleave_high v13 v13 in
let v23 = vec_add_mod v13 v13h in
lemma_fmul_r4_normalize51 #1 o3;
let v04 = vec_interleave_high_n 2 o4 o4 in
let v14 = vec_add_mod o4 v04 in
let v14h = vec_interleave_high v14 v14 in
let v24 = vec_add_mod v14 v14h in
lemma_fmul_r4_normalize51 #2 o4;
let res = (v20, v21, v22, v23, v24) in
lemma_fmul_r4_normalize51_expand res fa;
res
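(* Lane normalization for w = 4 mirrors the w = 2 case: fmul_r4_normalize50
   multiplies the four accumulator lanes point-wise by (r^4, r^3, r^2, r),
   fmul_r4_normalize51 sums the lanes into lane 0, and carry_full_felem5
   brings the result back into shape; the lemma below relates this sequence
   to Vec.normalize_4. *)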
val fmul_r4_normalize5_lemma:
acc:felem5 4
-> r:felem5 4
-> r_5:felem5 4
-> r4:felem5 4 ->
Lemma
(requires
felem_fits5 acc (3, 3, 3, 3, 3) /\
felem_fits5 r (1, 1, 1, 1, 1) /\
felem_fits5 r4 (2, 2, 2, 2, 2) /\
r_5 == precomp_r5 r /\
feval5 r4 == Vec.compute_r4 (feval5 r).[0])
(ensures
(let out = fmul_r4_normalize5 acc r r_5 r4 in
felem_fits5 out (2, 1, 1, 1, 1) /\
(feval5 out).[0] == Vec.normalize_4 (feval5 r).[0] (feval5 acc)))
[SMTPat (fmul_r4_normalize5 acc r r_5 r4)]
#restart-solver
#push-options "--z3rlimit 500"
let fmul_r4_normalize5_lemma acc fr fr_5 fr4 =
let fr2 = fmul_r5 #4 fr fr fr_5 in
let fr3 = fmul_r5 #4 fr2 fr fr_5 in
let out = fmul_r4_normalize50 acc fr fr2 fr3 fr4 in
let v2 = fmul_r4_normalize51 out in
let res = carry_full_felem5 v2 in
carry_full_felem5_lemma v2
#pop-options
val load_felem5_lemma:
#w:lanes
-> lo:uint64xN w
-> hi:uint64xN w ->
Lemma
(let f = load_felem5 #w lo hi in
felem_fits5 f (1, 1, 1, 1, 1) /\
felem_less5 f (pow2 128) /\
feval5 f == createi #Vec.pfelem w
(fun i -> (uint64xN_v hi).[i] * pow2 64 + (uint64xN_v lo).[i]))
let load_felem5_lemma #w lo hi =
let f = load_felem5 #w lo hi in
assert_norm (pow2 64 * pow2 64 = pow2 128);
assert_norm (pow2 128 < Vec.prime);
let res = createi #Vec.pfelem w
(fun i -> (uint64xN_v hi).[i] * pow2 64 + (uint64xN_v lo).[i]) in
match w with
| 1 ->
load_felem5_lemma_i #w lo hi 0;
eq_intro (feval5 f) res
| 2 ->
load_felem5_lemma_i #w lo hi 0;
load_felem5_lemma_i #w lo hi 1;
eq_intro (feval5 f) res
| 4 ->
load_felem5_lemma_i #w lo hi 0;
load_felem5_lemma_i #w lo hi 1;
load_felem5_lemma_i #w lo hi 2;
load_felem5_lemma_i #w lo hi 3;
eq_intro (feval5 f) res
val load_felem5_4_interleave: lo:uint64xN 4 -> hi:uint64xN 4 -> Lemma
(let m0 = vec_interleave_low_n 2 lo hi in
let m1 = vec_interleave_high_n 2 lo hi in
let m2 = cast U64 4 (vec_shift_right (cast U128 2 m0) 48ul) in
let m3 = cast U64 4 (vec_shift_right (cast U128 2 m1) 48ul) in
let m4 = vec_interleave_high m0 m1 in
let t0 = vec_interleave_low m0 m1 in
let t3 = vec_interleave_low m2 m3 in
vec_v m4 == create4 (vec_v lo).[1] (vec_v lo).[3] (vec_v hi).[1] (vec_v hi).[3] /\
vec_v t0 == create4 (vec_v lo).[0] (vec_v lo).[2] (vec_v hi).[0] (vec_v hi).[2] /\
t3 == vec_or (vec_shift_right t0 48ul) (vec_shift_left m4 16ul))
let load_felem5_4_interleave lo hi =
let m0 = vec_interleave_low_n 2 lo hi in
vec_interleave_low_n_lemma_uint64_4_2 lo hi;
//assert (vec_v m0 == create4 (vec_v lo).[0] (vec_v lo).[1] (vec_v hi).[0] (vec_v hi).[1]);
let m1 = vec_interleave_high_n 2 lo hi in
vec_interleave_high_n_lemma_uint64_4_2 lo hi;
//assert (vec_v m1 == create4 (vec_v lo).[2] (vec_v lo).[3] (vec_v hi).[2] (vec_v hi).[3]);
let m4 = vec_interleave_high m0 m1 in
vec_interleave_high_lemma_uint64_4 m0 m1;
//assert (vec_v m4 == create4 (vec_v m0).[1] (vec_v m1).[1] (vec_v m0).[3] (vec_v m1).[3]);
assert (vec_v m4 == create4 (vec_v lo).[1] (vec_v lo).[3] (vec_v hi).[1] (vec_v hi).[3]);
let t0 = vec_interleave_low m0 m1 in
vec_interleave_low_lemma_uint64_4 m0 m1;
//assert (vec_v t0 == create4 (vec_v m0).[0] (vec_v m1).[0] (vec_v m0).[2] (vec_v m1).[2]);
assert (vec_v t0 == create4 (vec_v lo).[0] (vec_v lo).[2] (vec_v hi).[0] (vec_v hi).[2]);
let m2 = cast U64 4 (vec_shift_right (cast U128 2 m0) 48ul) in
vec_shift_right_uint128_small2 m0 48ul;
assert ((vec_v m2).[0] == (((vec_v lo).[0] >>. 48ul) |. ((vec_v lo).[1] <<. 16ul)));
assert ((vec_v m2).[2] == (((vec_v hi).[0] >>. 48ul) |. ((vec_v hi).[1] <<. 16ul)));
let m3 = cast U64 4 (vec_shift_right (cast U128 2 m1) 48ul) in
vec_shift_right_uint128_small2 m1 48ul;
assert ((vec_v m3).[0] == (((vec_v lo).[2] >>. 48ul) |. ((vec_v lo).[3] <<. 16ul)));
assert ((vec_v m3).[2] == (((vec_v hi).[2] >>. 48ul) |. ((vec_v hi).[3] <<. 16ul)));
let t3 = vec_interleave_low m2 m3 in
vec_interleave_low_lemma_uint64_4 m2 m3;
eq_intro (vec_v t3) (vec_v (vec_or (vec_shift_right t0 48ul) (vec_shift_left m4 16ul)));
vecv_extensionality t3 (vec_or (vec_shift_right t0 48ul) (vec_shift_left m4 16ul))
noextract
val load_felem5_4_compact: lo:uint64xN 4 -> hi:uint64xN 4 -> felem5 4
let load_felem5_4_compact lo hi =
let mask26 = mask26 4 in
let t3 = vec_or (vec_shift_right lo 48ul) (vec_shift_left hi 16ul) in
let o0 = vec_and lo mask26 in
let o1 = vec_and (vec_shift_right lo 26ul) mask26 in
let o2 = vec_and (vec_shift_right t3 4ul) mask26 in
let o3 = vec_and (vec_shift_right t3 30ul) mask26 in
let o4 = vec_shift_right hi 40ul in
(o0, o1, o2, o3, o4)
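(* Per lane, (lo, hi) is one 128-bit little-endian value and
   t3 = (lo >> 48) | (hi << 16) holds its bits 48..111, so the five limbs
   select bit ranges 0..25, 26..51, 52..77, 78..103 and 104..127; this is the
   same radix-2^26 split that load_tup64_4_compact_lemma proves correct. *)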
val load_felem5_4_compact_lemma_i: lo:uint64xN 4 -> hi:uint64xN 4 -> i:nat{i < 4} ->
Lemma
(let f = as_tup64_i (load_felem5_4_compact lo hi) i in
tup64_fits5 f (1, 1, 1, 1, 1) /\
as_nat5 f < pow2 128 /\
as_nat5 f % Vec.prime == (uint64xN_v hi).[i] * pow2 64 + (uint64xN_v lo).[i])
let load_felem5_4_compact_lemma_i lo hi i =
assert (as_tup64_i (load_felem5_4_compact lo hi) i == load_tup64_4_compact (vec_v lo).[i] (vec_v hi).[i]);
load_tup64_4_compact_lemma (vec_v lo).[i] (vec_v hi).[i]
val load_felem5_4_lemma: lo:uint64xN 4 -> hi:uint64xN 4 ->
Lemma
(let f = load_felem5_4_compact lo hi in
felem_fits5 f (1, 1, 1, 1, 1) /\
felem_less5 f (pow2 128) /\
feval5 f == createi #Vec.pfelem 4 (fun i -> (uint64xN_v hi).[i] * pow2 64 + (uint64xN_v lo).[i]))
let load_felem5_4_lemma lo hi =
let f = load_felem5_4_compact lo hi in
assert_norm (pow2 64 * pow2 64 = pow2 128);
assert_norm (pow2 128 < Vec.prime);
let res = createi #Vec.pfelem 4
(fun i -> (uint64xN_v hi).[i] * pow2 64 + (uint64xN_v lo).[i]) in
load_felem5_4_compact_lemma_i lo hi 0;
load_felem5_4_compact_lemma_i lo hi 1;
load_felem5_4_compact_lemma_i lo hi 2;
load_felem5_4_compact_lemma_i lo hi 3;
eq_intro (feval5 f) res
val load_felem5_le: b:lseq uint8 64 -> Lemma
(let lo0 = vec_from_bytes_le U64 4 (sub b 0 32) in
let hi0 = vec_from_bytes_le U64 4 (sub b 32 32) in
let f = load_felem5_4 lo0 hi0 in
felem_fits5 f (1, 1, 1, 1, 1) /\
felem_less5 f (pow2 128) /\ | false | false | Hacl.Spec.Poly1305.Field32xN.Lemmas.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 100,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val load_felem5_le: b:lseq uint8 64 -> Lemma
(let lo0 = vec_from_bytes_le U64 4 (sub b 0 32) in
let hi0 = vec_from_bytes_le U64 4 (sub b 32 32) in
let f = load_felem5_4 lo0 hi0 in
felem_fits5 f (1, 1, 1, 1, 1) /\
felem_less5 f (pow2 128) /\
feval5 f == Vec.load_elem4 b) | [] | Hacl.Spec.Poly1305.Field32xN.Lemmas.load_felem5_le | {
"file_name": "code/poly1305/Hacl.Spec.Poly1305.Field32xN.Lemmas.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | b: Lib.Sequence.lseq Lib.IntTypes.uint8 64
-> FStar.Pervasives.Lemma
(ensures
(let lo0 = Lib.IntVector.vec_from_bytes_le Lib.IntTypes.U64 4 (Lib.Sequence.sub b 0 32) in
let hi0 = Lib.IntVector.vec_from_bytes_le Lib.IntTypes.U64 4 (Lib.Sequence.sub b 32 32) in
let f = Hacl.Spec.Poly1305.Field32xN.load_felem5_4 lo0 hi0 in
Hacl.Spec.Poly1305.Field32xN.felem_fits5 f (1, 1, 1, 1, 1) /\
Hacl.Spec.Poly1305.Field32xN.felem_less5 f (Prims.pow2 128) /\
Hacl.Spec.Poly1305.Field32xN.feval5 f == Hacl.Spec.Poly1305.Vec.load_elem4 b)) | {
"end_col": 42,
"end_line": 764,
"start_col": 22,
"start_line": 750
} |
FStar.Pervasives.Lemma | val load_felem5_4_lemma: lo:uint64xN 4 -> hi:uint64xN 4 ->
Lemma
(let f = load_felem5_4_compact lo hi in
felem_fits5 f (1, 1, 1, 1, 1) /\
felem_less5 f (pow2 128) /\
feval5 f == createi #Vec.pfelem 4 (fun i -> (uint64xN_v hi).[i] * pow2 64 + (uint64xN_v lo).[i])) | [
{
"abbrev": true,
"full_module": "Hacl.Spec.Poly1305.Vec",
"short_module": "Vec"
},
{
"abbrev": false,
"full_module": "Hacl.Poly1305.Field32xN.Lemmas2",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Poly1305.Field32xN.Lemmas1",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Poly1305.Field32xN.Lemmas0",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305.Field32xN",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305.Field32xN",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305.Field32xN",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let load_felem5_4_lemma lo hi =
let f = load_felem5_4_compact lo hi in
assert_norm (pow2 64 * pow2 64 = pow2 128);
assert_norm (pow2 128 < Vec.prime);
let res = createi #Vec.pfelem 4
(fun i -> (uint64xN_v hi).[i] * pow2 64 + (uint64xN_v lo).[i]) in
load_felem5_4_compact_lemma_i lo hi 0;
load_felem5_4_compact_lemma_i lo hi 1;
load_felem5_4_compact_lemma_i lo hi 2;
load_felem5_4_compact_lemma_i lo hi 3;
eq_intro (feval5 f) res | val load_felem5_4_lemma: lo:uint64xN 4 -> hi:uint64xN 4 ->
Lemma
(let f = load_felem5_4_compact lo hi in
felem_fits5 f (1, 1, 1, 1, 1) /\
felem_less5 f (pow2 128) /\
feval5 f == createi #Vec.pfelem 4 (fun i -> (uint64xN_v hi).[i] * pow2 64 + (uint64xN_v lo).[i]))
let load_felem5_4_lemma lo hi = | false | null | true | let f = load_felem5_4_compact lo hi in
assert_norm (pow2 64 * pow2 64 = pow2 128);
assert_norm (pow2 128 < Vec.prime);
let res =
createi #Vec.pfelem 4 (fun i -> (uint64xN_v hi).[i] * pow2 64 + (uint64xN_v lo).[i])
in
load_felem5_4_compact_lemma_i lo hi 0;
load_felem5_4_compact_lemma_i lo hi 1;
load_felem5_4_compact_lemma_i lo hi 2;
load_felem5_4_compact_lemma_i lo hi 3;
eq_intro (feval5 f) res | {
"checked_file": "Hacl.Spec.Poly1305.Field32xN.Lemmas.fst.checked",
"dependencies": [
"prims.fst.checked",
"Lib.Sequence.fsti.checked",
"Lib.IntVector.fsti.checked",
"Lib.IntTypes.fsti.checked",
"Hacl.Spec.Poly1305.Vec.fst.checked",
"Hacl.Spec.Poly1305.Field32xN.fst.checked",
"Hacl.Poly1305.Field32xN.Lemmas2.fst.checked",
"Hacl.Poly1305.Field32xN.Lemmas1.fst.checked",
"Hacl.Poly1305.Field32xN.Lemmas0.fst.checked",
"Hacl.Impl.Poly1305.Lemmas.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Classical.fsti.checked",
"FStar.Calc.fsti.checked"
],
"interface_file": false,
"source_file": "Hacl.Spec.Poly1305.Field32xN.Lemmas.fst"
} | [
"lemma"
] | [
"Hacl.Spec.Poly1305.Field32xN.uint64xN",
"Lib.Sequence.eq_intro",
"Hacl.Spec.Poly1305.Vec.pfelem",
"Hacl.Spec.Poly1305.Field32xN.feval5",
"Prims.unit",
"Hacl.Spec.Poly1305.Field32xN.Lemmas.load_felem5_4_compact_lemma_i",
"Lib.Sequence.lseq",
"Prims.l_Forall",
"Prims.nat",
"Prims.l_imp",
"Prims.b2t",
"Prims.op_LessThan",
"Prims.eq2",
"Lib.Sequence.index",
"Prims.op_Addition",
"Prims.op_Multiply",
"Hacl.Spec.Poly1305.Field32xN.uint64xN_v",
"Prims.pow2",
"Lib.Sequence.createi",
"FStar.Mul.op_Star",
"Lib.Sequence.op_String_Access",
"FStar.Pervasives.assert_norm",
"Hacl.Spec.Poly1305.Vec.prime",
"Prims.op_Equality",
"Prims.int",
"Hacl.Spec.Poly1305.Field32xN.felem5",
"Hacl.Spec.Poly1305.Field32xN.Lemmas.load_felem5_4_compact"
] | [] | module Hacl.Spec.Poly1305.Field32xN.Lemmas
open Lib.IntTypes
open Lib.IntVector
open Lib.Sequence
open FStar.Mul
open Hacl.Spec.Poly1305.Field32xN
open Hacl.Poly1305.Field32xN.Lemmas0
open Hacl.Poly1305.Field32xN.Lemmas1
open Hacl.Poly1305.Field32xN.Lemmas2
module Vec = Hacl.Spec.Poly1305.Vec
#set-options "--z3rlimit 100 --max_fuel 0 --initial_ifuel 1 --max_ifuel 1 --using_facts_from '* -FStar.Seq'"
val lemma_feval_is_fas_nat_i:
#w:lanes
-> f:felem5 w{felem_less5 f (pow2 128)}
-> i:size_nat{i < w} ->
Lemma ((feval5 f).[i] == (fas_nat5 f).[i])
let lemma_feval_is_fas_nat_i #w f i =
assert_norm (pow2 128 < Vec.prime);
assert ((feval5 f).[i] == (as_nat5 (transpose f).[i]) % Vec.prime);
FStar.Math.Lemmas.modulo_lemma (as_nat5 (transpose f).[i]) Vec.prime
val lemma_feval_is_fas_nat:
#w:lanes
-> f:felem5 w{felem_less5 f (pow2 128)} ->
Lemma (forall (i:nat). i < w ==> (fas_nat5 f).[i] == (feval5 f).[i])
let lemma_feval_is_fas_nat #w f =
FStar.Classical.forall_intro (lemma_feval_is_fas_nat_i #w f)
val precomp_r5_fits_lemma:
#w:lanes
-> r:felem5 w{felem_fits5 r (1, 1, 1, 1, 1)} ->
Lemma (felem_fits5 (precomp_r5 #w r) (5, 5, 5, 5, 5))
[SMTPat (precomp_r5 #w r)]
let precomp_r5_fits_lemma #w r =
FStar.Classical.forall_intro (precomp_r5_as_tup64 #w r)
val precomp_r5_fits_lemma2:
#w:lanes
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)} ->
Lemma (felem_fits5 (precomp_r5 #w r) (10, 10, 10, 10, 10))
[SMTPat (precomp_r5 #w r)]
let precomp_r5_fits_lemma2 #w r =
FStar.Classical.forall_intro (precomp_r5_as_tup64 #w r)
val precomp_r5_zeros: w:lanes -> Lemma
(let r = (zero w, zero w, zero w, zero w, zero w) in
precomp_r5 r == (zero w, zero w, zero w, zero w, zero w))
let precomp_r5_zeros w =
let r = (zero w, zero w, zero w, zero w, zero w) in
let (r0, r1, r2, r3, r4) = precomp_r5 r in
let aux (i:nat{i < w}) : Lemma ((vec_v (vec_smul_mod (zero w) (u64 5))).[i] == u64 0) = () in
Classical.forall_intro aux;
eq_intro (vec_v (vec_smul_mod (zero w) (u64 5))) (vec_v (zero w));
vecv_extensionality (vec_smul_mod (zero w) (u64 5)) (zero w)
val fadd5_fits_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (2,2,2,2,2)}
-> f2:felem5 w{felem_fits5 f2 (1,1,1,1,1)} ->
Lemma (felem_fits5 (fadd5 f1 f2) (3,3,3,3,3))
[SMTPat (fadd5 f1 f2)]
let fadd5_fits_lemma #w f1 f2 =
let (f10, f11, f12, f13, f14) = f1 in
let (f20, f21, f22, f23, f24) = f2 in
let o = fadd5 f1 f2 in
vec_add_mod_lemma f10 f20;
vec_add_mod_lemma f11 f21;
vec_add_mod_lemma f12 f22;
vec_add_mod_lemma f13 f23;
vec_add_mod_lemma f14 f24
val fadd5_eval_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (2,2,2,2,2)}
-> f2:felem5 w{felem_fits5 f2 (1,1,1,1,1)} ->
Lemma (feval5 (fadd5 f1 f2) == map2 Vec.pfadd (feval5 f1) (feval5 f2))
[SMTPat (fadd5 f1 f2)]
let fadd5_eval_lemma #w f1 f2 =
let o = fadd5 f1 f2 in
FStar.Classical.forall_intro (fadd5_eval_lemma_i f1 f2);
eq_intro (feval5 o) (map2 Vec.pfadd (feval5 f1) (feval5 f2))
val mul_felem5_fits_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10)} ->
Lemma (felem_wide_fits5 (mul_felem5 #w f1 r r5) (126, 102, 78, 54, 30))
[SMTPat (mul_felem5 #w f1 r r5)]
let mul_felem5_fits_lemma #w f1 r r5 =
let (r0, r1, r2, r3, r4) = r in
let (f10, f11, f12, f13, f14) = f1 in
let (r50, r51, r52, r53, r54) = r5 in
let (a0,a1,a2,a3,a4) = smul_felem5 #w f10 (r0,r1,r2,r3,r4) in
smul_felem5_fits_lemma #w #3 #(2,2,2,2,2) f10 (r0,r1,r2,r3,r4);
let (a10,a11,a12,a13,a14) = smul_add_felem5 #w f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4) in
smul_add_felem5_fits_lemma #w #3 #(10,2,2,2,2) #(6,6,6,6,6) f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4);
let (a20,a21,a22,a23,a24) = smul_add_felem5 #w f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14) in
smul_add_felem5_fits_lemma #w #3 #(10,10,2,2,2) #(36,12,12,12,12) f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14);
let (a30,a31,a32,a33,a34) = smul_add_felem5 #w f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24) in
smul_add_felem5_fits_lemma #w #3 #(10,10,10,2,2) #(66,42,18,18,18) f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24);
let (a40,a41,a42,a43,a44) = smul_add_felem5 #w f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34) in
smul_add_felem5_fits_lemma #w #3 #(10,10,10,10,2) #(96,72,48,24,24) f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34)
val mul_felem5_eval_lemma_i:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10) /\ r5 == precomp_r5 r}
-> i:nat{i < w} ->
Lemma ((feval5 (mul_felem5 #w f1 r r5)).[i] == (feval5 f1).[i] `Vec.pfmul` (feval5 r).[i])
let mul_felem5_eval_lemma_i #w f1 r r5 i =
let (r0, r1, r2, r3, r4) = r in
let (f10, f11, f12, f13, f14) = f1 in
let (r50, r51, r52, r53, r54) = r5 in
let (a0,a1,a2,a3,a4) = smul_felem5 #w f10 (r0,r1,r2,r3,r4) in
smul_felem5_eval_lemma #w #3 #(2,2,2,2,2) f10 (r0,r1,r2,r3,r4);
smul_felem5_fits_lemma #w #3 #(2,2,2,2,2) f10 (r0,r1,r2,r3,r4);
assert ((fas_nat5 (a0,a1,a2,a3,a4)).[i] == (uint64xN_v f10).[i] * (fas_nat5 (r0,r1,r2,r3,r4)).[i]);
let (a10,a11,a12,a13,a14) = smul_add_felem5 #w f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4) in
smul_add_felem5_eval_lemma #w #3 #(10,2,2,2,2) #(6,6,6,6,6) f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4);
smul_add_felem5_fits_lemma #w #3 #(10,2,2,2,2) #(6,6,6,6,6) f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4);
assert ((fas_nat5 (a10,a11,a12,a13,a14)).[i] == (fas_nat5 (a0,a1,a2,a3,a4)).[i] + (uint64xN_v f11).[i] * (fas_nat5 (r54,r0,r1,r2,r3)).[i]);
let (a20,a21,a22,a23,a24) = smul_add_felem5 #w f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14) in
smul_add_felem5_eval_lemma #w #3 #(10,10,2,2,2) #(36,12,12,12,12) f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14);
smul_add_felem5_fits_lemma #w #3 #(10,10,2,2,2) #(36,12,12,12,12) f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14);
assert ((fas_nat5 (a20,a21,a22,a23,a24)).[i] == (fas_nat5 (a10,a11,a12,a13,a14)).[i] + (uint64xN_v f12).[i] * (fas_nat5 (r53,r54,r0,r1,r2)).[i]);
let (a30,a31,a32,a33,a34) = smul_add_felem5 #w f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24) in
smul_add_felem5_eval_lemma #w #3 #(10,10,10,2,2) #(66,42,18,18,18) f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24);
smul_add_felem5_fits_lemma #w #3 #(10,10,10,2,2) #(66,42,18,18,18) f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24);
assert ((fas_nat5 (a30,a31,a32,a33,a34)).[i] == (fas_nat5 (a20,a21,a22,a23,a24)).[i] + (uint64xN_v f13).[i] * (fas_nat5 (r52,r53,r54,r0,r1)).[i]);
let (a40,a41,a42,a43,a44) = smul_add_felem5 #w f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34) in
smul_add_felem5_eval_lemma #w #3 #(10,10,10,10,2) #(96,72,48,24,24) f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34);
smul_add_felem5_fits_lemma #w #3 #(10,10,10,10,2) #(96,72,48,24,24) f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34);
assert ((fas_nat5 (a40,a41,a42,a43,a44)).[i] == (fas_nat5 (a30,a31,a32,a33,a34)).[i] + (uint64xN_v f14).[i] * (fas_nat5 (r51,r52,r53,r54,r0)).[i]);
assert ((fas_nat5 (a40,a41,a42,a43,a44)).[i] ==
(uint64xN_v f10).[i] * (fas_nat5 (r0,r1,r2,r3,r4)).[i] +
(uint64xN_v f11).[i] * (fas_nat5 (r54,r0,r1,r2,r3)).[i] +
(uint64xN_v f12).[i] * (fas_nat5 (r53,r54,r0,r1,r2)).[i] +
(uint64xN_v f13).[i] * (fas_nat5 (r52,r53,r54,r0,r1)).[i] +
(uint64xN_v f14).[i] * (fas_nat5 (r51,r52,r53,r54,r0)).[i]);
mul_felem5_eval_as_tup64 #w f1 r r5 i;
mul_felem5_lemma (as_tup64_i f1 i) (as_tup64_i r i)
val mul_felem5_eval_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10) /\ r5 == precomp_r5 r} ->
Lemma (feval5 (mul_felem5 #w f1 r r5) == map2 (Vec.pfmul) (feval5 f1) (feval5 r))
[SMTPat (mul_felem5 #w f1 r r5)]
let mul_felem5_eval_lemma #w f1 r r5 =
let tmp = map2 (Vec.pfmul) (feval5 f1) (feval5 r) in
FStar.Classical.forall_intro (mul_felem5_eval_lemma_i #w f1 r r5);
eq_intro (feval5 (mul_felem5 #w f1 r r5)) tmp
val fmul_r5_fits_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10)} ->
Lemma (felem_fits5 (fmul_r5 #w f1 r r5) (1, 2, 1, 1, 2))
[SMTPat (fmul_r5 #w f1 r r5)]
let fmul_r5_fits_lemma #w f1 r r5 =
let tmp = mul_felem5 f1 r r5 in
mul_felem5_fits_lemma #w f1 r r5;
let res = carry_wide_felem5 tmp in
carry_wide_felem5_fits_lemma #w tmp
val fmul_r5_eval_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10) /\ r5 == precomp_r5 r} ->
Lemma (feval5 (fmul_r5 #w f1 r r5) == map2 (Vec.pfmul) (feval5 f1) (feval5 r))
[SMTPat (fmul_r5 #w f1 r r5)]
let fmul_r5_eval_lemma #w f1 r r5 =
let tmp = mul_felem5 f1 r r5 in
mul_felem5_eval_lemma #w f1 r r5;
let res = carry_wide_felem5 tmp in
carry_wide_felem5_eval_lemma #w tmp
val fadd_mul_r5_fits_lemma:
#w:lanes
-> acc:felem5 w{felem_fits5 acc (2, 2, 2, 2, 2)}
-> f1:felem5 w{felem_fits5 f1 (1, 1, 1, 1, 1)}
-> r:felem5 w{felem_fits5 r (1, 1, 1, 1, 1)}
-> r5:felem5 w{felem_fits5 r5 (5, 5, 5, 5, 5)} ->
Lemma (felem_fits5 (fadd_mul_r5 acc f1 r r5) (1, 2, 1, 1, 2))
[SMTPat (fadd_mul_r5 acc f1 r r5)]
let fadd_mul_r5_fits_lemma #w acc f1 r r5 =
let acc1 = fadd5 acc f1 in
fadd5_fits_lemma #w acc f1;
let tmp = mul_felem5 acc1 r r5 in
mul_felem5_fits_lemma #w acc1 r r5;
let res = carry_wide_felem5 tmp in
carry_wide_felem5_fits_lemma #w tmp
val fadd_mul_r5_eval_lemma:
#w:lanes
-> acc:felem5 w{felem_fits5 acc (2, 2, 2, 2, 2)}
-> f1:felem5 w{felem_fits5 f1 (1, 1, 1, 1, 1)}
-> r:felem5 w{felem_fits5 r (1, 1, 1, 1, 1)}
-> r5:felem5 w{felem_fits5 r5 (5, 5, 5, 5, 5) /\ r5 == precomp_r5 r} ->
Lemma (feval5 (fadd_mul_r5 acc f1 r r5) ==
map2 (Vec.pfmul) (map2 (Vec.pfadd) (feval5 acc) (feval5 f1)) (feval5 r))
[SMTPat (fadd_mul_r5 acc f1 r r5)]
let fadd_mul_r5_eval_lemma #w acc f1 r r5 =
let acc1 = fadd5 acc f1 in
fadd5_eval_lemma #w acc f1;
let tmp = mul_felem5 acc1 r r5 in
mul_felem5_eval_lemma #w acc1 r r5;
let res = carry_wide_felem5 tmp in
carry_wide_felem5_eval_lemma #w tmp
val reduce_felem5_eval_lemma:
#w:lanes
-> f:felem5 w{felem_fits5 f (2, 2, 2, 2, 2)} ->
Lemma
(felem_fits5 (reduce_felem5 f) (1, 1, 1, 1, 1) /\
(feval5 f).[0] == (fas_nat5 (reduce_felem5 f)).[0])
[SMTPat (reduce_felem5 f)]
let reduce_felem5_eval_lemma #w f =
carry_full_felem5_eval_lemma f;
carry_full_felem5_fits_lemma f;
let f = carry_full_felem5 f in
carry_reduce_felem5_lemma #w f;
subtract_p5_felem5_lemma #w (carry_full_felem5 f)
val fmul_r2_normalize50:
acc:felem5 2
-> r:felem5 2
-> r2:felem5 2 ->
Pure (felem5 2)
(requires
felem_fits5 acc (3, 3, 3, 3, 3) /\
felem_fits5 r (1, 1, 1, 1, 1) /\
felem_fits5 r2 (2, 2, 2, 2, 2) /\
feval5 r2 == Vec.compute_r2 (feval5 r).[0])
(ensures fun a ->
let fr21 = create2 (feval5 r2).[0] (feval5 r).[0] in
feval5 a == Vec.fmul (feval5 acc) fr21 /\
felem_fits5 a (1, 2, 1, 1, 2))
let fmul_r2_normalize50 (a0, a1, a2, a3, a4) (r0, r1, r2, r3, r4) (r20, r21, r22, r23, r24) =
let r210 = vec_interleave_low r20 r0 in
vec_interleave_low_lemma2 r20 r0;
let r211 = vec_interleave_low r21 r1 in
vec_interleave_low_lemma2 r21 r1;
let r212 = vec_interleave_low r22 r2 in
vec_interleave_low_lemma2 r22 r2;
let r213 = vec_interleave_low r23 r3 in
vec_interleave_low_lemma2 r23 r3;
let r214 = vec_interleave_low r24 r4 in
vec_interleave_low_lemma2 r24 r4;
let acc = (a0, a1, a2, a3, a4) in
let fr = (r0, r1, r2, r3, r4) in
let fr2 = (r20, r21, r22, r23, r24) in
assert ((feval5 fr2).[0] == Vec.pfmul ((feval5 fr).[0]) ((feval5 fr).[0]));
let fr21 = (r210, r211, r212, r213, r214) in
eq_intro (feval5 fr21) (create2 (feval5 fr2).[0] (feval5 fr).[0]);
assert (feval5 fr21 == create2 (feval5 fr2).[0] (feval5 fr).[0]);
assert (felem_fits5 fr21 (2, 2, 2, 2, 2));
let fr215 = precomp_r5 #2 fr21 in
let a = fmul_r5 #2 acc fr21 fr215 in
fmul_r5_eval_lemma acc fr21 fr215;
fmul_r5_fits_lemma acc fr21 fr215;
assert (feval5 a == Vec.fmul (feval5 acc) (feval5 fr21));
assert (felem_fits5 a (1, 2, 1, 1, 2));
a
#push-options "--z3rlimit 150"
val fmul_r2_normalize51:
a:felem5 2
-> fa1:felem5 2 ->
Pure (felem5 2)
(requires
felem_fits5 a (1, 2, 1, 1, 2) /\
felem_fits5 fa1 (1, 2, 1, 1, 2) /\
feval5 fa1 == create2 (feval5 a).[1] (feval5 a).[1])
(ensures fun out ->
(feval5 out).[0] == Vec.pfadd (feval5 a).[0] (feval5 a).[1] /\
felem_fits5 out (2, 4, 2, 2, 4))
let fmul_r2_normalize51 a fa1 =
let (a0, a1, a2, a3, a4) = a in
let (a10, a11, a12, a13, a14) = fa1 in
let o0 = vec_add_mod a0 a10 in
let o1 = vec_add_mod a1 a11 in
let o2 = vec_add_mod a2 a12 in
let o3 = vec_add_mod a3 a13 in
let o4 = vec_add_mod a4 a14 in
let out = (o0, o1, o2, o3, o4) in
let (a0, a1, a2, a3, a4) = as_tup64_i a 0 in
let (a10, a11, a12, a13, a14) = as_tup64_i fa1 0 in
let (o0, o1, o2, o3, o4) = as_tup64_i out 0 in
FStar.Math.Lemmas.modulo_lemma (v a0 + v a10) (pow2 64);
FStar.Math.Lemmas.modulo_lemma (v a1 + v a11) (pow2 64);
FStar.Math.Lemmas.modulo_lemma (v a2 + v a12) (pow2 64);
FStar.Math.Lemmas.modulo_lemma (v a3 + v a13) (pow2 64);
FStar.Math.Lemmas.modulo_lemma (v a4 + v a14) (pow2 64);
assert (felem_fits5 out (2, 4, 2, 2, 4));
calc (==) {
((feval5 a).[0] + (feval5 a).[1]) % Vec.prime;
(==) { }
(as_nat5 (a0, a1, a2, a3, a4) % Vec.prime + as_nat5 (a10, a11, a12, a13, a14) % Vec.prime) % Vec.prime;
(==) { FStar.Math.Lemmas.lemma_mod_plus_distr_l
(as_nat5 (a0, a1, a2, a3, a4)) (as_nat5 (a10, a11, a12, a13, a14)) Vec.prime;
FStar.Math.Lemmas.lemma_mod_plus_distr_r
(as_nat5 (a0, a1, a2, a3, a4) % Vec.prime) (as_nat5 (a10, a11, a12, a13, a14)) Vec.prime }
(as_nat5 (a0, a1, a2, a3, a4) + as_nat5 (a10, a11, a12, a13, a14)) % Vec.prime;
(==) { }
(feval5 out).[0];
};
out
#pop-options
val fmul_r2_normalize5_lemma:
acc:felem5 2
-> r:felem5 2
-> r2:felem5 2 ->
Lemma
(requires
felem_fits5 acc (3, 3, 3, 3, 3) /\
felem_fits5 r (1, 1, 1, 1, 1) /\
felem_fits5 r2 (2, 2, 2, 2, 2) /\
feval5 r2 == Vec.compute_r2 (feval5 r).[0])
(ensures
(let out = fmul_r2_normalize5 acc r r2 in
felem_fits5 out (2, 1, 1, 1, 1) /\
(feval5 out).[0] == Vec.normalize_2 (feval5 r).[0] (feval5 acc)))
[SMTPat (fmul_r2_normalize5 acc r r2)]
let fmul_r2_normalize5_lemma acc r r2 =
let a = fmul_r2_normalize50 acc r r2 in
let (a0, a1, a2, a3, a4) = a in
let a10 = vec_interleave_high a0 a0 in
vec_interleave_high_lemma2 a0 a0;
let a11 = vec_interleave_high a1 a1 in
vec_interleave_high_lemma2 a1 a1;
let a12 = vec_interleave_high a2 a2 in
vec_interleave_high_lemma2 a2 a2;
let a13 = vec_interleave_high a3 a3 in
vec_interleave_high_lemma2 a3 a3;
let a14 = vec_interleave_high a4 a4 in
vec_interleave_high_lemma2 a4 a4;
let fa1 = (a10, a11, a12, a13, a14) in
eq_intro (feval5 fa1) (create2 (feval5 a).[1] (feval5 a).[1]);
assert (feval5 fa1 == create2 (feval5 a).[1] (feval5 a).[1]);
assert (felem_fits5 fa1 (1, 2, 1, 1, 2));
let out = fmul_r2_normalize51 a fa1 in
assert ((feval5 out).[0] == Vec.pfadd (feval5 a).[0] (feval5 a).[1]);
let res = carry_full_felem5 out in
carry_full_felem5_lemma out
val fmul_r4_normalize50:
acc:felem5 4
-> r:felem5 4
-> r2:felem5 4
-> r3:felem5 4
-> r4:felem5 4 ->
Pure (felem5 4)
(requires
felem_fits5 acc (3, 3, 3, 3, 3) /\
felem_fits5 r (1, 1, 1, 1, 1) /\
felem_fits5 r2 (2, 2, 2, 2, 2) /\
felem_fits5 r3 (2, 2, 2, 2, 2) /\
felem_fits5 r4 (2, 2, 2, 2, 2) /\
feval5 r2 == Vec.fmul (feval5 r) (feval5 r) /\
feval5 r3 == Vec.fmul (feval5 r2) (feval5 r) /\
feval5 r4 == Vec.compute_r4 (feval5 r).[0])
(ensures fun out ->
let fr4321 = create4 (feval5 r4).[0] (feval5 r3).[0] (feval5 r2).[0] (feval5 r).[0] in
feval5 out == Vec.fmul (feval5 acc) fr4321 /\
felem_fits5 out (1, 2, 1, 1, 2))
let fmul_r4_normalize50 acc fr fr2 fr3 fr4 =
let (r10, r11, r12, r13, r14) = fr in
let (r20, r21, r22, r23, r24) = fr2 in
let (r30, r31, r32, r33, r34) = fr3 in
let (r40, r41, r42, r43, r44) = fr4 in
let (a0, a1, a2, a3, a4) = acc in
let v12120 = vec_interleave_low r20 r10 in
vec_interleave_low_lemma_uint64_4 r20 r10;
let v34340 = vec_interleave_low r40 r30 in
vec_interleave_low_lemma_uint64_4 r40 r30;
let r12340 = vec_interleave_low_n 2 v34340 v12120 in
vec_interleave_low_n_lemma_uint64_4_2 v34340 v12120;
let v12121 = vec_interleave_low r21 r11 in
vec_interleave_low_lemma_uint64_4 r21 r11;
let v34341 = vec_interleave_low r41 r31 in
vec_interleave_low_lemma_uint64_4 r41 r31;
let r12341 = vec_interleave_low_n 2 v34341 v12121 in
vec_interleave_low_n_lemma_uint64_4_2 v34341 v12121;
let v12122 = vec_interleave_low r22 r12 in
vec_interleave_low_lemma_uint64_4 r22 r12;
let v34342 = vec_interleave_low r42 r32 in
vec_interleave_low_lemma_uint64_4 r42 r32;
let r12342 = vec_interleave_low_n 2 v34342 v12122 in
vec_interleave_low_n_lemma_uint64_4_2 v34342 v12122;
let v12123 = vec_interleave_low r23 r13 in
vec_interleave_low_lemma_uint64_4 r23 r13;
let v34343 = vec_interleave_low r43 r33 in
vec_interleave_low_lemma_uint64_4 r43 r33;
let r12343 = vec_interleave_low_n 2 v34343 v12123 in
vec_interleave_low_n_lemma_uint64_4_2 v34343 v12123;
let v12124 = vec_interleave_low r24 r14 in
vec_interleave_low_lemma_uint64_4 r24 r14;
let v34344 = vec_interleave_low r44 r34 in
vec_interleave_low_lemma_uint64_4 r44 r34;
let r12344 = vec_interleave_low_n 2 v34344 v12124 in
vec_interleave_low_n_lemma_uint64_4_2 v34344 v12124;
let fr1234 = (r12340, r12341, r12342, r12343, r12344) in
eq_intro (feval5 fr1234) (create4 (feval5 fr4).[0] (feval5 fr3).[0] (feval5 fr2).[0] (feval5 fr).[0]);
let fr12345 = precomp_r5 #4 fr1234 in
let out = fmul_r5 #4 acc fr1234 fr12345 in
fmul_r5_eval_lemma acc fr1234 fr12345;
fmul_r5_fits_lemma acc fr1234 fr12345;
out
val lemma_fmul_r4_normalize51:
#m:scale32{m <= 2}
-> o:uint64xN 4{felem_fits1 o m} ->
Lemma
(let v00 = vec_interleave_high_n 2 o o in
let v10 = vec_add_mod o v00 in
let v10h = vec_interleave_high v10 v10 in
let v20 = vec_add_mod v10 v10h in
felem_fits1 v20 (4 * m) /\
(uint64xN_v v20).[0] == (uint64xN_v o).[0] + (uint64xN_v o).[1] + (uint64xN_v o).[2] + (uint64xN_v o).[3])
let lemma_fmul_r4_normalize51 #m o =
let v00 = vec_interleave_high_n 2 o o in
vec_interleave_high_n_lemma_uint64_4_2 o o;
let (o0, o1, o2, o3) = ((vec_v o).[0], (vec_v o).[1], (vec_v o).[2], (vec_v o).[3]) in
assert (vec_v v00 == create4 o2 o3 o2 o3);
let v10 = vec_add_mod o v00 in
FStar.Math.Lemmas.modulo_lemma (v o0 + v o2) (pow2 64);
FStar.Math.Lemmas.modulo_lemma (v o1 + v o3) (pow2 64);
assert (v (vec_v v10).[0] == v o0 + v o2);
assert (v (vec_v v10).[1] == v o1 + v o3);
let v10h = vec_interleave_high v10 v10 in
vec_interleave_high_lemma_uint64_4 v10 v10;
assert (v (vec_v v10h).[0] == v (vec_v v10).[1]);
let v20 = vec_add_mod v10 v10h in
FStar.Math.Lemmas.modulo_lemma (v o0 + v o2 + v o1 + v o3) (pow2 64)
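// lemma_fmul_r4_normalize51_expand lifts the per-limb lane sums to the field
// evaluation: if every limb of v2 holds, in lane 0, the sum of the four lanes
// of the corresponding limb of out, then (feval5 v2).[0] is the mod-prime sum
// of the four lane evaluations of out.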
val lemma_fmul_r4_normalize51_expand:
v2:felem5 4
-> out:felem5 4 ->
Lemma
(requires
(let (v20, v21, v22, v23, v24) = v2 in
let (o0, o1, o2, o3, o4) = out in
(uint64xN_v v20).[0] == (uint64xN_v o0).[0] + (uint64xN_v o0).[1] + (uint64xN_v o0).[2] + (uint64xN_v o0).[3] /\
(uint64xN_v v21).[0] == (uint64xN_v o1).[0] + (uint64xN_v o1).[1] + (uint64xN_v o1).[2] + (uint64xN_v o1).[3] /\
(uint64xN_v v22).[0] == (uint64xN_v o2).[0] + (uint64xN_v o2).[1] + (uint64xN_v o2).[2] + (uint64xN_v o2).[3] /\
(uint64xN_v v23).[0] == (uint64xN_v o3).[0] + (uint64xN_v o3).[1] + (uint64xN_v o3).[2] + (uint64xN_v o3).[3] /\
(uint64xN_v v24).[0] == (uint64xN_v o4).[0] + (uint64xN_v o4).[1] + (uint64xN_v o4).[2] + (uint64xN_v o4).[3]))
(ensures
(let (v20, v21, v22, v23, v24) = v2 in
let (o0, o1, o2, o3, o4) = out in
(feval5 v2).[0] == Vec.pfadd (Vec.pfadd (Vec.pfadd (feval5 out).[0] (feval5 out).[1]) (feval5 out).[2]) (feval5 out).[3]))
let lemma_fmul_r4_normalize51_expand v2 out =
let (v20, v21, v22, v23, v24) = as_tup64_i v2 0 in
let (o0, o1, o2, o3, o4) = out in
calc (==) {
as_nat5 (v20, v21, v22, v23, v24) % Vec.prime;
(==) { }
(as_nat5 (as_tup64_i out 0) + as_nat5 (as_tup64_i out 1) + as_nat5 (as_tup64_i out 2) + as_nat5 (as_tup64_i out 3)) % Vec.prime;
(==) { FStar.Math.Lemmas.lemma_mod_plus_distr_l (as_nat5 (as_tup64_i out 0) + as_nat5 (as_tup64_i out 1))
(as_nat5 (as_tup64_i out 2) + as_nat5 (as_tup64_i out 3)) Vec.prime }
((as_nat5 (as_tup64_i out 0) + as_nat5 (as_tup64_i out 1)) % Vec.prime + as_nat5 (as_tup64_i out 2) + as_nat5 (as_tup64_i out 3)) % Vec.prime;
(==) { FStar.Math.Lemmas.modulo_distributivity (as_nat5 (as_tup64_i out 0)) (as_nat5 (as_tup64_i out 1)) Vec.prime }
(((feval5 out).[0] + (feval5 out).[1]) % Vec.prime + as_nat5 (as_tup64_i out 2) + as_nat5 (as_tup64_i out 3)) % Vec.prime;
(==) { FStar.Math.Lemmas.lemma_mod_plus_distr_l (((feval5 out).[0] + (feval5 out).[1]) % Vec.prime + as_nat5 (as_tup64_i out 2))
(as_nat5 (as_tup64_i out 3)) Vec.prime }
((((feval5 out).[0] + (feval5 out).[1]) % Vec.prime + as_nat5 (as_tup64_i out 2)) % Vec.prime + as_nat5 (as_tup64_i out 3)) % Vec.prime;
(==) { FStar.Math.Lemmas.lemma_mod_plus_distr_r (((feval5 out).[0] + (feval5 out).[1]) % Vec.prime) (as_nat5 (as_tup64_i out 2)) Vec.prime }
((((feval5 out).[0] + (feval5 out).[1]) % Vec.prime + (feval5 out).[2]) % Vec.prime + as_nat5 (as_tup64_i out 3)) % Vec.prime;
(==) { FStar.Math.Lemmas.lemma_mod_plus_distr_r ((((feval5 out).[0] + (feval5 out).[1]) % Vec.prime + (feval5 out).[2]) % Vec.prime)
(as_nat5 (as_tup64_i out 3)) Vec.prime }
((((feval5 out).[0] + (feval5 out).[1]) % Vec.prime + (feval5 out).[2]) % Vec.prime + (feval5 out).[3]) % Vec.prime;
};
assert ((feval5 v2).[0] ==
((((feval5 out).[0] + (feval5 out).[1]) % Vec.prime + (feval5 out).[2]) % Vec.prime + (feval5 out).[3]) % Vec.prime)
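// fmul_r4_normalize51 applies the horizontal-sum pattern to each of the five
// limbs, so lane 0 of the result evaluates to the sum of the four lanes of a.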
val fmul_r4_normalize51: a:felem5 4 ->
Pure (felem5 4)
(requires felem_fits5 a (1, 2, 1, 1, 2))
(ensures fun res ->
felem_fits5 res (4, 8, 4, 4, 8) /\
(feval5 res).[0] == Vec.pfadd (Vec.pfadd (Vec.pfadd (feval5 a).[0] (feval5 a).[1]) (feval5 a).[2]) (feval5 a).[3])
let fmul_r4_normalize51 fa =
let (o0, o1, o2, o3, o4) = fa in
let v00 = vec_interleave_high_n 2 o0 o0 in
let v10 = vec_add_mod o0 v00 in
let v10h = vec_interleave_high v10 v10 in
let v20 = vec_add_mod v10 v10h in
lemma_fmul_r4_normalize51 #1 o0;
let v01 = vec_interleave_high_n 2 o1 o1 in
let v11 = vec_add_mod o1 v01 in
let v11h = vec_interleave_high v11 v11 in
let v21 = vec_add_mod v11 v11h in
lemma_fmul_r4_normalize51 #2 o1;
let v02 = vec_interleave_high_n 2 o2 o2 in
let v12 = vec_add_mod o2 v02 in
let v12h = vec_interleave_high v12 v12 in
let v22 = vec_add_mod v12 v12h in
lemma_fmul_r4_normalize51 #1 o2;
let v03 = vec_interleave_high_n 2 o3 o3 in
let v13 = vec_add_mod o3 v03 in
let v13h = vec_interleave_high v13 v13 in
let v23 = vec_add_mod v13 v13h in
lemma_fmul_r4_normalize51 #1 o3;
let v04 = vec_interleave_high_n 2 o4 o4 in
let v14 = vec_add_mod o4 v04 in
let v14h = vec_interleave_high v14 v14 in
let v24 = vec_add_mod v14 v14h in
lemma_fmul_r4_normalize51 #2 o4;
let res = (v20, v21, v22, v23, v24) in
lemma_fmul_r4_normalize51_expand res fa;
res
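// Top-level lemma for the 4-lane final reduction: multiply the accumulator
// lanes by (r^4, r^3, r^2, r), sum the four lanes horizontally, then run a
// full carry; lane 0 of the result matches the Vec.normalize_4 specification.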
val fmul_r4_normalize5_lemma:
acc:felem5 4
-> r:felem5 4
-> r_5:felem5 4
-> r4:felem5 4 ->
Lemma
(requires
felem_fits5 acc (3, 3, 3, 3, 3) /\
felem_fits5 r (1, 1, 1, 1, 1) /\
felem_fits5 r4 (2, 2, 2, 2, 2) /\
r_5 == precomp_r5 r /\
feval5 r4 == Vec.compute_r4 (feval5 r).[0])
(ensures
(let out = fmul_r4_normalize5 acc r r_5 r4 in
felem_fits5 out (2, 1, 1, 1, 1) /\
(feval5 out).[0] == Vec.normalize_4 (feval5 r).[0] (feval5 acc)))
[SMTPat (fmul_r4_normalize5 acc r r_5 r4)]
#restart-solver
#push-options "--z3rlimit 500"
let fmul_r4_normalize5_lemma acc fr fr_5 fr4 =
let fr2 = fmul_r5 #4 fr fr fr_5 in
let fr3 = fmul_r5 #4 fr2 fr fr_5 in
let out = fmul_r4_normalize50 acc fr fr2 fr3 fr4 in
let v2 = fmul_r4_normalize51 out in
let res = carry_full_felem5 v2 in
carry_full_felem5_lemma v2
#pop-options
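// Loading of 128-bit blocks: each lane's (lo, hi) pair is split into five
// limbs of at most 26 bits whose evaluation is hi * 2^64 + lo; this value is
// below 2^128 and hence already reduced modulo the prime 2^130 - 5.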
val load_felem5_lemma:
#w:lanes
-> lo:uint64xN w
-> hi:uint64xN w ->
Lemma
(let f = load_felem5 #w lo hi in
felem_fits5 f (1, 1, 1, 1, 1) /\
felem_less5 f (pow2 128) /\
feval5 f == createi #Vec.pfelem w
(fun i -> (uint64xN_v hi).[i] * pow2 64 + (uint64xN_v lo).[i]))
let load_felem5_lemma #w lo hi =
let f = load_felem5 #w lo hi in
assert_norm (pow2 64 * pow2 64 = pow2 128);
assert_norm (pow2 128 < Vec.prime);
let res = createi #Vec.pfelem w
(fun i -> (uint64xN_v hi).[i] * pow2 64 + (uint64xN_v lo).[i]) in
match w with
| 1 ->
load_felem5_lemma_i #w lo hi 0;
eq_intro (feval5 f) res
| 2 ->
load_felem5_lemma_i #w lo hi 0;
load_felem5_lemma_i #w lo hi 1;
eq_intro (feval5 f) res
| 4 ->
load_felem5_lemma_i #w lo hi 0;
load_felem5_lemma_i #w lo hi 1;
load_felem5_lemma_i #w lo hi 2;
load_felem5_lemma_i #w lo hi 3;
eq_intro (feval5 f) res
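// load_felem5_4_interleave relates the interleave-based shuffle of (lo, hi)
// used by the 4-lane loader to an equivalent shift-and-or form: t0 and m4
// gather the even- and odd-indexed 64-bit words, and t3 equals
// (t0 >> 48) | (m4 << 16).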
val load_felem5_4_interleave: lo:uint64xN 4 -> hi:uint64xN 4 -> Lemma
(let m0 = vec_interleave_low_n 2 lo hi in
let m1 = vec_interleave_high_n 2 lo hi in
let m2 = cast U64 4 (vec_shift_right (cast U128 2 m0) 48ul) in
let m3 = cast U64 4 (vec_shift_right (cast U128 2 m1) 48ul) in
let m4 = vec_interleave_high m0 m1 in
let t0 = vec_interleave_low m0 m1 in
let t3 = vec_interleave_low m2 m3 in
vec_v m4 == create4 (vec_v lo).[1] (vec_v lo).[3] (vec_v hi).[1] (vec_v hi).[3] /\
vec_v t0 == create4 (vec_v lo).[0] (vec_v lo).[2] (vec_v hi).[0] (vec_v hi).[2] /\
t3 == vec_or (vec_shift_right t0 48ul) (vec_shift_left m4 16ul))
let load_felem5_4_interleave lo hi =
let m0 = vec_interleave_low_n 2 lo hi in
vec_interleave_low_n_lemma_uint64_4_2 lo hi;
//assert (vec_v m0 == create4 (vec_v lo).[0] (vec_v lo).[1] (vec_v hi).[0] (vec_v hi).[1]);
let m1 = vec_interleave_high_n 2 lo hi in
vec_interleave_high_n_lemma_uint64_4_2 lo hi;
//assert (vec_v m1 == create4 (vec_v lo).[2] (vec_v lo).[3] (vec_v hi).[2] (vec_v hi).[3]);
let m4 = vec_interleave_high m0 m1 in
vec_interleave_high_lemma_uint64_4 m0 m1;
//assert (vec_v m4 == create4 (vec_v m0).[1] (vec_v m1).[1] (vec_v m0).[3] (vec_v m1).[3]);
assert (vec_v m4 == create4 (vec_v lo).[1] (vec_v lo).[3] (vec_v hi).[1] (vec_v hi).[3]);
let t0 = vec_interleave_low m0 m1 in
vec_interleave_low_lemma_uint64_4 m0 m1;
//assert (vec_v t0 == create4 (vec_v m0).[0] (vec_v m1).[0] (vec_v m0).[2] (vec_v m1).[2]);
assert (vec_v t0 == create4 (vec_v lo).[0] (vec_v lo).[2] (vec_v hi).[0] (vec_v hi).[2]);
let m2 = cast U64 4 (vec_shift_right (cast U128 2 m0) 48ul) in
vec_shift_right_uint128_small2 m0 48ul;
assert ((vec_v m2).[0] == (((vec_v lo).[0] >>. 48ul) |. ((vec_v lo).[1] <<. 16ul)));
assert ((vec_v m2).[2] == (((vec_v hi).[0] >>. 48ul) |. ((vec_v hi).[1] <<. 16ul)));
let m3 = cast U64 4 (vec_shift_right (cast U128 2 m1) 48ul) in
vec_shift_right_uint128_small2 m1 48ul;
assert ((vec_v m3).[0] == (((vec_v lo).[2] >>. 48ul) |. ((vec_v lo).[3] <<. 16ul)));
assert ((vec_v m3).[2] == (((vec_v hi).[2] >>. 48ul) |. ((vec_v hi).[3] <<. 16ul)));
let t3 = vec_interleave_low m2 m3 in
vec_interleave_low_lemma_uint64_4 m2 m3;
eq_intro (vec_v t3) (vec_v (vec_or (vec_shift_right t0 48ul) (vec_shift_left m4 16ul)));
vecv_extensionality t3 (vec_or (vec_shift_right t0 48ul) (vec_shift_left m4 16ul))
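// load_felem5_4_compact is a noextract specification of that loader: each
// lane's 128-bit value (lo, hi) is cut directly into five limbs of at most
// 26 bits.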
noextract
val load_felem5_4_compact: lo:uint64xN 4 -> hi:uint64xN 4 -> felem5 4
let load_felem5_4_compact lo hi =
let mask26 = mask26 4 in
let t3 = vec_or (vec_shift_right lo 48ul) (vec_shift_left hi 16ul) in
let o0 = vec_and lo mask26 in
let o1 = vec_and (vec_shift_right lo 26ul) mask26 in
let o2 = vec_and (vec_shift_right t3 4ul) mask26 in
let o3 = vec_and (vec_shift_right t3 30ul) mask26 in
let o4 = vec_shift_right hi 40ul in
(o0, o1, o2, o3, o4)
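// Bit layout of the split above, per lane, for the 128-bit value hi * 2^64 + lo:
// o0 = bits 0..25 and o1 = bits 26..51, both taken from lo;
// t3 = (lo >> 48) | (hi << 16) exposes bits 48..111, so
// o2 = bits 52..77, o3 = bits 78..103, and o4 = hi >> 40 = bits 104..127.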
val load_felem5_4_compact_lemma_i: lo:uint64xN 4 -> hi:uint64xN 4 -> i:nat{i < 4} ->
Lemma
(let f = as_tup64_i (load_felem5_4_compact lo hi) i in
tup64_fits5 f (1, 1, 1, 1, 1) /\
as_nat5 f < pow2 128 /\
as_nat5 f % Vec.prime == (uint64xN_v hi).[i] * pow2 64 + (uint64xN_v lo).[i])
let load_felem5_4_compact_lemma_i lo hi i =
assert (as_tup64_i (load_felem5_4_compact lo hi) i == load_tup64_4_compact (vec_v lo).[i] (vec_v hi).[i]);
load_tup64_4_compact_lemma (vec_v lo).[i] (vec_v hi).[i]
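// load_felem5_4_compact_lemma_i reduces the per-lane statement to the scalar
// lemma load_tup64_4_compact_lemma; the val below lifts it to all four lanes.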
val load_felem5_4_lemma: lo:uint64xN 4 -> hi:uint64xN 4 ->
Lemma
(let f = load_felem5_4_compact lo hi in
felem_fits5 f (1, 1, 1, 1, 1) /\
felem_less5 f (pow2 128) /\
feval5 f == createi #Vec.pfelem 4 (fun i -> (uint64xN_v hi).[i] * pow2 64 + (uint64xN_v lo).[i])) | false | false | Hacl.Spec.Poly1305.Field32xN.Lemmas.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 100,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val load_felem5_4_lemma: lo:uint64xN 4 -> hi:uint64xN 4 ->
Lemma
(let f = load_felem5_4_compact lo hi in
felem_fits5 f (1, 1, 1, 1, 1) /\
felem_less5 f (pow2 128) /\
feval5 f == createi #Vec.pfelem 4 (fun i -> (uint64xN_v hi).[i] * pow2 64 + (uint64xN_v lo).[i])) | [] | Hacl.Spec.Poly1305.Field32xN.Lemmas.load_felem5_4_lemma | {
"file_name": "code/poly1305/Hacl.Spec.Poly1305.Field32xN.Lemmas.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | lo: Hacl.Spec.Poly1305.Field32xN.uint64xN 4 -> hi: Hacl.Spec.Poly1305.Field32xN.uint64xN 4
-> FStar.Pervasives.Lemma
(ensures
(let f = Hacl.Spec.Poly1305.Field32xN.Lemmas.load_felem5_4_compact lo hi in
Hacl.Spec.Poly1305.Field32xN.felem_fits5 f (1, 1, 1, 1, 1) /\
Hacl.Spec.Poly1305.Field32xN.felem_less5 f (Prims.pow2 128) /\
Hacl.Spec.Poly1305.Field32xN.feval5 f ==
Lib.Sequence.createi 4
(fun i ->
(Hacl.Spec.Poly1305.Field32xN.uint64xN_v hi).[ i ] * Prims.pow2 64 +
(Hacl.Spec.Poly1305.Field32xN.uint64xN_v lo).[ i ]))) | {
"end_col": 25,
"end_line": 741,
"start_col": 31,
"start_line": 730
} |
FStar.Pervasives.Lemma | val fmul_r2_normalize5_lemma:
acc:felem5 2
-> r:felem5 2
-> r2:felem5 2 ->
Lemma
(requires
felem_fits5 acc (3, 3, 3, 3, 3) /\
felem_fits5 r (1, 1, 1, 1, 1) /\
felem_fits5 r2 (2, 2, 2, 2, 2) /\
feval5 r2 == Vec.compute_r2 (feval5 r).[0])
(ensures
(let out = fmul_r2_normalize5 acc r r2 in
felem_fits5 out (2, 1, 1, 1, 1) /\
(feval5 out).[0] == Vec.normalize_2 (feval5 r).[0] (feval5 acc)))
[SMTPat (fmul_r2_normalize5 acc r r2)] | [
{
"abbrev": true,
"full_module": "Hacl.Spec.Poly1305.Vec",
"short_module": "Vec"
},
{
"abbrev": false,
"full_module": "Hacl.Poly1305.Field32xN.Lemmas2",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Poly1305.Field32xN.Lemmas1",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Poly1305.Field32xN.Lemmas0",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305.Field32xN",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305.Field32xN",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305.Field32xN",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let fmul_r2_normalize5_lemma acc r r2 =
let a = fmul_r2_normalize50 acc r r2 in
let (a0, a1, a2, a3, a4) = a in
let a10 = vec_interleave_high a0 a0 in
vec_interleave_high_lemma2 a0 a0;
let a11 = vec_interleave_high a1 a1 in
vec_interleave_high_lemma2 a1 a1;
let a12 = vec_interleave_high a2 a2 in
vec_interleave_high_lemma2 a2 a2;
let a13 = vec_interleave_high a3 a3 in
vec_interleave_high_lemma2 a3 a3;
let a14 = vec_interleave_high a4 a4 in
vec_interleave_high_lemma2 a4 a4;
let fa1 = (a10, a11, a12, a13, a14) in
eq_intro (feval5 fa1) (create2 (feval5 a).[1] (feval5 a).[1]);
assert (feval5 fa1 == create2 (feval5 a).[1] (feval5 a).[1]);
assert (felem_fits5 fa1 (1, 2, 1, 1, 2));
let out = fmul_r2_normalize51 a fa1 in
assert ((feval5 out).[0] == Vec.pfadd (feval5 a).[0] (feval5 a).[1]);
let res = carry_full_felem5 out in
carry_full_felem5_lemma out | val fmul_r2_normalize5_lemma:
acc:felem5 2
-> r:felem5 2
-> r2:felem5 2 ->
Lemma
(requires
felem_fits5 acc (3, 3, 3, 3, 3) /\
felem_fits5 r (1, 1, 1, 1, 1) /\
felem_fits5 r2 (2, 2, 2, 2, 2) /\
feval5 r2 == Vec.compute_r2 (feval5 r).[0])
(ensures
(let out = fmul_r2_normalize5 acc r r2 in
felem_fits5 out (2, 1, 1, 1, 1) /\
(feval5 out).[0] == Vec.normalize_2 (feval5 r).[0] (feval5 acc)))
[SMTPat (fmul_r2_normalize5 acc r r2)]
let fmul_r2_normalize5_lemma acc r r2 = | false | null | true | let a = fmul_r2_normalize50 acc r r2 in
let a0, a1, a2, a3, a4 = a in
let a10 = vec_interleave_high a0 a0 in
vec_interleave_high_lemma2 a0 a0;
let a11 = vec_interleave_high a1 a1 in
vec_interleave_high_lemma2 a1 a1;
let a12 = vec_interleave_high a2 a2 in
vec_interleave_high_lemma2 a2 a2;
let a13 = vec_interleave_high a3 a3 in
vec_interleave_high_lemma2 a3 a3;
let a14 = vec_interleave_high a4 a4 in
vec_interleave_high_lemma2 a4 a4;
let fa1 = (a10, a11, a12, a13, a14) in
eq_intro (feval5 fa1) (create2 (feval5 a).[ 1 ] (feval5 a).[ 1 ]);
assert (feval5 fa1 == create2 (feval5 a).[ 1 ] (feval5 a).[ 1 ]);
assert (felem_fits5 fa1 (1, 2, 1, 1, 2));
let out = fmul_r2_normalize51 a fa1 in
assert ((feval5 out).[ 0 ] == Vec.pfadd (feval5 a).[ 0 ] (feval5 a).[ 1 ]);
let res = carry_full_felem5 out in
carry_full_felem5_lemma out | {
"checked_file": "Hacl.Spec.Poly1305.Field32xN.Lemmas.fst.checked",
"dependencies": [
"prims.fst.checked",
"Lib.Sequence.fsti.checked",
"Lib.IntVector.fsti.checked",
"Lib.IntTypes.fsti.checked",
"Hacl.Spec.Poly1305.Vec.fst.checked",
"Hacl.Spec.Poly1305.Field32xN.fst.checked",
"Hacl.Poly1305.Field32xN.Lemmas2.fst.checked",
"Hacl.Poly1305.Field32xN.Lemmas1.fst.checked",
"Hacl.Poly1305.Field32xN.Lemmas0.fst.checked",
"Hacl.Impl.Poly1305.Lemmas.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Classical.fsti.checked",
"FStar.Calc.fsti.checked"
],
"interface_file": false,
"source_file": "Hacl.Spec.Poly1305.Field32xN.Lemmas.fst"
} | [
"lemma"
] | [
"Hacl.Spec.Poly1305.Field32xN.felem5",
"Hacl.Spec.Poly1305.Field32xN.uint64xN",
"Hacl.Poly1305.Field32xN.Lemmas1.carry_full_felem5_lemma",
"Hacl.Spec.Poly1305.Field32xN.carry_full_felem5",
"Prims.unit",
"Prims._assert",
"Prims.eq2",
"Hacl.Spec.Poly1305.Vec.pfelem",
"Lib.Sequence.op_String_Access",
"Hacl.Spec.Poly1305.Field32xN.feval5",
"Hacl.Spec.Poly1305.Vec.pfadd",
"Hacl.Spec.Poly1305.Field32xN.Lemmas.fmul_r2_normalize51",
"Hacl.Spec.Poly1305.Field32xN.felem_fits5",
"FStar.Pervasives.Native.Mktuple5",
"Prims.nat",
"Lib.Sequence.lseq",
"Lib.Sequence.create2",
"Lib.Sequence.eq_intro",
"FStar.Pervasives.Native.tuple5",
"Lib.IntVector.vec_interleave_high_lemma2",
"Lib.IntTypes.U64",
"Lib.IntVector.vec_t",
"Lib.IntVector.vec_interleave_high",
"Hacl.Spec.Poly1305.Field32xN.Lemmas.fmul_r2_normalize50"
] | [] | module Hacl.Spec.Poly1305.Field32xN.Lemmas
open Lib.IntTypes
open Lib.IntVector
open Lib.Sequence
open FStar.Mul
open Hacl.Spec.Poly1305.Field32xN
open Hacl.Poly1305.Field32xN.Lemmas0
open Hacl.Poly1305.Field32xN.Lemmas1
open Hacl.Poly1305.Field32xN.Lemmas2
module Vec = Hacl.Spec.Poly1305.Vec
#set-options "--z3rlimit 100 --max_fuel 0 --initial_ifuel 1 --max_ifuel 1 --using_facts_from '* -FStar.Seq'"
val lemma_feval_is_fas_nat_i:
#w:lanes
-> f:felem5 w{felem_less5 f (pow2 128)}
-> i:size_nat{i < w} ->
Lemma ((feval5 f).[i] == (fas_nat5 f).[i])
let lemma_feval_is_fas_nat_i #w f i =
assert_norm (pow2 128 < Vec.prime);
assert ((feval5 f).[i] == (as_nat5 (transpose f).[i]) % Vec.prime);
FStar.Math.Lemmas.modulo_lemma (as_nat5 (transpose f).[i]) Vec.prime
val lemma_feval_is_fas_nat:
#w:lanes
-> f:felem5 w{felem_less5 f (pow2 128)} ->
Lemma (forall (i:nat). i < w ==> (fas_nat5 f).[i] == (feval5 f).[i])
let lemma_feval_is_fas_nat #w f =
FStar.Classical.forall_intro (lemma_feval_is_fas_nat_i #w f)
val precomp_r5_fits_lemma:
#w:lanes
-> r:felem5 w{felem_fits5 r (1, 1, 1, 1, 1)} ->
Lemma (felem_fits5 (precomp_r5 #w r) (5, 5, 5, 5, 5))
[SMTPat (precomp_r5 #w r)]
let precomp_r5_fits_lemma #w r =
FStar.Classical.forall_intro (precomp_r5_as_tup64 #w r)
val precomp_r5_fits_lemma2:
#w:lanes
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)} ->
Lemma (felem_fits5 (precomp_r5 #w r) (10, 10, 10, 10, 10))
[SMTPat (precomp_r5 #w r)]
let precomp_r5_fits_lemma2 #w r =
FStar.Classical.forall_intro (precomp_r5_as_tup64 #w r)
val precomp_r5_zeros: w:lanes -> Lemma
(let r = (zero w, zero w, zero w, zero w, zero w) in
precomp_r5 r == (zero w, zero w, zero w, zero w, zero w))
let precomp_r5_zeros w =
let r = (zero w, zero w, zero w, zero w, zero w) in
let (r0, r1, r2, r3, r4) = precomp_r5 r in
let aux (i:nat{i < w}) : Lemma ((vec_v (vec_smul_mod (zero w) (u64 5))).[i] == u64 0) = () in
Classical.forall_intro aux;
eq_intro (vec_v (vec_smul_mod (zero w) (u64 5))) (vec_v (zero w));
vecv_extensionality (vec_smul_mod (zero w) (u64 5)) (zero w)
val fadd5_fits_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (2,2,2,2,2)}
-> f2:felem5 w{felem_fits5 f2 (1,1,1,1,1)} ->
Lemma (felem_fits5 (fadd5 f1 f2) (3,3,3,3,3))
[SMTPat (fadd5 f1 f2)]
let fadd5_fits_lemma #w f1 f2 =
let (f10, f11, f12, f13, f14) = f1 in
let (f20, f21, f22, f23, f24) = f2 in
let o = fadd5 f1 f2 in
vec_add_mod_lemma f10 f20;
vec_add_mod_lemma f11 f21;
vec_add_mod_lemma f12 f22;
vec_add_mod_lemma f13 f23;
vec_add_mod_lemma f14 f24
val fadd5_eval_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (2,2,2,2,2)}
-> f2:felem5 w{felem_fits5 f2 (1,1,1,1,1)} ->
Lemma (feval5 (fadd5 f1 f2) == map2 Vec.pfadd (feval5 f1) (feval5 f2))
[SMTPat (fadd5 f1 f2)]
let fadd5_eval_lemma #w f1 f2 =
let o = fadd5 f1 f2 in
FStar.Classical.forall_intro (fadd5_eval_lemma_i f1 f2);
eq_intro (feval5 o) (map2 Vec.pfadd (feval5 f1) (feval5 f2))
val mul_felem5_fits_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10)} ->
Lemma (felem_wide_fits5 (mul_felem5 #w f1 r r5) (126, 102, 78, 54, 30))
[SMTPat (mul_felem5 #w f1 r r5)]
let mul_felem5_fits_lemma #w f1 r r5 =
let (r0, r1, r2, r3, r4) = r in
let (f10, f11, f12, f13, f14) = f1 in
let (r50, r51, r52, r53, r54) = r5 in
let (a0,a1,a2,a3,a4) = smul_felem5 #w f10 (r0,r1,r2,r3,r4) in
smul_felem5_fits_lemma #w #3 #(2,2,2,2,2) f10 (r0,r1,r2,r3,r4);
let (a10,a11,a12,a13,a14) = smul_add_felem5 #w f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4) in
smul_add_felem5_fits_lemma #w #3 #(10,2,2,2,2) #(6,6,6,6,6) f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4);
let (a20,a21,a22,a23,a24) = smul_add_felem5 #w f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14) in
smul_add_felem5_fits_lemma #w #3 #(10,10,2,2,2) #(36,12,12,12,12) f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14);
let (a30,a31,a32,a33,a34) = smul_add_felem5 #w f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24) in
smul_add_felem5_fits_lemma #w #3 #(10,10,10,2,2) #(66,42,18,18,18) f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24);
let (a40,a41,a42,a43,a44) = smul_add_felem5 #w f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34) in
smul_add_felem5_fits_lemma #w #3 #(10,10,10,10,2) #(96,72,48,24,24) f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34)
val mul_felem5_eval_lemma_i:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10) /\ r5 == precomp_r5 r}
-> i:nat{i < w} ->
Lemma ((feval5 (mul_felem5 #w f1 r r5)).[i] == (feval5 f1).[i] `Vec.pfmul` (feval5 r).[i])
let mul_felem5_eval_lemma_i #w f1 r r5 i =
let (r0, r1, r2, r3, r4) = r in
let (f10, f11, f12, f13, f14) = f1 in
let (r50, r51, r52, r53, r54) = r5 in
let (a0,a1,a2,a3,a4) = smul_felem5 #w f10 (r0,r1,r2,r3,r4) in
smul_felem5_eval_lemma #w #3 #(2,2,2,2,2) f10 (r0,r1,r2,r3,r4);
smul_felem5_fits_lemma #w #3 #(2,2,2,2,2) f10 (r0,r1,r2,r3,r4);
assert ((fas_nat5 (a0,a1,a2,a3,a4)).[i] == (uint64xN_v f10).[i] * (fas_nat5 (r0,r1,r2,r3,r4)).[i]);
let (a10,a11,a12,a13,a14) = smul_add_felem5 #w f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4) in
smul_add_felem5_eval_lemma #w #3 #(10,2,2,2,2) #(6,6,6,6,6) f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4);
smul_add_felem5_fits_lemma #w #3 #(10,2,2,2,2) #(6,6,6,6,6) f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4);
assert ((fas_nat5 (a10,a11,a12,a13,a14)).[i] == (fas_nat5 (a0,a1,a2,a3,a4)).[i] + (uint64xN_v f11).[i] * (fas_nat5 (r54,r0,r1,r2,r3)).[i]);
let (a20,a21,a22,a23,a24) = smul_add_felem5 #w f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14) in
smul_add_felem5_eval_lemma #w #3 #(10,10,2,2,2) #(36,12,12,12,12) f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14);
smul_add_felem5_fits_lemma #w #3 #(10,10,2,2,2) #(36,12,12,12,12) f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14);
assert ((fas_nat5 (a20,a21,a22,a23,a24)).[i] == (fas_nat5 (a10,a11,a12,a13,a14)).[i] + (uint64xN_v f12).[i] * (fas_nat5 (r53,r54,r0,r1,r2)).[i]);
let (a30,a31,a32,a33,a34) = smul_add_felem5 #w f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24) in
smul_add_felem5_eval_lemma #w #3 #(10,10,10,2,2) #(66,42,18,18,18) f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24);
smul_add_felem5_fits_lemma #w #3 #(10,10,10,2,2) #(66,42,18,18,18) f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24);
assert ((fas_nat5 (a30,a31,a32,a33,a34)).[i] == (fas_nat5 (a20,a21,a22,a23,a24)).[i] + (uint64xN_v f13).[i] * (fas_nat5 (r52,r53,r54,r0,r1)).[i]);
let (a40,a41,a42,a43,a44) = smul_add_felem5 #w f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34) in
smul_add_felem5_eval_lemma #w #3 #(10,10,10,10,2) #(96,72,48,24,24) f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34);
smul_add_felem5_fits_lemma #w #3 #(10,10,10,10,2) #(96,72,48,24,24) f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34);
assert ((fas_nat5 (a40,a41,a42,a43,a44)).[i] == (fas_nat5 (a30,a31,a32,a33,a34)).[i] + (uint64xN_v f14).[i] * (fas_nat5 (r51,r52,r53,r54,r0)).[i]);
assert ((fas_nat5 (a40,a41,a42,a43,a44)).[i] ==
(uint64xN_v f10).[i] * (fas_nat5 (r0,r1,r2,r3,r4)).[i] +
(uint64xN_v f11).[i] * (fas_nat5 (r54,r0,r1,r2,r3)).[i] +
(uint64xN_v f12).[i] * (fas_nat5 (r53,r54,r0,r1,r2)).[i] +
(uint64xN_v f13).[i] * (fas_nat5 (r52,r53,r54,r0,r1)).[i] +
(uint64xN_v f14).[i] * (fas_nat5 (r51,r52,r53,r54,r0)).[i]);
mul_felem5_eval_as_tup64 #w f1 r r5 i;
mul_felem5_lemma (as_tup64_i f1 i) (as_tup64_i r i)
val mul_felem5_eval_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10) /\ r5 == precomp_r5 r} ->
Lemma (feval5 (mul_felem5 #w f1 r r5) == map2 (Vec.pfmul) (feval5 f1) (feval5 r))
[SMTPat (mul_felem5 #w f1 r r5)]
let mul_felem5_eval_lemma #w f1 r r5 =
let tmp = map2 (Vec.pfmul) (feval5 f1) (feval5 r) in
FStar.Classical.forall_intro (mul_felem5_eval_lemma_i #w f1 r r5);
eq_intro (feval5 (mul_felem5 #w f1 r r5)) tmp
val fmul_r5_fits_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10)} ->
Lemma (felem_fits5 (fmul_r5 #w f1 r r5) (1, 2, 1, 1, 2))
[SMTPat (fmul_r5 #w f1 r r5)]
let fmul_r5_fits_lemma #w f1 r r5 =
let tmp = mul_felem5 f1 r r5 in
mul_felem5_fits_lemma #w f1 r r5;
let res = carry_wide_felem5 tmp in
carry_wide_felem5_fits_lemma #w tmp
val fmul_r5_eval_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10) /\ r5 == precomp_r5 r} ->
Lemma (feval5 (fmul_r5 #w f1 r r5) == map2 (Vec.pfmul) (feval5 f1) (feval5 r))
[SMTPat (fmul_r5 #w f1 r r5)]
let fmul_r5_eval_lemma #w f1 r r5 =
let tmp = mul_felem5 f1 r r5 in
mul_felem5_eval_lemma #w f1 r r5;
let res = carry_wide_felem5 tmp in
carry_wide_felem5_eval_lemma #w tmp
val fadd_mul_r5_fits_lemma:
#w:lanes
-> acc:felem5 w{felem_fits5 acc (2, 2, 2, 2, 2)}
-> f1:felem5 w{felem_fits5 f1 (1, 1, 1, 1, 1)}
-> r:felem5 w{felem_fits5 r (1, 1, 1, 1, 1)}
-> r5:felem5 w{felem_fits5 r5 (5, 5, 5, 5, 5)} ->
Lemma (felem_fits5 (fadd_mul_r5 acc f1 r r5) (1, 2, 1, 1, 2))
[SMTPat (fadd_mul_r5 acc f1 r r5)]
let fadd_mul_r5_fits_lemma #w acc f1 r r5 =
let acc1 = fadd5 acc f1 in
fadd5_fits_lemma #w acc f1;
let tmp = mul_felem5 acc1 r r5 in
mul_felem5_fits_lemma #w acc1 r r5;
let res = carry_wide_felem5 tmp in
carry_wide_felem5_fits_lemma #w tmp
val fadd_mul_r5_eval_lemma:
#w:lanes
-> acc:felem5 w{felem_fits5 acc (2, 2, 2, 2, 2)}
-> f1:felem5 w{felem_fits5 f1 (1, 1, 1, 1, 1)}
-> r:felem5 w{felem_fits5 r (1, 1, 1, 1, 1)}
-> r5:felem5 w{felem_fits5 r5 (5, 5, 5, 5, 5) /\ r5 == precomp_r5 r} ->
Lemma (feval5 (fadd_mul_r5 acc f1 r r5) ==
map2 (Vec.pfmul) (map2 (Vec.pfadd) (feval5 acc) (feval5 f1)) (feval5 r))
[SMTPat (fadd_mul_r5 acc f1 r r5)]
let fadd_mul_r5_eval_lemma #w acc f1 r r5 =
let acc1 = fadd5 acc f1 in
fadd5_eval_lemma #w acc f1;
let tmp = mul_felem5 acc1 r r5 in
mul_felem5_eval_lemma #w acc1 r r5;
let res = carry_wide_felem5 tmp in
carry_wide_felem5_eval_lemma #w tmp
val reduce_felem5_eval_lemma:
#w:lanes
-> f:felem5 w{felem_fits5 f (2, 2, 2, 2, 2)} ->
Lemma
(felem_fits5 (reduce_felem5 f) (1, 1, 1, 1, 1) /\
(feval5 f).[0] == (fas_nat5 (reduce_felem5 f)).[0])
[SMTPat (reduce_felem5 f)]
let reduce_felem5_eval_lemma #w f =
carry_full_felem5_eval_lemma f;
carry_full_felem5_fits_lemma f;
let f = carry_full_felem5 f in
carry_reduce_felem5_lemma #w f;
subtract_p5_felem5_lemma #w (carry_full_felem5 f)
val fmul_r2_normalize50:
acc:felem5 2
-> r:felem5 2
-> r2:felem5 2 ->
Pure (felem5 2)
(requires
felem_fits5 acc (3, 3, 3, 3, 3) /\
felem_fits5 r (1, 1, 1, 1, 1) /\
felem_fits5 r2 (2, 2, 2, 2, 2) /\
feval5 r2 == Vec.compute_r2 (feval5 r).[0])
(ensures fun a ->
let fr21 = create2 (feval5 r2).[0] (feval5 r).[0] in
feval5 a == Vec.fmul (feval5 acc) fr21 /\
felem_fits5 a (1, 2, 1, 1, 2))
let fmul_r2_normalize50 (a0, a1, a2, a3, a4) (r0, r1, r2, r3, r4) (r20, r21, r22, r23, r24) =
let r210 = vec_interleave_low r20 r0 in
vec_interleave_low_lemma2 r20 r0;
let r211 = vec_interleave_low r21 r1 in
vec_interleave_low_lemma2 r21 r1;
let r212 = vec_interleave_low r22 r2 in
vec_interleave_low_lemma2 r22 r2;
let r213 = vec_interleave_low r23 r3 in
vec_interleave_low_lemma2 r23 r3;
let r214 = vec_interleave_low r24 r4 in
vec_interleave_low_lemma2 r24 r4;
let acc = (a0, a1, a2, a3, a4) in
let fr = (r0, r1, r2, r3, r4) in
let fr2 = (r20, r21, r22, r23, r24) in
assert ((feval5 fr2).[0] == Vec.pfmul ((feval5 fr).[0]) ((feval5 fr).[0]));
let fr21 = (r210, r211, r212, r213, r214) in
eq_intro (feval5 fr21) (create2 (feval5 fr2).[0] (feval5 fr).[0]);
assert (feval5 fr21 == create2 (feval5 fr2).[0] (feval5 fr).[0]);
assert (felem_fits5 fr21 (2, 2, 2, 2, 2));
let fr215 = precomp_r5 #2 fr21 in
let a = fmul_r5 #2 acc fr21 fr215 in
fmul_r5_eval_lemma acc fr21 fr215;
fmul_r5_fits_lemma acc fr21 fr215;
assert (feval5 a == Vec.fmul (feval5 acc) (feval5 fr21));
assert (felem_fits5 a (1, 2, 1, 1, 2));
a
#push-options "--z3rlimit 150"
val fmul_r2_normalize51:
a:felem5 2
-> fa1:felem5 2 ->
Pure (felem5 2)
(requires
felem_fits5 a (1, 2, 1, 1, 2) /\
felem_fits5 fa1 (1, 2, 1, 1, 2) /\
feval5 fa1 == create2 (feval5 a).[1] (feval5 a).[1])
(ensures fun out ->
(feval5 out).[0] == Vec.pfadd (feval5 a).[0] (feval5 a).[1] /\
felem_fits5 out (2, 4, 2, 2, 4))
let fmul_r2_normalize51 a fa1 =
let (a0, a1, a2, a3, a4) = a in
let (a10, a11, a12, a13, a14) = fa1 in
let o0 = vec_add_mod a0 a10 in
let o1 = vec_add_mod a1 a11 in
let o2 = vec_add_mod a2 a12 in
let o3 = vec_add_mod a3 a13 in
let o4 = vec_add_mod a4 a14 in
let out = (o0, o1, o2, o3, o4) in
let (a0, a1, a2, a3, a4) = as_tup64_i a 0 in
let (a10, a11, a12, a13, a14) = as_tup64_i fa1 0 in
let (o0, o1, o2, o3, o4) = as_tup64_i out 0 in
FStar.Math.Lemmas.modulo_lemma (v a0 + v a10) (pow2 64);
FStar.Math.Lemmas.modulo_lemma (v a1 + v a11) (pow2 64);
FStar.Math.Lemmas.modulo_lemma (v a2 + v a12) (pow2 64);
FStar.Math.Lemmas.modulo_lemma (v a3 + v a13) (pow2 64);
FStar.Math.Lemmas.modulo_lemma (v a4 + v a14) (pow2 64);
assert (felem_fits5 out (2, 4, 2, 2, 4));
calc (==) {
((feval5 a).[0] + (feval5 a).[1]) % Vec.prime;
(==) { }
(as_nat5 (a0, a1, a2, a3, a4) % Vec.prime + as_nat5 (a10, a11, a12, a13, a14) % Vec.prime) % Vec.prime;
(==) { FStar.Math.Lemmas.lemma_mod_plus_distr_l
(as_nat5 (a0, a1, a2, a3, a4)) (as_nat5 (a10, a11, a12, a13, a14)) Vec.prime;
FStar.Math.Lemmas.lemma_mod_plus_distr_r
(as_nat5 (a0, a1, a2, a3, a4) % Vec.prime) (as_nat5 (a10, a11, a12, a13, a14)) Vec.prime }
(as_nat5 (a0, a1, a2, a3, a4) + as_nat5 (a10, a11, a12, a13, a14)) % Vec.prime;
(==) { }
(feval5 out).[0];
};
out
#pop-options
val fmul_r2_normalize5_lemma:
acc:felem5 2
-> r:felem5 2
-> r2:felem5 2 ->
Lemma
(requires
felem_fits5 acc (3, 3, 3, 3, 3) /\
felem_fits5 r (1, 1, 1, 1, 1) /\
felem_fits5 r2 (2, 2, 2, 2, 2) /\
feval5 r2 == Vec.compute_r2 (feval5 r).[0])
(ensures
(let out = fmul_r2_normalize5 acc r r2 in
felem_fits5 out (2, 1, 1, 1, 1) /\
(feval5 out).[0] == Vec.normalize_2 (feval5 r).[0] (feval5 acc)))
[SMTPat (fmul_r2_normalize5 acc r r2)] | false | false | Hacl.Spec.Poly1305.Field32xN.Lemmas.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 100,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val fmul_r2_normalize5_lemma:
acc:felem5 2
-> r:felem5 2
-> r2:felem5 2 ->
Lemma
(requires
felem_fits5 acc (3, 3, 3, 3, 3) /\
felem_fits5 r (1, 1, 1, 1, 1) /\
felem_fits5 r2 (2, 2, 2, 2, 2) /\
feval5 r2 == Vec.compute_r2 (feval5 r).[0])
(ensures
(let out = fmul_r2_normalize5 acc r r2 in
felem_fits5 out (2, 1, 1, 1, 1) /\
(feval5 out).[0] == Vec.normalize_2 (feval5 r).[0] (feval5 acc)))
[SMTPat (fmul_r2_normalize5 acc r r2)] | [] | Hacl.Spec.Poly1305.Field32xN.Lemmas.fmul_r2_normalize5_lemma | {
"file_name": "code/poly1305/Hacl.Spec.Poly1305.Field32xN.Lemmas.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} |
acc: Hacl.Spec.Poly1305.Field32xN.felem5 2 ->
r: Hacl.Spec.Poly1305.Field32xN.felem5 2 ->
r2: Hacl.Spec.Poly1305.Field32xN.felem5 2
-> FStar.Pervasives.Lemma
(requires
Hacl.Spec.Poly1305.Field32xN.felem_fits5 acc (3, 3, 3, 3, 3) /\
Hacl.Spec.Poly1305.Field32xN.felem_fits5 r (1, 1, 1, 1, 1) /\
Hacl.Spec.Poly1305.Field32xN.felem_fits5 r2 (2, 2, 2, 2, 2) /\
Hacl.Spec.Poly1305.Field32xN.feval5 r2 ==
Hacl.Spec.Poly1305.Vec.compute_r2 (Hacl.Spec.Poly1305.Field32xN.feval5 r).[ 0 ])
(ensures
(let out = Hacl.Spec.Poly1305.Field32xN.fmul_r2_normalize5 acc r r2 in
Hacl.Spec.Poly1305.Field32xN.felem_fits5 out (2, 1, 1, 1, 1) /\
(Hacl.Spec.Poly1305.Field32xN.feval5 out).[ 0 ] ==
Hacl.Spec.Poly1305.Vec.normalize_2 (Hacl.Spec.Poly1305.Field32xN.feval5 r).[ 0 ]
(Hacl.Spec.Poly1305.Field32xN.feval5 acc)))
[SMTPat (Hacl.Spec.Poly1305.Field32xN.fmul_r2_normalize5 acc r r2)] | {
"end_col": 29,
"end_line": 404,
"start_col": 39,
"start_line": 381
} |
Prims.Pure | val fmul_r2_normalize51:
a:felem5 2
-> fa1:felem5 2 ->
Pure (felem5 2)
(requires
felem_fits5 a (1, 2, 1, 1, 2) /\
felem_fits5 fa1 (1, 2, 1, 1, 2) /\
feval5 fa1 == create2 (feval5 a).[1] (feval5 a).[1])
(ensures fun out ->
(feval5 out).[0] == Vec.pfadd (feval5 a).[0] (feval5 a).[1] /\
felem_fits5 out (2, 4, 2, 2, 4)) | [
{
"abbrev": true,
"full_module": "Hacl.Spec.Poly1305.Vec",
"short_module": "Vec"
},
{
"abbrev": false,
"full_module": "Hacl.Poly1305.Field32xN.Lemmas2",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Poly1305.Field32xN.Lemmas1",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Poly1305.Field32xN.Lemmas0",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305.Field32xN",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305.Field32xN",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305.Field32xN",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let fmul_r2_normalize51 a fa1 =
let (a0, a1, a2, a3, a4) = a in
let (a10, a11, a12, a13, a14) = fa1 in
let o0 = vec_add_mod a0 a10 in
let o1 = vec_add_mod a1 a11 in
let o2 = vec_add_mod a2 a12 in
let o3 = vec_add_mod a3 a13 in
let o4 = vec_add_mod a4 a14 in
let out = (o0, o1, o2, o3, o4) in
let (a0, a1, a2, a3, a4) = as_tup64_i a 0 in
let (a10, a11, a12, a13, a14) = as_tup64_i fa1 0 in
let (o0, o1, o2, o3, o4) = as_tup64_i out 0 in
FStar.Math.Lemmas.modulo_lemma (v a0 + v a10) (pow2 64);
FStar.Math.Lemmas.modulo_lemma (v a1 + v a11) (pow2 64);
FStar.Math.Lemmas.modulo_lemma (v a2 + v a12) (pow2 64);
FStar.Math.Lemmas.modulo_lemma (v a3 + v a13) (pow2 64);
FStar.Math.Lemmas.modulo_lemma (v a4 + v a14) (pow2 64);
assert (felem_fits5 out (2, 4, 2, 2, 4));
calc (==) {
((feval5 a).[0] + (feval5 a).[1]) % Vec.prime;
(==) { }
(as_nat5 (a0, a1, a2, a3, a4) % Vec.prime + as_nat5 (a10, a11, a12, a13, a14) % Vec.prime) % Vec.prime;
(==) { FStar.Math.Lemmas.lemma_mod_plus_distr_l
(as_nat5 (a0, a1, a2, a3, a4)) (as_nat5 (a10, a11, a12, a13, a14)) Vec.prime;
FStar.Math.Lemmas.lemma_mod_plus_distr_r
(as_nat5 (a0, a1, a2, a3, a4) % Vec.prime) (as_nat5 (a10, a11, a12, a13, a14)) Vec.prime }
(as_nat5 (a0, a1, a2, a3, a4) + as_nat5 (a10, a11, a12, a13, a14)) % Vec.prime;
(==) { }
(feval5 out).[0];
};
out | val fmul_r2_normalize51:
a:felem5 2
-> fa1:felem5 2 ->
Pure (felem5 2)
(requires
felem_fits5 a (1, 2, 1, 1, 2) /\
felem_fits5 fa1 (1, 2, 1, 1, 2) /\
feval5 fa1 == create2 (feval5 a).[1] (feval5 a).[1])
(ensures fun out ->
(feval5 out).[0] == Vec.pfadd (feval5 a).[0] (feval5 a).[1] /\
felem_fits5 out (2, 4, 2, 2, 4))
let fmul_r2_normalize51 a fa1 = | false | null | false | let a0, a1, a2, a3, a4 = a in
let a10, a11, a12, a13, a14 = fa1 in
let o0 = vec_add_mod a0 a10 in
let o1 = vec_add_mod a1 a11 in
let o2 = vec_add_mod a2 a12 in
let o3 = vec_add_mod a3 a13 in
let o4 = vec_add_mod a4 a14 in
let out = (o0, o1, o2, o3, o4) in
let a0, a1, a2, a3, a4 = as_tup64_i a 0 in
let a10, a11, a12, a13, a14 = as_tup64_i fa1 0 in
let o0, o1, o2, o3, o4 = as_tup64_i out 0 in
FStar.Math.Lemmas.modulo_lemma (v a0 + v a10) (pow2 64);
FStar.Math.Lemmas.modulo_lemma (v a1 + v a11) (pow2 64);
FStar.Math.Lemmas.modulo_lemma (v a2 + v a12) (pow2 64);
FStar.Math.Lemmas.modulo_lemma (v a3 + v a13) (pow2 64);
FStar.Math.Lemmas.modulo_lemma (v a4 + v a14) (pow2 64);
assert (felem_fits5 out (2, 4, 2, 2, 4));
calc ( == ) {
((feval5 a).[ 0 ] + (feval5 a).[ 1 ]) % Vec.prime;
( == ) { () }
(as_nat5 (a0, a1, a2, a3, a4) % Vec.prime + as_nat5 (a10, a11, a12, a13, a14) % Vec.prime) %
Vec.prime;
( == ) { (FStar.Math.Lemmas.lemma_mod_plus_distr_l (as_nat5 (a0, a1, a2, a3, a4))
(as_nat5 (a10, a11, a12, a13, a14))
Vec.prime;
FStar.Math.Lemmas.lemma_mod_plus_distr_r (as_nat5 (a0, a1, a2, a3, a4) % Vec.prime)
(as_nat5 (a10, a11, a12, a13, a14))
Vec.prime) }
(as_nat5 (a0, a1, a2, a3, a4) + as_nat5 (a10, a11, a12, a13, a14)) % Vec.prime;
( == ) { () }
(feval5 out).[ 0 ];
};
out | {
"checked_file": "Hacl.Spec.Poly1305.Field32xN.Lemmas.fst.checked",
"dependencies": [
"prims.fst.checked",
"Lib.Sequence.fsti.checked",
"Lib.IntVector.fsti.checked",
"Lib.IntTypes.fsti.checked",
"Hacl.Spec.Poly1305.Vec.fst.checked",
"Hacl.Spec.Poly1305.Field32xN.fst.checked",
"Hacl.Poly1305.Field32xN.Lemmas2.fst.checked",
"Hacl.Poly1305.Field32xN.Lemmas1.fst.checked",
"Hacl.Poly1305.Field32xN.Lemmas0.fst.checked",
"Hacl.Impl.Poly1305.Lemmas.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Classical.fsti.checked",
"FStar.Calc.fsti.checked"
],
"interface_file": false,
"source_file": "Hacl.Spec.Poly1305.Field32xN.Lemmas.fst"
} | [] | [
"Hacl.Spec.Poly1305.Field32xN.felem5",
"Hacl.Spec.Poly1305.Field32xN.uint64xN",
"Lib.IntTypes.uint64",
"Prims.unit",
"FStar.Calc.calc_finish",
"Prims.int",
"Prims.eq2",
"Prims.op_Modulus",
"Prims.op_Addition",
"Lib.Sequence.op_String_Access",
"Hacl.Spec.Poly1305.Vec.pfelem",
"Hacl.Spec.Poly1305.Field32xN.feval5",
"Hacl.Spec.Poly1305.Vec.prime",
"Prims.Cons",
"FStar.Preorder.relation",
"Prims.Nil",
"FStar.Calc.calc_step",
"Hacl.Spec.Poly1305.Field32xN.as_nat5",
"FStar.Pervasives.Native.Mktuple5",
"FStar.Calc.calc_init",
"FStar.Calc.calc_pack",
"Prims.squash",
"FStar.Math.Lemmas.lemma_mod_plus_distr_r",
"FStar.Math.Lemmas.lemma_mod_plus_distr_l",
"Prims._assert",
"Hacl.Spec.Poly1305.Field32xN.felem_fits5",
"Prims.nat",
"FStar.Math.Lemmas.modulo_lemma",
"Lib.IntTypes.v",
"Lib.IntTypes.U64",
"Lib.IntTypes.SEC",
"Prims.pow2",
"Hacl.Spec.Poly1305.Field32xN.tup64_5",
"Hacl.Spec.Poly1305.Field32xN.as_tup64_i",
"FStar.Pervasives.Native.tuple5",
"Lib.IntVector.vec_t",
"Lib.IntVector.vec_add_mod"
] | [] | module Hacl.Spec.Poly1305.Field32xN.Lemmas
open Lib.IntTypes
open Lib.IntVector
open Lib.Sequence
open FStar.Mul
open Hacl.Spec.Poly1305.Field32xN
open Hacl.Poly1305.Field32xN.Lemmas0
open Hacl.Poly1305.Field32xN.Lemmas1
open Hacl.Poly1305.Field32xN.Lemmas2
module Vec = Hacl.Spec.Poly1305.Vec
#set-options "--z3rlimit 100 --max_fuel 0 --initial_ifuel 1 --max_ifuel 1 --using_facts_from '* -FStar.Seq'"
val lemma_feval_is_fas_nat_i:
#w:lanes
-> f:felem5 w{felem_less5 f (pow2 128)}
-> i:size_nat{i < w} ->
Lemma ((feval5 f).[i] == (fas_nat5 f).[i])
let lemma_feval_is_fas_nat_i #w f i =
assert_norm (pow2 128 < Vec.prime);
assert ((feval5 f).[i] == (as_nat5 (transpose f).[i]) % Vec.prime);
FStar.Math.Lemmas.modulo_lemma (as_nat5 (transpose f).[i]) Vec.prime
val lemma_feval_is_fas_nat:
#w:lanes
-> f:felem5 w{felem_less5 f (pow2 128)} ->
Lemma (forall (i:nat). i < w ==> (fas_nat5 f).[i] == (feval5 f).[i])
let lemma_feval_is_fas_nat #w f =
FStar.Classical.forall_intro (lemma_feval_is_fas_nat_i #w f)
val precomp_r5_fits_lemma:
#w:lanes
-> r:felem5 w{felem_fits5 r (1, 1, 1, 1, 1)} ->
Lemma (felem_fits5 (precomp_r5 #w r) (5, 5, 5, 5, 5))
[SMTPat (precomp_r5 #w r)]
let precomp_r5_fits_lemma #w r =
FStar.Classical.forall_intro (precomp_r5_as_tup64 #w r)
val precomp_r5_fits_lemma2:
#w:lanes
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)} ->
Lemma (felem_fits5 (precomp_r5 #w r) (10, 10, 10, 10, 10))
[SMTPat (precomp_r5 #w r)]
let precomp_r5_fits_lemma2 #w r =
FStar.Classical.forall_intro (precomp_r5_as_tup64 #w r)
val precomp_r5_zeros: w:lanes -> Lemma
(let r = (zero w, zero w, zero w, zero w, zero w) in
precomp_r5 r == (zero w, zero w, zero w, zero w, zero w))
let precomp_r5_zeros w =
let r = (zero w, zero w, zero w, zero w, zero w) in
let (r0, r1, r2, r3, r4) = precomp_r5 r in
let aux (i:nat{i < w}) : Lemma ((vec_v (vec_smul_mod (zero w) (u64 5))).[i] == u64 0) = () in
Classical.forall_intro aux;
eq_intro (vec_v (vec_smul_mod (zero w) (u64 5))) (vec_v (zero w));
vecv_extensionality (vec_smul_mod (zero w) (u64 5)) (zero w)
val fadd5_fits_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (2,2,2,2,2)}
-> f2:felem5 w{felem_fits5 f2 (1,1,1,1,1)} ->
Lemma (felem_fits5 (fadd5 f1 f2) (3,3,3,3,3))
[SMTPat (fadd5 f1 f2)]
let fadd5_fits_lemma #w f1 f2 =
let (f10, f11, f12, f13, f14) = f1 in
let (f20, f21, f22, f23, f24) = f2 in
let o = fadd5 f1 f2 in
vec_add_mod_lemma f10 f20;
vec_add_mod_lemma f11 f21;
vec_add_mod_lemma f12 f22;
vec_add_mod_lemma f13 f23;
vec_add_mod_lemma f14 f24
val fadd5_eval_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (2,2,2,2,2)}
-> f2:felem5 w{felem_fits5 f2 (1,1,1,1,1)} ->
Lemma (feval5 (fadd5 f1 f2) == map2 Vec.pfadd (feval5 f1) (feval5 f2))
[SMTPat (fadd5 f1 f2)]
let fadd5_eval_lemma #w f1 f2 =
let o = fadd5 f1 f2 in
FStar.Classical.forall_intro (fadd5_eval_lemma_i f1 f2);
eq_intro (feval5 o) (map2 Vec.pfadd (feval5 f1) (feval5 f2))
val mul_felem5_fits_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10)} ->
Lemma (felem_wide_fits5 (mul_felem5 #w f1 r r5) (126, 102, 78, 54, 30))
[SMTPat (mul_felem5 #w f1 r r5)]
let mul_felem5_fits_lemma #w f1 r r5 =
let (r0, r1, r2, r3, r4) = r in
let (f10, f11, f12, f13, f14) = f1 in
let (r50, r51, r52, r53, r54) = r5 in
let (a0,a1,a2,a3,a4) = smul_felem5 #w f10 (r0,r1,r2,r3,r4) in
smul_felem5_fits_lemma #w #3 #(2,2,2,2,2) f10 (r0,r1,r2,r3,r4);
let (a10,a11,a12,a13,a14) = smul_add_felem5 #w f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4) in
smul_add_felem5_fits_lemma #w #3 #(10,2,2,2,2) #(6,6,6,6,6) f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4);
let (a20,a21,a22,a23,a24) = smul_add_felem5 #w f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14) in
smul_add_felem5_fits_lemma #w #3 #(10,10,2,2,2) #(36,12,12,12,12) f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14);
let (a30,a31,a32,a33,a34) = smul_add_felem5 #w f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24) in
smul_add_felem5_fits_lemma #w #3 #(10,10,10,2,2) #(66,42,18,18,18) f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24);
let (a40,a41,a42,a43,a44) = smul_add_felem5 #w f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34) in
smul_add_felem5_fits_lemma #w #3 #(10,10,10,10,2) #(96,72,48,24,24) f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34)
val mul_felem5_eval_lemma_i:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10) /\ r5 == precomp_r5 r}
-> i:nat{i < w} ->
Lemma ((feval5 (mul_felem5 #w f1 r r5)).[i] == (feval5 f1).[i] `Vec.pfmul` (feval5 r).[i])
let mul_felem5_eval_lemma_i #w f1 r r5 i =
let (r0, r1, r2, r3, r4) = r in
let (f10, f11, f12, f13, f14) = f1 in
let (r50, r51, r52, r53, r54) = r5 in
let (a0,a1,a2,a3,a4) = smul_felem5 #w f10 (r0,r1,r2,r3,r4) in
smul_felem5_eval_lemma #w #3 #(2,2,2,2,2) f10 (r0,r1,r2,r3,r4);
smul_felem5_fits_lemma #w #3 #(2,2,2,2,2) f10 (r0,r1,r2,r3,r4);
assert ((fas_nat5 (a0,a1,a2,a3,a4)).[i] == (uint64xN_v f10).[i] * (fas_nat5 (r0,r1,r2,r3,r4)).[i]);
let (a10,a11,a12,a13,a14) = smul_add_felem5 #w f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4) in
smul_add_felem5_eval_lemma #w #3 #(10,2,2,2,2) #(6,6,6,6,6) f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4);
smul_add_felem5_fits_lemma #w #3 #(10,2,2,2,2) #(6,6,6,6,6) f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4);
assert ((fas_nat5 (a10,a11,a12,a13,a14)).[i] == (fas_nat5 (a0,a1,a2,a3,a4)).[i] + (uint64xN_v f11).[i] * (fas_nat5 (r54,r0,r1,r2,r3)).[i]);
let (a20,a21,a22,a23,a24) = smul_add_felem5 #w f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14) in
smul_add_felem5_eval_lemma #w #3 #(10,10,2,2,2) #(36,12,12,12,12) f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14);
smul_add_felem5_fits_lemma #w #3 #(10,10,2,2,2) #(36,12,12,12,12) f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14);
assert ((fas_nat5 (a20,a21,a22,a23,a24)).[i] == (fas_nat5 (a10,a11,a12,a13,a14)).[i] + (uint64xN_v f12).[i] * (fas_nat5 (r53,r54,r0,r1,r2)).[i]);
let (a30,a31,a32,a33,a34) = smul_add_felem5 #w f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24) in
smul_add_felem5_eval_lemma #w #3 #(10,10,10,2,2) #(66,42,18,18,18) f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24);
smul_add_felem5_fits_lemma #w #3 #(10,10,10,2,2) #(66,42,18,18,18) f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24);
assert ((fas_nat5 (a30,a31,a32,a33,a34)).[i] == (fas_nat5 (a20,a21,a22,a23,a24)).[i] + (uint64xN_v f13).[i] * (fas_nat5 (r52,r53,r54,r0,r1)).[i]);
let (a40,a41,a42,a43,a44) = smul_add_felem5 #w f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34) in
smul_add_felem5_eval_lemma #w #3 #(10,10,10,10,2) #(96,72,48,24,24) f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34);
smul_add_felem5_fits_lemma #w #3 #(10,10,10,10,2) #(96,72,48,24,24) f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34);
assert ((fas_nat5 (a40,a41,a42,a43,a44)).[i] == (fas_nat5 (a30,a31,a32,a33,a34)).[i] + (uint64xN_v f14).[i] * (fas_nat5 (r51,r52,r53,r54,r0)).[i]);
assert ((fas_nat5 (a40,a41,a42,a43,a44)).[i] ==
(uint64xN_v f10).[i] * (fas_nat5 (r0,r1,r2,r3,r4)).[i] +
(uint64xN_v f11).[i] * (fas_nat5 (r54,r0,r1,r2,r3)).[i] +
(uint64xN_v f12).[i] * (fas_nat5 (r53,r54,r0,r1,r2)).[i] +
(uint64xN_v f13).[i] * (fas_nat5 (r52,r53,r54,r0,r1)).[i] +
(uint64xN_v f14).[i] * (fas_nat5 (r51,r52,r53,r54,r0)).[i]);
mul_felem5_eval_as_tup64 #w f1 r r5 i;
mul_felem5_lemma (as_tup64_i f1 i) (as_tup64_i r i)
val mul_felem5_eval_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10) /\ r5 == precomp_r5 r} ->
Lemma (feval5 (mul_felem5 #w f1 r r5) == map2 (Vec.pfmul) (feval5 f1) (feval5 r))
[SMTPat (mul_felem5 #w f1 r r5)]
let mul_felem5_eval_lemma #w f1 r r5 =
let tmp = map2 (Vec.pfmul) (feval5 f1) (feval5 r) in
FStar.Classical.forall_intro (mul_felem5_eval_lemma_i #w f1 r r5);
eq_intro (feval5 (mul_felem5 #w f1 r r5)) tmp
val fmul_r5_fits_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10)} ->
Lemma (felem_fits5 (fmul_r5 #w f1 r r5) (1, 2, 1, 1, 2))
[SMTPat (fmul_r5 #w f1 r r5)]
let fmul_r5_fits_lemma #w f1 r r5 =
let tmp = mul_felem5 f1 r r5 in
mul_felem5_fits_lemma #w f1 r r5;
let res = carry_wide_felem5 tmp in
carry_wide_felem5_fits_lemma #w tmp
val fmul_r5_eval_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10) /\ r5 == precomp_r5 r} ->
Lemma (feval5 (fmul_r5 #w f1 r r5) == map2 (Vec.pfmul) (feval5 f1) (feval5 r))
[SMTPat (fmul_r5 #w f1 r r5)]
let fmul_r5_eval_lemma #w f1 r r5 =
let tmp = mul_felem5 f1 r r5 in
mul_felem5_eval_lemma #w f1 r r5;
let res = carry_wide_felem5 tmp in
carry_wide_felem5_eval_lemma #w tmp
val fadd_mul_r5_fits_lemma:
#w:lanes
-> acc:felem5 w{felem_fits5 acc (2, 2, 2, 2, 2)}
-> f1:felem5 w{felem_fits5 f1 (1, 1, 1, 1, 1)}
-> r:felem5 w{felem_fits5 r (1, 1, 1, 1, 1)}
-> r5:felem5 w{felem_fits5 r5 (5, 5, 5, 5, 5)} ->
Lemma (felem_fits5 (fadd_mul_r5 acc f1 r r5) (1, 2, 1, 1, 2))
[SMTPat (fadd_mul_r5 acc f1 r r5)]
let fadd_mul_r5_fits_lemma #w acc f1 r r5 =
let acc1 = fadd5 acc f1 in
fadd5_fits_lemma #w acc f1;
let tmp = mul_felem5 acc1 r r5 in
mul_felem5_fits_lemma #w acc1 r r5;
let res = carry_wide_felem5 tmp in
carry_wide_felem5_fits_lemma #w tmp
val fadd_mul_r5_eval_lemma:
#w:lanes
-> acc:felem5 w{felem_fits5 acc (2, 2, 2, 2, 2)}
-> f1:felem5 w{felem_fits5 f1 (1, 1, 1, 1, 1)}
-> r:felem5 w{felem_fits5 r (1, 1, 1, 1, 1)}
-> r5:felem5 w{felem_fits5 r5 (5, 5, 5, 5, 5) /\ r5 == precomp_r5 r} ->
Lemma (feval5 (fadd_mul_r5 acc f1 r r5) ==
map2 (Vec.pfmul) (map2 (Vec.pfadd) (feval5 acc) (feval5 f1)) (feval5 r))
[SMTPat (fadd_mul_r5 acc f1 r r5)]
let fadd_mul_r5_eval_lemma #w acc f1 r r5 =
let acc1 = fadd5 acc f1 in
fadd5_eval_lemma #w acc f1;
let tmp = mul_felem5 acc1 r r5 in
mul_felem5_eval_lemma #w acc1 r r5;
let res = carry_wide_felem5 tmp in
carry_wide_felem5_eval_lemma #w tmp
val reduce_felem5_eval_lemma:
#w:lanes
-> f:felem5 w{felem_fits5 f (2, 2, 2, 2, 2)} ->
Lemma
(felem_fits5 (reduce_felem5 f) (1, 1, 1, 1, 1) /\
(feval5 f).[0] == (fas_nat5 (reduce_felem5 f)).[0])
[SMTPat (reduce_felem5 f)]
let reduce_felem5_eval_lemma #w f =
carry_full_felem5_eval_lemma f;
carry_full_felem5_fits_lemma f;
let f = carry_full_felem5 f in
carry_reduce_felem5_lemma #w f;
subtract_p5_felem5_lemma #w (carry_full_felem5 f)
val fmul_r2_normalize50:
acc:felem5 2
-> r:felem5 2
-> r2:felem5 2 ->
Pure (felem5 2)
(requires
felem_fits5 acc (3, 3, 3, 3, 3) /\
felem_fits5 r (1, 1, 1, 1, 1) /\
felem_fits5 r2 (2, 2, 2, 2, 2) /\
feval5 r2 == Vec.compute_r2 (feval5 r).[0])
(ensures fun a ->
let fr21 = create2 (feval5 r2).[0] (feval5 r).[0] in
feval5 a == Vec.fmul (feval5 acc) fr21 /\
felem_fits5 a (1, 2, 1, 1, 2))
let fmul_r2_normalize50 (a0, a1, a2, a3, a4) (r0, r1, r2, r3, r4) (r20, r21, r22, r23, r24) =
let r210 = vec_interleave_low r20 r0 in
vec_interleave_low_lemma2 r20 r0;
let r211 = vec_interleave_low r21 r1 in
vec_interleave_low_lemma2 r21 r1;
let r212 = vec_interleave_low r22 r2 in
vec_interleave_low_lemma2 r22 r2;
let r213 = vec_interleave_low r23 r3 in
vec_interleave_low_lemma2 r23 r3;
let r214 = vec_interleave_low r24 r4 in
vec_interleave_low_lemma2 r24 r4;
let acc = (a0, a1, a2, a3, a4) in
let fr = (r0, r1, r2, r3, r4) in
let fr2 = (r20, r21, r22, r23, r24) in
assert ((feval5 fr2).[0] == Vec.pfmul ((feval5 fr).[0]) ((feval5 fr).[0]));
let fr21 = (r210, r211, r212, r213, r214) in
eq_intro (feval5 fr21) (create2 (feval5 fr2).[0] (feval5 fr).[0]);
assert (feval5 fr21 == create2 (feval5 fr2).[0] (feval5 fr).[0]);
assert (felem_fits5 fr21 (2, 2, 2, 2, 2));
let fr215 = precomp_r5 #2 fr21 in
let a = fmul_r5 #2 acc fr21 fr215 in
fmul_r5_eval_lemma acc fr21 fr215;
fmul_r5_fits_lemma acc fr21 fr215;
assert (feval5 a == Vec.fmul (feval5 acc) (feval5 fr21));
assert (felem_fits5 a (1, 2, 1, 1, 2));
a
#push-options "--z3rlimit 150"
val fmul_r2_normalize51:
a:felem5 2
-> fa1:felem5 2 ->
Pure (felem5 2)
(requires
felem_fits5 a (1, 2, 1, 1, 2) /\
felem_fits5 fa1 (1, 2, 1, 1, 2) /\
feval5 fa1 == create2 (feval5 a).[1] (feval5 a).[1])
(ensures fun out ->
(feval5 out).[0] == Vec.pfadd (feval5 a).[0] (feval5 a).[1] /\
felem_fits5 out (2, 4, 2, 2, 4)) | false | false | Hacl.Spec.Poly1305.Field32xN.Lemmas.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 150,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val fmul_r2_normalize51:
a:felem5 2
-> fa1:felem5 2 ->
Pure (felem5 2)
(requires
felem_fits5 a (1, 2, 1, 1, 2) /\
felem_fits5 fa1 (1, 2, 1, 1, 2) /\
feval5 fa1 == create2 (feval5 a).[1] (feval5 a).[1])
(ensures fun out ->
(feval5 out).[0] == Vec.pfadd (feval5 a).[0] (feval5 a).[1] /\
felem_fits5 out (2, 4, 2, 2, 4)) | [] | Hacl.Spec.Poly1305.Field32xN.Lemmas.fmul_r2_normalize51 | {
"file_name": "code/poly1305/Hacl.Spec.Poly1305.Field32xN.Lemmas.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | a: Hacl.Spec.Poly1305.Field32xN.felem5 2 -> fa1: Hacl.Spec.Poly1305.Field32xN.felem5 2
-> Prims.Pure (Hacl.Spec.Poly1305.Field32xN.felem5 2) | {
"end_col": 5,
"end_line": 361,
"start_col": 31,
"start_line": 328
} |
FStar.Pervasives.Lemma | val set_bit5_lemma:
#w:lanes
-> f:lseq (uint64xN w) 5
-> i:size_nat{i <= 128} ->
Lemma
(requires
lfelem_fits f (1, 1, 1, 1, 1) /\
lfelem_less f (pow2 i))
(ensures
lfelem_fits (set_bit5 f i) (1, 1, 1, 1, 1) /\
lfeval (set_bit5 f i) == map (Vec.pfadd (pow2 i)) (lfeval f)) | [
{
"abbrev": true,
"full_module": "Hacl.Spec.Poly1305.Vec",
"short_module": "Vec"
},
{
"abbrev": false,
"full_module": "Hacl.Poly1305.Field32xN.Lemmas2",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Poly1305.Field32xN.Lemmas1",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Poly1305.Field32xN.Lemmas0",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305.Field32xN",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305.Field32xN",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305.Field32xN",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let set_bit5_lemma #w f i =
let tmp = map (Vec.pfadd (pow2 i)) (lfeval f) in
match w with
| 1 ->
set_bit5_lemma_k #w f i 0;
eq_intro (lfeval (set_bit5 f i)) tmp
| 2 ->
set_bit5_lemma_k #w f i 0;
set_bit5_lemma_k #w f i 1;
eq_intro (lfeval (set_bit5 f i)) tmp
| 4 ->
set_bit5_lemma_k #w f i 0;
set_bit5_lemma_k #w f i 1;
set_bit5_lemma_k #w f i 2;
set_bit5_lemma_k #w f i 3;
eq_intro (lfeval (set_bit5 f i)) tmp | val set_bit5_lemma:
#w:lanes
-> f:lseq (uint64xN w) 5
-> i:size_nat{i <= 128} ->
Lemma
(requires
lfelem_fits f (1, 1, 1, 1, 1) /\
lfelem_less f (pow2 i))
(ensures
lfelem_fits (set_bit5 f i) (1, 1, 1, 1, 1) /\
lfeval (set_bit5 f i) == map (Vec.pfadd (pow2 i)) (lfeval f))
let set_bit5_lemma #w f i = | false | null | true | let tmp = map (Vec.pfadd (pow2 i)) (lfeval f) in
match w with
| 1 ->
set_bit5_lemma_k #w f i 0;
eq_intro (lfeval (set_bit5 f i)) tmp
| 2 ->
set_bit5_lemma_k #w f i 0;
set_bit5_lemma_k #w f i 1;
eq_intro (lfeval (set_bit5 f i)) tmp
| 4 ->
set_bit5_lemma_k #w f i 0;
set_bit5_lemma_k #w f i 1;
set_bit5_lemma_k #w f i 2;
set_bit5_lemma_k #w f i 3;
eq_intro (lfeval (set_bit5 f i)) tmp | {
"checked_file": "Hacl.Spec.Poly1305.Field32xN.Lemmas.fst.checked",
"dependencies": [
"prims.fst.checked",
"Lib.Sequence.fsti.checked",
"Lib.IntVector.fsti.checked",
"Lib.IntTypes.fsti.checked",
"Hacl.Spec.Poly1305.Vec.fst.checked",
"Hacl.Spec.Poly1305.Field32xN.fst.checked",
"Hacl.Poly1305.Field32xN.Lemmas2.fst.checked",
"Hacl.Poly1305.Field32xN.Lemmas1.fst.checked",
"Hacl.Poly1305.Field32xN.Lemmas0.fst.checked",
"Hacl.Impl.Poly1305.Lemmas.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Classical.fsti.checked",
"FStar.Calc.fsti.checked"
],
"interface_file": false,
"source_file": "Hacl.Spec.Poly1305.Field32xN.Lemmas.fst"
} | [
"lemma"
] | [
"Hacl.Spec.Poly1305.Field32xN.lanes",
"Lib.Sequence.lseq",
"Hacl.Spec.Poly1305.Field32xN.uint64xN",
"Lib.IntTypes.size_nat",
"Prims.b2t",
"Prims.op_LessThanOrEqual",
"Lib.Sequence.eq_intro",
"Hacl.Spec.Poly1305.Vec.pfelem",
"Hacl.Spec.Poly1305.Field32xN.lfeval",
"Hacl.Spec.Poly1305.Field32xN.set_bit5",
"Prims.unit",
"Hacl.Poly1305.Field32xN.Lemmas2.set_bit5_lemma_k",
"Prims.l_Forall",
"Prims.nat",
"Prims.l_imp",
"Prims.op_LessThan",
"Prims.eq2",
"Lib.Sequence.index",
"Hacl.Spec.Poly1305.Vec.pfadd",
"Prims.pow2",
"Lib.Sequence.map"
] | [] | module Hacl.Spec.Poly1305.Field32xN.Lemmas
open Lib.IntTypes
open Lib.IntVector
open Lib.Sequence
open FStar.Mul
open Hacl.Spec.Poly1305.Field32xN
open Hacl.Poly1305.Field32xN.Lemmas0
open Hacl.Poly1305.Field32xN.Lemmas1
open Hacl.Poly1305.Field32xN.Lemmas2
module Vec = Hacl.Spec.Poly1305.Vec
#set-options "--z3rlimit 100 --max_fuel 0 --initial_ifuel 1 --max_ifuel 1 --using_facts_from '* -FStar.Seq'"
val lemma_feval_is_fas_nat_i:
#w:lanes
-> f:felem5 w{felem_less5 f (pow2 128)}
-> i:size_nat{i < w} ->
Lemma ((feval5 f).[i] == (fas_nat5 f).[i])
let lemma_feval_is_fas_nat_i #w f i =
assert_norm (pow2 128 < Vec.prime);
assert ((feval5 f).[i] == (as_nat5 (transpose f).[i]) % Vec.prime);
FStar.Math.Lemmas.modulo_lemma (as_nat5 (transpose f).[i]) Vec.prime
val lemma_feval_is_fas_nat:
#w:lanes
-> f:felem5 w{felem_less5 f (pow2 128)} ->
Lemma (forall (i:nat). i < w ==> (fas_nat5 f).[i] == (feval5 f).[i])
let lemma_feval_is_fas_nat #w f =
FStar.Classical.forall_intro (lemma_feval_is_fas_nat_i #w f)
val precomp_r5_fits_lemma:
#w:lanes
-> r:felem5 w{felem_fits5 r (1, 1, 1, 1, 1)} ->
Lemma (felem_fits5 (precomp_r5 #w r) (5, 5, 5, 5, 5))
[SMTPat (precomp_r5 #w r)]
let precomp_r5_fits_lemma #w r =
FStar.Classical.forall_intro (precomp_r5_as_tup64 #w r)
val precomp_r5_fits_lemma2:
#w:lanes
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)} ->
Lemma (felem_fits5 (precomp_r5 #w r) (10, 10, 10, 10, 10))
[SMTPat (precomp_r5 #w r)]
let precomp_r5_fits_lemma2 #w r =
FStar.Classical.forall_intro (precomp_r5_as_tup64 #w r)
val precomp_r5_zeros: w:lanes -> Lemma
(let r = (zero w, zero w, zero w, zero w, zero w) in
precomp_r5 r == (zero w, zero w, zero w, zero w, zero w))
let precomp_r5_zeros w =
let r = (zero w, zero w, zero w, zero w, zero w) in
let (r0, r1, r2, r3, r4) = precomp_r5 r in
let aux (i:nat{i < w}) : Lemma ((vec_v (vec_smul_mod (zero w) (u64 5))).[i] == u64 0) = () in
Classical.forall_intro aux;
eq_intro (vec_v (vec_smul_mod (zero w) (u64 5))) (vec_v (zero w));
vecv_extensionality (vec_smul_mod (zero w) (u64 5)) (zero w)
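// Background for the multiplication lemmas below: precomp_r5 scales each limb of r
// by 5 (as the vec_smul_mod (zero w) (u64 5) unfolding above indicates). In the
// Poly1305 field p = 2^130 - 5 we have 2^130 == 5 (mod p), so partial products that
// would land at limb positions >= 5 are folded back in with a factor of 5 -- which
// is why the rotated vectors used in the mul_felem5 lemmas carry r5 limbs in
// exactly the wrapped positions.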
val fadd5_fits_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (2,2,2,2,2)}
-> f2:felem5 w{felem_fits5 f2 (1,1,1,1,1)} ->
Lemma (felem_fits5 (fadd5 f1 f2) (3,3,3,3,3))
[SMTPat (fadd5 f1 f2)]
let fadd5_fits_lemma #w f1 f2 =
let (f10, f11, f12, f13, f14) = f1 in
let (f20, f21, f22, f23, f24) = f2 in
let o = fadd5 f1 f2 in
vec_add_mod_lemma f10 f20;
vec_add_mod_lemma f11 f21;
vec_add_mod_lemma f12 f22;
vec_add_mod_lemma f13 f23;
vec_add_mod_lemma f14 f24
val fadd5_eval_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (2,2,2,2,2)}
-> f2:felem5 w{felem_fits5 f2 (1,1,1,1,1)} ->
Lemma (feval5 (fadd5 f1 f2) == map2 Vec.pfadd (feval5 f1) (feval5 f2))
[SMTPat (fadd5 f1 f2)]
let fadd5_eval_lemma #w f1 f2 =
let o = fadd5 f1 f2 in
FStar.Classical.forall_intro (fadd5_eval_lemma_i f1 f2);
eq_intro (feval5 o) (map2 Vec.pfadd (feval5 f1) (feval5 f2))
val mul_felem5_fits_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10)} ->
Lemma (felem_wide_fits5 (mul_felem5 #w f1 r r5) (126, 102, 78, 54, 30))
[SMTPat (mul_felem5 #w f1 r r5)]
let mul_felem5_fits_lemma #w f1 r r5 =
let (r0, r1, r2, r3, r4) = r in
let (f10, f11, f12, f13, f14) = f1 in
let (r50, r51, r52, r53, r54) = r5 in
let (a0,a1,a2,a3,a4) = smul_felem5 #w f10 (r0,r1,r2,r3,r4) in
smul_felem5_fits_lemma #w #3 #(2,2,2,2,2) f10 (r0,r1,r2,r3,r4);
let (a10,a11,a12,a13,a14) = smul_add_felem5 #w f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4) in
smul_add_felem5_fits_lemma #w #3 #(10,2,2,2,2) #(6,6,6,6,6) f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4);
let (a20,a21,a22,a23,a24) = smul_add_felem5 #w f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14) in
smul_add_felem5_fits_lemma #w #3 #(10,10,2,2,2) #(36,12,12,12,12) f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14);
let (a30,a31,a32,a33,a34) = smul_add_felem5 #w f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24) in
smul_add_felem5_fits_lemma #w #3 #(10,10,10,2,2) #(66,42,18,18,18) f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24);
let (a40,a41,a42,a43,a44) = smul_add_felem5 #w f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34) in
smul_add_felem5_fits_lemma #w #3 #(10,10,10,10,2) #(96,72,48,24,24) f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34)
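// The wide bounds (126, 102, 78, 54, 30) are simply the running sums of the per-step
// bounds: the final smul_add step adds 3 * (10, 10, 10, 10, 2) to the accumulated
// (96, 72, 48, 24, 24).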
val mul_felem5_eval_lemma_i:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10) /\ r5 == precomp_r5 r}
-> i:nat{i < w} ->
Lemma ((feval5 (mul_felem5 #w f1 r r5)).[i] == (feval5 f1).[i] `Vec.pfmul` (feval5 r).[i])
let mul_felem5_eval_lemma_i #w f1 r r5 i =
let (r0, r1, r2, r3, r4) = r in
let (f10, f11, f12, f13, f14) = f1 in
let (r50, r51, r52, r53, r54) = r5 in
let (a0,a1,a2,a3,a4) = smul_felem5 #w f10 (r0,r1,r2,r3,r4) in
smul_felem5_eval_lemma #w #3 #(2,2,2,2,2) f10 (r0,r1,r2,r3,r4);
smul_felem5_fits_lemma #w #3 #(2,2,2,2,2) f10 (r0,r1,r2,r3,r4);
assert ((fas_nat5 (a0,a1,a2,a3,a4)).[i] == (uint64xN_v f10).[i] * (fas_nat5 (r0,r1,r2,r3,r4)).[i]);
let (a10,a11,a12,a13,a14) = smul_add_felem5 #w f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4) in
smul_add_felem5_eval_lemma #w #3 #(10,2,2,2,2) #(6,6,6,6,6) f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4);
smul_add_felem5_fits_lemma #w #3 #(10,2,2,2,2) #(6,6,6,6,6) f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4);
assert ((fas_nat5 (a10,a11,a12,a13,a14)).[i] == (fas_nat5 (a0,a1,a2,a3,a4)).[i] + (uint64xN_v f11).[i] * (fas_nat5 (r54,r0,r1,r2,r3)).[i]);
let (a20,a21,a22,a23,a24) = smul_add_felem5 #w f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14) in
smul_add_felem5_eval_lemma #w #3 #(10,10,2,2,2) #(36,12,12,12,12) f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14);
smul_add_felem5_fits_lemma #w #3 #(10,10,2,2,2) #(36,12,12,12,12) f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14);
assert ((fas_nat5 (a20,a21,a22,a23,a24)).[i] == (fas_nat5 (a10,a11,a12,a13,a14)).[i] + (uint64xN_v f12).[i] * (fas_nat5 (r53,r54,r0,r1,r2)).[i]);
let (a30,a31,a32,a33,a34) = smul_add_felem5 #w f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24) in
smul_add_felem5_eval_lemma #w #3 #(10,10,10,2,2) #(66,42,18,18,18) f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24);
smul_add_felem5_fits_lemma #w #3 #(10,10,10,2,2) #(66,42,18,18,18) f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24);
assert ((fas_nat5 (a30,a31,a32,a33,a34)).[i] == (fas_nat5 (a20,a21,a22,a23,a24)).[i] + (uint64xN_v f13).[i] * (fas_nat5 (r52,r53,r54,r0,r1)).[i]);
let (a40,a41,a42,a43,a44) = smul_add_felem5 #w f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34) in
smul_add_felem5_eval_lemma #w #3 #(10,10,10,10,2) #(96,72,48,24,24) f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34);
smul_add_felem5_fits_lemma #w #3 #(10,10,10,10,2) #(96,72,48,24,24) f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34);
assert ((fas_nat5 (a40,a41,a42,a43,a44)).[i] == (fas_nat5 (a30,a31,a32,a33,a34)).[i] + (uint64xN_v f14).[i] * (fas_nat5 (r51,r52,r53,r54,r0)).[i]);
assert ((fas_nat5 (a40,a41,a42,a43,a44)).[i] ==
(uint64xN_v f10).[i] * (fas_nat5 (r0,r1,r2,r3,r4)).[i] +
(uint64xN_v f11).[i] * (fas_nat5 (r54,r0,r1,r2,r3)).[i] +
(uint64xN_v f12).[i] * (fas_nat5 (r53,r54,r0,r1,r2)).[i] +
(uint64xN_v f13).[i] * (fas_nat5 (r52,r53,r54,r0,r1)).[i] +
(uint64xN_v f14).[i] * (fas_nat5 (r51,r52,r53,r54,r0)).[i]);
mul_felem5_eval_as_tup64 #w f1 r r5 i;
mul_felem5_lemma (as_tup64_i f1 i) (as_tup64_i r i)
val mul_felem5_eval_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10) /\ r5 == precomp_r5 r} ->
Lemma (feval5 (mul_felem5 #w f1 r r5) == map2 (Vec.pfmul) (feval5 f1) (feval5 r))
[SMTPat (mul_felem5 #w f1 r r5)]
let mul_felem5_eval_lemma #w f1 r r5 =
let tmp = map2 (Vec.pfmul) (feval5 f1) (feval5 r) in
FStar.Classical.forall_intro (mul_felem5_eval_lemma_i #w f1 r r5);
eq_intro (feval5 (mul_felem5 #w f1 r r5)) tmp
val fmul_r5_fits_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10)} ->
Lemma (felem_fits5 (fmul_r5 #w f1 r r5) (1, 2, 1, 1, 2))
[SMTPat (fmul_r5 #w f1 r r5)]
let fmul_r5_fits_lemma #w f1 r r5 =
let tmp = mul_felem5 f1 r r5 in
mul_felem5_fits_lemma #w f1 r r5;
let res = carry_wide_felem5 tmp in
carry_wide_felem5_fits_lemma #w tmp
val fmul_r5_eval_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10) /\ r5 == precomp_r5 r} ->
Lemma (feval5 (fmul_r5 #w f1 r r5) == map2 (Vec.pfmul) (feval5 f1) (feval5 r))
[SMTPat (fmul_r5 #w f1 r r5)]
let fmul_r5_eval_lemma #w f1 r r5 =
let tmp = mul_felem5 f1 r r5 in
mul_felem5_eval_lemma #w f1 r r5;
let res = carry_wide_felem5 tmp in
carry_wide_felem5_eval_lemma #w tmp
val fadd_mul_r5_fits_lemma:
#w:lanes
-> acc:felem5 w{felem_fits5 acc (2, 2, 2, 2, 2)}
-> f1:felem5 w{felem_fits5 f1 (1, 1, 1, 1, 1)}
-> r:felem5 w{felem_fits5 r (1, 1, 1, 1, 1)}
-> r5:felem5 w{felem_fits5 r5 (5, 5, 5, 5, 5)} ->
Lemma (felem_fits5 (fadd_mul_r5 acc f1 r r5) (1, 2, 1, 1, 2))
[SMTPat (fadd_mul_r5 acc f1 r r5)]
let fadd_mul_r5_fits_lemma #w acc f1 r r5 =
let acc1 = fadd5 acc f1 in
fadd5_fits_lemma #w acc f1;
let tmp = mul_felem5 acc1 r r5 in
mul_felem5_fits_lemma #w acc1 r r5;
let res = carry_wide_felem5 tmp in
carry_wide_felem5_fits_lemma #w tmp
val fadd_mul_r5_eval_lemma:
#w:lanes
-> acc:felem5 w{felem_fits5 acc (2, 2, 2, 2, 2)}
-> f1:felem5 w{felem_fits5 f1 (1, 1, 1, 1, 1)}
-> r:felem5 w{felem_fits5 r (1, 1, 1, 1, 1)}
-> r5:felem5 w{felem_fits5 r5 (5, 5, 5, 5, 5) /\ r5 == precomp_r5 r} ->
Lemma (feval5 (fadd_mul_r5 acc f1 r r5) ==
map2 (Vec.pfmul) (map2 (Vec.pfadd) (feval5 acc) (feval5 f1)) (feval5 r))
[SMTPat (fadd_mul_r5 acc f1 r r5)]
let fadd_mul_r5_eval_lemma #w acc f1 r r5 =
let acc1 = fadd5 acc f1 in
fadd5_eval_lemma #w acc f1;
let tmp = mul_felem5 acc1 r r5 in
mul_felem5_eval_lemma #w acc1 r r5;
let res = carry_wide_felem5 tmp in
carry_wide_felem5_eval_lemma #w tmp
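// The proofs in this block all follow the same shape: name each intermediate result
// (fadd5, mul_felem5, carry_wide_felem5) and invoke the matching *_eval_lemma and
// *_fits_lemma for every step, letting the SMT patterns chain the per-step facts.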
val reduce_felem5_eval_lemma:
#w:lanes
-> f:felem5 w{felem_fits5 f (2, 2, 2, 2, 2)} ->
Lemma
(felem_fits5 (reduce_felem5 f) (1, 1, 1, 1, 1) /\
(feval5 f).[0] == (fas_nat5 (reduce_felem5 f)).[0])
[SMTPat (reduce_felem5 f)]
let reduce_felem5_eval_lemma #w f =
carry_full_felem5_eval_lemma f;
carry_full_felem5_fits_lemma f;
let f = carry_full_felem5 f in
carry_reduce_felem5_lemma #w f;
subtract_p5_felem5_lemma #w (carry_full_felem5 f)
val fmul_r2_normalize50:
acc:felem5 2
-> r:felem5 2
-> r2:felem5 2 ->
Pure (felem5 2)
(requires
felem_fits5 acc (3, 3, 3, 3, 3) /\
felem_fits5 r (1, 1, 1, 1, 1) /\
felem_fits5 r2 (2, 2, 2, 2, 2) /\
feval5 r2 == Vec.compute_r2 (feval5 r).[0])
(ensures fun a ->
let fr21 = create2 (feval5 r2).[0] (feval5 r).[0] in
feval5 a == Vec.fmul (feval5 acc) fr21 /\
felem_fits5 a (1, 2, 1, 1, 2))
let fmul_r2_normalize50 (a0, a1, a2, a3, a4) (r0, r1, r2, r3, r4) (r20, r21, r22, r23, r24) =
let r210 = vec_interleave_low r20 r0 in
vec_interleave_low_lemma2 r20 r0;
let r211 = vec_interleave_low r21 r1 in
vec_interleave_low_lemma2 r21 r1;
let r212 = vec_interleave_low r22 r2 in
vec_interleave_low_lemma2 r22 r2;
let r213 = vec_interleave_low r23 r3 in
vec_interleave_low_lemma2 r23 r3;
let r214 = vec_interleave_low r24 r4 in
vec_interleave_low_lemma2 r24 r4;
let acc = (a0, a1, a2, a3, a4) in
let fr = (r0, r1, r2, r3, r4) in
let fr2 = (r20, r21, r22, r23, r24) in
assert ((feval5 fr2).[0] == Vec.pfmul ((feval5 fr).[0]) ((feval5 fr).[0]));
let fr21 = (r210, r211, r212, r213, r214) in
eq_intro (feval5 fr21) (create2 (feval5 fr2).[0] (feval5 fr).[0]);
assert (feval5 fr21 == create2 (feval5 fr2).[0] (feval5 fr).[0]);
assert (felem_fits5 fr21 (2, 2, 2, 2, 2));
let fr215 = precomp_r5 #2 fr21 in
let a = fmul_r5 #2 acc fr21 fr215 in
fmul_r5_eval_lemma acc fr21 fr215;
fmul_r5_fits_lemma acc fr21 fr215;
assert (feval5 a == Vec.fmul (feval5 acc) (feval5 fr21));
assert (felem_fits5 a (1, 2, 1, 1, 2));
a
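// The vec_interleave_low calls pair lane 0 of r^2 with lane 0 of r, so fr21 evaluates
// to the 2-element vector (r^2, r). Multiplying the two-lane accumulator by fr21 thus
// computes acc.[0] * r^2 and acc.[1] * r in one vectorized fmul_r5 -- the pointwise
// half of the normalize_2 computation; the lanes are summed and carried afterwards.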
#push-options "--z3rlimit 150"
val fmul_r2_normalize51:
a:felem5 2
-> fa1:felem5 2 ->
Pure (felem5 2)
(requires
felem_fits5 a (1, 2, 1, 1, 2) /\
felem_fits5 fa1 (1, 2, 1, 1, 2) /\
feval5 fa1 == create2 (feval5 a).[1] (feval5 a).[1])
(ensures fun out ->
(feval5 out).[0] == Vec.pfadd (feval5 a).[0] (feval5 a).[1] /\
felem_fits5 out (2, 4, 2, 2, 4))
let fmul_r2_normalize51 a fa1 =
let (a0, a1, a2, a3, a4) = a in
let (a10, a11, a12, a13, a14) = fa1 in
let o0 = vec_add_mod a0 a10 in
let o1 = vec_add_mod a1 a11 in
let o2 = vec_add_mod a2 a12 in
let o3 = vec_add_mod a3 a13 in
let o4 = vec_add_mod a4 a14 in
let out = (o0, o1, o2, o3, o4) in
let (a0, a1, a2, a3, a4) = as_tup64_i a 0 in
let (a10, a11, a12, a13, a14) = as_tup64_i fa1 0 in
let (o0, o1, o2, o3, o4) = as_tup64_i out 0 in
FStar.Math.Lemmas.modulo_lemma (v a0 + v a10) (pow2 64);
FStar.Math.Lemmas.modulo_lemma (v a1 + v a11) (pow2 64);
FStar.Math.Lemmas.modulo_lemma (v a2 + v a12) (pow2 64);
FStar.Math.Lemmas.modulo_lemma (v a3 + v a13) (pow2 64);
FStar.Math.Lemmas.modulo_lemma (v a4 + v a14) (pow2 64);
assert (felem_fits5 out (2, 4, 2, 2, 4));
calc (==) {
((feval5 a).[0] + (feval5 a).[1]) % Vec.prime;
(==) { }
(as_nat5 (a0, a1, a2, a3, a4) % Vec.prime + as_nat5 (a10, a11, a12, a13, a14) % Vec.prime) % Vec.prime;
(==) { FStar.Math.Lemmas.lemma_mod_plus_distr_l
(as_nat5 (a0, a1, a2, a3, a4)) (as_nat5 (a10, a11, a12, a13, a14)) Vec.prime;
FStar.Math.Lemmas.lemma_mod_plus_distr_r
(as_nat5 (a0, a1, a2, a3, a4) % Vec.prime) (as_nat5 (a10, a11, a12, a13, a14)) Vec.prime }
(as_nat5 (a0, a1, a2, a3, a4) + as_nat5 (a10, a11, a12, a13, a14)) % Vec.prime;
(==) { }
(feval5 out).[0];
};
out
#pop-options
val fmul_r2_normalize5_lemma:
acc:felem5 2
-> r:felem5 2
-> r2:felem5 2 ->
Lemma
(requires
felem_fits5 acc (3, 3, 3, 3, 3) /\
felem_fits5 r (1, 1, 1, 1, 1) /\
felem_fits5 r2 (2, 2, 2, 2, 2) /\
feval5 r2 == Vec.compute_r2 (feval5 r).[0])
(ensures
(let out = fmul_r2_normalize5 acc r r2 in
felem_fits5 out (2, 1, 1, 1, 1) /\
(feval5 out).[0] == Vec.normalize_2 (feval5 r).[0] (feval5 acc)))
[SMTPat (fmul_r2_normalize5 acc r r2)]
let fmul_r2_normalize5_lemma acc r r2 =
let a = fmul_r2_normalize50 acc r r2 in
let (a0, a1, a2, a3, a4) = a in
let a10 = vec_interleave_high a0 a0 in
vec_interleave_high_lemma2 a0 a0;
let a11 = vec_interleave_high a1 a1 in
vec_interleave_high_lemma2 a1 a1;
let a12 = vec_interleave_high a2 a2 in
vec_interleave_high_lemma2 a2 a2;
let a13 = vec_interleave_high a3 a3 in
vec_interleave_high_lemma2 a3 a3;
let a14 = vec_interleave_high a4 a4 in
vec_interleave_high_lemma2 a4 a4;
let fa1 = (a10, a11, a12, a13, a14) in
eq_intro (feval5 fa1) (create2 (feval5 a).[1] (feval5 a).[1]);
assert (feval5 fa1 == create2 (feval5 a).[1] (feval5 a).[1]);
assert (felem_fits5 fa1 (1, 2, 1, 1, 2));
let out = fmul_r2_normalize51 a fa1 in
assert ((feval5 out).[0] == Vec.pfadd (feval5 a).[0] (feval5 a).[1]);
let res = carry_full_felem5 out in
carry_full_felem5_lemma out
val fmul_r4_normalize50:
acc:felem5 4
-> r:felem5 4
-> r2:felem5 4
-> r3:felem5 4
-> r4:felem5 4 ->
Pure (felem5 4)
(requires
felem_fits5 acc (3, 3, 3, 3, 3) /\
felem_fits5 r (1, 1, 1, 1, 1) /\
felem_fits5 r2 (2, 2, 2, 2, 2) /\
felem_fits5 r3 (2, 2, 2, 2, 2) /\
felem_fits5 r4 (2, 2, 2, 2, 2) /\
feval5 r2 == Vec.fmul (feval5 r) (feval5 r) /\
feval5 r3 == Vec.fmul (feval5 r2) (feval5 r) /\
feval5 r4 == Vec.compute_r4 (feval5 r).[0])
(ensures fun out ->
let fr4321 = create4 (feval5 r4).[0] (feval5 r3).[0] (feval5 r2).[0] (feval5 r).[0] in
feval5 out == Vec.fmul (feval5 acc) fr4321 /\
felem_fits5 out (1, 2, 1, 1, 2))
let fmul_r4_normalize50 acc fr fr2 fr3 fr4 =
let (r10, r11, r12, r13, r14) = fr in
let (r20, r21, r22, r23, r24) = fr2 in
let (r30, r31, r32, r33, r34) = fr3 in
let (r40, r41, r42, r43, r44) = fr4 in
let (a0, a1, a2, a3, a4) = acc in
let v12120 = vec_interleave_low r20 r10 in
vec_interleave_low_lemma_uint64_4 r20 r10;
let v34340 = vec_interleave_low r40 r30 in
vec_interleave_low_lemma_uint64_4 r40 r30;
let r12340 = vec_interleave_low_n 2 v34340 v12120 in
vec_interleave_low_n_lemma_uint64_4_2 v34340 v12120;
let v12121 = vec_interleave_low r21 r11 in
vec_interleave_low_lemma_uint64_4 r21 r11;
let v34341 = vec_interleave_low r41 r31 in
vec_interleave_low_lemma_uint64_4 r41 r31;
let r12341 = vec_interleave_low_n 2 v34341 v12121 in
vec_interleave_low_n_lemma_uint64_4_2 v34341 v12121;
let v12122 = vec_interleave_low r22 r12 in
vec_interleave_low_lemma_uint64_4 r22 r12;
let v34342 = vec_interleave_low r42 r32 in
vec_interleave_low_lemma_uint64_4 r42 r32;
let r12342 = vec_interleave_low_n 2 v34342 v12122 in
vec_interleave_low_n_lemma_uint64_4_2 v34342 v12122;
let v12123 = vec_interleave_low r23 r13 in
vec_interleave_low_lemma_uint64_4 r23 r13;
let v34343 = vec_interleave_low r43 r33 in
vec_interleave_low_lemma_uint64_4 r43 r33;
let r12343 = vec_interleave_low_n 2 v34343 v12123 in
vec_interleave_low_n_lemma_uint64_4_2 v34343 v12123;
let v12124 = vec_interleave_low r24 r14 in
vec_interleave_low_lemma_uint64_4 r24 r14;
let v34344 = vec_interleave_low r44 r34 in
vec_interleave_low_lemma_uint64_4 r44 r34;
let r12344 = vec_interleave_low_n 2 v34344 v12124 in
vec_interleave_low_n_lemma_uint64_4_2 v34344 v12124;
let fr1234 = (r12340, r12341, r12342, r12343, r12344) in
eq_intro (feval5 fr1234) (create4 (feval5 fr4).[0] (feval5 fr3).[0] (feval5 fr2).[0] (feval5 fr).[0]);
let fr12345 = precomp_r5 #4 fr1234 in
let out = fmul_r5 #4 acc fr1234 fr12345 in
fmul_r5_eval_lemma acc fr1234 fr12345;
fmul_r5_fits_lemma acc fr1234 fr12345;
out
val lemma_fmul_r4_normalize51:
#m:scale32{m <= 2}
-> o:uint64xN 4{felem_fits1 o m} ->
Lemma
(let v00 = vec_interleave_high_n 2 o o in
let v10 = vec_add_mod o v00 in
let v10h = vec_interleave_high v10 v10 in
let v20 = vec_add_mod v10 v10h in
felem_fits1 v20 (4 * m) /\
(uint64xN_v v20).[0] == (uint64xN_v o).[0] + (uint64xN_v o).[1] + (uint64xN_v o).[2] + (uint64xN_v o).[3])
let lemma_fmul_r4_normalize51 #m o =
let v00 = vec_interleave_high_n 2 o o in
vec_interleave_high_n_lemma_uint64_4_2 o o;
let (o0, o1, o2, o3) = ((vec_v o).[0], (vec_v o).[1], (vec_v o).[2], (vec_v o).[3]) in
assert (vec_v v00 == create4 o2 o3 o2 o3);
let v10 = vec_add_mod o v00 in
FStar.Math.Lemmas.modulo_lemma (v o0 + v o2) (pow2 64);
FStar.Math.Lemmas.modulo_lemma (v o1 + v o3) (pow2 64);
assert (v (vec_v v10).[0] == v o0 + v o2);
assert (v (vec_v v10).[1] == v o1 + v o3);
let v10h = vec_interleave_high v10 v10 in
vec_interleave_high_lemma_uint64_4 v10 v10;
assert (v (vec_v v10h).[0] == v (vec_v v10).[1]);
let v20 = vec_add_mod v10 v10h in
FStar.Math.Lemmas.modulo_lemma (v o0 + v o2 + v o1 + v o3) (pow2 64)
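// Net effect: vec_interleave_high_n 2 brings lanes 2,3 alongside lanes 0,1, the first
// vec_add_mod forms o0+o2 and o1+o3, vec_interleave_high exposes the second of those
// sums, and the last vec_add_mod leaves the horizontal sum o0+o1+o2+o3 in lane 0.
// The modulo_lemma calls discharge the no-overflow side conditions for each addition.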
val lemma_fmul_r4_normalize51_expand:
v2:felem5 4
-> out:felem5 4 ->
Lemma
(requires
(let (v20, v21, v22, v23, v24) = v2 in
let (o0, o1, o2, o3, o4) = out in
(uint64xN_v v20).[0] == (uint64xN_v o0).[0] + (uint64xN_v o0).[1] + (uint64xN_v o0).[2] + (uint64xN_v o0).[3] /\
(uint64xN_v v21).[0] == (uint64xN_v o1).[0] + (uint64xN_v o1).[1] + (uint64xN_v o1).[2] + (uint64xN_v o1).[3] /\
(uint64xN_v v22).[0] == (uint64xN_v o2).[0] + (uint64xN_v o2).[1] + (uint64xN_v o2).[2] + (uint64xN_v o2).[3] /\
(uint64xN_v v23).[0] == (uint64xN_v o3).[0] + (uint64xN_v o3).[1] + (uint64xN_v o3).[2] + (uint64xN_v o3).[3] /\
(uint64xN_v v24).[0] == (uint64xN_v o4).[0] + (uint64xN_v o4).[1] + (uint64xN_v o4).[2] + (uint64xN_v o4).[3]))
(ensures
(let (v20, v21, v22, v23, v24) = v2 in
let (o0, o1, o2, o3, o4) = out in
(feval5 v2).[0] == Vec.pfadd (Vec.pfadd (Vec.pfadd (feval5 out).[0] (feval5 out).[1]) (feval5 out).[2]) (feval5 out).[3]))
let lemma_fmul_r4_normalize51_expand v2 out =
let (v20, v21, v22, v23, v24) = as_tup64_i v2 0 in
let (o0, o1, o2, o3, o4) = out in
calc (==) {
as_nat5 (v20, v21, v22, v23, v24) % Vec.prime;
(==) { }
(as_nat5 (as_tup64_i out 0) + as_nat5 (as_tup64_i out 1) + as_nat5 (as_tup64_i out 2) + as_nat5 (as_tup64_i out 3)) % Vec.prime;
(==) { FStar.Math.Lemmas.lemma_mod_plus_distr_l (as_nat5 (as_tup64_i out 0) + as_nat5 (as_tup64_i out 1))
(as_nat5 (as_tup64_i out 2) + as_nat5 (as_tup64_i out 3)) Vec.prime }
((as_nat5 (as_tup64_i out 0) + as_nat5 (as_tup64_i out 1)) % Vec.prime + as_nat5 (as_tup64_i out 2) + as_nat5 (as_tup64_i out 3)) % Vec.prime;
(==) { FStar.Math.Lemmas.modulo_distributivity (as_nat5 (as_tup64_i out 0)) (as_nat5 (as_tup64_i out 1)) Vec.prime }
(((feval5 out).[0] + (feval5 out).[1]) % Vec.prime + as_nat5 (as_tup64_i out 2) + as_nat5 (as_tup64_i out 3)) % Vec.prime;
(==) { FStar.Math.Lemmas.lemma_mod_plus_distr_l (((feval5 out).[0] + (feval5 out).[1]) % Vec.prime + as_nat5 (as_tup64_i out 2))
(as_nat5 (as_tup64_i out 3)) Vec.prime }
((((feval5 out).[0] + (feval5 out).[1]) % Vec.prime + as_nat5 (as_tup64_i out 2)) % Vec.prime + as_nat5 (as_tup64_i out 3)) % Vec.prime;
(==) { FStar.Math.Lemmas.lemma_mod_plus_distr_r (((feval5 out).[0] + (feval5 out).[1]) % Vec.prime) (as_nat5 (as_tup64_i out 2)) Vec.prime }
((((feval5 out).[0] + (feval5 out).[1]) % Vec.prime + (feval5 out).[2]) % Vec.prime + as_nat5 (as_tup64_i out 3)) % Vec.prime;
(==) { FStar.Math.Lemmas.lemma_mod_plus_distr_r ((((feval5 out).[0] + (feval5 out).[1]) % Vec.prime + (feval5 out).[2]) % Vec.prime)
(as_nat5 (as_tup64_i out 3)) Vec.prime }
((((feval5 out).[0] + (feval5 out).[1]) % Vec.prime + (feval5 out).[2]) % Vec.prime + (feval5 out).[3]) % Vec.prime;
};
assert ((feval5 v2).[0] ==
((((feval5 out).[0] + (feval5 out).[1]) % Vec.prime + (feval5 out).[2]) % Vec.prime + (feval5 out).[3]) % Vec.prime)
val fmul_r4_normalize51: a:felem5 4 ->
Pure (felem5 4)
(requires felem_fits5 a (1, 2, 1, 1, 2))
(ensures fun res ->
felem_fits5 res (4, 8, 4, 4, 8) /\
(feval5 res).[0] == Vec.pfadd (Vec.pfadd (Vec.pfadd (feval5 a).[0] (feval5 a).[1]) (feval5 a).[2]) (feval5 a).[3])
let fmul_r4_normalize51 fa =
let (o0, o1, o2, o3, o4) = fa in
let v00 = vec_interleave_high_n 2 o0 o0 in
let v10 = vec_add_mod o0 v00 in
let v10h = vec_interleave_high v10 v10 in
let v20 = vec_add_mod v10 v10h in
lemma_fmul_r4_normalize51 #1 o0;
let v01 = vec_interleave_high_n 2 o1 o1 in
let v11 = vec_add_mod o1 v01 in
let v11h = vec_interleave_high v11 v11 in
let v21 = vec_add_mod v11 v11h in
lemma_fmul_r4_normalize51 #2 o1;
let v02 = vec_interleave_high_n 2 o2 o2 in
let v12 = vec_add_mod o2 v02 in
let v12h = vec_interleave_high v12 v12 in
let v22 = vec_add_mod v12 v12h in
lemma_fmul_r4_normalize51 #1 o2;
let v03 = vec_interleave_high_n 2 o3 o3 in
let v13 = vec_add_mod o3 v03 in
let v13h = vec_interleave_high v13 v13 in
let v23 = vec_add_mod v13 v13h in
lemma_fmul_r4_normalize51 #1 o3;
let v04 = vec_interleave_high_n 2 o4 o4 in
let v14 = vec_add_mod o4 v04 in
let v14h = vec_interleave_high v14 v14 in
let v24 = vec_add_mod v14 v14h in
lemma_fmul_r4_normalize51 #2 o4;
let res = (v20, v21, v22, v23, v24) in
lemma_fmul_r4_normalize51_expand res fa;
res
val fmul_r4_normalize5_lemma:
acc:felem5 4
-> r:felem5 4
-> r_5:felem5 4
-> r4:felem5 4 ->
Lemma
(requires
felem_fits5 acc (3, 3, 3, 3, 3) /\
felem_fits5 r (1, 1, 1, 1, 1) /\
felem_fits5 r4 (2, 2, 2, 2, 2) /\
r_5 == precomp_r5 r /\
feval5 r4 == Vec.compute_r4 (feval5 r).[0])
(ensures
(let out = fmul_r4_normalize5 acc r r_5 r4 in
felem_fits5 out (2, 1, 1, 1, 1) /\
(feval5 out).[0] == Vec.normalize_4 (feval5 r).[0] (feval5 acc)))
[SMTPat (fmul_r4_normalize5 acc r r_5 r4)]
#restart-solver
#push-options "--z3rlimit 500"
let fmul_r4_normalize5_lemma acc fr fr_5 fr4 =
let fr2 = fmul_r5 #4 fr fr fr_5 in
let fr3 = fmul_r5 #4 fr2 fr fr_5 in
let out = fmul_r4_normalize50 acc fr fr2 fr3 fr4 in
let v2 = fmul_r4_normalize51 out in
let res = carry_full_felem5 v2 in
carry_full_felem5_lemma v2
#pop-options
val load_felem5_lemma:
#w:lanes
-> lo:uint64xN w
-> hi:uint64xN w ->
Lemma
(let f = load_felem5 #w lo hi in
felem_fits5 f (1, 1, 1, 1, 1) /\
felem_less5 f (pow2 128) /\
feval5 f == createi #Vec.pfelem w
(fun i -> (uint64xN_v hi).[i] * pow2 64 + (uint64xN_v lo).[i]))
let load_felem5_lemma #w lo hi =
let f = load_felem5 #w lo hi in
assert_norm (pow2 64 * pow2 64 = pow2 128);
assert_norm (pow2 128 < Vec.prime);
let res = createi #Vec.pfelem w
(fun i -> (uint64xN_v hi).[i] * pow2 64 + (uint64xN_v lo).[i]) in
match w with
| 1 ->
load_felem5_lemma_i #w lo hi 0;
eq_intro (feval5 f) res
| 2 ->
load_felem5_lemma_i #w lo hi 0;
load_felem5_lemma_i #w lo hi 1;
eq_intro (feval5 f) res
| 4 ->
load_felem5_lemma_i #w lo hi 0;
load_felem5_lemma_i #w lo hi 1;
load_felem5_lemma_i #w lo hi 2;
load_felem5_lemma_i #w lo hi 3;
eq_intro (feval5 f) res
val load_felem5_4_interleave: lo:uint64xN 4 -> hi:uint64xN 4 -> Lemma
(let m0 = vec_interleave_low_n 2 lo hi in
let m1 = vec_interleave_high_n 2 lo hi in
let m2 = cast U64 4 (vec_shift_right (cast U128 2 m0) 48ul) in
let m3 = cast U64 4 (vec_shift_right (cast U128 2 m1) 48ul) in
let m4 = vec_interleave_high m0 m1 in
let t0 = vec_interleave_low m0 m1 in
let t3 = vec_interleave_low m2 m3 in
vec_v m4 == create4 (vec_v lo).[1] (vec_v lo).[3] (vec_v hi).[1] (vec_v hi).[3] /\
vec_v t0 == create4 (vec_v lo).[0] (vec_v lo).[2] (vec_v hi).[0] (vec_v hi).[2] /\
t3 == vec_or (vec_shift_right t0 48ul) (vec_shift_left m4 16ul))
let load_felem5_4_interleave lo hi =
let m0 = vec_interleave_low_n 2 lo hi in
vec_interleave_low_n_lemma_uint64_4_2 lo hi;
//assert (vec_v m0 == create4 (vec_v lo).[0] (vec_v lo).[1] (vec_v hi).[0] (vec_v hi).[1]);
let m1 = vec_interleave_high_n 2 lo hi in
vec_interleave_high_n_lemma_uint64_4_2 lo hi;
//assert (vec_v m1 == create4 (vec_v lo).[2] (vec_v lo).[3] (vec_v hi).[2] (vec_v hi).[3]);
let m4 = vec_interleave_high m0 m1 in
vec_interleave_high_lemma_uint64_4 m0 m1;
//assert (vec_v m4 == create4 (vec_v m0).[1] (vec_v m1).[1] (vec_v m0).[3] (vec_v m1).[3]);
assert (vec_v m4 == create4 (vec_v lo).[1] (vec_v lo).[3] (vec_v hi).[1] (vec_v hi).[3]);
let t0 = vec_interleave_low m0 m1 in
vec_interleave_low_lemma_uint64_4 m0 m1;
//assert (vec_v t0 == create4 (vec_v m0).[0] (vec_v m1).[0] (vec_v m0).[2] (vec_v m1).[2]);
assert (vec_v t0 == create4 (vec_v lo).[0] (vec_v lo).[2] (vec_v hi).[0] (vec_v hi).[2]);
let m2 = cast U64 4 (vec_shift_right (cast U128 2 m0) 48ul) in
vec_shift_right_uint128_small2 m0 48ul;
assert ((vec_v m2).[0] == (((vec_v lo).[0] >>. 48ul) |. ((vec_v lo).[1] <<. 16ul)));
assert ((vec_v m2).[2] == (((vec_v hi).[0] >>. 48ul) |. ((vec_v hi).[1] <<. 16ul)));
let m3 = cast U64 4 (vec_shift_right (cast U128 2 m1) 48ul) in
vec_shift_right_uint128_small2 m1 48ul;
assert ((vec_v m3).[0] == (((vec_v lo).[2] >>. 48ul) |. ((vec_v lo).[3] <<. 16ul)));
assert ((vec_v m3).[2] == (((vec_v hi).[2] >>. 48ul) |. ((vec_v hi).[3] <<. 16ul)));
let t3 = vec_interleave_low m2 m3 in
vec_interleave_low_lemma_uint64_4 m2 m3;
eq_intro (vec_v t3) (vec_v (vec_or (vec_shift_right t0 48ul) (vec_shift_left m4 16ul)));
vecv_extensionality t3 (vec_or (vec_shift_right t0 48ul) (vec_shift_left m4 16ul))
noextract
val load_felem5_4_compact: lo:uint64xN 4 -> hi:uint64xN 4 -> felem5 4
let load_felem5_4_compact lo hi =
let mask26 = mask26 4 in
let t3 = vec_or (vec_shift_right lo 48ul) (vec_shift_left hi 16ul) in
let o0 = vec_and lo mask26 in
let o1 = vec_and (vec_shift_right lo 26ul) mask26 in
let o2 = vec_and (vec_shift_right t3 4ul) mask26 in
let o3 = vec_and (vec_shift_right t3 30ul) mask26 in
let o4 = vec_shift_right hi 40ul in
(o0, o1, o2, o3, o4)
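// Per lane, writing the 128-bit input as n = hi * 2^64 + lo: t3 holds bits 48..111 of n
// (lo >> 48 contributes bits 48..63, hi << 16 contributes bits 64..111), and the limbs
// are o0 = bits 0..25, o1 = bits 26..51, o2 = bits 52..77, o3 = bits 78..103,
// o4 = bits 104..127; the compact-load lemmas below relate as_nat5 of this tuple back to n.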
val load_felem5_4_compact_lemma_i: lo:uint64xN 4 -> hi:uint64xN 4 -> i:nat{i < 4} ->
Lemma
(let f = as_tup64_i (load_felem5_4_compact lo hi) i in
tup64_fits5 f (1, 1, 1, 1, 1) /\
as_nat5 f < pow2 128 /\
as_nat5 f % Vec.prime == (uint64xN_v hi).[i] * pow2 64 + (uint64xN_v lo).[i])
let load_felem5_4_compact_lemma_i lo hi i =
assert (as_tup64_i (load_felem5_4_compact lo hi) i == load_tup64_4_compact (vec_v lo).[i] (vec_v hi).[i]);
load_tup64_4_compact_lemma (vec_v lo).[i] (vec_v hi).[i]
val load_felem5_4_lemma: lo:uint64xN 4 -> hi:uint64xN 4 ->
Lemma
(let f = load_felem5_4_compact lo hi in
felem_fits5 f (1, 1, 1, 1, 1) /\
felem_less5 f (pow2 128) /\
feval5 f == createi #Vec.pfelem 4 (fun i -> (uint64xN_v hi).[i] * pow2 64 + (uint64xN_v lo).[i]))
let load_felem5_4_lemma lo hi =
let f = load_felem5_4_compact lo hi in
assert_norm (pow2 64 * pow2 64 = pow2 128);
assert_norm (pow2 128 < Vec.prime);
let res = createi #Vec.pfelem 4
(fun i -> (uint64xN_v hi).[i] * pow2 64 + (uint64xN_v lo).[i]) in
load_felem5_4_compact_lemma_i lo hi 0;
load_felem5_4_compact_lemma_i lo hi 1;
load_felem5_4_compact_lemma_i lo hi 2;
load_felem5_4_compact_lemma_i lo hi 3;
eq_intro (feval5 f) res
val load_felem5_le: b:lseq uint8 64 -> Lemma
(let lo0 = vec_from_bytes_le U64 4 (sub b 0 32) in
let hi0 = vec_from_bytes_le U64 4 (sub b 32 32) in
let f = load_felem5_4 lo0 hi0 in
felem_fits5 f (1, 1, 1, 1, 1) /\
felem_less5 f (pow2 128) /\
feval5 f == Vec.load_elem4 b)
let load_felem5_le b =
let lo0 = vec_from_bytes_le U64 4 (sub b 0 32) in
let hi0 = vec_from_bytes_le U64 4 (sub b 32 32) in
let lo1 = vec_interleave_low_n 2 lo0 hi0 in
let hi1 = vec_interleave_high_n 2 lo0 hi0 in
let lo = vec_interleave_low lo1 hi1 in
let hi = vec_interleave_high lo1 hi1 in
let out = load_felem5_4_compact lo hi in
load_felem5_4_interleave lo0 hi0;
assert (out == load_felem5_4 lo0 hi0);
load_felem5_4_lemma lo hi;
Hacl.Impl.Poly1305.Lemmas.uints_from_bytes_le_lemma64_4 b;
eq_intro (feval5 out) (Vec.load_elem4 b)
val load_acc5_2_lemma:
f:felem5 2{felem_fits5 f (2, 2, 2, 2, 2)}
-> e:felem5 2{felem_fits5 e (1, 1, 1, 1, 1)} ->
Lemma
(let res = load_acc5_2 f e in
felem_fits5 res (3, 3, 3, 3, 3) /\
feval5 res == Vec.fadd (create2 (feval5 f).[0] 0) (feval5 e))
[SMTPat (load_acc5_2 f e)]
let load_acc5_2_lemma f e =
let (f0, f1, f2, f3, f4) = f in
let r0 = vec_set f0 1ul (u64 0) in
let r1 = vec_set f1 1ul (u64 0) in
let r2 = vec_set f2 1ul (u64 0) in
let r3 = vec_set f3 1ul (u64 0) in
let r4 = vec_set f4 1ul (u64 0) in
let r = (r0, r1, r2, r3, r4) in
//assert ((feval5 r).[0] == (feval5 f).[0]);
assert ((feval5 r).[1] == 0);
eq_intro (feval5 r) (create2 (feval5 f).[0] 0)
val load_acc5_4_lemma:
f:felem5 4{felem_fits5 f (2, 2, 2, 2, 2)}
-> e:felem5 4{felem_fits5 e (1, 1, 1, 1, 1)} ->
Lemma
(let res = load_acc5_4 f e in
felem_fits5 res (3, 3, 3, 3, 3) /\
feval5 res == Vec.fadd (create4 (feval5 f).[0] 0 0 0) (feval5 e))
[SMTPat (load_acc5_4 f e)]
let load_acc5_4_lemma f e =
let (f0, f1, f2, f3, f4) = f in
let (r0, r1, r2, r3, r4) = (zero 4, zero 4, zero 4, zero 4, zero 4) in
let r = (r0, r1, r2, r3, r4) in
assert ((feval5 r).[1] == 0);
assert ((feval5 r).[2] == 0);
assert ((feval5 r).[3] == 0);
let r0 = vec_set r0 0ul (vec_get f0 0ul) in
let r1 = vec_set r1 0ul (vec_get f1 0ul) in
let r2 = vec_set r2 0ul (vec_get f2 0ul) in
let r3 = vec_set r3 0ul (vec_get f3 0ul) in
let r4 = vec_set r4 0ul (vec_get f4 0ul) in
let r = (r0, r1, r2, r3, r4) in
assert ((feval5 r).[0] == (feval5 f).[0]);
assert ((feval5 r).[1] == 0);
assert ((feval5 r).[2] == 0);
assert ((feval5 r).[3] == 0);
eq_intro (feval5 r) (create4 (feval5 f).[0] 0 0 0)
val store_felem5_lemma:
#w:lanes
-> f:felem5 w{felem_fits5 f (1, 1, 1, 1, 1)} ->
Lemma
(let (lo, hi) = store_felem5 f in
v hi * pow2 64 + v lo == (fas_nat5 f).[0] % pow2 128)
[SMTPat (store_felem5 f)]
let store_felem5_lemma #w f =
store_felem5_lemma #w f
val set_bit5_lemma:
#w:lanes
-> f:lseq (uint64xN w) 5
-> i:size_nat{i <= 128} ->
Lemma
(requires
lfelem_fits f (1, 1, 1, 1, 1) /\
lfelem_less f (pow2 i))
(ensures
lfelem_fits (set_bit5 f i) (1, 1, 1, 1, 1) /\
lfeval (set_bit5 f i) == map (Vec.pfadd (pow2 i)) (lfeval f)) | false | false | Hacl.Spec.Poly1305.Field32xN.Lemmas.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 100,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val set_bit5_lemma:
#w:lanes
-> f:lseq (uint64xN w) 5
-> i:size_nat{i <= 128} ->
Lemma
(requires
lfelem_fits f (1, 1, 1, 1, 1) /\
lfelem_less f (pow2 i))
(ensures
lfelem_fits (set_bit5 f i) (1, 1, 1, 1, 1) /\
lfeval (set_bit5 f i) == map (Vec.pfadd (pow2 i)) (lfeval f)) | [] | Hacl.Spec.Poly1305.Field32xN.Lemmas.set_bit5_lemma | {
"file_name": "code/poly1305/Hacl.Spec.Poly1305.Field32xN.Lemmas.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} |
f: Lib.Sequence.lseq (Hacl.Spec.Poly1305.Field32xN.uint64xN w) 5 ->
i: Lib.IntTypes.size_nat{i <= 128}
-> FStar.Pervasives.Lemma
(requires
Hacl.Spec.Poly1305.Field32xN.lfelem_fits f (1, 1, 1, 1, 1) /\
Hacl.Spec.Poly1305.Field32xN.lfelem_less f (Prims.pow2 i))
(ensures
Hacl.Spec.Poly1305.Field32xN.lfelem_fits (Hacl.Spec.Poly1305.Field32xN.set_bit5 f i)
(1, 1, 1, 1, 1) /\
Hacl.Spec.Poly1305.Field32xN.lfeval (Hacl.Spec.Poly1305.Field32xN.set_bit5 f i) ==
Lib.Sequence.map (Hacl.Spec.Poly1305.Vec.pfadd (Prims.pow2 i))
(Hacl.Spec.Poly1305.Field32xN.lfeval f)) | {
"end_col": 40,
"end_line": 857,
"start_col": 27,
"start_line": 842
} |
FStar.Pervasives.Lemma | val load_felem5_lemma:
#w:lanes
-> lo:uint64xN w
-> hi:uint64xN w ->
Lemma
(let f = load_felem5 #w lo hi in
felem_fits5 f (1, 1, 1, 1, 1) /\
felem_less5 f (pow2 128) /\
feval5 f == createi #Vec.pfelem w
(fun i -> (uint64xN_v hi).[i] * pow2 64 + (uint64xN_v lo).[i])) | [
{
"abbrev": true,
"full_module": "Hacl.Spec.Poly1305.Vec",
"short_module": "Vec"
},
{
"abbrev": false,
"full_module": "Hacl.Poly1305.Field32xN.Lemmas2",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Poly1305.Field32xN.Lemmas1",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Poly1305.Field32xN.Lemmas0",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305.Field32xN",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305.Field32xN",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305.Field32xN",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let load_felem5_lemma #w lo hi =
let f = load_felem5 #w lo hi in
assert_norm (pow2 64 * pow2 64 = pow2 128);
assert_norm (pow2 128 < Vec.prime);
let res = createi #Vec.pfelem w
(fun i -> (uint64xN_v hi).[i] * pow2 64 + (uint64xN_v lo).[i]) in
match w with
| 1 ->
load_felem5_lemma_i #w lo hi 0;
eq_intro (feval5 f) res
| 2 ->
load_felem5_lemma_i #w lo hi 0;
load_felem5_lemma_i #w lo hi 1;
eq_intro (feval5 f) res
| 4 ->
load_felem5_lemma_i #w lo hi 0;
load_felem5_lemma_i #w lo hi 1;
load_felem5_lemma_i #w lo hi 2;
load_felem5_lemma_i #w lo hi 3;
eq_intro (feval5 f) res | val load_felem5_lemma:
#w:lanes
-> lo:uint64xN w
-> hi:uint64xN w ->
Lemma
(let f = load_felem5 #w lo hi in
felem_fits5 f (1, 1, 1, 1, 1) /\
felem_less5 f (pow2 128) /\
feval5 f == createi #Vec.pfelem w
(fun i -> (uint64xN_v hi).[i] * pow2 64 + (uint64xN_v lo).[i]))
let load_felem5_lemma #w lo hi = | false | null | true | let f = load_felem5 #w lo hi in
assert_norm (pow2 64 * pow2 64 = pow2 128);
assert_norm (pow2 128 < Vec.prime);
let res =
createi #Vec.pfelem w (fun i -> (uint64xN_v hi).[ i ] * pow2 64 + (uint64xN_v lo).[ i ])
in
match w with
| 1 ->
load_felem5_lemma_i #w lo hi 0;
eq_intro (feval5 f) res
| 2 ->
load_felem5_lemma_i #w lo hi 0;
load_felem5_lemma_i #w lo hi 1;
eq_intro (feval5 f) res
| 4 ->
load_felem5_lemma_i #w lo hi 0;
load_felem5_lemma_i #w lo hi 1;
load_felem5_lemma_i #w lo hi 2;
load_felem5_lemma_i #w lo hi 3;
eq_intro (feval5 f) res | {
"checked_file": "Hacl.Spec.Poly1305.Field32xN.Lemmas.fst.checked",
"dependencies": [
"prims.fst.checked",
"Lib.Sequence.fsti.checked",
"Lib.IntVector.fsti.checked",
"Lib.IntTypes.fsti.checked",
"Hacl.Spec.Poly1305.Vec.fst.checked",
"Hacl.Spec.Poly1305.Field32xN.fst.checked",
"Hacl.Poly1305.Field32xN.Lemmas2.fst.checked",
"Hacl.Poly1305.Field32xN.Lemmas1.fst.checked",
"Hacl.Poly1305.Field32xN.Lemmas0.fst.checked",
"Hacl.Impl.Poly1305.Lemmas.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Classical.fsti.checked",
"FStar.Calc.fsti.checked"
],
"interface_file": false,
"source_file": "Hacl.Spec.Poly1305.Field32xN.Lemmas.fst"
} | [
"lemma"
] | [
"Hacl.Spec.Poly1305.Field32xN.lanes",
"Hacl.Spec.Poly1305.Field32xN.uint64xN",
"Lib.Sequence.eq_intro",
"Hacl.Spec.Poly1305.Vec.pfelem",
"Hacl.Spec.Poly1305.Field32xN.feval5",
"Prims.unit",
"Hacl.Poly1305.Field32xN.Lemmas2.load_felem5_lemma_i",
"Lib.Sequence.lseq",
"Prims.l_Forall",
"Prims.nat",
"Prims.l_imp",
"Prims.b2t",
"Prims.op_LessThan",
"Prims.eq2",
"Lib.Sequence.index",
"Prims.op_Addition",
"Prims.op_Multiply",
"Hacl.Spec.Poly1305.Field32xN.uint64xN_v",
"Prims.pow2",
"Lib.Sequence.createi",
"FStar.Mul.op_Star",
"Lib.Sequence.op_String_Access",
"FStar.Pervasives.assert_norm",
"Hacl.Spec.Poly1305.Vec.prime",
"Prims.op_Equality",
"Prims.int",
"Hacl.Spec.Poly1305.Field32xN.felem5",
"Hacl.Spec.Poly1305.Field32xN.load_felem5"
] | [] | module Hacl.Spec.Poly1305.Field32xN.Lemmas
open Lib.IntTypes
open Lib.IntVector
open Lib.Sequence
open FStar.Mul
open Hacl.Spec.Poly1305.Field32xN
open Hacl.Poly1305.Field32xN.Lemmas0
open Hacl.Poly1305.Field32xN.Lemmas1
open Hacl.Poly1305.Field32xN.Lemmas2
module Vec = Hacl.Spec.Poly1305.Vec
#set-options "--z3rlimit 100 --max_fuel 0 --initial_ifuel 1 --max_ifuel 1 --using_facts_from '* -FStar.Seq'"
val lemma_feval_is_fas_nat_i:
#w:lanes
-> f:felem5 w{felem_less5 f (pow2 128)}
-> i:size_nat{i < w} ->
Lemma ((feval5 f).[i] == (fas_nat5 f).[i])
let lemma_feval_is_fas_nat_i #w f i =
assert_norm (pow2 128 < Vec.prime);
assert ((feval5 f).[i] == (as_nat5 (transpose f).[i]) % Vec.prime);
FStar.Math.Lemmas.modulo_lemma (as_nat5 (transpose f).[i]) Vec.prime
val lemma_feval_is_fas_nat:
#w:lanes
-> f:felem5 w{felem_less5 f (pow2 128)} ->
Lemma (forall (i:nat). i < w ==> (fas_nat5 f).[i] == (feval5 f).[i])
let lemma_feval_is_fas_nat #w f =
FStar.Classical.forall_intro (lemma_feval_is_fas_nat_i #w f)
val precomp_r5_fits_lemma:
#w:lanes
-> r:felem5 w{felem_fits5 r (1, 1, 1, 1, 1)} ->
Lemma (felem_fits5 (precomp_r5 #w r) (5, 5, 5, 5, 5))
[SMTPat (precomp_r5 #w r)]
let precomp_r5_fits_lemma #w r =
FStar.Classical.forall_intro (precomp_r5_as_tup64 #w r)
val precomp_r5_fits_lemma2:
#w:lanes
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)} ->
Lemma (felem_fits5 (precomp_r5 #w r) (10, 10, 10, 10, 10))
[SMTPat (precomp_r5 #w r)]
let precomp_r5_fits_lemma2 #w r =
FStar.Classical.forall_intro (precomp_r5_as_tup64 #w r)
val precomp_r5_zeros: w:lanes -> Lemma
(let r = (zero w, zero w, zero w, zero w, zero w) in
precomp_r5 r == (zero w, zero w, zero w, zero w, zero w))
let precomp_r5_zeros w =
let r = (zero w, zero w, zero w, zero w, zero w) in
let (r0, r1, r2, r3, r4) = precomp_r5 r in
let aux (i:nat{i < w}) : Lemma ((vec_v (vec_smul_mod (zero w) (u64 5))).[i] == u64 0) = () in
Classical.forall_intro aux;
eq_intro (vec_v (vec_smul_mod (zero w) (u64 5))) (vec_v (zero w));
vecv_extensionality (vec_smul_mod (zero w) (u64 5)) (zero w)
val fadd5_fits_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (2,2,2,2,2)}
-> f2:felem5 w{felem_fits5 f2 (1,1,1,1,1)} ->
Lemma (felem_fits5 (fadd5 f1 f2) (3,3,3,3,3))
[SMTPat (fadd5 f1 f2)]
let fadd5_fits_lemma #w f1 f2 =
let (f10, f11, f12, f13, f14) = f1 in
let (f20, f21, f22, f23, f24) = f2 in
let o = fadd5 f1 f2 in
vec_add_mod_lemma f10 f20;
vec_add_mod_lemma f11 f21;
vec_add_mod_lemma f12 f22;
vec_add_mod_lemma f13 f23;
vec_add_mod_lemma f14 f24
val fadd5_eval_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (2,2,2,2,2)}
-> f2:felem5 w{felem_fits5 f2 (1,1,1,1,1)} ->
Lemma (feval5 (fadd5 f1 f2) == map2 Vec.pfadd (feval5 f1) (feval5 f2))
[SMTPat (fadd5 f1 f2)]
let fadd5_eval_lemma #w f1 f2 =
let o = fadd5 f1 f2 in
FStar.Classical.forall_intro (fadd5_eval_lemma_i f1 f2);
eq_intro (feval5 o) (map2 Vec.pfadd (feval5 f1) (feval5 f2))
val mul_felem5_fits_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10)} ->
Lemma (felem_wide_fits5 (mul_felem5 #w f1 r r5) (126, 102, 78, 54, 30))
[SMTPat (mul_felem5 #w f1 r r5)]
let mul_felem5_fits_lemma #w f1 r r5 =
let (r0, r1, r2, r3, r4) = r in
let (f10, f11, f12, f13, f14) = f1 in
let (r50, r51, r52, r53, r54) = r5 in
let (a0,a1,a2,a3,a4) = smul_felem5 #w f10 (r0,r1,r2,r3,r4) in
smul_felem5_fits_lemma #w #3 #(2,2,2,2,2) f10 (r0,r1,r2,r3,r4);
let (a10,a11,a12,a13,a14) = smul_add_felem5 #w f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4) in
smul_add_felem5_fits_lemma #w #3 #(10,2,2,2,2) #(6,6,6,6,6) f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4);
let (a20,a21,a22,a23,a24) = smul_add_felem5 #w f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14) in
smul_add_felem5_fits_lemma #w #3 #(10,10,2,2,2) #(36,12,12,12,12) f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14);
let (a30,a31,a32,a33,a34) = smul_add_felem5 #w f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24) in
smul_add_felem5_fits_lemma #w #3 #(10,10,10,2,2) #(66,42,18,18,18) f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24);
let (a40,a41,a42,a43,a44) = smul_add_felem5 #w f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34) in
smul_add_felem5_fits_lemma #w #3 #(10,10,10,10,2) #(96,72,48,24,24) f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34)
val mul_felem5_eval_lemma_i:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10) /\ r5 == precomp_r5 r}
-> i:nat{i < w} ->
Lemma ((feval5 (mul_felem5 #w f1 r r5)).[i] == (feval5 f1).[i] `Vec.pfmul` (feval5 r).[i])
let mul_felem5_eval_lemma_i #w f1 r r5 i =
let (r0, r1, r2, r3, r4) = r in
let (f10, f11, f12, f13, f14) = f1 in
let (r50, r51, r52, r53, r54) = r5 in
let (a0,a1,a2,a3,a4) = smul_felem5 #w f10 (r0,r1,r2,r3,r4) in
smul_felem5_eval_lemma #w #3 #(2,2,2,2,2) f10 (r0,r1,r2,r3,r4);
smul_felem5_fits_lemma #w #3 #(2,2,2,2,2) f10 (r0,r1,r2,r3,r4);
assert ((fas_nat5 (a0,a1,a2,a3,a4)).[i] == (uint64xN_v f10).[i] * (fas_nat5 (r0,r1,r2,r3,r4)).[i]);
let (a10,a11,a12,a13,a14) = smul_add_felem5 #w f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4) in
smul_add_felem5_eval_lemma #w #3 #(10,2,2,2,2) #(6,6,6,6,6) f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4);
smul_add_felem5_fits_lemma #w #3 #(10,2,2,2,2) #(6,6,6,6,6) f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4);
assert ((fas_nat5 (a10,a11,a12,a13,a14)).[i] == (fas_nat5 (a0,a1,a2,a3,a4)).[i] + (uint64xN_v f11).[i] * (fas_nat5 (r54,r0,r1,r2,r3)).[i]);
let (a20,a21,a22,a23,a24) = smul_add_felem5 #w f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14) in
smul_add_felem5_eval_lemma #w #3 #(10,10,2,2,2) #(36,12,12,12,12) f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14);
smul_add_felem5_fits_lemma #w #3 #(10,10,2,2,2) #(36,12,12,12,12) f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14);
assert ((fas_nat5 (a20,a21,a22,a23,a24)).[i] == (fas_nat5 (a10,a11,a12,a13,a14)).[i] + (uint64xN_v f12).[i] * (fas_nat5 (r53,r54,r0,r1,r2)).[i]);
let (a30,a31,a32,a33,a34) = smul_add_felem5 #w f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24) in
smul_add_felem5_eval_lemma #w #3 #(10,10,10,2,2) #(66,42,18,18,18) f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24);
smul_add_felem5_fits_lemma #w #3 #(10,10,10,2,2) #(66,42,18,18,18) f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24);
assert ((fas_nat5 (a30,a31,a32,a33,a34)).[i] == (fas_nat5 (a20,a21,a22,a23,a24)).[i] + (uint64xN_v f13).[i] * (fas_nat5 (r52,r53,r54,r0,r1)).[i]);
let (a40,a41,a42,a43,a44) = smul_add_felem5 #w f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34) in
smul_add_felem5_eval_lemma #w #3 #(10,10,10,10,2) #(96,72,48,24,24) f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34);
smul_add_felem5_fits_lemma #w #3 #(10,10,10,10,2) #(96,72,48,24,24) f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34);
assert ((fas_nat5 (a40,a41,a42,a43,a44)).[i] == (fas_nat5 (a30,a31,a32,a33,a34)).[i] + (uint64xN_v f14).[i] * (fas_nat5 (r51,r52,r53,r54,r0)).[i]);
assert ((fas_nat5 (a40,a41,a42,a43,a44)).[i] ==
(uint64xN_v f10).[i] * (fas_nat5 (r0,r1,r2,r3,r4)).[i] +
(uint64xN_v f11).[i] * (fas_nat5 (r54,r0,r1,r2,r3)).[i] +
(uint64xN_v f12).[i] * (fas_nat5 (r53,r54,r0,r1,r2)).[i] +
(uint64xN_v f13).[i] * (fas_nat5 (r52,r53,r54,r0,r1)).[i] +
(uint64xN_v f14).[i] * (fas_nat5 (r51,r52,r53,r54,r0)).[i]);
mul_felem5_eval_as_tup64 #w f1 r r5 i;
mul_felem5_lemma (as_tup64_i f1 i) (as_tup64_i r i)
val mul_felem5_eval_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10) /\ r5 == precomp_r5 r} ->
Lemma (feval5 (mul_felem5 #w f1 r r5) == map2 (Vec.pfmul) (feval5 f1) (feval5 r))
[SMTPat (mul_felem5 #w f1 r r5)]
let mul_felem5_eval_lemma #w f1 r r5 =
let tmp = map2 (Vec.pfmul) (feval5 f1) (feval5 r) in
FStar.Classical.forall_intro (mul_felem5_eval_lemma_i #w f1 r r5);
eq_intro (feval5 (mul_felem5 #w f1 r r5)) tmp
val fmul_r5_fits_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10)} ->
Lemma (felem_fits5 (fmul_r5 #w f1 r r5) (1, 2, 1, 1, 2))
[SMTPat (fmul_r5 #w f1 r r5)]
let fmul_r5_fits_lemma #w f1 r r5 =
let tmp = mul_felem5 f1 r r5 in
mul_felem5_fits_lemma #w f1 r r5;
let res = carry_wide_felem5 tmp in
carry_wide_felem5_fits_lemma #w tmp
val fmul_r5_eval_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10) /\ r5 == precomp_r5 r} ->
Lemma (feval5 (fmul_r5 #w f1 r r5) == map2 (Vec.pfmul) (feval5 f1) (feval5 r))
[SMTPat (fmul_r5 #w f1 r r5)]
let fmul_r5_eval_lemma #w f1 r r5 =
let tmp = mul_felem5 f1 r r5 in
mul_felem5_eval_lemma #w f1 r r5;
let res = carry_wide_felem5 tmp in
carry_wide_felem5_eval_lemma #w tmp
val fadd_mul_r5_fits_lemma:
#w:lanes
-> acc:felem5 w{felem_fits5 acc (2, 2, 2, 2, 2)}
-> f1:felem5 w{felem_fits5 f1 (1, 1, 1, 1, 1)}
-> r:felem5 w{felem_fits5 r (1, 1, 1, 1, 1)}
-> r5:felem5 w{felem_fits5 r5 (5, 5, 5, 5, 5)} ->
Lemma (felem_fits5 (fadd_mul_r5 acc f1 r r5) (1, 2, 1, 1, 2))
[SMTPat (fadd_mul_r5 acc f1 r r5)]
let fadd_mul_r5_fits_lemma #w acc f1 r r5 =
let acc1 = fadd5 acc f1 in
fadd5_fits_lemma #w acc f1;
let tmp = mul_felem5 acc1 r r5 in
mul_felem5_fits_lemma #w acc1 r r5;
let res = carry_wide_felem5 tmp in
carry_wide_felem5_fits_lemma #w tmp
val fadd_mul_r5_eval_lemma:
#w:lanes
-> acc:felem5 w{felem_fits5 acc (2, 2, 2, 2, 2)}
-> f1:felem5 w{felem_fits5 f1 (1, 1, 1, 1, 1)}
-> r:felem5 w{felem_fits5 r (1, 1, 1, 1, 1)}
-> r5:felem5 w{felem_fits5 r5 (5, 5, 5, 5, 5) /\ r5 == precomp_r5 r} ->
Lemma (feval5 (fadd_mul_r5 acc f1 r r5) ==
map2 (Vec.pfmul) (map2 (Vec.pfadd) (feval5 acc) (feval5 f1)) (feval5 r))
[SMTPat (fadd_mul_r5 acc f1 r r5)]
let fadd_mul_r5_eval_lemma #w acc f1 r r5 =
let acc1 = fadd5 acc f1 in
fadd5_eval_lemma #w acc f1;
let tmp = mul_felem5 acc1 r r5 in
mul_felem5_eval_lemma #w acc1 r r5;
let res = carry_wide_felem5 tmp in
carry_wide_felem5_eval_lemma #w tmp
val reduce_felem5_eval_lemma:
#w:lanes
-> f:felem5 w{felem_fits5 f (2, 2, 2, 2, 2)} ->
Lemma
(felem_fits5 (reduce_felem5 f) (1, 1, 1, 1, 1) /\
(feval5 f).[0] == (fas_nat5 (reduce_felem5 f)).[0])
[SMTPat (reduce_felem5 f)]
let reduce_felem5_eval_lemma #w f =
carry_full_felem5_eval_lemma f;
carry_full_felem5_fits_lemma f;
let f = carry_full_felem5 f in
carry_reduce_felem5_lemma #w f;
subtract_p5_felem5_lemma #w (carry_full_felem5 f)
val fmul_r2_normalize50:
acc:felem5 2
-> r:felem5 2
-> r2:felem5 2 ->
Pure (felem5 2)
(requires
felem_fits5 acc (3, 3, 3, 3, 3) /\
felem_fits5 r (1, 1, 1, 1, 1) /\
felem_fits5 r2 (2, 2, 2, 2, 2) /\
feval5 r2 == Vec.compute_r2 (feval5 r).[0])
(ensures fun a ->
let fr21 = create2 (feval5 r2).[0] (feval5 r).[0] in
feval5 a == Vec.fmul (feval5 acc) fr21 /\
felem_fits5 a (1, 2, 1, 1, 2))
let fmul_r2_normalize50 (a0, a1, a2, a3, a4) (r0, r1, r2, r3, r4) (r20, r21, r22, r23, r24) =
let r210 = vec_interleave_low r20 r0 in
vec_interleave_low_lemma2 r20 r0;
let r211 = vec_interleave_low r21 r1 in
vec_interleave_low_lemma2 r21 r1;
let r212 = vec_interleave_low r22 r2 in
vec_interleave_low_lemma2 r22 r2;
let r213 = vec_interleave_low r23 r3 in
vec_interleave_low_lemma2 r23 r3;
let r214 = vec_interleave_low r24 r4 in
vec_interleave_low_lemma2 r24 r4;
let acc = (a0, a1, a2, a3, a4) in
let fr = (r0, r1, r2, r3, r4) in
let fr2 = (r20, r21, r22, r23, r24) in
assert ((feval5 fr2).[0] == Vec.pfmul ((feval5 fr).[0]) ((feval5 fr).[0]));
let fr21 = (r210, r211, r212, r213, r214) in
eq_intro (feval5 fr21) (create2 (feval5 fr2).[0] (feval5 fr).[0]);
assert (feval5 fr21 == create2 (feval5 fr2).[0] (feval5 fr).[0]);
assert (felem_fits5 fr21 (2, 2, 2, 2, 2));
let fr215 = precomp_r5 #2 fr21 in
let a = fmul_r5 #2 acc fr21 fr215 in
fmul_r5_eval_lemma acc fr21 fr215;
fmul_r5_fits_lemma acc fr21 fr215;
assert (feval5 a == Vec.fmul (feval5 acc) (feval5 fr21));
assert (felem_fits5 a (1, 2, 1, 1, 2));
a
#push-options "--z3rlimit 150"
val fmul_r2_normalize51:
a:felem5 2
-> fa1:felem5 2 ->
Pure (felem5 2)
(requires
felem_fits5 a (1, 2, 1, 1, 2) /\
felem_fits5 fa1 (1, 2, 1, 1, 2) /\
feval5 fa1 == create2 (feval5 a).[1] (feval5 a).[1])
(ensures fun out ->
(feval5 out).[0] == Vec.pfadd (feval5 a).[0] (feval5 a).[1] /\
felem_fits5 out (2, 4, 2, 2, 4))
let fmul_r2_normalize51 a fa1 =
let (a0, a1, a2, a3, a4) = a in
let (a10, a11, a12, a13, a14) = fa1 in
let o0 = vec_add_mod a0 a10 in
let o1 = vec_add_mod a1 a11 in
let o2 = vec_add_mod a2 a12 in
let o3 = vec_add_mod a3 a13 in
let o4 = vec_add_mod a4 a14 in
let out = (o0, o1, o2, o3, o4) in
let (a0, a1, a2, a3, a4) = as_tup64_i a 0 in
let (a10, a11, a12, a13, a14) = as_tup64_i fa1 0 in
let (o0, o1, o2, o3, o4) = as_tup64_i out 0 in
FStar.Math.Lemmas.modulo_lemma (v a0 + v a10) (pow2 64);
FStar.Math.Lemmas.modulo_lemma (v a1 + v a11) (pow2 64);
FStar.Math.Lemmas.modulo_lemma (v a2 + v a12) (pow2 64);
FStar.Math.Lemmas.modulo_lemma (v a3 + v a13) (pow2 64);
FStar.Math.Lemmas.modulo_lemma (v a4 + v a14) (pow2 64);
assert (felem_fits5 out (2, 4, 2, 2, 4));
calc (==) {
((feval5 a).[0] + (feval5 a).[1]) % Vec.prime;
(==) { }
(as_nat5 (a0, a1, a2, a3, a4) % Vec.prime + as_nat5 (a10, a11, a12, a13, a14) % Vec.prime) % Vec.prime;
(==) { FStar.Math.Lemmas.lemma_mod_plus_distr_l
(as_nat5 (a0, a1, a2, a3, a4)) (as_nat5 (a10, a11, a12, a13, a14)) Vec.prime;
FStar.Math.Lemmas.lemma_mod_plus_distr_r
(as_nat5 (a0, a1, a2, a3, a4) % Vec.prime) (as_nat5 (a10, a11, a12, a13, a14)) Vec.prime }
(as_nat5 (a0, a1, a2, a3, a4) + as_nat5 (a10, a11, a12, a13, a14)) % Vec.prime;
(==) { }
(feval5 out).[0];
};
out
#pop-options
val fmul_r2_normalize5_lemma:
acc:felem5 2
-> r:felem5 2
-> r2:felem5 2 ->
Lemma
(requires
felem_fits5 acc (3, 3, 3, 3, 3) /\
felem_fits5 r (1, 1, 1, 1, 1) /\
felem_fits5 r2 (2, 2, 2, 2, 2) /\
feval5 r2 == Vec.compute_r2 (feval5 r).[0])
(ensures
(let out = fmul_r2_normalize5 acc r r2 in
felem_fits5 out (2, 1, 1, 1, 1) /\
(feval5 out).[0] == Vec.normalize_2 (feval5 r).[0] (feval5 acc)))
[SMTPat (fmul_r2_normalize5 acc r r2)]
let fmul_r2_normalize5_lemma acc r r2 =
let a = fmul_r2_normalize50 acc r r2 in
let (a0, a1, a2, a3, a4) = a in
let a10 = vec_interleave_high a0 a0 in
vec_interleave_high_lemma2 a0 a0;
let a11 = vec_interleave_high a1 a1 in
vec_interleave_high_lemma2 a1 a1;
let a12 = vec_interleave_high a2 a2 in
vec_interleave_high_lemma2 a2 a2;
let a13 = vec_interleave_high a3 a3 in
vec_interleave_high_lemma2 a3 a3;
let a14 = vec_interleave_high a4 a4 in
vec_interleave_high_lemma2 a4 a4;
let fa1 = (a10, a11, a12, a13, a14) in
eq_intro (feval5 fa1) (create2 (feval5 a).[1] (feval5 a).[1]);
assert (feval5 fa1 == create2 (feval5 a).[1] (feval5 a).[1]);
assert (felem_fits5 fa1 (1, 2, 1, 1, 2));
let out = fmul_r2_normalize51 a fa1 in
assert ((feval5 out).[0] == Vec.pfadd (feval5 a).[0] (feval5 a).[1]);
let res = carry_full_felem5 out in
carry_full_felem5_lemma out
val fmul_r4_normalize50:
acc:felem5 4
-> r:felem5 4
-> r2:felem5 4
-> r3:felem5 4
-> r4:felem5 4 ->
Pure (felem5 4)
(requires
felem_fits5 acc (3, 3, 3, 3, 3) /\
felem_fits5 r (1, 1, 1, 1, 1) /\
felem_fits5 r2 (2, 2, 2, 2, 2) /\
felem_fits5 r3 (2, 2, 2, 2, 2) /\
felem_fits5 r4 (2, 2, 2, 2, 2) /\
feval5 r2 == Vec.fmul (feval5 r) (feval5 r) /\
feval5 r3 == Vec.fmul (feval5 r2) (feval5 r) /\
feval5 r4 == Vec.compute_r4 (feval5 r).[0])
(ensures fun out ->
let fr4321 = create4 (feval5 r4).[0] (feval5 r3).[0] (feval5 r2).[0] (feval5 r).[0] in
feval5 out == Vec.fmul (feval5 acc) fr4321 /\
felem_fits5 out (1, 2, 1, 1, 2))
let fmul_r4_normalize50 acc fr fr2 fr3 fr4 =
let (r10, r11, r12, r13, r14) = fr in
let (r20, r21, r22, r23, r24) = fr2 in
let (r30, r31, r32, r33, r34) = fr3 in
let (r40, r41, r42, r43, r44) = fr4 in
let (a0, a1, a2, a3, a4) = acc in
let v12120 = vec_interleave_low r20 r10 in
vec_interleave_low_lemma_uint64_4 r20 r10;
let v34340 = vec_interleave_low r40 r30 in
vec_interleave_low_lemma_uint64_4 r40 r30;
let r12340 = vec_interleave_low_n 2 v34340 v12120 in
vec_interleave_low_n_lemma_uint64_4_2 v34340 v12120;
let v12121 = vec_interleave_low r21 r11 in
vec_interleave_low_lemma_uint64_4 r21 r11;
let v34341 = vec_interleave_low r41 r31 in
vec_interleave_low_lemma_uint64_4 r41 r31;
let r12341 = vec_interleave_low_n 2 v34341 v12121 in
vec_interleave_low_n_lemma_uint64_4_2 v34341 v12121;
let v12122 = vec_interleave_low r22 r12 in
vec_interleave_low_lemma_uint64_4 r22 r12;
let v34342 = vec_interleave_low r42 r32 in
vec_interleave_low_lemma_uint64_4 r42 r32;
let r12342 = vec_interleave_low_n 2 v34342 v12122 in
vec_interleave_low_n_lemma_uint64_4_2 v34342 v12122;
let v12123 = vec_interleave_low r23 r13 in
vec_interleave_low_lemma_uint64_4 r23 r13;
let v34343 = vec_interleave_low r43 r33 in
vec_interleave_low_lemma_uint64_4 r43 r33;
let r12343 = vec_interleave_low_n 2 v34343 v12123 in
vec_interleave_low_n_lemma_uint64_4_2 v34343 v12123;
let v12124 = vec_interleave_low r24 r14 in
vec_interleave_low_lemma_uint64_4 r24 r14;
let v34344 = vec_interleave_low r44 r34 in
vec_interleave_low_lemma_uint64_4 r44 r34;
let r12344 = vec_interleave_low_n 2 v34344 v12124 in
vec_interleave_low_n_lemma_uint64_4_2 v34344 v12124;
let fr1234 = (r12340, r12341, r12342, r12343, r12344) in
eq_intro (feval5 fr1234) (create4 (feval5 fr4).[0] (feval5 fr3).[0] (feval5 fr2).[0] (feval5 fr).[0]);
let fr12345 = precomp_r5 #4 fr1234 in
let out = fmul_r5 #4 acc fr1234 fr12345 in
fmul_r5_eval_lemma acc fr1234 fr12345;
fmul_r5_fits_lemma acc fr1234 fr12345;
out
val lemma_fmul_r4_normalize51:
#m:scale32{m <= 2}
-> o:uint64xN 4{felem_fits1 o m} ->
Lemma
(let v00 = vec_interleave_high_n 2 o o in
let v10 = vec_add_mod o v00 in
let v10h = vec_interleave_high v10 v10 in
let v20 = vec_add_mod v10 v10h in
felem_fits1 v20 (4 * m) /\
(uint64xN_v v20).[0] == (uint64xN_v o).[0] + (uint64xN_v o).[1] + (uint64xN_v o).[2] + (uint64xN_v o).[3])
let lemma_fmul_r4_normalize51 #m o =
let v00 = vec_interleave_high_n 2 o o in
vec_interleave_high_n_lemma_uint64_4_2 o o;
let (o0, o1, o2, o3) = ((vec_v o).[0], (vec_v o).[1], (vec_v o).[2], (vec_v o).[3]) in
assert (vec_v v00 == create4 o2 o3 o2 o3);
let v10 = vec_add_mod o v00 in
FStar.Math.Lemmas.modulo_lemma (v o0 + v o2) (pow2 64);
FStar.Math.Lemmas.modulo_lemma (v o1 + v o3) (pow2 64);
assert (v (vec_v v10).[0] == v o0 + v o2);
assert (v (vec_v v10).[1] == v o1 + v o3);
let v10h = vec_interleave_high v10 v10 in
vec_interleave_high_lemma_uint64_4 v10 v10;
assert (v (vec_v v10h).[0] == v (vec_v v10).[1]);
let v20 = vec_add_mod v10 v10h in
FStar.Math.Lemmas.modulo_lemma (v o0 + v o2 + v o1 + v o3) (pow2 64)
val lemma_fmul_r4_normalize51_expand:
v2:felem5 4
-> out:felem5 4 ->
Lemma
(requires
(let (v20, v21, v22, v23, v24) = v2 in
let (o0, o1, o2, o3, o4) = out in
(uint64xN_v v20).[0] == (uint64xN_v o0).[0] + (uint64xN_v o0).[1] + (uint64xN_v o0).[2] + (uint64xN_v o0).[3] /\
(uint64xN_v v21).[0] == (uint64xN_v o1).[0] + (uint64xN_v o1).[1] + (uint64xN_v o1).[2] + (uint64xN_v o1).[3] /\
(uint64xN_v v22).[0] == (uint64xN_v o2).[0] + (uint64xN_v o2).[1] + (uint64xN_v o2).[2] + (uint64xN_v o2).[3] /\
(uint64xN_v v23).[0] == (uint64xN_v o3).[0] + (uint64xN_v o3).[1] + (uint64xN_v o3).[2] + (uint64xN_v o3).[3] /\
(uint64xN_v v24).[0] == (uint64xN_v o4).[0] + (uint64xN_v o4).[1] + (uint64xN_v o4).[2] + (uint64xN_v o4).[3]))
(ensures
(let (v20, v21, v22, v23, v24) = v2 in
let (o0, o1, o2, o3, o4) = out in
(feval5 v2).[0] == Vec.pfadd (Vec.pfadd (Vec.pfadd (feval5 out).[0] (feval5 out).[1]) (feval5 out).[2]) (feval5 out).[3]))
let lemma_fmul_r4_normalize51_expand v2 out =
let (v20, v21, v22, v23, v24) = as_tup64_i v2 0 in
let (o0, o1, o2, o3, o4) = out in
calc (==) {
as_nat5 (v20, v21, v22, v23, v24) % Vec.prime;
(==) { }
(as_nat5 (as_tup64_i out 0) + as_nat5 (as_tup64_i out 1) + as_nat5 (as_tup64_i out 2) + as_nat5 (as_tup64_i out 3)) % Vec.prime;
(==) { FStar.Math.Lemmas.lemma_mod_plus_distr_l (as_nat5 (as_tup64_i out 0) + as_nat5 (as_tup64_i out 1))
(as_nat5 (as_tup64_i out 2) + as_nat5 (as_tup64_i out 3)) Vec.prime }
((as_nat5 (as_tup64_i out 0) + as_nat5 (as_tup64_i out 1)) % Vec.prime + as_nat5 (as_tup64_i out 2) + as_nat5 (as_tup64_i out 3)) % Vec.prime;
(==) { FStar.Math.Lemmas.modulo_distributivity (as_nat5 (as_tup64_i out 0)) (as_nat5 (as_tup64_i out 1)) Vec.prime }
(((feval5 out).[0] + (feval5 out).[1]) % Vec.prime + as_nat5 (as_tup64_i out 2) + as_nat5 (as_tup64_i out 3)) % Vec.prime;
(==) { FStar.Math.Lemmas.lemma_mod_plus_distr_l (((feval5 out).[0] + (feval5 out).[1]) % Vec.prime + as_nat5 (as_tup64_i out 2))
(as_nat5 (as_tup64_i out 3)) Vec.prime }
((((feval5 out).[0] + (feval5 out).[1]) % Vec.prime + as_nat5 (as_tup64_i out 2)) % Vec.prime + as_nat5 (as_tup64_i out 3)) % Vec.prime;
(==) { FStar.Math.Lemmas.lemma_mod_plus_distr_r (((feval5 out).[0] + (feval5 out).[1]) % Vec.prime) (as_nat5 (as_tup64_i out 2)) Vec.prime }
((((feval5 out).[0] + (feval5 out).[1]) % Vec.prime + (feval5 out).[2]) % Vec.prime + as_nat5 (as_tup64_i out 3)) % Vec.prime;
(==) { FStar.Math.Lemmas.lemma_mod_plus_distr_r ((((feval5 out).[0] + (feval5 out).[1]) % Vec.prime + (feval5 out).[2]) % Vec.prime)
(as_nat5 (as_tup64_i out 3)) Vec.prime }
((((feval5 out).[0] + (feval5 out).[1]) % Vec.prime + (feval5 out).[2]) % Vec.prime + (feval5 out).[3]) % Vec.prime;
};
assert ((feval5 v2).[0] ==
((((feval5 out).[0] + (feval5 out).[1]) % Vec.prime + (feval5 out).[2]) % Vec.prime + (feval5 out).[3]) % Vec.prime)
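// Applies the per-limb horizontal sum to all five limbs of a felem5 4, so that
// lane 0 of the result evaluates to the sum (mod prime) of the four lane evaluations
// of the input.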
val fmul_r4_normalize51: a:felem5 4 ->
Pure (felem5 4)
(requires felem_fits5 a (1, 2, 1, 1, 2))
(ensures fun res ->
felem_fits5 res (4, 8, 4, 4, 8) /\
(feval5 res).[0] == Vec.pfadd (Vec.pfadd (Vec.pfadd (feval5 a).[0] (feval5 a).[1]) (feval5 a).[2]) (feval5 a).[3])
let fmul_r4_normalize51 fa =
let (o0, o1, o2, o3, o4) = fa in
let v00 = vec_interleave_high_n 2 o0 o0 in
let v10 = vec_add_mod o0 v00 in
let v10h = vec_interleave_high v10 v10 in
let v20 = vec_add_mod v10 v10h in
lemma_fmul_r4_normalize51 #1 o0;
let v01 = vec_interleave_high_n 2 o1 o1 in
let v11 = vec_add_mod o1 v01 in
let v11h = vec_interleave_high v11 v11 in
let v21 = vec_add_mod v11 v11h in
lemma_fmul_r4_normalize51 #2 o1;
let v02 = vec_interleave_high_n 2 o2 o2 in
let v12 = vec_add_mod o2 v02 in
let v12h = vec_interleave_high v12 v12 in
let v22 = vec_add_mod v12 v12h in
lemma_fmul_r4_normalize51 #1 o2;
let v03 = vec_interleave_high_n 2 o3 o3 in
let v13 = vec_add_mod o3 v03 in
let v13h = vec_interleave_high v13 v13 in
let v23 = vec_add_mod v13 v13h in
lemma_fmul_r4_normalize51 #1 o3;
let v04 = vec_interleave_high_n 2 o4 o4 in
let v14 = vec_add_mod o4 v04 in
let v14h = vec_interleave_high v14 v14 in
let v24 = vec_add_mod v14 v14h in
lemma_fmul_r4_normalize51 #2 o4;
let res = (v20, v21, v22, v23, v24) in
lemma_fmul_r4_normalize51_expand res fa;
res
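// Top-level correctness of the 4-way normalization step: multiply the accumulator
// lanes by (r^4, r^3, r^2, r), sum the four lanes, and carry; lane 0 of the result
// equals Vec.normalize_4 (feval5 r).[0] (feval5 acc).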
val fmul_r4_normalize5_lemma:
acc:felem5 4
-> r:felem5 4
-> r_5:felem5 4
-> r4:felem5 4 ->
Lemma
(requires
felem_fits5 acc (3, 3, 3, 3, 3) /\
felem_fits5 r (1, 1, 1, 1, 1) /\
felem_fits5 r4 (2, 2, 2, 2, 2) /\
r_5 == precomp_r5 r /\
feval5 r4 == Vec.compute_r4 (feval5 r).[0])
(ensures
(let out = fmul_r4_normalize5 acc r r_5 r4 in
felem_fits5 out (2, 1, 1, 1, 1) /\
(feval5 out).[0] == Vec.normalize_4 (feval5 r).[0] (feval5 acc)))
[SMTPat (fmul_r4_normalize5 acc r r_5 r4)]
#restart-solver
#push-options "--z3rlimit 500"
let fmul_r4_normalize5_lemma acc fr fr_5 fr4 =
let fr2 = fmul_r5 #4 fr fr fr_5 in
let fr3 = fmul_r5 #4 fr2 fr fr_5 in
let out = fmul_r4_normalize50 acc fr fr2 fr3 fr4 in
let v2 = fmul_r4_normalize51 out in
let res = carry_full_felem5 v2 in
carry_full_felem5_lemma v2
#pop-options
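// Loading a 128-bit value (hi, lo) per lane yields limbs within the base bounds,
// an element below 2^128, and a lane-wise evaluation of hi * 2^64 + lo.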
val load_felem5_lemma:
#w:lanes
-> lo:uint64xN w
-> hi:uint64xN w ->
Lemma
(let f = load_felem5 #w lo hi in
felem_fits5 f (1, 1, 1, 1, 1) /\
felem_less5 f (pow2 128) /\
feval5 f == createi #Vec.pfelem w
(fun i -> (uint64xN_v hi).[i] * pow2 64 + (uint64xN_v lo).[i])) | false | false | Hacl.Spec.Poly1305.Field32xN.Lemmas.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 100,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val load_felem5_lemma:
#w:lanes
-> lo:uint64xN w
-> hi:uint64xN w ->
Lemma
(let f = load_felem5 #w lo hi in
felem_fits5 f (1, 1, 1, 1, 1) /\
felem_less5 f (pow2 128) /\
feval5 f == createi #Vec.pfelem w
(fun i -> (uint64xN_v hi).[i] * pow2 64 + (uint64xN_v lo).[i])) | [] | Hacl.Spec.Poly1305.Field32xN.Lemmas.load_felem5_lemma | {
"file_name": "code/poly1305/Hacl.Spec.Poly1305.Field32xN.Lemmas.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | lo: Hacl.Spec.Poly1305.Field32xN.uint64xN w -> hi: Hacl.Spec.Poly1305.Field32xN.uint64xN w
-> FStar.Pervasives.Lemma
(ensures
(let f = Hacl.Spec.Poly1305.Field32xN.load_felem5 lo hi in
Hacl.Spec.Poly1305.Field32xN.felem_fits5 f (1, 1, 1, 1, 1) /\
Hacl.Spec.Poly1305.Field32xN.felem_less5 f (Prims.pow2 128) /\
Hacl.Spec.Poly1305.Field32xN.feval5 f ==
Lib.Sequence.createi w
(fun i ->
(Hacl.Spec.Poly1305.Field32xN.uint64xN_v hi).[ i ] * Prims.pow2 64 +
(Hacl.Spec.Poly1305.Field32xN.uint64xN_v lo).[ i ]))) | {
"end_col": 27,
"end_line": 654,
"start_col": 32,
"start_line": 634
} |
FStar.Pervasives.Lemma | val lemma_fmul_r4_normalize51_expand:
v2:felem5 4
-> out:felem5 4 ->
Lemma
(requires
(let (v20, v21, v22, v23, v24) = v2 in
let (o0, o1, o2, o3, o4) = out in
(uint64xN_v v20).[0] == (uint64xN_v o0).[0] + (uint64xN_v o0).[1] + (uint64xN_v o0).[2] + (uint64xN_v o0).[3] /\
(uint64xN_v v21).[0] == (uint64xN_v o1).[0] + (uint64xN_v o1).[1] + (uint64xN_v o1).[2] + (uint64xN_v o1).[3] /\
(uint64xN_v v22).[0] == (uint64xN_v o2).[0] + (uint64xN_v o2).[1] + (uint64xN_v o2).[2] + (uint64xN_v o2).[3] /\
(uint64xN_v v23).[0] == (uint64xN_v o3).[0] + (uint64xN_v o3).[1] + (uint64xN_v o3).[2] + (uint64xN_v o3).[3] /\
(uint64xN_v v24).[0] == (uint64xN_v o4).[0] + (uint64xN_v o4).[1] + (uint64xN_v o4).[2] + (uint64xN_v o4).[3]))
(ensures
(let (v20, v21, v22, v23, v24) = v2 in
let (o0, o1, o2, o3, o4) = out in
(feval5 v2).[0] == Vec.pfadd (Vec.pfadd (Vec.pfadd (feval5 out).[0] (feval5 out).[1]) (feval5 out).[2]) (feval5 out).[3])) | [
{
"abbrev": true,
"full_module": "Hacl.Spec.Poly1305.Vec",
"short_module": "Vec"
},
{
"abbrev": false,
"full_module": "Hacl.Poly1305.Field32xN.Lemmas2",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Poly1305.Field32xN.Lemmas1",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Poly1305.Field32xN.Lemmas0",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305.Field32xN",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305.Field32xN",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305.Field32xN",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let lemma_fmul_r4_normalize51_expand v2 out =
let (v20, v21, v22, v23, v24) = as_tup64_i v2 0 in
let (o0, o1, o2, o3, o4) = out in
calc (==) {
as_nat5 (v20, v21, v22, v23, v24) % Vec.prime;
(==) { }
(as_nat5 (as_tup64_i out 0) + as_nat5 (as_tup64_i out 1) + as_nat5 (as_tup64_i out 2) + as_nat5 (as_tup64_i out 3)) % Vec.prime;
(==) { FStar.Math.Lemmas.lemma_mod_plus_distr_l (as_nat5 (as_tup64_i out 0) + as_nat5 (as_tup64_i out 1))
(as_nat5 (as_tup64_i out 2) + as_nat5 (as_tup64_i out 3)) Vec.prime }
((as_nat5 (as_tup64_i out 0) + as_nat5 (as_tup64_i out 1)) % Vec.prime + as_nat5 (as_tup64_i out 2) + as_nat5 (as_tup64_i out 3)) % Vec.prime;
(==) { FStar.Math.Lemmas.modulo_distributivity (as_nat5 (as_tup64_i out 0)) (as_nat5 (as_tup64_i out 1)) Vec.prime }
(((feval5 out).[0] + (feval5 out).[1]) % Vec.prime + as_nat5 (as_tup64_i out 2) + as_nat5 (as_tup64_i out 3)) % Vec.prime;
(==) { FStar.Math.Lemmas.lemma_mod_plus_distr_l (((feval5 out).[0] + (feval5 out).[1]) % Vec.prime + as_nat5 (as_tup64_i out 2))
(as_nat5 (as_tup64_i out 3)) Vec.prime }
((((feval5 out).[0] + (feval5 out).[1]) % Vec.prime + as_nat5 (as_tup64_i out 2)) % Vec.prime + as_nat5 (as_tup64_i out 3)) % Vec.prime;
(==) { FStar.Math.Lemmas.lemma_mod_plus_distr_r (((feval5 out).[0] + (feval5 out).[1]) % Vec.prime) (as_nat5 (as_tup64_i out 2)) Vec.prime }
((((feval5 out).[0] + (feval5 out).[1]) % Vec.prime + (feval5 out).[2]) % Vec.prime + as_nat5 (as_tup64_i out 3)) % Vec.prime;
(==) { FStar.Math.Lemmas.lemma_mod_plus_distr_r ((((feval5 out).[0] + (feval5 out).[1]) % Vec.prime + (feval5 out).[2]) % Vec.prime)
(as_nat5 (as_tup64_i out 3)) Vec.prime }
((((feval5 out).[0] + (feval5 out).[1]) % Vec.prime + (feval5 out).[2]) % Vec.prime + (feval5 out).[3]) % Vec.prime;
};
assert ((feval5 v2).[0] ==
((((feval5 out).[0] + (feval5 out).[1]) % Vec.prime + (feval5 out).[2]) % Vec.prime + (feval5 out).[3]) % Vec.prime) | val lemma_fmul_r4_normalize51_expand:
v2:felem5 4
-> out:felem5 4 ->
Lemma
(requires
(let (v20, v21, v22, v23, v24) = v2 in
let (o0, o1, o2, o3, o4) = out in
(uint64xN_v v20).[0] == (uint64xN_v o0).[0] + (uint64xN_v o0).[1] + (uint64xN_v o0).[2] + (uint64xN_v o0).[3] /\
(uint64xN_v v21).[0] == (uint64xN_v o1).[0] + (uint64xN_v o1).[1] + (uint64xN_v o1).[2] + (uint64xN_v o1).[3] /\
(uint64xN_v v22).[0] == (uint64xN_v o2).[0] + (uint64xN_v o2).[1] + (uint64xN_v o2).[2] + (uint64xN_v o2).[3] /\
(uint64xN_v v23).[0] == (uint64xN_v o3).[0] + (uint64xN_v o3).[1] + (uint64xN_v o3).[2] + (uint64xN_v o3).[3] /\
(uint64xN_v v24).[0] == (uint64xN_v o4).[0] + (uint64xN_v o4).[1] + (uint64xN_v o4).[2] + (uint64xN_v o4).[3]))
(ensures
(let (v20, v21, v22, v23, v24) = v2 in
let (o0, o1, o2, o3, o4) = out in
(feval5 v2).[0] == Vec.pfadd (Vec.pfadd (Vec.pfadd (feval5 out).[0] (feval5 out).[1]) (feval5 out).[2]) (feval5 out).[3]))
let lemma_fmul_r4_normalize51_expand v2 out = | false | null | true | let v20, v21, v22, v23, v24 = as_tup64_i v2 0 in
let o0, o1, o2, o3, o4 = out in
calc ( == ) {
as_nat5 (v20, v21, v22, v23, v24) % Vec.prime;
( == ) { () }
(as_nat5 (as_tup64_i out 0) + as_nat5 (as_tup64_i out 1) + as_nat5 (as_tup64_i out 2) +
as_nat5 (as_tup64_i out 3)) %
Vec.prime;
( == ) { FStar.Math.Lemmas.lemma_mod_plus_distr_l (as_nat5 (as_tup64_i out 0) +
as_nat5 (as_tup64_i out 1))
(as_nat5 (as_tup64_i out 2) + as_nat5 (as_tup64_i out 3))
Vec.prime }
((as_nat5 (as_tup64_i out 0) + as_nat5 (as_tup64_i out 1)) % Vec.prime +
as_nat5 (as_tup64_i out 2) +
as_nat5 (as_tup64_i out 3)) %
Vec.prime;
( == ) { FStar.Math.Lemmas.modulo_distributivity (as_nat5 (as_tup64_i out 0))
(as_nat5 (as_tup64_i out 1))
Vec.prime }
(((feval5 out).[ 0 ] + (feval5 out).[ 1 ]) % Vec.prime + as_nat5 (as_tup64_i out 2) +
as_nat5 (as_tup64_i out 3)) %
Vec.prime;
( == ) { FStar.Math.Lemmas.lemma_mod_plus_distr_l (((feval5 out).[ 0 ] + (feval5 out).[ 1 ]) %
Vec.prime +
as_nat5 (as_tup64_i out 2))
(as_nat5 (as_tup64_i out 3))
Vec.prime }
((((feval5 out).[ 0 ] + (feval5 out).[ 1 ]) % Vec.prime + as_nat5 (as_tup64_i out 2)) % Vec.prime +
as_nat5 (as_tup64_i out 3)) %
Vec.prime;
( == ) { FStar.Math.Lemmas.lemma_mod_plus_distr_r (((feval5 out).[ 0 ] + (feval5 out).[ 1 ]) %
Vec.prime)
(as_nat5 (as_tup64_i out 2))
Vec.prime }
((((feval5 out).[ 0 ] + (feval5 out).[ 1 ]) % Vec.prime + (feval5 out).[ 2 ]) % Vec.prime +
as_nat5 (as_tup64_i out 3)) %
Vec.prime;
( == ) { FStar.Math.Lemmas.lemma_mod_plus_distr_r ((((feval5 out).[ 0 ] + (feval5 out).[ 1 ]) %
Vec.prime +
(feval5 out).[ 2 ]) %
Vec.prime)
(as_nat5 (as_tup64_i out 3))
Vec.prime }
((((feval5 out).[ 0 ] + (feval5 out).[ 1 ]) % Vec.prime + (feval5 out).[ 2 ]) % Vec.prime +
(feval5 out).[ 3 ]) %
Vec.prime;
};
assert ((feval5 v2).[ 0 ] ==
((((feval5 out).[ 0 ] + (feval5 out).[ 1 ]) % Vec.prime + (feval5 out).[ 2 ]) % Vec.prime +
(feval5 out).[ 3 ]) %
Vec.prime) | {
"checked_file": "Hacl.Spec.Poly1305.Field32xN.Lemmas.fst.checked",
"dependencies": [
"prims.fst.checked",
"Lib.Sequence.fsti.checked",
"Lib.IntVector.fsti.checked",
"Lib.IntTypes.fsti.checked",
"Hacl.Spec.Poly1305.Vec.fst.checked",
"Hacl.Spec.Poly1305.Field32xN.fst.checked",
"Hacl.Poly1305.Field32xN.Lemmas2.fst.checked",
"Hacl.Poly1305.Field32xN.Lemmas1.fst.checked",
"Hacl.Poly1305.Field32xN.Lemmas0.fst.checked",
"Hacl.Impl.Poly1305.Lemmas.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Classical.fsti.checked",
"FStar.Calc.fsti.checked"
],
"interface_file": false,
"source_file": "Hacl.Spec.Poly1305.Field32xN.Lemmas.fst"
} | [
"lemma"
] | [
"Hacl.Spec.Poly1305.Field32xN.felem5",
"Lib.IntTypes.uint64",
"Hacl.Spec.Poly1305.Field32xN.uint64xN",
"Prims._assert",
"Prims.eq2",
"Prims.int",
"Lib.Sequence.op_String_Access",
"Hacl.Spec.Poly1305.Vec.pfelem",
"Hacl.Spec.Poly1305.Field32xN.feval5",
"Prims.op_Modulus",
"Prims.op_Addition",
"Hacl.Spec.Poly1305.Vec.prime",
"Prims.unit",
"FStar.Calc.calc_finish",
"Hacl.Spec.Poly1305.Field32xN.as_nat5",
"FStar.Pervasives.Native.Mktuple5",
"Prims.Cons",
"FStar.Preorder.relation",
"Prims.Nil",
"FStar.Calc.calc_step",
"Hacl.Spec.Poly1305.Field32xN.as_tup64_i",
"FStar.Calc.calc_init",
"FStar.Calc.calc_pack",
"Prims.squash",
"FStar.Math.Lemmas.lemma_mod_plus_distr_l",
"FStar.Math.Lemmas.modulo_distributivity",
"FStar.Math.Lemmas.lemma_mod_plus_distr_r",
"Hacl.Spec.Poly1305.Field32xN.tup64_5"
] | [] | module Hacl.Spec.Poly1305.Field32xN.Lemmas
open Lib.IntTypes
open Lib.IntVector
open Lib.Sequence
open FStar.Mul
open Hacl.Spec.Poly1305.Field32xN
open Hacl.Poly1305.Field32xN.Lemmas0
open Hacl.Poly1305.Field32xN.Lemmas1
open Hacl.Poly1305.Field32xN.Lemmas2
module Vec = Hacl.Spec.Poly1305.Vec
#set-options "--z3rlimit 100 --max_fuel 0 --initial_ifuel 1 --max_ifuel 1 --using_facts_from '* -FStar.Seq'"
val lemma_feval_is_fas_nat_i:
#w:lanes
-> f:felem5 w{felem_less5 f (pow2 128)}
-> i:size_nat{i < w} ->
Lemma ((feval5 f).[i] == (fas_nat5 f).[i])
let lemma_feval_is_fas_nat_i #w f i =
assert_norm (pow2 128 < Vec.prime);
assert ((feval5 f).[i] == (as_nat5 (transpose f).[i]) % Vec.prime);
FStar.Math.Lemmas.modulo_lemma (as_nat5 (transpose f).[i]) Vec.prime
val lemma_feval_is_fas_nat:
#w:lanes
-> f:felem5 w{felem_less5 f (pow2 128)} ->
Lemma (forall (i:nat). i < w ==> (fas_nat5 f).[i] == (feval5 f).[i])
let lemma_feval_is_fas_nat #w f =
FStar.Classical.forall_intro (lemma_feval_is_fas_nat_i #w f)
val precomp_r5_fits_lemma:
#w:lanes
-> r:felem5 w{felem_fits5 r (1, 1, 1, 1, 1)} ->
Lemma (felem_fits5 (precomp_r5 #w r) (5, 5, 5, 5, 5))
[SMTPat (precomp_r5 #w r)]
let precomp_r5_fits_lemma #w r =
FStar.Classical.forall_intro (precomp_r5_as_tup64 #w r)
val precomp_r5_fits_lemma2:
#w:lanes
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)} ->
Lemma (felem_fits5 (precomp_r5 #w r) (10, 10, 10, 10, 10))
[SMTPat (precomp_r5 #w r)]
let precomp_r5_fits_lemma2 #w r =
FStar.Classical.forall_intro (precomp_r5_as_tup64 #w r)
val precomp_r5_zeros: w:lanes -> Lemma
(let r = (zero w, zero w, zero w, zero w, zero w) in
precomp_r5 r == (zero w, zero w, zero w, zero w, zero w))
let precomp_r5_zeros w =
let r = (zero w, zero w, zero w, zero w, zero w) in
let (r0, r1, r2, r3, r4) = precomp_r5 r in
let aux (i:nat{i < w}) : Lemma ((vec_v (vec_smul_mod (zero w) (u64 5))).[i] == u64 0) = () in
Classical.forall_intro aux;
eq_intro (vec_v (vec_smul_mod (zero w) (u64 5))) (vec_v (zero w));
vecv_extensionality (vec_smul_mod (zero w) (u64 5)) (zero w)
val fadd5_fits_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (2,2,2,2,2)}
-> f2:felem5 w{felem_fits5 f2 (1,1,1,1,1)} ->
Lemma (felem_fits5 (fadd5 f1 f2) (3,3,3,3,3))
[SMTPat (fadd5 f1 f2)]
let fadd5_fits_lemma #w f1 f2 =
let (f10, f11, f12, f13, f14) = f1 in
let (f20, f21, f22, f23, f24) = f2 in
let o = fadd5 f1 f2 in
vec_add_mod_lemma f10 f20;
vec_add_mod_lemma f11 f21;
vec_add_mod_lemma f12 f22;
vec_add_mod_lemma f13 f23;
vec_add_mod_lemma f14 f24
val fadd5_eval_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (2,2,2,2,2)}
-> f2:felem5 w{felem_fits5 f2 (1,1,1,1,1)} ->
Lemma (feval5 (fadd5 f1 f2) == map2 Vec.pfadd (feval5 f1) (feval5 f2))
[SMTPat (fadd5 f1 f2)]
let fadd5_eval_lemma #w f1 f2 =
let o = fadd5 f1 f2 in
FStar.Classical.forall_intro (fadd5_eval_lemma_i f1 f2);
eq_intro (feval5 o) (map2 Vec.pfadd (feval5 f1) (feval5 f2))
val mul_felem5_fits_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10)} ->
Lemma (felem_wide_fits5 (mul_felem5 #w f1 r r5) (126, 102, 78, 54, 30))
[SMTPat (mul_felem5 #w f1 r r5)]
let mul_felem5_fits_lemma #w f1 r r5 =
let (r0, r1, r2, r3, r4) = r in
let (f10, f11, f12, f13, f14) = f1 in
let (r50, r51, r52, r53, r54) = r5 in
let (a0,a1,a2,a3,a4) = smul_felem5 #w f10 (r0,r1,r2,r3,r4) in
smul_felem5_fits_lemma #w #3 #(2,2,2,2,2) f10 (r0,r1,r2,r3,r4);
let (a10,a11,a12,a13,a14) = smul_add_felem5 #w f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4) in
smul_add_felem5_fits_lemma #w #3 #(10,2,2,2,2) #(6,6,6,6,6) f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4);
let (a20,a21,a22,a23,a24) = smul_add_felem5 #w f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14) in
smul_add_felem5_fits_lemma #w #3 #(10,10,2,2,2) #(36,12,12,12,12) f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14);
let (a30,a31,a32,a33,a34) = smul_add_felem5 #w f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24) in
smul_add_felem5_fits_lemma #w #3 #(10,10,10,2,2) #(66,42,18,18,18) f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24);
let (a40,a41,a42,a43,a44) = smul_add_felem5 #w f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34) in
smul_add_felem5_fits_lemma #w #3 #(10,10,10,10,2) #(96,72,48,24,24) f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34)
val mul_felem5_eval_lemma_i:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10) /\ r5 == precomp_r5 r}
-> i:nat{i < w} ->
Lemma ((feval5 (mul_felem5 #w f1 r r5)).[i] == (feval5 f1).[i] `Vec.pfmul` (feval5 r).[i])
let mul_felem5_eval_lemma_i #w f1 r r5 i =
let (r0, r1, r2, r3, r4) = r in
let (f10, f11, f12, f13, f14) = f1 in
let (r50, r51, r52, r53, r54) = r5 in
let (a0,a1,a2,a3,a4) = smul_felem5 #w f10 (r0,r1,r2,r3,r4) in
smul_felem5_eval_lemma #w #3 #(2,2,2,2,2) f10 (r0,r1,r2,r3,r4);
smul_felem5_fits_lemma #w #3 #(2,2,2,2,2) f10 (r0,r1,r2,r3,r4);
assert ((fas_nat5 (a0,a1,a2,a3,a4)).[i] == (uint64xN_v f10).[i] * (fas_nat5 (r0,r1,r2,r3,r4)).[i]);
let (a10,a11,a12,a13,a14) = smul_add_felem5 #w f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4) in
smul_add_felem5_eval_lemma #w #3 #(10,2,2,2,2) #(6,6,6,6,6) f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4);
smul_add_felem5_fits_lemma #w #3 #(10,2,2,2,2) #(6,6,6,6,6) f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4);
assert ((fas_nat5 (a10,a11,a12,a13,a14)).[i] == (fas_nat5 (a0,a1,a2,a3,a4)).[i] + (uint64xN_v f11).[i] * (fas_nat5 (r54,r0,r1,r2,r3)).[i]);
let (a20,a21,a22,a23,a24) = smul_add_felem5 #w f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14) in
smul_add_felem5_eval_lemma #w #3 #(10,10,2,2,2) #(36,12,12,12,12) f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14);
smul_add_felem5_fits_lemma #w #3 #(10,10,2,2,2) #(36,12,12,12,12) f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14);
assert ((fas_nat5 (a20,a21,a22,a23,a24)).[i] == (fas_nat5 (a10,a11,a12,a13,a14)).[i] + (uint64xN_v f12).[i] * (fas_nat5 (r53,r54,r0,r1,r2)).[i]);
let (a30,a31,a32,a33,a34) = smul_add_felem5 #w f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24) in
smul_add_felem5_eval_lemma #w #3 #(10,10,10,2,2) #(66,42,18,18,18) f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24);
smul_add_felem5_fits_lemma #w #3 #(10,10,10,2,2) #(66,42,18,18,18) f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24);
assert ((fas_nat5 (a30,a31,a32,a33,a34)).[i] == (fas_nat5 (a20,a21,a22,a23,a24)).[i] + (uint64xN_v f13).[i] * (fas_nat5 (r52,r53,r54,r0,r1)).[i]);
let (a40,a41,a42,a43,a44) = smul_add_felem5 #w f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34) in
smul_add_felem5_eval_lemma #w #3 #(10,10,10,10,2) #(96,72,48,24,24) f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34);
smul_add_felem5_fits_lemma #w #3 #(10,10,10,10,2) #(96,72,48,24,24) f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34);
assert ((fas_nat5 (a40,a41,a42,a43,a44)).[i] == (fas_nat5 (a30,a31,a32,a33,a34)).[i] + (uint64xN_v f14).[i] * (fas_nat5 (r51,r52,r53,r54,r0)).[i]);
assert ((fas_nat5 (a40,a41,a42,a43,a44)).[i] ==
(uint64xN_v f10).[i] * (fas_nat5 (r0,r1,r2,r3,r4)).[i] +
(uint64xN_v f11).[i] * (fas_nat5 (r54,r0,r1,r2,r3)).[i] +
(uint64xN_v f12).[i] * (fas_nat5 (r53,r54,r0,r1,r2)).[i] +
(uint64xN_v f13).[i] * (fas_nat5 (r52,r53,r54,r0,r1)).[i] +
(uint64xN_v f14).[i] * (fas_nat5 (r51,r52,r53,r54,r0)).[i]);
mul_felem5_eval_as_tup64 #w f1 r r5 i;
mul_felem5_lemma (as_tup64_i f1 i) (as_tup64_i r i)
val mul_felem5_eval_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10) /\ r5 == precomp_r5 r} ->
Lemma (feval5 (mul_felem5 #w f1 r r5) == map2 (Vec.pfmul) (feval5 f1) (feval5 r))
[SMTPat (mul_felem5 #w f1 r r5)]
let mul_felem5_eval_lemma #w f1 r r5 =
let tmp = map2 (Vec.pfmul) (feval5 f1) (feval5 r) in
FStar.Classical.forall_intro (mul_felem5_eval_lemma_i #w f1 r r5);
eq_intro (feval5 (mul_felem5 #w f1 r r5)) tmp
val fmul_r5_fits_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10)} ->
Lemma (felem_fits5 (fmul_r5 #w f1 r r5) (1, 2, 1, 1, 2))
[SMTPat (fmul_r5 #w f1 r r5)]
let fmul_r5_fits_lemma #w f1 r r5 =
let tmp = mul_felem5 f1 r r5 in
mul_felem5_fits_lemma #w f1 r r5;
let res = carry_wide_felem5 tmp in
carry_wide_felem5_fits_lemma #w tmp
val fmul_r5_eval_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10) /\ r5 == precomp_r5 r} ->
Lemma (feval5 (fmul_r5 #w f1 r r5) == map2 (Vec.pfmul) (feval5 f1) (feval5 r))
[SMTPat (fmul_r5 #w f1 r r5)]
let fmul_r5_eval_lemma #w f1 r r5 =
let tmp = mul_felem5 f1 r r5 in
mul_felem5_eval_lemma #w f1 r r5;
let res = carry_wide_felem5 tmp in
carry_wide_felem5_eval_lemma #w tmp
val fadd_mul_r5_fits_lemma:
#w:lanes
-> acc:felem5 w{felem_fits5 acc (2, 2, 2, 2, 2)}
-> f1:felem5 w{felem_fits5 f1 (1, 1, 1, 1, 1)}
-> r:felem5 w{felem_fits5 r (1, 1, 1, 1, 1)}
-> r5:felem5 w{felem_fits5 r5 (5, 5, 5, 5, 5)} ->
Lemma (felem_fits5 (fadd_mul_r5 acc f1 r r5) (1, 2, 1, 1, 2))
[SMTPat (fadd_mul_r5 acc f1 r r5)]
let fadd_mul_r5_fits_lemma #w acc f1 r r5 =
let acc1 = fadd5 acc f1 in
fadd5_fits_lemma #w acc f1;
let tmp = mul_felem5 acc1 r r5 in
mul_felem5_fits_lemma #w acc1 r r5;
let res = carry_wide_felem5 tmp in
carry_wide_felem5_fits_lemma #w tmp
val fadd_mul_r5_eval_lemma:
#w:lanes
-> acc:felem5 w{felem_fits5 acc (2, 2, 2, 2, 2)}
-> f1:felem5 w{felem_fits5 f1 (1, 1, 1, 1, 1)}
-> r:felem5 w{felem_fits5 r (1, 1, 1, 1, 1)}
-> r5:felem5 w{felem_fits5 r5 (5, 5, 5, 5, 5) /\ r5 == precomp_r5 r} ->
Lemma (feval5 (fadd_mul_r5 acc f1 r r5) ==
map2 (Vec.pfmul) (map2 (Vec.pfadd) (feval5 acc) (feval5 f1)) (feval5 r))
[SMTPat (fadd_mul_r5 acc f1 r r5)]
let fadd_mul_r5_eval_lemma #w acc f1 r r5 =
let acc1 = fadd5 acc f1 in
fadd5_eval_lemma #w acc f1;
let tmp = mul_felem5 acc1 r r5 in
mul_felem5_eval_lemma #w acc1 r r5;
let res = carry_wide_felem5 tmp in
carry_wide_felem5_eval_lemma #w tmp
val reduce_felem5_eval_lemma:
#w:lanes
-> f:felem5 w{felem_fits5 f (2, 2, 2, 2, 2)} ->
Lemma
(felem_fits5 (reduce_felem5 f) (1, 1, 1, 1, 1) /\
(feval5 f).[0] == (fas_nat5 (reduce_felem5 f)).[0])
[SMTPat (reduce_felem5 f)]
let reduce_felem5_eval_lemma #w f =
carry_full_felem5_eval_lemma f;
carry_full_felem5_fits_lemma f;
let f = carry_full_felem5 f in
carry_reduce_felem5_lemma #w f;
subtract_p5_felem5_lemma #w (carry_full_felem5 f)
val fmul_r2_normalize50:
acc:felem5 2
-> r:felem5 2
-> r2:felem5 2 ->
Pure (felem5 2)
(requires
felem_fits5 acc (3, 3, 3, 3, 3) /\
felem_fits5 r (1, 1, 1, 1, 1) /\
felem_fits5 r2 (2, 2, 2, 2, 2) /\
feval5 r2 == Vec.compute_r2 (feval5 r).[0])
(ensures fun a ->
let fr21 = create2 (feval5 r2).[0] (feval5 r).[0] in
feval5 a == Vec.fmul (feval5 acc) fr21 /\
felem_fits5 a (1, 2, 1, 1, 2))
let fmul_r2_normalize50 (a0, a1, a2, a3, a4) (r0, r1, r2, r3, r4) (r20, r21, r22, r23, r24) =
let r210 = vec_interleave_low r20 r0 in
vec_interleave_low_lemma2 r20 r0;
let r211 = vec_interleave_low r21 r1 in
vec_interleave_low_lemma2 r21 r1;
let r212 = vec_interleave_low r22 r2 in
vec_interleave_low_lemma2 r22 r2;
let r213 = vec_interleave_low r23 r3 in
vec_interleave_low_lemma2 r23 r3;
let r214 = vec_interleave_low r24 r4 in
vec_interleave_low_lemma2 r24 r4;
let acc = (a0, a1, a2, a3, a4) in
let fr = (r0, r1, r2, r3, r4) in
let fr2 = (r20, r21, r22, r23, r24) in
assert ((feval5 fr2).[0] == Vec.pfmul ((feval5 fr).[0]) ((feval5 fr).[0]));
let fr21 = (r210, r211, r212, r213, r214) in
eq_intro (feval5 fr21) (create2 (feval5 fr2).[0] (feval5 fr).[0]);
assert (feval5 fr21 == create2 (feval5 fr2).[0] (feval5 fr).[0]);
assert (felem_fits5 fr21 (2, 2, 2, 2, 2));
let fr215 = precomp_r5 #2 fr21 in
let a = fmul_r5 #2 acc fr21 fr215 in
fmul_r5_eval_lemma acc fr21 fr215;
fmul_r5_fits_lemma acc fr21 fr215;
assert (feval5 a == Vec.fmul (feval5 acc) (feval5 fr21));
assert (felem_fits5 a (1, 2, 1, 1, 2));
a
#push-options "--z3rlimit 150"
val fmul_r2_normalize51:
a:felem5 2
-> fa1:felem5 2 ->
Pure (felem5 2)
(requires
felem_fits5 a (1, 2, 1, 1, 2) /\
felem_fits5 fa1 (1, 2, 1, 1, 2) /\
feval5 fa1 == create2 (feval5 a).[1] (feval5 a).[1])
(ensures fun out ->
(feval5 out).[0] == Vec.pfadd (feval5 a).[0] (feval5 a).[1] /\
felem_fits5 out (2, 4, 2, 2, 4))
let fmul_r2_normalize51 a fa1 =
let (a0, a1, a2, a3, a4) = a in
let (a10, a11, a12, a13, a14) = fa1 in
let o0 = vec_add_mod a0 a10 in
let o1 = vec_add_mod a1 a11 in
let o2 = vec_add_mod a2 a12 in
let o3 = vec_add_mod a3 a13 in
let o4 = vec_add_mod a4 a14 in
let out = (o0, o1, o2, o3, o4) in
let (a0, a1, a2, a3, a4) = as_tup64_i a 0 in
let (a10, a11, a12, a13, a14) = as_tup64_i fa1 0 in
let (o0, o1, o2, o3, o4) = as_tup64_i out 0 in
FStar.Math.Lemmas.modulo_lemma (v a0 + v a10) (pow2 64);
FStar.Math.Lemmas.modulo_lemma (v a1 + v a11) (pow2 64);
FStar.Math.Lemmas.modulo_lemma (v a2 + v a12) (pow2 64);
FStar.Math.Lemmas.modulo_lemma (v a3 + v a13) (pow2 64);
FStar.Math.Lemmas.modulo_lemma (v a4 + v a14) (pow2 64);
assert (felem_fits5 out (2, 4, 2, 2, 4));
calc (==) {
((feval5 a).[0] + (feval5 a).[1]) % Vec.prime;
(==) { }
(as_nat5 (a0, a1, a2, a3, a4) % Vec.prime + as_nat5 (a10, a11, a12, a13, a14) % Vec.prime) % Vec.prime;
(==) { FStar.Math.Lemmas.lemma_mod_plus_distr_l
(as_nat5 (a0, a1, a2, a3, a4)) (as_nat5 (a10, a11, a12, a13, a14)) Vec.prime;
FStar.Math.Lemmas.lemma_mod_plus_distr_r
(as_nat5 (a0, a1, a2, a3, a4) % Vec.prime) (as_nat5 (a10, a11, a12, a13, a14)) Vec.prime }
(as_nat5 (a0, a1, a2, a3, a4) + as_nat5 (a10, a11, a12, a13, a14)) % Vec.prime;
(==) { }
(feval5 out).[0];
};
out
#pop-options
val fmul_r2_normalize5_lemma:
acc:felem5 2
-> r:felem5 2
-> r2:felem5 2 ->
Lemma
(requires
felem_fits5 acc (3, 3, 3, 3, 3) /\
felem_fits5 r (1, 1, 1, 1, 1) /\
felem_fits5 r2 (2, 2, 2, 2, 2) /\
feval5 r2 == Vec.compute_r2 (feval5 r).[0])
(ensures
(let out = fmul_r2_normalize5 acc r r2 in
felem_fits5 out (2, 1, 1, 1, 1) /\
(feval5 out).[0] == Vec.normalize_2 (feval5 r).[0] (feval5 acc)))
[SMTPat (fmul_r2_normalize5 acc r r2)]
let fmul_r2_normalize5_lemma acc r r2 =
let a = fmul_r2_normalize50 acc r r2 in
let (a0, a1, a2, a3, a4) = a in
let a10 = vec_interleave_high a0 a0 in
vec_interleave_high_lemma2 a0 a0;
let a11 = vec_interleave_high a1 a1 in
vec_interleave_high_lemma2 a1 a1;
let a12 = vec_interleave_high a2 a2 in
vec_interleave_high_lemma2 a2 a2;
let a13 = vec_interleave_high a3 a3 in
vec_interleave_high_lemma2 a3 a3;
let a14 = vec_interleave_high a4 a4 in
vec_interleave_high_lemma2 a4 a4;
let fa1 = (a10, a11, a12, a13, a14) in
eq_intro (feval5 fa1) (create2 (feval5 a).[1] (feval5 a).[1]);
assert (feval5 fa1 == create2 (feval5 a).[1] (feval5 a).[1]);
assert (felem_fits5 fa1 (1, 2, 1, 1, 2));
let out = fmul_r2_normalize51 a fa1 in
assert ((feval5 out).[0] == Vec.pfadd (feval5 a).[0] (feval5 a).[1]);
let res = carry_full_felem5 out in
carry_full_felem5_lemma out
val fmul_r4_normalize50:
acc:felem5 4
-> r:felem5 4
-> r2:felem5 4
-> r3:felem5 4
-> r4:felem5 4 ->
Pure (felem5 4)
(requires
felem_fits5 acc (3, 3, 3, 3, 3) /\
felem_fits5 r (1, 1, 1, 1, 1) /\
felem_fits5 r2 (2, 2, 2, 2, 2) /\
felem_fits5 r3 (2, 2, 2, 2, 2) /\
felem_fits5 r4 (2, 2, 2, 2, 2) /\
feval5 r2 == Vec.fmul (feval5 r) (feval5 r) /\
feval5 r3 == Vec.fmul (feval5 r2) (feval5 r) /\
feval5 r4 == Vec.compute_r4 (feval5 r).[0])
(ensures fun out ->
let fr4321 = create4 (feval5 r4).[0] (feval5 r3).[0] (feval5 r2).[0] (feval5 r).[0] in
feval5 out == Vec.fmul (feval5 acc) fr4321 /\
felem_fits5 out (1, 2, 1, 1, 2))
let fmul_r4_normalize50 acc fr fr2 fr3 fr4 =
let (r10, r11, r12, r13, r14) = fr in
let (r20, r21, r22, r23, r24) = fr2 in
let (r30, r31, r32, r33, r34) = fr3 in
let (r40, r41, r42, r43, r44) = fr4 in
let (a0, a1, a2, a3, a4) = acc in
let v12120 = vec_interleave_low r20 r10 in
vec_interleave_low_lemma_uint64_4 r20 r10;
let v34340 = vec_interleave_low r40 r30 in
vec_interleave_low_lemma_uint64_4 r40 r30;
let r12340 = vec_interleave_low_n 2 v34340 v12120 in
vec_interleave_low_n_lemma_uint64_4_2 v34340 v12120;
let v12121 = vec_interleave_low r21 r11 in
vec_interleave_low_lemma_uint64_4 r21 r11;
let v34341 = vec_interleave_low r41 r31 in
vec_interleave_low_lemma_uint64_4 r41 r31;
let r12341 = vec_interleave_low_n 2 v34341 v12121 in
vec_interleave_low_n_lemma_uint64_4_2 v34341 v12121;
let v12122 = vec_interleave_low r22 r12 in
vec_interleave_low_lemma_uint64_4 r22 r12;
let v34342 = vec_interleave_low r42 r32 in
vec_interleave_low_lemma_uint64_4 r42 r32;
let r12342 = vec_interleave_low_n 2 v34342 v12122 in
vec_interleave_low_n_lemma_uint64_4_2 v34342 v12122;
let v12123 = vec_interleave_low r23 r13 in
vec_interleave_low_lemma_uint64_4 r23 r13;
let v34343 = vec_interleave_low r43 r33 in
vec_interleave_low_lemma_uint64_4 r43 r33;
let r12343 = vec_interleave_low_n 2 v34343 v12123 in
vec_interleave_low_n_lemma_uint64_4_2 v34343 v12123;
let v12124 = vec_interleave_low r24 r14 in
vec_interleave_low_lemma_uint64_4 r24 r14;
let v34344 = vec_interleave_low r44 r34 in
vec_interleave_low_lemma_uint64_4 r44 r34;
let r12344 = vec_interleave_low_n 2 v34344 v12124 in
vec_interleave_low_n_lemma_uint64_4_2 v34344 v12124;
let fr1234 = (r12340, r12341, r12342, r12343, r12344) in
eq_intro (feval5 fr1234) (create4 (feval5 fr4).[0] (feval5 fr3).[0] (feval5 fr2).[0] (feval5 fr).[0]);
let fr12345 = precomp_r5 #4 fr1234 in
let out = fmul_r5 #4 acc fr1234 fr12345 in
fmul_r5_eval_lemma acc fr1234 fr12345;
fmul_r5_fits_lemma acc fr1234 fr12345;
out
val lemma_fmul_r4_normalize51:
#m:scale32{m <= 2}
-> o:uint64xN 4{felem_fits1 o m} ->
Lemma
(let v00 = vec_interleave_high_n 2 o o in
let v10 = vec_add_mod o v00 in
let v10h = vec_interleave_high v10 v10 in
let v20 = vec_add_mod v10 v10h in
felem_fits1 v20 (4 * m) /\
(uint64xN_v v20).[0] == (uint64xN_v o).[0] + (uint64xN_v o).[1] + (uint64xN_v o).[2] + (uint64xN_v o).[3])
let lemma_fmul_r4_normalize51 #m o =
let v00 = vec_interleave_high_n 2 o o in
vec_interleave_high_n_lemma_uint64_4_2 o o;
let (o0, o1, o2, o3) = ((vec_v o).[0], (vec_v o).[1], (vec_v o).[2], (vec_v o).[3]) in
assert (vec_v v00 == create4 o2 o3 o2 o3);
let v10 = vec_add_mod o v00 in
FStar.Math.Lemmas.modulo_lemma (v o0 + v o2) (pow2 64);
FStar.Math.Lemmas.modulo_lemma (v o1 + v o3) (pow2 64);
assert (v (vec_v v10).[0] == v o0 + v o2);
assert (v (vec_v v10).[1] == v o1 + v o3);
let v10h = vec_interleave_high v10 v10 in
vec_interleave_high_lemma_uint64_4 v10 v10;
assert (v (vec_v v10h).[0] == v (vec_v v10).[1]);
let v20 = vec_add_mod v10 v10h in
FStar.Math.Lemmas.modulo_lemma (v o0 + v o2 + v o1 + v o3) (pow2 64)
val lemma_fmul_r4_normalize51_expand:
v2:felem5 4
-> out:felem5 4 ->
Lemma
(requires
(let (v20, v21, v22, v23, v24) = v2 in
let (o0, o1, o2, o3, o4) = out in
(uint64xN_v v20).[0] == (uint64xN_v o0).[0] + (uint64xN_v o0).[1] + (uint64xN_v o0).[2] + (uint64xN_v o0).[3] /\
(uint64xN_v v21).[0] == (uint64xN_v o1).[0] + (uint64xN_v o1).[1] + (uint64xN_v o1).[2] + (uint64xN_v o1).[3] /\
(uint64xN_v v22).[0] == (uint64xN_v o2).[0] + (uint64xN_v o2).[1] + (uint64xN_v o2).[2] + (uint64xN_v o2).[3] /\
(uint64xN_v v23).[0] == (uint64xN_v o3).[0] + (uint64xN_v o3).[1] + (uint64xN_v o3).[2] + (uint64xN_v o3).[3] /\
(uint64xN_v v24).[0] == (uint64xN_v o4).[0] + (uint64xN_v o4).[1] + (uint64xN_v o4).[2] + (uint64xN_v o4).[3]))
(ensures
(let (v20, v21, v22, v23, v24) = v2 in
let (o0, o1, o2, o3, o4) = out in
(feval5 v2).[0] == Vec.pfadd (Vec.pfadd (Vec.pfadd (feval5 out).[0] (feval5 out).[1]) (feval5 out).[2]) (feval5 out).[3])) | false | false | Hacl.Spec.Poly1305.Field32xN.Lemmas.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 100,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val lemma_fmul_r4_normalize51_expand:
v2:felem5 4
-> out:felem5 4 ->
Lemma
(requires
(let (v20, v21, v22, v23, v24) = v2 in
let (o0, o1, o2, o3, o4) = out in
(uint64xN_v v20).[0] == (uint64xN_v o0).[0] + (uint64xN_v o0).[1] + (uint64xN_v o0).[2] + (uint64xN_v o0).[3] /\
(uint64xN_v v21).[0] == (uint64xN_v o1).[0] + (uint64xN_v o1).[1] + (uint64xN_v o1).[2] + (uint64xN_v o1).[3] /\
(uint64xN_v v22).[0] == (uint64xN_v o2).[0] + (uint64xN_v o2).[1] + (uint64xN_v o2).[2] + (uint64xN_v o2).[3] /\
(uint64xN_v v23).[0] == (uint64xN_v o3).[0] + (uint64xN_v o3).[1] + (uint64xN_v o3).[2] + (uint64xN_v o3).[3] /\
(uint64xN_v v24).[0] == (uint64xN_v o4).[0] + (uint64xN_v o4).[1] + (uint64xN_v o4).[2] + (uint64xN_v o4).[3]))
(ensures
(let (v20, v21, v22, v23, v24) = v2 in
let (o0, o1, o2, o3, o4) = out in
(feval5 v2).[0] == Vec.pfadd (Vec.pfadd (Vec.pfadd (feval5 out).[0] (feval5 out).[1]) (feval5 out).[2]) (feval5 out).[3])) | [] | Hacl.Spec.Poly1305.Field32xN.Lemmas.lemma_fmul_r4_normalize51_expand | {
"file_name": "code/poly1305/Hacl.Spec.Poly1305.Field32xN.Lemmas.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | v2: Hacl.Spec.Poly1305.Field32xN.felem5 4 -> out: Hacl.Spec.Poly1305.Field32xN.felem5 4
-> FStar.Pervasives.Lemma
(requires
(let _ = v2 in
(let FStar.Pervasives.Native.Mktuple5 #_ #_ #_ #_ #_ v20 v21 v22 v23 v24 = _ in
let _ = out in
(let FStar.Pervasives.Native.Mktuple5 #_ #_ #_ #_ #_ o0 o1 o2 o3 o4 = _ in
(Hacl.Spec.Poly1305.Field32xN.uint64xN_v v20).[ 0 ] ==
(Hacl.Spec.Poly1305.Field32xN.uint64xN_v o0).[ 0 ] +
(Hacl.Spec.Poly1305.Field32xN.uint64xN_v o0).[ 1 ] +
(Hacl.Spec.Poly1305.Field32xN.uint64xN_v o0).[ 2 ] +
(Hacl.Spec.Poly1305.Field32xN.uint64xN_v o0).[ 3 ] /\
(Hacl.Spec.Poly1305.Field32xN.uint64xN_v v21).[ 0 ] ==
(Hacl.Spec.Poly1305.Field32xN.uint64xN_v o1).[ 0 ] +
(Hacl.Spec.Poly1305.Field32xN.uint64xN_v o1).[ 1 ] +
(Hacl.Spec.Poly1305.Field32xN.uint64xN_v o1).[ 2 ] +
(Hacl.Spec.Poly1305.Field32xN.uint64xN_v o1).[ 3 ] /\
(Hacl.Spec.Poly1305.Field32xN.uint64xN_v v22).[ 0 ] ==
(Hacl.Spec.Poly1305.Field32xN.uint64xN_v o2).[ 0 ] +
(Hacl.Spec.Poly1305.Field32xN.uint64xN_v o2).[ 1 ] +
(Hacl.Spec.Poly1305.Field32xN.uint64xN_v o2).[ 2 ] +
(Hacl.Spec.Poly1305.Field32xN.uint64xN_v o2).[ 3 ] /\
(Hacl.Spec.Poly1305.Field32xN.uint64xN_v v23).[ 0 ] ==
(Hacl.Spec.Poly1305.Field32xN.uint64xN_v o3).[ 0 ] +
(Hacl.Spec.Poly1305.Field32xN.uint64xN_v o3).[ 1 ] +
(Hacl.Spec.Poly1305.Field32xN.uint64xN_v o3).[ 2 ] +
(Hacl.Spec.Poly1305.Field32xN.uint64xN_v o3).[ 3 ] /\
(Hacl.Spec.Poly1305.Field32xN.uint64xN_v v24).[ 0 ] ==
(Hacl.Spec.Poly1305.Field32xN.uint64xN_v o4).[ 0 ] +
(Hacl.Spec.Poly1305.Field32xN.uint64xN_v o4).[ 1 ] +
(Hacl.Spec.Poly1305.Field32xN.uint64xN_v o4).[ 2 ] +
(Hacl.Spec.Poly1305.Field32xN.uint64xN_v o4).[ 3 ])
<:
Type0)
<:
Type0))
(ensures
(let _ = v2 in
(let FStar.Pervasives.Native.Mktuple5 #_ #_ #_ #_ #_ _ _ _ _ _ = _ in
let _ = out in
(let FStar.Pervasives.Native.Mktuple5 #_ #_ #_ #_ #_ _ _ _ _ _ = _ in
(Hacl.Spec.Poly1305.Field32xN.feval5 v2).[ 0 ] ==
Hacl.Spec.Poly1305.Vec.pfadd (Hacl.Spec.Poly1305.Vec.pfadd (Hacl.Spec.Poly1305.Vec.pfadd
(Hacl.Spec.Poly1305.Field32xN.feval5 out).[ 0 ]
(Hacl.Spec.Poly1305.Field32xN.feval5 out).[ 1 ])
(Hacl.Spec.Poly1305.Field32xN.feval5 out).[ 2 ])
(Hacl.Spec.Poly1305.Field32xN.feval5 out).[ 3 ])
<:
Type0)
<:
Type0)) | {
"end_col": 120,
"end_line": 548,
"start_col": 45,
"start_line": 525
} |
FStar.Pervasives.Lemma | val fmul_r4_normalize5_lemma:
acc:felem5 4
-> r:felem5 4
-> r_5:felem5 4
-> r4:felem5 4 ->
Lemma
(requires
felem_fits5 acc (3, 3, 3, 3, 3) /\
felem_fits5 r (1, 1, 1, 1, 1) /\
felem_fits5 r4 (2, 2, 2, 2, 2) /\
r_5 == precomp_r5 r /\
feval5 r4 == Vec.compute_r4 (feval5 r).[0])
(ensures
(let out = fmul_r4_normalize5 acc r r_5 r4 in
felem_fits5 out (2, 1, 1, 1, 1) /\
(feval5 out).[0] == Vec.normalize_4 (feval5 r).[0] (feval5 acc)))
[SMTPat (fmul_r4_normalize5 acc r r_5 r4)] | [
{
"abbrev": true,
"full_module": "Hacl.Spec.Poly1305.Vec",
"short_module": "Vec"
},
{
"abbrev": false,
"full_module": "Hacl.Poly1305.Field32xN.Lemmas2",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Poly1305.Field32xN.Lemmas1",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Poly1305.Field32xN.Lemmas0",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305.Field32xN",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.Sequence",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib.IntTypes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305.Field32xN",
"short_module": null
},
{
"abbrev": false,
"full_module": "Hacl.Spec.Poly1305.Field32xN",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let fmul_r4_normalize5_lemma acc fr fr_5 fr4 =
let fr2 = fmul_r5 #4 fr fr fr_5 in
let fr3 = fmul_r5 #4 fr2 fr fr_5 in
let out = fmul_r4_normalize50 acc fr fr2 fr3 fr4 in
let v2 = fmul_r4_normalize51 out in
let res = carry_full_felem5 v2 in
carry_full_felem5_lemma v2 | val fmul_r4_normalize5_lemma:
acc:felem5 4
-> r:felem5 4
-> r_5:felem5 4
-> r4:felem5 4 ->
Lemma
(requires
felem_fits5 acc (3, 3, 3, 3, 3) /\
felem_fits5 r (1, 1, 1, 1, 1) /\
felem_fits5 r4 (2, 2, 2, 2, 2) /\
r_5 == precomp_r5 r /\
feval5 r4 == Vec.compute_r4 (feval5 r).[0])
(ensures
(let out = fmul_r4_normalize5 acc r r_5 r4 in
felem_fits5 out (2, 1, 1, 1, 1) /\
(feval5 out).[0] == Vec.normalize_4 (feval5 r).[0] (feval5 acc)))
[SMTPat (fmul_r4_normalize5 acc r r_5 r4)]
let fmul_r4_normalize5_lemma acc fr fr_5 fr4 = | false | null | true | let fr2 = fmul_r5 #4 fr fr fr_5 in
let fr3 = fmul_r5 #4 fr2 fr fr_5 in
let out = fmul_r4_normalize50 acc fr fr2 fr3 fr4 in
let v2 = fmul_r4_normalize51 out in
let res = carry_full_felem5 v2 in
carry_full_felem5_lemma v2 | {
"checked_file": "Hacl.Spec.Poly1305.Field32xN.Lemmas.fst.checked",
"dependencies": [
"prims.fst.checked",
"Lib.Sequence.fsti.checked",
"Lib.IntVector.fsti.checked",
"Lib.IntTypes.fsti.checked",
"Hacl.Spec.Poly1305.Vec.fst.checked",
"Hacl.Spec.Poly1305.Field32xN.fst.checked",
"Hacl.Poly1305.Field32xN.Lemmas2.fst.checked",
"Hacl.Poly1305.Field32xN.Lemmas1.fst.checked",
"Hacl.Poly1305.Field32xN.Lemmas0.fst.checked",
"Hacl.Impl.Poly1305.Lemmas.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Classical.fsti.checked",
"FStar.Calc.fsti.checked"
],
"interface_file": false,
"source_file": "Hacl.Spec.Poly1305.Field32xN.Lemmas.fst"
} | [
"lemma"
] | [
"Hacl.Spec.Poly1305.Field32xN.felem5",
"Hacl.Poly1305.Field32xN.Lemmas1.carry_full_felem5_lemma",
"Hacl.Spec.Poly1305.Field32xN.carry_full_felem5",
"Hacl.Spec.Poly1305.Field32xN.Lemmas.fmul_r4_normalize51",
"Hacl.Spec.Poly1305.Field32xN.Lemmas.fmul_r4_normalize50",
"Hacl.Spec.Poly1305.Field32xN.fmul_r5",
"Prims.unit"
] | [] | module Hacl.Spec.Poly1305.Field32xN.Lemmas
open Lib.IntTypes
open Lib.IntVector
open Lib.Sequence
open FStar.Mul
open Hacl.Spec.Poly1305.Field32xN
open Hacl.Poly1305.Field32xN.Lemmas0
open Hacl.Poly1305.Field32xN.Lemmas1
open Hacl.Poly1305.Field32xN.Lemmas2
module Vec = Hacl.Spec.Poly1305.Vec
#set-options "--z3rlimit 100 --max_fuel 0 --initial_ifuel 1 --max_ifuel 1 --using_facts_from '* -FStar.Seq'"
val lemma_feval_is_fas_nat_i:
#w:lanes
-> f:felem5 w{felem_less5 f (pow2 128)}
-> i:size_nat{i < w} ->
Lemma ((feval5 f).[i] == (fas_nat5 f).[i])
let lemma_feval_is_fas_nat_i #w f i =
assert_norm (pow2 128 < Vec.prime);
assert ((feval5 f).[i] == (as_nat5 (transpose f).[i]) % Vec.prime);
FStar.Math.Lemmas.modulo_lemma (as_nat5 (transpose f).[i]) Vec.prime
val lemma_feval_is_fas_nat:
#w:lanes
-> f:felem5 w{felem_less5 f (pow2 128)} ->
Lemma (forall (i:nat). i < w ==> (fas_nat5 f).[i] == (feval5 f).[i])
let lemma_feval_is_fas_nat #w f =
FStar.Classical.forall_intro (lemma_feval_is_fas_nat_i #w f)
val precomp_r5_fits_lemma:
#w:lanes
-> r:felem5 w{felem_fits5 r (1, 1, 1, 1, 1)} ->
Lemma (felem_fits5 (precomp_r5 #w r) (5, 5, 5, 5, 5))
[SMTPat (precomp_r5 #w r)]
let precomp_r5_fits_lemma #w r =
FStar.Classical.forall_intro (precomp_r5_as_tup64 #w r)
val precomp_r5_fits_lemma2:
#w:lanes
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)} ->
Lemma (felem_fits5 (precomp_r5 #w r) (10, 10, 10, 10, 10))
[SMTPat (precomp_r5 #w r)]
let precomp_r5_fits_lemma2 #w r =
FStar.Classical.forall_intro (precomp_r5_as_tup64 #w r)
val precomp_r5_zeros: w:lanes -> Lemma
(let r = (zero w, zero w, zero w, zero w, zero w) in
precomp_r5 r == (zero w, zero w, zero w, zero w, zero w))
let precomp_r5_zeros w =
let r = (zero w, zero w, zero w, zero w, zero w) in
let (r0, r1, r2, r3, r4) = precomp_r5 r in
let aux (i:nat{i < w}) : Lemma ((vec_v (vec_smul_mod (zero w) (u64 5))).[i] == u64 0) = () in
Classical.forall_intro aux;
eq_intro (vec_v (vec_smul_mod (zero w) (u64 5))) (vec_v (zero w));
vecv_extensionality (vec_smul_mod (zero w) (u64 5)) (zero w)
val fadd5_fits_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (2,2,2,2,2)}
-> f2:felem5 w{felem_fits5 f2 (1,1,1,1,1)} ->
Lemma (felem_fits5 (fadd5 f1 f2) (3,3,3,3,3))
[SMTPat (fadd5 f1 f2)]
let fadd5_fits_lemma #w f1 f2 =
let (f10, f11, f12, f13, f14) = f1 in
let (f20, f21, f22, f23, f24) = f2 in
let o = fadd5 f1 f2 in
vec_add_mod_lemma f10 f20;
vec_add_mod_lemma f11 f21;
vec_add_mod_lemma f12 f22;
vec_add_mod_lemma f13 f23;
vec_add_mod_lemma f14 f24
val fadd5_eval_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (2,2,2,2,2)}
-> f2:felem5 w{felem_fits5 f2 (1,1,1,1,1)} ->
Lemma (feval5 (fadd5 f1 f2) == map2 Vec.pfadd (feval5 f1) (feval5 f2))
[SMTPat (fadd5 f1 f2)]
let fadd5_eval_lemma #w f1 f2 =
let o = fadd5 f1 f2 in
FStar.Classical.forall_intro (fadd5_eval_lemma_i f1 f2);
eq_intro (feval5 o) (map2 Vec.pfadd (feval5 f1) (feval5 f2))
val mul_felem5_fits_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10)} ->
Lemma (felem_wide_fits5 (mul_felem5 #w f1 r r5) (126, 102, 78, 54, 30))
[SMTPat (mul_felem5 #w f1 r r5)]
let mul_felem5_fits_lemma #w f1 r r5 =
let (r0, r1, r2, r3, r4) = r in
let (f10, f11, f12, f13, f14) = f1 in
let (r50, r51, r52, r53, r54) = r5 in
let (a0,a1,a2,a3,a4) = smul_felem5 #w f10 (r0,r1,r2,r3,r4) in
smul_felem5_fits_lemma #w #3 #(2,2,2,2,2) f10 (r0,r1,r2,r3,r4);
let (a10,a11,a12,a13,a14) = smul_add_felem5 #w f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4) in
smul_add_felem5_fits_lemma #w #3 #(10,2,2,2,2) #(6,6,6,6,6) f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4);
let (a20,a21,a22,a23,a24) = smul_add_felem5 #w f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14) in
smul_add_felem5_fits_lemma #w #3 #(10,10,2,2,2) #(36,12,12,12,12) f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14);
let (a30,a31,a32,a33,a34) = smul_add_felem5 #w f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24) in
smul_add_felem5_fits_lemma #w #3 #(10,10,10,2,2) #(66,42,18,18,18) f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24);
let (a40,a41,a42,a43,a44) = smul_add_felem5 #w f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34) in
smul_add_felem5_fits_lemma #w #3 #(10,10,10,10,2) #(96,72,48,24,24) f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34)
val mul_felem5_eval_lemma_i:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10) /\ r5 == precomp_r5 r}
-> i:nat{i < w} ->
Lemma ((feval5 (mul_felem5 #w f1 r r5)).[i] == (feval5 f1).[i] `Vec.pfmul` (feval5 r).[i])
let mul_felem5_eval_lemma_i #w f1 r r5 i =
let (r0, r1, r2, r3, r4) = r in
let (f10, f11, f12, f13, f14) = f1 in
let (r50, r51, r52, r53, r54) = r5 in
let (a0,a1,a2,a3,a4) = smul_felem5 #w f10 (r0,r1,r2,r3,r4) in
smul_felem5_eval_lemma #w #3 #(2,2,2,2,2) f10 (r0,r1,r2,r3,r4);
smul_felem5_fits_lemma #w #3 #(2,2,2,2,2) f10 (r0,r1,r2,r3,r4);
assert ((fas_nat5 (a0,a1,a2,a3,a4)).[i] == (uint64xN_v f10).[i] * (fas_nat5 (r0,r1,r2,r3,r4)).[i]);
let (a10,a11,a12,a13,a14) = smul_add_felem5 #w f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4) in
smul_add_felem5_eval_lemma #w #3 #(10,2,2,2,2) #(6,6,6,6,6) f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4);
smul_add_felem5_fits_lemma #w #3 #(10,2,2,2,2) #(6,6,6,6,6) f11 (r54,r0,r1,r2,r3) (a0,a1,a2,a3,a4);
assert ((fas_nat5 (a10,a11,a12,a13,a14)).[i] == (fas_nat5 (a0,a1,a2,a3,a4)).[i] + (uint64xN_v f11).[i] * (fas_nat5 (r54,r0,r1,r2,r3)).[i]);
let (a20,a21,a22,a23,a24) = smul_add_felem5 #w f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14) in
smul_add_felem5_eval_lemma #w #3 #(10,10,2,2,2) #(36,12,12,12,12) f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14);
smul_add_felem5_fits_lemma #w #3 #(10,10,2,2,2) #(36,12,12,12,12) f12 (r53,r54,r0,r1,r2) (a10,a11,a12,a13,a14);
assert ((fas_nat5 (a20,a21,a22,a23,a24)).[i] == (fas_nat5 (a10,a11,a12,a13,a14)).[i] + (uint64xN_v f12).[i] * (fas_nat5 (r53,r54,r0,r1,r2)).[i]);
let (a30,a31,a32,a33,a34) = smul_add_felem5 #w f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24) in
smul_add_felem5_eval_lemma #w #3 #(10,10,10,2,2) #(66,42,18,18,18) f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24);
smul_add_felem5_fits_lemma #w #3 #(10,10,10,2,2) #(66,42,18,18,18) f13 (r52,r53,r54,r0,r1) (a20,a21,a22,a23,a24);
assert ((fas_nat5 (a30,a31,a32,a33,a34)).[i] == (fas_nat5 (a20,a21,a22,a23,a24)).[i] + (uint64xN_v f13).[i] * (fas_nat5 (r52,r53,r54,r0,r1)).[i]);
let (a40,a41,a42,a43,a44) = smul_add_felem5 #w f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34) in
smul_add_felem5_eval_lemma #w #3 #(10,10,10,10,2) #(96,72,48,24,24) f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34);
smul_add_felem5_fits_lemma #w #3 #(10,10,10,10,2) #(96,72,48,24,24) f14 (r51,r52,r53,r54,r0) (a30,a31,a32,a33,a34);
assert ((fas_nat5 (a40,a41,a42,a43,a44)).[i] == (fas_nat5 (a30,a31,a32,a33,a34)).[i] + (uint64xN_v f14).[i] * (fas_nat5 (r51,r52,r53,r54,r0)).[i]);
assert ((fas_nat5 (a40,a41,a42,a43,a44)).[i] ==
(uint64xN_v f10).[i] * (fas_nat5 (r0,r1,r2,r3,r4)).[i] +
(uint64xN_v f11).[i] * (fas_nat5 (r54,r0,r1,r2,r3)).[i] +
(uint64xN_v f12).[i] * (fas_nat5 (r53,r54,r0,r1,r2)).[i] +
(uint64xN_v f13).[i] * (fas_nat5 (r52,r53,r54,r0,r1)).[i] +
(uint64xN_v f14).[i] * (fas_nat5 (r51,r52,r53,r54,r0)).[i]);
mul_felem5_eval_as_tup64 #w f1 r r5 i;
mul_felem5_lemma (as_tup64_i f1 i) (as_tup64_i r i)
val mul_felem5_eval_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10) /\ r5 == precomp_r5 r} ->
Lemma (feval5 (mul_felem5 #w f1 r r5) == map2 (Vec.pfmul) (feval5 f1) (feval5 r))
[SMTPat (mul_felem5 #w f1 r r5)]
let mul_felem5_eval_lemma #w f1 r r5 =
let tmp = map2 (Vec.pfmul) (feval5 f1) (feval5 r) in
FStar.Classical.forall_intro (mul_felem5_eval_lemma_i #w f1 r r5);
eq_intro (feval5 (mul_felem5 #w f1 r r5)) tmp
val fmul_r5_fits_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10)} ->
Lemma (felem_fits5 (fmul_r5 #w f1 r r5) (1, 2, 1, 1, 2))
[SMTPat (fmul_r5 #w f1 r r5)]
let fmul_r5_fits_lemma #w f1 r r5 =
let tmp = mul_felem5 f1 r r5 in
mul_felem5_fits_lemma #w f1 r r5;
let res = carry_wide_felem5 tmp in
carry_wide_felem5_fits_lemma #w tmp
val fmul_r5_eval_lemma:
#w:lanes
-> f1:felem5 w{felem_fits5 f1 (3, 3, 3, 3, 3)}
-> r:felem5 w{felem_fits5 r (2, 2, 2, 2, 2)}
-> r5:felem5 w{felem_fits5 r5 (10, 10, 10, 10, 10) /\ r5 == precomp_r5 r} ->
Lemma (feval5 (fmul_r5 #w f1 r r5) == map2 (Vec.pfmul) (feval5 f1) (feval5 r))
[SMTPat (fmul_r5 #w f1 r r5)]
let fmul_r5_eval_lemma #w f1 r r5 =
let tmp = mul_felem5 f1 r r5 in
mul_felem5_eval_lemma #w f1 r r5;
let res = carry_wide_felem5 tmp in
carry_wide_felem5_eval_lemma #w tmp
val fadd_mul_r5_fits_lemma:
#w:lanes
-> acc:felem5 w{felem_fits5 acc (2, 2, 2, 2, 2)}
-> f1:felem5 w{felem_fits5 f1 (1, 1, 1, 1, 1)}
-> r:felem5 w{felem_fits5 r (1, 1, 1, 1, 1)}
-> r5:felem5 w{felem_fits5 r5 (5, 5, 5, 5, 5)} ->
Lemma (felem_fits5 (fadd_mul_r5 acc f1 r r5) (1, 2, 1, 1, 2))
[SMTPat (fadd_mul_r5 acc f1 r r5)]
let fadd_mul_r5_fits_lemma #w acc f1 r r5 =
let acc1 = fadd5 acc f1 in
fadd5_fits_lemma #w acc f1;
let tmp = mul_felem5 acc1 r r5 in
mul_felem5_fits_lemma #w acc1 r r5;
let res = carry_wide_felem5 tmp in
carry_wide_felem5_fits_lemma #w tmp
val fadd_mul_r5_eval_lemma:
#w:lanes
-> acc:felem5 w{felem_fits5 acc (2, 2, 2, 2, 2)}
-> f1:felem5 w{felem_fits5 f1 (1, 1, 1, 1, 1)}
-> r:felem5 w{felem_fits5 r (1, 1, 1, 1, 1)}
-> r5:felem5 w{felem_fits5 r5 (5, 5, 5, 5, 5) /\ r5 == precomp_r5 r} ->
Lemma (feval5 (fadd_mul_r5 acc f1 r r5) ==
map2 (Vec.pfmul) (map2 (Vec.pfadd) (feval5 acc) (feval5 f1)) (feval5 r))
[SMTPat (fadd_mul_r5 acc f1 r r5)]
let fadd_mul_r5_eval_lemma #w acc f1 r r5 =
let acc1 = fadd5 acc f1 in
fadd5_eval_lemma #w acc f1;
let tmp = mul_felem5 acc1 r r5 in
mul_felem5_eval_lemma #w acc1 r r5;
let res = carry_wide_felem5 tmp in
carry_wide_felem5_eval_lemma #w tmp
val reduce_felem5_eval_lemma:
#w:lanes
-> f:felem5 w{felem_fits5 f (2, 2, 2, 2, 2)} ->
Lemma
(felem_fits5 (reduce_felem5 f) (1, 1, 1, 1, 1) /\
(feval5 f).[0] == (fas_nat5 (reduce_felem5 f)).[0])
[SMTPat (reduce_felem5 f)]
let reduce_felem5_eval_lemma #w f =
carry_full_felem5_eval_lemma f;
carry_full_felem5_fits_lemma f;
let f = carry_full_felem5 f in
carry_reduce_felem5_lemma #w f;
subtract_p5_felem5_lemma #w (carry_full_felem5 f)
val fmul_r2_normalize50:
acc:felem5 2
-> r:felem5 2
-> r2:felem5 2 ->
Pure (felem5 2)
(requires
felem_fits5 acc (3, 3, 3, 3, 3) /\
felem_fits5 r (1, 1, 1, 1, 1) /\
felem_fits5 r2 (2, 2, 2, 2, 2) /\
feval5 r2 == Vec.compute_r2 (feval5 r).[0])
(ensures fun a ->
let fr21 = create2 (feval5 r2).[0] (feval5 r).[0] in
feval5 a == Vec.fmul (feval5 acc) fr21 /\
felem_fits5 a (1, 2, 1, 1, 2))
let fmul_r2_normalize50 (a0, a1, a2, a3, a4) (r0, r1, r2, r3, r4) (r20, r21, r22, r23, r24) =
let r210 = vec_interleave_low r20 r0 in
vec_interleave_low_lemma2 r20 r0;
let r211 = vec_interleave_low r21 r1 in
vec_interleave_low_lemma2 r21 r1;
let r212 = vec_interleave_low r22 r2 in
vec_interleave_low_lemma2 r22 r2;
let r213 = vec_interleave_low r23 r3 in
vec_interleave_low_lemma2 r23 r3;
let r214 = vec_interleave_low r24 r4 in
vec_interleave_low_lemma2 r24 r4;
let acc = (a0, a1, a2, a3, a4) in
let fr = (r0, r1, r2, r3, r4) in
let fr2 = (r20, r21, r22, r23, r24) in
assert ((feval5 fr2).[0] == Vec.pfmul ((feval5 fr).[0]) ((feval5 fr).[0]));
let fr21 = (r210, r211, r212, r213, r214) in
eq_intro (feval5 fr21) (create2 (feval5 fr2).[0] (feval5 fr).[0]);
assert (feval5 fr21 == create2 (feval5 fr2).[0] (feval5 fr).[0]);
assert (felem_fits5 fr21 (2, 2, 2, 2, 2));
let fr215 = precomp_r5 #2 fr21 in
let a = fmul_r5 #2 acc fr21 fr215 in
fmul_r5_eval_lemma acc fr21 fr215;
fmul_r5_fits_lemma acc fr21 fr215;
assert (feval5 a == Vec.fmul (feval5 acc) (feval5 fr21));
assert (felem_fits5 a (1, 2, 1, 1, 2));
a
#push-options "--z3rlimit 150"
val fmul_r2_normalize51:
a:felem5 2
-> fa1:felem5 2 ->
Pure (felem5 2)
(requires
felem_fits5 a (1, 2, 1, 1, 2) /\
felem_fits5 fa1 (1, 2, 1, 1, 2) /\
feval5 fa1 == create2 (feval5 a).[1] (feval5 a).[1])
(ensures fun out ->
(feval5 out).[0] == Vec.pfadd (feval5 a).[0] (feval5 a).[1] /\
felem_fits5 out (2, 4, 2, 2, 4))
let fmul_r2_normalize51 a fa1 =
let (a0, a1, a2, a3, a4) = a in
let (a10, a11, a12, a13, a14) = fa1 in
let o0 = vec_add_mod a0 a10 in
let o1 = vec_add_mod a1 a11 in
let o2 = vec_add_mod a2 a12 in
let o3 = vec_add_mod a3 a13 in
let o4 = vec_add_mod a4 a14 in
let out = (o0, o1, o2, o3, o4) in
let (a0, a1, a2, a3, a4) = as_tup64_i a 0 in
let (a10, a11, a12, a13, a14) = as_tup64_i fa1 0 in
let (o0, o1, o2, o3, o4) = as_tup64_i out 0 in
FStar.Math.Lemmas.modulo_lemma (v a0 + v a10) (pow2 64);
FStar.Math.Lemmas.modulo_lemma (v a1 + v a11) (pow2 64);
FStar.Math.Lemmas.modulo_lemma (v a2 + v a12) (pow2 64);
FStar.Math.Lemmas.modulo_lemma (v a3 + v a13) (pow2 64);
FStar.Math.Lemmas.modulo_lemma (v a4 + v a14) (pow2 64);
assert (felem_fits5 out (2, 4, 2, 2, 4));
calc (==) {
((feval5 a).[0] + (feval5 a).[1]) % Vec.prime;
(==) { }
(as_nat5 (a0, a1, a2, a3, a4) % Vec.prime + as_nat5 (a10, a11, a12, a13, a14) % Vec.prime) % Vec.prime;
(==) { FStar.Math.Lemmas.lemma_mod_plus_distr_l
(as_nat5 (a0, a1, a2, a3, a4)) (as_nat5 (a10, a11, a12, a13, a14)) Vec.prime;
FStar.Math.Lemmas.lemma_mod_plus_distr_r
(as_nat5 (a0, a1, a2, a3, a4) % Vec.prime) (as_nat5 (a10, a11, a12, a13, a14)) Vec.prime }
(as_nat5 (a0, a1, a2, a3, a4) + as_nat5 (a10, a11, a12, a13, a14)) % Vec.prime;
(==) { }
(feval5 out).[0];
};
out
#pop-options
val fmul_r2_normalize5_lemma:
acc:felem5 2
-> r:felem5 2
-> r2:felem5 2 ->
Lemma
(requires
felem_fits5 acc (3, 3, 3, 3, 3) /\
felem_fits5 r (1, 1, 1, 1, 1) /\
felem_fits5 r2 (2, 2, 2, 2, 2) /\
feval5 r2 == Vec.compute_r2 (feval5 r).[0])
(ensures
(let out = fmul_r2_normalize5 acc r r2 in
felem_fits5 out (2, 1, 1, 1, 1) /\
(feval5 out).[0] == Vec.normalize_2 (feval5 r).[0] (feval5 acc)))
[SMTPat (fmul_r2_normalize5 acc r r2)]
let fmul_r2_normalize5_lemma acc r r2 =
let a = fmul_r2_normalize50 acc r r2 in
let (a0, a1, a2, a3, a4) = a in
let a10 = vec_interleave_high a0 a0 in
vec_interleave_high_lemma2 a0 a0;
let a11 = vec_interleave_high a1 a1 in
vec_interleave_high_lemma2 a1 a1;
let a12 = vec_interleave_high a2 a2 in
vec_interleave_high_lemma2 a2 a2;
let a13 = vec_interleave_high a3 a3 in
vec_interleave_high_lemma2 a3 a3;
let a14 = vec_interleave_high a4 a4 in
vec_interleave_high_lemma2 a4 a4;
let fa1 = (a10, a11, a12, a13, a14) in
eq_intro (feval5 fa1) (create2 (feval5 a).[1] (feval5 a).[1]);
assert (feval5 fa1 == create2 (feval5 a).[1] (feval5 a).[1]);
assert (felem_fits5 fa1 (1, 2, 1, 1, 2));
let out = fmul_r2_normalize51 a fa1 in
assert ((feval5 out).[0] == Vec.pfadd (feval5 a).[0] (feval5 a).[1]);
let res = carry_full_felem5 out in
carry_full_felem5_lemma out
val fmul_r4_normalize50:
acc:felem5 4
-> r:felem5 4
-> r2:felem5 4
-> r3:felem5 4
-> r4:felem5 4 ->
Pure (felem5 4)
(requires
felem_fits5 acc (3, 3, 3, 3, 3) /\
felem_fits5 r (1, 1, 1, 1, 1) /\
felem_fits5 r2 (2, 2, 2, 2, 2) /\
felem_fits5 r3 (2, 2, 2, 2, 2) /\
felem_fits5 r4 (2, 2, 2, 2, 2) /\
feval5 r2 == Vec.fmul (feval5 r) (feval5 r) /\
feval5 r3 == Vec.fmul (feval5 r2) (feval5 r) /\
feval5 r4 == Vec.compute_r4 (feval5 r).[0])
(ensures fun out ->
let fr4321 = create4 (feval5 r4).[0] (feval5 r3).[0] (feval5 r2).[0] (feval5 r).[0] in
feval5 out == Vec.fmul (feval5 acc) fr4321 /\
felem_fits5 out (1, 2, 1, 1, 2))
let fmul_r4_normalize50 acc fr fr2 fr3 fr4 =
let (r10, r11, r12, r13, r14) = fr in
let (r20, r21, r22, r23, r24) = fr2 in
let (r30, r31, r32, r33, r34) = fr3 in
let (r40, r41, r42, r43, r44) = fr4 in
let (a0, a1, a2, a3, a4) = acc in
let v12120 = vec_interleave_low r20 r10 in
vec_interleave_low_lemma_uint64_4 r20 r10;
let v34340 = vec_interleave_low r40 r30 in
vec_interleave_low_lemma_uint64_4 r40 r30;
let r12340 = vec_interleave_low_n 2 v34340 v12120 in
vec_interleave_low_n_lemma_uint64_4_2 v34340 v12120;
let v12121 = vec_interleave_low r21 r11 in
vec_interleave_low_lemma_uint64_4 r21 r11;
let v34341 = vec_interleave_low r41 r31 in
vec_interleave_low_lemma_uint64_4 r41 r31;
let r12341 = vec_interleave_low_n 2 v34341 v12121 in
vec_interleave_low_n_lemma_uint64_4_2 v34341 v12121;
let v12122 = vec_interleave_low r22 r12 in
vec_interleave_low_lemma_uint64_4 r22 r12;
let v34342 = vec_interleave_low r42 r32 in
vec_interleave_low_lemma_uint64_4 r42 r32;
let r12342 = vec_interleave_low_n 2 v34342 v12122 in
vec_interleave_low_n_lemma_uint64_4_2 v34342 v12122;
let v12123 = vec_interleave_low r23 r13 in
vec_interleave_low_lemma_uint64_4 r23 r13;
let v34343 = vec_interleave_low r43 r33 in
vec_interleave_low_lemma_uint64_4 r43 r33;
let r12343 = vec_interleave_low_n 2 v34343 v12123 in
vec_interleave_low_n_lemma_uint64_4_2 v34343 v12123;
let v12124 = vec_interleave_low r24 r14 in
vec_interleave_low_lemma_uint64_4 r24 r14;
let v34344 = vec_interleave_low r44 r34 in
vec_interleave_low_lemma_uint64_4 r44 r34;
let r12344 = vec_interleave_low_n 2 v34344 v12124 in
vec_interleave_low_n_lemma_uint64_4_2 v34344 v12124;
let fr1234 = (r12340, r12341, r12342, r12343, r12344) in
eq_intro (feval5 fr1234) (create4 (feval5 fr4).[0] (feval5 fr3).[0] (feval5 fr2).[0] (feval5 fr).[0]);
let fr12345 = precomp_r5 #4 fr1234 in
let out = fmul_r5 #4 acc fr1234 fr12345 in
fmul_r5_eval_lemma acc fr1234 fr12345;
fmul_r5_fits_lemma acc fr1234 fr12345;
out
val lemma_fmul_r4_normalize51:
#m:scale32{m <= 2}
-> o:uint64xN 4{felem_fits1 o m} ->
Lemma
(let v00 = vec_interleave_high_n 2 o o in
let v10 = vec_add_mod o v00 in
let v10h = vec_interleave_high v10 v10 in
let v20 = vec_add_mod v10 v10h in
felem_fits1 v20 (4 * m) /\
(uint64xN_v v20).[0] == (uint64xN_v o).[0] + (uint64xN_v o).[1] + (uint64xN_v o).[2] + (uint64xN_v o).[3])
let lemma_fmul_r4_normalize51 #m o =
let v00 = vec_interleave_high_n 2 o o in
vec_interleave_high_n_lemma_uint64_4_2 o o;
let (o0, o1, o2, o3) = ((vec_v o).[0], (vec_v o).[1], (vec_v o).[2], (vec_v o).[3]) in
assert (vec_v v00 == create4 o2 o3 o2 o3);
let v10 = vec_add_mod o v00 in
FStar.Math.Lemmas.modulo_lemma (v o0 + v o2) (pow2 64);
FStar.Math.Lemmas.modulo_lemma (v o1 + v o3) (pow2 64);
assert (v (vec_v v10).[0] == v o0 + v o2);
assert (v (vec_v v10).[1] == v o1 + v o3);
let v10h = vec_interleave_high v10 v10 in
vec_interleave_high_lemma_uint64_4 v10 v10;
assert (v (vec_v v10h).[0] == v (vec_v v10).[1]);
let v20 = vec_add_mod v10 v10h in
FStar.Math.Lemmas.modulo_lemma (v o0 + v o2 + v o1 + v o3) (pow2 64)
val lemma_fmul_r4_normalize51_expand:
v2:felem5 4
-> out:felem5 4 ->
Lemma
(requires
(let (v20, v21, v22, v23, v24) = v2 in
let (o0, o1, o2, o3, o4) = out in
(uint64xN_v v20).[0] == (uint64xN_v o0).[0] + (uint64xN_v o0).[1] + (uint64xN_v o0).[2] + (uint64xN_v o0).[3] /\
(uint64xN_v v21).[0] == (uint64xN_v o1).[0] + (uint64xN_v o1).[1] + (uint64xN_v o1).[2] + (uint64xN_v o1).[3] /\
(uint64xN_v v22).[0] == (uint64xN_v o2).[0] + (uint64xN_v o2).[1] + (uint64xN_v o2).[2] + (uint64xN_v o2).[3] /\
(uint64xN_v v23).[0] == (uint64xN_v o3).[0] + (uint64xN_v o3).[1] + (uint64xN_v o3).[2] + (uint64xN_v o3).[3] /\
(uint64xN_v v24).[0] == (uint64xN_v o4).[0] + (uint64xN_v o4).[1] + (uint64xN_v o4).[2] + (uint64xN_v o4).[3]))
(ensures
(let (v20, v21, v22, v23, v24) = v2 in
let (o0, o1, o2, o3, o4) = out in
(feval5 v2).[0] == Vec.pfadd (Vec.pfadd (Vec.pfadd (feval5 out).[0] (feval5 out).[1]) (feval5 out).[2]) (feval5 out).[3]))
let lemma_fmul_r4_normalize51_expand v2 out =
let (v20, v21, v22, v23, v24) = as_tup64_i v2 0 in
let (o0, o1, o2, o3, o4) = out in
calc (==) {
as_nat5 (v20, v21, v22, v23, v24) % Vec.prime;
(==) { }
(as_nat5 (as_tup64_i out 0) + as_nat5 (as_tup64_i out 1) + as_nat5 (as_tup64_i out 2) + as_nat5 (as_tup64_i out 3)) % Vec.prime;
(==) { FStar.Math.Lemmas.lemma_mod_plus_distr_l (as_nat5 (as_tup64_i out 0) + as_nat5 (as_tup64_i out 1))
(as_nat5 (as_tup64_i out 2) + as_nat5 (as_tup64_i out 3)) Vec.prime }
((as_nat5 (as_tup64_i out 0) + as_nat5 (as_tup64_i out 1)) % Vec.prime + as_nat5 (as_tup64_i out 2) + as_nat5 (as_tup64_i out 3)) % Vec.prime;
(==) { FStar.Math.Lemmas.modulo_distributivity (as_nat5 (as_tup64_i out 0)) (as_nat5 (as_tup64_i out 1)) Vec.prime }
(((feval5 out).[0] + (feval5 out).[1]) % Vec.prime + as_nat5 (as_tup64_i out 2) + as_nat5 (as_tup64_i out 3)) % Vec.prime;
(==) { FStar.Math.Lemmas.lemma_mod_plus_distr_l (((feval5 out).[0] + (feval5 out).[1]) % Vec.prime + as_nat5 (as_tup64_i out 2))
(as_nat5 (as_tup64_i out 3)) Vec.prime }
((((feval5 out).[0] + (feval5 out).[1]) % Vec.prime + as_nat5 (as_tup64_i out 2)) % Vec.prime + as_nat5 (as_tup64_i out 3)) % Vec.prime;
(==) { FStar.Math.Lemmas.lemma_mod_plus_distr_r (((feval5 out).[0] + (feval5 out).[1]) % Vec.prime) (as_nat5 (as_tup64_i out 2)) Vec.prime }
((((feval5 out).[0] + (feval5 out).[1]) % Vec.prime + (feval5 out).[2]) % Vec.prime + as_nat5 (as_tup64_i out 3)) % Vec.prime;
(==) { FStar.Math.Lemmas.lemma_mod_plus_distr_r ((((feval5 out).[0] + (feval5 out).[1]) % Vec.prime + (feval5 out).[2]) % Vec.prime)
(as_nat5 (as_tup64_i out 3)) Vec.prime }
((((feval5 out).[0] + (feval5 out).[1]) % Vec.prime + (feval5 out).[2]) % Vec.prime + (feval5 out).[3]) % Vec.prime;
};
assert ((feval5 v2).[0] ==
((((feval5 out).[0] + (feval5 out).[1]) % Vec.prime + (feval5 out).[2]) % Vec.prime + (feval5 out).[3]) % Vec.prime)
val fmul_r4_normalize51: a:felem5 4 ->
Pure (felem5 4)
(requires felem_fits5 a (1, 2, 1, 1, 2))
(ensures fun res ->
felem_fits5 res (4, 8, 4, 4, 8) /\
(feval5 res).[0] == Vec.pfadd (Vec.pfadd (Vec.pfadd (feval5 a).[0] (feval5 a).[1]) (feval5 a).[2]) (feval5 a).[3])
let fmul_r4_normalize51 fa =
let (o0, o1, o2, o3, o4) = fa in
let v00 = vec_interleave_high_n 2 o0 o0 in
let v10 = vec_add_mod o0 v00 in
let v10h = vec_interleave_high v10 v10 in
let v20 = vec_add_mod v10 v10h in
lemma_fmul_r4_normalize51 #1 o0;
let v01 = vec_interleave_high_n 2 o1 o1 in
let v11 = vec_add_mod o1 v01 in
let v11h = vec_interleave_high v11 v11 in
let v21 = vec_add_mod v11 v11h in
lemma_fmul_r4_normalize51 #2 o1;
let v02 = vec_interleave_high_n 2 o2 o2 in
let v12 = vec_add_mod o2 v02 in
let v12h = vec_interleave_high v12 v12 in
let v22 = vec_add_mod v12 v12h in
lemma_fmul_r4_normalize51 #1 o2;
let v03 = vec_interleave_high_n 2 o3 o3 in
let v13 = vec_add_mod o3 v03 in
let v13h = vec_interleave_high v13 v13 in
let v23 = vec_add_mod v13 v13h in
lemma_fmul_r4_normalize51 #1 o3;
let v04 = vec_interleave_high_n 2 o4 o4 in
let v14 = vec_add_mod o4 v04 in
let v14h = vec_interleave_high v14 v14 in
let v24 = vec_add_mod v14 v14h in
lemma_fmul_r4_normalize51 #2 o4;
let res = (v20, v21, v22, v23, v24) in
lemma_fmul_r4_normalize51_expand res fa;
res
val fmul_r4_normalize5_lemma:
acc:felem5 4
-> r:felem5 4
-> r_5:felem5 4
-> r4:felem5 4 ->
Lemma
(requires
felem_fits5 acc (3, 3, 3, 3, 3) /\
felem_fits5 r (1, 1, 1, 1, 1) /\
felem_fits5 r4 (2, 2, 2, 2, 2) /\
r_5 == precomp_r5 r /\
feval5 r4 == Vec.compute_r4 (feval5 r).[0])
(ensures
(let out = fmul_r4_normalize5 acc r r_5 r4 in
felem_fits5 out (2, 1, 1, 1, 1) /\
(feval5 out).[0] == Vec.normalize_4 (feval5 r).[0] (feval5 acc)))
[SMTPat (fmul_r4_normalize5 acc r r_5 r4)]
#restart-solver | false | false | Hacl.Spec.Poly1305.Field32xN.Lemmas.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 500,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val fmul_r4_normalize5_lemma:
acc:felem5 4
-> r:felem5 4
-> r_5:felem5 4
-> r4:felem5 4 ->
Lemma
(requires
felem_fits5 acc (3, 3, 3, 3, 3) /\
felem_fits5 r (1, 1, 1, 1, 1) /\
felem_fits5 r4 (2, 2, 2, 2, 2) /\
r_5 == precomp_r5 r /\
feval5 r4 == Vec.compute_r4 (feval5 r).[0])
(ensures
(let out = fmul_r4_normalize5 acc r r_5 r4 in
felem_fits5 out (2, 1, 1, 1, 1) /\
(feval5 out).[0] == Vec.normalize_4 (feval5 r).[0] (feval5 acc)))
[SMTPat (fmul_r4_normalize5 acc r r_5 r4)] | [] | Hacl.Spec.Poly1305.Field32xN.Lemmas.fmul_r4_normalize5_lemma | {
"file_name": "code/poly1305/Hacl.Spec.Poly1305.Field32xN.Lemmas.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} |
acc: Hacl.Spec.Poly1305.Field32xN.felem5 4 ->
r: Hacl.Spec.Poly1305.Field32xN.felem5 4 ->
r_5: Hacl.Spec.Poly1305.Field32xN.felem5 4 ->
r4: Hacl.Spec.Poly1305.Field32xN.felem5 4
-> FStar.Pervasives.Lemma
(requires
Hacl.Spec.Poly1305.Field32xN.felem_fits5 acc (3, 3, 3, 3, 3) /\
Hacl.Spec.Poly1305.Field32xN.felem_fits5 r (1, 1, 1, 1, 1) /\
Hacl.Spec.Poly1305.Field32xN.felem_fits5 r4 (2, 2, 2, 2, 2) /\
r_5 == Hacl.Spec.Poly1305.Field32xN.precomp_r5 r /\
Hacl.Spec.Poly1305.Field32xN.feval5 r4 ==
Hacl.Spec.Poly1305.Vec.compute_r4 (Hacl.Spec.Poly1305.Field32xN.feval5 r).[ 0 ])
(ensures
(let out = Hacl.Spec.Poly1305.Field32xN.fmul_r4_normalize5 acc r r_5 r4 in
Hacl.Spec.Poly1305.Field32xN.felem_fits5 out (2, 1, 1, 1, 1) /\
(Hacl.Spec.Poly1305.Field32xN.feval5 out).[ 0 ] ==
Hacl.Spec.Poly1305.Vec.normalize_4 (Hacl.Spec.Poly1305.Field32xN.feval5 r).[ 0 ]
(Hacl.Spec.Poly1305.Field32xN.feval5 acc)))
[SMTPat (Hacl.Spec.Poly1305.Field32xN.fmul_r4_normalize5 acc r r_5 r4)] | {
"end_col": 28,
"end_line": 620,
"start_col": 46,
"start_line": 614
} |
Prims.Tot | val validate_u8_with_error_code (c: error_code) : validator parse_u8 | [
{
"abbrev": true,
"full_module": "LowParse.Low.Endianness",
"short_module": "LE"
},
{
"abbrev": true,
"full_module": "FStar.Endianness",
"short_module": "E"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.UInt16",
"short_module": "U16"
},
{
"abbrev": true,
"full_module": "FStar.UInt8",
"short_module": "U8"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "Seq"
},
{
"abbrev": false,
"full_module": "LowParse.Low.Combinators",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Low.Base",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Spec.Int",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Low",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Low",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let validate_u8_with_error_code (c: error_code) : validator parse_u8 =
validate_total_constant_size_with_error_code parse_u8 1uL c | val validate_u8_with_error_code (c: error_code) : validator parse_u8
let validate_u8_with_error_code (c: error_code) : validator parse_u8 = | false | null | false | validate_total_constant_size_with_error_code parse_u8 1uL c | {
"checked_file": "LowParse.Low.Int.fsti.checked",
"dependencies": [
"prims.fst.checked",
"LowParse.Spec.Int.fsti.checked",
"LowParse.Low.Base.fst.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": false,
"source_file": "LowParse.Low.Int.fsti"
} | [
"total"
] | [
"LowParse.Low.ErrorCode.error_code",
"LowParse.Low.Base.validate_total_constant_size_with_error_code",
"LowParse.Spec.Int.parse_u8_kind",
"FStar.UInt8.t",
"LowParse.Spec.Int.parse_u8",
"FStar.UInt64.__uint_to_t",
"LowParse.Low.Base.validator"
] | [] | module LowParse.Low.Int
include LowParse.Spec.Int
include LowParse.Low.Base
inline_for_extraction
val read_u8: leaf_reader parse_u8
inline_for_extraction
val read_u16: leaf_reader parse_u16
inline_for_extraction
val read_u32: leaf_reader parse_u32
inline_for_extraction
val read_u64 : leaf_reader parse_u64
inline_for_extraction
val read_u64_le : leaf_reader parse_u64_le
inline_for_extraction
let validate_u8 () : validator parse_u8 =
validate_total_constant_size parse_u8 1uL ()
inline_for_extraction | false | true | LowParse.Low.Int.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val validate_u8_with_error_code (c: error_code) : validator parse_u8 | [] | LowParse.Low.Int.validate_u8_with_error_code | {
"file_name": "src/lowparse/LowParse.Low.Int.fsti",
"git_rev": "446a08ce38df905547cf20f28c43776b22b8087a",
"git_url": "https://github.com/project-everest/everparse.git",
"project_name": "everparse"
} | c: LowParse.Low.ErrorCode.error_code -> LowParse.Low.Base.validator LowParse.Spec.Int.parse_u8 | {
"end_col": 61,
"end_line": 26,
"start_col": 2,
"start_line": 26
} |
Prims.Tot | val validate_u64: Prims.unit -> validator parse_u64 | [
{
"abbrev": true,
"full_module": "LowParse.Low.Endianness",
"short_module": "LE"
},
{
"abbrev": true,
"full_module": "FStar.Endianness",
"short_module": "E"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.UInt16",
"short_module": "U16"
},
{
"abbrev": true,
"full_module": "FStar.UInt8",
"short_module": "U8"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "Seq"
},
{
"abbrev": false,
"full_module": "LowParse.Low.Combinators",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Low.Base",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Spec.Int",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Low",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Low",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let validate_u64 () : validator parse_u64 =
validate_total_constant_size parse_u64 8uL () | val validate_u64: Prims.unit -> validator parse_u64
let validate_u64 () : validator parse_u64 = | false | null | false | validate_total_constant_size parse_u64 8uL () | {
"checked_file": "LowParse.Low.Int.fsti.checked",
"dependencies": [
"prims.fst.checked",
"LowParse.Spec.Int.fsti.checked",
"LowParse.Low.Base.fst.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": false,
"source_file": "LowParse.Low.Int.fsti"
} | [
"total"
] | [
"Prims.unit",
"LowParse.Low.Base.validate_total_constant_size",
"LowParse.Spec.Int.parse_u64_kind",
"FStar.UInt64.t",
"LowParse.Spec.Int.parse_u64",
"FStar.UInt64.__uint_to_t",
"LowParse.Low.Base.validator"
] | [] | module LowParse.Low.Int
include LowParse.Spec.Int
include LowParse.Low.Base
inline_for_extraction
val read_u8: leaf_reader parse_u8
inline_for_extraction
val read_u16: leaf_reader parse_u16
inline_for_extraction
val read_u32: leaf_reader parse_u32
inline_for_extraction
val read_u64 : leaf_reader parse_u64
inline_for_extraction
val read_u64_le : leaf_reader parse_u64_le
inline_for_extraction
let validate_u8 () : validator parse_u8 =
validate_total_constant_size parse_u8 1uL ()
inline_for_extraction
let validate_u8_with_error_code (c: error_code) : validator parse_u8 =
validate_total_constant_size_with_error_code parse_u8 1uL c
inline_for_extraction
let validate_u16 () : validator parse_u16 =
validate_total_constant_size parse_u16 2uL ()
inline_for_extraction
let validate_u32 () : validator parse_u32 =
validate_total_constant_size parse_u32 4uL ()
inline_for_extraction | false | true | LowParse.Low.Int.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val validate_u64: Prims.unit -> validator parse_u64 | [] | LowParse.Low.Int.validate_u64 | {
"file_name": "src/lowparse/LowParse.Low.Int.fsti",
"git_rev": "446a08ce38df905547cf20f28c43776b22b8087a",
"git_url": "https://github.com/project-everest/everparse.git",
"project_name": "everparse"
} | _: Prims.unit -> LowParse.Low.Base.validator LowParse.Spec.Int.parse_u64 | {
"end_col": 47,
"end_line": 38,
"start_col": 2,
"start_line": 38
} |
Prims.Tot | val validate_u16: Prims.unit -> validator parse_u16 | [
{
"abbrev": true,
"full_module": "LowParse.Low.Endianness",
"short_module": "LE"
},
{
"abbrev": true,
"full_module": "FStar.Endianness",
"short_module": "E"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.UInt16",
"short_module": "U16"
},
{
"abbrev": true,
"full_module": "FStar.UInt8",
"short_module": "U8"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "Seq"
},
{
"abbrev": false,
"full_module": "LowParse.Low.Combinators",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Low.Base",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Spec.Int",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Low",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Low",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let validate_u16 () : validator parse_u16 =
validate_total_constant_size parse_u16 2uL () | val validate_u16: Prims.unit -> validator parse_u16
let validate_u16 () : validator parse_u16 = | false | null | false | validate_total_constant_size parse_u16 2uL () | {
"checked_file": "LowParse.Low.Int.fsti.checked",
"dependencies": [
"prims.fst.checked",
"LowParse.Spec.Int.fsti.checked",
"LowParse.Low.Base.fst.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": false,
"source_file": "LowParse.Low.Int.fsti"
} | [
"total"
] | [
"Prims.unit",
"LowParse.Low.Base.validate_total_constant_size",
"LowParse.Spec.Int.parse_u16_kind",
"FStar.UInt16.t",
"LowParse.Spec.Int.parse_u16",
"FStar.UInt64.__uint_to_t",
"LowParse.Low.Base.validator"
] | [] | module LowParse.Low.Int
include LowParse.Spec.Int
include LowParse.Low.Base
inline_for_extraction
val read_u8: leaf_reader parse_u8
inline_for_extraction
val read_u16: leaf_reader parse_u16
inline_for_extraction
val read_u32: leaf_reader parse_u32
inline_for_extraction
val read_u64 : leaf_reader parse_u64
inline_for_extraction
val read_u64_le : leaf_reader parse_u64_le
inline_for_extraction
let validate_u8 () : validator parse_u8 =
validate_total_constant_size parse_u8 1uL ()
inline_for_extraction
let validate_u8_with_error_code (c: error_code) : validator parse_u8 =
validate_total_constant_size_with_error_code parse_u8 1uL c
inline_for_extraction | false | true | LowParse.Low.Int.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val validate_u16: Prims.unit -> validator parse_u16 | [] | LowParse.Low.Int.validate_u16 | {
"file_name": "src/lowparse/LowParse.Low.Int.fsti",
"git_rev": "446a08ce38df905547cf20f28c43776b22b8087a",
"git_url": "https://github.com/project-everest/everparse.git",
"project_name": "everparse"
} | _: Prims.unit -> LowParse.Low.Base.validator LowParse.Spec.Int.parse_u16 | {
"end_col": 47,
"end_line": 30,
"start_col": 2,
"start_line": 30
} |
Prims.Tot | val jump_u64_le:jumper parse_u64_le | [
{
"abbrev": true,
"full_module": "LowParse.Low.Endianness",
"short_module": "LE"
},
{
"abbrev": true,
"full_module": "FStar.Endianness",
"short_module": "E"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.UInt16",
"short_module": "U16"
},
{
"abbrev": true,
"full_module": "FStar.UInt8",
"short_module": "U8"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "Seq"
},
{
"abbrev": false,
"full_module": "LowParse.Low.Combinators",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Low.Base",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Spec.Int",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Low",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Low",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let jump_u64_le : jumper parse_u64_le =
jump_constant_size parse_u64_le 8ul () | val jump_u64_le:jumper parse_u64_le
let jump_u64_le:jumper parse_u64_le = | false | null | false | jump_constant_size parse_u64_le 8ul () | {
"checked_file": "LowParse.Low.Int.fsti.checked",
"dependencies": [
"prims.fst.checked",
"LowParse.Spec.Int.fsti.checked",
"LowParse.Low.Base.fst.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": false,
"source_file": "LowParse.Low.Int.fsti"
} | [
"total"
] | [
"LowParse.Low.Base.jump_constant_size",
"LowParse.Spec.Int.parse_u64_kind",
"FStar.UInt64.t",
"LowParse.Spec.Int.parse_u64_le",
"FStar.UInt32.__uint_to_t"
] | [] | module LowParse.Low.Int
include LowParse.Spec.Int
include LowParse.Low.Base
inline_for_extraction
val read_u8: leaf_reader parse_u8
inline_for_extraction
val read_u16: leaf_reader parse_u16
inline_for_extraction
val read_u32: leaf_reader parse_u32
inline_for_extraction
val read_u64 : leaf_reader parse_u64
inline_for_extraction
val read_u64_le : leaf_reader parse_u64_le
inline_for_extraction
let validate_u8 () : validator parse_u8 =
validate_total_constant_size parse_u8 1uL ()
inline_for_extraction
let validate_u8_with_error_code (c: error_code) : validator parse_u8 =
validate_total_constant_size_with_error_code parse_u8 1uL c
inline_for_extraction
let validate_u16 () : validator parse_u16 =
validate_total_constant_size parse_u16 2uL ()
inline_for_extraction
let validate_u32 () : validator parse_u32 =
validate_total_constant_size parse_u32 4uL ()
inline_for_extraction
let validate_u64 () : validator parse_u64 =
validate_total_constant_size parse_u64 8uL ()
inline_for_extraction
let validate_u64_le () : validator parse_u64_le =
validate_total_constant_size parse_u64_le 8uL ()
inline_for_extraction
let validate_u64_le_with_error_code (c: error_code) : validator parse_u64_le =
validate_total_constant_size_with_error_code parse_u64_le 8uL c
inline_for_extraction
let jump_u8 : jumper parse_u8 =
jump_constant_size parse_u8 1ul ()
inline_for_extraction
let jump_u16 : jumper parse_u16 =
jump_constant_size parse_u16 2ul ()
inline_for_extraction
let jump_u32 : jumper parse_u32 =
jump_constant_size parse_u32 4ul ()
inline_for_extraction
let jump_u64 : jumper parse_u64 =
jump_constant_size parse_u64 8ul ()
inline_for_extraction | false | true | LowParse.Low.Int.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val jump_u64_le:jumper parse_u64_le | [] | LowParse.Low.Int.jump_u64_le | {
"file_name": "src/lowparse/LowParse.Low.Int.fsti",
"git_rev": "446a08ce38df905547cf20f28c43776b22b8087a",
"git_url": "https://github.com/project-everest/everparse.git",
"project_name": "everparse"
} | LowParse.Low.Base.jumper LowParse.Spec.Int.parse_u64_le | {
"end_col": 40,
"end_line": 66,
"start_col": 2,
"start_line": 66
} |
Prims.Tot | val validate_u64_le: Prims.unit -> validator parse_u64_le | [
{
"abbrev": true,
"full_module": "LowParse.Low.Endianness",
"short_module": "LE"
},
{
"abbrev": true,
"full_module": "FStar.Endianness",
"short_module": "E"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.UInt16",
"short_module": "U16"
},
{
"abbrev": true,
"full_module": "FStar.UInt8",
"short_module": "U8"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "Seq"
},
{
"abbrev": false,
"full_module": "LowParse.Low.Combinators",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Low.Base",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Spec.Int",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Low",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Low",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let validate_u64_le () : validator parse_u64_le =
validate_total_constant_size parse_u64_le 8uL () | val validate_u64_le: Prims.unit -> validator parse_u64_le
let validate_u64_le () : validator parse_u64_le = | false | null | false | validate_total_constant_size parse_u64_le 8uL () | {
"checked_file": "LowParse.Low.Int.fsti.checked",
"dependencies": [
"prims.fst.checked",
"LowParse.Spec.Int.fsti.checked",
"LowParse.Low.Base.fst.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": false,
"source_file": "LowParse.Low.Int.fsti"
} | [
"total"
] | [
"Prims.unit",
"LowParse.Low.Base.validate_total_constant_size",
"LowParse.Spec.Int.parse_u64_kind",
"FStar.UInt64.t",
"LowParse.Spec.Int.parse_u64_le",
"FStar.UInt64.__uint_to_t",
"LowParse.Low.Base.validator"
] | [] | module LowParse.Low.Int
include LowParse.Spec.Int
include LowParse.Low.Base
inline_for_extraction
val read_u8: leaf_reader parse_u8
inline_for_extraction
val read_u16: leaf_reader parse_u16
inline_for_extraction
val read_u32: leaf_reader parse_u32
inline_for_extraction
val read_u64 : leaf_reader parse_u64
inline_for_extraction
val read_u64_le : leaf_reader parse_u64_le
inline_for_extraction
let validate_u8 () : validator parse_u8 =
validate_total_constant_size parse_u8 1uL ()
inline_for_extraction
let validate_u8_with_error_code (c: error_code) : validator parse_u8 =
validate_total_constant_size_with_error_code parse_u8 1uL c
inline_for_extraction
let validate_u16 () : validator parse_u16 =
validate_total_constant_size parse_u16 2uL ()
inline_for_extraction
let validate_u32 () : validator parse_u32 =
validate_total_constant_size parse_u32 4uL ()
inline_for_extraction
let validate_u64 () : validator parse_u64 =
validate_total_constant_size parse_u64 8uL ()
inline_for_extraction | false | true | LowParse.Low.Int.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val validate_u64_le: Prims.unit -> validator parse_u64_le | [] | LowParse.Low.Int.validate_u64_le | {
"file_name": "src/lowparse/LowParse.Low.Int.fsti",
"git_rev": "446a08ce38df905547cf20f28c43776b22b8087a",
"git_url": "https://github.com/project-everest/everparse.git",
"project_name": "everparse"
} | _: Prims.unit -> LowParse.Low.Base.validator LowParse.Spec.Int.parse_u64_le | {
"end_col": 50,
"end_line": 42,
"start_col": 2,
"start_line": 42
} |
Prims.Tot | val jump_u16:jumper parse_u16 | [
{
"abbrev": true,
"full_module": "LowParse.Low.Endianness",
"short_module": "LE"
},
{
"abbrev": true,
"full_module": "FStar.Endianness",
"short_module": "E"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.UInt16",
"short_module": "U16"
},
{
"abbrev": true,
"full_module": "FStar.UInt8",
"short_module": "U8"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "Seq"
},
{
"abbrev": false,
"full_module": "LowParse.Low.Combinators",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Low.Base",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Spec.Int",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Low",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Low",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let jump_u16 : jumper parse_u16 =
jump_constant_size parse_u16 2ul () | val jump_u16:jumper parse_u16
let jump_u16:jumper parse_u16 = | false | null | false | jump_constant_size parse_u16 2ul () | {
"checked_file": "LowParse.Low.Int.fsti.checked",
"dependencies": [
"prims.fst.checked",
"LowParse.Spec.Int.fsti.checked",
"LowParse.Low.Base.fst.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": false,
"source_file": "LowParse.Low.Int.fsti"
} | [
"total"
] | [
"LowParse.Low.Base.jump_constant_size",
"LowParse.Spec.Int.parse_u16_kind",
"FStar.UInt16.t",
"LowParse.Spec.Int.parse_u16",
"FStar.UInt32.__uint_to_t"
] | [] | module LowParse.Low.Int
include LowParse.Spec.Int
include LowParse.Low.Base
inline_for_extraction
val read_u8: leaf_reader parse_u8
inline_for_extraction
val read_u16: leaf_reader parse_u16
inline_for_extraction
val read_u32: leaf_reader parse_u32
inline_for_extraction
val read_u64 : leaf_reader parse_u64
inline_for_extraction
val read_u64_le : leaf_reader parse_u64_le
inline_for_extraction
let validate_u8 () : validator parse_u8 =
validate_total_constant_size parse_u8 1uL ()
inline_for_extraction
let validate_u8_with_error_code (c: error_code) : validator parse_u8 =
validate_total_constant_size_with_error_code parse_u8 1uL c
inline_for_extraction
let validate_u16 () : validator parse_u16 =
validate_total_constant_size parse_u16 2uL ()
inline_for_extraction
let validate_u32 () : validator parse_u32 =
validate_total_constant_size parse_u32 4uL ()
inline_for_extraction
let validate_u64 () : validator parse_u64 =
validate_total_constant_size parse_u64 8uL ()
inline_for_extraction
let validate_u64_le () : validator parse_u64_le =
validate_total_constant_size parse_u64_le 8uL ()
inline_for_extraction
let validate_u64_le_with_error_code (c: error_code) : validator parse_u64_le =
validate_total_constant_size_with_error_code parse_u64_le 8uL c
inline_for_extraction
let jump_u8 : jumper parse_u8 =
jump_constant_size parse_u8 1ul ()
inline_for_extraction | false | true | LowParse.Low.Int.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val jump_u16:jumper parse_u16 | [] | LowParse.Low.Int.jump_u16 | {
"file_name": "src/lowparse/LowParse.Low.Int.fsti",
"git_rev": "446a08ce38df905547cf20f28c43776b22b8087a",
"git_url": "https://github.com/project-everest/everparse.git",
"project_name": "everparse"
} | LowParse.Low.Base.jumper LowParse.Spec.Int.parse_u16 | {
"end_col": 37,
"end_line": 54,
"start_col": 2,
"start_line": 54
} |
Prims.Tot | val validate_u32: Prims.unit -> validator parse_u32 | [
{
"abbrev": true,
"full_module": "LowParse.Low.Endianness",
"short_module": "LE"
},
{
"abbrev": true,
"full_module": "FStar.Endianness",
"short_module": "E"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.UInt16",
"short_module": "U16"
},
{
"abbrev": true,
"full_module": "FStar.UInt8",
"short_module": "U8"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "Seq"
},
{
"abbrev": false,
"full_module": "LowParse.Low.Combinators",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Low.Base",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Spec.Int",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Low",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Low",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let validate_u32 () : validator parse_u32 =
validate_total_constant_size parse_u32 4uL () | val validate_u32: Prims.unit -> validator parse_u32
let validate_u32 () : validator parse_u32 = | false | null | false | validate_total_constant_size parse_u32 4uL () | {
"checked_file": "LowParse.Low.Int.fsti.checked",
"dependencies": [
"prims.fst.checked",
"LowParse.Spec.Int.fsti.checked",
"LowParse.Low.Base.fst.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": false,
"source_file": "LowParse.Low.Int.fsti"
} | [
"total"
] | [
"Prims.unit",
"LowParse.Low.Base.validate_total_constant_size",
"LowParse.Spec.Int.parse_u32_kind",
"FStar.UInt32.t",
"LowParse.Spec.Int.parse_u32",
"FStar.UInt64.__uint_to_t",
"LowParse.Low.Base.validator"
] | [] | module LowParse.Low.Int
include LowParse.Spec.Int
include LowParse.Low.Base
inline_for_extraction
val read_u8: leaf_reader parse_u8
inline_for_extraction
val read_u16: leaf_reader parse_u16
inline_for_extraction
val read_u32: leaf_reader parse_u32
inline_for_extraction
val read_u64 : leaf_reader parse_u64
inline_for_extraction
val read_u64_le : leaf_reader parse_u64_le
inline_for_extraction
let validate_u8 () : validator parse_u8 =
validate_total_constant_size parse_u8 1uL ()
inline_for_extraction
let validate_u8_with_error_code (c: error_code) : validator parse_u8 =
validate_total_constant_size_with_error_code parse_u8 1uL c
inline_for_extraction
let validate_u16 () : validator parse_u16 =
validate_total_constant_size parse_u16 2uL ()
inline_for_extraction | false | true | LowParse.Low.Int.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val validate_u32: Prims.unit -> validator parse_u32 | [] | LowParse.Low.Int.validate_u32 | {
"file_name": "src/lowparse/LowParse.Low.Int.fsti",
"git_rev": "446a08ce38df905547cf20f28c43776b22b8087a",
"git_url": "https://github.com/project-everest/everparse.git",
"project_name": "everparse"
} | _: Prims.unit -> LowParse.Low.Base.validator LowParse.Spec.Int.parse_u32 | {
"end_col": 47,
"end_line": 34,
"start_col": 2,
"start_line": 34
} |
Prims.Tot | val jump_u32:jumper parse_u32 | [
{
"abbrev": true,
"full_module": "LowParse.Low.Endianness",
"short_module": "LE"
},
{
"abbrev": true,
"full_module": "FStar.Endianness",
"short_module": "E"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.UInt16",
"short_module": "U16"
},
{
"abbrev": true,
"full_module": "FStar.UInt8",
"short_module": "U8"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "Seq"
},
{
"abbrev": false,
"full_module": "LowParse.Low.Combinators",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Low.Base",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Spec.Int",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Low",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Low",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let jump_u32 : jumper parse_u32 =
jump_constant_size parse_u32 4ul () | val jump_u32:jumper parse_u32
let jump_u32:jumper parse_u32 = | false | null | false | jump_constant_size parse_u32 4ul () | {
"checked_file": "LowParse.Low.Int.fsti.checked",
"dependencies": [
"prims.fst.checked",
"LowParse.Spec.Int.fsti.checked",
"LowParse.Low.Base.fst.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": false,
"source_file": "LowParse.Low.Int.fsti"
} | [
"total"
] | [
"LowParse.Low.Base.jump_constant_size",
"LowParse.Spec.Int.parse_u32_kind",
"FStar.UInt32.t",
"LowParse.Spec.Int.parse_u32",
"FStar.UInt32.__uint_to_t"
] | [] | module LowParse.Low.Int
include LowParse.Spec.Int
include LowParse.Low.Base
inline_for_extraction
val read_u8: leaf_reader parse_u8
inline_for_extraction
val read_u16: leaf_reader parse_u16
inline_for_extraction
val read_u32: leaf_reader parse_u32
inline_for_extraction
val read_u64 : leaf_reader parse_u64
inline_for_extraction
val read_u64_le : leaf_reader parse_u64_le
inline_for_extraction
let validate_u8 () : validator parse_u8 =
validate_total_constant_size parse_u8 1uL ()
inline_for_extraction
let validate_u8_with_error_code (c: error_code) : validator parse_u8 =
validate_total_constant_size_with_error_code parse_u8 1uL c
inline_for_extraction
let validate_u16 () : validator parse_u16 =
validate_total_constant_size parse_u16 2uL ()
inline_for_extraction
let validate_u32 () : validator parse_u32 =
validate_total_constant_size parse_u32 4uL ()
inline_for_extraction
let validate_u64 () : validator parse_u64 =
validate_total_constant_size parse_u64 8uL ()
inline_for_extraction
let validate_u64_le () : validator parse_u64_le =
validate_total_constant_size parse_u64_le 8uL ()
inline_for_extraction
let validate_u64_le_with_error_code (c: error_code) : validator parse_u64_le =
validate_total_constant_size_with_error_code parse_u64_le 8uL c
inline_for_extraction
let jump_u8 : jumper parse_u8 =
jump_constant_size parse_u8 1ul ()
inline_for_extraction
let jump_u16 : jumper parse_u16 =
jump_constant_size parse_u16 2ul ()
inline_for_extraction | false | true | LowParse.Low.Int.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val jump_u32:jumper parse_u32 | [] | LowParse.Low.Int.jump_u32 | {
"file_name": "src/lowparse/LowParse.Low.Int.fsti",
"git_rev": "446a08ce38df905547cf20f28c43776b22b8087a",
"git_url": "https://github.com/project-everest/everparse.git",
"project_name": "everparse"
} | LowParse.Low.Base.jumper LowParse.Spec.Int.parse_u32 | {
"end_col": 37,
"end_line": 58,
"start_col": 2,
"start_line": 58
} |
Prims.Tot | val jump_u8:jumper parse_u8 | [
{
"abbrev": true,
"full_module": "LowParse.Low.Endianness",
"short_module": "LE"
},
{
"abbrev": true,
"full_module": "FStar.Endianness",
"short_module": "E"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.UInt16",
"short_module": "U16"
},
{
"abbrev": true,
"full_module": "FStar.UInt8",
"short_module": "U8"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "Seq"
},
{
"abbrev": false,
"full_module": "LowParse.Low.Combinators",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Low.Base",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Spec.Int",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Low",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Low",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let jump_u8 : jumper parse_u8 =
jump_constant_size parse_u8 1ul () | val jump_u8:jumper parse_u8
let jump_u8:jumper parse_u8 = | false | null | false | jump_constant_size parse_u8 1ul () | {
"checked_file": "LowParse.Low.Int.fsti.checked",
"dependencies": [
"prims.fst.checked",
"LowParse.Spec.Int.fsti.checked",
"LowParse.Low.Base.fst.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": false,
"source_file": "LowParse.Low.Int.fsti"
} | [
"total"
] | [
"LowParse.Low.Base.jump_constant_size",
"LowParse.Spec.Int.parse_u8_kind",
"FStar.UInt8.t",
"LowParse.Spec.Int.parse_u8",
"FStar.UInt32.__uint_to_t"
] | [] | module LowParse.Low.Int
include LowParse.Spec.Int
include LowParse.Low.Base
inline_for_extraction
val read_u8: leaf_reader parse_u8
inline_for_extraction
val read_u16: leaf_reader parse_u16
inline_for_extraction
val read_u32: leaf_reader parse_u32
inline_for_extraction
val read_u64 : leaf_reader parse_u64
inline_for_extraction
val read_u64_le : leaf_reader parse_u64_le
inline_for_extraction
let validate_u8 () : validator parse_u8 =
validate_total_constant_size parse_u8 1uL ()
inline_for_extraction
let validate_u8_with_error_code (c: error_code) : validator parse_u8 =
validate_total_constant_size_with_error_code parse_u8 1uL c
inline_for_extraction
let validate_u16 () : validator parse_u16 =
validate_total_constant_size parse_u16 2uL ()
inline_for_extraction
let validate_u32 () : validator parse_u32 =
validate_total_constant_size parse_u32 4uL ()
inline_for_extraction
let validate_u64 () : validator parse_u64 =
validate_total_constant_size parse_u64 8uL ()
inline_for_extraction
let validate_u64_le () : validator parse_u64_le =
validate_total_constant_size parse_u64_le 8uL ()
inline_for_extraction
let validate_u64_le_with_error_code (c: error_code) : validator parse_u64_le =
validate_total_constant_size_with_error_code parse_u64_le 8uL c
inline_for_extraction | false | true | LowParse.Low.Int.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val jump_u8:jumper parse_u8 | [] | LowParse.Low.Int.jump_u8 | {
"file_name": "src/lowparse/LowParse.Low.Int.fsti",
"git_rev": "446a08ce38df905547cf20f28c43776b22b8087a",
"git_url": "https://github.com/project-everest/everparse.git",
"project_name": "everparse"
} | LowParse.Low.Base.jumper LowParse.Spec.Int.parse_u8 | {
"end_col": 36,
"end_line": 50,
"start_col": 2,
"start_line": 50
} |
Prims.Tot | val jump_u64:jumper parse_u64 | [
{
"abbrev": true,
"full_module": "LowParse.Low.Endianness",
"short_module": "LE"
},
{
"abbrev": true,
"full_module": "FStar.Endianness",
"short_module": "E"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.UInt16",
"short_module": "U16"
},
{
"abbrev": true,
"full_module": "FStar.UInt8",
"short_module": "U8"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "Seq"
},
{
"abbrev": false,
"full_module": "LowParse.Low.Combinators",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Low.Base",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Spec.Int",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Low",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Low",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let jump_u64 : jumper parse_u64 =
jump_constant_size parse_u64 8ul () | val jump_u64:jumper parse_u64
let jump_u64:jumper parse_u64 = | false | null | false | jump_constant_size parse_u64 8ul () | {
"checked_file": "LowParse.Low.Int.fsti.checked",
"dependencies": [
"prims.fst.checked",
"LowParse.Spec.Int.fsti.checked",
"LowParse.Low.Base.fst.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": false,
"source_file": "LowParse.Low.Int.fsti"
} | [
"total"
] | [
"LowParse.Low.Base.jump_constant_size",
"LowParse.Spec.Int.parse_u64_kind",
"FStar.UInt64.t",
"LowParse.Spec.Int.parse_u64",
"FStar.UInt32.__uint_to_t"
] | [] | module LowParse.Low.Int
include LowParse.Spec.Int
include LowParse.Low.Base
inline_for_extraction
val read_u8: leaf_reader parse_u8
inline_for_extraction
val read_u16: leaf_reader parse_u16
inline_for_extraction
val read_u32: leaf_reader parse_u32
inline_for_extraction
val read_u64 : leaf_reader parse_u64
inline_for_extraction
val read_u64_le : leaf_reader parse_u64_le
inline_for_extraction
let validate_u8 () : validator parse_u8 =
validate_total_constant_size parse_u8 1uL ()
inline_for_extraction
let validate_u8_with_error_code (c: error_code) : validator parse_u8 =
validate_total_constant_size_with_error_code parse_u8 1uL c
inline_for_extraction
let validate_u16 () : validator parse_u16 =
validate_total_constant_size parse_u16 2uL ()
inline_for_extraction
let validate_u32 () : validator parse_u32 =
validate_total_constant_size parse_u32 4uL ()
inline_for_extraction
let validate_u64 () : validator parse_u64 =
validate_total_constant_size parse_u64 8uL ()
inline_for_extraction
let validate_u64_le () : validator parse_u64_le =
validate_total_constant_size parse_u64_le 8uL ()
inline_for_extraction
let validate_u64_le_with_error_code (c: error_code) : validator parse_u64_le =
validate_total_constant_size_with_error_code parse_u64_le 8uL c
inline_for_extraction
let jump_u8 : jumper parse_u8 =
jump_constant_size parse_u8 1ul ()
inline_for_extraction
let jump_u16 : jumper parse_u16 =
jump_constant_size parse_u16 2ul ()
inline_for_extraction
let jump_u32 : jumper parse_u32 =
jump_constant_size parse_u32 4ul ()
inline_for_extraction | false | true | LowParse.Low.Int.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val jump_u64:jumper parse_u64 | [] | LowParse.Low.Int.jump_u64 | {
"file_name": "src/lowparse/LowParse.Low.Int.fsti",
"git_rev": "446a08ce38df905547cf20f28c43776b22b8087a",
"git_url": "https://github.com/project-everest/everparse.git",
"project_name": "everparse"
} | LowParse.Low.Base.jumper LowParse.Spec.Int.parse_u64 | {
"end_col": 37,
"end_line": 62,
"start_col": 2,
"start_line": 62
} |
Prims.Tot | val write_u16_weak:leaf_writer_weak serialize_u16 | [
{
"abbrev": true,
"full_module": "LowParse.Low.Endianness",
"short_module": "LE"
},
{
"abbrev": true,
"full_module": "FStar.Endianness",
"short_module": "E"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.UInt16",
"short_module": "U16"
},
{
"abbrev": true,
"full_module": "FStar.UInt8",
"short_module": "U8"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "Seq"
},
{
"abbrev": false,
"full_module": "LowParse.Low.Combinators",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Low.Base",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Spec.Int",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Low",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Low",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let write_u16_weak : leaf_writer_weak serialize_u16 =
leaf_writer_weak_of_strong_constant_size write_u16 2ul () | val write_u16_weak:leaf_writer_weak serialize_u16
let write_u16_weak:leaf_writer_weak serialize_u16 = | false | null | false | leaf_writer_weak_of_strong_constant_size write_u16 2ul () | {
"checked_file": "LowParse.Low.Int.fsti.checked",
"dependencies": [
"prims.fst.checked",
"LowParse.Spec.Int.fsti.checked",
"LowParse.Low.Base.fst.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": false,
"source_file": "LowParse.Low.Int.fsti"
} | [
"total"
] | [
"LowParse.Low.Base.leaf_writer_weak_of_strong_constant_size",
"LowParse.Spec.Int.parse_u16_kind",
"FStar.UInt16.t",
"LowParse.Spec.Int.parse_u16",
"LowParse.Spec.Int.serialize_u16",
"LowParse.Low.Int.write_u16",
"FStar.UInt32.__uint_to_t"
] | [] | module LowParse.Low.Int
include LowParse.Spec.Int
include LowParse.Low.Base
inline_for_extraction
val read_u8: leaf_reader parse_u8
inline_for_extraction
val read_u16: leaf_reader parse_u16
inline_for_extraction
val read_u32: leaf_reader parse_u32
inline_for_extraction
val read_u64 : leaf_reader parse_u64
inline_for_extraction
val read_u64_le : leaf_reader parse_u64_le
inline_for_extraction
let validate_u8 () : validator parse_u8 =
validate_total_constant_size parse_u8 1uL ()
inline_for_extraction
let validate_u8_with_error_code (c: error_code) : validator parse_u8 =
validate_total_constant_size_with_error_code parse_u8 1uL c
inline_for_extraction
let validate_u16 () : validator parse_u16 =
validate_total_constant_size parse_u16 2uL ()
inline_for_extraction
let validate_u32 () : validator parse_u32 =
validate_total_constant_size parse_u32 4uL ()
inline_for_extraction
let validate_u64 () : validator parse_u64 =
validate_total_constant_size parse_u64 8uL ()
inline_for_extraction
let validate_u64_le () : validator parse_u64_le =
validate_total_constant_size parse_u64_le 8uL ()
inline_for_extraction
let validate_u64_le_with_error_code (c: error_code) : validator parse_u64_le =
validate_total_constant_size_with_error_code parse_u64_le 8uL c
inline_for_extraction
let jump_u8 : jumper parse_u8 =
jump_constant_size parse_u8 1ul ()
inline_for_extraction
let jump_u16 : jumper parse_u16 =
jump_constant_size parse_u16 2ul ()
inline_for_extraction
let jump_u32 : jumper parse_u32 =
jump_constant_size parse_u32 4ul ()
inline_for_extraction
let jump_u64 : jumper parse_u64 =
jump_constant_size parse_u64 8ul ()
inline_for_extraction
let jump_u64_le : jumper parse_u64_le =
jump_constant_size parse_u64_le 8ul ()
inline_for_extraction
val serialize32_u8 : serializer32 serialize_u8
inline_for_extraction
val serialize32_u16 : serializer32 serialize_u16
inline_for_extraction
val serialize32_u32 : serializer32 serialize_u32
inline_for_extraction
val serialize32_u64 : serializer32 serialize_u64
inline_for_extraction
val write_u8 : leaf_writer_strong serialize_u8
inline_for_extraction
let write_u8_weak : leaf_writer_weak serialize_u8 =
leaf_writer_weak_of_strong_constant_size write_u8 1ul ()
inline_for_extraction
val write_u16 : leaf_writer_strong serialize_u16
inline_for_extraction | false | true | LowParse.Low.Int.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val write_u16_weak:leaf_writer_weak serialize_u16 | [] | LowParse.Low.Int.write_u16_weak | {
"file_name": "src/lowparse/LowParse.Low.Int.fsti",
"git_rev": "446a08ce38df905547cf20f28c43776b22b8087a",
"git_url": "https://github.com/project-everest/everparse.git",
"project_name": "everparse"
} | LowParse.Low.Base.leaf_writer_weak LowParse.Spec.Int.serialize_u16 | {
"end_col": 59,
"end_line": 92,
"start_col": 2,
"start_line": 92
} |
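The weak-writer records in this file all wrap an abstract strong writer. As used here, leaf_writer_weak_of_strong_constant_size takes the strong leaf writer, the format's byte size as a 32-bit literal, and a unit argument, and returns a leaf_writer_weak for the same serializer; the usual reading in LowParse is that the weak form additionally guards against insufficient output space at run time, although that failure convention is not visible in this excerpt. The shared shape, with s, write_s and n standing in as placeholders rather than real identifiers, is:

(* let write_s_weak : leaf_writer_weak s =
     leaf_writer_weak_of_strong_constant_size write_s n () *)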
Prims.Tot | val write_u64_weak:leaf_writer_weak serialize_u64 | [
{
"abbrev": true,
"full_module": "LowParse.Low.Endianness",
"short_module": "LE"
},
{
"abbrev": true,
"full_module": "FStar.Endianness",
"short_module": "E"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.UInt16",
"short_module": "U16"
},
{
"abbrev": true,
"full_module": "FStar.UInt8",
"short_module": "U8"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "Seq"
},
{
"abbrev": false,
"full_module": "LowParse.Low.Combinators",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Low.Base",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Spec.Int",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Low",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Low",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let write_u64_weak : leaf_writer_weak serialize_u64 =
leaf_writer_weak_of_strong_constant_size write_u64 8ul () | val write_u64_weak:leaf_writer_weak serialize_u64
let write_u64_weak:leaf_writer_weak serialize_u64 = | false | null | false | leaf_writer_weak_of_strong_constant_size write_u64 8ul () | {
"checked_file": "LowParse.Low.Int.fsti.checked",
"dependencies": [
"prims.fst.checked",
"LowParse.Spec.Int.fsti.checked",
"LowParse.Low.Base.fst.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": false,
"source_file": "LowParse.Low.Int.fsti"
} | [
"total"
] | [
"LowParse.Low.Base.leaf_writer_weak_of_strong_constant_size",
"LowParse.Spec.Int.parse_u64_kind",
"FStar.UInt64.t",
"LowParse.Spec.Int.parse_u64",
"LowParse.Spec.Int.serialize_u64",
"LowParse.Low.Int.write_u64",
"FStar.UInt32.__uint_to_t"
] | [] | module LowParse.Low.Int
include LowParse.Spec.Int
include LowParse.Low.Base
inline_for_extraction
val read_u8: leaf_reader parse_u8
inline_for_extraction
val read_u16: leaf_reader parse_u16
inline_for_extraction
val read_u32: leaf_reader parse_u32
inline_for_extraction
val read_u64 : leaf_reader parse_u64
inline_for_extraction
val read_u64_le : leaf_reader parse_u64_le
inline_for_extraction
let validate_u8 () : validator parse_u8 =
validate_total_constant_size parse_u8 1uL ()
inline_for_extraction
let validate_u8_with_error_code (c: error_code) : validator parse_u8 =
validate_total_constant_size_with_error_code parse_u8 1uL c
inline_for_extraction
let validate_u16 () : validator parse_u16 =
validate_total_constant_size parse_u16 2uL ()
inline_for_extraction
let validate_u32 () : validator parse_u32 =
validate_total_constant_size parse_u32 4uL ()
inline_for_extraction
let validate_u64 () : validator parse_u64 =
validate_total_constant_size parse_u64 8uL ()
inline_for_extraction
let validate_u64_le () : validator parse_u64_le =
validate_total_constant_size parse_u64_le 8uL ()
inline_for_extraction
let validate_u64_le_with_error_code (c: error_code) : validator parse_u64_le =
validate_total_constant_size_with_error_code parse_u64_le 8uL c
inline_for_extraction
let jump_u8 : jumper parse_u8 =
jump_constant_size parse_u8 1ul ()
inline_for_extraction
let jump_u16 : jumper parse_u16 =
jump_constant_size parse_u16 2ul ()
inline_for_extraction
let jump_u32 : jumper parse_u32 =
jump_constant_size parse_u32 4ul ()
inline_for_extraction
let jump_u64 : jumper parse_u64 =
jump_constant_size parse_u64 8ul ()
inline_for_extraction
let jump_u64_le : jumper parse_u64_le =
jump_constant_size parse_u64_le 8ul ()
inline_for_extraction
val serialize32_u8 : serializer32 serialize_u8
inline_for_extraction
val serialize32_u16 : serializer32 serialize_u16
inline_for_extraction
val serialize32_u32 : serializer32 serialize_u32
inline_for_extraction
val serialize32_u64 : serializer32 serialize_u64
inline_for_extraction
val write_u8 : leaf_writer_strong serialize_u8
inline_for_extraction
let write_u8_weak : leaf_writer_weak serialize_u8 =
leaf_writer_weak_of_strong_constant_size write_u8 1ul ()
inline_for_extraction
val write_u16 : leaf_writer_strong serialize_u16
inline_for_extraction
let write_u16_weak : leaf_writer_weak serialize_u16 =
leaf_writer_weak_of_strong_constant_size write_u16 2ul ()
inline_for_extraction
val write_u32 : leaf_writer_strong serialize_u32
inline_for_extraction
let write_u32_weak : leaf_writer_weak serialize_u32 =
leaf_writer_weak_of_strong_constant_size write_u32 4ul ()
inline_for_extraction
val write_u64 : leaf_writer_strong serialize_u64
inline_for_extraction | false | true | LowParse.Low.Int.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val write_u64_weak:leaf_writer_weak serialize_u64 | [] | LowParse.Low.Int.write_u64_weak | {
"file_name": "src/lowparse/LowParse.Low.Int.fsti",
"git_rev": "446a08ce38df905547cf20f28c43776b22b8087a",
"git_url": "https://github.com/project-everest/everparse.git",
"project_name": "everparse"
} | LowParse.Low.Base.leaf_writer_weak LowParse.Spec.Int.serialize_u64 | {
"end_col": 59,
"end_line": 106,
"start_col": 2,
"start_line": 106
} |
Prims.Tot | val validate_u8: Prims.unit -> validator parse_u8 | [
{
"abbrev": true,
"full_module": "LowParse.Low.Endianness",
"short_module": "LE"
},
{
"abbrev": true,
"full_module": "FStar.Endianness",
"short_module": "E"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.UInt16",
"short_module": "U16"
},
{
"abbrev": true,
"full_module": "FStar.UInt8",
"short_module": "U8"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "Seq"
},
{
"abbrev": false,
"full_module": "LowParse.Low.Combinators",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Low.Base",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Spec.Int",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Low",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Low",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let validate_u8 () : validator parse_u8 =
validate_total_constant_size parse_u8 1uL () | val validate_u8: Prims.unit -> validator parse_u8
let validate_u8 () : validator parse_u8 = | false | null | false | validate_total_constant_size parse_u8 1uL () | {
"checked_file": "LowParse.Low.Int.fsti.checked",
"dependencies": [
"prims.fst.checked",
"LowParse.Spec.Int.fsti.checked",
"LowParse.Low.Base.fst.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": false,
"source_file": "LowParse.Low.Int.fsti"
} | [
"total"
] | [
"Prims.unit",
"LowParse.Low.Base.validate_total_constant_size",
"LowParse.Spec.Int.parse_u8_kind",
"FStar.UInt8.t",
"LowParse.Spec.Int.parse_u8",
"FStar.UInt64.__uint_to_t",
"LowParse.Low.Base.validator"
] | [] | module LowParse.Low.Int
include LowParse.Spec.Int
include LowParse.Low.Base
inline_for_extraction
val read_u8: leaf_reader parse_u8
inline_for_extraction
val read_u16: leaf_reader parse_u16
inline_for_extraction
val read_u32: leaf_reader parse_u32
inline_for_extraction
val read_u64 : leaf_reader parse_u64
inline_for_extraction
val read_u64_le : leaf_reader parse_u64_le
inline_for_extraction | false | true | LowParse.Low.Int.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val validate_u8: Prims.unit -> validator parse_u8 | [] | LowParse.Low.Int.validate_u8 | {
"file_name": "src/lowparse/LowParse.Low.Int.fsti",
"git_rev": "446a08ce38df905547cf20f28c43776b22b8087a",
"git_url": "https://github.com/project-everest/everparse.git",
"project_name": "everparse"
} | _: Prims.unit -> LowParse.Low.Base.validator LowParse.Spec.Int.parse_u8 | {
"end_col": 46,
"end_line": 22,
"start_col": 2,
"start_line": 22
} |
Prims.Tot | val write_u8_weak:leaf_writer_weak serialize_u8 | [
{
"abbrev": true,
"full_module": "LowParse.Low.Endianness",
"short_module": "LE"
},
{
"abbrev": true,
"full_module": "FStar.Endianness",
"short_module": "E"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.UInt16",
"short_module": "U16"
},
{
"abbrev": true,
"full_module": "FStar.UInt8",
"short_module": "U8"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "Seq"
},
{
"abbrev": false,
"full_module": "LowParse.Low.Combinators",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Low.Base",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Spec.Int",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Low",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Low",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let write_u8_weak : leaf_writer_weak serialize_u8 =
leaf_writer_weak_of_strong_constant_size write_u8 1ul () | val write_u8_weak:leaf_writer_weak serialize_u8
let write_u8_weak:leaf_writer_weak serialize_u8 = | false | null | false | leaf_writer_weak_of_strong_constant_size write_u8 1ul () | {
"checked_file": "LowParse.Low.Int.fsti.checked",
"dependencies": [
"prims.fst.checked",
"LowParse.Spec.Int.fsti.checked",
"LowParse.Low.Base.fst.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": false,
"source_file": "LowParse.Low.Int.fsti"
} | [
"total"
] | [
"LowParse.Low.Base.leaf_writer_weak_of_strong_constant_size",
"LowParse.Spec.Int.parse_u8_kind",
"FStar.UInt8.t",
"LowParse.Spec.Int.parse_u8",
"LowParse.Spec.Int.serialize_u8",
"LowParse.Low.Int.write_u8",
"FStar.UInt32.__uint_to_t"
] | [] | module LowParse.Low.Int
include LowParse.Spec.Int
include LowParse.Low.Base
inline_for_extraction
val read_u8: leaf_reader parse_u8
inline_for_extraction
val read_u16: leaf_reader parse_u16
inline_for_extraction
val read_u32: leaf_reader parse_u32
inline_for_extraction
val read_u64 : leaf_reader parse_u64
inline_for_extraction
val read_u64_le : leaf_reader parse_u64_le
inline_for_extraction
let validate_u8 () : validator parse_u8 =
validate_total_constant_size parse_u8 1uL ()
inline_for_extraction
let validate_u8_with_error_code (c: error_code) : validator parse_u8 =
validate_total_constant_size_with_error_code parse_u8 1uL c
inline_for_extraction
let validate_u16 () : validator parse_u16 =
validate_total_constant_size parse_u16 2uL ()
inline_for_extraction
let validate_u32 () : validator parse_u32 =
validate_total_constant_size parse_u32 4uL ()
inline_for_extraction
let validate_u64 () : validator parse_u64 =
validate_total_constant_size parse_u64 8uL ()
inline_for_extraction
let validate_u64_le () : validator parse_u64_le =
validate_total_constant_size parse_u64_le 8uL ()
inline_for_extraction
let validate_u64_le_with_error_code (c: error_code) : validator parse_u64_le =
validate_total_constant_size_with_error_code parse_u64_le 8uL c
inline_for_extraction
let jump_u8 : jumper parse_u8 =
jump_constant_size parse_u8 1ul ()
inline_for_extraction
let jump_u16 : jumper parse_u16 =
jump_constant_size parse_u16 2ul ()
inline_for_extraction
let jump_u32 : jumper parse_u32 =
jump_constant_size parse_u32 4ul ()
inline_for_extraction
let jump_u64 : jumper parse_u64 =
jump_constant_size parse_u64 8ul ()
inline_for_extraction
let jump_u64_le : jumper parse_u64_le =
jump_constant_size parse_u64_le 8ul ()
inline_for_extraction
val serialize32_u8 : serializer32 serialize_u8
inline_for_extraction
val serialize32_u16 : serializer32 serialize_u16
inline_for_extraction
val serialize32_u32 : serializer32 serialize_u32
inline_for_extraction
val serialize32_u64 : serializer32 serialize_u64
inline_for_extraction
val write_u8 : leaf_writer_strong serialize_u8
inline_for_extraction | false | true | LowParse.Low.Int.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val write_u8_weak:leaf_writer_weak serialize_u8 | [] | LowParse.Low.Int.write_u8_weak | {
"file_name": "src/lowparse/LowParse.Low.Int.fsti",
"git_rev": "446a08ce38df905547cf20f28c43776b22b8087a",
"git_url": "https://github.com/project-everest/everparse.git",
"project_name": "everparse"
} | LowParse.Low.Base.leaf_writer_weak LowParse.Spec.Int.serialize_u8 | {
"end_col": 58,
"end_line": 85,
"start_col": 2,
"start_line": 85
} |
Prims.Tot | val validate_u64_le_with_error_code (c: error_code) : validator parse_u64_le | [
{
"abbrev": true,
"full_module": "LowParse.Low.Endianness",
"short_module": "LE"
},
{
"abbrev": true,
"full_module": "FStar.Endianness",
"short_module": "E"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.UInt16",
"short_module": "U16"
},
{
"abbrev": true,
"full_module": "FStar.UInt8",
"short_module": "U8"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "Seq"
},
{
"abbrev": false,
"full_module": "LowParse.Low.Combinators",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Low.Base",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Spec.Int",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Low",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Low",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let validate_u64_le_with_error_code (c: error_code) : validator parse_u64_le =
validate_total_constant_size_with_error_code parse_u64_le 8uL c | val validate_u64_le_with_error_code (c: error_code) : validator parse_u64_le
let validate_u64_le_with_error_code (c: error_code) : validator parse_u64_le = | false | null | false | validate_total_constant_size_with_error_code parse_u64_le 8uL c | {
"checked_file": "LowParse.Low.Int.fsti.checked",
"dependencies": [
"prims.fst.checked",
"LowParse.Spec.Int.fsti.checked",
"LowParse.Low.Base.fst.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": false,
"source_file": "LowParse.Low.Int.fsti"
} | [
"total"
] | [
"LowParse.Low.ErrorCode.error_code",
"LowParse.Low.Base.validate_total_constant_size_with_error_code",
"LowParse.Spec.Int.parse_u64_kind",
"FStar.UInt64.t",
"LowParse.Spec.Int.parse_u64_le",
"FStar.UInt64.__uint_to_t",
"LowParse.Low.Base.validator"
] | [] | module LowParse.Low.Int
include LowParse.Spec.Int
include LowParse.Low.Base
inline_for_extraction
val read_u8: leaf_reader parse_u8
inline_for_extraction
val read_u16: leaf_reader parse_u16
inline_for_extraction
val read_u32: leaf_reader parse_u32
inline_for_extraction
val read_u64 : leaf_reader parse_u64
inline_for_extraction
val read_u64_le : leaf_reader parse_u64_le
inline_for_extraction
let validate_u8 () : validator parse_u8 =
validate_total_constant_size parse_u8 1uL ()
inline_for_extraction
let validate_u8_with_error_code (c: error_code) : validator parse_u8 =
validate_total_constant_size_with_error_code parse_u8 1uL c
inline_for_extraction
let validate_u16 () : validator parse_u16 =
validate_total_constant_size parse_u16 2uL ()
inline_for_extraction
let validate_u32 () : validator parse_u32 =
validate_total_constant_size parse_u32 4uL ()
inline_for_extraction
let validate_u64 () : validator parse_u64 =
validate_total_constant_size parse_u64 8uL ()
inline_for_extraction
let validate_u64_le () : validator parse_u64_le =
validate_total_constant_size parse_u64_le 8uL ()
inline_for_extraction | false | true | LowParse.Low.Int.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val validate_u64_le_with_error_code (c: error_code) : validator parse_u64_le | [] | LowParse.Low.Int.validate_u64_le_with_error_code | {
"file_name": "src/lowparse/LowParse.Low.Int.fsti",
"git_rev": "446a08ce38df905547cf20f28c43776b22b8087a",
"git_url": "https://github.com/project-everest/everparse.git",
"project_name": "everparse"
} | c: LowParse.Low.ErrorCode.error_code -> LowParse.Low.Base.validator LowParse.Spec.Int.parse_u64_le | {
"end_col": 65,
"end_line": 46,
"start_col": 2,
"start_line": 46
} |
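The error-code wrappers shown so far cover parse_u8 and parse_u64_le. The same one-liner extends to the other fixed-size parsers of this interface; the sketch below is not part of the excerpt (the name is chosen here) and simply instantiates the combinator for parse_u16 with its 2-byte size:

inline_for_extraction
let validate_u16_with_error_code (c: error_code) : validator parse_u16 =
  validate_total_constant_size_with_error_code parse_u16 2uL c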
Prims.Tot | val write_u32_weak:leaf_writer_weak serialize_u32 | [
{
"abbrev": true,
"full_module": "LowParse.Low.Endianness",
"short_module": "LE"
},
{
"abbrev": true,
"full_module": "FStar.Endianness",
"short_module": "E"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.UInt16",
"short_module": "U16"
},
{
"abbrev": true,
"full_module": "FStar.UInt8",
"short_module": "U8"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "Seq"
},
{
"abbrev": false,
"full_module": "LowParse.Low.Combinators",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Low.Base",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Spec.Int",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Low",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Low",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let write_u32_weak : leaf_writer_weak serialize_u32 =
leaf_writer_weak_of_strong_constant_size write_u32 4ul () | val write_u32_weak:leaf_writer_weak serialize_u32
let write_u32_weak:leaf_writer_weak serialize_u32 = | false | null | false | leaf_writer_weak_of_strong_constant_size write_u32 4ul () | {
"checked_file": "LowParse.Low.Int.fsti.checked",
"dependencies": [
"prims.fst.checked",
"LowParse.Spec.Int.fsti.checked",
"LowParse.Low.Base.fst.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": false,
"source_file": "LowParse.Low.Int.fsti"
} | [
"total"
] | [
"LowParse.Low.Base.leaf_writer_weak_of_strong_constant_size",
"LowParse.Spec.Int.parse_u32_kind",
"FStar.UInt32.t",
"LowParse.Spec.Int.parse_u32",
"LowParse.Spec.Int.serialize_u32",
"LowParse.Low.Int.write_u32",
"FStar.UInt32.__uint_to_t"
] | [] | module LowParse.Low.Int
include LowParse.Spec.Int
include LowParse.Low.Base
inline_for_extraction
val read_u8: leaf_reader parse_u8
inline_for_extraction
val read_u16: leaf_reader parse_u16
inline_for_extraction
val read_u32: leaf_reader parse_u32
inline_for_extraction
val read_u64 : leaf_reader parse_u64
inline_for_extraction
val read_u64_le : leaf_reader parse_u64_le
inline_for_extraction
let validate_u8 () : validator parse_u8 =
validate_total_constant_size parse_u8 1uL ()
inline_for_extraction
let validate_u8_with_error_code (c: error_code) : validator parse_u8 =
validate_total_constant_size_with_error_code parse_u8 1uL c
inline_for_extraction
let validate_u16 () : validator parse_u16 =
validate_total_constant_size parse_u16 2uL ()
inline_for_extraction
let validate_u32 () : validator parse_u32 =
validate_total_constant_size parse_u32 4uL ()
inline_for_extraction
let validate_u64 () : validator parse_u64 =
validate_total_constant_size parse_u64 8uL ()
inline_for_extraction
let validate_u64_le () : validator parse_u64_le =
validate_total_constant_size parse_u64_le 8uL ()
inline_for_extraction
let validate_u64_le_with_error_code (c: error_code) : validator parse_u64_le =
validate_total_constant_size_with_error_code parse_u64_le 8uL c
inline_for_extraction
let jump_u8 : jumper parse_u8 =
jump_constant_size parse_u8 1ul ()
inline_for_extraction
let jump_u16 : jumper parse_u16 =
jump_constant_size parse_u16 2ul ()
inline_for_extraction
let jump_u32 : jumper parse_u32 =
jump_constant_size parse_u32 4ul ()
inline_for_extraction
let jump_u64 : jumper parse_u64 =
jump_constant_size parse_u64 8ul ()
inline_for_extraction
let jump_u64_le : jumper parse_u64_le =
jump_constant_size parse_u64_le 8ul ()
inline_for_extraction
val serialize32_u8 : serializer32 serialize_u8
inline_for_extraction
val serialize32_u16 : serializer32 serialize_u16
inline_for_extraction
val serialize32_u32 : serializer32 serialize_u32
inline_for_extraction
val serialize32_u64 : serializer32 serialize_u64
inline_for_extraction
val write_u8 : leaf_writer_strong serialize_u8
inline_for_extraction
let write_u8_weak : leaf_writer_weak serialize_u8 =
leaf_writer_weak_of_strong_constant_size write_u8 1ul ()
inline_for_extraction
val write_u16 : leaf_writer_strong serialize_u16
inline_for_extraction
let write_u16_weak : leaf_writer_weak serialize_u16 =
leaf_writer_weak_of_strong_constant_size write_u16 2ul ()
inline_for_extraction
val write_u32 : leaf_writer_strong serialize_u32
inline_for_extraction | false | true | LowParse.Low.Int.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val write_u32_weak:leaf_writer_weak serialize_u32 | [] | LowParse.Low.Int.write_u32_weak | {
"file_name": "src/lowparse/LowParse.Low.Int.fsti",
"git_rev": "446a08ce38df905547cf20f28c43776b22b8087a",
"git_url": "https://github.com/project-everest/everparse.git",
"project_name": "everparse"
} | LowParse.Low.Base.leaf_writer_weak LowParse.Spec.Int.serialize_u32 | {
"end_col": 59,
"end_line": 99,
"start_col": 2,
"start_line": 99
} |
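Taken together, the LowParse.Low.Int records have a simple division of labour: the readers (read_*), the strong writers (write_*) and the serialize32_* serializers are only declared as val and implemented elsewhere, while every validator, jumper and weak writer in the interface is derived from one of four combinators, namely validate_total_constant_size, validate_total_constant_size_with_error_code, jump_constant_size and leaf_writer_weak_of_strong_constant_size, each instantiated with the fixed byte size of the integer format (1, 2, 4 or 8).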
Prims.Tot | val write_u64_le_weak:leaf_writer_weak serialize_u64_le | [
{
"abbrev": true,
"full_module": "LowParse.Low.Endianness",
"short_module": "LE"
},
{
"abbrev": true,
"full_module": "FStar.Endianness",
"short_module": "E"
},
{
"abbrev": true,
"full_module": "LowStar.Buffer",
"short_module": "B"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.UInt16",
"short_module": "U16"
},
{
"abbrev": true,
"full_module": "FStar.UInt8",
"short_module": "U8"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "Seq"
},
{
"abbrev": false,
"full_module": "LowParse.Low.Combinators",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Low.Base",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Spec.Int",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Low",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Low",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let write_u64_le_weak : leaf_writer_weak serialize_u64_le =
leaf_writer_weak_of_strong_constant_size write_u64_le 8ul () | val write_u64_le_weak:leaf_writer_weak serialize_u64_le
let write_u64_le_weak:leaf_writer_weak serialize_u64_le = | false | null | false | leaf_writer_weak_of_strong_constant_size write_u64_le 8ul () | {
"checked_file": "LowParse.Low.Int.fsti.checked",
"dependencies": [
"prims.fst.checked",
"LowParse.Spec.Int.fsti.checked",
"LowParse.Low.Base.fst.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": false,
"source_file": "LowParse.Low.Int.fsti"
} | [
"total"
] | [
"LowParse.Low.Base.leaf_writer_weak_of_strong_constant_size",
"LowParse.Spec.Int.parse_u64_kind",
"FStar.UInt64.t",
"LowParse.Spec.Int.parse_u64_le",
"LowParse.Spec.Int.serialize_u64_le",
"LowParse.Low.Int.write_u64_le",
"FStar.UInt32.__uint_to_t"
] | [] | module LowParse.Low.Int
include LowParse.Spec.Int
include LowParse.Low.Base
inline_for_extraction
val read_u8: leaf_reader parse_u8
inline_for_extraction
val read_u16: leaf_reader parse_u16
inline_for_extraction
val read_u32: leaf_reader parse_u32
inline_for_extraction
val read_u64 : leaf_reader parse_u64
inline_for_extraction
val read_u64_le : leaf_reader parse_u64_le
inline_for_extraction
let validate_u8 () : validator parse_u8 =
validate_total_constant_size parse_u8 1uL ()
inline_for_extraction
let validate_u8_with_error_code (c: error_code) : validator parse_u8 =
validate_total_constant_size_with_error_code parse_u8 1uL c
inline_for_extraction
let validate_u16 () : validator parse_u16 =
validate_total_constant_size parse_u16 2uL ()
inline_for_extraction
let validate_u32 () : validator parse_u32 =
validate_total_constant_size parse_u32 4uL ()
inline_for_extraction
let validate_u64 () : validator parse_u64 =
validate_total_constant_size parse_u64 8uL ()
inline_for_extraction
let validate_u64_le () : validator parse_u64_le =
validate_total_constant_size parse_u64_le 8uL ()
inline_for_extraction
let validate_u64_le_with_error_code (c: error_code) : validator parse_u64_le =
validate_total_constant_size_with_error_code parse_u64_le 8uL c
inline_for_extraction
let jump_u8 : jumper parse_u8 =
jump_constant_size parse_u8 1ul ()
inline_for_extraction
let jump_u16 : jumper parse_u16 =
jump_constant_size parse_u16 2ul ()
inline_for_extraction
let jump_u32 : jumper parse_u32 =
jump_constant_size parse_u32 4ul ()
inline_for_extraction
let jump_u64 : jumper parse_u64 =
jump_constant_size parse_u64 8ul ()
inline_for_extraction
let jump_u64_le : jumper parse_u64_le =
jump_constant_size parse_u64_le 8ul ()
inline_for_extraction
val serialize32_u8 : serializer32 serialize_u8
inline_for_extraction
val serialize32_u16 : serializer32 serialize_u16
inline_for_extraction
val serialize32_u32 : serializer32 serialize_u32
inline_for_extraction
val serialize32_u64 : serializer32 serialize_u64
inline_for_extraction
val write_u8 : leaf_writer_strong serialize_u8
inline_for_extraction
let write_u8_weak : leaf_writer_weak serialize_u8 =
leaf_writer_weak_of_strong_constant_size write_u8 1ul ()
inline_for_extraction
val write_u16 : leaf_writer_strong serialize_u16
inline_for_extraction
let write_u16_weak : leaf_writer_weak serialize_u16 =
leaf_writer_weak_of_strong_constant_size write_u16 2ul ()
inline_for_extraction
val write_u32 : leaf_writer_strong serialize_u32
inline_for_extraction
let write_u32_weak : leaf_writer_weak serialize_u32 =
leaf_writer_weak_of_strong_constant_size write_u32 4ul ()
inline_for_extraction
val write_u64 : leaf_writer_strong serialize_u64
inline_for_extraction
let write_u64_weak : leaf_writer_weak serialize_u64 =
leaf_writer_weak_of_strong_constant_size write_u64 8ul ()
inline_for_extraction
val write_u64_le : leaf_writer_strong serialize_u64_le
inline_for_extraction | false | true | LowParse.Low.Int.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val write_u64_le_weak:leaf_writer_weak serialize_u64_le | [] | LowParse.Low.Int.write_u64_le_weak | {
"file_name": "src/lowparse/LowParse.Low.Int.fsti",
"git_rev": "446a08ce38df905547cf20f28c43776b22b8087a",
"git_url": "https://github.com/project-everest/everparse.git",
"project_name": "everparse"
} | LowParse.Low.Base.leaf_writer_weak LowParse.Spec.Int.serialize_u64_le | {
"end_col": 62,
"end_line": 113,
"start_col": 2,
"start_line": 113
} |
FStar.All.ALL | [
{
"abbrev": true,
"full_module": "Options",
"short_module": "O"
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let emit_config_as_fstar_module (module_name:string) (c:config) =
let flags =
List.map
(Printf.sprintf "[@@ CIfDef]\nassume\nval ___%s : bool" )
c.compile_time_flags.flags
in
let assumes = String.concat "\n\n" flags in
Printf.sprintf "module %s\n%s\n" module_name assumes | let emit_config_as_fstar_module (module_name: string) (c: config) = | true | null | false | let flags =
List.map (Printf.sprintf "[@@ CIfDef]\nassume\nval ___%s : bool") c.compile_time_flags.flags
in
let assumes = String.concat "\n\n" flags in
Printf.sprintf "module %s\n%s\n" module_name assumes | {
"checked_file": "Config.fst.checked",
"dependencies": [
"prims.fst.checked",
"Options.fsti.checked",
"FStar.String.fsti.checked",
"FStar.Printf.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.List.fst.checked"
],
"interface_file": false,
"source_file": "Config.fst"
} | [
"trivial_postcondition"
] | [
"Prims.string",
"Config.config",
"FStar.Printf.sprintf",
"FStar.String.concat",
"Prims.list",
"FStar.List.map",
"Config.__proj__Mkcompile_time_flags__item__flags",
"Config.__proj__Mkconfig__item__compile_time_flags"
] | [] | module Config
module O = Options
[@@ PpxDerivingYoJson]
type compile_time_flags = {
flags : list string;
include_file : string;
}
[@@ PpxDerivingYoJson]
type config = {
compile_time_flags : compile_time_flags
} | false | false | Config.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val emit_config_as_fstar_module : module_name: Prims.string -> c: Config.config -> FStar.All.ALL (Prims.string <: Type0) | [] | Config.emit_config_as_fstar_module | {
"file_name": "src/3d/Config.fst",
"git_rev": "446a08ce38df905547cf20f28c43776b22b8087a",
"git_url": "https://github.com/project-everest/everparse.git",
"project_name": "everparse"
} | module_name: Prims.string -> c: Config.config -> FStar.All.ALL (Prims.string <: Type0) | {
"end_col": 54,
"end_line": 21,
"start_col": 65,
"start_line": 14
} |
|
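Because emit_config_as_fstar_module is plain string formatting, a worked example pins down its output. The module name, flag names and include_file value below are made up for illustration; only the shape of the result follows from the definition in the record above.

let _example_config : config = {
  compile_time_flags = { flags = ["FLAG_A"; "FLAG_B"]; include_file = "config.h" }
}

(* emit_config_as_fstar_module "MyFlags" _example_config returns the string:

   module MyFlags
   [@@ CIfDef]
   assume
   val ___FLAG_A : bool

   [@@ CIfDef]
   assume
   val ___FLAG_B : bool
*)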
Prims.GTot | val fill_buffer_precond' (#t: typ) (b: buffer t) (h: HS.mem) : GTot Type0 | [
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HH"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HH"
},
{
"abbrev": false,
"full_module": "FStar.Pointer.Derived1",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pointer.Base",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pointer",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pointer",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let fill_buffer_precond'
(#t: typ)
(b: buffer t) (* destination *)
(h: HS.mem)
: GTot Type0
= buffer_live h b | val fill_buffer_precond' (#t: typ) (b: buffer t) (h: HS.mem) : GTot Type0
let fill_buffer_precond' (#t: typ) (b: buffer t) (h: HS.mem) : GTot Type0 = | false | null | false | buffer_live h b | {
"checked_file": "FStar.Pointer.Derived3.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked"
],
"interface_file": true,
"source_file": "FStar.Pointer.Derived3.fst"
} | [
"sometrivial"
] | [
"FStar.Pointer.Base.typ",
"FStar.Pointer.Base.buffer",
"FStar.Monotonic.HyperStack.mem",
"FStar.Pointer.Base.buffer_live"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.Pointer.Derived3
module HH = FStar.HyperStack
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
private
let fill_buffer_precond'
(#t: typ)
(b: buffer t) (* destination *)
(h: HS.mem) | false | false | FStar.Pointer.Derived3.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val fill_buffer_precond' (#t: typ) (b: buffer t) (h: HS.mem) : GTot Type0 | [] | FStar.Pointer.Derived3.fill_buffer_precond' | {
"file_name": "ulib/legacy/FStar.Pointer.Derived3.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} | b: FStar.Pointer.Base.buffer t -> h: FStar.Monotonic.HyperStack.mem -> Prims.GTot Type0 | {
"end_col": 17,
"end_line": 28,
"start_col": 2,
"start_line": 28
} |
FStar.HyperStack.ST.Stack | val fill_buffer
(#t: typ)
(b: buffer t) (* destination *)
(idx_b: UInt32.t)
(len: UInt32.t)
(v: type_of_typ t)
: HST.Stack unit
(requires (fun h ->
fill_buffer_precond b idx_b len h
))
(ensures (fun h0 _ h1 ->
fill_buffer_postcond b idx_b len v h0 h1
)) | [
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HH"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HH"
},
{
"abbrev": false,
"full_module": "FStar.Pointer.Derived1",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pointer.Base",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pointer",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pointer",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let fill_buffer = fill_buffer' | val fill_buffer
(#t: typ)
(b: buffer t) (* destination *)
(idx_b: UInt32.t)
(len: UInt32.t)
(v: type_of_typ t)
: HST.Stack unit
(requires (fun h ->
fill_buffer_precond b idx_b len h
))
(ensures (fun h0 _ h1 ->
fill_buffer_postcond b idx_b len v h0 h1
))
let fill_buffer = | true | null | false | fill_buffer' | {
"checked_file": "FStar.Pointer.Derived3.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked"
],
"interface_file": true,
"source_file": "FStar.Pointer.Derived3.fst"
} | [] | [
"FStar.Pointer.Derived3.fill_buffer'"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.Pointer.Derived3
module HH = FStar.HyperStack
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
private
let fill_buffer_precond'
(#t: typ)
(b: buffer t) (* destination *)
(h: HS.mem)
: GTot Type0
= buffer_live h b
private
let fill_buffer_postcond'
(#t: typ)
(b: buffer t) (* destination *)
(v: type_of_typ t)
(h: HS.mem)
(h' : HS.mem)
: GTot Type0
= fill_buffer_precond' b h /\
modifies (loc_buffer b) h h' /\
buffer_readable h' b /\
buffer_as_seq h' b == Seq.create (UInt32.v (buffer_length b)) v
private
let fill_buffer_inv
(#t: typ)
(b: buffer t) (* destination *)
(len' : UInt32.t)
(v: type_of_typ t)
(h: HS.mem)
(h' : HS.mem)
: GTot Type0
= fill_buffer_precond' b h /\
modifies (loc_buffer b) h h' /\
UInt32.v len' <= UInt32.v (buffer_length b) /\
buffer_readable h' (gsub_buffer b 0ul len') /\
buffer_as_seq h' (gsub_buffer b 0ul len') == Seq.create (UInt32.v len') v
private
val fill_buffer_init
(#t: typ)
(b: buffer t) (* destination *)
(v: type_of_typ t)
(h: HS.mem)
: Lemma
(requires (fill_buffer_precond' b h))
(ensures (fill_buffer_inv b 0ul v h h))
let fill_buffer_init #t b v h =
buffer_readable_intro_empty h (gsub_buffer b 0ul 0ul);
Seq.lemma_eq_intro (buffer_as_seq h (gsub_buffer b 0ul 0ul)) (Seq.create 0 v)
private
val fill_buffer_advance
(#t: typ)
(b: buffer t) (* destination *)
(len' : UInt32.t)
(v: type_of_typ t)
(h: Ghost.erased HS.mem)
: HST.Stack unit
(requires (fun h0 ->
fill_buffer_inv b len' v (Ghost.reveal h) h0 /\
UInt32.v len' < UInt32.v (buffer_length b)
))
(ensures (fun h1 _ h2 ->
fill_buffer_inv b len' v (Ghost.reveal h) h1 /\
UInt32.v len' < UInt32.v (buffer_length b) /\
fill_buffer_inv b (UInt32.add len' 1ul) v (Ghost.reveal h) h2
))
#set-options "--z3rlimit 16"
let fill_buffer_advance #t b len' v h =
buffer_snoc b 0ul len' v;
Seq.lemma_eq_intro (Seq.snoc (Seq.create (UInt32.v len') v) v) (Seq.create (UInt32.v (UInt32.add len' 1ul)) v)
private
val fill_buffer_aux
(#t: typ)
(b: buffer t) (* destination *)
(len: UInt32.t)
(len': UInt32.t)
(v: type_of_typ t)
(h: Ghost.erased HS.mem)
: HST.Stack unit
(requires (fun h0 ->
fill_buffer_inv b len' v (Ghost.reveal h) h0 /\
len == buffer_length b
))
(ensures (fun h0 _ h1 ->
fill_buffer_inv b len' v (Ghost.reveal h) h0 /\
fill_buffer_postcond' b v (Ghost.reveal h) h1
))
(decreases (UInt32.v (buffer_length b) - UInt32.v len'))
let rec fill_buffer_aux #t b len len' v h =
if len = len'
then ()
else begin
fill_buffer_advance b len' v h;
fill_buffer_aux b len (UInt32.add len' 1ul) v h
end
let fill_buffer_fin
(#t: typ)
(b: buffer t) (* destination *)
(idx_b: UInt32.t)
(len: UInt32.t)
(v: type_of_typ t)
(h: HS.mem)
(h' : HS.mem)
: Lemma
(requires (
fill_buffer_precond b idx_b len h /\
fill_buffer_postcond' (gsub_buffer b idx_b len) v h h'
))
(ensures (
fill_buffer_precond b idx_b len h /\
fill_buffer_postcond b idx_b len v h h'
))
= ()
let fill_buffer'
(#t: typ)
(b: buffer t) (* destination *)
(idx_b: UInt32.t)
(len: UInt32.t)
(v: type_of_typ t)
: HST.Stack unit
(requires (fun h ->
fill_buffer_precond b idx_b len h
))
(ensures (fun h0 _ h1 ->
fill_buffer_postcond b idx_b len v h0 h1
))
= let h0 = HST.get () in
let b' = sub_buffer b idx_b len in
fill_buffer_init b' v h0;
fill_buffer_aux b' len 0ul v (Ghost.hide h0);
let h1 = HST.get () in
fill_buffer_fin b idx_b len v h0 h1 | false | false | FStar.Pointer.Derived3.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 16,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val fill_buffer
(#t: typ)
(b: buffer t) (* destination *)
(idx_b: UInt32.t)
(len: UInt32.t)
(v: type_of_typ t)
: HST.Stack unit
(requires (fun h ->
fill_buffer_precond b idx_b len h
))
(ensures (fun h0 _ h1 ->
fill_buffer_postcond b idx_b len v h0 h1
)) | [] | FStar.Pointer.Derived3.fill_buffer | {
"file_name": "ulib/legacy/FStar.Pointer.Derived3.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} |
b: FStar.Pointer.Base.buffer t ->
idx_b: FStar.UInt32.t ->
len: FStar.UInt32.t ->
v: FStar.Pointer.Base.type_of_typ t
-> FStar.HyperStack.ST.Stack Prims.unit | {
"end_col": 30,
"end_line": 163,
"start_col": 18,
"start_line": 163
} |
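The fill_buffer record exposes only the indexed form. A caller that wants to fill an entire buffer can instantiate idx_b with 0ul and tie len to buffer_length b in the specification, so the contract below is a direct instance of the val above; the wrapper name is invented here for illustration and is not part of the library.

let fill_whole_buffer (#t: typ) (b: buffer t) (len: UInt32.t) (v: type_of_typ t)
: HST.Stack unit
  (requires (fun h -> len == buffer_length b /\ fill_buffer_precond b 0ul len h))
  (ensures (fun h0 _ h1 -> fill_buffer_postcond b 0ul len v h0 h1))
= fill_buffer b 0ul len v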
FStar.Pervasives.Lemma | val fill_buffer_init
(#t: typ)
(b: buffer t) (* destination *)
(v: type_of_typ t)
(h: HS.mem)
: Lemma
(requires (fill_buffer_precond' b h))
(ensures (fill_buffer_inv b 0ul v h h)) | [
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HH"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HH"
},
{
"abbrev": false,
"full_module": "FStar.Pointer.Derived1",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pointer.Base",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pointer",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pointer",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let fill_buffer_init #t b v h =
buffer_readable_intro_empty h (gsub_buffer b 0ul 0ul);
Seq.lemma_eq_intro (buffer_as_seq h (gsub_buffer b 0ul 0ul)) (Seq.create 0 v) | val fill_buffer_init
(#t: typ)
(b: buffer t) (* destination *)
(v: type_of_typ t)
(h: HS.mem)
: Lemma
(requires (fill_buffer_precond' b h))
(ensures (fill_buffer_inv b 0ul v h h))
let fill_buffer_init #t b v h = | false | null | true | buffer_readable_intro_empty h (gsub_buffer b 0ul 0ul);
Seq.lemma_eq_intro (buffer_as_seq h (gsub_buffer b 0ul 0ul)) (Seq.create 0 v) | {
"checked_file": "FStar.Pointer.Derived3.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked"
],
"interface_file": true,
"source_file": "FStar.Pointer.Derived3.fst"
} | [
"lemma"
] | [
"FStar.Pointer.Base.typ",
"FStar.Pointer.Base.buffer",
"FStar.Pointer.Base.type_of_typ",
"FStar.Monotonic.HyperStack.mem",
"FStar.Seq.Base.lemma_eq_intro",
"FStar.Pointer.Base.buffer_as_seq",
"FStar.Pointer.Base.gsub_buffer",
"FStar.UInt32.__uint_to_t",
"FStar.Seq.Base.create",
"Prims.unit",
"FStar.Pointer.Derived1.buffer_readable_intro_empty"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.Pointer.Derived3
module HH = FStar.HyperStack
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
private
let fill_buffer_precond'
(#t: typ)
(b: buffer t) (* destination *)
(h: HS.mem)
: GTot Type0
= buffer_live h b
private
let fill_buffer_postcond'
(#t: typ)
(b: buffer t) (* destination *)
(v: type_of_typ t)
(h: HS.mem)
(h' : HS.mem)
: GTot Type0
= fill_buffer_precond' b h /\
modifies (loc_buffer b) h h' /\
buffer_readable h' b /\
buffer_as_seq h' b == Seq.create (UInt32.v (buffer_length b)) v
private
let fill_buffer_inv
(#t: typ)
(b: buffer t) (* destination *)
(len' : UInt32.t)
(v: type_of_typ t)
(h: HS.mem)
(h' : HS.mem)
: GTot Type0
= fill_buffer_precond' b h /\
modifies (loc_buffer b) h h' /\
UInt32.v len' <= UInt32.v (buffer_length b) /\
buffer_readable h' (gsub_buffer b 0ul len') /\
buffer_as_seq h' (gsub_buffer b 0ul len') == Seq.create (UInt32.v len') v
private
val fill_buffer_init
(#t: typ)
(b: buffer t) (* destination *)
(v: type_of_typ t)
(h: HS.mem)
: Lemma
(requires (fill_buffer_precond' b h))
(ensures (fill_buffer_inv b 0ul v h h)) | false | false | FStar.Pointer.Derived3.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val fill_buffer_init
(#t: typ)
(b: buffer t) (* destination *)
(v: type_of_typ t)
(h: HS.mem)
: Lemma
(requires (fill_buffer_precond' b h))
(ensures (fill_buffer_inv b 0ul v h h)) | [] | FStar.Pointer.Derived3.fill_buffer_init | {
"file_name": "ulib/legacy/FStar.Pointer.Derived3.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} |
b: FStar.Pointer.Base.buffer t ->
v: FStar.Pointer.Base.type_of_typ t ->
h: FStar.Monotonic.HyperStack.mem
-> FStar.Pervasives.Lemma (requires FStar.Pointer.Derived3.fill_buffer_precond' b h)
(ensures FStar.Pointer.Derived3.fill_buffer_inv b 0ul v h h) | {
"end_col": 79,
"end_line": 70,
"start_col": 2,
"start_line": 69
} |
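Before the next record, it helps to spell out why the two calls in fill_buffer_init are enough. Instantiating the invariant from this file at len' = 0ul and h' = h (a hand unfolding, using only the definitions shown in these records) gives:

(* fill_buffer_inv b 0ul v h h
     =  fill_buffer_precond' b h                                    (the hypothesis)
     /\ modifies (loc_buffer b) h h                                  (modifies is reflexive)
     /\ UInt32.v 0ul <= UInt32.v (buffer_length b)                   (trivial)
     /\ buffer_readable h (gsub_buffer b 0ul 0ul)                    (buffer_readable_intro_empty)
     /\ buffer_as_seq h (gsub_buffer b 0ul 0ul) == Seq.create 0 v    (Seq.lemma_eq_intro, both sides empty)
*)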
Prims.GTot | val fill_buffer_inv (#t: typ) (b: buffer t) (len': UInt32.t) (v: type_of_typ t) (h h': HS.mem)
: GTot Type0 | [
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HH"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HH"
},
{
"abbrev": false,
"full_module": "FStar.Pointer.Derived1",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pointer.Base",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pointer",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pointer",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let fill_buffer_inv
(#t: typ)
(b: buffer t) (* destination *)
(len' : UInt32.t)
(v: type_of_typ t)
(h: HS.mem)
(h' : HS.mem)
: GTot Type0
= fill_buffer_precond' b h /\
modifies (loc_buffer b) h h' /\
UInt32.v len' <= UInt32.v (buffer_length b) /\
buffer_readable h' (gsub_buffer b 0ul len') /\
buffer_as_seq h' (gsub_buffer b 0ul len') == Seq.create (UInt32.v len') v | val fill_buffer_inv (#t: typ) (b: buffer t) (len': UInt32.t) (v: type_of_typ t) (h h': HS.mem)
: GTot Type0
let fill_buffer_inv (#t: typ) (b: buffer t) (len': UInt32.t) (v: type_of_typ t) (h h': HS.mem)
: GTot Type0 = | false | null | false | fill_buffer_precond' b h /\ modifies (loc_buffer b) h h' /\
UInt32.v len' <= UInt32.v (buffer_length b) /\ buffer_readable h' (gsub_buffer b 0ul len') /\
buffer_as_seq h' (gsub_buffer b 0ul len') == Seq.create (UInt32.v len') v | {
"checked_file": "FStar.Pointer.Derived3.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked"
],
"interface_file": true,
"source_file": "FStar.Pointer.Derived3.fst"
} | [
"sometrivial"
] | [
"FStar.Pointer.Base.typ",
"FStar.Pointer.Base.buffer",
"FStar.UInt32.t",
"FStar.Pointer.Base.type_of_typ",
"FStar.Monotonic.HyperStack.mem",
"Prims.l_and",
"FStar.Pointer.Derived3.fill_buffer_precond'",
"FStar.Pointer.Base.modifies",
"FStar.Pointer.Base.loc_buffer",
"Prims.b2t",
"Prims.op_LessThanOrEqual",
"FStar.UInt32.v",
"FStar.Pointer.Base.buffer_length",
"FStar.Pointer.Base.buffer_readable",
"FStar.Pointer.Base.gsub_buffer",
"FStar.UInt32.__uint_to_t",
"Prims.eq2",
"FStar.Seq.Base.seq",
"FStar.Pointer.Base.buffer_as_seq",
"FStar.Seq.Base.create"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.Pointer.Derived3
module HH = FStar.HyperStack
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
private
let fill_buffer_precond'
(#t: typ)
(b: buffer t) (* destination *)
(h: HS.mem)
: GTot Type0
= buffer_live h b
private
let fill_buffer_postcond'
(#t: typ)
(b: buffer t) (* destination *)
(v: type_of_typ t)
(h: HS.mem)
(h' : HS.mem)
: GTot Type0
= fill_buffer_precond' b h /\
modifies (loc_buffer b) h h' /\
buffer_readable h' b /\
buffer_as_seq h' b == Seq.create (UInt32.v (buffer_length b)) v
private
let fill_buffer_inv
(#t: typ)
(b: buffer t) (* destination *)
(len' : UInt32.t)
(v: type_of_typ t)
(h: HS.mem)
(h' : HS.mem) | false | false | FStar.Pointer.Derived3.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val fill_buffer_inv (#t: typ) (b: buffer t) (len': UInt32.t) (v: type_of_typ t) (h h': HS.mem)
: GTot Type0 | [] | FStar.Pointer.Derived3.fill_buffer_inv | {
"file_name": "ulib/legacy/FStar.Pointer.Derived3.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} |
b: FStar.Pointer.Base.buffer t ->
len': FStar.UInt32.t ->
v: FStar.Pointer.Base.type_of_typ t ->
h: FStar.Monotonic.HyperStack.mem ->
h': FStar.Monotonic.HyperStack.mem
-> Prims.GTot Type0 | {
"end_col": 75,
"end_line": 56,
"start_col": 2,
"start_line": 52
} |
FStar.HyperStack.ST.Stack | val fill_buffer' (#t: typ) (b: buffer t) (idx_b len: UInt32.t) (v: type_of_typ t)
: HST.Stack unit
(requires (fun h -> fill_buffer_precond b idx_b len h))
(ensures (fun h0 _ h1 -> fill_buffer_postcond b idx_b len v h0 h1)) | [
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HH"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HH"
},
{
"abbrev": false,
"full_module": "FStar.Pointer.Derived1",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pointer.Base",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pointer",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pointer",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let fill_buffer'
(#t: typ)
(b: buffer t) (* destination *)
(idx_b: UInt32.t)
(len: UInt32.t)
(v: type_of_typ t)
: HST.Stack unit
(requires (fun h ->
fill_buffer_precond b idx_b len h
))
(ensures (fun h0 _ h1 ->
fill_buffer_postcond b idx_b len v h0 h1
))
= let h0 = HST.get () in
let b' = sub_buffer b idx_b len in
fill_buffer_init b' v h0;
fill_buffer_aux b' len 0ul v (Ghost.hide h0);
let h1 = HST.get () in
fill_buffer_fin b idx_b len v h0 h1 | val fill_buffer' (#t: typ) (b: buffer t) (idx_b len: UInt32.t) (v: type_of_typ t)
: HST.Stack unit
(requires (fun h -> fill_buffer_precond b idx_b len h))
(ensures (fun h0 _ h1 -> fill_buffer_postcond b idx_b len v h0 h1))
let fill_buffer' (#t: typ) (b: buffer t) (idx_b len: UInt32.t) (v: type_of_typ t)
: HST.Stack unit
(requires (fun h -> fill_buffer_precond b idx_b len h))
(ensures (fun h0 _ h1 -> fill_buffer_postcond b idx_b len v h0 h1)) = | true | null | false | let h0 = HST.get () in
let b' = sub_buffer b idx_b len in
fill_buffer_init b' v h0;
fill_buffer_aux b' len 0ul v (Ghost.hide h0);
let h1 = HST.get () in
fill_buffer_fin b idx_b len v h0 h1 | {
"checked_file": "FStar.Pointer.Derived3.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked"
],
"interface_file": true,
"source_file": "FStar.Pointer.Derived3.fst"
} | [] | [
"FStar.Pointer.Base.typ",
"FStar.Pointer.Base.buffer",
"FStar.UInt32.t",
"FStar.Pointer.Base.type_of_typ",
"FStar.Pointer.Derived3.fill_buffer_fin",
"Prims.unit",
"FStar.Monotonic.HyperStack.mem",
"FStar.HyperStack.ST.get",
"FStar.Pointer.Derived3.fill_buffer_aux",
"FStar.UInt32.__uint_to_t",
"FStar.Ghost.hide",
"FStar.Pointer.Derived3.fill_buffer_init",
"FStar.Pointer.Base.sub_buffer",
"FStar.Pointer.Derived3.fill_buffer_precond",
"FStar.Pointer.Derived3.fill_buffer_postcond"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.Pointer.Derived3
module HH = FStar.HyperStack
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
private
let fill_buffer_precond'
(#t: typ)
(b: buffer t) (* destination *)
(h: HS.mem)
: GTot Type0
= buffer_live h b
private
let fill_buffer_postcond'
(#t: typ)
(b: buffer t) (* destination *)
(v: type_of_typ t)
(h: HS.mem)
(h' : HS.mem)
: GTot Type0
= fill_buffer_precond' b h /\
modifies (loc_buffer b) h h' /\
buffer_readable h' b /\
buffer_as_seq h' b == Seq.create (UInt32.v (buffer_length b)) v
private
let fill_buffer_inv
(#t: typ)
(b: buffer t) (* destination *)
(len' : UInt32.t)
(v: type_of_typ t)
(h: HS.mem)
(h' : HS.mem)
: GTot Type0
= fill_buffer_precond' b h /\
modifies (loc_buffer b) h h' /\
UInt32.v len' <= UInt32.v (buffer_length b) /\
buffer_readable h' (gsub_buffer b 0ul len') /\
buffer_as_seq h' (gsub_buffer b 0ul len') == Seq.create (UInt32.v len') v
private
val fill_buffer_init
(#t: typ)
(b: buffer t) (* destination *)
(v: type_of_typ t)
(h: HS.mem)
: Lemma
(requires (fill_buffer_precond' b h))
(ensures (fill_buffer_inv b 0ul v h h))
let fill_buffer_init #t b v h =
buffer_readable_intro_empty h (gsub_buffer b 0ul 0ul);
Seq.lemma_eq_intro (buffer_as_seq h (gsub_buffer b 0ul 0ul)) (Seq.create 0 v)
private
val fill_buffer_advance
(#t: typ)
(b: buffer t) (* destination *)
(len' : UInt32.t)
(v: type_of_typ t)
(h: Ghost.erased HS.mem)
: HST.Stack unit
(requires (fun h0 ->
fill_buffer_inv b len' v (Ghost.reveal h) h0 /\
UInt32.v len' < UInt32.v (buffer_length b)
))
(ensures (fun h1 _ h2 ->
fill_buffer_inv b len' v (Ghost.reveal h) h1 /\
UInt32.v len' < UInt32.v (buffer_length b) /\
fill_buffer_inv b (UInt32.add len' 1ul) v (Ghost.reveal h) h2
))
#set-options "--z3rlimit 16"
let fill_buffer_advance #t b len' v h =
buffer_snoc b 0ul len' v;
Seq.lemma_eq_intro (Seq.snoc (Seq.create (UInt32.v len') v) v) (Seq.create (UInt32.v (UInt32.add len' 1ul)) v)
private
val fill_buffer_aux
(#t: typ)
(b: buffer t) (* destination *)
(len: UInt32.t)
(len': UInt32.t)
(v: type_of_typ t)
(h: Ghost.erased HS.mem)
: HST.Stack unit
(requires (fun h0 ->
fill_buffer_inv b len' v (Ghost.reveal h) h0 /\
len == buffer_length b
))
(ensures (fun h0 _ h1 ->
fill_buffer_inv b len' v (Ghost.reveal h) h0 /\
fill_buffer_postcond' b v (Ghost.reveal h) h1
))
(decreases (UInt32.v (buffer_length b) - UInt32.v len'))
let rec fill_buffer_aux #t b len len' v h =
if len = len'
then ()
else begin
fill_buffer_advance b len' v h;
fill_buffer_aux b len (UInt32.add len' 1ul) v h
end
let fill_buffer_fin
(#t: typ)
(b: buffer t) (* destination *)
(idx_b: UInt32.t)
(len: UInt32.t)
(v: type_of_typ t)
(h: HS.mem)
(h' : HS.mem)
: Lemma
(requires (
fill_buffer_precond b idx_b len h /\
fill_buffer_postcond' (gsub_buffer b idx_b len) v h h'
))
(ensures (
fill_buffer_precond b idx_b len h /\
fill_buffer_postcond b idx_b len v h h'
))
= ()
let fill_buffer'
(#t: typ)
(b: buffer t) (* destination *)
(idx_b: UInt32.t)
(len: UInt32.t)
(v: type_of_typ t)
: HST.Stack unit
(requires (fun h ->
fill_buffer_precond b idx_b len h
))
(ensures (fun h0 _ h1 ->
fill_buffer_postcond b idx_b len v h0 h1 | false | false | FStar.Pointer.Derived3.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 16,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val fill_buffer' (#t: typ) (b: buffer t) (idx_b len: UInt32.t) (v: type_of_typ t)
: HST.Stack unit
(requires (fun h -> fill_buffer_precond b idx_b len h))
(ensures (fun h0 _ h1 -> fill_buffer_postcond b idx_b len v h0 h1)) | [] | FStar.Pointer.Derived3.fill_buffer' | {
"file_name": "ulib/legacy/FStar.Pointer.Derived3.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} |
b: FStar.Pointer.Base.buffer t ->
idx_b: FStar.UInt32.t ->
len: FStar.UInt32.t ->
v: FStar.Pointer.Base.type_of_typ t
-> FStar.HyperStack.ST.Stack Prims.unit | {
"end_col": 37,
"end_line": 160,
"start_col": 1,
"start_line": 155
} |
Prims.GTot | val fill_buffer_postcond' (#t: typ) (b: buffer t) (v: type_of_typ t) (h h': HS.mem) : GTot Type0 | [
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HH"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HH"
},
{
"abbrev": false,
"full_module": "FStar.Pointer.Derived1",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pointer.Base",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pointer",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pointer",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let fill_buffer_postcond'
(#t: typ)
(b: buffer t) (* destination *)
(v: type_of_typ t)
(h: HS.mem)
(h' : HS.mem)
: GTot Type0
= fill_buffer_precond' b h /\
modifies (loc_buffer b) h h' /\
buffer_readable h' b /\
buffer_as_seq h' b == Seq.create (UInt32.v (buffer_length b)) v | val fill_buffer_postcond' (#t: typ) (b: buffer t) (v: type_of_typ t) (h h': HS.mem) : GTot Type0
let fill_buffer_postcond' (#t: typ) (b: buffer t) (v: type_of_typ t) (h h': HS.mem) : GTot Type0 = | false | null | false | fill_buffer_precond' b h /\ modifies (loc_buffer b) h h' /\ buffer_readable h' b /\
buffer_as_seq h' b == Seq.create (UInt32.v (buffer_length b)) v | {
"checked_file": "FStar.Pointer.Derived3.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked"
],
"interface_file": true,
"source_file": "FStar.Pointer.Derived3.fst"
} | [
"sometrivial"
] | [
"FStar.Pointer.Base.typ",
"FStar.Pointer.Base.buffer",
"FStar.Pointer.Base.type_of_typ",
"FStar.Monotonic.HyperStack.mem",
"Prims.l_and",
"FStar.Pointer.Derived3.fill_buffer_precond'",
"FStar.Pointer.Base.modifies",
"FStar.Pointer.Base.loc_buffer",
"FStar.Pointer.Base.buffer_readable",
"Prims.eq2",
"FStar.Seq.Base.seq",
"FStar.Pointer.Base.buffer_as_seq",
"FStar.Seq.Base.create",
"FStar.UInt32.v",
"FStar.Pointer.Base.buffer_length"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.Pointer.Derived3
module HH = FStar.HyperStack
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
private
let fill_buffer_precond'
(#t: typ)
(b: buffer t) (* destination *)
(h: HS.mem)
: GTot Type0
= buffer_live h b
private
let fill_buffer_postcond'
(#t: typ)
(b: buffer t) (* destination *)
(v: type_of_typ t)
(h: HS.mem)
(h' : HS.mem) | false | false | FStar.Pointer.Derived3.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val fill_buffer_postcond' (#t: typ) (b: buffer t) (v: type_of_typ t) (h h': HS.mem) : GTot Type0 | [] | FStar.Pointer.Derived3.fill_buffer_postcond' | {
"file_name": "ulib/legacy/FStar.Pointer.Derived3.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} |
b: FStar.Pointer.Base.buffer t ->
v: FStar.Pointer.Base.type_of_typ t ->
h: FStar.Monotonic.HyperStack.mem ->
h': FStar.Monotonic.HyperStack.mem
-> Prims.GTot Type0 | {
"end_col": 65,
"end_line": 41,
"start_col": 2,
"start_line": 38
} |
FStar.HyperStack.ST.Stack | val fill_buffer_aux
(#t: typ)
(b: buffer t) (* destination *)
(len: UInt32.t)
(len': UInt32.t)
(v: type_of_typ t)
(h: Ghost.erased HS.mem)
: HST.Stack unit
(requires (fun h0 ->
fill_buffer_inv b len' v (Ghost.reveal h) h0 /\
len == buffer_length b
))
(ensures (fun h0 _ h1 ->
fill_buffer_inv b len' v (Ghost.reveal h) h0 /\
fill_buffer_postcond' b v (Ghost.reveal h) h1
))
(decreases (UInt32.v (buffer_length b) - UInt32.v len')) | [
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HH"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HH"
},
{
"abbrev": false,
"full_module": "FStar.Pointer.Derived1",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pointer.Base",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pointer",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pointer",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let rec fill_buffer_aux #t b len len' v h =
if len = len'
then ()
else begin
fill_buffer_advance b len' v h;
fill_buffer_aux b len (UInt32.add len' 1ul) v h
end | val fill_buffer_aux
(#t: typ)
(b: buffer t) (* destination *)
(len: UInt32.t)
(len': UInt32.t)
(v: type_of_typ t)
(h: Ghost.erased HS.mem)
: HST.Stack unit
(requires (fun h0 ->
fill_buffer_inv b len' v (Ghost.reveal h) h0 /\
len == buffer_length b
))
(ensures (fun h0 _ h1 ->
fill_buffer_inv b len' v (Ghost.reveal h) h0 /\
fill_buffer_postcond' b v (Ghost.reveal h) h1
))
(decreases (UInt32.v (buffer_length b) - UInt32.v len'))
let rec fill_buffer_aux #t b len len' v h = | true | null | false | if len = len'
then ()
else
(fill_buffer_advance b len' v h;
fill_buffer_aux b len (UInt32.add len' 1ul) v h) | {
"checked_file": "FStar.Pointer.Derived3.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked"
],
"interface_file": true,
"source_file": "FStar.Pointer.Derived3.fst"
} | [
""
] | [
"FStar.Pointer.Base.typ",
"FStar.Pointer.Base.buffer",
"FStar.UInt32.t",
"FStar.Pointer.Base.type_of_typ",
"FStar.Ghost.erased",
"FStar.Monotonic.HyperStack.mem",
"Prims.op_Equality",
"Prims.unit",
"Prims.bool",
"FStar.Pointer.Derived3.fill_buffer_aux",
"FStar.UInt32.add",
"FStar.UInt32.__uint_to_t",
"FStar.Pointer.Derived3.fill_buffer_advance"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.Pointer.Derived3
module HH = FStar.HyperStack
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
private
let fill_buffer_precond'
(#t: typ)
(b: buffer t) (* destination *)
(h: HS.mem)
: GTot Type0
= buffer_live h b
private
let fill_buffer_postcond'
(#t: typ)
(b: buffer t) (* destination *)
(v: type_of_typ t)
(h: HS.mem)
(h' : HS.mem)
: GTot Type0
= fill_buffer_precond' b h /\
modifies (loc_buffer b) h h' /\
buffer_readable h' b /\
buffer_as_seq h' b == Seq.create (UInt32.v (buffer_length b)) v
private
let fill_buffer_inv
(#t: typ)
(b: buffer t) (* destination *)
(len' : UInt32.t)
(v: type_of_typ t)
(h: HS.mem)
(h' : HS.mem)
: GTot Type0
= fill_buffer_precond' b h /\
modifies (loc_buffer b) h h' /\
UInt32.v len' <= UInt32.v (buffer_length b) /\
buffer_readable h' (gsub_buffer b 0ul len') /\
buffer_as_seq h' (gsub_buffer b 0ul len') == Seq.create (UInt32.v len') v
private
val fill_buffer_init
(#t: typ)
(b: buffer t) (* destination *)
(v: type_of_typ t)
(h: HS.mem)
: Lemma
(requires (fill_buffer_precond' b h))
(ensures (fill_buffer_inv b 0ul v h h))
let fill_buffer_init #t b v h =
buffer_readable_intro_empty h (gsub_buffer b 0ul 0ul);
Seq.lemma_eq_intro (buffer_as_seq h (gsub_buffer b 0ul 0ul)) (Seq.create 0 v)
private
val fill_buffer_advance
(#t: typ)
(b: buffer t) (* destination *)
(len' : UInt32.t)
(v: type_of_typ t)
(h: Ghost.erased HS.mem)
: HST.Stack unit
(requires (fun h0 ->
fill_buffer_inv b len' v (Ghost.reveal h) h0 /\
UInt32.v len' < UInt32.v (buffer_length b)
))
(ensures (fun h1 _ h2 ->
fill_buffer_inv b len' v (Ghost.reveal h) h1 /\
UInt32.v len' < UInt32.v (buffer_length b) /\
fill_buffer_inv b (UInt32.add len' 1ul) v (Ghost.reveal h) h2
))
#set-options "--z3rlimit 16"
let fill_buffer_advance #t b len' v h =
buffer_snoc b 0ul len' v;
Seq.lemma_eq_intro (Seq.snoc (Seq.create (UInt32.v len') v) v) (Seq.create (UInt32.v (UInt32.add len' 1ul)) v)
private
val fill_buffer_aux
(#t: typ)
(b: buffer t) (* destination *)
(len: UInt32.t)
(len': UInt32.t)
(v: type_of_typ t)
(h: Ghost.erased HS.mem)
: HST.Stack unit
(requires (fun h0 ->
fill_buffer_inv b len' v (Ghost.reveal h) h0 /\
len == buffer_length b
))
(ensures (fun h0 _ h1 ->
fill_buffer_inv b len' v (Ghost.reveal h) h0 /\
fill_buffer_postcond' b v (Ghost.reveal h) h1
))
(decreases (UInt32.v (buffer_length b) - UInt32.v len')) | false | false | FStar.Pointer.Derived3.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 16,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val fill_buffer_aux
(#t: typ)
(b: buffer t) (* destination *)
(len: UInt32.t)
(len': UInt32.t)
(v: type_of_typ t)
(h: Ghost.erased HS.mem)
: HST.Stack unit
(requires (fun h0 ->
fill_buffer_inv b len' v (Ghost.reveal h) h0 /\
len == buffer_length b
))
(ensures (fun h0 _ h1 ->
fill_buffer_inv b len' v (Ghost.reveal h) h0 /\
fill_buffer_postcond' b v (Ghost.reveal h) h1
))
(decreases (UInt32.v (buffer_length b) - UInt32.v len')) | [
"recursion"
] | FStar.Pointer.Derived3.fill_buffer_aux | {
"file_name": "ulib/legacy/FStar.Pointer.Derived3.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} |
b: FStar.Pointer.Base.buffer t ->
len: FStar.UInt32.t ->
len': FStar.UInt32.t ->
v: FStar.Pointer.Base.type_of_typ t ->
h: FStar.Ghost.erased FStar.Monotonic.HyperStack.mem
-> FStar.HyperStack.ST.Stack Prims.unit | {
"end_col": 5,
"end_line": 121,
"start_col": 2,
"start_line": 116
} |
FStar.HyperStack.ST.Stack | val fill_buffer_advance
(#t: typ)
(b: buffer t) (* destination *)
(len' : UInt32.t)
(v: type_of_typ t)
(h: Ghost.erased HS.mem)
: HST.Stack unit
(requires (fun h0 ->
fill_buffer_inv b len' v (Ghost.reveal h) h0 /\
UInt32.v len' < UInt32.v (buffer_length b)
))
(ensures (fun h1 _ h2 ->
fill_buffer_inv b len' v (Ghost.reveal h) h1 /\
UInt32.v len' < UInt32.v (buffer_length b) /\
fill_buffer_inv b (UInt32.add len' 1ul) v (Ghost.reveal h) h2
)) | [
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HH"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack.ST",
"short_module": "HST"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HS"
},
{
"abbrev": true,
"full_module": "FStar.HyperStack",
"short_module": "HH"
},
{
"abbrev": false,
"full_module": "FStar.Pointer.Derived1",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pointer.Base",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pointer",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pointer",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let fill_buffer_advance #t b len' v h =
buffer_snoc b 0ul len' v;
Seq.lemma_eq_intro (Seq.snoc (Seq.create (UInt32.v len') v) v) (Seq.create (UInt32.v (UInt32.add len' 1ul)) v) | val fill_buffer_advance
(#t: typ)
(b: buffer t) (* destination *)
(len' : UInt32.t)
(v: type_of_typ t)
(h: Ghost.erased HS.mem)
: HST.Stack unit
(requires (fun h0 ->
fill_buffer_inv b len' v (Ghost.reveal h) h0 /\
UInt32.v len' < UInt32.v (buffer_length b)
))
(ensures (fun h1 _ h2 ->
fill_buffer_inv b len' v (Ghost.reveal h) h1 /\
UInt32.v len' < UInt32.v (buffer_length b) /\
fill_buffer_inv b (UInt32.add len' 1ul) v (Ghost.reveal h) h2
))
let fill_buffer_advance #t b len' v h = | true | null | false | buffer_snoc b 0ul len' v;
Seq.lemma_eq_intro (Seq.snoc (Seq.create (UInt32.v len') v) v)
(Seq.create (UInt32.v (UInt32.add len' 1ul)) v) | {
"checked_file": "FStar.Pointer.Derived3.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt32.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.HyperStack.ST.fsti.checked",
"FStar.HyperStack.fst.checked",
"FStar.Ghost.fsti.checked"
],
"interface_file": true,
"source_file": "FStar.Pointer.Derived3.fst"
} | [] | [
"FStar.Pointer.Base.typ",
"FStar.Pointer.Base.buffer",
"FStar.UInt32.t",
"FStar.Pointer.Base.type_of_typ",
"FStar.Ghost.erased",
"FStar.Monotonic.HyperStack.mem",
"FStar.Seq.Base.lemma_eq_intro",
"FStar.Seq.Properties.snoc",
"FStar.Seq.Base.create",
"FStar.UInt32.v",
"FStar.UInt32.add",
"FStar.UInt32.__uint_to_t",
"Prims.unit",
"FStar.Pointer.Derived1.buffer_snoc"
] | [] | (*
Copyright 2008-2018 Microsoft Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
module FStar.Pointer.Derived3
module HH = FStar.HyperStack
module HS = FStar.HyperStack
module HST = FStar.HyperStack.ST
private
let fill_buffer_precond'
(#t: typ)
(b: buffer t) (* destination *)
(h: HS.mem)
: GTot Type0
= buffer_live h b
private
let fill_buffer_postcond'
(#t: typ)
(b: buffer t) (* destination *)
(v: type_of_typ t)
(h: HS.mem)
(h' : HS.mem)
: GTot Type0
= fill_buffer_precond' b h /\
modifies (loc_buffer b) h h' /\
buffer_readable h' b /\
buffer_as_seq h' b == Seq.create (UInt32.v (buffer_length b)) v
private
let fill_buffer_inv
(#t: typ)
(b: buffer t) (* destination *)
(len' : UInt32.t)
(v: type_of_typ t)
(h: HS.mem)
(h' : HS.mem)
: GTot Type0
= fill_buffer_precond' b h /\
modifies (loc_buffer b) h h' /\
UInt32.v len' <= UInt32.v (buffer_length b) /\
buffer_readable h' (gsub_buffer b 0ul len') /\
buffer_as_seq h' (gsub_buffer b 0ul len') == Seq.create (UInt32.v len') v
private
val fill_buffer_init
(#t: typ)
(b: buffer t) (* destination *)
(v: type_of_typ t)
(h: HS.mem)
: Lemma
(requires (fill_buffer_precond' b h))
(ensures (fill_buffer_inv b 0ul v h h))
let fill_buffer_init #t b v h =
buffer_readable_intro_empty h (gsub_buffer b 0ul 0ul);
Seq.lemma_eq_intro (buffer_as_seq h (gsub_buffer b 0ul 0ul)) (Seq.create 0 v)
private
val fill_buffer_advance
(#t: typ)
(b: buffer t) (* destination *)
(len' : UInt32.t)
(v: type_of_typ t)
(h: Ghost.erased HS.mem)
: HST.Stack unit
(requires (fun h0 ->
fill_buffer_inv b len' v (Ghost.reveal h) h0 /\
UInt32.v len' < UInt32.v (buffer_length b)
))
(ensures (fun h1 _ h2 ->
fill_buffer_inv b len' v (Ghost.reveal h) h1 /\
UInt32.v len' < UInt32.v (buffer_length b) /\
fill_buffer_inv b (UInt32.add len' 1ul) v (Ghost.reveal h) h2
))
#set-options "--z3rlimit 16" | false | false | FStar.Pointer.Derived3.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 16,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val fill_buffer_advance
(#t: typ)
(b: buffer t) (* destination *)
(len' : UInt32.t)
(v: type_of_typ t)
(h: Ghost.erased HS.mem)
: HST.Stack unit
(requires (fun h0 ->
fill_buffer_inv b len' v (Ghost.reveal h) h0 /\
UInt32.v len' < UInt32.v (buffer_length b)
))
(ensures (fun h1 _ h2 ->
fill_buffer_inv b len' v (Ghost.reveal h) h1 /\
UInt32.v len' < UInt32.v (buffer_length b) /\
fill_buffer_inv b (UInt32.add len' 1ul) v (Ghost.reveal h) h2
)) | [] | FStar.Pointer.Derived3.fill_buffer_advance | {
"file_name": "ulib/legacy/FStar.Pointer.Derived3.fst",
"git_rev": "f4cbb7a38d67eeb13fbdb2f4fb8a44a65cbcdc1f",
"git_url": "https://github.com/FStarLang/FStar.git",
"project_name": "FStar"
} |
b: FStar.Pointer.Base.buffer t ->
len': FStar.UInt32.t ->
v: FStar.Pointer.Base.type_of_typ t ->
h: FStar.Ghost.erased FStar.Monotonic.HyperStack.mem
-> FStar.HyperStack.ST.Stack Prims.unit | {
"end_col": 112,
"end_line": 94,
"start_col": 2,
"start_line": 93
} |
Prims.Tot | val parse_u16: parser parse_u16_kind U16.t | [
{
"abbrev": false,
"full_module": "LowParse.Spec.Combinators",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.UInt16",
"short_module": "U16"
},
{
"abbrev": true,
"full_module": "FStar.UInt8",
"short_module": "U8"
},
{
"abbrev": true,
"full_module": "FStar.Endianness",
"short_module": "E"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "Seq"
},
{
"abbrev": false,
"full_module": "LowParse.Spec.Base",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Spec",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Spec",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let parse_u16 =
decode_u16_injective ();
make_total_constant_size_parser 2 U16.t decode_u16 | val parse_u16: parser parse_u16_kind U16.t
let parse_u16 = | false | null | false | decode_u16_injective ();
make_total_constant_size_parser 2 U16.t decode_u16 | {
"checked_file": "LowParse.Spec.Int.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowParse.Spec.Seq.fst.checked",
"LowParse.Spec.Combinators.fsti.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": true,
"source_file": "LowParse.Spec.Int.fst"
} | [
"total"
] | [
"LowParse.Spec.Combinators.make_total_constant_size_parser",
"FStar.UInt16.t",
"LowParse.Spec.Int.decode_u16",
"Prims.unit",
"LowParse.Spec.Int.decode_u16_injective"
] | [] | module LowParse.Spec.Int
open LowParse.Spec.Combinators
let decode_u8
(b: bytes { Seq.length b == 1 } )
: Tot U8.t
= Seq.index b 0
let decode_u8_injective () : Lemma
(make_total_constant_size_parser_precond 1 U8.t decode_u8)
= ()
let tot_parse_u8 =
decode_u8_injective ();
tot_make_total_constant_size_parser 1 U8.t decode_u8
let parse_u8_spec
b
=
Seq.lemma_index_slice b 0 1 0;
E.reveal_be_to_n (Seq.slice b 0 1);
E.reveal_be_to_n (Seq.slice (Seq.slice b 0 1) 0 0)
let parse_u8_spec' b = ()
let serialize_u8 =
Seq.create 1
let serialize_u8_spec x = ()
let decode_u16
(b: bytes { Seq.length b == 2 } )
: GTot U16.t
= E.lemma_be_to_n_is_bounded b;
U16.uint_to_t (E.be_to_n b)
let decode_u16_injective'
(b1: bytes { Seq.length b1 == 2 } )
(b2: bytes { Seq.length b2 == 2 } )
: Lemma
(decode_u16 b1 == decode_u16 b2 ==> Seq.equal b1 b2)
= if decode_u16 b1 = decode_u16 b2
then begin
E.lemma_be_to_n_is_bounded b1;
E.lemma_be_to_n_is_bounded b2;
assert (U32.v (U32.uint_to_t (E.be_to_n b1)) == E.be_to_n b1);
assert (U32.v (U32.uint_to_t (E.be_to_n b2)) == E.be_to_n b2);
assert (E.be_to_n b1 == E.be_to_n b2);
E.be_to_n_inj b1 b2
end else ()
let decode_u16_injective
()
: Lemma
(make_total_constant_size_parser_precond 2 U16.t decode_u16)
= Classical.forall_intro_2 decode_u16_injective' | false | true | LowParse.Spec.Int.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val parse_u16: parser parse_u16_kind U16.t | [] | LowParse.Spec.Int.parse_u16 | {
"file_name": "src/lowparse/LowParse.Spec.Int.fst",
"git_rev": "446a08ce38df905547cf20f28c43776b22b8087a",
"git_url": "https://github.com/project-everest/everparse.git",
"project_name": "everparse"
} | LowParse.Spec.Base.parser LowParse.Spec.Int.parse_u16_kind FStar.UInt16.t | {
"end_col": 52,
"end_line": 60,
"start_col": 2,
"start_line": 59
} |
Prims.Tot | val parse_u64: parser parse_u64_kind U64.t | [
{
"abbrev": false,
"full_module": "LowParse.Spec.Combinators",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.UInt16",
"short_module": "U16"
},
{
"abbrev": true,
"full_module": "FStar.UInt8",
"short_module": "U8"
},
{
"abbrev": true,
"full_module": "FStar.Endianness",
"short_module": "E"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "Seq"
},
{
"abbrev": false,
"full_module": "LowParse.Spec.Base",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Spec",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Spec",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let parse_u64 =
decode_u64_injective ();
make_total_constant_size_parser 8 U64.t decode_u64 | val parse_u64: parser parse_u64_kind U64.t
let parse_u64 = | false | null | false | decode_u64_injective ();
make_total_constant_size_parser 8 U64.t decode_u64 | {
"checked_file": "LowParse.Spec.Int.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowParse.Spec.Seq.fst.checked",
"LowParse.Spec.Combinators.fsti.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": true,
"source_file": "LowParse.Spec.Int.fst"
} | [
"total"
] | [
"LowParse.Spec.Combinators.make_total_constant_size_parser",
"FStar.UInt64.t",
"LowParse.Spec.Int.decode_u64",
"Prims.unit",
"LowParse.Spec.Int.decode_u64_injective"
] | [] | module LowParse.Spec.Int
open LowParse.Spec.Combinators
let decode_u8
(b: bytes { Seq.length b == 1 } )
: Tot U8.t
= Seq.index b 0
let decode_u8_injective () : Lemma
(make_total_constant_size_parser_precond 1 U8.t decode_u8)
= ()
let tot_parse_u8 =
decode_u8_injective ();
tot_make_total_constant_size_parser 1 U8.t decode_u8
let parse_u8_spec
b
=
Seq.lemma_index_slice b 0 1 0;
E.reveal_be_to_n (Seq.slice b 0 1);
E.reveal_be_to_n (Seq.slice (Seq.slice b 0 1) 0 0)
let parse_u8_spec' b = ()
let serialize_u8 =
Seq.create 1
let serialize_u8_spec x = ()
let decode_u16
(b: bytes { Seq.length b == 2 } )
: GTot U16.t
= E.lemma_be_to_n_is_bounded b;
U16.uint_to_t (E.be_to_n b)
let decode_u16_injective'
(b1: bytes { Seq.length b1 == 2 } )
(b2: bytes { Seq.length b2 == 2 } )
: Lemma
(decode_u16 b1 == decode_u16 b2 ==> Seq.equal b1 b2)
= if decode_u16 b1 = decode_u16 b2
then begin
E.lemma_be_to_n_is_bounded b1;
E.lemma_be_to_n_is_bounded b2;
assert (U32.v (U32.uint_to_t (E.be_to_n b1)) == E.be_to_n b1);
assert (U32.v (U32.uint_to_t (E.be_to_n b2)) == E.be_to_n b2);
assert (E.be_to_n b1 == E.be_to_n b2);
E.be_to_n_inj b1 b2
end else ()
let decode_u16_injective
()
: Lemma
(make_total_constant_size_parser_precond 2 U16.t decode_u16)
= Classical.forall_intro_2 decode_u16_injective'
let parse_u16 =
decode_u16_injective ();
make_total_constant_size_parser 2 U16.t decode_u16
let parse_u16_spec
b
=
E.lemma_be_to_n_is_bounded (Seq.slice b 0 2)
let serialize_u16 =
(fun (x: U16.t) -> E.n_to_be 2 (U16.v x))
let decode_u32
(b: bytes { Seq.length b == 4 } )
: GTot U32.t
= E.lemma_be_to_n_is_bounded b;
U32.uint_to_t (E.be_to_n b)
let decode_u32_injective'
(b1: bytes { Seq.length b1 == 4 } )
(b2: bytes { Seq.length b2 == 4 } )
: Lemma
(decode_u32 b1 == decode_u32 b2 ==> Seq.equal b1 b2)
= if decode_u32 b1 = decode_u32 b2
then begin
E.lemma_be_to_n_is_bounded b1;
E.lemma_be_to_n_is_bounded b2;
assert (U32.v (U32.uint_to_t (E.be_to_n b1)) == E.be_to_n b1);
assert (U32.v (U32.uint_to_t (E.be_to_n b2)) == E.be_to_n b2);
assert (E.be_to_n b1 == E.be_to_n b2);
E.be_to_n_inj b1 b2
end else ()
let decode_u32_injective () : Lemma
(make_total_constant_size_parser_precond 4 U32.t decode_u32)
= Classical.forall_intro_2 decode_u32_injective'
let parse_u32 =
decode_u32_injective ();
make_total_constant_size_parser 4 U32.t decode_u32
let parse_u32_spec
b
=
E.lemma_be_to_n_is_bounded (Seq.slice b 0 4)
let serialize_u32 =
(fun (x: U32.t) -> E.n_to_be 4 (U32.v x))
let decode_u64
(b: bytes { Seq.length b == 8 } )
: GTot U64.t
= E.lemma_be_to_n_is_bounded b;
U64.uint_to_t (E.be_to_n b)
let decode_u64_injective'
(b1: bytes { Seq.length b1 == 8 } )
(b2: bytes { Seq.length b2 == 8 } )
: Lemma
(decode_u64 b1 == decode_u64 b2 ==> Seq.equal b1 b2)
= if decode_u64 b1 = decode_u64 b2
then begin
E.lemma_be_to_n_is_bounded b1;
E.lemma_be_to_n_is_bounded b2;
assert (U64.v (U64.uint_to_t (E.be_to_n b1)) == E.be_to_n b1);
assert (U64.v (U64.uint_to_t (E.be_to_n b2)) == E.be_to_n b2);
assert (E.be_to_n b1 == E.be_to_n b2);
E.be_to_n_inj b1 b2
end else ()
let decode_u64_injective () : Lemma
(make_total_constant_size_parser_precond 8 U64.t decode_u64)
= Classical.forall_intro_2 decode_u64_injective' | false | true | LowParse.Spec.Int.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val parse_u64: parser parse_u64_kind U64.t | [] | LowParse.Spec.Int.parse_u64 | {
"file_name": "src/lowparse/LowParse.Spec.Int.fst",
"git_rev": "446a08ce38df905547cf20f28c43776b22b8087a",
"git_url": "https://github.com/project-everest/everparse.git",
"project_name": "everparse"
} | LowParse.Spec.Base.parser LowParse.Spec.Int.parse_u64_kind FStar.UInt64.t | {
"end_col": 52,
"end_line": 134,
"start_col": 2,
"start_line": 133
} |
Prims.Tot | val tot_parse_u8: tot_parser parse_u8_kind U8.t | [
{
"abbrev": false,
"full_module": "LowParse.Spec.Combinators",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.UInt16",
"short_module": "U16"
},
{
"abbrev": true,
"full_module": "FStar.UInt8",
"short_module": "U8"
},
{
"abbrev": true,
"full_module": "FStar.Endianness",
"short_module": "E"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "Seq"
},
{
"abbrev": false,
"full_module": "LowParse.Spec.Base",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Spec",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Spec",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let tot_parse_u8 =
decode_u8_injective ();
tot_make_total_constant_size_parser 1 U8.t decode_u8 | val tot_parse_u8: tot_parser parse_u8_kind U8.t
let tot_parse_u8 = | false | null | false | decode_u8_injective ();
tot_make_total_constant_size_parser 1 U8.t decode_u8 | {
"checked_file": "LowParse.Spec.Int.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowParse.Spec.Seq.fst.checked",
"LowParse.Spec.Combinators.fsti.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": true,
"source_file": "LowParse.Spec.Int.fst"
} | [
"total"
] | [
"LowParse.Spec.Combinators.tot_make_total_constant_size_parser",
"FStar.UInt8.t",
"LowParse.Spec.Int.decode_u8",
"Prims.unit",
"LowParse.Spec.Int.decode_u8_injective"
] | [] | module LowParse.Spec.Int
open LowParse.Spec.Combinators
let decode_u8
(b: bytes { Seq.length b == 1 } )
: Tot U8.t
= Seq.index b 0
let decode_u8_injective () : Lemma
(make_total_constant_size_parser_precond 1 U8.t decode_u8)
= () | false | true | LowParse.Spec.Int.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val tot_parse_u8: tot_parser parse_u8_kind U8.t | [] | LowParse.Spec.Int.tot_parse_u8 | {
"file_name": "src/lowparse/LowParse.Spec.Int.fst",
"git_rev": "446a08ce38df905547cf20f28c43776b22b8087a",
"git_url": "https://github.com/project-everest/everparse.git",
"project_name": "everparse"
} | LowParse.Spec.Base.tot_parser LowParse.Spec.Int.parse_u8_kind FStar.UInt8.t | {
"end_col": 54,
"end_line": 15,
"start_col": 2,
"start_line": 14
} |
Prims.Tot | val parse_u32: parser parse_u32_kind U32.t | [
{
"abbrev": false,
"full_module": "LowParse.Spec.Combinators",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.UInt16",
"short_module": "U16"
},
{
"abbrev": true,
"full_module": "FStar.UInt8",
"short_module": "U8"
},
{
"abbrev": true,
"full_module": "FStar.Endianness",
"short_module": "E"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "Seq"
},
{
"abbrev": false,
"full_module": "LowParse.Spec.Base",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Spec",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Spec",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let parse_u32 =
decode_u32_injective ();
make_total_constant_size_parser 4 U32.t decode_u32 | val parse_u32: parser parse_u32_kind U32.t
let parse_u32 = | false | null | false | decode_u32_injective ();
make_total_constant_size_parser 4 U32.t decode_u32 | {
"checked_file": "LowParse.Spec.Int.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowParse.Spec.Seq.fst.checked",
"LowParse.Spec.Combinators.fsti.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": true,
"source_file": "LowParse.Spec.Int.fst"
} | [
"total"
] | [
"LowParse.Spec.Combinators.make_total_constant_size_parser",
"FStar.UInt32.t",
"LowParse.Spec.Int.decode_u32",
"Prims.unit",
"LowParse.Spec.Int.decode_u32_injective"
] | [] | module LowParse.Spec.Int
open LowParse.Spec.Combinators
let decode_u8
(b: bytes { Seq.length b == 1 } )
: Tot U8.t
= Seq.index b 0
let decode_u8_injective () : Lemma
(make_total_constant_size_parser_precond 1 U8.t decode_u8)
= ()
let tot_parse_u8 =
decode_u8_injective ();
tot_make_total_constant_size_parser 1 U8.t decode_u8
let parse_u8_spec
b
=
Seq.lemma_index_slice b 0 1 0;
E.reveal_be_to_n (Seq.slice b 0 1);
E.reveal_be_to_n (Seq.slice (Seq.slice b 0 1) 0 0)
let parse_u8_spec' b = ()
let serialize_u8 =
Seq.create 1
let serialize_u8_spec x = ()
let decode_u16
(b: bytes { Seq.length b == 2 } )
: GTot U16.t
= E.lemma_be_to_n_is_bounded b;
U16.uint_to_t (E.be_to_n b)
let decode_u16_injective'
(b1: bytes { Seq.length b1 == 2 } )
(b2: bytes { Seq.length b2 == 2 } )
: Lemma
(decode_u16 b1 == decode_u16 b2 ==> Seq.equal b1 b2)
= if decode_u16 b1 = decode_u16 b2
then begin
E.lemma_be_to_n_is_bounded b1;
E.lemma_be_to_n_is_bounded b2;
assert (U32.v (U32.uint_to_t (E.be_to_n b1)) == E.be_to_n b1);
assert (U32.v (U32.uint_to_t (E.be_to_n b2)) == E.be_to_n b2);
assert (E.be_to_n b1 == E.be_to_n b2);
E.be_to_n_inj b1 b2
end else ()
let decode_u16_injective
()
: Lemma
(make_total_constant_size_parser_precond 2 U16.t decode_u16)
= Classical.forall_intro_2 decode_u16_injective'
let parse_u16 =
decode_u16_injective ();
make_total_constant_size_parser 2 U16.t decode_u16
let parse_u16_spec
b
=
E.lemma_be_to_n_is_bounded (Seq.slice b 0 2)
let serialize_u16 =
(fun (x: U16.t) -> E.n_to_be 2 (U16.v x))
let decode_u32
(b: bytes { Seq.length b == 4 } )
: GTot U32.t
= E.lemma_be_to_n_is_bounded b;
U32.uint_to_t (E.be_to_n b)
let decode_u32_injective'
(b1: bytes { Seq.length b1 == 4 } )
(b2: bytes { Seq.length b2 == 4 } )
: Lemma
(decode_u32 b1 == decode_u32 b2 ==> Seq.equal b1 b2)
= if decode_u32 b1 = decode_u32 b2
then begin
E.lemma_be_to_n_is_bounded b1;
E.lemma_be_to_n_is_bounded b2;
assert (U32.v (U32.uint_to_t (E.be_to_n b1)) == E.be_to_n b1);
assert (U32.v (U32.uint_to_t (E.be_to_n b2)) == E.be_to_n b2);
assert (E.be_to_n b1 == E.be_to_n b2);
E.be_to_n_inj b1 b2
end else ()
let decode_u32_injective () : Lemma
(make_total_constant_size_parser_precond 4 U32.t decode_u32)
= Classical.forall_intro_2 decode_u32_injective' | false | true | LowParse.Spec.Int.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val parse_u32: parser parse_u32_kind U32.t | [] | LowParse.Spec.Int.parse_u32 | {
"file_name": "src/lowparse/LowParse.Spec.Int.fst",
"git_rev": "446a08ce38df905547cf20f28c43776b22b8087a",
"git_url": "https://github.com/project-everest/everparse.git",
"project_name": "everparse"
} | LowParse.Spec.Base.parser LowParse.Spec.Int.parse_u32_kind FStar.UInt32.t | {
"end_col": 52,
"end_line": 97,
"start_col": 2,
"start_line": 96
} |
Prims.GTot | val decode_u16 (b: bytes{Seq.length b == 2}) : GTot U16.t | [
{
"abbrev": false,
"full_module": "LowParse.Spec.Combinators",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.UInt16",
"short_module": "U16"
},
{
"abbrev": true,
"full_module": "FStar.UInt8",
"short_module": "U8"
},
{
"abbrev": true,
"full_module": "FStar.Endianness",
"short_module": "E"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "Seq"
},
{
"abbrev": false,
"full_module": "LowParse.Spec.Base",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Spec",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Spec",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let decode_u16
(b: bytes { Seq.length b == 2 } )
: GTot U16.t
= E.lemma_be_to_n_is_bounded b;
U16.uint_to_t (E.be_to_n b) | val decode_u16 (b: bytes{Seq.length b == 2}) : GTot U16.t
let decode_u16 (b: bytes{Seq.length b == 2}) : GTot U16.t = | false | null | false | E.lemma_be_to_n_is_bounded b;
U16.uint_to_t (E.be_to_n b) | {
"checked_file": "LowParse.Spec.Int.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowParse.Spec.Seq.fst.checked",
"LowParse.Spec.Combinators.fsti.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": true,
"source_file": "LowParse.Spec.Int.fst"
} | [
"sometrivial"
] | [
"LowParse.Bytes.bytes",
"Prims.eq2",
"Prims.int",
"FStar.Seq.Base.length",
"LowParse.Bytes.byte",
"FStar.UInt16.uint_to_t",
"FStar.Endianness.be_to_n",
"Prims.unit",
"FStar.Endianness.lemma_be_to_n_is_bounded",
"FStar.UInt16.t"
] | [] | module LowParse.Spec.Int
open LowParse.Spec.Combinators
let decode_u8
(b: bytes { Seq.length b == 1 } )
: Tot U8.t
= Seq.index b 0
let decode_u8_injective () : Lemma
(make_total_constant_size_parser_precond 1 U8.t decode_u8)
= ()
let tot_parse_u8 =
decode_u8_injective ();
tot_make_total_constant_size_parser 1 U8.t decode_u8
let parse_u8_spec
b
=
Seq.lemma_index_slice b 0 1 0;
E.reveal_be_to_n (Seq.slice b 0 1);
E.reveal_be_to_n (Seq.slice (Seq.slice b 0 1) 0 0)
let parse_u8_spec' b = ()
let serialize_u8 =
Seq.create 1
let serialize_u8_spec x = ()
let decode_u16
(b: bytes { Seq.length b == 2 } ) | false | false | LowParse.Spec.Int.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val decode_u16 (b: bytes{Seq.length b == 2}) : GTot U16.t | [] | LowParse.Spec.Int.decode_u16 | {
"file_name": "src/lowparse/LowParse.Spec.Int.fst",
"git_rev": "446a08ce38df905547cf20f28c43776b22b8087a",
"git_url": "https://github.com/project-everest/everparse.git",
"project_name": "everparse"
} | b: LowParse.Bytes.bytes{FStar.Seq.Base.length b == 2} -> Prims.GTot FStar.UInt16.t | {
"end_col": 29,
"end_line": 35,
"start_col": 2,
"start_line": 34
} |
Prims.Tot | val parse_u64_le: parser parse_u64_kind U64.t | [
{
"abbrev": false,
"full_module": "LowParse.Spec.Combinators",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.UInt16",
"short_module": "U16"
},
{
"abbrev": true,
"full_module": "FStar.UInt8",
"short_module": "U8"
},
{
"abbrev": true,
"full_module": "FStar.Endianness",
"short_module": "E"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "Seq"
},
{
"abbrev": false,
"full_module": "LowParse.Spec.Base",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Spec",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Spec",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let parse_u64_le =
decode_u64_le_injective ();
make_total_constant_size_parser 8 U64.t decode_u64_le | val parse_u64_le: parser parse_u64_kind U64.t
let parse_u64_le = | false | null | false | decode_u64_le_injective ();
make_total_constant_size_parser 8 U64.t decode_u64_le | {
"checked_file": "LowParse.Spec.Int.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowParse.Spec.Seq.fst.checked",
"LowParse.Spec.Combinators.fsti.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": true,
"source_file": "LowParse.Spec.Int.fst"
} | [
"total"
] | [
"LowParse.Spec.Combinators.make_total_constant_size_parser",
"FStar.UInt64.t",
"LowParse.Spec.Int.decode_u64_le",
"Prims.unit",
"LowParse.Spec.Int.decode_u64_le_injective"
] | [] | module LowParse.Spec.Int
open LowParse.Spec.Combinators
let decode_u8
(b: bytes { Seq.length b == 1 } )
: Tot U8.t
= Seq.index b 0
let decode_u8_injective () : Lemma
(make_total_constant_size_parser_precond 1 U8.t decode_u8)
= ()
let tot_parse_u8 =
decode_u8_injective ();
tot_make_total_constant_size_parser 1 U8.t decode_u8
let parse_u8_spec
b
=
Seq.lemma_index_slice b 0 1 0;
E.reveal_be_to_n (Seq.slice b 0 1);
E.reveal_be_to_n (Seq.slice (Seq.slice b 0 1) 0 0)
let parse_u8_spec' b = ()
let serialize_u8 =
Seq.create 1
let serialize_u8_spec x = ()
let decode_u16
(b: bytes { Seq.length b == 2 } )
: GTot U16.t
= E.lemma_be_to_n_is_bounded b;
U16.uint_to_t (E.be_to_n b)
let decode_u16_injective'
(b1: bytes { Seq.length b1 == 2 } )
(b2: bytes { Seq.length b2 == 2 } )
: Lemma
(decode_u16 b1 == decode_u16 b2 ==> Seq.equal b1 b2)
= if decode_u16 b1 = decode_u16 b2
then begin
E.lemma_be_to_n_is_bounded b1;
E.lemma_be_to_n_is_bounded b2;
assert (U32.v (U32.uint_to_t (E.be_to_n b1)) == E.be_to_n b1);
assert (U32.v (U32.uint_to_t (E.be_to_n b2)) == E.be_to_n b2);
assert (E.be_to_n b1 == E.be_to_n b2);
E.be_to_n_inj b1 b2
end else ()
let decode_u16_injective
()
: Lemma
(make_total_constant_size_parser_precond 2 U16.t decode_u16)
= Classical.forall_intro_2 decode_u16_injective'
let parse_u16 =
decode_u16_injective ();
make_total_constant_size_parser 2 U16.t decode_u16
let parse_u16_spec
b
=
E.lemma_be_to_n_is_bounded (Seq.slice b 0 2)
let serialize_u16 =
(fun (x: U16.t) -> E.n_to_be 2 (U16.v x))
let decode_u32
(b: bytes { Seq.length b == 4 } )
: GTot U32.t
= E.lemma_be_to_n_is_bounded b;
U32.uint_to_t (E.be_to_n b)
let decode_u32_injective'
(b1: bytes { Seq.length b1 == 4 } )
(b2: bytes { Seq.length b2 == 4 } )
: Lemma
(decode_u32 b1 == decode_u32 b2 ==> Seq.equal b1 b2)
= if decode_u32 b1 = decode_u32 b2
then begin
E.lemma_be_to_n_is_bounded b1;
E.lemma_be_to_n_is_bounded b2;
assert (U32.v (U32.uint_to_t (E.be_to_n b1)) == E.be_to_n b1);
assert (U32.v (U32.uint_to_t (E.be_to_n b2)) == E.be_to_n b2);
assert (E.be_to_n b1 == E.be_to_n b2);
E.be_to_n_inj b1 b2
end else ()
let decode_u32_injective () : Lemma
(make_total_constant_size_parser_precond 4 U32.t decode_u32)
= Classical.forall_intro_2 decode_u32_injective'
let parse_u32 =
decode_u32_injective ();
make_total_constant_size_parser 4 U32.t decode_u32
let parse_u32_spec
b
=
E.lemma_be_to_n_is_bounded (Seq.slice b 0 4)
let serialize_u32 =
(fun (x: U32.t) -> E.n_to_be 4 (U32.v x))
let decode_u64
(b: bytes { Seq.length b == 8 } )
: GTot U64.t
= E.lemma_be_to_n_is_bounded b;
U64.uint_to_t (E.be_to_n b)
let decode_u64_injective'
(b1: bytes { Seq.length b1 == 8 } )
(b2: bytes { Seq.length b2 == 8 } )
: Lemma
(decode_u64 b1 == decode_u64 b2 ==> Seq.equal b1 b2)
= if decode_u64 b1 = decode_u64 b2
then begin
E.lemma_be_to_n_is_bounded b1;
E.lemma_be_to_n_is_bounded b2;
assert (U64.v (U64.uint_to_t (E.be_to_n b1)) == E.be_to_n b1);
assert (U64.v (U64.uint_to_t (E.be_to_n b2)) == E.be_to_n b2);
assert (E.be_to_n b1 == E.be_to_n b2);
E.be_to_n_inj b1 b2
end else ()
let decode_u64_injective () : Lemma
(make_total_constant_size_parser_precond 8 U64.t decode_u64)
= Classical.forall_intro_2 decode_u64_injective'
let parse_u64 =
decode_u64_injective ();
make_total_constant_size_parser 8 U64.t decode_u64
let parse_u64_spec
b
= E.lemma_be_to_n_is_bounded (Seq.slice b 0 8)
let serialize_u64 =
(fun (x: U64.t) -> E.n_to_be 8 (U64.v x))
let decode_u64_le
(b: bytes { Seq.length b == 8 } )
: GTot U64.t
= E.lemma_le_to_n_is_bounded b;
U64.uint_to_t (E.le_to_n b)
let decode_u64_le_injective'
(b1: bytes { Seq.length b1 == 8 } )
(b2: bytes { Seq.length b2 == 8 } )
: Lemma
(decode_u64_le b1 == decode_u64_le b2 ==> Seq.equal b1 b2)
= if decode_u64_le b1 = decode_u64_le b2
then begin
E.lemma_le_to_n_is_bounded b1;
E.lemma_le_to_n_is_bounded b2;
assert (U64.v (U64.uint_to_t (E.le_to_n b1)) == E.le_to_n b1);
assert (U64.v (U64.uint_to_t (E.le_to_n b2)) == E.le_to_n b2);
assert (E.le_to_n b1 == E.le_to_n b2);
E.le_to_n_inj b1 b2
end else ()
let decode_u64_le_injective () : Lemma
(make_total_constant_size_parser_precond 8 U64.t decode_u64_le)
= Classical.forall_intro_2 decode_u64_le_injective' | false | true | LowParse.Spec.Int.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val parse_u64_le: parser parse_u64_kind U64.t | [] | LowParse.Spec.Int.parse_u64_le | {
"file_name": "src/lowparse/LowParse.Spec.Int.fst",
"git_rev": "446a08ce38df905547cf20f28c43776b22b8087a",
"git_url": "https://github.com/project-everest/everparse.git",
"project_name": "everparse"
} | LowParse.Spec.Base.parser LowParse.Spec.Int.parse_u64_kind FStar.UInt64.t | {
"end_col": 55,
"end_line": 170,
"start_col": 2,
"start_line": 169
} |
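A minimal sketch of how a client runs parse_u64_le, assuming the LowParse.Spec.Base and LowParse.Spec.Int definitions above are in scope; example_read_u64_le is an invented name and parse is the plain application wrapper from LowParse.Spec.Base:

let example_read_u64_le (input: bytes) : GTot (option FStar.UInt64.t) =
  (* parse_u64_le is a total constant-size parser of size 8, so the match
     yields Some exactly when input holds at least 8 bytes, and the
     consumed count (ignored here) is then 8 *)
  match parse parse_u64_le input with
  | Some (v, _) -> Some v
  | None -> None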
Prims.GTot | val decode_u64 (b: bytes{Seq.length b == 8}) : GTot U64.t | [
{
"abbrev": false,
"full_module": "LowParse.Spec.Combinators",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.UInt16",
"short_module": "U16"
},
{
"abbrev": true,
"full_module": "FStar.UInt8",
"short_module": "U8"
},
{
"abbrev": true,
"full_module": "FStar.Endianness",
"short_module": "E"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "Seq"
},
{
"abbrev": false,
"full_module": "LowParse.Spec.Base",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Spec",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Spec",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let decode_u64
(b: bytes { Seq.length b == 8 } )
: GTot U64.t
= E.lemma_be_to_n_is_bounded b;
U64.uint_to_t (E.be_to_n b) | val decode_u64 (b: bytes{Seq.length b == 8}) : GTot U64.t
let decode_u64 (b: bytes{Seq.length b == 8}) : GTot U64.t = | false | null | false | E.lemma_be_to_n_is_bounded b;
U64.uint_to_t (E.be_to_n b) | {
"checked_file": "LowParse.Spec.Int.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowParse.Spec.Seq.fst.checked",
"LowParse.Spec.Combinators.fsti.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": true,
"source_file": "LowParse.Spec.Int.fst"
} | [
"sometrivial"
] | [
"LowParse.Bytes.bytes",
"Prims.eq2",
"Prims.int",
"FStar.Seq.Base.length",
"LowParse.Bytes.byte",
"FStar.UInt64.uint_to_t",
"FStar.Endianness.be_to_n",
"Prims.unit",
"FStar.Endianness.lemma_be_to_n_is_bounded",
"FStar.UInt64.t"
] | [] | module LowParse.Spec.Int
open LowParse.Spec.Combinators
let decode_u8
(b: bytes { Seq.length b == 1 } )
: Tot U8.t
= Seq.index b 0
let decode_u8_injective () : Lemma
(make_total_constant_size_parser_precond 1 U8.t decode_u8)
= ()
let tot_parse_u8 =
decode_u8_injective ();
tot_make_total_constant_size_parser 1 U8.t decode_u8
let parse_u8_spec
b
=
Seq.lemma_index_slice b 0 1 0;
E.reveal_be_to_n (Seq.slice b 0 1);
E.reveal_be_to_n (Seq.slice (Seq.slice b 0 1) 0 0)
let parse_u8_spec' b = ()
let serialize_u8 =
Seq.create 1
let serialize_u8_spec x = ()
let decode_u16
(b: bytes { Seq.length b == 2 } )
: GTot U16.t
= E.lemma_be_to_n_is_bounded b;
U16.uint_to_t (E.be_to_n b)
let decode_u16_injective'
(b1: bytes { Seq.length b1 == 2 } )
(b2: bytes { Seq.length b2 == 2 } )
: Lemma
(decode_u16 b1 == decode_u16 b2 ==> Seq.equal b1 b2)
= if decode_u16 b1 = decode_u16 b2
then begin
E.lemma_be_to_n_is_bounded b1;
E.lemma_be_to_n_is_bounded b2;
assert (U32.v (U32.uint_to_t (E.be_to_n b1)) == E.be_to_n b1);
assert (U32.v (U32.uint_to_t (E.be_to_n b2)) == E.be_to_n b2);
assert (E.be_to_n b1 == E.be_to_n b2);
E.be_to_n_inj b1 b2
end else ()
let decode_u16_injective
()
: Lemma
(make_total_constant_size_parser_precond 2 U16.t decode_u16)
= Classical.forall_intro_2 decode_u16_injective'
let parse_u16 =
decode_u16_injective ();
make_total_constant_size_parser 2 U16.t decode_u16
let parse_u16_spec
b
=
E.lemma_be_to_n_is_bounded (Seq.slice b 0 2)
let serialize_u16 =
(fun (x: U16.t) -> E.n_to_be 2 (U16.v x))
let decode_u32
(b: bytes { Seq.length b == 4 } )
: GTot U32.t
= E.lemma_be_to_n_is_bounded b;
U32.uint_to_t (E.be_to_n b)
let decode_u32_injective'
(b1: bytes { Seq.length b1 == 4 } )
(b2: bytes { Seq.length b2 == 4 } )
: Lemma
(decode_u32 b1 == decode_u32 b2 ==> Seq.equal b1 b2)
= if decode_u32 b1 = decode_u32 b2
then begin
E.lemma_be_to_n_is_bounded b1;
E.lemma_be_to_n_is_bounded b2;
assert (U32.v (U32.uint_to_t (E.be_to_n b1)) == E.be_to_n b1);
assert (U32.v (U32.uint_to_t (E.be_to_n b2)) == E.be_to_n b2);
assert (E.be_to_n b1 == E.be_to_n b2);
E.be_to_n_inj b1 b2
end else ()
let decode_u32_injective () : Lemma
(make_total_constant_size_parser_precond 4 U32.t decode_u32)
= Classical.forall_intro_2 decode_u32_injective'
let parse_u32 =
decode_u32_injective ();
make_total_constant_size_parser 4 U32.t decode_u32
let parse_u32_spec
b
=
E.lemma_be_to_n_is_bounded (Seq.slice b 0 4)
let serialize_u32 =
(fun (x: U32.t) -> E.n_to_be 4 (U32.v x))
let decode_u64
(b: bytes { Seq.length b == 8 } ) | false | false | LowParse.Spec.Int.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val decode_u64 (b: bytes{Seq.length b == 8}) : GTot U64.t | [] | LowParse.Spec.Int.decode_u64 | {
"file_name": "src/lowparse/LowParse.Spec.Int.fst",
"git_rev": "446a08ce38df905547cf20f28c43776b22b8087a",
"git_url": "https://github.com/project-everest/everparse.git",
"project_name": "everparse"
} | b: LowParse.Bytes.bytes{FStar.Seq.Base.length b == 8} -> Prims.GTot FStar.UInt64.t | {
"end_col": 29,
"end_line": 111,
"start_col": 2,
"start_line": 110
} |
Prims.Tot | val decode_u8 (b: bytes{Seq.length b == 1}) : Tot U8.t | [
{
"abbrev": false,
"full_module": "LowParse.Spec.Combinators",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.UInt16",
"short_module": "U16"
},
{
"abbrev": true,
"full_module": "FStar.UInt8",
"short_module": "U8"
},
{
"abbrev": true,
"full_module": "FStar.Endianness",
"short_module": "E"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "Seq"
},
{
"abbrev": false,
"full_module": "LowParse.Spec.Base",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Spec",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Spec",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let decode_u8
(b: bytes { Seq.length b == 1 } )
: Tot U8.t
= Seq.index b 0 | val decode_u8 (b: bytes{Seq.length b == 1}) : Tot U8.t
let decode_u8 (b: bytes{Seq.length b == 1}) : Tot U8.t = | false | null | false | Seq.index b 0 | {
"checked_file": "LowParse.Spec.Int.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowParse.Spec.Seq.fst.checked",
"LowParse.Spec.Combinators.fsti.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": true,
"source_file": "LowParse.Spec.Int.fst"
} | [
"total"
] | [
"LowParse.Bytes.bytes",
"Prims.eq2",
"Prims.int",
"FStar.Seq.Base.length",
"LowParse.Bytes.byte",
"FStar.Seq.Base.index",
"FStar.UInt8.t"
] | [] | module LowParse.Spec.Int
open LowParse.Spec.Combinators
let decode_u8
(b: bytes { Seq.length b == 1 } ) | false | false | LowParse.Spec.Int.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val decode_u8 (b: bytes{Seq.length b == 1}) : Tot U8.t | [] | LowParse.Spec.Int.decode_u8 | {
"file_name": "src/lowparse/LowParse.Spec.Int.fst",
"git_rev": "446a08ce38df905547cf20f28c43776b22b8087a",
"git_url": "https://github.com/project-everest/everparse.git",
"project_name": "everparse"
} | b: LowParse.Bytes.bytes{FStar.Seq.Base.length b == 1} -> FStar.UInt8.t | {
"end_col": 15,
"end_line": 7,
"start_col": 2,
"start_line": 7
} |
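Since decode_u8 is just Seq.index b 0 and serialize_u8 writes Seq.create 1, the one-byte round trip is immediate; a minimal sketch, assuming the definitions from the file context above are in scope (example_u8_roundtrip is an invented name, and the explicit Seq lemma call may be redundant given its SMT pattern):

let example_u8_roundtrip (x: FStar.UInt8.t)
  : Lemma (decode_u8 (Seq.create 1 x) == x)
= (* Seq.create 1 x is the byte image produced by serialize_u8 *)
  Seq.lemma_index_create 1 x 0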
Prims.GTot | val decode_u32 (b: bytes{Seq.length b == 4}) : GTot U32.t | [
{
"abbrev": false,
"full_module": "LowParse.Spec.Combinators",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.UInt16",
"short_module": "U16"
},
{
"abbrev": true,
"full_module": "FStar.UInt8",
"short_module": "U8"
},
{
"abbrev": true,
"full_module": "FStar.Endianness",
"short_module": "E"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "Seq"
},
{
"abbrev": false,
"full_module": "LowParse.Spec.Base",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Spec",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Spec",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let decode_u32
(b: bytes { Seq.length b == 4 } )
: GTot U32.t
= E.lemma_be_to_n_is_bounded b;
U32.uint_to_t (E.be_to_n b) | val decode_u32 (b: bytes{Seq.length b == 4}) : GTot U32.t
let decode_u32 (b: bytes{Seq.length b == 4}) : GTot U32.t = | false | null | false | E.lemma_be_to_n_is_bounded b;
U32.uint_to_t (E.be_to_n b) | {
"checked_file": "LowParse.Spec.Int.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowParse.Spec.Seq.fst.checked",
"LowParse.Spec.Combinators.fsti.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": true,
"source_file": "LowParse.Spec.Int.fst"
} | [
"sometrivial"
] | [
"LowParse.Bytes.bytes",
"Prims.eq2",
"Prims.int",
"FStar.Seq.Base.length",
"LowParse.Bytes.byte",
"FStar.UInt32.uint_to_t",
"FStar.Endianness.be_to_n",
"Prims.unit",
"FStar.Endianness.lemma_be_to_n_is_bounded",
"FStar.UInt32.t"
] | [] | module LowParse.Spec.Int
open LowParse.Spec.Combinators
let decode_u8
(b: bytes { Seq.length b == 1 } )
: Tot U8.t
= Seq.index b 0
let decode_u8_injective () : Lemma
(make_total_constant_size_parser_precond 1 U8.t decode_u8)
= ()
let tot_parse_u8 =
decode_u8_injective ();
tot_make_total_constant_size_parser 1 U8.t decode_u8
let parse_u8_spec
b
=
Seq.lemma_index_slice b 0 1 0;
E.reveal_be_to_n (Seq.slice b 0 1);
E.reveal_be_to_n (Seq.slice (Seq.slice b 0 1) 0 0)
let parse_u8_spec' b = ()
let serialize_u8 =
Seq.create 1
let serialize_u8_spec x = ()
let decode_u16
(b: bytes { Seq.length b == 2 } )
: GTot U16.t
= E.lemma_be_to_n_is_bounded b;
U16.uint_to_t (E.be_to_n b)
let decode_u16_injective'
(b1: bytes { Seq.length b1 == 2 } )
(b2: bytes { Seq.length b2 == 2 } )
: Lemma
(decode_u16 b1 == decode_u16 b2 ==> Seq.equal b1 b2)
= if decode_u16 b1 = decode_u16 b2
then begin
E.lemma_be_to_n_is_bounded b1;
E.lemma_be_to_n_is_bounded b2;
assert (U32.v (U32.uint_to_t (E.be_to_n b1)) == E.be_to_n b1);
assert (U32.v (U32.uint_to_t (E.be_to_n b2)) == E.be_to_n b2);
assert (E.be_to_n b1 == E.be_to_n b2);
E.be_to_n_inj b1 b2
end else ()
let decode_u16_injective
()
: Lemma
(make_total_constant_size_parser_precond 2 U16.t decode_u16)
= Classical.forall_intro_2 decode_u16_injective'
let parse_u16 =
decode_u16_injective ();
make_total_constant_size_parser 2 U16.t decode_u16
let parse_u16_spec
b
=
E.lemma_be_to_n_is_bounded (Seq.slice b 0 2)
let serialize_u16 =
(fun (x: U16.t) -> E.n_to_be 2 (U16.v x))
let decode_u32
(b: bytes { Seq.length b == 4 } ) | false | false | LowParse.Spec.Int.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val decode_u32 (b: bytes{Seq.length b == 4}) : GTot U32.t | [] | LowParse.Spec.Int.decode_u32 | {
"file_name": "src/lowparse/LowParse.Spec.Int.fst",
"git_rev": "446a08ce38df905547cf20f28c43776b22b8087a",
"git_url": "https://github.com/project-everest/everparse.git",
"project_name": "everparse"
} | b: LowParse.Bytes.bytes{FStar.Seq.Base.length b == 4} -> Prims.GTot FStar.UInt32.t | {
"end_col": 29,
"end_line": 74,
"start_col": 2,
"start_line": 73
} |
FStar.Pervasives.Lemma | val decode_u16_injective: Prims.unit
-> Lemma (make_total_constant_size_parser_precond 2 U16.t decode_u16) | [
{
"abbrev": false,
"full_module": "LowParse.Spec.Combinators",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.UInt16",
"short_module": "U16"
},
{
"abbrev": true,
"full_module": "FStar.UInt8",
"short_module": "U8"
},
{
"abbrev": true,
"full_module": "FStar.Endianness",
"short_module": "E"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "Seq"
},
{
"abbrev": false,
"full_module": "LowParse.Spec.Base",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Spec",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Spec",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let decode_u16_injective
()
: Lemma
(make_total_constant_size_parser_precond 2 U16.t decode_u16)
= Classical.forall_intro_2 decode_u16_injective' | val decode_u16_injective: Prims.unit
-> Lemma (make_total_constant_size_parser_precond 2 U16.t decode_u16)
let decode_u16_injective () : Lemma (make_total_constant_size_parser_precond 2 U16.t decode_u16) = | false | null | true | Classical.forall_intro_2 decode_u16_injective' | {
"checked_file": "LowParse.Spec.Int.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowParse.Spec.Seq.fst.checked",
"LowParse.Spec.Combinators.fsti.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": true,
"source_file": "LowParse.Spec.Int.fst"
} | [
"lemma"
] | [
"Prims.unit",
"FStar.Classical.forall_intro_2",
"LowParse.Bytes.bytes",
"Prims.eq2",
"Prims.int",
"FStar.Seq.Base.length",
"LowParse.Bytes.byte",
"Prims.l_imp",
"FStar.UInt16.t",
"LowParse.Spec.Int.decode_u16",
"FStar.Seq.Base.equal",
"LowParse.Spec.Int.decode_u16_injective'",
"Prims.l_True",
"Prims.squash",
"LowParse.Spec.Combinators.make_total_constant_size_parser_precond",
"Prims.Nil",
"FStar.Pervasives.pattern"
] | [] | module LowParse.Spec.Int
open LowParse.Spec.Combinators
let decode_u8
(b: bytes { Seq.length b == 1 } )
: Tot U8.t
= Seq.index b 0
let decode_u8_injective () : Lemma
(make_total_constant_size_parser_precond 1 U8.t decode_u8)
= ()
let tot_parse_u8 =
decode_u8_injective ();
tot_make_total_constant_size_parser 1 U8.t decode_u8
let parse_u8_spec
b
=
Seq.lemma_index_slice b 0 1 0;
E.reveal_be_to_n (Seq.slice b 0 1);
E.reveal_be_to_n (Seq.slice (Seq.slice b 0 1) 0 0)
let parse_u8_spec' b = ()
let serialize_u8 =
Seq.create 1
let serialize_u8_spec x = ()
let decode_u16
(b: bytes { Seq.length b == 2 } )
: GTot U16.t
= E.lemma_be_to_n_is_bounded b;
U16.uint_to_t (E.be_to_n b)
let decode_u16_injective'
(b1: bytes { Seq.length b1 == 2 } )
(b2: bytes { Seq.length b2 == 2 } )
: Lemma
(decode_u16 b1 == decode_u16 b2 ==> Seq.equal b1 b2)
= if decode_u16 b1 = decode_u16 b2
then begin
E.lemma_be_to_n_is_bounded b1;
E.lemma_be_to_n_is_bounded b2;
assert (U32.v (U32.uint_to_t (E.be_to_n b1)) == E.be_to_n b1);
assert (U32.v (U32.uint_to_t (E.be_to_n b2)) == E.be_to_n b2);
assert (E.be_to_n b1 == E.be_to_n b2);
E.be_to_n_inj b1 b2
end else ()
let decode_u16_injective
()
: Lemma | false | false | LowParse.Spec.Int.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val decode_u16_injective: Prims.unit
-> Lemma (make_total_constant_size_parser_precond 2 U16.t decode_u16) | [] | LowParse.Spec.Int.decode_u16_injective | {
"file_name": "src/lowparse/LowParse.Spec.Int.fst",
"git_rev": "446a08ce38df905547cf20f28c43776b22b8087a",
"git_url": "https://github.com/project-everest/everparse.git",
"project_name": "everparse"
} | _: Prims.unit
-> FStar.Pervasives.Lemma
(ensures
LowParse.Spec.Combinators.make_total_constant_size_parser_precond 2
FStar.UInt16.t
LowParse.Spec.Int.decode_u16) | {
"end_col": 48,
"end_line": 56,
"start_col": 2,
"start_line": 56
} |
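The pairwise lemma above can also be consumed in requires/ensures form; a minimal sketch, assuming decode_u16 and decode_u16_injective' from the file context are in scope (example_decode_u16_inj is an invented name):

let example_decode_u16_inj
  (b1: bytes { Seq.length b1 == 2 })
  (b2: bytes { Seq.length b2 == 2 })
  : Lemma (requires decode_u16 b1 == decode_u16 b2)
          (ensures  Seq.equal b1 b2)
= (* the implication proved by decode_u16_injective' discharges the goal
     under the stated precondition *)
  decode_u16_injective' b1 b2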
FStar.Pervasives.Lemma | val decode_u64_injective: Prims.unit
-> Lemma (make_total_constant_size_parser_precond 8 U64.t decode_u64) | [
{
"abbrev": false,
"full_module": "LowParse.Spec.Combinators",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.UInt16",
"short_module": "U16"
},
{
"abbrev": true,
"full_module": "FStar.UInt8",
"short_module": "U8"
},
{
"abbrev": true,
"full_module": "FStar.Endianness",
"short_module": "E"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "Seq"
},
{
"abbrev": false,
"full_module": "LowParse.Spec.Base",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Spec",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Spec",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let decode_u64_injective () : Lemma
(make_total_constant_size_parser_precond 8 U64.t decode_u64)
= Classical.forall_intro_2 decode_u64_injective' | val decode_u64_injective: Prims.unit
-> Lemma (make_total_constant_size_parser_precond 8 U64.t decode_u64)
let decode_u64_injective () : Lemma (make_total_constant_size_parser_precond 8 U64.t decode_u64) = | false | null | true | Classical.forall_intro_2 decode_u64_injective' | {
"checked_file": "LowParse.Spec.Int.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowParse.Spec.Seq.fst.checked",
"LowParse.Spec.Combinators.fsti.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": true,
"source_file": "LowParse.Spec.Int.fst"
} | [
"lemma"
] | [
"Prims.unit",
"FStar.Classical.forall_intro_2",
"LowParse.Bytes.bytes",
"Prims.eq2",
"Prims.int",
"FStar.Seq.Base.length",
"LowParse.Bytes.byte",
"Prims.l_imp",
"FStar.UInt64.t",
"LowParse.Spec.Int.decode_u64",
"FStar.Seq.Base.equal",
"LowParse.Spec.Int.decode_u64_injective'",
"Prims.l_True",
"Prims.squash",
"LowParse.Spec.Combinators.make_total_constant_size_parser_precond",
"Prims.Nil",
"FStar.Pervasives.pattern"
] | [] | module LowParse.Spec.Int
open LowParse.Spec.Combinators
let decode_u8
(b: bytes { Seq.length b == 1 } )
: Tot U8.t
= Seq.index b 0
let decode_u8_injective () : Lemma
(make_total_constant_size_parser_precond 1 U8.t decode_u8)
= ()
let tot_parse_u8 =
decode_u8_injective ();
tot_make_total_constant_size_parser 1 U8.t decode_u8
let parse_u8_spec
b
=
Seq.lemma_index_slice b 0 1 0;
E.reveal_be_to_n (Seq.slice b 0 1);
E.reveal_be_to_n (Seq.slice (Seq.slice b 0 1) 0 0)
let parse_u8_spec' b = ()
let serialize_u8 =
Seq.create 1
let serialize_u8_spec x = ()
let decode_u16
(b: bytes { Seq.length b == 2 } )
: GTot U16.t
= E.lemma_be_to_n_is_bounded b;
U16.uint_to_t (E.be_to_n b)
let decode_u16_injective'
(b1: bytes { Seq.length b1 == 2 } )
(b2: bytes { Seq.length b2 == 2 } )
: Lemma
(decode_u16 b1 == decode_u16 b2 ==> Seq.equal b1 b2)
= if decode_u16 b1 = decode_u16 b2
then begin
E.lemma_be_to_n_is_bounded b1;
E.lemma_be_to_n_is_bounded b2;
assert (U32.v (U32.uint_to_t (E.be_to_n b1)) == E.be_to_n b1);
assert (U32.v (U32.uint_to_t (E.be_to_n b2)) == E.be_to_n b2);
assert (E.be_to_n b1 == E.be_to_n b2);
E.be_to_n_inj b1 b2
end else ()
let decode_u16_injective
()
: Lemma
(make_total_constant_size_parser_precond 2 U16.t decode_u16)
= Classical.forall_intro_2 decode_u16_injective'
let parse_u16 =
decode_u16_injective ();
make_total_constant_size_parser 2 U16.t decode_u16
let parse_u16_spec
b
=
E.lemma_be_to_n_is_bounded (Seq.slice b 0 2)
let serialize_u16 =
(fun (x: U16.t) -> E.n_to_be 2 (U16.v x))
let decode_u32
(b: bytes { Seq.length b == 4 } )
: GTot U32.t
= E.lemma_be_to_n_is_bounded b;
U32.uint_to_t (E.be_to_n b)
let decode_u32_injective'
(b1: bytes { Seq.length b1 == 4 } )
(b2: bytes { Seq.length b2 == 4 } )
: Lemma
(decode_u32 b1 == decode_u32 b2 ==> Seq.equal b1 b2)
= if decode_u32 b1 = decode_u32 b2
then begin
E.lemma_be_to_n_is_bounded b1;
E.lemma_be_to_n_is_bounded b2;
assert (U32.v (U32.uint_to_t (E.be_to_n b1)) == E.be_to_n b1);
assert (U32.v (U32.uint_to_t (E.be_to_n b2)) == E.be_to_n b2);
assert (E.be_to_n b1 == E.be_to_n b2);
E.be_to_n_inj b1 b2
end else ()
let decode_u32_injective () : Lemma
(make_total_constant_size_parser_precond 4 U32.t decode_u32)
= Classical.forall_intro_2 decode_u32_injective'
let parse_u32 =
decode_u32_injective ();
make_total_constant_size_parser 4 U32.t decode_u32
let parse_u32_spec
b
=
E.lemma_be_to_n_is_bounded (Seq.slice b 0 4)
let serialize_u32 =
(fun (x: U32.t) -> E.n_to_be 4 (U32.v x))
let decode_u64
(b: bytes { Seq.length b == 8 } )
: GTot U64.t
= E.lemma_be_to_n_is_bounded b;
U64.uint_to_t (E.be_to_n b)
let decode_u64_injective'
(b1: bytes { Seq.length b1 == 8 } )
(b2: bytes { Seq.length b2 == 8 } )
: Lemma
(decode_u64 b1 == decode_u64 b2 ==> Seq.equal b1 b2)
= if decode_u64 b1 = decode_u64 b2
then begin
E.lemma_be_to_n_is_bounded b1;
E.lemma_be_to_n_is_bounded b2;
assert (U64.v (U64.uint_to_t (E.be_to_n b1)) == E.be_to_n b1);
assert (U64.v (U64.uint_to_t (E.be_to_n b2)) == E.be_to_n b2);
assert (E.be_to_n b1 == E.be_to_n b2);
E.be_to_n_inj b1 b2
end else ()
let decode_u64_injective () : Lemma | false | false | LowParse.Spec.Int.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val decode_u64_injective: Prims.unit
-> Lemma (make_total_constant_size_parser_precond 8 U64.t decode_u64) | [] | LowParse.Spec.Int.decode_u64_injective | {
"file_name": "src/lowparse/LowParse.Spec.Int.fst",
"git_rev": "446a08ce38df905547cf20f28c43776b22b8087a",
"git_url": "https://github.com/project-everest/everparse.git",
"project_name": "everparse"
} | _: Prims.unit
-> FStar.Pervasives.Lemma
(ensures
LowParse.Spec.Combinators.make_total_constant_size_parser_precond 8
FStar.UInt64.t
LowParse.Spec.Int.decode_u64) | {
"end_col": 48,
"end_line": 130,
"start_col": 2,
"start_line": 130
} |
Prims.GTot | val decode_u64_le (b: bytes{Seq.length b == 8}) : GTot U64.t | [
{
"abbrev": false,
"full_module": "LowParse.Spec.Combinators",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.UInt16",
"short_module": "U16"
},
{
"abbrev": true,
"full_module": "FStar.UInt8",
"short_module": "U8"
},
{
"abbrev": true,
"full_module": "FStar.Endianness",
"short_module": "E"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "Seq"
},
{
"abbrev": false,
"full_module": "LowParse.Spec.Base",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Spec",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Spec",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let decode_u64_le
(b: bytes { Seq.length b == 8 } )
: GTot U64.t
= E.lemma_le_to_n_is_bounded b;
U64.uint_to_t (E.le_to_n b) | val decode_u64_le (b: bytes{Seq.length b == 8}) : GTot U64.t
let decode_u64_le (b: bytes{Seq.length b == 8}) : GTot U64.t = | false | null | false | E.lemma_le_to_n_is_bounded b;
U64.uint_to_t (E.le_to_n b) | {
"checked_file": "LowParse.Spec.Int.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowParse.Spec.Seq.fst.checked",
"LowParse.Spec.Combinators.fsti.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": true,
"source_file": "LowParse.Spec.Int.fst"
} | [
"sometrivial"
] | [
"LowParse.Bytes.bytes",
"Prims.eq2",
"Prims.int",
"FStar.Seq.Base.length",
"LowParse.Bytes.byte",
"FStar.UInt64.uint_to_t",
"FStar.Endianness.le_to_n",
"Prims.unit",
"FStar.Endianness.lemma_le_to_n_is_bounded",
"FStar.UInt64.t"
] | [] | module LowParse.Spec.Int
open LowParse.Spec.Combinators
let decode_u8
(b: bytes { Seq.length b == 1 } )
: Tot U8.t
= Seq.index b 0
let decode_u8_injective () : Lemma
(make_total_constant_size_parser_precond 1 U8.t decode_u8)
= ()
let tot_parse_u8 =
decode_u8_injective ();
tot_make_total_constant_size_parser 1 U8.t decode_u8
let parse_u8_spec
b
=
Seq.lemma_index_slice b 0 1 0;
E.reveal_be_to_n (Seq.slice b 0 1);
E.reveal_be_to_n (Seq.slice (Seq.slice b 0 1) 0 0)
let parse_u8_spec' b = ()
let serialize_u8 =
Seq.create 1
let serialize_u8_spec x = ()
let decode_u16
(b: bytes { Seq.length b == 2 } )
: GTot U16.t
= E.lemma_be_to_n_is_bounded b;
U16.uint_to_t (E.be_to_n b)
let decode_u16_injective'
(b1: bytes { Seq.length b1 == 2 } )
(b2: bytes { Seq.length b2 == 2 } )
: Lemma
(decode_u16 b1 == decode_u16 b2 ==> Seq.equal b1 b2)
= if decode_u16 b1 = decode_u16 b2
then begin
E.lemma_be_to_n_is_bounded b1;
E.lemma_be_to_n_is_bounded b2;
assert (U32.v (U32.uint_to_t (E.be_to_n b1)) == E.be_to_n b1);
assert (U32.v (U32.uint_to_t (E.be_to_n b2)) == E.be_to_n b2);
assert (E.be_to_n b1 == E.be_to_n b2);
E.be_to_n_inj b1 b2
end else ()
let decode_u16_injective
()
: Lemma
(make_total_constant_size_parser_precond 2 U16.t decode_u16)
= Classical.forall_intro_2 decode_u16_injective'
let parse_u16 =
decode_u16_injective ();
make_total_constant_size_parser 2 U16.t decode_u16
let parse_u16_spec
b
=
E.lemma_be_to_n_is_bounded (Seq.slice b 0 2)
let serialize_u16 =
(fun (x: U16.t) -> E.n_to_be 2 (U16.v x))
let decode_u32
(b: bytes { Seq.length b == 4 } )
: GTot U32.t
= E.lemma_be_to_n_is_bounded b;
U32.uint_to_t (E.be_to_n b)
let decode_u32_injective'
(b1: bytes { Seq.length b1 == 4 } )
(b2: bytes { Seq.length b2 == 4 } )
: Lemma
(decode_u32 b1 == decode_u32 b2 ==> Seq.equal b1 b2)
= if decode_u32 b1 = decode_u32 b2
then begin
E.lemma_be_to_n_is_bounded b1;
E.lemma_be_to_n_is_bounded b2;
assert (U32.v (U32.uint_to_t (E.be_to_n b1)) == E.be_to_n b1);
assert (U32.v (U32.uint_to_t (E.be_to_n b2)) == E.be_to_n b2);
assert (E.be_to_n b1 == E.be_to_n b2);
E.be_to_n_inj b1 b2
end else ()
let decode_u32_injective () : Lemma
(make_total_constant_size_parser_precond 4 U32.t decode_u32)
= Classical.forall_intro_2 decode_u32_injective'
let parse_u32 =
decode_u32_injective ();
make_total_constant_size_parser 4 U32.t decode_u32
let parse_u32_spec
b
=
E.lemma_be_to_n_is_bounded (Seq.slice b 0 4)
let serialize_u32 =
(fun (x: U32.t) -> E.n_to_be 4 (U32.v x))
let decode_u64
(b: bytes { Seq.length b == 8 } )
: GTot U64.t
= E.lemma_be_to_n_is_bounded b;
U64.uint_to_t (E.be_to_n b)
let decode_u64_injective'
(b1: bytes { Seq.length b1 == 8 } )
(b2: bytes { Seq.length b2 == 8 } )
: Lemma
(decode_u64 b1 == decode_u64 b2 ==> Seq.equal b1 b2)
= if decode_u64 b1 = decode_u64 b2
then begin
E.lemma_be_to_n_is_bounded b1;
E.lemma_be_to_n_is_bounded b2;
assert (U64.v (U64.uint_to_t (E.be_to_n b1)) == E.be_to_n b1);
assert (U64.v (U64.uint_to_t (E.be_to_n b2)) == E.be_to_n b2);
assert (E.be_to_n b1 == E.be_to_n b2);
E.be_to_n_inj b1 b2
end else ()
let decode_u64_injective () : Lemma
(make_total_constant_size_parser_precond 8 U64.t decode_u64)
= Classical.forall_intro_2 decode_u64_injective'
let parse_u64 =
decode_u64_injective ();
make_total_constant_size_parser 8 U64.t decode_u64
let parse_u64_spec
b
= E.lemma_be_to_n_is_bounded (Seq.slice b 0 8)
let serialize_u64 =
(fun (x: U64.t) -> E.n_to_be 8 (U64.v x))
let decode_u64_le
(b: bytes { Seq.length b == 8 } ) | false | false | LowParse.Spec.Int.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val decode_u64_le (b: bytes{Seq.length b == 8}) : GTot U64.t | [] | LowParse.Spec.Int.decode_u64_le | {
"file_name": "src/lowparse/LowParse.Spec.Int.fst",
"git_rev": "446a08ce38df905547cf20f28c43776b22b8087a",
"git_url": "https://github.com/project-everest/everparse.git",
"project_name": "everparse"
} | b: LowParse.Bytes.bytes{FStar.Seq.Base.length b == 8} -> Prims.GTot FStar.UInt64.t | {
"end_col": 29,
"end_line": 147,
"start_col": 2,
"start_line": 146
} |
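decode_u64_le differs from decode_u64 only in byte order: paraphrasing FStar.Endianness, E.le_to_n weights the first byte least,

$$\mathrm{le\_to\_n}(b) \;=\; \sum_{i=0}^{n-1} b[i]\cdot 2^{8 i}$$

so on the byte sequence 01 02 03 04 05 06 07 08 the big-endian decoder yields 0x0102030405060708 while the little-endian decoder yields 0x0807060504030201 (ordinary arithmetic, not proved here).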
FStar.Pervasives.Lemma | val decode_u32_injective: Prims.unit
-> Lemma (make_total_constant_size_parser_precond 4 U32.t decode_u32) | [
{
"abbrev": false,
"full_module": "LowParse.Spec.Combinators",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.UInt16",
"short_module": "U16"
},
{
"abbrev": true,
"full_module": "FStar.UInt8",
"short_module": "U8"
},
{
"abbrev": true,
"full_module": "FStar.Endianness",
"short_module": "E"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "Seq"
},
{
"abbrev": false,
"full_module": "LowParse.Spec.Base",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Spec",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Spec",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let decode_u32_injective () : Lemma
(make_total_constant_size_parser_precond 4 U32.t decode_u32)
= Classical.forall_intro_2 decode_u32_injective' | val decode_u32_injective: Prims.unit
-> Lemma (make_total_constant_size_parser_precond 4 U32.t decode_u32)
let decode_u32_injective () : Lemma (make_total_constant_size_parser_precond 4 U32.t decode_u32) = | false | null | true | Classical.forall_intro_2 decode_u32_injective' | {
"checked_file": "LowParse.Spec.Int.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowParse.Spec.Seq.fst.checked",
"LowParse.Spec.Combinators.fsti.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": true,
"source_file": "LowParse.Spec.Int.fst"
} | [
"lemma"
] | [
"Prims.unit",
"FStar.Classical.forall_intro_2",
"LowParse.Bytes.bytes",
"Prims.eq2",
"Prims.int",
"FStar.Seq.Base.length",
"LowParse.Bytes.byte",
"Prims.l_imp",
"FStar.UInt32.t",
"LowParse.Spec.Int.decode_u32",
"FStar.Seq.Base.equal",
"LowParse.Spec.Int.decode_u32_injective'",
"Prims.l_True",
"Prims.squash",
"LowParse.Spec.Combinators.make_total_constant_size_parser_precond",
"Prims.Nil",
"FStar.Pervasives.pattern"
] | [] | module LowParse.Spec.Int
open LowParse.Spec.Combinators
let decode_u8
(b: bytes { Seq.length b == 1 } )
: Tot U8.t
= Seq.index b 0
let decode_u8_injective () : Lemma
(make_total_constant_size_parser_precond 1 U8.t decode_u8)
= ()
let tot_parse_u8 =
decode_u8_injective ();
tot_make_total_constant_size_parser 1 U8.t decode_u8
let parse_u8_spec
b
=
Seq.lemma_index_slice b 0 1 0;
E.reveal_be_to_n (Seq.slice b 0 1);
E.reveal_be_to_n (Seq.slice (Seq.slice b 0 1) 0 0)
let parse_u8_spec' b = ()
let serialize_u8 =
Seq.create 1
let serialize_u8_spec x = ()
let decode_u16
(b: bytes { Seq.length b == 2 } )
: GTot U16.t
= E.lemma_be_to_n_is_bounded b;
U16.uint_to_t (E.be_to_n b)
let decode_u16_injective'
(b1: bytes { Seq.length b1 == 2 } )
(b2: bytes { Seq.length b2 == 2 } )
: Lemma
(decode_u16 b1 == decode_u16 b2 ==> Seq.equal b1 b2)
= if decode_u16 b1 = decode_u16 b2
then begin
E.lemma_be_to_n_is_bounded b1;
E.lemma_be_to_n_is_bounded b2;
assert (U32.v (U32.uint_to_t (E.be_to_n b1)) == E.be_to_n b1);
assert (U32.v (U32.uint_to_t (E.be_to_n b2)) == E.be_to_n b2);
assert (E.be_to_n b1 == E.be_to_n b2);
E.be_to_n_inj b1 b2
end else ()
let decode_u16_injective
()
: Lemma
(make_total_constant_size_parser_precond 2 U16.t decode_u16)
= Classical.forall_intro_2 decode_u16_injective'
let parse_u16 =
decode_u16_injective ();
make_total_constant_size_parser 2 U16.t decode_u16
let parse_u16_spec
b
=
E.lemma_be_to_n_is_bounded (Seq.slice b 0 2)
let serialize_u16 =
(fun (x: U16.t) -> E.n_to_be 2 (U16.v x))
let decode_u32
(b: bytes { Seq.length b == 4 } )
: GTot U32.t
= E.lemma_be_to_n_is_bounded b;
U32.uint_to_t (E.be_to_n b)
let decode_u32_injective'
(b1: bytes { Seq.length b1 == 4 } )
(b2: bytes { Seq.length b2 == 4 } )
: Lemma
(decode_u32 b1 == decode_u32 b2 ==> Seq.equal b1 b2)
= if decode_u32 b1 = decode_u32 b2
then begin
E.lemma_be_to_n_is_bounded b1;
E.lemma_be_to_n_is_bounded b2;
assert (U32.v (U32.uint_to_t (E.be_to_n b1)) == E.be_to_n b1);
assert (U32.v (U32.uint_to_t (E.be_to_n b2)) == E.be_to_n b2);
assert (E.be_to_n b1 == E.be_to_n b2);
E.be_to_n_inj b1 b2
end else ()
let decode_u32_injective () : Lemma | false | false | LowParse.Spec.Int.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val decode_u32_injective: Prims.unit
-> Lemma (make_total_constant_size_parser_precond 4 U32.t decode_u32) | [] | LowParse.Spec.Int.decode_u32_injective | {
"file_name": "src/lowparse/LowParse.Spec.Int.fst",
"git_rev": "446a08ce38df905547cf20f28c43776b22b8087a",
"git_url": "https://github.com/project-everest/everparse.git",
"project_name": "everparse"
} | _: Prims.unit
-> FStar.Pervasives.Lemma
(ensures
LowParse.Spec.Combinators.make_total_constant_size_parser_precond 4
FStar.UInt32.t
LowParse.Spec.Int.decode_u32) | {
"end_col": 48,
"end_line": 93,
"start_col": 2,
"start_line": 93
} |
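Every *_injective lemma in these records ends with the same step: Classical.forall_intro_2 lifts a pairwise Lemma to the forall-quantified precondition expected by make_total_constant_size_parser. A toy sketch of that step in isolation, with deliberately simpler types than the bytes case (example_pairwise and example_forall are invented names):

let example_pairwise (x: nat) (y: nat) : Lemma (x + y == y + x) = ()

let example_forall () : Lemma (forall (x: nat) (y: nat). x + y == y + x)
= Classical.forall_intro_2 example_pairwise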
FStar.Pervasives.Lemma | val decode_u64_le_injective: Prims.unit
-> Lemma (make_total_constant_size_parser_precond 8 U64.t decode_u64_le) | [
{
"abbrev": false,
"full_module": "LowParse.Spec.Combinators",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.UInt16",
"short_module": "U16"
},
{
"abbrev": true,
"full_module": "FStar.UInt8",
"short_module": "U8"
},
{
"abbrev": true,
"full_module": "FStar.Endianness",
"short_module": "E"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "Seq"
},
{
"abbrev": false,
"full_module": "LowParse.Spec.Base",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Spec",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Spec",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let decode_u64_le_injective () : Lemma
(make_total_constant_size_parser_precond 8 U64.t decode_u64_le)
= Classical.forall_intro_2 decode_u64_le_injective' | val decode_u64_le_injective: Prims.unit
-> Lemma (make_total_constant_size_parser_precond 8 U64.t decode_u64_le)
let decode_u64_le_injective ()
: Lemma (make_total_constant_size_parser_precond 8 U64.t decode_u64_le) = | false | null | true | Classical.forall_intro_2 decode_u64_le_injective' | {
"checked_file": "LowParse.Spec.Int.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowParse.Spec.Seq.fst.checked",
"LowParse.Spec.Combinators.fsti.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": true,
"source_file": "LowParse.Spec.Int.fst"
} | [
"lemma"
] | [
"Prims.unit",
"FStar.Classical.forall_intro_2",
"LowParse.Bytes.bytes",
"Prims.eq2",
"Prims.int",
"FStar.Seq.Base.length",
"LowParse.Bytes.byte",
"Prims.l_imp",
"FStar.UInt64.t",
"LowParse.Spec.Int.decode_u64_le",
"FStar.Seq.Base.equal",
"LowParse.Spec.Int.decode_u64_le_injective'",
"Prims.l_True",
"Prims.squash",
"LowParse.Spec.Combinators.make_total_constant_size_parser_precond",
"Prims.Nil",
"FStar.Pervasives.pattern"
] | [] | module LowParse.Spec.Int
open LowParse.Spec.Combinators
let decode_u8
(b: bytes { Seq.length b == 1 } )
: Tot U8.t
= Seq.index b 0
let decode_u8_injective () : Lemma
(make_total_constant_size_parser_precond 1 U8.t decode_u8)
= ()
let tot_parse_u8 =
decode_u8_injective ();
tot_make_total_constant_size_parser 1 U8.t decode_u8
let parse_u8_spec
b
=
Seq.lemma_index_slice b 0 1 0;
E.reveal_be_to_n (Seq.slice b 0 1);
E.reveal_be_to_n (Seq.slice (Seq.slice b 0 1) 0 0)
let parse_u8_spec' b = ()
let serialize_u8 =
Seq.create 1
let serialize_u8_spec x = ()
let decode_u16
(b: bytes { Seq.length b == 2 } )
: GTot U16.t
= E.lemma_be_to_n_is_bounded b;
U16.uint_to_t (E.be_to_n b)
let decode_u16_injective'
(b1: bytes { Seq.length b1 == 2 } )
(b2: bytes { Seq.length b2 == 2 } )
: Lemma
(decode_u16 b1 == decode_u16 b2 ==> Seq.equal b1 b2)
= if decode_u16 b1 = decode_u16 b2
then begin
E.lemma_be_to_n_is_bounded b1;
E.lemma_be_to_n_is_bounded b2;
assert (U32.v (U32.uint_to_t (E.be_to_n b1)) == E.be_to_n b1);
assert (U32.v (U32.uint_to_t (E.be_to_n b2)) == E.be_to_n b2);
assert (E.be_to_n b1 == E.be_to_n b2);
E.be_to_n_inj b1 b2
end else ()
let decode_u16_injective
()
: Lemma
(make_total_constant_size_parser_precond 2 U16.t decode_u16)
= Classical.forall_intro_2 decode_u16_injective'
let parse_u16 =
decode_u16_injective ();
make_total_constant_size_parser 2 U16.t decode_u16
let parse_u16_spec
b
=
E.lemma_be_to_n_is_bounded (Seq.slice b 0 2)
let serialize_u16 =
(fun (x: U16.t) -> E.n_to_be 2 (U16.v x))
let decode_u32
(b: bytes { Seq.length b == 4 } )
: GTot U32.t
= E.lemma_be_to_n_is_bounded b;
U32.uint_to_t (E.be_to_n b)
let decode_u32_injective'
(b1: bytes { Seq.length b1 == 4 } )
(b2: bytes { Seq.length b2 == 4 } )
: Lemma
(decode_u32 b1 == decode_u32 b2 ==> Seq.equal b1 b2)
= if decode_u32 b1 = decode_u32 b2
then begin
E.lemma_be_to_n_is_bounded b1;
E.lemma_be_to_n_is_bounded b2;
assert (U32.v (U32.uint_to_t (E.be_to_n b1)) == E.be_to_n b1);
assert (U32.v (U32.uint_to_t (E.be_to_n b2)) == E.be_to_n b2);
assert (E.be_to_n b1 == E.be_to_n b2);
E.be_to_n_inj b1 b2
end else ()
let decode_u32_injective () : Lemma
(make_total_constant_size_parser_precond 4 U32.t decode_u32)
= Classical.forall_intro_2 decode_u32_injective'
let parse_u32 =
decode_u32_injective ();
make_total_constant_size_parser 4 U32.t decode_u32
let parse_u32_spec
b
=
E.lemma_be_to_n_is_bounded (Seq.slice b 0 4)
let serialize_u32 =
(fun (x: U32.t) -> E.n_to_be 4 (U32.v x))
let decode_u64
(b: bytes { Seq.length b == 8 } )
: GTot U64.t
= E.lemma_be_to_n_is_bounded b;
U64.uint_to_t (E.be_to_n b)
let decode_u64_injective'
(b1: bytes { Seq.length b1 == 8 } )
(b2: bytes { Seq.length b2 == 8 } )
: Lemma
(decode_u64 b1 == decode_u64 b2 ==> Seq.equal b1 b2)
= if decode_u64 b1 = decode_u64 b2
then begin
E.lemma_be_to_n_is_bounded b1;
E.lemma_be_to_n_is_bounded b2;
assert (U64.v (U64.uint_to_t (E.be_to_n b1)) == E.be_to_n b1);
assert (U64.v (U64.uint_to_t (E.be_to_n b2)) == E.be_to_n b2);
assert (E.be_to_n b1 == E.be_to_n b2);
E.be_to_n_inj b1 b2
end else ()
let decode_u64_injective () : Lemma
(make_total_constant_size_parser_precond 8 U64.t decode_u64)
= Classical.forall_intro_2 decode_u64_injective'
let parse_u64 =
decode_u64_injective ();
make_total_constant_size_parser 8 U64.t decode_u64
let parse_u64_spec
b
= E.lemma_be_to_n_is_bounded (Seq.slice b 0 8)
let serialize_u64 =
(fun (x: U64.t) -> E.n_to_be 8 (U64.v x))
let decode_u64_le
(b: bytes { Seq.length b == 8 } )
: GTot U64.t
= E.lemma_le_to_n_is_bounded b;
U64.uint_to_t (E.le_to_n b)
let decode_u64_le_injective'
(b1: bytes { Seq.length b1 == 8 } )
(b2: bytes { Seq.length b2 == 8 } )
: Lemma
(decode_u64_le b1 == decode_u64_le b2 ==> Seq.equal b1 b2)
= if decode_u64_le b1 = decode_u64_le b2
then begin
E.lemma_le_to_n_is_bounded b1;
E.lemma_le_to_n_is_bounded b2;
assert (U64.v (U64.uint_to_t (E.le_to_n b1)) == E.le_to_n b1);
assert (U64.v (U64.uint_to_t (E.le_to_n b2)) == E.le_to_n b2);
assert (E.le_to_n b1 == E.le_to_n b2);
E.le_to_n_inj b1 b2
end else ()
let decode_u64_le_injective () : Lemma | false | false | LowParse.Spec.Int.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val decode_u64_le_injective: Prims.unit
-> Lemma (make_total_constant_size_parser_precond 8 U64.t decode_u64_le) | [] | LowParse.Spec.Int.decode_u64_le_injective | {
"file_name": "src/lowparse/LowParse.Spec.Int.fst",
"git_rev": "446a08ce38df905547cf20f28c43776b22b8087a",
"git_url": "https://github.com/project-everest/everparse.git",
"project_name": "everparse"
} | _: Prims.unit
-> FStar.Pervasives.Lemma
(ensures
LowParse.Spec.Combinators.make_total_constant_size_parser_precond 8
FStar.UInt64.t
LowParse.Spec.Int.decode_u64_le) | {
"end_col": 51,
"end_line": 166,
"start_col": 2,
"start_line": 166
} |
FStar.Pervasives.Lemma | val decode_u32_injective' (b1: bytes{Seq.length b1 == 4}) (b2: bytes{Seq.length b2 == 4})
: Lemma (decode_u32 b1 == decode_u32 b2 ==> Seq.equal b1 b2) | [
{
"abbrev": false,
"full_module": "LowParse.Spec.Combinators",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.UInt16",
"short_module": "U16"
},
{
"abbrev": true,
"full_module": "FStar.UInt8",
"short_module": "U8"
},
{
"abbrev": true,
"full_module": "FStar.Endianness",
"short_module": "E"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "Seq"
},
{
"abbrev": false,
"full_module": "LowParse.Spec.Base",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Spec",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Spec",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let decode_u32_injective'
(b1: bytes { Seq.length b1 == 4 } )
(b2: bytes { Seq.length b2 == 4 } )
: Lemma
(decode_u32 b1 == decode_u32 b2 ==> Seq.equal b1 b2)
= if decode_u32 b1 = decode_u32 b2
then begin
E.lemma_be_to_n_is_bounded b1;
E.lemma_be_to_n_is_bounded b2;
assert (U32.v (U32.uint_to_t (E.be_to_n b1)) == E.be_to_n b1);
assert (U32.v (U32.uint_to_t (E.be_to_n b2)) == E.be_to_n b2);
assert (E.be_to_n b1 == E.be_to_n b2);
E.be_to_n_inj b1 b2
end else () | val decode_u32_injective' (b1: bytes{Seq.length b1 == 4}) (b2: bytes{Seq.length b2 == 4})
: Lemma (decode_u32 b1 == decode_u32 b2 ==> Seq.equal b1 b2)
let decode_u32_injective' (b1: bytes{Seq.length b1 == 4}) (b2: bytes{Seq.length b2 == 4})
: Lemma (decode_u32 b1 == decode_u32 b2 ==> Seq.equal b1 b2) = | false | null | true | if decode_u32 b1 = decode_u32 b2
then
(E.lemma_be_to_n_is_bounded b1;
E.lemma_be_to_n_is_bounded b2;
assert (U32.v (U32.uint_to_t (E.be_to_n b1)) == E.be_to_n b1);
assert (U32.v (U32.uint_to_t (E.be_to_n b2)) == E.be_to_n b2);
assert (E.be_to_n b1 == E.be_to_n b2);
E.be_to_n_inj b1 b2) | {
"checked_file": "LowParse.Spec.Int.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowParse.Spec.Seq.fst.checked",
"LowParse.Spec.Combinators.fsti.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": true,
"source_file": "LowParse.Spec.Int.fst"
} | [
"lemma"
] | [
"LowParse.Bytes.bytes",
"Prims.eq2",
"Prims.int",
"FStar.Seq.Base.length",
"LowParse.Bytes.byte",
"Prims.op_Equality",
"FStar.UInt32.t",
"LowParse.Spec.Int.decode_u32",
"FStar.Endianness.be_to_n_inj",
"Prims.unit",
"Prims._assert",
"Prims.nat",
"FStar.Endianness.be_to_n",
"Prims.l_or",
"Prims.b2t",
"Prims.op_GreaterThanOrEqual",
"FStar.UInt.size",
"FStar.UInt32.n",
"FStar.UInt32.v",
"FStar.UInt32.uint_to_t",
"FStar.Endianness.lemma_be_to_n_is_bounded",
"Prims.bool",
"Prims.l_True",
"Prims.squash",
"Prims.l_imp",
"FStar.Seq.Base.equal",
"Prims.Nil",
"FStar.Pervasives.pattern"
] | [] | module LowParse.Spec.Int
open LowParse.Spec.Combinators
let decode_u8
(b: bytes { Seq.length b == 1 } )
: Tot U8.t
= Seq.index b 0
let decode_u8_injective () : Lemma
(make_total_constant_size_parser_precond 1 U8.t decode_u8)
= ()
let tot_parse_u8 =
decode_u8_injective ();
tot_make_total_constant_size_parser 1 U8.t decode_u8
let parse_u8_spec
b
=
Seq.lemma_index_slice b 0 1 0;
E.reveal_be_to_n (Seq.slice b 0 1);
E.reveal_be_to_n (Seq.slice (Seq.slice b 0 1) 0 0)
let parse_u8_spec' b = ()
let serialize_u8 =
Seq.create 1
let serialize_u8_spec x = ()
let decode_u16
(b: bytes { Seq.length b == 2 } )
: GTot U16.t
= E.lemma_be_to_n_is_bounded b;
U16.uint_to_t (E.be_to_n b)
let decode_u16_injective'
(b1: bytes { Seq.length b1 == 2 } )
(b2: bytes { Seq.length b2 == 2 } )
: Lemma
(decode_u16 b1 == decode_u16 b2 ==> Seq.equal b1 b2)
= if decode_u16 b1 = decode_u16 b2
then begin
E.lemma_be_to_n_is_bounded b1;
E.lemma_be_to_n_is_bounded b2;
assert (U32.v (U32.uint_to_t (E.be_to_n b1)) == E.be_to_n b1);
assert (U32.v (U32.uint_to_t (E.be_to_n b2)) == E.be_to_n b2);
assert (E.be_to_n b1 == E.be_to_n b2);
E.be_to_n_inj b1 b2
end else ()
let decode_u16_injective
()
: Lemma
(make_total_constant_size_parser_precond 2 U16.t decode_u16)
= Classical.forall_intro_2 decode_u16_injective'
let parse_u16 =
decode_u16_injective ();
make_total_constant_size_parser 2 U16.t decode_u16
let parse_u16_spec
b
=
E.lemma_be_to_n_is_bounded (Seq.slice b 0 2)
let serialize_u16 =
(fun (x: U16.t) -> E.n_to_be 2 (U16.v x))
let decode_u32
(b: bytes { Seq.length b == 4 } )
: GTot U32.t
= E.lemma_be_to_n_is_bounded b;
U32.uint_to_t (E.be_to_n b)
let decode_u32_injective'
(b1: bytes { Seq.length b1 == 4 } )
(b2: bytes { Seq.length b2 == 4 } )
: Lemma | false | false | LowParse.Spec.Int.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val decode_u32_injective' (b1: bytes{Seq.length b1 == 4}) (b2: bytes{Seq.length b2 == 4})
: Lemma (decode_u32 b1 == decode_u32 b2 ==> Seq.equal b1 b2) | [] | LowParse.Spec.Int.decode_u32_injective' | {
"file_name": "src/lowparse/LowParse.Spec.Int.fst",
"git_rev": "446a08ce38df905547cf20f28c43776b22b8087a",
"git_url": "https://github.com/project-everest/everparse.git",
"project_name": "everparse"
} |
b1: LowParse.Bytes.bytes{FStar.Seq.Base.length b1 == 4} ->
b2: LowParse.Bytes.bytes{FStar.Seq.Base.length b2 == 4}
-> FStar.Pervasives.Lemma
(ensures
LowParse.Spec.Int.decode_u32 b1 == LowParse.Spec.Int.decode_u32 b2 ==>
FStar.Seq.Base.equal b1 b2) | {
"end_col": 13,
"end_line": 89,
"start_col": 2,
"start_line": 81
} |
FStar.Pervasives.Lemma | val decode_u64_injective' (b1: bytes{Seq.length b1 == 8}) (b2: bytes{Seq.length b2 == 8})
: Lemma (decode_u64 b1 == decode_u64 b2 ==> Seq.equal b1 b2) | [
{
"abbrev": false,
"full_module": "LowParse.Spec.Combinators",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.UInt16",
"short_module": "U16"
},
{
"abbrev": true,
"full_module": "FStar.UInt8",
"short_module": "U8"
},
{
"abbrev": true,
"full_module": "FStar.Endianness",
"short_module": "E"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "Seq"
},
{
"abbrev": false,
"full_module": "LowParse.Spec.Base",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Spec",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Spec",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let decode_u64_injective'
(b1: bytes { Seq.length b1 == 8 } )
(b2: bytes { Seq.length b2 == 8 } )
: Lemma
(decode_u64 b1 == decode_u64 b2 ==> Seq.equal b1 b2)
= if decode_u64 b1 = decode_u64 b2
then begin
E.lemma_be_to_n_is_bounded b1;
E.lemma_be_to_n_is_bounded b2;
assert (U64.v (U64.uint_to_t (E.be_to_n b1)) == E.be_to_n b1);
assert (U64.v (U64.uint_to_t (E.be_to_n b2)) == E.be_to_n b2);
assert (E.be_to_n b1 == E.be_to_n b2);
E.be_to_n_inj b1 b2
end else () | val decode_u64_injective' (b1: bytes{Seq.length b1 == 8}) (b2: bytes{Seq.length b2 == 8})
: Lemma (decode_u64 b1 == decode_u64 b2 ==> Seq.equal b1 b2)
let decode_u64_injective' (b1: bytes{Seq.length b1 == 8}) (b2: bytes{Seq.length b2 == 8})
: Lemma (decode_u64 b1 == decode_u64 b2 ==> Seq.equal b1 b2) = | false | null | true | if decode_u64 b1 = decode_u64 b2
then
(E.lemma_be_to_n_is_bounded b1;
E.lemma_be_to_n_is_bounded b2;
assert (U64.v (U64.uint_to_t (E.be_to_n b1)) == E.be_to_n b1);
assert (U64.v (U64.uint_to_t (E.be_to_n b2)) == E.be_to_n b2);
assert (E.be_to_n b1 == E.be_to_n b2);
E.be_to_n_inj b1 b2) | {
"checked_file": "LowParse.Spec.Int.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowParse.Spec.Seq.fst.checked",
"LowParse.Spec.Combinators.fsti.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": true,
"source_file": "LowParse.Spec.Int.fst"
} | [
"lemma"
] | [
"LowParse.Bytes.bytes",
"Prims.eq2",
"Prims.int",
"FStar.Seq.Base.length",
"LowParse.Bytes.byte",
"Prims.op_Equality",
"FStar.UInt64.t",
"LowParse.Spec.Int.decode_u64",
"FStar.Endianness.be_to_n_inj",
"Prims.unit",
"Prims._assert",
"Prims.nat",
"FStar.Endianness.be_to_n",
"Prims.l_or",
"Prims.b2t",
"Prims.op_GreaterThanOrEqual",
"FStar.UInt.size",
"FStar.UInt64.n",
"FStar.UInt64.v",
"FStar.UInt64.uint_to_t",
"FStar.Endianness.lemma_be_to_n_is_bounded",
"Prims.bool",
"Prims.l_True",
"Prims.squash",
"Prims.l_imp",
"FStar.Seq.Base.equal",
"Prims.Nil",
"FStar.Pervasives.pattern"
] | [] | module LowParse.Spec.Int
open LowParse.Spec.Combinators
let decode_u8
(b: bytes { Seq.length b == 1 } )
: Tot U8.t
= Seq.index b 0
let decode_u8_injective () : Lemma
(make_total_constant_size_parser_precond 1 U8.t decode_u8)
= ()
let tot_parse_u8 =
decode_u8_injective ();
tot_make_total_constant_size_parser 1 U8.t decode_u8
let parse_u8_spec
b
=
Seq.lemma_index_slice b 0 1 0;
E.reveal_be_to_n (Seq.slice b 0 1);
E.reveal_be_to_n (Seq.slice (Seq.slice b 0 1) 0 0)
let parse_u8_spec' b = ()
let serialize_u8 =
Seq.create 1
let serialize_u8_spec x = ()
let decode_u16
(b: bytes { Seq.length b == 2 } )
: GTot U16.t
= E.lemma_be_to_n_is_bounded b;
U16.uint_to_t (E.be_to_n b)
let decode_u16_injective'
(b1: bytes { Seq.length b1 == 2 } )
(b2: bytes { Seq.length b2 == 2 } )
: Lemma
(decode_u16 b1 == decode_u16 b2 ==> Seq.equal b1 b2)
= if decode_u16 b1 = decode_u16 b2
then begin
E.lemma_be_to_n_is_bounded b1;
E.lemma_be_to_n_is_bounded b2;
assert (U32.v (U32.uint_to_t (E.be_to_n b1)) == E.be_to_n b1);
assert (U32.v (U32.uint_to_t (E.be_to_n b2)) == E.be_to_n b2);
assert (E.be_to_n b1 == E.be_to_n b2);
E.be_to_n_inj b1 b2
end else ()
let decode_u16_injective
()
: Lemma
(make_total_constant_size_parser_precond 2 U16.t decode_u16)
= Classical.forall_intro_2 decode_u16_injective'
let parse_u16 =
decode_u16_injective ();
make_total_constant_size_parser 2 U16.t decode_u16
let parse_u16_spec
b
=
E.lemma_be_to_n_is_bounded (Seq.slice b 0 2)
let serialize_u16 =
(fun (x: U16.t) -> E.n_to_be 2 (U16.v x))
let decode_u32
(b: bytes { Seq.length b == 4 } )
: GTot U32.t
= E.lemma_be_to_n_is_bounded b;
U32.uint_to_t (E.be_to_n b)
let decode_u32_injective'
(b1: bytes { Seq.length b1 == 4 } )
(b2: bytes { Seq.length b2 == 4 } )
: Lemma
(decode_u32 b1 == decode_u32 b2 ==> Seq.equal b1 b2)
= if decode_u32 b1 = decode_u32 b2
then begin
E.lemma_be_to_n_is_bounded b1;
E.lemma_be_to_n_is_bounded b2;
assert (U32.v (U32.uint_to_t (E.be_to_n b1)) == E.be_to_n b1);
assert (U32.v (U32.uint_to_t (E.be_to_n b2)) == E.be_to_n b2);
assert (E.be_to_n b1 == E.be_to_n b2);
E.be_to_n_inj b1 b2
end else ()
let decode_u32_injective () : Lemma
(make_total_constant_size_parser_precond 4 U32.t decode_u32)
= Classical.forall_intro_2 decode_u32_injective'
let parse_u32 =
decode_u32_injective ();
make_total_constant_size_parser 4 U32.t decode_u32
let parse_u32_spec
b
=
E.lemma_be_to_n_is_bounded (Seq.slice b 0 4)
let serialize_u32 =
(fun (x: U32.t) -> E.n_to_be 4 (U32.v x))
let decode_u64
(b: bytes { Seq.length b == 8 } )
: GTot U64.t
= E.lemma_be_to_n_is_bounded b;
U64.uint_to_t (E.be_to_n b)
let decode_u64_injective'
(b1: bytes { Seq.length b1 == 8 } )
(b2: bytes { Seq.length b2 == 8 } )
: Lemma | false | false | LowParse.Spec.Int.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val decode_u64_injective' (b1: bytes{Seq.length b1 == 8}) (b2: bytes{Seq.length b2 == 8})
: Lemma (decode_u64 b1 == decode_u64 b2 ==> Seq.equal b1 b2) | [] | LowParse.Spec.Int.decode_u64_injective' | {
"file_name": "src/lowparse/LowParse.Spec.Int.fst",
"git_rev": "446a08ce38df905547cf20f28c43776b22b8087a",
"git_url": "https://github.com/project-everest/everparse.git",
"project_name": "everparse"
} |
b1: LowParse.Bytes.bytes{FStar.Seq.Base.length b1 == 8} ->
b2: LowParse.Bytes.bytes{FStar.Seq.Base.length b2 == 8}
-> FStar.Pervasives.Lemma
(ensures
LowParse.Spec.Int.decode_u64 b1 == LowParse.Spec.Int.decode_u64 b2 ==>
FStar.Seq.Base.equal b1 b2) | {
"end_col": 13,
"end_line": 126,
"start_col": 2,
"start_line": 118
} |
FStar.Pervasives.Lemma | val decode_u64_le_injective' (b1: bytes{Seq.length b1 == 8}) (b2: bytes{Seq.length b2 == 8})
: Lemma (decode_u64_le b1 == decode_u64_le b2 ==> Seq.equal b1 b2) | [
{
"abbrev": false,
"full_module": "LowParse.Spec.Combinators",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.UInt16",
"short_module": "U16"
},
{
"abbrev": true,
"full_module": "FStar.UInt8",
"short_module": "U8"
},
{
"abbrev": true,
"full_module": "FStar.Endianness",
"short_module": "E"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "Seq"
},
{
"abbrev": false,
"full_module": "LowParse.Spec.Base",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Spec",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Spec",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let decode_u64_le_injective'
(b1: bytes { Seq.length b1 == 8 } )
(b2: bytes { Seq.length b2 == 8 } )
: Lemma
(decode_u64_le b1 == decode_u64_le b2 ==> Seq.equal b1 b2)
= if decode_u64_le b1 = decode_u64_le b2
then begin
E.lemma_le_to_n_is_bounded b1;
E.lemma_le_to_n_is_bounded b2;
assert (U64.v (U64.uint_to_t (E.le_to_n b1)) == E.le_to_n b1);
assert (U64.v (U64.uint_to_t (E.le_to_n b2)) == E.le_to_n b2);
assert (E.le_to_n b1 == E.le_to_n b2);
E.le_to_n_inj b1 b2
end else () | val decode_u64_le_injective' (b1: bytes{Seq.length b1 == 8}) (b2: bytes{Seq.length b2 == 8})
: Lemma (decode_u64_le b1 == decode_u64_le b2 ==> Seq.equal b1 b2)
let decode_u64_le_injective' (b1: bytes{Seq.length b1 == 8}) (b2: bytes{Seq.length b2 == 8})
: Lemma (decode_u64_le b1 == decode_u64_le b2 ==> Seq.equal b1 b2) = | false | null | true | if decode_u64_le b1 = decode_u64_le b2
then
(E.lemma_le_to_n_is_bounded b1;
E.lemma_le_to_n_is_bounded b2;
assert (U64.v (U64.uint_to_t (E.le_to_n b1)) == E.le_to_n b1);
assert (U64.v (U64.uint_to_t (E.le_to_n b2)) == E.le_to_n b2);
assert (E.le_to_n b1 == E.le_to_n b2);
E.le_to_n_inj b1 b2) | {
"checked_file": "LowParse.Spec.Int.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowParse.Spec.Seq.fst.checked",
"LowParse.Spec.Combinators.fsti.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": true,
"source_file": "LowParse.Spec.Int.fst"
} | [
"lemma"
] | [
"LowParse.Bytes.bytes",
"Prims.eq2",
"Prims.int",
"FStar.Seq.Base.length",
"LowParse.Bytes.byte",
"Prims.op_Equality",
"FStar.UInt64.t",
"LowParse.Spec.Int.decode_u64_le",
"FStar.Endianness.le_to_n_inj",
"Prims.unit",
"Prims._assert",
"Prims.nat",
"FStar.Endianness.le_to_n",
"Prims.l_or",
"Prims.b2t",
"Prims.op_GreaterThanOrEqual",
"FStar.UInt.size",
"FStar.UInt64.n",
"FStar.UInt64.v",
"FStar.UInt64.uint_to_t",
"FStar.Endianness.lemma_le_to_n_is_bounded",
"Prims.bool",
"Prims.l_True",
"Prims.squash",
"Prims.l_imp",
"FStar.Seq.Base.equal",
"Prims.Nil",
"FStar.Pervasives.pattern"
] | [] | module LowParse.Spec.Int
open LowParse.Spec.Combinators
let decode_u8
(b: bytes { Seq.length b == 1 } )
: Tot U8.t
= Seq.index b 0
let decode_u8_injective () : Lemma
(make_total_constant_size_parser_precond 1 U8.t decode_u8)
= ()
let tot_parse_u8 =
decode_u8_injective ();
tot_make_total_constant_size_parser 1 U8.t decode_u8
let parse_u8_spec
b
=
Seq.lemma_index_slice b 0 1 0;
E.reveal_be_to_n (Seq.slice b 0 1);
E.reveal_be_to_n (Seq.slice (Seq.slice b 0 1) 0 0)
let parse_u8_spec' b = ()
let serialize_u8 =
Seq.create 1
let serialize_u8_spec x = ()
let decode_u16
(b: bytes { Seq.length b == 2 } )
: GTot U16.t
= E.lemma_be_to_n_is_bounded b;
U16.uint_to_t (E.be_to_n b)
let decode_u16_injective'
(b1: bytes { Seq.length b1 == 2 } )
(b2: bytes { Seq.length b2 == 2 } )
: Lemma
(decode_u16 b1 == decode_u16 b2 ==> Seq.equal b1 b2)
= if decode_u16 b1 = decode_u16 b2
then begin
E.lemma_be_to_n_is_bounded b1;
E.lemma_be_to_n_is_bounded b2;
assert (U32.v (U32.uint_to_t (E.be_to_n b1)) == E.be_to_n b1);
assert (U32.v (U32.uint_to_t (E.be_to_n b2)) == E.be_to_n b2);
assert (E.be_to_n b1 == E.be_to_n b2);
E.be_to_n_inj b1 b2
end else ()
let decode_u16_injective
()
: Lemma
(make_total_constant_size_parser_precond 2 U16.t decode_u16)
= Classical.forall_intro_2 decode_u16_injective'
let parse_u16 =
decode_u16_injective ();
make_total_constant_size_parser 2 U16.t decode_u16
let parse_u16_spec
b
=
E.lemma_be_to_n_is_bounded (Seq.slice b 0 2)
let serialize_u16 =
(fun (x: U16.t) -> E.n_to_be 2 (U16.v x))
let decode_u32
(b: bytes { Seq.length b == 4 } )
: GTot U32.t
= E.lemma_be_to_n_is_bounded b;
U32.uint_to_t (E.be_to_n b)
let decode_u32_injective'
(b1: bytes { Seq.length b1 == 4 } )
(b2: bytes { Seq.length b2 == 4 } )
: Lemma
(decode_u32 b1 == decode_u32 b2 ==> Seq.equal b1 b2)
= if decode_u32 b1 = decode_u32 b2
then begin
E.lemma_be_to_n_is_bounded b1;
E.lemma_be_to_n_is_bounded b2;
assert (U32.v (U32.uint_to_t (E.be_to_n b1)) == E.be_to_n b1);
assert (U32.v (U32.uint_to_t (E.be_to_n b2)) == E.be_to_n b2);
assert (E.be_to_n b1 == E.be_to_n b2);
E.be_to_n_inj b1 b2
end else ()
let decode_u32_injective () : Lemma
(make_total_constant_size_parser_precond 4 U32.t decode_u32)
= Classical.forall_intro_2 decode_u32_injective'
let parse_u32 =
decode_u32_injective ();
make_total_constant_size_parser 4 U32.t decode_u32
let parse_u32_spec
b
=
E.lemma_be_to_n_is_bounded (Seq.slice b 0 4)
let serialize_u32 =
(fun (x: U32.t) -> E.n_to_be 4 (U32.v x))
let decode_u64
(b: bytes { Seq.length b == 8 } )
: GTot U64.t
= E.lemma_be_to_n_is_bounded b;
U64.uint_to_t (E.be_to_n b)
let decode_u64_injective'
(b1: bytes { Seq.length b1 == 8 } )
(b2: bytes { Seq.length b2 == 8 } )
: Lemma
(decode_u64 b1 == decode_u64 b2 ==> Seq.equal b1 b2)
= if decode_u64 b1 = decode_u64 b2
then begin
E.lemma_be_to_n_is_bounded b1;
E.lemma_be_to_n_is_bounded b2;
assert (U64.v (U64.uint_to_t (E.be_to_n b1)) == E.be_to_n b1);
assert (U64.v (U64.uint_to_t (E.be_to_n b2)) == E.be_to_n b2);
assert (E.be_to_n b1 == E.be_to_n b2);
E.be_to_n_inj b1 b2
end else ()
let decode_u64_injective () : Lemma
(make_total_constant_size_parser_precond 8 U64.t decode_u64)
= Classical.forall_intro_2 decode_u64_injective'
let parse_u64 =
decode_u64_injective ();
make_total_constant_size_parser 8 U64.t decode_u64
let parse_u64_spec
b
= E.lemma_be_to_n_is_bounded (Seq.slice b 0 8)
let serialize_u64 =
(fun (x: U64.t) -> E.n_to_be 8 (U64.v x))
let decode_u64_le
(b: bytes { Seq.length b == 8 } )
: GTot U64.t
= E.lemma_le_to_n_is_bounded b;
U64.uint_to_t (E.le_to_n b)
let decode_u64_le_injective'
(b1: bytes { Seq.length b1 == 8 } )
(b2: bytes { Seq.length b2 == 8 } )
: Lemma | false | false | LowParse.Spec.Int.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val decode_u64_le_injective' (b1: bytes{Seq.length b1 == 8}) (b2: bytes{Seq.length b2 == 8})
: Lemma (decode_u64_le b1 == decode_u64_le b2 ==> Seq.equal b1 b2) | [] | LowParse.Spec.Int.decode_u64_le_injective' | {
"file_name": "src/lowparse/LowParse.Spec.Int.fst",
"git_rev": "446a08ce38df905547cf20f28c43776b22b8087a",
"git_url": "https://github.com/project-everest/everparse.git",
"project_name": "everparse"
} |
b1: LowParse.Bytes.bytes{FStar.Seq.Base.length b1 == 8} ->
b2: LowParse.Bytes.bytes{FStar.Seq.Base.length b2 == 8}
-> FStar.Pervasives.Lemma
(ensures
LowParse.Spec.Int.decode_u64_le b1 == LowParse.Spec.Int.decode_u64_le b2 ==>
FStar.Seq.Base.equal b1 b2) | {
"end_col": 13,
"end_line": 162,
"start_col": 2,
"start_line": 154
} |
FStar.Pervasives.Lemma | val decode_u16_injective' (b1: bytes{Seq.length b1 == 2}) (b2: bytes{Seq.length b2 == 2})
: Lemma (decode_u16 b1 == decode_u16 b2 ==> Seq.equal b1 b2) | [
{
"abbrev": false,
"full_module": "LowParse.Spec.Combinators",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.UInt16",
"short_module": "U16"
},
{
"abbrev": true,
"full_module": "FStar.UInt8",
"short_module": "U8"
},
{
"abbrev": true,
"full_module": "FStar.Endianness",
"short_module": "E"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "Seq"
},
{
"abbrev": false,
"full_module": "LowParse.Spec.Base",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Spec",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Spec",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let decode_u16_injective'
(b1: bytes { Seq.length b1 == 2 } )
(b2: bytes { Seq.length b2 == 2 } )
: Lemma
(decode_u16 b1 == decode_u16 b2 ==> Seq.equal b1 b2)
= if decode_u16 b1 = decode_u16 b2
then begin
E.lemma_be_to_n_is_bounded b1;
E.lemma_be_to_n_is_bounded b2;
assert (U32.v (U32.uint_to_t (E.be_to_n b1)) == E.be_to_n b1);
assert (U32.v (U32.uint_to_t (E.be_to_n b2)) == E.be_to_n b2);
assert (E.be_to_n b1 == E.be_to_n b2);
E.be_to_n_inj b1 b2
end else () | val decode_u16_injective' (b1: bytes{Seq.length b1 == 2}) (b2: bytes{Seq.length b2 == 2})
: Lemma (decode_u16 b1 == decode_u16 b2 ==> Seq.equal b1 b2)
let decode_u16_injective' (b1: bytes{Seq.length b1 == 2}) (b2: bytes{Seq.length b2 == 2})
: Lemma (decode_u16 b1 == decode_u16 b2 ==> Seq.equal b1 b2) = | false | null | true | if decode_u16 b1 = decode_u16 b2
then
(E.lemma_be_to_n_is_bounded b1;
E.lemma_be_to_n_is_bounded b2;
assert (U32.v (U32.uint_to_t (E.be_to_n b1)) == E.be_to_n b1);
assert (U32.v (U32.uint_to_t (E.be_to_n b2)) == E.be_to_n b2);
assert (E.be_to_n b1 == E.be_to_n b2);
E.be_to_n_inj b1 b2) | {
"checked_file": "LowParse.Spec.Int.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowParse.Spec.Seq.fst.checked",
"LowParse.Spec.Combinators.fsti.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": true,
"source_file": "LowParse.Spec.Int.fst"
} | [
"lemma"
] | [
"LowParse.Bytes.bytes",
"Prims.eq2",
"Prims.int",
"FStar.Seq.Base.length",
"LowParse.Bytes.byte",
"Prims.op_Equality",
"FStar.UInt16.t",
"LowParse.Spec.Int.decode_u16",
"FStar.Endianness.be_to_n_inj",
"Prims.unit",
"Prims._assert",
"Prims.nat",
"FStar.Endianness.be_to_n",
"Prims.l_or",
"Prims.b2t",
"Prims.op_GreaterThanOrEqual",
"FStar.UInt.size",
"FStar.UInt32.n",
"FStar.UInt32.v",
"FStar.UInt32.uint_to_t",
"FStar.Endianness.lemma_be_to_n_is_bounded",
"Prims.bool",
"Prims.l_True",
"Prims.squash",
"Prims.l_imp",
"FStar.Seq.Base.equal",
"Prims.Nil",
"FStar.Pervasives.pattern"
] | [] | module LowParse.Spec.Int
open LowParse.Spec.Combinators
let decode_u8
(b: bytes { Seq.length b == 1 } )
: Tot U8.t
= Seq.index b 0
let decode_u8_injective () : Lemma
(make_total_constant_size_parser_precond 1 U8.t decode_u8)
= ()
let tot_parse_u8 =
decode_u8_injective ();
tot_make_total_constant_size_parser 1 U8.t decode_u8
let parse_u8_spec
b
=
Seq.lemma_index_slice b 0 1 0;
E.reveal_be_to_n (Seq.slice b 0 1);
E.reveal_be_to_n (Seq.slice (Seq.slice b 0 1) 0 0)
let parse_u8_spec' b = ()
let serialize_u8 =
Seq.create 1
let serialize_u8_spec x = ()
let decode_u16
(b: bytes { Seq.length b == 2 } )
: GTot U16.t
= E.lemma_be_to_n_is_bounded b;
U16.uint_to_t (E.be_to_n b)
let decode_u16_injective'
(b1: bytes { Seq.length b1 == 2 } )
(b2: bytes { Seq.length b2 == 2 } )
: Lemma | false | false | LowParse.Spec.Int.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val decode_u16_injective' (b1: bytes{Seq.length b1 == 2}) (b2: bytes{Seq.length b2 == 2})
: Lemma (decode_u16 b1 == decode_u16 b2 ==> Seq.equal b1 b2) | [] | LowParse.Spec.Int.decode_u16_injective' | {
"file_name": "src/lowparse/LowParse.Spec.Int.fst",
"git_rev": "446a08ce38df905547cf20f28c43776b22b8087a",
"git_url": "https://github.com/project-everest/everparse.git",
"project_name": "everparse"
} |
b1: LowParse.Bytes.bytes{FStar.Seq.Base.length b1 == 2} ->
b2: LowParse.Bytes.bytes{FStar.Seq.Base.length b2 == 2}
-> FStar.Pervasives.Lemma
(ensures
LowParse.Spec.Int.decode_u16 b1 == LowParse.Spec.Int.decode_u16 b2 ==>
FStar.Seq.Base.equal b1 b2) | {
"end_col": 13,
"end_line": 50,
"start_col": 2,
"start_line": 42
} |
Prims.Tot | val serialize_u8 : serializer parse_u8 | [
{
"abbrev": false,
"full_module": "LowParse.Spec.Combinators",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.UInt16",
"short_module": "U16"
},
{
"abbrev": true,
"full_module": "FStar.UInt8",
"short_module": "U8"
},
{
"abbrev": true,
"full_module": "FStar.Endianness",
"short_module": "E"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "Seq"
},
{
"abbrev": false,
"full_module": "LowParse.Spec.Base",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Spec",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Spec",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let serialize_u8 =
Seq.create 1 | val serialize_u8 : serializer parse_u8
let serialize_u8 = | false | null | false | Seq.create 1 | {
"checked_file": "LowParse.Spec.Int.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowParse.Spec.Seq.fst.checked",
"LowParse.Spec.Combinators.fsti.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": true,
"source_file": "LowParse.Spec.Int.fst"
} | [
"total"
] | [
"FStar.Seq.Base.create",
"LowParse.Bytes.byte"
] | [] | module LowParse.Spec.Int
open LowParse.Spec.Combinators
let decode_u8
(b: bytes { Seq.length b == 1 } )
: Tot U8.t
= Seq.index b 0
let decode_u8_injective () : Lemma
(make_total_constant_size_parser_precond 1 U8.t decode_u8)
= ()
let tot_parse_u8 =
decode_u8_injective ();
tot_make_total_constant_size_parser 1 U8.t decode_u8
let parse_u8_spec
b
=
Seq.lemma_index_slice b 0 1 0;
E.reveal_be_to_n (Seq.slice b 0 1);
E.reveal_be_to_n (Seq.slice (Seq.slice b 0 1) 0 0)
let parse_u8_spec' b = () | false | true | LowParse.Spec.Int.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val serialize_u8 : serializer parse_u8 | [] | LowParse.Spec.Int.serialize_u8 | {
"file_name": "src/lowparse/LowParse.Spec.Int.fst",
"git_rev": "446a08ce38df905547cf20f28c43776b22b8087a",
"git_url": "https://github.com/project-everest/everparse.git",
"project_name": "everparse"
} | LowParse.Spec.Base.serializer LowParse.Spec.Int.parse_u8 | {
"end_col": 14,
"end_line": 27,
"start_col": 2,
"start_line": 27
} |
Prims.Tot | val serialize_u32 : serializer parse_u32 | [
{
"abbrev": false,
"full_module": "LowParse.Spec.Combinators",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.UInt16",
"short_module": "U16"
},
{
"abbrev": true,
"full_module": "FStar.UInt8",
"short_module": "U8"
},
{
"abbrev": true,
"full_module": "FStar.Endianness",
"short_module": "E"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "Seq"
},
{
"abbrev": false,
"full_module": "LowParse.Spec.Base",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Spec",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Spec",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let serialize_u32 =
(fun (x: U32.t) -> E.n_to_be 4 (U32.v x)) | val serialize_u32 : serializer parse_u32
let serialize_u32 = | false | null | false | (fun (x: U32.t) -> E.n_to_be 4 (U32.v x)) | {
"checked_file": "LowParse.Spec.Int.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowParse.Spec.Seq.fst.checked",
"LowParse.Spec.Combinators.fsti.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": true,
"source_file": "LowParse.Spec.Int.fst"
} | [
"total"
] | [
"FStar.UInt32.t",
"FStar.Endianness.n_to_be",
"FStar.UInt32.v",
"FStar.Endianness.bytes",
"Prims.l_and",
"Prims.eq2",
"Prims.nat",
"FStar.Seq.Base.length",
"FStar.UInt8.t",
"FStar.Endianness.be_to_n"
] | [] | module LowParse.Spec.Int
open LowParse.Spec.Combinators
let decode_u8
(b: bytes { Seq.length b == 1 } )
: Tot U8.t
= Seq.index b 0
let decode_u8_injective () : Lemma
(make_total_constant_size_parser_precond 1 U8.t decode_u8)
= ()
let tot_parse_u8 =
decode_u8_injective ();
tot_make_total_constant_size_parser 1 U8.t decode_u8
let parse_u8_spec
b
=
Seq.lemma_index_slice b 0 1 0;
E.reveal_be_to_n (Seq.slice b 0 1);
E.reveal_be_to_n (Seq.slice (Seq.slice b 0 1) 0 0)
let parse_u8_spec' b = ()
let serialize_u8 =
Seq.create 1
let serialize_u8_spec x = ()
let decode_u16
(b: bytes { Seq.length b == 2 } )
: GTot U16.t
= E.lemma_be_to_n_is_bounded b;
U16.uint_to_t (E.be_to_n b)
let decode_u16_injective'
(b1: bytes { Seq.length b1 == 2 } )
(b2: bytes { Seq.length b2 == 2 } )
: Lemma
(decode_u16 b1 == decode_u16 b2 ==> Seq.equal b1 b2)
= if decode_u16 b1 = decode_u16 b2
then begin
E.lemma_be_to_n_is_bounded b1;
E.lemma_be_to_n_is_bounded b2;
assert (U32.v (U32.uint_to_t (E.be_to_n b1)) == E.be_to_n b1);
assert (U32.v (U32.uint_to_t (E.be_to_n b2)) == E.be_to_n b2);
assert (E.be_to_n b1 == E.be_to_n b2);
E.be_to_n_inj b1 b2
end else ()
let decode_u16_injective
()
: Lemma
(make_total_constant_size_parser_precond 2 U16.t decode_u16)
= Classical.forall_intro_2 decode_u16_injective'
let parse_u16 =
decode_u16_injective ();
make_total_constant_size_parser 2 U16.t decode_u16
let parse_u16_spec
b
=
E.lemma_be_to_n_is_bounded (Seq.slice b 0 2)
let serialize_u16 =
(fun (x: U16.t) -> E.n_to_be 2 (U16.v x))
let decode_u32
(b: bytes { Seq.length b == 4 } )
: GTot U32.t
= E.lemma_be_to_n_is_bounded b;
U32.uint_to_t (E.be_to_n b)
let decode_u32_injective'
(b1: bytes { Seq.length b1 == 4 } )
(b2: bytes { Seq.length b2 == 4 } )
: Lemma
(decode_u32 b1 == decode_u32 b2 ==> Seq.equal b1 b2)
= if decode_u32 b1 = decode_u32 b2
then begin
E.lemma_be_to_n_is_bounded b1;
E.lemma_be_to_n_is_bounded b2;
assert (U32.v (U32.uint_to_t (E.be_to_n b1)) == E.be_to_n b1);
assert (U32.v (U32.uint_to_t (E.be_to_n b2)) == E.be_to_n b2);
assert (E.be_to_n b1 == E.be_to_n b2);
E.be_to_n_inj b1 b2
end else ()
let decode_u32_injective () : Lemma
(make_total_constant_size_parser_precond 4 U32.t decode_u32)
= Classical.forall_intro_2 decode_u32_injective'
let parse_u32 =
decode_u32_injective ();
make_total_constant_size_parser 4 U32.t decode_u32
let parse_u32_spec
b
=
E.lemma_be_to_n_is_bounded (Seq.slice b 0 4) | false | true | LowParse.Spec.Int.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val serialize_u32 : serializer parse_u32 | [] | LowParse.Spec.Int.serialize_u32 | {
"file_name": "src/lowparse/LowParse.Spec.Int.fst",
"git_rev": "446a08ce38df905547cf20f28c43776b22b8087a",
"git_url": "https://github.com/project-everest/everparse.git",
"project_name": "everparse"
} | LowParse.Spec.Base.serializer LowParse.Spec.Int.parse_u32 | {
"end_col": 43,
"end_line": 105,
"start_col": 2,
"start_line": 105
} |
Prims.Tot | val serialize_u64 : serializer parse_u64 | [
{
"abbrev": false,
"full_module": "LowParse.Spec.Combinators",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.UInt16",
"short_module": "U16"
},
{
"abbrev": true,
"full_module": "FStar.UInt8",
"short_module": "U8"
},
{
"abbrev": true,
"full_module": "FStar.Endianness",
"short_module": "E"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "Seq"
},
{
"abbrev": false,
"full_module": "LowParse.Spec.Base",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Spec",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Spec",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let serialize_u64 =
(fun (x: U64.t) -> E.n_to_be 8 (U64.v x)) | val serialize_u64 : serializer parse_u64
let serialize_u64 = | false | null | false | (fun (x: U64.t) -> E.n_to_be 8 (U64.v x)) | {
"checked_file": "LowParse.Spec.Int.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowParse.Spec.Seq.fst.checked",
"LowParse.Spec.Combinators.fsti.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": true,
"source_file": "LowParse.Spec.Int.fst"
} | [
"total"
] | [
"FStar.UInt64.t",
"FStar.Endianness.n_to_be",
"FStar.UInt64.v",
"FStar.Endianness.bytes",
"Prims.l_and",
"Prims.eq2",
"Prims.nat",
"FStar.Seq.Base.length",
"FStar.UInt8.t",
"FStar.Endianness.be_to_n"
] | [] | module LowParse.Spec.Int
open LowParse.Spec.Combinators
let decode_u8
(b: bytes { Seq.length b == 1 } )
: Tot U8.t
= Seq.index b 0
let decode_u8_injective () : Lemma
(make_total_constant_size_parser_precond 1 U8.t decode_u8)
= ()
let tot_parse_u8 =
decode_u8_injective ();
tot_make_total_constant_size_parser 1 U8.t decode_u8
let parse_u8_spec
b
=
Seq.lemma_index_slice b 0 1 0;
E.reveal_be_to_n (Seq.slice b 0 1);
E.reveal_be_to_n (Seq.slice (Seq.slice b 0 1) 0 0)
let parse_u8_spec' b = ()
let serialize_u8 =
Seq.create 1
let serialize_u8_spec x = ()
let decode_u16
(b: bytes { Seq.length b == 2 } )
: GTot U16.t
= E.lemma_be_to_n_is_bounded b;
U16.uint_to_t (E.be_to_n b)
let decode_u16_injective'
(b1: bytes { Seq.length b1 == 2 } )
(b2: bytes { Seq.length b2 == 2 } )
: Lemma
(decode_u16 b1 == decode_u16 b2 ==> Seq.equal b1 b2)
= if decode_u16 b1 = decode_u16 b2
then begin
E.lemma_be_to_n_is_bounded b1;
E.lemma_be_to_n_is_bounded b2;
assert (U32.v (U32.uint_to_t (E.be_to_n b1)) == E.be_to_n b1);
assert (U32.v (U32.uint_to_t (E.be_to_n b2)) == E.be_to_n b2);
assert (E.be_to_n b1 == E.be_to_n b2);
E.be_to_n_inj b1 b2
end else ()
let decode_u16_injective
()
: Lemma
(make_total_constant_size_parser_precond 2 U16.t decode_u16)
= Classical.forall_intro_2 decode_u16_injective'
let parse_u16 =
decode_u16_injective ();
make_total_constant_size_parser 2 U16.t decode_u16
let parse_u16_spec
b
=
E.lemma_be_to_n_is_bounded (Seq.slice b 0 2)
let serialize_u16 =
(fun (x: U16.t) -> E.n_to_be 2 (U16.v x))
let decode_u32
(b: bytes { Seq.length b == 4 } )
: GTot U32.t
= E.lemma_be_to_n_is_bounded b;
U32.uint_to_t (E.be_to_n b)
let decode_u32_injective'
(b1: bytes { Seq.length b1 == 4 } )
(b2: bytes { Seq.length b2 == 4 } )
: Lemma
(decode_u32 b1 == decode_u32 b2 ==> Seq.equal b1 b2)
= if decode_u32 b1 = decode_u32 b2
then begin
E.lemma_be_to_n_is_bounded b1;
E.lemma_be_to_n_is_bounded b2;
assert (U32.v (U32.uint_to_t (E.be_to_n b1)) == E.be_to_n b1);
assert (U32.v (U32.uint_to_t (E.be_to_n b2)) == E.be_to_n b2);
assert (E.be_to_n b1 == E.be_to_n b2);
E.be_to_n_inj b1 b2
end else ()
let decode_u32_injective () : Lemma
(make_total_constant_size_parser_precond 4 U32.t decode_u32)
= Classical.forall_intro_2 decode_u32_injective'
let parse_u32 =
decode_u32_injective ();
make_total_constant_size_parser 4 U32.t decode_u32
let parse_u32_spec
b
=
E.lemma_be_to_n_is_bounded (Seq.slice b 0 4)
let serialize_u32 =
(fun (x: U32.t) -> E.n_to_be 4 (U32.v x))
let decode_u64
(b: bytes { Seq.length b == 8 } )
: GTot U64.t
= E.lemma_be_to_n_is_bounded b;
U64.uint_to_t (E.be_to_n b)
let decode_u64_injective'
(b1: bytes { Seq.length b1 == 8 } )
(b2: bytes { Seq.length b2 == 8 } )
: Lemma
(decode_u64 b1 == decode_u64 b2 ==> Seq.equal b1 b2)
= if decode_u64 b1 = decode_u64 b2
then begin
E.lemma_be_to_n_is_bounded b1;
E.lemma_be_to_n_is_bounded b2;
assert (U64.v (U64.uint_to_t (E.be_to_n b1)) == E.be_to_n b1);
assert (U64.v (U64.uint_to_t (E.be_to_n b2)) == E.be_to_n b2);
assert (E.be_to_n b1 == E.be_to_n b2);
E.be_to_n_inj b1 b2
end else ()
let decode_u64_injective () : Lemma
(make_total_constant_size_parser_precond 8 U64.t decode_u64)
= Classical.forall_intro_2 decode_u64_injective'
let parse_u64 =
decode_u64_injective ();
make_total_constant_size_parser 8 U64.t decode_u64
let parse_u64_spec
b
= E.lemma_be_to_n_is_bounded (Seq.slice b 0 8) | false | true | LowParse.Spec.Int.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val serialize_u64 : serializer parse_u64 | [] | LowParse.Spec.Int.serialize_u64 | {
"file_name": "src/lowparse/LowParse.Spec.Int.fst",
"git_rev": "446a08ce38df905547cf20f28c43776b22b8087a",
"git_url": "https://github.com/project-everest/everparse.git",
"project_name": "everparse"
} | LowParse.Spec.Base.serializer LowParse.Spec.Int.parse_u64 | {
"end_col": 43,
"end_line": 141,
"start_col": 2,
"start_line": 141
} |
Prims.Tot | val serialize_u16 : serializer parse_u16 | [
{
"abbrev": false,
"full_module": "LowParse.Spec.Combinators",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.UInt16",
"short_module": "U16"
},
{
"abbrev": true,
"full_module": "FStar.UInt8",
"short_module": "U8"
},
{
"abbrev": true,
"full_module": "FStar.Endianness",
"short_module": "E"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "Seq"
},
{
"abbrev": false,
"full_module": "LowParse.Spec.Base",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Spec",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Spec",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let serialize_u16 =
(fun (x: U16.t) -> E.n_to_be 2 (U16.v x)) | val serialize_u16 : serializer parse_u16
let serialize_u16 = | false | null | false | (fun (x: U16.t) -> E.n_to_be 2 (U16.v x)) | {
"checked_file": "LowParse.Spec.Int.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowParse.Spec.Seq.fst.checked",
"LowParse.Spec.Combinators.fsti.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": true,
"source_file": "LowParse.Spec.Int.fst"
} | [
"total"
] | [
"FStar.UInt16.t",
"FStar.Endianness.n_to_be",
"FStar.UInt16.v",
"FStar.Endianness.bytes",
"Prims.l_and",
"Prims.eq2",
"Prims.nat",
"FStar.Seq.Base.length",
"FStar.UInt8.t",
"FStar.Endianness.be_to_n"
] | [] | module LowParse.Spec.Int
open LowParse.Spec.Combinators
let decode_u8
(b: bytes { Seq.length b == 1 } )
: Tot U8.t
= Seq.index b 0
let decode_u8_injective () : Lemma
(make_total_constant_size_parser_precond 1 U8.t decode_u8)
= ()
let tot_parse_u8 =
decode_u8_injective ();
tot_make_total_constant_size_parser 1 U8.t decode_u8
let parse_u8_spec
b
=
Seq.lemma_index_slice b 0 1 0;
E.reveal_be_to_n (Seq.slice b 0 1);
E.reveal_be_to_n (Seq.slice (Seq.slice b 0 1) 0 0)
let parse_u8_spec' b = ()
let serialize_u8 =
Seq.create 1
let serialize_u8_spec x = ()
let decode_u16
(b: bytes { Seq.length b == 2 } )
: GTot U16.t
= E.lemma_be_to_n_is_bounded b;
U16.uint_to_t (E.be_to_n b)
let decode_u16_injective'
(b1: bytes { Seq.length b1 == 2 } )
(b2: bytes { Seq.length b2 == 2 } )
: Lemma
(decode_u16 b1 == decode_u16 b2 ==> Seq.equal b1 b2)
= if decode_u16 b1 = decode_u16 b2
then begin
E.lemma_be_to_n_is_bounded b1;
E.lemma_be_to_n_is_bounded b2;
assert (U32.v (U32.uint_to_t (E.be_to_n b1)) == E.be_to_n b1);
assert (U32.v (U32.uint_to_t (E.be_to_n b2)) == E.be_to_n b2);
assert (E.be_to_n b1 == E.be_to_n b2);
E.be_to_n_inj b1 b2
end else ()
let decode_u16_injective
()
: Lemma
(make_total_constant_size_parser_precond 2 U16.t decode_u16)
= Classical.forall_intro_2 decode_u16_injective'
let parse_u16 =
decode_u16_injective ();
make_total_constant_size_parser 2 U16.t decode_u16
let parse_u16_spec
b
=
E.lemma_be_to_n_is_bounded (Seq.slice b 0 2) | false | true | LowParse.Spec.Int.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val serialize_u16 : serializer parse_u16 | [] | LowParse.Spec.Int.serialize_u16 | {
"file_name": "src/lowparse/LowParse.Spec.Int.fst",
"git_rev": "446a08ce38df905547cf20f28c43776b22b8087a",
"git_url": "https://github.com/project-everest/everparse.git",
"project_name": "everparse"
} | LowParse.Spec.Base.serializer LowParse.Spec.Int.parse_u16 | {
"end_col": 43,
"end_line": 68,
"start_col": 2,
"start_line": 68
} |
Prims.Tot | val serialize_u64_le : serializer parse_u64_le | [
{
"abbrev": false,
"full_module": "LowParse.Spec.Combinators",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.UInt16",
"short_module": "U16"
},
{
"abbrev": true,
"full_module": "FStar.UInt8",
"short_module": "U8"
},
{
"abbrev": true,
"full_module": "FStar.Endianness",
"short_module": "E"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "Seq"
},
{
"abbrev": false,
"full_module": "LowParse.Spec.Base",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Spec",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Spec",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let serialize_u64_le =
(fun (x: U64.t) -> E.n_to_le 8 (U64.v x)) | val serialize_u64_le : serializer parse_u64_le
let serialize_u64_le = | false | null | false | (fun (x: U64.t) -> E.n_to_le 8 (U64.v x)) | {
"checked_file": "LowParse.Spec.Int.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowParse.Spec.Seq.fst.checked",
"LowParse.Spec.Combinators.fsti.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": true,
"source_file": "LowParse.Spec.Int.fst"
} | [
"total"
] | [
"FStar.UInt64.t",
"FStar.Endianness.n_to_le",
"FStar.UInt64.v",
"FStar.Endianness.bytes",
"Prims.l_and",
"Prims.eq2",
"Prims.nat",
"FStar.Seq.Base.length",
"FStar.UInt8.t",
"FStar.Endianness.le_to_n"
] | [] | module LowParse.Spec.Int
open LowParse.Spec.Combinators
let decode_u8
(b: bytes { Seq.length b == 1 } )
: Tot U8.t
= Seq.index b 0
let decode_u8_injective () : Lemma
(make_total_constant_size_parser_precond 1 U8.t decode_u8)
= ()
let tot_parse_u8 =
decode_u8_injective ();
tot_make_total_constant_size_parser 1 U8.t decode_u8
let parse_u8_spec
b
=
Seq.lemma_index_slice b 0 1 0;
E.reveal_be_to_n (Seq.slice b 0 1);
E.reveal_be_to_n (Seq.slice (Seq.slice b 0 1) 0 0)
let parse_u8_spec' b = ()
let serialize_u8 =
Seq.create 1
let serialize_u8_spec x = ()
let decode_u16
(b: bytes { Seq.length b == 2 } )
: GTot U16.t
= E.lemma_be_to_n_is_bounded b;
U16.uint_to_t (E.be_to_n b)
let decode_u16_injective'
(b1: bytes { Seq.length b1 == 2 } )
(b2: bytes { Seq.length b2 == 2 } )
: Lemma
(decode_u16 b1 == decode_u16 b2 ==> Seq.equal b1 b2)
= if decode_u16 b1 = decode_u16 b2
then begin
E.lemma_be_to_n_is_bounded b1;
E.lemma_be_to_n_is_bounded b2;
assert (U32.v (U32.uint_to_t (E.be_to_n b1)) == E.be_to_n b1);
assert (U32.v (U32.uint_to_t (E.be_to_n b2)) == E.be_to_n b2);
assert (E.be_to_n b1 == E.be_to_n b2);
E.be_to_n_inj b1 b2
end else ()
let decode_u16_injective
()
: Lemma
(make_total_constant_size_parser_precond 2 U16.t decode_u16)
= Classical.forall_intro_2 decode_u16_injective'
let parse_u16 =
decode_u16_injective ();
make_total_constant_size_parser 2 U16.t decode_u16
let parse_u16_spec
b
=
E.lemma_be_to_n_is_bounded (Seq.slice b 0 2)
let serialize_u16 =
(fun (x: U16.t) -> E.n_to_be 2 (U16.v x))
let decode_u32
(b: bytes { Seq.length b == 4 } )
: GTot U32.t
= E.lemma_be_to_n_is_bounded b;
U32.uint_to_t (E.be_to_n b)
let decode_u32_injective'
(b1: bytes { Seq.length b1 == 4 } )
(b2: bytes { Seq.length b2 == 4 } )
: Lemma
(decode_u32 b1 == decode_u32 b2 ==> Seq.equal b1 b2)
= if decode_u32 b1 = decode_u32 b2
then begin
E.lemma_be_to_n_is_bounded b1;
E.lemma_be_to_n_is_bounded b2;
assert (U32.v (U32.uint_to_t (E.be_to_n b1)) == E.be_to_n b1);
assert (U32.v (U32.uint_to_t (E.be_to_n b2)) == E.be_to_n b2);
assert (E.be_to_n b1 == E.be_to_n b2);
E.be_to_n_inj b1 b2
end else ()
let decode_u32_injective () : Lemma
(make_total_constant_size_parser_precond 4 U32.t decode_u32)
= Classical.forall_intro_2 decode_u32_injective'
let parse_u32 =
decode_u32_injective ();
make_total_constant_size_parser 4 U32.t decode_u32
let parse_u32_spec
b
=
E.lemma_be_to_n_is_bounded (Seq.slice b 0 4)
let serialize_u32 =
(fun (x: U32.t) -> E.n_to_be 4 (U32.v x))
let decode_u64
(b: bytes { Seq.length b == 8 } )
: GTot U64.t
= E.lemma_be_to_n_is_bounded b;
U64.uint_to_t (E.be_to_n b)
let decode_u64_injective'
(b1: bytes { Seq.length b1 == 8 } )
(b2: bytes { Seq.length b2 == 8 } )
: Lemma
(decode_u64 b1 == decode_u64 b2 ==> Seq.equal b1 b2)
= if decode_u64 b1 = decode_u64 b2
then begin
E.lemma_be_to_n_is_bounded b1;
E.lemma_be_to_n_is_bounded b2;
assert (U64.v (U64.uint_to_t (E.be_to_n b1)) == E.be_to_n b1);
assert (U64.v (U64.uint_to_t (E.be_to_n b2)) == E.be_to_n b2);
assert (E.be_to_n b1 == E.be_to_n b2);
E.be_to_n_inj b1 b2
end else ()
let decode_u64_injective () : Lemma
(make_total_constant_size_parser_precond 8 U64.t decode_u64)
= Classical.forall_intro_2 decode_u64_injective'
let parse_u64 =
decode_u64_injective ();
make_total_constant_size_parser 8 U64.t decode_u64
let parse_u64_spec
b
= E.lemma_be_to_n_is_bounded (Seq.slice b 0 8)
let serialize_u64 =
(fun (x: U64.t) -> E.n_to_be 8 (U64.v x))
let decode_u64_le
(b: bytes { Seq.length b == 8 } )
: GTot U64.t
= E.lemma_le_to_n_is_bounded b;
U64.uint_to_t (E.le_to_n b)
let decode_u64_le_injective'
(b1: bytes { Seq.length b1 == 8 } )
(b2: bytes { Seq.length b2 == 8 } )
: Lemma
(decode_u64_le b1 == decode_u64_le b2 ==> Seq.equal b1 b2)
= if decode_u64_le b1 = decode_u64_le b2
then begin
E.lemma_le_to_n_is_bounded b1;
E.lemma_le_to_n_is_bounded b2;
assert (U64.v (U64.uint_to_t (E.le_to_n b1)) == E.le_to_n b1);
assert (U64.v (U64.uint_to_t (E.le_to_n b2)) == E.le_to_n b2);
assert (E.le_to_n b1 == E.le_to_n b2);
E.le_to_n_inj b1 b2
end else ()
let decode_u64_le_injective () : Lemma
(make_total_constant_size_parser_precond 8 U64.t decode_u64_le)
= Classical.forall_intro_2 decode_u64_le_injective'
let parse_u64_le =
decode_u64_le_injective ();
make_total_constant_size_parser 8 U64.t decode_u64_le
let parse_u64_le_spec
b
= E.lemma_le_to_n_is_bounded (Seq.slice b 0 8) | false | true | LowParse.Spec.Int.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val serialize_u64_le : serializer parse_u64_le | [] | LowParse.Spec.Int.serialize_u64_le | {
"file_name": "src/lowparse/LowParse.Spec.Int.fst",
"git_rev": "446a08ce38df905547cf20f28c43776b22b8087a",
"git_url": "https://github.com/project-everest/everparse.git",
"project_name": "everparse"
} | LowParse.Spec.Base.serializer LowParse.Spec.Int.parse_u64_le | {
"end_col": 43,
"end_line": 177,
"start_col": 2,
"start_line": 177
} |
FStar.Pervasives.Lemma | val parse_u32_spec
(b: bytes)
: Lemma
(requires (Seq.length b >= 4))
(ensures (
let pp = parse parse_u32 b in
Some? pp /\ (
let (Some (v, consumed)) = pp in
U32.v v == E.be_to_n (Seq.slice b 0 4)
))) | [
{
"abbrev": false,
"full_module": "LowParse.Spec.Combinators",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.UInt16",
"short_module": "U16"
},
{
"abbrev": true,
"full_module": "FStar.UInt8",
"short_module": "U8"
},
{
"abbrev": true,
"full_module": "FStar.Endianness",
"short_module": "E"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "Seq"
},
{
"abbrev": false,
"full_module": "LowParse.Spec.Base",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Spec",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Spec",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let parse_u32_spec
b
=
E.lemma_be_to_n_is_bounded (Seq.slice b 0 4) | val parse_u32_spec
(b: bytes)
: Lemma
(requires (Seq.length b >= 4))
(ensures (
let pp = parse parse_u32 b in
Some? pp /\ (
let (Some (v, consumed)) = pp in
U32.v v == E.be_to_n (Seq.slice b 0 4)
)))
let parse_u32_spec b = | false | null | true | E.lemma_be_to_n_is_bounded (Seq.slice b 0 4) | {
"checked_file": "LowParse.Spec.Int.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowParse.Spec.Seq.fst.checked",
"LowParse.Spec.Combinators.fsti.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": true,
"source_file": "LowParse.Spec.Int.fst"
} | [
"lemma"
] | [
"LowParse.Bytes.bytes",
"FStar.Endianness.lemma_be_to_n_is_bounded",
"FStar.Seq.Base.slice",
"LowParse.Bytes.byte",
"Prims.unit"
] | [] | module LowParse.Spec.Int
open LowParse.Spec.Combinators
let decode_u8
(b: bytes { Seq.length b == 1 } )
: Tot U8.t
= Seq.index b 0
let decode_u8_injective () : Lemma
(make_total_constant_size_parser_precond 1 U8.t decode_u8)
= ()
let tot_parse_u8 =
decode_u8_injective ();
tot_make_total_constant_size_parser 1 U8.t decode_u8
let parse_u8_spec
b
=
Seq.lemma_index_slice b 0 1 0;
E.reveal_be_to_n (Seq.slice b 0 1);
E.reveal_be_to_n (Seq.slice (Seq.slice b 0 1) 0 0)
let parse_u8_spec' b = ()
let serialize_u8 =
Seq.create 1
let serialize_u8_spec x = ()
let decode_u16
(b: bytes { Seq.length b == 2 } )
: GTot U16.t
= E.lemma_be_to_n_is_bounded b;
U16.uint_to_t (E.be_to_n b)
let decode_u16_injective'
(b1: bytes { Seq.length b1 == 2 } )
(b2: bytes { Seq.length b2 == 2 } )
: Lemma
(decode_u16 b1 == decode_u16 b2 ==> Seq.equal b1 b2)
= if decode_u16 b1 = decode_u16 b2
then begin
E.lemma_be_to_n_is_bounded b1;
E.lemma_be_to_n_is_bounded b2;
assert (U32.v (U32.uint_to_t (E.be_to_n b1)) == E.be_to_n b1);
assert (U32.v (U32.uint_to_t (E.be_to_n b2)) == E.be_to_n b2);
assert (E.be_to_n b1 == E.be_to_n b2);
E.be_to_n_inj b1 b2
end else ()
let decode_u16_injective
()
: Lemma
(make_total_constant_size_parser_precond 2 U16.t decode_u16)
= Classical.forall_intro_2 decode_u16_injective'
let parse_u16 =
decode_u16_injective ();
make_total_constant_size_parser 2 U16.t decode_u16
let parse_u16_spec
b
=
E.lemma_be_to_n_is_bounded (Seq.slice b 0 2)
let serialize_u16 =
(fun (x: U16.t) -> E.n_to_be 2 (U16.v x))
let decode_u32
(b: bytes { Seq.length b == 4 } )
: GTot U32.t
= E.lemma_be_to_n_is_bounded b;
U32.uint_to_t (E.be_to_n b)
let decode_u32_injective'
(b1: bytes { Seq.length b1 == 4 } )
(b2: bytes { Seq.length b2 == 4 } )
: Lemma
(decode_u32 b1 == decode_u32 b2 ==> Seq.equal b1 b2)
= if decode_u32 b1 = decode_u32 b2
then begin
E.lemma_be_to_n_is_bounded b1;
E.lemma_be_to_n_is_bounded b2;
assert (U32.v (U32.uint_to_t (E.be_to_n b1)) == E.be_to_n b1);
assert (U32.v (U32.uint_to_t (E.be_to_n b2)) == E.be_to_n b2);
assert (E.be_to_n b1 == E.be_to_n b2);
E.be_to_n_inj b1 b2
end else ()
let decode_u32_injective () : Lemma
(make_total_constant_size_parser_precond 4 U32.t decode_u32)
= Classical.forall_intro_2 decode_u32_injective'
let parse_u32 =
decode_u32_injective ();
make_total_constant_size_parser 4 U32.t decode_u32
let parse_u32_spec
b | false | false | LowParse.Spec.Int.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val parse_u32_spec
(b: bytes)
: Lemma
(requires (Seq.length b >= 4))
(ensures (
let pp = parse parse_u32 b in
Some? pp /\ (
let (Some (v, consumed)) = pp in
U32.v v == E.be_to_n (Seq.slice b 0 4)
))) | [] | LowParse.Spec.Int.parse_u32_spec | {
"file_name": "src/lowparse/LowParse.Spec.Int.fst",
"git_rev": "446a08ce38df905547cf20f28c43776b22b8087a",
"git_url": "https://github.com/project-everest/everparse.git",
"project_name": "everparse"
} | b: LowParse.Bytes.bytes
-> FStar.Pervasives.Lemma (requires FStar.Seq.Base.length b >= 4)
(ensures
(let pp = LowParse.Spec.Base.parse LowParse.Spec.Int.parse_u32 b in
Some? pp /\
(let _ = pp in
(let FStar.Pervasives.Native.Some #_ (FStar.Pervasives.Native.Mktuple2 #_ #_ v _) = _ in
FStar.UInt32.v v == FStar.Endianness.be_to_n (FStar.Seq.Base.slice b 0 4))
<:
Prims.logical))) | {
"end_col": 46,
"end_line": 102,
"start_col": 2,
"start_line": 102
} |
FStar.Pervasives.Lemma | val parse_u64_spec
(b: bytes)
: Lemma
(requires (Seq.length b >= 8))
(ensures (
let pp = parse parse_u64 b in
Some? pp /\ (
let (Some (v, consumed)) = pp in
U64.v v == E.be_to_n (Seq.slice b 0 8)
))) | [
{
"abbrev": false,
"full_module": "LowParse.Spec.Combinators",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.UInt16",
"short_module": "U16"
},
{
"abbrev": true,
"full_module": "FStar.UInt8",
"short_module": "U8"
},
{
"abbrev": true,
"full_module": "FStar.Endianness",
"short_module": "E"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "Seq"
},
{
"abbrev": false,
"full_module": "LowParse.Spec.Base",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Spec",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Spec",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let parse_u64_spec
b
= E.lemma_be_to_n_is_bounded (Seq.slice b 0 8) | val parse_u64_spec
(b: bytes)
: Lemma
(requires (Seq.length b >= 8))
(ensures (
let pp = parse parse_u64 b in
Some? pp /\ (
let (Some (v, consumed)) = pp in
U64.v v == E.be_to_n (Seq.slice b 0 8)
)))
let parse_u64_spec b = | false | null | true | E.lemma_be_to_n_is_bounded (Seq.slice b 0 8) | {
"checked_file": "LowParse.Spec.Int.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowParse.Spec.Seq.fst.checked",
"LowParse.Spec.Combinators.fsti.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": true,
"source_file": "LowParse.Spec.Int.fst"
} | [
"lemma"
] | [
"LowParse.Bytes.bytes",
"FStar.Endianness.lemma_be_to_n_is_bounded",
"FStar.Seq.Base.slice",
"LowParse.Bytes.byte",
"Prims.unit"
] | [] | module LowParse.Spec.Int
open LowParse.Spec.Combinators
let decode_u8
(b: bytes { Seq.length b == 1 } )
: Tot U8.t
= Seq.index b 0
let decode_u8_injective () : Lemma
(make_total_constant_size_parser_precond 1 U8.t decode_u8)
= ()
let tot_parse_u8 =
decode_u8_injective ();
tot_make_total_constant_size_parser 1 U8.t decode_u8
let parse_u8_spec
b
=
Seq.lemma_index_slice b 0 1 0;
E.reveal_be_to_n (Seq.slice b 0 1);
E.reveal_be_to_n (Seq.slice (Seq.slice b 0 1) 0 0)
let parse_u8_spec' b = ()
let serialize_u8 =
Seq.create 1
let serialize_u8_spec x = ()
let decode_u16
(b: bytes { Seq.length b == 2 } )
: GTot U16.t
= E.lemma_be_to_n_is_bounded b;
U16.uint_to_t (E.be_to_n b)
let decode_u16_injective'
(b1: bytes { Seq.length b1 == 2 } )
(b2: bytes { Seq.length b2 == 2 } )
: Lemma
(decode_u16 b1 == decode_u16 b2 ==> Seq.equal b1 b2)
= if decode_u16 b1 = decode_u16 b2
then begin
E.lemma_be_to_n_is_bounded b1;
E.lemma_be_to_n_is_bounded b2;
assert (U32.v (U32.uint_to_t (E.be_to_n b1)) == E.be_to_n b1);
assert (U32.v (U32.uint_to_t (E.be_to_n b2)) == E.be_to_n b2);
assert (E.be_to_n b1 == E.be_to_n b2);
E.be_to_n_inj b1 b2
end else ()
let decode_u16_injective
()
: Lemma
(make_total_constant_size_parser_precond 2 U16.t decode_u16)
= Classical.forall_intro_2 decode_u16_injective'
let parse_u16 =
decode_u16_injective ();
make_total_constant_size_parser 2 U16.t decode_u16
let parse_u16_spec
b
=
E.lemma_be_to_n_is_bounded (Seq.slice b 0 2)
let serialize_u16 =
(fun (x: U16.t) -> E.n_to_be 2 (U16.v x))
let decode_u32
(b: bytes { Seq.length b == 4 } )
: GTot U32.t
= E.lemma_be_to_n_is_bounded b;
U32.uint_to_t (E.be_to_n b)
let decode_u32_injective'
(b1: bytes { Seq.length b1 == 4 } )
(b2: bytes { Seq.length b2 == 4 } )
: Lemma
(decode_u32 b1 == decode_u32 b2 ==> Seq.equal b1 b2)
= if decode_u32 b1 = decode_u32 b2
then begin
E.lemma_be_to_n_is_bounded b1;
E.lemma_be_to_n_is_bounded b2;
assert (U32.v (U32.uint_to_t (E.be_to_n b1)) == E.be_to_n b1);
assert (U32.v (U32.uint_to_t (E.be_to_n b2)) == E.be_to_n b2);
assert (E.be_to_n b1 == E.be_to_n b2);
E.be_to_n_inj b1 b2
end else ()
let decode_u32_injective () : Lemma
(make_total_constant_size_parser_precond 4 U32.t decode_u32)
= Classical.forall_intro_2 decode_u32_injective'
let parse_u32 =
decode_u32_injective ();
make_total_constant_size_parser 4 U32.t decode_u32
let parse_u32_spec
b
=
E.lemma_be_to_n_is_bounded (Seq.slice b 0 4)
let serialize_u32 =
(fun (x: U32.t) -> E.n_to_be 4 (U32.v x))
let decode_u64
(b: bytes { Seq.length b == 8 } )
: GTot U64.t
= E.lemma_be_to_n_is_bounded b;
U64.uint_to_t (E.be_to_n b)
let decode_u64_injective'
(b1: bytes { Seq.length b1 == 8 } )
(b2: bytes { Seq.length b2 == 8 } )
: Lemma
(decode_u64 b1 == decode_u64 b2 ==> Seq.equal b1 b2)
= if decode_u64 b1 = decode_u64 b2
then begin
E.lemma_be_to_n_is_bounded b1;
E.lemma_be_to_n_is_bounded b2;
assert (U64.v (U64.uint_to_t (E.be_to_n b1)) == E.be_to_n b1);
assert (U64.v (U64.uint_to_t (E.be_to_n b2)) == E.be_to_n b2);
assert (E.be_to_n b1 == E.be_to_n b2);
E.be_to_n_inj b1 b2
end else ()
let decode_u64_injective () : Lemma
(make_total_constant_size_parser_precond 8 U64.t decode_u64)
= Classical.forall_intro_2 decode_u64_injective'
let parse_u64 =
decode_u64_injective ();
make_total_constant_size_parser 8 U64.t decode_u64
let parse_u64_spec | false | false | LowParse.Spec.Int.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val parse_u64_spec
(b: bytes)
: Lemma
(requires (Seq.length b >= 8))
(ensures (
let pp = parse parse_u64 b in
Some? pp /\ (
let (Some (v, consumed)) = pp in
U64.v v == E.be_to_n (Seq.slice b 0 8)
))) | [] | LowParse.Spec.Int.parse_u64_spec | {
"file_name": "src/lowparse/LowParse.Spec.Int.fst",
"git_rev": "446a08ce38df905547cf20f28c43776b22b8087a",
"git_url": "https://github.com/project-everest/everparse.git",
"project_name": "everparse"
} | b: LowParse.Bytes.bytes
-> FStar.Pervasives.Lemma (requires FStar.Seq.Base.length b >= 8)
(ensures
(let pp = LowParse.Spec.Base.parse LowParse.Spec.Int.parse_u64 b in
Some? pp /\
(let _ = pp in
(let FStar.Pervasives.Native.Some #_ (FStar.Pervasives.Native.Mktuple2 #_ #_ v _) = _ in
FStar.UInt64.v v == FStar.Endianness.be_to_n (FStar.Seq.Base.slice b 0 8))
<:
Prims.logical))) | {
"end_col": 46,
"end_line": 138,
"start_col": 2,
"start_line": 138
} |
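As a quick illustration of how parse_u64_spec is typically consumed, the hypothetical client lemma below (parse_u64_value_is_be is an invented name, not part of LowParse) restates the postcondition through a match on the parser result; inside LowParse.Spec.Int, with the module's abbreviations (E, Seq, U64), the single call to parse_u64_spec should be enough for the SMT solver to discharge it.

let parse_u64_value_is_be (b: bytes) : Lemma
  (requires Seq.length b >= 8)
  (ensures (match parse parse_u64 b with
            | Some (v, _) -> U64.v v == E.be_to_n (Seq.slice b 0 8)
            | None -> False))
= parse_u64_spec b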
FStar.Pervasives.Lemma | val parse_u64_le_spec
(b: bytes)
: Lemma
(requires (Seq.length b >= 8))
(ensures (
let pp = parse parse_u64_le b in
Some? pp /\ (
let (Some (v, consumed)) = pp in
U64.v v == E.le_to_n (Seq.slice b 0 8)
))) | [
{
"abbrev": false,
"full_module": "LowParse.Spec.Combinators",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.UInt16",
"short_module": "U16"
},
{
"abbrev": true,
"full_module": "FStar.UInt8",
"short_module": "U8"
},
{
"abbrev": true,
"full_module": "FStar.Endianness",
"short_module": "E"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "Seq"
},
{
"abbrev": false,
"full_module": "LowParse.Spec.Base",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Spec",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Spec",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let parse_u64_le_spec
b
= E.lemma_le_to_n_is_bounded (Seq.slice b 0 8) | val parse_u64_le_spec
(b: bytes)
: Lemma
(requires (Seq.length b >= 8))
(ensures (
let pp = parse parse_u64_le b in
Some? pp /\ (
let (Some (v, consumed)) = pp in
U64.v v == E.le_to_n (Seq.slice b 0 8)
)))
let parse_u64_le_spec b = | false | null | true | E.lemma_le_to_n_is_bounded (Seq.slice b 0 8) | {
"checked_file": "LowParse.Spec.Int.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowParse.Spec.Seq.fst.checked",
"LowParse.Spec.Combinators.fsti.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": true,
"source_file": "LowParse.Spec.Int.fst"
} | [
"lemma"
] | [
"LowParse.Bytes.bytes",
"FStar.Endianness.lemma_le_to_n_is_bounded",
"FStar.Seq.Base.slice",
"LowParse.Bytes.byte",
"Prims.unit"
] | [] | module LowParse.Spec.Int
open LowParse.Spec.Combinators
let decode_u8
(b: bytes { Seq.length b == 1 } )
: Tot U8.t
= Seq.index b 0
let decode_u8_injective () : Lemma
(make_total_constant_size_parser_precond 1 U8.t decode_u8)
= ()
let tot_parse_u8 =
decode_u8_injective ();
tot_make_total_constant_size_parser 1 U8.t decode_u8
let parse_u8_spec
b
=
Seq.lemma_index_slice b 0 1 0;
E.reveal_be_to_n (Seq.slice b 0 1);
E.reveal_be_to_n (Seq.slice (Seq.slice b 0 1) 0 0)
let parse_u8_spec' b = ()
let serialize_u8 =
Seq.create 1
let serialize_u8_spec x = ()
let decode_u16
(b: bytes { Seq.length b == 2 } )
: GTot U16.t
= E.lemma_be_to_n_is_bounded b;
U16.uint_to_t (E.be_to_n b)
let decode_u16_injective'
(b1: bytes { Seq.length b1 == 2 } )
(b2: bytes { Seq.length b2 == 2 } )
: Lemma
(decode_u16 b1 == decode_u16 b2 ==> Seq.equal b1 b2)
= if decode_u16 b1 = decode_u16 b2
then begin
E.lemma_be_to_n_is_bounded b1;
E.lemma_be_to_n_is_bounded b2;
assert (U32.v (U32.uint_to_t (E.be_to_n b1)) == E.be_to_n b1);
assert (U32.v (U32.uint_to_t (E.be_to_n b2)) == E.be_to_n b2);
assert (E.be_to_n b1 == E.be_to_n b2);
E.be_to_n_inj b1 b2
end else ()
let decode_u16_injective
()
: Lemma
(make_total_constant_size_parser_precond 2 U16.t decode_u16)
= Classical.forall_intro_2 decode_u16_injective'
let parse_u16 =
decode_u16_injective ();
make_total_constant_size_parser 2 U16.t decode_u16
let parse_u16_spec
b
=
E.lemma_be_to_n_is_bounded (Seq.slice b 0 2)
let serialize_u16 =
(fun (x: U16.t) -> E.n_to_be 2 (U16.v x))
let decode_u32
(b: bytes { Seq.length b == 4 } )
: GTot U32.t
= E.lemma_be_to_n_is_bounded b;
U32.uint_to_t (E.be_to_n b)
let decode_u32_injective'
(b1: bytes { Seq.length b1 == 4 } )
(b2: bytes { Seq.length b2 == 4 } )
: Lemma
(decode_u32 b1 == decode_u32 b2 ==> Seq.equal b1 b2)
= if decode_u32 b1 = decode_u32 b2
then begin
E.lemma_be_to_n_is_bounded b1;
E.lemma_be_to_n_is_bounded b2;
assert (U32.v (U32.uint_to_t (E.be_to_n b1)) == E.be_to_n b1);
assert (U32.v (U32.uint_to_t (E.be_to_n b2)) == E.be_to_n b2);
assert (E.be_to_n b1 == E.be_to_n b2);
E.be_to_n_inj b1 b2
end else ()
let decode_u32_injective () : Lemma
(make_total_constant_size_parser_precond 4 U32.t decode_u32)
= Classical.forall_intro_2 decode_u32_injective'
let parse_u32 =
decode_u32_injective ();
make_total_constant_size_parser 4 U32.t decode_u32
let parse_u32_spec
b
=
E.lemma_be_to_n_is_bounded (Seq.slice b 0 4)
let serialize_u32 =
(fun (x: U32.t) -> E.n_to_be 4 (U32.v x))
let decode_u64
(b: bytes { Seq.length b == 8 } )
: GTot U64.t
= E.lemma_be_to_n_is_bounded b;
U64.uint_to_t (E.be_to_n b)
let decode_u64_injective'
(b1: bytes { Seq.length b1 == 8 } )
(b2: bytes { Seq.length b2 == 8 } )
: Lemma
(decode_u64 b1 == decode_u64 b2 ==> Seq.equal b1 b2)
= if decode_u64 b1 = decode_u64 b2
then begin
E.lemma_be_to_n_is_bounded b1;
E.lemma_be_to_n_is_bounded b2;
assert (U64.v (U64.uint_to_t (E.be_to_n b1)) == E.be_to_n b1);
assert (U64.v (U64.uint_to_t (E.be_to_n b2)) == E.be_to_n b2);
assert (E.be_to_n b1 == E.be_to_n b2);
E.be_to_n_inj b1 b2
end else ()
let decode_u64_injective () : Lemma
(make_total_constant_size_parser_precond 8 U64.t decode_u64)
= Classical.forall_intro_2 decode_u64_injective'
let parse_u64 =
decode_u64_injective ();
make_total_constant_size_parser 8 U64.t decode_u64
let parse_u64_spec
b
= E.lemma_be_to_n_is_bounded (Seq.slice b 0 8)
let serialize_u64 =
(fun (x: U64.t) -> E.n_to_be 8 (U64.v x))
let decode_u64_le
(b: bytes { Seq.length b == 8 } )
: GTot U64.t
= E.lemma_le_to_n_is_bounded b;
U64.uint_to_t (E.le_to_n b)
let decode_u64_le_injective'
(b1: bytes { Seq.length b1 == 8 } )
(b2: bytes { Seq.length b2 == 8 } )
: Lemma
(decode_u64_le b1 == decode_u64_le b2 ==> Seq.equal b1 b2)
= if decode_u64_le b1 = decode_u64_le b2
then begin
E.lemma_le_to_n_is_bounded b1;
E.lemma_le_to_n_is_bounded b2;
assert (U64.v (U64.uint_to_t (E.le_to_n b1)) == E.le_to_n b1);
assert (U64.v (U64.uint_to_t (E.le_to_n b2)) == E.le_to_n b2);
assert (E.le_to_n b1 == E.le_to_n b2);
E.le_to_n_inj b1 b2
end else ()
let decode_u64_le_injective () : Lemma
(make_total_constant_size_parser_precond 8 U64.t decode_u64_le)
= Classical.forall_intro_2 decode_u64_le_injective'
let parse_u64_le =
decode_u64_le_injective ();
make_total_constant_size_parser 8 U64.t decode_u64_le
let parse_u64_le_spec | false | false | LowParse.Spec.Int.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val parse_u64_le_spec
(b: bytes)
: Lemma
(requires (Seq.length b >= 8))
(ensures (
let pp = parse parse_u64_le b in
Some? pp /\ (
let (Some (v, consumed)) = pp in
U64.v v == E.le_to_n (Seq.slice b 0 8)
))) | [] | LowParse.Spec.Int.parse_u64_le_spec | {
"file_name": "src/lowparse/LowParse.Spec.Int.fst",
"git_rev": "446a08ce38df905547cf20f28c43776b22b8087a",
"git_url": "https://github.com/project-everest/everparse.git",
"project_name": "everparse"
} | b: LowParse.Bytes.bytes
-> FStar.Pervasives.Lemma (requires FStar.Seq.Base.length b >= 8)
(ensures
(let pp = LowParse.Spec.Base.parse LowParse.Spec.Int.parse_u64_le b in
Some? pp /\
(let _ = pp in
(let FStar.Pervasives.Native.Some #_ (FStar.Pervasives.Native.Mktuple2 #_ #_ v _) = _ in
FStar.UInt64.v v == FStar.Endianness.le_to_n (FStar.Seq.Base.slice b 0 8))
<:
Prims.logical))) | {
"end_col": 46,
"end_line": 174,
"start_col": 2,
"start_line": 174
} |
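The little-endian variant leans on E.lemma_le_to_n_is_bounded just as the big-endian one leans on lemma_be_to_n_is_bounded: the decoded number of an 8-byte slice is below 2^64, which is what justifies packing it into U64.t. A minimal sketch, with an invented lemma name:

let le_to_n_8_fits_u64 (b: bytes) : Lemma
  (requires Seq.length b >= 8)
  (ensures E.le_to_n (Seq.slice b 0 8) < pow2 64)
= E.lemma_le_to_n_is_bounded (Seq.slice b 0 8)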
FStar.Pervasives.Lemma | val parse_u8_spec
(b: bytes)
: Lemma
(requires (Seq.length b >= 1))
(ensures (
let pp = parse parse_u8 b in
Some? pp /\ (
let (Some (v, consumed)) = pp in
U8.v v == E.be_to_n (Seq.slice b 0 1)
))) | [
{
"abbrev": false,
"full_module": "LowParse.Spec.Combinators",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.UInt16",
"short_module": "U16"
},
{
"abbrev": true,
"full_module": "FStar.UInt8",
"short_module": "U8"
},
{
"abbrev": true,
"full_module": "FStar.Endianness",
"short_module": "E"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "Seq"
},
{
"abbrev": false,
"full_module": "LowParse.Spec.Base",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Spec",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Spec",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let parse_u8_spec
b
=
Seq.lemma_index_slice b 0 1 0;
E.reveal_be_to_n (Seq.slice b 0 1);
E.reveal_be_to_n (Seq.slice (Seq.slice b 0 1) 0 0) | val parse_u8_spec
(b: bytes)
: Lemma
(requires (Seq.length b >= 1))
(ensures (
let pp = parse parse_u8 b in
Some? pp /\ (
let (Some (v, consumed)) = pp in
U8.v v == E.be_to_n (Seq.slice b 0 1)
)))
let parse_u8_spec b = | false | null | true | Seq.lemma_index_slice b 0 1 0;
E.reveal_be_to_n (Seq.slice b 0 1);
E.reveal_be_to_n (Seq.slice (Seq.slice b 0 1) 0 0) | {
"checked_file": "LowParse.Spec.Int.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowParse.Spec.Seq.fst.checked",
"LowParse.Spec.Combinators.fsti.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": true,
"source_file": "LowParse.Spec.Int.fst"
} | [
"lemma"
] | [
"LowParse.Bytes.bytes",
"FStar.Endianness.reveal_be_to_n",
"FStar.Seq.Base.slice",
"LowParse.Bytes.byte",
"Prims.unit",
"FStar.Seq.Base.lemma_index_slice"
] | [] | module LowParse.Spec.Int
open LowParse.Spec.Combinators
let decode_u8
(b: bytes { Seq.length b == 1 } )
: Tot U8.t
= Seq.index b 0
let decode_u8_injective () : Lemma
(make_total_constant_size_parser_precond 1 U8.t decode_u8)
= ()
let tot_parse_u8 =
decode_u8_injective ();
tot_make_total_constant_size_parser 1 U8.t decode_u8
let parse_u8_spec
b | false | false | LowParse.Spec.Int.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val parse_u8_spec
(b: bytes)
: Lemma
(requires (Seq.length b >= 1))
(ensures (
let pp = parse parse_u8 b in
Some? pp /\ (
let (Some (v, consumed)) = pp in
U8.v v == E.be_to_n (Seq.slice b 0 1)
))) | [] | LowParse.Spec.Int.parse_u8_spec | {
"file_name": "src/lowparse/LowParse.Spec.Int.fst",
"git_rev": "446a08ce38df905547cf20f28c43776b22b8087a",
"git_url": "https://github.com/project-everest/everparse.git",
"project_name": "everparse"
} | b: LowParse.Bytes.bytes
-> FStar.Pervasives.Lemma (requires FStar.Seq.Base.length b >= 1)
(ensures
(let pp = LowParse.Spec.Base.parse LowParse.Spec.Int.parse_u8 b in
Some? pp /\
(let _ = pp in
(let FStar.Pervasives.Native.Some #_ (FStar.Pervasives.Native.Mktuple2 #_ #_ v _) = _ in
FStar.UInt8.v v == FStar.Endianness.be_to_n (FStar.Seq.Base.slice b 0 1))
<:
Prims.logical))) | {
"end_col": 52,
"end_line": 22,
"start_col": 2,
"start_line": 20
} |
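parse_u8_spec is proved by revealing be_to_n twice; the underlying fact is that the big-endian value of a one-byte sequence is just that byte. A sketch of that fact in isolation (lemma name invented; the two reveal_be_to_n calls mirror the ones in the proof above):

let be_to_n_one_byte (b: bytes { Seq.length b == 1 }) : Lemma
  (E.be_to_n b == U8.v (Seq.index b 0))
= E.reveal_be_to_n b;
  E.reveal_be_to_n (Seq.slice b 0 0)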
FStar.Pervasives.Lemma | val parse_u16_spec
(b: bytes)
: Lemma
(requires (Seq.length b >= 2))
(ensures (
let pp = parse parse_u16 b in
Some? pp /\ (
let (Some (v, consumed)) = pp in
U16.v v == E.be_to_n (Seq.slice b 0 2)
))) | [
{
"abbrev": false,
"full_module": "LowParse.Spec.Combinators",
"short_module": null
},
{
"abbrev": true,
"full_module": "FStar.UInt64",
"short_module": "U64"
},
{
"abbrev": true,
"full_module": "FStar.UInt32",
"short_module": "U32"
},
{
"abbrev": true,
"full_module": "FStar.UInt16",
"short_module": "U16"
},
{
"abbrev": true,
"full_module": "FStar.UInt8",
"short_module": "U8"
},
{
"abbrev": true,
"full_module": "FStar.Endianness",
"short_module": "E"
},
{
"abbrev": true,
"full_module": "FStar.Seq",
"short_module": "Seq"
},
{
"abbrev": false,
"full_module": "LowParse.Spec.Base",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Spec",
"short_module": null
},
{
"abbrev": false,
"full_module": "LowParse.Spec",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let parse_u16_spec
b
=
E.lemma_be_to_n_is_bounded (Seq.slice b 0 2) | val parse_u16_spec
(b: bytes)
: Lemma
(requires (Seq.length b >= 2))
(ensures (
let pp = parse parse_u16 b in
Some? pp /\ (
let (Some (v, consumed)) = pp in
U16.v v == E.be_to_n (Seq.slice b 0 2)
)))
let parse_u16_spec b = | false | null | true | E.lemma_be_to_n_is_bounded (Seq.slice b 0 2) | {
"checked_file": "LowParse.Spec.Int.fst.checked",
"dependencies": [
"prims.fst.checked",
"LowParse.Spec.Seq.fst.checked",
"LowParse.Spec.Combinators.fsti.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": true,
"source_file": "LowParse.Spec.Int.fst"
} | [
"lemma"
] | [
"LowParse.Bytes.bytes",
"FStar.Endianness.lemma_be_to_n_is_bounded",
"FStar.Seq.Base.slice",
"LowParse.Bytes.byte",
"Prims.unit"
] | [] | module LowParse.Spec.Int
open LowParse.Spec.Combinators
let decode_u8
(b: bytes { Seq.length b == 1 } )
: Tot U8.t
= Seq.index b 0
let decode_u8_injective () : Lemma
(make_total_constant_size_parser_precond 1 U8.t decode_u8)
= ()
let tot_parse_u8 =
decode_u8_injective ();
tot_make_total_constant_size_parser 1 U8.t decode_u8
let parse_u8_spec
b
=
Seq.lemma_index_slice b 0 1 0;
E.reveal_be_to_n (Seq.slice b 0 1);
E.reveal_be_to_n (Seq.slice (Seq.slice b 0 1) 0 0)
let parse_u8_spec' b = ()
let serialize_u8 =
Seq.create 1
let serialize_u8_spec x = ()
let decode_u16
(b: bytes { Seq.length b == 2 } )
: GTot U16.t
= E.lemma_be_to_n_is_bounded b;
U16.uint_to_t (E.be_to_n b)
let decode_u16_injective'
(b1: bytes { Seq.length b1 == 2 } )
(b2: bytes { Seq.length b2 == 2 } )
: Lemma
(decode_u16 b1 == decode_u16 b2 ==> Seq.equal b1 b2)
= if decode_u16 b1 = decode_u16 b2
then begin
E.lemma_be_to_n_is_bounded b1;
E.lemma_be_to_n_is_bounded b2;
assert (U32.v (U32.uint_to_t (E.be_to_n b1)) == E.be_to_n b1);
assert (U32.v (U32.uint_to_t (E.be_to_n b2)) == E.be_to_n b2);
assert (E.be_to_n b1 == E.be_to_n b2);
E.be_to_n_inj b1 b2
end else ()
let decode_u16_injective
()
: Lemma
(make_total_constant_size_parser_precond 2 U16.t decode_u16)
= Classical.forall_intro_2 decode_u16_injective'
let parse_u16 =
decode_u16_injective ();
make_total_constant_size_parser 2 U16.t decode_u16
let parse_u16_spec
b | false | false | LowParse.Spec.Int.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 8,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": true,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val parse_u16_spec
(b: bytes)
: Lemma
(requires (Seq.length b >= 2))
(ensures (
let pp = parse parse_u16 b in
Some? pp /\ (
let (Some (v, consumed)) = pp in
U16.v v == E.be_to_n (Seq.slice b 0 2)
))) | [] | LowParse.Spec.Int.parse_u16_spec | {
"file_name": "src/lowparse/LowParse.Spec.Int.fst",
"git_rev": "446a08ce38df905547cf20f28c43776b22b8087a",
"git_url": "https://github.com/project-everest/everparse.git",
"project_name": "everparse"
} | b: LowParse.Bytes.bytes
-> FStar.Pervasives.Lemma (requires FStar.Seq.Base.length b >= 2)
(ensures
(let pp = LowParse.Spec.Base.parse LowParse.Spec.Int.parse_u16 b in
Some? pp /\
(let _ = pp in
(let FStar.Pervasives.Native.Some #_ (FStar.Pervasives.Native.Mktuple2 #_ #_ v _) = _ in
FStar.UInt16.v v == FStar.Endianness.be_to_n (FStar.Seq.Base.slice b 0 2))
<:
Prims.logical))) | {
"end_col": 46,
"end_line": 65,
"start_col": 2,
"start_line": 65
} |
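For parse_u16_spec the key step is again a bound: be_to_n of a two-byte slice is below 2^16, which is exactly what decode_u16 needs before calling U16.uint_to_t. A minimal sketch, with an invented lemma name:

let be_to_n_2_fits_u16 (b: bytes) : Lemma
  (requires Seq.length b >= 2)
  (ensures E.be_to_n (Seq.slice b 0 2) < pow2 16)
= E.lemma_be_to_n_is_bounded (Seq.slice b 0 2)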
Prims.Tot | val same_domain (h:vale_heap) (m:S.machine_heap) : prop0 | [
{
"abbrev": true,
"full_module": "Vale.Interop.Base",
"short_module": "IB"
},
{
"abbrev": false,
"full_module": "Vale.Lib.BufferViewHelpers",
"short_module": null
},
{
"abbrev": true,
"full_module": "LowStar.BufferView.Down",
"short_module": "DV"
},
{
"abbrev": true,
"full_module": "LowStar.BufferView.Up",
"short_module": "UV"
},
{
"abbrev": true,
"full_module": "LowStar.Monotonic.Buffer",
"short_module": "MB"
},
{
"abbrev": true,
"full_module": "Vale.Interop",
"short_module": "I"
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Machine_Semantics_s",
"short_module": "S"
},
{
"abbrev": false,
"full_module": "Vale.Lib.Seqs",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.MachineHeap_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Heap",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let same_domain h m = Set.equal (IB.addrs_set (_ih h)) (Map.domain m) | val same_domain (h:vale_heap) (m:S.machine_heap) : prop0
let same_domain h m = | false | null | false | Set.equal (IB.addrs_set (_ih h)) (Map.domain m) | {
"checked_file": "Vale.X64.Memory_Sems.fst.checked",
"dependencies": [
"Vale.X64.Memory.fst.checked",
"Vale.X64.Memory.fst.checked",
"Vale.X64.Machine_Semantics_s.fst.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.BufferViewStore.fsti.checked",
"Vale.Lib.BufferViewHelpers.fst.checked",
"Vale.Interop.Views.fsti.checked",
"Vale.Interop.Heap_s.fst.checked",
"Vale.Interop.Base.fst.checked",
"Vale.Interop.fsti.checked",
"Vale.Def.Words_s.fsti.checked",
"Vale.Def.Words.Two_s.fsti.checked",
"Vale.Def.Words.Four_s.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Def.Opaque_s.fsti.checked",
"Vale.Arch.MachineHeap.fsti.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"Vale.Arch.Heap.fsti.checked",
"prims.fst.checked",
"LowStar.Monotonic.Buffer.fsti.checked",
"LowStar.BufferView.Up.fsti.checked",
"LowStar.BufferView.Down.fsti.checked",
"FStar.UInt8.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.Set.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Option.fst.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.List.fst.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": true,
"source_file": "Vale.X64.Memory_Sems.fst"
} | [
"total"
] | [
"Vale.Arch.HeapImpl.vale_heap",
"Vale.Arch.MachineHeap_s.machine_heap",
"FStar.Set.equal",
"Prims.int",
"Vale.Interop.Heap_s.addrs_set",
"Vale.Arch.HeapImpl._ih",
"FStar.Map.domain",
"Vale.Def.Types_s.nat8",
"Vale.Def.Prop_s.prop0"
] | [] | module Vale.X64.Memory_Sems
open FStar.Mul
open Vale.Def.Prop_s
open Vale.Def.Opaque_s
open Vale.X64.Machine_s
open Vale.X64.Memory
open Vale.Def.Words_s
module I = Vale.Interop
module S = Vale.X64.Machine_Semantics_s
module MB = LowStar.Monotonic.Buffer
module UV = LowStar.BufferView.Up
module DV = LowStar.BufferView.Down
open Vale.Lib.BufferViewHelpers
open Vale.Arch.HeapImpl
open Vale.Arch.Heap
friend Vale.X64.Memory
module IB = Vale.Interop.Base | false | true | Vale.X64.Memory_Sems.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val same_domain (h:vale_heap) (m:S.machine_heap) : prop0 | [] | Vale.X64.Memory_Sems.same_domain | {
"file_name": "vale/code/arch/x64/Vale.X64.Memory_Sems.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | h: Vale.Arch.HeapImpl.vale_heap -> m: Vale.Arch.MachineHeap_s.machine_heap -> Vale.Def.Prop_s.prop0 | {
"end_col": 69,
"end_line": 21,
"start_col": 22,
"start_line": 21
} |
Prims.GTot | val destroy_heaplets (h1:vale_full_heap) : GTot vale_full_heap | [
{
"abbrev": true,
"full_module": "Vale.Interop.Base",
"short_module": "IB"
},
{
"abbrev": false,
"full_module": "Vale.Lib.BufferViewHelpers",
"short_module": null
},
{
"abbrev": true,
"full_module": "LowStar.BufferView.Down",
"short_module": "DV"
},
{
"abbrev": true,
"full_module": "LowStar.BufferView.Up",
"short_module": "UV"
},
{
"abbrev": true,
"full_module": "LowStar.Monotonic.Buffer",
"short_module": "MB"
},
{
"abbrev": true,
"full_module": "Vale.Interop",
"short_module": "I"
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Machine_Semantics_s",
"short_module": "S"
},
{
"abbrev": false,
"full_module": "Vale.Lib.Seqs",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.MachineHeap_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Heap",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let destroy_heaplets h1 =
h1 | val destroy_heaplets (h1:vale_full_heap) : GTot vale_full_heap
let destroy_heaplets h1 = | false | null | false | h1 | {
"checked_file": "Vale.X64.Memory_Sems.fst.checked",
"dependencies": [
"Vale.X64.Memory.fst.checked",
"Vale.X64.Memory.fst.checked",
"Vale.X64.Machine_Semantics_s.fst.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.BufferViewStore.fsti.checked",
"Vale.Lib.BufferViewHelpers.fst.checked",
"Vale.Interop.Views.fsti.checked",
"Vale.Interop.Heap_s.fst.checked",
"Vale.Interop.Base.fst.checked",
"Vale.Interop.fsti.checked",
"Vale.Def.Words_s.fsti.checked",
"Vale.Def.Words.Two_s.fsti.checked",
"Vale.Def.Words.Four_s.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Def.Opaque_s.fsti.checked",
"Vale.Arch.MachineHeap.fsti.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"Vale.Arch.Heap.fsti.checked",
"prims.fst.checked",
"LowStar.Monotonic.Buffer.fsti.checked",
"LowStar.BufferView.Up.fsti.checked",
"LowStar.BufferView.Down.fsti.checked",
"FStar.UInt8.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.Set.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Option.fst.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.List.fst.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": true,
"source_file": "Vale.X64.Memory_Sems.fst"
} | [
"sometrivial"
] | [
"Vale.Arch.HeapImpl.vale_full_heap"
] | [] | module Vale.X64.Memory_Sems
open FStar.Mul
open Vale.Def.Prop_s
open Vale.Def.Opaque_s
open Vale.X64.Machine_s
open Vale.X64.Memory
open Vale.Def.Words_s
module I = Vale.Interop
module S = Vale.X64.Machine_Semantics_s
module MB = LowStar.Monotonic.Buffer
module UV = LowStar.BufferView.Up
module DV = LowStar.BufferView.Down
open Vale.Lib.BufferViewHelpers
open Vale.Arch.HeapImpl
open Vale.Arch.Heap
friend Vale.X64.Memory
module IB = Vale.Interop.Base
let same_domain h m = Set.equal (IB.addrs_set (_ih h)) (Map.domain m)
let lemma_same_domains h m1 m2 = ()
let get_heap h = I.down_mem (_ih h)
let upd_heap h m = mi_heap_upd h m
//let lemma_upd_get_heap h = I.down_up_identity (_ih h)
let lemma_get_upd_heap h m = I.up_down_identity (_ih h) m
let lemma_heap_impl = ()
let lemma_heap_get_heap h = ()
let lemma_heap_taint h = ()
//let lemma_heap_upd_heap h mh mt = ()
[@"opaque_to_smt"]
let rec set_of_range (a:int) (n:nat) : Pure (Set.set int)
(requires True)
(ensures fun s -> (forall (i:int).{:pattern Set.mem i s} Set.mem i s <==> a <= i /\ i < a + n))
=
if n = 0 then Set.empty else Set.union (set_of_range a (n - 1)) (Set.singleton (a + n - 1))
let buffer_info_has_addr (bi:buffer_info) (a:int) =
let b = bi.bi_buffer in
let addr = Vale.Interop.Heap_s.global_addrs_map b in
let len = DV.length (get_downview b.bsrc) in
addr <= a /\ a < addr + len
let buffer_info_has_addr_opt (bi:option buffer_info) (a:int) =
match bi with
| None -> False
| Some bi -> buffer_info_has_addr bi a
#set-options "--z3rlimit 100"
let rec make_owns_rec (h:vale_heap) (bs:Seq.seq buffer_info) (n:nat{n <= Seq.length bs}) :
GTot ((int -> option (n:nat{n < Seq.length bs})) & (heaplet_id -> Set.set int))
=
if n = 0 then ((fun _ -> None), (fun _ -> Set.empty)) else
let (m0, s0) = make_owns_rec h bs (n - 1) in
let bi = Seq.index bs (n - 1) in
let b = bi.bi_buffer in
let hi = bi.bi_heaplet in
let addr = Vale.Interop.Heap_s.global_addrs_map b in
let len = DV.length (get_downview b.bsrc) in
let s_b = set_of_range addr len in
let s i = if i = hi then Set.union (s0 i) s_b else s0 i in
let m a = if addr <= a && a < addr + len then Some (n - 1) else m0 a in
(m, s)
[@"opaque_to_smt"]
let make_owns (h:vale_heap) (bs:Seq.seq buffer_info) (n:nat{n <= Seq.length bs}) :
GTot ((int -> option (n:nat{n < Seq.length bs})) & (heaplet_id -> Set.set int))
=
make_owns_rec h bs n
let rec lemma_make_owns (h:vale_heap) (bs:Seq.seq buffer_info) (n:nat) : Lemma
(requires
n <= Seq.length bs /\
(forall (i:nat).{:pattern Seq.index bs i} i < Seq.length bs ==> buffer_readable h (Seq.index bs i).bi_buffer) /\
(forall (i1 i2:nat).{:pattern (Seq.index bs i1); (Seq.index bs i2)}
i1 < Seq.length bs /\ i2 < Seq.length bs ==> buffer_info_disjoint (Seq.index bs i1) (Seq.index bs i2))
)
(ensures (
let (m, s) = make_owns h bs n in
(forall (i:heaplet_id) (a:int).{:pattern Set.mem a (s i)}
(Set.mem a (s i) <==> Option.mapTot (fun n -> (Seq.index bs n).bi_heaplet) (m a) == Some i) /\
(Set.mem a (s i) ==> buffer_info_has_addr_opt (Option.mapTot (fun n -> Seq.index bs n) (m a)) a) /\
(Set.mem a (s i) ==> Set.mem a (Map.domain h.mh))
) /\
(forall (k:nat) (a:int).{:pattern Set.mem a (s (Seq.index bs k).bi_heaplet)}
k < n /\ buffer_info_has_addr (Seq.index bs k) a ==> Set.mem a (s (Seq.index bs k).bi_heaplet))
))
=
reveal_opaque (`%make_owns) make_owns;
if n = 0 then () else
let _ = make_owns h bs (n - 1) in
let (m, s) = make_owns h bs n in
lemma_make_owns h bs (n - 1);
let bi = Seq.index bs (n - 1) in
let b = bi.bi_buffer in
let hi = bi.bi_heaplet in
let addr = Vale.Interop.Heap_s.global_addrs_map b in
let len = DV.length (get_downview b.bsrc) in
let s_b = set_of_range addr len in
let lem1 (a:int) : Lemma
(requires Set.mem a s_b)
(ensures Set.mem a (Map.domain h.mh))
[SMTPat (Set.mem a (Map.domain h.mh))]
=
I.addrs_set_mem h.ih b a
in
let lem2 (i:heaplet_id) (a:int) : Lemma
(requires i =!= hi /\ Set.mem a (Set.intersect s_b (s i)))
(ensures False)
[SMTPat (Set.mem a (s i))]
=
reveal_opaque (`%addr_map_pred) addr_map_pred
in
()
#push-options "--initial_fuel 1 --max_fuel 1 --initial_ifuel 2 --max_ifuel 2"
let rec lemma_loc_mutable_buffers_rec (l:list buffer_info) (s:Seq.seq buffer_info) (n:nat) : Lemma
(requires
n + List.length l == Seq.length s /\
list_to_seq_post l s n
)
(ensures (
let modloc = loc_mutable_buffers l in
forall (i:nat).{:pattern Seq.index s i} n <= i /\ i < Seq.length s ==> (
let bi = Seq.index s i in
bi.bi_mutable == Mutable ==> loc_includes modloc (loc_buffer bi.bi_buffer))
))
(decreases l)
=
match l with
| [] -> ()
| h::t -> lemma_loc_mutable_buffers_rec t s (n + 1)
#pop-options
let lemma_loc_mutable_buffers (l:list buffer_info) : Lemma
(ensures (
let s = list_to_seq l in
forall (i:nat).{:pattern Seq.index s i} i < Seq.length s ==> (
let bi = Seq.index s i in
bi.bi_mutable == Mutable ==> loc_includes (loc_mutable_buffers l) (loc_buffer bi.bi_buffer))
))
=
lemma_list_to_seq l;
lemma_loc_mutable_buffers_rec l (list_to_seq l) 0
let create_heaplets buffers h1 =
let bs = list_to_seq buffers in
let modloc = loc_mutable_buffers buffers in
let layout1 = h1.vf_layout in
let layin1 = layout1.vl_inner in
let (hmap, hsets) = make_owns h1.vf_heap bs (Seq.length bs) in
let hmap a = Option.mapTot (fun n -> (Seq.index bs n).bi_heaplet) (hmap a) in
let l = {
vl_heaplets_initialized = true;
vl_heaplet_map = hmap;
vl_heaplet_sets = hsets;
vl_old_heap = h1.vf_heap;
vl_buffers = bs;
vl_mod_loc = modloc;
} in
let layout2 = {layout1 with vl_inner = l} in
let h2 = {
vf_layout = layout2;
vf_heap = h1.vf_heap;
vf_heaplets = h1.vf_heaplets;
} in
h2
let lemma_create_heaplets buffers h1 =
let bs = list_to_seq buffers in
let h2 = create_heaplets buffers h1 in
assert (h2.vf_layout.vl_inner.vl_buffers == bs); // REVIEW: why is this necessary, even with extra ifuel?
lemma_make_owns h1.vf_heap bs (Seq.length bs);
lemma_loc_mutable_buffers buffers;
reveal_opaque (`%valid_layout_buffer_id) valid_layout_buffer_id;
() | false | false | Vale.X64.Memory_Sems.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 100,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val destroy_heaplets (h1:vale_full_heap) : GTot vale_full_heap | [] | Vale.X64.Memory_Sems.destroy_heaplets | {
"file_name": "vale/code/arch/x64/Vale.X64.Memory_Sems.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | h1: Vale.Arch.HeapImpl.vale_full_heap -> Prims.GTot Vale.Arch.HeapImpl.vale_full_heap | {
"end_col": 4,
"end_line": 189,
"start_col": 2,
"start_line": 189
} |
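Since destroy_heaplets is defined as the identity on vale_full_heap, the following trivial lemma holds by unfolding inside Vale.X64.Memory_Sems (outside the module the definition is abstract, so this is only a module-internal sketch; the lemma name is invented):

let destroy_heaplets_is_identity (h1: vale_full_heap)
  : Lemma (destroy_heaplets h1 == h1)
  = ()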
Prims.GTot | val make_owns (h: vale_heap) (bs: Seq.seq buffer_info) (n: nat{n <= Seq.length bs})
: GTot ((int -> option (n: nat{n < Seq.length bs})) & (heaplet_id -> Set.set int)) | [
{
"abbrev": true,
"full_module": "Vale.Interop.Base",
"short_module": "IB"
},
{
"abbrev": false,
"full_module": "Vale.Arch.Heap",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Lib.BufferViewHelpers",
"short_module": null
},
{
"abbrev": true,
"full_module": "LowStar.BufferView.Down",
"short_module": "DV"
},
{
"abbrev": true,
"full_module": "LowStar.BufferView.Up",
"short_module": "UV"
},
{
"abbrev": true,
"full_module": "LowStar.Monotonic.Buffer",
"short_module": "MB"
},
{
"abbrev": true,
"full_module": "Vale.X64.Machine_Semantics_s",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.Interop",
"short_module": "I"
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Machine_Semantics_s",
"short_module": "S"
},
{
"abbrev": false,
"full_module": "Vale.Lib.Seqs",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.MachineHeap_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Heap",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let make_owns (h:vale_heap) (bs:Seq.seq buffer_info) (n:nat{n <= Seq.length bs}) :
GTot ((int -> option (n:nat{n < Seq.length bs})) & (heaplet_id -> Set.set int))
=
make_owns_rec h bs n | val make_owns (h: vale_heap) (bs: Seq.seq buffer_info) (n: nat{n <= Seq.length bs})
: GTot ((int -> option (n: nat{n < Seq.length bs})) & (heaplet_id -> Set.set int))
let make_owns (h: vale_heap) (bs: Seq.seq buffer_info) (n: nat{n <= Seq.length bs})
: GTot ((int -> option (n: nat{n < Seq.length bs})) & (heaplet_id -> Set.set int)) = | false | null | false | make_owns_rec h bs n | {
"checked_file": "Vale.X64.Memory_Sems.fst.checked",
"dependencies": [
"Vale.X64.Memory.fst.checked",
"Vale.X64.Memory.fst.checked",
"Vale.X64.Machine_Semantics_s.fst.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.BufferViewStore.fsti.checked",
"Vale.Lib.BufferViewHelpers.fst.checked",
"Vale.Interop.Views.fsti.checked",
"Vale.Interop.Heap_s.fst.checked",
"Vale.Interop.Base.fst.checked",
"Vale.Interop.fsti.checked",
"Vale.Def.Words_s.fsti.checked",
"Vale.Def.Words.Two_s.fsti.checked",
"Vale.Def.Words.Four_s.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Def.Opaque_s.fsti.checked",
"Vale.Arch.MachineHeap.fsti.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"Vale.Arch.Heap.fsti.checked",
"prims.fst.checked",
"LowStar.Monotonic.Buffer.fsti.checked",
"LowStar.BufferView.Up.fsti.checked",
"LowStar.BufferView.Down.fsti.checked",
"FStar.UInt8.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.Set.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Option.fst.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.List.fst.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": true,
"source_file": "Vale.X64.Memory_Sems.fst"
} | [
"sometrivial"
] | [
"Vale.Arch.HeapImpl.vale_heap",
"FStar.Seq.Base.seq",
"Vale.Arch.HeapImpl.buffer_info",
"Prims.nat",
"Prims.b2t",
"Prims.op_LessThanOrEqual",
"FStar.Seq.Base.length",
"Vale.X64.Memory_Sems.make_owns_rec",
"FStar.Pervasives.Native.tuple2",
"Prims.int",
"FStar.Pervasives.Native.option",
"Prims.op_LessThan",
"Vale.Arch.HeapImpl.heaplet_id",
"FStar.Set.set"
] | [] | module Vale.X64.Memory_Sems
open FStar.Mul
open Vale.Def.Prop_s
open Vale.Def.Opaque_s
open Vale.X64.Machine_s
open Vale.X64.Memory
open Vale.Def.Words_s
module I = Vale.Interop
module S = Vale.X64.Machine_Semantics_s
module MB = LowStar.Monotonic.Buffer
module UV = LowStar.BufferView.Up
module DV = LowStar.BufferView.Down
open Vale.Lib.BufferViewHelpers
open Vale.Arch.HeapImpl
open Vale.Arch.Heap
friend Vale.X64.Memory
module IB = Vale.Interop.Base
let same_domain h m = Set.equal (IB.addrs_set (_ih h)) (Map.domain m)
let lemma_same_domains h m1 m2 = ()
let get_heap h = I.down_mem (_ih h)
let upd_heap h m = mi_heap_upd h m
//let lemma_upd_get_heap h = I.down_up_identity (_ih h)
let lemma_get_upd_heap h m = I.up_down_identity (_ih h) m
let lemma_heap_impl = ()
let lemma_heap_get_heap h = ()
let lemma_heap_taint h = ()
//let lemma_heap_upd_heap h mh mt = ()
[@"opaque_to_smt"]
let rec set_of_range (a:int) (n:nat) : Pure (Set.set int)
(requires True)
(ensures fun s -> (forall (i:int).{:pattern Set.mem i s} Set.mem i s <==> a <= i /\ i < a + n))
=
if n = 0 then Set.empty else Set.union (set_of_range a (n - 1)) (Set.singleton (a + n - 1))
let buffer_info_has_addr (bi:buffer_info) (a:int) =
let b = bi.bi_buffer in
let addr = Vale.Interop.Heap_s.global_addrs_map b in
let len = DV.length (get_downview b.bsrc) in
addr <= a /\ a < addr + len
let buffer_info_has_addr_opt (bi:option buffer_info) (a:int) =
match bi with
| None -> False
| Some bi -> buffer_info_has_addr bi a
#set-options "--z3rlimit 100"
let rec make_owns_rec (h:vale_heap) (bs:Seq.seq buffer_info) (n:nat{n <= Seq.length bs}) :
GTot ((int -> option (n:nat{n < Seq.length bs})) & (heaplet_id -> Set.set int))
=
if n = 0 then ((fun _ -> None), (fun _ -> Set.empty)) else
let (m0, s0) = make_owns_rec h bs (n - 1) in
let bi = Seq.index bs (n - 1) in
let b = bi.bi_buffer in
let hi = bi.bi_heaplet in
let addr = Vale.Interop.Heap_s.global_addrs_map b in
let len = DV.length (get_downview b.bsrc) in
let s_b = set_of_range addr len in
let s i = if i = hi then Set.union (s0 i) s_b else s0 i in
let m a = if addr <= a && a < addr + len then Some (n - 1) else m0 a in
(m, s)
[@"opaque_to_smt"]
let make_owns (h:vale_heap) (bs:Seq.seq buffer_info) (n:nat{n <= Seq.length bs}) :
GTot ((int -> option (n:nat{n < Seq.length bs})) & (heaplet_id -> Set.set int)) | false | false | Vale.X64.Memory_Sems.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 100,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val make_owns (h: vale_heap) (bs: Seq.seq buffer_info) (n: nat{n <= Seq.length bs})
: GTot ((int -> option (n: nat{n < Seq.length bs})) & (heaplet_id -> Set.set int)) | [] | Vale.X64.Memory_Sems.make_owns | {
"file_name": "vale/code/arch/x64/Vale.X64.Memory_Sems.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} |
h: Vale.Arch.HeapImpl.vale_heap ->
bs: FStar.Seq.Base.seq Vale.Arch.HeapImpl.buffer_info ->
n: Prims.nat{n <= FStar.Seq.Base.length bs}
-> Prims.GTot
((_: Prims.int -> FStar.Pervasives.Native.option (n: Prims.nat{n < FStar.Seq.Base.length bs})) *
(_: Vale.Arch.HeapImpl.heaplet_id -> FStar.Set.set Prims.int)) | {
"end_col": 22,
"end_line": 79,
"start_col": 2,
"start_line": 79
} |
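make_owns_rec carves the address space into per-heaplet sets by unioning set_of_range intervals, and the specification of set_of_range says membership is exactly the half-open interval [a, a + n). A module-internal sanity-check sketch of that reading (the helper name is invented; let-binding the application brings its postcondition into scope, so the assert should discharge via the stated SMT pattern):

let _check_set_of_range (a: int) (n: nat) (i: int) : unit =
  let s = set_of_range a n in
  assert (Set.mem i s <==> (a <= i /\ i < a + n))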
Prims.Tot | [
{
"abbrev": true,
"full_module": "Vale.Interop.Base",
"short_module": "IB"
},
{
"abbrev": false,
"full_module": "Vale.Arch.Heap",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Lib.BufferViewHelpers",
"short_module": null
},
{
"abbrev": true,
"full_module": "LowStar.BufferView.Down",
"short_module": "DV"
},
{
"abbrev": true,
"full_module": "LowStar.BufferView.Up",
"short_module": "UV"
},
{
"abbrev": true,
"full_module": "LowStar.Monotonic.Buffer",
"short_module": "MB"
},
{
"abbrev": true,
"full_module": "Vale.X64.Machine_Semantics_s",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.Interop",
"short_module": "I"
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Machine_Semantics_s",
"short_module": "S"
},
{
"abbrev": false,
"full_module": "Vale.Lib.Seqs",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.MachineHeap_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Heap",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let buffer_info_has_addr (bi:buffer_info) (a:int) =
let b = bi.bi_buffer in
let addr = Vale.Interop.Heap_s.global_addrs_map b in
let len = DV.length (get_downview b.bsrc) in
addr <= a /\ a < addr + len | let buffer_info_has_addr (bi: buffer_info) (a: int) = | false | null | false | let b = bi.bi_buffer in
let addr = Vale.Interop.Heap_s.global_addrs_map b in
let len = DV.length (get_downview b.bsrc) in
addr <= a /\ a < addr + len | {
"checked_file": "Vale.X64.Memory_Sems.fst.checked",
"dependencies": [
"Vale.X64.Memory.fst.checked",
"Vale.X64.Memory.fst.checked",
"Vale.X64.Machine_Semantics_s.fst.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.BufferViewStore.fsti.checked",
"Vale.Lib.BufferViewHelpers.fst.checked",
"Vale.Interop.Views.fsti.checked",
"Vale.Interop.Heap_s.fst.checked",
"Vale.Interop.Base.fst.checked",
"Vale.Interop.fsti.checked",
"Vale.Def.Words_s.fsti.checked",
"Vale.Def.Words.Two_s.fsti.checked",
"Vale.Def.Words.Four_s.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Def.Opaque_s.fsti.checked",
"Vale.Arch.MachineHeap.fsti.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"Vale.Arch.Heap.fsti.checked",
"prims.fst.checked",
"LowStar.Monotonic.Buffer.fsti.checked",
"LowStar.BufferView.Up.fsti.checked",
"LowStar.BufferView.Down.fsti.checked",
"FStar.UInt8.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.Set.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Option.fst.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.List.fst.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": true,
"source_file": "Vale.X64.Memory_Sems.fst"
} | [
"total"
] | [
"Vale.Arch.HeapImpl.buffer_info",
"Prims.int",
"Prims.l_and",
"Prims.b2t",
"Prims.op_LessThanOrEqual",
"Prims.op_LessThan",
"Prims.op_Addition",
"Prims.nat",
"LowStar.BufferView.Down.length",
"FStar.UInt8.t",
"Vale.Interop.Types.get_downview",
"Vale.Interop.Types.__proj__Buffer__item__src",
"Vale.Interop.Types.b8_preorder",
"Vale.Interop.Types.__proj__Buffer__item__writeable",
"Vale.Interop.Types.base_typ_as_type",
"Vale.Interop.Types.__proj__Buffer__item__bsrc",
"Vale.Def.Words_s.nat64",
"Vale.Interop.Heap_s.global_addrs_map",
"Vale.Arch.HeapImpl.buffer",
"Vale.Arch.HeapImpl.__proj__Mkbuffer_info__item__bi_typ",
"Vale.Arch.HeapImpl.__proj__Mkbuffer_info__item__bi_buffer",
"Prims.logical"
] | [] | module Vale.X64.Memory_Sems
open FStar.Mul
open Vale.Def.Prop_s
open Vale.Def.Opaque_s
open Vale.X64.Machine_s
open Vale.X64.Memory
open Vale.Def.Words_s
module I = Vale.Interop
module S = Vale.X64.Machine_Semantics_s
module MB = LowStar.Monotonic.Buffer
module UV = LowStar.BufferView.Up
module DV = LowStar.BufferView.Down
open Vale.Lib.BufferViewHelpers
open Vale.Arch.HeapImpl
open Vale.Arch.Heap
friend Vale.X64.Memory
module IB = Vale.Interop.Base
let same_domain h m = Set.equal (IB.addrs_set (_ih h)) (Map.domain m)
let lemma_same_domains h m1 m2 = ()
let get_heap h = I.down_mem (_ih h)
let upd_heap h m = mi_heap_upd h m
//let lemma_upd_get_heap h = I.down_up_identity (_ih h)
let lemma_get_upd_heap h m = I.up_down_identity (_ih h) m
let lemma_heap_impl = ()
let lemma_heap_get_heap h = ()
let lemma_heap_taint h = ()
//let lemma_heap_upd_heap h mh mt = ()
[@"opaque_to_smt"]
let rec set_of_range (a:int) (n:nat) : Pure (Set.set int)
(requires True)
(ensures fun s -> (forall (i:int).{:pattern Set.mem i s} Set.mem i s <==> a <= i /\ i < a + n))
=
if n = 0 then Set.empty else Set.union (set_of_range a (n - 1)) (Set.singleton (a + n - 1)) | false | true | Vale.X64.Memory_Sems.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val buffer_info_has_addr : bi: Vale.Arch.HeapImpl.buffer_info -> a: Prims.int -> Prims.logical | [] | Vale.X64.Memory_Sems.buffer_info_has_addr | {
"file_name": "vale/code/arch/x64/Vale.X64.Memory_Sems.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | bi: Vale.Arch.HeapImpl.buffer_info -> a: Prims.int -> Prims.logical | {
"end_col": 29,
"end_line": 52,
"start_col": 51,
"start_line": 48
} |
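buffer_info_has_addr_opt, defined alongside buffer_info_has_addr in the file context above, lifts the predicate to optional buffer_info values (False on None). A small sketch of the Some case (lemma name invented; it should follow by unfolding the two plain let definitions):

let buffer_info_has_addr_opt_some (bi: buffer_info) (a: int)
  : Lemma (buffer_info_has_addr_opt (Some bi) a <==> buffer_info_has_addr bi a)
  = ()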
Prims.GTot | val create_heaplets (buffers:list buffer_info) (h1:vale_full_heap) : GTot vale_full_heap | [
{
"abbrev": true,
"full_module": "Vale.Interop.Base",
"short_module": "IB"
},
{
"abbrev": false,
"full_module": "Vale.Lib.BufferViewHelpers",
"short_module": null
},
{
"abbrev": true,
"full_module": "LowStar.BufferView.Down",
"short_module": "DV"
},
{
"abbrev": true,
"full_module": "LowStar.BufferView.Up",
"short_module": "UV"
},
{
"abbrev": true,
"full_module": "LowStar.Monotonic.Buffer",
"short_module": "MB"
},
{
"abbrev": true,
"full_module": "Vale.Interop",
"short_module": "I"
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Machine_Semantics_s",
"short_module": "S"
},
{
"abbrev": false,
"full_module": "Vale.Lib.Seqs",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.MachineHeap_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Heap",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let create_heaplets buffers h1 =
let bs = list_to_seq buffers in
let modloc = loc_mutable_buffers buffers in
let layout1 = h1.vf_layout in
let layin1 = layout1.vl_inner in
let (hmap, hsets) = make_owns h1.vf_heap bs (Seq.length bs) in
let hmap a = Option.mapTot (fun n -> (Seq.index bs n).bi_heaplet) (hmap a) in
let l = {
vl_heaplets_initialized = true;
vl_heaplet_map = hmap;
vl_heaplet_sets = hsets;
vl_old_heap = h1.vf_heap;
vl_buffers = bs;
vl_mod_loc = modloc;
} in
let layout2 = {layout1 with vl_inner = l} in
let h2 = {
vf_layout = layout2;
vf_heap = h1.vf_heap;
vf_heaplets = h1.vf_heaplets;
} in
h2 | val create_heaplets (buffers:list buffer_info) (h1:vale_full_heap) : GTot vale_full_heap
let create_heaplets buffers h1 = | false | null | false | let bs = list_to_seq buffers in
let modloc = loc_mutable_buffers buffers in
let layout1 = h1.vf_layout in
let layin1 = layout1.vl_inner in
let hmap, hsets = make_owns h1.vf_heap bs (Seq.length bs) in
let hmap a = Option.mapTot (fun n -> (Seq.index bs n).bi_heaplet) (hmap a) in
let l =
{
vl_heaplets_initialized = true;
vl_heaplet_map = hmap;
vl_heaplet_sets = hsets;
vl_old_heap = h1.vf_heap;
vl_buffers = bs;
vl_mod_loc = modloc
}
in
let layout2 = { layout1 with vl_inner = l } in
let h2 = { vf_layout = layout2; vf_heap = h1.vf_heap; vf_heaplets = h1.vf_heaplets } in
h2 | {
"checked_file": "Vale.X64.Memory_Sems.fst.checked",
"dependencies": [
"Vale.X64.Memory.fst.checked",
"Vale.X64.Memory.fst.checked",
"Vale.X64.Machine_Semantics_s.fst.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.BufferViewStore.fsti.checked",
"Vale.Lib.BufferViewHelpers.fst.checked",
"Vale.Interop.Views.fsti.checked",
"Vale.Interop.Heap_s.fst.checked",
"Vale.Interop.Base.fst.checked",
"Vale.Interop.fsti.checked",
"Vale.Def.Words_s.fsti.checked",
"Vale.Def.Words.Two_s.fsti.checked",
"Vale.Def.Words.Four_s.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Def.Opaque_s.fsti.checked",
"Vale.Arch.MachineHeap.fsti.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"Vale.Arch.Heap.fsti.checked",
"prims.fst.checked",
"LowStar.Monotonic.Buffer.fsti.checked",
"LowStar.BufferView.Up.fsti.checked",
"LowStar.BufferView.Down.fsti.checked",
"FStar.UInt8.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.Set.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Option.fst.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.List.fst.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": true,
"source_file": "Vale.X64.Memory_Sems.fst"
} | [
"sometrivial"
] | [
"Prims.list",
"Vale.Arch.HeapImpl.buffer_info",
"Vale.Arch.HeapImpl.vale_full_heap",
"Prims.int",
"FStar.Pervasives.Native.option",
"Prims.nat",
"Prims.b2t",
"Prims.op_LessThan",
"FStar.Seq.Base.length",
"Vale.Arch.HeapImpl.heaplet_id",
"FStar.Set.set",
"Vale.Arch.HeapImpl.Mkvale_full_heap",
"Vale.Arch.HeapImpl.__proj__Mkvale_full_heap__item__vf_heap",
"Vale.Arch.HeapImpl.__proj__Mkvale_full_heap__item__vf_heaplets",
"Vale.Arch.HeapImpl.vale_heap_layout",
"Vale.Arch.HeapImpl.Mkvale_heap_layout",
"Vale.Arch.HeapImpl.__proj__Mkvale_heap_layout__item__vl_taint",
"Vale.Arch.HeapImpl.vale_heap_layout_inner",
"Vale.Arch.HeapImpl.Mkvale_heap_layout_inner",
"FStar.Option.mapTot",
"Vale.Arch.HeapImpl.__proj__Mkbuffer_info__item__bi_heaplet",
"FStar.Seq.Base.index",
"FStar.Pervasives.Native.tuple2",
"Vale.X64.Memory_Sems.make_owns",
"Vale.Arch.HeapImpl.__proj__Mkvale_heap_layout__item__vl_inner",
"Vale.Arch.HeapImpl.__proj__Mkvale_full_heap__item__vf_layout",
"Vale.X64.Memory.loc",
"Vale.X64.Memory.loc_mutable_buffers",
"FStar.Seq.Base.seq",
"Vale.Lib.Seqs.list_to_seq"
] | [] | module Vale.X64.Memory_Sems
open FStar.Mul
open Vale.Def.Prop_s
open Vale.Def.Opaque_s
open Vale.X64.Machine_s
open Vale.X64.Memory
open Vale.Def.Words_s
module I = Vale.Interop
module S = Vale.X64.Machine_Semantics_s
module MB = LowStar.Monotonic.Buffer
module UV = LowStar.BufferView.Up
module DV = LowStar.BufferView.Down
open Vale.Lib.BufferViewHelpers
open Vale.Arch.HeapImpl
open Vale.Arch.Heap
friend Vale.X64.Memory
module IB = Vale.Interop.Base
let same_domain h m = Set.equal (IB.addrs_set (_ih h)) (Map.domain m)
let lemma_same_domains h m1 m2 = ()
let get_heap h = I.down_mem (_ih h)
let upd_heap h m = mi_heap_upd h m
//let lemma_upd_get_heap h = I.down_up_identity (_ih h)
let lemma_get_upd_heap h m = I.up_down_identity (_ih h) m
let lemma_heap_impl = ()
let lemma_heap_get_heap h = ()
let lemma_heap_taint h = ()
//let lemma_heap_upd_heap h mh mt = ()
[@"opaque_to_smt"]
let rec set_of_range (a:int) (n:nat) : Pure (Set.set int)
(requires True)
(ensures fun s -> (forall (i:int).{:pattern Set.mem i s} Set.mem i s <==> a <= i /\ i < a + n))
=
if n = 0 then Set.empty else Set.union (set_of_range a (n - 1)) (Set.singleton (a + n - 1))
let buffer_info_has_addr (bi:buffer_info) (a:int) =
let b = bi.bi_buffer in
let addr = Vale.Interop.Heap_s.global_addrs_map b in
let len = DV.length (get_downview b.bsrc) in
addr <= a /\ a < addr + len
let buffer_info_has_addr_opt (bi:option buffer_info) (a:int) =
match bi with
| None -> False
| Some bi -> buffer_info_has_addr bi a
#set-options "--z3rlimit 100"
let rec make_owns_rec (h:vale_heap) (bs:Seq.seq buffer_info) (n:nat{n <= Seq.length bs}) :
GTot ((int -> option (n:nat{n < Seq.length bs})) & (heaplet_id -> Set.set int))
=
if n = 0 then ((fun _ -> None), (fun _ -> Set.empty)) else
let (m0, s0) = make_owns_rec h bs (n - 1) in
let bi = Seq.index bs (n - 1) in
let b = bi.bi_buffer in
let hi = bi.bi_heaplet in
let addr = Vale.Interop.Heap_s.global_addrs_map b in
let len = DV.length (get_downview b.bsrc) in
let s_b = set_of_range addr len in
let s i = if i = hi then Set.union (s0 i) s_b else s0 i in
let m a = if addr <= a && a < addr + len then Some (n - 1) else m0 a in
(m, s)
[@"opaque_to_smt"]
let make_owns (h:vale_heap) (bs:Seq.seq buffer_info) (n:nat{n <= Seq.length bs}) :
GTot ((int -> option (n:nat{n < Seq.length bs})) & (heaplet_id -> Set.set int))
=
make_owns_rec h bs n
let rec lemma_make_owns (h:vale_heap) (bs:Seq.seq buffer_info) (n:nat) : Lemma
(requires
n <= Seq.length bs /\
(forall (i:nat).{:pattern Seq.index bs i} i < Seq.length bs ==> buffer_readable h (Seq.index bs i).bi_buffer) /\
(forall (i1 i2:nat).{:pattern (Seq.index bs i1); (Seq.index bs i2)}
i1 < Seq.length bs /\ i2 < Seq.length bs ==> buffer_info_disjoint (Seq.index bs i1) (Seq.index bs i2))
)
(ensures (
let (m, s) = make_owns h bs n in
(forall (i:heaplet_id) (a:int).{:pattern Set.mem a (s i)}
(Set.mem a (s i) <==> Option.mapTot (fun n -> (Seq.index bs n).bi_heaplet) (m a) == Some i) /\
(Set.mem a (s i) ==> buffer_info_has_addr_opt (Option.mapTot (fun n -> Seq.index bs n) (m a)) a) /\
(Set.mem a (s i) ==> Set.mem a (Map.domain h.mh))
) /\
(forall (k:nat) (a:int).{:pattern Set.mem a (s (Seq.index bs k).bi_heaplet)}
k < n /\ buffer_info_has_addr (Seq.index bs k) a ==> Set.mem a (s (Seq.index bs k).bi_heaplet))
))
=
reveal_opaque (`%make_owns) make_owns;
if n = 0 then () else
let _ = make_owns h bs (n - 1) in
let (m, s) = make_owns h bs n in
lemma_make_owns h bs (n - 1);
let bi = Seq.index bs (n - 1) in
let b = bi.bi_buffer in
let hi = bi.bi_heaplet in
let addr = Vale.Interop.Heap_s.global_addrs_map b in
let len = DV.length (get_downview b.bsrc) in
let s_b = set_of_range addr len in
let lem1 (a:int) : Lemma
(requires Set.mem a s_b)
(ensures Set.mem a (Map.domain h.mh))
[SMTPat (Set.mem a (Map.domain h.mh))]
=
I.addrs_set_mem h.ih b a
in
let lem2 (i:heaplet_id) (a:int) : Lemma
(requires i =!= hi /\ Set.mem a (Set.intersect s_b (s i)))
(ensures False)
[SMTPat (Set.mem a (s i))]
=
reveal_opaque (`%addr_map_pred) addr_map_pred
in
()
#push-options "--initial_fuel 1 --max_fuel 1 --initial_ifuel 2 --max_ifuel 2"
let rec lemma_loc_mutable_buffers_rec (l:list buffer_info) (s:Seq.seq buffer_info) (n:nat) : Lemma
(requires
n + List.length l == Seq.length s /\
list_to_seq_post l s n
)
(ensures (
let modloc = loc_mutable_buffers l in
forall (i:nat).{:pattern Seq.index s i} n <= i /\ i < Seq.length s ==> (
let bi = Seq.index s i in
bi.bi_mutable == Mutable ==> loc_includes modloc (loc_buffer bi.bi_buffer))
))
(decreases l)
=
match l with
| [] -> ()
| h::t -> lemma_loc_mutable_buffers_rec t s (n + 1)
#pop-options
let lemma_loc_mutable_buffers (l:list buffer_info) : Lemma
(ensures (
let s = list_to_seq l in
forall (i:nat).{:pattern Seq.index s i} i < Seq.length s ==> (
let bi = Seq.index s i in
bi.bi_mutable == Mutable ==> loc_includes (loc_mutable_buffers l) (loc_buffer bi.bi_buffer))
))
=
lemma_list_to_seq l;
lemma_loc_mutable_buffers_rec l (list_to_seq l) 0 | false | false | Vale.X64.Memory_Sems.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 100,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val create_heaplets (buffers:list buffer_info) (h1:vale_full_heap) : GTot vale_full_heap | [] | Vale.X64.Memory_Sems.create_heaplets | {
"file_name": "vale/code/arch/x64/Vale.X64.Memory_Sems.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | buffers: Prims.list Vale.Arch.HeapImpl.buffer_info -> h1: Vale.Arch.HeapImpl.vale_full_heap
-> Prims.GTot Vale.Arch.HeapImpl.vale_full_heap | {
"end_col": 4,
"end_line": 177,
"start_col": 32,
"start_line": 156
} |
FStar.Pervasives.Lemma | val lemma_loc_mutable_buffers_rec (l: list buffer_info) (s: Seq.seq buffer_info) (n: nat)
: Lemma (requires n + List.length l == Seq.length s /\ list_to_seq_post l s n)
(ensures
(let modloc = loc_mutable_buffers l in
forall (i: nat). {:pattern Seq.index s i}
n <= i /\ i < Seq.length s ==>
(let bi = Seq.index s i in
bi.bi_mutable == Mutable ==> loc_includes modloc (loc_buffer bi.bi_buffer))))
(decreases l) | [
{
"abbrev": true,
"full_module": "Vale.Interop.Base",
"short_module": "IB"
},
{
"abbrev": false,
"full_module": "Vale.Arch.Heap",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Lib.BufferViewHelpers",
"short_module": null
},
{
"abbrev": true,
"full_module": "LowStar.BufferView.Down",
"short_module": "DV"
},
{
"abbrev": true,
"full_module": "LowStar.BufferView.Up",
"short_module": "UV"
},
{
"abbrev": true,
"full_module": "LowStar.Monotonic.Buffer",
"short_module": "MB"
},
{
"abbrev": true,
"full_module": "Vale.X64.Machine_Semantics_s",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.Interop",
"short_module": "I"
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Machine_Semantics_s",
"short_module": "S"
},
{
"abbrev": false,
"full_module": "Vale.Lib.Seqs",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.MachineHeap_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Heap",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let rec lemma_loc_mutable_buffers_rec (l:list buffer_info) (s:Seq.seq buffer_info) (n:nat) : Lemma
(requires
n + List.length l == Seq.length s /\
list_to_seq_post l s n
)
(ensures (
let modloc = loc_mutable_buffers l in
forall (i:nat).{:pattern Seq.index s i} n <= i /\ i < Seq.length s ==> (
let bi = Seq.index s i in
bi.bi_mutable == Mutable ==> loc_includes modloc (loc_buffer bi.bi_buffer))
))
(decreases l)
=
match l with
| [] -> ()
| h::t -> lemma_loc_mutable_buffers_rec t s (n + 1) | val lemma_loc_mutable_buffers_rec (l: list buffer_info) (s: Seq.seq buffer_info) (n: nat)
: Lemma (requires n + List.length l == Seq.length s /\ list_to_seq_post l s n)
(ensures
(let modloc = loc_mutable_buffers l in
forall (i: nat). {:pattern Seq.index s i}
n <= i /\ i < Seq.length s ==>
(let bi = Seq.index s i in
bi.bi_mutable == Mutable ==> loc_includes modloc (loc_buffer bi.bi_buffer))))
(decreases l)
let rec lemma_loc_mutable_buffers_rec (l: list buffer_info) (s: Seq.seq buffer_info) (n: nat)
: Lemma (requires n + List.length l == Seq.length s /\ list_to_seq_post l s n)
(ensures
(let modloc = loc_mutable_buffers l in
forall (i: nat). {:pattern Seq.index s i}
n <= i /\ i < Seq.length s ==>
(let bi = Seq.index s i in
bi.bi_mutable == Mutable ==> loc_includes modloc (loc_buffer bi.bi_buffer))))
(decreases l) = | false | null | true | match l with
| [] -> ()
| h :: t -> lemma_loc_mutable_buffers_rec t s (n + 1) | {
"checked_file": "Vale.X64.Memory_Sems.fst.checked",
"dependencies": [
"Vale.X64.Memory.fst.checked",
"Vale.X64.Memory.fst.checked",
"Vale.X64.Machine_Semantics_s.fst.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.BufferViewStore.fsti.checked",
"Vale.Lib.BufferViewHelpers.fst.checked",
"Vale.Interop.Views.fsti.checked",
"Vale.Interop.Heap_s.fst.checked",
"Vale.Interop.Base.fst.checked",
"Vale.Interop.fsti.checked",
"Vale.Def.Words_s.fsti.checked",
"Vale.Def.Words.Two_s.fsti.checked",
"Vale.Def.Words.Four_s.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Def.Opaque_s.fsti.checked",
"Vale.Arch.MachineHeap.fsti.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"Vale.Arch.Heap.fsti.checked",
"prims.fst.checked",
"LowStar.Monotonic.Buffer.fsti.checked",
"LowStar.BufferView.Up.fsti.checked",
"LowStar.BufferView.Down.fsti.checked",
"FStar.UInt8.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.Set.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Option.fst.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.List.fst.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": true,
"source_file": "Vale.X64.Memory_Sems.fst"
} | [
"lemma",
""
] | [
"Prims.list",
"Vale.Arch.HeapImpl.buffer_info",
"FStar.Seq.Base.seq",
"Prims.nat",
"Vale.X64.Memory_Sems.lemma_loc_mutable_buffers_rec",
"Prims.op_Addition",
"Prims.unit",
"Prims.l_and",
"Prims.eq2",
"Prims.int",
"FStar.List.Tot.Base.length",
"FStar.Seq.Base.length",
"Vale.Lib.Seqs.list_to_seq_post",
"Prims.squash",
"Prims.l_Forall",
"Prims.l_imp",
"Prims.b2t",
"Prims.op_LessThanOrEqual",
"Prims.op_LessThan",
"Vale.Arch.HeapImpl.mutability",
"Vale.Arch.HeapImpl.__proj__Mkbuffer_info__item__bi_mutable",
"Vale.Arch.HeapImpl.Mutable",
"Vale.X64.Memory.loc_includes",
"Vale.X64.Memory.loc_buffer",
"Vale.Arch.HeapImpl.__proj__Mkbuffer_info__item__bi_typ",
"Vale.Arch.HeapImpl.__proj__Mkbuffer_info__item__bi_buffer",
"FStar.Seq.Base.index",
"Vale.X64.Memory.loc",
"Vale.X64.Memory.loc_mutable_buffers",
"Prims.Nil",
"FStar.Pervasives.pattern"
] | [] | module Vale.X64.Memory_Sems
open FStar.Mul
open Vale.Def.Prop_s
open Vale.Def.Opaque_s
open Vale.X64.Machine_s
open Vale.X64.Memory
open Vale.Def.Words_s
module I = Vale.Interop
module S = Vale.X64.Machine_Semantics_s
module MB = LowStar.Monotonic.Buffer
module UV = LowStar.BufferView.Up
module DV = LowStar.BufferView.Down
open Vale.Lib.BufferViewHelpers
open Vale.Arch.HeapImpl
open Vale.Arch.Heap
friend Vale.X64.Memory
module IB = Vale.Interop.Base
let same_domain h m = Set.equal (IB.addrs_set (_ih h)) (Map.domain m)
let lemma_same_domains h m1 m2 = ()
let get_heap h = I.down_mem (_ih h)
let upd_heap h m = mi_heap_upd h m
//let lemma_upd_get_heap h = I.down_up_identity (_ih h)
let lemma_get_upd_heap h m = I.up_down_identity (_ih h) m
let lemma_heap_impl = ()
let lemma_heap_get_heap h = ()
let lemma_heap_taint h = ()
//let lemma_heap_upd_heap h mh mt = ()
[@"opaque_to_smt"]
let rec set_of_range (a:int) (n:nat) : Pure (Set.set int)
(requires True)
(ensures fun s -> (forall (i:int).{:pattern Set.mem i s} Set.mem i s <==> a <= i /\ i < a + n))
=
if n = 0 then Set.empty else Set.union (set_of_range a (n - 1)) (Set.singleton (a + n - 1))
let buffer_info_has_addr (bi:buffer_info) (a:int) =
let b = bi.bi_buffer in
let addr = Vale.Interop.Heap_s.global_addrs_map b in
let len = DV.length (get_downview b.bsrc) in
addr <= a /\ a < addr + len
let buffer_info_has_addr_opt (bi:option buffer_info) (a:int) =
match bi with
| None -> False
| Some bi -> buffer_info_has_addr bi a
#set-options "--z3rlimit 100"
let rec make_owns_rec (h:vale_heap) (bs:Seq.seq buffer_info) (n:nat{n <= Seq.length bs}) :
GTot ((int -> option (n:nat{n < Seq.length bs})) & (heaplet_id -> Set.set int))
=
if n = 0 then ((fun _ -> None), (fun _ -> Set.empty)) else
let (m0, s0) = make_owns_rec h bs (n - 1) in
let bi = Seq.index bs (n - 1) in
let b = bi.bi_buffer in
let hi = bi.bi_heaplet in
let addr = Vale.Interop.Heap_s.global_addrs_map b in
let len = DV.length (get_downview b.bsrc) in
let s_b = set_of_range addr len in
let s i = if i = hi then Set.union (s0 i) s_b else s0 i in
let m a = if addr <= a && a < addr + len then Some (n - 1) else m0 a in
(m, s)
[@"opaque_to_smt"]
let make_owns (h:vale_heap) (bs:Seq.seq buffer_info) (n:nat{n <= Seq.length bs}) :
GTot ((int -> option (n:nat{n < Seq.length bs})) & (heaplet_id -> Set.set int))
=
make_owns_rec h bs n
let rec lemma_make_owns (h:vale_heap) (bs:Seq.seq buffer_info) (n:nat) : Lemma
(requires
n <= Seq.length bs /\
(forall (i:nat).{:pattern Seq.index bs i} i < Seq.length bs ==> buffer_readable h (Seq.index bs i).bi_buffer) /\
(forall (i1 i2:nat).{:pattern (Seq.index bs i1); (Seq.index bs i2)}
i1 < Seq.length bs /\ i2 < Seq.length bs ==> buffer_info_disjoint (Seq.index bs i1) (Seq.index bs i2))
)
(ensures (
let (m, s) = make_owns h bs n in
(forall (i:heaplet_id) (a:int).{:pattern Set.mem a (s i)}
(Set.mem a (s i) <==> Option.mapTot (fun n -> (Seq.index bs n).bi_heaplet) (m a) == Some i) /\
(Set.mem a (s i) ==> buffer_info_has_addr_opt (Option.mapTot (fun n -> Seq.index bs n) (m a)) a) /\
(Set.mem a (s i) ==> Set.mem a (Map.domain h.mh))
) /\
(forall (k:nat) (a:int).{:pattern Set.mem a (s (Seq.index bs k).bi_heaplet)}
k < n /\ buffer_info_has_addr (Seq.index bs k) a ==> Set.mem a (s (Seq.index bs k).bi_heaplet))
))
=
reveal_opaque (`%make_owns) make_owns;
if n = 0 then () else
let _ = make_owns h bs (n - 1) in
let (m, s) = make_owns h bs n in
lemma_make_owns h bs (n - 1);
let bi = Seq.index bs (n - 1) in
let b = bi.bi_buffer in
let hi = bi.bi_heaplet in
let addr = Vale.Interop.Heap_s.global_addrs_map b in
let len = DV.length (get_downview b.bsrc) in
let s_b = set_of_range addr len in
let lem1 (a:int) : Lemma
(requires Set.mem a s_b)
(ensures Set.mem a (Map.domain h.mh))
[SMTPat (Set.mem a (Map.domain h.mh))]
=
I.addrs_set_mem h.ih b a
in
let lem2 (i:heaplet_id) (a:int) : Lemma
(requires i =!= hi /\ Set.mem a (Set.intersect s_b (s i)))
(ensures False)
[SMTPat (Set.mem a (s i))]
=
reveal_opaque (`%addr_map_pred) addr_map_pred
in
()
#push-options "--initial_fuel 1 --max_fuel 1 --initial_ifuel 2 --max_ifuel 2"
let rec lemma_loc_mutable_buffers_rec (l:list buffer_info) (s:Seq.seq buffer_info) (n:nat) : Lemma
(requires
n + List.length l == Seq.length s /\
list_to_seq_post l s n
)
(ensures (
let modloc = loc_mutable_buffers l in
forall (i:nat).{:pattern Seq.index s i} n <= i /\ i < Seq.length s ==> (
let bi = Seq.index s i in
bi.bi_mutable == Mutable ==> loc_includes modloc (loc_buffer bi.bi_buffer))
))
(decreases l) | false | false | Vale.X64.Memory_Sems.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 1,
"initial_ifuel": 2,
"max_fuel": 1,
"max_ifuel": 2,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 100,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val lemma_loc_mutable_buffers_rec (l: list buffer_info) (s: Seq.seq buffer_info) (n: nat)
: Lemma (requires n + List.length l == Seq.length s /\ list_to_seq_post l s n)
(ensures
(let modloc = loc_mutable_buffers l in
forall (i: nat). {:pattern Seq.index s i}
n <= i /\ i < Seq.length s ==>
(let bi = Seq.index s i in
bi.bi_mutable == Mutable ==> loc_includes modloc (loc_buffer bi.bi_buffer))))
(decreases l) | [
"recursion"
] | Vale.X64.Memory_Sems.lemma_loc_mutable_buffers_rec | {
"file_name": "vale/code/arch/x64/Vale.X64.Memory_Sems.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} |
l: Prims.list Vale.Arch.HeapImpl.buffer_info ->
s: FStar.Seq.Base.seq Vale.Arch.HeapImpl.buffer_info ->
n: Prims.nat
-> FStar.Pervasives.Lemma
(requires
n + FStar.List.Tot.Base.length l == FStar.Seq.Base.length s /\
Vale.Lib.Seqs.list_to_seq_post l s n)
(ensures
(let modloc = Vale.X64.Memory.loc_mutable_buffers l in
forall (i: Prims.nat). {:pattern FStar.Seq.Base.index s i}
n <= i /\ i < FStar.Seq.Base.length s ==>
(let bi = FStar.Seq.Base.index s i in
Mkbuffer_info?.bi_mutable bi == Vale.Arch.HeapImpl.Mutable ==>
Vale.X64.Memory.loc_includes modloc
(Vale.X64.Memory.loc_buffer (Mkbuffer_info?.bi_buffer bi)))))
(decreases l) | {
"end_col": 53,
"end_line": 142,
"start_col": 2,
"start_line": 140
} |
Prims.Tot | [
{
"abbrev": true,
"full_module": "Vale.Interop.Base",
"short_module": "IB"
},
{
"abbrev": false,
"full_module": "Vale.Arch.Heap",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Lib.BufferViewHelpers",
"short_module": null
},
{
"abbrev": true,
"full_module": "LowStar.BufferView.Down",
"short_module": "DV"
},
{
"abbrev": true,
"full_module": "LowStar.BufferView.Up",
"short_module": "UV"
},
{
"abbrev": true,
"full_module": "LowStar.Monotonic.Buffer",
"short_module": "MB"
},
{
"abbrev": true,
"full_module": "Vale.X64.Machine_Semantics_s",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.Interop",
"short_module": "I"
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Machine_Semantics_s",
"short_module": "S"
},
{
"abbrev": false,
"full_module": "Vale.Lib.Seqs",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.MachineHeap_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Heap",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let buffer_info_has_addr_opt (bi:option buffer_info) (a:int) =
match bi with
| None -> False
| Some bi -> buffer_info_has_addr bi a | let buffer_info_has_addr_opt (bi: option buffer_info) (a: int) = | false | null | false | match bi with
| None -> False
| Some bi -> buffer_info_has_addr bi a | {
"checked_file": "Vale.X64.Memory_Sems.fst.checked",
"dependencies": [
"Vale.X64.Memory.fst.checked",
"Vale.X64.Memory.fst.checked",
"Vale.X64.Machine_Semantics_s.fst.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.BufferViewStore.fsti.checked",
"Vale.Lib.BufferViewHelpers.fst.checked",
"Vale.Interop.Views.fsti.checked",
"Vale.Interop.Heap_s.fst.checked",
"Vale.Interop.Base.fst.checked",
"Vale.Interop.fsti.checked",
"Vale.Def.Words_s.fsti.checked",
"Vale.Def.Words.Two_s.fsti.checked",
"Vale.Def.Words.Four_s.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Def.Opaque_s.fsti.checked",
"Vale.Arch.MachineHeap.fsti.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"Vale.Arch.Heap.fsti.checked",
"prims.fst.checked",
"LowStar.Monotonic.Buffer.fsti.checked",
"LowStar.BufferView.Up.fsti.checked",
"LowStar.BufferView.Down.fsti.checked",
"FStar.UInt8.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.Set.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Option.fst.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.List.fst.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": true,
"source_file": "Vale.X64.Memory_Sems.fst"
} | [
"total"
] | [
"FStar.Pervasives.Native.option",
"Vale.Arch.HeapImpl.buffer_info",
"Prims.int",
"Prims.l_False",
"Vale.X64.Memory_Sems.buffer_info_has_addr",
"Prims.logical"
] | [] | module Vale.X64.Memory_Sems
open FStar.Mul
open Vale.Def.Prop_s
open Vale.Def.Opaque_s
open Vale.X64.Machine_s
open Vale.X64.Memory
open Vale.Def.Words_s
module I = Vale.Interop
module S = Vale.X64.Machine_Semantics_s
module MB = LowStar.Monotonic.Buffer
module UV = LowStar.BufferView.Up
module DV = LowStar.BufferView.Down
open Vale.Lib.BufferViewHelpers
open Vale.Arch.HeapImpl
open Vale.Arch.Heap
friend Vale.X64.Memory
module IB = Vale.Interop.Base
let same_domain h m = Set.equal (IB.addrs_set (_ih h)) (Map.domain m)
let lemma_same_domains h m1 m2 = ()
let get_heap h = I.down_mem (_ih h)
let upd_heap h m = mi_heap_upd h m
//let lemma_upd_get_heap h = I.down_up_identity (_ih h)
let lemma_get_upd_heap h m = I.up_down_identity (_ih h) m
let lemma_heap_impl = ()
let lemma_heap_get_heap h = ()
let lemma_heap_taint h = ()
//let lemma_heap_upd_heap h mh mt = ()
[@"opaque_to_smt"]
let rec set_of_range (a:int) (n:nat) : Pure (Set.set int)
(requires True)
(ensures fun s -> (forall (i:int).{:pattern Set.mem i s} Set.mem i s <==> a <= i /\ i < a + n))
=
if n = 0 then Set.empty else Set.union (set_of_range a (n - 1)) (Set.singleton (a + n - 1))
let buffer_info_has_addr (bi:buffer_info) (a:int) =
let b = bi.bi_buffer in
let addr = Vale.Interop.Heap_s.global_addrs_map b in
let len = DV.length (get_downview b.bsrc) in
addr <= a /\ a < addr + len | false | true | Vale.X64.Memory_Sems.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val buffer_info_has_addr_opt : bi: FStar.Pervasives.Native.option Vale.Arch.HeapImpl.buffer_info -> a: Prims.int -> Prims.logical | [] | Vale.X64.Memory_Sems.buffer_info_has_addr_opt | {
"file_name": "vale/code/arch/x64/Vale.X64.Memory_Sems.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | bi: FStar.Pervasives.Native.option Vale.Arch.HeapImpl.buffer_info -> a: Prims.int -> Prims.logical | {
"end_col": 40,
"end_line": 57,
"start_col": 2,
"start_line": 55
} |
|
FStar.Pervasives.Lemma | val low_lemma_load_mem128 (b:buffer128) (i:nat) (h:vale_heap) : Lemma
(requires
i < Seq.length (buffer_as_seq h b) /\
buffer_readable h b
)
(ensures
S.get_heap_val128 (buffer_addr b h + scale16 i) (get_heap h) == buffer_read b i h
) | [
{
"abbrev": false,
"full_module": "Vale.X64.BufferViewStore",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Interop.Base",
"short_module": "IB"
},
{
"abbrev": false,
"full_module": "Vale.Arch.Heap",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Lib.BufferViewHelpers",
"short_module": null
},
{
"abbrev": true,
"full_module": "LowStar.BufferView.Down",
"short_module": "DV"
},
{
"abbrev": true,
"full_module": "LowStar.BufferView.Up",
"short_module": "UV"
},
{
"abbrev": true,
"full_module": "LowStar.Monotonic.Buffer",
"short_module": "MB"
},
{
"abbrev": true,
"full_module": "Vale.X64.Machine_Semantics_s",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.Interop",
"short_module": "I"
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Machine_Semantics_s",
"short_module": "S"
},
{
"abbrev": false,
"full_module": "Vale.Lib.Seqs",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.MachineHeap_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Heap",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let low_lemma_load_mem128 b i h =
lemma_valid_mem128 b i h;
lemma_load_mem128 b i h;
equiv_load_mem128_aux (buffer_addr b h + scale16 i) h | val low_lemma_load_mem128 (b:buffer128) (i:nat) (h:vale_heap) : Lemma
(requires
i < Seq.length (buffer_as_seq h b) /\
buffer_readable h b
)
(ensures
S.get_heap_val128 (buffer_addr b h + scale16 i) (get_heap h) == buffer_read b i h
)
let low_lemma_load_mem128 b i h = | false | null | true | lemma_valid_mem128 b i h;
lemma_load_mem128 b i h;
equiv_load_mem128_aux (buffer_addr b h + scale16 i) h | {
"checked_file": "Vale.X64.Memory_Sems.fst.checked",
"dependencies": [
"Vale.X64.Memory.fst.checked",
"Vale.X64.Memory.fst.checked",
"Vale.X64.Machine_Semantics_s.fst.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.BufferViewStore.fsti.checked",
"Vale.Lib.BufferViewHelpers.fst.checked",
"Vale.Interop.Views.fsti.checked",
"Vale.Interop.Heap_s.fst.checked",
"Vale.Interop.Base.fst.checked",
"Vale.Interop.fsti.checked",
"Vale.Def.Words_s.fsti.checked",
"Vale.Def.Words.Two_s.fsti.checked",
"Vale.Def.Words.Four_s.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Def.Opaque_s.fsti.checked",
"Vale.Arch.MachineHeap.fsti.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"Vale.Arch.Heap.fsti.checked",
"prims.fst.checked",
"LowStar.Monotonic.Buffer.fsti.checked",
"LowStar.BufferView.Up.fsti.checked",
"LowStar.BufferView.Down.fsti.checked",
"FStar.UInt8.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.Set.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Option.fst.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.List.fst.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": true,
"source_file": "Vale.X64.Memory_Sems.fst"
} | [
"lemma"
] | [
"Vale.X64.Memory.buffer128",
"Prims.nat",
"Vale.Arch.HeapImpl.vale_heap",
"Vale.X64.Memory_Sems.equiv_load_mem128_aux",
"Prims.op_Addition",
"Vale.X64.Memory.buffer_addr",
"Vale.X64.Memory.vuint128",
"Vale.X64.Memory.scale16",
"Prims.unit",
"Vale.X64.Memory.lemma_load_mem128",
"Vale.X64.Memory.lemma_valid_mem128"
] | [] | module Vale.X64.Memory_Sems
open FStar.Mul
open Vale.Def.Prop_s
open Vale.Def.Opaque_s
open Vale.X64.Machine_s
open Vale.X64.Memory
open Vale.Def.Words_s
module I = Vale.Interop
module S = Vale.X64.Machine_Semantics_s
module MB = LowStar.Monotonic.Buffer
module UV = LowStar.BufferView.Up
module DV = LowStar.BufferView.Down
open Vale.Lib.BufferViewHelpers
open Vale.Arch.HeapImpl
open Vale.Arch.Heap
friend Vale.X64.Memory
module IB = Vale.Interop.Base
let same_domain h m = Set.equal (IB.addrs_set (_ih h)) (Map.domain m)
let lemma_same_domains h m1 m2 = ()
let get_heap h = I.down_mem (_ih h)
let upd_heap h m = mi_heap_upd h m
//let lemma_upd_get_heap h = I.down_up_identity (_ih h)
let lemma_get_upd_heap h m = I.up_down_identity (_ih h) m
let lemma_heap_impl = ()
let lemma_heap_get_heap h = ()
let lemma_heap_taint h = ()
//let lemma_heap_upd_heap h mh mt = ()
[@"opaque_to_smt"]
let rec set_of_range (a:int) (n:nat) : Pure (Set.set int)
(requires True)
(ensures fun s -> (forall (i:int).{:pattern Set.mem i s} Set.mem i s <==> a <= i /\ i < a + n))
=
if n = 0 then Set.empty else Set.union (set_of_range a (n - 1)) (Set.singleton (a + n - 1))
let buffer_info_has_addr (bi:buffer_info) (a:int) =
let b = bi.bi_buffer in
let addr = Vale.Interop.Heap_s.global_addrs_map b in
let len = DV.length (get_downview b.bsrc) in
addr <= a /\ a < addr + len
let buffer_info_has_addr_opt (bi:option buffer_info) (a:int) =
match bi with
| None -> False
| Some bi -> buffer_info_has_addr bi a
#set-options "--z3rlimit 100"
let rec make_owns_rec (h:vale_heap) (bs:Seq.seq buffer_info) (n:nat{n <= Seq.length bs}) :
GTot ((int -> option (n:nat{n < Seq.length bs})) & (heaplet_id -> Set.set int))
=
if n = 0 then ((fun _ -> None), (fun _ -> Set.empty)) else
let (m0, s0) = make_owns_rec h bs (n - 1) in
let bi = Seq.index bs (n - 1) in
let b = bi.bi_buffer in
let hi = bi.bi_heaplet in
let addr = Vale.Interop.Heap_s.global_addrs_map b in
let len = DV.length (get_downview b.bsrc) in
let s_b = set_of_range addr len in
let s i = if i = hi then Set.union (s0 i) s_b else s0 i in
let m a = if addr <= a && a < addr + len then Some (n - 1) else m0 a in
(m, s)
[@"opaque_to_smt"]
let make_owns (h:vale_heap) (bs:Seq.seq buffer_info) (n:nat{n <= Seq.length bs}) :
GTot ((int -> option (n:nat{n < Seq.length bs})) & (heaplet_id -> Set.set int))
=
make_owns_rec h bs n
let rec lemma_make_owns (h:vale_heap) (bs:Seq.seq buffer_info) (n:nat) : Lemma
(requires
n <= Seq.length bs /\
(forall (i:nat).{:pattern Seq.index bs i} i < Seq.length bs ==> buffer_readable h (Seq.index bs i).bi_buffer) /\
(forall (i1 i2:nat).{:pattern (Seq.index bs i1); (Seq.index bs i2)}
i1 < Seq.length bs /\ i2 < Seq.length bs ==> buffer_info_disjoint (Seq.index bs i1) (Seq.index bs i2))
)
(ensures (
let (m, s) = make_owns h bs n in
(forall (i:heaplet_id) (a:int).{:pattern Set.mem a (s i)}
(Set.mem a (s i) <==> Option.mapTot (fun n -> (Seq.index bs n).bi_heaplet) (m a) == Some i) /\
(Set.mem a (s i) ==> buffer_info_has_addr_opt (Option.mapTot (fun n -> Seq.index bs n) (m a)) a) /\
(Set.mem a (s i) ==> Set.mem a (Map.domain h.mh))
) /\
(forall (k:nat) (a:int).{:pattern Set.mem a (s (Seq.index bs k).bi_heaplet)}
k < n /\ buffer_info_has_addr (Seq.index bs k) a ==> Set.mem a (s (Seq.index bs k).bi_heaplet))
))
=
reveal_opaque (`%make_owns) make_owns;
if n = 0 then () else
let _ = make_owns h bs (n - 1) in
let (m, s) = make_owns h bs n in
lemma_make_owns h bs (n - 1);
let bi = Seq.index bs (n - 1) in
let b = bi.bi_buffer in
let hi = bi.bi_heaplet in
let addr = Vale.Interop.Heap_s.global_addrs_map b in
let len = DV.length (get_downview b.bsrc) in
let s_b = set_of_range addr len in
let lem1 (a:int) : Lemma
(requires Set.mem a s_b)
(ensures Set.mem a (Map.domain h.mh))
[SMTPat (Set.mem a (Map.domain h.mh))]
=
I.addrs_set_mem h.ih b a
in
let lem2 (i:heaplet_id) (a:int) : Lemma
(requires i =!= hi /\ Set.mem a (Set.intersect s_b (s i)))
(ensures False)
[SMTPat (Set.mem a (s i))]
=
reveal_opaque (`%addr_map_pred) addr_map_pred
in
()
#push-options "--initial_fuel 1 --max_fuel 1 --initial_ifuel 2 --max_ifuel 2"
let rec lemma_loc_mutable_buffers_rec (l:list buffer_info) (s:Seq.seq buffer_info) (n:nat) : Lemma
(requires
n + List.length l == Seq.length s /\
list_to_seq_post l s n
)
(ensures (
let modloc = loc_mutable_buffers l in
forall (i:nat).{:pattern Seq.index s i} n <= i /\ i < Seq.length s ==> (
let bi = Seq.index s i in
bi.bi_mutable == Mutable ==> loc_includes modloc (loc_buffer bi.bi_buffer))
))
(decreases l)
=
match l with
| [] -> ()
| h::t -> lemma_loc_mutable_buffers_rec t s (n + 1)
#pop-options
let lemma_loc_mutable_buffers (l:list buffer_info) : Lemma
(ensures (
let s = list_to_seq l in
forall (i:nat).{:pattern Seq.index s i} i < Seq.length s ==> (
let bi = Seq.index s i in
bi.bi_mutable == Mutable ==> loc_includes (loc_mutable_buffers l) (loc_buffer bi.bi_buffer))
))
=
lemma_list_to_seq l;
lemma_loc_mutable_buffers_rec l (list_to_seq l) 0
let create_heaplets buffers h1 =
let bs = list_to_seq buffers in
let modloc = loc_mutable_buffers buffers in
let layout1 = h1.vf_layout in
let layin1 = layout1.vl_inner in
let (hmap, hsets) = make_owns h1.vf_heap bs (Seq.length bs) in
let hmap a = Option.mapTot (fun n -> (Seq.index bs n).bi_heaplet) (hmap a) in
let l = {
vl_heaplets_initialized = true;
vl_heaplet_map = hmap;
vl_heaplet_sets = hsets;
vl_old_heap = h1.vf_heap;
vl_buffers = bs;
vl_mod_loc = modloc;
} in
let layout2 = {layout1 with vl_inner = l} in
let h2 = {
vf_layout = layout2;
vf_heap = h1.vf_heap;
vf_heaplets = h1.vf_heaplets;
} in
h2
let lemma_create_heaplets buffers h1 =
let bs = list_to_seq buffers in
let h2 = create_heaplets buffers h1 in
assert (h2.vf_layout.vl_inner.vl_buffers == bs); // REVIEW: why is this necessary, even with extra ifuel?
lemma_make_owns h1.vf_heap bs (Seq.length bs);
lemma_loc_mutable_buffers buffers;
reveal_opaque (`%valid_layout_buffer_id) valid_layout_buffer_id;
()
let destroy_heaplets h1 =
h1
let lemma_destroy_heaplets h1 =
()
val heap_shift (m1 m2:S.machine_heap) (base:int) (n:nat) : Lemma
(requires (forall i. 0 <= i /\ i < n ==> m1.[base + i] == m2.[base + i]))
(ensures (forall i. {:pattern (m1.[i])} base <= i /\ i < base + n ==> m1.[i] == m2.[i]))
#push-options "--smtencoding.l_arith_repr boxwrap"
let heap_shift m1 m2 base n =
assert (forall i. base <= i /\ i < base + n ==>
m1.[base + (i - base)] == m2.[base + (i - base)])
#pop-options
val same_mem_eq_slices64 (b:buffer64{buffer_writeable b})
(i:nat{i < buffer_length b})
(v:nat64)
(k:nat{k < buffer_length b})
(h1:vale_heap{List.memP b (IB.ptrs_of_mem (_ih h1))})
(h2:vale_heap{h2 == buffer_write b i v h1})
(mem1:S.machine_heap{IB.correct_down_p (_ih h1) mem1 b})
(mem2:S.machine_heap{IB.correct_down_p (_ih h2) mem2 b}) : Lemma
(requires (Seq.index (buffer_as_seq h1 b) k == Seq.index (buffer_as_seq h2 b) k))
(ensures (
k * 8 + 8 <= DV.length (get_downview b.bsrc) /\
Seq.slice (DV.as_seq (IB.hs_of_mem (_ih h1)) (get_downview b.bsrc)) (k * 8) (k * 8 + 8) ==
Seq.slice (DV.as_seq (IB.hs_of_mem (_ih h2)) (get_downview b.bsrc)) (k * 8) (k * 8 + 8)))
let same_mem_eq_slices64 b i v k h1 h2 mem1 mem2 =
let t = TUInt64 in
let db = get_downview b.bsrc in
let ub = UV.mk_buffer db (uint_view t) in
UV.as_seq_sel (IB.hs_of_mem (_ih h1)) ub k;
UV.as_seq_sel (IB.hs_of_mem (_ih h2)) ub k;
UV.put_sel (IB.hs_of_mem (_ih h1)) ub k;
UV.put_sel (IB.hs_of_mem (_ih h2)) ub k;
UV.length_eq ub
let length_up64 (b:buffer64) (h:vale_heap) (k:nat{k < buffer_length b}) (i:nat{i < 8}) : Lemma
(scale8 k + i <= DV.length (get_downview b.bsrc)) =
let vb = UV.mk_buffer (get_downview b.bsrc) uint64_view in
UV.length_eq vb
val same_mem_get_heap_val64 (b:buffer64{buffer_writeable b})
(i:nat{i < buffer_length b})
(v:nat64)
(k:nat{k < buffer_length b})
(h1:vale_heap{List.memP b (IB.ptrs_of_mem (_ih h1))})
(h2:vale_heap{h2 == buffer_write b i v h1})
(mem1:S.machine_heap{IB.correct_down_p (_ih h1) mem1 b})
(mem2:S.machine_heap{IB.correct_down_p (_ih h2) mem2 b}) : Lemma
(requires (Seq.index (buffer_as_seq h1 b) k == Seq.index (buffer_as_seq h2 b) k))
(ensures (let ptr = buffer_addr b h1 + scale8 k in
forall (x:int).{:pattern (mem1.[x])} ptr <= x /\ x < ptr + 8 ==> mem1.[x] == mem2.[x]))
let same_mem_get_heap_val64 b j v k h1 h2 mem1 mem2 =
let ptr = buffer_addr b h1 + scale8 k in
let addr = buffer_addr b h1 in
let aux (x:int{ptr <= x /\ x < ptr + 8}) : Lemma (mem1.[x] == mem2.[x]) =
let i = x - ptr in
let db = get_downview b.bsrc in
let ub = UV.mk_buffer db uint64_view in
UV.as_seq_sel (IB.hs_of_mem (_ih h1)) ub k;
UV.as_seq_sel (IB.hs_of_mem (_ih h2)) ub k;
same_mem_eq_slices64 b j v k h1 h2 mem1 mem2;
let s1 = (Seq.slice (DV.as_seq (IB.hs_of_mem (_ih h1)) db) (k * 8) (k * 8 + 8)) in
let s2 = (Seq.slice (DV.as_seq (IB.hs_of_mem (_ih h2)) db) (k * 8) (k * 8 + 8)) in
assert (Seq.index s1 i == Seq.index (DV.as_seq (IB.hs_of_mem (_ih h1)) db) (k * 8 + i));
length_up64 b h1 k i;
assert (mem1.[addr+(scale8 k + i)] == UInt8.v (Seq.index (DV.as_seq (IB.hs_of_mem (_ih h1)) db) (k * 8 + i)));
assert (Seq.index s2 i == Seq.index (DV.as_seq (IB.hs_of_mem (_ih h2)) db) (k * 8 + i));
length_up64 b h2 k i;
assert (mem2.[addr+(scale8 k + i)] == UInt8.v (Seq.index (DV.as_seq (IB.hs_of_mem (_ih h2)) db) (k * 8 + i)))
in
Classical.forall_intro aux;
assert (forall i. addr + (scale8 k + i) == ptr + i)
let rec written_buffer_down64_aux1
(b:buffer64{buffer_writeable b})
(i:nat{i < buffer_length b})
(v:nat64)
(h:vale_heap{List.memP b (IB.ptrs_of_mem (_ih h))})
(base:nat{base == buffer_addr b h})
(k:nat) (h1:vale_heap{h1 == buffer_write b i v h})
(mem1:S.machine_heap{IB.correct_down (_ih h) mem1})
(mem2:S.machine_heap{IB.correct_down (_ih h1) mem2 /\
(forall j.{:pattern (mem1.[j]) \/ (mem2.[j])}
base <= j /\ j < base + k * 8 ==>
mem1.[j] == mem2.[j])})
: Lemma
(ensures (forall j. {:pattern (mem1.[j]) \/ (mem1.[j])}
j >= base /\ j < base + scale8 i ==>
mem1.[j] == mem2.[j]))
(decreases %[i-k]) =
if k >= i then ()
else begin
let ptr = base + scale8 k in
same_mem_get_heap_val64 b i v k h h1 mem1 mem2;
heap_shift mem1 mem2 ptr 8;
written_buffer_down64_aux1 b i v h base (k+1) h1 mem1 mem2
end
let rec written_buffer_down64_aux2
(b:buffer64{buffer_writeable b})
(i:nat{i < buffer_length b})
(v:nat64)
(h:vale_heap{List.memP b (IB.ptrs_of_mem (_ih h))})
(base:nat{base == buffer_addr b h})
(n:nat{n == buffer_length b})
(k:nat{k > i}) (h1:vale_heap{h1 == buffer_write b i v h})
(mem1:S.machine_heap{IB.correct_down (_ih h) mem1})
(mem2:S.machine_heap{IB.correct_down (_ih h1) mem2 /\
(forall j. {:pattern (mem1.[j]) \/ (mem2.[j])}
base + scale8 (i+1) <= j /\ j < base + k * 8 ==>
mem1.[j] == mem2.[j])})
: Lemma
(ensures (forall j. {:pattern (mem1.[j]) \/ (mem2.[j])}
j >= base + scale8 (i+1) /\ j < base + scale8 n ==>
mem1.[j] == mem2.[j]))
(decreases %[n-k]) =
if k >= n then ()
else begin
let ptr = base + scale8 k in
same_mem_get_heap_val64 b i v k h h1 mem1 mem2;
heap_shift mem1 mem2 ptr 8;
written_buffer_down64_aux2 b i v h base n (k+1) h1 mem1 mem2
end
let written_buffer_down64 (b:buffer64{buffer_writeable b}) (i:nat{i < buffer_length b}) (v:nat64) (h:vale_heap)
: Lemma
(requires List.memP b (IB.ptrs_of_mem (_ih h)))
(ensures (
let mem1 = I.down_mem (_ih h) in
let h1 = buffer_write b i v h in
let mem2 = I.down_mem (_ih h1) in
let base = buffer_addr b h in
let n = buffer_length b in
forall j. {:pattern (mem1.[j]) \/ (mem2.[j])}
(base <= j /\ j < base + scale8 i) \/
(base + scale8 (i+1) <= j /\ j < base + scale8 n) ==>
mem1.[j] == mem2.[j]))
= let mem1 = I.down_mem (_ih h) in
let h1 = buffer_write b i v h in
let mem2 = I.down_mem (_ih h1) in
let base = buffer_addr b h in
let n = buffer_length b in
written_buffer_down64_aux1 b i v h base 0 h1 mem1 mem2;
written_buffer_down64_aux2 b i v h base n (i+1) h1 mem1 mem2
let unwritten_buffer_down (t:base_typ) (b:buffer t{buffer_writeable b})
(i:nat{i < buffer_length b})
(v:base_typ_as_vale_type t)
(h:vale_heap{List.memP b (IB.ptrs_of_mem (_ih h))})
: Lemma
(ensures (
let mem1 = I.down_mem (_ih h) in
let h1 = buffer_write b i v h in
let mem2 = I.down_mem (_ih h1) in
forall (a:b8{List.memP a (IB.ptrs_of_mem (_ih h)) /\ a =!= b}) j. {:pattern mem1.[j]; List.memP a (IB.ptrs_of_mem (_ih h)) \/ mem2.[j]; List.memP a (IB.ptrs_of_mem (_ih h))}
let base = (IB.addrs_of_mem (_ih h)) a in
j >= base /\ j < base + DV.length (get_downview a.bsrc) ==> mem1.[j] == mem2.[j]))
= let aux (a:b8{a =!= b /\ List.memP a (IB.ptrs_of_mem (_ih h))})
: Lemma
(ensures (
let base = (IB.addrs_of_mem (_ih h)) a in
let mem1 = I.down_mem (_ih h) in
let h1 = buffer_write b i v h in
let mem2 = I.down_mem (_ih h1) in
forall j.
j >= base /\ j < base + DV.length (get_downview a.bsrc) ==>
mem1.[j] == mem2.[j]))
= let db = get_downview a.bsrc in
if DV.length db = 0 then ()
else
let mem1 = I.down_mem (_ih h) in
let h1 = buffer_write b i v h in
let mem2 = I.down_mem (_ih h1) in
let base = (IB.addrs_of_mem (_ih h)) a in
let s0 = DV.as_seq (IB.hs_of_mem (_ih h)) db in
let s1 = DV.as_seq (IB.hs_of_mem (_ih h1)) db in
opaque_assert (`%IB.list_disjoint_or_eq) IB.list_disjoint_or_eq IB.list_disjoint_or_eq_def (MB.disjoint a.bsrc b.bsrc);
lemma_dv_equal (IB.down_view a.src) a.bsrc (IB.hs_of_mem (_ih h)) (IB.hs_of_mem (_ih h1));
assert (Seq.equal s0 s1);
assert (forall (j:int).{:pattern (mem1.[j])}
base <= j /\ j < base + Seq.length s0 ==> v_to_typ TUInt8 (Seq.index s0 (j - base)) == mem1.[j]);
heap_shift mem1 mem2 base (DV.length db)
in
Classical.forall_intro aux
let store_buffer_down64_mem
(b:buffer64{buffer_writeable b})
(i:nat{i < buffer_length b})
(v:nat64)
(h:vale_heap{List.memP b (IB.ptrs_of_mem (_ih h))})
: Lemma
(ensures (
let mem1 = I.down_mem (_ih h) in
let h1 = buffer_write b i v h in
let mem2 = I.down_mem (_ih h1) in
let base = buffer_addr b h in
forall (j:int). {:pattern mem1.[j] \/ mem2.[j]}
j < base + scale8 i \/ j >= base + scale8 (i+1) ==>
mem1.[j] == mem2.[j]))
= let mem1 = I.down_mem (_ih h) in
let h1 = buffer_write b i v h in
let mem2 = I.down_mem (_ih h1) in
let base = buffer_addr b h in
let n = buffer_length b in
let aux (j:int)
: Lemma
(j < base + scale8 i \/ j >= base + scale8 (i+1) ==>
mem1.[j] == mem2.[j])
=
I.addrs_set_lemma_all ();
if j >= base && j < base + DV.length (get_downview b.bsrc) then begin
written_buffer_down64 b i v h;
length_t_eq (TUInt64) b
end
else if not (I.valid_addr (_ih h) j)
then I.same_unspecified_down (IB.hs_of_mem (_ih h)) (IB.hs_of_mem (_ih h1)) (IB.ptrs_of_mem (_ih h))
else unwritten_buffer_down TUInt64 b i v h
in
Classical.forall_intro aux
let store_buffer_aux_down64_mem (ptr:int) (v:nat64) (h:vale_heap{writeable_mem64 ptr h})
: Lemma
(ensures (
let mem1 = I.down_mem (_ih h) in
let h1 = store_mem (TUInt64) ptr v h in
let mem2 = I.down_mem (_ih h1) in
forall j. {:pattern mem1.[j] \/ mem2.[j]}
j < ptr \/ j >= ptr + 8 ==>
mem1.[j] == mem2.[j]))
= let t = TUInt64 in
let h1 = store_mem t ptr v h in
let b = Some?.v (find_writeable_buffer t ptr h) in
length_t_eq t b;
let i = get_addr_in_ptr t (buffer_length b) (buffer_addr b h) ptr 0 in
store_buffer_write t ptr v h;
assert (buffer_addr b h + scale8 i == ptr);
assert (buffer_addr b h + scale8 (i+1) == ptr + 8);
store_buffer_down64_mem b i v h
let store_buffer_aux_down64_mem2 (ptr:int) (v:nat64) (h:vale_heap{writeable_mem64 ptr h})
: Lemma
(ensures (
let h1 = store_mem (TUInt64) ptr v h in
let mem2 = I.down_mem (_ih h1) in
S.get_heap_val64 ptr mem2 == v))
= let t = TUInt64 in
let b = Some?.v (find_writeable_buffer t ptr h) in
length_t_eq t b;
let i = get_addr_in_ptr t (buffer_length b) (buffer_addr b h) ptr 0 in
let h1 = store_mem t ptr v h in
let mem2 = I.down_mem (_ih h1) in
store_buffer_write t ptr v h;
assert (Seq.index (buffer_as_seq h1 b) i == v);
index64_get_heap_val64 h1 b mem2 i
let in_bounds64 (h:vale_heap) (b:buffer64) (i:nat{i < buffer_length b})
: Lemma (scale8 i + 8 <= DV.length (get_downview b.bsrc))
=
length_t_eq TUInt64 b
let bytes_valid64 ptr h =
reveal_opaque (`%S.valid_addr64) S.valid_addr64;
let t = TUInt64 in
let b = get_addr_ptr t ptr h in
let i = get_addr_in_ptr t (buffer_length b) (buffer_addr b h) ptr 0 in
in_bounds64 h b i;
I.addrs_set_mem (_ih h) b ptr;
I.addrs_set_mem (_ih h) b (ptr+1);
I.addrs_set_mem (_ih h) b (ptr+2);
I.addrs_set_mem (_ih h) b (ptr+3);
I.addrs_set_mem (_ih h) b (ptr+4);
I.addrs_set_mem (_ih h) b (ptr+5);
I.addrs_set_mem (_ih h) b (ptr+6);
I.addrs_set_mem (_ih h) b (ptr+7)
val same_mem_get_heap_val128 (b:buffer128)
(i:nat{i < buffer_length b})
(v:quad32)
(k:nat{k < buffer_length b})
(h1:vale_heap{List.memP b (IB.ptrs_of_mem (_ih h1)) /\ buffer_writeable b})
(h2:vale_heap{h2 == buffer_write b i v h1})
(mem1:S.machine_heap{IB.correct_down_p (_ih h1) mem1 b})
(mem2:S.machine_heap{IB.correct_down_p (_ih h2) mem2 b}) : Lemma
(requires (Seq.index (buffer_as_seq h1 b) k == Seq.index (buffer_as_seq h2 b) k))
(ensures (let ptr = buffer_addr b h1 + scale16 k in
forall i.{:pattern mem1.[i]} i >= ptr /\ i < ptr+16 ==> mem1.[i] == mem2.[i]))
val same_mem_eq_slices128 (b:buffer128)
(i:nat{i < buffer_length b})
(v:quad32)
(k:nat{k < buffer_length b})
(h1:vale_heap{List.memP b (IB.ptrs_of_mem (_ih h1)) /\ buffer_writeable b})
(h2:vale_heap{h2 == buffer_write b i v h1})
(mem1:S.machine_heap{IB.correct_down_p (_ih h1) mem1 b})
(mem2:S.machine_heap{IB.correct_down_p (_ih h2) mem2 b}) : Lemma
(requires (Seq.index (buffer_as_seq h1 b) k == Seq.index (buffer_as_seq h2 b) k))
(ensures (
k * 16 + 16 <= DV.length (get_downview b.bsrc) /\
Seq.slice (DV.as_seq (IB.hs_of_mem (_ih h1)) (get_downview b.bsrc)) (k * 16) (k * 16 + 16) ==
Seq.slice (DV.as_seq (IB.hs_of_mem (_ih h2)) (get_downview b.bsrc)) (k * 16) (k * 16 + 16)))
let same_mem_eq_slices128 b i v k h1 h2 mem1 mem2 =
let t = TUInt128 in
let db = get_downview b.bsrc in
let ub = UV.mk_buffer db (uint_view t) in
UV.as_seq_sel (IB.hs_of_mem (_ih h1)) ub k;
UV.as_seq_sel (IB.hs_of_mem (_ih h2)) ub k;
UV.put_sel (IB.hs_of_mem (_ih h1)) ub k;
UV.put_sel (IB.hs_of_mem (_ih h2)) ub k;
UV.length_eq ub
let length_up128 (b:buffer128) (h:vale_heap) (k:nat{k < buffer_length b}) (i:nat{i < 16}) : Lemma
(scale16 k + i <= DV.length (get_downview b.bsrc)) =
let vb = UV.mk_buffer (get_downview b.bsrc) uint128_view in
UV.length_eq vb
let same_mem_get_heap_val128 b j v k h1 h2 mem1 mem2 =
let ptr = buffer_addr b h1 + scale16 k in
let addr = buffer_addr b h1 in
let aux (i:nat{ptr <= i /\ i < ptr+16}) : Lemma (mem1.[i] == mem2.[i]) =
let db = get_downview b.bsrc in
let ub = UV.mk_buffer db uint128_view in
UV.as_seq_sel (IB.hs_of_mem (_ih h1)) ub k;
UV.as_seq_sel (IB.hs_of_mem (_ih h2)) ub k;
same_mem_eq_slices128 b j v k h1 h2 mem1 mem2;
let s1 = (Seq.slice (DV.as_seq (IB.hs_of_mem (_ih h1)) db) (k * 16) (k * 16 + 16)) in
let s2 = (Seq.slice (DV.as_seq (IB.hs_of_mem (_ih h2)) db) (k * 16) (k * 16 + 16)) in
assert (Seq.index s1 (i - ptr) == Seq.index (DV.as_seq (IB.hs_of_mem (_ih h1)) db) (k * 16 + (i-ptr)));
length_up128 b h1 k (i-ptr);
assert (mem1.[i] == UInt8.v (Seq.index (DV.as_seq (IB.hs_of_mem (_ih h1)) db) (k * 16 + (i-ptr))));
assert (Seq.index s2 (i-ptr) == Seq.index (DV.as_seq (IB.hs_of_mem (_ih h2)) db) (k * 16 + (i-ptr)));
length_up128 b h2 k (i-ptr);
assert (mem2.[addr+(scale16 k + (i-ptr))] == UInt8.v (Seq.index (DV.as_seq (IB.hs_of_mem (_ih h2)) db) (k * 16 + (i-ptr))));
assert (forall i. addr + (scale16 k + (i-ptr)) == i)
in
Classical.forall_intro aux
let in_bounds128 (h:vale_heap) (b:buffer128) (i:nat{i < buffer_length b}) : Lemma
(scale16 i + 16 <= DV.length (get_downview b.bsrc))
=
length_t_eq TUInt128 b
#push-options "--z3rlimit 20"
#restart-solver
let bytes_valid128 ptr h =
reveal_opaque (`%S.valid_addr128) S.valid_addr128;
IB.list_disjoint_or_eq_reveal ();
let t = TUInt128 in
let b = get_addr_ptr t ptr h in
let i = get_addr_in_ptr t (buffer_length b) (buffer_addr b h) ptr 0 in
in_bounds128 h b i;
I.addrs_set_mem (_ih h) b ptr;
I.addrs_set_mem (_ih h) b (ptr+1);
I.addrs_set_mem (_ih h) b (ptr+2);
I.addrs_set_mem (_ih h) b (ptr+3);
I.addrs_set_mem (_ih h) b (ptr+4);
I.addrs_set_mem (_ih h) b (ptr+5);
I.addrs_set_mem (_ih h) b (ptr+6);
I.addrs_set_mem (_ih h) b (ptr+7);
I.addrs_set_mem (_ih h) b (ptr+8);
I.addrs_set_mem (_ih h) b (ptr+9);
I.addrs_set_mem (_ih h) b (ptr+10);
I.addrs_set_mem (_ih h) b (ptr+11);
I.addrs_set_mem (_ih h) b (ptr+12);
I.addrs_set_mem (_ih h) b (ptr+13);
I.addrs_set_mem (_ih h) b (ptr+14);
I.addrs_set_mem (_ih h) b (ptr+15)
#pop-options
let equiv_load_mem64 ptr h =
let t = TUInt64 in
let b = get_addr_ptr t ptr h in
let i = get_addr_in_ptr t (buffer_length b) (buffer_addr b h) ptr 0 in
let addr = buffer_addr b h in
let contents = DV.as_seq (_ih h).IB.hs (get_downview b.bsrc) in
let heap = get_heap h in
index64_get_heap_val64 h b heap i;
lemma_load_mem64 b i h
//let low_lemma_valid_mem64 b i h =
// lemma_valid_mem64 b i h;
// bytes_valid64 (buffer_addr b h + scale8 i) h
//let low_lemma_load_mem64 b i h =
// lemma_valid_mem64 b i h;
// lemma_load_mem64 b i h;
// equiv_load_mem64 (buffer_addr b h + scale8 i) h
//let same_domain_update64 b i v h =
// low_lemma_valid_mem64 b i h;
// Vale.Arch.MachineHeap.same_domain_update64 (buffer_addr b h + scale8 i) v (get_heap h)
open Vale.X64.BufferViewStore
let low_lemma_store_mem64_aux
(b:buffer64)
(heap:S.machine_heap)
(i:nat{i < buffer_length b})
(v:nat64)
(h:vale_heap{buffer_readable h b /\ buffer_writeable b})
: Lemma
(requires IB.correct_down_p (_ih h) heap b)
(ensures (let heap' = S.update_heap64 (buffer_addr b h + scale8 i) v heap in
let h' = store_mem64 (buffer_addr b h + scale8 i) v h in
(_ih h').IB.hs == DV.upd_seq (_ih h).IB.hs (get_downview b.bsrc) (I.get_seq_heap heap' (_ih h).IB.addrs b))) =
let ptr = buffer_addr b h + scale8 i in
let heap' = S.update_heap64 ptr v heap in
let h' = store_mem64 ptr v h in
lemma_store_mem64 b i v h;
length_t_eq TUInt64 b;
bv_upd_update_heap64 b heap i v (_ih h);
let db = get_downview b.bsrc in
let bv = UV.mk_buffer db Vale.Interop.Views.up_view64 in
assert (UV.upd (_ih h).IB.hs bv i (UInt64.uint_to_t v) == (_ih h').IB.hs)
val valid_state_store_mem64_aux: (i:nat) -> (v:nat64) -> (h:vale_heap) -> Lemma
(requires writeable_mem64 i h)
(ensures (
let heap = get_heap h in
let heap' = S.update_heap64 i v heap in
let h' = store_mem64 i v h in
heap' == I.down_mem (_ih h')
))
let valid_state_store_mem64_aux i v h =
let heap = get_heap h in
let heap' = S.update_heap64 i v heap in
let h1 = store_mem TUInt64 i v h in
store_buffer_aux_down64_mem i v h;
store_buffer_aux_down64_mem2 i v h;
let mem1 = heap' in
let mem2 = I.down_mem (_ih h1) in
let aux () : Lemma (forall j. mem1.[j] == mem2.[j]) =
Vale.Arch.MachineHeap.same_mem_get_heap_val64 i mem1 mem2;
Vale.Arch.MachineHeap.correct_update_get64 i v heap;
Vale.Arch.MachineHeap.frame_update_heap64 i v heap
in let aux2 () : Lemma (Set.equal (Map.domain mem1) (Map.domain mem2)) =
bytes_valid64 i h;
Vale.Arch.MachineHeap.same_domain_update64 i v heap
in aux(); aux2();
Map.lemma_equal_intro mem1 mem2
let low_lemma_load_mem64_full b i vfh t hid =
reveal_opaque (`%valid_layout_buffer_id) valid_layout_buffer_id;
()
#push-options "--z3rlimit 20"
#restart-solver
let low_lemma_store_mem64 b i v h =
lemma_writeable_mem64 b i h;
lemma_store_mem64 b i v h;
valid_state_store_mem64_aux (buffer_addr b h + scale8 i) v h;
let heap = get_heap h in
let heap' = S.update_heap64 (buffer_addr b h + scale8 i) v heap in
low_lemma_store_mem64_aux b heap i v h;
Vale.Arch.MachineHeap.frame_update_heap64 (buffer_addr b h + scale8 i) v heap;
in_bounds64 h b i;
I.addrs_set_lemma_all ();
I.update_buffer_up_mem (_ih h) b heap heap'
#pop-options
#set-options "--z3rlimit 100"
#restart-solver
let lemma_is_full_update
(vfh:vale_full_heap) (h hk hk':vale_heap) (k:heaplet_id) (mh mh' mhk mhk':machine_heap) (mt mt':memtaint)
(t:base_typ) (b:buffer t) (ptr:int) (v_size:nat)
(index:nat) (v:base_typ_as_vale_type t) (tn:taint)
: Lemma
(requires
vfh.vf_layout.vl_inner.vl_heaplets_initialized /\
mem_inv vfh /\
buffer_readable hk b /\
buffer_writeable b /\
index < Seq.length (buffer_as_seq hk b) /\
mt == vfh.vf_layout.vl_taint /\
h == vfh.vf_heap /\
hk == Map16.sel vfh.vf_heaplets k /\
mh == h.mh /\
mhk == hk.mh /\
ptr == buffer_addr b hk + scale_by v_size index /\
mt' == S.update_n ptr v_size (heap_taint (coerce vfh)) tn /\
hk' == buffer_write b index v hk /\
valid_layout_buffer b vfh.vf_layout hk true /\
valid_taint_buf b hk mt tn /\
is_machine_heap_update mh mh' /\ upd_heap h mh' == buffer_write b index v h /\
is_machine_heap_update mhk mhk' /\ upd_heap hk mhk' == buffer_write b index v hk /\
(forall j.{:pattern mh.[j] \/ mh'.[j]} j < ptr \/ j >= ptr + v_size ==> mh.[j] == mh'.[j]) /\
(forall j.{:pattern mhk.[j] \/ mhk'.[j]} j < ptr \/ j >= ptr + v_size ==> mhk.[j] == mhk'.[j]) /\
0 <= scale_by v_size index /\ scale_by v_size index + v_size <= DV.length (get_downview b.bsrc) /\
(forall i.{:pattern mh'.[i] \/ mhk'.[i]} i >= ptr /\ i < ptr + v_size ==> mh'.[i] == mhk'.[i]) /\
True
)
(ensures is_full_update vfh hk' k mh' mt')
=
reveal_opaque (`%valid_layout_buffer_id) valid_layout_buffer_id;
let vfh' = coerce (heap_upd (coerce vfh) mh' mt') in
let dom_upd = Set.intersect (vfh.vf_layout.vl_inner.vl_heaplet_sets k) (Map.domain mhk) in
let mhk'' = Map.concat mhk (Map.restrict dom_upd mh') in
assert (Map.equal mhk'' mhk');
let unchanged (j:heaplet_id) : Lemma
(requires j =!= k)
(ensures Map16.sel vfh'.vf_heaplets j == Map16.sel vfh.vf_heaplets j)
[SMTPat (Map16.sel vfh'.vf_heaplets j)]
=
assert (Map.equal (Map16.sel vfh'.vf_heaplets j).mh (Map16.sel vfh.vf_heaplets j).mh);
I.down_up_identity (Map16.sel vfh.vf_heaplets j).ih;
()
in
assert (Map16.equal vfh'.vf_heaplets (Map16.upd vfh.vf_heaplets k hk'));
assert (Map.equal mt' mt);
Vale.Interop.Heap_s.list_disjoint_or_eq_reveal ();
()
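// Instantiates lemma_is_full_update for a 64-bit store: both the flat heap and heaplet hid
// are updated with update_heap64 at ptr, framing comes from frame_update_heap64, and
// agreement on the written 8 bytes from same_mem_get_heap_val64.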
let low_lemma_store_mem64_full b i v vfh t hid =
let (h, mt, hk) = (vfh.vf_heap, vfh.vf_layout.vl_taint, Map16.get vfh.vf_heaplets hid) in
let ptr = buffer_addr b hk + scale8 i in
let mh' = S.update_heap64 ptr v (heap_get (coerce vfh)) in
let mt' = S.update_n ptr 8 (heap_taint (coerce vfh)) t in
let hk' = buffer_write b i v hk in
let mhk' = S.update_heap64 ptr v (get_heap hk) in
reveal_opaque (`%valid_layout_buffer_id) valid_layout_buffer_id;
low_lemma_store_mem64 b i v h;
low_lemma_store_mem64 b i v (Map16.get vfh.vf_heaplets hid);
Vale.Arch.MachineHeap.frame_update_heap64 ptr v h.mh;
Vale.Arch.MachineHeap.frame_update_heap64 ptr v hk.mh;
in_bounds64 hk b i;
Vale.Arch.MachineHeap.same_mem_get_heap_val64 ptr mh' mhk';
lemma_is_full_update vfh h hk hk' hid h.mh mh' hk.mh mhk' mt mt' TUInt64 b ptr 8 i v t;
()
val low_lemma_valid_mem128 (b:buffer128) (i:nat) (h:vale_heap) : Lemma
(requires
i < Seq.length (buffer_as_seq h b) /\
buffer_readable h b
)
(ensures
S.valid_addr128 (buffer_addr b h + scale16 i) (get_heap h)
)
let low_lemma_valid_mem128 b i h =
lemma_valid_mem128 b i h;
bytes_valid128 (buffer_addr b h + scale16 i) h
val equiv_load_mem128_aux: (ptr:int) -> (h:vale_heap) -> Lemma
(requires valid_mem128 ptr h)
(ensures load_mem128 ptr h == S.get_heap_val128 ptr (get_heap h))
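// Proof: locate the buffer containing ptr, relate the buffer-view contents to the raw bytes
// with index128_get_heap_val128, and rewrite the load with lemma_load_mem128.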
let equiv_load_mem128_aux ptr h =
let t = TUInt128 in
let b = get_addr_ptr t ptr h in
let i = get_addr_in_ptr t (buffer_length b) (buffer_addr b h) ptr 0 in
let addr = buffer_addr b h in
let contents = DV.as_seq (_ih h).IB.hs (get_downview b.bsrc) in
let heap = get_heap h in
S.get_heap_val128_reveal ();
index128_get_heap_val128 h b heap i;
lemma_load_mem128 b i h
let equiv_load_mem128 ptr h =
equiv_load_mem128_aux ptr h
val low_lemma_load_mem128 (b:buffer128) (i:nat) (h:vale_heap) : Lemma
(requires
i < Seq.length (buffer_as_seq h b) /\
buffer_readable h b
)
(ensures
S.get_heap_val128 (buffer_addr b h + scale16 i) (get_heap h) == buffer_read b i h
) | false | false | Vale.X64.Memory_Sems.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 100,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val low_lemma_load_mem128 (b:buffer128) (i:nat) (h:vale_heap) : Lemma
(requires
i < Seq.length (buffer_as_seq h b) /\
buffer_readable h b
)
(ensures
S.get_heap_val128 (buffer_addr b h + scale16 i) (get_heap h) == buffer_read b i h
) | [] | Vale.X64.Memory_Sems.low_lemma_load_mem128 | {
"file_name": "vale/code/arch/x64/Vale.X64.Memory_Sems.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | b: Vale.X64.Memory.buffer128 -> i: Prims.nat -> h: Vale.Arch.HeapImpl.vale_heap
-> FStar.Pervasives.Lemma
(requires
i < FStar.Seq.Base.length (Vale.X64.Memory.buffer_as_seq h b) /\
Vale.X64.Memory.buffer_readable h b)
(ensures
Vale.Arch.MachineHeap_s.get_heap_val128 (Vale.X64.Memory.buffer_addr b h +
Vale.X64.Memory.scale16 i)
(Vale.X64.Memory_Sems.get_heap h) ==
Vale.X64.Memory.buffer_read b i h) | {
"end_col": 55,
"end_line": 767,
"start_col": 2,
"start_line": 765
} |
FStar.Pervasives.Lemma | val lemma_loc_mutable_buffers (l: list buffer_info)
: Lemma
(ensures
(let s = list_to_seq l in
forall (i: nat). {:pattern Seq.index s i}
i < Seq.length s ==>
(let bi = Seq.index s i in
bi.bi_mutable == Mutable ==>
loc_includes (loc_mutable_buffers l) (loc_buffer bi.bi_buffer)))) | [
{
"abbrev": true,
"full_module": "Vale.Interop.Base",
"short_module": "IB"
},
{
"abbrev": false,
"full_module": "Vale.Arch.Heap",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Lib.BufferViewHelpers",
"short_module": null
},
{
"abbrev": true,
"full_module": "LowStar.BufferView.Down",
"short_module": "DV"
},
{
"abbrev": true,
"full_module": "LowStar.BufferView.Up",
"short_module": "UV"
},
{
"abbrev": true,
"full_module": "LowStar.Monotonic.Buffer",
"short_module": "MB"
},
{
"abbrev": true,
"full_module": "Vale.X64.Machine_Semantics_s",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.Interop",
"short_module": "I"
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Machine_Semantics_s",
"short_module": "S"
},
{
"abbrev": false,
"full_module": "Vale.Lib.Seqs",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.MachineHeap_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Heap",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let lemma_loc_mutable_buffers (l:list buffer_info) : Lemma
(ensures (
let s = list_to_seq l in
forall (i:nat).{:pattern Seq.index s i} i < Seq.length s ==> (
let bi = Seq.index s i in
bi.bi_mutable == Mutable ==> loc_includes (loc_mutable_buffers l) (loc_buffer bi.bi_buffer))
))
=
lemma_list_to_seq l;
lemma_loc_mutable_buffers_rec l (list_to_seq l) 0 | val lemma_loc_mutable_buffers (l: list buffer_info)
: Lemma
(ensures
(let s = list_to_seq l in
forall (i: nat). {:pattern Seq.index s i}
i < Seq.length s ==>
(let bi = Seq.index s i in
bi.bi_mutable == Mutable ==>
loc_includes (loc_mutable_buffers l) (loc_buffer bi.bi_buffer))))
let lemma_loc_mutable_buffers (l: list buffer_info)
: Lemma
(ensures
(let s = list_to_seq l in
forall (i: nat). {:pattern Seq.index s i}
i < Seq.length s ==>
(let bi = Seq.index s i in
bi.bi_mutable == Mutable ==>
loc_includes (loc_mutable_buffers l) (loc_buffer bi.bi_buffer)))) = | false | null | true | lemma_list_to_seq l;
lemma_loc_mutable_buffers_rec l (list_to_seq l) 0 | {
"checked_file": "Vale.X64.Memory_Sems.fst.checked",
"dependencies": [
"Vale.X64.Memory.fst.checked",
"Vale.X64.Memory.fst.checked",
"Vale.X64.Machine_Semantics_s.fst.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.BufferViewStore.fsti.checked",
"Vale.Lib.BufferViewHelpers.fst.checked",
"Vale.Interop.Views.fsti.checked",
"Vale.Interop.Heap_s.fst.checked",
"Vale.Interop.Base.fst.checked",
"Vale.Interop.fsti.checked",
"Vale.Def.Words_s.fsti.checked",
"Vale.Def.Words.Two_s.fsti.checked",
"Vale.Def.Words.Four_s.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Def.Opaque_s.fsti.checked",
"Vale.Arch.MachineHeap.fsti.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"Vale.Arch.Heap.fsti.checked",
"prims.fst.checked",
"LowStar.Monotonic.Buffer.fsti.checked",
"LowStar.BufferView.Up.fsti.checked",
"LowStar.BufferView.Down.fsti.checked",
"FStar.UInt8.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.Set.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Option.fst.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.List.fst.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": true,
"source_file": "Vale.X64.Memory_Sems.fst"
} | [
"lemma"
] | [
"Prims.list",
"Vale.Arch.HeapImpl.buffer_info",
"Vale.X64.Memory_Sems.lemma_loc_mutable_buffers_rec",
"Vale.Lib.Seqs.list_to_seq",
"Prims.unit",
"Vale.Lib.Seqs.lemma_list_to_seq",
"Prims.l_True",
"Prims.squash",
"Prims.l_Forall",
"Prims.nat",
"Prims.l_imp",
"Prims.b2t",
"Prims.op_LessThan",
"FStar.Seq.Base.length",
"Prims.eq2",
"Vale.Arch.HeapImpl.mutability",
"Vale.Arch.HeapImpl.__proj__Mkbuffer_info__item__bi_mutable",
"Vale.Arch.HeapImpl.Mutable",
"Vale.X64.Memory.loc_includes",
"Vale.X64.Memory.loc_mutable_buffers",
"Vale.X64.Memory.loc_buffer",
"Vale.Arch.HeapImpl.__proj__Mkbuffer_info__item__bi_typ",
"Vale.Arch.HeapImpl.__proj__Mkbuffer_info__item__bi_buffer",
"FStar.Seq.Base.index",
"FStar.Seq.Base.seq",
"Prims.Nil",
"FStar.Pervasives.pattern"
] | [] | module Vale.X64.Memory_Sems
open FStar.Mul
open Vale.Def.Prop_s
open Vale.Def.Opaque_s
open Vale.X64.Machine_s
open Vale.X64.Memory
open Vale.Def.Words_s
module I = Vale.Interop
module S = Vale.X64.Machine_Semantics_s
module MB = LowStar.Monotonic.Buffer
module UV = LowStar.BufferView.Up
module DV = LowStar.BufferView.Down
open Vale.Lib.BufferViewHelpers
open Vale.Arch.HeapImpl
open Vale.Arch.Heap
friend Vale.X64.Memory
module IB = Vale.Interop.Base
let same_domain h m = Set.equal (IB.addrs_set (_ih h)) (Map.domain m)
let lemma_same_domains h m1 m2 = ()
let get_heap h = I.down_mem (_ih h)
let upd_heap h m = mi_heap_upd h m
//let lemma_upd_get_heap h = I.down_up_identity (_ih h)
let lemma_get_upd_heap h m = I.up_down_identity (_ih h) m
let lemma_heap_impl = ()
let lemma_heap_get_heap h = ()
let lemma_heap_taint h = ()
//let lemma_heap_upd_heap h mh mt = ()
[@"opaque_to_smt"]
let rec set_of_range (a:int) (n:nat) : Pure (Set.set int)
(requires True)
(ensures fun s -> (forall (i:int).{:pattern Set.mem i s} Set.mem i s <==> a <= i /\ i < a + n))
=
if n = 0 then Set.empty else Set.union (set_of_range a (n - 1)) (Set.singleton (a + n - 1))
let buffer_info_has_addr (bi:buffer_info) (a:int) =
let b = bi.bi_buffer in
let addr = Vale.Interop.Heap_s.global_addrs_map b in
let len = DV.length (get_downview b.bsrc) in
addr <= a /\ a < addr + len
let buffer_info_has_addr_opt (bi:option buffer_info) (a:int) =
match bi with
| None -> False
| Some bi -> buffer_info_has_addr bi a
#set-options "--z3rlimit 100"
let rec make_owns_rec (h:vale_heap) (bs:Seq.seq buffer_info) (n:nat{n <= Seq.length bs}) :
GTot ((int -> option (n:nat{n < Seq.length bs})) & (heaplet_id -> Set.set int))
=
if n = 0 then ((fun _ -> None), (fun _ -> Set.empty)) else
let (m0, s0) = make_owns_rec h bs (n - 1) in
let bi = Seq.index bs (n - 1) in
let b = bi.bi_buffer in
let hi = bi.bi_heaplet in
let addr = Vale.Interop.Heap_s.global_addrs_map b in
let len = DV.length (get_downview b.bsrc) in
let s_b = set_of_range addr len in
let s i = if i = hi then Set.union (s0 i) s_b else s0 i in
let m a = if addr <= a && a < addr + len then Some (n - 1) else m0 a in
(m, s)
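// make_owns computes, for a prefix of the buffer sequence, (1) a map from addresses to the
// index of the buffer that owns them and (2) the set of addresses belonging to each heaplet
// id; it is marked opaque_to_smt and revealed in lemma_make_owns below.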
[@"opaque_to_smt"]
let make_owns (h:vale_heap) (bs:Seq.seq buffer_info) (n:nat{n <= Seq.length bs}) :
GTot ((int -> option (n:nat{n < Seq.length bs})) & (heaplet_id -> Set.set int))
=
make_owns_rec h bs n
let rec lemma_make_owns (h:vale_heap) (bs:Seq.seq buffer_info) (n:nat) : Lemma
(requires
n <= Seq.length bs /\
(forall (i:nat).{:pattern Seq.index bs i} i < Seq.length bs ==> buffer_readable h (Seq.index bs i).bi_buffer) /\
(forall (i1 i2:nat).{:pattern (Seq.index bs i1); (Seq.index bs i2)}
i1 < Seq.length bs /\ i2 < Seq.length bs ==> buffer_info_disjoint (Seq.index bs i1) (Seq.index bs i2))
)
(ensures (
let (m, s) = make_owns h bs n in
(forall (i:heaplet_id) (a:int).{:pattern Set.mem a (s i)}
(Set.mem a (s i) <==> Option.mapTot (fun n -> (Seq.index bs n).bi_heaplet) (m a) == Some i) /\
(Set.mem a (s i) ==> buffer_info_has_addr_opt (Option.mapTot (fun n -> Seq.index bs n) (m a)) a) /\
(Set.mem a (s i) ==> Set.mem a (Map.domain h.mh))
) /\
(forall (k:nat) (a:int).{:pattern Set.mem a (s (Seq.index bs k).bi_heaplet)}
k < n /\ buffer_info_has_addr (Seq.index bs k) a ==> Set.mem a (s (Seq.index bs k).bi_heaplet))
))
=
reveal_opaque (`%make_owns) make_owns;
if n = 0 then () else
let _ = make_owns h bs (n - 1) in
let (m, s) = make_owns h bs n in
lemma_make_owns h bs (n - 1);
let bi = Seq.index bs (n - 1) in
let b = bi.bi_buffer in
let hi = bi.bi_heaplet in
let addr = Vale.Interop.Heap_s.global_addrs_map b in
let len = DV.length (get_downview b.bsrc) in
let s_b = set_of_range addr len in
let lem1 (a:int) : Lemma
(requires Set.mem a s_b)
(ensures Set.mem a (Map.domain h.mh))
[SMTPat (Set.mem a (Map.domain h.mh))]
=
I.addrs_set_mem h.ih b a
in
let lem2 (i:heaplet_id) (a:int) : Lemma
(requires i =!= hi /\ Set.mem a (Set.intersect s_b (s i)))
(ensures False)
[SMTPat (Set.mem a (s i))]
=
reveal_opaque (`%addr_map_pred) addr_map_pred
in
()
#push-options "--initial_fuel 1 --max_fuel 1 --initial_ifuel 2 --max_ifuel 2"
let rec lemma_loc_mutable_buffers_rec (l:list buffer_info) (s:Seq.seq buffer_info) (n:nat) : Lemma
(requires
n + List.length l == Seq.length s /\
list_to_seq_post l s n
)
(ensures (
let modloc = loc_mutable_buffers l in
forall (i:nat).{:pattern Seq.index s i} n <= i /\ i < Seq.length s ==> (
let bi = Seq.index s i in
bi.bi_mutable == Mutable ==> loc_includes modloc (loc_buffer bi.bi_buffer))
))
(decreases l)
=
match l with
| [] -> ()
| h::t -> lemma_loc_mutable_buffers_rec t s (n + 1)
#pop-options
let lemma_loc_mutable_buffers (l:list buffer_info) : Lemma
(ensures (
let s = list_to_seq l in
forall (i:nat).{:pattern Seq.index s i} i < Seq.length s ==> (
let bi = Seq.index s i in
bi.bi_mutable == Mutable ==> loc_includes (loc_mutable_buffers l) (loc_buffer bi.bi_buffer))
)) | false | false | Vale.X64.Memory_Sems.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 100,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val lemma_loc_mutable_buffers (l: list buffer_info)
: Lemma
(ensures
(let s = list_to_seq l in
forall (i: nat). {:pattern Seq.index s i}
i < Seq.length s ==>
(let bi = Seq.index s i in
bi.bi_mutable == Mutable ==>
loc_includes (loc_mutable_buffers l) (loc_buffer bi.bi_buffer)))) | [] | Vale.X64.Memory_Sems.lemma_loc_mutable_buffers | {
"file_name": "vale/code/arch/x64/Vale.X64.Memory_Sems.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | l: Prims.list Vale.Arch.HeapImpl.buffer_info
-> FStar.Pervasives.Lemma
(ensures
(let s = Vale.Lib.Seqs.list_to_seq l in
forall (i: Prims.nat). {:pattern FStar.Seq.Base.index s i}
i < FStar.Seq.Base.length s ==>
(let bi = FStar.Seq.Base.index s i in
Mkbuffer_info?.bi_mutable bi == Vale.Arch.HeapImpl.Mutable ==>
Vale.X64.Memory.loc_includes (Vale.X64.Memory.loc_mutable_buffers l)
(Vale.X64.Memory.loc_buffer (Mkbuffer_info?.bi_buffer bi))))) | {
"end_col": 51,
"end_line": 154,
"start_col": 2,
"start_line": 153
} |
Prims.GTot | val upd_heap (h:vale_heap) (m:S.machine_heap{is_machine_heap_update (get_heap h) m}) : GTot vale_heap | [
{
"abbrev": true,
"full_module": "Vale.Interop.Base",
"short_module": "IB"
},
{
"abbrev": false,
"full_module": "Vale.Lib.BufferViewHelpers",
"short_module": null
},
{
"abbrev": true,
"full_module": "LowStar.BufferView.Down",
"short_module": "DV"
},
{
"abbrev": true,
"full_module": "LowStar.BufferView.Up",
"short_module": "UV"
},
{
"abbrev": true,
"full_module": "LowStar.Monotonic.Buffer",
"short_module": "MB"
},
{
"abbrev": true,
"full_module": "Vale.Interop",
"short_module": "I"
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Machine_Semantics_s",
"short_module": "S"
},
{
"abbrev": false,
"full_module": "Vale.Lib.Seqs",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.MachineHeap_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Heap",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let upd_heap h m = mi_heap_upd h m | val upd_heap (h:vale_heap) (m:S.machine_heap{is_machine_heap_update (get_heap h) m}) : GTot vale_heap
let upd_heap h m = | false | null | false | mi_heap_upd h m | {
"checked_file": "Vale.X64.Memory_Sems.fst.checked",
"dependencies": [
"Vale.X64.Memory.fst.checked",
"Vale.X64.Memory.fst.checked",
"Vale.X64.Machine_Semantics_s.fst.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.BufferViewStore.fsti.checked",
"Vale.Lib.BufferViewHelpers.fst.checked",
"Vale.Interop.Views.fsti.checked",
"Vale.Interop.Heap_s.fst.checked",
"Vale.Interop.Base.fst.checked",
"Vale.Interop.fsti.checked",
"Vale.Def.Words_s.fsti.checked",
"Vale.Def.Words.Two_s.fsti.checked",
"Vale.Def.Words.Four_s.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Def.Opaque_s.fsti.checked",
"Vale.Arch.MachineHeap.fsti.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"Vale.Arch.Heap.fsti.checked",
"prims.fst.checked",
"LowStar.Monotonic.Buffer.fsti.checked",
"LowStar.BufferView.Up.fsti.checked",
"LowStar.BufferView.Down.fsti.checked",
"FStar.UInt8.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.Set.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Option.fst.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.List.fst.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": true,
"source_file": "Vale.X64.Memory_Sems.fst"
} | [
"sometrivial"
] | [
"Vale.Arch.HeapImpl.vale_heap",
"Vale.Arch.MachineHeap_s.machine_heap",
"Vale.Arch.MachineHeap_s.is_machine_heap_update",
"Vale.X64.Memory_Sems.get_heap",
"Vale.Arch.HeapImpl.mi_heap_upd"
] | [] | module Vale.X64.Memory_Sems
open FStar.Mul
open Vale.Def.Prop_s
open Vale.Def.Opaque_s
open Vale.X64.Machine_s
open Vale.X64.Memory
open Vale.Def.Words_s
module I = Vale.Interop
module S = Vale.X64.Machine_Semantics_s
module MB = LowStar.Monotonic.Buffer
module UV = LowStar.BufferView.Up
module DV = LowStar.BufferView.Down
open Vale.Lib.BufferViewHelpers
open Vale.Arch.HeapImpl
open Vale.Arch.Heap
friend Vale.X64.Memory
module IB = Vale.Interop.Base
let same_domain h m = Set.equal (IB.addrs_set (_ih h)) (Map.domain m)
let lemma_same_domains h m1 m2 = ()
let get_heap h = I.down_mem (_ih h) | false | false | Vale.X64.Memory_Sems.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val upd_heap (h:vale_heap) (m:S.machine_heap{is_machine_heap_update (get_heap h) m}) : GTot vale_heap | [] | Vale.X64.Memory_Sems.upd_heap | {
"file_name": "vale/code/arch/x64/Vale.X64.Memory_Sems.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} |
h: Vale.Arch.HeapImpl.vale_heap ->
m:
Vale.Arch.MachineHeap_s.machine_heap
{Vale.Arch.MachineHeap_s.is_machine_heap_update (Vale.X64.Memory_Sems.get_heap h) m}
-> Prims.GTot Vale.Arch.HeapImpl.vale_heap | {
"end_col": 34,
"end_line": 27,
"start_col": 19,
"start_line": 27
} |
FStar.Pervasives.Lemma | val frame_get_heap32 (ptr: int) (mem1 mem2: S.machine_heap)
: Lemma (requires (forall i. i >= ptr /\ i < ptr + 4 ==> mem1.[ i ] == mem2.[ i ]))
(ensures S.get_heap_val32 ptr mem1 == S.get_heap_val32 ptr mem2) | [
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words.Four_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words.Two_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.BufferViewStore",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Interop.Base",
"short_module": "IB"
},
{
"abbrev": false,
"full_module": "Vale.Arch.Heap",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Lib.BufferViewHelpers",
"short_module": null
},
{
"abbrev": true,
"full_module": "LowStar.BufferView.Down",
"short_module": "DV"
},
{
"abbrev": true,
"full_module": "LowStar.BufferView.Up",
"short_module": "UV"
},
{
"abbrev": true,
"full_module": "LowStar.Monotonic.Buffer",
"short_module": "MB"
},
{
"abbrev": true,
"full_module": "Vale.X64.Machine_Semantics_s",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.Interop",
"short_module": "I"
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Machine_Semantics_s",
"short_module": "S"
},
{
"abbrev": false,
"full_module": "Vale.Lib.Seqs",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.MachineHeap_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Heap",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let frame_get_heap32 (ptr:int) (mem1 mem2:S.machine_heap) : Lemma
(requires (forall i. i >= ptr /\ i < ptr + 4 ==> mem1.[i] == mem2.[i]))
(ensures S.get_heap_val32 ptr mem1 == S.get_heap_val32 ptr mem2) =
S.get_heap_val32_reveal () | val frame_get_heap32 (ptr: int) (mem1 mem2: S.machine_heap)
: Lemma (requires (forall i. i >= ptr /\ i < ptr + 4 ==> mem1.[ i ] == mem2.[ i ]))
(ensures S.get_heap_val32 ptr mem1 == S.get_heap_val32 ptr mem2)
let frame_get_heap32 (ptr: int) (mem1 mem2: S.machine_heap)
: Lemma (requires (forall i. i >= ptr /\ i < ptr + 4 ==> mem1.[ i ] == mem2.[ i ]))
(ensures S.get_heap_val32 ptr mem1 == S.get_heap_val32 ptr mem2) = | false | null | true | S.get_heap_val32_reveal () | {
"checked_file": "Vale.X64.Memory_Sems.fst.checked",
"dependencies": [
"Vale.X64.Memory.fst.checked",
"Vale.X64.Memory.fst.checked",
"Vale.X64.Machine_Semantics_s.fst.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.BufferViewStore.fsti.checked",
"Vale.Lib.BufferViewHelpers.fst.checked",
"Vale.Interop.Views.fsti.checked",
"Vale.Interop.Heap_s.fst.checked",
"Vale.Interop.Base.fst.checked",
"Vale.Interop.fsti.checked",
"Vale.Def.Words_s.fsti.checked",
"Vale.Def.Words.Two_s.fsti.checked",
"Vale.Def.Words.Four_s.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Def.Opaque_s.fsti.checked",
"Vale.Arch.MachineHeap.fsti.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"Vale.Arch.Heap.fsti.checked",
"prims.fst.checked",
"LowStar.Monotonic.Buffer.fsti.checked",
"LowStar.BufferView.Up.fsti.checked",
"LowStar.BufferView.Down.fsti.checked",
"FStar.UInt8.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.Set.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Option.fst.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.List.fst.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": true,
"source_file": "Vale.X64.Memory_Sems.fst"
} | [
"lemma"
] | [
"Prims.int",
"Vale.Arch.MachineHeap_s.machine_heap",
"Vale.Arch.MachineHeap_s.get_heap_val32_reveal",
"Prims.unit",
"Prims.l_Forall",
"Prims.l_imp",
"Prims.l_and",
"Prims.b2t",
"Prims.op_GreaterThanOrEqual",
"Prims.op_LessThan",
"Prims.op_Addition",
"Prims.eq2",
"Vale.Def.Types_s.nat8",
"Vale.X64.Memory.op_String_Access",
"Prims.squash",
"Vale.Def.Types_s.nat32",
"Vale.Arch.MachineHeap_s.get_heap_val32",
"Prims.Nil",
"FStar.Pervasives.pattern"
] | [] | module Vale.X64.Memory_Sems
open FStar.Mul
open Vale.Def.Prop_s
open Vale.Def.Opaque_s
open Vale.X64.Machine_s
open Vale.X64.Memory
open Vale.Def.Words_s
module I = Vale.Interop
module S = Vale.X64.Machine_Semantics_s
module MB = LowStar.Monotonic.Buffer
module UV = LowStar.BufferView.Up
module DV = LowStar.BufferView.Down
open Vale.Lib.BufferViewHelpers
open Vale.Arch.HeapImpl
open Vale.Arch.Heap
friend Vale.X64.Memory
module IB = Vale.Interop.Base
let same_domain h m = Set.equal (IB.addrs_set (_ih h)) (Map.domain m)
let lemma_same_domains h m1 m2 = ()
let get_heap h = I.down_mem (_ih h)
let upd_heap h m = mi_heap_upd h m
//let lemma_upd_get_heap h = I.down_up_identity (_ih h)
let lemma_get_upd_heap h m = I.up_down_identity (_ih h) m
let lemma_heap_impl = ()
let lemma_heap_get_heap h = ()
let lemma_heap_taint h = ()
//let lemma_heap_upd_heap h mh mt = ()
[@"opaque_to_smt"]
let rec set_of_range (a:int) (n:nat) : Pure (Set.set int)
(requires True)
(ensures fun s -> (forall (i:int).{:pattern Set.mem i s} Set.mem i s <==> a <= i /\ i < a + n))
=
if n = 0 then Set.empty else Set.union (set_of_range a (n - 1)) (Set.singleton (a + n - 1))
let buffer_info_has_addr (bi:buffer_info) (a:int) =
let b = bi.bi_buffer in
let addr = Vale.Interop.Heap_s.global_addrs_map b in
let len = DV.length (get_downview b.bsrc) in
addr <= a /\ a < addr + len
let buffer_info_has_addr_opt (bi:option buffer_info) (a:int) =
match bi with
| None -> False
| Some bi -> buffer_info_has_addr bi a
#set-options "--z3rlimit 100"
let rec make_owns_rec (h:vale_heap) (bs:Seq.seq buffer_info) (n:nat{n <= Seq.length bs}) :
GTot ((int -> option (n:nat{n < Seq.length bs})) & (heaplet_id -> Set.set int))
=
if n = 0 then ((fun _ -> None), (fun _ -> Set.empty)) else
let (m0, s0) = make_owns_rec h bs (n - 1) in
let bi = Seq.index bs (n - 1) in
let b = bi.bi_buffer in
let hi = bi.bi_heaplet in
let addr = Vale.Interop.Heap_s.global_addrs_map b in
let len = DV.length (get_downview b.bsrc) in
let s_b = set_of_range addr len in
let s i = if i = hi then Set.union (s0 i) s_b else s0 i in
let m a = if addr <= a && a < addr + len then Some (n - 1) else m0 a in
(m, s)
[@"opaque_to_smt"]
let make_owns (h:vale_heap) (bs:Seq.seq buffer_info) (n:nat{n <= Seq.length bs}) :
GTot ((int -> option (n:nat{n < Seq.length bs})) & (heaplet_id -> Set.set int))
=
make_owns_rec h bs n
let rec lemma_make_owns (h:vale_heap) (bs:Seq.seq buffer_info) (n:nat) : Lemma
(requires
n <= Seq.length bs /\
(forall (i:nat).{:pattern Seq.index bs i} i < Seq.length bs ==> buffer_readable h (Seq.index bs i).bi_buffer) /\
(forall (i1 i2:nat).{:pattern (Seq.index bs i1); (Seq.index bs i2)}
i1 < Seq.length bs /\ i2 < Seq.length bs ==> buffer_info_disjoint (Seq.index bs i1) (Seq.index bs i2))
)
(ensures (
let (m, s) = make_owns h bs n in
(forall (i:heaplet_id) (a:int).{:pattern Set.mem a (s i)}
(Set.mem a (s i) <==> Option.mapTot (fun n -> (Seq.index bs n).bi_heaplet) (m a) == Some i) /\
(Set.mem a (s i) ==> buffer_info_has_addr_opt (Option.mapTot (fun n -> Seq.index bs n) (m a)) a) /\
(Set.mem a (s i) ==> Set.mem a (Map.domain h.mh))
) /\
(forall (k:nat) (a:int).{:pattern Set.mem a (s (Seq.index bs k).bi_heaplet)}
k < n /\ buffer_info_has_addr (Seq.index bs k) a ==> Set.mem a (s (Seq.index bs k).bi_heaplet))
))
=
reveal_opaque (`%make_owns) make_owns;
if n = 0 then () else
let _ = make_owns h bs (n - 1) in
let (m, s) = make_owns h bs n in
lemma_make_owns h bs (n - 1);
let bi = Seq.index bs (n - 1) in
let b = bi.bi_buffer in
let hi = bi.bi_heaplet in
let addr = Vale.Interop.Heap_s.global_addrs_map b in
let len = DV.length (get_downview b.bsrc) in
let s_b = set_of_range addr len in
let lem1 (a:int) : Lemma
(requires Set.mem a s_b)
(ensures Set.mem a (Map.domain h.mh))
[SMTPat (Set.mem a (Map.domain h.mh))]
=
I.addrs_set_mem h.ih b a
in
let lem2 (i:heaplet_id) (a:int) : Lemma
(requires i =!= hi /\ Set.mem a (Set.intersect s_b (s i)))
(ensures False)
[SMTPat (Set.mem a (s i))]
=
reveal_opaque (`%addr_map_pred) addr_map_pred
in
()
#push-options "--initial_fuel 1 --max_fuel 1 --initial_ifuel 2 --max_ifuel 2"
let rec lemma_loc_mutable_buffers_rec (l:list buffer_info) (s:Seq.seq buffer_info) (n:nat) : Lemma
(requires
n + List.length l == Seq.length s /\
list_to_seq_post l s n
)
(ensures (
let modloc = loc_mutable_buffers l in
forall (i:nat).{:pattern Seq.index s i} n <= i /\ i < Seq.length s ==> (
let bi = Seq.index s i in
bi.bi_mutable == Mutable ==> loc_includes modloc (loc_buffer bi.bi_buffer))
))
(decreases l)
=
match l with
| [] -> ()
| h::t -> lemma_loc_mutable_buffers_rec t s (n + 1)
#pop-options
let lemma_loc_mutable_buffers (l:list buffer_info) : Lemma
(ensures (
let s = list_to_seq l in
forall (i:nat).{:pattern Seq.index s i} i < Seq.length s ==> (
let bi = Seq.index s i in
bi.bi_mutable == Mutable ==> loc_includes (loc_mutable_buffers l) (loc_buffer bi.bi_buffer))
))
=
lemma_list_to_seq l;
lemma_loc_mutable_buffers_rec l (list_to_seq l) 0
let create_heaplets buffers h1 =
let bs = list_to_seq buffers in
let modloc = loc_mutable_buffers buffers in
let layout1 = h1.vf_layout in
let layin1 = layout1.vl_inner in
let (hmap, hsets) = make_owns h1.vf_heap bs (Seq.length bs) in
let hmap a = Option.mapTot (fun n -> (Seq.index bs n).bi_heaplet) (hmap a) in
let l = {
vl_heaplets_initialized = true;
vl_heaplet_map = hmap;
vl_heaplet_sets = hsets;
vl_old_heap = h1.vf_heap;
vl_buffers = bs;
vl_mod_loc = modloc;
} in
let layout2 = {layout1 with vl_inner = l} in
let h2 = {
vf_layout = layout2;
vf_heap = h1.vf_heap;
vf_heaplets = h1.vf_heaplets;
} in
h2
let lemma_create_heaplets buffers h1 =
let bs = list_to_seq buffers in
let h2 = create_heaplets buffers h1 in
assert (h2.vf_layout.vl_inner.vl_buffers == bs); // REVIEW: why is this necessary, even with extra ifuel?
lemma_make_owns h1.vf_heap bs (Seq.length bs);
lemma_loc_mutable_buffers buffers;
reveal_opaque (`%valid_layout_buffer_id) valid_layout_buffer_id;
()
let destroy_heaplets h1 =
h1
let lemma_destroy_heaplets h1 =
()
val heap_shift (m1 m2:S.machine_heap) (base:int) (n:nat) : Lemma
(requires (forall i. 0 <= i /\ i < n ==> m1.[base + i] == m2.[base + i]))
(ensures (forall i. {:pattern (m1.[i])} base <= i /\ i < base + n ==> m1.[i] == m2.[i]))
#push-options "--smtencoding.l_arith_repr boxwrap"
let heap_shift m1 m2 base n =
assert (forall i. base <= i /\ i < base + n ==>
m1.[base + (i - base)] == m2.[base + (i - base)])
#pop-options
val same_mem_eq_slices64 (b:buffer64{buffer_writeable b})
(i:nat{i < buffer_length b})
(v:nat64)
(k:nat{k < buffer_length b})
(h1:vale_heap{List.memP b (IB.ptrs_of_mem (_ih h1))})
(h2:vale_heap{h2 == buffer_write b i v h1})
(mem1:S.machine_heap{IB.correct_down_p (_ih h1) mem1 b})
(mem2:S.machine_heap{IB.correct_down_p (_ih h2) mem2 b}) : Lemma
(requires (Seq.index (buffer_as_seq h1 b) k == Seq.index (buffer_as_seq h2 b) k))
(ensures (
k * 8 + 8 <= DV.length (get_downview b.bsrc) /\
Seq.slice (DV.as_seq (IB.hs_of_mem (_ih h1)) (get_downview b.bsrc)) (k * 8) (k * 8 + 8) ==
Seq.slice (DV.as_seq (IB.hs_of_mem (_ih h2)) (get_downview b.bsrc)) (k * 8) (k * 8 + 8)))
let same_mem_eq_slices64 b i v k h1 h2 mem1 mem2 =
let t = TUInt64 in
let db = get_downview b.bsrc in
let ub = UV.mk_buffer db (uint_view t) in
UV.as_seq_sel (IB.hs_of_mem (_ih h1)) ub k;
UV.as_seq_sel (IB.hs_of_mem (_ih h2)) ub k;
UV.put_sel (IB.hs_of_mem (_ih h1)) ub k;
UV.put_sel (IB.hs_of_mem (_ih h2)) ub k;
UV.length_eq ub
let length_up64 (b:buffer64) (h:vale_heap) (k:nat{k < buffer_length b}) (i:nat{i < 8}) : Lemma
(scale8 k + i <= DV.length (get_downview b.bsrc)) =
let vb = UV.mk_buffer (get_downview b.bsrc) uint64_view in
UV.length_eq vb
val same_mem_get_heap_val64 (b:buffer64{buffer_writeable b})
(i:nat{i < buffer_length b})
(v:nat64)
(k:nat{k < buffer_length b})
(h1:vale_heap{List.memP b (IB.ptrs_of_mem (_ih h1))})
(h2:vale_heap{h2 == buffer_write b i v h1})
(mem1:S.machine_heap{IB.correct_down_p (_ih h1) mem1 b})
(mem2:S.machine_heap{IB.correct_down_p (_ih h2) mem2 b}) : Lemma
(requires (Seq.index (buffer_as_seq h1 b) k == Seq.index (buffer_as_seq h2 b) k))
(ensures (let ptr = buffer_addr b h1 + scale8 k in
forall (x:int).{:pattern (mem1.[x])} ptr <= x /\ x < ptr + 8 ==> mem1.[x] == mem2.[x]))
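// Proof: for each byte offset within the 8-byte word, relate it to the corresponding slice
// of the downview sequence (same_mem_eq_slices64), so that an unchanged sequence entry
// gives unchanged machine-heap bytes.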
let same_mem_get_heap_val64 b j v k h1 h2 mem1 mem2 =
let ptr = buffer_addr b h1 + scale8 k in
let addr = buffer_addr b h1 in
let aux (x:int{ptr <= x /\ x < ptr + 8}) : Lemma (mem1.[x] == mem2.[x]) =
let i = x - ptr in
let db = get_downview b.bsrc in
let ub = UV.mk_buffer db uint64_view in
UV.as_seq_sel (IB.hs_of_mem (_ih h1)) ub k;
UV.as_seq_sel (IB.hs_of_mem (_ih h2)) ub k;
same_mem_eq_slices64 b j v k h1 h2 mem1 mem2;
let s1 = (Seq.slice (DV.as_seq (IB.hs_of_mem (_ih h1)) db) (k * 8) (k * 8 + 8)) in
let s2 = (Seq.slice (DV.as_seq (IB.hs_of_mem (_ih h2)) db) (k * 8) (k * 8 + 8)) in
assert (Seq.index s1 i == Seq.index (DV.as_seq (IB.hs_of_mem (_ih h1)) db) (k * 8 + i));
length_up64 b h1 k i;
assert (mem1.[addr+(scale8 k + i)] == UInt8.v (Seq.index (DV.as_seq (IB.hs_of_mem (_ih h1)) db) (k * 8 + i)));
assert (Seq.index s2 i == Seq.index (DV.as_seq (IB.hs_of_mem (_ih h2)) db) (k * 8 + i));
length_up64 b h2 k i;
assert (mem2.[addr+(scale8 k + i)] == UInt8.v (Seq.index (DV.as_seq (IB.hs_of_mem (_ih h2)) db) (k * 8 + i)))
in
Classical.forall_intro aux;
assert (forall i. addr + (scale8 k + i) == ptr + i)
let rec written_buffer_down64_aux1
(b:buffer64{buffer_writeable b})
(i:nat{i < buffer_length b})
(v:nat64)
(h:vale_heap{List.memP b (IB.ptrs_of_mem (_ih h))})
(base:nat{base == buffer_addr b h})
(k:nat) (h1:vale_heap{h1 == buffer_write b i v h})
(mem1:S.machine_heap{IB.correct_down (_ih h) mem1})
(mem2:S.machine_heap{IB.correct_down (_ih h1) mem2 /\
(forall j.{:pattern (mem1.[j]) \/ (mem2.[j])}
base <= j /\ j < base + k * 8 ==>
mem1.[j] == mem2.[j])})
: Lemma
(ensures (forall j. {:pattern (mem1.[j]) \/ (mem1.[j])}
j >= base /\ j < base + scale8 i ==>
mem1.[j] == mem2.[j]))
(decreases %[i-k]) =
if k >= i then ()
else begin
let ptr = base + scale8 k in
same_mem_get_heap_val64 b i v k h h1 mem1 mem2;
heap_shift mem1 mem2 ptr 8;
written_buffer_down64_aux1 b i v h base (k+1) h1 mem1 mem2
end
let rec written_buffer_down64_aux2
(b:buffer64{buffer_writeable b})
(i:nat{i < buffer_length b})
(v:nat64)
(h:vale_heap{List.memP b (IB.ptrs_of_mem (_ih h))})
(base:nat{base == buffer_addr b h})
(n:nat{n == buffer_length b})
(k:nat{k > i}) (h1:vale_heap{h1 == buffer_write b i v h})
(mem1:S.machine_heap{IB.correct_down (_ih h) mem1})
(mem2:S.machine_heap{IB.correct_down (_ih h1) mem2 /\
(forall j. {:pattern (mem1.[j]) \/ (mem2.[j])}
base + scale8 (i+1) <= j /\ j < base + k * 8 ==>
mem1.[j] == mem2.[j])})
: Lemma
(ensures (forall j. {:pattern (mem1.[j]) \/ (mem2.[j])}
j >= base + scale8 (i+1) /\ j < base + scale8 n ==>
mem1.[j] == mem2.[j]))
(decreases %[n-k]) =
if k >= n then ()
else begin
let ptr = base + scale8 k in
same_mem_get_heap_val64 b i v k h h1 mem1 mem2;
heap_shift mem1 mem2 ptr 8;
written_buffer_down64_aux2 b i v h base n (k+1) h1 mem1 mem2
end
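// Combines the two auxiliary lemmas: bytes of b strictly below index i (aux1) and strictly
// above it (aux2) are unchanged in the down_mem image after writing v at index i.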
let written_buffer_down64 (b:buffer64{buffer_writeable b}) (i:nat{i < buffer_length b}) (v:nat64) (h:vale_heap)
: Lemma
(requires List.memP b (IB.ptrs_of_mem (_ih h)))
(ensures (
let mem1 = I.down_mem (_ih h) in
let h1 = buffer_write b i v h in
let mem2 = I.down_mem (_ih h1) in
let base = buffer_addr b h in
let n = buffer_length b in
forall j. {:pattern (mem1.[j]) \/ (mem2.[j])}
(base <= j /\ j < base + scale8 i) \/
(base + scale8 (i+1) <= j /\ j < base + scale8 n) ==>
mem1.[j] == mem2.[j]))
= let mem1 = I.down_mem (_ih h) in
let h1 = buffer_write b i v h in
let mem2 = I.down_mem (_ih h1) in
let base = buffer_addr b h in
let n = buffer_length b in
written_buffer_down64_aux1 b i v h base 0 h1 mem1 mem2;
written_buffer_down64_aux2 b i v h base n (i+1) h1 mem1 mem2
let unwritten_buffer_down (t:base_typ) (b:buffer t{buffer_writeable b})
(i:nat{i < buffer_length b})
(v:base_typ_as_vale_type t)
(h:vale_heap{List.memP b (IB.ptrs_of_mem (_ih h))})
: Lemma
(ensures (
let mem1 = I.down_mem (_ih h) in
let h1 = buffer_write b i v h in
let mem2 = I.down_mem (_ih h1) in
forall (a:b8{List.memP a (IB.ptrs_of_mem (_ih h)) /\ a =!= b}) j. {:pattern mem1.[j]; List.memP a (IB.ptrs_of_mem (_ih h)) \/ mem2.[j]; List.memP a (IB.ptrs_of_mem (_ih h))}
let base = (IB.addrs_of_mem (_ih h)) a in
j >= base /\ j < base + DV.length (get_downview a.bsrc) ==> mem1.[j] == mem2.[j]))
= let aux (a:b8{a =!= b /\ List.memP a (IB.ptrs_of_mem (_ih h))})
: Lemma
(ensures (
let base = (IB.addrs_of_mem (_ih h)) a in
let mem1 = I.down_mem (_ih h) in
let h1 = buffer_write b i v h in
let mem2 = I.down_mem (_ih h1) in
forall j.
j >= base /\ j < base + DV.length (get_downview a.bsrc) ==>
mem1.[j] == mem2.[j]))
= let db = get_downview a.bsrc in
if DV.length db = 0 then ()
else
let mem1 = I.down_mem (_ih h) in
let h1 = buffer_write b i v h in
let mem2 = I.down_mem (_ih h1) in
let base = (IB.addrs_of_mem (_ih h)) a in
let s0 = DV.as_seq (IB.hs_of_mem (_ih h)) db in
let s1 = DV.as_seq (IB.hs_of_mem (_ih h1)) db in
opaque_assert (`%IB.list_disjoint_or_eq) IB.list_disjoint_or_eq IB.list_disjoint_or_eq_def (MB.disjoint a.bsrc b.bsrc);
lemma_dv_equal (IB.down_view a.src) a.bsrc (IB.hs_of_mem (_ih h)) (IB.hs_of_mem (_ih h1));
assert (Seq.equal s0 s1);
assert (forall (j:int).{:pattern (mem1.[j])}
base <= j /\ j < base + Seq.length s0 ==> v_to_typ TUInt8 (Seq.index s0 (j - base)) == mem1.[j]);
heap_shift mem1 mem2 base (DV.length db)
in
Classical.forall_intro aux
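// Case split on the address j: inside b it follows from written_buffer_down64, outside any
// live buffer from same_unspecified_down, and inside another live buffer from
// unwritten_buffer_down.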
let store_buffer_down64_mem
(b:buffer64{buffer_writeable b})
(i:nat{i < buffer_length b})
(v:nat64)
(h:vale_heap{List.memP b (IB.ptrs_of_mem (_ih h))})
: Lemma
(ensures (
let mem1 = I.down_mem (_ih h) in
let h1 = buffer_write b i v h in
let mem2 = I.down_mem (_ih h1) in
let base = buffer_addr b h in
forall (j:int). {:pattern mem1.[j] \/ mem2.[j]}
j < base + scale8 i \/ j >= base + scale8 (i+1) ==>
mem1.[j] == mem2.[j]))
= let mem1 = I.down_mem (_ih h) in
let h1 = buffer_write b i v h in
let mem2 = I.down_mem (_ih h1) in
let base = buffer_addr b h in
let n = buffer_length b in
let aux (j:int)
: Lemma
(j < base + scale8 i \/ j >= base + scale8 (i+1) ==>
mem1.[j] == mem2.[j])
=
I.addrs_set_lemma_all ();
if j >= base && j < base + DV.length (get_downview b.bsrc) then begin
written_buffer_down64 b i v h;
length_t_eq (TUInt64) b
end
else if not (I.valid_addr (_ih h) j)
then I.same_unspecified_down (IB.hs_of_mem (_ih h)) (IB.hs_of_mem (_ih h1)) (IB.ptrs_of_mem (_ih h))
else unwritten_buffer_down TUInt64 b i v h
in
Classical.forall_intro aux
let store_buffer_aux_down64_mem (ptr:int) (v:nat64) (h:vale_heap{writeable_mem64 ptr h})
: Lemma
(ensures (
let mem1 = I.down_mem (_ih h) in
let h1 = store_mem (TUInt64) ptr v h in
let mem2 = I.down_mem (_ih h1) in
forall j. {:pattern mem1.[j] \/ mem2.[j]}
j < ptr \/ j >= ptr + 8 ==>
mem1.[j] == mem2.[j]))
= let t = TUInt64 in
let h1 = store_mem t ptr v h in
let b = Some?.v (find_writeable_buffer t ptr h) in
length_t_eq t b;
let i = get_addr_in_ptr t (buffer_length b) (buffer_addr b h) ptr 0 in
store_buffer_write t ptr v h;
assert (buffer_addr b h + scale8 i == ptr);
assert (buffer_addr b h + scale8 (i+1) == ptr + 8);
store_buffer_down64_mem b i v h
let store_buffer_aux_down64_mem2 (ptr:int) (v:nat64) (h:vale_heap{writeable_mem64 ptr h})
: Lemma
(ensures (
let h1 = store_mem (TUInt64) ptr v h in
let mem2 = I.down_mem (_ih h1) in
S.get_heap_val64 ptr mem2 == v))
= let t = TUInt64 in
let b = Some?.v (find_writeable_buffer t ptr h) in
length_t_eq t b;
let i = get_addr_in_ptr t (buffer_length b) (buffer_addr b h) ptr 0 in
let h1 = store_mem t ptr v h in
let mem2 = I.down_mem (_ih h1) in
store_buffer_write t ptr v h;
assert (Seq.index (buffer_as_seq h1 b) i == v);
index64_get_heap_val64 h1 b mem2 i
let in_bounds64 (h:vale_heap) (b:buffer64) (i:nat{i < buffer_length b})
: Lemma (scale8 i + 8 <= DV.length (get_downview b.bsrc))
=
length_t_eq TUInt64 b
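// valid_addr64 is established byte by byte: each of the 8 addresses covered by the word is
// shown to lie in the interop address set of the enclosing buffer.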
let bytes_valid64 ptr h =
reveal_opaque (`%S.valid_addr64) S.valid_addr64;
let t = TUInt64 in
let b = get_addr_ptr t ptr h in
let i = get_addr_in_ptr t (buffer_length b) (buffer_addr b h) ptr 0 in
in_bounds64 h b i;
I.addrs_set_mem (_ih h) b ptr;
I.addrs_set_mem (_ih h) b (ptr+1);
I.addrs_set_mem (_ih h) b (ptr+2);
I.addrs_set_mem (_ih h) b (ptr+3);
I.addrs_set_mem (_ih h) b (ptr+4);
I.addrs_set_mem (_ih h) b (ptr+5);
I.addrs_set_mem (_ih h) b (ptr+6);
I.addrs_set_mem (_ih h) b (ptr+7)
val same_mem_get_heap_val128 (b:buffer128)
(i:nat{i < buffer_length b})
(v:quad32)
(k:nat{k < buffer_length b})
(h1:vale_heap{List.memP b (IB.ptrs_of_mem (_ih h1)) /\ buffer_writeable b})
(h2:vale_heap{h2 == buffer_write b i v h1})
(mem1:S.machine_heap{IB.correct_down_p (_ih h1) mem1 b})
(mem2:S.machine_heap{IB.correct_down_p (_ih h2) mem2 b}) : Lemma
(requires (Seq.index (buffer_as_seq h1 b) k == Seq.index (buffer_as_seq h2 b) k))
(ensures (let ptr = buffer_addr b h1 + scale16 k in
forall i.{:pattern mem1.[i]} i >= ptr /\ i < ptr+16 ==> mem1.[i] == mem2.[i]))
val same_mem_eq_slices128 (b:buffer128)
(i:nat{i < buffer_length b})
(v:quad32)
(k:nat{k < buffer_length b})
(h1:vale_heap{List.memP b (IB.ptrs_of_mem (_ih h1)) /\ buffer_writeable b})
(h2:vale_heap{h2 == buffer_write b i v h1})
(mem1:S.machine_heap{IB.correct_down_p (_ih h1) mem1 b})
(mem2:S.machine_heap{IB.correct_down_p (_ih h2) mem2 b}) : Lemma
(requires (Seq.index (buffer_as_seq h1 b) k == Seq.index (buffer_as_seq h2 b) k))
(ensures (
k * 16 + 16 <= DV.length (get_downview b.bsrc) /\
Seq.slice (DV.as_seq (IB.hs_of_mem (_ih h1)) (get_downview b.bsrc)) (k * 16) (k * 16 + 16) ==
Seq.slice (DV.as_seq (IB.hs_of_mem (_ih h2)) (get_downview b.bsrc)) (k * 16) (k * 16 + 16)))
let same_mem_eq_slices128 b i v k h1 h2 mem1 mem2 =
let t = TUInt128 in
let db = get_downview b.bsrc in
let ub = UV.mk_buffer db (uint_view t) in
UV.as_seq_sel (IB.hs_of_mem (_ih h1)) ub k;
UV.as_seq_sel (IB.hs_of_mem (_ih h2)) ub k;
UV.put_sel (IB.hs_of_mem (_ih h1)) ub k;
UV.put_sel (IB.hs_of_mem (_ih h2)) ub k;
UV.length_eq ub
let length_up128 (b:buffer128) (h:vale_heap) (k:nat{k < buffer_length b}) (i:nat{i < 16}) : Lemma
(scale16 k + i <= DV.length (get_downview b.bsrc)) =
let vb = UV.mk_buffer (get_downview b.bsrc) uint128_view in
UV.length_eq vb
let same_mem_get_heap_val128 b j v k h1 h2 mem1 mem2 =
let ptr = buffer_addr b h1 + scale16 k in
let addr = buffer_addr b h1 in
let aux (i:nat{ptr <= i /\ i < ptr+16}) : Lemma (mem1.[i] == mem2.[i]) =
let db = get_downview b.bsrc in
let ub = UV.mk_buffer db uint128_view in
UV.as_seq_sel (IB.hs_of_mem (_ih h1)) ub k;
UV.as_seq_sel (IB.hs_of_mem (_ih h2)) ub k;
same_mem_eq_slices128 b j v k h1 h2 mem1 mem2;
let s1 = (Seq.slice (DV.as_seq (IB.hs_of_mem (_ih h1)) db) (k * 16) (k * 16 + 16)) in
let s2 = (Seq.slice (DV.as_seq (IB.hs_of_mem (_ih h2)) db) (k * 16) (k * 16 + 16)) in
assert (Seq.index s1 (i - ptr) == Seq.index (DV.as_seq (IB.hs_of_mem (_ih h1)) db) (k * 16 + (i-ptr)));
length_up128 b h1 k (i-ptr);
assert (mem1.[i] == UInt8.v (Seq.index (DV.as_seq (IB.hs_of_mem (_ih h1)) db) (k * 16 + (i-ptr))));
assert (Seq.index s2 (i-ptr) == Seq.index (DV.as_seq (IB.hs_of_mem (_ih h2)) db) (k * 16 + (i-ptr)));
length_up128 b h2 k (i-ptr);
assert (mem2.[addr+(scale16 k + (i-ptr))] == UInt8.v (Seq.index (DV.as_seq (IB.hs_of_mem (_ih h2)) db) (k * 16 + (i-ptr))));
assert (forall i. addr + (scale16 k + (i-ptr)) == i)
in
Classical.forall_intro aux
let in_bounds128 (h:vale_heap) (b:buffer128) (i:nat{i < buffer_length b}) : Lemma
(scale16 i + 16 <= DV.length (get_downview b.bsrc))
=
length_t_eq TUInt128 b
#push-options "--z3rlimit 20"
#restart-solver
let bytes_valid128 ptr h =
reveal_opaque (`%S.valid_addr128) S.valid_addr128;
IB.list_disjoint_or_eq_reveal ();
let t = TUInt128 in
let b = get_addr_ptr t ptr h in
let i = get_addr_in_ptr t (buffer_length b) (buffer_addr b h) ptr 0 in
in_bounds128 h b i;
I.addrs_set_mem (_ih h) b ptr;
I.addrs_set_mem (_ih h) b (ptr+1);
I.addrs_set_mem (_ih h) b (ptr+2);
I.addrs_set_mem (_ih h) b (ptr+3);
I.addrs_set_mem (_ih h) b (ptr+4);
I.addrs_set_mem (_ih h) b (ptr+5);
I.addrs_set_mem (_ih h) b (ptr+6);
I.addrs_set_mem (_ih h) b (ptr+7);
I.addrs_set_mem (_ih h) b (ptr+8);
I.addrs_set_mem (_ih h) b (ptr+9);
I.addrs_set_mem (_ih h) b (ptr+10);
I.addrs_set_mem (_ih h) b (ptr+11);
I.addrs_set_mem (_ih h) b (ptr+12);
I.addrs_set_mem (_ih h) b (ptr+13);
I.addrs_set_mem (_ih h) b (ptr+14);
I.addrs_set_mem (_ih h) b (ptr+15)
#pop-options
let equiv_load_mem64 ptr h =
let t = TUInt64 in
let b = get_addr_ptr t ptr h in
let i = get_addr_in_ptr t (buffer_length b) (buffer_addr b h) ptr 0 in
let addr = buffer_addr b h in
let contents = DV.as_seq (_ih h).IB.hs (get_downview b.bsrc) in
let heap = get_heap h in
index64_get_heap_val64 h b heap i;
lemma_load_mem64 b i h
//let low_lemma_valid_mem64 b i h =
// lemma_valid_mem64 b i h;
// bytes_valid64 (buffer_addr b h + scale8 i) h
//let low_lemma_load_mem64 b i h =
// lemma_valid_mem64 b i h;
// lemma_load_mem64 b i h;
// equiv_load_mem64 (buffer_addr b h + scale8 i) h
//let same_domain_update64 b i v h =
// low_lemma_valid_mem64 b i h;
// Vale.Arch.MachineHeap.same_domain_update64 (buffer_addr b h + scale8 i) v (get_heap h)
open Vale.X64.BufferViewStore
let low_lemma_store_mem64_aux
(b:buffer64)
(heap:S.machine_heap)
(i:nat{i < buffer_length b})
(v:nat64)
(h:vale_heap{buffer_readable h b /\ buffer_writeable b})
: Lemma
(requires IB.correct_down_p (_ih h) heap b)
(ensures (let heap' = S.update_heap64 (buffer_addr b h + scale8 i) v heap in
let h' = store_mem64 (buffer_addr b h + scale8 i) v h in
(_ih h').IB.hs == DV.upd_seq (_ih h).IB.hs (get_downview b.bsrc) (I.get_seq_heap heap' (_ih h).IB.addrs b))) =
let ptr = buffer_addr b h + scale8 i in
let heap' = S.update_heap64 ptr v heap in
let h' = store_mem64 ptr v h in
lemma_store_mem64 b i v h;
length_t_eq TUInt64 b;
bv_upd_update_heap64 b heap i v (_ih h);
let db = get_downview b.bsrc in
let bv = UV.mk_buffer db Vale.Interop.Views.up_view64 in
assert (UV.upd (_ih h).IB.hs bv i (UInt64.uint_to_t v) == (_ih h').IB.hs)
val valid_state_store_mem64_aux: (i:nat) -> (v:nat64) -> (h:vale_heap) -> Lemma
(requires writeable_mem64 i h)
(ensures (
let heap = get_heap h in
let heap' = S.update_heap64 i v heap in
let h' = store_mem64 i v h in
heap' == I.down_mem (_ih h')
))
let valid_state_store_mem64_aux i v h =
let heap = get_heap h in
let heap' = S.update_heap64 i v heap in
let h1 = store_mem TUInt64 i v h in
store_buffer_aux_down64_mem i v h;
store_buffer_aux_down64_mem2 i v h;
let mem1 = heap' in
let mem2 = I.down_mem (_ih h1) in
let aux () : Lemma (forall j. mem1.[j] == mem2.[j]) =
Vale.Arch.MachineHeap.same_mem_get_heap_val64 i mem1 mem2;
Vale.Arch.MachineHeap.correct_update_get64 i v heap;
Vale.Arch.MachineHeap.frame_update_heap64 i v heap
in let aux2 () : Lemma (Set.equal (Map.domain mem1) (Map.domain mem2)) =
bytes_valid64 i h;
Vale.Arch.MachineHeap.same_domain_update64 i v heap
in aux(); aux2();
Map.lemma_equal_intro mem1 mem2
let low_lemma_load_mem64_full b i vfh t hid =
reveal_opaque (`%valid_layout_buffer_id) valid_layout_buffer_id;
()
#push-options "--z3rlimit 20"
#restart-solver
let low_lemma_store_mem64 b i v h =
lemma_writeable_mem64 b i h;
lemma_store_mem64 b i v h;
valid_state_store_mem64_aux (buffer_addr b h + scale8 i) v h;
let heap = get_heap h in
let heap' = S.update_heap64 (buffer_addr b h + scale8 i) v heap in
low_lemma_store_mem64_aux b heap i v h;
Vale.Arch.MachineHeap.frame_update_heap64 (buffer_addr b h + scale8 i) v heap;
in_bounds64 h b i;
I.addrs_set_lemma_all ();
I.update_buffer_up_mem (_ih h) b heap heap'
#pop-options
#set-options "--z3rlimit 100"
#restart-solver
let lemma_is_full_update
(vfh:vale_full_heap) (h hk hk':vale_heap) (k:heaplet_id) (mh mh' mhk mhk':machine_heap) (mt mt':memtaint)
(t:base_typ) (b:buffer t) (ptr:int) (v_size:nat)
(index:nat) (v:base_typ_as_vale_type t) (tn:taint)
: Lemma
(requires
vfh.vf_layout.vl_inner.vl_heaplets_initialized /\
mem_inv vfh /\
buffer_readable hk b /\
buffer_writeable b /\
index < Seq.length (buffer_as_seq hk b) /\
mt == vfh.vf_layout.vl_taint /\
h == vfh.vf_heap /\
hk == Map16.sel vfh.vf_heaplets k /\
mh == h.mh /\
mhk == hk.mh /\
ptr == buffer_addr b hk + scale_by v_size index /\
mt' == S.update_n ptr v_size (heap_taint (coerce vfh)) tn /\
hk' == buffer_write b index v hk /\
valid_layout_buffer b vfh.vf_layout hk true /\
valid_taint_buf b hk mt tn /\
is_machine_heap_update mh mh' /\ upd_heap h mh' == buffer_write b index v h /\
is_machine_heap_update mhk mhk' /\ upd_heap hk mhk' == buffer_write b index v hk /\
(forall j.{:pattern mh.[j] \/ mh'.[j]} j < ptr \/ j >= ptr + v_size ==> mh.[j] == mh'.[j]) /\
(forall j.{:pattern mhk.[j] \/ mhk'.[j]} j < ptr \/ j >= ptr + v_size ==> mhk.[j] == mhk'.[j]) /\
0 <= scale_by v_size index /\ scale_by v_size index + v_size <= DV.length (get_downview b.bsrc) /\
(forall i.{:pattern mh'.[i] \/ mhk'.[i]} i >= ptr /\ i < ptr + v_size ==> mh'.[i] == mhk'.[i]) /\
True
)
(ensures is_full_update vfh hk' k mh' mt')
=
reveal_opaque (`%valid_layout_buffer_id) valid_layout_buffer_id;
let vfh' = coerce (heap_upd (coerce vfh) mh' mt') in
let dom_upd = Set.intersect (vfh.vf_layout.vl_inner.vl_heaplet_sets k) (Map.domain mhk) in
let mhk'' = Map.concat mhk (Map.restrict dom_upd mh') in
assert (Map.equal mhk'' mhk');
let unchanged (j:heaplet_id) : Lemma
(requires j =!= k)
(ensures Map16.sel vfh'.vf_heaplets j == Map16.sel vfh.vf_heaplets j)
[SMTPat (Map16.sel vfh'.vf_heaplets j)]
=
assert (Map.equal (Map16.sel vfh'.vf_heaplets j).mh (Map16.sel vfh.vf_heaplets j).mh);
I.down_up_identity (Map16.sel vfh.vf_heaplets j).ih;
()
in
assert (Map16.equal vfh'.vf_heaplets (Map16.upd vfh.vf_heaplets k hk'));
assert (Map.equal mt' mt);
Vale.Interop.Heap_s.list_disjoint_or_eq_reveal ();
()
let low_lemma_store_mem64_full b i v vfh t hid =
let (h, mt, hk) = (vfh.vf_heap, vfh.vf_layout.vl_taint, Map16.get vfh.vf_heaplets hid) in
let ptr = buffer_addr b hk + scale8 i in
let mh' = S.update_heap64 ptr v (heap_get (coerce vfh)) in
let mt' = S.update_n ptr 8 (heap_taint (coerce vfh)) t in
let hk' = buffer_write b i v hk in
let mhk' = S.update_heap64 ptr v (get_heap hk) in
reveal_opaque (`%valid_layout_buffer_id) valid_layout_buffer_id;
low_lemma_store_mem64 b i v h;
low_lemma_store_mem64 b i v (Map16.get vfh.vf_heaplets hid);
Vale.Arch.MachineHeap.frame_update_heap64 ptr v h.mh;
Vale.Arch.MachineHeap.frame_update_heap64 ptr v hk.mh;
in_bounds64 hk b i;
Vale.Arch.MachineHeap.same_mem_get_heap_val64 ptr mh' mhk';
lemma_is_full_update vfh h hk hk' hid h.mh mh' hk.mh mhk' mt mt' TUInt64 b ptr 8 i v t;
()
val low_lemma_valid_mem128 (b:buffer128) (i:nat) (h:vale_heap) : Lemma
(requires
i < Seq.length (buffer_as_seq h b) /\
buffer_readable h b
)
(ensures
S.valid_addr128 (buffer_addr b h + scale16 i) (get_heap h)
)
let low_lemma_valid_mem128 b i h =
lemma_valid_mem128 b i h;
bytes_valid128 (buffer_addr b h + scale16 i) h
val equiv_load_mem128_aux: (ptr:int) -> (h:vale_heap) -> Lemma
(requires valid_mem128 ptr h)
(ensures load_mem128 ptr h == S.get_heap_val128 ptr (get_heap h))
let equiv_load_mem128_aux ptr h =
let t = TUInt128 in
let b = get_addr_ptr t ptr h in
let i = get_addr_in_ptr t (buffer_length b) (buffer_addr b h) ptr 0 in
let addr = buffer_addr b h in
let contents = DV.as_seq (_ih h).IB.hs (get_downview b.bsrc) in
let heap = get_heap h in
S.get_heap_val128_reveal ();
index128_get_heap_val128 h b heap i;
lemma_load_mem128 b i h
let equiv_load_mem128 ptr h =
equiv_load_mem128_aux ptr h
val low_lemma_load_mem128 (b:buffer128) (i:nat) (h:vale_heap) : Lemma
(requires
i < Seq.length (buffer_as_seq h b) /\
buffer_readable h b
)
(ensures
S.get_heap_val128 (buffer_addr b h + scale16 i) (get_heap h) == buffer_read b i h
)
let low_lemma_load_mem128 b i h =
lemma_valid_mem128 b i h;
lemma_load_mem128 b i h;
equiv_load_mem128_aux (buffer_addr b h + scale16 i) h
//let same_domain_update128 b i v h =
// low_lemma_valid_mem128 b i h;
// Vale.Arch.MachineHeap.same_domain_update128 (buffer_addr b h + scale16 i) v (get_heap h)
let low_lemma_store_mem128_aux
(b:buffer128)
(heap:S.machine_heap)
(i:nat{i < buffer_length b})
(v:quad32)
(h:vale_heap{buffer_readable h b /\ buffer_writeable b})
: Lemma
(requires IB.correct_down_p (_ih h) heap b)
(ensures (let heap' = S.update_heap128 (buffer_addr b h + scale16 i) v heap in
let h' = store_mem128 (buffer_addr b h + scale16 i) v h in
(_ih h').IB.hs == DV.upd_seq (_ih h).IB.hs (get_downview b.bsrc) (I.get_seq_heap heap' (_ih h).IB.addrs b))) =
let ptr = buffer_addr b h + scale16 i in
let heap' = S.update_heap128 ptr v heap in
let h' = store_mem128 ptr v h in
lemma_store_mem128 b i v h;
length_t_eq TUInt128 b;
bv_upd_update_heap128 b heap i v (_ih h);
let db = get_downview b.bsrc in
let bv = UV.mk_buffer db Vale.Interop.Views.up_view128 in
assert (UV.upd (_ih h).IB.hs bv i v == (_ih h').IB.hs)
val valid_state_store_mem128_aux (i:int) (v:quad32) (h:vale_heap) : Lemma
(requires writeable_mem128 i h)
(ensures (
let heap = get_heap h in
let heap' = S.update_heap128 i v heap in
let h' = store_mem128 i v h in
heap' == I.down_mem (_ih h')
))
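// The two recursions below frame a 128-bit buffer write on the flattened heap:
// aux1 handles the bytes below the written element and aux2 the bytes above it,
// one 16-byte element per step.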
#restart-solver
let rec written_buffer_down128_aux1
(b:buffer128{buffer_writeable b})
(i:nat{i < buffer_length b})
(v:quad32)
(h:vale_heap{List.memP b (_ih h).IB.ptrs})
(base:nat{base == buffer_addr b h})
(k:nat) (h1:vale_heap{h1 == buffer_write b i v h})
(mem1:S.machine_heap{IB.correct_down (_ih h) mem1})
(mem2:S.machine_heap{IB.correct_down (_ih h1) mem2 /\
(forall j.{:pattern (mem1.[j]) \/ (mem2.[j])}
base <= j /\ j < base + k * 16 ==>
mem1.[j] == mem2.[j])})
: Lemma
      (ensures (forall j. {:pattern (mem1.[j]) \/ (mem2.[j])}
j >= base /\ j < base + scale16 i ==>
mem1.[j] == mem2.[j]))
(decreases %[i-k]) =
if k >= i then ()
else begin
let ptr = base + scale16 k in
same_mem_get_heap_val128 b i v k h h1 mem1 mem2;
heap_shift mem1 mem2 ptr 16;
written_buffer_down128_aux1 b i v h base (k+1) h1 mem1 mem2
end
#restart-solver
let rec written_buffer_down128_aux2
(b:buffer128{buffer_writeable b})
(i:nat{i < buffer_length b})
(v:quad32)
(h:vale_heap{List.memP b (_ih h).IB.ptrs})
(base:nat{base == buffer_addr b h})
(n:nat{n == buffer_length b})
(k:nat{k > i}) (h1:vale_heap{h1 == buffer_write b i v h})
(mem1:S.machine_heap{IB.correct_down (_ih h) mem1})
(mem2:S.machine_heap{IB.correct_down (_ih h1) mem2 /\
(forall j. {:pattern (mem1.[j]) \/ (mem2.[j])}
base + scale16 (i+1) <= j /\ j < base + k * 16 ==>
mem1.[j] == mem2.[j])})
: Lemma
(ensures (forall j. {:pattern (mem1.[j]) \/ (mem2.[j])}
j >= base + scale16 (i+1) /\ j < base + scale16 n ==>
mem1.[j] == mem2.[j]))
(decreases %[n-k]) =
if k >= n then ()
else begin
let ptr = base + scale16 k in
same_mem_get_heap_val128 b i v k h h1 mem1 mem2;
heap_shift mem1 mem2 ptr 16;
written_buffer_down128_aux2 b i v h base n (k+1) h1 mem1 mem2
end
let written_buffer_down128 (b:buffer128) (i:nat{i < buffer_length b}) (v:quad32) (h:vale_heap)
: Lemma
(requires List.memP b (_ih h).IB.ptrs /\ buffer_writeable b)
(ensures (
let mem1 = I.down_mem (_ih h) in
let h1 = buffer_write b i v h in
let mem2 = I.down_mem (_ih h1) in
let base = buffer_addr b h in
let n = buffer_length b in
forall j. {:pattern (mem1.[j]) \/ (mem2.[j])}
(base <= j /\ j < base + scale16 i) \/
(base + scale16 (i+1) <= j /\ j < base + scale16 n) ==>
mem1.[j] == mem2.[j]))
= let mem1 = I.down_mem (_ih h) in
let h1 = buffer_write b i v h in
let mem2 = I.down_mem (_ih h1) in
let base = buffer_addr b h in
let n = buffer_length b in
written_buffer_down128_aux1 b i v h base 0 h1 mem1 mem2;
written_buffer_down128_aux2 b i v h base n (i+1) h1 mem1 mem2
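// store_buffer_down128_mem extends the framing to every address outside the
// written 16-byte window, covering the written buffer itself, other buffers
// (unwritten_buffer_down), and unmapped addresses (same_unspecified_down).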
let store_buffer_down128_mem
(b:buffer128{buffer_writeable b})
(i:nat{i < buffer_length b})
(v:quad32)
(h:vale_heap{List.memP b (_ih h).IB.ptrs})
: Lemma
(ensures (
let mem1 = I.down_mem (_ih h) in
let h1 = buffer_write b i v h in
let mem2 = I.down_mem (_ih h1) in
let base = buffer_addr b h in
forall (j:int). {:pattern mem1.[j] \/ mem2.[j]}
j < base + scale16 i \/ j >= base + scale16 (i+1) ==>
mem1.[j] == mem2.[j]))
= let mem1 = I.down_mem (_ih h) in
let h1 = buffer_write b i v h in
let mem2 = I.down_mem (_ih h1) in
let base = buffer_addr b h in
let n = buffer_length b in
let aux (j:int)
: Lemma
(j < base + scale16 i \/ j >= base + scale16 (i+1) ==>
mem1.[j] == mem2.[j])
=
I.addrs_set_lemma_all ();
if j >= base && j < base + DV.length (get_downview b.bsrc) then begin
written_buffer_down128 b i v h;
length_t_eq TUInt128 b
end
else if not (I.valid_addr (_ih h) j)
then I.same_unspecified_down (_ih h).IB.hs (_ih h1).IB.hs (_ih h).IB.ptrs
else unwritten_buffer_down TUInt128 b i v h
in
Classical.forall_intro aux
let store_buffer_aux_down128_mem (ptr:int) (v:quad32) (h:vale_heap{writeable_mem128 ptr h})
: Lemma
(ensures (
let mem1 = I.down_mem (_ih h) in
let h1 = store_mem TUInt128 ptr v h in
let mem2 = I.down_mem (_ih h1) in
forall j. {:pattern mem1.[j] \/ mem2.[j]}
j < ptr \/ j >= ptr + 16 ==>
mem1.[j] == mem2.[j]))
= let t = TUInt128 in
let h1 = store_mem t ptr v h in
let b = Some?.v (find_writeable_buffer t ptr h) in
length_t_eq t b;
let i = get_addr_in_ptr t (buffer_length b) (buffer_addr b h) ptr 0 in
store_buffer_write t ptr v h;
assert (buffer_addr b h + scale16 i == ptr);
assert (buffer_addr b h + scale16 (i+1) == ptr + 16);
store_buffer_down128_mem b i v h
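// For the written slot itself, the four 32-bit words read back from the downed
// memory reassemble exactly the stored quad32.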
let store_buffer_aux_down128_mem2 (ptr:int) (v:quad32) (h:vale_heap{writeable_mem128 ptr h})
: Lemma
(ensures (
let h1 = store_mem TUInt128 ptr v h in
let mem2 = I.down_mem (_ih h1) in
Mkfour
(S.get_heap_val32 ptr mem2)
(S.get_heap_val32 (ptr+4) mem2)
(S.get_heap_val32 (ptr+8) mem2)
(S.get_heap_val32 (ptr+12) mem2)
== v)) =
let t = TUInt128 in
let b = Some?.v (find_writeable_buffer t ptr h) in
length_t_eq t b;
let i = get_addr_in_ptr t (buffer_length b) (buffer_addr b h) ptr 0 in
let h1 = store_mem t ptr v h in
let mem2 = I.down_mem (_ih h1) in
store_buffer_write t ptr v h;
assert (Seq.index (buffer_as_seq h1 b) i == v);
index128_get_heap_val128 h1 b mem2 i
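// Pointwise equality of the updated machine heap and down_mem of the updated
// vale_heap follows from the two store_buffer_aux lemmas above; equal domains
// then let Map.lemma_equal_intro close valid_state_store_mem128_aux.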
let valid_state_store_mem128_aux i v h =
let heap = get_heap h in
let heap' = S.update_heap128 i v heap in
let h1 = store_mem TUInt128 i v h in
store_buffer_aux_down128_mem i v h;
store_buffer_aux_down128_mem2 i v h;
let mem1 = heap' in
let mem2 = I.down_mem (_ih h1) in
let aux () : Lemma (forall j. mem1.[j] == mem2.[j]) =
Vale.Arch.MachineHeap.correct_update_get128 i v heap;
Vale.X64.Machine_Semantics_s.get_heap_val128_reveal ();
Vale.Arch.MachineHeap.same_mem_get_heap_val32 i mem1 mem2;
Vale.Arch.MachineHeap.same_mem_get_heap_val32 (i+4) mem1 mem2;
Vale.Arch.MachineHeap.same_mem_get_heap_val32 (i+8) mem1 mem2;
Vale.Arch.MachineHeap.same_mem_get_heap_val32 (i+12) mem1 mem2;
Vale.Arch.MachineHeap.frame_update_heap128 i v heap
in
let aux2 () : Lemma (Set.equal (Map.domain mem1) (Map.domain mem2)) =
bytes_valid128 i h;
Vale.Arch.MachineHeap.same_domain_update128 i v heap
in aux (); aux2 ();
Map.lemma_equal_intro mem1 mem2
let low_lemma_load_mem128_full b i vfh t hid =
reveal_opaque (`%valid_layout_buffer_id) valid_layout_buffer_id;
()
let low_lemma_store_mem128 b i v h =
lemma_valid_mem128 b i h;
lemma_store_mem128 b i v h;
valid_state_store_mem128_aux (buffer_addr b h + scale16 i) v h;
let heap = get_heap h in
let heap' = S.update_heap128 (buffer_addr b h + scale16 i) v heap in
let h' = store_mem128 (buffer_addr b h + scale16 i) v h in
low_lemma_store_mem128_aux b heap i v h;
Vale.Arch.MachineHeap.frame_update_heap128 (buffer_addr b h + scale16 i) v heap;
in_bounds128 h b i;
I.addrs_set_lemma_all ();
I.update_buffer_up_mem (_ih h) b heap heap'
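// The heaplet-aware store mirrors low_lemma_store_mem64_full: frame the update on
// the flat heap and on heaplet hid, show the sixteen written bytes agree, and
// finish with lemma_is_full_update at width 16.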
let low_lemma_store_mem128_full b i v vfh t hid =
let (h, mt, hk) = (vfh.vf_heap, vfh.vf_layout.vl_taint, Map16.get vfh.vf_heaplets hid) in
let ptr = buffer_addr b hk + scale16 i in
let mh' = S.update_heap128 ptr v (heap_get (coerce vfh)) in
let mt' = S.update_n ptr 16 (heap_taint (coerce vfh)) t in
let hk' = buffer_write b i v hk in
let mhk' = S.update_heap128 ptr v (get_heap hk) in
reveal_opaque (`%valid_layout_buffer_id) valid_layout_buffer_id;
low_lemma_store_mem128 b i v h;
low_lemma_store_mem128 b i v (Map16.get vfh.vf_heaplets hid);
Vale.Arch.MachineHeap.frame_update_heap128 ptr v h.mh;
Vale.Arch.MachineHeap.frame_update_heap128 ptr v hk.mh;
in_bounds128 hk b i;
Vale.Arch.MachineHeap.same_mem_get_heap_val128 ptr mh' mhk';
lemma_is_full_update vfh h hk hk' hid h.mh mh' hk.mh mhk' mt mt' TUInt128 b ptr 16 i v t;
()
#push-options "--smtencoding.l_arith_repr boxwrap"
let low_lemma_valid_mem128_64 b i h =
reveal_opaque (`%S.valid_addr64) S.valid_addr64;
reveal_opaque (`%S.valid_addr128) S.valid_addr128;
low_lemma_valid_mem128 b i h;
let ptr = buffer_addr b h + scale16 i in
assert (buffer_addr b h + scale16 i + 8 = ptr + 8)
#pop-options
open Vale.Def.Words.Two_s
open Vale.Def.Words.Four_s
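// The lo64/hi64 variants reduce a 128-bit load to its 64-bit halves by revealing
// the word-level definitions of lo64, hi64, and the get_heap_val readers.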
let low_lemma_load_mem128_lo64 b i h =
low_lemma_load_mem128 b i h;
lo64_reveal ();
S.get_heap_val128_reveal ();
S.get_heap_val64_reveal ();
S.get_heap_val32_reveal ()
let low_lemma_load_mem128_hi64 b i h =
low_lemma_load_mem128 b i h;
hi64_reveal ();
S.get_heap_val128_reveal ();
S.get_heap_val64_reveal ();
S.get_heap_val32_reveal ()
//let same_domain_update128_64 b i v h =
// low_lemma_valid_mem128_64 b i (_ih h);
// Vale.Arch.MachineHeap.same_domain_update64 (buffer_addr b h + scale16 i) v (get_heap h);
// Vale.Arch.MachineHeap.same_domain_update64 (buffer_addr b h + scale16 i + 8) v (get_heap h)
open Vale.Def.Types_s
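// If two machine heaps agree on the four bytes at ptr, they return the same
// 32-bit value there.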
let frame_get_heap32 (ptr:int) (mem1 mem2:S.machine_heap) : Lemma
(requires (forall i. i >= ptr /\ i < ptr + 4 ==> mem1.[i] == mem2.[i])) | false | false | Vale.X64.Memory_Sems.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 100,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val frame_get_heap32 (ptr: int) (mem1 mem2: S.machine_heap)
: Lemma (requires (forall i. i >= ptr /\ i < ptr + 4 ==> mem1.[ i ] == mem2.[ i ]))
(ensures S.get_heap_val32 ptr mem1 == S.get_heap_val32 ptr mem2) | [] | Vale.X64.Memory_Sems.frame_get_heap32 | {
"file_name": "vale/code/arch/x64/Vale.X64.Memory_Sems.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} |
ptr: Prims.int ->
mem1: Vale.Arch.MachineHeap_s.machine_heap ->
mem2: Vale.Arch.MachineHeap_s.machine_heap
-> FStar.Pervasives.Lemma
(requires forall (i: Prims.int). i >= ptr /\ i < ptr + 4 ==> mem1.[ i ] == mem2.[ i ])
(ensures
Vale.Arch.MachineHeap_s.get_heap_val32 ptr mem1 ==
Vale.Arch.MachineHeap_s.get_heap_val32 ptr mem2) | {
"end_col": 28,
"end_line": 1045,
"start_col": 2,
"start_line": 1045
} |
FStar.Pervasives.Lemma | val low_lemma_valid_mem128 (b:buffer128) (i:nat) (h:vale_heap) : Lemma
(requires
i < Seq.length (buffer_as_seq h b) /\
buffer_readable h b
)
(ensures
S.valid_addr128 (buffer_addr b h + scale16 i) (get_heap h)
) | [
{
"abbrev": false,
"full_module": "Vale.X64.BufferViewStore",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Interop.Base",
"short_module": "IB"
},
{
"abbrev": false,
"full_module": "Vale.Arch.Heap",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Lib.BufferViewHelpers",
"short_module": null
},
{
"abbrev": true,
"full_module": "LowStar.BufferView.Down",
"short_module": "DV"
},
{
"abbrev": true,
"full_module": "LowStar.BufferView.Up",
"short_module": "UV"
},
{
"abbrev": true,
"full_module": "LowStar.Monotonic.Buffer",
"short_module": "MB"
},
{
"abbrev": true,
"full_module": "Vale.X64.Machine_Semantics_s",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.Interop",
"short_module": "I"
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Machine_Semantics_s",
"short_module": "S"
},
{
"abbrev": false,
"full_module": "Vale.Lib.Seqs",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.MachineHeap_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Heap",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let low_lemma_valid_mem128 b i h =
lemma_valid_mem128 b i h;
bytes_valid128 (buffer_addr b h + scale16 i) h | val low_lemma_valid_mem128 (b:buffer128) (i:nat) (h:vale_heap) : Lemma
(requires
i < Seq.length (buffer_as_seq h b) /\
buffer_readable h b
)
(ensures
S.valid_addr128 (buffer_addr b h + scale16 i) (get_heap h)
)
let low_lemma_valid_mem128 b i h = | false | null | true | lemma_valid_mem128 b i h;
bytes_valid128 (buffer_addr b h + scale16 i) h | {
"checked_file": "Vale.X64.Memory_Sems.fst.checked",
"dependencies": [
"Vale.X64.Memory.fst.checked",
"Vale.X64.Memory.fst.checked",
"Vale.X64.Machine_Semantics_s.fst.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.BufferViewStore.fsti.checked",
"Vale.Lib.BufferViewHelpers.fst.checked",
"Vale.Interop.Views.fsti.checked",
"Vale.Interop.Heap_s.fst.checked",
"Vale.Interop.Base.fst.checked",
"Vale.Interop.fsti.checked",
"Vale.Def.Words_s.fsti.checked",
"Vale.Def.Words.Two_s.fsti.checked",
"Vale.Def.Words.Four_s.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Def.Opaque_s.fsti.checked",
"Vale.Arch.MachineHeap.fsti.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"Vale.Arch.Heap.fsti.checked",
"prims.fst.checked",
"LowStar.Monotonic.Buffer.fsti.checked",
"LowStar.BufferView.Up.fsti.checked",
"LowStar.BufferView.Down.fsti.checked",
"FStar.UInt8.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.Set.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Option.fst.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.List.fst.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": true,
"source_file": "Vale.X64.Memory_Sems.fst"
} | [
"lemma"
] | [
"Vale.X64.Memory.buffer128",
"Prims.nat",
"Vale.Arch.HeapImpl.vale_heap",
"Vale.X64.Memory_Sems.bytes_valid128",
"Prims.op_Addition",
"Vale.X64.Memory.buffer_addr",
"Vale.X64.Memory.vuint128",
"Vale.X64.Memory.scale16",
"Prims.unit",
"Vale.X64.Memory.lemma_valid_mem128"
] | [] | module Vale.X64.Memory_Sems
open FStar.Mul
open Vale.Def.Prop_s
open Vale.Def.Opaque_s
open Vale.X64.Machine_s
open Vale.X64.Memory
open Vale.Def.Words_s
module I = Vale.Interop
module S = Vale.X64.Machine_Semantics_s
module MB = LowStar.Monotonic.Buffer
module UV = LowStar.BufferView.Up
module DV = LowStar.BufferView.Down
open Vale.Lib.BufferViewHelpers
open Vale.Arch.HeapImpl
open Vale.Arch.Heap
friend Vale.X64.Memory
module IB = Vale.Interop.Base
let same_domain h m = Set.equal (IB.addrs_set (_ih h)) (Map.domain m)
let lemma_same_domains h m1 m2 = ()
let get_heap h = I.down_mem (_ih h)
let upd_heap h m = mi_heap_upd h m
//let lemma_upd_get_heap h = I.down_up_identity (_ih h)
let lemma_get_upd_heap h m = I.up_down_identity (_ih h) m
let lemma_heap_impl = ()
let lemma_heap_get_heap h = ()
let lemma_heap_taint h = ()
//let lemma_heap_upd_heap h mh mt = ()
[@"opaque_to_smt"]
let rec set_of_range (a:int) (n:nat) : Pure (Set.set int)
(requires True)
(ensures fun s -> (forall (i:int).{:pattern Set.mem i s} Set.mem i s <==> a <= i /\ i < a + n))
=
if n = 0 then Set.empty else Set.union (set_of_range a (n - 1)) (Set.singleton (a + n - 1))
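// set_of_range a n is the half-open interval [a, a + n); for instance,
// set_of_range 4 2 contains exactly 4 and 5.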
let buffer_info_has_addr (bi:buffer_info) (a:int) =
let b = bi.bi_buffer in
let addr = Vale.Interop.Heap_s.global_addrs_map b in
let len = DV.length (get_downview b.bsrc) in
addr <= a /\ a < addr + len
let buffer_info_has_addr_opt (bi:option buffer_info) (a:int) =
match bi with
| None -> False
| Some bi -> buffer_info_has_addr bi a
#set-options "--z3rlimit 100"
let rec make_owns_rec (h:vale_heap) (bs:Seq.seq buffer_info) (n:nat{n <= Seq.length bs}) :
GTot ((int -> option (n:nat{n < Seq.length bs})) & (heaplet_id -> Set.set int))
=
if n = 0 then ((fun _ -> None), (fun _ -> Set.empty)) else
let (m0, s0) = make_owns_rec h bs (n - 1) in
let bi = Seq.index bs (n - 1) in
let b = bi.bi_buffer in
let hi = bi.bi_heaplet in
let addr = Vale.Interop.Heap_s.global_addrs_map b in
let len = DV.length (get_downview b.bsrc) in
let s_b = set_of_range addr len in
let s i = if i = hi then Set.union (s0 i) s_b else s0 i in
let m a = if addr <= a && a < addr + len then Some (n - 1) else m0 a in
(m, s)
[@"opaque_to_smt"]
let make_owns (h:vale_heap) (bs:Seq.seq buffer_info) (n:nat{n <= Seq.length bs}) :
GTot ((int -> option (n:nat{n < Seq.length bs})) & (heaplet_id -> Set.set int))
=
make_owns_rec h bs n
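// make_owns folds over the buffer_info sequence, mapping each covered address to
// the index of the buffer that owns it and accumulating, for each heaplet, the
// addresses of the buffers assigned to it; lemma_make_owns below captures the
// resulting ownership properties.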
let rec lemma_make_owns (h:vale_heap) (bs:Seq.seq buffer_info) (n:nat) : Lemma
(requires
n <= Seq.length bs /\
(forall (i:nat).{:pattern Seq.index bs i} i < Seq.length bs ==> buffer_readable h (Seq.index bs i).bi_buffer) /\
(forall (i1 i2:nat).{:pattern (Seq.index bs i1); (Seq.index bs i2)}
i1 < Seq.length bs /\ i2 < Seq.length bs ==> buffer_info_disjoint (Seq.index bs i1) (Seq.index bs i2))
)
(ensures (
let (m, s) = make_owns h bs n in
(forall (i:heaplet_id) (a:int).{:pattern Set.mem a (s i)}
(Set.mem a (s i) <==> Option.mapTot (fun n -> (Seq.index bs n).bi_heaplet) (m a) == Some i) /\
(Set.mem a (s i) ==> buffer_info_has_addr_opt (Option.mapTot (fun n -> Seq.index bs n) (m a)) a) /\
(Set.mem a (s i) ==> Set.mem a (Map.domain h.mh))
) /\
(forall (k:nat) (a:int).{:pattern Set.mem a (s (Seq.index bs k).bi_heaplet)}
k < n /\ buffer_info_has_addr (Seq.index bs k) a ==> Set.mem a (s (Seq.index bs k).bi_heaplet))
))
=
reveal_opaque (`%make_owns) make_owns;
if n = 0 then () else
let _ = make_owns h bs (n - 1) in
let (m, s) = make_owns h bs n in
lemma_make_owns h bs (n - 1);
let bi = Seq.index bs (n - 1) in
let b = bi.bi_buffer in
let hi = bi.bi_heaplet in
let addr = Vale.Interop.Heap_s.global_addrs_map b in
let len = DV.length (get_downview b.bsrc) in
let s_b = set_of_range addr len in
let lem1 (a:int) : Lemma
(requires Set.mem a s_b)
(ensures Set.mem a (Map.domain h.mh))
[SMTPat (Set.mem a (Map.domain h.mh))]
=
I.addrs_set_mem h.ih b a
in
let lem2 (i:heaplet_id) (a:int) : Lemma
(requires i =!= hi /\ Set.mem a (Set.intersect s_b (s i)))
(ensures False)
[SMTPat (Set.mem a (s i))]
=
reveal_opaque (`%addr_map_pred) addr_map_pred
in
()
#push-options "--initial_fuel 1 --max_fuel 1 --initial_ifuel 2 --max_ifuel 2"
let rec lemma_loc_mutable_buffers_rec (l:list buffer_info) (s:Seq.seq buffer_info) (n:nat) : Lemma
(requires
n + List.length l == Seq.length s /\
list_to_seq_post l s n
)
(ensures (
let modloc = loc_mutable_buffers l in
forall (i:nat).{:pattern Seq.index s i} n <= i /\ i < Seq.length s ==> (
let bi = Seq.index s i in
bi.bi_mutable == Mutable ==> loc_includes modloc (loc_buffer bi.bi_buffer))
))
(decreases l)
=
match l with
| [] -> ()
| h::t -> lemma_loc_mutable_buffers_rec t s (n + 1)
#pop-options
let lemma_loc_mutable_buffers (l:list buffer_info) : Lemma
(ensures (
let s = list_to_seq l in
forall (i:nat).{:pattern Seq.index s i} i < Seq.length s ==> (
let bi = Seq.index s i in
bi.bi_mutable == Mutable ==> loc_includes (loc_mutable_buffers l) (loc_buffer bi.bi_buffer))
))
=
lemma_list_to_seq l;
lemma_loc_mutable_buffers_rec l (list_to_seq l) 0
let create_heaplets buffers h1 =
let bs = list_to_seq buffers in
let modloc = loc_mutable_buffers buffers in
let layout1 = h1.vf_layout in
let layin1 = layout1.vl_inner in
let (hmap, hsets) = make_owns h1.vf_heap bs (Seq.length bs) in
let hmap a = Option.mapTot (fun n -> (Seq.index bs n).bi_heaplet) (hmap a) in
let l = {
vl_heaplets_initialized = true;
vl_heaplet_map = hmap;
vl_heaplet_sets = hsets;
vl_old_heap = h1.vf_heap;
vl_buffers = bs;
vl_mod_loc = modloc;
} in
let layout2 = {layout1 with vl_inner = l} in
let h2 = {
vf_layout = layout2;
vf_heap = h1.vf_heap;
vf_heaplets = h1.vf_heaplets;
} in
h2
let lemma_create_heaplets buffers h1 =
let bs = list_to_seq buffers in
let h2 = create_heaplets buffers h1 in
assert (h2.vf_layout.vl_inner.vl_buffers == bs); // REVIEW: why is this necessary, even with extra ifuel?
lemma_make_owns h1.vf_heap bs (Seq.length bs);
lemma_loc_mutable_buffers buffers;
reveal_opaque (`%valid_layout_buffer_id) valid_layout_buffer_id;
()
let destroy_heaplets h1 =
h1
let lemma_destroy_heaplets h1 =
()
val heap_shift (m1 m2:S.machine_heap) (base:int) (n:nat) : Lemma
(requires (forall i. 0 <= i /\ i < n ==> m1.[base + i] == m2.[base + i]))
(ensures (forall i. {:pattern (m1.[i])} base <= i /\ i < base + n ==> m1.[i] == m2.[i]))
#push-options "--smtencoding.l_arith_repr boxwrap"
let heap_shift m1 m2 base n =
assert (forall i. base <= i /\ i < base + n ==>
m1.[base + (i - base)] == m2.[base + (i - base)])
#pop-options
val same_mem_eq_slices64 (b:buffer64{buffer_writeable b})
(i:nat{i < buffer_length b})
(v:nat64)
(k:nat{k < buffer_length b})
(h1:vale_heap{List.memP b (IB.ptrs_of_mem (_ih h1))})
(h2:vale_heap{h2 == buffer_write b i v h1})
(mem1:S.machine_heap{IB.correct_down_p (_ih h1) mem1 b})
(mem2:S.machine_heap{IB.correct_down_p (_ih h2) mem2 b}) : Lemma
(requires (Seq.index (buffer_as_seq h1 b) k == Seq.index (buffer_as_seq h2 b) k))
(ensures (
k * 8 + 8 <= DV.length (get_downview b.bsrc) /\
Seq.slice (DV.as_seq (IB.hs_of_mem (_ih h1)) (get_downview b.bsrc)) (k * 8) (k * 8 + 8) ==
Seq.slice (DV.as_seq (IB.hs_of_mem (_ih h2)) (get_downview b.bsrc)) (k * 8) (k * 8 + 8)))
let same_mem_eq_slices64 b i v k h1 h2 mem1 mem2 =
let t = TUInt64 in
let db = get_downview b.bsrc in
let ub = UV.mk_buffer db (uint_view t) in
UV.as_seq_sel (IB.hs_of_mem (_ih h1)) ub k;
UV.as_seq_sel (IB.hs_of_mem (_ih h2)) ub k;
UV.put_sel (IB.hs_of_mem (_ih h1)) ub k;
UV.put_sel (IB.hs_of_mem (_ih h2)) ub k;
UV.length_eq ub
let length_up64 (b:buffer64) (h:vale_heap) (k:nat{k < buffer_length b}) (i:nat{i < 8}) : Lemma
(scale8 k + i <= DV.length (get_downview b.bsrc)) =
let vb = UV.mk_buffer (get_downview b.bsrc) uint64_view in
UV.length_eq vb
val same_mem_get_heap_val64 (b:buffer64{buffer_writeable b})
(i:nat{i < buffer_length b})
(v:nat64)
(k:nat{k < buffer_length b})
(h1:vale_heap{List.memP b (IB.ptrs_of_mem (_ih h1))})
(h2:vale_heap{h2 == buffer_write b i v h1})
(mem1:S.machine_heap{IB.correct_down_p (_ih h1) mem1 b})
(mem2:S.machine_heap{IB.correct_down_p (_ih h2) mem2 b}) : Lemma
(requires (Seq.index (buffer_as_seq h1 b) k == Seq.index (buffer_as_seq h2 b) k))
(ensures (let ptr = buffer_addr b h1 + scale8 k in
forall (x:int).{:pattern (mem1.[x])} ptr <= x /\ x < ptr + 8 ==> mem1.[x] == mem2.[x]))
let same_mem_get_heap_val64 b j v k h1 h2 mem1 mem2 =
let ptr = buffer_addr b h1 + scale8 k in
let addr = buffer_addr b h1 in
let aux (x:int{ptr <= x /\ x < ptr + 8}) : Lemma (mem1.[x] == mem2.[x]) =
let i = x - ptr in
let db = get_downview b.bsrc in
let ub = UV.mk_buffer db uint64_view in
UV.as_seq_sel (IB.hs_of_mem (_ih h1)) ub k;
UV.as_seq_sel (IB.hs_of_mem (_ih h2)) ub k;
same_mem_eq_slices64 b j v k h1 h2 mem1 mem2;
let s1 = (Seq.slice (DV.as_seq (IB.hs_of_mem (_ih h1)) db) (k * 8) (k * 8 + 8)) in
let s2 = (Seq.slice (DV.as_seq (IB.hs_of_mem (_ih h2)) db) (k * 8) (k * 8 + 8)) in
assert (Seq.index s1 i == Seq.index (DV.as_seq (IB.hs_of_mem (_ih h1)) db) (k * 8 + i));
length_up64 b h1 k i;
assert (mem1.[addr+(scale8 k + i)] == UInt8.v (Seq.index (DV.as_seq (IB.hs_of_mem (_ih h1)) db) (k * 8 + i)));
assert (Seq.index s2 i == Seq.index (DV.as_seq (IB.hs_of_mem (_ih h2)) db) (k * 8 + i));
length_up64 b h2 k i;
assert (mem2.[addr+(scale8 k + i)] == UInt8.v (Seq.index (DV.as_seq (IB.hs_of_mem (_ih h2)) db) (k * 8 + i)))
in
Classical.forall_intro aux;
assert (forall i. addr + (scale8 k + i) == ptr + i)
let rec written_buffer_down64_aux1
(b:buffer64{buffer_writeable b})
(i:nat{i < buffer_length b})
(v:nat64)
(h:vale_heap{List.memP b (IB.ptrs_of_mem (_ih h))})
(base:nat{base == buffer_addr b h})
(k:nat) (h1:vale_heap{h1 == buffer_write b i v h})
(mem1:S.machine_heap{IB.correct_down (_ih h) mem1})
(mem2:S.machine_heap{IB.correct_down (_ih h1) mem2 /\
(forall j.{:pattern (mem1.[j]) \/ (mem2.[j])}
base <= j /\ j < base + k * 8 ==>
mem1.[j] == mem2.[j])})
: Lemma
(ensures (forall j. {:pattern (mem1.[j]) \/ (mem1.[j])}
j >= base /\ j < base + scale8 i ==>
mem1.[j] == mem2.[j]))
(decreases %[i-k]) =
if k >= i then ()
else begin
let ptr = base + scale8 k in
same_mem_get_heap_val64 b i v k h h1 mem1 mem2;
heap_shift mem1 mem2 ptr 8;
written_buffer_down64_aux1 b i v h base (k+1) h1 mem1 mem2
end
let rec written_buffer_down64_aux2
(b:buffer64{buffer_writeable b})
(i:nat{i < buffer_length b})
(v:nat64)
(h:vale_heap{List.memP b (IB.ptrs_of_mem (_ih h))})
(base:nat{base == buffer_addr b h})
(n:nat{n == buffer_length b})
(k:nat{k > i}) (h1:vale_heap{h1 == buffer_write b i v h})
(mem1:S.machine_heap{IB.correct_down (_ih h) mem1})
(mem2:S.machine_heap{IB.correct_down (_ih h1) mem2 /\
(forall j. {:pattern (mem1.[j]) \/ (mem2.[j])}
base + scale8 (i+1) <= j /\ j < base + k * 8 ==>
mem1.[j] == mem2.[j])})
: Lemma
(ensures (forall j. {:pattern (mem1.[j]) \/ (mem2.[j])}
j >= base + scale8 (i+1) /\ j < base + scale8 n ==>
mem1.[j] == mem2.[j]))
(decreases %[n-k]) =
if k >= n then ()
else begin
let ptr = base + scale8 k in
same_mem_get_heap_val64 b i v k h h1 mem1 mem2;
heap_shift mem1 mem2 ptr 8;
written_buffer_down64_aux2 b i v h base n (k+1) h1 mem1 mem2
end
let written_buffer_down64 (b:buffer64{buffer_writeable b}) (i:nat{i < buffer_length b}) (v:nat64) (h:vale_heap)
: Lemma
(requires List.memP b (IB.ptrs_of_mem (_ih h)))
(ensures (
let mem1 = I.down_mem (_ih h) in
let h1 = buffer_write b i v h in
let mem2 = I.down_mem (_ih h1) in
let base = buffer_addr b h in
let n = buffer_length b in
forall j. {:pattern (mem1.[j]) \/ (mem2.[j])}
(base <= j /\ j < base + scale8 i) \/
(base + scale8 (i+1) <= j /\ j < base + scale8 n) ==>
mem1.[j] == mem2.[j]))
= let mem1 = I.down_mem (_ih h) in
let h1 = buffer_write b i v h in
let mem2 = I.down_mem (_ih h1) in
let base = buffer_addr b h in
let n = buffer_length b in
written_buffer_down64_aux1 b i v h base 0 h1 mem1 mem2;
written_buffer_down64_aux2 b i v h base n (i+1) h1 mem1 mem2
let unwritten_buffer_down (t:base_typ) (b:buffer t{buffer_writeable b})
(i:nat{i < buffer_length b})
(v:base_typ_as_vale_type t)
(h:vale_heap{List.memP b (IB.ptrs_of_mem (_ih h))})
: Lemma
(ensures (
let mem1 = I.down_mem (_ih h) in
let h1 = buffer_write b i v h in
let mem2 = I.down_mem (_ih h1) in
forall (a:b8{List.memP a (IB.ptrs_of_mem (_ih h)) /\ a =!= b}) j. {:pattern mem1.[j]; List.memP a (IB.ptrs_of_mem (_ih h)) \/ mem2.[j]; List.memP a (IB.ptrs_of_mem (_ih h))}
let base = (IB.addrs_of_mem (_ih h)) a in
j >= base /\ j < base + DV.length (get_downview a.bsrc) ==> mem1.[j] == mem2.[j]))
= let aux (a:b8{a =!= b /\ List.memP a (IB.ptrs_of_mem (_ih h))})
: Lemma
(ensures (
let base = (IB.addrs_of_mem (_ih h)) a in
let mem1 = I.down_mem (_ih h) in
let h1 = buffer_write b i v h in
let mem2 = I.down_mem (_ih h1) in
forall j.
j >= base /\ j < base + DV.length (get_downview a.bsrc) ==>
mem1.[j] == mem2.[j]))
= let db = get_downview a.bsrc in
if DV.length db = 0 then ()
else
let mem1 = I.down_mem (_ih h) in
let h1 = buffer_write b i v h in
let mem2 = I.down_mem (_ih h1) in
let base = (IB.addrs_of_mem (_ih h)) a in
let s0 = DV.as_seq (IB.hs_of_mem (_ih h)) db in
let s1 = DV.as_seq (IB.hs_of_mem (_ih h1)) db in
opaque_assert (`%IB.list_disjoint_or_eq) IB.list_disjoint_or_eq IB.list_disjoint_or_eq_def (MB.disjoint a.bsrc b.bsrc);
lemma_dv_equal (IB.down_view a.src) a.bsrc (IB.hs_of_mem (_ih h)) (IB.hs_of_mem (_ih h1));
assert (Seq.equal s0 s1);
assert (forall (j:int).{:pattern (mem1.[j])}
base <= j /\ j < base + Seq.length s0 ==> v_to_typ TUInt8 (Seq.index s0 (j - base)) == mem1.[j]);
heap_shift mem1 mem2 base (DV.length db)
in
Classical.forall_intro aux
let store_buffer_down64_mem
(b:buffer64{buffer_writeable b})
(i:nat{i < buffer_length b})
(v:nat64)
(h:vale_heap{List.memP b (IB.ptrs_of_mem (_ih h))})
: Lemma
(ensures (
let mem1 = I.down_mem (_ih h) in
let h1 = buffer_write b i v h in
let mem2 = I.down_mem (_ih h1) in
let base = buffer_addr b h in
forall (j:int). {:pattern mem1.[j] \/ mem2.[j]}
j < base + scale8 i \/ j >= base + scale8 (i+1) ==>
mem1.[j] == mem2.[j]))
= let mem1 = I.down_mem (_ih h) in
let h1 = buffer_write b i v h in
let mem2 = I.down_mem (_ih h1) in
let base = buffer_addr b h in
let n = buffer_length b in
let aux (j:int)
: Lemma
(j < base + scale8 i \/ j >= base + scale8 (i+1) ==>
mem1.[j] == mem2.[j])
=
I.addrs_set_lemma_all ();
if j >= base && j < base + DV.length (get_downview b.bsrc) then begin
written_buffer_down64 b i v h;
length_t_eq (TUInt64) b
end
else if not (I.valid_addr (_ih h) j)
then I.same_unspecified_down (IB.hs_of_mem (_ih h)) (IB.hs_of_mem (_ih h1)) (IB.ptrs_of_mem (_ih h))
else unwritten_buffer_down TUInt64 b i v h
in
Classical.forall_intro aux
let store_buffer_aux_down64_mem (ptr:int) (v:nat64) (h:vale_heap{writeable_mem64 ptr h})
: Lemma
(ensures (
let mem1 = I.down_mem (_ih h) in
let h1 = store_mem (TUInt64) ptr v h in
let mem2 = I.down_mem (_ih h1) in
forall j. {:pattern mem1.[j] \/ mem2.[j]}
j < ptr \/ j >= ptr + 8 ==>
mem1.[j] == mem2.[j]))
= let t = TUInt64 in
let h1 = store_mem t ptr v h in
let b = Some?.v (find_writeable_buffer t ptr h) in
length_t_eq t b;
let i = get_addr_in_ptr t (buffer_length b) (buffer_addr b h) ptr 0 in
store_buffer_write t ptr v h;
assert (buffer_addr b h + scale8 i == ptr);
assert (buffer_addr b h + scale8 (i+1) == ptr + 8);
store_buffer_down64_mem b i v h
let store_buffer_aux_down64_mem2 (ptr:int) (v:nat64) (h:vale_heap{writeable_mem64 ptr h})
: Lemma
(ensures (
let h1 = store_mem (TUInt64) ptr v h in
let mem2 = I.down_mem (_ih h1) in
S.get_heap_val64 ptr mem2 == v))
= let t = TUInt64 in
let b = Some?.v (find_writeable_buffer t ptr h) in
length_t_eq t b;
let i = get_addr_in_ptr t (buffer_length b) (buffer_addr b h) ptr 0 in
let h1 = store_mem t ptr v h in
let mem2 = I.down_mem (_ih h1) in
store_buffer_write t ptr v h;
assert (Seq.index (buffer_as_seq h1 b) i == v);
index64_get_heap_val64 h1 b mem2 i
let in_bounds64 (h:vale_heap) (b:buffer64) (i:nat{i < buffer_length b})
: Lemma (scale8 i + 8 <= DV.length (get_downview b.bsrc))
=
length_t_eq TUInt64 b
let bytes_valid64 ptr h =
reveal_opaque (`%S.valid_addr64) S.valid_addr64;
let t = TUInt64 in
let b = get_addr_ptr t ptr h in
let i = get_addr_in_ptr t (buffer_length b) (buffer_addr b h) ptr 0 in
in_bounds64 h b i;
I.addrs_set_mem (_ih h) b ptr;
I.addrs_set_mem (_ih h) b (ptr+1);
I.addrs_set_mem (_ih h) b (ptr+2);
I.addrs_set_mem (_ih h) b (ptr+3);
I.addrs_set_mem (_ih h) b (ptr+4);
I.addrs_set_mem (_ih h) b (ptr+5);
I.addrs_set_mem (_ih h) b (ptr+6);
I.addrs_set_mem (_ih h) b (ptr+7)
val same_mem_get_heap_val128 (b:buffer128)
(i:nat{i < buffer_length b})
(v:quad32)
(k:nat{k < buffer_length b})
(h1:vale_heap{List.memP b (IB.ptrs_of_mem (_ih h1)) /\ buffer_writeable b})
(h2:vale_heap{h2 == buffer_write b i v h1})
(mem1:S.machine_heap{IB.correct_down_p (_ih h1) mem1 b})
(mem2:S.machine_heap{IB.correct_down_p (_ih h2) mem2 b}) : Lemma
(requires (Seq.index (buffer_as_seq h1 b) k == Seq.index (buffer_as_seq h2 b) k))
(ensures (let ptr = buffer_addr b h1 + scale16 k in
forall i.{:pattern mem1.[i]} i >= ptr /\ i < ptr+16 ==> mem1.[i] == mem2.[i]))
val same_mem_eq_slices128 (b:buffer128)
(i:nat{i < buffer_length b})
(v:quad32)
(k:nat{k < buffer_length b})
(h1:vale_heap{List.memP b (IB.ptrs_of_mem (_ih h1)) /\ buffer_writeable b})
(h2:vale_heap{h2 == buffer_write b i v h1})
(mem1:S.machine_heap{IB.correct_down_p (_ih h1) mem1 b})
(mem2:S.machine_heap{IB.correct_down_p (_ih h2) mem2 b}) : Lemma
(requires (Seq.index (buffer_as_seq h1 b) k == Seq.index (buffer_as_seq h2 b) k))
(ensures (
k * 16 + 16 <= DV.length (get_downview b.bsrc) /\
Seq.slice (DV.as_seq (IB.hs_of_mem (_ih h1)) (get_downview b.bsrc)) (k * 16) (k * 16 + 16) ==
Seq.slice (DV.as_seq (IB.hs_of_mem (_ih h2)) (get_downview b.bsrc)) (k * 16) (k * 16 + 16)))
let same_mem_eq_slices128 b i v k h1 h2 mem1 mem2 =
let t = TUInt128 in
let db = get_downview b.bsrc in
let ub = UV.mk_buffer db (uint_view t) in
UV.as_seq_sel (IB.hs_of_mem (_ih h1)) ub k;
UV.as_seq_sel (IB.hs_of_mem (_ih h2)) ub k;
UV.put_sel (IB.hs_of_mem (_ih h1)) ub k;
UV.put_sel (IB.hs_of_mem (_ih h2)) ub k;
UV.length_eq ub
let length_up128 (b:buffer128) (h:vale_heap) (k:nat{k < buffer_length b}) (i:nat{i < 16}) : Lemma
(scale16 k + i <= DV.length (get_downview b.bsrc)) =
let vb = UV.mk_buffer (get_downview b.bsrc) uint128_view in
UV.length_eq vb
let same_mem_get_heap_val128 b j v k h1 h2 mem1 mem2 =
let ptr = buffer_addr b h1 + scale16 k in
let addr = buffer_addr b h1 in
let aux (i:nat{ptr <= i /\ i < ptr+16}) : Lemma (mem1.[i] == mem2.[i]) =
let db = get_downview b.bsrc in
let ub = UV.mk_buffer db uint128_view in
UV.as_seq_sel (IB.hs_of_mem (_ih h1)) ub k;
UV.as_seq_sel (IB.hs_of_mem (_ih h2)) ub k;
same_mem_eq_slices128 b j v k h1 h2 mem1 mem2;
let s1 = (Seq.slice (DV.as_seq (IB.hs_of_mem (_ih h1)) db) (k * 16) (k * 16 + 16)) in
let s2 = (Seq.slice (DV.as_seq (IB.hs_of_mem (_ih h2)) db) (k * 16) (k * 16 + 16)) in
assert (Seq.index s1 (i - ptr) == Seq.index (DV.as_seq (IB.hs_of_mem (_ih h1)) db) (k * 16 + (i-ptr)));
length_up128 b h1 k (i-ptr);
assert (mem1.[i] == UInt8.v (Seq.index (DV.as_seq (IB.hs_of_mem (_ih h1)) db) (k * 16 + (i-ptr))));
assert (Seq.index s2 (i-ptr) == Seq.index (DV.as_seq (IB.hs_of_mem (_ih h2)) db) (k * 16 + (i-ptr)));
length_up128 b h2 k (i-ptr);
assert (mem2.[addr+(scale16 k + (i-ptr))] == UInt8.v (Seq.index (DV.as_seq (IB.hs_of_mem (_ih h2)) db) (k * 16 + (i-ptr))));
assert (forall i. addr + (scale16 k + (i-ptr)) == i)
in
Classical.forall_intro aux
let in_bounds128 (h:vale_heap) (b:buffer128) (i:nat{i < buffer_length b}) : Lemma
(scale16 i + 16 <= DV.length (get_downview b.bsrc))
=
length_t_eq TUInt128 b
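// bytes_valid128 shows each of the sixteen bytes of the slot is in the address
// set, which, after revealing valid_addr128, gives validity of the whole
// 128-bit access on the machine heap.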
#push-options "--z3rlimit 20"
#restart-solver
let bytes_valid128 ptr h =
reveal_opaque (`%S.valid_addr128) S.valid_addr128;
IB.list_disjoint_or_eq_reveal ();
let t = TUInt128 in
let b = get_addr_ptr t ptr h in
let i = get_addr_in_ptr t (buffer_length b) (buffer_addr b h) ptr 0 in
in_bounds128 h b i;
I.addrs_set_mem (_ih h) b ptr;
I.addrs_set_mem (_ih h) b (ptr+1);
I.addrs_set_mem (_ih h) b (ptr+2);
I.addrs_set_mem (_ih h) b (ptr+3);
I.addrs_set_mem (_ih h) b (ptr+4);
I.addrs_set_mem (_ih h) b (ptr+5);
I.addrs_set_mem (_ih h) b (ptr+6);
I.addrs_set_mem (_ih h) b (ptr+7);
I.addrs_set_mem (_ih h) b (ptr+8);
I.addrs_set_mem (_ih h) b (ptr+9);
I.addrs_set_mem (_ih h) b (ptr+10);
I.addrs_set_mem (_ih h) b (ptr+11);
I.addrs_set_mem (_ih h) b (ptr+12);
I.addrs_set_mem (_ih h) b (ptr+13);
I.addrs_set_mem (_ih h) b (ptr+14);
I.addrs_set_mem (_ih h) b (ptr+15)
#pop-options
let equiv_load_mem64 ptr h =
let t = TUInt64 in
let b = get_addr_ptr t ptr h in
let i = get_addr_in_ptr t (buffer_length b) (buffer_addr b h) ptr 0 in
let addr = buffer_addr b h in
let contents = DV.as_seq (_ih h).IB.hs (get_downview b.bsrc) in
let heap = get_heap h in
index64_get_heap_val64 h b heap i;
lemma_load_mem64 b i h
//let low_lemma_valid_mem64 b i h =
// lemma_valid_mem64 b i h;
// bytes_valid64 (buffer_addr b h + scale8 i) h
//let low_lemma_load_mem64 b i h =
// lemma_valid_mem64 b i h;
// lemma_load_mem64 b i h;
// equiv_load_mem64 (buffer_addr b h + scale8 i) h
//let same_domain_update64 b i v h =
// low_lemma_valid_mem64 b i h;
// Vale.Arch.MachineHeap.same_domain_update64 (buffer_addr b h + scale8 i) v (get_heap h)
open Vale.X64.BufferViewStore
let low_lemma_store_mem64_aux
(b:buffer64)
(heap:S.machine_heap)
(i:nat{i < buffer_length b})
(v:nat64)
(h:vale_heap{buffer_readable h b /\ buffer_writeable b})
: Lemma
(requires IB.correct_down_p (_ih h) heap b)
(ensures (let heap' = S.update_heap64 (buffer_addr b h + scale8 i) v heap in
let h' = store_mem64 (buffer_addr b h + scale8 i) v h in
(_ih h').IB.hs == DV.upd_seq (_ih h).IB.hs (get_downview b.bsrc) (I.get_seq_heap heap' (_ih h).IB.addrs b))) =
let ptr = buffer_addr b h + scale8 i in
let heap' = S.update_heap64 ptr v heap in
let h' = store_mem64 ptr v h in
lemma_store_mem64 b i v h;
length_t_eq TUInt64 b;
bv_upd_update_heap64 b heap i v (_ih h);
let db = get_downview b.bsrc in
let bv = UV.mk_buffer db Vale.Interop.Views.up_view64 in
assert (UV.upd (_ih h).IB.hs bv i (UInt64.uint_to_t v) == (_ih h').IB.hs)
val valid_state_store_mem64_aux: (i:nat) -> (v:nat64) -> (h:vale_heap) -> Lemma
(requires writeable_mem64 i h)
(ensures (
let heap = get_heap h in
let heap' = S.update_heap64 i v heap in
let h' = store_mem64 i v h in
heap' == I.down_mem (_ih h')
))
let valid_state_store_mem64_aux i v h =
let heap = get_heap h in
let heap' = S.update_heap64 i v heap in
let h1 = store_mem TUInt64 i v h in
store_buffer_aux_down64_mem i v h;
store_buffer_aux_down64_mem2 i v h;
let mem1 = heap' in
let mem2 = I.down_mem (_ih h1) in
let aux () : Lemma (forall j. mem1.[j] == mem2.[j]) =
Vale.Arch.MachineHeap.same_mem_get_heap_val64 i mem1 mem2;
Vale.Arch.MachineHeap.correct_update_get64 i v heap;
Vale.Arch.MachineHeap.frame_update_heap64 i v heap
in let aux2 () : Lemma (Set.equal (Map.domain mem1) (Map.domain mem2)) =
bytes_valid64 i h;
Vale.Arch.MachineHeap.same_domain_update64 i v heap
in aux(); aux2();
Map.lemma_equal_intro mem1 mem2
let low_lemma_load_mem64_full b i vfh t hid =
reveal_opaque (`%valid_layout_buffer_id) valid_layout_buffer_id;
()
#push-options "--z3rlimit 20"
#restart-solver
let low_lemma_store_mem64 b i v h =
lemma_writeable_mem64 b i h;
lemma_store_mem64 b i v h;
valid_state_store_mem64_aux (buffer_addr b h + scale8 i) v h;
let heap = get_heap h in
let heap' = S.update_heap64 (buffer_addr b h + scale8 i) v heap in
low_lemma_store_mem64_aux b heap i v h;
Vale.Arch.MachineHeap.frame_update_heap64 (buffer_addr b h + scale8 i) v heap;
in_bounds64 h b i;
I.addrs_set_lemma_all ();
I.update_buffer_up_mem (_ih h) b heap heap'
#pop-options
#set-options "--z3rlimit 100"
#restart-solver
let lemma_is_full_update
(vfh:vale_full_heap) (h hk hk':vale_heap) (k:heaplet_id) (mh mh' mhk mhk':machine_heap) (mt mt':memtaint)
(t:base_typ) (b:buffer t) (ptr:int) (v_size:nat)
(index:nat) (v:base_typ_as_vale_type t) (tn:taint)
: Lemma
(requires
vfh.vf_layout.vl_inner.vl_heaplets_initialized /\
mem_inv vfh /\
buffer_readable hk b /\
buffer_writeable b /\
index < Seq.length (buffer_as_seq hk b) /\
mt == vfh.vf_layout.vl_taint /\
h == vfh.vf_heap /\
hk == Map16.sel vfh.vf_heaplets k /\
mh == h.mh /\
mhk == hk.mh /\
ptr == buffer_addr b hk + scale_by v_size index /\
mt' == S.update_n ptr v_size (heap_taint (coerce vfh)) tn /\
hk' == buffer_write b index v hk /\
valid_layout_buffer b vfh.vf_layout hk true /\
valid_taint_buf b hk mt tn /\
is_machine_heap_update mh mh' /\ upd_heap h mh' == buffer_write b index v h /\
is_machine_heap_update mhk mhk' /\ upd_heap hk mhk' == buffer_write b index v hk /\
(forall j.{:pattern mh.[j] \/ mh'.[j]} j < ptr \/ j >= ptr + v_size ==> mh.[j] == mh'.[j]) /\
(forall j.{:pattern mhk.[j] \/ mhk'.[j]} j < ptr \/ j >= ptr + v_size ==> mhk.[j] == mhk'.[j]) /\
0 <= scale_by v_size index /\ scale_by v_size index + v_size <= DV.length (get_downview b.bsrc) /\
(forall i.{:pattern mh'.[i] \/ mhk'.[i]} i >= ptr /\ i < ptr + v_size ==> mh'.[i] == mhk'.[i]) /\
True
)
(ensures is_full_update vfh hk' k mh' mt')
=
reveal_opaque (`%valid_layout_buffer_id) valid_layout_buffer_id;
let vfh' = coerce (heap_upd (coerce vfh) mh' mt') in
let dom_upd = Set.intersect (vfh.vf_layout.vl_inner.vl_heaplet_sets k) (Map.domain mhk) in
let mhk'' = Map.concat mhk (Map.restrict dom_upd mh') in
assert (Map.equal mhk'' mhk');
let unchanged (j:heaplet_id) : Lemma
(requires j =!= k)
(ensures Map16.sel vfh'.vf_heaplets j == Map16.sel vfh.vf_heaplets j)
[SMTPat (Map16.sel vfh'.vf_heaplets j)]
=
assert (Map.equal (Map16.sel vfh'.vf_heaplets j).mh (Map16.sel vfh.vf_heaplets j).mh);
I.down_up_identity (Map16.sel vfh.vf_heaplets j).ih;
()
in
assert (Map16.equal vfh'.vf_heaplets (Map16.upd vfh.vf_heaplets k hk'));
assert (Map.equal mt' mt);
Vale.Interop.Heap_s.list_disjoint_or_eq_reveal ();
()
let low_lemma_store_mem64_full b i v vfh t hid =
let (h, mt, hk) = (vfh.vf_heap, vfh.vf_layout.vl_taint, Map16.get vfh.vf_heaplets hid) in
let ptr = buffer_addr b hk + scale8 i in
let mh' = S.update_heap64 ptr v (heap_get (coerce vfh)) in
let mt' = S.update_n ptr 8 (heap_taint (coerce vfh)) t in
let hk' = buffer_write b i v hk in
let mhk' = S.update_heap64 ptr v (get_heap hk) in
reveal_opaque (`%valid_layout_buffer_id) valid_layout_buffer_id;
low_lemma_store_mem64 b i v h;
low_lemma_store_mem64 b i v (Map16.get vfh.vf_heaplets hid);
Vale.Arch.MachineHeap.frame_update_heap64 ptr v h.mh;
Vale.Arch.MachineHeap.frame_update_heap64 ptr v hk.mh;
in_bounds64 hk b i;
Vale.Arch.MachineHeap.same_mem_get_heap_val64 ptr mh' mhk';
lemma_is_full_update vfh h hk hk' hid h.mh mh' hk.mh mhk' mt mt' TUInt64 b ptr 8 i v t;
()
val low_lemma_valid_mem128 (b:buffer128) (i:nat) (h:vale_heap) : Lemma
(requires
i < Seq.length (buffer_as_seq h b) /\
buffer_readable h b
)
(ensures
S.valid_addr128 (buffer_addr b h + scale16 i) (get_heap h)
) | false | false | Vale.X64.Memory_Sems.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 100,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val low_lemma_valid_mem128 (b:buffer128) (i:nat) (h:vale_heap) : Lemma
(requires
i < Seq.length (buffer_as_seq h b) /\
buffer_readable h b
)
(ensures
S.valid_addr128 (buffer_addr b h + scale16 i) (get_heap h)
) | [] | Vale.X64.Memory_Sems.low_lemma_valid_mem128 | {
"file_name": "vale/code/arch/x64/Vale.X64.Memory_Sems.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | b: Vale.X64.Memory.buffer128 -> i: Prims.nat -> h: Vale.Arch.HeapImpl.vale_heap
-> FStar.Pervasives.Lemma
(requires
i < FStar.Seq.Base.length (Vale.X64.Memory.buffer_as_seq h b) /\
Vale.X64.Memory.buffer_readable h b)
(ensures
Vale.Arch.MachineHeap_s.valid_addr128 (Vale.X64.Memory.buffer_addr b h +
Vale.X64.Memory.scale16 i)
(Vale.X64.Memory_Sems.get_heap h)) | {
"end_col": 48,
"end_line": 735,
"start_col": 2,
"start_line": 734
} |
Prims.Pure | val set_of_range (a: int) (n: nat)
: Pure (Set.set int)
(requires True)
(ensures
fun s -> (forall (i: int). {:pattern Set.mem i s} Set.mem i s <==> a <= i /\ i < a + n)) | [
{
"abbrev": true,
"full_module": "Vale.Interop.Base",
"short_module": "IB"
},
{
"abbrev": false,
"full_module": "Vale.Arch.Heap",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Lib.BufferViewHelpers",
"short_module": null
},
{
"abbrev": true,
"full_module": "LowStar.BufferView.Down",
"short_module": "DV"
},
{
"abbrev": true,
"full_module": "LowStar.BufferView.Up",
"short_module": "UV"
},
{
"abbrev": true,
"full_module": "LowStar.Monotonic.Buffer",
"short_module": "MB"
},
{
"abbrev": true,
"full_module": "Vale.X64.Machine_Semantics_s",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.Interop",
"short_module": "I"
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Machine_Semantics_s",
"short_module": "S"
},
{
"abbrev": false,
"full_module": "Vale.Lib.Seqs",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.MachineHeap_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Heap",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let rec set_of_range (a:int) (n:nat) : Pure (Set.set int)
(requires True)
(ensures fun s -> (forall (i:int).{:pattern Set.mem i s} Set.mem i s <==> a <= i /\ i < a + n))
=
if n = 0 then Set.empty else Set.union (set_of_range a (n - 1)) (Set.singleton (a + n - 1)) | val set_of_range (a: int) (n: nat)
: Pure (Set.set int)
(requires True)
(ensures
fun s -> (forall (i: int). {:pattern Set.mem i s} Set.mem i s <==> a <= i /\ i < a + n))
let rec set_of_range (a: int) (n: nat)
: Pure (Set.set int)
(requires True)
(ensures
fun s -> (forall (i: int). {:pattern Set.mem i s} Set.mem i s <==> a <= i /\ i < a + n)) = | false | null | false | if n = 0 then Set.empty else Set.union (set_of_range a (n - 1)) (Set.singleton (a + n - 1)) | {
"checked_file": "Vale.X64.Memory_Sems.fst.checked",
"dependencies": [
"Vale.X64.Memory.fst.checked",
"Vale.X64.Memory.fst.checked",
"Vale.X64.Machine_Semantics_s.fst.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.BufferViewStore.fsti.checked",
"Vale.Lib.BufferViewHelpers.fst.checked",
"Vale.Interop.Views.fsti.checked",
"Vale.Interop.Heap_s.fst.checked",
"Vale.Interop.Base.fst.checked",
"Vale.Interop.fsti.checked",
"Vale.Def.Words_s.fsti.checked",
"Vale.Def.Words.Two_s.fsti.checked",
"Vale.Def.Words.Four_s.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Def.Opaque_s.fsti.checked",
"Vale.Arch.MachineHeap.fsti.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"Vale.Arch.Heap.fsti.checked",
"prims.fst.checked",
"LowStar.Monotonic.Buffer.fsti.checked",
"LowStar.BufferView.Up.fsti.checked",
"LowStar.BufferView.Down.fsti.checked",
"FStar.UInt8.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.Set.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Option.fst.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.List.fst.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": true,
"source_file": "Vale.X64.Memory_Sems.fst"
} | [] | [
"Prims.int",
"Prims.nat",
"Prims.op_Equality",
"FStar.Set.empty",
"Prims.bool",
"FStar.Set.union",
"Vale.X64.Memory_Sems.set_of_range",
"Prims.op_Subtraction",
"FStar.Set.singleton",
"Prims.op_Addition",
"FStar.Set.set",
"Prims.l_True",
"Prims.l_Forall",
"Prims.l_iff",
"Prims.b2t",
"FStar.Set.mem",
"Prims.l_and",
"Prims.op_LessThanOrEqual",
"Prims.op_LessThan"
] | [] | module Vale.X64.Memory_Sems
open FStar.Mul
open Vale.Def.Prop_s
open Vale.Def.Opaque_s
open Vale.X64.Machine_s
open Vale.X64.Memory
open Vale.Def.Words_s
module I = Vale.Interop
module S = Vale.X64.Machine_Semantics_s
module MB = LowStar.Monotonic.Buffer
module UV = LowStar.BufferView.Up
module DV = LowStar.BufferView.Down
open Vale.Lib.BufferViewHelpers
open Vale.Arch.HeapImpl
open Vale.Arch.Heap
friend Vale.X64.Memory
module IB = Vale.Interop.Base
let same_domain h m = Set.equal (IB.addrs_set (_ih h)) (Map.domain m)
let lemma_same_domains h m1 m2 = ()
let get_heap h = I.down_mem (_ih h)
let upd_heap h m = mi_heap_upd h m
//let lemma_upd_get_heap h = I.down_up_identity (_ih h)
let lemma_get_upd_heap h m = I.up_down_identity (_ih h) m
let lemma_heap_impl = ()
let lemma_heap_get_heap h = ()
let lemma_heap_taint h = ()
//let lemma_heap_upd_heap h mh mt = ()
[@"opaque_to_smt"]
let rec set_of_range (a:int) (n:nat) : Pure (Set.set int)
(requires True)
(ensures fun s -> (forall (i:int).{:pattern Set.mem i s} Set.mem i s <==> a <= i /\ i < a + n)) | false | false | Vale.X64.Memory_Sems.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val set_of_range (a: int) (n: nat)
: Pure (Set.set int)
(requires True)
(ensures
fun s -> (forall (i: int). {:pattern Set.mem i s} Set.mem i s <==> a <= i /\ i < a + n)) | [
"recursion"
] | Vale.X64.Memory_Sems.set_of_range | {
"file_name": "vale/code/arch/x64/Vale.X64.Memory_Sems.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | a: Prims.int -> n: Prims.nat -> Prims.Pure (FStar.Set.set Prims.int) | {
"end_col": 93,
"end_line": 46,
"start_col": 2,
"start_line": 46
} |
Prims.GTot | val make_owns_rec (h: vale_heap) (bs: Seq.seq buffer_info) (n: nat{n <= Seq.length bs})
: GTot ((int -> option (n: nat{n < Seq.length bs})) & (heaplet_id -> Set.set int)) | [
{
"abbrev": true,
"full_module": "Vale.Interop.Base",
"short_module": "IB"
},
{
"abbrev": false,
"full_module": "Vale.Arch.Heap",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Lib.BufferViewHelpers",
"short_module": null
},
{
"abbrev": true,
"full_module": "LowStar.BufferView.Down",
"short_module": "DV"
},
{
"abbrev": true,
"full_module": "LowStar.BufferView.Up",
"short_module": "UV"
},
{
"abbrev": true,
"full_module": "LowStar.Monotonic.Buffer",
"short_module": "MB"
},
{
"abbrev": true,
"full_module": "Vale.X64.Machine_Semantics_s",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.Interop",
"short_module": "I"
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Machine_Semantics_s",
"short_module": "S"
},
{
"abbrev": false,
"full_module": "Vale.Lib.Seqs",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.MachineHeap_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Heap",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let rec make_owns_rec (h:vale_heap) (bs:Seq.seq buffer_info) (n:nat{n <= Seq.length bs}) :
GTot ((int -> option (n:nat{n < Seq.length bs})) & (heaplet_id -> Set.set int))
=
if n = 0 then ((fun _ -> None), (fun _ -> Set.empty)) else
let (m0, s0) = make_owns_rec h bs (n - 1) in
let bi = Seq.index bs (n - 1) in
let b = bi.bi_buffer in
let hi = bi.bi_heaplet in
let addr = Vale.Interop.Heap_s.global_addrs_map b in
let len = DV.length (get_downview b.bsrc) in
let s_b = set_of_range addr len in
let s i = if i = hi then Set.union (s0 i) s_b else s0 i in
let m a = if addr <= a && a < addr + len then Some (n - 1) else m0 a in
(m, s) | val make_owns_rec (h: vale_heap) (bs: Seq.seq buffer_info) (n: nat{n <= Seq.length bs})
: GTot ((int -> option (n: nat{n < Seq.length bs})) & (heaplet_id -> Set.set int))
let rec make_owns_rec (h: vale_heap) (bs: Seq.seq buffer_info) (n: nat{n <= Seq.length bs})
: GTot ((int -> option (n: nat{n < Seq.length bs})) & (heaplet_id -> Set.set int)) = | false | null | false | if n = 0
then ((fun _ -> None), (fun _ -> Set.empty))
else
let m0, s0 = make_owns_rec h bs (n - 1) in
let bi = Seq.index bs (n - 1) in
let b = bi.bi_buffer in
let hi = bi.bi_heaplet in
let addr = Vale.Interop.Heap_s.global_addrs_map b in
let len = DV.length (get_downview b.bsrc) in
let s_b = set_of_range addr len in
let s i = if i = hi then Set.union (s0 i) s_b else s0 i in
let m a = if addr <= a && a < addr + len then Some (n - 1) else m0 a in
(m, s) | {
"checked_file": "Vale.X64.Memory_Sems.fst.checked",
"dependencies": [
"Vale.X64.Memory.fst.checked",
"Vale.X64.Memory.fst.checked",
"Vale.X64.Machine_Semantics_s.fst.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.BufferViewStore.fsti.checked",
"Vale.Lib.BufferViewHelpers.fst.checked",
"Vale.Interop.Views.fsti.checked",
"Vale.Interop.Heap_s.fst.checked",
"Vale.Interop.Base.fst.checked",
"Vale.Interop.fsti.checked",
"Vale.Def.Words_s.fsti.checked",
"Vale.Def.Words.Two_s.fsti.checked",
"Vale.Def.Words.Four_s.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Def.Opaque_s.fsti.checked",
"Vale.Arch.MachineHeap.fsti.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"Vale.Arch.Heap.fsti.checked",
"prims.fst.checked",
"LowStar.Monotonic.Buffer.fsti.checked",
"LowStar.BufferView.Up.fsti.checked",
"LowStar.BufferView.Down.fsti.checked",
"FStar.UInt8.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.Set.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Option.fst.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.List.fst.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": true,
"source_file": "Vale.X64.Memory_Sems.fst"
} | [
"sometrivial"
] | [
"Vale.Arch.HeapImpl.vale_heap",
"FStar.Seq.Base.seq",
"Vale.Arch.HeapImpl.buffer_info",
"Prims.nat",
"Prims.b2t",
"Prims.op_LessThanOrEqual",
"FStar.Seq.Base.length",
"Prims.op_Equality",
"Prims.int",
"FStar.Pervasives.Native.Mktuple2",
"FStar.Pervasives.Native.option",
"Prims.op_LessThan",
"Vale.Arch.HeapImpl.heaplet_id",
"FStar.Set.set",
"FStar.Pervasives.Native.None",
"FStar.Set.empty",
"Prims.bool",
"Prims.op_AmpAmp",
"Prims.op_Addition",
"FStar.Pervasives.Native.Some",
"Prims.op_Subtraction",
"FStar.Set.union",
"Vale.X64.Memory_Sems.set_of_range",
"LowStar.BufferView.Down.length",
"FStar.UInt8.t",
"Vale.Interop.Types.get_downview",
"Vale.Interop.Types.__proj__Buffer__item__src",
"Vale.Interop.Types.b8_preorder",
"Vale.Interop.Types.__proj__Buffer__item__writeable",
"Vale.Interop.Types.base_typ_as_type",
"Vale.Interop.Types.__proj__Buffer__item__bsrc",
"Vale.Def.Words_s.nat64",
"Vale.Interop.Heap_s.global_addrs_map",
"Vale.Arch.HeapImpl.__proj__Mkbuffer_info__item__bi_heaplet",
"Vale.Arch.HeapImpl.buffer",
"Vale.Arch.HeapImpl.__proj__Mkbuffer_info__item__bi_typ",
"Vale.Arch.HeapImpl.__proj__Mkbuffer_info__item__bi_buffer",
"FStar.Seq.Base.index",
"FStar.Pervasives.Native.tuple2",
"Vale.X64.Memory_Sems.make_owns_rec"
] | [] | module Vale.X64.Memory_Sems
open FStar.Mul
open Vale.Def.Prop_s
open Vale.Def.Opaque_s
open Vale.X64.Machine_s
open Vale.X64.Memory
open Vale.Def.Words_s
module I = Vale.Interop
module S = Vale.X64.Machine_Semantics_s
module MB = LowStar.Monotonic.Buffer
module UV = LowStar.BufferView.Up
module DV = LowStar.BufferView.Down
open Vale.Lib.BufferViewHelpers
open Vale.Arch.HeapImpl
open Vale.Arch.Heap
friend Vale.X64.Memory
module IB = Vale.Interop.Base
let same_domain h m = Set.equal (IB.addrs_set (_ih h)) (Map.domain m)
let lemma_same_domains h m1 m2 = ()
let get_heap h = I.down_mem (_ih h)
let upd_heap h m = mi_heap_upd h m
//let lemma_upd_get_heap h = I.down_up_identity (_ih h)
let lemma_get_upd_heap h m = I.up_down_identity (_ih h) m
let lemma_heap_impl = ()
let lemma_heap_get_heap h = ()
let lemma_heap_taint h = ()
//let lemma_heap_upd_heap h mh mt = ()
[@"opaque_to_smt"]
let rec set_of_range (a:int) (n:nat) : Pure (Set.set int)
(requires True)
(ensures fun s -> (forall (i:int).{:pattern Set.mem i s} Set.mem i s <==> a <= i /\ i < a + n))
=
if n = 0 then Set.empty else Set.union (set_of_range a (n - 1)) (Set.singleton (a + n - 1))
let buffer_info_has_addr (bi:buffer_info) (a:int) =
let b = bi.bi_buffer in
let addr = Vale.Interop.Heap_s.global_addrs_map b in
let len = DV.length (get_downview b.bsrc) in
addr <= a /\ a < addr + len
let buffer_info_has_addr_opt (bi:option buffer_info) (a:int) =
match bi with
| None -> False
| Some bi -> buffer_info_has_addr bi a
#set-options "--z3rlimit 100"
let rec make_owns_rec (h:vale_heap) (bs:Seq.seq buffer_info) (n:nat{n <= Seq.length bs}) :
GTot ((int -> option (n:nat{n < Seq.length bs})) & (heaplet_id -> Set.set int)) | false | false | Vale.X64.Memory_Sems.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 100,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val make_owns_rec (h: vale_heap) (bs: Seq.seq buffer_info) (n: nat{n <= Seq.length bs})
: GTot ((int -> option (n: nat{n < Seq.length bs})) & (heaplet_id -> Set.set int)) | [
"recursion"
] | Vale.X64.Memory_Sems.make_owns_rec | {
"file_name": "vale/code/arch/x64/Vale.X64.Memory_Sems.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} |
h: Vale.Arch.HeapImpl.vale_heap ->
bs: FStar.Seq.Base.seq Vale.Arch.HeapImpl.buffer_info ->
n: Prims.nat{n <= FStar.Seq.Base.length bs}
-> Prims.GTot
((_: Prims.int -> FStar.Pervasives.Native.option (n: Prims.nat{n < FStar.Seq.Base.length bs})) *
(_: Vale.Arch.HeapImpl.heaplet_id -> FStar.Set.set Prims.int)) | {
"end_col": 8,
"end_line": 73,
"start_col": 2,
"start_line": 63
} |
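The record above captures the shape of make_owns_rec: a right-to-left recursion over the buffer_info sequence in which step n claims the byte range of buffer n-1 for its heaplet, and that claim shadows whatever map the first n-1 buffers produced. A minimal, self-contained F* sketch of the same shadowing recursion (the list-of-ranges representation and the name owner_of are assumptions made for illustration, not part of the Vale sources):

module OwnsSketch

(* Simplified analogue of make_owns_rec: walk a list of (base, length)
   address ranges and report which range, if any, owns an address.
   The head of the list shadows the tail, just as buffer n-1 shadows
   the map m0 built for the first n-1 buffers. *)
let rec owner_of (ranges:list (int & nat)) (a:int) : option (int & nat) =
  match ranges with
  | [] -> None
  | (base, len) :: tl ->
    if base <= a && a < base + len then Some (base, len) else owner_of tl a

In the actual definition the accumulator is a pair of functions, the address-to-buffer-index map m and the per-heaplet address sets s, but the recursion and the shadowing order are the same.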
FStar.Pervasives.Lemma | val in_bounds128 (h: vale_heap) (b: buffer128) (i: nat{i < buffer_length b})
: Lemma (scale16 i + 16 <= DV.length (get_downview b.bsrc)) | [
{
"abbrev": true,
"full_module": "Vale.Interop.Base",
"short_module": "IB"
},
{
"abbrev": false,
"full_module": "Vale.Arch.Heap",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Lib.BufferViewHelpers",
"short_module": null
},
{
"abbrev": true,
"full_module": "LowStar.BufferView.Down",
"short_module": "DV"
},
{
"abbrev": true,
"full_module": "LowStar.BufferView.Up",
"short_module": "UV"
},
{
"abbrev": true,
"full_module": "LowStar.Monotonic.Buffer",
"short_module": "MB"
},
{
"abbrev": true,
"full_module": "Vale.X64.Machine_Semantics_s",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.Interop",
"short_module": "I"
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Machine_Semantics_s",
"short_module": "S"
},
{
"abbrev": false,
"full_module": "Vale.Lib.Seqs",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.MachineHeap_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Heap",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let in_bounds128 (h:vale_heap) (b:buffer128) (i:nat{i < buffer_length b}) : Lemma
(scale16 i + 16 <= DV.length (get_downview b.bsrc))
=
length_t_eq TUInt128 b | val in_bounds128 (h: vale_heap) (b: buffer128) (i: nat{i < buffer_length b})
: Lemma (scale16 i + 16 <= DV.length (get_downview b.bsrc))
let in_bounds128 (h: vale_heap) (b: buffer128) (i: nat{i < buffer_length b})
: Lemma (scale16 i + 16 <= DV.length (get_downview b.bsrc)) = | false | null | true | length_t_eq TUInt128 b | {
"checked_file": "Vale.X64.Memory_Sems.fst.checked",
"dependencies": [
"Vale.X64.Memory.fst.checked",
"Vale.X64.Memory.fst.checked",
"Vale.X64.Machine_Semantics_s.fst.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.BufferViewStore.fsti.checked",
"Vale.Lib.BufferViewHelpers.fst.checked",
"Vale.Interop.Views.fsti.checked",
"Vale.Interop.Heap_s.fst.checked",
"Vale.Interop.Base.fst.checked",
"Vale.Interop.fsti.checked",
"Vale.Def.Words_s.fsti.checked",
"Vale.Def.Words.Two_s.fsti.checked",
"Vale.Def.Words.Four_s.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Def.Opaque_s.fsti.checked",
"Vale.Arch.MachineHeap.fsti.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"Vale.Arch.Heap.fsti.checked",
"prims.fst.checked",
"LowStar.Monotonic.Buffer.fsti.checked",
"LowStar.BufferView.Up.fsti.checked",
"LowStar.BufferView.Down.fsti.checked",
"FStar.UInt8.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.Set.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Option.fst.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.List.fst.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": true,
"source_file": "Vale.X64.Memory_Sems.fst"
} | [
"lemma"
] | [
"Vale.Arch.HeapImpl.vale_heap",
"Vale.X64.Memory.buffer128",
"Prims.nat",
"Prims.b2t",
"Prims.op_LessThan",
"Vale.X64.Memory.buffer_length",
"Vale.X64.Memory.vuint128",
"Vale.X64.Memory.length_t_eq",
"Vale.Arch.HeapTypes_s.TUInt128",
"Prims.unit",
"Prims.l_True",
"Prims.squash",
"Prims.op_LessThanOrEqual",
"Prims.op_Addition",
"Vale.X64.Memory.scale16",
"LowStar.BufferView.Down.length",
"FStar.UInt8.t",
"Vale.Interop.Types.get_downview",
"Vale.Interop.Types.__proj__Buffer__item__src",
"Vale.Interop.Types.b8_preorder",
"Vale.Interop.Types.__proj__Buffer__item__writeable",
"Vale.Interop.Types.base_typ_as_type",
"Vale.Interop.Types.__proj__Buffer__item__bsrc",
"Prims.Nil",
"FStar.Pervasives.pattern"
] | [] | module Vale.X64.Memory_Sems
open FStar.Mul
open Vale.Def.Prop_s
open Vale.Def.Opaque_s
open Vale.X64.Machine_s
open Vale.X64.Memory
open Vale.Def.Words_s
module I = Vale.Interop
module S = Vale.X64.Machine_Semantics_s
module MB = LowStar.Monotonic.Buffer
module UV = LowStar.BufferView.Up
module DV = LowStar.BufferView.Down
open Vale.Lib.BufferViewHelpers
open Vale.Arch.HeapImpl
open Vale.Arch.Heap
friend Vale.X64.Memory
module IB = Vale.Interop.Base
let same_domain h m = Set.equal (IB.addrs_set (_ih h)) (Map.domain m)
let lemma_same_domains h m1 m2 = ()
let get_heap h = I.down_mem (_ih h)
let upd_heap h m = mi_heap_upd h m
//let lemma_upd_get_heap h = I.down_up_identity (_ih h)
let lemma_get_upd_heap h m = I.up_down_identity (_ih h) m
let lemma_heap_impl = ()
let lemma_heap_get_heap h = ()
let lemma_heap_taint h = ()
//let lemma_heap_upd_heap h mh mt = ()
[@"opaque_to_smt"]
let rec set_of_range (a:int) (n:nat) : Pure (Set.set int)
(requires True)
(ensures fun s -> (forall (i:int).{:pattern Set.mem i s} Set.mem i s <==> a <= i /\ i < a + n))
=
if n = 0 then Set.empty else Set.union (set_of_range a (n - 1)) (Set.singleton (a + n - 1))
let buffer_info_has_addr (bi:buffer_info) (a:int) =
let b = bi.bi_buffer in
let addr = Vale.Interop.Heap_s.global_addrs_map b in
let len = DV.length (get_downview b.bsrc) in
addr <= a /\ a < addr + len
let buffer_info_has_addr_opt (bi:option buffer_info) (a:int) =
match bi with
| None -> False
| Some bi -> buffer_info_has_addr bi a
#set-options "--z3rlimit 100"
let rec make_owns_rec (h:vale_heap) (bs:Seq.seq buffer_info) (n:nat{n <= Seq.length bs}) :
GTot ((int -> option (n:nat{n < Seq.length bs})) & (heaplet_id -> Set.set int))
=
if n = 0 then ((fun _ -> None), (fun _ -> Set.empty)) else
let (m0, s0) = make_owns_rec h bs (n - 1) in
let bi = Seq.index bs (n - 1) in
let b = bi.bi_buffer in
let hi = bi.bi_heaplet in
let addr = Vale.Interop.Heap_s.global_addrs_map b in
let len = DV.length (get_downview b.bsrc) in
let s_b = set_of_range addr len in
let s i = if i = hi then Set.union (s0 i) s_b else s0 i in
let m a = if addr <= a && a < addr + len then Some (n - 1) else m0 a in
(m, s)
[@"opaque_to_smt"]
let make_owns (h:vale_heap) (bs:Seq.seq buffer_info) (n:nat{n <= Seq.length bs}) :
GTot ((int -> option (n:nat{n < Seq.length bs})) & (heaplet_id -> Set.set int))
=
make_owns_rec h bs n
let rec lemma_make_owns (h:vale_heap) (bs:Seq.seq buffer_info) (n:nat) : Lemma
(requires
n <= Seq.length bs /\
(forall (i:nat).{:pattern Seq.index bs i} i < Seq.length bs ==> buffer_readable h (Seq.index bs i).bi_buffer) /\
(forall (i1 i2:nat).{:pattern (Seq.index bs i1); (Seq.index bs i2)}
i1 < Seq.length bs /\ i2 < Seq.length bs ==> buffer_info_disjoint (Seq.index bs i1) (Seq.index bs i2))
)
(ensures (
let (m, s) = make_owns h bs n in
(forall (i:heaplet_id) (a:int).{:pattern Set.mem a (s i)}
(Set.mem a (s i) <==> Option.mapTot (fun n -> (Seq.index bs n).bi_heaplet) (m a) == Some i) /\
(Set.mem a (s i) ==> buffer_info_has_addr_opt (Option.mapTot (fun n -> Seq.index bs n) (m a)) a) /\
(Set.mem a (s i) ==> Set.mem a (Map.domain h.mh))
) /\
(forall (k:nat) (a:int).{:pattern Set.mem a (s (Seq.index bs k).bi_heaplet)}
k < n /\ buffer_info_has_addr (Seq.index bs k) a ==> Set.mem a (s (Seq.index bs k).bi_heaplet))
))
=
reveal_opaque (`%make_owns) make_owns;
if n = 0 then () else
let _ = make_owns h bs (n - 1) in
let (m, s) = make_owns h bs n in
lemma_make_owns h bs (n - 1);
let bi = Seq.index bs (n - 1) in
let b = bi.bi_buffer in
let hi = bi.bi_heaplet in
let addr = Vale.Interop.Heap_s.global_addrs_map b in
let len = DV.length (get_downview b.bsrc) in
let s_b = set_of_range addr len in
let lem1 (a:int) : Lemma
(requires Set.mem a s_b)
(ensures Set.mem a (Map.domain h.mh))
[SMTPat (Set.mem a (Map.domain h.mh))]
=
I.addrs_set_mem h.ih b a
in
let lem2 (i:heaplet_id) (a:int) : Lemma
(requires i =!= hi /\ Set.mem a (Set.intersect s_b (s i)))
(ensures False)
[SMTPat (Set.mem a (s i))]
=
reveal_opaque (`%addr_map_pred) addr_map_pred
in
()
#push-options "--initial_fuel 1 --max_fuel 1 --initial_ifuel 2 --max_ifuel 2"
let rec lemma_loc_mutable_buffers_rec (l:list buffer_info) (s:Seq.seq buffer_info) (n:nat) : Lemma
(requires
n + List.length l == Seq.length s /\
list_to_seq_post l s n
)
(ensures (
let modloc = loc_mutable_buffers l in
forall (i:nat).{:pattern Seq.index s i} n <= i /\ i < Seq.length s ==> (
let bi = Seq.index s i in
bi.bi_mutable == Mutable ==> loc_includes modloc (loc_buffer bi.bi_buffer))
))
(decreases l)
=
match l with
| [] -> ()
| h::t -> lemma_loc_mutable_buffers_rec t s (n + 1)
#pop-options
let lemma_loc_mutable_buffers (l:list buffer_info) : Lemma
(ensures (
let s = list_to_seq l in
forall (i:nat).{:pattern Seq.index s i} i < Seq.length s ==> (
let bi = Seq.index s i in
bi.bi_mutable == Mutable ==> loc_includes (loc_mutable_buffers l) (loc_buffer bi.bi_buffer))
))
=
lemma_list_to_seq l;
lemma_loc_mutable_buffers_rec l (list_to_seq l) 0
let create_heaplets buffers h1 =
let bs = list_to_seq buffers in
let modloc = loc_mutable_buffers buffers in
let layout1 = h1.vf_layout in
let layin1 = layout1.vl_inner in
let (hmap, hsets) = make_owns h1.vf_heap bs (Seq.length bs) in
let hmap a = Option.mapTot (fun n -> (Seq.index bs n).bi_heaplet) (hmap a) in
let l = {
vl_heaplets_initialized = true;
vl_heaplet_map = hmap;
vl_heaplet_sets = hsets;
vl_old_heap = h1.vf_heap;
vl_buffers = bs;
vl_mod_loc = modloc;
} in
let layout2 = {layout1 with vl_inner = l} in
let h2 = {
vf_layout = layout2;
vf_heap = h1.vf_heap;
vf_heaplets = h1.vf_heaplets;
} in
h2
let lemma_create_heaplets buffers h1 =
let bs = list_to_seq buffers in
let h2 = create_heaplets buffers h1 in
assert (h2.vf_layout.vl_inner.vl_buffers == bs); // REVIEW: why is this necessary, even with extra ifuel?
lemma_make_owns h1.vf_heap bs (Seq.length bs);
lemma_loc_mutable_buffers buffers;
reveal_opaque (`%valid_layout_buffer_id) valid_layout_buffer_id;
()
let destroy_heaplets h1 =
h1
let lemma_destroy_heaplets h1 =
()
val heap_shift (m1 m2:S.machine_heap) (base:int) (n:nat) : Lemma
(requires (forall i. 0 <= i /\ i < n ==> m1.[base + i] == m2.[base + i]))
(ensures (forall i. {:pattern (m1.[i])} base <= i /\ i < base + n ==> m1.[i] == m2.[i]))
#push-options "--smtencoding.l_arith_repr boxwrap"
let heap_shift m1 m2 base n =
assert (forall i. base <= i /\ i < base + n ==>
m1.[base + (i - base)] == m2.[base + (i - base)])
#pop-options
val same_mem_eq_slices64 (b:buffer64{buffer_writeable b})
(i:nat{i < buffer_length b})
(v:nat64)
(k:nat{k < buffer_length b})
(h1:vale_heap{List.memP b (IB.ptrs_of_mem (_ih h1))})
(h2:vale_heap{h2 == buffer_write b i v h1})
(mem1:S.machine_heap{IB.correct_down_p (_ih h1) mem1 b})
(mem2:S.machine_heap{IB.correct_down_p (_ih h2) mem2 b}) : Lemma
(requires (Seq.index (buffer_as_seq h1 b) k == Seq.index (buffer_as_seq h2 b) k))
(ensures (
k * 8 + 8 <= DV.length (get_downview b.bsrc) /\
Seq.slice (DV.as_seq (IB.hs_of_mem (_ih h1)) (get_downview b.bsrc)) (k * 8) (k * 8 + 8) ==
Seq.slice (DV.as_seq (IB.hs_of_mem (_ih h2)) (get_downview b.bsrc)) (k * 8) (k * 8 + 8)))
let same_mem_eq_slices64 b i v k h1 h2 mem1 mem2 =
let t = TUInt64 in
let db = get_downview b.bsrc in
let ub = UV.mk_buffer db (uint_view t) in
UV.as_seq_sel (IB.hs_of_mem (_ih h1)) ub k;
UV.as_seq_sel (IB.hs_of_mem (_ih h2)) ub k;
UV.put_sel (IB.hs_of_mem (_ih h1)) ub k;
UV.put_sel (IB.hs_of_mem (_ih h2)) ub k;
UV.length_eq ub
let length_up64 (b:buffer64) (h:vale_heap) (k:nat{k < buffer_length b}) (i:nat{i < 8}) : Lemma
(scale8 k + i <= DV.length (get_downview b.bsrc)) =
let vb = UV.mk_buffer (get_downview b.bsrc) uint64_view in
UV.length_eq vb
val same_mem_get_heap_val64 (b:buffer64{buffer_writeable b})
(i:nat{i < buffer_length b})
(v:nat64)
(k:nat{k < buffer_length b})
(h1:vale_heap{List.memP b (IB.ptrs_of_mem (_ih h1))})
(h2:vale_heap{h2 == buffer_write b i v h1})
(mem1:S.machine_heap{IB.correct_down_p (_ih h1) mem1 b})
(mem2:S.machine_heap{IB.correct_down_p (_ih h2) mem2 b}) : Lemma
(requires (Seq.index (buffer_as_seq h1 b) k == Seq.index (buffer_as_seq h2 b) k))
(ensures (let ptr = buffer_addr b h1 + scale8 k in
forall (x:int).{:pattern (mem1.[x])} ptr <= x /\ x < ptr + 8 ==> mem1.[x] == mem2.[x]))
let same_mem_get_heap_val64 b j v k h1 h2 mem1 mem2 =
let ptr = buffer_addr b h1 + scale8 k in
let addr = buffer_addr b h1 in
let aux (x:int{ptr <= x /\ x < ptr + 8}) : Lemma (mem1.[x] == mem2.[x]) =
let i = x - ptr in
let db = get_downview b.bsrc in
let ub = UV.mk_buffer db uint64_view in
UV.as_seq_sel (IB.hs_of_mem (_ih h1)) ub k;
UV.as_seq_sel (IB.hs_of_mem (_ih h2)) ub k;
same_mem_eq_slices64 b j v k h1 h2 mem1 mem2;
let s1 = (Seq.slice (DV.as_seq (IB.hs_of_mem (_ih h1)) db) (k * 8) (k * 8 + 8)) in
let s2 = (Seq.slice (DV.as_seq (IB.hs_of_mem (_ih h2)) db) (k * 8) (k * 8 + 8)) in
assert (Seq.index s1 i == Seq.index (DV.as_seq (IB.hs_of_mem (_ih h1)) db) (k * 8 + i));
length_up64 b h1 k i;
assert (mem1.[addr+(scale8 k + i)] == UInt8.v (Seq.index (DV.as_seq (IB.hs_of_mem (_ih h1)) db) (k * 8 + i)));
assert (Seq.index s2 i == Seq.index (DV.as_seq (IB.hs_of_mem (_ih h2)) db) (k * 8 + i));
length_up64 b h2 k i;
assert (mem2.[addr+(scale8 k + i)] == UInt8.v (Seq.index (DV.as_seq (IB.hs_of_mem (_ih h2)) db) (k * 8 + i)))
in
Classical.forall_intro aux;
assert (forall i. addr + (scale8 k + i) == ptr + i)
let rec written_buffer_down64_aux1
(b:buffer64{buffer_writeable b})
(i:nat{i < buffer_length b})
(v:nat64)
(h:vale_heap{List.memP b (IB.ptrs_of_mem (_ih h))})
(base:nat{base == buffer_addr b h})
(k:nat) (h1:vale_heap{h1 == buffer_write b i v h})
(mem1:S.machine_heap{IB.correct_down (_ih h) mem1})
(mem2:S.machine_heap{IB.correct_down (_ih h1) mem2 /\
(forall j.{:pattern (mem1.[j]) \/ (mem2.[j])}
base <= j /\ j < base + k * 8 ==>
mem1.[j] == mem2.[j])})
: Lemma
(ensures (forall j. {:pattern (mem1.[j]) \/ (mem1.[j])}
j >= base /\ j < base + scale8 i ==>
mem1.[j] == mem2.[j]))
(decreases %[i-k]) =
if k >= i then ()
else begin
let ptr = base + scale8 k in
same_mem_get_heap_val64 b i v k h h1 mem1 mem2;
heap_shift mem1 mem2 ptr 8;
written_buffer_down64_aux1 b i v h base (k+1) h1 mem1 mem2
end
let rec written_buffer_down64_aux2
(b:buffer64{buffer_writeable b})
(i:nat{i < buffer_length b})
(v:nat64)
(h:vale_heap{List.memP b (IB.ptrs_of_mem (_ih h))})
(base:nat{base == buffer_addr b h})
(n:nat{n == buffer_length b})
(k:nat{k > i}) (h1:vale_heap{h1 == buffer_write b i v h})
(mem1:S.machine_heap{IB.correct_down (_ih h) mem1})
(mem2:S.machine_heap{IB.correct_down (_ih h1) mem2 /\
(forall j. {:pattern (mem1.[j]) \/ (mem2.[j])}
base + scale8 (i+1) <= j /\ j < base + k * 8 ==>
mem1.[j] == mem2.[j])})
: Lemma
(ensures (forall j. {:pattern (mem1.[j]) \/ (mem2.[j])}
j >= base + scale8 (i+1) /\ j < base + scale8 n ==>
mem1.[j] == mem2.[j]))
(decreases %[n-k]) =
if k >= n then ()
else begin
let ptr = base + scale8 k in
same_mem_get_heap_val64 b i v k h h1 mem1 mem2;
heap_shift mem1 mem2 ptr 8;
written_buffer_down64_aux2 b i v h base n (k+1) h1 mem1 mem2
end
let written_buffer_down64 (b:buffer64{buffer_writeable b}) (i:nat{i < buffer_length b}) (v:nat64) (h:vale_heap)
: Lemma
(requires List.memP b (IB.ptrs_of_mem (_ih h)))
(ensures (
let mem1 = I.down_mem (_ih h) in
let h1 = buffer_write b i v h in
let mem2 = I.down_mem (_ih h1) in
let base = buffer_addr b h in
let n = buffer_length b in
forall j. {:pattern (mem1.[j]) \/ (mem2.[j])}
(base <= j /\ j < base + scale8 i) \/
(base + scale8 (i+1) <= j /\ j < base + scale8 n) ==>
mem1.[j] == mem2.[j]))
= let mem1 = I.down_mem (_ih h) in
let h1 = buffer_write b i v h in
let mem2 = I.down_mem (_ih h1) in
let base = buffer_addr b h in
let n = buffer_length b in
written_buffer_down64_aux1 b i v h base 0 h1 mem1 mem2;
written_buffer_down64_aux2 b i v h base n (i+1) h1 mem1 mem2
let unwritten_buffer_down (t:base_typ) (b:buffer t{buffer_writeable b})
(i:nat{i < buffer_length b})
(v:base_typ_as_vale_type t)
(h:vale_heap{List.memP b (IB.ptrs_of_mem (_ih h))})
: Lemma
(ensures (
let mem1 = I.down_mem (_ih h) in
let h1 = buffer_write b i v h in
let mem2 = I.down_mem (_ih h1) in
forall (a:b8{List.memP a (IB.ptrs_of_mem (_ih h)) /\ a =!= b}) j. {:pattern mem1.[j]; List.memP a (IB.ptrs_of_mem (_ih h)) \/ mem2.[j]; List.memP a (IB.ptrs_of_mem (_ih h))}
let base = (IB.addrs_of_mem (_ih h)) a in
j >= base /\ j < base + DV.length (get_downview a.bsrc) ==> mem1.[j] == mem2.[j]))
= let aux (a:b8{a =!= b /\ List.memP a (IB.ptrs_of_mem (_ih h))})
: Lemma
(ensures (
let base = (IB.addrs_of_mem (_ih h)) a in
let mem1 = I.down_mem (_ih h) in
let h1 = buffer_write b i v h in
let mem2 = I.down_mem (_ih h1) in
forall j.
j >= base /\ j < base + DV.length (get_downview a.bsrc) ==>
mem1.[j] == mem2.[j]))
= let db = get_downview a.bsrc in
if DV.length db = 0 then ()
else
let mem1 = I.down_mem (_ih h) in
let h1 = buffer_write b i v h in
let mem2 = I.down_mem (_ih h1) in
let base = (IB.addrs_of_mem (_ih h)) a in
let s0 = DV.as_seq (IB.hs_of_mem (_ih h)) db in
let s1 = DV.as_seq (IB.hs_of_mem (_ih h1)) db in
opaque_assert (`%IB.list_disjoint_or_eq) IB.list_disjoint_or_eq IB.list_disjoint_or_eq_def (MB.disjoint a.bsrc b.bsrc);
lemma_dv_equal (IB.down_view a.src) a.bsrc (IB.hs_of_mem (_ih h)) (IB.hs_of_mem (_ih h1));
assert (Seq.equal s0 s1);
assert (forall (j:int).{:pattern (mem1.[j])}
base <= j /\ j < base + Seq.length s0 ==> v_to_typ TUInt8 (Seq.index s0 (j - base)) == mem1.[j]);
heap_shift mem1 mem2 base (DV.length db)
in
Classical.forall_intro aux
let store_buffer_down64_mem
(b:buffer64{buffer_writeable b})
(i:nat{i < buffer_length b})
(v:nat64)
(h:vale_heap{List.memP b (IB.ptrs_of_mem (_ih h))})
: Lemma
(ensures (
let mem1 = I.down_mem (_ih h) in
let h1 = buffer_write b i v h in
let mem2 = I.down_mem (_ih h1) in
let base = buffer_addr b h in
forall (j:int). {:pattern mem1.[j] \/ mem2.[j]}
j < base + scale8 i \/ j >= base + scale8 (i+1) ==>
mem1.[j] == mem2.[j]))
= let mem1 = I.down_mem (_ih h) in
let h1 = buffer_write b i v h in
let mem2 = I.down_mem (_ih h1) in
let base = buffer_addr b h in
let n = buffer_length b in
let aux (j:int)
: Lemma
(j < base + scale8 i \/ j >= base + scale8 (i+1) ==>
mem1.[j] == mem2.[j])
=
I.addrs_set_lemma_all ();
if j >= base && j < base + DV.length (get_downview b.bsrc) then begin
written_buffer_down64 b i v h;
length_t_eq (TUInt64) b
end
else if not (I.valid_addr (_ih h) j)
then I.same_unspecified_down (IB.hs_of_mem (_ih h)) (IB.hs_of_mem (_ih h1)) (IB.ptrs_of_mem (_ih h))
else unwritten_buffer_down TUInt64 b i v h
in
Classical.forall_intro aux
let store_buffer_aux_down64_mem (ptr:int) (v:nat64) (h:vale_heap{writeable_mem64 ptr h})
: Lemma
(ensures (
let mem1 = I.down_mem (_ih h) in
let h1 = store_mem (TUInt64) ptr v h in
let mem2 = I.down_mem (_ih h1) in
forall j. {:pattern mem1.[j] \/ mem2.[j]}
j < ptr \/ j >= ptr + 8 ==>
mem1.[j] == mem2.[j]))
= let t = TUInt64 in
let h1 = store_mem t ptr v h in
let b = Some?.v (find_writeable_buffer t ptr h) in
length_t_eq t b;
let i = get_addr_in_ptr t (buffer_length b) (buffer_addr b h) ptr 0 in
store_buffer_write t ptr v h;
assert (buffer_addr b h + scale8 i == ptr);
assert (buffer_addr b h + scale8 (i+1) == ptr + 8);
store_buffer_down64_mem b i v h
let store_buffer_aux_down64_mem2 (ptr:int) (v:nat64) (h:vale_heap{writeable_mem64 ptr h})
: Lemma
(ensures (
let h1 = store_mem (TUInt64) ptr v h in
let mem2 = I.down_mem (_ih h1) in
S.get_heap_val64 ptr mem2 == v))
= let t = TUInt64 in
let b = Some?.v (find_writeable_buffer t ptr h) in
length_t_eq t b;
let i = get_addr_in_ptr t (buffer_length b) (buffer_addr b h) ptr 0 in
let h1 = store_mem t ptr v h in
let mem2 = I.down_mem (_ih h1) in
store_buffer_write t ptr v h;
assert (Seq.index (buffer_as_seq h1 b) i == v);
index64_get_heap_val64 h1 b mem2 i
let in_bounds64 (h:vale_heap) (b:buffer64) (i:nat{i < buffer_length b})
: Lemma (scale8 i + 8 <= DV.length (get_downview b.bsrc))
=
length_t_eq TUInt64 b
let bytes_valid64 ptr h =
reveal_opaque (`%S.valid_addr64) S.valid_addr64;
let t = TUInt64 in
let b = get_addr_ptr t ptr h in
let i = get_addr_in_ptr t (buffer_length b) (buffer_addr b h) ptr 0 in
in_bounds64 h b i;
I.addrs_set_mem (_ih h) b ptr;
I.addrs_set_mem (_ih h) b (ptr+1);
I.addrs_set_mem (_ih h) b (ptr+2);
I.addrs_set_mem (_ih h) b (ptr+3);
I.addrs_set_mem (_ih h) b (ptr+4);
I.addrs_set_mem (_ih h) b (ptr+5);
I.addrs_set_mem (_ih h) b (ptr+6);
I.addrs_set_mem (_ih h) b (ptr+7)
val same_mem_get_heap_val128 (b:buffer128)
(i:nat{i < buffer_length b})
(v:quad32)
(k:nat{k < buffer_length b})
(h1:vale_heap{List.memP b (IB.ptrs_of_mem (_ih h1)) /\ buffer_writeable b})
(h2:vale_heap{h2 == buffer_write b i v h1})
(mem1:S.machine_heap{IB.correct_down_p (_ih h1) mem1 b})
(mem2:S.machine_heap{IB.correct_down_p (_ih h2) mem2 b}) : Lemma
(requires (Seq.index (buffer_as_seq h1 b) k == Seq.index (buffer_as_seq h2 b) k))
(ensures (let ptr = buffer_addr b h1 + scale16 k in
forall i.{:pattern mem1.[i]} i >= ptr /\ i < ptr+16 ==> mem1.[i] == mem2.[i]))
val same_mem_eq_slices128 (b:buffer128)
(i:nat{i < buffer_length b})
(v:quad32)
(k:nat{k < buffer_length b})
(h1:vale_heap{List.memP b (IB.ptrs_of_mem (_ih h1)) /\ buffer_writeable b})
(h2:vale_heap{h2 == buffer_write b i v h1})
(mem1:S.machine_heap{IB.correct_down_p (_ih h1) mem1 b})
(mem2:S.machine_heap{IB.correct_down_p (_ih h2) mem2 b}) : Lemma
(requires (Seq.index (buffer_as_seq h1 b) k == Seq.index (buffer_as_seq h2 b) k))
(ensures (
k * 16 + 16 <= DV.length (get_downview b.bsrc) /\
Seq.slice (DV.as_seq (IB.hs_of_mem (_ih h1)) (get_downview b.bsrc)) (k * 16) (k * 16 + 16) ==
Seq.slice (DV.as_seq (IB.hs_of_mem (_ih h2)) (get_downview b.bsrc)) (k * 16) (k * 16 + 16)))
let same_mem_eq_slices128 b i v k h1 h2 mem1 mem2 =
let t = TUInt128 in
let db = get_downview b.bsrc in
let ub = UV.mk_buffer db (uint_view t) in
UV.as_seq_sel (IB.hs_of_mem (_ih h1)) ub k;
UV.as_seq_sel (IB.hs_of_mem (_ih h2)) ub k;
UV.put_sel (IB.hs_of_mem (_ih h1)) ub k;
UV.put_sel (IB.hs_of_mem (_ih h2)) ub k;
UV.length_eq ub
let length_up128 (b:buffer128) (h:vale_heap) (k:nat{k < buffer_length b}) (i:nat{i < 16}) : Lemma
(scale16 k + i <= DV.length (get_downview b.bsrc)) =
let vb = UV.mk_buffer (get_downview b.bsrc) uint128_view in
UV.length_eq vb
let same_mem_get_heap_val128 b j v k h1 h2 mem1 mem2 =
let ptr = buffer_addr b h1 + scale16 k in
let addr = buffer_addr b h1 in
let aux (i:nat{ptr <= i /\ i < ptr+16}) : Lemma (mem1.[i] == mem2.[i]) =
let db = get_downview b.bsrc in
let ub = UV.mk_buffer db uint128_view in
UV.as_seq_sel (IB.hs_of_mem (_ih h1)) ub k;
UV.as_seq_sel (IB.hs_of_mem (_ih h2)) ub k;
same_mem_eq_slices128 b j v k h1 h2 mem1 mem2;
let s1 = (Seq.slice (DV.as_seq (IB.hs_of_mem (_ih h1)) db) (k * 16) (k * 16 + 16)) in
let s2 = (Seq.slice (DV.as_seq (IB.hs_of_mem (_ih h2)) db) (k * 16) (k * 16 + 16)) in
assert (Seq.index s1 (i - ptr) == Seq.index (DV.as_seq (IB.hs_of_mem (_ih h1)) db) (k * 16 + (i-ptr)));
length_up128 b h1 k (i-ptr);
assert (mem1.[i] == UInt8.v (Seq.index (DV.as_seq (IB.hs_of_mem (_ih h1)) db) (k * 16 + (i-ptr))));
assert (Seq.index s2 (i-ptr) == Seq.index (DV.as_seq (IB.hs_of_mem (_ih h2)) db) (k * 16 + (i-ptr)));
length_up128 b h2 k (i-ptr);
assert (mem2.[addr+(scale16 k + (i-ptr))] == UInt8.v (Seq.index (DV.as_seq (IB.hs_of_mem (_ih h2)) db) (k * 16 + (i-ptr))));
assert (forall i. addr + (scale16 k + (i-ptr)) == i)
in
Classical.forall_intro aux
let in_bounds128 (h:vale_heap) (b:buffer128) (i:nat{i < buffer_length b}) : Lemma
(scale16 i + 16 <= DV.length (get_downview b.bsrc)) | false | false | Vale.X64.Memory_Sems.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 100,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val in_bounds128 (h: vale_heap) (b: buffer128) (i: nat{i < buffer_length b})
: Lemma (scale16 i + 16 <= DV.length (get_downview b.bsrc)) | [] | Vale.X64.Memory_Sems.in_bounds128 | {
"file_name": "vale/code/arch/x64/Vale.X64.Memory_Sems.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} |
h: Vale.Arch.HeapImpl.vale_heap ->
b: Vale.X64.Memory.buffer128 ->
i: Prims.nat{i < Vale.X64.Memory.buffer_length b}
-> FStar.Pervasives.Lemma
(ensures
Vale.X64.Memory.scale16 i + 16 <=
LowStar.BufferView.Down.length (Vale.Interop.Types.get_downview (Buffer?.bsrc b))) | {
"end_col": 24,
"end_line": 534,
"start_col": 2,
"start_line": 534
} |
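The lemma recorded above is pure arithmetic once length_t_eq TUInt128 b has rewritten the downview length: assuming, as that equation states, that a TUInt128 buffer with buffer_length b elements has a downview of 16 * buffer_length b bytes, the bound follows from i < buffer_length b:

  scale16 i + 16 = 16 * i + 16 = 16 * (i + 1) <= 16 * buffer_length b = DV.length (get_downview b.bsrc)

This is the same one-step calculation that in_bounds64 performs with scale8 and an 8-byte element width.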
FStar.Pervasives.Lemma | val lemma_get_upd_heap (h:vale_heap) (m:S.machine_heap) : Lemma
(requires is_machine_heap_update (get_heap h) m)
(ensures get_heap (upd_heap h m) == m) | [
{
"abbrev": true,
"full_module": "Vale.Interop.Base",
"short_module": "IB"
},
{
"abbrev": false,
"full_module": "Vale.Lib.BufferViewHelpers",
"short_module": null
},
{
"abbrev": true,
"full_module": "LowStar.BufferView.Down",
"short_module": "DV"
},
{
"abbrev": true,
"full_module": "LowStar.BufferView.Up",
"short_module": "UV"
},
{
"abbrev": true,
"full_module": "LowStar.Monotonic.Buffer",
"short_module": "MB"
},
{
"abbrev": true,
"full_module": "Vale.Interop",
"short_module": "I"
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Machine_Semantics_s",
"short_module": "S"
},
{
"abbrev": false,
"full_module": "Vale.Lib.Seqs",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.MachineHeap_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Heap",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let lemma_get_upd_heap h m = I.up_down_identity (_ih h) m | val lemma_get_upd_heap (h:vale_heap) (m:S.machine_heap) : Lemma
(requires is_machine_heap_update (get_heap h) m)
(ensures get_heap (upd_heap h m) == m)
let lemma_get_upd_heap h m = | false | null | true | I.up_down_identity (_ih h) m | {
"checked_file": "Vale.X64.Memory_Sems.fst.checked",
"dependencies": [
"Vale.X64.Memory.fst.checked",
"Vale.X64.Memory.fst.checked",
"Vale.X64.Machine_Semantics_s.fst.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.BufferViewStore.fsti.checked",
"Vale.Lib.BufferViewHelpers.fst.checked",
"Vale.Interop.Views.fsti.checked",
"Vale.Interop.Heap_s.fst.checked",
"Vale.Interop.Base.fst.checked",
"Vale.Interop.fsti.checked",
"Vale.Def.Words_s.fsti.checked",
"Vale.Def.Words.Two_s.fsti.checked",
"Vale.Def.Words.Four_s.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Def.Opaque_s.fsti.checked",
"Vale.Arch.MachineHeap.fsti.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"Vale.Arch.Heap.fsti.checked",
"prims.fst.checked",
"LowStar.Monotonic.Buffer.fsti.checked",
"LowStar.BufferView.Up.fsti.checked",
"LowStar.BufferView.Down.fsti.checked",
"FStar.UInt8.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.Set.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Option.fst.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.List.fst.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": true,
"source_file": "Vale.X64.Memory_Sems.fst"
} | [
"lemma"
] | [
"Vale.Arch.HeapImpl.vale_heap",
"Vale.Arch.MachineHeap_s.machine_heap",
"Vale.Interop.up_down_identity",
"Vale.Arch.HeapImpl._ih",
"Prims.unit"
] | [] | module Vale.X64.Memory_Sems
open FStar.Mul
open Vale.Def.Prop_s
open Vale.Def.Opaque_s
open Vale.X64.Machine_s
open Vale.X64.Memory
open Vale.Def.Words_s
module I = Vale.Interop
module S = Vale.X64.Machine_Semantics_s
module MB = LowStar.Monotonic.Buffer
module UV = LowStar.BufferView.Up
module DV = LowStar.BufferView.Down
open Vale.Lib.BufferViewHelpers
open Vale.Arch.HeapImpl
open Vale.Arch.Heap
friend Vale.X64.Memory
module IB = Vale.Interop.Base
let same_domain h m = Set.equal (IB.addrs_set (_ih h)) (Map.domain m)
let lemma_same_domains h m1 m2 = ()
let get_heap h = I.down_mem (_ih h)
let upd_heap h m = mi_heap_upd h m
//let lemma_upd_get_heap h = I.down_up_identity (_ih h) | false | false | Vale.X64.Memory_Sems.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val lemma_get_upd_heap (h:vale_heap) (m:S.machine_heap) : Lemma
(requires is_machine_heap_update (get_heap h) m)
(ensures get_heap (upd_heap h m) == m) | [] | Vale.X64.Memory_Sems.lemma_get_upd_heap | {
"file_name": "vale/code/arch/x64/Vale.X64.Memory_Sems.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | h: Vale.Arch.HeapImpl.vale_heap -> m: Vale.Arch.MachineHeap_s.machine_heap
-> FStar.Pervasives.Lemma
(requires Vale.Arch.MachineHeap_s.is_machine_heap_update (Vale.X64.Memory_Sems.get_heap h) m)
(ensures Vale.X64.Memory_Sems.get_heap (Vale.X64.Memory_Sems.upd_heap h m) == m) | {
"end_col": 57,
"end_line": 31,
"start_col": 29,
"start_line": 31
} |
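lemma_get_upd_heap in the record above is the up/down round trip: writing a machine heap m into a vale_heap with upd_heap and reading it back with get_heap yields m again, discharged by I.up_down_identity. A toy F* sketch of that round-trip shape (the record type hi and the names down/up are hypothetical stand-ins for the interop layer, not the actual Vale types):

module RoundTripSketch

(* Toy model: "down" projects the low-level view out of a composite state,
   "up" replaces that view; reading back after a write returns the value
   that was written. *)
type hi (a:Type) (b:Type) = { lo : a; extra : b }

let down (#a:Type) (#b:Type) (h:hi a b) : a = h.lo
let up (#a:Type) (#b:Type) (h:hi a b) (m:a) : hi a b = { h with lo = m }

let round_trip (#a:Type) (#b:Type) (h:hi a b) (m:a)
  : Lemma (down (up h m) == m)
  = ()

The real lemma additionally assumes is_machine_heap_update (get_heap h) m, a side condition for which the toy model has no counterpart.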
FStar.Pervasives.Lemma | val low_lemma_load_mem128_lo64 (b:buffer128) (i:nat) (h:vale_heap) : Lemma
(requires
i < Seq.length (buffer_as_seq h b) /\
buffer_readable h b
)
(ensures
S.get_heap_val64 (buffer_addr b h + scale16 i) (get_heap h) ==
lo64 (buffer_read b i h)
) | [
{
"abbrev": false,
"full_module": "Vale.Def.Words.Four_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words.Two_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.BufferViewStore",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Interop.Base",
"short_module": "IB"
},
{
"abbrev": false,
"full_module": "Vale.Lib.BufferViewHelpers",
"short_module": null
},
{
"abbrev": true,
"full_module": "LowStar.BufferView.Down",
"short_module": "DV"
},
{
"abbrev": true,
"full_module": "LowStar.BufferView.Up",
"short_module": "UV"
},
{
"abbrev": true,
"full_module": "LowStar.Monotonic.Buffer",
"short_module": "MB"
},
{
"abbrev": true,
"full_module": "Vale.Interop",
"short_module": "I"
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Machine_Semantics_s",
"short_module": "S"
},
{
"abbrev": false,
"full_module": "Vale.Lib.Seqs",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.MachineHeap_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Heap",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let low_lemma_load_mem128_lo64 b i h =
low_lemma_load_mem128 b i h;
lo64_reveal ();
S.get_heap_val128_reveal ();
S.get_heap_val64_reveal ();
S.get_heap_val32_reveal () | val low_lemma_load_mem128_lo64 (b:buffer128) (i:nat) (h:vale_heap) : Lemma
(requires
i < Seq.length (buffer_as_seq h b) /\
buffer_readable h b
)
(ensures
S.get_heap_val64 (buffer_addr b h + scale16 i) (get_heap h) ==
lo64 (buffer_read b i h)
)
let low_lemma_load_mem128_lo64 b i h = | false | null | true | low_lemma_load_mem128 b i h;
lo64_reveal ();
S.get_heap_val128_reveal ();
S.get_heap_val64_reveal ();
S.get_heap_val32_reveal () | {
"checked_file": "Vale.X64.Memory_Sems.fst.checked",
"dependencies": [
"Vale.X64.Memory.fst.checked",
"Vale.X64.Memory.fst.checked",
"Vale.X64.Machine_Semantics_s.fst.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.BufferViewStore.fsti.checked",
"Vale.Lib.BufferViewHelpers.fst.checked",
"Vale.Interop.Views.fsti.checked",
"Vale.Interop.Heap_s.fst.checked",
"Vale.Interop.Base.fst.checked",
"Vale.Interop.fsti.checked",
"Vale.Def.Words_s.fsti.checked",
"Vale.Def.Words.Two_s.fsti.checked",
"Vale.Def.Words.Four_s.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Def.Opaque_s.fsti.checked",
"Vale.Arch.MachineHeap.fsti.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"Vale.Arch.Heap.fsti.checked",
"prims.fst.checked",
"LowStar.Monotonic.Buffer.fsti.checked",
"LowStar.BufferView.Up.fsti.checked",
"LowStar.BufferView.Down.fsti.checked",
"FStar.UInt8.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.Set.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Option.fst.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.List.fst.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": true,
"source_file": "Vale.X64.Memory_Sems.fst"
} | [
"lemma"
] | [
"Vale.X64.Memory.buffer128",
"Prims.nat",
"Vale.Arch.HeapImpl.vale_heap",
"Vale.Arch.MachineHeap_s.get_heap_val32_reveal",
"Prims.unit",
"Vale.Arch.MachineHeap_s.get_heap_val64_reveal",
"Vale.Arch.MachineHeap_s.get_heap_val128_reveal",
"Vale.Arch.Types.lo64_reveal",
"Vale.X64.Memory_Sems.low_lemma_load_mem128"
] | [] | module Vale.X64.Memory_Sems
open FStar.Mul
open Vale.Def.Prop_s
open Vale.Def.Opaque_s
open Vale.X64.Machine_s
open Vale.X64.Memory
open Vale.Def.Words_s
module I = Vale.Interop
module S = Vale.X64.Machine_Semantics_s
module MB = LowStar.Monotonic.Buffer
module UV = LowStar.BufferView.Up
module DV = LowStar.BufferView.Down
open Vale.Lib.BufferViewHelpers
open Vale.Arch.HeapImpl
open Vale.Arch.Heap
friend Vale.X64.Memory
module IB = Vale.Interop.Base
let same_domain h m = Set.equal (IB.addrs_set (_ih h)) (Map.domain m)
let lemma_same_domains h m1 m2 = ()
let get_heap h = I.down_mem (_ih h)
let upd_heap h m = mi_heap_upd h m
//let lemma_upd_get_heap h = I.down_up_identity (_ih h)
let lemma_get_upd_heap h m = I.up_down_identity (_ih h) m
let lemma_heap_impl = ()
let lemma_heap_get_heap h = ()
let lemma_heap_taint h = ()
//let lemma_heap_upd_heap h mh mt = ()
[@"opaque_to_smt"]
let rec set_of_range (a:int) (n:nat) : Pure (Set.set int)
(requires True)
(ensures fun s -> (forall (i:int).{:pattern Set.mem i s} Set.mem i s <==> a <= i /\ i < a + n))
=
if n = 0 then Set.empty else Set.union (set_of_range a (n - 1)) (Set.singleton (a + n - 1))
let buffer_info_has_addr (bi:buffer_info) (a:int) =
let b = bi.bi_buffer in
let addr = Vale.Interop.Heap_s.global_addrs_map b in
let len = DV.length (get_downview b.bsrc) in
addr <= a /\ a < addr + len
let buffer_info_has_addr_opt (bi:option buffer_info) (a:int) =
match bi with
| None -> False
| Some bi -> buffer_info_has_addr bi a
#set-options "--z3rlimit 100"
let rec make_owns_rec (h:vale_heap) (bs:Seq.seq buffer_info) (n:nat{n <= Seq.length bs}) :
GTot ((int -> option (n:nat{n < Seq.length bs})) & (heaplet_id -> Set.set int))
=
if n = 0 then ((fun _ -> None), (fun _ -> Set.empty)) else
let (m0, s0) = make_owns_rec h bs (n - 1) in
let bi = Seq.index bs (n - 1) in
let b = bi.bi_buffer in
let hi = bi.bi_heaplet in
let addr = Vale.Interop.Heap_s.global_addrs_map b in
let len = DV.length (get_downview b.bsrc) in
let s_b = set_of_range addr len in
let s i = if i = hi then Set.union (s0 i) s_b else s0 i in
let m a = if addr <= a && a < addr + len then Some (n - 1) else m0 a in
(m, s)
[@"opaque_to_smt"]
let make_owns (h:vale_heap) (bs:Seq.seq buffer_info) (n:nat{n <= Seq.length bs}) :
GTot ((int -> option (n:nat{n < Seq.length bs})) & (heaplet_id -> Set.set int))
=
make_owns_rec h bs n
let rec lemma_make_owns (h:vale_heap) (bs:Seq.seq buffer_info) (n:nat) : Lemma
(requires
n <= Seq.length bs /\
(forall (i:nat).{:pattern Seq.index bs i} i < Seq.length bs ==> buffer_readable h (Seq.index bs i).bi_buffer) /\
(forall (i1 i2:nat).{:pattern (Seq.index bs i1); (Seq.index bs i2)}
i1 < Seq.length bs /\ i2 < Seq.length bs ==> buffer_info_disjoint (Seq.index bs i1) (Seq.index bs i2))
)
(ensures (
let (m, s) = make_owns h bs n in
(forall (i:heaplet_id) (a:int).{:pattern Set.mem a (s i)}
(Set.mem a (s i) <==> Option.mapTot (fun n -> (Seq.index bs n).bi_heaplet) (m a) == Some i) /\
(Set.mem a (s i) ==> buffer_info_has_addr_opt (Option.mapTot (fun n -> Seq.index bs n) (m a)) a) /\
(Set.mem a (s i) ==> Set.mem a (Map.domain h.mh))
) /\
(forall (k:nat) (a:int).{:pattern Set.mem a (s (Seq.index bs k).bi_heaplet)}
k < n /\ buffer_info_has_addr (Seq.index bs k) a ==> Set.mem a (s (Seq.index bs k).bi_heaplet))
))
=
reveal_opaque (`%make_owns) make_owns;
if n = 0 then () else
let _ = make_owns h bs (n - 1) in
let (m, s) = make_owns h bs n in
lemma_make_owns h bs (n - 1);
let bi = Seq.index bs (n - 1) in
let b = bi.bi_buffer in
let hi = bi.bi_heaplet in
let addr = Vale.Interop.Heap_s.global_addrs_map b in
let len = DV.length (get_downview b.bsrc) in
let s_b = set_of_range addr len in
let lem1 (a:int) : Lemma
(requires Set.mem a s_b)
(ensures Set.mem a (Map.domain h.mh))
[SMTPat (Set.mem a (Map.domain h.mh))]
=
I.addrs_set_mem h.ih b a
in
let lem2 (i:heaplet_id) (a:int) : Lemma
(requires i =!= hi /\ Set.mem a (Set.intersect s_b (s i)))
(ensures False)
[SMTPat (Set.mem a (s i))]
=
reveal_opaque (`%addr_map_pred) addr_map_pred
in
()
#push-options "--initial_fuel 1 --max_fuel 1 --initial_ifuel 2 --max_ifuel 2"
let rec lemma_loc_mutable_buffers_rec (l:list buffer_info) (s:Seq.seq buffer_info) (n:nat) : Lemma
(requires
n + List.length l == Seq.length s /\
list_to_seq_post l s n
)
(ensures (
let modloc = loc_mutable_buffers l in
forall (i:nat).{:pattern Seq.index s i} n <= i /\ i < Seq.length s ==> (
let bi = Seq.index s i in
bi.bi_mutable == Mutable ==> loc_includes modloc (loc_buffer bi.bi_buffer))
))
(decreases l)
=
match l with
| [] -> ()
| h::t -> lemma_loc_mutable_buffers_rec t s (n + 1)
#pop-options
let lemma_loc_mutable_buffers (l:list buffer_info) : Lemma
(ensures (
let s = list_to_seq l in
forall (i:nat).{:pattern Seq.index s i} i < Seq.length s ==> (
let bi = Seq.index s i in
bi.bi_mutable == Mutable ==> loc_includes (loc_mutable_buffers l) (loc_buffer bi.bi_buffer))
))
=
lemma_list_to_seq l;
lemma_loc_mutable_buffers_rec l (list_to_seq l) 0
let create_heaplets buffers h1 =
let bs = list_to_seq buffers in
let modloc = loc_mutable_buffers buffers in
let layout1 = h1.vf_layout in
let layin1 = layout1.vl_inner in
let (hmap, hsets) = make_owns h1.vf_heap bs (Seq.length bs) in
let hmap a = Option.mapTot (fun n -> (Seq.index bs n).bi_heaplet) (hmap a) in
let l = {
vl_heaplets_initialized = true;
vl_heaplet_map = hmap;
vl_heaplet_sets = hsets;
vl_old_heap = h1.vf_heap;
vl_buffers = bs;
vl_mod_loc = modloc;
} in
let layout2 = {layout1 with vl_inner = l} in
let h2 = {
vf_layout = layout2;
vf_heap = h1.vf_heap;
vf_heaplets = h1.vf_heaplets;
} in
h2
let lemma_create_heaplets buffers h1 =
let bs = list_to_seq buffers in
let h2 = create_heaplets buffers h1 in
assert (h2.vf_layout.vl_inner.vl_buffers == bs); // REVIEW: why is this necessary, even with extra ifuel?
lemma_make_owns h1.vf_heap bs (Seq.length bs);
lemma_loc_mutable_buffers buffers;
reveal_opaque (`%valid_layout_buffer_id) valid_layout_buffer_id;
()
let destroy_heaplets h1 =
h1
let lemma_destroy_heaplets h1 =
()
val heap_shift (m1 m2:S.machine_heap) (base:int) (n:nat) : Lemma
(requires (forall i. 0 <= i /\ i < n ==> m1.[base + i] == m2.[base + i]))
(ensures (forall i. {:pattern (m1.[i])} base <= i /\ i < base + n ==> m1.[i] == m2.[i]))
#push-options "--smtencoding.l_arith_repr boxwrap"
let heap_shift m1 m2 base n =
assert (forall i. base <= i /\ i < base + n ==>
m1.[base + (i - base)] == m2.[base + (i - base)])
#pop-options
val same_mem_eq_slices64 (b:buffer64{buffer_writeable b})
(i:nat{i < buffer_length b})
(v:nat64)
(k:nat{k < buffer_length b})
(h1:vale_heap{List.memP b (IB.ptrs_of_mem (_ih h1))})
(h2:vale_heap{h2 == buffer_write b i v h1})
(mem1:S.machine_heap{IB.correct_down_p (_ih h1) mem1 b})
(mem2:S.machine_heap{IB.correct_down_p (_ih h2) mem2 b}) : Lemma
(requires (Seq.index (buffer_as_seq h1 b) k == Seq.index (buffer_as_seq h2 b) k))
(ensures (
k * 8 + 8 <= DV.length (get_downview b.bsrc) /\
Seq.slice (DV.as_seq (IB.hs_of_mem (_ih h1)) (get_downview b.bsrc)) (k * 8) (k * 8 + 8) ==
Seq.slice (DV.as_seq (IB.hs_of_mem (_ih h2)) (get_downview b.bsrc)) (k * 8) (k * 8 + 8)))
let same_mem_eq_slices64 b i v k h1 h2 mem1 mem2 =
let t = TUInt64 in
let db = get_downview b.bsrc in
let ub = UV.mk_buffer db (uint_view t) in
UV.as_seq_sel (IB.hs_of_mem (_ih h1)) ub k;
UV.as_seq_sel (IB.hs_of_mem (_ih h2)) ub k;
UV.put_sel (IB.hs_of_mem (_ih h1)) ub k;
UV.put_sel (IB.hs_of_mem (_ih h2)) ub k;
UV.length_eq ub
let length_up64 (b:buffer64) (h:vale_heap) (k:nat{k < buffer_length b}) (i:nat{i < 8}) : Lemma
(scale8 k + i <= DV.length (get_downview b.bsrc)) =
let vb = UV.mk_buffer (get_downview b.bsrc) uint64_view in
UV.length_eq vb
val same_mem_get_heap_val64 (b:buffer64{buffer_writeable b})
(i:nat{i < buffer_length b})
(v:nat64)
(k:nat{k < buffer_length b})
(h1:vale_heap{List.memP b (IB.ptrs_of_mem (_ih h1))})
(h2:vale_heap{h2 == buffer_write b i v h1})
(mem1:S.machine_heap{IB.correct_down_p (_ih h1) mem1 b})
(mem2:S.machine_heap{IB.correct_down_p (_ih h2) mem2 b}) : Lemma
(requires (Seq.index (buffer_as_seq h1 b) k == Seq.index (buffer_as_seq h2 b) k))
(ensures (let ptr = buffer_addr b h1 + scale8 k in
forall (x:int).{:pattern (mem1.[x])} ptr <= x /\ x < ptr + 8 ==> mem1.[x] == mem2.[x]))
let same_mem_get_heap_val64 b j v k h1 h2 mem1 mem2 =
let ptr = buffer_addr b h1 + scale8 k in
let addr = buffer_addr b h1 in
let aux (x:int{ptr <= x /\ x < ptr + 8}) : Lemma (mem1.[x] == mem2.[x]) =
let i = x - ptr in
let db = get_downview b.bsrc in
let ub = UV.mk_buffer db uint64_view in
UV.as_seq_sel (IB.hs_of_mem (_ih h1)) ub k;
UV.as_seq_sel (IB.hs_of_mem (_ih h2)) ub k;
same_mem_eq_slices64 b j v k h1 h2 mem1 mem2;
let s1 = (Seq.slice (DV.as_seq (IB.hs_of_mem (_ih h1)) db) (k * 8) (k * 8 + 8)) in
let s2 = (Seq.slice (DV.as_seq (IB.hs_of_mem (_ih h2)) db) (k * 8) (k * 8 + 8)) in
assert (Seq.index s1 i == Seq.index (DV.as_seq (IB.hs_of_mem (_ih h1)) db) (k * 8 + i));
length_up64 b h1 k i;
assert (mem1.[addr+(scale8 k + i)] == UInt8.v (Seq.index (DV.as_seq (IB.hs_of_mem (_ih h1)) db) (k * 8 + i)));
assert (Seq.index s2 i == Seq.index (DV.as_seq (IB.hs_of_mem (_ih h2)) db) (k * 8 + i));
length_up64 b h2 k i;
assert (mem2.[addr+(scale8 k + i)] == UInt8.v (Seq.index (DV.as_seq (IB.hs_of_mem (_ih h2)) db) (k * 8 + i)))
in
Classical.forall_intro aux;
assert (forall i. addr + (scale8 k + i) == ptr + i)
let rec written_buffer_down64_aux1
(b:buffer64{buffer_writeable b})
(i:nat{i < buffer_length b})
(v:nat64)
(h:vale_heap{List.memP b (IB.ptrs_of_mem (_ih h))})
(base:nat{base == buffer_addr b h})
(k:nat) (h1:vale_heap{h1 == buffer_write b i v h})
(mem1:S.machine_heap{IB.correct_down (_ih h) mem1})
(mem2:S.machine_heap{IB.correct_down (_ih h1) mem2 /\
(forall j.{:pattern (mem1.[j]) \/ (mem2.[j])}
base <= j /\ j < base + k * 8 ==>
mem1.[j] == mem2.[j])})
: Lemma
(ensures (forall j. {:pattern (mem1.[j]) \/ (mem1.[j])}
j >= base /\ j < base + scale8 i ==>
mem1.[j] == mem2.[j]))
(decreases %[i-k]) =
if k >= i then ()
else begin
let ptr = base + scale8 k in
same_mem_get_heap_val64 b i v k h h1 mem1 mem2;
heap_shift mem1 mem2 ptr 8;
written_buffer_down64_aux1 b i v h base (k+1) h1 mem1 mem2
end
let rec written_buffer_down64_aux2
(b:buffer64{buffer_writeable b})
(i:nat{i < buffer_length b})
(v:nat64)
(h:vale_heap{List.memP b (IB.ptrs_of_mem (_ih h))})
(base:nat{base == buffer_addr b h})
(n:nat{n == buffer_length b})
(k:nat{k > i}) (h1:vale_heap{h1 == buffer_write b i v h})
(mem1:S.machine_heap{IB.correct_down (_ih h) mem1})
(mem2:S.machine_heap{IB.correct_down (_ih h1) mem2 /\
(forall j. {:pattern (mem1.[j]) \/ (mem2.[j])}
base + scale8 (i+1) <= j /\ j < base + k * 8 ==>
mem1.[j] == mem2.[j])})
: Lemma
(ensures (forall j. {:pattern (mem1.[j]) \/ (mem2.[j])}
j >= base + scale8 (i+1) /\ j < base + scale8 n ==>
mem1.[j] == mem2.[j]))
(decreases %[n-k]) =
if k >= n then ()
else begin
let ptr = base + scale8 k in
same_mem_get_heap_val64 b i v k h h1 mem1 mem2;
heap_shift mem1 mem2 ptr 8;
written_buffer_down64_aux2 b i v h base n (k+1) h1 mem1 mem2
end
let written_buffer_down64 (b:buffer64{buffer_writeable b}) (i:nat{i < buffer_length b}) (v:nat64) (h:vale_heap)
: Lemma
(requires List.memP b (IB.ptrs_of_mem (_ih h)))
(ensures (
let mem1 = I.down_mem (_ih h) in
let h1 = buffer_write b i v h in
let mem2 = I.down_mem (_ih h1) in
let base = buffer_addr b h in
let n = buffer_length b in
forall j. {:pattern (mem1.[j]) \/ (mem2.[j])}
(base <= j /\ j < base + scale8 i) \/
(base + scale8 (i+1) <= j /\ j < base + scale8 n) ==>
mem1.[j] == mem2.[j]))
= let mem1 = I.down_mem (_ih h) in
let h1 = buffer_write b i v h in
let mem2 = I.down_mem (_ih h1) in
let base = buffer_addr b h in
let n = buffer_length b in
written_buffer_down64_aux1 b i v h base 0 h1 mem1 mem2;
written_buffer_down64_aux2 b i v h base n (i+1) h1 mem1 mem2
let unwritten_buffer_down (t:base_typ) (b:buffer t{buffer_writeable b})
(i:nat{i < buffer_length b})
(v:base_typ_as_vale_type t)
(h:vale_heap{List.memP b (IB.ptrs_of_mem (_ih h))})
: Lemma
(ensures (
let mem1 = I.down_mem (_ih h) in
let h1 = buffer_write b i v h in
let mem2 = I.down_mem (_ih h1) in
forall (a:b8{List.memP a (IB.ptrs_of_mem (_ih h)) /\ a =!= b}) j. {:pattern mem1.[j]; List.memP a (IB.ptrs_of_mem (_ih h)) \/ mem2.[j]; List.memP a (IB.ptrs_of_mem (_ih h))}
let base = (IB.addrs_of_mem (_ih h)) a in
j >= base /\ j < base + DV.length (get_downview a.bsrc) ==> mem1.[j] == mem2.[j]))
= let aux (a:b8{a =!= b /\ List.memP a (IB.ptrs_of_mem (_ih h))})
: Lemma
(ensures (
let base = (IB.addrs_of_mem (_ih h)) a in
let mem1 = I.down_mem (_ih h) in
let h1 = buffer_write b i v h in
let mem2 = I.down_mem (_ih h1) in
forall j.
j >= base /\ j < base + DV.length (get_downview a.bsrc) ==>
mem1.[j] == mem2.[j]))
= let db = get_downview a.bsrc in
if DV.length db = 0 then ()
else
let mem1 = I.down_mem (_ih h) in
let h1 = buffer_write b i v h in
let mem2 = I.down_mem (_ih h1) in
let base = (IB.addrs_of_mem (_ih h)) a in
let s0 = DV.as_seq (IB.hs_of_mem (_ih h)) db in
let s1 = DV.as_seq (IB.hs_of_mem (_ih h1)) db in
opaque_assert (`%IB.list_disjoint_or_eq) IB.list_disjoint_or_eq IB.list_disjoint_or_eq_def (MB.disjoint a.bsrc b.bsrc);
lemma_dv_equal (IB.down_view a.src) a.bsrc (IB.hs_of_mem (_ih h)) (IB.hs_of_mem (_ih h1));
assert (Seq.equal s0 s1);
assert (forall (j:int).{:pattern (mem1.[j])}
base <= j /\ j < base + Seq.length s0 ==> v_to_typ TUInt8 (Seq.index s0 (j - base)) == mem1.[j]);
heap_shift mem1 mem2 base (DV.length db)
in
Classical.forall_intro aux
let store_buffer_down64_mem
(b:buffer64{buffer_writeable b})
(i:nat{i < buffer_length b})
(v:nat64)
(h:vale_heap{List.memP b (IB.ptrs_of_mem (_ih h))})
: Lemma
(ensures (
let mem1 = I.down_mem (_ih h) in
let h1 = buffer_write b i v h in
let mem2 = I.down_mem (_ih h1) in
let base = buffer_addr b h in
forall (j:int). {:pattern mem1.[j] \/ mem2.[j]}
j < base + scale8 i \/ j >= base + scale8 (i+1) ==>
mem1.[j] == mem2.[j]))
= let mem1 = I.down_mem (_ih h) in
let h1 = buffer_write b i v h in
let mem2 = I.down_mem (_ih h1) in
let base = buffer_addr b h in
let n = buffer_length b in
let aux (j:int)
: Lemma
(j < base + scale8 i \/ j >= base + scale8 (i+1) ==>
mem1.[j] == mem2.[j])
=
I.addrs_set_lemma_all ();
if j >= base && j < base + DV.length (get_downview b.bsrc) then begin
written_buffer_down64 b i v h;
length_t_eq (TUInt64) b
end
else if not (I.valid_addr (_ih h) j)
then I.same_unspecified_down (IB.hs_of_mem (_ih h)) (IB.hs_of_mem (_ih h1)) (IB.ptrs_of_mem (_ih h))
else unwritten_buffer_down TUInt64 b i v h
in
Classical.forall_intro aux
let store_buffer_aux_down64_mem (ptr:int) (v:nat64) (h:vale_heap{writeable_mem64 ptr h})
: Lemma
(ensures (
let mem1 = I.down_mem (_ih h) in
let h1 = store_mem (TUInt64) ptr v h in
let mem2 = I.down_mem (_ih h1) in
forall j. {:pattern mem1.[j] \/ mem2.[j]}
j < ptr \/ j >= ptr + 8 ==>
mem1.[j] == mem2.[j]))
= let t = TUInt64 in
let h1 = store_mem t ptr v h in
let b = Some?.v (find_writeable_buffer t ptr h) in
length_t_eq t b;
let i = get_addr_in_ptr t (buffer_length b) (buffer_addr b h) ptr 0 in
store_buffer_write t ptr v h;
assert (buffer_addr b h + scale8 i == ptr);
assert (buffer_addr b h + scale8 (i+1) == ptr + 8);
store_buffer_down64_mem b i v h
let store_buffer_aux_down64_mem2 (ptr:int) (v:nat64) (h:vale_heap{writeable_mem64 ptr h})
: Lemma
(ensures (
let h1 = store_mem (TUInt64) ptr v h in
let mem2 = I.down_mem (_ih h1) in
S.get_heap_val64 ptr mem2 == v))
= let t = TUInt64 in
let b = Some?.v (find_writeable_buffer t ptr h) in
length_t_eq t b;
let i = get_addr_in_ptr t (buffer_length b) (buffer_addr b h) ptr 0 in
let h1 = store_mem t ptr v h in
let mem2 = I.down_mem (_ih h1) in
store_buffer_write t ptr v h;
assert (Seq.index (buffer_as_seq h1 b) i == v);
index64_get_heap_val64 h1 b mem2 i
let in_bounds64 (h:vale_heap) (b:buffer64) (i:nat{i < buffer_length b})
: Lemma (scale8 i + 8 <= DV.length (get_downview b.bsrc))
=
length_t_eq TUInt64 b
let bytes_valid64 ptr h =
reveal_opaque (`%S.valid_addr64) S.valid_addr64;
let t = TUInt64 in
let b = get_addr_ptr t ptr h in
let i = get_addr_in_ptr t (buffer_length b) (buffer_addr b h) ptr 0 in
in_bounds64 h b i;
I.addrs_set_mem (_ih h) b ptr;
I.addrs_set_mem (_ih h) b (ptr+1);
I.addrs_set_mem (_ih h) b (ptr+2);
I.addrs_set_mem (_ih h) b (ptr+3);
I.addrs_set_mem (_ih h) b (ptr+4);
I.addrs_set_mem (_ih h) b (ptr+5);
I.addrs_set_mem (_ih h) b (ptr+6);
I.addrs_set_mem (_ih h) b (ptr+7)
val same_mem_get_heap_val128 (b:buffer128)
(i:nat{i < buffer_length b})
(v:quad32)
(k:nat{k < buffer_length b})
(h1:vale_heap{List.memP b (IB.ptrs_of_mem (_ih h1)) /\ buffer_writeable b})
(h2:vale_heap{h2 == buffer_write b i v h1})
(mem1:S.machine_heap{IB.correct_down_p (_ih h1) mem1 b})
(mem2:S.machine_heap{IB.correct_down_p (_ih h2) mem2 b}) : Lemma
(requires (Seq.index (buffer_as_seq h1 b) k == Seq.index (buffer_as_seq h2 b) k))
(ensures (let ptr = buffer_addr b h1 + scale16 k in
forall i.{:pattern mem1.[i]} i >= ptr /\ i < ptr+16 ==> mem1.[i] == mem2.[i]))
val same_mem_eq_slices128 (b:buffer128)
(i:nat{i < buffer_length b})
(v:quad32)
(k:nat{k < buffer_length b})
(h1:vale_heap{List.memP b (IB.ptrs_of_mem (_ih h1)) /\ buffer_writeable b})
(h2:vale_heap{h2 == buffer_write b i v h1})
(mem1:S.machine_heap{IB.correct_down_p (_ih h1) mem1 b})
(mem2:S.machine_heap{IB.correct_down_p (_ih h2) mem2 b}) : Lemma
(requires (Seq.index (buffer_as_seq h1 b) k == Seq.index (buffer_as_seq h2 b) k))
(ensures (
k * 16 + 16 <= DV.length (get_downview b.bsrc) /\
Seq.slice (DV.as_seq (IB.hs_of_mem (_ih h1)) (get_downview b.bsrc)) (k * 16) (k * 16 + 16) ==
Seq.slice (DV.as_seq (IB.hs_of_mem (_ih h2)) (get_downview b.bsrc)) (k * 16) (k * 16 + 16)))
let same_mem_eq_slices128 b i v k h1 h2 mem1 mem2 =
let t = TUInt128 in
let db = get_downview b.bsrc in
let ub = UV.mk_buffer db (uint_view t) in
UV.as_seq_sel (IB.hs_of_mem (_ih h1)) ub k;
UV.as_seq_sel (IB.hs_of_mem (_ih h2)) ub k;
UV.put_sel (IB.hs_of_mem (_ih h1)) ub k;
UV.put_sel (IB.hs_of_mem (_ih h2)) ub k;
UV.length_eq ub
let length_up128 (b:buffer128) (h:vale_heap) (k:nat{k < buffer_length b}) (i:nat{i < 16}) : Lemma
(scale16 k + i <= DV.length (get_downview b.bsrc)) =
let vb = UV.mk_buffer (get_downview b.bsrc) uint128_view in
UV.length_eq vb
let same_mem_get_heap_val128 b j v k h1 h2 mem1 mem2 =
let ptr = buffer_addr b h1 + scale16 k in
let addr = buffer_addr b h1 in
let aux (i:nat{ptr <= i /\ i < ptr+16}) : Lemma (mem1.[i] == mem2.[i]) =
let db = get_downview b.bsrc in
let ub = UV.mk_buffer db uint128_view in
UV.as_seq_sel (IB.hs_of_mem (_ih h1)) ub k;
UV.as_seq_sel (IB.hs_of_mem (_ih h2)) ub k;
same_mem_eq_slices128 b j v k h1 h2 mem1 mem2;
let s1 = (Seq.slice (DV.as_seq (IB.hs_of_mem (_ih h1)) db) (k * 16) (k * 16 + 16)) in
let s2 = (Seq.slice (DV.as_seq (IB.hs_of_mem (_ih h2)) db) (k * 16) (k * 16 + 16)) in
assert (Seq.index s1 (i - ptr) == Seq.index (DV.as_seq (IB.hs_of_mem (_ih h1)) db) (k * 16 + (i-ptr)));
length_up128 b h1 k (i-ptr);
assert (mem1.[i] == UInt8.v (Seq.index (DV.as_seq (IB.hs_of_mem (_ih h1)) db) (k * 16 + (i-ptr))));
assert (Seq.index s2 (i-ptr) == Seq.index (DV.as_seq (IB.hs_of_mem (_ih h2)) db) (k * 16 + (i-ptr)));
length_up128 b h2 k (i-ptr);
assert (mem2.[addr+(scale16 k + (i-ptr))] == UInt8.v (Seq.index (DV.as_seq (IB.hs_of_mem (_ih h2)) db) (k * 16 + (i-ptr))));
assert (forall i. addr + (scale16 k + (i-ptr)) == i)
in
Classical.forall_intro aux
let in_bounds128 (h:vale_heap) (b:buffer128) (i:nat{i < buffer_length b}) : Lemma
(scale16 i + 16 <= DV.length (get_downview b.bsrc))
=
length_t_eq TUInt128 b
#push-options "--z3rlimit 20"
#restart-solver
let bytes_valid128 ptr h =
reveal_opaque (`%S.valid_addr128) S.valid_addr128;
IB.list_disjoint_or_eq_reveal ();
let t = TUInt128 in
let b = get_addr_ptr t ptr h in
let i = get_addr_in_ptr t (buffer_length b) (buffer_addr b h) ptr 0 in
in_bounds128 h b i;
I.addrs_set_mem (_ih h) b ptr;
I.addrs_set_mem (_ih h) b (ptr+1);
I.addrs_set_mem (_ih h) b (ptr+2);
I.addrs_set_mem (_ih h) b (ptr+3);
I.addrs_set_mem (_ih h) b (ptr+4);
I.addrs_set_mem (_ih h) b (ptr+5);
I.addrs_set_mem (_ih h) b (ptr+6);
I.addrs_set_mem (_ih h) b (ptr+7);
I.addrs_set_mem (_ih h) b (ptr+8);
I.addrs_set_mem (_ih h) b (ptr+9);
I.addrs_set_mem (_ih h) b (ptr+10);
I.addrs_set_mem (_ih h) b (ptr+11);
I.addrs_set_mem (_ih h) b (ptr+12);
I.addrs_set_mem (_ih h) b (ptr+13);
I.addrs_set_mem (_ih h) b (ptr+14);
I.addrs_set_mem (_ih h) b (ptr+15)
#pop-options
let equiv_load_mem64 ptr h =
let t = TUInt64 in
let b = get_addr_ptr t ptr h in
let i = get_addr_in_ptr t (buffer_length b) (buffer_addr b h) ptr 0 in
let addr = buffer_addr b h in
let contents = DV.as_seq (_ih h).IB.hs (get_downview b.bsrc) in
let heap = get_heap h in
index64_get_heap_val64 h b heap i;
lemma_load_mem64 b i h
//let low_lemma_valid_mem64 b i h =
// lemma_valid_mem64 b i h;
// bytes_valid64 (buffer_addr b h + scale8 i) h
//let low_lemma_load_mem64 b i h =
// lemma_valid_mem64 b i h;
// lemma_load_mem64 b i h;
// equiv_load_mem64 (buffer_addr b h + scale8 i) h
//let same_domain_update64 b i v h =
// low_lemma_valid_mem64 b i h;
// Vale.Arch.MachineHeap.same_domain_update64 (buffer_addr b h + scale8 i) v (get_heap h)
open Vale.X64.BufferViewStore
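// Relate the machine-heap update (S.update_heap64) to the corresponding update of the
// buffer's view of the hyperstack, via bv_upd_update_heap64.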
let low_lemma_store_mem64_aux
(b:buffer64)
(heap:S.machine_heap)
(i:nat{i < buffer_length b})
(v:nat64)
(h:vale_heap{buffer_readable h b /\ buffer_writeable b})
: Lemma
(requires IB.correct_down_p (_ih h) heap b)
(ensures (let heap' = S.update_heap64 (buffer_addr b h + scale8 i) v heap in
let h' = store_mem64 (buffer_addr b h + scale8 i) v h in
(_ih h').IB.hs == DV.upd_seq (_ih h).IB.hs (get_downview b.bsrc) (I.get_seq_heap heap' (_ih h).IB.addrs b))) =
let ptr = buffer_addr b h + scale8 i in
let heap' = S.update_heap64 ptr v heap in
let h' = store_mem64 ptr v h in
lemma_store_mem64 b i v h;
length_t_eq TUInt64 b;
bv_upd_update_heap64 b heap i v (_ih h);
let db = get_downview b.bsrc in
let bv = UV.mk_buffer db Vale.Interop.Views.up_view64 in
assert (UV.upd (_ih h).IB.hs bv i (UInt64.uint_to_t v) == (_ih h').IB.hs)
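// Storing a 64-bit value and then flattening the memory (I.down_mem) yields the same
// machine heap as updating the flattened memory directly with S.update_heap64.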
val valid_state_store_mem64_aux: (i:nat) -> (v:nat64) -> (h:vale_heap) -> Lemma
(requires writeable_mem64 i h)
(ensures (
let heap = get_heap h in
let heap' = S.update_heap64 i v heap in
let h' = store_mem64 i v h in
heap' == I.down_mem (_ih h')
))
let valid_state_store_mem64_aux i v h =
let heap = get_heap h in
let heap' = S.update_heap64 i v heap in
let h1 = store_mem TUInt64 i v h in
store_buffer_aux_down64_mem i v h;
store_buffer_aux_down64_mem2 i v h;
let mem1 = heap' in
let mem2 = I.down_mem (_ih h1) in
let aux () : Lemma (forall j. mem1.[j] == mem2.[j]) =
Vale.Arch.MachineHeap.same_mem_get_heap_val64 i mem1 mem2;
Vale.Arch.MachineHeap.correct_update_get64 i v heap;
Vale.Arch.MachineHeap.frame_update_heap64 i v heap
in let aux2 () : Lemma (Set.equal (Map.domain mem1) (Map.domain mem2)) =
bytes_valid64 i h;
Vale.Arch.MachineHeap.same_domain_update64 i v heap
in aux(); aux2();
Map.lemma_equal_intro mem1 mem2
let low_lemma_load_mem64_full b i vfh t hid =
reveal_opaque (`%valid_layout_buffer_id) valid_layout_buffer_id;
()
#push-options "--z3rlimit 20"
#restart-solver
let low_lemma_store_mem64 b i v h =
lemma_writeable_mem64 b i h;
lemma_store_mem64 b i v h;
valid_state_store_mem64_aux (buffer_addr b h + scale8 i) v h;
let heap = get_heap h in
let heap' = S.update_heap64 (buffer_addr b h + scale8 i) v heap in
low_lemma_store_mem64_aux b heap i v h;
Vale.Arch.MachineHeap.frame_update_heap64 (buffer_addr b h + scale8 i) v heap;
in_bounds64 h b i;
I.addrs_set_lemma_all ();
I.update_buffer_up_mem (_ih h) b heap heap'
#pop-options
#set-options "--z3rlimit 100"
#restart-solver
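// Packaging lemma: matching updates of the main heap and of heaplet k, both framed
// outside [ptr, ptr + v_size) and agreeing inside it, give a valid full-heap update
// (is_full_update) together with an unchanged taint map.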
let lemma_is_full_update
(vfh:vale_full_heap) (h hk hk':vale_heap) (k:heaplet_id) (mh mh' mhk mhk':machine_heap) (mt mt':memtaint)
(t:base_typ) (b:buffer t) (ptr:int) (v_size:nat)
(index:nat) (v:base_typ_as_vale_type t) (tn:taint)
: Lemma
(requires
vfh.vf_layout.vl_inner.vl_heaplets_initialized /\
mem_inv vfh /\
buffer_readable hk b /\
buffer_writeable b /\
index < Seq.length (buffer_as_seq hk b) /\
mt == vfh.vf_layout.vl_taint /\
h == vfh.vf_heap /\
hk == Map16.sel vfh.vf_heaplets k /\
mh == h.mh /\
mhk == hk.mh /\
ptr == buffer_addr b hk + scale_by v_size index /\
mt' == S.update_n ptr v_size (heap_taint (coerce vfh)) tn /\
hk' == buffer_write b index v hk /\
valid_layout_buffer b vfh.vf_layout hk true /\
valid_taint_buf b hk mt tn /\
is_machine_heap_update mh mh' /\ upd_heap h mh' == buffer_write b index v h /\
is_machine_heap_update mhk mhk' /\ upd_heap hk mhk' == buffer_write b index v hk /\
(forall j.{:pattern mh.[j] \/ mh'.[j]} j < ptr \/ j >= ptr + v_size ==> mh.[j] == mh'.[j]) /\
(forall j.{:pattern mhk.[j] \/ mhk'.[j]} j < ptr \/ j >= ptr + v_size ==> mhk.[j] == mhk'.[j]) /\
0 <= scale_by v_size index /\ scale_by v_size index + v_size <= DV.length (get_downview b.bsrc) /\
(forall i.{:pattern mh'.[i] \/ mhk'.[i]} i >= ptr /\ i < ptr + v_size ==> mh'.[i] == mhk'.[i]) /\
True
)
(ensures is_full_update vfh hk' k mh' mt')
=
reveal_opaque (`%valid_layout_buffer_id) valid_layout_buffer_id;
let vfh' = coerce (heap_upd (coerce vfh) mh' mt') in
let dom_upd = Set.intersect (vfh.vf_layout.vl_inner.vl_heaplet_sets k) (Map.domain mhk) in
let mhk'' = Map.concat mhk (Map.restrict dom_upd mh') in
assert (Map.equal mhk'' mhk');
let unchanged (j:heaplet_id) : Lemma
(requires j =!= k)
(ensures Map16.sel vfh'.vf_heaplets j == Map16.sel vfh.vf_heaplets j)
[SMTPat (Map16.sel vfh'.vf_heaplets j)]
=
assert (Map.equal (Map16.sel vfh'.vf_heaplets j).mh (Map16.sel vfh.vf_heaplets j).mh);
I.down_up_identity (Map16.sel vfh.vf_heaplets j).ih;
()
in
assert (Map16.equal vfh'.vf_heaplets (Map16.upd vfh.vf_heaplets k hk'));
assert (Map.equal mt' mt);
Vale.Interop.Heap_s.list_disjoint_or_eq_reveal ();
()
let low_lemma_store_mem64_full b i v vfh t hid =
let (h, mt, hk) = (vfh.vf_heap, vfh.vf_layout.vl_taint, Map16.get vfh.vf_heaplets hid) in
let ptr = buffer_addr b hk + scale8 i in
let mh' = S.update_heap64 ptr v (heap_get (coerce vfh)) in
let mt' = S.update_n ptr 8 (heap_taint (coerce vfh)) t in
let hk' = buffer_write b i v hk in
let mhk' = S.update_heap64 ptr v (get_heap hk) in
reveal_opaque (`%valid_layout_buffer_id) valid_layout_buffer_id;
low_lemma_store_mem64 b i v h;
low_lemma_store_mem64 b i v (Map16.get vfh.vf_heaplets hid);
Vale.Arch.MachineHeap.frame_update_heap64 ptr v h.mh;
Vale.Arch.MachineHeap.frame_update_heap64 ptr v hk.mh;
in_bounds64 hk b i;
Vale.Arch.MachineHeap.same_mem_get_heap_val64 ptr mh' mhk';
lemma_is_full_update vfh h hk hk' hid h.mh mh' hk.mh mhk' mt mt' TUInt64 b ptr 8 i v t;
()
val low_lemma_valid_mem128 (b:buffer128) (i:nat) (h:vale_heap) : Lemma
(requires
i < Seq.length (buffer_as_seq h b) /\
buffer_readable h b
)
(ensures
S.valid_addr128 (buffer_addr b h + scale16 i) (get_heap h)
)
let low_lemma_valid_mem128 b i h =
lemma_valid_mem128 b i h;
bytes_valid128 (buffer_addr b h + scale16 i) h
val equiv_load_mem128_aux: (ptr:int) -> (h:vale_heap) -> Lemma
(requires valid_mem128 ptr h)
(ensures load_mem128 ptr h == S.get_heap_val128 ptr (get_heap h))
let equiv_load_mem128_aux ptr h =
let t = TUInt128 in
let b = get_addr_ptr t ptr h in
let i = get_addr_in_ptr t (buffer_length b) (buffer_addr b h) ptr 0 in
let addr = buffer_addr b h in
let contents = DV.as_seq (_ih h).IB.hs (get_downview b.bsrc) in
let heap = get_heap h in
S.get_heap_val128_reveal ();
index128_get_heap_val128 h b heap i;
lemma_load_mem128 b i h
let equiv_load_mem128 ptr h =
equiv_load_mem128_aux ptr h
val low_lemma_load_mem128 (b:buffer128) (i:nat) (h:vale_heap) : Lemma
(requires
i < Seq.length (buffer_as_seq h b) /\
buffer_readable h b
)
(ensures
S.get_heap_val128 (buffer_addr b h + scale16 i) (get_heap h) == buffer_read b i h
)
let low_lemma_load_mem128 b i h =
lemma_valid_mem128 b i h;
lemma_load_mem128 b i h;
equiv_load_mem128_aux (buffer_addr b h + scale16 i) h
//let same_domain_update128 b i v h =
// low_lemma_valid_mem128 b i h;
// Vale.Arch.MachineHeap.same_domain_update128 (buffer_addr b h + scale16 i) v (get_heap h)
let low_lemma_store_mem128_aux
(b:buffer128)
(heap:S.machine_heap)
(i:nat{i < buffer_length b})
(v:quad32)
(h:vale_heap{buffer_readable h b /\ buffer_writeable b})
: Lemma
(requires IB.correct_down_p (_ih h) heap b)
(ensures (let heap' = S.update_heap128 (buffer_addr b h + scale16 i) v heap in
let h' = store_mem128 (buffer_addr b h + scale16 i) v h in
(_ih h').IB.hs == DV.upd_seq (_ih h).IB.hs (get_downview b.bsrc) (I.get_seq_heap heap' (_ih h).IB.addrs b))) =
let ptr = buffer_addr b h + scale16 i in
let heap' = S.update_heap128 ptr v heap in
let h' = store_mem128 ptr v h in
lemma_store_mem128 b i v h;
length_t_eq TUInt128 b;
bv_upd_update_heap128 b heap i v (_ih h);
let db = get_downview b.bsrc in
let bv = UV.mk_buffer db Vale.Interop.Views.up_view128 in
assert (UV.upd (_ih h).IB.hs bv i v == (_ih h').IB.hs)
val valid_state_store_mem128_aux (i:int) (v:quad32) (h:vale_heap) : Lemma
(requires writeable_mem128 i h)
(ensures (
let heap = get_heap h in
let heap' = S.update_heap128 i v heap in
let h' = store_mem128 i v h in
heap' == I.down_mem (_ih h')
))
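// 128-bit counterparts of the framing lemmas above, at 16-byte granularity.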
#restart-solver
let rec written_buffer_down128_aux1
(b:buffer128{buffer_writeable b})
(i:nat{i < buffer_length b})
(v:quad32)
(h:vale_heap{List.memP b (_ih h).IB.ptrs})
(base:nat{base == buffer_addr b h})
(k:nat) (h1:vale_heap{h1 == buffer_write b i v h})
(mem1:S.machine_heap{IB.correct_down (_ih h) mem1})
(mem2:S.machine_heap{IB.correct_down (_ih h1) mem2 /\
(forall j.{:pattern (mem1.[j]) \/ (mem2.[j])}
base <= j /\ j < base + k * 16 ==>
mem1.[j] == mem2.[j])})
: Lemma
      (ensures (forall j. {:pattern (mem1.[j]) \/ (mem2.[j])}
j >= base /\ j < base + scale16 i ==>
mem1.[j] == mem2.[j]))
(decreases %[i-k]) =
if k >= i then ()
else begin
let ptr = base + scale16 k in
same_mem_get_heap_val128 b i v k h h1 mem1 mem2;
heap_shift mem1 mem2 ptr 16;
written_buffer_down128_aux1 b i v h base (k+1) h1 mem1 mem2
end
#restart-solver
let rec written_buffer_down128_aux2
(b:buffer128{buffer_writeable b})
(i:nat{i < buffer_length b})
(v:quad32)
(h:vale_heap{List.memP b (_ih h).IB.ptrs})
(base:nat{base == buffer_addr b h})
(n:nat{n == buffer_length b})
(k:nat{k > i}) (h1:vale_heap{h1 == buffer_write b i v h})
(mem1:S.machine_heap{IB.correct_down (_ih h) mem1})
(mem2:S.machine_heap{IB.correct_down (_ih h1) mem2 /\
(forall j. {:pattern (mem1.[j]) \/ (mem2.[j])}
base + scale16 (i+1) <= j /\ j < base + k * 16 ==>
mem1.[j] == mem2.[j])})
: Lemma
(ensures (forall j. {:pattern (mem1.[j]) \/ (mem2.[j])}
j >= base + scale16 (i+1) /\ j < base + scale16 n ==>
mem1.[j] == mem2.[j]))
(decreases %[n-k]) =
if k >= n then ()
else begin
let ptr = base + scale16 k in
same_mem_get_heap_val128 b i v k h h1 mem1 mem2;
heap_shift mem1 mem2 ptr 16;
written_buffer_down128_aux2 b i v h base n (k+1) h1 mem1 mem2
end
let written_buffer_down128 (b:buffer128) (i:nat{i < buffer_length b}) (v:quad32) (h:vale_heap)
: Lemma
(requires List.memP b (_ih h).IB.ptrs /\ buffer_writeable b)
(ensures (
let mem1 = I.down_mem (_ih h) in
let h1 = buffer_write b i v h in
let mem2 = I.down_mem (_ih h1) in
let base = buffer_addr b h in
let n = buffer_length b in
forall j. {:pattern (mem1.[j]) \/ (mem2.[j])}
(base <= j /\ j < base + scale16 i) \/
(base + scale16 (i+1) <= j /\ j < base + scale16 n) ==>
mem1.[j] == mem2.[j]))
= let mem1 = I.down_mem (_ih h) in
let h1 = buffer_write b i v h in
let mem2 = I.down_mem (_ih h1) in
let base = buffer_addr b h in
let n = buffer_length b in
written_buffer_down128_aux1 b i v h base 0 h1 mem1 mem2;
written_buffer_down128_aux2 b i v h base n (i+1) h1 mem1 mem2
let store_buffer_down128_mem
(b:buffer128{buffer_writeable b})
(i:nat{i < buffer_length b})
(v:quad32)
(h:vale_heap{List.memP b (_ih h).IB.ptrs})
: Lemma
(ensures (
let mem1 = I.down_mem (_ih h) in
let h1 = buffer_write b i v h in
let mem2 = I.down_mem (_ih h1) in
let base = buffer_addr b h in
forall (j:int). {:pattern mem1.[j] \/ mem2.[j]}
j < base + scale16 i \/ j >= base + scale16 (i+1) ==>
mem1.[j] == mem2.[j]))
= let mem1 = I.down_mem (_ih h) in
let h1 = buffer_write b i v h in
let mem2 = I.down_mem (_ih h1) in
let base = buffer_addr b h in
let n = buffer_length b in
let aux (j:int)
: Lemma
(j < base + scale16 i \/ j >= base + scale16 (i+1) ==>
mem1.[j] == mem2.[j])
=
I.addrs_set_lemma_all ();
if j >= base && j < base + DV.length (get_downview b.bsrc) then begin
written_buffer_down128 b i v h;
length_t_eq TUInt128 b
end
else if not (I.valid_addr (_ih h) j)
then I.same_unspecified_down (_ih h).IB.hs (_ih h1).IB.hs (_ih h).IB.ptrs
else unwritten_buffer_down TUInt128 b i v h
in
Classical.forall_intro aux
let store_buffer_aux_down128_mem (ptr:int) (v:quad32) (h:vale_heap{writeable_mem128 ptr h})
: Lemma
(ensures (
let mem1 = I.down_mem (_ih h) in
let h1 = store_mem TUInt128 ptr v h in
let mem2 = I.down_mem (_ih h1) in
forall j. {:pattern mem1.[j] \/ mem2.[j]}
j < ptr \/ j >= ptr + 16 ==>
mem1.[j] == mem2.[j]))
= let t = TUInt128 in
let h1 = store_mem t ptr v h in
let b = Some?.v (find_writeable_buffer t ptr h) in
length_t_eq t b;
let i = get_addr_in_ptr t (buffer_length b) (buffer_addr b h) ptr 0 in
store_buffer_write t ptr v h;
assert (buffer_addr b h + scale16 i == ptr);
assert (buffer_addr b h + scale16 (i+1) == ptr + 16);
store_buffer_down128_mem b i v h
let store_buffer_aux_down128_mem2 (ptr:int) (v:quad32) (h:vale_heap{writeable_mem128 ptr h})
: Lemma
(ensures (
let h1 = store_mem TUInt128 ptr v h in
let mem2 = I.down_mem (_ih h1) in
Mkfour
(S.get_heap_val32 ptr mem2)
(S.get_heap_val32 (ptr+4) mem2)
(S.get_heap_val32 (ptr+8) mem2)
(S.get_heap_val32 (ptr+12) mem2)
== v)) =
let t = TUInt128 in
let b = Some?.v (find_writeable_buffer t ptr h) in
length_t_eq t b;
let i = get_addr_in_ptr t (buffer_length b) (buffer_addr b h) ptr 0 in
let h1 = store_mem t ptr v h in
let mem2 = I.down_mem (_ih h1) in
store_buffer_write t ptr v h;
assert (Seq.index (buffer_as_seq h1 b) i == v);
index128_get_heap_val128 h1 b mem2 i
let valid_state_store_mem128_aux i v h =
let heap = get_heap h in
let heap' = S.update_heap128 i v heap in
let h1 = store_mem TUInt128 i v h in
store_buffer_aux_down128_mem i v h;
store_buffer_aux_down128_mem2 i v h;
let mem1 = heap' in
let mem2 = I.down_mem (_ih h1) in
let aux () : Lemma (forall j. mem1.[j] == mem2.[j]) =
Vale.Arch.MachineHeap.correct_update_get128 i v heap;
Vale.X64.Machine_Semantics_s.get_heap_val128_reveal ();
Vale.Arch.MachineHeap.same_mem_get_heap_val32 i mem1 mem2;
Vale.Arch.MachineHeap.same_mem_get_heap_val32 (i+4) mem1 mem2;
Vale.Arch.MachineHeap.same_mem_get_heap_val32 (i+8) mem1 mem2;
Vale.Arch.MachineHeap.same_mem_get_heap_val32 (i+12) mem1 mem2;
Vale.Arch.MachineHeap.frame_update_heap128 i v heap
in
let aux2 () : Lemma (Set.equal (Map.domain mem1) (Map.domain mem2)) =
bytes_valid128 i h;
Vale.Arch.MachineHeap.same_domain_update128 i v heap
in aux (); aux2 ();
Map.lemma_equal_intro mem1 mem2
let low_lemma_load_mem128_full b i vfh t hid =
reveal_opaque (`%valid_layout_buffer_id) valid_layout_buffer_id;
()
let low_lemma_store_mem128 b i v h =
lemma_valid_mem128 b i h;
lemma_store_mem128 b i v h;
valid_state_store_mem128_aux (buffer_addr b h + scale16 i) v h;
let heap = get_heap h in
let heap' = S.update_heap128 (buffer_addr b h + scale16 i) v heap in
let h' = store_mem128 (buffer_addr b h + scale16 i) v h in
low_lemma_store_mem128_aux b heap i v h;
Vale.Arch.MachineHeap.frame_update_heap128 (buffer_addr b h + scale16 i) v heap;
in_bounds128 h b i;
I.addrs_set_lemma_all ();
I.update_buffer_up_mem (_ih h) b heap heap'
let low_lemma_store_mem128_full b i v vfh t hid =
let (h, mt, hk) = (vfh.vf_heap, vfh.vf_layout.vl_taint, Map16.get vfh.vf_heaplets hid) in
let ptr = buffer_addr b hk + scale16 i in
let mh' = S.update_heap128 ptr v (heap_get (coerce vfh)) in
let mt' = S.update_n ptr 16 (heap_taint (coerce vfh)) t in
let hk' = buffer_write b i v hk in
let mhk' = S.update_heap128 ptr v (get_heap hk) in
reveal_opaque (`%valid_layout_buffer_id) valid_layout_buffer_id;
low_lemma_store_mem128 b i v h;
low_lemma_store_mem128 b i v (Map16.get vfh.vf_heaplets hid);
Vale.Arch.MachineHeap.frame_update_heap128 ptr v h.mh;
Vale.Arch.MachineHeap.frame_update_heap128 ptr v hk.mh;
in_bounds128 hk b i;
Vale.Arch.MachineHeap.same_mem_get_heap_val128 ptr mh' mhk';
lemma_is_full_update vfh h hk hk' hid h.mh mh' hk.mh mhk' mt mt' TUInt128 b ptr 16 i v t;
()
#push-options "--smtencoding.l_arith_repr boxwrap"
let low_lemma_valid_mem128_64 b i h =
reveal_opaque (`%S.valid_addr64) S.valid_addr64;
reveal_opaque (`%S.valid_addr128) S.valid_addr128;
low_lemma_valid_mem128 b i h;
let ptr = buffer_addr b h + scale16 i in
assert (buffer_addr b h + scale16 i + 8 = ptr + 8)
#pop-options
open Vale.Def.Words.Two_s
open Vale.Def.Words.Four_s | false | false | Vale.X64.Memory_Sems.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 100,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val low_lemma_load_mem128_lo64 (b:buffer128) (i:nat) (h:vale_heap) : Lemma
(requires
i < Seq.length (buffer_as_seq h b) /\
buffer_readable h b
)
(ensures
S.get_heap_val64 (buffer_addr b h + scale16 i) (get_heap h) ==
lo64 (buffer_read b i h)
) | [] | Vale.X64.Memory_Sems.low_lemma_load_mem128_lo64 | {
"file_name": "vale/code/arch/x64/Vale.X64.Memory_Sems.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | b: Vale.X64.Memory.buffer128 -> i: Prims.nat -> h: Vale.Arch.HeapImpl.vale_heap
-> FStar.Pervasives.Lemma
(requires
i < FStar.Seq.Base.length (Vale.X64.Memory.buffer_as_seq h b) /\
Vale.X64.Memory.buffer_readable h b)
(ensures
Vale.Arch.MachineHeap_s.get_heap_val64 (Vale.X64.Memory.buffer_addr b h +
Vale.X64.Memory.scale16 i)
(Vale.X64.Memory_Sems.get_heap h) ==
Vale.Arch.Types.lo64 (Vale.X64.Memory.buffer_read b i h)) | {
"end_col": 28,
"end_line": 1026,
"start_col": 2,
"start_line": 1022
} |
FStar.Pervasives.Lemma | val update_heap128_lo (ptr: int) (v: quad32) (mem: S.machine_heap)
: Lemma
(requires
S.valid_addr128 ptr mem /\ v.hi2 == S.get_heap_val32 (ptr + 8) mem /\
v.hi3 == S.get_heap_val32 (ptr + 12) mem)
(ensures
S.update_heap128 ptr v mem ==
S.update_heap32 (ptr + 4) v.lo1 (S.update_heap32 ptr v.lo0 mem)) | [
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words.Four_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words.Two_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.BufferViewStore",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Interop.Base",
"short_module": "IB"
},
{
"abbrev": false,
"full_module": "Vale.Arch.Heap",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Lib.BufferViewHelpers",
"short_module": null
},
{
"abbrev": true,
"full_module": "LowStar.BufferView.Down",
"short_module": "DV"
},
{
"abbrev": true,
"full_module": "LowStar.BufferView.Up",
"short_module": "UV"
},
{
"abbrev": true,
"full_module": "LowStar.Monotonic.Buffer",
"short_module": "MB"
},
{
"abbrev": true,
"full_module": "Vale.X64.Machine_Semantics_s",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.Interop",
"short_module": "I"
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Machine_Semantics_s",
"short_module": "S"
},
{
"abbrev": false,
"full_module": "Vale.Lib.Seqs",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.MachineHeap_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Heap",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let update_heap128_lo (ptr:int) (v:quad32) (mem:S.machine_heap) : Lemma
(requires
S.valid_addr128 ptr mem /\
v.hi2 == S.get_heap_val32 (ptr+8) mem /\
v.hi3 == S.get_heap_val32 (ptr+12) mem
)
(ensures S.update_heap128 ptr v mem ==
S.update_heap32 (ptr+4) v.lo1 (S.update_heap32 ptr v.lo0 mem)) =
reveal_opaque (`%S.valid_addr128) S.valid_addr128;
S.update_heap128_reveal ();
let mem0 = S.update_heap32 ptr v.lo0 mem in
let mem1 = S.update_heap32 (ptr+4) v.lo1 mem0 in
Vale.Arch.MachineHeap.frame_update_heap32 ptr v.lo0 mem;
Vale.Arch.MachineHeap.frame_update_heap32 (ptr+4) v.lo1 mem0;
Vale.Arch.MachineHeap.same_domain_update32 ptr v.lo0 mem;
Vale.Arch.MachineHeap.same_domain_update32 (ptr+4) v.lo1 mem0;
frame_get_heap32 (ptr+8) mem mem1;
frame_get_heap32 (ptr+12) mem mem1;
Vale.Arch.MachineHeap.update_heap32_get_heap32 (ptr+8) mem1;
Vale.Arch.MachineHeap.update_heap32_get_heap32 (ptr+12) mem1 | val update_heap128_lo (ptr: int) (v: quad32) (mem: S.machine_heap)
: Lemma
(requires
S.valid_addr128 ptr mem /\ v.hi2 == S.get_heap_val32 (ptr + 8) mem /\
v.hi3 == S.get_heap_val32 (ptr + 12) mem)
(ensures
S.update_heap128 ptr v mem ==
S.update_heap32 (ptr + 4) v.lo1 (S.update_heap32 ptr v.lo0 mem))
let update_heap128_lo (ptr: int) (v: quad32) (mem: S.machine_heap)
: Lemma
(requires
S.valid_addr128 ptr mem /\ v.hi2 == S.get_heap_val32 (ptr + 8) mem /\
v.hi3 == S.get_heap_val32 (ptr + 12) mem)
(ensures
S.update_heap128 ptr v mem ==
S.update_heap32 (ptr + 4) v.lo1 (S.update_heap32 ptr v.lo0 mem)) = | false | null | true | reveal_opaque (`%S.valid_addr128) S.valid_addr128;
S.update_heap128_reveal ();
let mem0 = S.update_heap32 ptr v.lo0 mem in
let mem1 = S.update_heap32 (ptr + 4) v.lo1 mem0 in
Vale.Arch.MachineHeap.frame_update_heap32 ptr v.lo0 mem;
Vale.Arch.MachineHeap.frame_update_heap32 (ptr + 4) v.lo1 mem0;
Vale.Arch.MachineHeap.same_domain_update32 ptr v.lo0 mem;
Vale.Arch.MachineHeap.same_domain_update32 (ptr + 4) v.lo1 mem0;
frame_get_heap32 (ptr + 8) mem mem1;
frame_get_heap32 (ptr + 12) mem mem1;
Vale.Arch.MachineHeap.update_heap32_get_heap32 (ptr + 8) mem1;
Vale.Arch.MachineHeap.update_heap32_get_heap32 (ptr + 12) mem1 | {
"checked_file": "Vale.X64.Memory_Sems.fst.checked",
"dependencies": [
"Vale.X64.Memory.fst.checked",
"Vale.X64.Memory.fst.checked",
"Vale.X64.Machine_Semantics_s.fst.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.BufferViewStore.fsti.checked",
"Vale.Lib.BufferViewHelpers.fst.checked",
"Vale.Interop.Views.fsti.checked",
"Vale.Interop.Heap_s.fst.checked",
"Vale.Interop.Base.fst.checked",
"Vale.Interop.fsti.checked",
"Vale.Def.Words_s.fsti.checked",
"Vale.Def.Words.Two_s.fsti.checked",
"Vale.Def.Words.Four_s.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Def.Opaque_s.fsti.checked",
"Vale.Arch.MachineHeap.fsti.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"Vale.Arch.Heap.fsti.checked",
"prims.fst.checked",
"LowStar.Monotonic.Buffer.fsti.checked",
"LowStar.BufferView.Up.fsti.checked",
"LowStar.BufferView.Down.fsti.checked",
"FStar.UInt8.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.Set.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Option.fst.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.List.fst.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": true,
"source_file": "Vale.X64.Memory_Sems.fst"
} | [
"lemma"
] | [
"Prims.int",
"Vale.Def.Types_s.quad32",
"Vale.Arch.MachineHeap_s.machine_heap",
"Vale.Arch.MachineHeap.update_heap32_get_heap32",
"Prims.op_Addition",
"Prims.unit",
"Vale.X64.Memory_Sems.frame_get_heap32",
"Vale.Arch.MachineHeap.same_domain_update32",
"Vale.Def.Words_s.__proj__Mkfour__item__lo1",
"Vale.Def.Types_s.nat32",
"Vale.Def.Words_s.__proj__Mkfour__item__lo0",
"Vale.Arch.MachineHeap.frame_update_heap32",
"Vale.Arch.MachineHeap_s.update_heap32",
"Vale.Arch.MachineHeap_s.update_heap128_reveal",
"FStar.Pervasives.reveal_opaque",
"Prims.bool",
"Vale.Arch.MachineHeap_s.valid_addr128",
"Prims.l_and",
"Prims.b2t",
"Prims.eq2",
"Vale.Def.Words_s.__proj__Mkfour__item__hi2",
"Vale.Arch.MachineHeap_s.get_heap_val32",
"Vale.Def.Words_s.__proj__Mkfour__item__hi3",
"Prims.squash",
"Vale.Arch.MachineHeap_s.update_heap128",
"Prims.Nil",
"FStar.Pervasives.pattern"
] | [] | module Vale.X64.Memory_Sems
open FStar.Mul
open Vale.Def.Prop_s
open Vale.Def.Opaque_s
open Vale.X64.Machine_s
open Vale.X64.Memory
open Vale.Def.Words_s
module I = Vale.Interop
module S = Vale.X64.Machine_Semantics_s
module MB = LowStar.Monotonic.Buffer
module UV = LowStar.BufferView.Up
module DV = LowStar.BufferView.Down
open Vale.Lib.BufferViewHelpers
open Vale.Arch.HeapImpl
open Vale.Arch.Heap
friend Vale.X64.Memory
module IB = Vale.Interop.Base
let same_domain h m = Set.equal (IB.addrs_set (_ih h)) (Map.domain m)
let lemma_same_domains h m1 m2 = ()
let get_heap h = I.down_mem (_ih h)
let upd_heap h m = mi_heap_upd h m
//let lemma_upd_get_heap h = I.down_up_identity (_ih h)
let lemma_get_upd_heap h m = I.up_down_identity (_ih h) m
let lemma_heap_impl = ()
let lemma_heap_get_heap h = ()
let lemma_heap_taint h = ()
//let lemma_heap_upd_heap h mh mt = ()
[@"opaque_to_smt"]
let rec set_of_range (a:int) (n:nat) : Pure (Set.set int)
(requires True)
(ensures fun s -> (forall (i:int).{:pattern Set.mem i s} Set.mem i s <==> a <= i /\ i < a + n))
=
if n = 0 then Set.empty else Set.union (set_of_range a (n - 1)) (Set.singleton (a + n - 1))
let buffer_info_has_addr (bi:buffer_info) (a:int) =
let b = bi.bi_buffer in
let addr = Vale.Interop.Heap_s.global_addrs_map b in
let len = DV.length (get_downview b.bsrc) in
addr <= a /\ a < addr + len
let buffer_info_has_addr_opt (bi:option buffer_info) (a:int) =
match bi with
| None -> False
| Some bi -> buffer_info_has_addr bi a
#set-options "--z3rlimit 100"
let rec make_owns_rec (h:vale_heap) (bs:Seq.seq buffer_info) (n:nat{n <= Seq.length bs}) :
GTot ((int -> option (n:nat{n < Seq.length bs})) & (heaplet_id -> Set.set int))
=
if n = 0 then ((fun _ -> None), (fun _ -> Set.empty)) else
let (m0, s0) = make_owns_rec h bs (n - 1) in
let bi = Seq.index bs (n - 1) in
let b = bi.bi_buffer in
let hi = bi.bi_heaplet in
let addr = Vale.Interop.Heap_s.global_addrs_map b in
let len = DV.length (get_downview b.bsrc) in
let s_b = set_of_range addr len in
let s i = if i = hi then Set.union (s0 i) s_b else s0 i in
let m a = if addr <= a && a < addr + len then Some (n - 1) else m0 a in
(m, s)
[@"opaque_to_smt"]
let make_owns (h:vale_heap) (bs:Seq.seq buffer_info) (n:nat{n <= Seq.length bs}) :
GTot ((int -> option (n:nat{n < Seq.length bs})) & (heaplet_id -> Set.set int))
=
make_owns_rec h bs n
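// For pairwise-disjoint, readable buffers, the ownership map and per-heaplet address sets
// computed by make_owns agree: an address is in heaplet i's set iff it is mapped to a
// buffer of heaplet i, and every such address is in the machine-heap domain.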
let rec lemma_make_owns (h:vale_heap) (bs:Seq.seq buffer_info) (n:nat) : Lemma
(requires
n <= Seq.length bs /\
(forall (i:nat).{:pattern Seq.index bs i} i < Seq.length bs ==> buffer_readable h (Seq.index bs i).bi_buffer) /\
(forall (i1 i2:nat).{:pattern (Seq.index bs i1); (Seq.index bs i2)}
i1 < Seq.length bs /\ i2 < Seq.length bs ==> buffer_info_disjoint (Seq.index bs i1) (Seq.index bs i2))
)
(ensures (
let (m, s) = make_owns h bs n in
(forall (i:heaplet_id) (a:int).{:pattern Set.mem a (s i)}
(Set.mem a (s i) <==> Option.mapTot (fun n -> (Seq.index bs n).bi_heaplet) (m a) == Some i) /\
(Set.mem a (s i) ==> buffer_info_has_addr_opt (Option.mapTot (fun n -> Seq.index bs n) (m a)) a) /\
(Set.mem a (s i) ==> Set.mem a (Map.domain h.mh))
) /\
(forall (k:nat) (a:int).{:pattern Set.mem a (s (Seq.index bs k).bi_heaplet)}
k < n /\ buffer_info_has_addr (Seq.index bs k) a ==> Set.mem a (s (Seq.index bs k).bi_heaplet))
))
=
reveal_opaque (`%make_owns) make_owns;
if n = 0 then () else
let _ = make_owns h bs (n - 1) in
let (m, s) = make_owns h bs n in
lemma_make_owns h bs (n - 1);
let bi = Seq.index bs (n - 1) in
let b = bi.bi_buffer in
let hi = bi.bi_heaplet in
let addr = Vale.Interop.Heap_s.global_addrs_map b in
let len = DV.length (get_downview b.bsrc) in
let s_b = set_of_range addr len in
let lem1 (a:int) : Lemma
(requires Set.mem a s_b)
(ensures Set.mem a (Map.domain h.mh))
[SMTPat (Set.mem a (Map.domain h.mh))]
=
I.addrs_set_mem h.ih b a
in
let lem2 (i:heaplet_id) (a:int) : Lemma
(requires i =!= hi /\ Set.mem a (Set.intersect s_b (s i)))
(ensures False)
[SMTPat (Set.mem a (s i))]
=
reveal_opaque (`%addr_map_pred) addr_map_pred
in
()
#push-options "--initial_fuel 1 --max_fuel 1 --initial_ifuel 2 --max_ifuel 2"
let rec lemma_loc_mutable_buffers_rec (l:list buffer_info) (s:Seq.seq buffer_info) (n:nat) : Lemma
(requires
n + List.length l == Seq.length s /\
list_to_seq_post l s n
)
(ensures (
let modloc = loc_mutable_buffers l in
forall (i:nat).{:pattern Seq.index s i} n <= i /\ i < Seq.length s ==> (
let bi = Seq.index s i in
bi.bi_mutable == Mutable ==> loc_includes modloc (loc_buffer bi.bi_buffer))
))
(decreases l)
=
match l with
| [] -> ()
| h::t -> lemma_loc_mutable_buffers_rec t s (n + 1)
#pop-options
let lemma_loc_mutable_buffers (l:list buffer_info) : Lemma
(ensures (
let s = list_to_seq l in
forall (i:nat).{:pattern Seq.index s i} i < Seq.length s ==> (
let bi = Seq.index s i in
bi.bi_mutable == Mutable ==> loc_includes (loc_mutable_buffers l) (loc_buffer bi.bi_buffer))
))
=
lemma_list_to_seq l;
lemma_loc_mutable_buffers_rec l (list_to_seq l) 0
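// create_heaplets only records the heaplet layout (ownership map, address sets, a snapshot
// of the current heap, and the buffer list); the underlying heap and heaplets are unchanged.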
let create_heaplets buffers h1 =
let bs = list_to_seq buffers in
let modloc = loc_mutable_buffers buffers in
let layout1 = h1.vf_layout in
let layin1 = layout1.vl_inner in
let (hmap, hsets) = make_owns h1.vf_heap bs (Seq.length bs) in
let hmap a = Option.mapTot (fun n -> (Seq.index bs n).bi_heaplet) (hmap a) in
let l = {
vl_heaplets_initialized = true;
vl_heaplet_map = hmap;
vl_heaplet_sets = hsets;
vl_old_heap = h1.vf_heap;
vl_buffers = bs;
vl_mod_loc = modloc;
} in
let layout2 = {layout1 with vl_inner = l} in
let h2 = {
vf_layout = layout2;
vf_heap = h1.vf_heap;
vf_heaplets = h1.vf_heaplets;
} in
h2
let lemma_create_heaplets buffers h1 =
let bs = list_to_seq buffers in
let h2 = create_heaplets buffers h1 in
assert (h2.vf_layout.vl_inner.vl_buffers == bs); // REVIEW: why is this necessary, even with extra ifuel?
lemma_make_owns h1.vf_heap bs (Seq.length bs);
lemma_loc_mutable_buffers buffers;
reveal_opaque (`%valid_layout_buffer_id) valid_layout_buffer_id;
()
let destroy_heaplets h1 =
h1
let lemma_destroy_heaplets h1 =
()
val heap_shift (m1 m2:S.machine_heap) (base:int) (n:nat) : Lemma
(requires (forall i. 0 <= i /\ i < n ==> m1.[base + i] == m2.[base + i]))
(ensures (forall i. {:pattern (m1.[i])} base <= i /\ i < base + n ==> m1.[i] == m2.[i]))
#push-options "--smtencoding.l_arith_repr boxwrap"
let heap_shift m1 m2 base n =
assert (forall i. base <= i /\ i < base + n ==>
m1.[base + (i - base)] == m2.[base + (i - base)])
#pop-options
val same_mem_eq_slices64 (b:buffer64{buffer_writeable b})
(i:nat{i < buffer_length b})
(v:nat64)
(k:nat{k < buffer_length b})
(h1:vale_heap{List.memP b (IB.ptrs_of_mem (_ih h1))})
(h2:vale_heap{h2 == buffer_write b i v h1})
(mem1:S.machine_heap{IB.correct_down_p (_ih h1) mem1 b})
(mem2:S.machine_heap{IB.correct_down_p (_ih h2) mem2 b}) : Lemma
(requires (Seq.index (buffer_as_seq h1 b) k == Seq.index (buffer_as_seq h2 b) k))
(ensures (
k * 8 + 8 <= DV.length (get_downview b.bsrc) /\
Seq.slice (DV.as_seq (IB.hs_of_mem (_ih h1)) (get_downview b.bsrc)) (k * 8) (k * 8 + 8) ==
Seq.slice (DV.as_seq (IB.hs_of_mem (_ih h2)) (get_downview b.bsrc)) (k * 8) (k * 8 + 8)))
let same_mem_eq_slices64 b i v k h1 h2 mem1 mem2 =
let t = TUInt64 in
let db = get_downview b.bsrc in
let ub = UV.mk_buffer db (uint_view t) in
UV.as_seq_sel (IB.hs_of_mem (_ih h1)) ub k;
UV.as_seq_sel (IB.hs_of_mem (_ih h2)) ub k;
UV.put_sel (IB.hs_of_mem (_ih h1)) ub k;
UV.put_sel (IB.hs_of_mem (_ih h2)) ub k;
UV.length_eq ub
let length_up64 (b:buffer64) (h:vale_heap) (k:nat{k < buffer_length b}) (i:nat{i < 8}) : Lemma
(scale8 k + i <= DV.length (get_downview b.bsrc)) =
let vb = UV.mk_buffer (get_downview b.bsrc) uint64_view in
UV.length_eq vb
val same_mem_get_heap_val64 (b:buffer64{buffer_writeable b})
(i:nat{i < buffer_length b})
(v:nat64)
(k:nat{k < buffer_length b})
(h1:vale_heap{List.memP b (IB.ptrs_of_mem (_ih h1))})
(h2:vale_heap{h2 == buffer_write b i v h1})
(mem1:S.machine_heap{IB.correct_down_p (_ih h1) mem1 b})
(mem2:S.machine_heap{IB.correct_down_p (_ih h2) mem2 b}) : Lemma
(requires (Seq.index (buffer_as_seq h1 b) k == Seq.index (buffer_as_seq h2 b) k))
(ensures (let ptr = buffer_addr b h1 + scale8 k in
forall (x:int).{:pattern (mem1.[x])} ptr <= x /\ x < ptr + 8 ==> mem1.[x] == mem2.[x]))
let same_mem_get_heap_val64 b j v k h1 h2 mem1 mem2 =
let ptr = buffer_addr b h1 + scale8 k in
let addr = buffer_addr b h1 in
let aux (x:int{ptr <= x /\ x < ptr + 8}) : Lemma (mem1.[x] == mem2.[x]) =
let i = x - ptr in
let db = get_downview b.bsrc in
let ub = UV.mk_buffer db uint64_view in
UV.as_seq_sel (IB.hs_of_mem (_ih h1)) ub k;
UV.as_seq_sel (IB.hs_of_mem (_ih h2)) ub k;
same_mem_eq_slices64 b j v k h1 h2 mem1 mem2;
let s1 = (Seq.slice (DV.as_seq (IB.hs_of_mem (_ih h1)) db) (k * 8) (k * 8 + 8)) in
let s2 = (Seq.slice (DV.as_seq (IB.hs_of_mem (_ih h2)) db) (k * 8) (k * 8 + 8)) in
assert (Seq.index s1 i == Seq.index (DV.as_seq (IB.hs_of_mem (_ih h1)) db) (k * 8 + i));
length_up64 b h1 k i;
assert (mem1.[addr+(scale8 k + i)] == UInt8.v (Seq.index (DV.as_seq (IB.hs_of_mem (_ih h1)) db) (k * 8 + i)));
assert (Seq.index s2 i == Seq.index (DV.as_seq (IB.hs_of_mem (_ih h2)) db) (k * 8 + i));
length_up64 b h2 k i;
assert (mem2.[addr+(scale8 k + i)] == UInt8.v (Seq.index (DV.as_seq (IB.hs_of_mem (_ih h2)) db) (k * 8 + i)))
in
Classical.forall_intro aux;
assert (forall i. addr + (scale8 k + i) == ptr + i)
let rec written_buffer_down64_aux1
(b:buffer64{buffer_writeable b})
(i:nat{i < buffer_length b})
(v:nat64)
(h:vale_heap{List.memP b (IB.ptrs_of_mem (_ih h))})
(base:nat{base == buffer_addr b h})
(k:nat) (h1:vale_heap{h1 == buffer_write b i v h})
(mem1:S.machine_heap{IB.correct_down (_ih h) mem1})
(mem2:S.machine_heap{IB.correct_down (_ih h1) mem2 /\
(forall j.{:pattern (mem1.[j]) \/ (mem2.[j])}
base <= j /\ j < base + k * 8 ==>
mem1.[j] == mem2.[j])})
: Lemma
      (ensures (forall j. {:pattern (mem1.[j]) \/ (mem2.[j])}
j >= base /\ j < base + scale8 i ==>
mem1.[j] == mem2.[j]))
(decreases %[i-k]) =
if k >= i then ()
else begin
let ptr = base + scale8 k in
same_mem_get_heap_val64 b i v k h h1 mem1 mem2;
heap_shift mem1 mem2 ptr 8;
written_buffer_down64_aux1 b i v h base (k+1) h1 mem1 mem2
end
let rec written_buffer_down64_aux2
(b:buffer64{buffer_writeable b})
(i:nat{i < buffer_length b})
(v:nat64)
(h:vale_heap{List.memP b (IB.ptrs_of_mem (_ih h))})
(base:nat{base == buffer_addr b h})
(n:nat{n == buffer_length b})
(k:nat{k > i}) (h1:vale_heap{h1 == buffer_write b i v h})
(mem1:S.machine_heap{IB.correct_down (_ih h) mem1})
(mem2:S.machine_heap{IB.correct_down (_ih h1) mem2 /\
(forall j. {:pattern (mem1.[j]) \/ (mem2.[j])}
base + scale8 (i+1) <= j /\ j < base + k * 8 ==>
mem1.[j] == mem2.[j])})
: Lemma
(ensures (forall j. {:pattern (mem1.[j]) \/ (mem2.[j])}
j >= base + scale8 (i+1) /\ j < base + scale8 n ==>
mem1.[j] == mem2.[j]))
(decreases %[n-k]) =
if k >= n then ()
else begin
let ptr = base + scale8 k in
same_mem_get_heap_val64 b i v k h h1 mem1 mem2;
heap_shift mem1 mem2 ptr 8;
written_buffer_down64_aux2 b i v h base n (k+1) h1 mem1 mem2
end
let written_buffer_down64 (b:buffer64{buffer_writeable b}) (i:nat{i < buffer_length b}) (v:nat64) (h:vale_heap)
: Lemma
(requires List.memP b (IB.ptrs_of_mem (_ih h)))
(ensures (
let mem1 = I.down_mem (_ih h) in
let h1 = buffer_write b i v h in
let mem2 = I.down_mem (_ih h1) in
let base = buffer_addr b h in
let n = buffer_length b in
forall j. {:pattern (mem1.[j]) \/ (mem2.[j])}
(base <= j /\ j < base + scale8 i) \/
(base + scale8 (i+1) <= j /\ j < base + scale8 n) ==>
mem1.[j] == mem2.[j]))
= let mem1 = I.down_mem (_ih h) in
let h1 = buffer_write b i v h in
let mem2 = I.down_mem (_ih h1) in
let base = buffer_addr b h in
let n = buffer_length b in
written_buffer_down64_aux1 b i v h base 0 h1 mem1 mem2;
written_buffer_down64_aux2 b i v h base n (i+1) h1 mem1 mem2
let unwritten_buffer_down (t:base_typ) (b:buffer t{buffer_writeable b})
(i:nat{i < buffer_length b})
(v:base_typ_as_vale_type t)
(h:vale_heap{List.memP b (IB.ptrs_of_mem (_ih h))})
: Lemma
(ensures (
let mem1 = I.down_mem (_ih h) in
let h1 = buffer_write b i v h in
let mem2 = I.down_mem (_ih h1) in
forall (a:b8{List.memP a (IB.ptrs_of_mem (_ih h)) /\ a =!= b}) j. {:pattern mem1.[j]; List.memP a (IB.ptrs_of_mem (_ih h)) \/ mem2.[j]; List.memP a (IB.ptrs_of_mem (_ih h))}
let base = (IB.addrs_of_mem (_ih h)) a in
j >= base /\ j < base + DV.length (get_downview a.bsrc) ==> mem1.[j] == mem2.[j]))
= let aux (a:b8{a =!= b /\ List.memP a (IB.ptrs_of_mem (_ih h))})
: Lemma
(ensures (
let base = (IB.addrs_of_mem (_ih h)) a in
let mem1 = I.down_mem (_ih h) in
let h1 = buffer_write b i v h in
let mem2 = I.down_mem (_ih h1) in
forall j.
j >= base /\ j < base + DV.length (get_downview a.bsrc) ==>
mem1.[j] == mem2.[j]))
= let db = get_downview a.bsrc in
if DV.length db = 0 then ()
else
let mem1 = I.down_mem (_ih h) in
let h1 = buffer_write b i v h in
let mem2 = I.down_mem (_ih h1) in
let base = (IB.addrs_of_mem (_ih h)) a in
let s0 = DV.as_seq (IB.hs_of_mem (_ih h)) db in
let s1 = DV.as_seq (IB.hs_of_mem (_ih h1)) db in
opaque_assert (`%IB.list_disjoint_or_eq) IB.list_disjoint_or_eq IB.list_disjoint_or_eq_def (MB.disjoint a.bsrc b.bsrc);
lemma_dv_equal (IB.down_view a.src) a.bsrc (IB.hs_of_mem (_ih h)) (IB.hs_of_mem (_ih h1));
assert (Seq.equal s0 s1);
assert (forall (j:int).{:pattern (mem1.[j])}
base <= j /\ j < base + Seq.length s0 ==> v_to_typ TUInt8 (Seq.index s0 (j - base)) == mem1.[j]);
heap_shift mem1 mem2 base (DV.length db)
in
Classical.forall_intro aux
let store_buffer_down64_mem
(b:buffer64{buffer_writeable b})
(i:nat{i < buffer_length b})
(v:nat64)
(h:vale_heap{List.memP b (IB.ptrs_of_mem (_ih h))})
: Lemma
(ensures (
let mem1 = I.down_mem (_ih h) in
let h1 = buffer_write b i v h in
let mem2 = I.down_mem (_ih h1) in
let base = buffer_addr b h in
forall (j:int). {:pattern mem1.[j] \/ mem2.[j]}
j < base + scale8 i \/ j >= base + scale8 (i+1) ==>
mem1.[j] == mem2.[j]))
= let mem1 = I.down_mem (_ih h) in
let h1 = buffer_write b i v h in
let mem2 = I.down_mem (_ih h1) in
let base = buffer_addr b h in
let n = buffer_length b in
let aux (j:int)
: Lemma
(j < base + scale8 i \/ j >= base + scale8 (i+1) ==>
mem1.[j] == mem2.[j])
=
I.addrs_set_lemma_all ();
if j >= base && j < base + DV.length (get_downview b.bsrc) then begin
written_buffer_down64 b i v h;
length_t_eq (TUInt64) b
end
else if not (I.valid_addr (_ih h) j)
then I.same_unspecified_down (IB.hs_of_mem (_ih h)) (IB.hs_of_mem (_ih h1)) (IB.ptrs_of_mem (_ih h))
else unwritten_buffer_down TUInt64 b i v h
in
Classical.forall_intro aux
let store_buffer_aux_down64_mem (ptr:int) (v:nat64) (h:vale_heap{writeable_mem64 ptr h})
: Lemma
(ensures (
let mem1 = I.down_mem (_ih h) in
let h1 = store_mem (TUInt64) ptr v h in
let mem2 = I.down_mem (_ih h1) in
forall j. {:pattern mem1.[j] \/ mem2.[j]}
j < ptr \/ j >= ptr + 8 ==>
mem1.[j] == mem2.[j]))
= let t = TUInt64 in
let h1 = store_mem t ptr v h in
let b = Some?.v (find_writeable_buffer t ptr h) in
length_t_eq t b;
let i = get_addr_in_ptr t (buffer_length b) (buffer_addr b h) ptr 0 in
store_buffer_write t ptr v h;
assert (buffer_addr b h + scale8 i == ptr);
assert (buffer_addr b h + scale8 (i+1) == ptr + 8);
store_buffer_down64_mem b i v h
let store_buffer_aux_down64_mem2 (ptr:int) (v:nat64) (h:vale_heap{writeable_mem64 ptr h})
: Lemma
(ensures (
let h1 = store_mem (TUInt64) ptr v h in
let mem2 = I.down_mem (_ih h1) in
S.get_heap_val64 ptr mem2 == v))
= let t = TUInt64 in
let b = Some?.v (find_writeable_buffer t ptr h) in
length_t_eq t b;
let i = get_addr_in_ptr t (buffer_length b) (buffer_addr b h) ptr 0 in
let h1 = store_mem t ptr v h in
let mem2 = I.down_mem (_ih h1) in
store_buffer_write t ptr v h;
assert (Seq.index (buffer_as_seq h1 b) i == v);
index64_get_heap_val64 h1 b mem2 i
let in_bounds64 (h:vale_heap) (b:buffer64) (i:nat{i < buffer_length b})
: Lemma (scale8 i + 8 <= DV.length (get_downview b.bsrc))
=
length_t_eq TUInt64 b
let bytes_valid64 ptr h =
reveal_opaque (`%S.valid_addr64) S.valid_addr64;
let t = TUInt64 in
let b = get_addr_ptr t ptr h in
let i = get_addr_in_ptr t (buffer_length b) (buffer_addr b h) ptr 0 in
in_bounds64 h b i;
I.addrs_set_mem (_ih h) b ptr;
I.addrs_set_mem (_ih h) b (ptr+1);
I.addrs_set_mem (_ih h) b (ptr+2);
I.addrs_set_mem (_ih h) b (ptr+3);
I.addrs_set_mem (_ih h) b (ptr+4);
I.addrs_set_mem (_ih h) b (ptr+5);
I.addrs_set_mem (_ih h) b (ptr+6);
I.addrs_set_mem (_ih h) b (ptr+7)
val same_mem_get_heap_val128 (b:buffer128)
(i:nat{i < buffer_length b})
(v:quad32)
(k:nat{k < buffer_length b})
(h1:vale_heap{List.memP b (IB.ptrs_of_mem (_ih h1)) /\ buffer_writeable b})
(h2:vale_heap{h2 == buffer_write b i v h1})
(mem1:S.machine_heap{IB.correct_down_p (_ih h1) mem1 b})
(mem2:S.machine_heap{IB.correct_down_p (_ih h2) mem2 b}) : Lemma
(requires (Seq.index (buffer_as_seq h1 b) k == Seq.index (buffer_as_seq h2 b) k))
(ensures (let ptr = buffer_addr b h1 + scale16 k in
forall i.{:pattern mem1.[i]} i >= ptr /\ i < ptr+16 ==> mem1.[i] == mem2.[i]))
val same_mem_eq_slices128 (b:buffer128)
(i:nat{i < buffer_length b})
(v:quad32)
(k:nat{k < buffer_length b})
(h1:vale_heap{List.memP b (IB.ptrs_of_mem (_ih h1)) /\ buffer_writeable b})
(h2:vale_heap{h2 == buffer_write b i v h1})
(mem1:S.machine_heap{IB.correct_down_p (_ih h1) mem1 b})
(mem2:S.machine_heap{IB.correct_down_p (_ih h2) mem2 b}) : Lemma
(requires (Seq.index (buffer_as_seq h1 b) k == Seq.index (buffer_as_seq h2 b) k))
(ensures (
k * 16 + 16 <= DV.length (get_downview b.bsrc) /\
Seq.slice (DV.as_seq (IB.hs_of_mem (_ih h1)) (get_downview b.bsrc)) (k * 16) (k * 16 + 16) ==
Seq.slice (DV.as_seq (IB.hs_of_mem (_ih h2)) (get_downview b.bsrc)) (k * 16) (k * 16 + 16)))
let same_mem_eq_slices128 b i v k h1 h2 mem1 mem2 =
let t = TUInt128 in
let db = get_downview b.bsrc in
let ub = UV.mk_buffer db (uint_view t) in
UV.as_seq_sel (IB.hs_of_mem (_ih h1)) ub k;
UV.as_seq_sel (IB.hs_of_mem (_ih h2)) ub k;
UV.put_sel (IB.hs_of_mem (_ih h1)) ub k;
UV.put_sel (IB.hs_of_mem (_ih h2)) ub k;
UV.length_eq ub
let length_up128 (b:buffer128) (h:vale_heap) (k:nat{k < buffer_length b}) (i:nat{i < 16}) : Lemma
(scale16 k + i <= DV.length (get_downview b.bsrc)) =
let vb = UV.mk_buffer (get_downview b.bsrc) uint128_view in
UV.length_eq vb
let same_mem_get_heap_val128 b j v k h1 h2 mem1 mem2 =
let ptr = buffer_addr b h1 + scale16 k in
let addr = buffer_addr b h1 in
let aux (i:nat{ptr <= i /\ i < ptr+16}) : Lemma (mem1.[i] == mem2.[i]) =
let db = get_downview b.bsrc in
let ub = UV.mk_buffer db uint128_view in
UV.as_seq_sel (IB.hs_of_mem (_ih h1)) ub k;
UV.as_seq_sel (IB.hs_of_mem (_ih h2)) ub k;
same_mem_eq_slices128 b j v k h1 h2 mem1 mem2;
let s1 = (Seq.slice (DV.as_seq (IB.hs_of_mem (_ih h1)) db) (k * 16) (k * 16 + 16)) in
let s2 = (Seq.slice (DV.as_seq (IB.hs_of_mem (_ih h2)) db) (k * 16) (k * 16 + 16)) in
assert (Seq.index s1 (i - ptr) == Seq.index (DV.as_seq (IB.hs_of_mem (_ih h1)) db) (k * 16 + (i-ptr)));
length_up128 b h1 k (i-ptr);
assert (mem1.[i] == UInt8.v (Seq.index (DV.as_seq (IB.hs_of_mem (_ih h1)) db) (k * 16 + (i-ptr))));
assert (Seq.index s2 (i-ptr) == Seq.index (DV.as_seq (IB.hs_of_mem (_ih h2)) db) (k * 16 + (i-ptr)));
length_up128 b h2 k (i-ptr);
assert (mem2.[addr+(scale16 k + (i-ptr))] == UInt8.v (Seq.index (DV.as_seq (IB.hs_of_mem (_ih h2)) db) (k * 16 + (i-ptr))));
assert (forall i. addr + (scale16 k + (i-ptr)) == i)
in
Classical.forall_intro aux
let in_bounds128 (h:vale_heap) (b:buffer128) (i:nat{i < buffer_length b}) : Lemma
(scale16 i + 16 <= DV.length (get_downview b.bsrc))
=
length_t_eq TUInt128 b
#push-options "--z3rlimit 20"
#restart-solver
let bytes_valid128 ptr h =
reveal_opaque (`%S.valid_addr128) S.valid_addr128;
IB.list_disjoint_or_eq_reveal ();
let t = TUInt128 in
let b = get_addr_ptr t ptr h in
let i = get_addr_in_ptr t (buffer_length b) (buffer_addr b h) ptr 0 in
in_bounds128 h b i;
I.addrs_set_mem (_ih h) b ptr;
I.addrs_set_mem (_ih h) b (ptr+1);
I.addrs_set_mem (_ih h) b (ptr+2);
I.addrs_set_mem (_ih h) b (ptr+3);
I.addrs_set_mem (_ih h) b (ptr+4);
I.addrs_set_mem (_ih h) b (ptr+5);
I.addrs_set_mem (_ih h) b (ptr+6);
I.addrs_set_mem (_ih h) b (ptr+7);
I.addrs_set_mem (_ih h) b (ptr+8);
I.addrs_set_mem (_ih h) b (ptr+9);
I.addrs_set_mem (_ih h) b (ptr+10);
I.addrs_set_mem (_ih h) b (ptr+11);
I.addrs_set_mem (_ih h) b (ptr+12);
I.addrs_set_mem (_ih h) b (ptr+13);
I.addrs_set_mem (_ih h) b (ptr+14);
I.addrs_set_mem (_ih h) b (ptr+15)
#pop-options
let equiv_load_mem64 ptr h =
let t = TUInt64 in
let b = get_addr_ptr t ptr h in
let i = get_addr_in_ptr t (buffer_length b) (buffer_addr b h) ptr 0 in
let addr = buffer_addr b h in
let contents = DV.as_seq (_ih h).IB.hs (get_downview b.bsrc) in
let heap = get_heap h in
index64_get_heap_val64 h b heap i;
lemma_load_mem64 b i h
//let low_lemma_valid_mem64 b i h =
// lemma_valid_mem64 b i h;
// bytes_valid64 (buffer_addr b h + scale8 i) h
//let low_lemma_load_mem64 b i h =
// lemma_valid_mem64 b i h;
// lemma_load_mem64 b i h;
// equiv_load_mem64 (buffer_addr b h + scale8 i) h
//let same_domain_update64 b i v h =
// low_lemma_valid_mem64 b i h;
// Vale.Arch.MachineHeap.same_domain_update64 (buffer_addr b h + scale8 i) v (get_heap h)
open Vale.X64.BufferViewStore
let low_lemma_store_mem64_aux
(b:buffer64)
(heap:S.machine_heap)
(i:nat{i < buffer_length b})
(v:nat64)
(h:vale_heap{buffer_readable h b /\ buffer_writeable b})
: Lemma
(requires IB.correct_down_p (_ih h) heap b)
(ensures (let heap' = S.update_heap64 (buffer_addr b h + scale8 i) v heap in
let h' = store_mem64 (buffer_addr b h + scale8 i) v h in
(_ih h').IB.hs == DV.upd_seq (_ih h).IB.hs (get_downview b.bsrc) (I.get_seq_heap heap' (_ih h).IB.addrs b))) =
let ptr = buffer_addr b h + scale8 i in
let heap' = S.update_heap64 ptr v heap in
let h' = store_mem64 ptr v h in
lemma_store_mem64 b i v h;
length_t_eq TUInt64 b;
bv_upd_update_heap64 b heap i v (_ih h);
let db = get_downview b.bsrc in
let bv = UV.mk_buffer db Vale.Interop.Views.up_view64 in
assert (UV.upd (_ih h).IB.hs bv i (UInt64.uint_to_t v) == (_ih h').IB.hs)
val valid_state_store_mem64_aux: (i:nat) -> (v:nat64) -> (h:vale_heap) -> Lemma
(requires writeable_mem64 i h)
(ensures (
let heap = get_heap h in
let heap' = S.update_heap64 i v heap in
let h' = store_mem64 i v h in
heap' == I.down_mem (_ih h')
))
let valid_state_store_mem64_aux i v h =
let heap = get_heap h in
let heap' = S.update_heap64 i v heap in
let h1 = store_mem TUInt64 i v h in
store_buffer_aux_down64_mem i v h;
store_buffer_aux_down64_mem2 i v h;
let mem1 = heap' in
let mem2 = I.down_mem (_ih h1) in
let aux () : Lemma (forall j. mem1.[j] == mem2.[j]) =
Vale.Arch.MachineHeap.same_mem_get_heap_val64 i mem1 mem2;
Vale.Arch.MachineHeap.correct_update_get64 i v heap;
Vale.Arch.MachineHeap.frame_update_heap64 i v heap
in let aux2 () : Lemma (Set.equal (Map.domain mem1) (Map.domain mem2)) =
bytes_valid64 i h;
Vale.Arch.MachineHeap.same_domain_update64 i v heap
in aux(); aux2();
Map.lemma_equal_intro mem1 mem2
let low_lemma_load_mem64_full b i vfh t hid =
reveal_opaque (`%valid_layout_buffer_id) valid_layout_buffer_id;
()
#push-options "--z3rlimit 20"
#restart-solver
let low_lemma_store_mem64 b i v h =
lemma_writeable_mem64 b i h;
lemma_store_mem64 b i v h;
valid_state_store_mem64_aux (buffer_addr b h + scale8 i) v h;
let heap = get_heap h in
let heap' = S.update_heap64 (buffer_addr b h + scale8 i) v heap in
low_lemma_store_mem64_aux b heap i v h;
Vale.Arch.MachineHeap.frame_update_heap64 (buffer_addr b h + scale8 i) v heap;
in_bounds64 h b i;
I.addrs_set_lemma_all ();
I.update_buffer_up_mem (_ih h) b heap heap'
#pop-options
#set-options "--z3rlimit 100"
#restart-solver
let lemma_is_full_update
(vfh:vale_full_heap) (h hk hk':vale_heap) (k:heaplet_id) (mh mh' mhk mhk':machine_heap) (mt mt':memtaint)
(t:base_typ) (b:buffer t) (ptr:int) (v_size:nat)
(index:nat) (v:base_typ_as_vale_type t) (tn:taint)
: Lemma
(requires
vfh.vf_layout.vl_inner.vl_heaplets_initialized /\
mem_inv vfh /\
buffer_readable hk b /\
buffer_writeable b /\
index < Seq.length (buffer_as_seq hk b) /\
mt == vfh.vf_layout.vl_taint /\
h == vfh.vf_heap /\
hk == Map16.sel vfh.vf_heaplets k /\
mh == h.mh /\
mhk == hk.mh /\
ptr == buffer_addr b hk + scale_by v_size index /\
mt' == S.update_n ptr v_size (heap_taint (coerce vfh)) tn /\
hk' == buffer_write b index v hk /\
valid_layout_buffer b vfh.vf_layout hk true /\
valid_taint_buf b hk mt tn /\
is_machine_heap_update mh mh' /\ upd_heap h mh' == buffer_write b index v h /\
is_machine_heap_update mhk mhk' /\ upd_heap hk mhk' == buffer_write b index v hk /\
(forall j.{:pattern mh.[j] \/ mh'.[j]} j < ptr \/ j >= ptr + v_size ==> mh.[j] == mh'.[j]) /\
(forall j.{:pattern mhk.[j] \/ mhk'.[j]} j < ptr \/ j >= ptr + v_size ==> mhk.[j] == mhk'.[j]) /\
0 <= scale_by v_size index /\ scale_by v_size index + v_size <= DV.length (get_downview b.bsrc) /\
(forall i.{:pattern mh'.[i] \/ mhk'.[i]} i >= ptr /\ i < ptr + v_size ==> mh'.[i] == mhk'.[i]) /\
True
)
(ensures is_full_update vfh hk' k mh' mt')
=
reveal_opaque (`%valid_layout_buffer_id) valid_layout_buffer_id;
let vfh' = coerce (heap_upd (coerce vfh) mh' mt') in
let dom_upd = Set.intersect (vfh.vf_layout.vl_inner.vl_heaplet_sets k) (Map.domain mhk) in
let mhk'' = Map.concat mhk (Map.restrict dom_upd mh') in
assert (Map.equal mhk'' mhk');
let unchanged (j:heaplet_id) : Lemma
(requires j =!= k)
(ensures Map16.sel vfh'.vf_heaplets j == Map16.sel vfh.vf_heaplets j)
[SMTPat (Map16.sel vfh'.vf_heaplets j)]
=
assert (Map.equal (Map16.sel vfh'.vf_heaplets j).mh (Map16.sel vfh.vf_heaplets j).mh);
I.down_up_identity (Map16.sel vfh.vf_heaplets j).ih;
()
in
assert (Map16.equal vfh'.vf_heaplets (Map16.upd vfh.vf_heaplets k hk'));
assert (Map.equal mt' mt);
Vale.Interop.Heap_s.list_disjoint_or_eq_reveal ();
()
let low_lemma_store_mem64_full b i v vfh t hid =
let (h, mt, hk) = (vfh.vf_heap, vfh.vf_layout.vl_taint, Map16.get vfh.vf_heaplets hid) in
let ptr = buffer_addr b hk + scale8 i in
let mh' = S.update_heap64 ptr v (heap_get (coerce vfh)) in
let mt' = S.update_n ptr 8 (heap_taint (coerce vfh)) t in
let hk' = buffer_write b i v hk in
let mhk' = S.update_heap64 ptr v (get_heap hk) in
reveal_opaque (`%valid_layout_buffer_id) valid_layout_buffer_id;
low_lemma_store_mem64 b i v h;
low_lemma_store_mem64 b i v (Map16.get vfh.vf_heaplets hid);
Vale.Arch.MachineHeap.frame_update_heap64 ptr v h.mh;
Vale.Arch.MachineHeap.frame_update_heap64 ptr v hk.mh;
in_bounds64 hk b i;
Vale.Arch.MachineHeap.same_mem_get_heap_val64 ptr mh' mhk';
lemma_is_full_update vfh h hk hk' hid h.mh mh' hk.mh mhk' mt mt' TUInt64 b ptr 8 i v t;
()
val low_lemma_valid_mem128 (b:buffer128) (i:nat) (h:vale_heap) : Lemma
(requires
i < Seq.length (buffer_as_seq h b) /\
buffer_readable h b
)
(ensures
S.valid_addr128 (buffer_addr b h + scale16 i) (get_heap h)
)
let low_lemma_valid_mem128 b i h =
lemma_valid_mem128 b i h;
bytes_valid128 (buffer_addr b h + scale16 i) h
val equiv_load_mem128_aux: (ptr:int) -> (h:vale_heap) -> Lemma
(requires valid_mem128 ptr h)
(ensures load_mem128 ptr h == S.get_heap_val128 ptr (get_heap h))
let equiv_load_mem128_aux ptr h =
let t = TUInt128 in
let b = get_addr_ptr t ptr h in
let i = get_addr_in_ptr t (buffer_length b) (buffer_addr b h) ptr 0 in
let addr = buffer_addr b h in
let contents = DV.as_seq (_ih h).IB.hs (get_downview b.bsrc) in
let heap = get_heap h in
S.get_heap_val128_reveal ();
index128_get_heap_val128 h b heap i;
lemma_load_mem128 b i h
let equiv_load_mem128 ptr h =
equiv_load_mem128_aux ptr h
val low_lemma_load_mem128 (b:buffer128) (i:nat) (h:vale_heap) : Lemma
(requires
i < Seq.length (buffer_as_seq h b) /\
buffer_readable h b
)
(ensures
S.get_heap_val128 (buffer_addr b h + scale16 i) (get_heap h) == buffer_read b i h
)
let low_lemma_load_mem128 b i h =
lemma_valid_mem128 b i h;
lemma_load_mem128 b i h;
equiv_load_mem128_aux (buffer_addr b h + scale16 i) h
//let same_domain_update128 b i v h =
// low_lemma_valid_mem128 b i h;
// Vale.Arch.MachineHeap.same_domain_update128 (buffer_addr b h + scale16 i) v (get_heap h)
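// low_lemma_store_mem128_aux: after store_mem128 at (buffer_addr b h + scale16 i),
// the resulting hyperstack equals the old one with b's down-view overwritten by the
// bytes of the machine heap patched by S.update_heap128.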
let low_lemma_store_mem128_aux
(b:buffer128)
(heap:S.machine_heap)
(i:nat{i < buffer_length b})
(v:quad32)
(h:vale_heap{buffer_readable h b /\ buffer_writeable b})
: Lemma
(requires IB.correct_down_p (_ih h) heap b)
(ensures (let heap' = S.update_heap128 (buffer_addr b h + scale16 i) v heap in
let h' = store_mem128 (buffer_addr b h + scale16 i) v h in
(_ih h').IB.hs == DV.upd_seq (_ih h).IB.hs (get_downview b.bsrc) (I.get_seq_heap heap' (_ih h).IB.addrs b))) =
let ptr = buffer_addr b h + scale16 i in
let heap' = S.update_heap128 ptr v heap in
let h' = store_mem128 ptr v h in
lemma_store_mem128 b i v h;
length_t_eq TUInt128 b;
bv_upd_update_heap128 b heap i v (_ih h);
let db = get_downview b.bsrc in
let bv = UV.mk_buffer db Vale.Interop.Views.up_view128 in
assert (UV.upd (_ih h).IB.hs bv i v == (_ih h').IB.hs)
val valid_state_store_mem128_aux (i:int) (v:quad32) (h:vale_heap) : Lemma
(requires writeable_mem128 i h)
(ensures (
let heap = get_heap h in
let heap' = S.update_heap128 i v heap in
let h' = store_mem128 i v h in
heap' == I.down_mem (_ih h')
))
#restart-solver
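// written_buffer_down128_aux1/aux2 walk the buffer element by element and show that
// the bytes of b strictly below (aux1) and strictly above (aux2) the written slot i
// are identical in the downed heaps before and after the write.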
let rec written_buffer_down128_aux1
(b:buffer128{buffer_writeable b})
(i:nat{i < buffer_length b})
(v:quad32)
(h:vale_heap{List.memP b (_ih h).IB.ptrs})
(base:nat{base == buffer_addr b h})
(k:nat) (h1:vale_heap{h1 == buffer_write b i v h})
(mem1:S.machine_heap{IB.correct_down (_ih h) mem1})
(mem2:S.machine_heap{IB.correct_down (_ih h1) mem2 /\
(forall j.{:pattern (mem1.[j]) \/ (mem2.[j])}
base <= j /\ j < base + k * 16 ==>
mem1.[j] == mem2.[j])})
: Lemma
(ensures (forall j. {:pattern (mem1.[j]) \/ (mem2.[j])}
j >= base /\ j < base + scale16 i ==>
mem1.[j] == mem2.[j]))
(decreases %[i-k]) =
if k >= i then ()
else begin
let ptr = base + scale16 k in
same_mem_get_heap_val128 b i v k h h1 mem1 mem2;
heap_shift mem1 mem2 ptr 16;
written_buffer_down128_aux1 b i v h base (k+1) h1 mem1 mem2
end
#restart-solver
let rec written_buffer_down128_aux2
(b:buffer128{buffer_writeable b})
(i:nat{i < buffer_length b})
(v:quad32)
(h:vale_heap{List.memP b (_ih h).IB.ptrs})
(base:nat{base == buffer_addr b h})
(n:nat{n == buffer_length b})
(k:nat{k > i}) (h1:vale_heap{h1 == buffer_write b i v h})
(mem1:S.machine_heap{IB.correct_down (_ih h) mem1})
(mem2:S.machine_heap{IB.correct_down (_ih h1) mem2 /\
(forall j. {:pattern (mem1.[j]) \/ (mem2.[j])}
base + scale16 (i+1) <= j /\ j < base + k * 16 ==>
mem1.[j] == mem2.[j])})
: Lemma
(ensures (forall j. {:pattern (mem1.[j]) \/ (mem2.[j])}
j >= base + scale16 (i+1) /\ j < base + scale16 n ==>
mem1.[j] == mem2.[j]))
(decreases %[n-k]) =
if k >= n then ()
else begin
let ptr = base + scale16 k in
same_mem_get_heap_val128 b i v k h h1 mem1 mem2;
heap_shift mem1 mem2 ptr 16;
written_buffer_down128_aux2 b i v h base n (k+1) h1 mem1 mem2
end
let written_buffer_down128 (b:buffer128) (i:nat{i < buffer_length b}) (v:quad32) (h:vale_heap)
: Lemma
(requires List.memP b (_ih h).IB.ptrs /\ buffer_writeable b)
(ensures (
let mem1 = I.down_mem (_ih h) in
let h1 = buffer_write b i v h in
let mem2 = I.down_mem (_ih h1) in
let base = buffer_addr b h in
let n = buffer_length b in
forall j. {:pattern (mem1.[j]) \/ (mem2.[j])}
(base <= j /\ j < base + scale16 i) \/
(base + scale16 (i+1) <= j /\ j < base + scale16 n) ==>
mem1.[j] == mem2.[j]))
= let mem1 = I.down_mem (_ih h) in
let h1 = buffer_write b i v h in
let mem2 = I.down_mem (_ih h1) in
let base = buffer_addr b h in
let n = buffer_length b in
written_buffer_down128_aux1 b i v h base 0 h1 mem1 mem2;
written_buffer_down128_aux2 b i v h base n (i+1) h1 mem1 mem2
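// store_buffer_down128_mem lifts the per-buffer frame to the whole machine heap:
// any address outside the 16 written bytes is unchanged, whether it lies in b,
// in a disjoint buffer, or outside the interop address set.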
let store_buffer_down128_mem
(b:buffer128{buffer_writeable b})
(i:nat{i < buffer_length b})
(v:quad32)
(h:vale_heap{List.memP b (_ih h).IB.ptrs})
: Lemma
(ensures (
let mem1 = I.down_mem (_ih h) in
let h1 = buffer_write b i v h in
let mem2 = I.down_mem (_ih h1) in
let base = buffer_addr b h in
forall (j:int). {:pattern mem1.[j] \/ mem2.[j]}
j < base + scale16 i \/ j >= base + scale16 (i+1) ==>
mem1.[j] == mem2.[j]))
= let mem1 = I.down_mem (_ih h) in
let h1 = buffer_write b i v h in
let mem2 = I.down_mem (_ih h1) in
let base = buffer_addr b h in
let n = buffer_length b in
let aux (j:int)
: Lemma
(j < base + scale16 i \/ j >= base + scale16 (i+1) ==>
mem1.[j] == mem2.[j])
=
I.addrs_set_lemma_all ();
if j >= base && j < base + DV.length (get_downview b.bsrc) then begin
written_buffer_down128 b i v h;
length_t_eq TUInt128 b
end
else if not (I.valid_addr (_ih h) j)
then I.same_unspecified_down (_ih h).IB.hs (_ih h1).IB.hs (_ih h).IB.ptrs
else unwritten_buffer_down TUInt128 b i v h
in
Classical.forall_intro aux
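// The two store_buffer_aux_down128_mem lemmas restate the frame and the stored value
// in terms of a writeable address ptr rather than a (buffer, index) pair.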
let store_buffer_aux_down128_mem (ptr:int) (v:quad32) (h:vale_heap{writeable_mem128 ptr h})
: Lemma
(ensures (
let mem1 = I.down_mem (_ih h) in
let h1 = store_mem TUInt128 ptr v h in
let mem2 = I.down_mem (_ih h1) in
forall j. {:pattern mem1.[j] \/ mem2.[j]}
j < ptr \/ j >= ptr + 16 ==>
mem1.[j] == mem2.[j]))
= let t = TUInt128 in
let h1 = store_mem t ptr v h in
let b = Some?.v (find_writeable_buffer t ptr h) in
length_t_eq t b;
let i = get_addr_in_ptr t (buffer_length b) (buffer_addr b h) ptr 0 in
store_buffer_write t ptr v h;
assert (buffer_addr b h + scale16 i == ptr);
assert (buffer_addr b h + scale16 (i+1) == ptr + 16);
store_buffer_down128_mem b i v h
let store_buffer_aux_down128_mem2 (ptr:int) (v:quad32) (h:vale_heap{writeable_mem128 ptr h})
: Lemma
(ensures (
let h1 = store_mem TUInt128 ptr v h in
let mem2 = I.down_mem (_ih h1) in
Mkfour
(S.get_heap_val32 ptr mem2)
(S.get_heap_val32 (ptr+4) mem2)
(S.get_heap_val32 (ptr+8) mem2)
(S.get_heap_val32 (ptr+12) mem2)
== v)) =
let t = TUInt128 in
let b = Some?.v (find_writeable_buffer t ptr h) in
length_t_eq t b;
let i = get_addr_in_ptr t (buffer_length b) (buffer_addr b h) ptr 0 in
let h1 = store_mem t ptr v h in
let mem2 = I.down_mem (_ih h1) in
store_buffer_write t ptr v h;
assert (Seq.index (buffer_as_seq h1 b) i == v);
index128_get_heap_val128 h1 b mem2 i
let valid_state_store_mem128_aux i v h =
let heap = get_heap h in
let heap' = S.update_heap128 i v heap in
let h1 = store_mem TUInt128 i v h in
store_buffer_aux_down128_mem i v h;
store_buffer_aux_down128_mem2 i v h;
let mem1 = heap' in
let mem2 = I.down_mem (_ih h1) in
let aux () : Lemma (forall j. mem1.[j] == mem2.[j]) =
Vale.Arch.MachineHeap.correct_update_get128 i v heap;
Vale.X64.Machine_Semantics_s.get_heap_val128_reveal ();
Vale.Arch.MachineHeap.same_mem_get_heap_val32 i mem1 mem2;
Vale.Arch.MachineHeap.same_mem_get_heap_val32 (i+4) mem1 mem2;
Vale.Arch.MachineHeap.same_mem_get_heap_val32 (i+8) mem1 mem2;
Vale.Arch.MachineHeap.same_mem_get_heap_val32 (i+12) mem1 mem2;
Vale.Arch.MachineHeap.frame_update_heap128 i v heap
in
let aux2 () : Lemma (Set.equal (Map.domain mem1) (Map.domain mem2)) =
bytes_valid128 i h;
Vale.Arch.MachineHeap.same_domain_update128 i v heap
in aux (); aux2 ();
Map.lemma_equal_intro mem1 mem2
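// The *_full lemmas relate loads and stores on a single heaplet
// (Map16.get vfh.vf_heaplets hid) to the top-level vf_heap and taint map
// of the full vale heap.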
let low_lemma_load_mem128_full b i vfh t hid =
reveal_opaque (`%valid_layout_buffer_id) valid_layout_buffer_id;
()
let low_lemma_store_mem128 b i v h =
lemma_valid_mem128 b i h;
lemma_store_mem128 b i v h;
valid_state_store_mem128_aux (buffer_addr b h + scale16 i) v h;
let heap = get_heap h in
let heap' = S.update_heap128 (buffer_addr b h + scale16 i) v heap in
let h' = store_mem128 (buffer_addr b h + scale16 i) v h in
low_lemma_store_mem128_aux b heap i v h;
Vale.Arch.MachineHeap.frame_update_heap128 (buffer_addr b h + scale16 i) v heap;
in_bounds128 h b i;
I.addrs_set_lemma_all ();
I.update_buffer_up_mem (_ih h) b heap heap'
let low_lemma_store_mem128_full b i v vfh t hid =
let (h, mt, hk) = (vfh.vf_heap, vfh.vf_layout.vl_taint, Map16.get vfh.vf_heaplets hid) in
let ptr = buffer_addr b hk + scale16 i in
let mh' = S.update_heap128 ptr v (heap_get (coerce vfh)) in
let mt' = S.update_n ptr 16 (heap_taint (coerce vfh)) t in
let hk' = buffer_write b i v hk in
let mhk' = S.update_heap128 ptr v (get_heap hk) in
reveal_opaque (`%valid_layout_buffer_id) valid_layout_buffer_id;
low_lemma_store_mem128 b i v h;
low_lemma_store_mem128 b i v (Map16.get vfh.vf_heaplets hid);
Vale.Arch.MachineHeap.frame_update_heap128 ptr v h.mh;
Vale.Arch.MachineHeap.frame_update_heap128 ptr v hk.mh;
in_bounds128 hk b i;
Vale.Arch.MachineHeap.same_mem_get_heap_val128 ptr mh' mhk';
lemma_is_full_update vfh h hk hk' hid h.mh mh' hk.mh mhk' mt mt' TUInt128 b ptr 16 i v t;
()
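// The next lemmas split a 128-bit location into its 64-bit halves (lo64/hi64);
// low_lemma_valid_mem128_64 is verified under the boxwrap arithmetic encoding
// selected by the push-options around it.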
#push-options "--smtencoding.l_arith_repr boxwrap"
let low_lemma_valid_mem128_64 b i h =
reveal_opaque (`%S.valid_addr64) S.valid_addr64;
reveal_opaque (`%S.valid_addr128) S.valid_addr128;
low_lemma_valid_mem128 b i h;
let ptr = buffer_addr b h + scale16 i in
assert (buffer_addr b h + scale16 i + 8 = ptr + 8)
#pop-options
open Vale.Def.Words.Two_s
open Vale.Def.Words.Four_s
let low_lemma_load_mem128_lo64 b i h =
low_lemma_load_mem128 b i h;
lo64_reveal ();
S.get_heap_val128_reveal ();
S.get_heap_val64_reveal ();
S.get_heap_val32_reveal ()
let low_lemma_load_mem128_hi64 b i h =
low_lemma_load_mem128 b i h;
hi64_reveal ();
S.get_heap_val128_reveal ();
S.get_heap_val64_reveal ();
S.get_heap_val32_reveal ()
//let same_domain_update128_64 b i v h =
// low_lemma_valid_mem128_64 b i (_ih h);
// Vale.Arch.MachineHeap.same_domain_update64 (buffer_addr b h + scale16 i) v (get_heap h);
// Vale.Arch.MachineHeap.same_domain_update64 (buffer_addr b h + scale16 i + 8) v (get_heap h)
open Vale.Def.Types_s
let frame_get_heap32 (ptr:int) (mem1 mem2:S.machine_heap) : Lemma
(requires (forall i. i >= ptr /\ i < ptr + 4 ==> mem1.[i] == mem2.[i]))
(ensures S.get_heap_val32 ptr mem1 == S.get_heap_val32 ptr mem2) =
S.get_heap_val32_reveal ()
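// update_heap128_lo: when the high 64 bits of v already match the heap contents,
// a 128-bit update reduces to the two 32-bit updates of the low half.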
let update_heap128_lo (ptr:int) (v:quad32) (mem:S.machine_heap) : Lemma
(requires
S.valid_addr128 ptr mem /\
v.hi2 == S.get_heap_val32 (ptr+8) mem /\
v.hi3 == S.get_heap_val32 (ptr+12) mem
)
(ensures S.update_heap128 ptr v mem == | false | false | Vale.X64.Memory_Sems.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 100,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val update_heap128_lo (ptr: int) (v: quad32) (mem: S.machine_heap)
: Lemma
(requires
S.valid_addr128 ptr mem /\ v.hi2 == S.get_heap_val32 (ptr + 8) mem /\
v.hi3 == S.get_heap_val32 (ptr + 12) mem)
(ensures
S.update_heap128 ptr v mem ==
S.update_heap32 (ptr + 4) v.lo1 (S.update_heap32 ptr v.lo0 mem)) | [] | Vale.X64.Memory_Sems.update_heap128_lo | {
"file_name": "vale/code/arch/x64/Vale.X64.Memory_Sems.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | ptr: Prims.int -> v: Vale.Def.Types_s.quad32 -> mem: Vale.Arch.MachineHeap_s.machine_heap
-> FStar.Pervasives.Lemma
(requires
Vale.Arch.MachineHeap_s.valid_addr128 ptr mem /\
Mkfour?.hi2 v == Vale.Arch.MachineHeap_s.get_heap_val32 (ptr + 8) mem /\
Mkfour?.hi3 v == Vale.Arch.MachineHeap_s.get_heap_val32 (ptr + 12) mem)
(ensures
Vale.Arch.MachineHeap_s.update_heap128 ptr v mem ==
Vale.Arch.MachineHeap_s.update_heap32 (ptr + 4)
(Mkfour?.lo1 v)
(Vale.Arch.MachineHeap_s.update_heap32 ptr (Mkfour?.lo0 v) mem)) | {
"end_col": 62,
"end_line": 1066,
"start_col": 2,
"start_line": 1055
} |
FStar.Pervasives.Lemma | val written_buffer_down64
(b: buffer64{buffer_writeable b})
(i: nat{i < buffer_length b})
(v: nat64)
(h: vale_heap)
: Lemma (requires List.memP b (IB.ptrs_of_mem (_ih h)))
(ensures
(let mem1 = I.down_mem (_ih h) in
let h1 = buffer_write b i v h in
let mem2 = I.down_mem (_ih h1) in
let base = buffer_addr b h in
let n = buffer_length b in
forall j. {:pattern (mem1.[ j ])\/(mem2.[ j ])}
(base <= j /\ j < base + scale8 i) \/
(base + scale8 (i + 1) <= j /\ j < base + scale8 n) ==>
mem1.[ j ] == mem2.[ j ])) | [
{
"abbrev": true,
"full_module": "Vale.Interop.Base",
"short_module": "IB"
},
{
"abbrev": false,
"full_module": "Vale.Arch.Heap",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Lib.BufferViewHelpers",
"short_module": null
},
{
"abbrev": true,
"full_module": "LowStar.BufferView.Down",
"short_module": "DV"
},
{
"abbrev": true,
"full_module": "LowStar.BufferView.Up",
"short_module": "UV"
},
{
"abbrev": true,
"full_module": "LowStar.Monotonic.Buffer",
"short_module": "MB"
},
{
"abbrev": true,
"full_module": "Vale.X64.Machine_Semantics_s",
"short_module": "S"
},
{
"abbrev": true,
"full_module": "Vale.Interop",
"short_module": "I"
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Machine_Semantics_s",
"short_module": "S"
},
{
"abbrev": false,
"full_module": "Vale.Lib.Seqs",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.MachineHeap_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Heap",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let written_buffer_down64 (b:buffer64{buffer_writeable b}) (i:nat{i < buffer_length b}) (v:nat64) (h:vale_heap)
: Lemma
(requires List.memP b (IB.ptrs_of_mem (_ih h)))
(ensures (
let mem1 = I.down_mem (_ih h) in
let h1 = buffer_write b i v h in
let mem2 = I.down_mem (_ih h1) in
let base = buffer_addr b h in
let n = buffer_length b in
forall j. {:pattern (mem1.[j]) \/ (mem2.[j])}
(base <= j /\ j < base + scale8 i) \/
(base + scale8 (i+1) <= j /\ j < base + scale8 n) ==>
mem1.[j] == mem2.[j]))
= let mem1 = I.down_mem (_ih h) in
let h1 = buffer_write b i v h in
let mem2 = I.down_mem (_ih h1) in
let base = buffer_addr b h in
let n = buffer_length b in
written_buffer_down64_aux1 b i v h base 0 h1 mem1 mem2;
written_buffer_down64_aux2 b i v h base n (i+1) h1 mem1 mem2 | val written_buffer_down64
(b: buffer64{buffer_writeable b})
(i: nat{i < buffer_length b})
(v: nat64)
(h: vale_heap)
: Lemma (requires List.memP b (IB.ptrs_of_mem (_ih h)))
(ensures
(let mem1 = I.down_mem (_ih h) in
let h1 = buffer_write b i v h in
let mem2 = I.down_mem (_ih h1) in
let base = buffer_addr b h in
let n = buffer_length b in
forall j. {:pattern (mem1.[ j ])\/(mem2.[ j ])}
(base <= j /\ j < base + scale8 i) \/
(base + scale8 (i + 1) <= j /\ j < base + scale8 n) ==>
mem1.[ j ] == mem2.[ j ]))
let written_buffer_down64
(b: buffer64{buffer_writeable b})
(i: nat{i < buffer_length b})
(v: nat64)
(h: vale_heap)
: Lemma (requires List.memP b (IB.ptrs_of_mem (_ih h)))
(ensures
(let mem1 = I.down_mem (_ih h) in
let h1 = buffer_write b i v h in
let mem2 = I.down_mem (_ih h1) in
let base = buffer_addr b h in
let n = buffer_length b in
forall j. {:pattern (mem1.[ j ])\/(mem2.[ j ])}
(base <= j /\ j < base + scale8 i) \/
(base + scale8 (i + 1) <= j /\ j < base + scale8 n) ==>
mem1.[ j ] == mem2.[ j ])) = | false | null | true | let mem1 = I.down_mem (_ih h) in
let h1 = buffer_write b i v h in
let mem2 = I.down_mem (_ih h1) in
let base = buffer_addr b h in
let n = buffer_length b in
written_buffer_down64_aux1 b i v h base 0 h1 mem1 mem2;
written_buffer_down64_aux2 b i v h base n (i + 1) h1 mem1 mem2 | {
"checked_file": "Vale.X64.Memory_Sems.fst.checked",
"dependencies": [
"Vale.X64.Memory.fst.checked",
"Vale.X64.Memory.fst.checked",
"Vale.X64.Machine_Semantics_s.fst.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.BufferViewStore.fsti.checked",
"Vale.Lib.BufferViewHelpers.fst.checked",
"Vale.Interop.Views.fsti.checked",
"Vale.Interop.Heap_s.fst.checked",
"Vale.Interop.Base.fst.checked",
"Vale.Interop.fsti.checked",
"Vale.Def.Words_s.fsti.checked",
"Vale.Def.Words.Two_s.fsti.checked",
"Vale.Def.Words.Four_s.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Def.Opaque_s.fsti.checked",
"Vale.Arch.MachineHeap.fsti.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"Vale.Arch.Heap.fsti.checked",
"prims.fst.checked",
"LowStar.Monotonic.Buffer.fsti.checked",
"LowStar.BufferView.Up.fsti.checked",
"LowStar.BufferView.Down.fsti.checked",
"FStar.UInt8.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.Set.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Option.fst.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.List.fst.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": true,
"source_file": "Vale.X64.Memory_Sems.fst"
} | [
"lemma"
] | [
"Vale.X64.Memory.buffer64",
"Vale.X64.Memory.buffer_writeable",
"Vale.X64.Memory.vuint64",
"Prims.nat",
"Prims.b2t",
"Prims.op_LessThan",
"Vale.X64.Memory.buffer_length",
"Vale.Def.Words_s.nat64",
"Vale.Arch.HeapImpl.vale_heap",
"Vale.X64.Memory_Sems.written_buffer_down64_aux2",
"Prims.op_Addition",
"Prims.unit",
"Vale.X64.Memory_Sems.written_buffer_down64_aux1",
"Prims.int",
"Vale.X64.Memory.buffer_addr",
"Vale.Arch.MachineHeap_s.machine_heap",
"Vale.Interop.Heap_s.correct_down",
"Vale.Arch.HeapImpl._ih",
"Vale.Interop.down_mem",
"Vale.X64.Memory.buffer_write",
"FStar.List.Tot.Base.memP",
"Vale.Interop.Types.b8",
"Vale.Interop.Heap_s.ptrs_of_mem",
"Prims.squash",
"Prims.l_Forall",
"Prims.l_imp",
"Prims.l_or",
"Prims.l_and",
"Prims.op_LessThanOrEqual",
"Vale.X64.Memory.scale8",
"Prims.eq2",
"Vale.Def.Types_s.nat8",
"Vale.X64.Memory.op_String_Access",
"Prims.Nil",
"FStar.Pervasives.pattern"
] | [] | module Vale.X64.Memory_Sems
open FStar.Mul
open Vale.Def.Prop_s
open Vale.Def.Opaque_s
open Vale.X64.Machine_s
open Vale.X64.Memory
open Vale.Def.Words_s
module I = Vale.Interop
module S = Vale.X64.Machine_Semantics_s
module MB = LowStar.Monotonic.Buffer
module UV = LowStar.BufferView.Up
module DV = LowStar.BufferView.Down
open Vale.Lib.BufferViewHelpers
open Vale.Arch.HeapImpl
open Vale.Arch.Heap
friend Vale.X64.Memory
module IB = Vale.Interop.Base
let same_domain h m = Set.equal (IB.addrs_set (_ih h)) (Map.domain m)
let lemma_same_domains h m1 m2 = ()
let get_heap h = I.down_mem (_ih h)
let upd_heap h m = mi_heap_upd h m
//let lemma_upd_get_heap h = I.down_up_identity (_ih h)
let lemma_get_upd_heap h m = I.up_down_identity (_ih h) m
let lemma_heap_impl = ()
let lemma_heap_get_heap h = ()
let lemma_heap_taint h = ()
//let lemma_heap_upd_heap h mh mt = ()
[@"opaque_to_smt"]
let rec set_of_range (a:int) (n:nat) : Pure (Set.set int)
(requires True)
(ensures fun s -> (forall (i:int).{:pattern Set.mem i s} Set.mem i s <==> a <= i /\ i < a + n))
=
if n = 0 then Set.empty else Set.union (set_of_range a (n - 1)) (Set.singleton (a + n - 1))
let buffer_info_has_addr (bi:buffer_info) (a:int) =
let b = bi.bi_buffer in
let addr = Vale.Interop.Heap_s.global_addrs_map b in
let len = DV.length (get_downview b.bsrc) in
addr <= a /\ a < addr + len
let buffer_info_has_addr_opt (bi:option buffer_info) (a:int) =
match bi with
| None -> False
| Some bi -> buffer_info_has_addr bi a
#set-options "--z3rlimit 100"
let rec make_owns_rec (h:vale_heap) (bs:Seq.seq buffer_info) (n:nat{n <= Seq.length bs}) :
GTot ((int -> option (n:nat{n < Seq.length bs})) & (heaplet_id -> Set.set int))
=
if n = 0 then ((fun _ -> None), (fun _ -> Set.empty)) else
let (m0, s0) = make_owns_rec h bs (n - 1) in
let bi = Seq.index bs (n - 1) in
let b = bi.bi_buffer in
let hi = bi.bi_heaplet in
let addr = Vale.Interop.Heap_s.global_addrs_map b in
let len = DV.length (get_downview b.bsrc) in
let s_b = set_of_range addr len in
let s i = if i = hi then Set.union (s0 i) s_b else s0 i in
let m a = if addr <= a && a < addr + len then Some (n - 1) else m0 a in
(m, s)
[@"opaque_to_smt"]
let make_owns (h:vale_heap) (bs:Seq.seq buffer_info) (n:nat{n <= Seq.length bs}) :
GTot ((int -> option (n:nat{n < Seq.length bs})) & (heaplet_id -> Set.set int))
=
make_owns_rec h bs n
let rec lemma_make_owns (h:vale_heap) (bs:Seq.seq buffer_info) (n:nat) : Lemma
(requires
n <= Seq.length bs /\
(forall (i:nat).{:pattern Seq.index bs i} i < Seq.length bs ==> buffer_readable h (Seq.index bs i).bi_buffer) /\
(forall (i1 i2:nat).{:pattern (Seq.index bs i1); (Seq.index bs i2)}
i1 < Seq.length bs /\ i2 < Seq.length bs ==> buffer_info_disjoint (Seq.index bs i1) (Seq.index bs i2))
)
(ensures (
let (m, s) = make_owns h bs n in
(forall (i:heaplet_id) (a:int).{:pattern Set.mem a (s i)}
(Set.mem a (s i) <==> Option.mapTot (fun n -> (Seq.index bs n).bi_heaplet) (m a) == Some i) /\
(Set.mem a (s i) ==> buffer_info_has_addr_opt (Option.mapTot (fun n -> Seq.index bs n) (m a)) a) /\
(Set.mem a (s i) ==> Set.mem a (Map.domain h.mh))
) /\
(forall (k:nat) (a:int).{:pattern Set.mem a (s (Seq.index bs k).bi_heaplet)}
k < n /\ buffer_info_has_addr (Seq.index bs k) a ==> Set.mem a (s (Seq.index bs k).bi_heaplet))
))
=
reveal_opaque (`%make_owns) make_owns;
if n = 0 then () else
let _ = make_owns h bs (n - 1) in
let (m, s) = make_owns h bs n in
lemma_make_owns h bs (n - 1);
let bi = Seq.index bs (n - 1) in
let b = bi.bi_buffer in
let hi = bi.bi_heaplet in
let addr = Vale.Interop.Heap_s.global_addrs_map b in
let len = DV.length (get_downview b.bsrc) in
let s_b = set_of_range addr len in
let lem1 (a:int) : Lemma
(requires Set.mem a s_b)
(ensures Set.mem a (Map.domain h.mh))
[SMTPat (Set.mem a (Map.domain h.mh))]
=
I.addrs_set_mem h.ih b a
in
let lem2 (i:heaplet_id) (a:int) : Lemma
(requires i =!= hi /\ Set.mem a (Set.intersect s_b (s i)))
(ensures False)
[SMTPat (Set.mem a (s i))]
=
reveal_opaque (`%addr_map_pred) addr_map_pred
in
()
#push-options "--initial_fuel 1 --max_fuel 1 --initial_ifuel 2 --max_ifuel 2"
let rec lemma_loc_mutable_buffers_rec (l:list buffer_info) (s:Seq.seq buffer_info) (n:nat) : Lemma
(requires
n + List.length l == Seq.length s /\
list_to_seq_post l s n
)
(ensures (
let modloc = loc_mutable_buffers l in
forall (i:nat).{:pattern Seq.index s i} n <= i /\ i < Seq.length s ==> (
let bi = Seq.index s i in
bi.bi_mutable == Mutable ==> loc_includes modloc (loc_buffer bi.bi_buffer))
))
(decreases l)
=
match l with
| [] -> ()
| h::t -> lemma_loc_mutable_buffers_rec t s (n + 1)
#pop-options
let lemma_loc_mutable_buffers (l:list buffer_info) : Lemma
(ensures (
let s = list_to_seq l in
forall (i:nat).{:pattern Seq.index s i} i < Seq.length s ==> (
let bi = Seq.index s i in
bi.bi_mutable == Mutable ==> loc_includes (loc_mutable_buffers l) (loc_buffer bi.bi_buffer))
))
=
lemma_list_to_seq l;
lemma_loc_mutable_buffers_rec l (list_to_seq l) 0
let create_heaplets buffers h1 =
let bs = list_to_seq buffers in
let modloc = loc_mutable_buffers buffers in
let layout1 = h1.vf_layout in
let layin1 = layout1.vl_inner in
let (hmap, hsets) = make_owns h1.vf_heap bs (Seq.length bs) in
let hmap a = Option.mapTot (fun n -> (Seq.index bs n).bi_heaplet) (hmap a) in
let l = {
vl_heaplets_initialized = true;
vl_heaplet_map = hmap;
vl_heaplet_sets = hsets;
vl_old_heap = h1.vf_heap;
vl_buffers = bs;
vl_mod_loc = modloc;
} in
let layout2 = {layout1 with vl_inner = l} in
let h2 = {
vf_layout = layout2;
vf_heap = h1.vf_heap;
vf_heaplets = h1.vf_heaplets;
} in
h2
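// lemma_create_heaplets justifies the heaplet assignment built by create_heaplets,
// via lemma_make_owns and lemma_loc_mutable_buffers.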
let lemma_create_heaplets buffers h1 =
let bs = list_to_seq buffers in
let h2 = create_heaplets buffers h1 in
assert (h2.vf_layout.vl_inner.vl_buffers == bs); // REVIEW: why is this necessary, even with extra ifuel?
lemma_make_owns h1.vf_heap bs (Seq.length bs);
lemma_loc_mutable_buffers buffers;
reveal_opaque (`%valid_layout_buffer_id) valid_layout_buffer_id;
()
let destroy_heaplets h1 =
h1
let lemma_destroy_heaplets h1 =
()
val heap_shift (m1 m2:S.machine_heap) (base:int) (n:nat) : Lemma
(requires (forall i. 0 <= i /\ i < n ==> m1.[base + i] == m2.[base + i]))
(ensures (forall i. {:pattern (m1.[i])} base <= i /\ i < base + n ==> m1.[i] == m2.[i]))
#push-options "--smtencoding.l_arith_repr boxwrap"
let heap_shift m1 m2 base n =
assert (forall i. base <= i /\ i < base + n ==>
m1.[base + (i - base)] == m2.[base + (i - base)])
#pop-options
val same_mem_eq_slices64 (b:buffer64{buffer_writeable b})
(i:nat{i < buffer_length b})
(v:nat64)
(k:nat{k < buffer_length b})
(h1:vale_heap{List.memP b (IB.ptrs_of_mem (_ih h1))})
(h2:vale_heap{h2 == buffer_write b i v h1})
(mem1:S.machine_heap{IB.correct_down_p (_ih h1) mem1 b})
(mem2:S.machine_heap{IB.correct_down_p (_ih h2) mem2 b}) : Lemma
(requires (Seq.index (buffer_as_seq h1 b) k == Seq.index (buffer_as_seq h2 b) k))
(ensures (
k * 8 + 8 <= DV.length (get_downview b.bsrc) /\
Seq.slice (DV.as_seq (IB.hs_of_mem (_ih h1)) (get_downview b.bsrc)) (k * 8) (k * 8 + 8) ==
Seq.slice (DV.as_seq (IB.hs_of_mem (_ih h2)) (get_downview b.bsrc)) (k * 8) (k * 8 + 8)))
let same_mem_eq_slices64 b i v k h1 h2 mem1 mem2 =
let t = TUInt64 in
let db = get_downview b.bsrc in
let ub = UV.mk_buffer db (uint_view t) in
UV.as_seq_sel (IB.hs_of_mem (_ih h1)) ub k;
UV.as_seq_sel (IB.hs_of_mem (_ih h2)) ub k;
UV.put_sel (IB.hs_of_mem (_ih h1)) ub k;
UV.put_sel (IB.hs_of_mem (_ih h2)) ub k;
UV.length_eq ub
let length_up64 (b:buffer64) (h:vale_heap) (k:nat{k < buffer_length b}) (i:nat{i < 8}) : Lemma
(scale8 k + i <= DV.length (get_downview b.bsrc)) =
let vb = UV.mk_buffer (get_downview b.bsrc) uint64_view in
UV.length_eq vb
val same_mem_get_heap_val64 (b:buffer64{buffer_writeable b})
(i:nat{i < buffer_length b})
(v:nat64)
(k:nat{k < buffer_length b})
(h1:vale_heap{List.memP b (IB.ptrs_of_mem (_ih h1))})
(h2:vale_heap{h2 == buffer_write b i v h1})
(mem1:S.machine_heap{IB.correct_down_p (_ih h1) mem1 b})
(mem2:S.machine_heap{IB.correct_down_p (_ih h2) mem2 b}) : Lemma
(requires (Seq.index (buffer_as_seq h1 b) k == Seq.index (buffer_as_seq h2 b) k))
(ensures (let ptr = buffer_addr b h1 + scale8 k in
forall (x:int).{:pattern (mem1.[x])} ptr <= x /\ x < ptr + 8 ==> mem1.[x] == mem2.[x]))
let same_mem_get_heap_val64 b j v k h1 h2 mem1 mem2 =
let ptr = buffer_addr b h1 + scale8 k in
let addr = buffer_addr b h1 in
let aux (x:int{ptr <= x /\ x < ptr + 8}) : Lemma (mem1.[x] == mem2.[x]) =
let i = x - ptr in
let db = get_downview b.bsrc in
let ub = UV.mk_buffer db uint64_view in
UV.as_seq_sel (IB.hs_of_mem (_ih h1)) ub k;
UV.as_seq_sel (IB.hs_of_mem (_ih h2)) ub k;
same_mem_eq_slices64 b j v k h1 h2 mem1 mem2;
let s1 = (Seq.slice (DV.as_seq (IB.hs_of_mem (_ih h1)) db) (k * 8) (k * 8 + 8)) in
let s2 = (Seq.slice (DV.as_seq (IB.hs_of_mem (_ih h2)) db) (k * 8) (k * 8 + 8)) in
assert (Seq.index s1 i == Seq.index (DV.as_seq (IB.hs_of_mem (_ih h1)) db) (k * 8 + i));
length_up64 b h1 k i;
assert (mem1.[addr+(scale8 k + i)] == UInt8.v (Seq.index (DV.as_seq (IB.hs_of_mem (_ih h1)) db) (k * 8 + i)));
assert (Seq.index s2 i == Seq.index (DV.as_seq (IB.hs_of_mem (_ih h2)) db) (k * 8 + i));
length_up64 b h2 k i;
assert (mem2.[addr+(scale8 k + i)] == UInt8.v (Seq.index (DV.as_seq (IB.hs_of_mem (_ih h2)) db) (k * 8 + i)))
in
Classical.forall_intro aux;
assert (forall i. addr + (scale8 k + i) == ptr + i)
let rec written_buffer_down64_aux1
(b:buffer64{buffer_writeable b})
(i:nat{i < buffer_length b})
(v:nat64)
(h:vale_heap{List.memP b (IB.ptrs_of_mem (_ih h))})
(base:nat{base == buffer_addr b h})
(k:nat) (h1:vale_heap{h1 == buffer_write b i v h})
(mem1:S.machine_heap{IB.correct_down (_ih h) mem1})
(mem2:S.machine_heap{IB.correct_down (_ih h1) mem2 /\
(forall j.{:pattern (mem1.[j]) \/ (mem2.[j])}
base <= j /\ j < base + k * 8 ==>
mem1.[j] == mem2.[j])})
: Lemma
(ensures (forall j. {:pattern (mem1.[j]) \/ (mem2.[j])}
j >= base /\ j < base + scale8 i ==>
mem1.[j] == mem2.[j]))
(decreases %[i-k]) =
if k >= i then ()
else begin
let ptr = base + scale8 k in
same_mem_get_heap_val64 b i v k h h1 mem1 mem2;
heap_shift mem1 mem2 ptr 8;
written_buffer_down64_aux1 b i v h base (k+1) h1 mem1 mem2
end
let rec written_buffer_down64_aux2
(b:buffer64{buffer_writeable b})
(i:nat{i < buffer_length b})
(v:nat64)
(h:vale_heap{List.memP b (IB.ptrs_of_mem (_ih h))})
(base:nat{base == buffer_addr b h})
(n:nat{n == buffer_length b})
(k:nat{k > i}) (h1:vale_heap{h1 == buffer_write b i v h})
(mem1:S.machine_heap{IB.correct_down (_ih h) mem1})
(mem2:S.machine_heap{IB.correct_down (_ih h1) mem2 /\
(forall j. {:pattern (mem1.[j]) \/ (mem2.[j])}
base + scale8 (i+1) <= j /\ j < base + k * 8 ==>
mem1.[j] == mem2.[j])})
: Lemma
(ensures (forall j. {:pattern (mem1.[j]) \/ (mem2.[j])}
j >= base + scale8 (i+1) /\ j < base + scale8 n ==>
mem1.[j] == mem2.[j]))
(decreases %[n-k]) =
if k >= n then ()
else begin
let ptr = base + scale8 k in
same_mem_get_heap_val64 b i v k h h1 mem1 mem2;
heap_shift mem1 mem2 ptr 8;
written_buffer_down64_aux2 b i v h base n (k+1) h1 mem1 mem2
end
let written_buffer_down64 (b:buffer64{buffer_writeable b}) (i:nat{i < buffer_length b}) (v:nat64) (h:vale_heap)
: Lemma
(requires List.memP b (IB.ptrs_of_mem (_ih h)))
(ensures (
let mem1 = I.down_mem (_ih h) in
let h1 = buffer_write b i v h in
let mem2 = I.down_mem (_ih h1) in
let base = buffer_addr b h in
let n = buffer_length b in
forall j. {:pattern (mem1.[j]) \/ (mem2.[j])}
(base <= j /\ j < base + scale8 i) \/
(base + scale8 (i+1) <= j /\ j < base + scale8 n) ==> | false | false | Vale.X64.Memory_Sems.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 100,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val written_buffer_down64
(b: buffer64{buffer_writeable b})
(i: nat{i < buffer_length b})
(v: nat64)
(h: vale_heap)
: Lemma (requires List.memP b (IB.ptrs_of_mem (_ih h)))
(ensures
(let mem1 = I.down_mem (_ih h) in
let h1 = buffer_write b i v h in
let mem2 = I.down_mem (_ih h1) in
let base = buffer_addr b h in
let n = buffer_length b in
forall j. {:pattern (mem1.[ j ])\/(mem2.[ j ])}
(base <= j /\ j < base + scale8 i) \/
(base + scale8 (i + 1) <= j /\ j < base + scale8 n) ==>
mem1.[ j ] == mem2.[ j ])) | [] | Vale.X64.Memory_Sems.written_buffer_down64 | {
"file_name": "vale/code/arch/x64/Vale.X64.Memory_Sems.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} |
b: Vale.X64.Memory.buffer64{Vale.X64.Memory.buffer_writeable b} ->
i: Prims.nat{i < Vale.X64.Memory.buffer_length b} ->
v: Vale.Def.Words_s.nat64 ->
h: Vale.Arch.HeapImpl.vale_heap
-> FStar.Pervasives.Lemma
(requires
FStar.List.Tot.Base.memP b (Vale.Interop.Heap_s.ptrs_of_mem (Vale.Arch.HeapImpl._ih h)))
(ensures
(let mem1 = Vale.Interop.down_mem (Vale.Arch.HeapImpl._ih h) in
let h1 = Vale.X64.Memory.buffer_write b i v h in
let mem2 = Vale.Interop.down_mem (Vale.Arch.HeapImpl._ih h1) in
let base = Vale.X64.Memory.buffer_addr b h in
let n = Vale.X64.Memory.buffer_length b in
forall (j: Prims.int). {:pattern mem1.[ j ]\/mem2.[ j ]}
base <= j /\ j < base + Vale.X64.Memory.scale8 i \/
base + Vale.X64.Memory.scale8 (i + 1) <= j /\ j < base + Vale.X64.Memory.scale8 n ==>
mem1.[ j ] == mem2.[ j ])) | {
"end_col": 64,
"end_line": 337,
"start_col": 3,
"start_line": 331
} |
FStar.Pervasives.Lemma | val low_lemma_load_mem64_full (b:buffer64) (i:nat) (vfh:vale_full_heap) (t:taint) (hid:heaplet_id) : Lemma
(requires (
let (h, mt) = (Map16.get vfh.vf_heaplets hid, vfh.vf_layout.vl_taint) in
i < Seq.length (buffer_as_seq h b) /\
buffer_readable h b /\
valid_layout_buffer b vfh.vf_layout h false /\
valid_taint_buf64 b h mt t /\
mem_inv vfh
))
(ensures (
let (h, mt) = (Map16.get vfh.vf_heaplets hid, vfh.vf_layout.vl_taint) in
let ptr = buffer_addr b h + scale8 i in
is_full_read vfh.vf_heap h b i /\
// valid_addr64 ptr (heap_get (coerce vfh)) /\
valid_mem64 ptr vfh.vf_heap /\
valid_taint_buf64 b vfh.vf_heap mt t
)) | [
{
"abbrev": false,
"full_module": "Vale.X64.BufferViewStore",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Interop.Base",
"short_module": "IB"
},
{
"abbrev": false,
"full_module": "Vale.Lib.BufferViewHelpers",
"short_module": null
},
{
"abbrev": true,
"full_module": "LowStar.BufferView.Down",
"short_module": "DV"
},
{
"abbrev": true,
"full_module": "LowStar.BufferView.Up",
"short_module": "UV"
},
{
"abbrev": true,
"full_module": "LowStar.Monotonic.Buffer",
"short_module": "MB"
},
{
"abbrev": true,
"full_module": "Vale.Interop",
"short_module": "I"
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Machine_Semantics_s",
"short_module": "S"
},
{
"abbrev": false,
"full_module": "Vale.Lib.Seqs",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.MachineHeap_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Heap",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let low_lemma_load_mem64_full b i vfh t hid =
reveal_opaque (`%valid_layout_buffer_id) valid_layout_buffer_id;
() | val low_lemma_load_mem64_full (b:buffer64) (i:nat) (vfh:vale_full_heap) (t:taint) (hid:heaplet_id) : Lemma
(requires (
let (h, mt) = (Map16.get vfh.vf_heaplets hid, vfh.vf_layout.vl_taint) in
i < Seq.length (buffer_as_seq h b) /\
buffer_readable h b /\
valid_layout_buffer b vfh.vf_layout h false /\
valid_taint_buf64 b h mt t /\
mem_inv vfh
))
(ensures (
let (h, mt) = (Map16.get vfh.vf_heaplets hid, vfh.vf_layout.vl_taint) in
let ptr = buffer_addr b h + scale8 i in
is_full_read vfh.vf_heap h b i /\
// valid_addr64 ptr (heap_get (coerce vfh)) /\
valid_mem64 ptr vfh.vf_heap /\
valid_taint_buf64 b vfh.vf_heap mt t
))
let low_lemma_load_mem64_full b i vfh t hid = | false | null | true | reveal_opaque (`%valid_layout_buffer_id) valid_layout_buffer_id;
() | {
"checked_file": "Vale.X64.Memory_Sems.fst.checked",
"dependencies": [
"Vale.X64.Memory.fst.checked",
"Vale.X64.Memory.fst.checked",
"Vale.X64.Machine_Semantics_s.fst.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.BufferViewStore.fsti.checked",
"Vale.Lib.BufferViewHelpers.fst.checked",
"Vale.Interop.Views.fsti.checked",
"Vale.Interop.Heap_s.fst.checked",
"Vale.Interop.Base.fst.checked",
"Vale.Interop.fsti.checked",
"Vale.Def.Words_s.fsti.checked",
"Vale.Def.Words.Two_s.fsti.checked",
"Vale.Def.Words.Four_s.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Def.Opaque_s.fsti.checked",
"Vale.Arch.MachineHeap.fsti.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"Vale.Arch.Heap.fsti.checked",
"prims.fst.checked",
"LowStar.Monotonic.Buffer.fsti.checked",
"LowStar.BufferView.Up.fsti.checked",
"LowStar.BufferView.Down.fsti.checked",
"FStar.UInt8.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.Set.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Option.fst.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.List.fst.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": true,
"source_file": "Vale.X64.Memory_Sems.fst"
} | [
"lemma"
] | [
"Vale.X64.Memory.buffer64",
"Prims.nat",
"Vale.Arch.HeapImpl.vale_full_heap",
"Vale.Arch.HeapTypes_s.taint",
"Vale.Arch.HeapImpl.heaplet_id",
"Prims.unit",
"FStar.Pervasives.reveal_opaque",
"Vale.Arch.HeapTypes_s.base_typ",
"Vale.X64.Memory.buffer",
"Vale.Arch.HeapImpl.vale_heap_layout",
"FStar.Pervasives.Native.option",
"Prims.bool",
"Vale.Def.Prop_s.prop0",
"Vale.X64.Memory.valid_layout_buffer_id"
] | [] | module Vale.X64.Memory_Sems
open FStar.Mul
open Vale.Def.Prop_s
open Vale.Def.Opaque_s
open Vale.X64.Machine_s
open Vale.X64.Memory
open Vale.Def.Words_s
module I = Vale.Interop
module S = Vale.X64.Machine_Semantics_s
module MB = LowStar.Monotonic.Buffer
module UV = LowStar.BufferView.Up
module DV = LowStar.BufferView.Down
open Vale.Lib.BufferViewHelpers
open Vale.Arch.HeapImpl
open Vale.Arch.Heap
friend Vale.X64.Memory
module IB = Vale.Interop.Base
let same_domain h m = Set.equal (IB.addrs_set (_ih h)) (Map.domain m)
let lemma_same_domains h m1 m2 = ()
let get_heap h = I.down_mem (_ih h)
let upd_heap h m = mi_heap_upd h m
//let lemma_upd_get_heap h = I.down_up_identity (_ih h)
let lemma_get_upd_heap h m = I.up_down_identity (_ih h) m
let lemma_heap_impl = ()
let lemma_heap_get_heap h = ()
let lemma_heap_taint h = ()
//let lemma_heap_upd_heap h mh mt = ()
[@"opaque_to_smt"]
let rec set_of_range (a:int) (n:nat) : Pure (Set.set int)
(requires True)
(ensures fun s -> (forall (i:int).{:pattern Set.mem i s} Set.mem i s <==> a <= i /\ i < a + n))
=
if n = 0 then Set.empty else Set.union (set_of_range a (n - 1)) (Set.singleton (a + n - 1))
let buffer_info_has_addr (bi:buffer_info) (a:int) =
let b = bi.bi_buffer in
let addr = Vale.Interop.Heap_s.global_addrs_map b in
let len = DV.length (get_downview b.bsrc) in
addr <= a /\ a < addr + len
let buffer_info_has_addr_opt (bi:option buffer_info) (a:int) =
match bi with
| None -> False
| Some bi -> buffer_info_has_addr bi a
#set-options "--z3rlimit 100"
let rec make_owns_rec (h:vale_heap) (bs:Seq.seq buffer_info) (n:nat{n <= Seq.length bs}) :
GTot ((int -> option (n:nat{n < Seq.length bs})) & (heaplet_id -> Set.set int))
=
if n = 0 then ((fun _ -> None), (fun _ -> Set.empty)) else
let (m0, s0) = make_owns_rec h bs (n - 1) in
let bi = Seq.index bs (n - 1) in
let b = bi.bi_buffer in
let hi = bi.bi_heaplet in
let addr = Vale.Interop.Heap_s.global_addrs_map b in
let len = DV.length (get_downview b.bsrc) in
let s_b = set_of_range addr len in
let s i = if i = hi then Set.union (s0 i) s_b else s0 i in
let m a = if addr <= a && a < addr + len then Some (n - 1) else m0 a in
(m, s)
[@"opaque_to_smt"]
let make_owns (h:vale_heap) (bs:Seq.seq buffer_info) (n:nat{n <= Seq.length bs}) :
GTot ((int -> option (n:nat{n < Seq.length bs})) & (heaplet_id -> Set.set int))
=
make_owns_rec h bs n
let rec lemma_make_owns (h:vale_heap) (bs:Seq.seq buffer_info) (n:nat) : Lemma
(requires
n <= Seq.length bs /\
(forall (i:nat).{:pattern Seq.index bs i} i < Seq.length bs ==> buffer_readable h (Seq.index bs i).bi_buffer) /\
(forall (i1 i2:nat).{:pattern (Seq.index bs i1); (Seq.index bs i2)}
i1 < Seq.length bs /\ i2 < Seq.length bs ==> buffer_info_disjoint (Seq.index bs i1) (Seq.index bs i2))
)
(ensures (
let (m, s) = make_owns h bs n in
(forall (i:heaplet_id) (a:int).{:pattern Set.mem a (s i)}
(Set.mem a (s i) <==> Option.mapTot (fun n -> (Seq.index bs n).bi_heaplet) (m a) == Some i) /\
(Set.mem a (s i) ==> buffer_info_has_addr_opt (Option.mapTot (fun n -> Seq.index bs n) (m a)) a) /\
(Set.mem a (s i) ==> Set.mem a (Map.domain h.mh))
) /\
(forall (k:nat) (a:int).{:pattern Set.mem a (s (Seq.index bs k).bi_heaplet)}
k < n /\ buffer_info_has_addr (Seq.index bs k) a ==> Set.mem a (s (Seq.index bs k).bi_heaplet))
))
=
reveal_opaque (`%make_owns) make_owns;
if n = 0 then () else
let _ = make_owns h bs (n - 1) in
let (m, s) = make_owns h bs n in
lemma_make_owns h bs (n - 1);
let bi = Seq.index bs (n - 1) in
let b = bi.bi_buffer in
let hi = bi.bi_heaplet in
let addr = Vale.Interop.Heap_s.global_addrs_map b in
let len = DV.length (get_downview b.bsrc) in
let s_b = set_of_range addr len in
let lem1 (a:int) : Lemma
(requires Set.mem a s_b)
(ensures Set.mem a (Map.domain h.mh))
[SMTPat (Set.mem a (Map.domain h.mh))]
=
I.addrs_set_mem h.ih b a
in
let lem2 (i:heaplet_id) (a:int) : Lemma
(requires i =!= hi /\ Set.mem a (Set.intersect s_b (s i)))
(ensures False)
[SMTPat (Set.mem a (s i))]
=
reveal_opaque (`%addr_map_pred) addr_map_pred
in
()
#push-options "--initial_fuel 1 --max_fuel 1 --initial_ifuel 2 --max_ifuel 2"
let rec lemma_loc_mutable_buffers_rec (l:list buffer_info) (s:Seq.seq buffer_info) (n:nat) : Lemma
(requires
n + List.length l == Seq.length s /\
list_to_seq_post l s n
)
(ensures (
let modloc = loc_mutable_buffers l in
forall (i:nat).{:pattern Seq.index s i} n <= i /\ i < Seq.length s ==> (
let bi = Seq.index s i in
bi.bi_mutable == Mutable ==> loc_includes modloc (loc_buffer bi.bi_buffer))
))
(decreases l)
=
match l with
| [] -> ()
| h::t -> lemma_loc_mutable_buffers_rec t s (n + 1)
#pop-options
let lemma_loc_mutable_buffers (l:list buffer_info) : Lemma
(ensures (
let s = list_to_seq l in
forall (i:nat).{:pattern Seq.index s i} i < Seq.length s ==> (
let bi = Seq.index s i in
bi.bi_mutable == Mutable ==> loc_includes (loc_mutable_buffers l) (loc_buffer bi.bi_buffer))
))
=
lemma_list_to_seq l;
lemma_loc_mutable_buffers_rec l (list_to_seq l) 0
let create_heaplets buffers h1 =
let bs = list_to_seq buffers in
let modloc = loc_mutable_buffers buffers in
let layout1 = h1.vf_layout in
let layin1 = layout1.vl_inner in
let (hmap, hsets) = make_owns h1.vf_heap bs (Seq.length bs) in
let hmap a = Option.mapTot (fun n -> (Seq.index bs n).bi_heaplet) (hmap a) in
let l = {
vl_heaplets_initialized = true;
vl_heaplet_map = hmap;
vl_heaplet_sets = hsets;
vl_old_heap = h1.vf_heap;
vl_buffers = bs;
vl_mod_loc = modloc;
} in
let layout2 = {layout1 with vl_inner = l} in
let h2 = {
vf_layout = layout2;
vf_heap = h1.vf_heap;
vf_heaplets = h1.vf_heaplets;
} in
h2
let lemma_create_heaplets buffers h1 =
let bs = list_to_seq buffers in
let h2 = create_heaplets buffers h1 in
assert (h2.vf_layout.vl_inner.vl_buffers == bs); // REVIEW: why is this necessary, even with extra ifuel?
lemma_make_owns h1.vf_heap bs (Seq.length bs);
lemma_loc_mutable_buffers buffers;
reveal_opaque (`%valid_layout_buffer_id) valid_layout_buffer_id;
()
let destroy_heaplets h1 =
h1
let lemma_destroy_heaplets h1 =
()
val heap_shift (m1 m2:S.machine_heap) (base:int) (n:nat) : Lemma
(requires (forall i. 0 <= i /\ i < n ==> m1.[base + i] == m2.[base + i]))
(ensures (forall i. {:pattern (m1.[i])} base <= i /\ i < base + n ==> m1.[i] == m2.[i]))
#push-options "--smtencoding.l_arith_repr boxwrap"
let heap_shift m1 m2 base n =
assert (forall i. base <= i /\ i < base + n ==>
m1.[base + (i - base)] == m2.[base + (i - base)])
#pop-options
val same_mem_eq_slices64 (b:buffer64{buffer_writeable b})
(i:nat{i < buffer_length b})
(v:nat64)
(k:nat{k < buffer_length b})
(h1:vale_heap{List.memP b (IB.ptrs_of_mem (_ih h1))})
(h2:vale_heap{h2 == buffer_write b i v h1})
(mem1:S.machine_heap{IB.correct_down_p (_ih h1) mem1 b})
(mem2:S.machine_heap{IB.correct_down_p (_ih h2) mem2 b}) : Lemma
(requires (Seq.index (buffer_as_seq h1 b) k == Seq.index (buffer_as_seq h2 b) k))
(ensures (
k * 8 + 8 <= DV.length (get_downview b.bsrc) /\
Seq.slice (DV.as_seq (IB.hs_of_mem (_ih h1)) (get_downview b.bsrc)) (k * 8) (k * 8 + 8) ==
Seq.slice (DV.as_seq (IB.hs_of_mem (_ih h2)) (get_downview b.bsrc)) (k * 8) (k * 8 + 8)))
let same_mem_eq_slices64 b i v k h1 h2 mem1 mem2 =
let t = TUInt64 in
let db = get_downview b.bsrc in
let ub = UV.mk_buffer db (uint_view t) in
UV.as_seq_sel (IB.hs_of_mem (_ih h1)) ub k;
UV.as_seq_sel (IB.hs_of_mem (_ih h2)) ub k;
UV.put_sel (IB.hs_of_mem (_ih h1)) ub k;
UV.put_sel (IB.hs_of_mem (_ih h2)) ub k;
UV.length_eq ub
let length_up64 (b:buffer64) (h:vale_heap) (k:nat{k < buffer_length b}) (i:nat{i < 8}) : Lemma
(scale8 k + i <= DV.length (get_downview b.bsrc)) =
let vb = UV.mk_buffer (get_downview b.bsrc) uint64_view in
UV.length_eq vb
val same_mem_get_heap_val64 (b:buffer64{buffer_writeable b})
(i:nat{i < buffer_length b})
(v:nat64)
(k:nat{k < buffer_length b})
(h1:vale_heap{List.memP b (IB.ptrs_of_mem (_ih h1))})
(h2:vale_heap{h2 == buffer_write b i v h1})
(mem1:S.machine_heap{IB.correct_down_p (_ih h1) mem1 b})
(mem2:S.machine_heap{IB.correct_down_p (_ih h2) mem2 b}) : Lemma
(requires (Seq.index (buffer_as_seq h1 b) k == Seq.index (buffer_as_seq h2 b) k))
(ensures (let ptr = buffer_addr b h1 + scale8 k in
forall (x:int).{:pattern (mem1.[x])} ptr <= x /\ x < ptr + 8 ==> mem1.[x] == mem2.[x]))
let same_mem_get_heap_val64 b j v k h1 h2 mem1 mem2 =
let ptr = buffer_addr b h1 + scale8 k in
let addr = buffer_addr b h1 in
let aux (x:int{ptr <= x /\ x < ptr + 8}) : Lemma (mem1.[x] == mem2.[x]) =
let i = x - ptr in
let db = get_downview b.bsrc in
let ub = UV.mk_buffer db uint64_view in
UV.as_seq_sel (IB.hs_of_mem (_ih h1)) ub k;
UV.as_seq_sel (IB.hs_of_mem (_ih h2)) ub k;
same_mem_eq_slices64 b j v k h1 h2 mem1 mem2;
let s1 = (Seq.slice (DV.as_seq (IB.hs_of_mem (_ih h1)) db) (k * 8) (k * 8 + 8)) in
let s2 = (Seq.slice (DV.as_seq (IB.hs_of_mem (_ih h2)) db) (k * 8) (k * 8 + 8)) in
assert (Seq.index s1 i == Seq.index (DV.as_seq (IB.hs_of_mem (_ih h1)) db) (k * 8 + i));
length_up64 b h1 k i;
assert (mem1.[addr+(scale8 k + i)] == UInt8.v (Seq.index (DV.as_seq (IB.hs_of_mem (_ih h1)) db) (k * 8 + i)));
assert (Seq.index s2 i == Seq.index (DV.as_seq (IB.hs_of_mem (_ih h2)) db) (k * 8 + i));
length_up64 b h2 k i;
assert (mem2.[addr+(scale8 k + i)] == UInt8.v (Seq.index (DV.as_seq (IB.hs_of_mem (_ih h2)) db) (k * 8 + i)))
in
Classical.forall_intro aux;
assert (forall i. addr + (scale8 k + i) == ptr + i)
let rec written_buffer_down64_aux1
(b:buffer64{buffer_writeable b})
(i:nat{i < buffer_length b})
(v:nat64)
(h:vale_heap{List.memP b (IB.ptrs_of_mem (_ih h))})
(base:nat{base == buffer_addr b h})
(k:nat) (h1:vale_heap{h1 == buffer_write b i v h})
(mem1:S.machine_heap{IB.correct_down (_ih h) mem1})
(mem2:S.machine_heap{IB.correct_down (_ih h1) mem2 /\
(forall j.{:pattern (mem1.[j]) \/ (mem2.[j])}
base <= j /\ j < base + k * 8 ==>
mem1.[j] == mem2.[j])})
: Lemma
(ensures (forall j. {:pattern (mem1.[j]) \/ (mem2.[j])}
j >= base /\ j < base + scale8 i ==>
mem1.[j] == mem2.[j]))
(decreases %[i-k]) =
if k >= i then ()
else begin
let ptr = base + scale8 k in
same_mem_get_heap_val64 b i v k h h1 mem1 mem2;
heap_shift mem1 mem2 ptr 8;
written_buffer_down64_aux1 b i v h base (k+1) h1 mem1 mem2
end
let rec written_buffer_down64_aux2
(b:buffer64{buffer_writeable b})
(i:nat{i < buffer_length b})
(v:nat64)
(h:vale_heap{List.memP b (IB.ptrs_of_mem (_ih h))})
(base:nat{base == buffer_addr b h})
(n:nat{n == buffer_length b})
(k:nat{k > i}) (h1:vale_heap{h1 == buffer_write b i v h})
(mem1:S.machine_heap{IB.correct_down (_ih h) mem1})
(mem2:S.machine_heap{IB.correct_down (_ih h1) mem2 /\
(forall j. {:pattern (mem1.[j]) \/ (mem2.[j])}
base + scale8 (i+1) <= j /\ j < base + k * 8 ==>
mem1.[j] == mem2.[j])})
: Lemma
(ensures (forall j. {:pattern (mem1.[j]) \/ (mem2.[j])}
j >= base + scale8 (i+1) /\ j < base + scale8 n ==>
mem1.[j] == mem2.[j]))
(decreases %[n-k]) =
if k >= n then ()
else begin
let ptr = base + scale8 k in
same_mem_get_heap_val64 b i v k h h1 mem1 mem2;
heap_shift mem1 mem2 ptr 8;
written_buffer_down64_aux2 b i v h base n (k+1) h1 mem1 mem2
end
let written_buffer_down64 (b:buffer64{buffer_writeable b}) (i:nat{i < buffer_length b}) (v:nat64) (h:vale_heap)
: Lemma
(requires List.memP b (IB.ptrs_of_mem (_ih h)))
(ensures (
let mem1 = I.down_mem (_ih h) in
let h1 = buffer_write b i v h in
let mem2 = I.down_mem (_ih h1) in
let base = buffer_addr b h in
let n = buffer_length b in
forall j. {:pattern (mem1.[j]) \/ (mem2.[j])}
(base <= j /\ j < base + scale8 i) \/
(base + scale8 (i+1) <= j /\ j < base + scale8 n) ==>
mem1.[j] == mem2.[j]))
= let mem1 = I.down_mem (_ih h) in
let h1 = buffer_write b i v h in
let mem2 = I.down_mem (_ih h1) in
let base = buffer_addr b h in
let n = buffer_length b in
written_buffer_down64_aux1 b i v h base 0 h1 mem1 mem2;
written_buffer_down64_aux2 b i v h base n (i+1) h1 mem1 mem2
let unwritten_buffer_down (t:base_typ) (b:buffer t{buffer_writeable b})
(i:nat{i < buffer_length b})
(v:base_typ_as_vale_type t)
(h:vale_heap{List.memP b (IB.ptrs_of_mem (_ih h))})
: Lemma
(ensures (
let mem1 = I.down_mem (_ih h) in
let h1 = buffer_write b i v h in
let mem2 = I.down_mem (_ih h1) in
forall (a:b8{List.memP a (IB.ptrs_of_mem (_ih h)) /\ a =!= b}) j. {:pattern mem1.[j]; List.memP a (IB.ptrs_of_mem (_ih h)) \/ mem2.[j]; List.memP a (IB.ptrs_of_mem (_ih h))}
let base = (IB.addrs_of_mem (_ih h)) a in
j >= base /\ j < base + DV.length (get_downview a.bsrc) ==> mem1.[j] == mem2.[j]))
= let aux (a:b8{a =!= b /\ List.memP a (IB.ptrs_of_mem (_ih h))})
: Lemma
(ensures (
let base = (IB.addrs_of_mem (_ih h)) a in
let mem1 = I.down_mem (_ih h) in
let h1 = buffer_write b i v h in
let mem2 = I.down_mem (_ih h1) in
forall j.
j >= base /\ j < base + DV.length (get_downview a.bsrc) ==>
mem1.[j] == mem2.[j]))
= let db = get_downview a.bsrc in
if DV.length db = 0 then ()
else
let mem1 = I.down_mem (_ih h) in
let h1 = buffer_write b i v h in
let mem2 = I.down_mem (_ih h1) in
let base = (IB.addrs_of_mem (_ih h)) a in
let s0 = DV.as_seq (IB.hs_of_mem (_ih h)) db in
let s1 = DV.as_seq (IB.hs_of_mem (_ih h1)) db in
opaque_assert (`%IB.list_disjoint_or_eq) IB.list_disjoint_or_eq IB.list_disjoint_or_eq_def (MB.disjoint a.bsrc b.bsrc);
lemma_dv_equal (IB.down_view a.src) a.bsrc (IB.hs_of_mem (_ih h)) (IB.hs_of_mem (_ih h1));
assert (Seq.equal s0 s1);
assert (forall (j:int).{:pattern (mem1.[j])}
base <= j /\ j < base + Seq.length s0 ==> v_to_typ TUInt8 (Seq.index s0 (j - base)) == mem1.[j]);
heap_shift mem1 mem2 base (DV.length db)
in
Classical.forall_intro aux
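// store_buffer_down64_mem combines written_buffer_down64, unwritten_buffer_down and
// same_unspecified_down into a whole-heap frame lemma for a 64-bit store:
// only the 8 written bytes may change.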
let store_buffer_down64_mem
(b:buffer64{buffer_writeable b})
(i:nat{i < buffer_length b})
(v:nat64)
(h:vale_heap{List.memP b (IB.ptrs_of_mem (_ih h))})
: Lemma
(ensures (
let mem1 = I.down_mem (_ih h) in
let h1 = buffer_write b i v h in
let mem2 = I.down_mem (_ih h1) in
let base = buffer_addr b h in
forall (j:int). {:pattern mem1.[j] \/ mem2.[j]}
j < base + scale8 i \/ j >= base + scale8 (i+1) ==>
mem1.[j] == mem2.[j]))
= let mem1 = I.down_mem (_ih h) in
let h1 = buffer_write b i v h in
let mem2 = I.down_mem (_ih h1) in
let base = buffer_addr b h in
let n = buffer_length b in
let aux (j:int)
: Lemma
(j < base + scale8 i \/ j >= base + scale8 (i+1) ==>
mem1.[j] == mem2.[j])
=
I.addrs_set_lemma_all ();
if j >= base && j < base + DV.length (get_downview b.bsrc) then begin
written_buffer_down64 b i v h;
length_t_eq (TUInt64) b
end
else if not (I.valid_addr (_ih h) j)
then I.same_unspecified_down (IB.hs_of_mem (_ih h)) (IB.hs_of_mem (_ih h1)) (IB.ptrs_of_mem (_ih h))
else unwritten_buffer_down TUInt64 b i v h
in
Classical.forall_intro aux
let store_buffer_aux_down64_mem (ptr:int) (v:nat64) (h:vale_heap{writeable_mem64 ptr h})
: Lemma
(ensures (
let mem1 = I.down_mem (_ih h) in
let h1 = store_mem (TUInt64) ptr v h in
let mem2 = I.down_mem (_ih h1) in
forall j. {:pattern mem1.[j] \/ mem2.[j]}
j < ptr \/ j >= ptr + 8 ==>
mem1.[j] == mem2.[j]))
= let t = TUInt64 in
let h1 = store_mem t ptr v h in
let b = Some?.v (find_writeable_buffer t ptr h) in
length_t_eq t b;
let i = get_addr_in_ptr t (buffer_length b) (buffer_addr b h) ptr 0 in
store_buffer_write t ptr v h;
assert (buffer_addr b h + scale8 i == ptr);
assert (buffer_addr b h + scale8 (i+1) == ptr + 8);
store_buffer_down64_mem b i v h
let store_buffer_aux_down64_mem2 (ptr:int) (v:nat64) (h:vale_heap{writeable_mem64 ptr h})
: Lemma
(ensures (
let h1 = store_mem (TUInt64) ptr v h in
let mem2 = I.down_mem (_ih h1) in
S.get_heap_val64 ptr mem2 == v))
= let t = TUInt64 in
let b = Some?.v (find_writeable_buffer t ptr h) in
length_t_eq t b;
let i = get_addr_in_ptr t (buffer_length b) (buffer_addr b h) ptr 0 in
let h1 = store_mem t ptr v h in
let mem2 = I.down_mem (_ih h1) in
store_buffer_write t ptr v h;
assert (Seq.index (buffer_as_seq h1 b) i == v);
index64_get_heap_val64 h1 b mem2 i
let in_bounds64 (h:vale_heap) (b:buffer64) (i:nat{i < buffer_length b})
: Lemma (scale8 i + 8 <= DV.length (get_downview b.bsrc))
=
length_t_eq TUInt64 b
let bytes_valid64 ptr h =
reveal_opaque (`%S.valid_addr64) S.valid_addr64;
let t = TUInt64 in
let b = get_addr_ptr t ptr h in
let i = get_addr_in_ptr t (buffer_length b) (buffer_addr b h) ptr 0 in
in_bounds64 h b i;
I.addrs_set_mem (_ih h) b ptr;
I.addrs_set_mem (_ih h) b (ptr+1);
I.addrs_set_mem (_ih h) b (ptr+2);
I.addrs_set_mem (_ih h) b (ptr+3);
I.addrs_set_mem (_ih h) b (ptr+4);
I.addrs_set_mem (_ih h) b (ptr+5);
I.addrs_set_mem (_ih h) b (ptr+6);
I.addrs_set_mem (_ih h) b (ptr+7)
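// bytes_valid64 shows that each of the 8 byte addresses of a valid 64-bit location
// is in the interop address set, establishing S.valid_addr64 on the downed heap.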
val same_mem_get_heap_val128 (b:buffer128)
(i:nat{i < buffer_length b})
(v:quad32)
(k:nat{k < buffer_length b})
(h1:vale_heap{List.memP b (IB.ptrs_of_mem (_ih h1)) /\ buffer_writeable b})
(h2:vale_heap{h2 == buffer_write b i v h1})
(mem1:S.machine_heap{IB.correct_down_p (_ih h1) mem1 b})
(mem2:S.machine_heap{IB.correct_down_p (_ih h2) mem2 b}) : Lemma
(requires (Seq.index (buffer_as_seq h1 b) k == Seq.index (buffer_as_seq h2 b) k))
(ensures (let ptr = buffer_addr b h1 + scale16 k in
forall i.{:pattern mem1.[i]} i >= ptr /\ i < ptr+16 ==> mem1.[i] == mem2.[i]))
val same_mem_eq_slices128 (b:buffer128)
(i:nat{i < buffer_length b})
(v:quad32)
(k:nat{k < buffer_length b})
(h1:vale_heap{List.memP b (IB.ptrs_of_mem (_ih h1)) /\ buffer_writeable b})
(h2:vale_heap{h2 == buffer_write b i v h1})
(mem1:S.machine_heap{IB.correct_down_p (_ih h1) mem1 b})
(mem2:S.machine_heap{IB.correct_down_p (_ih h2) mem2 b}) : Lemma
(requires (Seq.index (buffer_as_seq h1 b) k == Seq.index (buffer_as_seq h2 b) k))
(ensures (
k * 16 + 16 <= DV.length (get_downview b.bsrc) /\
Seq.slice (DV.as_seq (IB.hs_of_mem (_ih h1)) (get_downview b.bsrc)) (k * 16) (k * 16 + 16) ==
Seq.slice (DV.as_seq (IB.hs_of_mem (_ih h2)) (get_downview b.bsrc)) (k * 16) (k * 16 + 16)))
let same_mem_eq_slices128 b i v k h1 h2 mem1 mem2 =
let t = TUInt128 in
let db = get_downview b.bsrc in
let ub = UV.mk_buffer db (uint_view t) in
UV.as_seq_sel (IB.hs_of_mem (_ih h1)) ub k;
UV.as_seq_sel (IB.hs_of_mem (_ih h2)) ub k;
UV.put_sel (IB.hs_of_mem (_ih h1)) ub k;
UV.put_sel (IB.hs_of_mem (_ih h2)) ub k;
UV.length_eq ub
let length_up128 (b:buffer128) (h:vale_heap) (k:nat{k < buffer_length b}) (i:nat{i < 16}) : Lemma
(scale16 k + i <= DV.length (get_downview b.bsrc)) =
let vb = UV.mk_buffer (get_downview b.bsrc) uint128_view in
UV.length_eq vb
let same_mem_get_heap_val128 b j v k h1 h2 mem1 mem2 =
let ptr = buffer_addr b h1 + scale16 k in
let addr = buffer_addr b h1 in
let aux (i:nat{ptr <= i /\ i < ptr+16}) : Lemma (mem1.[i] == mem2.[i]) =
let db = get_downview b.bsrc in
let ub = UV.mk_buffer db uint128_view in
UV.as_seq_sel (IB.hs_of_mem (_ih h1)) ub k;
UV.as_seq_sel (IB.hs_of_mem (_ih h2)) ub k;
same_mem_eq_slices128 b j v k h1 h2 mem1 mem2;
let s1 = (Seq.slice (DV.as_seq (IB.hs_of_mem (_ih h1)) db) (k * 16) (k * 16 + 16)) in
let s2 = (Seq.slice (DV.as_seq (IB.hs_of_mem (_ih h2)) db) (k * 16) (k * 16 + 16)) in
assert (Seq.index s1 (i - ptr) == Seq.index (DV.as_seq (IB.hs_of_mem (_ih h1)) db) (k * 16 + (i-ptr)));
length_up128 b h1 k (i-ptr);
assert (mem1.[i] == UInt8.v (Seq.index (DV.as_seq (IB.hs_of_mem (_ih h1)) db) (k * 16 + (i-ptr))));
assert (Seq.index s2 (i-ptr) == Seq.index (DV.as_seq (IB.hs_of_mem (_ih h2)) db) (k * 16 + (i-ptr)));
length_up128 b h2 k (i-ptr);
assert (mem2.[addr+(scale16 k + (i-ptr))] == UInt8.v (Seq.index (DV.as_seq (IB.hs_of_mem (_ih h2)) db) (k * 16 + (i-ptr))));
assert (forall i. addr + (scale16 k + (i-ptr)) == i)
in
Classical.forall_intro aux
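// From equality of the k-th 128-bit element, the aux lemma above derives
// byte-for-byte equality of mem1 and mem2 on the 16-byte window starting at
// buffer_addr b h1 + scale16 k.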
let in_bounds128 (h:vale_heap) (b:buffer128) (i:nat{i < buffer_length b}) : Lemma
(scale16 i + 16 <= DV.length (get_downview b.bsrc))
=
length_t_eq TUInt128 b
#push-options "--z3rlimit 20"
#restart-solver
let bytes_valid128 ptr h =
reveal_opaque (`%S.valid_addr128) S.valid_addr128;
IB.list_disjoint_or_eq_reveal ();
let t = TUInt128 in
let b = get_addr_ptr t ptr h in
let i = get_addr_in_ptr t (buffer_length b) (buffer_addr b h) ptr 0 in
in_bounds128 h b i;
I.addrs_set_mem (_ih h) b ptr;
I.addrs_set_mem (_ih h) b (ptr+1);
I.addrs_set_mem (_ih h) b (ptr+2);
I.addrs_set_mem (_ih h) b (ptr+3);
I.addrs_set_mem (_ih h) b (ptr+4);
I.addrs_set_mem (_ih h) b (ptr+5);
I.addrs_set_mem (_ih h) b (ptr+6);
I.addrs_set_mem (_ih h) b (ptr+7);
I.addrs_set_mem (_ih h) b (ptr+8);
I.addrs_set_mem (_ih h) b (ptr+9);
I.addrs_set_mem (_ih h) b (ptr+10);
I.addrs_set_mem (_ih h) b (ptr+11);
I.addrs_set_mem (_ih h) b (ptr+12);
I.addrs_set_mem (_ih h) b (ptr+13);
I.addrs_set_mem (_ih h) b (ptr+14);
I.addrs_set_mem (_ih h) b (ptr+15)
#pop-options
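// Loading 64 bits through the vale_heap agrees with S.get_heap_val64 on the
// flattened heap produced by get_heap; the interface states this as
// load_mem64 ptr m == S.get_heap_val64 ptr (get_heap m).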
let equiv_load_mem64 ptr h =
let t = TUInt64 in
let b = get_addr_ptr t ptr h in
let i = get_addr_in_ptr t (buffer_length b) (buffer_addr b h) ptr 0 in
let addr = buffer_addr b h in
let contents = DV.as_seq (_ih h).IB.hs (get_downview b.bsrc) in
let heap = get_heap h in
index64_get_heap_val64 h b heap i;
lemma_load_mem64 b i h
//let low_lemma_valid_mem64 b i h =
// lemma_valid_mem64 b i h;
// bytes_valid64 (buffer_addr b h + scale8 i) h
//let low_lemma_load_mem64 b i h =
// lemma_valid_mem64 b i h;
// lemma_load_mem64 b i h;
// equiv_load_mem64 (buffer_addr b h + scale8 i) h
//let same_domain_update64 b i v h =
// low_lemma_valid_mem64 b i h;
// Vale.Arch.MachineHeap.same_domain_update64 (buffer_addr b h + scale8 i) v (get_heap h)
open Vale.X64.BufferViewStore
let low_lemma_store_mem64_aux
(b:buffer64)
(heap:S.machine_heap)
(i:nat{i < buffer_length b})
(v:nat64)
(h:vale_heap{buffer_readable h b /\ buffer_writeable b})
: Lemma
(requires IB.correct_down_p (_ih h) heap b)
(ensures (let heap' = S.update_heap64 (buffer_addr b h + scale8 i) v heap in
let h' = store_mem64 (buffer_addr b h + scale8 i) v h in
(_ih h').IB.hs == DV.upd_seq (_ih h).IB.hs (get_downview b.bsrc) (I.get_seq_heap heap' (_ih h).IB.addrs b))) =
let ptr = buffer_addr b h + scale8 i in
let heap' = S.update_heap64 ptr v heap in
let h' = store_mem64 ptr v h in
lemma_store_mem64 b i v h;
length_t_eq TUInt64 b;
bv_upd_update_heap64 b heap i v (_ih h);
let db = get_downview b.bsrc in
let bv = UV.mk_buffer db Vale.Interop.Views.up_view64 in
assert (UV.upd (_ih h).IB.hs bv i (UInt64.uint_to_t v) == (_ih h').IB.hs)
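// With the buffer-level fact above in hand, the next lemma lifts it to whole
// heaps: updating the machine heap with S.update_heap64 and storing through
// store_mem64 produce the same flattened memory (I.down_mem of the result).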
val valid_state_store_mem64_aux: (i:nat) -> (v:nat64) -> (h:vale_heap) -> Lemma
(requires writeable_mem64 i h)
(ensures (
let heap = get_heap h in
let heap' = S.update_heap64 i v heap in
let h' = store_mem64 i v h in
heap' == I.down_mem (_ih h')
))
let valid_state_store_mem64_aux i v h =
let heap = get_heap h in
let heap' = S.update_heap64 i v heap in
let h1 = store_mem TUInt64 i v h in
store_buffer_aux_down64_mem i v h;
store_buffer_aux_down64_mem2 i v h;
let mem1 = heap' in
let mem2 = I.down_mem (_ih h1) in
let aux () : Lemma (forall j. mem1.[j] == mem2.[j]) =
Vale.Arch.MachineHeap.same_mem_get_heap_val64 i mem1 mem2;
Vale.Arch.MachineHeap.correct_update_get64 i v heap;
Vale.Arch.MachineHeap.frame_update_heap64 i v heap
in let aux2 () : Lemma (Set.equal (Map.domain mem1) (Map.domain mem2)) =
bytes_valid64 i h;
Vale.Arch.MachineHeap.same_domain_update64 i v heap
in aux(); aux2();
Map.lemma_equal_intro mem1 mem2 | false | false | Vale.X64.Memory_Sems.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 100,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val low_lemma_load_mem64_full (b:buffer64) (i:nat) (vfh:vale_full_heap) (t:taint) (hid:heaplet_id) : Lemma
(requires (
let (h, mt) = (Map16.get vfh.vf_heaplets hid, vfh.vf_layout.vl_taint) in
i < Seq.length (buffer_as_seq h b) /\
buffer_readable h b /\
valid_layout_buffer b vfh.vf_layout h false /\
valid_taint_buf64 b h mt t /\
mem_inv vfh
))
(ensures (
let (h, mt) = (Map16.get vfh.vf_heaplets hid, vfh.vf_layout.vl_taint) in
let ptr = buffer_addr b h + scale8 i in
is_full_read vfh.vf_heap h b i /\
// valid_addr64 ptr (heap_get (coerce vfh)) /\
valid_mem64 ptr vfh.vf_heap /\
valid_taint_buf64 b vfh.vf_heap mt t
)) | [] | Vale.X64.Memory_Sems.low_lemma_load_mem64_full | {
"file_name": "vale/code/arch/x64/Vale.X64.Memory_Sems.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} |
b: Vale.X64.Memory.buffer64 ->
i: Prims.nat ->
vfh: Vale.Arch.HeapImpl.vale_full_heap ->
t: Vale.Arch.HeapTypes_s.taint ->
hid: Vale.Arch.HeapImpl.heaplet_id
-> FStar.Pervasives.Lemma
(requires
(let _ =
Vale.Lib.Map16.get (Mkvale_full_heap?.vf_heaplets vfh) hid,
Mkvale_heap_layout?.vl_taint (Mkvale_full_heap?.vf_layout vfh)
in
(let FStar.Pervasives.Native.Mktuple2 #_ #_ h mt = _ in
i < FStar.Seq.Base.length (Vale.X64.Memory.buffer_as_seq h b) /\
Vale.X64.Memory.buffer_readable h b /\
Vale.X64.Memory.valid_layout_buffer b (Mkvale_full_heap?.vf_layout vfh) h false /\
Vale.X64.Memory.valid_taint_buf64 b h mt t /\ Vale.X64.Memory.mem_inv vfh)
<:
Type0))
(ensures
(let _ =
Vale.Lib.Map16.get (Mkvale_full_heap?.vf_heaplets vfh) hid,
Mkvale_heap_layout?.vl_taint (Mkvale_full_heap?.vf_layout vfh)
in
(let FStar.Pervasives.Native.Mktuple2 #_ #_ h mt = _ in
let ptr = Vale.X64.Memory.buffer_addr b h + Vale.X64.Memory.scale8 i in
Vale.X64.Memory_Sems.is_full_read (Mkvale_full_heap?.vf_heap vfh) h b i /\
Vale.X64.Memory.valid_mem64 ptr (Mkvale_full_heap?.vf_heap vfh) /\
Vale.X64.Memory.valid_taint_buf64 b (Mkvale_full_heap?.vf_heap vfh) mt t)
<:
Type0)) | {
"end_col": 4,
"end_line": 638,
"start_col": 2,
"start_line": 637
} |
FStar.Pervasives.Lemma | val bytes_valid128 (i:int) (m:vale_heap) : Lemma
(requires valid_mem128 i m)
(ensures S.valid_addr128 i (get_heap m))
[SMTPat (S.valid_addr128 i (get_heap m))] | [
{
"abbrev": true,
"full_module": "Vale.Interop.Base",
"short_module": "IB"
},
{
"abbrev": false,
"full_module": "Vale.Lib.BufferViewHelpers",
"short_module": null
},
{
"abbrev": true,
"full_module": "LowStar.BufferView.Down",
"short_module": "DV"
},
{
"abbrev": true,
"full_module": "LowStar.BufferView.Up",
"short_module": "UV"
},
{
"abbrev": true,
"full_module": "LowStar.Monotonic.Buffer",
"short_module": "MB"
},
{
"abbrev": true,
"full_module": "Vale.Interop",
"short_module": "I"
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Machine_Semantics_s",
"short_module": "S"
},
{
"abbrev": false,
"full_module": "Vale.Lib.Seqs",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.MachineHeap_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Heap",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let bytes_valid128 ptr h =
reveal_opaque (`%S.valid_addr128) S.valid_addr128;
IB.list_disjoint_or_eq_reveal ();
let t = TUInt128 in
let b = get_addr_ptr t ptr h in
let i = get_addr_in_ptr t (buffer_length b) (buffer_addr b h) ptr 0 in
in_bounds128 h b i;
I.addrs_set_mem (_ih h) b ptr;
I.addrs_set_mem (_ih h) b (ptr+1);
I.addrs_set_mem (_ih h) b (ptr+2);
I.addrs_set_mem (_ih h) b (ptr+3);
I.addrs_set_mem (_ih h) b (ptr+4);
I.addrs_set_mem (_ih h) b (ptr+5);
I.addrs_set_mem (_ih h) b (ptr+6);
I.addrs_set_mem (_ih h) b (ptr+7);
I.addrs_set_mem (_ih h) b (ptr+8);
I.addrs_set_mem (_ih h) b (ptr+9);
I.addrs_set_mem (_ih h) b (ptr+10);
I.addrs_set_mem (_ih h) b (ptr+11);
I.addrs_set_mem (_ih h) b (ptr+12);
I.addrs_set_mem (_ih h) b (ptr+13);
I.addrs_set_mem (_ih h) b (ptr+14);
I.addrs_set_mem (_ih h) b (ptr+15) | val bytes_valid128 (i:int) (m:vale_heap) : Lemma
(requires valid_mem128 i m)
(ensures S.valid_addr128 i (get_heap m))
[SMTPat (S.valid_addr128 i (get_heap m))]
let bytes_valid128 ptr h = | false | null | true | reveal_opaque (`%S.valid_addr128) S.valid_addr128;
IB.list_disjoint_or_eq_reveal ();
let t = TUInt128 in
let b = get_addr_ptr t ptr h in
let i = get_addr_in_ptr t (buffer_length b) (buffer_addr b h) ptr 0 in
in_bounds128 h b i;
I.addrs_set_mem (_ih h) b ptr;
I.addrs_set_mem (_ih h) b (ptr + 1);
I.addrs_set_mem (_ih h) b (ptr + 2);
I.addrs_set_mem (_ih h) b (ptr + 3);
I.addrs_set_mem (_ih h) b (ptr + 4);
I.addrs_set_mem (_ih h) b (ptr + 5);
I.addrs_set_mem (_ih h) b (ptr + 6);
I.addrs_set_mem (_ih h) b (ptr + 7);
I.addrs_set_mem (_ih h) b (ptr + 8);
I.addrs_set_mem (_ih h) b (ptr + 9);
I.addrs_set_mem (_ih h) b (ptr + 10);
I.addrs_set_mem (_ih h) b (ptr + 11);
I.addrs_set_mem (_ih h) b (ptr + 12);
I.addrs_set_mem (_ih h) b (ptr + 13);
I.addrs_set_mem (_ih h) b (ptr + 14);
I.addrs_set_mem (_ih h) b (ptr + 15) | {
"checked_file": "Vale.X64.Memory_Sems.fst.checked",
"dependencies": [
"Vale.X64.Memory.fst.checked",
"Vale.X64.Memory.fst.checked",
"Vale.X64.Machine_Semantics_s.fst.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.BufferViewStore.fsti.checked",
"Vale.Lib.BufferViewHelpers.fst.checked",
"Vale.Interop.Views.fsti.checked",
"Vale.Interop.Heap_s.fst.checked",
"Vale.Interop.Base.fst.checked",
"Vale.Interop.fsti.checked",
"Vale.Def.Words_s.fsti.checked",
"Vale.Def.Words.Two_s.fsti.checked",
"Vale.Def.Words.Four_s.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Def.Opaque_s.fsti.checked",
"Vale.Arch.MachineHeap.fsti.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"Vale.Arch.Heap.fsti.checked",
"prims.fst.checked",
"LowStar.Monotonic.Buffer.fsti.checked",
"LowStar.BufferView.Up.fsti.checked",
"LowStar.BufferView.Down.fsti.checked",
"FStar.UInt8.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.Set.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Option.fst.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.List.fst.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": true,
"source_file": "Vale.X64.Memory_Sems.fst"
} | [
"lemma"
] | [
"Prims.int",
"Vale.Arch.HeapImpl.vale_heap",
"Vale.Interop.addrs_set_mem",
"Vale.Arch.HeapImpl._ih",
"Prims.op_Addition",
"Prims.unit",
"Vale.X64.Memory_Sems.in_bounds128",
"Prims.nat",
"Vale.X64.Memory.get_addr_in_ptr",
"Vale.X64.Memory.buffer_length",
"Vale.X64.Memory.buffer_addr",
"Vale.Arch.HeapImpl.buffer",
"Vale.X64.Memory.get_addr_ptr",
"Vale.Arch.HeapTypes_s.base_typ",
"Vale.Arch.HeapTypes_s.TUInt128",
"Vale.Interop.Heap_s.list_disjoint_or_eq_reveal",
"FStar.Pervasives.reveal_opaque",
"Vale.Arch.MachineHeap_s.machine_heap",
"Prims.bool",
"Vale.Arch.MachineHeap_s.valid_addr128"
] | [] | module Vale.X64.Memory_Sems
open FStar.Mul
open Vale.Def.Prop_s
open Vale.Def.Opaque_s
open Vale.X64.Machine_s
open Vale.X64.Memory
open Vale.Def.Words_s
module I = Vale.Interop
module S = Vale.X64.Machine_Semantics_s
module MB = LowStar.Monotonic.Buffer
module UV = LowStar.BufferView.Up
module DV = LowStar.BufferView.Down
open Vale.Lib.BufferViewHelpers
open Vale.Arch.HeapImpl
open Vale.Arch.Heap
friend Vale.X64.Memory
module IB = Vale.Interop.Base
let same_domain h m = Set.equal (IB.addrs_set (_ih h)) (Map.domain m)
let lemma_same_domains h m1 m2 = ()
let get_heap h = I.down_mem (_ih h)
let upd_heap h m = mi_heap_upd h m
//let lemma_upd_get_heap h = I.down_up_identity (_ih h)
let lemma_get_upd_heap h m = I.up_down_identity (_ih h) m
let lemma_heap_impl = ()
let lemma_heap_get_heap h = ()
let lemma_heap_taint h = ()
//let lemma_heap_upd_heap h mh mt = ()
[@"opaque_to_smt"]
let rec set_of_range (a:int) (n:nat) : Pure (Set.set int)
(requires True)
(ensures fun s -> (forall (i:int).{:pattern Set.mem i s} Set.mem i s <==> a <= i /\ i < a + n))
=
if n = 0 then Set.empty else Set.union (set_of_range a (n - 1)) (Set.singleton (a + n - 1))
let buffer_info_has_addr (bi:buffer_info) (a:int) =
let b = bi.bi_buffer in
let addr = Vale.Interop.Heap_s.global_addrs_map b in
let len = DV.length (get_downview b.bsrc) in
addr <= a /\ a < addr + len
let buffer_info_has_addr_opt (bi:option buffer_info) (a:int) =
match bi with
| None -> False
| Some bi -> buffer_info_has_addr bi a
#set-options "--z3rlimit 100"
let rec make_owns_rec (h:vale_heap) (bs:Seq.seq buffer_info) (n:nat{n <= Seq.length bs}) :
GTot ((int -> option (n:nat{n < Seq.length bs})) & (heaplet_id -> Set.set int))
=
if n = 0 then ((fun _ -> None), (fun _ -> Set.empty)) else
let (m0, s0) = make_owns_rec h bs (n - 1) in
let bi = Seq.index bs (n - 1) in
let b = bi.bi_buffer in
let hi = bi.bi_heaplet in
let addr = Vale.Interop.Heap_s.global_addrs_map b in
let len = DV.length (get_downview b.bsrc) in
let s_b = set_of_range addr len in
let s i = if i = hi then Set.union (s0 i) s_b else s0 i in
let m a = if addr <= a && a < addr + len then Some (n - 1) else m0 a in
(m, s)
[@"opaque_to_smt"]
let make_owns (h:vale_heap) (bs:Seq.seq buffer_info) (n:nat{n <= Seq.length bs}) :
GTot ((int -> option (n:nat{n < Seq.length bs})) & (heaplet_id -> Set.set int))
=
make_owns_rec h bs n
let rec lemma_make_owns (h:vale_heap) (bs:Seq.seq buffer_info) (n:nat) : Lemma
(requires
n <= Seq.length bs /\
(forall (i:nat).{:pattern Seq.index bs i} i < Seq.length bs ==> buffer_readable h (Seq.index bs i).bi_buffer) /\
(forall (i1 i2:nat).{:pattern (Seq.index bs i1); (Seq.index bs i2)}
i1 < Seq.length bs /\ i2 < Seq.length bs ==> buffer_info_disjoint (Seq.index bs i1) (Seq.index bs i2))
)
(ensures (
let (m, s) = make_owns h bs n in
(forall (i:heaplet_id) (a:int).{:pattern Set.mem a (s i)}
(Set.mem a (s i) <==> Option.mapTot (fun n -> (Seq.index bs n).bi_heaplet) (m a) == Some i) /\
(Set.mem a (s i) ==> buffer_info_has_addr_opt (Option.mapTot (fun n -> Seq.index bs n) (m a)) a) /\
(Set.mem a (s i) ==> Set.mem a (Map.domain h.mh))
) /\
(forall (k:nat) (a:int).{:pattern Set.mem a (s (Seq.index bs k).bi_heaplet)}
k < n /\ buffer_info_has_addr (Seq.index bs k) a ==> Set.mem a (s (Seq.index bs k).bi_heaplet))
))
=
reveal_opaque (`%make_owns) make_owns;
if n = 0 then () else
let _ = make_owns h bs (n - 1) in
let (m, s) = make_owns h bs n in
lemma_make_owns h bs (n - 1);
let bi = Seq.index bs (n - 1) in
let b = bi.bi_buffer in
let hi = bi.bi_heaplet in
let addr = Vale.Interop.Heap_s.global_addrs_map b in
let len = DV.length (get_downview b.bsrc) in
let s_b = set_of_range addr len in
let lem1 (a:int) : Lemma
(requires Set.mem a s_b)
(ensures Set.mem a (Map.domain h.mh))
[SMTPat (Set.mem a (Map.domain h.mh))]
=
I.addrs_set_mem h.ih b a
in
let lem2 (i:heaplet_id) (a:int) : Lemma
(requires i =!= hi /\ Set.mem a (Set.intersect s_b (s i)))
(ensures False)
[SMTPat (Set.mem a (s i))]
=
reveal_opaque (`%addr_map_pred) addr_map_pred
in
()
#push-options "--initial_fuel 1 --max_fuel 1 --initial_ifuel 2 --max_ifuel 2"
let rec lemma_loc_mutable_buffers_rec (l:list buffer_info) (s:Seq.seq buffer_info) (n:nat) : Lemma
(requires
n + List.length l == Seq.length s /\
list_to_seq_post l s n
)
(ensures (
let modloc = loc_mutable_buffers l in
forall (i:nat).{:pattern Seq.index s i} n <= i /\ i < Seq.length s ==> (
let bi = Seq.index s i in
bi.bi_mutable == Mutable ==> loc_includes modloc (loc_buffer bi.bi_buffer))
))
(decreases l)
=
match l with
| [] -> ()
| h::t -> lemma_loc_mutable_buffers_rec t s (n + 1)
#pop-options
let lemma_loc_mutable_buffers (l:list buffer_info) : Lemma
(ensures (
let s = list_to_seq l in
forall (i:nat).{:pattern Seq.index s i} i < Seq.length s ==> (
let bi = Seq.index s i in
bi.bi_mutable == Mutable ==> loc_includes (loc_mutable_buffers l) (loc_buffer bi.bi_buffer))
))
=
lemma_list_to_seq l;
lemma_loc_mutable_buffers_rec l (list_to_seq l) 0
let create_heaplets buffers h1 =
let bs = list_to_seq buffers in
let modloc = loc_mutable_buffers buffers in
let layout1 = h1.vf_layout in
let layin1 = layout1.vl_inner in
let (hmap, hsets) = make_owns h1.vf_heap bs (Seq.length bs) in
let hmap a = Option.mapTot (fun n -> (Seq.index bs n).bi_heaplet) (hmap a) in
let l = {
vl_heaplets_initialized = true;
vl_heaplet_map = hmap;
vl_heaplet_sets = hsets;
vl_old_heap = h1.vf_heap;
vl_buffers = bs;
vl_mod_loc = modloc;
} in
let layout2 = {layout1 with vl_inner = l} in
let h2 = {
vf_layout = layout2;
vf_heap = h1.vf_heap;
vf_heaplets = h1.vf_heaplets;
} in
h2
let lemma_create_heaplets buffers h1 =
let bs = list_to_seq buffers in
let h2 = create_heaplets buffers h1 in
assert (h2.vf_layout.vl_inner.vl_buffers == bs); // REVIEW: why is this necessary, even with extra ifuel?
lemma_make_owns h1.vf_heap bs (Seq.length bs);
lemma_loc_mutable_buffers buffers;
reveal_opaque (`%valid_layout_buffer_id) valid_layout_buffer_id;
()
let destroy_heaplets h1 =
h1
let lemma_destroy_heaplets h1 =
()
val heap_shift (m1 m2:S.machine_heap) (base:int) (n:nat) : Lemma
(requires (forall i. 0 <= i /\ i < n ==> m1.[base + i] == m2.[base + i]))
(ensures (forall i. {:pattern (m1.[i])} base <= i /\ i < base + n ==> m1.[i] == m2.[i]))
#push-options "--smtencoding.l_arith_repr boxwrap"
let heap_shift m1 m2 base n =
assert (forall i. base <= i /\ i < base + n ==>
m1.[base + (i - base)] == m2.[base + (i - base)])
#pop-options
val same_mem_eq_slices64 (b:buffer64{buffer_writeable b})
(i:nat{i < buffer_length b})
(v:nat64)
(k:nat{k < buffer_length b})
(h1:vale_heap{List.memP b (IB.ptrs_of_mem (_ih h1))})
(h2:vale_heap{h2 == buffer_write b i v h1})
(mem1:S.machine_heap{IB.correct_down_p (_ih h1) mem1 b})
(mem2:S.machine_heap{IB.correct_down_p (_ih h2) mem2 b}) : Lemma
(requires (Seq.index (buffer_as_seq h1 b) k == Seq.index (buffer_as_seq h2 b) k))
(ensures (
k * 8 + 8 <= DV.length (get_downview b.bsrc) /\
Seq.slice (DV.as_seq (IB.hs_of_mem (_ih h1)) (get_downview b.bsrc)) (k * 8) (k * 8 + 8) ==
Seq.slice (DV.as_seq (IB.hs_of_mem (_ih h2)) (get_downview b.bsrc)) (k * 8) (k * 8 + 8)))
let same_mem_eq_slices64 b i v k h1 h2 mem1 mem2 =
let t = TUInt64 in
let db = get_downview b.bsrc in
let ub = UV.mk_buffer db (uint_view t) in
UV.as_seq_sel (IB.hs_of_mem (_ih h1)) ub k;
UV.as_seq_sel (IB.hs_of_mem (_ih h2)) ub k;
UV.put_sel (IB.hs_of_mem (_ih h1)) ub k;
UV.put_sel (IB.hs_of_mem (_ih h2)) ub k;
UV.length_eq ub
let length_up64 (b:buffer64) (h:vale_heap) (k:nat{k < buffer_length b}) (i:nat{i < 8}) : Lemma
(scale8 k + i <= DV.length (get_downview b.bsrc)) =
let vb = UV.mk_buffer (get_downview b.bsrc) uint64_view in
UV.length_eq vb
val same_mem_get_heap_val64 (b:buffer64{buffer_writeable b})
(i:nat{i < buffer_length b})
(v:nat64)
(k:nat{k < buffer_length b})
(h1:vale_heap{List.memP b (IB.ptrs_of_mem (_ih h1))})
(h2:vale_heap{h2 == buffer_write b i v h1})
(mem1:S.machine_heap{IB.correct_down_p (_ih h1) mem1 b})
(mem2:S.machine_heap{IB.correct_down_p (_ih h2) mem2 b}) : Lemma
(requires (Seq.index (buffer_as_seq h1 b) k == Seq.index (buffer_as_seq h2 b) k))
(ensures (let ptr = buffer_addr b h1 + scale8 k in
forall (x:int).{:pattern (mem1.[x])} ptr <= x /\ x < ptr + 8 ==> mem1.[x] == mem2.[x]))
let same_mem_get_heap_val64 b j v k h1 h2 mem1 mem2 =
let ptr = buffer_addr b h1 + scale8 k in
let addr = buffer_addr b h1 in
let aux (x:int{ptr <= x /\ x < ptr + 8}) : Lemma (mem1.[x] == mem2.[x]) =
let i = x - ptr in
let db = get_downview b.bsrc in
let ub = UV.mk_buffer db uint64_view in
UV.as_seq_sel (IB.hs_of_mem (_ih h1)) ub k;
UV.as_seq_sel (IB.hs_of_mem (_ih h2)) ub k;
same_mem_eq_slices64 b j v k h1 h2 mem1 mem2;
let s1 = (Seq.slice (DV.as_seq (IB.hs_of_mem (_ih h1)) db) (k * 8) (k * 8 + 8)) in
let s2 = (Seq.slice (DV.as_seq (IB.hs_of_mem (_ih h2)) db) (k * 8) (k * 8 + 8)) in
assert (Seq.index s1 i == Seq.index (DV.as_seq (IB.hs_of_mem (_ih h1)) db) (k * 8 + i));
length_up64 b h1 k i;
assert (mem1.[addr+(scale8 k + i)] == UInt8.v (Seq.index (DV.as_seq (IB.hs_of_mem (_ih h1)) db) (k * 8 + i)));
assert (Seq.index s2 i == Seq.index (DV.as_seq (IB.hs_of_mem (_ih h2)) db) (k * 8 + i));
length_up64 b h2 k i;
assert (mem2.[addr+(scale8 k + i)] == UInt8.v (Seq.index (DV.as_seq (IB.hs_of_mem (_ih h2)) db) (k * 8 + i)))
in
Classical.forall_intro aux;
assert (forall i. addr + (scale8 k + i) == ptr + i)
let rec written_buffer_down64_aux1
(b:buffer64{buffer_writeable b})
(i:nat{i < buffer_length b})
(v:nat64)
(h:vale_heap{List.memP b (IB.ptrs_of_mem (_ih h))})
(base:nat{base == buffer_addr b h})
(k:nat) (h1:vale_heap{h1 == buffer_write b i v h})
(mem1:S.machine_heap{IB.correct_down (_ih h) mem1})
(mem2:S.machine_heap{IB.correct_down (_ih h1) mem2 /\
(forall j.{:pattern (mem1.[j]) \/ (mem2.[j])}
base <= j /\ j < base + k * 8 ==>
mem1.[j] == mem2.[j])})
: Lemma
(ensures (forall j. {:pattern (mem1.[j]) \/ (mem1.[j])}
j >= base /\ j < base + scale8 i ==>
mem1.[j] == mem2.[j]))
(decreases %[i-k]) =
if k >= i then ()
else begin
let ptr = base + scale8 k in
same_mem_get_heap_val64 b i v k h h1 mem1 mem2;
heap_shift mem1 mem2 ptr 8;
written_buffer_down64_aux1 b i v h base (k+1) h1 mem1 mem2
end
let rec written_buffer_down64_aux2
(b:buffer64{buffer_writeable b})
(i:nat{i < buffer_length b})
(v:nat64)
(h:vale_heap{List.memP b (IB.ptrs_of_mem (_ih h))})
(base:nat{base == buffer_addr b h})
(n:nat{n == buffer_length b})
(k:nat{k > i}) (h1:vale_heap{h1 == buffer_write b i v h})
(mem1:S.machine_heap{IB.correct_down (_ih h) mem1})
(mem2:S.machine_heap{IB.correct_down (_ih h1) mem2 /\
(forall j. {:pattern (mem1.[j]) \/ (mem2.[j])}
base + scale8 (i+1) <= j /\ j < base + k * 8 ==>
mem1.[j] == mem2.[j])})
: Lemma
(ensures (forall j. {:pattern (mem1.[j]) \/ (mem2.[j])}
j >= base + scale8 (i+1) /\ j < base + scale8 n ==>
mem1.[j] == mem2.[j]))
(decreases %[n-k]) =
if k >= n then ()
else begin
let ptr = base + scale8 k in
same_mem_get_heap_val64 b i v k h h1 mem1 mem2;
heap_shift mem1 mem2 ptr 8;
written_buffer_down64_aux2 b i v h base n (k+1) h1 mem1 mem2
end
let written_buffer_down64 (b:buffer64{buffer_writeable b}) (i:nat{i < buffer_length b}) (v:nat64) (h:vale_heap)
: Lemma
(requires List.memP b (IB.ptrs_of_mem (_ih h)))
(ensures (
let mem1 = I.down_mem (_ih h) in
let h1 = buffer_write b i v h in
let mem2 = I.down_mem (_ih h1) in
let base = buffer_addr b h in
let n = buffer_length b in
forall j. {:pattern (mem1.[j]) \/ (mem2.[j])}
(base <= j /\ j < base + scale8 i) \/
(base + scale8 (i+1) <= j /\ j < base + scale8 n) ==>
mem1.[j] == mem2.[j]))
= let mem1 = I.down_mem (_ih h) in
let h1 = buffer_write b i v h in
let mem2 = I.down_mem (_ih h1) in
let base = buffer_addr b h in
let n = buffer_length b in
written_buffer_down64_aux1 b i v h base 0 h1 mem1 mem2;
written_buffer_down64_aux2 b i v h base n (i+1) h1 mem1 mem2
let unwritten_buffer_down (t:base_typ) (b:buffer t{buffer_writeable b})
(i:nat{i < buffer_length b})
(v:base_typ_as_vale_type t)
(h:vale_heap{List.memP b (IB.ptrs_of_mem (_ih h))})
: Lemma
(ensures (
let mem1 = I.down_mem (_ih h) in
let h1 = buffer_write b i v h in
let mem2 = I.down_mem (_ih h1) in
forall (a:b8{List.memP a (IB.ptrs_of_mem (_ih h)) /\ a =!= b}) j. {:pattern mem1.[j]; List.memP a (IB.ptrs_of_mem (_ih h)) \/ mem2.[j]; List.memP a (IB.ptrs_of_mem (_ih h))}
let base = (IB.addrs_of_mem (_ih h)) a in
j >= base /\ j < base + DV.length (get_downview a.bsrc) ==> mem1.[j] == mem2.[j]))
= let aux (a:b8{a =!= b /\ List.memP a (IB.ptrs_of_mem (_ih h))})
: Lemma
(ensures (
let base = (IB.addrs_of_mem (_ih h)) a in
let mem1 = I.down_mem (_ih h) in
let h1 = buffer_write b i v h in
let mem2 = I.down_mem (_ih h1) in
forall j.
j >= base /\ j < base + DV.length (get_downview a.bsrc) ==>
mem1.[j] == mem2.[j]))
= let db = get_downview a.bsrc in
if DV.length db = 0 then ()
else
let mem1 = I.down_mem (_ih h) in
let h1 = buffer_write b i v h in
let mem2 = I.down_mem (_ih h1) in
let base = (IB.addrs_of_mem (_ih h)) a in
let s0 = DV.as_seq (IB.hs_of_mem (_ih h)) db in
let s1 = DV.as_seq (IB.hs_of_mem (_ih h1)) db in
opaque_assert (`%IB.list_disjoint_or_eq) IB.list_disjoint_or_eq IB.list_disjoint_or_eq_def (MB.disjoint a.bsrc b.bsrc);
lemma_dv_equal (IB.down_view a.src) a.bsrc (IB.hs_of_mem (_ih h)) (IB.hs_of_mem (_ih h1));
assert (Seq.equal s0 s1);
assert (forall (j:int).{:pattern (mem1.[j])}
base <= j /\ j < base + Seq.length s0 ==> v_to_typ TUInt8 (Seq.index s0 (j - base)) == mem1.[j]);
heap_shift mem1 mem2 base (DV.length db)
in
Classical.forall_intro aux
let store_buffer_down64_mem
(b:buffer64{buffer_writeable b})
(i:nat{i < buffer_length b})
(v:nat64)
(h:vale_heap{List.memP b (IB.ptrs_of_mem (_ih h))})
: Lemma
(ensures (
let mem1 = I.down_mem (_ih h) in
let h1 = buffer_write b i v h in
let mem2 = I.down_mem (_ih h1) in
let base = buffer_addr b h in
forall (j:int). {:pattern mem1.[j] \/ mem2.[j]}
j < base + scale8 i \/ j >= base + scale8 (i+1) ==>
mem1.[j] == mem2.[j]))
= let mem1 = I.down_mem (_ih h) in
let h1 = buffer_write b i v h in
let mem2 = I.down_mem (_ih h1) in
let base = buffer_addr b h in
let n = buffer_length b in
let aux (j:int)
: Lemma
(j < base + scale8 i \/ j >= base + scale8 (i+1) ==>
mem1.[j] == mem2.[j])
=
I.addrs_set_lemma_all ();
if j >= base && j < base + DV.length (get_downview b.bsrc) then begin
written_buffer_down64 b i v h;
length_t_eq (TUInt64) b
end
else if not (I.valid_addr (_ih h) j)
then I.same_unspecified_down (IB.hs_of_mem (_ih h)) (IB.hs_of_mem (_ih h1)) (IB.ptrs_of_mem (_ih h))
else unwritten_buffer_down TUInt64 b i v h
in
Classical.forall_intro aux
let store_buffer_aux_down64_mem (ptr:int) (v:nat64) (h:vale_heap{writeable_mem64 ptr h})
: Lemma
(ensures (
let mem1 = I.down_mem (_ih h) in
let h1 = store_mem (TUInt64) ptr v h in
let mem2 = I.down_mem (_ih h1) in
forall j. {:pattern mem1.[j] \/ mem2.[j]}
j < ptr \/ j >= ptr + 8 ==>
mem1.[j] == mem2.[j]))
= let t = TUInt64 in
let h1 = store_mem t ptr v h in
let b = Some?.v (find_writeable_buffer t ptr h) in
length_t_eq t b;
let i = get_addr_in_ptr t (buffer_length b) (buffer_addr b h) ptr 0 in
store_buffer_write t ptr v h;
assert (buffer_addr b h + scale8 i == ptr);
assert (buffer_addr b h + scale8 (i+1) == ptr + 8);
store_buffer_down64_mem b i v h
let store_buffer_aux_down64_mem2 (ptr:int) (v:nat64) (h:vale_heap{writeable_mem64 ptr h})
: Lemma
(ensures (
let h1 = store_mem (TUInt64) ptr v h in
let mem2 = I.down_mem (_ih h1) in
S.get_heap_val64 ptr mem2 == v))
= let t = TUInt64 in
let b = Some?.v (find_writeable_buffer t ptr h) in
length_t_eq t b;
let i = get_addr_in_ptr t (buffer_length b) (buffer_addr b h) ptr 0 in
let h1 = store_mem t ptr v h in
let mem2 = I.down_mem (_ih h1) in
store_buffer_write t ptr v h;
assert (Seq.index (buffer_as_seq h1 b) i == v);
index64_get_heap_val64 h1 b mem2 i
let in_bounds64 (h:vale_heap) (b:buffer64) (i:nat{i < buffer_length b})
: Lemma (scale8 i + 8 <= DV.length (get_downview b.bsrc))
=
length_t_eq TUInt64 b
let bytes_valid64 ptr h =
reveal_opaque (`%S.valid_addr64) S.valid_addr64;
let t = TUInt64 in
let b = get_addr_ptr t ptr h in
let i = get_addr_in_ptr t (buffer_length b) (buffer_addr b h) ptr 0 in
in_bounds64 h b i;
I.addrs_set_mem (_ih h) b ptr;
I.addrs_set_mem (_ih h) b (ptr+1);
I.addrs_set_mem (_ih h) b (ptr+2);
I.addrs_set_mem (_ih h) b (ptr+3);
I.addrs_set_mem (_ih h) b (ptr+4);
I.addrs_set_mem (_ih h) b (ptr+5);
I.addrs_set_mem (_ih h) b (ptr+6);
I.addrs_set_mem (_ih h) b (ptr+7)
val same_mem_get_heap_val128 (b:buffer128)
(i:nat{i < buffer_length b})
(v:quad32)
(k:nat{k < buffer_length b})
(h1:vale_heap{List.memP b (IB.ptrs_of_mem (_ih h1)) /\ buffer_writeable b})
(h2:vale_heap{h2 == buffer_write b i v h1})
(mem1:S.machine_heap{IB.correct_down_p (_ih h1) mem1 b})
(mem2:S.machine_heap{IB.correct_down_p (_ih h2) mem2 b}) : Lemma
(requires (Seq.index (buffer_as_seq h1 b) k == Seq.index (buffer_as_seq h2 b) k))
(ensures (let ptr = buffer_addr b h1 + scale16 k in
forall i.{:pattern mem1.[i]} i >= ptr /\ i < ptr+16 ==> mem1.[i] == mem2.[i]))
val same_mem_eq_slices128 (b:buffer128)
(i:nat{i < buffer_length b})
(v:quad32)
(k:nat{k < buffer_length b})
(h1:vale_heap{List.memP b (IB.ptrs_of_mem (_ih h1)) /\ buffer_writeable b})
(h2:vale_heap{h2 == buffer_write b i v h1})
(mem1:S.machine_heap{IB.correct_down_p (_ih h1) mem1 b})
(mem2:S.machine_heap{IB.correct_down_p (_ih h2) mem2 b}) : Lemma
(requires (Seq.index (buffer_as_seq h1 b) k == Seq.index (buffer_as_seq h2 b) k))
(ensures (
k * 16 + 16 <= DV.length (get_downview b.bsrc) /\
Seq.slice (DV.as_seq (IB.hs_of_mem (_ih h1)) (get_downview b.bsrc)) (k * 16) (k * 16 + 16) ==
Seq.slice (DV.as_seq (IB.hs_of_mem (_ih h2)) (get_downview b.bsrc)) (k * 16) (k * 16 + 16)))
let same_mem_eq_slices128 b i v k h1 h2 mem1 mem2 =
let t = TUInt128 in
let db = get_downview b.bsrc in
let ub = UV.mk_buffer db (uint_view t) in
UV.as_seq_sel (IB.hs_of_mem (_ih h1)) ub k;
UV.as_seq_sel (IB.hs_of_mem (_ih h2)) ub k;
UV.put_sel (IB.hs_of_mem (_ih h1)) ub k;
UV.put_sel (IB.hs_of_mem (_ih h2)) ub k;
UV.length_eq ub
let length_up128 (b:buffer128) (h:vale_heap) (k:nat{k < buffer_length b}) (i:nat{i < 16}) : Lemma
(scale16 k + i <= DV.length (get_downview b.bsrc)) =
let vb = UV.mk_buffer (get_downview b.bsrc) uint128_view in
UV.length_eq vb
let same_mem_get_heap_val128 b j v k h1 h2 mem1 mem2 =
let ptr = buffer_addr b h1 + scale16 k in
let addr = buffer_addr b h1 in
let aux (i:nat{ptr <= i /\ i < ptr+16}) : Lemma (mem1.[i] == mem2.[i]) =
let db = get_downview b.bsrc in
let ub = UV.mk_buffer db uint128_view in
UV.as_seq_sel (IB.hs_of_mem (_ih h1)) ub k;
UV.as_seq_sel (IB.hs_of_mem (_ih h2)) ub k;
same_mem_eq_slices128 b j v k h1 h2 mem1 mem2;
let s1 = (Seq.slice (DV.as_seq (IB.hs_of_mem (_ih h1)) db) (k * 16) (k * 16 + 16)) in
let s2 = (Seq.slice (DV.as_seq (IB.hs_of_mem (_ih h2)) db) (k * 16) (k * 16 + 16)) in
assert (Seq.index s1 (i - ptr) == Seq.index (DV.as_seq (IB.hs_of_mem (_ih h1)) db) (k * 16 + (i-ptr)));
length_up128 b h1 k (i-ptr);
assert (mem1.[i] == UInt8.v (Seq.index (DV.as_seq (IB.hs_of_mem (_ih h1)) db) (k * 16 + (i-ptr))));
assert (Seq.index s2 (i-ptr) == Seq.index (DV.as_seq (IB.hs_of_mem (_ih h2)) db) (k * 16 + (i-ptr)));
length_up128 b h2 k (i-ptr);
assert (mem2.[addr+(scale16 k + (i-ptr))] == UInt8.v (Seq.index (DV.as_seq (IB.hs_of_mem (_ih h2)) db) (k * 16 + (i-ptr))));
assert (forall i. addr + (scale16 k + (i-ptr)) == i)
in
Classical.forall_intro aux
let in_bounds128 (h:vale_heap) (b:buffer128) (i:nat{i < buffer_length b}) : Lemma
(scale16 i + 16 <= DV.length (get_downview b.bsrc))
=
length_t_eq TUInt128 b
#push-options "--z3rlimit 20"
#restart-solver | false | false | Vale.X64.Memory_Sems.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 20,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val bytes_valid128 (i:int) (m:vale_heap) : Lemma
(requires valid_mem128 i m)
(ensures S.valid_addr128 i (get_heap m))
[SMTPat (S.valid_addr128 i (get_heap m))] | [] | Vale.X64.Memory_Sems.bytes_valid128 | {
"file_name": "vale/code/arch/x64/Vale.X64.Memory_Sems.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | i: Prims.int -> m: Vale.Arch.HeapImpl.vale_heap
-> FStar.Pervasives.Lemma (requires Vale.X64.Memory.valid_mem128 i m)
(ensures Vale.Arch.MachineHeap_s.valid_addr128 i (Vale.X64.Memory_Sems.get_heap m))
[SMTPat (Vale.Arch.MachineHeap_s.valid_addr128 i (Vale.X64.Memory_Sems.get_heap m))] | {
"end_col": 36,
"end_line": 560,
"start_col": 2,
"start_line": 539
} |
FStar.Pervasives.Lemma | val equiv_load_mem64 (ptr:int) (m:vale_heap) : Lemma
(requires valid_mem64 ptr m)
(ensures load_mem64 ptr m == S.get_heap_val64 ptr (get_heap m)) | [
{
"abbrev": true,
"full_module": "Vale.Interop.Base",
"short_module": "IB"
},
{
"abbrev": false,
"full_module": "Vale.Lib.BufferViewHelpers",
"short_module": null
},
{
"abbrev": true,
"full_module": "LowStar.BufferView.Down",
"short_module": "DV"
},
{
"abbrev": true,
"full_module": "LowStar.BufferView.Up",
"short_module": "UV"
},
{
"abbrev": true,
"full_module": "LowStar.Monotonic.Buffer",
"short_module": "MB"
},
{
"abbrev": true,
"full_module": "Vale.Interop",
"short_module": "I"
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": true,
"full_module": "Vale.Lib.Map16",
"short_module": "Map16"
},
{
"abbrev": true,
"full_module": "Vale.X64.Machine_Semantics_s",
"short_module": "S"
},
{
"abbrev": false,
"full_module": "Vale.Lib.Seqs",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.MachineHeap_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Heap",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.HeapImpl",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let equiv_load_mem64 ptr h =
let t = TUInt64 in
let b = get_addr_ptr t ptr h in
let i = get_addr_in_ptr t (buffer_length b) (buffer_addr b h) ptr 0 in
let addr = buffer_addr b h in
let contents = DV.as_seq (_ih h).IB.hs (get_downview b.bsrc) in
let heap = get_heap h in
index64_get_heap_val64 h b heap i;
lemma_load_mem64 b i h | val equiv_load_mem64 (ptr:int) (m:vale_heap) : Lemma
(requires valid_mem64 ptr m)
(ensures load_mem64 ptr m == S.get_heap_val64 ptr (get_heap m))
let equiv_load_mem64 ptr h = | false | null | true | let t = TUInt64 in
let b = get_addr_ptr t ptr h in
let i = get_addr_in_ptr t (buffer_length b) (buffer_addr b h) ptr 0 in
let addr = buffer_addr b h in
let contents = DV.as_seq (_ih h).IB.hs (get_downview b.bsrc) in
let heap = get_heap h in
index64_get_heap_val64 h b heap i;
lemma_load_mem64 b i h | {
"checked_file": "Vale.X64.Memory_Sems.fst.checked",
"dependencies": [
"Vale.X64.Memory.fst.checked",
"Vale.X64.Memory.fst.checked",
"Vale.X64.Machine_Semantics_s.fst.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.BufferViewStore.fsti.checked",
"Vale.Lib.BufferViewHelpers.fst.checked",
"Vale.Interop.Views.fsti.checked",
"Vale.Interop.Heap_s.fst.checked",
"Vale.Interop.Base.fst.checked",
"Vale.Interop.fsti.checked",
"Vale.Def.Words_s.fsti.checked",
"Vale.Def.Words.Two_s.fsti.checked",
"Vale.Def.Words.Four_s.fsti.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Prop_s.fst.checked",
"Vale.Def.Opaque_s.fsti.checked",
"Vale.Arch.MachineHeap.fsti.checked",
"Vale.Arch.HeapImpl.fsti.checked",
"Vale.Arch.Heap.fsti.checked",
"prims.fst.checked",
"LowStar.Monotonic.Buffer.fsti.checked",
"LowStar.BufferView.Up.fsti.checked",
"LowStar.BufferView.Down.fsti.checked",
"FStar.UInt8.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.Set.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Option.fst.checked",
"FStar.Mul.fst.checked",
"FStar.Map.fsti.checked",
"FStar.List.fst.checked",
"FStar.Classical.fsti.checked"
],
"interface_file": true,
"source_file": "Vale.X64.Memory_Sems.fst"
} | [
"lemma"
] | [
"Prims.int",
"Vale.Arch.HeapImpl.vale_heap",
"Vale.X64.Memory.lemma_load_mem64",
"Prims.unit",
"Vale.X64.Memory.index64_get_heap_val64",
"Vale.Arch.MachineHeap_s.machine_heap",
"Vale.X64.Memory_Sems.same_domain",
"Vale.X64.Memory_Sems.get_heap",
"FStar.Seq.Properties.lseq",
"FStar.UInt8.t",
"LowStar.BufferView.Down.length",
"Vale.Interop.Types.get_downview",
"Vale.Interop.Types.__proj__Buffer__item__src",
"Vale.Interop.Types.b8_preorder",
"Vale.Interop.Types.__proj__Buffer__item__writeable",
"Vale.Interop.Types.base_typ_as_type",
"Vale.Interop.Types.__proj__Buffer__item__bsrc",
"LowStar.BufferView.Down.as_seq",
"Vale.Interop.Heap_s.__proj__InteropHeap__item__hs",
"Vale.Arch.HeapImpl._ih",
"Vale.X64.Memory.buffer_addr",
"Prims.nat",
"Vale.X64.Memory.get_addr_in_ptr",
"Vale.X64.Memory.buffer_length",
"Vale.Arch.HeapImpl.buffer",
"Vale.X64.Memory.get_addr_ptr",
"Vale.Arch.HeapTypes_s.base_typ",
"Vale.Arch.HeapTypes_s.TUInt64"
] | [] | module Vale.X64.Memory_Sems
open FStar.Mul
open Vale.Def.Prop_s
open Vale.Def.Opaque_s
open Vale.X64.Machine_s
open Vale.X64.Memory
open Vale.Def.Words_s
module I = Vale.Interop
module S = Vale.X64.Machine_Semantics_s
module MB = LowStar.Monotonic.Buffer
module UV = LowStar.BufferView.Up
module DV = LowStar.BufferView.Down
open Vale.Lib.BufferViewHelpers
open Vale.Arch.HeapImpl
open Vale.Arch.Heap
friend Vale.X64.Memory
module IB = Vale.Interop.Base
let same_domain h m = Set.equal (IB.addrs_set (_ih h)) (Map.domain m)
let lemma_same_domains h m1 m2 = ()
let get_heap h = I.down_mem (_ih h)
let upd_heap h m = mi_heap_upd h m
//let lemma_upd_get_heap h = I.down_up_identity (_ih h)
let lemma_get_upd_heap h m = I.up_down_identity (_ih h) m
let lemma_heap_impl = ()
let lemma_heap_get_heap h = ()
let lemma_heap_taint h = ()
//let lemma_heap_upd_heap h mh mt = ()
[@"opaque_to_smt"]
let rec set_of_range (a:int) (n:nat) : Pure (Set.set int)
(requires True)
(ensures fun s -> (forall (i:int).{:pattern Set.mem i s} Set.mem i s <==> a <= i /\ i < a + n))
=
if n = 0 then Set.empty else Set.union (set_of_range a (n - 1)) (Set.singleton (a + n - 1))
let buffer_info_has_addr (bi:buffer_info) (a:int) =
let b = bi.bi_buffer in
let addr = Vale.Interop.Heap_s.global_addrs_map b in
let len = DV.length (get_downview b.bsrc) in
addr <= a /\ a < addr + len
let buffer_info_has_addr_opt (bi:option buffer_info) (a:int) =
match bi with
| None -> False
| Some bi -> buffer_info_has_addr bi a
#set-options "--z3rlimit 100"
let rec make_owns_rec (h:vale_heap) (bs:Seq.seq buffer_info) (n:nat{n <= Seq.length bs}) :
GTot ((int -> option (n:nat{n < Seq.length bs})) & (heaplet_id -> Set.set int))
=
if n = 0 then ((fun _ -> None), (fun _ -> Set.empty)) else
let (m0, s0) = make_owns_rec h bs (n - 1) in
let bi = Seq.index bs (n - 1) in
let b = bi.bi_buffer in
let hi = bi.bi_heaplet in
let addr = Vale.Interop.Heap_s.global_addrs_map b in
let len = DV.length (get_downview b.bsrc) in
let s_b = set_of_range addr len in
let s i = if i = hi then Set.union (s0 i) s_b else s0 i in
let m a = if addr <= a && a < addr + len then Some (n - 1) else m0 a in
(m, s)
[@"opaque_to_smt"]
let make_owns (h:vale_heap) (bs:Seq.seq buffer_info) (n:nat{n <= Seq.length bs}) :
GTot ((int -> option (n:nat{n < Seq.length bs})) & (heaplet_id -> Set.set int))
=
make_owns_rec h bs n
let rec lemma_make_owns (h:vale_heap) (bs:Seq.seq buffer_info) (n:nat) : Lemma
(requires
n <= Seq.length bs /\
(forall (i:nat).{:pattern Seq.index bs i} i < Seq.length bs ==> buffer_readable h (Seq.index bs i).bi_buffer) /\
(forall (i1 i2:nat).{:pattern (Seq.index bs i1); (Seq.index bs i2)}
i1 < Seq.length bs /\ i2 < Seq.length bs ==> buffer_info_disjoint (Seq.index bs i1) (Seq.index bs i2))
)
(ensures (
let (m, s) = make_owns h bs n in
(forall (i:heaplet_id) (a:int).{:pattern Set.mem a (s i)}
(Set.mem a (s i) <==> Option.mapTot (fun n -> (Seq.index bs n).bi_heaplet) (m a) == Some i) /\
(Set.mem a (s i) ==> buffer_info_has_addr_opt (Option.mapTot (fun n -> Seq.index bs n) (m a)) a) /\
(Set.mem a (s i) ==> Set.mem a (Map.domain h.mh))
) /\
(forall (k:nat) (a:int).{:pattern Set.mem a (s (Seq.index bs k).bi_heaplet)}
k < n /\ buffer_info_has_addr (Seq.index bs k) a ==> Set.mem a (s (Seq.index bs k).bi_heaplet))
))
=
reveal_opaque (`%make_owns) make_owns;
if n = 0 then () else
let _ = make_owns h bs (n - 1) in
let (m, s) = make_owns h bs n in
lemma_make_owns h bs (n - 1);
let bi = Seq.index bs (n - 1) in
let b = bi.bi_buffer in
let hi = bi.bi_heaplet in
let addr = Vale.Interop.Heap_s.global_addrs_map b in
let len = DV.length (get_downview b.bsrc) in
let s_b = set_of_range addr len in
let lem1 (a:int) : Lemma
(requires Set.mem a s_b)
(ensures Set.mem a (Map.domain h.mh))
[SMTPat (Set.mem a (Map.domain h.mh))]
=
I.addrs_set_mem h.ih b a
in
let lem2 (i:heaplet_id) (a:int) : Lemma
(requires i =!= hi /\ Set.mem a (Set.intersect s_b (s i)))
(ensures False)
[SMTPat (Set.mem a (s i))]
=
reveal_opaque (`%addr_map_pred) addr_map_pred
in
()
#push-options "--initial_fuel 1 --max_fuel 1 --initial_ifuel 2 --max_ifuel 2"
let rec lemma_loc_mutable_buffers_rec (l:list buffer_info) (s:Seq.seq buffer_info) (n:nat) : Lemma
(requires
n + List.length l == Seq.length s /\
list_to_seq_post l s n
)
(ensures (
let modloc = loc_mutable_buffers l in
forall (i:nat).{:pattern Seq.index s i} n <= i /\ i < Seq.length s ==> (
let bi = Seq.index s i in
bi.bi_mutable == Mutable ==> loc_includes modloc (loc_buffer bi.bi_buffer))
))
(decreases l)
=
match l with
| [] -> ()
| h::t -> lemma_loc_mutable_buffers_rec t s (n + 1)
#pop-options
let lemma_loc_mutable_buffers (l:list buffer_info) : Lemma
(ensures (
let s = list_to_seq l in
forall (i:nat).{:pattern Seq.index s i} i < Seq.length s ==> (
let bi = Seq.index s i in
bi.bi_mutable == Mutable ==> loc_includes (loc_mutable_buffers l) (loc_buffer bi.bi_buffer))
))
=
lemma_list_to_seq l;
lemma_loc_mutable_buffers_rec l (list_to_seq l) 0
let create_heaplets buffers h1 =
let bs = list_to_seq buffers in
let modloc = loc_mutable_buffers buffers in
let layout1 = h1.vf_layout in
let layin1 = layout1.vl_inner in
let (hmap, hsets) = make_owns h1.vf_heap bs (Seq.length bs) in
let hmap a = Option.mapTot (fun n -> (Seq.index bs n).bi_heaplet) (hmap a) in
let l = {
vl_heaplets_initialized = true;
vl_heaplet_map = hmap;
vl_heaplet_sets = hsets;
vl_old_heap = h1.vf_heap;
vl_buffers = bs;
vl_mod_loc = modloc;
} in
let layout2 = {layout1 with vl_inner = l} in
let h2 = {
vf_layout = layout2;
vf_heap = h1.vf_heap;
vf_heaplets = h1.vf_heaplets;
} in
h2
let lemma_create_heaplets buffers h1 =
let bs = list_to_seq buffers in
let h2 = create_heaplets buffers h1 in
assert (h2.vf_layout.vl_inner.vl_buffers == bs); // REVIEW: why is this necessary, even with extra ifuel?
lemma_make_owns h1.vf_heap bs (Seq.length bs);
lemma_loc_mutable_buffers buffers;
reveal_opaque (`%valid_layout_buffer_id) valid_layout_buffer_id;
()
let destroy_heaplets h1 =
h1
let lemma_destroy_heaplets h1 =
()
val heap_shift (m1 m2:S.machine_heap) (base:int) (n:nat) : Lemma
(requires (forall i. 0 <= i /\ i < n ==> m1.[base + i] == m2.[base + i]))
(ensures (forall i. {:pattern (m1.[i])} base <= i /\ i < base + n ==> m1.[i] == m2.[i]))
#push-options "--smtencoding.l_arith_repr boxwrap"
let heap_shift m1 m2 base n =
assert (forall i. base <= i /\ i < base + n ==>
m1.[base + (i - base)] == m2.[base + (i - base)])
#pop-options
val same_mem_eq_slices64 (b:buffer64{buffer_writeable b})
(i:nat{i < buffer_length b})
(v:nat64)
(k:nat{k < buffer_length b})
(h1:vale_heap{List.memP b (IB.ptrs_of_mem (_ih h1))})
(h2:vale_heap{h2 == buffer_write b i v h1})
(mem1:S.machine_heap{IB.correct_down_p (_ih h1) mem1 b})
(mem2:S.machine_heap{IB.correct_down_p (_ih h2) mem2 b}) : Lemma
(requires (Seq.index (buffer_as_seq h1 b) k == Seq.index (buffer_as_seq h2 b) k))
(ensures (
k * 8 + 8 <= DV.length (get_downview b.bsrc) /\
Seq.slice (DV.as_seq (IB.hs_of_mem (_ih h1)) (get_downview b.bsrc)) (k * 8) (k * 8 + 8) ==
Seq.slice (DV.as_seq (IB.hs_of_mem (_ih h2)) (get_downview b.bsrc)) (k * 8) (k * 8 + 8)))
let same_mem_eq_slices64 b i v k h1 h2 mem1 mem2 =
let t = TUInt64 in
let db = get_downview b.bsrc in
let ub = UV.mk_buffer db (uint_view t) in
UV.as_seq_sel (IB.hs_of_mem (_ih h1)) ub k;
UV.as_seq_sel (IB.hs_of_mem (_ih h2)) ub k;
UV.put_sel (IB.hs_of_mem (_ih h1)) ub k;
UV.put_sel (IB.hs_of_mem (_ih h2)) ub k;
UV.length_eq ub
let length_up64 (b:buffer64) (h:vale_heap) (k:nat{k < buffer_length b}) (i:nat{i < 8}) : Lemma
(scale8 k + i <= DV.length (get_downview b.bsrc)) =
let vb = UV.mk_buffer (get_downview b.bsrc) uint64_view in
UV.length_eq vb
val same_mem_get_heap_val64 (b:buffer64{buffer_writeable b})
(i:nat{i < buffer_length b})
(v:nat64)
(k:nat{k < buffer_length b})
(h1:vale_heap{List.memP b (IB.ptrs_of_mem (_ih h1))})
(h2:vale_heap{h2 == buffer_write b i v h1})
(mem1:S.machine_heap{IB.correct_down_p (_ih h1) mem1 b})
(mem2:S.machine_heap{IB.correct_down_p (_ih h2) mem2 b}) : Lemma
(requires (Seq.index (buffer_as_seq h1 b) k == Seq.index (buffer_as_seq h2 b) k))
(ensures (let ptr = buffer_addr b h1 + scale8 k in
forall (x:int).{:pattern (mem1.[x])} ptr <= x /\ x < ptr + 8 ==> mem1.[x] == mem2.[x]))
let same_mem_get_heap_val64 b j v k h1 h2 mem1 mem2 =
let ptr = buffer_addr b h1 + scale8 k in
let addr = buffer_addr b h1 in
let aux (x:int{ptr <= x /\ x < ptr + 8}) : Lemma (mem1.[x] == mem2.[x]) =
let i = x - ptr in
let db = get_downview b.bsrc in
let ub = UV.mk_buffer db uint64_view in
UV.as_seq_sel (IB.hs_of_mem (_ih h1)) ub k;
UV.as_seq_sel (IB.hs_of_mem (_ih h2)) ub k;
same_mem_eq_slices64 b j v k h1 h2 mem1 mem2;
let s1 = (Seq.slice (DV.as_seq (IB.hs_of_mem (_ih h1)) db) (k * 8) (k * 8 + 8)) in
let s2 = (Seq.slice (DV.as_seq (IB.hs_of_mem (_ih h2)) db) (k * 8) (k * 8 + 8)) in
assert (Seq.index s1 i == Seq.index (DV.as_seq (IB.hs_of_mem (_ih h1)) db) (k * 8 + i));
length_up64 b h1 k i;
assert (mem1.[addr+(scale8 k + i)] == UInt8.v (Seq.index (DV.as_seq (IB.hs_of_mem (_ih h1)) db) (k * 8 + i)));
assert (Seq.index s2 i == Seq.index (DV.as_seq (IB.hs_of_mem (_ih h2)) db) (k * 8 + i));
length_up64 b h2 k i;
assert (mem2.[addr+(scale8 k + i)] == UInt8.v (Seq.index (DV.as_seq (IB.hs_of_mem (_ih h2)) db) (k * 8 + i)))
in
Classical.forall_intro aux;
assert (forall i. addr + (scale8 k + i) == ptr + i)
let rec written_buffer_down64_aux1
(b:buffer64{buffer_writeable b})
(i:nat{i < buffer_length b})
(v:nat64)
(h:vale_heap{List.memP b (IB.ptrs_of_mem (_ih h))})
(base:nat{base == buffer_addr b h})
(k:nat) (h1:vale_heap{h1 == buffer_write b i v h})
(mem1:S.machine_heap{IB.correct_down (_ih h) mem1})
(mem2:S.machine_heap{IB.correct_down (_ih h1) mem2 /\
(forall j.{:pattern (mem1.[j]) \/ (mem2.[j])}
base <= j /\ j < base + k * 8 ==>
mem1.[j] == mem2.[j])})
: Lemma
(ensures (forall j. {:pattern (mem1.[j]) \/ (mem1.[j])}
j >= base /\ j < base + scale8 i ==>
mem1.[j] == mem2.[j]))
(decreases %[i-k]) =
if k >= i then ()
else begin
let ptr = base + scale8 k in
same_mem_get_heap_val64 b i v k h h1 mem1 mem2;
heap_shift mem1 mem2 ptr 8;
written_buffer_down64_aux1 b i v h base (k+1) h1 mem1 mem2
end
let rec written_buffer_down64_aux2
(b:buffer64{buffer_writeable b})
(i:nat{i < buffer_length b})
(v:nat64)
(h:vale_heap{List.memP b (IB.ptrs_of_mem (_ih h))})
(base:nat{base == buffer_addr b h})
(n:nat{n == buffer_length b})
(k:nat{k > i}) (h1:vale_heap{h1 == buffer_write b i v h})
(mem1:S.machine_heap{IB.correct_down (_ih h) mem1})
(mem2:S.machine_heap{IB.correct_down (_ih h1) mem2 /\
(forall j. {:pattern (mem1.[j]) \/ (mem2.[j])}
base + scale8 (i+1) <= j /\ j < base + k * 8 ==>
mem1.[j] == mem2.[j])})
: Lemma
(ensures (forall j. {:pattern (mem1.[j]) \/ (mem2.[j])}
j >= base + scale8 (i+1) /\ j < base + scale8 n ==>
mem1.[j] == mem2.[j]))
(decreases %[n-k]) =
if k >= n then ()
else begin
let ptr = base + scale8 k in
same_mem_get_heap_val64 b i v k h h1 mem1 mem2;
heap_shift mem1 mem2 ptr 8;
written_buffer_down64_aux2 b i v h base n (k+1) h1 mem1 mem2
end
let written_buffer_down64 (b:buffer64{buffer_writeable b}) (i:nat{i < buffer_length b}) (v:nat64) (h:vale_heap)
: Lemma
(requires List.memP b (IB.ptrs_of_mem (_ih h)))
(ensures (
let mem1 = I.down_mem (_ih h) in
let h1 = buffer_write b i v h in
let mem2 = I.down_mem (_ih h1) in
let base = buffer_addr b h in
let n = buffer_length b in
forall j. {:pattern (mem1.[j]) \/ (mem2.[j])}
(base <= j /\ j < base + scale8 i) \/
(base + scale8 (i+1) <= j /\ j < base + scale8 n) ==>
mem1.[j] == mem2.[j]))
= let mem1 = I.down_mem (_ih h) in
let h1 = buffer_write b i v h in
let mem2 = I.down_mem (_ih h1) in
let base = buffer_addr b h in
let n = buffer_length b in
written_buffer_down64_aux1 b i v h base 0 h1 mem1 mem2;
written_buffer_down64_aux2 b i v h base n (i+1) h1 mem1 mem2
let unwritten_buffer_down (t:base_typ) (b:buffer t{buffer_writeable b})
(i:nat{i < buffer_length b})
(v:base_typ_as_vale_type t)
(h:vale_heap{List.memP b (IB.ptrs_of_mem (_ih h))})
: Lemma
(ensures (
let mem1 = I.down_mem (_ih h) in
let h1 = buffer_write b i v h in
let mem2 = I.down_mem (_ih h1) in
forall (a:b8{List.memP a (IB.ptrs_of_mem (_ih h)) /\ a =!= b}) j. {:pattern mem1.[j]; List.memP a (IB.ptrs_of_mem (_ih h)) \/ mem2.[j]; List.memP a (IB.ptrs_of_mem (_ih h))}
let base = (IB.addrs_of_mem (_ih h)) a in
j >= base /\ j < base + DV.length (get_downview a.bsrc) ==> mem1.[j] == mem2.[j]))
= let aux (a:b8{a =!= b /\ List.memP a (IB.ptrs_of_mem (_ih h))})
: Lemma
(ensures (
let base = (IB.addrs_of_mem (_ih h)) a in
let mem1 = I.down_mem (_ih h) in
let h1 = buffer_write b i v h in
let mem2 = I.down_mem (_ih h1) in
forall j.
j >= base /\ j < base + DV.length (get_downview a.bsrc) ==>
mem1.[j] == mem2.[j]))
= let db = get_downview a.bsrc in
if DV.length db = 0 then ()
else
let mem1 = I.down_mem (_ih h) in
let h1 = buffer_write b i v h in
let mem2 = I.down_mem (_ih h1) in
let base = (IB.addrs_of_mem (_ih h)) a in
let s0 = DV.as_seq (IB.hs_of_mem (_ih h)) db in
let s1 = DV.as_seq (IB.hs_of_mem (_ih h1)) db in
opaque_assert (`%IB.list_disjoint_or_eq) IB.list_disjoint_or_eq IB.list_disjoint_or_eq_def (MB.disjoint a.bsrc b.bsrc);
lemma_dv_equal (IB.down_view a.src) a.bsrc (IB.hs_of_mem (_ih h)) (IB.hs_of_mem (_ih h1));
assert (Seq.equal s0 s1);
assert (forall (j:int).{:pattern (mem1.[j])}
base <= j /\ j < base + Seq.length s0 ==> v_to_typ TUInt8 (Seq.index s0 (j - base)) == mem1.[j]);
heap_shift mem1 mem2 base (DV.length db)
in
Classical.forall_intro aux
let store_buffer_down64_mem
(b:buffer64{buffer_writeable b})
(i:nat{i < buffer_length b})
(v:nat64)
(h:vale_heap{List.memP b (IB.ptrs_of_mem (_ih h))})
: Lemma
(ensures (
let mem1 = I.down_mem (_ih h) in
let h1 = buffer_write b i v h in
let mem2 = I.down_mem (_ih h1) in
let base = buffer_addr b h in
forall (j:int). {:pattern mem1.[j] \/ mem2.[j]}
j < base + scale8 i \/ j >= base + scale8 (i+1) ==>
mem1.[j] == mem2.[j]))
= let mem1 = I.down_mem (_ih h) in
let h1 = buffer_write b i v h in
let mem2 = I.down_mem (_ih h1) in
let base = buffer_addr b h in
let n = buffer_length b in
let aux (j:int)
: Lemma
(j < base + scale8 i \/ j >= base + scale8 (i+1) ==>
mem1.[j] == mem2.[j])
=
I.addrs_set_lemma_all ();
if j >= base && j < base + DV.length (get_downview b.bsrc) then begin
written_buffer_down64 b i v h;
length_t_eq (TUInt64) b
end
else if not (I.valid_addr (_ih h) j)
then I.same_unspecified_down (IB.hs_of_mem (_ih h)) (IB.hs_of_mem (_ih h1)) (IB.ptrs_of_mem (_ih h))
else unwritten_buffer_down TUInt64 b i v h
in
Classical.forall_intro aux
let store_buffer_aux_down64_mem (ptr:int) (v:nat64) (h:vale_heap{writeable_mem64 ptr h})
: Lemma
(ensures (
let mem1 = I.down_mem (_ih h) in
let h1 = store_mem (TUInt64) ptr v h in
let mem2 = I.down_mem (_ih h1) in
forall j. {:pattern mem1.[j] \/ mem2.[j]}
j < ptr \/ j >= ptr + 8 ==>
mem1.[j] == mem2.[j]))
= let t = TUInt64 in
let h1 = store_mem t ptr v h in
let b = Some?.v (find_writeable_buffer t ptr h) in
length_t_eq t b;
let i = get_addr_in_ptr t (buffer_length b) (buffer_addr b h) ptr 0 in
store_buffer_write t ptr v h;
assert (buffer_addr b h + scale8 i == ptr);
assert (buffer_addr b h + scale8 (i+1) == ptr + 8);
store_buffer_down64_mem b i v h
let store_buffer_aux_down64_mem2 (ptr:int) (v:nat64) (h:vale_heap{writeable_mem64 ptr h})
: Lemma
(ensures (
let h1 = store_mem (TUInt64) ptr v h in
let mem2 = I.down_mem (_ih h1) in
S.get_heap_val64 ptr mem2 == v))
= let t = TUInt64 in
let b = Some?.v (find_writeable_buffer t ptr h) in
length_t_eq t b;
let i = get_addr_in_ptr t (buffer_length b) (buffer_addr b h) ptr 0 in
let h1 = store_mem t ptr v h in
let mem2 = I.down_mem (_ih h1) in
store_buffer_write t ptr v h;
assert (Seq.index (buffer_as_seq h1 b) i == v);
index64_get_heap_val64 h1 b mem2 i
let in_bounds64 (h:vale_heap) (b:buffer64) (i:nat{i < buffer_length b})
: Lemma (scale8 i + 8 <= DV.length (get_downview b.bsrc))
=
length_t_eq TUInt64 b
let bytes_valid64 ptr h =
reveal_opaque (`%S.valid_addr64) S.valid_addr64;
let t = TUInt64 in
let b = get_addr_ptr t ptr h in
let i = get_addr_in_ptr t (buffer_length b) (buffer_addr b h) ptr 0 in
in_bounds64 h b i;
I.addrs_set_mem (_ih h) b ptr;
I.addrs_set_mem (_ih h) b (ptr+1);
I.addrs_set_mem (_ih h) b (ptr+2);
I.addrs_set_mem (_ih h) b (ptr+3);
I.addrs_set_mem (_ih h) b (ptr+4);
I.addrs_set_mem (_ih h) b (ptr+5);
I.addrs_set_mem (_ih h) b (ptr+6);
I.addrs_set_mem (_ih h) b (ptr+7)
val same_mem_get_heap_val128 (b:buffer128)
(i:nat{i < buffer_length b})
(v:quad32)
(k:nat{k < buffer_length b})
(h1:vale_heap{List.memP b (IB.ptrs_of_mem (_ih h1)) /\ buffer_writeable b})
(h2:vale_heap{h2 == buffer_write b i v h1})
(mem1:S.machine_heap{IB.correct_down_p (_ih h1) mem1 b})
(mem2:S.machine_heap{IB.correct_down_p (_ih h2) mem2 b}) : Lemma
(requires (Seq.index (buffer_as_seq h1 b) k == Seq.index (buffer_as_seq h2 b) k))
(ensures (let ptr = buffer_addr b h1 + scale16 k in
forall i.{:pattern mem1.[i]} i >= ptr /\ i < ptr+16 ==> mem1.[i] == mem2.[i]))
val same_mem_eq_slices128 (b:buffer128)
(i:nat{i < buffer_length b})
(v:quad32)
(k:nat{k < buffer_length b})
(h1:vale_heap{List.memP b (IB.ptrs_of_mem (_ih h1)) /\ buffer_writeable b})
(h2:vale_heap{h2 == buffer_write b i v h1})
(mem1:S.machine_heap{IB.correct_down_p (_ih h1) mem1 b})
(mem2:S.machine_heap{IB.correct_down_p (_ih h2) mem2 b}) : Lemma
(requires (Seq.index (buffer_as_seq h1 b) k == Seq.index (buffer_as_seq h2 b) k))
(ensures (
k * 16 + 16 <= DV.length (get_downview b.bsrc) /\
Seq.slice (DV.as_seq (IB.hs_of_mem (_ih h1)) (get_downview b.bsrc)) (k * 16) (k * 16 + 16) ==
Seq.slice (DV.as_seq (IB.hs_of_mem (_ih h2)) (get_downview b.bsrc)) (k * 16) (k * 16 + 16)))
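// Proved by relating sequence selection on the 128-bit up-view (UV.as_seq_sel,
// UV.put_sel) to slices of the down-view, plus the view's length equation.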
let same_mem_eq_slices128 b i v k h1 h2 mem1 mem2 =
let t = TUInt128 in
let db = get_downview b.bsrc in
let ub = UV.mk_buffer db (uint_view t) in
UV.as_seq_sel (IB.hs_of_mem (_ih h1)) ub k;
UV.as_seq_sel (IB.hs_of_mem (_ih h2)) ub k;
UV.put_sel (IB.hs_of_mem (_ih h1)) ub k;
UV.put_sel (IB.hs_of_mem (_ih h2)) ub k;
UV.length_eq ub
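// Bounds a byte offset inside element k (scale16 k + i with i < 16) by the
// length of the underlying byte view, via the 128-bit view's length equation.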
let length_up128 (b:buffer128) (h:vale_heap) (k:nat{k < buffer_length b}) (i:nat{i < 16}) : Lemma
(scale16 k + i <= DV.length (get_downview b.bsrc)) =
let vb = UV.mk_buffer (get_downview b.bsrc) uint128_view in
UV.length_eq vb
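// The proof goes byte by byte: for each address in [ptr, ptr+16) both heaps
// are rewritten through the 16-byte slices given by same_mem_eq_slices128,
// and the pointwise fact is then generalized with Classical.forall_intro.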
let same_mem_get_heap_val128 b j v k h1 h2 mem1 mem2 =
let ptr = buffer_addr b h1 + scale16 k in
let addr = buffer_addr b h1 in
let aux (i:nat{ptr <= i /\ i < ptr+16}) : Lemma (mem1.[i] == mem2.[i]) =
let db = get_downview b.bsrc in
let ub = UV.mk_buffer db uint128_view in
UV.as_seq_sel (IB.hs_of_mem (_ih h1)) ub k;
UV.as_seq_sel (IB.hs_of_mem (_ih h2)) ub k;
same_mem_eq_slices128 b j v k h1 h2 mem1 mem2;
let s1 = (Seq.slice (DV.as_seq (IB.hs_of_mem (_ih h1)) db) (k * 16) (k * 16 + 16)) in
let s2 = (Seq.slice (DV.as_seq (IB.hs_of_mem (_ih h2)) db) (k * 16) (k * 16 + 16)) in
assert (Seq.index s1 (i - ptr) == Seq.index (DV.as_seq (IB.hs_of_mem (_ih h1)) db) (k * 16 + (i-ptr)));
length_up128 b h1 k (i-ptr);
assert (mem1.[i] == UInt8.v (Seq.index (DV.as_seq (IB.hs_of_mem (_ih h1)) db) (k * 16 + (i-ptr))));
assert (Seq.index s2 (i-ptr) == Seq.index (DV.as_seq (IB.hs_of_mem (_ih h2)) db) (k * 16 + (i-ptr)));
length_up128 b h2 k (i-ptr);
assert (mem2.[addr+(scale16 k + (i-ptr))] == UInt8.v (Seq.index (DV.as_seq (IB.hs_of_mem (_ih h2)) db) (k * 16 + (i-ptr))));
assert (forall i. addr + (scale16 k + (i-ptr)) == i)
in
Classical.forall_intro aux
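// Illustrative sketch only (hypothetical `*_sketch` names, not part of the
// Vale development): the Classical.forall_intro idiom used above -- prove the
// property pointwise with an auxiliary lemma, then quantify it.
let aux_sketch (x:nat) : Lemma (x + 1 > 0) = ()
let forall_sketch () : Lemma (forall (x:nat). x + 1 > 0) = Classical.forall_intro aux_sketch

// Element i of a buffer128 occupies 16 in-bounds bytes of the underlying
// byte view (the 128-bit analogue of in_bounds64).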
let in_bounds128 (h:vale_heap) (b:buffer128) (i:nat{i < buffer_length b}) : Lemma
(scale16 i + 16 <= DV.length (get_downview b.bsrc))
=
length_t_eq TUInt128 b
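// bytes_valid128 mirrors bytes_valid64, discharging membership of all sixteen
// byte addresses ptr..ptr+15; the pragmas below adjust the solver budget and
// restart Z3 for this larger goal.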
#push-options "--z3rlimit 20"
#restart-solver
let bytes_valid128 ptr h =
reveal_opaque (`%S.valid_addr128) S.valid_addr128;
IB.list_disjoint_or_eq_reveal ();
let t = TUInt128 in
let b = get_addr_ptr t ptr h in
let i = get_addr_in_ptr t (buffer_length b) (buffer_addr b h) ptr 0 in
in_bounds128 h b i;
I.addrs_set_mem (_ih h) b ptr;
I.addrs_set_mem (_ih h) b (ptr+1);
I.addrs_set_mem (_ih h) b (ptr+2);
I.addrs_set_mem (_ih h) b (ptr+3);
I.addrs_set_mem (_ih h) b (ptr+4);
I.addrs_set_mem (_ih h) b (ptr+5);
I.addrs_set_mem (_ih h) b (ptr+6);
I.addrs_set_mem (_ih h) b (ptr+7);
I.addrs_set_mem (_ih h) b (ptr+8);
I.addrs_set_mem (_ih h) b (ptr+9);
I.addrs_set_mem (_ih h) b (ptr+10);
I.addrs_set_mem (_ih h) b (ptr+11);
I.addrs_set_mem (_ih h) b (ptr+12);
I.addrs_set_mem (_ih h) b (ptr+13);
I.addrs_set_mem (_ih h) b (ptr+14);
I.addrs_set_mem (_ih h) b (ptr+15)
#pop-options | false | false | Vale.X64.Memory_Sems.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 100,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val equiv_load_mem64 (ptr:int) (m:vale_heap) : Lemma
(requires valid_mem64 ptr m)
(ensures load_mem64 ptr m == S.get_heap_val64 ptr (get_heap m)) | [] | Vale.X64.Memory_Sems.equiv_load_mem64 | {
"file_name": "vale/code/arch/x64/Vale.X64.Memory_Sems.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | ptr: Prims.int -> m: Vale.Arch.HeapImpl.vale_heap
-> FStar.Pervasives.Lemma (requires Vale.X64.Memory.valid_mem64 ptr m)
(ensures
Vale.X64.Memory.load_mem64 ptr m ==
Vale.Arch.MachineHeap_s.get_heap_val64 ptr (Vale.X64.Memory_Sems.get_heap m)) | {
"end_col": 24,
"end_line": 571,
"start_col": 28,
"start_line": 563
} |