effect
stringclasses 48
values | original_source_type
stringlengths 0
23k
| opens_and_abbrevs
listlengths 2
92
| isa_cross_project_example
bool 1
class | source_definition
stringlengths 9
57.9k
| partial_definition
stringlengths 7
23.3k
| is_div
bool 2
classes | is_type
null | is_proof
bool 2
classes | completed_definiton
stringlengths 1
250k
| dependencies
dict | effect_flags
sequencelengths 0
2
| ideal_premises
sequencelengths 0
236
| mutual_with
sequencelengths 0
11
| file_context
stringlengths 0
407k
| interleaved
bool 1
class | is_simply_typed
bool 2
classes | file_name
stringlengths 5
48
| vconfig
dict | is_simple_lemma
null | source_type
stringlengths 10
23k
| proof_features
sequencelengths 0
1
| name
stringlengths 8
95
| source
dict | verbose_type
stringlengths 1
7.42k
| source_range
dict |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
FStar.Pervasives.Lemma | val nat32_xor_bytewise_2 (k k' x x' m: nat32) (s s' t t': four nat8)
: Lemma
(requires
k == four_to_nat 8 s /\ k' == four_to_nat 8 s' /\ x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\ ixor k m == x /\ ixor k' m == x' /\ s.lo0 == s'.lo0 /\
s.lo1 == s'.lo1) (ensures t.lo0 == t'.lo0 /\ t.lo1 == t'.lo1) | [
{
"abbrev": false,
"full_module": "FStar.Seq.Properties",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.Types_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Lib.Seqs",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCM_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCTR_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words.Four_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words.Seq_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Lib.Seqs",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCM_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCTR_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let nat32_xor_bytewise_2 (k k' x x' m:nat32) (s s' t t':four nat8) : Lemma
(requires
k == four_to_nat 8 s /\
k' == four_to_nat 8 s' /\
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
ixor k m == x /\
ixor k' m == x' /\
s.lo0 == s'.lo0 /\ s.lo1 == s'.lo1
)
(ensures t.lo0 == t'.lo0 /\ t.lo1 == t'.lo1)
=
let Mkfour s0 s1 s2 s3 = s in
let Mkfour s0' s1' s2' s3' = s' in
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
nat32_xor_bytewise_2_helper3 k k' s s';
lemma_ishl_32 k 16;
lemma_ishl_32 k' 16;
lemma_ishl_32 x 16;
lemma_ishl_32 x' 16;
lemma_ishl_ixor_32 k m 16;
lemma_ishl_ixor_32 k' m 16;
// assert (ishl #pow2_32 k 16 == k * 0x10000 % 0x100000000);
// assert (ishl #pow2_32 k' 16 == k' * 0x10000 % 0x100000000);
// assert (ishl #pow2_32 x 16 == x * 0x10000 % 0x100000000);
// assert (ishl #pow2_32 x' 16 == x' * 0x10000 % 0x100000000);
// assert (ishl #pow2_32 x 16 == ixor (ishl k 16) (ishl m 16));
// assert (ishl #pow2_32 x' 16 == ixor (ishl k' 16) (ishl m 16));
// assert (x * 0x10000 % 0x100000000 == ixor (k * 0x10000 % 0x100000000) (ishl m 16));
// assert (x' * 0x10000 % 0x100000000 == ixor (k' * 0x10000 % 0x100000000) (ishl m 16));
nat32_xor_bytewise_2_helper2 x x' t t';
() | val nat32_xor_bytewise_2 (k k' x x' m: nat32) (s s' t t': four nat8)
: Lemma
(requires
k == four_to_nat 8 s /\ k' == four_to_nat 8 s' /\ x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\ ixor k m == x /\ ixor k' m == x' /\ s.lo0 == s'.lo0 /\
s.lo1 == s'.lo1) (ensures t.lo0 == t'.lo0 /\ t.lo1 == t'.lo1)
let nat32_xor_bytewise_2 (k k' x x' m: nat32) (s s' t t': four nat8)
: Lemma
(requires
k == four_to_nat 8 s /\ k' == four_to_nat 8 s' /\ x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\ ixor k m == x /\ ixor k' m == x' /\ s.lo0 == s'.lo0 /\
s.lo1 == s'.lo1) (ensures t.lo0 == t'.lo0 /\ t.lo1 == t'.lo1) = | false | null | true | let Mkfour s0 s1 s2 s3 = s in
let Mkfour s0' s1' s2' s3' = s' in
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
nat32_xor_bytewise_2_helper3 k k' s s';
lemma_ishl_32 k 16;
lemma_ishl_32 k' 16;
lemma_ishl_32 x 16;
lemma_ishl_32 x' 16;
lemma_ishl_ixor_32 k m 16;
lemma_ishl_ixor_32 k' m 16;
nat32_xor_bytewise_2_helper2 x x' t t';
() | {
"checked_file": "Vale.AES.GCTR.fst.checked",
"dependencies": [
"Vale.Poly1305.Bitvectors.fsti.checked",
"Vale.Lib.Seqs.fsti.checked",
"Vale.Def.Words_s.fsti.checked",
"Vale.Def.Words.Seq_s.fsti.checked",
"Vale.Def.Words.Four_s.fsti.checked",
"Vale.Def.TypesNative_s.fst.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Opaque_s.fsti.checked",
"Vale.Arch.Types.fsti.checked",
"Vale.AES.Types_helpers.fsti.checked",
"Vale.AES.GCTR_s.fst.checked",
"Vale.AES.GCM_helpers.fsti.checked",
"Vale.AES.AES_s.fst.checked",
"prims.fst.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.Properties.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Classical.fsti.checked",
"FStar.Calc.fsti.checked"
],
"interface_file": true,
"source_file": "Vale.AES.GCTR.fst"
} | [
"lemma"
] | [
"Vale.Def.Types_s.nat32",
"Vale.Def.Words_s.four",
"Vale.Def.Types_s.nat8",
"Prims.unit",
"Vale.AES.GCTR.nat32_xor_bytewise_2_helper2",
"Vale.AES.GCTR.lemma_ishl_ixor_32",
"Vale.AES.GCTR.lemma_ishl_32",
"Vale.AES.GCTR.nat32_xor_bytewise_2_helper3",
"Vale.Def.Words_s.nat8",
"Prims.l_and",
"Prims.eq2",
"Vale.Def.Words_s.natN",
"Vale.Def.Words_s.pow2_32",
"Vale.Def.Words.Four_s.four_to_nat",
"Vale.Def.Types_s.ixor",
"Vale.Def.Words_s.__proj__Mkfour__item__lo0",
"Vale.Def.Words_s.__proj__Mkfour__item__lo1",
"Prims.squash",
"Prims.Nil",
"FStar.Pervasives.pattern"
] | [] | module Vale.AES.GCTR
open Vale.Def.Opaque_s
open Vale.Def.Words_s
open Vale.Def.Words.Seq_s
open Vale.Def.Words.Four_s
open Vale.Def.Types_s
open Vale.Arch.Types
open FStar.Mul
open FStar.Seq
open Vale.AES.AES_s
open Vale.AES.GCTR_s
open Vale.AES.GCM_helpers
open FStar.Math.Lemmas
open Vale.Lib.Seqs
open Vale.AES.Types_helpers
#set-options "--z3rlimit 20 --max_fuel 1 --max_ifuel 0"
let lemma_counter_init x low64 low8 =
Vale.Poly1305.Bitvectors.lemma_bytes_and_mod1 low64;
Vale.Def.TypesNative_s.reveal_iand 64 low64 0xff;
assert (low8 == low64 % 256);
lo64_reveal ();
assert_norm (pow2_norm 32 == pow2_32); // OBSERVE
assert (low64 == x.lo0 + x.lo1 * pow2_32); // OBSERVE
assert (low64 % 256 == x.lo0 % 256);
()
let gctr_encrypt_block_offset (icb_BE:quad32) (plain_LE:quad32) (alg:algorithm) (key:seq nat32) (i:int) =
()
let gctr_encrypt_empty (icb_BE:quad32) (plain_LE cipher_LE:seq quad32) (alg:algorithm) (key:seq nat32) =
reveal_opaque (`%le_bytes_to_seq_quad32) le_bytes_to_seq_quad32;
gctr_encrypt_LE_reveal ();
let plain = slice (le_seq_quad32_to_bytes plain_LE) 0 0 in
let cipher = slice (le_seq_quad32_to_bytes cipher_LE) 0 0 in
assert (plain == empty);
assert (cipher == empty);
assert (length plain == 0);
assert (make_gctr_plain_LE plain == empty);
let num_extra = (length (make_gctr_plain_LE plain)) % 16 in
assert (num_extra == 0);
let plain_quads_LE = le_bytes_to_seq_quad32 plain in
let cipher_quads_LE = gctr_encrypt_recursive icb_BE plain_quads_LE alg key 0 in
assert (equal plain_quads_LE empty); // OBSERVE
assert (plain_quads_LE == empty);
assert (cipher_quads_LE == empty);
assert (equal (le_seq_quad32_to_bytes cipher_quads_LE) empty); // OBSERVEs
()
let gctr_partial_opaque_init alg plain cipher key icb =
gctr_partial_reveal ();
()
#restart-solver
let lemma_gctr_partial_append alg b1 b2 p1 c1 p2 c2 key icb1 icb2 =
gctr_partial_reveal ();
()
let gctr_partial_opaque_ignores_postfix alg bound plain plain' cipher cipher' key icb =
gctr_partial_reveal ();
// OBSERVE:
assert (forall i . 0 <= i /\ i < bound ==> index plain i == index (slice plain 0 bound) i);
assert (forall i . 0 <= i /\ i < bound ==> index plain' i == index (slice plain' 0 bound) i);
assert (forall i . 0 <= i /\ i < bound ==> index cipher i == index (slice cipher 0 bound) i);
assert (forall i . 0 <= i /\ i < bound ==> index cipher' i == index (slice cipher' 0 bound) i);
()
let gctr_partial_extend6 (alg:algorithm) (bound:nat) (plain cipher:seq quad32) (key:seq nat32) (icb:quad32)
=
gctr_partial_reveal ();
()
(*
let rec seq_map_i_indexed' (#a:Type) (#b:Type) (f:int->a->b) (s:seq a) (i:int) :
Tot (s':seq b { length s' == length s /\
(forall j . {:pattern index s' j} 0 <= j /\ j < length s ==> index s' j == f (i + j) (index s j))
})
(decreases (length s))
=
if length s = 0 then empty
else cons (f i (head s)) (seq_map_i_indexed f (tail s) (i + 1))
let rec test (icb_BE:quad32) (plain_LE:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (i:int) :
Lemma (ensures
(let gctr_encrypt_block_curried (j:int) (p:quad32) = gctr_encrypt_block icb_BE p alg key j in
gctr_encrypt_recursive icb_BE plain_LE alg key i == seq_map_i_indexed' gctr_encrypt_block_curried plain_LE i))
(decreases (length plain_LE))
=
let gctr_encrypt_block_curried (j:int) (p:quad32) = gctr_encrypt_block icb_BE p alg key j in
let g = gctr_encrypt_recursive icb_BE plain_LE alg key i in
let s = seq_map_i_indexed' gctr_encrypt_block_curried plain_LE i in
if length plain_LE = 0 then (
assert(equal (g) (s));
()
) else (
test icb_BE (tail plain_LE) alg key (i+1);
assert (gctr_encrypt_recursive icb_BE (tail plain_LE) alg key (i+1) == seq_map_i_indexed' gctr_encrypt_block_curried (tail plain_LE) (i+1))
)
*)
let rec gctr_encrypt_recursive_length (icb:quad32) (plain:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (i:int) : Lemma
(requires True)
(ensures length (gctr_encrypt_recursive icb plain alg key i) == length plain)
(decreases %[length plain])
[SMTPat (length (gctr_encrypt_recursive icb plain alg key i))]
=
if length plain = 0 then ()
else gctr_encrypt_recursive_length icb (tail plain) alg key (i + 1)
#reset-options "--z3rlimit 40"
let gctr_encrypt_length (icb_BE:quad32) (plain:gctr_plain_LE)
(alg:algorithm) (key:aes_key_LE alg) :
Lemma(length (gctr_encrypt_LE icb_BE plain alg key) == length plain)
[SMTPat (length (gctr_encrypt_LE icb_BE plain alg key))]
=
reveal_opaque (`%le_bytes_to_seq_quad32) le_bytes_to_seq_quad32;
gctr_encrypt_LE_reveal ();
let num_extra = (length plain) % 16 in
let result = gctr_encrypt_LE icb_BE plain alg key in
if num_extra = 0 then (
let plain_quads_LE = le_bytes_to_seq_quad32 plain in
gctr_encrypt_recursive_length icb_BE plain_quads_LE alg key 0
) else (
let full_bytes_len = (length plain) - num_extra in
let full_blocks, final_block = split plain full_bytes_len in
let full_quads_LE = le_bytes_to_seq_quad32 full_blocks in
let final_quad_LE = le_bytes_to_quad32 (pad_to_128_bits final_block) in
let cipher_quads_LE = gctr_encrypt_recursive icb_BE full_quads_LE alg key 0 in
let final_cipher_quad_LE = gctr_encrypt_block icb_BE final_quad_LE alg key (full_bytes_len / 16) in
let cipher_bytes_full_LE = le_seq_quad32_to_bytes cipher_quads_LE in
let final_cipher_bytes_LE = slice (le_quad32_to_bytes final_cipher_quad_LE) 0 num_extra in
gctr_encrypt_recursive_length icb_BE full_quads_LE alg key 0;
assert (length result == length cipher_bytes_full_LE + length final_cipher_bytes_LE);
assert (length cipher_quads_LE == length full_quads_LE);
assert (length cipher_bytes_full_LE == 16 * length cipher_quads_LE);
assert (16 * length full_quads_LE == length full_blocks);
assert (length cipher_bytes_full_LE == length full_blocks);
()
)
#reset-options
let rec gctr_indexed_helper (icb:quad32) (plain:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (i:int) : Lemma
(requires True)
(ensures (let cipher = gctr_encrypt_recursive icb plain alg key i in
length cipher == length plain /\
(forall j . {:pattern index cipher j} 0 <= j /\ j < length plain ==>
index cipher j == quad32_xor (index plain j) (aes_encrypt_BE alg key (inc32 icb (i + j)) ))))
(decreases %[length plain])
=
if length plain = 0 then ()
else
let tl = tail plain in
let cipher = gctr_encrypt_recursive icb plain alg key i in
let r_cipher = gctr_encrypt_recursive icb tl alg key (i+1) in
let helper (j:int) :
Lemma ((0 <= j /\ j < length plain) ==> (index cipher j == quad32_xor (index plain j) (aes_encrypt_BE alg key (inc32 icb (i + j)) )))
=
aes_encrypt_LE_reveal ();
if 0 < j && j < length plain then (
gctr_indexed_helper icb tl alg key (i+1);
assert(index r_cipher (j-1) == quad32_xor (index tl (j-1)) (aes_encrypt_BE alg key (inc32 icb (i + 1 + j - 1)) )) // OBSERVE
) else ()
in
FStar.Classical.forall_intro helper
let gctr_indexed (icb:quad32) (plain:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (cipher:seq quad32) : Lemma
(requires length cipher == length plain /\
(forall i . {:pattern index cipher i} 0 <= i /\ i < length cipher ==>
index cipher i == quad32_xor (index plain i) (aes_encrypt_BE alg key (inc32 icb i) )))
(ensures cipher == gctr_encrypt_recursive icb plain alg key 0)
=
gctr_indexed_helper icb plain alg key 0;
let c = gctr_encrypt_recursive icb plain alg key 0 in
assert(equal cipher c) // OBSERVE: Invoke extensionality lemmas
let gctr_partial_completed (alg:algorithm) (plain cipher:seq quad32) (key:seq nat32) (icb:quad32) =
gctr_indexed icb plain alg key cipher;
()
let gctr_partial_opaque_completed (alg:algorithm) (plain cipher:seq quad32) (key:seq nat32) (icb:quad32) : Lemma
(requires
is_aes_key_LE alg key /\
length plain == length cipher /\
length plain < pow2_32 /\
gctr_partial alg (length cipher) plain cipher key icb
)
(ensures cipher == gctr_encrypt_recursive icb plain alg key 0)
=
gctr_partial_reveal ();
gctr_partial_completed alg plain cipher key icb
let gctr_partial_to_full_basic (icb_BE:quad32) (plain:seq quad32) (alg:algorithm) (key:seq nat32) (cipher:seq quad32) =
gctr_encrypt_LE_reveal ();
let p = le_seq_quad32_to_bytes plain in
assert (length p % 16 == 0);
let plain_quads_LE = le_bytes_to_seq_quad32 p in
let cipher_quads_LE = gctr_encrypt_recursive icb_BE plain_quads_LE alg key 0 in
let cipher_bytes = le_seq_quad32_to_bytes cipher_quads_LE in
le_bytes_to_seq_quad32_to_bytes plain;
()
(*
Want to show that:
slice (le_seq_quad32_to_bytes (buffer128_as_seq(mem, out_b))) 0 num_bytes
==
gctr_encrypt_LE icb_BE (slice (le_seq_quad32_to_bytes (buffer128_as_seq(mem, in_b))) 0 num_bytes) ...
We know that
slice (buffer128_as_seq(mem, out_b) 0 num_blocks
==
gctr_encrypt_recursive icb_BE (slice buffer128_as_seq(mem, in_b) 0 num_blocks) ...
And we know that:
get_mem out_b num_blocks
==
gctr_encrypt_block(icb_BE, (get_mem inb num_blocks), alg, key, num_blocks);
Internally gctr_encrypt_LE will compute:
full_blocks, final_block = split (slice (le_seq_quad32_to_bytes (buffer128_as_seq(mem, in_b))) 0 num_bytes) (num_blocks * 16)
We'd like to show that
Step1: le_bytes_to_seq_quad32 full_blocks == slice buffer128_as_seq(mem, in_b) 0 num_blocks
and
Step2: final_block == slice (le_quad32_to_bytes (get_mem inb num_blocks)) 0 num_extra
Then we need to break down the byte-level effects of gctr_encrypt_block to show that even though the
padded version of final_block differs from (get_mem inb num_blocks), after we slice it at the end,
we end up with the same value
*)
let step1 (p:seq quad32) (num_bytes:nat{ num_bytes < 16 * length p }) : Lemma
(let num_extra = num_bytes % 16 in
let num_blocks = num_bytes / 16 in
let full_blocks, final_block = split (slice (le_seq_quad32_to_bytes p) 0 num_bytes) (num_blocks * 16) in
let full_quads_LE = le_bytes_to_seq_quad32 full_blocks in
let p_prefix = slice p 0 num_blocks in
p_prefix == full_quads_LE)
=
let num_extra = num_bytes % 16 in
let num_blocks = num_bytes / 16 in
let full_blocks, final_block = split (slice (le_seq_quad32_to_bytes p) 0 num_bytes) (num_blocks * 16) in
let full_quads_LE = le_bytes_to_seq_quad32 full_blocks in
let p_prefix = slice p 0 num_blocks in
assert (length full_blocks == num_blocks * 16);
assert (full_blocks == slice (slice (le_seq_quad32_to_bytes p) 0 num_bytes) 0 (num_blocks * 16));
assert (full_blocks == slice (le_seq_quad32_to_bytes p) 0 (num_blocks * 16));
slice_commutes_le_seq_quad32_to_bytes0 p num_blocks;
assert (full_blocks == le_seq_quad32_to_bytes (slice p 0 num_blocks));
le_bytes_to_seq_quad32_to_bytes (slice p 0 num_blocks);
assert (full_quads_LE == (slice p 0 num_blocks));
()
#reset-options "--smtencoding.elim_box true --z3rlimit 30"
let lemma_slice_orig_index (#a:Type) (s s':seq a) (m n:nat) : Lemma
(requires length s == length s' /\ m <= n /\ n <= length s /\ slice s m n == slice s' m n)
(ensures (forall (i:int).{:pattern (index s i) \/ (index s' i)} m <= i /\ i < n ==> index s i == index s' i))
=
let aux (i:nat{m <= i /\ i < n}) : Lemma (index s i == index s' i) =
lemma_index_slice s m n (i - m);
lemma_index_slice s' m n (i - m)
in Classical.forall_intro aux
let lemma_ishl_32 (x:nat32) (k:nat) : Lemma
(ensures ishl #pow2_32 x k == x * pow2 k % pow2_32)
=
Vale.Def.TypesNative_s.reveal_ishl 32 x k;
FStar.UInt.shift_left_value_lemma #32 x k;
()
let lemma_ishl_ixor_32 (x y:nat32) (k:nat) : Lemma
(ensures ishl #pow2_32 (ixor x y) k == ixor (ishl x k) (ishl y k))
=
Vale.Def.TypesNative_s.reveal_ishl 32 x k;
Vale.Def.TypesNative_s.reveal_ishl 32 y k;
Vale.Def.TypesNative_s.reveal_ishl 32 (ixor x y) k;
Vale.Def.TypesNative_s.reveal_ixor 32 x y;
Vale.Def.TypesNative_s.reveal_ixor 32 (ishl x k) (ishl y k);
FStar.UInt.shift_left_logxor_lemma #32 x y k;
()
unfold let pow2_24 = 0x1000000
let nat24 = natN pow2_24
let nat32_xor_bytewise_1_helper1 (x0 x0':nat8) (x1 x1':nat24) (x x':nat32) : Lemma
(requires
x == x0 + 0x100 * x1 /\
x' == x0' + 0x100 * x1' /\
x * 0x1000000 % 0x100000000 == x' * 0x1000000 % 0x100000000
)
(ensures x0 == x0')
=
()
let nat32_xor_bytewise_2_helper1 (x0 x0' x1 x1':nat16) (x x':nat32) : Lemma
(requires
x == x0 + 0x10000 * x1 /\
x' == x0' + 0x10000 * x1' /\
x * 0x10000 % 0x100000000 == x' * 0x10000 % 0x100000000
)
(ensures x0 == x0')
=
()
let nat32_xor_bytewise_3_helper1 (x0 x0':nat24) (x1 x1':nat8) (x x':nat32) : Lemma
(requires
x == x0 + 0x1000000 * x1 /\
x' == x0' + 0x1000000 * x1' /\
x * 0x100 % 0x100000000 == x' * 0x100 % 0x100000000
)
(ensures x0 == x0')
=
()
let nat32_xor_bytewise_1_helper2 (x x':nat32) (t t':four nat8) : Lemma
(requires
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
x * 0x1000000 % 0x100000000 == x' * 0x1000000 % 0x100000000
)
(ensures t.lo0 == t'.lo0)
=
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
let t123 = t1 + 0x100 * t2 + 0x10000 * t3 in
let t123' = t1' + 0x100 * t2' + 0x10000 * t3' in
assert_norm (four_to_nat 8 t == four_to_nat_unfold 8 t );
assert_norm (four_to_nat 8 t' == four_to_nat_unfold 8 t');
nat32_xor_bytewise_1_helper1 t0 t0' t123 t123' x x';
()
let nat32_xor_bytewise_2_helper2 (x x':nat32) (t t':four nat8) : Lemma
(requires
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
x * 0x10000 % 0x100000000 == x' * 0x10000 % 0x100000000
)
(ensures t.lo0 == t'.lo0 /\ t.lo1 == t'.lo1)
=
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
let t01 = t0 + 0x100 * t1 in
let t23 = t2 + 0x100 * t3 in
let t01' = t0' + 0x100 * t1' in
let t23' = t2' + 0x100 * t3' in
assert_norm (four_to_nat 8 t == four_to_nat_unfold 8 t );
assert_norm (four_to_nat 8 t' == four_to_nat_unfold 8 t');
nat32_xor_bytewise_2_helper1 t01 t01' t23 t23' x x';
()
let nat32_xor_bytewise_3_helper2 (x x':nat32) (t t':four nat8) : Lemma
(requires
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
x * 0x100 % 0x100000000 == x' * 0x100 % 0x100000000
)
(ensures t.lo0 == t'.lo0 /\ t.lo1 == t'.lo1 /\ t.hi2 == t'.hi2)
=
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
let t012 = t0 + 0x100 * t1 + 0x10000 * t2 in
let t012' = t0' + 0x100 * t1' + 0x10000 * t2' in
assert_norm (four_to_nat 8 t == four_to_nat_unfold 8 t );
assert_norm (four_to_nat 8 t' == four_to_nat_unfold 8 t');
nat32_xor_bytewise_3_helper1 t012 t012' t3 t3' x x';
()
let nat32_xor_bytewise_1_helper3 (k k':nat32) (s s':four nat8) : Lemma
(requires
k == four_to_nat 8 s /\
k' == four_to_nat 8 s' /\
s.lo0 == s'.lo0
)
(ensures k * 0x1000000 % 0x100000000 == k' * 0x1000000 % 0x100000000)
=
let Mkfour _ _ _ _ = s in
let Mkfour _ _ _ _ = s' in
assert_norm (four_to_nat 8 s == four_to_nat_unfold 8 s );
assert_norm (four_to_nat 8 s' == four_to_nat_unfold 8 s');
()
let nat32_xor_bytewise_2_helper3 (k k':nat32) (s s':four nat8) : Lemma
(requires
k == four_to_nat 8 s /\
k' == four_to_nat 8 s' /\
s.lo0 == s'.lo0 /\ s.lo1 == s'.lo1
)
(ensures k * 0x10000 % 0x100000000 == k' * 0x10000 % 0x100000000)
=
let Mkfour _ _ _ _ = s in
let Mkfour _ _ _ _ = s' in
assert_norm (four_to_nat 8 s == four_to_nat_unfold 8 s );
assert_norm (four_to_nat 8 s' == four_to_nat_unfold 8 s');
()
let nat32_xor_bytewise_3_helper3 (k k':nat32) (s s':four nat8) : Lemma
(requires
k == four_to_nat 8 s /\
k' == four_to_nat 8 s' /\
s.lo0 == s'.lo0 /\ s.lo1 == s'.lo1 /\ s.hi2 == s'.hi2
)
(ensures k * 0x100 % 0x100000000 == k' * 0x100 % 0x100000000)
=
let Mkfour _ _ _ _ = s in
let Mkfour _ _ _ _ = s' in
assert_norm (four_to_nat 8 s == four_to_nat_unfold 8 s );
assert_norm (four_to_nat 8 s' == four_to_nat_unfold 8 s');
()
let nat32_xor_bytewise_1 (k k' x x' m:nat32) (s s' t t':four nat8) : Lemma
(requires
k == four_to_nat 8 s /\
k' == four_to_nat 8 s' /\
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
ixor k m == x /\
ixor k' m == x' /\
s.lo0 == s'.lo0
)
(ensures t.lo0 == t'.lo0)
=
let Mkfour s0 s1 s2 s3 = s in
let Mkfour s0' s1' s2' s3' = s' in
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
nat32_xor_bytewise_1_helper3 k k' s s';
lemma_ishl_32 k 24;
lemma_ishl_32 k' 24;
lemma_ishl_32 x 24;
lemma_ishl_32 x' 24;
lemma_ishl_ixor_32 k m 24;
lemma_ishl_ixor_32 k' m 24;
assert_norm (pow2 24 == pow2_24);
nat32_xor_bytewise_1_helper2 x x' t t';
()
let nat32_xor_bytewise_2 (k k' x x' m:nat32) (s s' t t':four nat8) : Lemma
(requires
k == four_to_nat 8 s /\
k' == four_to_nat 8 s' /\
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
ixor k m == x /\
ixor k' m == x' /\
s.lo0 == s'.lo0 /\ s.lo1 == s'.lo1
) | false | false | Vale.AES.GCTR.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 30,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val nat32_xor_bytewise_2 (k k' x x' m: nat32) (s s' t t': four nat8)
: Lemma
(requires
k == four_to_nat 8 s /\ k' == four_to_nat 8 s' /\ x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\ ixor k m == x /\ ixor k' m == x' /\ s.lo0 == s'.lo0 /\
s.lo1 == s'.lo1) (ensures t.lo0 == t'.lo0 /\ t.lo1 == t'.lo1) | [] | Vale.AES.GCTR.nat32_xor_bytewise_2 | {
"file_name": "vale/code/crypto/aes/Vale.AES.GCTR.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} |
k: Vale.Def.Types_s.nat32 ->
k': Vale.Def.Types_s.nat32 ->
x: Vale.Def.Types_s.nat32 ->
x': Vale.Def.Types_s.nat32 ->
m: Vale.Def.Types_s.nat32 ->
s: Vale.Def.Words_s.four Vale.Def.Types_s.nat8 ->
s': Vale.Def.Words_s.four Vale.Def.Types_s.nat8 ->
t: Vale.Def.Words_s.four Vale.Def.Types_s.nat8 ->
t': Vale.Def.Words_s.four Vale.Def.Types_s.nat8
-> FStar.Pervasives.Lemma
(requires
k == Vale.Def.Words.Four_s.four_to_nat 8 s /\ k' == Vale.Def.Words.Four_s.four_to_nat 8 s' /\
x == Vale.Def.Words.Four_s.four_to_nat 8 t /\ x' == Vale.Def.Words.Four_s.four_to_nat 8 t' /\
Vale.Def.Types_s.ixor k m == x /\ Vale.Def.Types_s.ixor k' m == x' /\
Mkfour?.lo0 s == Mkfour?.lo0 s' /\ Mkfour?.lo1 s == Mkfour?.lo1 s')
(ensures Mkfour?.lo0 t == Mkfour?.lo0 t' /\ Mkfour?.lo1 t == Mkfour?.lo1 t') | {
"end_col": 4,
"end_line": 483,
"start_col": 3,
"start_line": 462
} |
FStar.Pervasives.Lemma | val lemma_ishl_ixor_32 (x y: nat32) (k: nat)
: Lemma (ensures ishl #pow2_32 (ixor x y) k == ixor (ishl x k) (ishl y k)) | [
{
"abbrev": false,
"full_module": "FStar.Seq.Properties",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.Types_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Lib.Seqs",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCM_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCTR_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words.Four_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words.Seq_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Lib.Seqs",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCM_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCTR_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let lemma_ishl_ixor_32 (x y:nat32) (k:nat) : Lemma
(ensures ishl #pow2_32 (ixor x y) k == ixor (ishl x k) (ishl y k))
=
Vale.Def.TypesNative_s.reveal_ishl 32 x k;
Vale.Def.TypesNative_s.reveal_ishl 32 y k;
Vale.Def.TypesNative_s.reveal_ishl 32 (ixor x y) k;
Vale.Def.TypesNative_s.reveal_ixor 32 x y;
Vale.Def.TypesNative_s.reveal_ixor 32 (ishl x k) (ishl y k);
FStar.UInt.shift_left_logxor_lemma #32 x y k;
() | val lemma_ishl_ixor_32 (x y: nat32) (k: nat)
: Lemma (ensures ishl #pow2_32 (ixor x y) k == ixor (ishl x k) (ishl y k))
let lemma_ishl_ixor_32 (x y: nat32) (k: nat)
: Lemma (ensures ishl #pow2_32 (ixor x y) k == ixor (ishl x k) (ishl y k)) = | false | null | true | Vale.Def.TypesNative_s.reveal_ishl 32 x k;
Vale.Def.TypesNative_s.reveal_ishl 32 y k;
Vale.Def.TypesNative_s.reveal_ishl 32 (ixor x y) k;
Vale.Def.TypesNative_s.reveal_ixor 32 x y;
Vale.Def.TypesNative_s.reveal_ixor 32 (ishl x k) (ishl y k);
FStar.UInt.shift_left_logxor_lemma #32 x y k;
() | {
"checked_file": "Vale.AES.GCTR.fst.checked",
"dependencies": [
"Vale.Poly1305.Bitvectors.fsti.checked",
"Vale.Lib.Seqs.fsti.checked",
"Vale.Def.Words_s.fsti.checked",
"Vale.Def.Words.Seq_s.fsti.checked",
"Vale.Def.Words.Four_s.fsti.checked",
"Vale.Def.TypesNative_s.fst.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Opaque_s.fsti.checked",
"Vale.Arch.Types.fsti.checked",
"Vale.AES.Types_helpers.fsti.checked",
"Vale.AES.GCTR_s.fst.checked",
"Vale.AES.GCM_helpers.fsti.checked",
"Vale.AES.AES_s.fst.checked",
"prims.fst.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.Properties.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Classical.fsti.checked",
"FStar.Calc.fsti.checked"
],
"interface_file": true,
"source_file": "Vale.AES.GCTR.fst"
} | [
"lemma"
] | [
"Vale.Def.Types_s.nat32",
"Prims.nat",
"Prims.unit",
"FStar.UInt.shift_left_logxor_lemma",
"Vale.Def.TypesNative_s.reveal_ixor",
"Vale.Def.Types_s.ishl",
"Vale.Def.Words_s.pow2_32",
"Vale.Def.TypesNative_s.reveal_ishl",
"Vale.Def.Types_s.ixor",
"Prims.l_True",
"Prims.squash",
"Prims.eq2",
"Vale.Def.Words_s.natN",
"Prims.Nil",
"FStar.Pervasives.pattern"
] | [] | module Vale.AES.GCTR
open Vale.Def.Opaque_s
open Vale.Def.Words_s
open Vale.Def.Words.Seq_s
open Vale.Def.Words.Four_s
open Vale.Def.Types_s
open Vale.Arch.Types
open FStar.Mul
open FStar.Seq
open Vale.AES.AES_s
open Vale.AES.GCTR_s
open Vale.AES.GCM_helpers
open FStar.Math.Lemmas
open Vale.Lib.Seqs
open Vale.AES.Types_helpers
#set-options "--z3rlimit 20 --max_fuel 1 --max_ifuel 0"
let lemma_counter_init x low64 low8 =
Vale.Poly1305.Bitvectors.lemma_bytes_and_mod1 low64;
Vale.Def.TypesNative_s.reveal_iand 64 low64 0xff;
assert (low8 == low64 % 256);
lo64_reveal ();
assert_norm (pow2_norm 32 == pow2_32); // OBSERVE
assert (low64 == x.lo0 + x.lo1 * pow2_32); // OBSERVE
assert (low64 % 256 == x.lo0 % 256);
()
let gctr_encrypt_block_offset (icb_BE:quad32) (plain_LE:quad32) (alg:algorithm) (key:seq nat32) (i:int) =
()
let gctr_encrypt_empty (icb_BE:quad32) (plain_LE cipher_LE:seq quad32) (alg:algorithm) (key:seq nat32) =
reveal_opaque (`%le_bytes_to_seq_quad32) le_bytes_to_seq_quad32;
gctr_encrypt_LE_reveal ();
let plain = slice (le_seq_quad32_to_bytes plain_LE) 0 0 in
let cipher = slice (le_seq_quad32_to_bytes cipher_LE) 0 0 in
assert (plain == empty);
assert (cipher == empty);
assert (length plain == 0);
assert (make_gctr_plain_LE plain == empty);
let num_extra = (length (make_gctr_plain_LE plain)) % 16 in
assert (num_extra == 0);
let plain_quads_LE = le_bytes_to_seq_quad32 plain in
let cipher_quads_LE = gctr_encrypt_recursive icb_BE plain_quads_LE alg key 0 in
assert (equal plain_quads_LE empty); // OBSERVE
assert (plain_quads_LE == empty);
assert (cipher_quads_LE == empty);
assert (equal (le_seq_quad32_to_bytes cipher_quads_LE) empty); // OBSERVEs
()
let gctr_partial_opaque_init alg plain cipher key icb =
gctr_partial_reveal ();
()
#restart-solver
let lemma_gctr_partial_append alg b1 b2 p1 c1 p2 c2 key icb1 icb2 =
gctr_partial_reveal ();
()
let gctr_partial_opaque_ignores_postfix alg bound plain plain' cipher cipher' key icb =
gctr_partial_reveal ();
// OBSERVE:
assert (forall i . 0 <= i /\ i < bound ==> index plain i == index (slice plain 0 bound) i);
assert (forall i . 0 <= i /\ i < bound ==> index plain' i == index (slice plain' 0 bound) i);
assert (forall i . 0 <= i /\ i < bound ==> index cipher i == index (slice cipher 0 bound) i);
assert (forall i . 0 <= i /\ i < bound ==> index cipher' i == index (slice cipher' 0 bound) i);
()
let gctr_partial_extend6 (alg:algorithm) (bound:nat) (plain cipher:seq quad32) (key:seq nat32) (icb:quad32)
=
gctr_partial_reveal ();
()
(*
let rec seq_map_i_indexed' (#a:Type) (#b:Type) (f:int->a->b) (s:seq a) (i:int) :
Tot (s':seq b { length s' == length s /\
(forall j . {:pattern index s' j} 0 <= j /\ j < length s ==> index s' j == f (i + j) (index s j))
})
(decreases (length s))
=
if length s = 0 then empty
else cons (f i (head s)) (seq_map_i_indexed f (tail s) (i + 1))
let rec test (icb_BE:quad32) (plain_LE:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (i:int) :
Lemma (ensures
(let gctr_encrypt_block_curried (j:int) (p:quad32) = gctr_encrypt_block icb_BE p alg key j in
gctr_encrypt_recursive icb_BE plain_LE alg key i == seq_map_i_indexed' gctr_encrypt_block_curried plain_LE i))
(decreases (length plain_LE))
=
let gctr_encrypt_block_curried (j:int) (p:quad32) = gctr_encrypt_block icb_BE p alg key j in
let g = gctr_encrypt_recursive icb_BE plain_LE alg key i in
let s = seq_map_i_indexed' gctr_encrypt_block_curried plain_LE i in
if length plain_LE = 0 then (
assert(equal (g) (s));
()
) else (
test icb_BE (tail plain_LE) alg key (i+1);
assert (gctr_encrypt_recursive icb_BE (tail plain_LE) alg key (i+1) == seq_map_i_indexed' gctr_encrypt_block_curried (tail plain_LE) (i+1))
)
*)
let rec gctr_encrypt_recursive_length (icb:quad32) (plain:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (i:int) : Lemma
(requires True)
(ensures length (gctr_encrypt_recursive icb plain alg key i) == length plain)
(decreases %[length plain])
[SMTPat (length (gctr_encrypt_recursive icb plain alg key i))]
=
if length plain = 0 then ()
else gctr_encrypt_recursive_length icb (tail plain) alg key (i + 1)
#reset-options "--z3rlimit 40"
let gctr_encrypt_length (icb_BE:quad32) (plain:gctr_plain_LE)
(alg:algorithm) (key:aes_key_LE alg) :
Lemma(length (gctr_encrypt_LE icb_BE plain alg key) == length plain)
[SMTPat (length (gctr_encrypt_LE icb_BE plain alg key))]
=
reveal_opaque (`%le_bytes_to_seq_quad32) le_bytes_to_seq_quad32;
gctr_encrypt_LE_reveal ();
let num_extra = (length plain) % 16 in
let result = gctr_encrypt_LE icb_BE plain alg key in
if num_extra = 0 then (
let plain_quads_LE = le_bytes_to_seq_quad32 plain in
gctr_encrypt_recursive_length icb_BE plain_quads_LE alg key 0
) else (
let full_bytes_len = (length plain) - num_extra in
let full_blocks, final_block = split plain full_bytes_len in
let full_quads_LE = le_bytes_to_seq_quad32 full_blocks in
let final_quad_LE = le_bytes_to_quad32 (pad_to_128_bits final_block) in
let cipher_quads_LE = gctr_encrypt_recursive icb_BE full_quads_LE alg key 0 in
let final_cipher_quad_LE = gctr_encrypt_block icb_BE final_quad_LE alg key (full_bytes_len / 16) in
let cipher_bytes_full_LE = le_seq_quad32_to_bytes cipher_quads_LE in
let final_cipher_bytes_LE = slice (le_quad32_to_bytes final_cipher_quad_LE) 0 num_extra in
gctr_encrypt_recursive_length icb_BE full_quads_LE alg key 0;
assert (length result == length cipher_bytes_full_LE + length final_cipher_bytes_LE);
assert (length cipher_quads_LE == length full_quads_LE);
assert (length cipher_bytes_full_LE == 16 * length cipher_quads_LE);
assert (16 * length full_quads_LE == length full_blocks);
assert (length cipher_bytes_full_LE == length full_blocks);
()
)
#reset-options
let rec gctr_indexed_helper (icb:quad32) (plain:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (i:int) : Lemma
(requires True)
(ensures (let cipher = gctr_encrypt_recursive icb plain alg key i in
length cipher == length plain /\
(forall j . {:pattern index cipher j} 0 <= j /\ j < length plain ==>
index cipher j == quad32_xor (index plain j) (aes_encrypt_BE alg key (inc32 icb (i + j)) ))))
(decreases %[length plain])
=
if length plain = 0 then ()
else
let tl = tail plain in
let cipher = gctr_encrypt_recursive icb plain alg key i in
let r_cipher = gctr_encrypt_recursive icb tl alg key (i+1) in
let helper (j:int) :
Lemma ((0 <= j /\ j < length plain) ==> (index cipher j == quad32_xor (index plain j) (aes_encrypt_BE alg key (inc32 icb (i + j)) )))
=
aes_encrypt_LE_reveal ();
if 0 < j && j < length plain then (
gctr_indexed_helper icb tl alg key (i+1);
assert(index r_cipher (j-1) == quad32_xor (index tl (j-1)) (aes_encrypt_BE alg key (inc32 icb (i + 1 + j - 1)) )) // OBSERVE
) else ()
in
FStar.Classical.forall_intro helper
let gctr_indexed (icb:quad32) (plain:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (cipher:seq quad32) : Lemma
(requires length cipher == length plain /\
(forall i . {:pattern index cipher i} 0 <= i /\ i < length cipher ==>
index cipher i == quad32_xor (index plain i) (aes_encrypt_BE alg key (inc32 icb i) )))
(ensures cipher == gctr_encrypt_recursive icb plain alg key 0)
=
gctr_indexed_helper icb plain alg key 0;
let c = gctr_encrypt_recursive icb plain alg key 0 in
assert(equal cipher c) // OBSERVE: Invoke extensionality lemmas
let gctr_partial_completed (alg:algorithm) (plain cipher:seq quad32) (key:seq nat32) (icb:quad32) =
gctr_indexed icb plain alg key cipher;
()
let gctr_partial_opaque_completed (alg:algorithm) (plain cipher:seq quad32) (key:seq nat32) (icb:quad32) : Lemma
(requires
is_aes_key_LE alg key /\
length plain == length cipher /\
length plain < pow2_32 /\
gctr_partial alg (length cipher) plain cipher key icb
)
(ensures cipher == gctr_encrypt_recursive icb plain alg key 0)
=
gctr_partial_reveal ();
gctr_partial_completed alg plain cipher key icb
let gctr_partial_to_full_basic (icb_BE:quad32) (plain:seq quad32) (alg:algorithm) (key:seq nat32) (cipher:seq quad32) =
gctr_encrypt_LE_reveal ();
let p = le_seq_quad32_to_bytes plain in
assert (length p % 16 == 0);
let plain_quads_LE = le_bytes_to_seq_quad32 p in
let cipher_quads_LE = gctr_encrypt_recursive icb_BE plain_quads_LE alg key 0 in
let cipher_bytes = le_seq_quad32_to_bytes cipher_quads_LE in
le_bytes_to_seq_quad32_to_bytes plain;
()
(*
Want to show that:
slice (le_seq_quad32_to_bytes (buffer128_as_seq(mem, out_b))) 0 num_bytes
==
gctr_encrypt_LE icb_BE (slice (le_seq_quad32_to_bytes (buffer128_as_seq(mem, in_b))) 0 num_bytes) ...
We know that
slice (buffer128_as_seq(mem, out_b) 0 num_blocks
==
gctr_encrypt_recursive icb_BE (slice buffer128_as_seq(mem, in_b) 0 num_blocks) ...
And we know that:
get_mem out_b num_blocks
==
gctr_encrypt_block(icb_BE, (get_mem inb num_blocks), alg, key, num_blocks);
Internally gctr_encrypt_LE will compute:
full_blocks, final_block = split (slice (le_seq_quad32_to_bytes (buffer128_as_seq(mem, in_b))) 0 num_bytes) (num_blocks * 16)
We'd like to show that
Step1: le_bytes_to_seq_quad32 full_blocks == slice buffer128_as_seq(mem, in_b) 0 num_blocks
and
Step2: final_block == slice (le_quad32_to_bytes (get_mem inb num_blocks)) 0 num_extra
Then we need to break down the byte-level effects of gctr_encrypt_block to show that even though the
padded version of final_block differs from (get_mem inb num_blocks), after we slice it at the end,
we end up with the same value
*)
let step1 (p:seq quad32) (num_bytes:nat{ num_bytes < 16 * length p }) : Lemma
(let num_extra = num_bytes % 16 in
let num_blocks = num_bytes / 16 in
let full_blocks, final_block = split (slice (le_seq_quad32_to_bytes p) 0 num_bytes) (num_blocks * 16) in
let full_quads_LE = le_bytes_to_seq_quad32 full_blocks in
let p_prefix = slice p 0 num_blocks in
p_prefix == full_quads_LE)
=
let num_extra = num_bytes % 16 in
let num_blocks = num_bytes / 16 in
let full_blocks, final_block = split (slice (le_seq_quad32_to_bytes p) 0 num_bytes) (num_blocks * 16) in
let full_quads_LE = le_bytes_to_seq_quad32 full_blocks in
let p_prefix = slice p 0 num_blocks in
assert (length full_blocks == num_blocks * 16);
assert (full_blocks == slice (slice (le_seq_quad32_to_bytes p) 0 num_bytes) 0 (num_blocks * 16));
assert (full_blocks == slice (le_seq_quad32_to_bytes p) 0 (num_blocks * 16));
slice_commutes_le_seq_quad32_to_bytes0 p num_blocks;
assert (full_blocks == le_seq_quad32_to_bytes (slice p 0 num_blocks));
le_bytes_to_seq_quad32_to_bytes (slice p 0 num_blocks);
assert (full_quads_LE == (slice p 0 num_blocks));
()
#reset-options "--smtencoding.elim_box true --z3rlimit 30"
let lemma_slice_orig_index (#a:Type) (s s':seq a) (m n:nat) : Lemma
(requires length s == length s' /\ m <= n /\ n <= length s /\ slice s m n == slice s' m n)
(ensures (forall (i:int).{:pattern (index s i) \/ (index s' i)} m <= i /\ i < n ==> index s i == index s' i))
=
let aux (i:nat{m <= i /\ i < n}) : Lemma (index s i == index s' i) =
lemma_index_slice s m n (i - m);
lemma_index_slice s' m n (i - m)
in Classical.forall_intro aux
let lemma_ishl_32 (x:nat32) (k:nat) : Lemma
(ensures ishl #pow2_32 x k == x * pow2 k % pow2_32)
=
Vale.Def.TypesNative_s.reveal_ishl 32 x k;
FStar.UInt.shift_left_value_lemma #32 x k;
()
let lemma_ishl_ixor_32 (x y:nat32) (k:nat) : Lemma
(ensures ishl #pow2_32 (ixor x y) k == ixor (ishl x k) (ishl y k)) | false | false | Vale.AES.GCTR.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 30,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val lemma_ishl_ixor_32 (x y: nat32) (k: nat)
: Lemma (ensures ishl #pow2_32 (ixor x y) k == ixor (ishl x k) (ishl y k)) | [] | Vale.AES.GCTR.lemma_ishl_ixor_32 | {
"file_name": "vale/code/crypto/aes/Vale.AES.GCTR.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | x: Vale.Def.Types_s.nat32 -> y: Vale.Def.Types_s.nat32 -> k: Prims.nat
-> FStar.Pervasives.Lemma
(ensures
Vale.Def.Types_s.ishl (Vale.Def.Types_s.ixor x y) k ==
Vale.Def.Types_s.ixor (Vale.Def.Types_s.ishl x k) (Vale.Def.Types_s.ishl y k)) | {
"end_col": 4,
"end_line": 294,
"start_col": 2,
"start_line": 288
} |
FStar.Pervasives.Lemma | val gctr_partial_completed (alg:algorithm) (plain cipher:seq quad32) (key:seq nat32) (icb:quad32) : Lemma
(requires
is_aes_key_LE alg key /\
length plain == length cipher /\
length plain < pow2_32 /\
gctr_partial_def alg (length cipher) plain cipher key icb
)
(ensures cipher == gctr_encrypt_recursive icb plain alg key 0) | [
{
"abbrev": false,
"full_module": "Vale.AES.Types_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words.Four_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words.Seq_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Lib.Seqs",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCM_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCTR_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let gctr_partial_completed (alg:algorithm) (plain cipher:seq quad32) (key:seq nat32) (icb:quad32) =
gctr_indexed icb plain alg key cipher;
() | val gctr_partial_completed (alg:algorithm) (plain cipher:seq quad32) (key:seq nat32) (icb:quad32) : Lemma
(requires
is_aes_key_LE alg key /\
length plain == length cipher /\
length plain < pow2_32 /\
gctr_partial_def alg (length cipher) plain cipher key icb
)
(ensures cipher == gctr_encrypt_recursive icb plain alg key 0)
let gctr_partial_completed
(alg: algorithm)
(plain cipher: seq quad32)
(key: seq nat32)
(icb: quad32)
= | false | null | true | gctr_indexed icb plain alg key cipher;
() | {
"checked_file": "Vale.AES.GCTR.fst.checked",
"dependencies": [
"Vale.Poly1305.Bitvectors.fsti.checked",
"Vale.Lib.Seqs.fsti.checked",
"Vale.Def.Words_s.fsti.checked",
"Vale.Def.Words.Seq_s.fsti.checked",
"Vale.Def.Words.Four_s.fsti.checked",
"Vale.Def.TypesNative_s.fst.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Opaque_s.fsti.checked",
"Vale.Arch.Types.fsti.checked",
"Vale.AES.Types_helpers.fsti.checked",
"Vale.AES.GCTR_s.fst.checked",
"Vale.AES.GCM_helpers.fsti.checked",
"Vale.AES.AES_s.fst.checked",
"prims.fst.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.Properties.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Classical.fsti.checked",
"FStar.Calc.fsti.checked"
],
"interface_file": true,
"source_file": "Vale.AES.GCTR.fst"
} | [
"lemma"
] | [
"Vale.AES.AES_common_s.algorithm",
"FStar.Seq.Base.seq",
"Vale.Def.Types_s.quad32",
"Vale.Def.Types_s.nat32",
"Prims.unit",
"Vale.AES.GCTR.gctr_indexed"
] | [] | module Vale.AES.GCTR
open Vale.Def.Opaque_s
open Vale.Def.Words_s
open Vale.Def.Words.Seq_s
open Vale.Def.Words.Four_s
open Vale.Def.Types_s
open Vale.Arch.Types
open FStar.Mul
open FStar.Seq
open Vale.AES.AES_s
open Vale.AES.GCTR_s
open Vale.AES.GCM_helpers
open FStar.Math.Lemmas
open Vale.Lib.Seqs
open Vale.AES.Types_helpers
#set-options "--z3rlimit 20 --max_fuel 1 --max_ifuel 0"
let lemma_counter_init x low64 low8 =
Vale.Poly1305.Bitvectors.lemma_bytes_and_mod1 low64;
Vale.Def.TypesNative_s.reveal_iand 64 low64 0xff;
assert (low8 == low64 % 256);
lo64_reveal ();
assert_norm (pow2_norm 32 == pow2_32); // OBSERVE
assert (low64 == x.lo0 + x.lo1 * pow2_32); // OBSERVE
assert (low64 % 256 == x.lo0 % 256);
()
let gctr_encrypt_block_offset (icb_BE:quad32) (plain_LE:quad32) (alg:algorithm) (key:seq nat32) (i:int) =
()
let gctr_encrypt_empty (icb_BE:quad32) (plain_LE cipher_LE:seq quad32) (alg:algorithm) (key:seq nat32) =
reveal_opaque (`%le_bytes_to_seq_quad32) le_bytes_to_seq_quad32;
gctr_encrypt_LE_reveal ();
let plain = slice (le_seq_quad32_to_bytes plain_LE) 0 0 in
let cipher = slice (le_seq_quad32_to_bytes cipher_LE) 0 0 in
assert (plain == empty);
assert (cipher == empty);
assert (length plain == 0);
assert (make_gctr_plain_LE plain == empty);
let num_extra = (length (make_gctr_plain_LE plain)) % 16 in
assert (num_extra == 0);
let plain_quads_LE = le_bytes_to_seq_quad32 plain in
let cipher_quads_LE = gctr_encrypt_recursive icb_BE plain_quads_LE alg key 0 in
assert (equal plain_quads_LE empty); // OBSERVE
assert (plain_quads_LE == empty);
assert (cipher_quads_LE == empty);
assert (equal (le_seq_quad32_to_bytes cipher_quads_LE) empty); // OBSERVEs
()
let gctr_partial_opaque_init alg plain cipher key icb =
gctr_partial_reveal ();
()
#restart-solver
let lemma_gctr_partial_append alg b1 b2 p1 c1 p2 c2 key icb1 icb2 =
gctr_partial_reveal ();
()
let gctr_partial_opaque_ignores_postfix alg bound plain plain' cipher cipher' key icb =
gctr_partial_reveal ();
// OBSERVE:
assert (forall i . 0 <= i /\ i < bound ==> index plain i == index (slice plain 0 bound) i);
assert (forall i . 0 <= i /\ i < bound ==> index plain' i == index (slice plain' 0 bound) i);
assert (forall i . 0 <= i /\ i < bound ==> index cipher i == index (slice cipher 0 bound) i);
assert (forall i . 0 <= i /\ i < bound ==> index cipher' i == index (slice cipher' 0 bound) i);
()
let gctr_partial_extend6 (alg:algorithm) (bound:nat) (plain cipher:seq quad32) (key:seq nat32) (icb:quad32)
=
gctr_partial_reveal ();
()
(*
let rec seq_map_i_indexed' (#a:Type) (#b:Type) (f:int->a->b) (s:seq a) (i:int) :
Tot (s':seq b { length s' == length s /\
(forall j . {:pattern index s' j} 0 <= j /\ j < length s ==> index s' j == f (i + j) (index s j))
})
(decreases (length s))
=
if length s = 0 then empty
else cons (f i (head s)) (seq_map_i_indexed f (tail s) (i + 1))
let rec test (icb_BE:quad32) (plain_LE:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (i:int) :
Lemma (ensures
(let gctr_encrypt_block_curried (j:int) (p:quad32) = gctr_encrypt_block icb_BE p alg key j in
gctr_encrypt_recursive icb_BE plain_LE alg key i == seq_map_i_indexed' gctr_encrypt_block_curried plain_LE i))
(decreases (length plain_LE))
=
let gctr_encrypt_block_curried (j:int) (p:quad32) = gctr_encrypt_block icb_BE p alg key j in
let g = gctr_encrypt_recursive icb_BE plain_LE alg key i in
let s = seq_map_i_indexed' gctr_encrypt_block_curried plain_LE i in
if length plain_LE = 0 then (
assert(equal (g) (s));
()
) else (
test icb_BE (tail plain_LE) alg key (i+1);
assert (gctr_encrypt_recursive icb_BE (tail plain_LE) alg key (i+1) == seq_map_i_indexed' gctr_encrypt_block_curried (tail plain_LE) (i+1))
)
*)
let rec gctr_encrypt_recursive_length (icb:quad32) (plain:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (i:int) : Lemma
(requires True)
(ensures length (gctr_encrypt_recursive icb plain alg key i) == length plain)
(decreases %[length plain])
[SMTPat (length (gctr_encrypt_recursive icb plain alg key i))]
=
if length plain = 0 then ()
else gctr_encrypt_recursive_length icb (tail plain) alg key (i + 1)
#reset-options "--z3rlimit 40"
let gctr_encrypt_length (icb_BE:quad32) (plain:gctr_plain_LE)
(alg:algorithm) (key:aes_key_LE alg) :
Lemma(length (gctr_encrypt_LE icb_BE plain alg key) == length plain)
[SMTPat (length (gctr_encrypt_LE icb_BE plain alg key))]
=
reveal_opaque (`%le_bytes_to_seq_quad32) le_bytes_to_seq_quad32;
gctr_encrypt_LE_reveal ();
let num_extra = (length plain) % 16 in
let result = gctr_encrypt_LE icb_BE plain alg key in
if num_extra = 0 then (
let plain_quads_LE = le_bytes_to_seq_quad32 plain in
gctr_encrypt_recursive_length icb_BE plain_quads_LE alg key 0
) else (
let full_bytes_len = (length plain) - num_extra in
let full_blocks, final_block = split plain full_bytes_len in
let full_quads_LE = le_bytes_to_seq_quad32 full_blocks in
let final_quad_LE = le_bytes_to_quad32 (pad_to_128_bits final_block) in
let cipher_quads_LE = gctr_encrypt_recursive icb_BE full_quads_LE alg key 0 in
let final_cipher_quad_LE = gctr_encrypt_block icb_BE final_quad_LE alg key (full_bytes_len / 16) in
let cipher_bytes_full_LE = le_seq_quad32_to_bytes cipher_quads_LE in
let final_cipher_bytes_LE = slice (le_quad32_to_bytes final_cipher_quad_LE) 0 num_extra in
gctr_encrypt_recursive_length icb_BE full_quads_LE alg key 0;
assert (length result == length cipher_bytes_full_LE + length final_cipher_bytes_LE);
assert (length cipher_quads_LE == length full_quads_LE);
assert (length cipher_bytes_full_LE == 16 * length cipher_quads_LE);
assert (16 * length full_quads_LE == length full_blocks);
assert (length cipher_bytes_full_LE == length full_blocks);
()
)
#reset-options
let rec gctr_indexed_helper (icb:quad32) (plain:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (i:int) : Lemma
(requires True)
(ensures (let cipher = gctr_encrypt_recursive icb plain alg key i in
length cipher == length plain /\
(forall j . {:pattern index cipher j} 0 <= j /\ j < length plain ==>
index cipher j == quad32_xor (index plain j) (aes_encrypt_BE alg key (inc32 icb (i + j)) ))))
(decreases %[length plain])
=
if length plain = 0 then ()
else
let tl = tail plain in
let cipher = gctr_encrypt_recursive icb plain alg key i in
let r_cipher = gctr_encrypt_recursive icb tl alg key (i+1) in
let helper (j:int) :
Lemma ((0 <= j /\ j < length plain) ==> (index cipher j == quad32_xor (index plain j) (aes_encrypt_BE alg key (inc32 icb (i + j)) )))
=
aes_encrypt_LE_reveal ();
if 0 < j && j < length plain then (
gctr_indexed_helper icb tl alg key (i+1);
assert(index r_cipher (j-1) == quad32_xor (index tl (j-1)) (aes_encrypt_BE alg key (inc32 icb (i + 1 + j - 1)) )) // OBSERVE
) else ()
in
FStar.Classical.forall_intro helper
let gctr_indexed (icb:quad32) (plain:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (cipher:seq quad32) : Lemma
(requires length cipher == length plain /\
(forall i . {:pattern index cipher i} 0 <= i /\ i < length cipher ==>
index cipher i == quad32_xor (index plain i) (aes_encrypt_BE alg key (inc32 icb i) )))
(ensures cipher == gctr_encrypt_recursive icb plain alg key 0)
=
gctr_indexed_helper icb plain alg key 0;
let c = gctr_encrypt_recursive icb plain alg key 0 in
assert(equal cipher c) // OBSERVE: Invoke extensionality lemmas | false | false | Vale.AES.GCTR.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val gctr_partial_completed (alg:algorithm) (plain cipher:seq quad32) (key:seq nat32) (icb:quad32) : Lemma
(requires
is_aes_key_LE alg key /\
length plain == length cipher /\
length plain < pow2_32 /\
gctr_partial_def alg (length cipher) plain cipher key icb
)
(ensures cipher == gctr_encrypt_recursive icb plain alg key 0) | [] | Vale.AES.GCTR.gctr_partial_completed | {
"file_name": "vale/code/crypto/aes/Vale.AES.GCTR.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} |
alg: Vale.AES.AES_common_s.algorithm ->
plain: FStar.Seq.Base.seq Vale.Def.Types_s.quad32 ->
cipher: FStar.Seq.Base.seq Vale.Def.Types_s.quad32 ->
key: FStar.Seq.Base.seq Vale.Def.Types_s.nat32 ->
icb: Vale.Def.Types_s.quad32
-> FStar.Pervasives.Lemma
(requires
Vale.AES.AES_s.is_aes_key_LE alg key /\
FStar.Seq.Base.length plain == FStar.Seq.Base.length cipher /\
FStar.Seq.Base.length plain < Vale.Def.Words_s.pow2_32 /\
Vale.AES.GCTR.gctr_partial_def alg (FStar.Seq.Base.length cipher) plain cipher key icb)
(ensures cipher == Vale.AES.GCTR_s.gctr_encrypt_recursive icb plain alg key 0) | {
"end_col": 4,
"end_line": 190,
"start_col": 2,
"start_line": 189
} |
FStar.Pervasives.Lemma | val nat32_xor_bytewise_1_helper2 (x x': nat32) (t t': four nat8)
: Lemma
(requires
x == four_to_nat 8 t /\ x' == four_to_nat 8 t' /\
x * 0x1000000 % 0x100000000 == x' * 0x1000000 % 0x100000000) (ensures t.lo0 == t'.lo0) | [
{
"abbrev": false,
"full_module": "FStar.Seq.Properties",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.Types_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Lib.Seqs",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCM_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCTR_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words.Four_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words.Seq_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Lib.Seqs",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCM_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCTR_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let nat32_xor_bytewise_1_helper2 (x x':nat32) (t t':four nat8) : Lemma
(requires
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
x * 0x1000000 % 0x100000000 == x' * 0x1000000 % 0x100000000
)
(ensures t.lo0 == t'.lo0)
=
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
let t123 = t1 + 0x100 * t2 + 0x10000 * t3 in
let t123' = t1' + 0x100 * t2' + 0x10000 * t3' in
assert_norm (four_to_nat 8 t == four_to_nat_unfold 8 t );
assert_norm (four_to_nat 8 t' == four_to_nat_unfold 8 t');
nat32_xor_bytewise_1_helper1 t0 t0' t123 t123' x x';
() | val nat32_xor_bytewise_1_helper2 (x x': nat32) (t t': four nat8)
: Lemma
(requires
x == four_to_nat 8 t /\ x' == four_to_nat 8 t' /\
x * 0x1000000 % 0x100000000 == x' * 0x1000000 % 0x100000000) (ensures t.lo0 == t'.lo0)
let nat32_xor_bytewise_1_helper2 (x x': nat32) (t t': four nat8)
: Lemma
(requires
x == four_to_nat 8 t /\ x' == four_to_nat 8 t' /\
x * 0x1000000 % 0x100000000 == x' * 0x1000000 % 0x100000000) (ensures t.lo0 == t'.lo0) = | false | null | true | let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
let t123 = t1 + 0x100 * t2 + 0x10000 * t3 in
let t123' = t1' + 0x100 * t2' + 0x10000 * t3' in
assert_norm (four_to_nat 8 t == four_to_nat_unfold 8 t);
assert_norm (four_to_nat 8 t' == four_to_nat_unfold 8 t');
nat32_xor_bytewise_1_helper1 t0 t0' t123 t123' x x';
() | {
"checked_file": "Vale.AES.GCTR.fst.checked",
"dependencies": [
"Vale.Poly1305.Bitvectors.fsti.checked",
"Vale.Lib.Seqs.fsti.checked",
"Vale.Def.Words_s.fsti.checked",
"Vale.Def.Words.Seq_s.fsti.checked",
"Vale.Def.Words.Four_s.fsti.checked",
"Vale.Def.TypesNative_s.fst.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Opaque_s.fsti.checked",
"Vale.Arch.Types.fsti.checked",
"Vale.AES.Types_helpers.fsti.checked",
"Vale.AES.GCTR_s.fst.checked",
"Vale.AES.GCM_helpers.fsti.checked",
"Vale.AES.AES_s.fst.checked",
"prims.fst.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.Properties.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Classical.fsti.checked",
"FStar.Calc.fsti.checked"
],
"interface_file": true,
"source_file": "Vale.AES.GCTR.fst"
} | [
"lemma"
] | [
"Vale.Def.Types_s.nat32",
"Vale.Def.Words_s.four",
"Vale.Def.Types_s.nat8",
"Prims.unit",
"Vale.AES.GCTR.nat32_xor_bytewise_1_helper1",
"FStar.Pervasives.assert_norm",
"Prims.eq2",
"Vale.Def.Words_s.natN",
"Prims.pow2",
"FStar.Mul.op_Star",
"Vale.Def.Words.Four_s.four_to_nat",
"Vale.Def.Words.Four_s.four_to_nat_unfold",
"Prims.int",
"Prims.op_Addition",
"Vale.Def.Words_s.nat8",
"Prims.l_and",
"Vale.Def.Words_s.pow2_32",
"Prims.op_Modulus",
"Prims.squash",
"Vale.Def.Words_s.__proj__Mkfour__item__lo0",
"Prims.Nil",
"FStar.Pervasives.pattern"
] | [] | module Vale.AES.GCTR
open Vale.Def.Opaque_s
open Vale.Def.Words_s
open Vale.Def.Words.Seq_s
open Vale.Def.Words.Four_s
open Vale.Def.Types_s
open Vale.Arch.Types
open FStar.Mul
open FStar.Seq
open Vale.AES.AES_s
open Vale.AES.GCTR_s
open Vale.AES.GCM_helpers
open FStar.Math.Lemmas
open Vale.Lib.Seqs
open Vale.AES.Types_helpers
#set-options "--z3rlimit 20 --max_fuel 1 --max_ifuel 0"
let lemma_counter_init x low64 low8 =
Vale.Poly1305.Bitvectors.lemma_bytes_and_mod1 low64;
Vale.Def.TypesNative_s.reveal_iand 64 low64 0xff;
assert (low8 == low64 % 256);
lo64_reveal ();
assert_norm (pow2_norm 32 == pow2_32); // OBSERVE
assert (low64 == x.lo0 + x.lo1 * pow2_32); // OBSERVE
assert (low64 % 256 == x.lo0 % 256);
()
let gctr_encrypt_block_offset (icb_BE:quad32) (plain_LE:quad32) (alg:algorithm) (key:seq nat32) (i:int) =
()
let gctr_encrypt_empty (icb_BE:quad32) (plain_LE cipher_LE:seq quad32) (alg:algorithm) (key:seq nat32) =
reveal_opaque (`%le_bytes_to_seq_quad32) le_bytes_to_seq_quad32;
gctr_encrypt_LE_reveal ();
let plain = slice (le_seq_quad32_to_bytes plain_LE) 0 0 in
let cipher = slice (le_seq_quad32_to_bytes cipher_LE) 0 0 in
assert (plain == empty);
assert (cipher == empty);
assert (length plain == 0);
assert (make_gctr_plain_LE plain == empty);
let num_extra = (length (make_gctr_plain_LE plain)) % 16 in
assert (num_extra == 0);
let plain_quads_LE = le_bytes_to_seq_quad32 plain in
let cipher_quads_LE = gctr_encrypt_recursive icb_BE plain_quads_LE alg key 0 in
assert (equal plain_quads_LE empty); // OBSERVE
assert (plain_quads_LE == empty);
assert (cipher_quads_LE == empty);
assert (equal (le_seq_quad32_to_bytes cipher_quads_LE) empty); // OBSERVEs
()
let gctr_partial_opaque_init alg plain cipher key icb =
gctr_partial_reveal ();
()
#restart-solver
let lemma_gctr_partial_append alg b1 b2 p1 c1 p2 c2 key icb1 icb2 =
gctr_partial_reveal ();
()
let gctr_partial_opaque_ignores_postfix alg bound plain plain' cipher cipher' key icb =
gctr_partial_reveal ();
// OBSERVE:
assert (forall i . 0 <= i /\ i < bound ==> index plain i == index (slice plain 0 bound) i);
assert (forall i . 0 <= i /\ i < bound ==> index plain' i == index (slice plain' 0 bound) i);
assert (forall i . 0 <= i /\ i < bound ==> index cipher i == index (slice cipher 0 bound) i);
assert (forall i . 0 <= i /\ i < bound ==> index cipher' i == index (slice cipher' 0 bound) i);
()
let gctr_partial_extend6 (alg:algorithm) (bound:nat) (plain cipher:seq quad32) (key:seq nat32) (icb:quad32)
=
gctr_partial_reveal ();
()
(*
let rec seq_map_i_indexed' (#a:Type) (#b:Type) (f:int->a->b) (s:seq a) (i:int) :
Tot (s':seq b { length s' == length s /\
(forall j . {:pattern index s' j} 0 <= j /\ j < length s ==> index s' j == f (i + j) (index s j))
})
(decreases (length s))
=
if length s = 0 then empty
else cons (f i (head s)) (seq_map_i_indexed f (tail s) (i + 1))
let rec test (icb_BE:quad32) (plain_LE:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (i:int) :
Lemma (ensures
(let gctr_encrypt_block_curried (j:int) (p:quad32) = gctr_encrypt_block icb_BE p alg key j in
gctr_encrypt_recursive icb_BE plain_LE alg key i == seq_map_i_indexed' gctr_encrypt_block_curried plain_LE i))
(decreases (length plain_LE))
=
let gctr_encrypt_block_curried (j:int) (p:quad32) = gctr_encrypt_block icb_BE p alg key j in
let g = gctr_encrypt_recursive icb_BE plain_LE alg key i in
let s = seq_map_i_indexed' gctr_encrypt_block_curried plain_LE i in
if length plain_LE = 0 then (
assert(equal (g) (s));
()
) else (
test icb_BE (tail plain_LE) alg key (i+1);
assert (gctr_encrypt_recursive icb_BE (tail plain_LE) alg key (i+1) == seq_map_i_indexed' gctr_encrypt_block_curried (tail plain_LE) (i+1))
)
*)
let rec gctr_encrypt_recursive_length (icb:quad32) (plain:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (i:int) : Lemma
(requires True)
(ensures length (gctr_encrypt_recursive icb plain alg key i) == length plain)
(decreases %[length plain])
[SMTPat (length (gctr_encrypt_recursive icb plain alg key i))]
=
if length plain = 0 then ()
else gctr_encrypt_recursive_length icb (tail plain) alg key (i + 1)
#reset-options "--z3rlimit 40"
let gctr_encrypt_length (icb_BE:quad32) (plain:gctr_plain_LE)
(alg:algorithm) (key:aes_key_LE alg) :
Lemma(length (gctr_encrypt_LE icb_BE plain alg key) == length plain)
[SMTPat (length (gctr_encrypt_LE icb_BE plain alg key))]
=
reveal_opaque (`%le_bytes_to_seq_quad32) le_bytes_to_seq_quad32;
gctr_encrypt_LE_reveal ();
let num_extra = (length plain) % 16 in
let result = gctr_encrypt_LE icb_BE plain alg key in
if num_extra = 0 then (
let plain_quads_LE = le_bytes_to_seq_quad32 plain in
gctr_encrypt_recursive_length icb_BE plain_quads_LE alg key 0
) else (
let full_bytes_len = (length plain) - num_extra in
let full_blocks, final_block = split plain full_bytes_len in
let full_quads_LE = le_bytes_to_seq_quad32 full_blocks in
let final_quad_LE = le_bytes_to_quad32 (pad_to_128_bits final_block) in
let cipher_quads_LE = gctr_encrypt_recursive icb_BE full_quads_LE alg key 0 in
let final_cipher_quad_LE = gctr_encrypt_block icb_BE final_quad_LE alg key (full_bytes_len / 16) in
let cipher_bytes_full_LE = le_seq_quad32_to_bytes cipher_quads_LE in
let final_cipher_bytes_LE = slice (le_quad32_to_bytes final_cipher_quad_LE) 0 num_extra in
gctr_encrypt_recursive_length icb_BE full_quads_LE alg key 0;
assert (length result == length cipher_bytes_full_LE + length final_cipher_bytes_LE);
assert (length cipher_quads_LE == length full_quads_LE);
assert (length cipher_bytes_full_LE == 16 * length cipher_quads_LE);
assert (16 * length full_quads_LE == length full_blocks);
assert (length cipher_bytes_full_LE == length full_blocks);
()
)
#reset-options
let rec gctr_indexed_helper (icb:quad32) (plain:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (i:int) : Lemma
(requires True)
(ensures (let cipher = gctr_encrypt_recursive icb plain alg key i in
length cipher == length plain /\
(forall j . {:pattern index cipher j} 0 <= j /\ j < length plain ==>
index cipher j == quad32_xor (index plain j) (aes_encrypt_BE alg key (inc32 icb (i + j)) ))))
(decreases %[length plain])
=
if length plain = 0 then ()
else
let tl = tail plain in
let cipher = gctr_encrypt_recursive icb plain alg key i in
let r_cipher = gctr_encrypt_recursive icb tl alg key (i+1) in
let helper (j:int) :
Lemma ((0 <= j /\ j < length plain) ==> (index cipher j == quad32_xor (index plain j) (aes_encrypt_BE alg key (inc32 icb (i + j)) )))
=
aes_encrypt_LE_reveal ();
if 0 < j && j < length plain then (
gctr_indexed_helper icb tl alg key (i+1);
assert(index r_cipher (j-1) == quad32_xor (index tl (j-1)) (aes_encrypt_BE alg key (inc32 icb (i + 1 + j - 1)) )) // OBSERVE
) else ()
in
FStar.Classical.forall_intro helper
let gctr_indexed (icb:quad32) (plain:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (cipher:seq quad32) : Lemma
(requires length cipher == length plain /\
(forall i . {:pattern index cipher i} 0 <= i /\ i < length cipher ==>
index cipher i == quad32_xor (index plain i) (aes_encrypt_BE alg key (inc32 icb i) )))
(ensures cipher == gctr_encrypt_recursive icb plain alg key 0)
=
gctr_indexed_helper icb plain alg key 0;
let c = gctr_encrypt_recursive icb plain alg key 0 in
assert(equal cipher c) // OBSERVE: Invoke extensionality lemmas
let gctr_partial_completed (alg:algorithm) (plain cipher:seq quad32) (key:seq nat32) (icb:quad32) =
gctr_indexed icb plain alg key cipher;
()
let gctr_partial_opaque_completed (alg:algorithm) (plain cipher:seq quad32) (key:seq nat32) (icb:quad32) : Lemma
(requires
is_aes_key_LE alg key /\
length plain == length cipher /\
length plain < pow2_32 /\
gctr_partial alg (length cipher) plain cipher key icb
)
(ensures cipher == gctr_encrypt_recursive icb plain alg key 0)
=
gctr_partial_reveal ();
gctr_partial_completed alg plain cipher key icb
let gctr_partial_to_full_basic (icb_BE:quad32) (plain:seq quad32) (alg:algorithm) (key:seq nat32) (cipher:seq quad32) =
gctr_encrypt_LE_reveal ();
let p = le_seq_quad32_to_bytes plain in
assert (length p % 16 == 0);
let plain_quads_LE = le_bytes_to_seq_quad32 p in
let cipher_quads_LE = gctr_encrypt_recursive icb_BE plain_quads_LE alg key 0 in
let cipher_bytes = le_seq_quad32_to_bytes cipher_quads_LE in
le_bytes_to_seq_quad32_to_bytes plain;
()
(*
Want to show that:
slice (le_seq_quad32_to_bytes (buffer128_as_seq(mem, out_b))) 0 num_bytes
==
gctr_encrypt_LE icb_BE (slice (le_seq_quad32_to_bytes (buffer128_as_seq(mem, in_b))) 0 num_bytes) ...
We know that
slice (buffer128_as_seq(mem, out_b) 0 num_blocks
==
gctr_encrypt_recursive icb_BE (slice buffer128_as_seq(mem, in_b) 0 num_blocks) ...
And we know that:
get_mem out_b num_blocks
==
gctr_encrypt_block(icb_BE, (get_mem inb num_blocks), alg, key, num_blocks);
Internally gctr_encrypt_LE will compute:
full_blocks, final_block = split (slice (le_seq_quad32_to_bytes (buffer128_as_seq(mem, in_b))) 0 num_bytes) (num_blocks * 16)
We'd like to show that
Step1: le_bytes_to_seq_quad32 full_blocks == slice buffer128_as_seq(mem, in_b) 0 num_blocks
and
Step2: final_block == slice (le_quad32_to_bytes (get_mem inb num_blocks)) 0 num_extra
Then we need to break down the byte-level effects of gctr_encrypt_block to show that even though the
padded version of final_block differs from (get_mem inb num_blocks), after we slice it at the end,
we end up with the same value
*)
let step1 (p:seq quad32) (num_bytes:nat{ num_bytes < 16 * length p }) : Lemma
(let num_extra = num_bytes % 16 in
let num_blocks = num_bytes / 16 in
let full_blocks, final_block = split (slice (le_seq_quad32_to_bytes p) 0 num_bytes) (num_blocks * 16) in
let full_quads_LE = le_bytes_to_seq_quad32 full_blocks in
let p_prefix = slice p 0 num_blocks in
p_prefix == full_quads_LE)
=
let num_extra = num_bytes % 16 in
let num_blocks = num_bytes / 16 in
let full_blocks, final_block = split (slice (le_seq_quad32_to_bytes p) 0 num_bytes) (num_blocks * 16) in
let full_quads_LE = le_bytes_to_seq_quad32 full_blocks in
let p_prefix = slice p 0 num_blocks in
assert (length full_blocks == num_blocks * 16);
assert (full_blocks == slice (slice (le_seq_quad32_to_bytes p) 0 num_bytes) 0 (num_blocks * 16));
assert (full_blocks == slice (le_seq_quad32_to_bytes p) 0 (num_blocks * 16));
slice_commutes_le_seq_quad32_to_bytes0 p num_blocks;
assert (full_blocks == le_seq_quad32_to_bytes (slice p 0 num_blocks));
le_bytes_to_seq_quad32_to_bytes (slice p 0 num_blocks);
assert (full_quads_LE == (slice p 0 num_blocks));
()
#reset-options "--smtencoding.elim_box true --z3rlimit 30"
let lemma_slice_orig_index (#a:Type) (s s':seq a) (m n:nat) : Lemma
(requires length s == length s' /\ m <= n /\ n <= length s /\ slice s m n == slice s' m n)
(ensures (forall (i:int).{:pattern (index s i) \/ (index s' i)} m <= i /\ i < n ==> index s i == index s' i))
=
let aux (i:nat{m <= i /\ i < n}) : Lemma (index s i == index s' i) =
lemma_index_slice s m n (i - m);
lemma_index_slice s' m n (i - m)
in Classical.forall_intro aux
let lemma_ishl_32 (x:nat32) (k:nat) : Lemma
(ensures ishl #pow2_32 x k == x * pow2 k % pow2_32)
=
Vale.Def.TypesNative_s.reveal_ishl 32 x k;
FStar.UInt.shift_left_value_lemma #32 x k;
()
let lemma_ishl_ixor_32 (x y:nat32) (k:nat) : Lemma
(ensures ishl #pow2_32 (ixor x y) k == ixor (ishl x k) (ishl y k))
=
Vale.Def.TypesNative_s.reveal_ishl 32 x k;
Vale.Def.TypesNative_s.reveal_ishl 32 y k;
Vale.Def.TypesNative_s.reveal_ishl 32 (ixor x y) k;
Vale.Def.TypesNative_s.reveal_ixor 32 x y;
Vale.Def.TypesNative_s.reveal_ixor 32 (ishl x k) (ishl y k);
FStar.UInt.shift_left_logxor_lemma #32 x y k;
()
unfold let pow2_24 = 0x1000000
let nat24 = natN pow2_24
let nat32_xor_bytewise_1_helper1 (x0 x0':nat8) (x1 x1':nat24) (x x':nat32) : Lemma
(requires
x == x0 + 0x100 * x1 /\
x' == x0' + 0x100 * x1' /\
x * 0x1000000 % 0x100000000 == x' * 0x1000000 % 0x100000000
)
(ensures x0 == x0')
=
()
let nat32_xor_bytewise_2_helper1 (x0 x0' x1 x1':nat16) (x x':nat32) : Lemma
(requires
x == x0 + 0x10000 * x1 /\
x' == x0' + 0x10000 * x1' /\
x * 0x10000 % 0x100000000 == x' * 0x10000 % 0x100000000
)
(ensures x0 == x0')
=
()
let nat32_xor_bytewise_3_helper1 (x0 x0':nat24) (x1 x1':nat8) (x x':nat32) : Lemma
(requires
x == x0 + 0x1000000 * x1 /\
x' == x0' + 0x1000000 * x1' /\
x * 0x100 % 0x100000000 == x' * 0x100 % 0x100000000
)
(ensures x0 == x0')
=
()
let nat32_xor_bytewise_1_helper2 (x x':nat32) (t t':four nat8) : Lemma
(requires
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
x * 0x1000000 % 0x100000000 == x' * 0x1000000 % 0x100000000
) | false | false | Vale.AES.GCTR.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 30,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val nat32_xor_bytewise_1_helper2 (x x': nat32) (t t': four nat8)
: Lemma
(requires
x == four_to_nat 8 t /\ x' == four_to_nat 8 t' /\
x * 0x1000000 % 0x100000000 == x' * 0x1000000 % 0x100000000) (ensures t.lo0 == t'.lo0) | [] | Vale.AES.GCTR.nat32_xor_bytewise_1_helper2 | {
"file_name": "vale/code/crypto/aes/Vale.AES.GCTR.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} |
x: Vale.Def.Types_s.nat32 ->
x': Vale.Def.Types_s.nat32 ->
t: Vale.Def.Words_s.four Vale.Def.Types_s.nat8 ->
t': Vale.Def.Words_s.four Vale.Def.Types_s.nat8
-> FStar.Pervasives.Lemma
(requires
x == Vale.Def.Words.Four_s.four_to_nat 8 t /\ x' == Vale.Def.Words.Four_s.four_to_nat 8 t' /\
x * 0x1000000 % 0x100000000 == x' * 0x1000000 % 0x100000000)
(ensures Mkfour?.lo0 t == Mkfour?.lo0 t') | {
"end_col": 4,
"end_line": 344,
"start_col": 3,
"start_line": 336
} |
FStar.Pervasives.Lemma | val gctr_encrypt_empty (icb_BE:quad32) (plain_LE cipher_LE:seq quad32) (alg:algorithm) (key:seq nat32) : Lemma
(requires is_aes_key_LE alg key)
(ensures (
let plain = slice (le_seq_quad32_to_bytes plain_LE) 0 0 in
let cipher = slice (le_seq_quad32_to_bytes cipher_LE) 0 0 in
cipher = gctr_encrypt_LE icb_BE (make_gctr_plain_LE plain) alg key
)) | [
{
"abbrev": false,
"full_module": "Vale.AES.Types_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words.Four_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words.Seq_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Lib.Seqs",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCM_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCTR_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let gctr_encrypt_empty (icb_BE:quad32) (plain_LE cipher_LE:seq quad32) (alg:algorithm) (key:seq nat32) =
reveal_opaque (`%le_bytes_to_seq_quad32) le_bytes_to_seq_quad32;
gctr_encrypt_LE_reveal ();
let plain = slice (le_seq_quad32_to_bytes plain_LE) 0 0 in
let cipher = slice (le_seq_quad32_to_bytes cipher_LE) 0 0 in
assert (plain == empty);
assert (cipher == empty);
assert (length plain == 0);
assert (make_gctr_plain_LE plain == empty);
let num_extra = (length (make_gctr_plain_LE plain)) % 16 in
assert (num_extra == 0);
let plain_quads_LE = le_bytes_to_seq_quad32 plain in
let cipher_quads_LE = gctr_encrypt_recursive icb_BE plain_quads_LE alg key 0 in
assert (equal plain_quads_LE empty); // OBSERVE
assert (plain_quads_LE == empty);
assert (cipher_quads_LE == empty);
assert (equal (le_seq_quad32_to_bytes cipher_quads_LE) empty); // OBSERVEs
() | val gctr_encrypt_empty (icb_BE:quad32) (plain_LE cipher_LE:seq quad32) (alg:algorithm) (key:seq nat32) : Lemma
(requires is_aes_key_LE alg key)
(ensures (
let plain = slice (le_seq_quad32_to_bytes plain_LE) 0 0 in
let cipher = slice (le_seq_quad32_to_bytes cipher_LE) 0 0 in
cipher = gctr_encrypt_LE icb_BE (make_gctr_plain_LE plain) alg key
))
let gctr_encrypt_empty
(icb_BE: quad32)
(plain_LE cipher_LE: seq quad32)
(alg: algorithm)
(key: seq nat32)
= | false | null | true | reveal_opaque (`%le_bytes_to_seq_quad32) le_bytes_to_seq_quad32;
gctr_encrypt_LE_reveal ();
let plain = slice (le_seq_quad32_to_bytes plain_LE) 0 0 in
let cipher = slice (le_seq_quad32_to_bytes cipher_LE) 0 0 in
assert (plain == empty);
assert (cipher == empty);
assert (length plain == 0);
assert (make_gctr_plain_LE plain == empty);
let num_extra = (length (make_gctr_plain_LE plain)) % 16 in
assert (num_extra == 0);
let plain_quads_LE = le_bytes_to_seq_quad32 plain in
let cipher_quads_LE = gctr_encrypt_recursive icb_BE plain_quads_LE alg key 0 in
assert (equal plain_quads_LE empty);
assert (plain_quads_LE == empty);
assert (cipher_quads_LE == empty);
assert (equal (le_seq_quad32_to_bytes cipher_quads_LE) empty);
() | {
"checked_file": "Vale.AES.GCTR.fst.checked",
"dependencies": [
"Vale.Poly1305.Bitvectors.fsti.checked",
"Vale.Lib.Seqs.fsti.checked",
"Vale.Def.Words_s.fsti.checked",
"Vale.Def.Words.Seq_s.fsti.checked",
"Vale.Def.Words.Four_s.fsti.checked",
"Vale.Def.TypesNative_s.fst.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Opaque_s.fsti.checked",
"Vale.Arch.Types.fsti.checked",
"Vale.AES.Types_helpers.fsti.checked",
"Vale.AES.GCTR_s.fst.checked",
"Vale.AES.GCM_helpers.fsti.checked",
"Vale.AES.AES_s.fst.checked",
"prims.fst.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.Properties.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Classical.fsti.checked",
"FStar.Calc.fsti.checked"
],
"interface_file": true,
"source_file": "Vale.AES.GCTR.fst"
} | [
"lemma"
] | [
"Vale.Def.Types_s.quad32",
"FStar.Seq.Base.seq",
"Vale.AES.AES_common_s.algorithm",
"Vale.Def.Types_s.nat32",
"Prims.unit",
"Prims._assert",
"FStar.Seq.Base.equal",
"Vale.Def.Types_s.nat8",
"Vale.Def.Types_s.le_seq_quad32_to_bytes",
"FStar.Seq.Base.empty",
"Prims.eq2",
"Vale.AES.GCTR_s.gctr_encrypt_recursive",
"Vale.Def.Types_s.le_bytes_to_seq_quad32",
"Prims.int",
"Prims.op_Modulus",
"FStar.Seq.Base.length",
"Vale.AES.GCTR.make_gctr_plain_LE",
"Vale.Def.Words_s.nat8",
"FStar.Seq.Base.slice",
"Vale.AES.GCTR_s.gctr_encrypt_LE_reveal",
"FStar.Pervasives.reveal_opaque",
"Prims.l_True"
] | [] | module Vale.AES.GCTR
open Vale.Def.Opaque_s
open Vale.Def.Words_s
open Vale.Def.Words.Seq_s
open Vale.Def.Words.Four_s
open Vale.Def.Types_s
open Vale.Arch.Types
open FStar.Mul
open FStar.Seq
open Vale.AES.AES_s
open Vale.AES.GCTR_s
open Vale.AES.GCM_helpers
open FStar.Math.Lemmas
open Vale.Lib.Seqs
open Vale.AES.Types_helpers
#set-options "--z3rlimit 20 --max_fuel 1 --max_ifuel 0"
let lemma_counter_init x low64 low8 =
Vale.Poly1305.Bitvectors.lemma_bytes_and_mod1 low64;
Vale.Def.TypesNative_s.reveal_iand 64 low64 0xff;
assert (low8 == low64 % 256);
lo64_reveal ();
assert_norm (pow2_norm 32 == pow2_32); // OBSERVE
assert (low64 == x.lo0 + x.lo1 * pow2_32); // OBSERVE
assert (low64 % 256 == x.lo0 % 256);
()
let gctr_encrypt_block_offset (icb_BE:quad32) (plain_LE:quad32) (alg:algorithm) (key:seq nat32) (i:int) =
() | false | false | Vale.AES.GCTR.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 20,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val gctr_encrypt_empty (icb_BE:quad32) (plain_LE cipher_LE:seq quad32) (alg:algorithm) (key:seq nat32) : Lemma
(requires is_aes_key_LE alg key)
(ensures (
let plain = slice (le_seq_quad32_to_bytes plain_LE) 0 0 in
let cipher = slice (le_seq_quad32_to_bytes cipher_LE) 0 0 in
cipher = gctr_encrypt_LE icb_BE (make_gctr_plain_LE plain) alg key
)) | [] | Vale.AES.GCTR.gctr_encrypt_empty | {
"file_name": "vale/code/crypto/aes/Vale.AES.GCTR.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} |
icb_BE: Vale.Def.Types_s.quad32 ->
plain_LE: FStar.Seq.Base.seq Vale.Def.Types_s.quad32 ->
cipher_LE: FStar.Seq.Base.seq Vale.Def.Types_s.quad32 ->
alg: Vale.AES.AES_common_s.algorithm ->
key: FStar.Seq.Base.seq Vale.Def.Types_s.nat32
-> FStar.Pervasives.Lemma (requires Vale.AES.AES_s.is_aes_key_LE alg key)
(ensures
(let plain = FStar.Seq.Base.slice (Vale.Def.Types_s.le_seq_quad32_to_bytes plain_LE) 0 0 in
let cipher =
FStar.Seq.Base.slice (Vale.Def.Types_s.le_seq_quad32_to_bytes cipher_LE) 0 0
in
cipher =
Vale.AES.GCTR_s.gctr_encrypt_LE icb_BE (Vale.AES.GCTR.make_gctr_plain_LE plain) alg key)) | {
"end_col": 4,
"end_line": 50,
"start_col": 2,
"start_line": 34
} |
FStar.Pervasives.Lemma | val gctr_partial_to_full_basic (icb_BE:quad32) (plain:seq quad32) (alg:algorithm) (key:seq nat32) (cipher:seq quad32) : Lemma
(requires
is_aes_key_LE alg key /\
cipher == gctr_encrypt_recursive icb_BE plain alg key 0 /\
length plain * 16 < pow2_32
)
(ensures le_seq_quad32_to_bytes cipher == gctr_encrypt_LE icb_BE (le_seq_quad32_to_bytes plain) alg key) | [
{
"abbrev": false,
"full_module": "Vale.AES.Types_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words.Four_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words.Seq_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Lib.Seqs",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCM_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCTR_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let gctr_partial_to_full_basic (icb_BE:quad32) (plain:seq quad32) (alg:algorithm) (key:seq nat32) (cipher:seq quad32) =
gctr_encrypt_LE_reveal ();
let p = le_seq_quad32_to_bytes plain in
assert (length p % 16 == 0);
let plain_quads_LE = le_bytes_to_seq_quad32 p in
let cipher_quads_LE = gctr_encrypt_recursive icb_BE plain_quads_LE alg key 0 in
let cipher_bytes = le_seq_quad32_to_bytes cipher_quads_LE in
le_bytes_to_seq_quad32_to_bytes plain;
() | val gctr_partial_to_full_basic (icb_BE:quad32) (plain:seq quad32) (alg:algorithm) (key:seq nat32) (cipher:seq quad32) : Lemma
(requires
is_aes_key_LE alg key /\
cipher == gctr_encrypt_recursive icb_BE plain alg key 0 /\
length plain * 16 < pow2_32
)
(ensures le_seq_quad32_to_bytes cipher == gctr_encrypt_LE icb_BE (le_seq_quad32_to_bytes plain) alg key)
let gctr_partial_to_full_basic
(icb_BE: quad32)
(plain: seq quad32)
(alg: algorithm)
(key: seq nat32)
(cipher: seq quad32)
= | false | null | true | gctr_encrypt_LE_reveal ();
let p = le_seq_quad32_to_bytes plain in
assert (length p % 16 == 0);
let plain_quads_LE = le_bytes_to_seq_quad32 p in
let cipher_quads_LE = gctr_encrypt_recursive icb_BE plain_quads_LE alg key 0 in
let cipher_bytes = le_seq_quad32_to_bytes cipher_quads_LE in
le_bytes_to_seq_quad32_to_bytes plain;
() | {
"checked_file": "Vale.AES.GCTR.fst.checked",
"dependencies": [
"Vale.Poly1305.Bitvectors.fsti.checked",
"Vale.Lib.Seqs.fsti.checked",
"Vale.Def.Words_s.fsti.checked",
"Vale.Def.Words.Seq_s.fsti.checked",
"Vale.Def.Words.Four_s.fsti.checked",
"Vale.Def.TypesNative_s.fst.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Opaque_s.fsti.checked",
"Vale.Arch.Types.fsti.checked",
"Vale.AES.Types_helpers.fsti.checked",
"Vale.AES.GCTR_s.fst.checked",
"Vale.AES.GCM_helpers.fsti.checked",
"Vale.AES.AES_s.fst.checked",
"prims.fst.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.Properties.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Classical.fsti.checked",
"FStar.Calc.fsti.checked"
],
"interface_file": true,
"source_file": "Vale.AES.GCTR.fst"
} | [
"lemma"
] | [
"Vale.Def.Types_s.quad32",
"FStar.Seq.Base.seq",
"Vale.AES.AES_common_s.algorithm",
"Vale.Def.Types_s.nat32",
"Prims.unit",
"Vale.Arch.Types.le_bytes_to_seq_quad32_to_bytes",
"Vale.Def.Words_s.nat8",
"Vale.Def.Types_s.le_seq_quad32_to_bytes",
"Vale.AES.GCTR_s.gctr_encrypt_recursive",
"Vale.Def.Types_s.le_bytes_to_seq_quad32",
"Prims._assert",
"Prims.eq2",
"Prims.int",
"Prims.op_Modulus",
"FStar.Seq.Base.length",
"Vale.Def.Types_s.nat8",
"Vale.AES.GCTR_s.gctr_encrypt_LE_reveal"
] | [] | module Vale.AES.GCTR
open Vale.Def.Opaque_s
open Vale.Def.Words_s
open Vale.Def.Words.Seq_s
open Vale.Def.Words.Four_s
open Vale.Def.Types_s
open Vale.Arch.Types
open FStar.Mul
open FStar.Seq
open Vale.AES.AES_s
open Vale.AES.GCTR_s
open Vale.AES.GCM_helpers
open FStar.Math.Lemmas
open Vale.Lib.Seqs
open Vale.AES.Types_helpers
#set-options "--z3rlimit 20 --max_fuel 1 --max_ifuel 0"
let lemma_counter_init x low64 low8 =
Vale.Poly1305.Bitvectors.lemma_bytes_and_mod1 low64;
Vale.Def.TypesNative_s.reveal_iand 64 low64 0xff;
assert (low8 == low64 % 256);
lo64_reveal ();
assert_norm (pow2_norm 32 == pow2_32); // OBSERVE
assert (low64 == x.lo0 + x.lo1 * pow2_32); // OBSERVE
assert (low64 % 256 == x.lo0 % 256);
()
let gctr_encrypt_block_offset (icb_BE:quad32) (plain_LE:quad32) (alg:algorithm) (key:seq nat32) (i:int) =
()
let gctr_encrypt_empty (icb_BE:quad32) (plain_LE cipher_LE:seq quad32) (alg:algorithm) (key:seq nat32) =
reveal_opaque (`%le_bytes_to_seq_quad32) le_bytes_to_seq_quad32;
gctr_encrypt_LE_reveal ();
let plain = slice (le_seq_quad32_to_bytes plain_LE) 0 0 in
let cipher = slice (le_seq_quad32_to_bytes cipher_LE) 0 0 in
assert (plain == empty);
assert (cipher == empty);
assert (length plain == 0);
assert (make_gctr_plain_LE plain == empty);
let num_extra = (length (make_gctr_plain_LE plain)) % 16 in
assert (num_extra == 0);
let plain_quads_LE = le_bytes_to_seq_quad32 plain in
let cipher_quads_LE = gctr_encrypt_recursive icb_BE plain_quads_LE alg key 0 in
assert (equal plain_quads_LE empty); // OBSERVE
assert (plain_quads_LE == empty);
assert (cipher_quads_LE == empty);
assert (equal (le_seq_quad32_to_bytes cipher_quads_LE) empty); // OBSERVEs
()
let gctr_partial_opaque_init alg plain cipher key icb =
gctr_partial_reveal ();
()
#restart-solver
let lemma_gctr_partial_append alg b1 b2 p1 c1 p2 c2 key icb1 icb2 =
gctr_partial_reveal ();
()
let gctr_partial_opaque_ignores_postfix alg bound plain plain' cipher cipher' key icb =
gctr_partial_reveal ();
// OBSERVE:
assert (forall i . 0 <= i /\ i < bound ==> index plain i == index (slice plain 0 bound) i);
assert (forall i . 0 <= i /\ i < bound ==> index plain' i == index (slice plain' 0 bound) i);
assert (forall i . 0 <= i /\ i < bound ==> index cipher i == index (slice cipher 0 bound) i);
assert (forall i . 0 <= i /\ i < bound ==> index cipher' i == index (slice cipher' 0 bound) i);
()
let gctr_partial_extend6 (alg:algorithm) (bound:nat) (plain cipher:seq quad32) (key:seq nat32) (icb:quad32)
=
gctr_partial_reveal ();
()
(*
let rec seq_map_i_indexed' (#a:Type) (#b:Type) (f:int->a->b) (s:seq a) (i:int) :
Tot (s':seq b { length s' == length s /\
(forall j . {:pattern index s' j} 0 <= j /\ j < length s ==> index s' j == f (i + j) (index s j))
})
(decreases (length s))
=
if length s = 0 then empty
else cons (f i (head s)) (seq_map_i_indexed f (tail s) (i + 1))
let rec test (icb_BE:quad32) (plain_LE:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (i:int) :
Lemma (ensures
(let gctr_encrypt_block_curried (j:int) (p:quad32) = gctr_encrypt_block icb_BE p alg key j in
gctr_encrypt_recursive icb_BE plain_LE alg key i == seq_map_i_indexed' gctr_encrypt_block_curried plain_LE i))
(decreases (length plain_LE))
=
let gctr_encrypt_block_curried (j:int) (p:quad32) = gctr_encrypt_block icb_BE p alg key j in
let g = gctr_encrypt_recursive icb_BE plain_LE alg key i in
let s = seq_map_i_indexed' gctr_encrypt_block_curried plain_LE i in
if length plain_LE = 0 then (
assert(equal (g) (s));
()
) else (
test icb_BE (tail plain_LE) alg key (i+1);
assert (gctr_encrypt_recursive icb_BE (tail plain_LE) alg key (i+1) == seq_map_i_indexed' gctr_encrypt_block_curried (tail plain_LE) (i+1))
)
*)
let rec gctr_encrypt_recursive_length (icb:quad32) (plain:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (i:int) : Lemma
(requires True)
(ensures length (gctr_encrypt_recursive icb plain alg key i) == length plain)
(decreases %[length plain])
[SMTPat (length (gctr_encrypt_recursive icb plain alg key i))]
=
if length plain = 0 then ()
else gctr_encrypt_recursive_length icb (tail plain) alg key (i + 1)
#reset-options "--z3rlimit 40"
let gctr_encrypt_length (icb_BE:quad32) (plain:gctr_plain_LE)
(alg:algorithm) (key:aes_key_LE alg) :
Lemma(length (gctr_encrypt_LE icb_BE plain alg key) == length plain)
[SMTPat (length (gctr_encrypt_LE icb_BE plain alg key))]
=
reveal_opaque (`%le_bytes_to_seq_quad32) le_bytes_to_seq_quad32;
gctr_encrypt_LE_reveal ();
let num_extra = (length plain) % 16 in
let result = gctr_encrypt_LE icb_BE plain alg key in
if num_extra = 0 then (
let plain_quads_LE = le_bytes_to_seq_quad32 plain in
gctr_encrypt_recursive_length icb_BE plain_quads_LE alg key 0
) else (
let full_bytes_len = (length plain) - num_extra in
let full_blocks, final_block = split plain full_bytes_len in
let full_quads_LE = le_bytes_to_seq_quad32 full_blocks in
let final_quad_LE = le_bytes_to_quad32 (pad_to_128_bits final_block) in
let cipher_quads_LE = gctr_encrypt_recursive icb_BE full_quads_LE alg key 0 in
let final_cipher_quad_LE = gctr_encrypt_block icb_BE final_quad_LE alg key (full_bytes_len / 16) in
let cipher_bytes_full_LE = le_seq_quad32_to_bytes cipher_quads_LE in
let final_cipher_bytes_LE = slice (le_quad32_to_bytes final_cipher_quad_LE) 0 num_extra in
gctr_encrypt_recursive_length icb_BE full_quads_LE alg key 0;
assert (length result == length cipher_bytes_full_LE + length final_cipher_bytes_LE);
assert (length cipher_quads_LE == length full_quads_LE);
assert (length cipher_bytes_full_LE == 16 * length cipher_quads_LE);
assert (16 * length full_quads_LE == length full_blocks);
assert (length cipher_bytes_full_LE == length full_blocks);
()
)
#reset-options
let rec gctr_indexed_helper (icb:quad32) (plain:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (i:int) : Lemma
(requires True)
(ensures (let cipher = gctr_encrypt_recursive icb plain alg key i in
length cipher == length plain /\
(forall j . {:pattern index cipher j} 0 <= j /\ j < length plain ==>
index cipher j == quad32_xor (index plain j) (aes_encrypt_BE alg key (inc32 icb (i + j)) ))))
(decreases %[length plain])
=
if length plain = 0 then ()
else
let tl = tail plain in
let cipher = gctr_encrypt_recursive icb plain alg key i in
let r_cipher = gctr_encrypt_recursive icb tl alg key (i+1) in
let helper (j:int) :
Lemma ((0 <= j /\ j < length plain) ==> (index cipher j == quad32_xor (index plain j) (aes_encrypt_BE alg key (inc32 icb (i + j)) )))
=
aes_encrypt_LE_reveal ();
if 0 < j && j < length plain then (
gctr_indexed_helper icb tl alg key (i+1);
assert(index r_cipher (j-1) == quad32_xor (index tl (j-1)) (aes_encrypt_BE alg key (inc32 icb (i + 1 + j - 1)) )) // OBSERVE
) else ()
in
FStar.Classical.forall_intro helper
let gctr_indexed (icb:quad32) (plain:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (cipher:seq quad32) : Lemma
(requires length cipher == length plain /\
(forall i . {:pattern index cipher i} 0 <= i /\ i < length cipher ==>
index cipher i == quad32_xor (index plain i) (aes_encrypt_BE alg key (inc32 icb i) )))
(ensures cipher == gctr_encrypt_recursive icb plain alg key 0)
=
gctr_indexed_helper icb plain alg key 0;
let c = gctr_encrypt_recursive icb plain alg key 0 in
assert(equal cipher c) // OBSERVE: Invoke extensionality lemmas
let gctr_partial_completed (alg:algorithm) (plain cipher:seq quad32) (key:seq nat32) (icb:quad32) =
gctr_indexed icb plain alg key cipher;
()
let gctr_partial_opaque_completed (alg:algorithm) (plain cipher:seq quad32) (key:seq nat32) (icb:quad32) : Lemma
(requires
is_aes_key_LE alg key /\
length plain == length cipher /\
length plain < pow2_32 /\
gctr_partial alg (length cipher) plain cipher key icb
)
(ensures cipher == gctr_encrypt_recursive icb plain alg key 0)
=
gctr_partial_reveal ();
gctr_partial_completed alg plain cipher key icb | false | false | Vale.AES.GCTR.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val gctr_partial_to_full_basic (icb_BE:quad32) (plain:seq quad32) (alg:algorithm) (key:seq nat32) (cipher:seq quad32) : Lemma
(requires
is_aes_key_LE alg key /\
cipher == gctr_encrypt_recursive icb_BE plain alg key 0 /\
length plain * 16 < pow2_32
)
(ensures le_seq_quad32_to_bytes cipher == gctr_encrypt_LE icb_BE (le_seq_quad32_to_bytes plain) alg key) | [] | Vale.AES.GCTR.gctr_partial_to_full_basic | {
"file_name": "vale/code/crypto/aes/Vale.AES.GCTR.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} |
icb_BE: Vale.Def.Types_s.quad32 ->
plain: FStar.Seq.Base.seq Vale.Def.Types_s.quad32 ->
alg: Vale.AES.AES_common_s.algorithm ->
key: FStar.Seq.Base.seq Vale.Def.Types_s.nat32 ->
cipher: FStar.Seq.Base.seq Vale.Def.Types_s.quad32
-> FStar.Pervasives.Lemma
(requires
Vale.AES.AES_s.is_aes_key_LE alg key /\
cipher == Vale.AES.GCTR_s.gctr_encrypt_recursive icb_BE plain alg key 0 /\
FStar.Seq.Base.length plain * 16 < Vale.Def.Words_s.pow2_32)
(ensures
Vale.Def.Types_s.le_seq_quad32_to_bytes cipher ==
Vale.AES.GCTR_s.gctr_encrypt_LE icb_BE
(Vale.Def.Types_s.le_seq_quad32_to_bytes plain)
alg
key) | {
"end_col": 4,
"end_line": 212,
"start_col": 2,
"start_line": 205
} |
FStar.Pervasives.Lemma | val nat32_xor_bytewise_3 (k k' x x' m: nat32) (s s' t t': four nat8)
: Lemma
(requires
k == four_to_nat 8 s /\ k' == four_to_nat 8 s' /\ x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\ ixor k m == x /\ ixor k' m == x' /\ s.lo0 == s'.lo0 /\
s.lo1 == s'.lo1 /\ s.hi2 == s'.hi2)
(ensures t.lo0 == t'.lo0 /\ t.lo1 == t'.lo1 /\ t.hi2 == t'.hi2) | [
{
"abbrev": false,
"full_module": "FStar.Seq.Properties",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.Types_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Lib.Seqs",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCM_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCTR_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words.Four_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words.Seq_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Lib.Seqs",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCM_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCTR_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let nat32_xor_bytewise_3 (k k' x x' m:nat32) (s s' t t':four nat8) : Lemma
(requires
k == four_to_nat 8 s /\
k' == four_to_nat 8 s' /\
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
ixor k m == x /\
ixor k' m == x' /\
s.lo0 == s'.lo0 /\ s.lo1 == s'.lo1 /\ s.hi2 == s'.hi2
)
(ensures t.lo0 == t'.lo0 /\ t.lo1 == t'.lo1 /\ t.hi2 == t'.hi2)
=
let Mkfour s0 s1 s2 s3 = s in
let Mkfour s0' s1' s2' s3' = s' in
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
nat32_xor_bytewise_3_helper3 k k' s s';
lemma_ishl_32 k 8;
lemma_ishl_32 k' 8;
lemma_ishl_32 x 8;
lemma_ishl_32 x' 8;
lemma_ishl_ixor_32 k m 8;
lemma_ishl_ixor_32 k' m 8;
nat32_xor_bytewise_3_helper2 x x' t t';
() | val nat32_xor_bytewise_3 (k k' x x' m: nat32) (s s' t t': four nat8)
: Lemma
(requires
k == four_to_nat 8 s /\ k' == four_to_nat 8 s' /\ x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\ ixor k m == x /\ ixor k' m == x' /\ s.lo0 == s'.lo0 /\
s.lo1 == s'.lo1 /\ s.hi2 == s'.hi2)
(ensures t.lo0 == t'.lo0 /\ t.lo1 == t'.lo1 /\ t.hi2 == t'.hi2)
let nat32_xor_bytewise_3 (k k' x x' m: nat32) (s s' t t': four nat8)
: Lemma
(requires
k == four_to_nat 8 s /\ k' == four_to_nat 8 s' /\ x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\ ixor k m == x /\ ixor k' m == x' /\ s.lo0 == s'.lo0 /\
s.lo1 == s'.lo1 /\ s.hi2 == s'.hi2)
(ensures t.lo0 == t'.lo0 /\ t.lo1 == t'.lo1 /\ t.hi2 == t'.hi2) = | false | null | true | let Mkfour s0 s1 s2 s3 = s in
let Mkfour s0' s1' s2' s3' = s' in
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
nat32_xor_bytewise_3_helper3 k k' s s';
lemma_ishl_32 k 8;
lemma_ishl_32 k' 8;
lemma_ishl_32 x 8;
lemma_ishl_32 x' 8;
lemma_ishl_ixor_32 k m 8;
lemma_ishl_ixor_32 k' m 8;
nat32_xor_bytewise_3_helper2 x x' t t';
() | {
"checked_file": "Vale.AES.GCTR.fst.checked",
"dependencies": [
"Vale.Poly1305.Bitvectors.fsti.checked",
"Vale.Lib.Seqs.fsti.checked",
"Vale.Def.Words_s.fsti.checked",
"Vale.Def.Words.Seq_s.fsti.checked",
"Vale.Def.Words.Four_s.fsti.checked",
"Vale.Def.TypesNative_s.fst.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Opaque_s.fsti.checked",
"Vale.Arch.Types.fsti.checked",
"Vale.AES.Types_helpers.fsti.checked",
"Vale.AES.GCTR_s.fst.checked",
"Vale.AES.GCM_helpers.fsti.checked",
"Vale.AES.AES_s.fst.checked",
"prims.fst.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.Properties.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Classical.fsti.checked",
"FStar.Calc.fsti.checked"
],
"interface_file": true,
"source_file": "Vale.AES.GCTR.fst"
} | [
"lemma"
] | [
"Vale.Def.Types_s.nat32",
"Vale.Def.Words_s.four",
"Vale.Def.Types_s.nat8",
"Prims.unit",
"Vale.AES.GCTR.nat32_xor_bytewise_3_helper2",
"Vale.AES.GCTR.lemma_ishl_ixor_32",
"Vale.AES.GCTR.lemma_ishl_32",
"Vale.AES.GCTR.nat32_xor_bytewise_3_helper3",
"Vale.Def.Words_s.nat8",
"Prims.l_and",
"Prims.eq2",
"Vale.Def.Words_s.natN",
"Vale.Def.Words_s.pow2_32",
"Vale.Def.Words.Four_s.four_to_nat",
"Vale.Def.Types_s.ixor",
"Vale.Def.Words_s.__proj__Mkfour__item__lo0",
"Vale.Def.Words_s.__proj__Mkfour__item__lo1",
"Vale.Def.Words_s.__proj__Mkfour__item__hi2",
"Prims.squash",
"Prims.Nil",
"FStar.Pervasives.pattern"
] | [] | module Vale.AES.GCTR
open Vale.Def.Opaque_s
open Vale.Def.Words_s
open Vale.Def.Words.Seq_s
open Vale.Def.Words.Four_s
open Vale.Def.Types_s
open Vale.Arch.Types
open FStar.Mul
open FStar.Seq
open Vale.AES.AES_s
open Vale.AES.GCTR_s
open Vale.AES.GCM_helpers
open FStar.Math.Lemmas
open Vale.Lib.Seqs
open Vale.AES.Types_helpers
#set-options "--z3rlimit 20 --max_fuel 1 --max_ifuel 0"
let lemma_counter_init x low64 low8 =
Vale.Poly1305.Bitvectors.lemma_bytes_and_mod1 low64;
Vale.Def.TypesNative_s.reveal_iand 64 low64 0xff;
assert (low8 == low64 % 256);
lo64_reveal ();
assert_norm (pow2_norm 32 == pow2_32); // OBSERVE
assert (low64 == x.lo0 + x.lo1 * pow2_32); // OBSERVE
assert (low64 % 256 == x.lo0 % 256);
()
let gctr_encrypt_block_offset (icb_BE:quad32) (plain_LE:quad32) (alg:algorithm) (key:seq nat32) (i:int) =
()
let gctr_encrypt_empty (icb_BE:quad32) (plain_LE cipher_LE:seq quad32) (alg:algorithm) (key:seq nat32) =
reveal_opaque (`%le_bytes_to_seq_quad32) le_bytes_to_seq_quad32;
gctr_encrypt_LE_reveal ();
let plain = slice (le_seq_quad32_to_bytes plain_LE) 0 0 in
let cipher = slice (le_seq_quad32_to_bytes cipher_LE) 0 0 in
assert (plain == empty);
assert (cipher == empty);
assert (length plain == 0);
assert (make_gctr_plain_LE plain == empty);
let num_extra = (length (make_gctr_plain_LE plain)) % 16 in
assert (num_extra == 0);
let plain_quads_LE = le_bytes_to_seq_quad32 plain in
let cipher_quads_LE = gctr_encrypt_recursive icb_BE plain_quads_LE alg key 0 in
assert (equal plain_quads_LE empty); // OBSERVE
assert (plain_quads_LE == empty);
assert (cipher_quads_LE == empty);
assert (equal (le_seq_quad32_to_bytes cipher_quads_LE) empty); // OBSERVEs
()
let gctr_partial_opaque_init alg plain cipher key icb =
gctr_partial_reveal ();
()
#restart-solver
let lemma_gctr_partial_append alg b1 b2 p1 c1 p2 c2 key icb1 icb2 =
gctr_partial_reveal ();
()
let gctr_partial_opaque_ignores_postfix alg bound plain plain' cipher cipher' key icb =
gctr_partial_reveal ();
// OBSERVE:
assert (forall i . 0 <= i /\ i < bound ==> index plain i == index (slice plain 0 bound) i);
assert (forall i . 0 <= i /\ i < bound ==> index plain' i == index (slice plain' 0 bound) i);
assert (forall i . 0 <= i /\ i < bound ==> index cipher i == index (slice cipher 0 bound) i);
assert (forall i . 0 <= i /\ i < bound ==> index cipher' i == index (slice cipher' 0 bound) i);
()
let gctr_partial_extend6 (alg:algorithm) (bound:nat) (plain cipher:seq quad32) (key:seq nat32) (icb:quad32)
=
gctr_partial_reveal ();
()
(*
let rec seq_map_i_indexed' (#a:Type) (#b:Type) (f:int->a->b) (s:seq a) (i:int) :
Tot (s':seq b { length s' == length s /\
(forall j . {:pattern index s' j} 0 <= j /\ j < length s ==> index s' j == f (i + j) (index s j))
})
(decreases (length s))
=
if length s = 0 then empty
else cons (f i (head s)) (seq_map_i_indexed f (tail s) (i + 1))
let rec test (icb_BE:quad32) (plain_LE:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (i:int) :
Lemma (ensures
(let gctr_encrypt_block_curried (j:int) (p:quad32) = gctr_encrypt_block icb_BE p alg key j in
gctr_encrypt_recursive icb_BE plain_LE alg key i == seq_map_i_indexed' gctr_encrypt_block_curried plain_LE i))
(decreases (length plain_LE))
=
let gctr_encrypt_block_curried (j:int) (p:quad32) = gctr_encrypt_block icb_BE p alg key j in
let g = gctr_encrypt_recursive icb_BE plain_LE alg key i in
let s = seq_map_i_indexed' gctr_encrypt_block_curried plain_LE i in
if length plain_LE = 0 then (
assert(equal (g) (s));
()
) else (
test icb_BE (tail plain_LE) alg key (i+1);
assert (gctr_encrypt_recursive icb_BE (tail plain_LE) alg key (i+1) == seq_map_i_indexed' gctr_encrypt_block_curried (tail plain_LE) (i+1))
)
*)
let rec gctr_encrypt_recursive_length (icb:quad32) (plain:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (i:int) : Lemma
(requires True)
(ensures length (gctr_encrypt_recursive icb plain alg key i) == length plain)
(decreases %[length plain])
[SMTPat (length (gctr_encrypt_recursive icb plain alg key i))]
=
if length plain = 0 then ()
else gctr_encrypt_recursive_length icb (tail plain) alg key (i + 1)
#reset-options "--z3rlimit 40"
let gctr_encrypt_length (icb_BE:quad32) (plain:gctr_plain_LE)
(alg:algorithm) (key:aes_key_LE alg) :
Lemma(length (gctr_encrypt_LE icb_BE plain alg key) == length plain)
[SMTPat (length (gctr_encrypt_LE icb_BE plain alg key))]
=
reveal_opaque (`%le_bytes_to_seq_quad32) le_bytes_to_seq_quad32;
gctr_encrypt_LE_reveal ();
let num_extra = (length plain) % 16 in
let result = gctr_encrypt_LE icb_BE plain alg key in
if num_extra = 0 then (
let plain_quads_LE = le_bytes_to_seq_quad32 plain in
gctr_encrypt_recursive_length icb_BE plain_quads_LE alg key 0
) else (
let full_bytes_len = (length plain) - num_extra in
let full_blocks, final_block = split plain full_bytes_len in
let full_quads_LE = le_bytes_to_seq_quad32 full_blocks in
let final_quad_LE = le_bytes_to_quad32 (pad_to_128_bits final_block) in
let cipher_quads_LE = gctr_encrypt_recursive icb_BE full_quads_LE alg key 0 in
let final_cipher_quad_LE = gctr_encrypt_block icb_BE final_quad_LE alg key (full_bytes_len / 16) in
let cipher_bytes_full_LE = le_seq_quad32_to_bytes cipher_quads_LE in
let final_cipher_bytes_LE = slice (le_quad32_to_bytes final_cipher_quad_LE) 0 num_extra in
gctr_encrypt_recursive_length icb_BE full_quads_LE alg key 0;
assert (length result == length cipher_bytes_full_LE + length final_cipher_bytes_LE);
assert (length cipher_quads_LE == length full_quads_LE);
assert (length cipher_bytes_full_LE == 16 * length cipher_quads_LE);
assert (16 * length full_quads_LE == length full_blocks);
assert (length cipher_bytes_full_LE == length full_blocks);
()
)
#reset-options
let rec gctr_indexed_helper (icb:quad32) (plain:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (i:int) : Lemma
(requires True)
(ensures (let cipher = gctr_encrypt_recursive icb plain alg key i in
length cipher == length plain /\
(forall j . {:pattern index cipher j} 0 <= j /\ j < length plain ==>
index cipher j == quad32_xor (index plain j) (aes_encrypt_BE alg key (inc32 icb (i + j)) ))))
(decreases %[length plain])
=
if length plain = 0 then ()
else
let tl = tail plain in
let cipher = gctr_encrypt_recursive icb plain alg key i in
let r_cipher = gctr_encrypt_recursive icb tl alg key (i+1) in
let helper (j:int) :
Lemma ((0 <= j /\ j < length plain) ==> (index cipher j == quad32_xor (index plain j) (aes_encrypt_BE alg key (inc32 icb (i + j)) )))
=
aes_encrypt_LE_reveal ();
if 0 < j && j < length plain then (
gctr_indexed_helper icb tl alg key (i+1);
assert(index r_cipher (j-1) == quad32_xor (index tl (j-1)) (aes_encrypt_BE alg key (inc32 icb (i + 1 + j - 1)) )) // OBSERVE
) else ()
in
FStar.Classical.forall_intro helper
let gctr_indexed (icb:quad32) (plain:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (cipher:seq quad32) : Lemma
(requires length cipher == length plain /\
(forall i . {:pattern index cipher i} 0 <= i /\ i < length cipher ==>
index cipher i == quad32_xor (index plain i) (aes_encrypt_BE alg key (inc32 icb i) )))
(ensures cipher == gctr_encrypt_recursive icb plain alg key 0)
=
gctr_indexed_helper icb plain alg key 0;
let c = gctr_encrypt_recursive icb plain alg key 0 in
assert(equal cipher c) // OBSERVE: Invoke extensionality lemmas
let gctr_partial_completed (alg:algorithm) (plain cipher:seq quad32) (key:seq nat32) (icb:quad32) =
gctr_indexed icb plain alg key cipher;
()
let gctr_partial_opaque_completed (alg:algorithm) (plain cipher:seq quad32) (key:seq nat32) (icb:quad32) : Lemma
(requires
is_aes_key_LE alg key /\
length plain == length cipher /\
length plain < pow2_32 /\
gctr_partial alg (length cipher) plain cipher key icb
)
(ensures cipher == gctr_encrypt_recursive icb plain alg key 0)
=
gctr_partial_reveal ();
gctr_partial_completed alg plain cipher key icb
let gctr_partial_to_full_basic (icb_BE:quad32) (plain:seq quad32) (alg:algorithm) (key:seq nat32) (cipher:seq quad32) =
gctr_encrypt_LE_reveal ();
let p = le_seq_quad32_to_bytes plain in
assert (length p % 16 == 0);
let plain_quads_LE = le_bytes_to_seq_quad32 p in
let cipher_quads_LE = gctr_encrypt_recursive icb_BE plain_quads_LE alg key 0 in
let cipher_bytes = le_seq_quad32_to_bytes cipher_quads_LE in
le_bytes_to_seq_quad32_to_bytes plain;
()
(*
Want to show that:
slice (le_seq_quad32_to_bytes (buffer128_as_seq(mem, out_b))) 0 num_bytes
==
gctr_encrypt_LE icb_BE (slice (le_seq_quad32_to_bytes (buffer128_as_seq(mem, in_b))) 0 num_bytes) ...
We know that
slice (buffer128_as_seq(mem, out_b) 0 num_blocks
==
gctr_encrypt_recursive icb_BE (slice buffer128_as_seq(mem, in_b) 0 num_blocks) ...
And we know that:
get_mem out_b num_blocks
==
gctr_encrypt_block(icb_BE, (get_mem inb num_blocks), alg, key, num_blocks);
Internally gctr_encrypt_LE will compute:
full_blocks, final_block = split (slice (le_seq_quad32_to_bytes (buffer128_as_seq(mem, in_b))) 0 num_bytes) (num_blocks * 16)
We'd like to show that
Step1: le_bytes_to_seq_quad32 full_blocks == slice buffer128_as_seq(mem, in_b) 0 num_blocks
and
Step2: final_block == slice (le_quad32_to_bytes (get_mem inb num_blocks)) 0 num_extra
Then we need to break down the byte-level effects of gctr_encrypt_block to show that even though the
padded version of final_block differs from (get_mem inb num_blocks), after we slice it at the end,
we end up with the same value
*)
let step1 (p:seq quad32) (num_bytes:nat{ num_bytes < 16 * length p }) : Lemma
(let num_extra = num_bytes % 16 in
let num_blocks = num_bytes / 16 in
let full_blocks, final_block = split (slice (le_seq_quad32_to_bytes p) 0 num_bytes) (num_blocks * 16) in
let full_quads_LE = le_bytes_to_seq_quad32 full_blocks in
let p_prefix = slice p 0 num_blocks in
p_prefix == full_quads_LE)
=
let num_extra = num_bytes % 16 in
let num_blocks = num_bytes / 16 in
let full_blocks, final_block = split (slice (le_seq_quad32_to_bytes p) 0 num_bytes) (num_blocks * 16) in
let full_quads_LE = le_bytes_to_seq_quad32 full_blocks in
let p_prefix = slice p 0 num_blocks in
assert (length full_blocks == num_blocks * 16);
assert (full_blocks == slice (slice (le_seq_quad32_to_bytes p) 0 num_bytes) 0 (num_blocks * 16));
assert (full_blocks == slice (le_seq_quad32_to_bytes p) 0 (num_blocks * 16));
slice_commutes_le_seq_quad32_to_bytes0 p num_blocks;
assert (full_blocks == le_seq_quad32_to_bytes (slice p 0 num_blocks));
le_bytes_to_seq_quad32_to_bytes (slice p 0 num_blocks);
assert (full_quads_LE == (slice p 0 num_blocks));
()
#reset-options "--smtencoding.elim_box true --z3rlimit 30"
let lemma_slice_orig_index (#a:Type) (s s':seq a) (m n:nat) : Lemma
(requires length s == length s' /\ m <= n /\ n <= length s /\ slice s m n == slice s' m n)
(ensures (forall (i:int).{:pattern (index s i) \/ (index s' i)} m <= i /\ i < n ==> index s i == index s' i))
=
let aux (i:nat{m <= i /\ i < n}) : Lemma (index s i == index s' i) =
lemma_index_slice s m n (i - m);
lemma_index_slice s' m n (i - m)
in Classical.forall_intro aux
let lemma_ishl_32 (x:nat32) (k:nat) : Lemma
(ensures ishl #pow2_32 x k == x * pow2 k % pow2_32)
=
Vale.Def.TypesNative_s.reveal_ishl 32 x k;
FStar.UInt.shift_left_value_lemma #32 x k;
()
let lemma_ishl_ixor_32 (x y:nat32) (k:nat) : Lemma
(ensures ishl #pow2_32 (ixor x y) k == ixor (ishl x k) (ishl y k))
=
Vale.Def.TypesNative_s.reveal_ishl 32 x k;
Vale.Def.TypesNative_s.reveal_ishl 32 y k;
Vale.Def.TypesNative_s.reveal_ishl 32 (ixor x y) k;
Vale.Def.TypesNative_s.reveal_ixor 32 x y;
Vale.Def.TypesNative_s.reveal_ixor 32 (ishl x k) (ishl y k);
FStar.UInt.shift_left_logxor_lemma #32 x y k;
()
unfold let pow2_24 = 0x1000000
let nat24 = natN pow2_24
let nat32_xor_bytewise_1_helper1 (x0 x0':nat8) (x1 x1':nat24) (x x':nat32) : Lemma
(requires
x == x0 + 0x100 * x1 /\
x' == x0' + 0x100 * x1' /\
x * 0x1000000 % 0x100000000 == x' * 0x1000000 % 0x100000000
)
(ensures x0 == x0')
=
()
let nat32_xor_bytewise_2_helper1 (x0 x0' x1 x1':nat16) (x x':nat32) : Lemma
(requires
x == x0 + 0x10000 * x1 /\
x' == x0' + 0x10000 * x1' /\
x * 0x10000 % 0x100000000 == x' * 0x10000 % 0x100000000
)
(ensures x0 == x0')
=
()
let nat32_xor_bytewise_3_helper1 (x0 x0':nat24) (x1 x1':nat8) (x x':nat32) : Lemma
(requires
x == x0 + 0x1000000 * x1 /\
x' == x0' + 0x1000000 * x1' /\
x * 0x100 % 0x100000000 == x' * 0x100 % 0x100000000
)
(ensures x0 == x0')
=
()
let nat32_xor_bytewise_1_helper2 (x x':nat32) (t t':four nat8) : Lemma
(requires
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
x * 0x1000000 % 0x100000000 == x' * 0x1000000 % 0x100000000
)
(ensures t.lo0 == t'.lo0)
=
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
let t123 = t1 + 0x100 * t2 + 0x10000 * t3 in
let t123' = t1' + 0x100 * t2' + 0x10000 * t3' in
assert_norm (four_to_nat 8 t == four_to_nat_unfold 8 t );
assert_norm (four_to_nat 8 t' == four_to_nat_unfold 8 t');
nat32_xor_bytewise_1_helper1 t0 t0' t123 t123' x x';
()
let nat32_xor_bytewise_2_helper2 (x x':nat32) (t t':four nat8) : Lemma
(requires
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
x * 0x10000 % 0x100000000 == x' * 0x10000 % 0x100000000
)
(ensures t.lo0 == t'.lo0 /\ t.lo1 == t'.lo1)
=
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
let t01 = t0 + 0x100 * t1 in
let t23 = t2 + 0x100 * t3 in
let t01' = t0' + 0x100 * t1' in
let t23' = t2' + 0x100 * t3' in
assert_norm (four_to_nat 8 t == four_to_nat_unfold 8 t );
assert_norm (four_to_nat 8 t' == four_to_nat_unfold 8 t');
nat32_xor_bytewise_2_helper1 t01 t01' t23 t23' x x';
()
let nat32_xor_bytewise_3_helper2 (x x':nat32) (t t':four nat8) : Lemma
(requires
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
x * 0x100 % 0x100000000 == x' * 0x100 % 0x100000000
)
(ensures t.lo0 == t'.lo0 /\ t.lo1 == t'.lo1 /\ t.hi2 == t'.hi2)
=
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
let t012 = t0 + 0x100 * t1 + 0x10000 * t2 in
let t012' = t0' + 0x100 * t1' + 0x10000 * t2' in
assert_norm (four_to_nat 8 t == four_to_nat_unfold 8 t );
assert_norm (four_to_nat 8 t' == four_to_nat_unfold 8 t');
nat32_xor_bytewise_3_helper1 t012 t012' t3 t3' x x';
()
let nat32_xor_bytewise_1_helper3 (k k':nat32) (s s':four nat8) : Lemma
(requires
k == four_to_nat 8 s /\
k' == four_to_nat 8 s' /\
s.lo0 == s'.lo0
)
(ensures k * 0x1000000 % 0x100000000 == k' * 0x1000000 % 0x100000000)
=
let Mkfour _ _ _ _ = s in
let Mkfour _ _ _ _ = s' in
assert_norm (four_to_nat 8 s == four_to_nat_unfold 8 s );
assert_norm (four_to_nat 8 s' == four_to_nat_unfold 8 s');
()
let nat32_xor_bytewise_2_helper3 (k k':nat32) (s s':four nat8) : Lemma
(requires
k == four_to_nat 8 s /\
k' == four_to_nat 8 s' /\
s.lo0 == s'.lo0 /\ s.lo1 == s'.lo1
)
(ensures k * 0x10000 % 0x100000000 == k' * 0x10000 % 0x100000000)
=
let Mkfour _ _ _ _ = s in
let Mkfour _ _ _ _ = s' in
assert_norm (four_to_nat 8 s == four_to_nat_unfold 8 s );
assert_norm (four_to_nat 8 s' == four_to_nat_unfold 8 s');
()
let nat32_xor_bytewise_3_helper3 (k k':nat32) (s s':four nat8) : Lemma
(requires
k == four_to_nat 8 s /\
k' == four_to_nat 8 s' /\
s.lo0 == s'.lo0 /\ s.lo1 == s'.lo1 /\ s.hi2 == s'.hi2
)
(ensures k * 0x100 % 0x100000000 == k' * 0x100 % 0x100000000)
=
let Mkfour _ _ _ _ = s in
let Mkfour _ _ _ _ = s' in
assert_norm (four_to_nat 8 s == four_to_nat_unfold 8 s );
assert_norm (four_to_nat 8 s' == four_to_nat_unfold 8 s');
()
let nat32_xor_bytewise_1 (k k' x x' m:nat32) (s s' t t':four nat8) : Lemma
(requires
k == four_to_nat 8 s /\
k' == four_to_nat 8 s' /\
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
ixor k m == x /\
ixor k' m == x' /\
s.lo0 == s'.lo0
)
(ensures t.lo0 == t'.lo0)
=
let Mkfour s0 s1 s2 s3 = s in
let Mkfour s0' s1' s2' s3' = s' in
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
nat32_xor_bytewise_1_helper3 k k' s s';
lemma_ishl_32 k 24;
lemma_ishl_32 k' 24;
lemma_ishl_32 x 24;
lemma_ishl_32 x' 24;
lemma_ishl_ixor_32 k m 24;
lemma_ishl_ixor_32 k' m 24;
assert_norm (pow2 24 == pow2_24);
nat32_xor_bytewise_1_helper2 x x' t t';
()
let nat32_xor_bytewise_2 (k k' x x' m:nat32) (s s' t t':four nat8) : Lemma
(requires
k == four_to_nat 8 s /\
k' == four_to_nat 8 s' /\
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
ixor k m == x /\
ixor k' m == x' /\
s.lo0 == s'.lo0 /\ s.lo1 == s'.lo1
)
(ensures t.lo0 == t'.lo0 /\ t.lo1 == t'.lo1)
=
let Mkfour s0 s1 s2 s3 = s in
let Mkfour s0' s1' s2' s3' = s' in
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
nat32_xor_bytewise_2_helper3 k k' s s';
lemma_ishl_32 k 16;
lemma_ishl_32 k' 16;
lemma_ishl_32 x 16;
lemma_ishl_32 x' 16;
lemma_ishl_ixor_32 k m 16;
lemma_ishl_ixor_32 k' m 16;
// assert (ishl #pow2_32 k 16 == k * 0x10000 % 0x100000000);
// assert (ishl #pow2_32 k' 16 == k' * 0x10000 % 0x100000000);
// assert (ishl #pow2_32 x 16 == x * 0x10000 % 0x100000000);
// assert (ishl #pow2_32 x' 16 == x' * 0x10000 % 0x100000000);
// assert (ishl #pow2_32 x 16 == ixor (ishl k 16) (ishl m 16));
// assert (ishl #pow2_32 x' 16 == ixor (ishl k' 16) (ishl m 16));
// assert (x * 0x10000 % 0x100000000 == ixor (k * 0x10000 % 0x100000000) (ishl m 16));
// assert (x' * 0x10000 % 0x100000000 == ixor (k' * 0x10000 % 0x100000000) (ishl m 16));
nat32_xor_bytewise_2_helper2 x x' t t';
()
let nat32_xor_bytewise_3 (k k' x x' m:nat32) (s s' t t':four nat8) : Lemma
(requires
k == four_to_nat 8 s /\
k' == four_to_nat 8 s' /\
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
ixor k m == x /\
ixor k' m == x' /\
s.lo0 == s'.lo0 /\ s.lo1 == s'.lo1 /\ s.hi2 == s'.hi2
) | false | false | Vale.AES.GCTR.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 30,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val nat32_xor_bytewise_3 (k k' x x' m: nat32) (s s' t t': four nat8)
: Lemma
(requires
k == four_to_nat 8 s /\ k' == four_to_nat 8 s' /\ x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\ ixor k m == x /\ ixor k' m == x' /\ s.lo0 == s'.lo0 /\
s.lo1 == s'.lo1 /\ s.hi2 == s'.hi2)
(ensures t.lo0 == t'.lo0 /\ t.lo1 == t'.lo1 /\ t.hi2 == t'.hi2) | [] | Vale.AES.GCTR.nat32_xor_bytewise_3 | {
"file_name": "vale/code/crypto/aes/Vale.AES.GCTR.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} |
k: Vale.Def.Types_s.nat32 ->
k': Vale.Def.Types_s.nat32 ->
x: Vale.Def.Types_s.nat32 ->
x': Vale.Def.Types_s.nat32 ->
m: Vale.Def.Types_s.nat32 ->
s: Vale.Def.Words_s.four Vale.Def.Types_s.nat8 ->
s': Vale.Def.Words_s.four Vale.Def.Types_s.nat8 ->
t: Vale.Def.Words_s.four Vale.Def.Types_s.nat8 ->
t': Vale.Def.Words_s.four Vale.Def.Types_s.nat8
-> FStar.Pervasives.Lemma
(requires
k == Vale.Def.Words.Four_s.four_to_nat 8 s /\ k' == Vale.Def.Words.Four_s.four_to_nat 8 s' /\
x == Vale.Def.Words.Four_s.four_to_nat 8 t /\ x' == Vale.Def.Words.Four_s.four_to_nat 8 t' /\
Vale.Def.Types_s.ixor k m == x /\ Vale.Def.Types_s.ixor k' m == x' /\
Mkfour?.lo0 s == Mkfour?.lo0 s' /\ Mkfour?.lo1 s == Mkfour?.lo1 s' /\
Mkfour?.hi2 s == Mkfour?.hi2 s')
(ensures
Mkfour?.lo0 t == Mkfour?.lo0 t' /\ Mkfour?.lo1 t == Mkfour?.lo1 t' /\
Mkfour?.hi2 t == Mkfour?.hi2 t') | {
"end_col": 4,
"end_line": 509,
"start_col": 3,
"start_line": 496
} |
FStar.Pervasives.Lemma | val slice_pad_to_128_bits (s: seq nat8 {0 < length s /\ length s < 16})
: Lemma (slice (pad_to_128_bits s) 0 (length s) == s) | [
{
"abbrev": false,
"full_module": "FStar.Seq.Properties",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.Types_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Lib.Seqs",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCM_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCTR_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words.Four_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words.Seq_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Lib.Seqs",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCM_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCTR_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let slice_pad_to_128_bits (s:seq nat8 { 0 < length s /\ length s < 16 }) :
Lemma(slice (pad_to_128_bits s) 0 (length s) == s)
=
assert (length s % 16 == length s);
assert (equal s (slice (pad_to_128_bits s) 0 (length s)));
() | val slice_pad_to_128_bits (s: seq nat8 {0 < length s /\ length s < 16})
: Lemma (slice (pad_to_128_bits s) 0 (length s) == s)
let slice_pad_to_128_bits (s: seq nat8 {0 < length s /\ length s < 16})
: Lemma (slice (pad_to_128_bits s) 0 (length s) == s) = | false | null | true | assert (length s % 16 == length s);
assert (equal s (slice (pad_to_128_bits s) 0 (length s)));
() | {
"checked_file": "Vale.AES.GCTR.fst.checked",
"dependencies": [
"Vale.Poly1305.Bitvectors.fsti.checked",
"Vale.Lib.Seqs.fsti.checked",
"Vale.Def.Words_s.fsti.checked",
"Vale.Def.Words.Seq_s.fsti.checked",
"Vale.Def.Words.Four_s.fsti.checked",
"Vale.Def.TypesNative_s.fst.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Opaque_s.fsti.checked",
"Vale.Arch.Types.fsti.checked",
"Vale.AES.Types_helpers.fsti.checked",
"Vale.AES.GCTR_s.fst.checked",
"Vale.AES.GCM_helpers.fsti.checked",
"Vale.AES.AES_s.fst.checked",
"prims.fst.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.Properties.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Classical.fsti.checked",
"FStar.Calc.fsti.checked"
],
"interface_file": true,
"source_file": "Vale.AES.GCTR.fst"
} | [
"lemma"
] | [
"FStar.Seq.Base.seq",
"Vale.Def.Types_s.nat8",
"Prims.l_and",
"Prims.b2t",
"Prims.op_LessThan",
"FStar.Seq.Base.length",
"Prims.unit",
"Prims._assert",
"FStar.Seq.Base.equal",
"FStar.Seq.Base.slice",
"Vale.AES.GCTR_s.pad_to_128_bits",
"Prims.eq2",
"Prims.int",
"Prims.op_Modulus",
"Prims.l_True",
"Prims.squash",
"Prims.Nil",
"FStar.Pervasives.pattern"
] | [] | module Vale.AES.GCTR
open Vale.Def.Opaque_s
open Vale.Def.Words_s
open Vale.Def.Words.Seq_s
open Vale.Def.Words.Four_s
open Vale.Def.Types_s
open Vale.Arch.Types
open FStar.Mul
open FStar.Seq
open Vale.AES.AES_s
open Vale.AES.GCTR_s
open Vale.AES.GCM_helpers
open FStar.Math.Lemmas
open Vale.Lib.Seqs
open Vale.AES.Types_helpers
#set-options "--z3rlimit 20 --max_fuel 1 --max_ifuel 0"
let lemma_counter_init x low64 low8 =
Vale.Poly1305.Bitvectors.lemma_bytes_and_mod1 low64;
Vale.Def.TypesNative_s.reveal_iand 64 low64 0xff;
assert (low8 == low64 % 256);
lo64_reveal ();
assert_norm (pow2_norm 32 == pow2_32); // OBSERVE
assert (low64 == x.lo0 + x.lo1 * pow2_32); // OBSERVE
assert (low64 % 256 == x.lo0 % 256);
()
let gctr_encrypt_block_offset (icb_BE:quad32) (plain_LE:quad32) (alg:algorithm) (key:seq nat32) (i:int) =
()
let gctr_encrypt_empty (icb_BE:quad32) (plain_LE cipher_LE:seq quad32) (alg:algorithm) (key:seq nat32) =
reveal_opaque (`%le_bytes_to_seq_quad32) le_bytes_to_seq_quad32;
gctr_encrypt_LE_reveal ();
let plain = slice (le_seq_quad32_to_bytes plain_LE) 0 0 in
let cipher = slice (le_seq_quad32_to_bytes cipher_LE) 0 0 in
assert (plain == empty);
assert (cipher == empty);
assert (length plain == 0);
assert (make_gctr_plain_LE plain == empty);
let num_extra = (length (make_gctr_plain_LE plain)) % 16 in
assert (num_extra == 0);
let plain_quads_LE = le_bytes_to_seq_quad32 plain in
let cipher_quads_LE = gctr_encrypt_recursive icb_BE plain_quads_LE alg key 0 in
assert (equal plain_quads_LE empty); // OBSERVE
assert (plain_quads_LE == empty);
assert (cipher_quads_LE == empty);
assert (equal (le_seq_quad32_to_bytes cipher_quads_LE) empty); // OBSERVEs
()
let gctr_partial_opaque_init alg plain cipher key icb =
gctr_partial_reveal ();
()
#restart-solver
let lemma_gctr_partial_append alg b1 b2 p1 c1 p2 c2 key icb1 icb2 =
gctr_partial_reveal ();
()
let gctr_partial_opaque_ignores_postfix alg bound plain plain' cipher cipher' key icb =
gctr_partial_reveal ();
// OBSERVE:
assert (forall i . 0 <= i /\ i < bound ==> index plain i == index (slice plain 0 bound) i);
assert (forall i . 0 <= i /\ i < bound ==> index plain' i == index (slice plain' 0 bound) i);
assert (forall i . 0 <= i /\ i < bound ==> index cipher i == index (slice cipher 0 bound) i);
assert (forall i . 0 <= i /\ i < bound ==> index cipher' i == index (slice cipher' 0 bound) i);
()
let gctr_partial_extend6 (alg:algorithm) (bound:nat) (plain cipher:seq quad32) (key:seq nat32) (icb:quad32)
=
gctr_partial_reveal ();
()
(*
let rec seq_map_i_indexed' (#a:Type) (#b:Type) (f:int->a->b) (s:seq a) (i:int) :
Tot (s':seq b { length s' == length s /\
(forall j . {:pattern index s' j} 0 <= j /\ j < length s ==> index s' j == f (i + j) (index s j))
})
(decreases (length s))
=
if length s = 0 then empty
else cons (f i (head s)) (seq_map_i_indexed f (tail s) (i + 1))
let rec test (icb_BE:quad32) (plain_LE:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (i:int) :
Lemma (ensures
(let gctr_encrypt_block_curried (j:int) (p:quad32) = gctr_encrypt_block icb_BE p alg key j in
gctr_encrypt_recursive icb_BE plain_LE alg key i == seq_map_i_indexed' gctr_encrypt_block_curried plain_LE i))
(decreases (length plain_LE))
=
let gctr_encrypt_block_curried (j:int) (p:quad32) = gctr_encrypt_block icb_BE p alg key j in
let g = gctr_encrypt_recursive icb_BE plain_LE alg key i in
let s = seq_map_i_indexed' gctr_encrypt_block_curried plain_LE i in
if length plain_LE = 0 then (
assert(equal (g) (s));
()
) else (
test icb_BE (tail plain_LE) alg key (i+1);
assert (gctr_encrypt_recursive icb_BE (tail plain_LE) alg key (i+1) == seq_map_i_indexed' gctr_encrypt_block_curried (tail plain_LE) (i+1))
)
*)
let rec gctr_encrypt_recursive_length (icb:quad32) (plain:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (i:int) : Lemma
(requires True)
(ensures length (gctr_encrypt_recursive icb plain alg key i) == length plain)
(decreases %[length plain])
[SMTPat (length (gctr_encrypt_recursive icb plain alg key i))]
=
if length plain = 0 then ()
else gctr_encrypt_recursive_length icb (tail plain) alg key (i + 1)
#reset-options "--z3rlimit 40"
let gctr_encrypt_length (icb_BE:quad32) (plain:gctr_plain_LE)
(alg:algorithm) (key:aes_key_LE alg) :
Lemma(length (gctr_encrypt_LE icb_BE plain alg key) == length plain)
[SMTPat (length (gctr_encrypt_LE icb_BE plain alg key))]
=
reveal_opaque (`%le_bytes_to_seq_quad32) le_bytes_to_seq_quad32;
gctr_encrypt_LE_reveal ();
let num_extra = (length plain) % 16 in
let result = gctr_encrypt_LE icb_BE plain alg key in
if num_extra = 0 then (
let plain_quads_LE = le_bytes_to_seq_quad32 plain in
gctr_encrypt_recursive_length icb_BE plain_quads_LE alg key 0
) else (
let full_bytes_len = (length plain) - num_extra in
let full_blocks, final_block = split plain full_bytes_len in
let full_quads_LE = le_bytes_to_seq_quad32 full_blocks in
let final_quad_LE = le_bytes_to_quad32 (pad_to_128_bits final_block) in
let cipher_quads_LE = gctr_encrypt_recursive icb_BE full_quads_LE alg key 0 in
let final_cipher_quad_LE = gctr_encrypt_block icb_BE final_quad_LE alg key (full_bytes_len / 16) in
let cipher_bytes_full_LE = le_seq_quad32_to_bytes cipher_quads_LE in
let final_cipher_bytes_LE = slice (le_quad32_to_bytes final_cipher_quad_LE) 0 num_extra in
gctr_encrypt_recursive_length icb_BE full_quads_LE alg key 0;
assert (length result == length cipher_bytes_full_LE + length final_cipher_bytes_LE);
assert (length cipher_quads_LE == length full_quads_LE);
assert (length cipher_bytes_full_LE == 16 * length cipher_quads_LE);
assert (16 * length full_quads_LE == length full_blocks);
assert (length cipher_bytes_full_LE == length full_blocks);
()
)
#reset-options
let rec gctr_indexed_helper (icb:quad32) (plain:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (i:int) : Lemma
(requires True)
(ensures (let cipher = gctr_encrypt_recursive icb plain alg key i in
length cipher == length plain /\
(forall j . {:pattern index cipher j} 0 <= j /\ j < length plain ==>
index cipher j == quad32_xor (index plain j) (aes_encrypt_BE alg key (inc32 icb (i + j)) ))))
(decreases %[length plain])
=
if length plain = 0 then ()
else
let tl = tail plain in
let cipher = gctr_encrypt_recursive icb plain alg key i in
let r_cipher = gctr_encrypt_recursive icb tl alg key (i+1) in
let helper (j:int) :
Lemma ((0 <= j /\ j < length plain) ==> (index cipher j == quad32_xor (index plain j) (aes_encrypt_BE alg key (inc32 icb (i + j)) )))
=
aes_encrypt_LE_reveal ();
if 0 < j && j < length plain then (
gctr_indexed_helper icb tl alg key (i+1);
assert(index r_cipher (j-1) == quad32_xor (index tl (j-1)) (aes_encrypt_BE alg key (inc32 icb (i + 1 + j - 1)) )) // OBSERVE
) else ()
in
FStar.Classical.forall_intro helper
let gctr_indexed (icb:quad32) (plain:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (cipher:seq quad32) : Lemma
(requires length cipher == length plain /\
(forall i . {:pattern index cipher i} 0 <= i /\ i < length cipher ==>
index cipher i == quad32_xor (index plain i) (aes_encrypt_BE alg key (inc32 icb i) )))
(ensures cipher == gctr_encrypt_recursive icb plain alg key 0)
=
gctr_indexed_helper icb plain alg key 0;
let c = gctr_encrypt_recursive icb plain alg key 0 in
assert(equal cipher c) // OBSERVE: Invoke extensionality lemmas
let gctr_partial_completed (alg:algorithm) (plain cipher:seq quad32) (key:seq nat32) (icb:quad32) =
gctr_indexed icb plain alg key cipher;
()
let gctr_partial_opaque_completed (alg:algorithm) (plain cipher:seq quad32) (key:seq nat32) (icb:quad32) : Lemma
(requires
is_aes_key_LE alg key /\
length plain == length cipher /\
length plain < pow2_32 /\
gctr_partial alg (length cipher) plain cipher key icb
)
(ensures cipher == gctr_encrypt_recursive icb plain alg key 0)
=
gctr_partial_reveal ();
gctr_partial_completed alg plain cipher key icb
let gctr_partial_to_full_basic (icb_BE:quad32) (plain:seq quad32) (alg:algorithm) (key:seq nat32) (cipher:seq quad32) =
gctr_encrypt_LE_reveal ();
let p = le_seq_quad32_to_bytes plain in
assert (length p % 16 == 0);
let plain_quads_LE = le_bytes_to_seq_quad32 p in
let cipher_quads_LE = gctr_encrypt_recursive icb_BE plain_quads_LE alg key 0 in
let cipher_bytes = le_seq_quad32_to_bytes cipher_quads_LE in
le_bytes_to_seq_quad32_to_bytes plain;
()
(*
Want to show that:
slice (le_seq_quad32_to_bytes (buffer128_as_seq(mem, out_b))) 0 num_bytes
==
gctr_encrypt_LE icb_BE (slice (le_seq_quad32_to_bytes (buffer128_as_seq(mem, in_b))) 0 num_bytes) ...
We know that
slice (buffer128_as_seq(mem, out_b) 0 num_blocks
==
gctr_encrypt_recursive icb_BE (slice buffer128_as_seq(mem, in_b) 0 num_blocks) ...
And we know that:
get_mem out_b num_blocks
==
gctr_encrypt_block(icb_BE, (get_mem inb num_blocks), alg, key, num_blocks);
Internally gctr_encrypt_LE will compute:
full_blocks, final_block = split (slice (le_seq_quad32_to_bytes (buffer128_as_seq(mem, in_b))) 0 num_bytes) (num_blocks * 16)
We'd like to show that
Step1: le_bytes_to_seq_quad32 full_blocks == slice buffer128_as_seq(mem, in_b) 0 num_blocks
and
Step2: final_block == slice (le_quad32_to_bytes (get_mem inb num_blocks)) 0 num_extra
Then we need to break down the byte-level effects of gctr_encrypt_block to show that even though the
padded version of final_block differs from (get_mem inb num_blocks), after we slice it at the end,
we end up with the same value
*)
let step1 (p:seq quad32) (num_bytes:nat{ num_bytes < 16 * length p }) : Lemma
(let num_extra = num_bytes % 16 in
let num_blocks = num_bytes / 16 in
let full_blocks, final_block = split (slice (le_seq_quad32_to_bytes p) 0 num_bytes) (num_blocks * 16) in
let full_quads_LE = le_bytes_to_seq_quad32 full_blocks in
let p_prefix = slice p 0 num_blocks in
p_prefix == full_quads_LE)
=
let num_extra = num_bytes % 16 in
let num_blocks = num_bytes / 16 in
let full_blocks, final_block = split (slice (le_seq_quad32_to_bytes p) 0 num_bytes) (num_blocks * 16) in
let full_quads_LE = le_bytes_to_seq_quad32 full_blocks in
let p_prefix = slice p 0 num_blocks in
assert (length full_blocks == num_blocks * 16);
assert (full_blocks == slice (slice (le_seq_quad32_to_bytes p) 0 num_bytes) 0 (num_blocks * 16));
assert (full_blocks == slice (le_seq_quad32_to_bytes p) 0 (num_blocks * 16));
slice_commutes_le_seq_quad32_to_bytes0 p num_blocks;
assert (full_blocks == le_seq_quad32_to_bytes (slice p 0 num_blocks));
le_bytes_to_seq_quad32_to_bytes (slice p 0 num_blocks);
assert (full_quads_LE == (slice p 0 num_blocks));
()
#reset-options "--smtencoding.elim_box true --z3rlimit 30"
let lemma_slice_orig_index (#a:Type) (s s':seq a) (m n:nat) : Lemma
(requires length s == length s' /\ m <= n /\ n <= length s /\ slice s m n == slice s' m n)
(ensures (forall (i:int).{:pattern (index s i) \/ (index s' i)} m <= i /\ i < n ==> index s i == index s' i))
=
let aux (i:nat{m <= i /\ i < n}) : Lemma (index s i == index s' i) =
lemma_index_slice s m n (i - m);
lemma_index_slice s' m n (i - m)
in Classical.forall_intro aux
let lemma_ishl_32 (x:nat32) (k:nat) : Lemma
(ensures ishl #pow2_32 x k == x * pow2 k % pow2_32)
=
Vale.Def.TypesNative_s.reveal_ishl 32 x k;
FStar.UInt.shift_left_value_lemma #32 x k;
()
let lemma_ishl_ixor_32 (x y:nat32) (k:nat) : Lemma
(ensures ishl #pow2_32 (ixor x y) k == ixor (ishl x k) (ishl y k))
=
Vale.Def.TypesNative_s.reveal_ishl 32 x k;
Vale.Def.TypesNative_s.reveal_ishl 32 y k;
Vale.Def.TypesNative_s.reveal_ishl 32 (ixor x y) k;
Vale.Def.TypesNative_s.reveal_ixor 32 x y;
Vale.Def.TypesNative_s.reveal_ixor 32 (ishl x k) (ishl y k);
FStar.UInt.shift_left_logxor_lemma #32 x y k;
()
unfold let pow2_24 = 0x1000000
let nat24 = natN pow2_24
let nat32_xor_bytewise_1_helper1 (x0 x0':nat8) (x1 x1':nat24) (x x':nat32) : Lemma
(requires
x == x0 + 0x100 * x1 /\
x' == x0' + 0x100 * x1' /\
x * 0x1000000 % 0x100000000 == x' * 0x1000000 % 0x100000000
)
(ensures x0 == x0')
=
()
let nat32_xor_bytewise_2_helper1 (x0 x0' x1 x1':nat16) (x x':nat32) : Lemma
(requires
x == x0 + 0x10000 * x1 /\
x' == x0' + 0x10000 * x1' /\
x * 0x10000 % 0x100000000 == x' * 0x10000 % 0x100000000
)
(ensures x0 == x0')
=
()
let nat32_xor_bytewise_3_helper1 (x0 x0':nat24) (x1 x1':nat8) (x x':nat32) : Lemma
(requires
x == x0 + 0x1000000 * x1 /\
x' == x0' + 0x1000000 * x1' /\
x * 0x100 % 0x100000000 == x' * 0x100 % 0x100000000
)
(ensures x0 == x0')
=
()
let nat32_xor_bytewise_1_helper2 (x x':nat32) (t t':four nat8) : Lemma
(requires
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
x * 0x1000000 % 0x100000000 == x' * 0x1000000 % 0x100000000
)
(ensures t.lo0 == t'.lo0)
=
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
let t123 = t1 + 0x100 * t2 + 0x10000 * t3 in
let t123' = t1' + 0x100 * t2' + 0x10000 * t3' in
assert_norm (four_to_nat 8 t == four_to_nat_unfold 8 t );
assert_norm (four_to_nat 8 t' == four_to_nat_unfold 8 t');
nat32_xor_bytewise_1_helper1 t0 t0' t123 t123' x x';
()
let nat32_xor_bytewise_2_helper2 (x x':nat32) (t t':four nat8) : Lemma
(requires
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
x * 0x10000 % 0x100000000 == x' * 0x10000 % 0x100000000
)
(ensures t.lo0 == t'.lo0 /\ t.lo1 == t'.lo1)
=
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
let t01 = t0 + 0x100 * t1 in
let t23 = t2 + 0x100 * t3 in
let t01' = t0' + 0x100 * t1' in
let t23' = t2' + 0x100 * t3' in
assert_norm (four_to_nat 8 t == four_to_nat_unfold 8 t );
assert_norm (four_to_nat 8 t' == four_to_nat_unfold 8 t');
nat32_xor_bytewise_2_helper1 t01 t01' t23 t23' x x';
()
let nat32_xor_bytewise_3_helper2 (x x':nat32) (t t':four nat8) : Lemma
(requires
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
x * 0x100 % 0x100000000 == x' * 0x100 % 0x100000000
)
(ensures t.lo0 == t'.lo0 /\ t.lo1 == t'.lo1 /\ t.hi2 == t'.hi2)
=
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
let t012 = t0 + 0x100 * t1 + 0x10000 * t2 in
let t012' = t0' + 0x100 * t1' + 0x10000 * t2' in
assert_norm (four_to_nat 8 t == four_to_nat_unfold 8 t );
assert_norm (four_to_nat 8 t' == four_to_nat_unfold 8 t');
nat32_xor_bytewise_3_helper1 t012 t012' t3 t3' x x';
()
let nat32_xor_bytewise_1_helper3 (k k':nat32) (s s':four nat8) : Lemma
(requires
k == four_to_nat 8 s /\
k' == four_to_nat 8 s' /\
s.lo0 == s'.lo0
)
(ensures k * 0x1000000 % 0x100000000 == k' * 0x1000000 % 0x100000000)
=
let Mkfour _ _ _ _ = s in
let Mkfour _ _ _ _ = s' in
assert_norm (four_to_nat 8 s == four_to_nat_unfold 8 s );
assert_norm (four_to_nat 8 s' == four_to_nat_unfold 8 s');
()
let nat32_xor_bytewise_2_helper3 (k k':nat32) (s s':four nat8) : Lemma
(requires
k == four_to_nat 8 s /\
k' == four_to_nat 8 s' /\
s.lo0 == s'.lo0 /\ s.lo1 == s'.lo1
)
(ensures k * 0x10000 % 0x100000000 == k' * 0x10000 % 0x100000000)
=
let Mkfour _ _ _ _ = s in
let Mkfour _ _ _ _ = s' in
assert_norm (four_to_nat 8 s == four_to_nat_unfold 8 s );
assert_norm (four_to_nat 8 s' == four_to_nat_unfold 8 s');
()
let nat32_xor_bytewise_3_helper3 (k k':nat32) (s s':four nat8) : Lemma
(requires
k == four_to_nat 8 s /\
k' == four_to_nat 8 s' /\
s.lo0 == s'.lo0 /\ s.lo1 == s'.lo1 /\ s.hi2 == s'.hi2
)
(ensures k * 0x100 % 0x100000000 == k' * 0x100 % 0x100000000)
=
let Mkfour _ _ _ _ = s in
let Mkfour _ _ _ _ = s' in
assert_norm (four_to_nat 8 s == four_to_nat_unfold 8 s );
assert_norm (four_to_nat 8 s' == four_to_nat_unfold 8 s');
()
let nat32_xor_bytewise_1 (k k' x x' m:nat32) (s s' t t':four nat8) : Lemma
(requires
k == four_to_nat 8 s /\
k' == four_to_nat 8 s' /\
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
ixor k m == x /\
ixor k' m == x' /\
s.lo0 == s'.lo0
)
(ensures t.lo0 == t'.lo0)
=
let Mkfour s0 s1 s2 s3 = s in
let Mkfour s0' s1' s2' s3' = s' in
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
nat32_xor_bytewise_1_helper3 k k' s s';
lemma_ishl_32 k 24;
lemma_ishl_32 k' 24;
lemma_ishl_32 x 24;
lemma_ishl_32 x' 24;
lemma_ishl_ixor_32 k m 24;
lemma_ishl_ixor_32 k' m 24;
assert_norm (pow2 24 == pow2_24);
nat32_xor_bytewise_1_helper2 x x' t t';
()
let nat32_xor_bytewise_2 (k k' x x' m:nat32) (s s' t t':four nat8) : Lemma
(requires
k == four_to_nat 8 s /\
k' == four_to_nat 8 s' /\
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
ixor k m == x /\
ixor k' m == x' /\
s.lo0 == s'.lo0 /\ s.lo1 == s'.lo1
)
(ensures t.lo0 == t'.lo0 /\ t.lo1 == t'.lo1)
=
let Mkfour s0 s1 s2 s3 = s in
let Mkfour s0' s1' s2' s3' = s' in
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
nat32_xor_bytewise_2_helper3 k k' s s';
lemma_ishl_32 k 16;
lemma_ishl_32 k' 16;
lemma_ishl_32 x 16;
lemma_ishl_32 x' 16;
lemma_ishl_ixor_32 k m 16;
lemma_ishl_ixor_32 k' m 16;
// assert (ishl #pow2_32 k 16 == k * 0x10000 % 0x100000000);
// assert (ishl #pow2_32 k' 16 == k' * 0x10000 % 0x100000000);
// assert (ishl #pow2_32 x 16 == x * 0x10000 % 0x100000000);
// assert (ishl #pow2_32 x' 16 == x' * 0x10000 % 0x100000000);
// assert (ishl #pow2_32 x 16 == ixor (ishl k 16) (ishl m 16));
// assert (ishl #pow2_32 x' 16 == ixor (ishl k' 16) (ishl m 16));
// assert (x * 0x10000 % 0x100000000 == ixor (k * 0x10000 % 0x100000000) (ishl m 16));
// assert (x' * 0x10000 % 0x100000000 == ixor (k' * 0x10000 % 0x100000000) (ishl m 16));
nat32_xor_bytewise_2_helper2 x x' t t';
()
let nat32_xor_bytewise_3 (k k' x x' m:nat32) (s s' t t':four nat8) : Lemma
(requires
k == four_to_nat 8 s /\
k' == four_to_nat 8 s' /\
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
ixor k m == x /\
ixor k' m == x' /\
s.lo0 == s'.lo0 /\ s.lo1 == s'.lo1 /\ s.hi2 == s'.hi2
)
(ensures t.lo0 == t'.lo0 /\ t.lo1 == t'.lo1 /\ t.hi2 == t'.hi2)
=
let Mkfour s0 s1 s2 s3 = s in
let Mkfour s0' s1' s2' s3' = s' in
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
nat32_xor_bytewise_3_helper3 k k' s s';
lemma_ishl_32 k 8;
lemma_ishl_32 k' 8;
lemma_ishl_32 x 8;
lemma_ishl_32 x' 8;
lemma_ishl_ixor_32 k m 8;
lemma_ishl_ixor_32 k' m 8;
nat32_xor_bytewise_3_helper2 x x' t t';
()
#reset-options "--z3rlimit 50 --smtencoding.nl_arith_repr boxwrap --smtencoding.l_arith_repr boxwrap"
let nat32_xor_bytewise_4 (k k' x x' m:nat32) (s s' t t':four nat8) : Lemma
(requires
k == four_to_nat 8 s /\
k' == four_to_nat 8 s' /\
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
ixor k m == x /\
ixor k' m == x' /\
s == s'
)
(ensures t == t')
=
let Mkfour s0 s1 s2 s3 = s in
let Mkfour s0' s1' s2' s3' = s' in
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
assert_norm (four_to_nat 8 t' == four_to_nat_unfold 8 t');
assert_norm (four_to_nat 8 t == four_to_nat_unfold 8 t );
()
#reset-options
let nat32_xor_bytewise (k k' m:nat32) (s s' t t':seq4 nat8) (n:nat) : Lemma
(requires
n <= 4 /\
k == four_to_nat 8 (seq_to_four_LE s) /\
k' == four_to_nat 8 (seq_to_four_LE s') /\
ixor k m == four_to_nat 8 (seq_to_four_LE t) /\
ixor k' m == four_to_nat 8 (seq_to_four_LE t') /\
equal (slice s 0 n) (slice s' 0 n)
)
// (ensures equal (slice t 0 n) (slice t' 0 n))
(ensures (forall (i:nat).{:pattern (index t i) \/ (index t' i)} i < n ==> index t i == index t' i))
=
assert (n > 0 ==> index (slice s 0 n) 0 == index (slice s' 0 n) 0);
assert (n > 1 ==> index (slice s 0 n) 1 == index (slice s' 0 n) 1);
assert (n > 2 ==> index (slice s 0 n) 2 == index (slice s' 0 n) 2);
assert (n > 3 ==> index (slice s 0 n) 3 == index (slice s' 0 n) 3);
let x = ixor k m in
let x' = ixor k' m in
if n = 1 then nat32_xor_bytewise_1 k k' x x' m (seq_to_four_LE s) (seq_to_four_LE s') (seq_to_four_LE t) (seq_to_four_LE t');
if n = 2 then nat32_xor_bytewise_2 k k' x x' m (seq_to_four_LE s) (seq_to_four_LE s') (seq_to_four_LE t) (seq_to_four_LE t');
if n = 3 then nat32_xor_bytewise_3 k k' x x' m (seq_to_four_LE s) (seq_to_four_LE s') (seq_to_four_LE t) (seq_to_four_LE t');
if n = 4 then nat32_xor_bytewise_4 k k' x x' m (seq_to_four_LE s) (seq_to_four_LE s') (seq_to_four_LE t) (seq_to_four_LE t');
assert (equal (slice t 0 n) (slice t' 0 n));
lemma_slice_orig_index t t' 0 n;
()
let quad32_xor_bytewise (q q' r:quad32) (n:nat{ n <= 16 }) : Lemma
(requires (let q_bytes = le_quad32_to_bytes q in
let q'_bytes = le_quad32_to_bytes q' in
slice q_bytes 0 n == slice q'_bytes 0 n))
(ensures (let q_bytes = le_quad32_to_bytes q in
let q'_bytes = le_quad32_to_bytes q' in
let qr_bytes = le_quad32_to_bytes (quad32_xor q r) in
let q'r_bytes = le_quad32_to_bytes (quad32_xor q' r) in
slice qr_bytes 0 n == slice q'r_bytes 0 n))
=
let s = le_quad32_to_bytes q in
let s' = le_quad32_to_bytes q' in
let t = le_quad32_to_bytes (quad32_xor q r) in
let t' = le_quad32_to_bytes (quad32_xor q' r) in
lemma_slices_le_quad32_to_bytes q;
lemma_slices_le_quad32_to_bytes q';
lemma_slices_le_quad32_to_bytes (quad32_xor q r);
lemma_slices_le_quad32_to_bytes (quad32_xor q' r);
lemma_slice_orig_index s s' 0 n;
quad32_xor_reveal ();
reverse_bytes_nat32_reveal ();
if n < 4 then nat32_xor_bytewise q.lo0 q'.lo0 r.lo0 (slice s 0 4) (slice s' 0 4) (slice t 0 4) (slice t' 0 4) n
else
(
nat32_xor_bytewise q.lo0 q'.lo0 r.lo0 (slice s 0 4) (slice s' 0 4) (slice t 0 4) (slice t' 0 4) 4;
if n < 8 then nat32_xor_bytewise q.lo1 q'.lo1 r.lo1 (slice s 4 8) (slice s' 4 8) (slice t 4 8) (slice t' 4 8) (n - 4)
else
(
nat32_xor_bytewise q.lo1 q'.lo1 r.lo1 (slice s 4 8) (slice s' 4 8) (slice t 4 8) (slice t' 4 8) 4;
if n < 12 then nat32_xor_bytewise q.hi2 q'.hi2 r.hi2 (slice s 8 12) (slice s' 8 12) (slice t 8 12) (slice t' 8 12) (n - 8)
else
(
nat32_xor_bytewise q.hi2 q'.hi2 r.hi2 (slice s 8 12) (slice s' 8 12) (slice t 8 12) (slice t' 8 12) 4;
nat32_xor_bytewise q.hi3 q'.hi3 r.hi3 (slice s 12 16) (slice s' 12 16) (slice t 12 16) (slice t' 12 16) (n - 12);
()
)
)
);
assert (equal (slice t 0 n) (slice t' 0 n));
()
let slice_pad_to_128_bits (s:seq nat8 { 0 < length s /\ length s < 16 }) :
Lemma(slice (pad_to_128_bits s) 0 (length s) == s) | false | false | Vale.AES.GCTR.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val slice_pad_to_128_bits (s: seq nat8 {0 < length s /\ length s < 16})
: Lemma (slice (pad_to_128_bits s) 0 (length s) == s) | [] | Vale.AES.GCTR.slice_pad_to_128_bits | {
"file_name": "vale/code/crypto/aes/Vale.AES.GCTR.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} |
s:
FStar.Seq.Base.seq Vale.Def.Types_s.nat8
{0 < FStar.Seq.Base.length s /\ FStar.Seq.Base.length s < 16}
-> FStar.Pervasives.Lemma
(ensures
FStar.Seq.Base.slice (Vale.AES.GCTR_s.pad_to_128_bits s) 0 (FStar.Seq.Base.length s) == s) | {
"end_col": 4,
"end_line": 605,
"start_col": 2,
"start_line": 603
} |
FStar.Pervasives.Lemma | val gctr_partial_opaque_completed (alg:algorithm) (plain cipher:seq quad32) (key:seq nat32) (icb:quad32) : Lemma
(requires
is_aes_key_LE alg key /\
length plain == length cipher /\
length plain < pow2_32 /\
gctr_partial alg (length cipher) plain cipher key icb
)
(ensures cipher == gctr_encrypt_recursive icb plain alg key 0) | [
{
"abbrev": false,
"full_module": "Vale.AES.Types_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words.Four_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words.Seq_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Lib.Seqs",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCM_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCTR_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let gctr_partial_opaque_completed (alg:algorithm) (plain cipher:seq quad32) (key:seq nat32) (icb:quad32) : Lemma
(requires
is_aes_key_LE alg key /\
length plain == length cipher /\
length plain < pow2_32 /\
gctr_partial alg (length cipher) plain cipher key icb
)
(ensures cipher == gctr_encrypt_recursive icb plain alg key 0)
=
gctr_partial_reveal ();
gctr_partial_completed alg plain cipher key icb | val gctr_partial_opaque_completed (alg:algorithm) (plain cipher:seq quad32) (key:seq nat32) (icb:quad32) : Lemma
(requires
is_aes_key_LE alg key /\
length plain == length cipher /\
length plain < pow2_32 /\
gctr_partial alg (length cipher) plain cipher key icb
)
(ensures cipher == gctr_encrypt_recursive icb plain alg key 0)
let gctr_partial_opaque_completed
(alg: algorithm)
(plain cipher: seq quad32)
(key: seq nat32)
(icb: quad32)
: Lemma
(requires
is_aes_key_LE alg key /\ length plain == length cipher /\ length plain < pow2_32 /\
gctr_partial alg (length cipher) plain cipher key icb)
(ensures cipher == gctr_encrypt_recursive icb plain alg key 0) = | false | null | true | gctr_partial_reveal ();
gctr_partial_completed alg plain cipher key icb | {
"checked_file": "Vale.AES.GCTR.fst.checked",
"dependencies": [
"Vale.Poly1305.Bitvectors.fsti.checked",
"Vale.Lib.Seqs.fsti.checked",
"Vale.Def.Words_s.fsti.checked",
"Vale.Def.Words.Seq_s.fsti.checked",
"Vale.Def.Words.Four_s.fsti.checked",
"Vale.Def.TypesNative_s.fst.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Opaque_s.fsti.checked",
"Vale.Arch.Types.fsti.checked",
"Vale.AES.Types_helpers.fsti.checked",
"Vale.AES.GCTR_s.fst.checked",
"Vale.AES.GCM_helpers.fsti.checked",
"Vale.AES.AES_s.fst.checked",
"prims.fst.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.Properties.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Classical.fsti.checked",
"FStar.Calc.fsti.checked"
],
"interface_file": true,
"source_file": "Vale.AES.GCTR.fst"
} | [
"lemma"
] | [
"Vale.AES.AES_common_s.algorithm",
"FStar.Seq.Base.seq",
"Vale.Def.Types_s.quad32",
"Vale.Def.Types_s.nat32",
"Vale.AES.GCTR.gctr_partial_completed",
"Prims.unit",
"Vale.AES.GCTR.gctr_partial_reveal",
"Prims.l_and",
"Vale.AES.AES_s.is_aes_key_LE",
"Prims.eq2",
"Prims.nat",
"FStar.Seq.Base.length",
"Prims.b2t",
"Prims.op_LessThan",
"Vale.Def.Words_s.pow2_32",
"Vale.AES.GCTR.gctr_partial",
"Prims.squash",
"Vale.AES.GCTR_s.gctr_encrypt_recursive",
"Prims.Nil",
"FStar.Pervasives.pattern"
] | [] | module Vale.AES.GCTR
open Vale.Def.Opaque_s
open Vale.Def.Words_s
open Vale.Def.Words.Seq_s
open Vale.Def.Words.Four_s
open Vale.Def.Types_s
open Vale.Arch.Types
open FStar.Mul
open FStar.Seq
open Vale.AES.AES_s
open Vale.AES.GCTR_s
open Vale.AES.GCM_helpers
open FStar.Math.Lemmas
open Vale.Lib.Seqs
open Vale.AES.Types_helpers
#set-options "--z3rlimit 20 --max_fuel 1 --max_ifuel 0"
let lemma_counter_init x low64 low8 =
Vale.Poly1305.Bitvectors.lemma_bytes_and_mod1 low64;
Vale.Def.TypesNative_s.reveal_iand 64 low64 0xff;
assert (low8 == low64 % 256);
lo64_reveal ();
assert_norm (pow2_norm 32 == pow2_32); // OBSERVE
assert (low64 == x.lo0 + x.lo1 * pow2_32); // OBSERVE
assert (low64 % 256 == x.lo0 % 256);
()
let gctr_encrypt_block_offset (icb_BE:quad32) (plain_LE:quad32) (alg:algorithm) (key:seq nat32) (i:int) =
()
let gctr_encrypt_empty (icb_BE:quad32) (plain_LE cipher_LE:seq quad32) (alg:algorithm) (key:seq nat32) =
reveal_opaque (`%le_bytes_to_seq_quad32) le_bytes_to_seq_quad32;
gctr_encrypt_LE_reveal ();
let plain = slice (le_seq_quad32_to_bytes plain_LE) 0 0 in
let cipher = slice (le_seq_quad32_to_bytes cipher_LE) 0 0 in
assert (plain == empty);
assert (cipher == empty);
assert (length plain == 0);
assert (make_gctr_plain_LE plain == empty);
let num_extra = (length (make_gctr_plain_LE plain)) % 16 in
assert (num_extra == 0);
let plain_quads_LE = le_bytes_to_seq_quad32 plain in
let cipher_quads_LE = gctr_encrypt_recursive icb_BE plain_quads_LE alg key 0 in
assert (equal plain_quads_LE empty); // OBSERVE
assert (plain_quads_LE == empty);
assert (cipher_quads_LE == empty);
assert (equal (le_seq_quad32_to_bytes cipher_quads_LE) empty); // OBSERVEs
()
let gctr_partial_opaque_init alg plain cipher key icb =
gctr_partial_reveal ();
()
#restart-solver
let lemma_gctr_partial_append alg b1 b2 p1 c1 p2 c2 key icb1 icb2 =
gctr_partial_reveal ();
()
let gctr_partial_opaque_ignores_postfix alg bound plain plain' cipher cipher' key icb =
gctr_partial_reveal ();
// OBSERVE:
assert (forall i . 0 <= i /\ i < bound ==> index plain i == index (slice plain 0 bound) i);
assert (forall i . 0 <= i /\ i < bound ==> index plain' i == index (slice plain' 0 bound) i);
assert (forall i . 0 <= i /\ i < bound ==> index cipher i == index (slice cipher 0 bound) i);
assert (forall i . 0 <= i /\ i < bound ==> index cipher' i == index (slice cipher' 0 bound) i);
()
let gctr_partial_extend6 (alg:algorithm) (bound:nat) (plain cipher:seq quad32) (key:seq nat32) (icb:quad32)
=
gctr_partial_reveal ();
()
(*
let rec seq_map_i_indexed' (#a:Type) (#b:Type) (f:int->a->b) (s:seq a) (i:int) :
Tot (s':seq b { length s' == length s /\
(forall j . {:pattern index s' j} 0 <= j /\ j < length s ==> index s' j == f (i + j) (index s j))
})
(decreases (length s))
=
if length s = 0 then empty
else cons (f i (head s)) (seq_map_i_indexed f (tail s) (i + 1))
let rec test (icb_BE:quad32) (plain_LE:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (i:int) :
Lemma (ensures
(let gctr_encrypt_block_curried (j:int) (p:quad32) = gctr_encrypt_block icb_BE p alg key j in
gctr_encrypt_recursive icb_BE plain_LE alg key i == seq_map_i_indexed' gctr_encrypt_block_curried plain_LE i))
(decreases (length plain_LE))
=
let gctr_encrypt_block_curried (j:int) (p:quad32) = gctr_encrypt_block icb_BE p alg key j in
let g = gctr_encrypt_recursive icb_BE plain_LE alg key i in
let s = seq_map_i_indexed' gctr_encrypt_block_curried plain_LE i in
if length plain_LE = 0 then (
assert(equal (g) (s));
()
) else (
test icb_BE (tail plain_LE) alg key (i+1);
assert (gctr_encrypt_recursive icb_BE (tail plain_LE) alg key (i+1) == seq_map_i_indexed' gctr_encrypt_block_curried (tail plain_LE) (i+1))
)
*)
let rec gctr_encrypt_recursive_length (icb:quad32) (plain:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (i:int) : Lemma
(requires True)
(ensures length (gctr_encrypt_recursive icb plain alg key i) == length plain)
(decreases %[length plain])
[SMTPat (length (gctr_encrypt_recursive icb plain alg key i))]
=
if length plain = 0 then ()
else gctr_encrypt_recursive_length icb (tail plain) alg key (i + 1)
#reset-options "--z3rlimit 40"
let gctr_encrypt_length (icb_BE:quad32) (plain:gctr_plain_LE)
(alg:algorithm) (key:aes_key_LE alg) :
Lemma(length (gctr_encrypt_LE icb_BE plain alg key) == length plain)
[SMTPat (length (gctr_encrypt_LE icb_BE plain alg key))]
=
reveal_opaque (`%le_bytes_to_seq_quad32) le_bytes_to_seq_quad32;
gctr_encrypt_LE_reveal ();
let num_extra = (length plain) % 16 in
let result = gctr_encrypt_LE icb_BE plain alg key in
if num_extra = 0 then (
let plain_quads_LE = le_bytes_to_seq_quad32 plain in
gctr_encrypt_recursive_length icb_BE plain_quads_LE alg key 0
) else (
let full_bytes_len = (length plain) - num_extra in
let full_blocks, final_block = split plain full_bytes_len in
let full_quads_LE = le_bytes_to_seq_quad32 full_blocks in
let final_quad_LE = le_bytes_to_quad32 (pad_to_128_bits final_block) in
let cipher_quads_LE = gctr_encrypt_recursive icb_BE full_quads_LE alg key 0 in
let final_cipher_quad_LE = gctr_encrypt_block icb_BE final_quad_LE alg key (full_bytes_len / 16) in
let cipher_bytes_full_LE = le_seq_quad32_to_bytes cipher_quads_LE in
let final_cipher_bytes_LE = slice (le_quad32_to_bytes final_cipher_quad_LE) 0 num_extra in
gctr_encrypt_recursive_length icb_BE full_quads_LE alg key 0;
assert (length result == length cipher_bytes_full_LE + length final_cipher_bytes_LE);
assert (length cipher_quads_LE == length full_quads_LE);
assert (length cipher_bytes_full_LE == 16 * length cipher_quads_LE);
assert (16 * length full_quads_LE == length full_blocks);
assert (length cipher_bytes_full_LE == length full_blocks);
()
)
#reset-options
let rec gctr_indexed_helper (icb:quad32) (plain:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (i:int) : Lemma
(requires True)
(ensures (let cipher = gctr_encrypt_recursive icb plain alg key i in
length cipher == length plain /\
(forall j . {:pattern index cipher j} 0 <= j /\ j < length plain ==>
index cipher j == quad32_xor (index plain j) (aes_encrypt_BE alg key (inc32 icb (i + j)) ))))
(decreases %[length plain])
=
if length plain = 0 then ()
else
let tl = tail plain in
let cipher = gctr_encrypt_recursive icb plain alg key i in
let r_cipher = gctr_encrypt_recursive icb tl alg key (i+1) in
let helper (j:int) :
Lemma ((0 <= j /\ j < length plain) ==> (index cipher j == quad32_xor (index plain j) (aes_encrypt_BE alg key (inc32 icb (i + j)) )))
=
aes_encrypt_LE_reveal ();
if 0 < j && j < length plain then (
gctr_indexed_helper icb tl alg key (i+1);
assert(index r_cipher (j-1) == quad32_xor (index tl (j-1)) (aes_encrypt_BE alg key (inc32 icb (i + 1 + j - 1)) )) // OBSERVE
) else ()
in
FStar.Classical.forall_intro helper
let gctr_indexed (icb:quad32) (plain:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (cipher:seq quad32) : Lemma
(requires length cipher == length plain /\
(forall i . {:pattern index cipher i} 0 <= i /\ i < length cipher ==>
index cipher i == quad32_xor (index plain i) (aes_encrypt_BE alg key (inc32 icb i) )))
(ensures cipher == gctr_encrypt_recursive icb plain alg key 0)
=
gctr_indexed_helper icb plain alg key 0;
let c = gctr_encrypt_recursive icb plain alg key 0 in
assert(equal cipher c) // OBSERVE: Invoke extensionality lemmas
let gctr_partial_completed (alg:algorithm) (plain cipher:seq quad32) (key:seq nat32) (icb:quad32) =
gctr_indexed icb plain alg key cipher;
()
let gctr_partial_opaque_completed (alg:algorithm) (plain cipher:seq quad32) (key:seq nat32) (icb:quad32) : Lemma
(requires
is_aes_key_LE alg key /\
length plain == length cipher /\
length plain < pow2_32 /\
gctr_partial alg (length cipher) plain cipher key icb
)
(ensures cipher == gctr_encrypt_recursive icb plain alg key 0) | false | false | Vale.AES.GCTR.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val gctr_partial_opaque_completed (alg:algorithm) (plain cipher:seq quad32) (key:seq nat32) (icb:quad32) : Lemma
(requires
is_aes_key_LE alg key /\
length plain == length cipher /\
length plain < pow2_32 /\
gctr_partial alg (length cipher) plain cipher key icb
)
(ensures cipher == gctr_encrypt_recursive icb plain alg key 0) | [] | Vale.AES.GCTR.gctr_partial_opaque_completed | {
"file_name": "vale/code/crypto/aes/Vale.AES.GCTR.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} |
alg: Vale.AES.AES_common_s.algorithm ->
plain: FStar.Seq.Base.seq Vale.Def.Types_s.quad32 ->
cipher: FStar.Seq.Base.seq Vale.Def.Types_s.quad32 ->
key: FStar.Seq.Base.seq Vale.Def.Types_s.nat32 ->
icb: Vale.Def.Types_s.quad32
-> FStar.Pervasives.Lemma
(requires
Vale.AES.AES_s.is_aes_key_LE alg key /\
FStar.Seq.Base.length plain == FStar.Seq.Base.length cipher /\
FStar.Seq.Base.length plain < Vale.Def.Words_s.pow2_32 /\
Vale.AES.GCTR.gctr_partial alg (FStar.Seq.Base.length cipher) plain cipher key icb)
(ensures cipher == Vale.AES.GCTR_s.gctr_encrypt_recursive icb plain alg key 0) | {
"end_col": 49,
"end_line": 202,
"start_col": 2,
"start_line": 201
} |
FStar.Pervasives.Lemma | val lemma_counter_init (x:quad32) (low64 low8:nat64) : Lemma
(requires low64 == lo64 x /\
low8 == iand64 low64 0xff)
(ensures low8 == x.lo0 % 256) | [
{
"abbrev": false,
"full_module": "Vale.AES.Types_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words.Four_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words.Seq_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Lib.Seqs",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCM_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCTR_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let lemma_counter_init x low64 low8 =
Vale.Poly1305.Bitvectors.lemma_bytes_and_mod1 low64;
Vale.Def.TypesNative_s.reveal_iand 64 low64 0xff;
assert (low8 == low64 % 256);
lo64_reveal ();
assert_norm (pow2_norm 32 == pow2_32); // OBSERVE
assert (low64 == x.lo0 + x.lo1 * pow2_32); // OBSERVE
assert (low64 % 256 == x.lo0 % 256);
() | val lemma_counter_init (x:quad32) (low64 low8:nat64) : Lemma
(requires low64 == lo64 x /\
low8 == iand64 low64 0xff)
(ensures low8 == x.lo0 % 256)
let lemma_counter_init x low64 low8 = | false | null | true | Vale.Poly1305.Bitvectors.lemma_bytes_and_mod1 low64;
Vale.Def.TypesNative_s.reveal_iand 64 low64 0xff;
assert (low8 == low64 % 256);
lo64_reveal ();
assert_norm (pow2_norm 32 == pow2_32);
assert (low64 == x.lo0 + x.lo1 * pow2_32);
assert (low64 % 256 == x.lo0 % 256);
() | {
"checked_file": "Vale.AES.GCTR.fst.checked",
"dependencies": [
"Vale.Poly1305.Bitvectors.fsti.checked",
"Vale.Lib.Seqs.fsti.checked",
"Vale.Def.Words_s.fsti.checked",
"Vale.Def.Words.Seq_s.fsti.checked",
"Vale.Def.Words.Four_s.fsti.checked",
"Vale.Def.TypesNative_s.fst.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Opaque_s.fsti.checked",
"Vale.Arch.Types.fsti.checked",
"Vale.AES.Types_helpers.fsti.checked",
"Vale.AES.GCTR_s.fst.checked",
"Vale.AES.GCM_helpers.fsti.checked",
"Vale.AES.AES_s.fst.checked",
"prims.fst.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.Properties.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Classical.fsti.checked",
"FStar.Calc.fsti.checked"
],
"interface_file": true,
"source_file": "Vale.AES.GCTR.fst"
} | [
"lemma"
] | [
"Vale.Def.Types_s.quad32",
"Vale.Def.Types_s.nat64",
"Prims.unit",
"Prims._assert",
"Prims.eq2",
"Prims.int",
"Prims.op_Modulus",
"Vale.Def.Words_s.__proj__Mkfour__item__lo0",
"Vale.Def.Types_s.nat32",
"Prims.op_Addition",
"FStar.Mul.op_Star",
"Vale.Def.Words_s.__proj__Mkfour__item__lo1",
"Vale.Def.Words_s.pow2_32",
"FStar.Pervasives.assert_norm",
"Vale.Def.Words_s.pow2_norm",
"Vale.Arch.Types.lo64_reveal",
"Vale.Def.TypesNative_s.reveal_iand",
"Vale.Poly1305.Bitvectors.lemma_bytes_and_mod1"
] | [] | module Vale.AES.GCTR
open Vale.Def.Opaque_s
open Vale.Def.Words_s
open Vale.Def.Words.Seq_s
open Vale.Def.Words.Four_s
open Vale.Def.Types_s
open Vale.Arch.Types
open FStar.Mul
open FStar.Seq
open Vale.AES.AES_s
open Vale.AES.GCTR_s
open Vale.AES.GCM_helpers
open FStar.Math.Lemmas
open Vale.Lib.Seqs
open Vale.AES.Types_helpers
#set-options "--z3rlimit 20 --max_fuel 1 --max_ifuel 0" | false | false | Vale.AES.GCTR.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 20,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val lemma_counter_init (x:quad32) (low64 low8:nat64) : Lemma
(requires low64 == lo64 x /\
low8 == iand64 low64 0xff)
(ensures low8 == x.lo0 % 256) | [] | Vale.AES.GCTR.lemma_counter_init | {
"file_name": "vale/code/crypto/aes/Vale.AES.GCTR.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | x: Vale.Def.Types_s.quad32 -> low64: Vale.Def.Types_s.nat64 -> low8: Vale.Def.Types_s.nat64
-> FStar.Pervasives.Lemma
(requires low64 == Vale.Arch.Types.lo64 x /\ low8 == Vale.Arch.Types.iand64 low64 0xff)
(ensures low8 == Mkfour?.lo0 x % 256) | {
"end_col": 4,
"end_line": 28,
"start_col": 2,
"start_line": 21
} |
FStar.Pervasives.Lemma | val lemma_gctr_partial_append (alg:algorithm) (b1 b2:nat) (p1 c1 p2 c2:seq quad32) (key:seq nat32) (icb1 icb2:quad32) : Lemma
(requires gctr_partial alg b1 p1 c1 key icb1 /\
gctr_partial alg b2 p2 c2 key icb2 /\
b1 == length p1 /\ b1 == length c1 /\
b2 == length p2 /\ b2 == length c2 /\
icb2 == inc32 icb1 b1)
(ensures gctr_partial alg (b1 + b2) (p1 @| p2) (c1 @| c2) key icb1) | [
{
"abbrev": false,
"full_module": "Vale.AES.Types_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words.Four_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words.Seq_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Lib.Seqs",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCM_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCTR_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let lemma_gctr_partial_append alg b1 b2 p1 c1 p2 c2 key icb1 icb2 =
gctr_partial_reveal ();
() | val lemma_gctr_partial_append (alg:algorithm) (b1 b2:nat) (p1 c1 p2 c2:seq quad32) (key:seq nat32) (icb1 icb2:quad32) : Lemma
(requires gctr_partial alg b1 p1 c1 key icb1 /\
gctr_partial alg b2 p2 c2 key icb2 /\
b1 == length p1 /\ b1 == length c1 /\
b2 == length p2 /\ b2 == length c2 /\
icb2 == inc32 icb1 b1)
(ensures gctr_partial alg (b1 + b2) (p1 @| p2) (c1 @| c2) key icb1)
let lemma_gctr_partial_append alg b1 b2 p1 c1 p2 c2 key icb1 icb2 = | false | null | true | gctr_partial_reveal ();
() | {
"checked_file": "Vale.AES.GCTR.fst.checked",
"dependencies": [
"Vale.Poly1305.Bitvectors.fsti.checked",
"Vale.Lib.Seqs.fsti.checked",
"Vale.Def.Words_s.fsti.checked",
"Vale.Def.Words.Seq_s.fsti.checked",
"Vale.Def.Words.Four_s.fsti.checked",
"Vale.Def.TypesNative_s.fst.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Opaque_s.fsti.checked",
"Vale.Arch.Types.fsti.checked",
"Vale.AES.Types_helpers.fsti.checked",
"Vale.AES.GCTR_s.fst.checked",
"Vale.AES.GCM_helpers.fsti.checked",
"Vale.AES.AES_s.fst.checked",
"prims.fst.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.Properties.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Classical.fsti.checked",
"FStar.Calc.fsti.checked"
],
"interface_file": true,
"source_file": "Vale.AES.GCTR.fst"
} | [
"lemma"
] | [
"Vale.AES.AES_common_s.algorithm",
"Prims.nat",
"FStar.Seq.Base.seq",
"Vale.Def.Types_s.quad32",
"Vale.Def.Types_s.nat32",
"Prims.unit",
"Vale.AES.GCTR.gctr_partial_reveal"
] | [] | module Vale.AES.GCTR
open Vale.Def.Opaque_s
open Vale.Def.Words_s
open Vale.Def.Words.Seq_s
open Vale.Def.Words.Four_s
open Vale.Def.Types_s
open Vale.Arch.Types
open FStar.Mul
open FStar.Seq
open Vale.AES.AES_s
open Vale.AES.GCTR_s
open Vale.AES.GCM_helpers
open FStar.Math.Lemmas
open Vale.Lib.Seqs
open Vale.AES.Types_helpers
#set-options "--z3rlimit 20 --max_fuel 1 --max_ifuel 0"
let lemma_counter_init x low64 low8 =
Vale.Poly1305.Bitvectors.lemma_bytes_and_mod1 low64;
Vale.Def.TypesNative_s.reveal_iand 64 low64 0xff;
assert (low8 == low64 % 256);
lo64_reveal ();
assert_norm (pow2_norm 32 == pow2_32); // OBSERVE
assert (low64 == x.lo0 + x.lo1 * pow2_32); // OBSERVE
assert (low64 % 256 == x.lo0 % 256);
()
let gctr_encrypt_block_offset (icb_BE:quad32) (plain_LE:quad32) (alg:algorithm) (key:seq nat32) (i:int) =
()
let gctr_encrypt_empty (icb_BE:quad32) (plain_LE cipher_LE:seq quad32) (alg:algorithm) (key:seq nat32) =
reveal_opaque (`%le_bytes_to_seq_quad32) le_bytes_to_seq_quad32;
gctr_encrypt_LE_reveal ();
let plain = slice (le_seq_quad32_to_bytes plain_LE) 0 0 in
let cipher = slice (le_seq_quad32_to_bytes cipher_LE) 0 0 in
assert (plain == empty);
assert (cipher == empty);
assert (length plain == 0);
assert (make_gctr_plain_LE plain == empty);
let num_extra = (length (make_gctr_plain_LE plain)) % 16 in
assert (num_extra == 0);
let plain_quads_LE = le_bytes_to_seq_quad32 plain in
let cipher_quads_LE = gctr_encrypt_recursive icb_BE plain_quads_LE alg key 0 in
assert (equal plain_quads_LE empty); // OBSERVE
assert (plain_quads_LE == empty);
assert (cipher_quads_LE == empty);
assert (equal (le_seq_quad32_to_bytes cipher_quads_LE) empty); // OBSERVEs
()
let gctr_partial_opaque_init alg plain cipher key icb =
gctr_partial_reveal ();
()
#restart-solver | false | false | Vale.AES.GCTR.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 20,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val lemma_gctr_partial_append (alg:algorithm) (b1 b2:nat) (p1 c1 p2 c2:seq quad32) (key:seq nat32) (icb1 icb2:quad32) : Lemma
(requires gctr_partial alg b1 p1 c1 key icb1 /\
gctr_partial alg b2 p2 c2 key icb2 /\
b1 == length p1 /\ b1 == length c1 /\
b2 == length p2 /\ b2 == length c2 /\
icb2 == inc32 icb1 b1)
(ensures gctr_partial alg (b1 + b2) (p1 @| p2) (c1 @| c2) key icb1) | [] | Vale.AES.GCTR.lemma_gctr_partial_append | {
"file_name": "vale/code/crypto/aes/Vale.AES.GCTR.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} |
alg: Vale.AES.AES_common_s.algorithm ->
b1: Prims.nat ->
b2: Prims.nat ->
p1: FStar.Seq.Base.seq Vale.Def.Types_s.quad32 ->
c1: FStar.Seq.Base.seq Vale.Def.Types_s.quad32 ->
p2: FStar.Seq.Base.seq Vale.Def.Types_s.quad32 ->
c2: FStar.Seq.Base.seq Vale.Def.Types_s.quad32 ->
key: FStar.Seq.Base.seq Vale.Def.Types_s.nat32 ->
icb1: Vale.Def.Types_s.quad32 ->
icb2: Vale.Def.Types_s.quad32
-> FStar.Pervasives.Lemma
(requires
Vale.AES.GCTR.gctr_partial alg b1 p1 c1 key icb1 /\
Vale.AES.GCTR.gctr_partial alg b2 p2 c2 key icb2 /\ b1 == FStar.Seq.Base.length p1 /\
b1 == FStar.Seq.Base.length c1 /\ b2 == FStar.Seq.Base.length p2 /\
b2 == FStar.Seq.Base.length c2 /\ icb2 == Vale.AES.GCTR_s.inc32 icb1 b1)
(ensures Vale.AES.GCTR.gctr_partial alg (b1 + b2) (p1 @| p2) (c1 @| c2) key icb1) | {
"end_col": 4,
"end_line": 59,
"start_col": 2,
"start_line": 58
} |
FStar.Pervasives.Lemma | val nat32_xor_bytewise_1 (k k' x x' m: nat32) (s s' t t': four nat8)
: Lemma
(requires
k == four_to_nat 8 s /\ k' == four_to_nat 8 s' /\ x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\ ixor k m == x /\ ixor k' m == x' /\ s.lo0 == s'.lo0)
(ensures t.lo0 == t'.lo0) | [
{
"abbrev": false,
"full_module": "FStar.Seq.Properties",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.Types_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Lib.Seqs",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCM_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCTR_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words.Four_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words.Seq_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Lib.Seqs",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCM_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCTR_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let nat32_xor_bytewise_1 (k k' x x' m:nat32) (s s' t t':four nat8) : Lemma
(requires
k == four_to_nat 8 s /\
k' == four_to_nat 8 s' /\
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
ixor k m == x /\
ixor k' m == x' /\
s.lo0 == s'.lo0
)
(ensures t.lo0 == t'.lo0)
=
let Mkfour s0 s1 s2 s3 = s in
let Mkfour s0' s1' s2' s3' = s' in
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
nat32_xor_bytewise_1_helper3 k k' s s';
lemma_ishl_32 k 24;
lemma_ishl_32 k' 24;
lemma_ishl_32 x 24;
lemma_ishl_32 x' 24;
lemma_ishl_ixor_32 k m 24;
lemma_ishl_ixor_32 k' m 24;
assert_norm (pow2 24 == pow2_24);
nat32_xor_bytewise_1_helper2 x x' t t';
() | val nat32_xor_bytewise_1 (k k' x x' m: nat32) (s s' t t': four nat8)
: Lemma
(requires
k == four_to_nat 8 s /\ k' == four_to_nat 8 s' /\ x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\ ixor k m == x /\ ixor k' m == x' /\ s.lo0 == s'.lo0)
(ensures t.lo0 == t'.lo0)
let nat32_xor_bytewise_1 (k k' x x' m: nat32) (s s' t t': four nat8)
: Lemma
(requires
k == four_to_nat 8 s /\ k' == four_to_nat 8 s' /\ x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\ ixor k m == x /\ ixor k' m == x' /\ s.lo0 == s'.lo0)
(ensures t.lo0 == t'.lo0) = | false | null | true | let Mkfour s0 s1 s2 s3 = s in
let Mkfour s0' s1' s2' s3' = s' in
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
nat32_xor_bytewise_1_helper3 k k' s s';
lemma_ishl_32 k 24;
lemma_ishl_32 k' 24;
lemma_ishl_32 x 24;
lemma_ishl_32 x' 24;
lemma_ishl_ixor_32 k m 24;
lemma_ishl_ixor_32 k' m 24;
assert_norm (pow2 24 == pow2_24);
nat32_xor_bytewise_1_helper2 x x' t t';
() | {
"checked_file": "Vale.AES.GCTR.fst.checked",
"dependencies": [
"Vale.Poly1305.Bitvectors.fsti.checked",
"Vale.Lib.Seqs.fsti.checked",
"Vale.Def.Words_s.fsti.checked",
"Vale.Def.Words.Seq_s.fsti.checked",
"Vale.Def.Words.Four_s.fsti.checked",
"Vale.Def.TypesNative_s.fst.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Opaque_s.fsti.checked",
"Vale.Arch.Types.fsti.checked",
"Vale.AES.Types_helpers.fsti.checked",
"Vale.AES.GCTR_s.fst.checked",
"Vale.AES.GCM_helpers.fsti.checked",
"Vale.AES.AES_s.fst.checked",
"prims.fst.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.Properties.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Classical.fsti.checked",
"FStar.Calc.fsti.checked"
],
"interface_file": true,
"source_file": "Vale.AES.GCTR.fst"
} | [
"lemma"
] | [
"Vale.Def.Types_s.nat32",
"Vale.Def.Words_s.four",
"Vale.Def.Types_s.nat8",
"Prims.unit",
"Vale.AES.GCTR.nat32_xor_bytewise_1_helper2",
"FStar.Pervasives.assert_norm",
"Prims.eq2",
"Prims.int",
"Prims.pow2",
"Vale.AES.GCTR.pow2_24",
"Vale.AES.GCTR.lemma_ishl_ixor_32",
"Vale.AES.GCTR.lemma_ishl_32",
"Vale.AES.GCTR.nat32_xor_bytewise_1_helper3",
"Vale.Def.Words_s.nat8",
"Prims.l_and",
"Vale.Def.Words_s.natN",
"Vale.Def.Words_s.pow2_32",
"Vale.Def.Words.Four_s.four_to_nat",
"Vale.Def.Types_s.ixor",
"Vale.Def.Words_s.__proj__Mkfour__item__lo0",
"Prims.squash",
"Prims.Nil",
"FStar.Pervasives.pattern"
] | [] | module Vale.AES.GCTR
open Vale.Def.Opaque_s
open Vale.Def.Words_s
open Vale.Def.Words.Seq_s
open Vale.Def.Words.Four_s
open Vale.Def.Types_s
open Vale.Arch.Types
open FStar.Mul
open FStar.Seq
open Vale.AES.AES_s
open Vale.AES.GCTR_s
open Vale.AES.GCM_helpers
open FStar.Math.Lemmas
open Vale.Lib.Seqs
open Vale.AES.Types_helpers
#set-options "--z3rlimit 20 --max_fuel 1 --max_ifuel 0"
let lemma_counter_init x low64 low8 =
Vale.Poly1305.Bitvectors.lemma_bytes_and_mod1 low64;
Vale.Def.TypesNative_s.reveal_iand 64 low64 0xff;
assert (low8 == low64 % 256);
lo64_reveal ();
assert_norm (pow2_norm 32 == pow2_32); // OBSERVE
assert (low64 == x.lo0 + x.lo1 * pow2_32); // OBSERVE
assert (low64 % 256 == x.lo0 % 256);
()
let gctr_encrypt_block_offset (icb_BE:quad32) (plain_LE:quad32) (alg:algorithm) (key:seq nat32) (i:int) =
()
let gctr_encrypt_empty (icb_BE:quad32) (plain_LE cipher_LE:seq quad32) (alg:algorithm) (key:seq nat32) =
reveal_opaque (`%le_bytes_to_seq_quad32) le_bytes_to_seq_quad32;
gctr_encrypt_LE_reveal ();
let plain = slice (le_seq_quad32_to_bytes plain_LE) 0 0 in
let cipher = slice (le_seq_quad32_to_bytes cipher_LE) 0 0 in
assert (plain == empty);
assert (cipher == empty);
assert (length plain == 0);
assert (make_gctr_plain_LE plain == empty);
let num_extra = (length (make_gctr_plain_LE plain)) % 16 in
assert (num_extra == 0);
let plain_quads_LE = le_bytes_to_seq_quad32 plain in
let cipher_quads_LE = gctr_encrypt_recursive icb_BE plain_quads_LE alg key 0 in
assert (equal plain_quads_LE empty); // OBSERVE
assert (plain_quads_LE == empty);
assert (cipher_quads_LE == empty);
assert (equal (le_seq_quad32_to_bytes cipher_quads_LE) empty); // OBSERVEs
()
let gctr_partial_opaque_init alg plain cipher key icb =
gctr_partial_reveal ();
()
#restart-solver
let lemma_gctr_partial_append alg b1 b2 p1 c1 p2 c2 key icb1 icb2 =
gctr_partial_reveal ();
()
let gctr_partial_opaque_ignores_postfix alg bound plain plain' cipher cipher' key icb =
gctr_partial_reveal ();
// OBSERVE:
assert (forall i . 0 <= i /\ i < bound ==> index plain i == index (slice plain 0 bound) i);
assert (forall i . 0 <= i /\ i < bound ==> index plain' i == index (slice plain' 0 bound) i);
assert (forall i . 0 <= i /\ i < bound ==> index cipher i == index (slice cipher 0 bound) i);
assert (forall i . 0 <= i /\ i < bound ==> index cipher' i == index (slice cipher' 0 bound) i);
()
let gctr_partial_extend6 (alg:algorithm) (bound:nat) (plain cipher:seq quad32) (key:seq nat32) (icb:quad32)
=
gctr_partial_reveal ();
()
(*
let rec seq_map_i_indexed' (#a:Type) (#b:Type) (f:int->a->b) (s:seq a) (i:int) :
Tot (s':seq b { length s' == length s /\
(forall j . {:pattern index s' j} 0 <= j /\ j < length s ==> index s' j == f (i + j) (index s j))
})
(decreases (length s))
=
if length s = 0 then empty
else cons (f i (head s)) (seq_map_i_indexed f (tail s) (i + 1))
let rec test (icb_BE:quad32) (plain_LE:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (i:int) :
Lemma (ensures
(let gctr_encrypt_block_curried (j:int) (p:quad32) = gctr_encrypt_block icb_BE p alg key j in
gctr_encrypt_recursive icb_BE plain_LE alg key i == seq_map_i_indexed' gctr_encrypt_block_curried plain_LE i))
(decreases (length plain_LE))
=
let gctr_encrypt_block_curried (j:int) (p:quad32) = gctr_encrypt_block icb_BE p alg key j in
let g = gctr_encrypt_recursive icb_BE plain_LE alg key i in
let s = seq_map_i_indexed' gctr_encrypt_block_curried plain_LE i in
if length plain_LE = 0 then (
assert(equal (g) (s));
()
) else (
test icb_BE (tail plain_LE) alg key (i+1);
assert (gctr_encrypt_recursive icb_BE (tail plain_LE) alg key (i+1) == seq_map_i_indexed' gctr_encrypt_block_curried (tail plain_LE) (i+1))
)
*)
let rec gctr_encrypt_recursive_length (icb:quad32) (plain:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (i:int) : Lemma
(requires True)
(ensures length (gctr_encrypt_recursive icb plain alg key i) == length plain)
(decreases %[length plain])
[SMTPat (length (gctr_encrypt_recursive icb plain alg key i))]
=
if length plain = 0 then ()
else gctr_encrypt_recursive_length icb (tail plain) alg key (i + 1)
#reset-options "--z3rlimit 40"
let gctr_encrypt_length (icb_BE:quad32) (plain:gctr_plain_LE)
(alg:algorithm) (key:aes_key_LE alg) :
Lemma(length (gctr_encrypt_LE icb_BE plain alg key) == length plain)
[SMTPat (length (gctr_encrypt_LE icb_BE plain alg key))]
=
reveal_opaque (`%le_bytes_to_seq_quad32) le_bytes_to_seq_quad32;
gctr_encrypt_LE_reveal ();
let num_extra = (length plain) % 16 in
let result = gctr_encrypt_LE icb_BE plain alg key in
if num_extra = 0 then (
let plain_quads_LE = le_bytes_to_seq_quad32 plain in
gctr_encrypt_recursive_length icb_BE plain_quads_LE alg key 0
) else (
let full_bytes_len = (length plain) - num_extra in
let full_blocks, final_block = split plain full_bytes_len in
let full_quads_LE = le_bytes_to_seq_quad32 full_blocks in
let final_quad_LE = le_bytes_to_quad32 (pad_to_128_bits final_block) in
let cipher_quads_LE = gctr_encrypt_recursive icb_BE full_quads_LE alg key 0 in
let final_cipher_quad_LE = gctr_encrypt_block icb_BE final_quad_LE alg key (full_bytes_len / 16) in
let cipher_bytes_full_LE = le_seq_quad32_to_bytes cipher_quads_LE in
let final_cipher_bytes_LE = slice (le_quad32_to_bytes final_cipher_quad_LE) 0 num_extra in
gctr_encrypt_recursive_length icb_BE full_quads_LE alg key 0;
assert (length result == length cipher_bytes_full_LE + length final_cipher_bytes_LE);
assert (length cipher_quads_LE == length full_quads_LE);
assert (length cipher_bytes_full_LE == 16 * length cipher_quads_LE);
assert (16 * length full_quads_LE == length full_blocks);
assert (length cipher_bytes_full_LE == length full_blocks);
()
)
#reset-options
let rec gctr_indexed_helper (icb:quad32) (plain:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (i:int) : Lemma
(requires True)
(ensures (let cipher = gctr_encrypt_recursive icb plain alg key i in
length cipher == length plain /\
(forall j . {:pattern index cipher j} 0 <= j /\ j < length plain ==>
index cipher j == quad32_xor (index plain j) (aes_encrypt_BE alg key (inc32 icb (i + j)) ))))
(decreases %[length plain])
=
if length plain = 0 then ()
else
let tl = tail plain in
let cipher = gctr_encrypt_recursive icb plain alg key i in
let r_cipher = gctr_encrypt_recursive icb tl alg key (i+1) in
let helper (j:int) :
Lemma ((0 <= j /\ j < length plain) ==> (index cipher j == quad32_xor (index plain j) (aes_encrypt_BE alg key (inc32 icb (i + j)) )))
=
aes_encrypt_LE_reveal ();
if 0 < j && j < length plain then (
gctr_indexed_helper icb tl alg key (i+1);
assert(index r_cipher (j-1) == quad32_xor (index tl (j-1)) (aes_encrypt_BE alg key (inc32 icb (i + 1 + j - 1)) )) // OBSERVE
) else ()
in
FStar.Classical.forall_intro helper
let gctr_indexed (icb:quad32) (plain:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (cipher:seq quad32) : Lemma
(requires length cipher == length plain /\
(forall i . {:pattern index cipher i} 0 <= i /\ i < length cipher ==>
index cipher i == quad32_xor (index plain i) (aes_encrypt_BE alg key (inc32 icb i) )))
(ensures cipher == gctr_encrypt_recursive icb plain alg key 0)
=
gctr_indexed_helper icb plain alg key 0;
let c = gctr_encrypt_recursive icb plain alg key 0 in
assert(equal cipher c) // OBSERVE: Invoke extensionality lemmas
let gctr_partial_completed (alg:algorithm) (plain cipher:seq quad32) (key:seq nat32) (icb:quad32) =
gctr_indexed icb plain alg key cipher;
()
let gctr_partial_opaque_completed (alg:algorithm) (plain cipher:seq quad32) (key:seq nat32) (icb:quad32) : Lemma
(requires
is_aes_key_LE alg key /\
length plain == length cipher /\
length plain < pow2_32 /\
gctr_partial alg (length cipher) plain cipher key icb
)
(ensures cipher == gctr_encrypt_recursive icb plain alg key 0)
=
gctr_partial_reveal ();
gctr_partial_completed alg plain cipher key icb
let gctr_partial_to_full_basic (icb_BE:quad32) (plain:seq quad32) (alg:algorithm) (key:seq nat32) (cipher:seq quad32) =
gctr_encrypt_LE_reveal ();
let p = le_seq_quad32_to_bytes plain in
assert (length p % 16 == 0);
let plain_quads_LE = le_bytes_to_seq_quad32 p in
let cipher_quads_LE = gctr_encrypt_recursive icb_BE plain_quads_LE alg key 0 in
let cipher_bytes = le_seq_quad32_to_bytes cipher_quads_LE in
le_bytes_to_seq_quad32_to_bytes plain;
()
(*
Want to show that:
slice (le_seq_quad32_to_bytes (buffer128_as_seq(mem, out_b))) 0 num_bytes
==
gctr_encrypt_LE icb_BE (slice (le_seq_quad32_to_bytes (buffer128_as_seq(mem, in_b))) 0 num_bytes) ...
We know that
slice (buffer128_as_seq(mem, out_b) 0 num_blocks
==
gctr_encrypt_recursive icb_BE (slice buffer128_as_seq(mem, in_b) 0 num_blocks) ...
And we know that:
get_mem out_b num_blocks
==
gctr_encrypt_block(icb_BE, (get_mem inb num_blocks), alg, key, num_blocks);
Internally gctr_encrypt_LE will compute:
full_blocks, final_block = split (slice (le_seq_quad32_to_bytes (buffer128_as_seq(mem, in_b))) 0 num_bytes) (num_blocks * 16)
We'd like to show that
Step1: le_bytes_to_seq_quad32 full_blocks == slice buffer128_as_seq(mem, in_b) 0 num_blocks
and
Step2: final_block == slice (le_quad32_to_bytes (get_mem inb num_blocks)) 0 num_extra
Then we need to break down the byte-level effects of gctr_encrypt_block to show that even though the
padded version of final_block differs from (get_mem inb num_blocks), after we slice it at the end,
we end up with the same value
*)
let step1 (p:seq quad32) (num_bytes:nat{ num_bytes < 16 * length p }) : Lemma
(let num_extra = num_bytes % 16 in
let num_blocks = num_bytes / 16 in
let full_blocks, final_block = split (slice (le_seq_quad32_to_bytes p) 0 num_bytes) (num_blocks * 16) in
let full_quads_LE = le_bytes_to_seq_quad32 full_blocks in
let p_prefix = slice p 0 num_blocks in
p_prefix == full_quads_LE)
=
let num_extra = num_bytes % 16 in
let num_blocks = num_bytes / 16 in
let full_blocks, final_block = split (slice (le_seq_quad32_to_bytes p) 0 num_bytes) (num_blocks * 16) in
let full_quads_LE = le_bytes_to_seq_quad32 full_blocks in
let p_prefix = slice p 0 num_blocks in
assert (length full_blocks == num_blocks * 16);
assert (full_blocks == slice (slice (le_seq_quad32_to_bytes p) 0 num_bytes) 0 (num_blocks * 16));
assert (full_blocks == slice (le_seq_quad32_to_bytes p) 0 (num_blocks * 16));
slice_commutes_le_seq_quad32_to_bytes0 p num_blocks;
assert (full_blocks == le_seq_quad32_to_bytes (slice p 0 num_blocks));
le_bytes_to_seq_quad32_to_bytes (slice p 0 num_blocks);
assert (full_quads_LE == (slice p 0 num_blocks));
()
#reset-options "--smtencoding.elim_box true --z3rlimit 30"
let lemma_slice_orig_index (#a:Type) (s s':seq a) (m n:nat) : Lemma
(requires length s == length s' /\ m <= n /\ n <= length s /\ slice s m n == slice s' m n)
(ensures (forall (i:int).{:pattern (index s i) \/ (index s' i)} m <= i /\ i < n ==> index s i == index s' i))
=
let aux (i:nat{m <= i /\ i < n}) : Lemma (index s i == index s' i) =
lemma_index_slice s m n (i - m);
lemma_index_slice s' m n (i - m)
in Classical.forall_intro aux
let lemma_ishl_32 (x:nat32) (k:nat) : Lemma
(ensures ishl #pow2_32 x k == x * pow2 k % pow2_32)
=
Vale.Def.TypesNative_s.reveal_ishl 32 x k;
FStar.UInt.shift_left_value_lemma #32 x k;
()
let lemma_ishl_ixor_32 (x y:nat32) (k:nat) : Lemma
(ensures ishl #pow2_32 (ixor x y) k == ixor (ishl x k) (ishl y k))
=
Vale.Def.TypesNative_s.reveal_ishl 32 x k;
Vale.Def.TypesNative_s.reveal_ishl 32 y k;
Vale.Def.TypesNative_s.reveal_ishl 32 (ixor x y) k;
Vale.Def.TypesNative_s.reveal_ixor 32 x y;
Vale.Def.TypesNative_s.reveal_ixor 32 (ishl x k) (ishl y k);
FStar.UInt.shift_left_logxor_lemma #32 x y k;
()
unfold let pow2_24 = 0x1000000
let nat24 = natN pow2_24
let nat32_xor_bytewise_1_helper1 (x0 x0':nat8) (x1 x1':nat24) (x x':nat32) : Lemma
(requires
x == x0 + 0x100 * x1 /\
x' == x0' + 0x100 * x1' /\
x * 0x1000000 % 0x100000000 == x' * 0x1000000 % 0x100000000
)
(ensures x0 == x0')
=
()
let nat32_xor_bytewise_2_helper1 (x0 x0' x1 x1':nat16) (x x':nat32) : Lemma
(requires
x == x0 + 0x10000 * x1 /\
x' == x0' + 0x10000 * x1' /\
x * 0x10000 % 0x100000000 == x' * 0x10000 % 0x100000000
)
(ensures x0 == x0')
=
()
let nat32_xor_bytewise_3_helper1 (x0 x0':nat24) (x1 x1':nat8) (x x':nat32) : Lemma
(requires
x == x0 + 0x1000000 * x1 /\
x' == x0' + 0x1000000 * x1' /\
x * 0x100 % 0x100000000 == x' * 0x100 % 0x100000000
)
(ensures x0 == x0')
=
()
let nat32_xor_bytewise_1_helper2 (x x':nat32) (t t':four nat8) : Lemma
(requires
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
x * 0x1000000 % 0x100000000 == x' * 0x1000000 % 0x100000000
)
(ensures t.lo0 == t'.lo0)
=
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
let t123 = t1 + 0x100 * t2 + 0x10000 * t3 in
let t123' = t1' + 0x100 * t2' + 0x10000 * t3' in
assert_norm (four_to_nat 8 t == four_to_nat_unfold 8 t );
assert_norm (four_to_nat 8 t' == four_to_nat_unfold 8 t');
nat32_xor_bytewise_1_helper1 t0 t0' t123 t123' x x';
()
let nat32_xor_bytewise_2_helper2 (x x':nat32) (t t':four nat8) : Lemma
(requires
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
x * 0x10000 % 0x100000000 == x' * 0x10000 % 0x100000000
)
(ensures t.lo0 == t'.lo0 /\ t.lo1 == t'.lo1)
=
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
let t01 = t0 + 0x100 * t1 in
let t23 = t2 + 0x100 * t3 in
let t01' = t0' + 0x100 * t1' in
let t23' = t2' + 0x100 * t3' in
assert_norm (four_to_nat 8 t == four_to_nat_unfold 8 t );
assert_norm (four_to_nat 8 t' == four_to_nat_unfold 8 t');
nat32_xor_bytewise_2_helper1 t01 t01' t23 t23' x x';
()
let nat32_xor_bytewise_3_helper2 (x x':nat32) (t t':four nat8) : Lemma
(requires
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
x * 0x100 % 0x100000000 == x' * 0x100 % 0x100000000
)
(ensures t.lo0 == t'.lo0 /\ t.lo1 == t'.lo1 /\ t.hi2 == t'.hi2)
=
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
let t012 = t0 + 0x100 * t1 + 0x10000 * t2 in
let t012' = t0' + 0x100 * t1' + 0x10000 * t2' in
assert_norm (four_to_nat 8 t == four_to_nat_unfold 8 t );
assert_norm (four_to_nat 8 t' == four_to_nat_unfold 8 t');
nat32_xor_bytewise_3_helper1 t012 t012' t3 t3' x x';
()
let nat32_xor_bytewise_1_helper3 (k k':nat32) (s s':four nat8) : Lemma
(requires
k == four_to_nat 8 s /\
k' == four_to_nat 8 s' /\
s.lo0 == s'.lo0
)
(ensures k * 0x1000000 % 0x100000000 == k' * 0x1000000 % 0x100000000)
=
let Mkfour _ _ _ _ = s in
let Mkfour _ _ _ _ = s' in
assert_norm (four_to_nat 8 s == four_to_nat_unfold 8 s );
assert_norm (four_to_nat 8 s' == four_to_nat_unfold 8 s');
()
let nat32_xor_bytewise_2_helper3 (k k':nat32) (s s':four nat8) : Lemma
(requires
k == four_to_nat 8 s /\
k' == four_to_nat 8 s' /\
s.lo0 == s'.lo0 /\ s.lo1 == s'.lo1
)
(ensures k * 0x10000 % 0x100000000 == k' * 0x10000 % 0x100000000)
=
let Mkfour _ _ _ _ = s in
let Mkfour _ _ _ _ = s' in
assert_norm (four_to_nat 8 s == four_to_nat_unfold 8 s );
assert_norm (four_to_nat 8 s' == four_to_nat_unfold 8 s');
()
let nat32_xor_bytewise_3_helper3 (k k':nat32) (s s':four nat8) : Lemma
(requires
k == four_to_nat 8 s /\
k' == four_to_nat 8 s' /\
s.lo0 == s'.lo0 /\ s.lo1 == s'.lo1 /\ s.hi2 == s'.hi2
)
(ensures k * 0x100 % 0x100000000 == k' * 0x100 % 0x100000000)
=
let Mkfour _ _ _ _ = s in
let Mkfour _ _ _ _ = s' in
assert_norm (four_to_nat 8 s == four_to_nat_unfold 8 s );
assert_norm (four_to_nat 8 s' == four_to_nat_unfold 8 s');
()
let nat32_xor_bytewise_1 (k k' x x' m:nat32) (s s' t t':four nat8) : Lemma
(requires
k == four_to_nat 8 s /\
k' == four_to_nat 8 s' /\
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
ixor k m == x /\
ixor k' m == x' /\
s.lo0 == s'.lo0
) | false | false | Vale.AES.GCTR.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 30,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val nat32_xor_bytewise_1 (k k' x x' m: nat32) (s s' t t': four nat8)
: Lemma
(requires
k == four_to_nat 8 s /\ k' == four_to_nat 8 s' /\ x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\ ixor k m == x /\ ixor k' m == x' /\ s.lo0 == s'.lo0)
(ensures t.lo0 == t'.lo0) | [] | Vale.AES.GCTR.nat32_xor_bytewise_1 | {
"file_name": "vale/code/crypto/aes/Vale.AES.GCTR.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} |
k: Vale.Def.Types_s.nat32 ->
k': Vale.Def.Types_s.nat32 ->
x: Vale.Def.Types_s.nat32 ->
x': Vale.Def.Types_s.nat32 ->
m: Vale.Def.Types_s.nat32 ->
s: Vale.Def.Words_s.four Vale.Def.Types_s.nat8 ->
s': Vale.Def.Words_s.four Vale.Def.Types_s.nat8 ->
t: Vale.Def.Words_s.four Vale.Def.Types_s.nat8 ->
t': Vale.Def.Words_s.four Vale.Def.Types_s.nat8
-> FStar.Pervasives.Lemma
(requires
k == Vale.Def.Words.Four_s.four_to_nat 8 s /\ k' == Vale.Def.Words.Four_s.four_to_nat 8 s' /\
x == Vale.Def.Words.Four_s.four_to_nat 8 t /\ x' == Vale.Def.Words.Four_s.four_to_nat 8 t' /\
Vale.Def.Types_s.ixor k m == x /\ Vale.Def.Types_s.ixor k' m == x' /\
Mkfour?.lo0 s == Mkfour?.lo0 s') (ensures Mkfour?.lo0 t == Mkfour?.lo0 t') | {
"end_col": 4,
"end_line": 449,
"start_col": 3,
"start_line": 435
} |
FStar.Pervasives.Lemma | val gctr_indexed
(icb: quad32)
(plain: gctr_plain_internal_LE)
(alg: algorithm)
(key: aes_key_LE alg)
(cipher: seq quad32)
: Lemma
(requires
length cipher == length plain /\
(forall i. {:pattern index cipher i}
0 <= i /\ i < length cipher ==>
index cipher i == quad32_xor (index plain i) (aes_encrypt_BE alg key (inc32 icb i))))
(ensures cipher == gctr_encrypt_recursive icb plain alg key 0) | [
{
"abbrev": false,
"full_module": "Vale.AES.Types_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Lib.Seqs",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCM_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCTR_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words.Four_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words.Seq_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Lib.Seqs",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCM_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCTR_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let gctr_indexed (icb:quad32) (plain:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (cipher:seq quad32) : Lemma
(requires length cipher == length plain /\
(forall i . {:pattern index cipher i} 0 <= i /\ i < length cipher ==>
index cipher i == quad32_xor (index plain i) (aes_encrypt_BE alg key (inc32 icb i) )))
(ensures cipher == gctr_encrypt_recursive icb plain alg key 0)
=
gctr_indexed_helper icb plain alg key 0;
let c = gctr_encrypt_recursive icb plain alg key 0 in
assert(equal cipher c) | val gctr_indexed
(icb: quad32)
(plain: gctr_plain_internal_LE)
(alg: algorithm)
(key: aes_key_LE alg)
(cipher: seq quad32)
: Lemma
(requires
length cipher == length plain /\
(forall i. {:pattern index cipher i}
0 <= i /\ i < length cipher ==>
index cipher i == quad32_xor (index plain i) (aes_encrypt_BE alg key (inc32 icb i))))
(ensures cipher == gctr_encrypt_recursive icb plain alg key 0)
let gctr_indexed
(icb: quad32)
(plain: gctr_plain_internal_LE)
(alg: algorithm)
(key: aes_key_LE alg)
(cipher: seq quad32)
: Lemma
(requires
length cipher == length plain /\
(forall i. {:pattern index cipher i}
0 <= i /\ i < length cipher ==>
index cipher i == quad32_xor (index plain i) (aes_encrypt_BE alg key (inc32 icb i))))
(ensures cipher == gctr_encrypt_recursive icb plain alg key 0) = | false | null | true | gctr_indexed_helper icb plain alg key 0;
let c = gctr_encrypt_recursive icb plain alg key 0 in
assert (equal cipher c) | {
"checked_file": "Vale.AES.GCTR.fst.checked",
"dependencies": [
"Vale.Poly1305.Bitvectors.fsti.checked",
"Vale.Lib.Seqs.fsti.checked",
"Vale.Def.Words_s.fsti.checked",
"Vale.Def.Words.Seq_s.fsti.checked",
"Vale.Def.Words.Four_s.fsti.checked",
"Vale.Def.TypesNative_s.fst.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Opaque_s.fsti.checked",
"Vale.Arch.Types.fsti.checked",
"Vale.AES.Types_helpers.fsti.checked",
"Vale.AES.GCTR_s.fst.checked",
"Vale.AES.GCM_helpers.fsti.checked",
"Vale.AES.AES_s.fst.checked",
"prims.fst.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.Properties.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Classical.fsti.checked",
"FStar.Calc.fsti.checked"
],
"interface_file": true,
"source_file": "Vale.AES.GCTR.fst"
} | [
"lemma"
] | [
"Vale.Def.Types_s.quad32",
"Vale.AES.GCTR_s.gctr_plain_internal_LE",
"Vale.AES.AES_common_s.algorithm",
"Vale.AES.AES_s.aes_key_LE",
"FStar.Seq.Base.seq",
"Prims._assert",
"FStar.Seq.Base.equal",
"Vale.AES.GCTR_s.gctr_encrypt_recursive",
"Prims.unit",
"Vale.AES.GCTR.gctr_indexed_helper",
"Prims.l_and",
"Prims.eq2",
"Prims.nat",
"FStar.Seq.Base.length",
"Prims.l_Forall",
"Prims.int",
"Prims.b2t",
"Prims.op_GreaterThanOrEqual",
"Prims.op_LessThan",
"Prims.l_imp",
"Prims.op_LessThanOrEqual",
"FStar.Seq.Base.index",
"Vale.Def.Types_s.quad32_xor",
"Vale.AES.GCTR.aes_encrypt_BE",
"Vale.AES.GCTR_s.inc32",
"Prims.squash",
"Prims.Nil",
"FStar.Pervasives.pattern"
] | [] | module Vale.AES.GCTR
open Vale.Def.Opaque_s
open Vale.Def.Words_s
open Vale.Def.Words.Seq_s
open Vale.Def.Words.Four_s
open Vale.Def.Types_s
open Vale.Arch.Types
open FStar.Mul
open FStar.Seq
open Vale.AES.AES_s
open Vale.AES.GCTR_s
open Vale.AES.GCM_helpers
open FStar.Math.Lemmas
open Vale.Lib.Seqs
open Vale.AES.Types_helpers
#set-options "--z3rlimit 20 --max_fuel 1 --max_ifuel 0"
let lemma_counter_init x low64 low8 =
Vale.Poly1305.Bitvectors.lemma_bytes_and_mod1 low64;
Vale.Def.TypesNative_s.reveal_iand 64 low64 0xff;
assert (low8 == low64 % 256);
lo64_reveal ();
assert_norm (pow2_norm 32 == pow2_32); // OBSERVE
assert (low64 == x.lo0 + x.lo1 * pow2_32); // OBSERVE
assert (low64 % 256 == x.lo0 % 256);
()
let gctr_encrypt_block_offset (icb_BE:quad32) (plain_LE:quad32) (alg:algorithm) (key:seq nat32) (i:int) =
()
let gctr_encrypt_empty (icb_BE:quad32) (plain_LE cipher_LE:seq quad32) (alg:algorithm) (key:seq nat32) =
reveal_opaque (`%le_bytes_to_seq_quad32) le_bytes_to_seq_quad32;
gctr_encrypt_LE_reveal ();
let plain = slice (le_seq_quad32_to_bytes plain_LE) 0 0 in
let cipher = slice (le_seq_quad32_to_bytes cipher_LE) 0 0 in
assert (plain == empty);
assert (cipher == empty);
assert (length plain == 0);
assert (make_gctr_plain_LE plain == empty);
let num_extra = (length (make_gctr_plain_LE plain)) % 16 in
assert (num_extra == 0);
let plain_quads_LE = le_bytes_to_seq_quad32 plain in
let cipher_quads_LE = gctr_encrypt_recursive icb_BE plain_quads_LE alg key 0 in
assert (equal plain_quads_LE empty); // OBSERVE
assert (plain_quads_LE == empty);
assert (cipher_quads_LE == empty);
assert (equal (le_seq_quad32_to_bytes cipher_quads_LE) empty); // OBSERVEs
()
let gctr_partial_opaque_init alg plain cipher key icb =
gctr_partial_reveal ();
()
#restart-solver
let lemma_gctr_partial_append alg b1 b2 p1 c1 p2 c2 key icb1 icb2 =
gctr_partial_reveal ();
()
let gctr_partial_opaque_ignores_postfix alg bound plain plain' cipher cipher' key icb =
gctr_partial_reveal ();
// OBSERVE:
assert (forall i . 0 <= i /\ i < bound ==> index plain i == index (slice plain 0 bound) i);
assert (forall i . 0 <= i /\ i < bound ==> index plain' i == index (slice plain' 0 bound) i);
assert (forall i . 0 <= i /\ i < bound ==> index cipher i == index (slice cipher 0 bound) i);
assert (forall i . 0 <= i /\ i < bound ==> index cipher' i == index (slice cipher' 0 bound) i);
()
let gctr_partial_extend6 (alg:algorithm) (bound:nat) (plain cipher:seq quad32) (key:seq nat32) (icb:quad32)
=
gctr_partial_reveal ();
()
(*
let rec seq_map_i_indexed' (#a:Type) (#b:Type) (f:int->a->b) (s:seq a) (i:int) :
Tot (s':seq b { length s' == length s /\
(forall j . {:pattern index s' j} 0 <= j /\ j < length s ==> index s' j == f (i + j) (index s j))
})
(decreases (length s))
=
if length s = 0 then empty
else cons (f i (head s)) (seq_map_i_indexed f (tail s) (i + 1))
let rec test (icb_BE:quad32) (plain_LE:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (i:int) :
Lemma (ensures
(let gctr_encrypt_block_curried (j:int) (p:quad32) = gctr_encrypt_block icb_BE p alg key j in
gctr_encrypt_recursive icb_BE plain_LE alg key i == seq_map_i_indexed' gctr_encrypt_block_curried plain_LE i))
(decreases (length plain_LE))
=
let gctr_encrypt_block_curried (j:int) (p:quad32) = gctr_encrypt_block icb_BE p alg key j in
let g = gctr_encrypt_recursive icb_BE plain_LE alg key i in
let s = seq_map_i_indexed' gctr_encrypt_block_curried plain_LE i in
if length plain_LE = 0 then (
assert(equal (g) (s));
()
) else (
test icb_BE (tail plain_LE) alg key (i+1);
assert (gctr_encrypt_recursive icb_BE (tail plain_LE) alg key (i+1) == seq_map_i_indexed' gctr_encrypt_block_curried (tail plain_LE) (i+1))
)
*)
let rec gctr_encrypt_recursive_length (icb:quad32) (plain:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (i:int) : Lemma
(requires True)
(ensures length (gctr_encrypt_recursive icb plain alg key i) == length plain)
(decreases %[length plain])
[SMTPat (length (gctr_encrypt_recursive icb plain alg key i))]
=
if length plain = 0 then ()
else gctr_encrypt_recursive_length icb (tail plain) alg key (i + 1)
#reset-options "--z3rlimit 40"
let gctr_encrypt_length (icb_BE:quad32) (plain:gctr_plain_LE)
(alg:algorithm) (key:aes_key_LE alg) :
Lemma(length (gctr_encrypt_LE icb_BE plain alg key) == length plain)
[SMTPat (length (gctr_encrypt_LE icb_BE plain alg key))]
=
reveal_opaque (`%le_bytes_to_seq_quad32) le_bytes_to_seq_quad32;
gctr_encrypt_LE_reveal ();
let num_extra = (length plain) % 16 in
let result = gctr_encrypt_LE icb_BE plain alg key in
if num_extra = 0 then (
let plain_quads_LE = le_bytes_to_seq_quad32 plain in
gctr_encrypt_recursive_length icb_BE plain_quads_LE alg key 0
) else (
let full_bytes_len = (length plain) - num_extra in
let full_blocks, final_block = split plain full_bytes_len in
let full_quads_LE = le_bytes_to_seq_quad32 full_blocks in
let final_quad_LE = le_bytes_to_quad32 (pad_to_128_bits final_block) in
let cipher_quads_LE = gctr_encrypt_recursive icb_BE full_quads_LE alg key 0 in
let final_cipher_quad_LE = gctr_encrypt_block icb_BE final_quad_LE alg key (full_bytes_len / 16) in
let cipher_bytes_full_LE = le_seq_quad32_to_bytes cipher_quads_LE in
let final_cipher_bytes_LE = slice (le_quad32_to_bytes final_cipher_quad_LE) 0 num_extra in
gctr_encrypt_recursive_length icb_BE full_quads_LE alg key 0;
assert (length result == length cipher_bytes_full_LE + length final_cipher_bytes_LE);
assert (length cipher_quads_LE == length full_quads_LE);
assert (length cipher_bytes_full_LE == 16 * length cipher_quads_LE);
assert (16 * length full_quads_LE == length full_blocks);
assert (length cipher_bytes_full_LE == length full_blocks);
()
)
#reset-options
let rec gctr_indexed_helper (icb:quad32) (plain:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (i:int) : Lemma
(requires True)
(ensures (let cipher = gctr_encrypt_recursive icb plain alg key i in
length cipher == length plain /\
(forall j . {:pattern index cipher j} 0 <= j /\ j < length plain ==>
index cipher j == quad32_xor (index plain j) (aes_encrypt_BE alg key (inc32 icb (i + j)) ))))
(decreases %[length plain])
=
if length plain = 0 then ()
else
let tl = tail plain in
let cipher = gctr_encrypt_recursive icb plain alg key i in
let r_cipher = gctr_encrypt_recursive icb tl alg key (i+1) in
let helper (j:int) :
Lemma ((0 <= j /\ j < length plain) ==> (index cipher j == quad32_xor (index plain j) (aes_encrypt_BE alg key (inc32 icb (i + j)) )))
=
aes_encrypt_LE_reveal ();
if 0 < j && j < length plain then (
gctr_indexed_helper icb tl alg key (i+1);
assert(index r_cipher (j-1) == quad32_xor (index tl (j-1)) (aes_encrypt_BE alg key (inc32 icb (i + 1 + j - 1)) )) // OBSERVE
) else ()
in
FStar.Classical.forall_intro helper
let gctr_indexed (icb:quad32) (plain:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (cipher:seq quad32) : Lemma
(requires length cipher == length plain /\
(forall i . {:pattern index cipher i} 0 <= i /\ i < length cipher ==>
index cipher i == quad32_xor (index plain i) (aes_encrypt_BE alg key (inc32 icb i) )))
(ensures cipher == gctr_encrypt_recursive icb plain alg key 0) | false | false | Vale.AES.GCTR.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val gctr_indexed
(icb: quad32)
(plain: gctr_plain_internal_LE)
(alg: algorithm)
(key: aes_key_LE alg)
(cipher: seq quad32)
: Lemma
(requires
length cipher == length plain /\
(forall i. {:pattern index cipher i}
0 <= i /\ i < length cipher ==>
index cipher i == quad32_xor (index plain i) (aes_encrypt_BE alg key (inc32 icb i))))
(ensures cipher == gctr_encrypt_recursive icb plain alg key 0) | [] | Vale.AES.GCTR.gctr_indexed | {
"file_name": "vale/code/crypto/aes/Vale.AES.GCTR.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} |
icb: Vale.Def.Types_s.quad32 ->
plain: Vale.AES.GCTR_s.gctr_plain_internal_LE ->
alg: Vale.AES.AES_common_s.algorithm ->
key: Vale.AES.AES_s.aes_key_LE alg ->
cipher: FStar.Seq.Base.seq Vale.Def.Types_s.quad32
-> FStar.Pervasives.Lemma
(requires
FStar.Seq.Base.length cipher == FStar.Seq.Base.length plain /\
(forall (i:
Prims.int
{ i >= 0 /\ i < FStar.Seq.Base.length plain /\
(i >= 0) /\ (i < FStar.Seq.Base.length cipher) }).
{:pattern FStar.Seq.Base.index cipher i}
0 <= i /\ i < FStar.Seq.Base.length cipher ==>
FStar.Seq.Base.index cipher i ==
Vale.Def.Types_s.quad32_xor (FStar.Seq.Base.index plain i)
(Vale.AES.GCTR.aes_encrypt_BE alg key (Vale.AES.GCTR_s.inc32 icb i))))
(ensures cipher == Vale.AES.GCTR_s.gctr_encrypt_recursive icb plain alg key 0) | {
"end_col": 24,
"end_line": 185,
"start_col": 2,
"start_line": 183
} |
FStar.Pervasives.Lemma | val nat32_xor_bytewise_2_helper2 (x x': nat32) (t t': four nat8)
: Lemma
(requires
x == four_to_nat 8 t /\ x' == four_to_nat 8 t' /\
x * 0x10000 % 0x100000000 == x' * 0x10000 % 0x100000000)
(ensures t.lo0 == t'.lo0 /\ t.lo1 == t'.lo1) | [
{
"abbrev": false,
"full_module": "FStar.Seq.Properties",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.Types_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Lib.Seqs",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCM_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCTR_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words.Four_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words.Seq_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Lib.Seqs",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCM_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCTR_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let nat32_xor_bytewise_2_helper2 (x x':nat32) (t t':four nat8) : Lemma
(requires
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
x * 0x10000 % 0x100000000 == x' * 0x10000 % 0x100000000
)
(ensures t.lo0 == t'.lo0 /\ t.lo1 == t'.lo1)
=
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
let t01 = t0 + 0x100 * t1 in
let t23 = t2 + 0x100 * t3 in
let t01' = t0' + 0x100 * t1' in
let t23' = t2' + 0x100 * t3' in
assert_norm (four_to_nat 8 t == four_to_nat_unfold 8 t );
assert_norm (four_to_nat 8 t' == four_to_nat_unfold 8 t');
nat32_xor_bytewise_2_helper1 t01 t01' t23 t23' x x';
() | val nat32_xor_bytewise_2_helper2 (x x': nat32) (t t': four nat8)
: Lemma
(requires
x == four_to_nat 8 t /\ x' == four_to_nat 8 t' /\
x * 0x10000 % 0x100000000 == x' * 0x10000 % 0x100000000)
(ensures t.lo0 == t'.lo0 /\ t.lo1 == t'.lo1)
let nat32_xor_bytewise_2_helper2 (x x': nat32) (t t': four nat8)
: Lemma
(requires
x == four_to_nat 8 t /\ x' == four_to_nat 8 t' /\
x * 0x10000 % 0x100000000 == x' * 0x10000 % 0x100000000)
(ensures t.lo0 == t'.lo0 /\ t.lo1 == t'.lo1) = | false | null | true | let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
let t01 = t0 + 0x100 * t1 in
let t23 = t2 + 0x100 * t3 in
let t01' = t0' + 0x100 * t1' in
let t23' = t2' + 0x100 * t3' in
assert_norm (four_to_nat 8 t == four_to_nat_unfold 8 t);
assert_norm (four_to_nat 8 t' == four_to_nat_unfold 8 t');
nat32_xor_bytewise_2_helper1 t01 t01' t23 t23' x x';
() | {
"checked_file": "Vale.AES.GCTR.fst.checked",
"dependencies": [
"Vale.Poly1305.Bitvectors.fsti.checked",
"Vale.Lib.Seqs.fsti.checked",
"Vale.Def.Words_s.fsti.checked",
"Vale.Def.Words.Seq_s.fsti.checked",
"Vale.Def.Words.Four_s.fsti.checked",
"Vale.Def.TypesNative_s.fst.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Opaque_s.fsti.checked",
"Vale.Arch.Types.fsti.checked",
"Vale.AES.Types_helpers.fsti.checked",
"Vale.AES.GCTR_s.fst.checked",
"Vale.AES.GCM_helpers.fsti.checked",
"Vale.AES.AES_s.fst.checked",
"prims.fst.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.Properties.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Classical.fsti.checked",
"FStar.Calc.fsti.checked"
],
"interface_file": true,
"source_file": "Vale.AES.GCTR.fst"
} | [
"lemma"
] | [
"Vale.Def.Types_s.nat32",
"Vale.Def.Words_s.four",
"Vale.Def.Types_s.nat8",
"Prims.unit",
"Vale.AES.GCTR.nat32_xor_bytewise_2_helper1",
"FStar.Pervasives.assert_norm",
"Prims.eq2",
"Vale.Def.Words_s.natN",
"Prims.pow2",
"FStar.Mul.op_Star",
"Vale.Def.Words.Four_s.four_to_nat",
"Vale.Def.Words.Four_s.four_to_nat_unfold",
"Prims.int",
"Prims.op_Addition",
"Vale.Def.Words_s.nat8",
"Prims.l_and",
"Vale.Def.Words_s.pow2_32",
"Prims.op_Modulus",
"Prims.squash",
"Vale.Def.Words_s.__proj__Mkfour__item__lo0",
"Vale.Def.Words_s.__proj__Mkfour__item__lo1",
"Prims.Nil",
"FStar.Pervasives.pattern"
] | [] | module Vale.AES.GCTR
open Vale.Def.Opaque_s
open Vale.Def.Words_s
open Vale.Def.Words.Seq_s
open Vale.Def.Words.Four_s
open Vale.Def.Types_s
open Vale.Arch.Types
open FStar.Mul
open FStar.Seq
open Vale.AES.AES_s
open Vale.AES.GCTR_s
open Vale.AES.GCM_helpers
open FStar.Math.Lemmas
open Vale.Lib.Seqs
open Vale.AES.Types_helpers
#set-options "--z3rlimit 20 --max_fuel 1 --max_ifuel 0"
let lemma_counter_init x low64 low8 =
Vale.Poly1305.Bitvectors.lemma_bytes_and_mod1 low64;
Vale.Def.TypesNative_s.reveal_iand 64 low64 0xff;
assert (low8 == low64 % 256);
lo64_reveal ();
assert_norm (pow2_norm 32 == pow2_32); // OBSERVE
assert (low64 == x.lo0 + x.lo1 * pow2_32); // OBSERVE
assert (low64 % 256 == x.lo0 % 256);
()
let gctr_encrypt_block_offset (icb_BE:quad32) (plain_LE:quad32) (alg:algorithm) (key:seq nat32) (i:int) =
()
let gctr_encrypt_empty (icb_BE:quad32) (plain_LE cipher_LE:seq quad32) (alg:algorithm) (key:seq nat32) =
reveal_opaque (`%le_bytes_to_seq_quad32) le_bytes_to_seq_quad32;
gctr_encrypt_LE_reveal ();
let plain = slice (le_seq_quad32_to_bytes plain_LE) 0 0 in
let cipher = slice (le_seq_quad32_to_bytes cipher_LE) 0 0 in
assert (plain == empty);
assert (cipher == empty);
assert (length plain == 0);
assert (make_gctr_plain_LE plain == empty);
let num_extra = (length (make_gctr_plain_LE plain)) % 16 in
assert (num_extra == 0);
let plain_quads_LE = le_bytes_to_seq_quad32 plain in
let cipher_quads_LE = gctr_encrypt_recursive icb_BE plain_quads_LE alg key 0 in
assert (equal plain_quads_LE empty); // OBSERVE
assert (plain_quads_LE == empty);
assert (cipher_quads_LE == empty);
assert (equal (le_seq_quad32_to_bytes cipher_quads_LE) empty); // OBSERVEs
()
let gctr_partial_opaque_init alg plain cipher key icb =
gctr_partial_reveal ();
()
#restart-solver
let lemma_gctr_partial_append alg b1 b2 p1 c1 p2 c2 key icb1 icb2 =
gctr_partial_reveal ();
()
let gctr_partial_opaque_ignores_postfix alg bound plain plain' cipher cipher' key icb =
gctr_partial_reveal ();
// OBSERVE:
assert (forall i . 0 <= i /\ i < bound ==> index plain i == index (slice plain 0 bound) i);
assert (forall i . 0 <= i /\ i < bound ==> index plain' i == index (slice plain' 0 bound) i);
assert (forall i . 0 <= i /\ i < bound ==> index cipher i == index (slice cipher 0 bound) i);
assert (forall i . 0 <= i /\ i < bound ==> index cipher' i == index (slice cipher' 0 bound) i);
()
let gctr_partial_extend6 (alg:algorithm) (bound:nat) (plain cipher:seq quad32) (key:seq nat32) (icb:quad32)
=
gctr_partial_reveal ();
()
(*
let rec seq_map_i_indexed' (#a:Type) (#b:Type) (f:int->a->b) (s:seq a) (i:int) :
Tot (s':seq b { length s' == length s /\
(forall j . {:pattern index s' j} 0 <= j /\ j < length s ==> index s' j == f (i + j) (index s j))
})
(decreases (length s))
=
if length s = 0 then empty
else cons (f i (head s)) (seq_map_i_indexed f (tail s) (i + 1))
let rec test (icb_BE:quad32) (plain_LE:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (i:int) :
Lemma (ensures
(let gctr_encrypt_block_curried (j:int) (p:quad32) = gctr_encrypt_block icb_BE p alg key j in
gctr_encrypt_recursive icb_BE plain_LE alg key i == seq_map_i_indexed' gctr_encrypt_block_curried plain_LE i))
(decreases (length plain_LE))
=
let gctr_encrypt_block_curried (j:int) (p:quad32) = gctr_encrypt_block icb_BE p alg key j in
let g = gctr_encrypt_recursive icb_BE plain_LE alg key i in
let s = seq_map_i_indexed' gctr_encrypt_block_curried plain_LE i in
if length plain_LE = 0 then (
assert(equal (g) (s));
()
) else (
test icb_BE (tail plain_LE) alg key (i+1);
assert (gctr_encrypt_recursive icb_BE (tail plain_LE) alg key (i+1) == seq_map_i_indexed' gctr_encrypt_block_curried (tail plain_LE) (i+1))
)
*)
let rec gctr_encrypt_recursive_length (icb:quad32) (plain:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (i:int) : Lemma
(requires True)
(ensures length (gctr_encrypt_recursive icb plain alg key i) == length plain)
(decreases %[length plain])
[SMTPat (length (gctr_encrypt_recursive icb plain alg key i))]
=
if length plain = 0 then ()
else gctr_encrypt_recursive_length icb (tail plain) alg key (i + 1)
#reset-options "--z3rlimit 40"
let gctr_encrypt_length (icb_BE:quad32) (plain:gctr_plain_LE)
(alg:algorithm) (key:aes_key_LE alg) :
Lemma(length (gctr_encrypt_LE icb_BE plain alg key) == length plain)
[SMTPat (length (gctr_encrypt_LE icb_BE plain alg key))]
=
reveal_opaque (`%le_bytes_to_seq_quad32) le_bytes_to_seq_quad32;
gctr_encrypt_LE_reveal ();
let num_extra = (length plain) % 16 in
let result = gctr_encrypt_LE icb_BE plain alg key in
if num_extra = 0 then (
let plain_quads_LE = le_bytes_to_seq_quad32 plain in
gctr_encrypt_recursive_length icb_BE plain_quads_LE alg key 0
) else (
let full_bytes_len = (length plain) - num_extra in
let full_blocks, final_block = split plain full_bytes_len in
let full_quads_LE = le_bytes_to_seq_quad32 full_blocks in
let final_quad_LE = le_bytes_to_quad32 (pad_to_128_bits final_block) in
let cipher_quads_LE = gctr_encrypt_recursive icb_BE full_quads_LE alg key 0 in
let final_cipher_quad_LE = gctr_encrypt_block icb_BE final_quad_LE alg key (full_bytes_len / 16) in
let cipher_bytes_full_LE = le_seq_quad32_to_bytes cipher_quads_LE in
let final_cipher_bytes_LE = slice (le_quad32_to_bytes final_cipher_quad_LE) 0 num_extra in
gctr_encrypt_recursive_length icb_BE full_quads_LE alg key 0;
assert (length result == length cipher_bytes_full_LE + length final_cipher_bytes_LE);
assert (length cipher_quads_LE == length full_quads_LE);
assert (length cipher_bytes_full_LE == 16 * length cipher_quads_LE);
assert (16 * length full_quads_LE == length full_blocks);
assert (length cipher_bytes_full_LE == length full_blocks);
()
)
#reset-options
let rec gctr_indexed_helper (icb:quad32) (plain:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (i:int) : Lemma
(requires True)
(ensures (let cipher = gctr_encrypt_recursive icb plain alg key i in
length cipher == length plain /\
(forall j . {:pattern index cipher j} 0 <= j /\ j < length plain ==>
index cipher j == quad32_xor (index plain j) (aes_encrypt_BE alg key (inc32 icb (i + j)) ))))
(decreases %[length plain])
=
if length plain = 0 then ()
else
let tl = tail plain in
let cipher = gctr_encrypt_recursive icb plain alg key i in
let r_cipher = gctr_encrypt_recursive icb tl alg key (i+1) in
let helper (j:int) :
Lemma ((0 <= j /\ j < length plain) ==> (index cipher j == quad32_xor (index plain j) (aes_encrypt_BE alg key (inc32 icb (i + j)) )))
=
aes_encrypt_LE_reveal ();
if 0 < j && j < length plain then (
gctr_indexed_helper icb tl alg key (i+1);
assert(index r_cipher (j-1) == quad32_xor (index tl (j-1)) (aes_encrypt_BE alg key (inc32 icb (i + 1 + j - 1)) )) // OBSERVE
) else ()
in
FStar.Classical.forall_intro helper
let gctr_indexed (icb:quad32) (plain:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (cipher:seq quad32) : Lemma
(requires length cipher == length plain /\
(forall i . {:pattern index cipher i} 0 <= i /\ i < length cipher ==>
index cipher i == quad32_xor (index plain i) (aes_encrypt_BE alg key (inc32 icb i) )))
(ensures cipher == gctr_encrypt_recursive icb plain alg key 0)
=
gctr_indexed_helper icb plain alg key 0;
let c = gctr_encrypt_recursive icb plain alg key 0 in
assert(equal cipher c) // OBSERVE: Invoke extensionality lemmas
let gctr_partial_completed (alg:algorithm) (plain cipher:seq quad32) (key:seq nat32) (icb:quad32) =
gctr_indexed icb plain alg key cipher;
()
let gctr_partial_opaque_completed (alg:algorithm) (plain cipher:seq quad32) (key:seq nat32) (icb:quad32) : Lemma
(requires
is_aes_key_LE alg key /\
length plain == length cipher /\
length plain < pow2_32 /\
gctr_partial alg (length cipher) plain cipher key icb
)
(ensures cipher == gctr_encrypt_recursive icb plain alg key 0)
=
gctr_partial_reveal ();
gctr_partial_completed alg plain cipher key icb
let gctr_partial_to_full_basic (icb_BE:quad32) (plain:seq quad32) (alg:algorithm) (key:seq nat32) (cipher:seq quad32) =
gctr_encrypt_LE_reveal ();
let p = le_seq_quad32_to_bytes plain in
assert (length p % 16 == 0);
let plain_quads_LE = le_bytes_to_seq_quad32 p in
let cipher_quads_LE = gctr_encrypt_recursive icb_BE plain_quads_LE alg key 0 in
let cipher_bytes = le_seq_quad32_to_bytes cipher_quads_LE in
le_bytes_to_seq_quad32_to_bytes plain;
()
(*
Want to show that:
slice (le_seq_quad32_to_bytes (buffer128_as_seq(mem, out_b))) 0 num_bytes
==
gctr_encrypt_LE icb_BE (slice (le_seq_quad32_to_bytes (buffer128_as_seq(mem, in_b))) 0 num_bytes) ...
We know that
slice (buffer128_as_seq(mem, out_b) 0 num_blocks
==
gctr_encrypt_recursive icb_BE (slice buffer128_as_seq(mem, in_b) 0 num_blocks) ...
And we know that:
get_mem out_b num_blocks
==
gctr_encrypt_block(icb_BE, (get_mem inb num_blocks), alg, key, num_blocks);
Internally gctr_encrypt_LE will compute:
full_blocks, final_block = split (slice (le_seq_quad32_to_bytes (buffer128_as_seq(mem, in_b))) 0 num_bytes) (num_blocks * 16)
We'd like to show that
Step1: le_bytes_to_seq_quad32 full_blocks == slice buffer128_as_seq(mem, in_b) 0 num_blocks
and
Step2: final_block == slice (le_quad32_to_bytes (get_mem inb num_blocks)) 0 num_extra
Then we need to break down the byte-level effects of gctr_encrypt_block to show that even though the
padded version of final_block differs from (get_mem inb num_blocks), after we slice it at the end,
we end up with the same value
*)
let step1 (p:seq quad32) (num_bytes:nat{ num_bytes < 16 * length p }) : Lemma
(let num_extra = num_bytes % 16 in
let num_blocks = num_bytes / 16 in
let full_blocks, final_block = split (slice (le_seq_quad32_to_bytes p) 0 num_bytes) (num_blocks * 16) in
let full_quads_LE = le_bytes_to_seq_quad32 full_blocks in
let p_prefix = slice p 0 num_blocks in
p_prefix == full_quads_LE)
=
let num_extra = num_bytes % 16 in
let num_blocks = num_bytes / 16 in
let full_blocks, final_block = split (slice (le_seq_quad32_to_bytes p) 0 num_bytes) (num_blocks * 16) in
let full_quads_LE = le_bytes_to_seq_quad32 full_blocks in
let p_prefix = slice p 0 num_blocks in
assert (length full_blocks == num_blocks * 16);
assert (full_blocks == slice (slice (le_seq_quad32_to_bytes p) 0 num_bytes) 0 (num_blocks * 16));
assert (full_blocks == slice (le_seq_quad32_to_bytes p) 0 (num_blocks * 16));
slice_commutes_le_seq_quad32_to_bytes0 p num_blocks;
assert (full_blocks == le_seq_quad32_to_bytes (slice p 0 num_blocks));
le_bytes_to_seq_quad32_to_bytes (slice p 0 num_blocks);
assert (full_quads_LE == (slice p 0 num_blocks));
()
#reset-options "--smtencoding.elim_box true --z3rlimit 30"
let lemma_slice_orig_index (#a:Type) (s s':seq a) (m n:nat) : Lemma
(requires length s == length s' /\ m <= n /\ n <= length s /\ slice s m n == slice s' m n)
(ensures (forall (i:int).{:pattern (index s i) \/ (index s' i)} m <= i /\ i < n ==> index s i == index s' i))
=
let aux (i:nat{m <= i /\ i < n}) : Lemma (index s i == index s' i) =
lemma_index_slice s m n (i - m);
lemma_index_slice s' m n (i - m)
in Classical.forall_intro aux
let lemma_ishl_32 (x:nat32) (k:nat) : Lemma
(ensures ishl #pow2_32 x k == x * pow2 k % pow2_32)
=
Vale.Def.TypesNative_s.reveal_ishl 32 x k;
FStar.UInt.shift_left_value_lemma #32 x k;
()
let lemma_ishl_ixor_32 (x y:nat32) (k:nat) : Lemma
(ensures ishl #pow2_32 (ixor x y) k == ixor (ishl x k) (ishl y k))
=
Vale.Def.TypesNative_s.reveal_ishl 32 x k;
Vale.Def.TypesNative_s.reveal_ishl 32 y k;
Vale.Def.TypesNative_s.reveal_ishl 32 (ixor x y) k;
Vale.Def.TypesNative_s.reveal_ixor 32 x y;
Vale.Def.TypesNative_s.reveal_ixor 32 (ishl x k) (ishl y k);
FStar.UInt.shift_left_logxor_lemma #32 x y k;
()
unfold let pow2_24 = 0x1000000
let nat24 = natN pow2_24
let nat32_xor_bytewise_1_helper1 (x0 x0':nat8) (x1 x1':nat24) (x x':nat32) : Lemma
(requires
x == x0 + 0x100 * x1 /\
x' == x0' + 0x100 * x1' /\
x * 0x1000000 % 0x100000000 == x' * 0x1000000 % 0x100000000
)
(ensures x0 == x0')
=
()
let nat32_xor_bytewise_2_helper1 (x0 x0' x1 x1':nat16) (x x':nat32) : Lemma
(requires
x == x0 + 0x10000 * x1 /\
x' == x0' + 0x10000 * x1' /\
x * 0x10000 % 0x100000000 == x' * 0x10000 % 0x100000000
)
(ensures x0 == x0')
=
()
let nat32_xor_bytewise_3_helper1 (x0 x0':nat24) (x1 x1':nat8) (x x':nat32) : Lemma
(requires
x == x0 + 0x1000000 * x1 /\
x' == x0' + 0x1000000 * x1' /\
x * 0x100 % 0x100000000 == x' * 0x100 % 0x100000000
)
(ensures x0 == x0')
=
()
let nat32_xor_bytewise_1_helper2 (x x':nat32) (t t':four nat8) : Lemma
(requires
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
x * 0x1000000 % 0x100000000 == x' * 0x1000000 % 0x100000000
)
(ensures t.lo0 == t'.lo0)
=
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
let t123 = t1 + 0x100 * t2 + 0x10000 * t3 in
let t123' = t1' + 0x100 * t2' + 0x10000 * t3' in
assert_norm (four_to_nat 8 t == four_to_nat_unfold 8 t );
assert_norm (four_to_nat 8 t' == four_to_nat_unfold 8 t');
nat32_xor_bytewise_1_helper1 t0 t0' t123 t123' x x';
()
let nat32_xor_bytewise_2_helper2 (x x':nat32) (t t':four nat8) : Lemma
(requires
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
x * 0x10000 % 0x100000000 == x' * 0x10000 % 0x100000000
) | false | false | Vale.AES.GCTR.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 30,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val nat32_xor_bytewise_2_helper2 (x x': nat32) (t t': four nat8)
: Lemma
(requires
x == four_to_nat 8 t /\ x' == four_to_nat 8 t' /\
x * 0x10000 % 0x100000000 == x' * 0x10000 % 0x100000000)
(ensures t.lo0 == t'.lo0 /\ t.lo1 == t'.lo1) | [] | Vale.AES.GCTR.nat32_xor_bytewise_2_helper2 | {
"file_name": "vale/code/crypto/aes/Vale.AES.GCTR.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} |
x: Vale.Def.Types_s.nat32 ->
x': Vale.Def.Types_s.nat32 ->
t: Vale.Def.Words_s.four Vale.Def.Types_s.nat8 ->
t': Vale.Def.Words_s.four Vale.Def.Types_s.nat8
-> FStar.Pervasives.Lemma
(requires
x == Vale.Def.Words.Four_s.four_to_nat 8 t /\ x' == Vale.Def.Words.Four_s.four_to_nat 8 t' /\
x * 0x10000 % 0x100000000 == x' * 0x10000 % 0x100000000)
(ensures Mkfour?.lo0 t == Mkfour?.lo0 t' /\ Mkfour?.lo1 t == Mkfour?.lo1 t') | {
"end_col": 4,
"end_line": 363,
"start_col": 3,
"start_line": 353
} |
FStar.Pervasives.Lemma | val nat32_xor_bytewise_3_helper3 (k k': nat32) (s s': four nat8)
: Lemma
(requires
k == four_to_nat 8 s /\ k' == four_to_nat 8 s' /\ s.lo0 == s'.lo0 /\ s.lo1 == s'.lo1 /\
s.hi2 == s'.hi2) (ensures k * 0x100 % 0x100000000 == k' * 0x100 % 0x100000000) | [
{
"abbrev": false,
"full_module": "FStar.Seq.Properties",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.Types_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Lib.Seqs",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCM_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCTR_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words.Four_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words.Seq_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Lib.Seqs",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCM_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCTR_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let nat32_xor_bytewise_3_helper3 (k k':nat32) (s s':four nat8) : Lemma
(requires
k == four_to_nat 8 s /\
k' == four_to_nat 8 s' /\
s.lo0 == s'.lo0 /\ s.lo1 == s'.lo1 /\ s.hi2 == s'.hi2
)
(ensures k * 0x100 % 0x100000000 == k' * 0x100 % 0x100000000)
=
let Mkfour _ _ _ _ = s in
let Mkfour _ _ _ _ = s' in
assert_norm (four_to_nat 8 s == four_to_nat_unfold 8 s );
assert_norm (four_to_nat 8 s' == four_to_nat_unfold 8 s');
() | val nat32_xor_bytewise_3_helper3 (k k': nat32) (s s': four nat8)
: Lemma
(requires
k == four_to_nat 8 s /\ k' == four_to_nat 8 s' /\ s.lo0 == s'.lo0 /\ s.lo1 == s'.lo1 /\
s.hi2 == s'.hi2) (ensures k * 0x100 % 0x100000000 == k' * 0x100 % 0x100000000)
let nat32_xor_bytewise_3_helper3 (k k': nat32) (s s': four nat8)
: Lemma
(requires
k == four_to_nat 8 s /\ k' == four_to_nat 8 s' /\ s.lo0 == s'.lo0 /\ s.lo1 == s'.lo1 /\
s.hi2 == s'.hi2) (ensures k * 0x100 % 0x100000000 == k' * 0x100 % 0x100000000) = | false | null | true | let Mkfour _ _ _ _ = s in
let Mkfour _ _ _ _ = s' in
assert_norm (four_to_nat 8 s == four_to_nat_unfold 8 s);
assert_norm (four_to_nat 8 s' == four_to_nat_unfold 8 s');
() | {
"checked_file": "Vale.AES.GCTR.fst.checked",
"dependencies": [
"Vale.Poly1305.Bitvectors.fsti.checked",
"Vale.Lib.Seqs.fsti.checked",
"Vale.Def.Words_s.fsti.checked",
"Vale.Def.Words.Seq_s.fsti.checked",
"Vale.Def.Words.Four_s.fsti.checked",
"Vale.Def.TypesNative_s.fst.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Opaque_s.fsti.checked",
"Vale.Arch.Types.fsti.checked",
"Vale.AES.Types_helpers.fsti.checked",
"Vale.AES.GCTR_s.fst.checked",
"Vale.AES.GCM_helpers.fsti.checked",
"Vale.AES.AES_s.fst.checked",
"prims.fst.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.Properties.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Classical.fsti.checked",
"FStar.Calc.fsti.checked"
],
"interface_file": true,
"source_file": "Vale.AES.GCTR.fst"
} | [
"lemma"
] | [
"Vale.Def.Types_s.nat32",
"Vale.Def.Words_s.four",
"Vale.Def.Types_s.nat8",
"Prims.unit",
"FStar.Pervasives.assert_norm",
"Prims.eq2",
"Vale.Def.Words_s.natN",
"Prims.pow2",
"FStar.Mul.op_Star",
"Vale.Def.Words.Four_s.four_to_nat",
"Vale.Def.Words.Four_s.four_to_nat_unfold",
"Vale.Def.Words_s.nat8",
"Prims.l_and",
"Vale.Def.Words_s.pow2_32",
"Vale.Def.Words_s.__proj__Mkfour__item__lo0",
"Vale.Def.Words_s.__proj__Mkfour__item__lo1",
"Vale.Def.Words_s.__proj__Mkfour__item__hi2",
"Prims.squash",
"Prims.int",
"Prims.op_Modulus",
"Prims.Nil",
"FStar.Pervasives.pattern"
] | [] | module Vale.AES.GCTR
open Vale.Def.Opaque_s
open Vale.Def.Words_s
open Vale.Def.Words.Seq_s
open Vale.Def.Words.Four_s
open Vale.Def.Types_s
open Vale.Arch.Types
open FStar.Mul
open FStar.Seq
open Vale.AES.AES_s
open Vale.AES.GCTR_s
open Vale.AES.GCM_helpers
open FStar.Math.Lemmas
open Vale.Lib.Seqs
open Vale.AES.Types_helpers
#set-options "--z3rlimit 20 --max_fuel 1 --max_ifuel 0"
let lemma_counter_init x low64 low8 =
Vale.Poly1305.Bitvectors.lemma_bytes_and_mod1 low64;
Vale.Def.TypesNative_s.reveal_iand 64 low64 0xff;
assert (low8 == low64 % 256);
lo64_reveal ();
assert_norm (pow2_norm 32 == pow2_32); // OBSERVE
assert (low64 == x.lo0 + x.lo1 * pow2_32); // OBSERVE
assert (low64 % 256 == x.lo0 % 256);
()
let gctr_encrypt_block_offset (icb_BE:quad32) (plain_LE:quad32) (alg:algorithm) (key:seq nat32) (i:int) =
()
let gctr_encrypt_empty (icb_BE:quad32) (plain_LE cipher_LE:seq quad32) (alg:algorithm) (key:seq nat32) =
reveal_opaque (`%le_bytes_to_seq_quad32) le_bytes_to_seq_quad32;
gctr_encrypt_LE_reveal ();
let plain = slice (le_seq_quad32_to_bytes plain_LE) 0 0 in
let cipher = slice (le_seq_quad32_to_bytes cipher_LE) 0 0 in
assert (plain == empty);
assert (cipher == empty);
assert (length plain == 0);
assert (make_gctr_plain_LE plain == empty);
let num_extra = (length (make_gctr_plain_LE plain)) % 16 in
assert (num_extra == 0);
let plain_quads_LE = le_bytes_to_seq_quad32 plain in
let cipher_quads_LE = gctr_encrypt_recursive icb_BE plain_quads_LE alg key 0 in
assert (equal plain_quads_LE empty); // OBSERVE
assert (plain_quads_LE == empty);
assert (cipher_quads_LE == empty);
assert (equal (le_seq_quad32_to_bytes cipher_quads_LE) empty); // OBSERVEs
()
let gctr_partial_opaque_init alg plain cipher key icb =
gctr_partial_reveal ();
()
#restart-solver
let lemma_gctr_partial_append alg b1 b2 p1 c1 p2 c2 key icb1 icb2 =
gctr_partial_reveal ();
()
let gctr_partial_opaque_ignores_postfix alg bound plain plain' cipher cipher' key icb =
gctr_partial_reveal ();
// OBSERVE:
assert (forall i . 0 <= i /\ i < bound ==> index plain i == index (slice plain 0 bound) i);
assert (forall i . 0 <= i /\ i < bound ==> index plain' i == index (slice plain' 0 bound) i);
assert (forall i . 0 <= i /\ i < bound ==> index cipher i == index (slice cipher 0 bound) i);
assert (forall i . 0 <= i /\ i < bound ==> index cipher' i == index (slice cipher' 0 bound) i);
()
let gctr_partial_extend6 (alg:algorithm) (bound:nat) (plain cipher:seq quad32) (key:seq nat32) (icb:quad32)
=
gctr_partial_reveal ();
()
(*
let rec seq_map_i_indexed' (#a:Type) (#b:Type) (f:int->a->b) (s:seq a) (i:int) :
Tot (s':seq b { length s' == length s /\
(forall j . {:pattern index s' j} 0 <= j /\ j < length s ==> index s' j == f (i + j) (index s j))
})
(decreases (length s))
=
if length s = 0 then empty
else cons (f i (head s)) (seq_map_i_indexed f (tail s) (i + 1))
let rec test (icb_BE:quad32) (plain_LE:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (i:int) :
Lemma (ensures
(let gctr_encrypt_block_curried (j:int) (p:quad32) = gctr_encrypt_block icb_BE p alg key j in
gctr_encrypt_recursive icb_BE plain_LE alg key i == seq_map_i_indexed' gctr_encrypt_block_curried plain_LE i))
(decreases (length plain_LE))
=
let gctr_encrypt_block_curried (j:int) (p:quad32) = gctr_encrypt_block icb_BE p alg key j in
let g = gctr_encrypt_recursive icb_BE plain_LE alg key i in
let s = seq_map_i_indexed' gctr_encrypt_block_curried plain_LE i in
if length plain_LE = 0 then (
assert(equal (g) (s));
()
) else (
test icb_BE (tail plain_LE) alg key (i+1);
assert (gctr_encrypt_recursive icb_BE (tail plain_LE) alg key (i+1) == seq_map_i_indexed' gctr_encrypt_block_curried (tail plain_LE) (i+1))
)
*)
let rec gctr_encrypt_recursive_length (icb:quad32) (plain:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (i:int) : Lemma
(requires True)
(ensures length (gctr_encrypt_recursive icb plain alg key i) == length plain)
(decreases %[length plain])
[SMTPat (length (gctr_encrypt_recursive icb plain alg key i))]
=
if length plain = 0 then ()
else gctr_encrypt_recursive_length icb (tail plain) alg key (i + 1)
#reset-options "--z3rlimit 40"
let gctr_encrypt_length (icb_BE:quad32) (plain:gctr_plain_LE)
(alg:algorithm) (key:aes_key_LE alg) :
Lemma(length (gctr_encrypt_LE icb_BE plain alg key) == length plain)
[SMTPat (length (gctr_encrypt_LE icb_BE plain alg key))]
=
reveal_opaque (`%le_bytes_to_seq_quad32) le_bytes_to_seq_quad32;
gctr_encrypt_LE_reveal ();
let num_extra = (length plain) % 16 in
let result = gctr_encrypt_LE icb_BE plain alg key in
if num_extra = 0 then (
let plain_quads_LE = le_bytes_to_seq_quad32 plain in
gctr_encrypt_recursive_length icb_BE plain_quads_LE alg key 0
) else (
let full_bytes_len = (length plain) - num_extra in
let full_blocks, final_block = split plain full_bytes_len in
let full_quads_LE = le_bytes_to_seq_quad32 full_blocks in
let final_quad_LE = le_bytes_to_quad32 (pad_to_128_bits final_block) in
let cipher_quads_LE = gctr_encrypt_recursive icb_BE full_quads_LE alg key 0 in
let final_cipher_quad_LE = gctr_encrypt_block icb_BE final_quad_LE alg key (full_bytes_len / 16) in
let cipher_bytes_full_LE = le_seq_quad32_to_bytes cipher_quads_LE in
let final_cipher_bytes_LE = slice (le_quad32_to_bytes final_cipher_quad_LE) 0 num_extra in
gctr_encrypt_recursive_length icb_BE full_quads_LE alg key 0;
assert (length result == length cipher_bytes_full_LE + length final_cipher_bytes_LE);
assert (length cipher_quads_LE == length full_quads_LE);
assert (length cipher_bytes_full_LE == 16 * length cipher_quads_LE);
assert (16 * length full_quads_LE == length full_blocks);
assert (length cipher_bytes_full_LE == length full_blocks);
()
)
#reset-options
let rec gctr_indexed_helper (icb:quad32) (plain:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (i:int) : Lemma
(requires True)
(ensures (let cipher = gctr_encrypt_recursive icb plain alg key i in
length cipher == length plain /\
(forall j . {:pattern index cipher j} 0 <= j /\ j < length plain ==>
index cipher j == quad32_xor (index plain j) (aes_encrypt_BE alg key (inc32 icb (i + j)) ))))
(decreases %[length plain])
=
if length plain = 0 then ()
else
let tl = tail plain in
let cipher = gctr_encrypt_recursive icb plain alg key i in
let r_cipher = gctr_encrypt_recursive icb tl alg key (i+1) in
let helper (j:int) :
Lemma ((0 <= j /\ j < length plain) ==> (index cipher j == quad32_xor (index plain j) (aes_encrypt_BE alg key (inc32 icb (i + j)) )))
=
aes_encrypt_LE_reveal ();
if 0 < j && j < length plain then (
gctr_indexed_helper icb tl alg key (i+1);
assert(index r_cipher (j-1) == quad32_xor (index tl (j-1)) (aes_encrypt_BE alg key (inc32 icb (i + 1 + j - 1)) )) // OBSERVE
) else ()
in
FStar.Classical.forall_intro helper
let gctr_indexed (icb:quad32) (plain:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (cipher:seq quad32) : Lemma
(requires length cipher == length plain /\
(forall i . {:pattern index cipher i} 0 <= i /\ i < length cipher ==>
index cipher i == quad32_xor (index plain i) (aes_encrypt_BE alg key (inc32 icb i) )))
(ensures cipher == gctr_encrypt_recursive icb plain alg key 0)
=
gctr_indexed_helper icb plain alg key 0;
let c = gctr_encrypt_recursive icb plain alg key 0 in
assert(equal cipher c) // OBSERVE: Invoke extensionality lemmas
let gctr_partial_completed (alg:algorithm) (plain cipher:seq quad32) (key:seq nat32) (icb:quad32) =
gctr_indexed icb plain alg key cipher;
()
let gctr_partial_opaque_completed (alg:algorithm) (plain cipher:seq quad32) (key:seq nat32) (icb:quad32) : Lemma
(requires
is_aes_key_LE alg key /\
length plain == length cipher /\
length plain < pow2_32 /\
gctr_partial alg (length cipher) plain cipher key icb
)
(ensures cipher == gctr_encrypt_recursive icb plain alg key 0)
=
gctr_partial_reveal ();
gctr_partial_completed alg plain cipher key icb
let gctr_partial_to_full_basic (icb_BE:quad32) (plain:seq quad32) (alg:algorithm) (key:seq nat32) (cipher:seq quad32) =
gctr_encrypt_LE_reveal ();
let p = le_seq_quad32_to_bytes plain in
assert (length p % 16 == 0);
let plain_quads_LE = le_bytes_to_seq_quad32 p in
let cipher_quads_LE = gctr_encrypt_recursive icb_BE plain_quads_LE alg key 0 in
let cipher_bytes = le_seq_quad32_to_bytes cipher_quads_LE in
le_bytes_to_seq_quad32_to_bytes plain;
()
(*
Want to show that:
slice (le_seq_quad32_to_bytes (buffer128_as_seq(mem, out_b))) 0 num_bytes
==
gctr_encrypt_LE icb_BE (slice (le_seq_quad32_to_bytes (buffer128_as_seq(mem, in_b))) 0 num_bytes) ...
We know that
slice (buffer128_as_seq(mem, out_b) 0 num_blocks
==
gctr_encrypt_recursive icb_BE (slice buffer128_as_seq(mem, in_b) 0 num_blocks) ...
And we know that:
get_mem out_b num_blocks
==
gctr_encrypt_block(icb_BE, (get_mem inb num_blocks), alg, key, num_blocks);
Internally gctr_encrypt_LE will compute:
full_blocks, final_block = split (slice (le_seq_quad32_to_bytes (buffer128_as_seq(mem, in_b))) 0 num_bytes) (num_blocks * 16)
We'd like to show that
Step1: le_bytes_to_seq_quad32 full_blocks == slice buffer128_as_seq(mem, in_b) 0 num_blocks
and
Step2: final_block == slice (le_quad32_to_bytes (get_mem inb num_blocks)) 0 num_extra
Then we need to break down the byte-level effects of gctr_encrypt_block to show that even though the
padded version of final_block differs from (get_mem inb num_blocks), after we slice it at the end,
we end up with the same value
*)
let step1 (p:seq quad32) (num_bytes:nat{ num_bytes < 16 * length p }) : Lemma
(let num_extra = num_bytes % 16 in
let num_blocks = num_bytes / 16 in
let full_blocks, final_block = split (slice (le_seq_quad32_to_bytes p) 0 num_bytes) (num_blocks * 16) in
let full_quads_LE = le_bytes_to_seq_quad32 full_blocks in
let p_prefix = slice p 0 num_blocks in
p_prefix == full_quads_LE)
=
let num_extra = num_bytes % 16 in
let num_blocks = num_bytes / 16 in
let full_blocks, final_block = split (slice (le_seq_quad32_to_bytes p) 0 num_bytes) (num_blocks * 16) in
let full_quads_LE = le_bytes_to_seq_quad32 full_blocks in
let p_prefix = slice p 0 num_blocks in
assert (length full_blocks == num_blocks * 16);
assert (full_blocks == slice (slice (le_seq_quad32_to_bytes p) 0 num_bytes) 0 (num_blocks * 16));
assert (full_blocks == slice (le_seq_quad32_to_bytes p) 0 (num_blocks * 16));
slice_commutes_le_seq_quad32_to_bytes0 p num_blocks;
assert (full_blocks == le_seq_quad32_to_bytes (slice p 0 num_blocks));
le_bytes_to_seq_quad32_to_bytes (slice p 0 num_blocks);
assert (full_quads_LE == (slice p 0 num_blocks));
()
#reset-options "--smtencoding.elim_box true --z3rlimit 30"
let lemma_slice_orig_index (#a:Type) (s s':seq a) (m n:nat) : Lemma
(requires length s == length s' /\ m <= n /\ n <= length s /\ slice s m n == slice s' m n)
(ensures (forall (i:int).{:pattern (index s i) \/ (index s' i)} m <= i /\ i < n ==> index s i == index s' i))
=
let aux (i:nat{m <= i /\ i < n}) : Lemma (index s i == index s' i) =
lemma_index_slice s m n (i - m);
lemma_index_slice s' m n (i - m)
in Classical.forall_intro aux
let lemma_ishl_32 (x:nat32) (k:nat) : Lemma
(ensures ishl #pow2_32 x k == x * pow2 k % pow2_32)
=
Vale.Def.TypesNative_s.reveal_ishl 32 x k;
FStar.UInt.shift_left_value_lemma #32 x k;
()
let lemma_ishl_ixor_32 (x y:nat32) (k:nat) : Lemma
(ensures ishl #pow2_32 (ixor x y) k == ixor (ishl x k) (ishl y k))
=
Vale.Def.TypesNative_s.reveal_ishl 32 x k;
Vale.Def.TypesNative_s.reveal_ishl 32 y k;
Vale.Def.TypesNative_s.reveal_ishl 32 (ixor x y) k;
Vale.Def.TypesNative_s.reveal_ixor 32 x y;
Vale.Def.TypesNative_s.reveal_ixor 32 (ishl x k) (ishl y k);
FStar.UInt.shift_left_logxor_lemma #32 x y k;
()
unfold let pow2_24 = 0x1000000
let nat24 = natN pow2_24
let nat32_xor_bytewise_1_helper1 (x0 x0':nat8) (x1 x1':nat24) (x x':nat32) : Lemma
(requires
x == x0 + 0x100 * x1 /\
x' == x0' + 0x100 * x1' /\
x * 0x1000000 % 0x100000000 == x' * 0x1000000 % 0x100000000
)
(ensures x0 == x0')
=
()
let nat32_xor_bytewise_2_helper1 (x0 x0' x1 x1':nat16) (x x':nat32) : Lemma
(requires
x == x0 + 0x10000 * x1 /\
x' == x0' + 0x10000 * x1' /\
x * 0x10000 % 0x100000000 == x' * 0x10000 % 0x100000000
)
(ensures x0 == x0')
=
()
let nat32_xor_bytewise_3_helper1 (x0 x0':nat24) (x1 x1':nat8) (x x':nat32) : Lemma
(requires
x == x0 + 0x1000000 * x1 /\
x' == x0' + 0x1000000 * x1' /\
x * 0x100 % 0x100000000 == x' * 0x100 % 0x100000000
)
(ensures x0 == x0')
=
()
let nat32_xor_bytewise_1_helper2 (x x':nat32) (t t':four nat8) : Lemma
(requires
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
x * 0x1000000 % 0x100000000 == x' * 0x1000000 % 0x100000000
)
(ensures t.lo0 == t'.lo0)
=
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
let t123 = t1 + 0x100 * t2 + 0x10000 * t3 in
let t123' = t1' + 0x100 * t2' + 0x10000 * t3' in
assert_norm (four_to_nat 8 t == four_to_nat_unfold 8 t );
assert_norm (four_to_nat 8 t' == four_to_nat_unfold 8 t');
nat32_xor_bytewise_1_helper1 t0 t0' t123 t123' x x';
()
let nat32_xor_bytewise_2_helper2 (x x':nat32) (t t':four nat8) : Lemma
(requires
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
x * 0x10000 % 0x100000000 == x' * 0x10000 % 0x100000000
)
(ensures t.lo0 == t'.lo0 /\ t.lo1 == t'.lo1)
=
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
let t01 = t0 + 0x100 * t1 in
let t23 = t2 + 0x100 * t3 in
let t01' = t0' + 0x100 * t1' in
let t23' = t2' + 0x100 * t3' in
assert_norm (four_to_nat 8 t == four_to_nat_unfold 8 t );
assert_norm (four_to_nat 8 t' == four_to_nat_unfold 8 t');
nat32_xor_bytewise_2_helper1 t01 t01' t23 t23' x x';
()
let nat32_xor_bytewise_3_helper2 (x x':nat32) (t t':four nat8) : Lemma
(requires
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
x * 0x100 % 0x100000000 == x' * 0x100 % 0x100000000
)
(ensures t.lo0 == t'.lo0 /\ t.lo1 == t'.lo1 /\ t.hi2 == t'.hi2)
=
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
let t012 = t0 + 0x100 * t1 + 0x10000 * t2 in
let t012' = t0' + 0x100 * t1' + 0x10000 * t2' in
assert_norm (four_to_nat 8 t == four_to_nat_unfold 8 t );
assert_norm (four_to_nat 8 t' == four_to_nat_unfold 8 t');
nat32_xor_bytewise_3_helper1 t012 t012' t3 t3' x x';
()
let nat32_xor_bytewise_1_helper3 (k k':nat32) (s s':four nat8) : Lemma
(requires
k == four_to_nat 8 s /\
k' == four_to_nat 8 s' /\
s.lo0 == s'.lo0
)
(ensures k * 0x1000000 % 0x100000000 == k' * 0x1000000 % 0x100000000)
=
let Mkfour _ _ _ _ = s in
let Mkfour _ _ _ _ = s' in
assert_norm (four_to_nat 8 s == four_to_nat_unfold 8 s );
assert_norm (four_to_nat 8 s' == four_to_nat_unfold 8 s');
()
let nat32_xor_bytewise_2_helper3 (k k':nat32) (s s':four nat8) : Lemma
(requires
k == four_to_nat 8 s /\
k' == four_to_nat 8 s' /\
s.lo0 == s'.lo0 /\ s.lo1 == s'.lo1
)
(ensures k * 0x10000 % 0x100000000 == k' * 0x10000 % 0x100000000)
=
let Mkfour _ _ _ _ = s in
let Mkfour _ _ _ _ = s' in
assert_norm (four_to_nat 8 s == four_to_nat_unfold 8 s );
assert_norm (four_to_nat 8 s' == four_to_nat_unfold 8 s');
()
let nat32_xor_bytewise_3_helper3 (k k':nat32) (s s':four nat8) : Lemma
(requires
k == four_to_nat 8 s /\
k' == four_to_nat 8 s' /\
s.lo0 == s'.lo0 /\ s.lo1 == s'.lo1 /\ s.hi2 == s'.hi2
) | false | false | Vale.AES.GCTR.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 30,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val nat32_xor_bytewise_3_helper3 (k k': nat32) (s s': four nat8)
: Lemma
(requires
k == four_to_nat 8 s /\ k' == four_to_nat 8 s' /\ s.lo0 == s'.lo0 /\ s.lo1 == s'.lo1 /\
s.hi2 == s'.hi2) (ensures k * 0x100 % 0x100000000 == k' * 0x100 % 0x100000000) | [] | Vale.AES.GCTR.nat32_xor_bytewise_3_helper3 | {
"file_name": "vale/code/crypto/aes/Vale.AES.GCTR.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} |
k: Vale.Def.Types_s.nat32 ->
k': Vale.Def.Types_s.nat32 ->
s: Vale.Def.Words_s.four Vale.Def.Types_s.nat8 ->
s': Vale.Def.Words_s.four Vale.Def.Types_s.nat8
-> FStar.Pervasives.Lemma
(requires
k == Vale.Def.Words.Four_s.four_to_nat 8 s /\ k' == Vale.Def.Words.Four_s.four_to_nat 8 s' /\
Mkfour?.lo0 s == Mkfour?.lo0 s' /\ Mkfour?.lo1 s == Mkfour?.lo1 s' /\
Mkfour?.hi2 s == Mkfour?.hi2 s')
(ensures k * 0x100 % 0x100000000 == k' * 0x100 % 0x100000000) | {
"end_col": 4,
"end_line": 422,
"start_col": 3,
"start_line": 417
} |
FStar.Pervasives.Lemma | val step2
(s: seq nat8 {0 < length s /\ length s < 16})
(q icb_BE: quad32)
(alg: algorithm)
(key: aes_key_LE alg)
(i: int)
: Lemma
(let q_bytes = le_quad32_to_bytes q in
let q_bytes_prefix = slice q_bytes 0 (length s) in
let q_cipher = gctr_encrypt_block icb_BE q alg key i in
let q_cipher_bytes = slice (le_quad32_to_bytes q_cipher) 0 (length s) in
let s_quad = le_bytes_to_quad32 (pad_to_128_bits s) in
let s_cipher = gctr_encrypt_block icb_BE s_quad alg key i in
let s_cipher_bytes = slice (le_quad32_to_bytes s_cipher) 0 (length s) in
s == q_bytes_prefix ==> s_cipher_bytes == q_cipher_bytes) | [
{
"abbrev": false,
"full_module": "FStar.Seq.Properties",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.Types_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Lib.Seqs",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCM_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCTR_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words.Four_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words.Seq_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Lib.Seqs",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCM_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCTR_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let step2 (s:seq nat8 { 0 < length s /\ length s < 16 }) (q:quad32) (icb_BE:quad32) (alg:algorithm) (key:aes_key_LE alg) (i:int):
Lemma(let q_bytes = le_quad32_to_bytes q in
let q_bytes_prefix = slice q_bytes 0 (length s) in
let q_cipher = gctr_encrypt_block icb_BE q alg key i in
let q_cipher_bytes = slice (le_quad32_to_bytes q_cipher) 0 (length s) in
let s_quad = le_bytes_to_quad32 (pad_to_128_bits s) in
let s_cipher = gctr_encrypt_block icb_BE s_quad alg key i in
let s_cipher_bytes = slice (le_quad32_to_bytes s_cipher) 0 (length s) in
s == q_bytes_prefix ==> s_cipher_bytes == q_cipher_bytes)
=
let q_bytes = le_quad32_to_bytes q in
let q_bytes_prefix = slice q_bytes 0 (length s) in
let q_cipher = gctr_encrypt_block icb_BE q alg key i in
let q_cipher_bytes = slice (le_quad32_to_bytes q_cipher) 0 (length s) in
let s_quad = le_bytes_to_quad32 (pad_to_128_bits s) in
let s_cipher = gctr_encrypt_block icb_BE s_quad alg key i in
let s_cipher_bytes = slice (le_quad32_to_bytes s_cipher) 0 (length s) in
let enc_ctr = aes_encrypt_LE alg key (reverse_bytes_quad32 (inc32 icb_BE i)) in
let icb_LE = reverse_bytes_quad32 (inc32 icb_BE i) in
if s = q_bytes_prefix then (
// s_cipher_bytes = slice (le_quad32_to_bytes s_cipher) 0 (length s)
// = slice (le_quad32_to_bytes (gctr_encrypt_block icb_BE s_quad alg key i)) 0 (length s)
// = slice (le_quad32_to_bytes (gctr_encrypt_block icb_BE (le_bytes_to_quad32 (pad_to_128_bits s)) alg key i)) 0 (length s)
// q_cipher_bytes = gctr_encrypt_block icb_BE q alg key i
le_quad32_to_bytes_to_quad32 (pad_to_128_bits s);
slice_pad_to_128_bits s;
quad32_xor_bytewise q (le_bytes_to_quad32 (pad_to_128_bits s)) (aes_encrypt_LE alg key icb_LE) (length s);
//assert (equal s_cipher_bytes q_cipher_bytes);
()
) else
();
() | val step2
(s: seq nat8 {0 < length s /\ length s < 16})
(q icb_BE: quad32)
(alg: algorithm)
(key: aes_key_LE alg)
(i: int)
: Lemma
(let q_bytes = le_quad32_to_bytes q in
let q_bytes_prefix = slice q_bytes 0 (length s) in
let q_cipher = gctr_encrypt_block icb_BE q alg key i in
let q_cipher_bytes = slice (le_quad32_to_bytes q_cipher) 0 (length s) in
let s_quad = le_bytes_to_quad32 (pad_to_128_bits s) in
let s_cipher = gctr_encrypt_block icb_BE s_quad alg key i in
let s_cipher_bytes = slice (le_quad32_to_bytes s_cipher) 0 (length s) in
s == q_bytes_prefix ==> s_cipher_bytes == q_cipher_bytes)
let step2
(s: seq nat8 {0 < length s /\ length s < 16})
(q icb_BE: quad32)
(alg: algorithm)
(key: aes_key_LE alg)
(i: int)
: Lemma
(let q_bytes = le_quad32_to_bytes q in
let q_bytes_prefix = slice q_bytes 0 (length s) in
let q_cipher = gctr_encrypt_block icb_BE q alg key i in
let q_cipher_bytes = slice (le_quad32_to_bytes q_cipher) 0 (length s) in
let s_quad = le_bytes_to_quad32 (pad_to_128_bits s) in
let s_cipher = gctr_encrypt_block icb_BE s_quad alg key i in
let s_cipher_bytes = slice (le_quad32_to_bytes s_cipher) 0 (length s) in
s == q_bytes_prefix ==> s_cipher_bytes == q_cipher_bytes) = | false | null | true | let q_bytes = le_quad32_to_bytes q in
let q_bytes_prefix = slice q_bytes 0 (length s) in
let q_cipher = gctr_encrypt_block icb_BE q alg key i in
let q_cipher_bytes = slice (le_quad32_to_bytes q_cipher) 0 (length s) in
let s_quad = le_bytes_to_quad32 (pad_to_128_bits s) in
let s_cipher = gctr_encrypt_block icb_BE s_quad alg key i in
let s_cipher_bytes = slice (le_quad32_to_bytes s_cipher) 0 (length s) in
let enc_ctr = aes_encrypt_LE alg key (reverse_bytes_quad32 (inc32 icb_BE i)) in
let icb_LE = reverse_bytes_quad32 (inc32 icb_BE i) in
if s = q_bytes_prefix
then
(le_quad32_to_bytes_to_quad32 (pad_to_128_bits s);
slice_pad_to_128_bits s;
quad32_xor_bytewise q
(le_bytes_to_quad32 (pad_to_128_bits s))
(aes_encrypt_LE alg key icb_LE)
(length s);
());
() | {
"checked_file": "Vale.AES.GCTR.fst.checked",
"dependencies": [
"Vale.Poly1305.Bitvectors.fsti.checked",
"Vale.Lib.Seqs.fsti.checked",
"Vale.Def.Words_s.fsti.checked",
"Vale.Def.Words.Seq_s.fsti.checked",
"Vale.Def.Words.Four_s.fsti.checked",
"Vale.Def.TypesNative_s.fst.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Opaque_s.fsti.checked",
"Vale.Arch.Types.fsti.checked",
"Vale.AES.Types_helpers.fsti.checked",
"Vale.AES.GCTR_s.fst.checked",
"Vale.AES.GCM_helpers.fsti.checked",
"Vale.AES.AES_s.fst.checked",
"prims.fst.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.Properties.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Classical.fsti.checked",
"FStar.Calc.fsti.checked"
],
"interface_file": true,
"source_file": "Vale.AES.GCTR.fst"
} | [
"lemma"
] | [
"FStar.Seq.Base.seq",
"Vale.Def.Types_s.nat8",
"Prims.l_and",
"Prims.b2t",
"Prims.op_LessThan",
"FStar.Seq.Base.length",
"Vale.Def.Types_s.quad32",
"Vale.AES.AES_common_s.algorithm",
"Vale.AES.AES_s.aes_key_LE",
"Prims.int",
"Prims.unit",
"Prims.op_Equality",
"Vale.AES.GCTR.quad32_xor_bytewise",
"Vale.Def.Types_s.le_bytes_to_quad32",
"Vale.AES.GCTR_s.pad_to_128_bits",
"Vale.AES.AES_s.aes_encrypt_LE",
"Vale.AES.GCTR.slice_pad_to_128_bits",
"Vale.Arch.Types.le_quad32_to_bytes_to_quad32",
"Prims.bool",
"Vale.Def.Types_s.reverse_bytes_quad32",
"Vale.AES.GCTR_s.inc32",
"Vale.Def.Words_s.nat8",
"FStar.Seq.Base.slice",
"Vale.Def.Types_s.le_quad32_to_bytes",
"Vale.AES.GCTR_s.gctr_encrypt_block",
"Prims.l_True",
"Prims.squash",
"Prims.l_imp",
"Prims.eq2",
"Prims.Nil",
"FStar.Pervasives.pattern"
] | [] | module Vale.AES.GCTR
open Vale.Def.Opaque_s
open Vale.Def.Words_s
open Vale.Def.Words.Seq_s
open Vale.Def.Words.Four_s
open Vale.Def.Types_s
open Vale.Arch.Types
open FStar.Mul
open FStar.Seq
open Vale.AES.AES_s
open Vale.AES.GCTR_s
open Vale.AES.GCM_helpers
open FStar.Math.Lemmas
open Vale.Lib.Seqs
open Vale.AES.Types_helpers
#set-options "--z3rlimit 20 --max_fuel 1 --max_ifuel 0"
let lemma_counter_init x low64 low8 =
Vale.Poly1305.Bitvectors.lemma_bytes_and_mod1 low64;
Vale.Def.TypesNative_s.reveal_iand 64 low64 0xff;
assert (low8 == low64 % 256);
lo64_reveal ();
assert_norm (pow2_norm 32 == pow2_32); // OBSERVE
assert (low64 == x.lo0 + x.lo1 * pow2_32); // OBSERVE
assert (low64 % 256 == x.lo0 % 256);
()
let gctr_encrypt_block_offset (icb_BE:quad32) (plain_LE:quad32) (alg:algorithm) (key:seq nat32) (i:int) =
()
let gctr_encrypt_empty (icb_BE:quad32) (plain_LE cipher_LE:seq quad32) (alg:algorithm) (key:seq nat32) =
reveal_opaque (`%le_bytes_to_seq_quad32) le_bytes_to_seq_quad32;
gctr_encrypt_LE_reveal ();
let plain = slice (le_seq_quad32_to_bytes plain_LE) 0 0 in
let cipher = slice (le_seq_quad32_to_bytes cipher_LE) 0 0 in
assert (plain == empty);
assert (cipher == empty);
assert (length plain == 0);
assert (make_gctr_plain_LE plain == empty);
let num_extra = (length (make_gctr_plain_LE plain)) % 16 in
assert (num_extra == 0);
let plain_quads_LE = le_bytes_to_seq_quad32 plain in
let cipher_quads_LE = gctr_encrypt_recursive icb_BE plain_quads_LE alg key 0 in
assert (equal plain_quads_LE empty); // OBSERVE
assert (plain_quads_LE == empty);
assert (cipher_quads_LE == empty);
assert (equal (le_seq_quad32_to_bytes cipher_quads_LE) empty); // OBSERVEs
()
let gctr_partial_opaque_init alg plain cipher key icb =
gctr_partial_reveal ();
()
#restart-solver
let lemma_gctr_partial_append alg b1 b2 p1 c1 p2 c2 key icb1 icb2 =
gctr_partial_reveal ();
()
let gctr_partial_opaque_ignores_postfix alg bound plain plain' cipher cipher' key icb =
gctr_partial_reveal ();
// OBSERVE:
assert (forall i . 0 <= i /\ i < bound ==> index plain i == index (slice plain 0 bound) i);
assert (forall i . 0 <= i /\ i < bound ==> index plain' i == index (slice plain' 0 bound) i);
assert (forall i . 0 <= i /\ i < bound ==> index cipher i == index (slice cipher 0 bound) i);
assert (forall i . 0 <= i /\ i < bound ==> index cipher' i == index (slice cipher' 0 bound) i);
()
let gctr_partial_extend6 (alg:algorithm) (bound:nat) (plain cipher:seq quad32) (key:seq nat32) (icb:quad32)
=
gctr_partial_reveal ();
()
(*
let rec seq_map_i_indexed' (#a:Type) (#b:Type) (f:int->a->b) (s:seq a) (i:int) :
Tot (s':seq b { length s' == length s /\
(forall j . {:pattern index s' j} 0 <= j /\ j < length s ==> index s' j == f (i + j) (index s j))
})
(decreases (length s))
=
if length s = 0 then empty
else cons (f i (head s)) (seq_map_i_indexed f (tail s) (i + 1))
let rec test (icb_BE:quad32) (plain_LE:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (i:int) :
Lemma (ensures
(let gctr_encrypt_block_curried (j:int) (p:quad32) = gctr_encrypt_block icb_BE p alg key j in
gctr_encrypt_recursive icb_BE plain_LE alg key i == seq_map_i_indexed' gctr_encrypt_block_curried plain_LE i))
(decreases (length plain_LE))
=
let gctr_encrypt_block_curried (j:int) (p:quad32) = gctr_encrypt_block icb_BE p alg key j in
let g = gctr_encrypt_recursive icb_BE plain_LE alg key i in
let s = seq_map_i_indexed' gctr_encrypt_block_curried plain_LE i in
if length plain_LE = 0 then (
assert(equal (g) (s));
()
) else (
test icb_BE (tail plain_LE) alg key (i+1);
assert (gctr_encrypt_recursive icb_BE (tail plain_LE) alg key (i+1) == seq_map_i_indexed' gctr_encrypt_block_curried (tail plain_LE) (i+1))
)
*)
let rec gctr_encrypt_recursive_length (icb:quad32) (plain:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (i:int) : Lemma
(requires True)
(ensures length (gctr_encrypt_recursive icb plain alg key i) == length plain)
(decreases %[length plain])
[SMTPat (length (gctr_encrypt_recursive icb plain alg key i))]
=
if length plain = 0 then ()
else gctr_encrypt_recursive_length icb (tail plain) alg key (i + 1)
#reset-options "--z3rlimit 40"
let gctr_encrypt_length (icb_BE:quad32) (plain:gctr_plain_LE)
(alg:algorithm) (key:aes_key_LE alg) :
Lemma(length (gctr_encrypt_LE icb_BE plain alg key) == length plain)
[SMTPat (length (gctr_encrypt_LE icb_BE plain alg key))]
=
reveal_opaque (`%le_bytes_to_seq_quad32) le_bytes_to_seq_quad32;
gctr_encrypt_LE_reveal ();
let num_extra = (length plain) % 16 in
let result = gctr_encrypt_LE icb_BE plain alg key in
if num_extra = 0 then (
let plain_quads_LE = le_bytes_to_seq_quad32 plain in
gctr_encrypt_recursive_length icb_BE plain_quads_LE alg key 0
) else (
let full_bytes_len = (length plain) - num_extra in
let full_blocks, final_block = split plain full_bytes_len in
let full_quads_LE = le_bytes_to_seq_quad32 full_blocks in
let final_quad_LE = le_bytes_to_quad32 (pad_to_128_bits final_block) in
let cipher_quads_LE = gctr_encrypt_recursive icb_BE full_quads_LE alg key 0 in
let final_cipher_quad_LE = gctr_encrypt_block icb_BE final_quad_LE alg key (full_bytes_len / 16) in
let cipher_bytes_full_LE = le_seq_quad32_to_bytes cipher_quads_LE in
let final_cipher_bytes_LE = slice (le_quad32_to_bytes final_cipher_quad_LE) 0 num_extra in
gctr_encrypt_recursive_length icb_BE full_quads_LE alg key 0;
assert (length result == length cipher_bytes_full_LE + length final_cipher_bytes_LE);
assert (length cipher_quads_LE == length full_quads_LE);
assert (length cipher_bytes_full_LE == 16 * length cipher_quads_LE);
assert (16 * length full_quads_LE == length full_blocks);
assert (length cipher_bytes_full_LE == length full_blocks);
()
)
#reset-options
let rec gctr_indexed_helper (icb:quad32) (plain:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (i:int) : Lemma
(requires True)
(ensures (let cipher = gctr_encrypt_recursive icb plain alg key i in
length cipher == length plain /\
(forall j . {:pattern index cipher j} 0 <= j /\ j < length plain ==>
index cipher j == quad32_xor (index plain j) (aes_encrypt_BE alg key (inc32 icb (i + j)) ))))
(decreases %[length plain])
=
if length plain = 0 then ()
else
let tl = tail plain in
let cipher = gctr_encrypt_recursive icb plain alg key i in
let r_cipher = gctr_encrypt_recursive icb tl alg key (i+1) in
let helper (j:int) :
Lemma ((0 <= j /\ j < length plain) ==> (index cipher j == quad32_xor (index plain j) (aes_encrypt_BE alg key (inc32 icb (i + j)) )))
=
aes_encrypt_LE_reveal ();
if 0 < j && j < length plain then (
gctr_indexed_helper icb tl alg key (i+1);
assert(index r_cipher (j-1) == quad32_xor (index tl (j-1)) (aes_encrypt_BE alg key (inc32 icb (i + 1 + j - 1)) )) // OBSERVE
) else ()
in
FStar.Classical.forall_intro helper
let gctr_indexed (icb:quad32) (plain:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (cipher:seq quad32) : Lemma
(requires length cipher == length plain /\
(forall i . {:pattern index cipher i} 0 <= i /\ i < length cipher ==>
index cipher i == quad32_xor (index plain i) (aes_encrypt_BE alg key (inc32 icb i) )))
(ensures cipher == gctr_encrypt_recursive icb plain alg key 0)
=
gctr_indexed_helper icb plain alg key 0;
let c = gctr_encrypt_recursive icb plain alg key 0 in
assert(equal cipher c) // OBSERVE: Invoke extensionality lemmas
let gctr_partial_completed (alg:algorithm) (plain cipher:seq quad32) (key:seq nat32) (icb:quad32) =
gctr_indexed icb plain alg key cipher;
()
let gctr_partial_opaque_completed (alg:algorithm) (plain cipher:seq quad32) (key:seq nat32) (icb:quad32) : Lemma
(requires
is_aes_key_LE alg key /\
length plain == length cipher /\
length plain < pow2_32 /\
gctr_partial alg (length cipher) plain cipher key icb
)
(ensures cipher == gctr_encrypt_recursive icb plain alg key 0)
=
gctr_partial_reveal ();
gctr_partial_completed alg plain cipher key icb
let gctr_partial_to_full_basic (icb_BE:quad32) (plain:seq quad32) (alg:algorithm) (key:seq nat32) (cipher:seq quad32) =
gctr_encrypt_LE_reveal ();
let p = le_seq_quad32_to_bytes plain in
assert (length p % 16 == 0);
let plain_quads_LE = le_bytes_to_seq_quad32 p in
let cipher_quads_LE = gctr_encrypt_recursive icb_BE plain_quads_LE alg key 0 in
let cipher_bytes = le_seq_quad32_to_bytes cipher_quads_LE in
le_bytes_to_seq_quad32_to_bytes plain;
()
(*
Want to show that:
slice (le_seq_quad32_to_bytes (buffer128_as_seq(mem, out_b))) 0 num_bytes
==
gctr_encrypt_LE icb_BE (slice (le_seq_quad32_to_bytes (buffer128_as_seq(mem, in_b))) 0 num_bytes) ...
We know that
slice (buffer128_as_seq(mem, out_b) 0 num_blocks
==
gctr_encrypt_recursive icb_BE (slice buffer128_as_seq(mem, in_b) 0 num_blocks) ...
And we know that:
get_mem out_b num_blocks
==
gctr_encrypt_block(icb_BE, (get_mem inb num_blocks), alg, key, num_blocks);
Internally gctr_encrypt_LE will compute:
full_blocks, final_block = split (slice (le_seq_quad32_to_bytes (buffer128_as_seq(mem, in_b))) 0 num_bytes) (num_blocks * 16)
We'd like to show that
Step1: le_bytes_to_seq_quad32 full_blocks == slice buffer128_as_seq(mem, in_b) 0 num_blocks
and
Step2: final_block == slice (le_quad32_to_bytes (get_mem inb num_blocks)) 0 num_extra
Then we need to break down the byte-level effects of gctr_encrypt_block to show that even though the
padded version of final_block differs from (get_mem inb num_blocks), after we slice it at the end,
we end up with the same value
*)
let step1 (p:seq quad32) (num_bytes:nat{ num_bytes < 16 * length p }) : Lemma
(let num_extra = num_bytes % 16 in
let num_blocks = num_bytes / 16 in
let full_blocks, final_block = split (slice (le_seq_quad32_to_bytes p) 0 num_bytes) (num_blocks * 16) in
let full_quads_LE = le_bytes_to_seq_quad32 full_blocks in
let p_prefix = slice p 0 num_blocks in
p_prefix == full_quads_LE)
=
let num_extra = num_bytes % 16 in
let num_blocks = num_bytes / 16 in
let full_blocks, final_block = split (slice (le_seq_quad32_to_bytes p) 0 num_bytes) (num_blocks * 16) in
let full_quads_LE = le_bytes_to_seq_quad32 full_blocks in
let p_prefix = slice p 0 num_blocks in
assert (length full_blocks == num_blocks * 16);
assert (full_blocks == slice (slice (le_seq_quad32_to_bytes p) 0 num_bytes) 0 (num_blocks * 16));
assert (full_blocks == slice (le_seq_quad32_to_bytes p) 0 (num_blocks * 16));
slice_commutes_le_seq_quad32_to_bytes0 p num_blocks;
assert (full_blocks == le_seq_quad32_to_bytes (slice p 0 num_blocks));
le_bytes_to_seq_quad32_to_bytes (slice p 0 num_blocks);
assert (full_quads_LE == (slice p 0 num_blocks));
()
#reset-options "--smtencoding.elim_box true --z3rlimit 30"
let lemma_slice_orig_index (#a:Type) (s s':seq a) (m n:nat) : Lemma
(requires length s == length s' /\ m <= n /\ n <= length s /\ slice s m n == slice s' m n)
(ensures (forall (i:int).{:pattern (index s i) \/ (index s' i)} m <= i /\ i < n ==> index s i == index s' i))
=
let aux (i:nat{m <= i /\ i < n}) : Lemma (index s i == index s' i) =
lemma_index_slice s m n (i - m);
lemma_index_slice s' m n (i - m)
in Classical.forall_intro aux
let lemma_ishl_32 (x:nat32) (k:nat) : Lemma
(ensures ishl #pow2_32 x k == x * pow2 k % pow2_32)
=
Vale.Def.TypesNative_s.reveal_ishl 32 x k;
FStar.UInt.shift_left_value_lemma #32 x k;
()
let lemma_ishl_ixor_32 (x y:nat32) (k:nat) : Lemma
(ensures ishl #pow2_32 (ixor x y) k == ixor (ishl x k) (ishl y k))
=
Vale.Def.TypesNative_s.reveal_ishl 32 x k;
Vale.Def.TypesNative_s.reveal_ishl 32 y k;
Vale.Def.TypesNative_s.reveal_ishl 32 (ixor x y) k;
Vale.Def.TypesNative_s.reveal_ixor 32 x y;
Vale.Def.TypesNative_s.reveal_ixor 32 (ishl x k) (ishl y k);
FStar.UInt.shift_left_logxor_lemma #32 x y k;
()
unfold let pow2_24 = 0x1000000
let nat24 = natN pow2_24
let nat32_xor_bytewise_1_helper1 (x0 x0':nat8) (x1 x1':nat24) (x x':nat32) : Lemma
(requires
x == x0 + 0x100 * x1 /\
x' == x0' + 0x100 * x1' /\
x * 0x1000000 % 0x100000000 == x' * 0x1000000 % 0x100000000
)
(ensures x0 == x0')
=
()
let nat32_xor_bytewise_2_helper1 (x0 x0' x1 x1':nat16) (x x':nat32) : Lemma
(requires
x == x0 + 0x10000 * x1 /\
x' == x0' + 0x10000 * x1' /\
x * 0x10000 % 0x100000000 == x' * 0x10000 % 0x100000000
)
(ensures x0 == x0')
=
()
let nat32_xor_bytewise_3_helper1 (x0 x0':nat24) (x1 x1':nat8) (x x':nat32) : Lemma
(requires
x == x0 + 0x1000000 * x1 /\
x' == x0' + 0x1000000 * x1' /\
x * 0x100 % 0x100000000 == x' * 0x100 % 0x100000000
)
(ensures x0 == x0')
=
()
let nat32_xor_bytewise_1_helper2 (x x':nat32) (t t':four nat8) : Lemma
(requires
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
x * 0x1000000 % 0x100000000 == x' * 0x1000000 % 0x100000000
)
(ensures t.lo0 == t'.lo0)
=
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
let t123 = t1 + 0x100 * t2 + 0x10000 * t3 in
let t123' = t1' + 0x100 * t2' + 0x10000 * t3' in
assert_norm (four_to_nat 8 t == four_to_nat_unfold 8 t );
assert_norm (four_to_nat 8 t' == four_to_nat_unfold 8 t');
nat32_xor_bytewise_1_helper1 t0 t0' t123 t123' x x';
()
let nat32_xor_bytewise_2_helper2 (x x':nat32) (t t':four nat8) : Lemma
(requires
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
x * 0x10000 % 0x100000000 == x' * 0x10000 % 0x100000000
)
(ensures t.lo0 == t'.lo0 /\ t.lo1 == t'.lo1)
=
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
let t01 = t0 + 0x100 * t1 in
let t23 = t2 + 0x100 * t3 in
let t01' = t0' + 0x100 * t1' in
let t23' = t2' + 0x100 * t3' in
assert_norm (four_to_nat 8 t == four_to_nat_unfold 8 t );
assert_norm (four_to_nat 8 t' == four_to_nat_unfold 8 t');
nat32_xor_bytewise_2_helper1 t01 t01' t23 t23' x x';
()
let nat32_xor_bytewise_3_helper2 (x x':nat32) (t t':four nat8) : Lemma
(requires
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
x * 0x100 % 0x100000000 == x' * 0x100 % 0x100000000
)
(ensures t.lo0 == t'.lo0 /\ t.lo1 == t'.lo1 /\ t.hi2 == t'.hi2)
=
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
let t012 = t0 + 0x100 * t1 + 0x10000 * t2 in
let t012' = t0' + 0x100 * t1' + 0x10000 * t2' in
assert_norm (four_to_nat 8 t == four_to_nat_unfold 8 t );
assert_norm (four_to_nat 8 t' == four_to_nat_unfold 8 t');
nat32_xor_bytewise_3_helper1 t012 t012' t3 t3' x x';
()
let nat32_xor_bytewise_1_helper3 (k k':nat32) (s s':four nat8) : Lemma
(requires
k == four_to_nat 8 s /\
k' == four_to_nat 8 s' /\
s.lo0 == s'.lo0
)
(ensures k * 0x1000000 % 0x100000000 == k' * 0x1000000 % 0x100000000)
=
let Mkfour _ _ _ _ = s in
let Mkfour _ _ _ _ = s' in
assert_norm (four_to_nat 8 s == four_to_nat_unfold 8 s );
assert_norm (four_to_nat 8 s' == four_to_nat_unfold 8 s');
()
let nat32_xor_bytewise_2_helper3 (k k':nat32) (s s':four nat8) : Lemma
(requires
k == four_to_nat 8 s /\
k' == four_to_nat 8 s' /\
s.lo0 == s'.lo0 /\ s.lo1 == s'.lo1
)
(ensures k * 0x10000 % 0x100000000 == k' * 0x10000 % 0x100000000)
=
let Mkfour _ _ _ _ = s in
let Mkfour _ _ _ _ = s' in
assert_norm (four_to_nat 8 s == four_to_nat_unfold 8 s );
assert_norm (four_to_nat 8 s' == four_to_nat_unfold 8 s');
()
let nat32_xor_bytewise_3_helper3 (k k':nat32) (s s':four nat8) : Lemma
(requires
k == four_to_nat 8 s /\
k' == four_to_nat 8 s' /\
s.lo0 == s'.lo0 /\ s.lo1 == s'.lo1 /\ s.hi2 == s'.hi2
)
(ensures k * 0x100 % 0x100000000 == k' * 0x100 % 0x100000000)
=
let Mkfour _ _ _ _ = s in
let Mkfour _ _ _ _ = s' in
assert_norm (four_to_nat 8 s == four_to_nat_unfold 8 s );
assert_norm (four_to_nat 8 s' == four_to_nat_unfold 8 s');
()
let nat32_xor_bytewise_1 (k k' x x' m:nat32) (s s' t t':four nat8) : Lemma
(requires
k == four_to_nat 8 s /\
k' == four_to_nat 8 s' /\
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
ixor k m == x /\
ixor k' m == x' /\
s.lo0 == s'.lo0
)
(ensures t.lo0 == t'.lo0)
=
let Mkfour s0 s1 s2 s3 = s in
let Mkfour s0' s1' s2' s3' = s' in
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
nat32_xor_bytewise_1_helper3 k k' s s';
lemma_ishl_32 k 24;
lemma_ishl_32 k' 24;
lemma_ishl_32 x 24;
lemma_ishl_32 x' 24;
lemma_ishl_ixor_32 k m 24;
lemma_ishl_ixor_32 k' m 24;
assert_norm (pow2 24 == pow2_24);
nat32_xor_bytewise_1_helper2 x x' t t';
()
let nat32_xor_bytewise_2 (k k' x x' m:nat32) (s s' t t':four nat8) : Lemma
(requires
k == four_to_nat 8 s /\
k' == four_to_nat 8 s' /\
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
ixor k m == x /\
ixor k' m == x' /\
s.lo0 == s'.lo0 /\ s.lo1 == s'.lo1
)
(ensures t.lo0 == t'.lo0 /\ t.lo1 == t'.lo1)
=
let Mkfour s0 s1 s2 s3 = s in
let Mkfour s0' s1' s2' s3' = s' in
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
nat32_xor_bytewise_2_helper3 k k' s s';
lemma_ishl_32 k 16;
lemma_ishl_32 k' 16;
lemma_ishl_32 x 16;
lemma_ishl_32 x' 16;
lemma_ishl_ixor_32 k m 16;
lemma_ishl_ixor_32 k' m 16;
// assert (ishl #pow2_32 k 16 == k * 0x10000 % 0x100000000);
// assert (ishl #pow2_32 k' 16 == k' * 0x10000 % 0x100000000);
// assert (ishl #pow2_32 x 16 == x * 0x10000 % 0x100000000);
// assert (ishl #pow2_32 x' 16 == x' * 0x10000 % 0x100000000);
// assert (ishl #pow2_32 x 16 == ixor (ishl k 16) (ishl m 16));
// assert (ishl #pow2_32 x' 16 == ixor (ishl k' 16) (ishl m 16));
// assert (x * 0x10000 % 0x100000000 == ixor (k * 0x10000 % 0x100000000) (ishl m 16));
// assert (x' * 0x10000 % 0x100000000 == ixor (k' * 0x10000 % 0x100000000) (ishl m 16));
nat32_xor_bytewise_2_helper2 x x' t t';
()
let nat32_xor_bytewise_3 (k k' x x' m:nat32) (s s' t t':four nat8) : Lemma
(requires
k == four_to_nat 8 s /\
k' == four_to_nat 8 s' /\
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
ixor k m == x /\
ixor k' m == x' /\
s.lo0 == s'.lo0 /\ s.lo1 == s'.lo1 /\ s.hi2 == s'.hi2
)
(ensures t.lo0 == t'.lo0 /\ t.lo1 == t'.lo1 /\ t.hi2 == t'.hi2)
=
let Mkfour s0 s1 s2 s3 = s in
let Mkfour s0' s1' s2' s3' = s' in
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
nat32_xor_bytewise_3_helper3 k k' s s';
lemma_ishl_32 k 8;
lemma_ishl_32 k' 8;
lemma_ishl_32 x 8;
lemma_ishl_32 x' 8;
lemma_ishl_ixor_32 k m 8;
lemma_ishl_ixor_32 k' m 8;
nat32_xor_bytewise_3_helper2 x x' t t';
()
#reset-options "--z3rlimit 50 --smtencoding.nl_arith_repr boxwrap --smtencoding.l_arith_repr boxwrap"
let nat32_xor_bytewise_4 (k k' x x' m:nat32) (s s' t t':four nat8) : Lemma
(requires
k == four_to_nat 8 s /\
k' == four_to_nat 8 s' /\
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
ixor k m == x /\
ixor k' m == x' /\
s == s'
)
(ensures t == t')
=
let Mkfour s0 s1 s2 s3 = s in
let Mkfour s0' s1' s2' s3' = s' in
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
assert_norm (four_to_nat 8 t' == four_to_nat_unfold 8 t');
assert_norm (four_to_nat 8 t == four_to_nat_unfold 8 t );
()
#reset-options
// Sequence-level wrapper over the nat32_xor_bytewise_1..4 case lemmas:
// if the first n bytes (n <= 4) of the little-endian byte views of k and k'
// agree, then the first n bytes of the byte views of k^m and k'^m agree.
// The conclusion is stated pointwise (index t i == index t' i) rather than
// as a slice equality, via lemma_slice_orig_index.
let nat32_xor_bytewise (k k' m:nat32) (s s' t t':seq4 nat8) (n:nat) : Lemma
  (requires
    n <= 4 /\
    k == four_to_nat 8 (seq_to_four_LE s) /\
    k' == four_to_nat 8 (seq_to_four_LE s') /\
    ixor k m == four_to_nat 8 (seq_to_four_LE t) /\
    ixor k' m == four_to_nat 8 (seq_to_four_LE t') /\
    equal (slice s 0 n) (slice s' 0 n)
  )
//  (ensures equal (slice t 0 n) (slice t' 0 n))
  (ensures (forall (i:nat).{:pattern (index t i) \/ (index t' i)} i < n ==> index t i == index t' i))
  =
  // Turn the slice-equality hypothesis into per-index facts about s and s'
  // so the preconditions of the case lemmas (on seq_to_four_LE fields) hold.
  assert (n > 0 ==> index (slice s 0 n) 0 == index (slice s' 0 n) 0);
  assert (n > 1 ==> index (slice s 0 n) 1 == index (slice s' 0 n) 1);
  assert (n > 2 ==> index (slice s 0 n) 2 == index (slice s' 0 n) 2);
  assert (n > 3 ==> index (slice s 0 n) 3 == index (slice s' 0 n) 3);
  let x = ixor k m in
  let x' = ixor k' m in
  // Dispatch on n; the n = 0 case is vacuous, so no lemma call is needed.
  if n = 1 then nat32_xor_bytewise_1 k k' x x' m (seq_to_four_LE s) (seq_to_four_LE s') (seq_to_four_LE t) (seq_to_four_LE t');
  if n = 2 then nat32_xor_bytewise_2 k k' x x' m (seq_to_four_LE s) (seq_to_four_LE s') (seq_to_four_LE t) (seq_to_four_LE t');
  if n = 3 then nat32_xor_bytewise_3 k k' x x' m (seq_to_four_LE s) (seq_to_four_LE s') (seq_to_four_LE t) (seq_to_four_LE t');
  if n = 4 then nat32_xor_bytewise_4 k k' x x' m (seq_to_four_LE s) (seq_to_four_LE s') (seq_to_four_LE t) (seq_to_four_LE t');
  assert (equal (slice t 0 n) (slice t' 0 n));
  // Convert the slice equality into the pointwise postcondition.
  lemma_slice_orig_index t t' 0 n;
  ()
// Lifts nat32_xor_bytewise from 4-byte words to full 16-byte quad32s:
// if the first n bytes (n <= 16) of the byte serializations of q and q'
// agree, then the first n bytes of the serializations of q^r and q'^r agree.
// The 16 bytes are processed one 4-byte lane at a time (lo0, lo1, hi2, hi3);
// every lane fully below n uses the n = 4 case, and the lane containing the
// boundary uses the residual count n - 4k.
let quad32_xor_bytewise (q q' r:quad32) (n:nat{ n <= 16 }) : Lemma
  (requires (let q_bytes  = le_quad32_to_bytes q in
             let q'_bytes = le_quad32_to_bytes q' in
             slice q_bytes 0 n == slice q'_bytes 0 n))
  (ensures (let q_bytes  = le_quad32_to_bytes q in
            let q'_bytes = le_quad32_to_bytes q' in
            let qr_bytes  = le_quad32_to_bytes (quad32_xor q r) in
            let q'r_bytes = le_quad32_to_bytes (quad32_xor q' r) in
            slice qr_bytes 0 n == slice q'r_bytes 0 n))
  =
  let s = le_quad32_to_bytes q in
  let s' = le_quad32_to_bytes q' in
  let t = le_quad32_to_bytes (quad32_xor q r) in
  let t' = le_quad32_to_bytes (quad32_xor q' r) in
  // Relate each 4-byte slice of the serialization to the corresponding
  // quad32 field, so the per-lane nat32 lemma applies.
  lemma_slices_le_quad32_to_bytes q;
  lemma_slices_le_quad32_to_bytes q';
  lemma_slices_le_quad32_to_bytes (quad32_xor q r);
  lemma_slices_le_quad32_to_bytes (quad32_xor q' r);
  lemma_slice_orig_index s s' 0 n;
  // Unfold the opaque definitions of quad32_xor / reverse_bytes_nat32 so the
  // per-lane ixor facts are available.
  quad32_xor_reveal ();
  reverse_bytes_nat32_reveal ();
  if n < 4 then nat32_xor_bytewise q.lo0 q'.lo0 r.lo0 (slice s 0 4) (slice s' 0 4) (slice t 0 4) (slice t' 0 4) n
  else
  (
    nat32_xor_bytewise q.lo0 q'.lo0 r.lo0 (slice s 0 4) (slice s' 0 4) (slice t 0 4) (slice t' 0 4) 4;
    if n < 8 then nat32_xor_bytewise q.lo1 q'.lo1 r.lo1 (slice s 4 8) (slice s' 4 8) (slice t 4 8) (slice t' 4 8) (n - 4)
    else
    (
      nat32_xor_bytewise q.lo1 q'.lo1 r.lo1 (slice s 4 8) (slice s' 4 8) (slice t 4 8) (slice t' 4 8) 4;
      if n < 12 then nat32_xor_bytewise q.hi2 q'.hi2 r.hi2 (slice s 8 12) (slice s' 8 12) (slice t 8 12) (slice t' 8 12) (n - 8)
      else
      (
        nat32_xor_bytewise q.hi2 q'.hi2 r.hi2 (slice s 8 12) (slice s' 8 12) (slice t 8 12) (slice t' 8 12) 4;
        nat32_xor_bytewise q.hi3 q'.hi3 r.hi3 (slice s 12 16) (slice s' 12 16) (slice t 12 16) (slice t' 12 16) (n - 12);
        ()
      )
    )
  );
  // Collect the pointwise lane facts back into a slice equality on t, t'.
  assert (equal (slice t 0 n) (slice t' 0 n));
  ()
// Padding a short (1..15 byte) block out to 128 bits preserves the original
// bytes as a prefix: slicing the padded sequence back to the original length
// recovers s exactly. Proved by sequence extensionality (Seq.equal).
let slice_pad_to_128_bits (s:seq nat8 { 0 < length s /\ length s < 16 }) :
  Lemma(slice (pad_to_128_bits s) 0 (length s) == s)
  =
  // length s < 16, so length s % 16 is length s itself (padding adds bytes
  // only at the end, past index length s - 1).
  assert (length s % 16 == length s);
  assert (equal s (slice (pad_to_128_bits s) 0 (length s)));
  ()
let step2 (s:seq nat8 { 0 < length s /\ length s < 16 }) (q:quad32) (icb_BE:quad32) (alg:algorithm) (key:aes_key_LE alg) (i:int):
Lemma(let q_bytes = le_quad32_to_bytes q in
let q_bytes_prefix = slice q_bytes 0 (length s) in
let q_cipher = gctr_encrypt_block icb_BE q alg key i in
let q_cipher_bytes = slice (le_quad32_to_bytes q_cipher) 0 (length s) in
let s_quad = le_bytes_to_quad32 (pad_to_128_bits s) in
let s_cipher = gctr_encrypt_block icb_BE s_quad alg key i in
let s_cipher_bytes = slice (le_quad32_to_bytes s_cipher) 0 (length s) in | false | false | Vale.AES.GCTR.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val step2
(s: seq nat8 {0 < length s /\ length s < 16})
(q icb_BE: quad32)
(alg: algorithm)
(key: aes_key_LE alg)
(i: int)
: Lemma
(let q_bytes = le_quad32_to_bytes q in
let q_bytes_prefix = slice q_bytes 0 (length s) in
let q_cipher = gctr_encrypt_block icb_BE q alg key i in
let q_cipher_bytes = slice (le_quad32_to_bytes q_cipher) 0 (length s) in
let s_quad = le_bytes_to_quad32 (pad_to_128_bits s) in
let s_cipher = gctr_encrypt_block icb_BE s_quad alg key i in
let s_cipher_bytes = slice (le_quad32_to_bytes s_cipher) 0 (length s) in
s == q_bytes_prefix ==> s_cipher_bytes == q_cipher_bytes) | [] | Vale.AES.GCTR.step2 | {
"file_name": "vale/code/crypto/aes/Vale.AES.GCTR.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} |
s:
FStar.Seq.Base.seq Vale.Def.Types_s.nat8
{0 < FStar.Seq.Base.length s /\ FStar.Seq.Base.length s < 16} ->
q: Vale.Def.Types_s.quad32 ->
icb_BE: Vale.Def.Types_s.quad32 ->
alg: Vale.AES.AES_common_s.algorithm ->
key: Vale.AES.AES_s.aes_key_LE alg ->
i: Prims.int
-> FStar.Pervasives.Lemma
(ensures
(let q_bytes = Vale.Def.Types_s.le_quad32_to_bytes q in
let q_bytes_prefix = FStar.Seq.Base.slice q_bytes 0 (FStar.Seq.Base.length s) in
let q_cipher = Vale.AES.GCTR_s.gctr_encrypt_block icb_BE q alg key i in
let q_cipher_bytes =
FStar.Seq.Base.slice (Vale.Def.Types_s.le_quad32_to_bytes q_cipher)
0
(FStar.Seq.Base.length s)
in
let s_quad = Vale.Def.Types_s.le_bytes_to_quad32 (Vale.AES.GCTR_s.pad_to_128_bits s) in
let s_cipher = Vale.AES.GCTR_s.gctr_encrypt_block icb_BE s_quad alg key i in
let s_cipher_bytes =
FStar.Seq.Base.slice (Vale.Def.Types_s.le_quad32_to_bytes s_cipher)
0
(FStar.Seq.Base.length s)
in
s == q_bytes_prefix ==> s_cipher_bytes == q_cipher_bytes)) | {
"end_col": 4,
"end_line": 640,
"start_col": 3,
"start_line": 616
} |
FStar.Pervasives.Lemma | val step1 (p: seq quad32) (num_bytes: nat{num_bytes < 16 * length p})
: Lemma
(let num_extra = num_bytes % 16 in
let num_blocks = num_bytes / 16 in
let full_blocks, final_block =
split (slice (le_seq_quad32_to_bytes p) 0 num_bytes) (num_blocks * 16)
in
let full_quads_LE = le_bytes_to_seq_quad32 full_blocks in
let p_prefix = slice p 0 num_blocks in
p_prefix == full_quads_LE) | [
{
"abbrev": false,
"full_module": "FStar.Seq.Properties",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.Types_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Lib.Seqs",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCM_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCTR_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words.Four_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words.Seq_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Lib.Seqs",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCM_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCTR_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let step1 (p:seq quad32) (num_bytes:nat{ num_bytes < 16 * length p }) : Lemma
(let num_extra = num_bytes % 16 in
let num_blocks = num_bytes / 16 in
let full_blocks, final_block = split (slice (le_seq_quad32_to_bytes p) 0 num_bytes) (num_blocks * 16) in
let full_quads_LE = le_bytes_to_seq_quad32 full_blocks in
let p_prefix = slice p 0 num_blocks in
p_prefix == full_quads_LE)
=
let num_extra = num_bytes % 16 in
let num_blocks = num_bytes / 16 in
let full_blocks, final_block = split (slice (le_seq_quad32_to_bytes p) 0 num_bytes) (num_blocks * 16) in
let full_quads_LE = le_bytes_to_seq_quad32 full_blocks in
let p_prefix = slice p 0 num_blocks in
assert (length full_blocks == num_blocks * 16);
assert (full_blocks == slice (slice (le_seq_quad32_to_bytes p) 0 num_bytes) 0 (num_blocks * 16));
assert (full_blocks == slice (le_seq_quad32_to_bytes p) 0 (num_blocks * 16));
slice_commutes_le_seq_quad32_to_bytes0 p num_blocks;
assert (full_blocks == le_seq_quad32_to_bytes (slice p 0 num_blocks));
le_bytes_to_seq_quad32_to_bytes (slice p 0 num_blocks);
assert (full_quads_LE == (slice p 0 num_blocks));
() | val step1 (p: seq quad32) (num_bytes: nat{num_bytes < 16 * length p})
: Lemma
(let num_extra = num_bytes % 16 in
let num_blocks = num_bytes / 16 in
let full_blocks, final_block =
split (slice (le_seq_quad32_to_bytes p) 0 num_bytes) (num_blocks * 16)
in
let full_quads_LE = le_bytes_to_seq_quad32 full_blocks in
let p_prefix = slice p 0 num_blocks in
p_prefix == full_quads_LE)
let step1 (p: seq quad32) (num_bytes: nat{num_bytes < 16 * length p})
: Lemma
(let num_extra = num_bytes % 16 in
let num_blocks = num_bytes / 16 in
let full_blocks, final_block =
split (slice (le_seq_quad32_to_bytes p) 0 num_bytes) (num_blocks * 16)
in
let full_quads_LE = le_bytes_to_seq_quad32 full_blocks in
let p_prefix = slice p 0 num_blocks in
p_prefix == full_quads_LE) = | false | null | true | let num_extra = num_bytes % 16 in
let num_blocks = num_bytes / 16 in
let full_blocks, final_block =
split (slice (le_seq_quad32_to_bytes p) 0 num_bytes) (num_blocks * 16)
in
let full_quads_LE = le_bytes_to_seq_quad32 full_blocks in
let p_prefix = slice p 0 num_blocks in
assert (length full_blocks == num_blocks * 16);
assert (full_blocks == slice (slice (le_seq_quad32_to_bytes p) 0 num_bytes) 0 (num_blocks * 16));
assert (full_blocks == slice (le_seq_quad32_to_bytes p) 0 (num_blocks * 16));
slice_commutes_le_seq_quad32_to_bytes0 p num_blocks;
assert (full_blocks == le_seq_quad32_to_bytes (slice p 0 num_blocks));
le_bytes_to_seq_quad32_to_bytes (slice p 0 num_blocks);
assert (full_quads_LE == (slice p 0 num_blocks));
() | {
"checked_file": "Vale.AES.GCTR.fst.checked",
"dependencies": [
"Vale.Poly1305.Bitvectors.fsti.checked",
"Vale.Lib.Seqs.fsti.checked",
"Vale.Def.Words_s.fsti.checked",
"Vale.Def.Words.Seq_s.fsti.checked",
"Vale.Def.Words.Four_s.fsti.checked",
"Vale.Def.TypesNative_s.fst.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Opaque_s.fsti.checked",
"Vale.Arch.Types.fsti.checked",
"Vale.AES.Types_helpers.fsti.checked",
"Vale.AES.GCTR_s.fst.checked",
"Vale.AES.GCM_helpers.fsti.checked",
"Vale.AES.AES_s.fst.checked",
"prims.fst.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.Properties.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Classical.fsti.checked",
"FStar.Calc.fsti.checked"
],
"interface_file": true,
"source_file": "Vale.AES.GCTR.fst"
} | [
"lemma"
] | [
"FStar.Seq.Base.seq",
"Vale.Def.Types_s.quad32",
"Prims.nat",
"Prims.b2t",
"Prims.op_LessThan",
"FStar.Mul.op_Star",
"FStar.Seq.Base.length",
"Vale.Def.Types_s.nat8",
"Prims.unit",
"Prims._assert",
"Prims.eq2",
"FStar.Seq.Base.slice",
"Vale.Arch.Types.le_bytes_to_seq_quad32_to_bytes",
"Vale.Def.Types_s.le_seq_quad32_to_bytes",
"Vale.Arch.Types.slice_commutes_le_seq_quad32_to_bytes0",
"Prims.int",
"Vale.Def.Types_s.le_bytes_to_seq_quad32",
"FStar.Pervasives.Native.tuple2",
"Vale.Def.Words_s.nat8",
"FStar.Seq.Properties.split",
"Prims.op_Division",
"Prims.op_Modulus",
"Prims.l_True",
"Prims.squash",
"Prims.Nil",
"FStar.Pervasives.pattern"
] | [] | module Vale.AES.GCTR
open Vale.Def.Opaque_s
open Vale.Def.Words_s
open Vale.Def.Words.Seq_s
open Vale.Def.Words.Four_s
open Vale.Def.Types_s
open Vale.Arch.Types
open FStar.Mul
open FStar.Seq
open Vale.AES.AES_s
open Vale.AES.GCTR_s
open Vale.AES.GCM_helpers
open FStar.Math.Lemmas
open Vale.Lib.Seqs
open Vale.AES.Types_helpers
#set-options "--z3rlimit 20 --max_fuel 1 --max_ifuel 0"
let lemma_counter_init x low64 low8 =
Vale.Poly1305.Bitvectors.lemma_bytes_and_mod1 low64;
Vale.Def.TypesNative_s.reveal_iand 64 low64 0xff;
assert (low8 == low64 % 256);
lo64_reveal ();
assert_norm (pow2_norm 32 == pow2_32); // OBSERVE
assert (low64 == x.lo0 + x.lo1 * pow2_32); // OBSERVE
assert (low64 % 256 == x.lo0 % 256);
()
let gctr_encrypt_block_offset (icb_BE:quad32) (plain_LE:quad32) (alg:algorithm) (key:seq nat32) (i:int) =
()
let gctr_encrypt_empty (icb_BE:quad32) (plain_LE cipher_LE:seq quad32) (alg:algorithm) (key:seq nat32) =
reveal_opaque (`%le_bytes_to_seq_quad32) le_bytes_to_seq_quad32;
gctr_encrypt_LE_reveal ();
let plain = slice (le_seq_quad32_to_bytes plain_LE) 0 0 in
let cipher = slice (le_seq_quad32_to_bytes cipher_LE) 0 0 in
assert (plain == empty);
assert (cipher == empty);
assert (length plain == 0);
assert (make_gctr_plain_LE plain == empty);
let num_extra = (length (make_gctr_plain_LE plain)) % 16 in
assert (num_extra == 0);
let plain_quads_LE = le_bytes_to_seq_quad32 plain in
let cipher_quads_LE = gctr_encrypt_recursive icb_BE plain_quads_LE alg key 0 in
assert (equal plain_quads_LE empty); // OBSERVE
assert (plain_quads_LE == empty);
assert (cipher_quads_LE == empty);
assert (equal (le_seq_quad32_to_bytes cipher_quads_LE) empty); // OBSERVEs
()
let gctr_partial_opaque_init alg plain cipher key icb =
gctr_partial_reveal ();
()
#restart-solver
let lemma_gctr_partial_append alg b1 b2 p1 c1 p2 c2 key icb1 icb2 =
gctr_partial_reveal ();
()
let gctr_partial_opaque_ignores_postfix alg bound plain plain' cipher cipher' key icb =
gctr_partial_reveal ();
// OBSERVE:
assert (forall i . 0 <= i /\ i < bound ==> index plain i == index (slice plain 0 bound) i);
assert (forall i . 0 <= i /\ i < bound ==> index plain' i == index (slice plain' 0 bound) i);
assert (forall i . 0 <= i /\ i < bound ==> index cipher i == index (slice cipher 0 bound) i);
assert (forall i . 0 <= i /\ i < bound ==> index cipher' i == index (slice cipher' 0 bound) i);
()
let gctr_partial_extend6 (alg:algorithm) (bound:nat) (plain cipher:seq quad32) (key:seq nat32) (icb:quad32)
=
gctr_partial_reveal ();
()
(*
let rec seq_map_i_indexed' (#a:Type) (#b:Type) (f:int->a->b) (s:seq a) (i:int) :
Tot (s':seq b { length s' == length s /\
(forall j . {:pattern index s' j} 0 <= j /\ j < length s ==> index s' j == f (i + j) (index s j))
})
(decreases (length s))
=
if length s = 0 then empty
else cons (f i (head s)) (seq_map_i_indexed f (tail s) (i + 1))
let rec test (icb_BE:quad32) (plain_LE:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (i:int) :
Lemma (ensures
(let gctr_encrypt_block_curried (j:int) (p:quad32) = gctr_encrypt_block icb_BE p alg key j in
gctr_encrypt_recursive icb_BE plain_LE alg key i == seq_map_i_indexed' gctr_encrypt_block_curried plain_LE i))
(decreases (length plain_LE))
=
let gctr_encrypt_block_curried (j:int) (p:quad32) = gctr_encrypt_block icb_BE p alg key j in
let g = gctr_encrypt_recursive icb_BE plain_LE alg key i in
let s = seq_map_i_indexed' gctr_encrypt_block_curried plain_LE i in
if length plain_LE = 0 then (
assert(equal (g) (s));
()
) else (
test icb_BE (tail plain_LE) alg key (i+1);
assert (gctr_encrypt_recursive icb_BE (tail plain_LE) alg key (i+1) == seq_map_i_indexed' gctr_encrypt_block_curried (tail plain_LE) (i+1))
)
*)
let rec gctr_encrypt_recursive_length (icb:quad32) (plain:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (i:int) : Lemma
(requires True)
(ensures length (gctr_encrypt_recursive icb plain alg key i) == length plain)
(decreases %[length plain])
[SMTPat (length (gctr_encrypt_recursive icb plain alg key i))]
=
if length plain = 0 then ()
else gctr_encrypt_recursive_length icb (tail plain) alg key (i + 1)
#reset-options "--z3rlimit 40"
let gctr_encrypt_length (icb_BE:quad32) (plain:gctr_plain_LE)
(alg:algorithm) (key:aes_key_LE alg) :
Lemma(length (gctr_encrypt_LE icb_BE plain alg key) == length plain)
[SMTPat (length (gctr_encrypt_LE icb_BE plain alg key))]
=
reveal_opaque (`%le_bytes_to_seq_quad32) le_bytes_to_seq_quad32;
gctr_encrypt_LE_reveal ();
let num_extra = (length plain) % 16 in
let result = gctr_encrypt_LE icb_BE plain alg key in
if num_extra = 0 then (
let plain_quads_LE = le_bytes_to_seq_quad32 plain in
gctr_encrypt_recursive_length icb_BE plain_quads_LE alg key 0
) else (
let full_bytes_len = (length plain) - num_extra in
let full_blocks, final_block = split plain full_bytes_len in
let full_quads_LE = le_bytes_to_seq_quad32 full_blocks in
let final_quad_LE = le_bytes_to_quad32 (pad_to_128_bits final_block) in
let cipher_quads_LE = gctr_encrypt_recursive icb_BE full_quads_LE alg key 0 in
let final_cipher_quad_LE = gctr_encrypt_block icb_BE final_quad_LE alg key (full_bytes_len / 16) in
let cipher_bytes_full_LE = le_seq_quad32_to_bytes cipher_quads_LE in
let final_cipher_bytes_LE = slice (le_quad32_to_bytes final_cipher_quad_LE) 0 num_extra in
gctr_encrypt_recursive_length icb_BE full_quads_LE alg key 0;
assert (length result == length cipher_bytes_full_LE + length final_cipher_bytes_LE);
assert (length cipher_quads_LE == length full_quads_LE);
assert (length cipher_bytes_full_LE == 16 * length cipher_quads_LE);
assert (16 * length full_quads_LE == length full_blocks);
assert (length cipher_bytes_full_LE == length full_blocks);
()
)
#reset-options
let rec gctr_indexed_helper (icb:quad32) (plain:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (i:int) : Lemma
(requires True)
(ensures (let cipher = gctr_encrypt_recursive icb plain alg key i in
length cipher == length plain /\
(forall j . {:pattern index cipher j} 0 <= j /\ j < length plain ==>
index cipher j == quad32_xor (index plain j) (aes_encrypt_BE alg key (inc32 icb (i + j)) ))))
(decreases %[length plain])
=
if length plain = 0 then ()
else
let tl = tail plain in
let cipher = gctr_encrypt_recursive icb plain alg key i in
let r_cipher = gctr_encrypt_recursive icb tl alg key (i+1) in
let helper (j:int) :
Lemma ((0 <= j /\ j < length plain) ==> (index cipher j == quad32_xor (index plain j) (aes_encrypt_BE alg key (inc32 icb (i + j)) )))
=
aes_encrypt_LE_reveal ();
if 0 < j && j < length plain then (
gctr_indexed_helper icb tl alg key (i+1);
assert(index r_cipher (j-1) == quad32_xor (index tl (j-1)) (aes_encrypt_BE alg key (inc32 icb (i + 1 + j - 1)) )) // OBSERVE
) else ()
in
FStar.Classical.forall_intro helper
let gctr_indexed (icb:quad32) (plain:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (cipher:seq quad32) : Lemma
(requires length cipher == length plain /\
(forall i . {:pattern index cipher i} 0 <= i /\ i < length cipher ==>
index cipher i == quad32_xor (index plain i) (aes_encrypt_BE alg key (inc32 icb i) )))
(ensures cipher == gctr_encrypt_recursive icb plain alg key 0)
=
gctr_indexed_helper icb plain alg key 0;
let c = gctr_encrypt_recursive icb plain alg key 0 in
assert(equal cipher c) // OBSERVE: Invoke extensionality lemmas
let gctr_partial_completed (alg:algorithm) (plain cipher:seq quad32) (key:seq nat32) (icb:quad32) =
gctr_indexed icb plain alg key cipher;
()
let gctr_partial_opaque_completed (alg:algorithm) (plain cipher:seq quad32) (key:seq nat32) (icb:quad32) : Lemma
(requires
is_aes_key_LE alg key /\
length plain == length cipher /\
length plain < pow2_32 /\
gctr_partial alg (length cipher) plain cipher key icb
)
(ensures cipher == gctr_encrypt_recursive icb plain alg key 0)
=
gctr_partial_reveal ();
gctr_partial_completed alg plain cipher key icb
let gctr_partial_to_full_basic (icb_BE:quad32) (plain:seq quad32) (alg:algorithm) (key:seq nat32) (cipher:seq quad32) =
gctr_encrypt_LE_reveal ();
let p = le_seq_quad32_to_bytes plain in
assert (length p % 16 == 0);
let plain_quads_LE = le_bytes_to_seq_quad32 p in
let cipher_quads_LE = gctr_encrypt_recursive icb_BE plain_quads_LE alg key 0 in
let cipher_bytes = le_seq_quad32_to_bytes cipher_quads_LE in
le_bytes_to_seq_quad32_to_bytes plain;
()
(*
Want to show that:
slice (le_seq_quad32_to_bytes (buffer128_as_seq(mem, out_b))) 0 num_bytes
==
gctr_encrypt_LE icb_BE (slice (le_seq_quad32_to_bytes (buffer128_as_seq(mem, in_b))) 0 num_bytes) ...
We know that
slice (buffer128_as_seq(mem, out_b) 0 num_blocks
==
gctr_encrypt_recursive icb_BE (slice buffer128_as_seq(mem, in_b) 0 num_blocks) ...
And we know that:
get_mem out_b num_blocks
==
gctr_encrypt_block(icb_BE, (get_mem inb num_blocks), alg, key, num_blocks);
Internally gctr_encrypt_LE will compute:
full_blocks, final_block = split (slice (le_seq_quad32_to_bytes (buffer128_as_seq(mem, in_b))) 0 num_bytes) (num_blocks * 16)
We'd like to show that
Step1: le_bytes_to_seq_quad32 full_blocks == slice buffer128_as_seq(mem, in_b) 0 num_blocks
and
Step2: final_block == slice (le_quad32_to_bytes (get_mem inb num_blocks)) 0 num_extra
Then we need to break down the byte-level effects of gctr_encrypt_block to show that even though the
padded version of final_block differs from (get_mem inb num_blocks), after we slice it at the end,
we end up with the same value
*)
let step1 (p:seq quad32) (num_bytes:nat{ num_bytes < 16 * length p }) : Lemma
(let num_extra = num_bytes % 16 in
let num_blocks = num_bytes / 16 in
let full_blocks, final_block = split (slice (le_seq_quad32_to_bytes p) 0 num_bytes) (num_blocks * 16) in
let full_quads_LE = le_bytes_to_seq_quad32 full_blocks in
let p_prefix = slice p 0 num_blocks in | false | false | Vale.AES.GCTR.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val step1 (p: seq quad32) (num_bytes: nat{num_bytes < 16 * length p})
: Lemma
(let num_extra = num_bytes % 16 in
let num_blocks = num_bytes / 16 in
let full_blocks, final_block =
split (slice (le_seq_quad32_to_bytes p) 0 num_bytes) (num_blocks * 16)
in
let full_quads_LE = le_bytes_to_seq_quad32 full_blocks in
let p_prefix = slice p 0 num_blocks in
p_prefix == full_quads_LE) | [] | Vale.AES.GCTR.step1 | {
"file_name": "vale/code/crypto/aes/Vale.AES.GCTR.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} |
p: FStar.Seq.Base.seq Vale.Def.Types_s.quad32 ->
num_bytes: Prims.nat{num_bytes < 16 * FStar.Seq.Base.length p}
-> FStar.Pervasives.Lemma
(ensures
(let num_extra = num_bytes % 16 in
let num_blocks = num_bytes / 16 in
let _ =
FStar.Seq.Properties.split (FStar.Seq.Base.slice (Vale.Def.Types_s.le_seq_quad32_to_bytes p
)
0
num_bytes)
(num_blocks * 16)
in
(let FStar.Pervasives.Native.Mktuple2 #_ #_ full_blocks _ = _ in
let full_quads_LE = Vale.Def.Types_s.le_bytes_to_seq_quad32 full_blocks in
let p_prefix = FStar.Seq.Base.slice p 0 num_blocks in
p_prefix == full_quads_LE)
<:
Type0)) | {
"end_col": 4,
"end_line": 266,
"start_col": 3,
"start_line": 253
} |
FStar.Pervasives.Lemma | val lemma_slice_orig_index (#a: Type) (s s': seq a) (m n: nat)
: Lemma
(requires length s == length s' /\ m <= n /\ n <= length s /\ slice s m n == slice s' m n)
(ensures
(forall (i: int). {:pattern (index s i)\/(index s' i)}
m <= i /\ i < n ==> index s i == index s' i)) | [
{
"abbrev": false,
"full_module": "FStar.Seq.Properties",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.Types_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Lib.Seqs",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCM_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCTR_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words.Four_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words.Seq_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Lib.Seqs",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCM_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCTR_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let lemma_slice_orig_index (#a:Type) (s s':seq a) (m n:nat) : Lemma
(requires length s == length s' /\ m <= n /\ n <= length s /\ slice s m n == slice s' m n)
(ensures (forall (i:int).{:pattern (index s i) \/ (index s' i)} m <= i /\ i < n ==> index s i == index s' i))
=
let aux (i:nat{m <= i /\ i < n}) : Lemma (index s i == index s' i) =
lemma_index_slice s m n (i - m);
lemma_index_slice s' m n (i - m)
in Classical.forall_intro aux | val lemma_slice_orig_index (#a: Type) (s s': seq a) (m n: nat)
: Lemma
(requires length s == length s' /\ m <= n /\ n <= length s /\ slice s m n == slice s' m n)
(ensures
(forall (i: int). {:pattern (index s i)\/(index s' i)}
m <= i /\ i < n ==> index s i == index s' i))
let lemma_slice_orig_index (#a: Type) (s s': seq a) (m n: nat)
: Lemma
(requires length s == length s' /\ m <= n /\ n <= length s /\ slice s m n == slice s' m n)
(ensures
(forall (i: int). {:pattern (index s i)\/(index s' i)}
m <= i /\ i < n ==> index s i == index s' i)) = | false | null | true | let aux (i: nat{m <= i /\ i < n}) : Lemma (index s i == index s' i) =
lemma_index_slice s m n (i - m);
lemma_index_slice s' m n (i - m)
in
Classical.forall_intro aux | {
"checked_file": "Vale.AES.GCTR.fst.checked",
"dependencies": [
"Vale.Poly1305.Bitvectors.fsti.checked",
"Vale.Lib.Seqs.fsti.checked",
"Vale.Def.Words_s.fsti.checked",
"Vale.Def.Words.Seq_s.fsti.checked",
"Vale.Def.Words.Four_s.fsti.checked",
"Vale.Def.TypesNative_s.fst.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Opaque_s.fsti.checked",
"Vale.Arch.Types.fsti.checked",
"Vale.AES.Types_helpers.fsti.checked",
"Vale.AES.GCTR_s.fst.checked",
"Vale.AES.GCM_helpers.fsti.checked",
"Vale.AES.AES_s.fst.checked",
"prims.fst.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.Properties.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Classical.fsti.checked",
"FStar.Calc.fsti.checked"
],
"interface_file": true,
"source_file": "Vale.AES.GCTR.fst"
} | [
"lemma"
] | [
"FStar.Seq.Base.seq",
"Prims.nat",
"FStar.Classical.forall_intro",
"Prims.l_and",
"Prims.b2t",
"Prims.op_LessThanOrEqual",
"Prims.op_LessThan",
"Prims.eq2",
"FStar.Seq.Base.index",
"Prims.unit",
"Prims.l_True",
"Prims.squash",
"Prims.Nil",
"FStar.Pervasives.pattern",
"FStar.Seq.Base.lemma_index_slice",
"Prims.op_Subtraction",
"FStar.Seq.Base.length",
"FStar.Seq.Base.slice",
"Prims.l_Forall",
"Prims.int",
"Prims.l_imp"
] | [] | module Vale.AES.GCTR
open Vale.Def.Opaque_s
open Vale.Def.Words_s
open Vale.Def.Words.Seq_s
open Vale.Def.Words.Four_s
open Vale.Def.Types_s
open Vale.Arch.Types
open FStar.Mul
open FStar.Seq
open Vale.AES.AES_s
open Vale.AES.GCTR_s
open Vale.AES.GCM_helpers
open FStar.Math.Lemmas
open Vale.Lib.Seqs
open Vale.AES.Types_helpers
#set-options "--z3rlimit 20 --max_fuel 1 --max_ifuel 0"
let lemma_counter_init x low64 low8 =
Vale.Poly1305.Bitvectors.lemma_bytes_and_mod1 low64;
Vale.Def.TypesNative_s.reveal_iand 64 low64 0xff;
assert (low8 == low64 % 256);
lo64_reveal ();
assert_norm (pow2_norm 32 == pow2_32); // OBSERVE
assert (low64 == x.lo0 + x.lo1 * pow2_32); // OBSERVE
assert (low64 % 256 == x.lo0 % 256);
()
let gctr_encrypt_block_offset (icb_BE:quad32) (plain_LE:quad32) (alg:algorithm) (key:seq nat32) (i:int) =
()
let gctr_encrypt_empty (icb_BE:quad32) (plain_LE cipher_LE:seq quad32) (alg:algorithm) (key:seq nat32) =
reveal_opaque (`%le_bytes_to_seq_quad32) le_bytes_to_seq_quad32;
gctr_encrypt_LE_reveal ();
let plain = slice (le_seq_quad32_to_bytes plain_LE) 0 0 in
let cipher = slice (le_seq_quad32_to_bytes cipher_LE) 0 0 in
assert (plain == empty);
assert (cipher == empty);
assert (length plain == 0);
assert (make_gctr_plain_LE plain == empty);
let num_extra = (length (make_gctr_plain_LE plain)) % 16 in
assert (num_extra == 0);
let plain_quads_LE = le_bytes_to_seq_quad32 plain in
let cipher_quads_LE = gctr_encrypt_recursive icb_BE plain_quads_LE alg key 0 in
assert (equal plain_quads_LE empty); // OBSERVE
assert (plain_quads_LE == empty);
assert (cipher_quads_LE == empty);
assert (equal (le_seq_quad32_to_bytes cipher_quads_LE) empty); // OBSERVEs
()
let gctr_partial_opaque_init alg plain cipher key icb =
gctr_partial_reveal ();
()
#restart-solver
let lemma_gctr_partial_append alg b1 b2 p1 c1 p2 c2 key icb1 icb2 =
gctr_partial_reveal ();
()
let gctr_partial_opaque_ignores_postfix alg bound plain plain' cipher cipher' key icb =
gctr_partial_reveal ();
// OBSERVE:
assert (forall i . 0 <= i /\ i < bound ==> index plain i == index (slice plain 0 bound) i);
assert (forall i . 0 <= i /\ i < bound ==> index plain' i == index (slice plain' 0 bound) i);
assert (forall i . 0 <= i /\ i < bound ==> index cipher i == index (slice cipher 0 bound) i);
assert (forall i . 0 <= i /\ i < bound ==> index cipher' i == index (slice cipher' 0 bound) i);
()
let gctr_partial_extend6 (alg:algorithm) (bound:nat) (plain cipher:seq quad32) (key:seq nat32) (icb:quad32)
=
gctr_partial_reveal ();
()
(*
let rec seq_map_i_indexed' (#a:Type) (#b:Type) (f:int->a->b) (s:seq a) (i:int) :
Tot (s':seq b { length s' == length s /\
(forall j . {:pattern index s' j} 0 <= j /\ j < length s ==> index s' j == f (i + j) (index s j))
})
(decreases (length s))
=
if length s = 0 then empty
else cons (f i (head s)) (seq_map_i_indexed f (tail s) (i + 1))
let rec test (icb_BE:quad32) (plain_LE:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (i:int) :
Lemma (ensures
(let gctr_encrypt_block_curried (j:int) (p:quad32) = gctr_encrypt_block icb_BE p alg key j in
gctr_encrypt_recursive icb_BE plain_LE alg key i == seq_map_i_indexed' gctr_encrypt_block_curried plain_LE i))
(decreases (length plain_LE))
=
let gctr_encrypt_block_curried (j:int) (p:quad32) = gctr_encrypt_block icb_BE p alg key j in
let g = gctr_encrypt_recursive icb_BE plain_LE alg key i in
let s = seq_map_i_indexed' gctr_encrypt_block_curried plain_LE i in
if length plain_LE = 0 then (
assert(equal (g) (s));
()
) else (
test icb_BE (tail plain_LE) alg key (i+1);
assert (gctr_encrypt_recursive icb_BE (tail plain_LE) alg key (i+1) == seq_map_i_indexed' gctr_encrypt_block_curried (tail plain_LE) (i+1))
)
*)
let rec gctr_encrypt_recursive_length (icb:quad32) (plain:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (i:int) : Lemma
(requires True)
(ensures length (gctr_encrypt_recursive icb plain alg key i) == length plain)
(decreases %[length plain])
[SMTPat (length (gctr_encrypt_recursive icb plain alg key i))]
=
if length plain = 0 then ()
else gctr_encrypt_recursive_length icb (tail plain) alg key (i + 1)
#reset-options "--z3rlimit 40"
let gctr_encrypt_length (icb_BE:quad32) (plain:gctr_plain_LE)
(alg:algorithm) (key:aes_key_LE alg) :
Lemma(length (gctr_encrypt_LE icb_BE plain alg key) == length plain)
[SMTPat (length (gctr_encrypt_LE icb_BE plain alg key))]
=
reveal_opaque (`%le_bytes_to_seq_quad32) le_bytes_to_seq_quad32;
gctr_encrypt_LE_reveal ();
let num_extra = (length plain) % 16 in
let result = gctr_encrypt_LE icb_BE plain alg key in
if num_extra = 0 then (
let plain_quads_LE = le_bytes_to_seq_quad32 plain in
gctr_encrypt_recursive_length icb_BE plain_quads_LE alg key 0
) else (
let full_bytes_len = (length plain) - num_extra in
let full_blocks, final_block = split plain full_bytes_len in
let full_quads_LE = le_bytes_to_seq_quad32 full_blocks in
let final_quad_LE = le_bytes_to_quad32 (pad_to_128_bits final_block) in
let cipher_quads_LE = gctr_encrypt_recursive icb_BE full_quads_LE alg key 0 in
let final_cipher_quad_LE = gctr_encrypt_block icb_BE final_quad_LE alg key (full_bytes_len / 16) in
let cipher_bytes_full_LE = le_seq_quad32_to_bytes cipher_quads_LE in
let final_cipher_bytes_LE = slice (le_quad32_to_bytes final_cipher_quad_LE) 0 num_extra in
gctr_encrypt_recursive_length icb_BE full_quads_LE alg key 0;
assert (length result == length cipher_bytes_full_LE + length final_cipher_bytes_LE);
assert (length cipher_quads_LE == length full_quads_LE);
assert (length cipher_bytes_full_LE == 16 * length cipher_quads_LE);
assert (16 * length full_quads_LE == length full_blocks);
assert (length cipher_bytes_full_LE == length full_blocks);
()
)
#reset-options
let rec gctr_indexed_helper (icb:quad32) (plain:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (i:int) : Lemma
(requires True)
(ensures (let cipher = gctr_encrypt_recursive icb plain alg key i in
length cipher == length plain /\
(forall j . {:pattern index cipher j} 0 <= j /\ j < length plain ==>
index cipher j == quad32_xor (index plain j) (aes_encrypt_BE alg key (inc32 icb (i + j)) ))))
(decreases %[length plain])
=
if length plain = 0 then ()
else
let tl = tail plain in
let cipher = gctr_encrypt_recursive icb plain alg key i in
let r_cipher = gctr_encrypt_recursive icb tl alg key (i+1) in
let helper (j:int) :
Lemma ((0 <= j /\ j < length plain) ==> (index cipher j == quad32_xor (index plain j) (aes_encrypt_BE alg key (inc32 icb (i + j)) )))
=
aes_encrypt_LE_reveal ();
if 0 < j && j < length plain then (
gctr_indexed_helper icb tl alg key (i+1);
assert(index r_cipher (j-1) == quad32_xor (index tl (j-1)) (aes_encrypt_BE alg key (inc32 icb (i + 1 + j - 1)) )) // OBSERVE
) else ()
in
FStar.Classical.forall_intro helper
let gctr_indexed (icb:quad32) (plain:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (cipher:seq quad32) : Lemma
(requires length cipher == length plain /\
(forall i . {:pattern index cipher i} 0 <= i /\ i < length cipher ==>
index cipher i == quad32_xor (index plain i) (aes_encrypt_BE alg key (inc32 icb i) )))
(ensures cipher == gctr_encrypt_recursive icb plain alg key 0)
=
gctr_indexed_helper icb plain alg key 0;
let c = gctr_encrypt_recursive icb plain alg key 0 in
assert(equal cipher c) // OBSERVE: Invoke extensionality lemmas
let gctr_partial_completed (alg:algorithm) (plain cipher:seq quad32) (key:seq nat32) (icb:quad32) =
gctr_indexed icb plain alg key cipher;
()
let gctr_partial_opaque_completed (alg:algorithm) (plain cipher:seq quad32) (key:seq nat32) (icb:quad32) : Lemma
(requires
is_aes_key_LE alg key /\
length plain == length cipher /\
length plain < pow2_32 /\
gctr_partial alg (length cipher) plain cipher key icb
)
(ensures cipher == gctr_encrypt_recursive icb plain alg key 0)
=
gctr_partial_reveal ();
gctr_partial_completed alg plain cipher key icb
let gctr_partial_to_full_basic (icb_BE:quad32) (plain:seq quad32) (alg:algorithm) (key:seq nat32) (cipher:seq quad32) =
gctr_encrypt_LE_reveal ();
let p = le_seq_quad32_to_bytes plain in
assert (length p % 16 == 0);
let plain_quads_LE = le_bytes_to_seq_quad32 p in
let cipher_quads_LE = gctr_encrypt_recursive icb_BE plain_quads_LE alg key 0 in
let cipher_bytes = le_seq_quad32_to_bytes cipher_quads_LE in
le_bytes_to_seq_quad32_to_bytes plain;
()
(*
Want to show that:
slice (le_seq_quad32_to_bytes (buffer128_as_seq(mem, out_b))) 0 num_bytes
==
gctr_encrypt_LE icb_BE (slice (le_seq_quad32_to_bytes (buffer128_as_seq(mem, in_b))) 0 num_bytes) ...
We know that
slice (buffer128_as_seq(mem, out_b) 0 num_blocks
==
gctr_encrypt_recursive icb_BE (slice buffer128_as_seq(mem, in_b) 0 num_blocks) ...
And we know that:
get_mem out_b num_blocks
==
gctr_encrypt_block(icb_BE, (get_mem inb num_blocks), alg, key, num_blocks);
Internally gctr_encrypt_LE will compute:
full_blocks, final_block = split (slice (le_seq_quad32_to_bytes (buffer128_as_seq(mem, in_b))) 0 num_bytes) (num_blocks * 16)
We'd like to show that
Step1: le_bytes_to_seq_quad32 full_blocks == slice buffer128_as_seq(mem, in_b) 0 num_blocks
and
Step2: final_block == slice (le_quad32_to_bytes (get_mem inb num_blocks)) 0 num_extra
Then we need to break down the byte-level effects of gctr_encrypt_block to show that even though the
padded version of final_block differs from (get_mem inb num_blocks), after we slice it at the end,
we end up with the same value
*)
let step1 (p:seq quad32) (num_bytes:nat{ num_bytes < 16 * length p }) : Lemma
(let num_extra = num_bytes % 16 in
let num_blocks = num_bytes / 16 in
let full_blocks, final_block = split (slice (le_seq_quad32_to_bytes p) 0 num_bytes) (num_blocks * 16) in
let full_quads_LE = le_bytes_to_seq_quad32 full_blocks in
let p_prefix = slice p 0 num_blocks in
p_prefix == full_quads_LE)
=
let num_extra = num_bytes % 16 in
let num_blocks = num_bytes / 16 in
let full_blocks, final_block = split (slice (le_seq_quad32_to_bytes p) 0 num_bytes) (num_blocks * 16) in
let full_quads_LE = le_bytes_to_seq_quad32 full_blocks in
let p_prefix = slice p 0 num_blocks in
assert (length full_blocks == num_blocks * 16);
assert (full_blocks == slice (slice (le_seq_quad32_to_bytes p) 0 num_bytes) 0 (num_blocks * 16));
assert (full_blocks == slice (le_seq_quad32_to_bytes p) 0 (num_blocks * 16));
slice_commutes_le_seq_quad32_to_bytes0 p num_blocks;
assert (full_blocks == le_seq_quad32_to_bytes (slice p 0 num_blocks));
le_bytes_to_seq_quad32_to_bytes (slice p 0 num_blocks);
assert (full_quads_LE == (slice p 0 num_blocks));
()
#reset-options "--smtencoding.elim_box true --z3rlimit 30"
let lemma_slice_orig_index (#a:Type) (s s':seq a) (m n:nat) : Lemma
(requires length s == length s' /\ m <= n /\ n <= length s /\ slice s m n == slice s' m n) | false | false | Vale.AES.GCTR.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 30,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val lemma_slice_orig_index (#a: Type) (s s': seq a) (m n: nat)
: Lemma
(requires length s == length s' /\ m <= n /\ n <= length s /\ slice s m n == slice s' m n)
(ensures
(forall (i: int). {:pattern (index s i)\/(index s' i)}
m <= i /\ i < n ==> index s i == index s' i)) | [] | Vale.AES.GCTR.lemma_slice_orig_index | {
"file_name": "vale/code/crypto/aes/Vale.AES.GCTR.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | s: FStar.Seq.Base.seq a -> s': FStar.Seq.Base.seq a -> m: Prims.nat -> n: Prims.nat
-> FStar.Pervasives.Lemma
(requires
FStar.Seq.Base.length s == FStar.Seq.Base.length s' /\ m <= n /\
n <= FStar.Seq.Base.length s /\ FStar.Seq.Base.slice s m n == FStar.Seq.Base.slice s' m n)
(ensures
forall (i: Prims.int). {:pattern FStar.Seq.Base.index s i\/FStar.Seq.Base.index s' i}
m <= i /\ i < n ==> FStar.Seq.Base.index s i == FStar.Seq.Base.index s' i) | {
"end_col": 31,
"end_line": 276,
"start_col": 3,
"start_line": 272
} |
FStar.Pervasives.Lemma | val nat32_xor_bytewise_2_helper3 (k k': nat32) (s s': four nat8)
: Lemma
(requires k == four_to_nat 8 s /\ k' == four_to_nat 8 s' /\ s.lo0 == s'.lo0 /\ s.lo1 == s'.lo1
) (ensures k * 0x10000 % 0x100000000 == k' * 0x10000 % 0x100000000) | [
{
"abbrev": false,
"full_module": "FStar.Seq.Properties",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.Types_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Lib.Seqs",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCM_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCTR_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words.Four_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words.Seq_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Lib.Seqs",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCM_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCTR_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let nat32_xor_bytewise_2_helper3 (k k':nat32) (s s':four nat8) : Lemma
(requires
k == four_to_nat 8 s /\
k' == four_to_nat 8 s' /\
s.lo0 == s'.lo0 /\ s.lo1 == s'.lo1
)
(ensures k * 0x10000 % 0x100000000 == k' * 0x10000 % 0x100000000)
=
let Mkfour _ _ _ _ = s in
let Mkfour _ _ _ _ = s' in
assert_norm (four_to_nat 8 s == four_to_nat_unfold 8 s );
assert_norm (four_to_nat 8 s' == four_to_nat_unfold 8 s');
() | val nat32_xor_bytewise_2_helper3 (k k': nat32) (s s': four nat8)
: Lemma
(requires k == four_to_nat 8 s /\ k' == four_to_nat 8 s' /\ s.lo0 == s'.lo0 /\ s.lo1 == s'.lo1
) (ensures k * 0x10000 % 0x100000000 == k' * 0x10000 % 0x100000000)
let nat32_xor_bytewise_2_helper3 (k k': nat32) (s s': four nat8)
: Lemma
(requires k == four_to_nat 8 s /\ k' == four_to_nat 8 s' /\ s.lo0 == s'.lo0 /\ s.lo1 == s'.lo1
) (ensures k * 0x10000 % 0x100000000 == k' * 0x10000 % 0x100000000) = | false | null | true | let Mkfour _ _ _ _ = s in
let Mkfour _ _ _ _ = s' in
assert_norm (four_to_nat 8 s == four_to_nat_unfold 8 s);
assert_norm (four_to_nat 8 s' == four_to_nat_unfold 8 s');
() | {
"checked_file": "Vale.AES.GCTR.fst.checked",
"dependencies": [
"Vale.Poly1305.Bitvectors.fsti.checked",
"Vale.Lib.Seqs.fsti.checked",
"Vale.Def.Words_s.fsti.checked",
"Vale.Def.Words.Seq_s.fsti.checked",
"Vale.Def.Words.Four_s.fsti.checked",
"Vale.Def.TypesNative_s.fst.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Opaque_s.fsti.checked",
"Vale.Arch.Types.fsti.checked",
"Vale.AES.Types_helpers.fsti.checked",
"Vale.AES.GCTR_s.fst.checked",
"Vale.AES.GCM_helpers.fsti.checked",
"Vale.AES.AES_s.fst.checked",
"prims.fst.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.Properties.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Classical.fsti.checked",
"FStar.Calc.fsti.checked"
],
"interface_file": true,
"source_file": "Vale.AES.GCTR.fst"
} | [
"lemma"
] | [
"Vale.Def.Types_s.nat32",
"Vale.Def.Words_s.four",
"Vale.Def.Types_s.nat8",
"Prims.unit",
"FStar.Pervasives.assert_norm",
"Prims.eq2",
"Vale.Def.Words_s.natN",
"Prims.pow2",
"FStar.Mul.op_Star",
"Vale.Def.Words.Four_s.four_to_nat",
"Vale.Def.Words.Four_s.four_to_nat_unfold",
"Vale.Def.Words_s.nat8",
"Prims.l_and",
"Vale.Def.Words_s.pow2_32",
"Vale.Def.Words_s.__proj__Mkfour__item__lo0",
"Vale.Def.Words_s.__proj__Mkfour__item__lo1",
"Prims.squash",
"Prims.int",
"Prims.op_Modulus",
"Prims.Nil",
"FStar.Pervasives.pattern"
] | [] | module Vale.AES.GCTR
open Vale.Def.Opaque_s
open Vale.Def.Words_s
open Vale.Def.Words.Seq_s
open Vale.Def.Words.Four_s
open Vale.Def.Types_s
open Vale.Arch.Types
open FStar.Mul
open FStar.Seq
open Vale.AES.AES_s
open Vale.AES.GCTR_s
open Vale.AES.GCM_helpers
open FStar.Math.Lemmas
open Vale.Lib.Seqs
open Vale.AES.Types_helpers
#set-options "--z3rlimit 20 --max_fuel 1 --max_ifuel 0"
let lemma_counter_init x low64 low8 =
Vale.Poly1305.Bitvectors.lemma_bytes_and_mod1 low64;
Vale.Def.TypesNative_s.reveal_iand 64 low64 0xff;
assert (low8 == low64 % 256);
lo64_reveal ();
assert_norm (pow2_norm 32 == pow2_32); // OBSERVE
assert (low64 == x.lo0 + x.lo1 * pow2_32); // OBSERVE
assert (low64 % 256 == x.lo0 % 256);
()
let gctr_encrypt_block_offset (icb_BE:quad32) (plain_LE:quad32) (alg:algorithm) (key:seq nat32) (i:int) =
()
let gctr_encrypt_empty (icb_BE:quad32) (plain_LE cipher_LE:seq quad32) (alg:algorithm) (key:seq nat32) =
reveal_opaque (`%le_bytes_to_seq_quad32) le_bytes_to_seq_quad32;
gctr_encrypt_LE_reveal ();
let plain = slice (le_seq_quad32_to_bytes plain_LE) 0 0 in
let cipher = slice (le_seq_quad32_to_bytes cipher_LE) 0 0 in
assert (plain == empty);
assert (cipher == empty);
assert (length plain == 0);
assert (make_gctr_plain_LE plain == empty);
let num_extra = (length (make_gctr_plain_LE plain)) % 16 in
assert (num_extra == 0);
let plain_quads_LE = le_bytes_to_seq_quad32 plain in
let cipher_quads_LE = gctr_encrypt_recursive icb_BE plain_quads_LE alg key 0 in
assert (equal plain_quads_LE empty); // OBSERVE
assert (plain_quads_LE == empty);
assert (cipher_quads_LE == empty);
assert (equal (le_seq_quad32_to_bytes cipher_quads_LE) empty); // OBSERVEs
()
let gctr_partial_opaque_init alg plain cipher key icb =
gctr_partial_reveal ();
()
#restart-solver
let lemma_gctr_partial_append alg b1 b2 p1 c1 p2 c2 key icb1 icb2 =
gctr_partial_reveal ();
()
let gctr_partial_opaque_ignores_postfix alg bound plain plain' cipher cipher' key icb =
gctr_partial_reveal ();
// OBSERVE:
assert (forall i . 0 <= i /\ i < bound ==> index plain i == index (slice plain 0 bound) i);
assert (forall i . 0 <= i /\ i < bound ==> index plain' i == index (slice plain' 0 bound) i);
assert (forall i . 0 <= i /\ i < bound ==> index cipher i == index (slice cipher 0 bound) i);
assert (forall i . 0 <= i /\ i < bound ==> index cipher' i == index (slice cipher' 0 bound) i);
()
let gctr_partial_extend6 (alg:algorithm) (bound:nat) (plain cipher:seq quad32) (key:seq nat32) (icb:quad32)
=
gctr_partial_reveal ();
()
(*
let rec seq_map_i_indexed' (#a:Type) (#b:Type) (f:int->a->b) (s:seq a) (i:int) :
Tot (s':seq b { length s' == length s /\
(forall j . {:pattern index s' j} 0 <= j /\ j < length s ==> index s' j == f (i + j) (index s j))
})
(decreases (length s))
=
if length s = 0 then empty
else cons (f i (head s)) (seq_map_i_indexed f (tail s) (i + 1))
let rec test (icb_BE:quad32) (plain_LE:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (i:int) :
Lemma (ensures
(let gctr_encrypt_block_curried (j:int) (p:quad32) = gctr_encrypt_block icb_BE p alg key j in
gctr_encrypt_recursive icb_BE plain_LE alg key i == seq_map_i_indexed' gctr_encrypt_block_curried plain_LE i))
(decreases (length plain_LE))
=
let gctr_encrypt_block_curried (j:int) (p:quad32) = gctr_encrypt_block icb_BE p alg key j in
let g = gctr_encrypt_recursive icb_BE plain_LE alg key i in
let s = seq_map_i_indexed' gctr_encrypt_block_curried plain_LE i in
if length plain_LE = 0 then (
assert(equal (g) (s));
()
) else (
test icb_BE (tail plain_LE) alg key (i+1);
assert (gctr_encrypt_recursive icb_BE (tail plain_LE) alg key (i+1) == seq_map_i_indexed' gctr_encrypt_block_curried (tail plain_LE) (i+1))
)
*)
let rec gctr_encrypt_recursive_length (icb:quad32) (plain:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (i:int) : Lemma
(requires True)
(ensures length (gctr_encrypt_recursive icb plain alg key i) == length plain)
(decreases %[length plain])
[SMTPat (length (gctr_encrypt_recursive icb plain alg key i))]
=
if length plain = 0 then ()
else gctr_encrypt_recursive_length icb (tail plain) alg key (i + 1)
#reset-options "--z3rlimit 40"
let gctr_encrypt_length (icb_BE:quad32) (plain:gctr_plain_LE)
(alg:algorithm) (key:aes_key_LE alg) :
Lemma(length (gctr_encrypt_LE icb_BE plain alg key) == length plain)
[SMTPat (length (gctr_encrypt_LE icb_BE plain alg key))]
=
reveal_opaque (`%le_bytes_to_seq_quad32) le_bytes_to_seq_quad32;
gctr_encrypt_LE_reveal ();
let num_extra = (length plain) % 16 in
let result = gctr_encrypt_LE icb_BE plain alg key in
if num_extra = 0 then (
let plain_quads_LE = le_bytes_to_seq_quad32 plain in
gctr_encrypt_recursive_length icb_BE plain_quads_LE alg key 0
) else (
let full_bytes_len = (length plain) - num_extra in
let full_blocks, final_block = split plain full_bytes_len in
let full_quads_LE = le_bytes_to_seq_quad32 full_blocks in
let final_quad_LE = le_bytes_to_quad32 (pad_to_128_bits final_block) in
let cipher_quads_LE = gctr_encrypt_recursive icb_BE full_quads_LE alg key 0 in
let final_cipher_quad_LE = gctr_encrypt_block icb_BE final_quad_LE alg key (full_bytes_len / 16) in
let cipher_bytes_full_LE = le_seq_quad32_to_bytes cipher_quads_LE in
let final_cipher_bytes_LE = slice (le_quad32_to_bytes final_cipher_quad_LE) 0 num_extra in
gctr_encrypt_recursive_length icb_BE full_quads_LE alg key 0;
assert (length result == length cipher_bytes_full_LE + length final_cipher_bytes_LE);
assert (length cipher_quads_LE == length full_quads_LE);
assert (length cipher_bytes_full_LE == 16 * length cipher_quads_LE);
assert (16 * length full_quads_LE == length full_blocks);
assert (length cipher_bytes_full_LE == length full_blocks);
()
)
#reset-options
let rec gctr_indexed_helper (icb:quad32) (plain:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (i:int) : Lemma
(requires True)
(ensures (let cipher = gctr_encrypt_recursive icb plain alg key i in
length cipher == length plain /\
(forall j . {:pattern index cipher j} 0 <= j /\ j < length plain ==>
index cipher j == quad32_xor (index plain j) (aes_encrypt_BE alg key (inc32 icb (i + j)) ))))
(decreases %[length plain])
=
if length plain = 0 then ()
else
let tl = tail plain in
let cipher = gctr_encrypt_recursive icb plain alg key i in
let r_cipher = gctr_encrypt_recursive icb tl alg key (i+1) in
let helper (j:int) :
Lemma ((0 <= j /\ j < length plain) ==> (index cipher j == quad32_xor (index plain j) (aes_encrypt_BE alg key (inc32 icb (i + j)) )))
=
aes_encrypt_LE_reveal ();
if 0 < j && j < length plain then (
gctr_indexed_helper icb tl alg key (i+1);
assert(index r_cipher (j-1) == quad32_xor (index tl (j-1)) (aes_encrypt_BE alg key (inc32 icb (i + 1 + j - 1)) )) // OBSERVE
) else ()
in
FStar.Classical.forall_intro helper
let gctr_indexed (icb:quad32) (plain:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (cipher:seq quad32) : Lemma
(requires length cipher == length plain /\
(forall i . {:pattern index cipher i} 0 <= i /\ i < length cipher ==>
index cipher i == quad32_xor (index plain i) (aes_encrypt_BE alg key (inc32 icb i) )))
(ensures cipher == gctr_encrypt_recursive icb plain alg key 0)
=
gctr_indexed_helper icb plain alg key 0;
let c = gctr_encrypt_recursive icb plain alg key 0 in
assert(equal cipher c) // OBSERVE: Invoke extensionality lemmas
let gctr_partial_completed (alg:algorithm) (plain cipher:seq quad32) (key:seq nat32) (icb:quad32) =
gctr_indexed icb plain alg key cipher;
()
let gctr_partial_opaque_completed (alg:algorithm) (plain cipher:seq quad32) (key:seq nat32) (icb:quad32) : Lemma
(requires
is_aes_key_LE alg key /\
length plain == length cipher /\
length plain < pow2_32 /\
gctr_partial alg (length cipher) plain cipher key icb
)
(ensures cipher == gctr_encrypt_recursive icb plain alg key 0)
=
gctr_partial_reveal ();
gctr_partial_completed alg plain cipher key icb
let gctr_partial_to_full_basic (icb_BE:quad32) (plain:seq quad32) (alg:algorithm) (key:seq nat32) (cipher:seq quad32) =
gctr_encrypt_LE_reveal ();
let p = le_seq_quad32_to_bytes plain in
assert (length p % 16 == 0);
let plain_quads_LE = le_bytes_to_seq_quad32 p in
let cipher_quads_LE = gctr_encrypt_recursive icb_BE plain_quads_LE alg key 0 in
let cipher_bytes = le_seq_quad32_to_bytes cipher_quads_LE in
le_bytes_to_seq_quad32_to_bytes plain;
()
(*
Want to show that:
slice (le_seq_quad32_to_bytes (buffer128_as_seq(mem, out_b))) 0 num_bytes
==
gctr_encrypt_LE icb_BE (slice (le_seq_quad32_to_bytes (buffer128_as_seq(mem, in_b))) 0 num_bytes) ...
We know that
slice (buffer128_as_seq(mem, out_b) 0 num_blocks
==
gctr_encrypt_recursive icb_BE (slice buffer128_as_seq(mem, in_b) 0 num_blocks) ...
And we know that:
get_mem out_b num_blocks
==
gctr_encrypt_block(icb_BE, (get_mem inb num_blocks), alg, key, num_blocks);
Internally gctr_encrypt_LE will compute:
full_blocks, final_block = split (slice (le_seq_quad32_to_bytes (buffer128_as_seq(mem, in_b))) 0 num_bytes) (num_blocks * 16)
We'd like to show that
Step1: le_bytes_to_seq_quad32 full_blocks == slice buffer128_as_seq(mem, in_b) 0 num_blocks
and
Step2: final_block == slice (le_quad32_to_bytes (get_mem inb num_blocks)) 0 num_extra
Then we need to break down the byte-level effects of gctr_encrypt_block to show that even though the
padded version of final_block differs from (get_mem inb num_blocks), after we slice it at the end,
we end up with the same value
*)
let step1 (p:seq quad32) (num_bytes:nat{ num_bytes < 16 * length p }) : Lemma
(let num_extra = num_bytes % 16 in
let num_blocks = num_bytes / 16 in
let full_blocks, final_block = split (slice (le_seq_quad32_to_bytes p) 0 num_bytes) (num_blocks * 16) in
let full_quads_LE = le_bytes_to_seq_quad32 full_blocks in
let p_prefix = slice p 0 num_blocks in
p_prefix == full_quads_LE)
=
let num_extra = num_bytes % 16 in
let num_blocks = num_bytes / 16 in
let full_blocks, final_block = split (slice (le_seq_quad32_to_bytes p) 0 num_bytes) (num_blocks * 16) in
let full_quads_LE = le_bytes_to_seq_quad32 full_blocks in
let p_prefix = slice p 0 num_blocks in
assert (length full_blocks == num_blocks * 16);
assert (full_blocks == slice (slice (le_seq_quad32_to_bytes p) 0 num_bytes) 0 (num_blocks * 16));
assert (full_blocks == slice (le_seq_quad32_to_bytes p) 0 (num_blocks * 16));
slice_commutes_le_seq_quad32_to_bytes0 p num_blocks;
assert (full_blocks == le_seq_quad32_to_bytes (slice p 0 num_blocks));
le_bytes_to_seq_quad32_to_bytes (slice p 0 num_blocks);
assert (full_quads_LE == (slice p 0 num_blocks));
()
#reset-options "--smtencoding.elim_box true --z3rlimit 30"
let lemma_slice_orig_index (#a:Type) (s s':seq a) (m n:nat) : Lemma
(requires length s == length s' /\ m <= n /\ n <= length s /\ slice s m n == slice s' m n)
(ensures (forall (i:int).{:pattern (index s i) \/ (index s' i)} m <= i /\ i < n ==> index s i == index s' i))
=
let aux (i:nat{m <= i /\ i < n}) : Lemma (index s i == index s' i) =
lemma_index_slice s m n (i - m);
lemma_index_slice s' m n (i - m)
in Classical.forall_intro aux
let lemma_ishl_32 (x:nat32) (k:nat) : Lemma
(ensures ishl #pow2_32 x k == x * pow2 k % pow2_32)
=
Vale.Def.TypesNative_s.reveal_ishl 32 x k;
FStar.UInt.shift_left_value_lemma #32 x k;
()
let lemma_ishl_ixor_32 (x y:nat32) (k:nat) : Lemma
(ensures ishl #pow2_32 (ixor x y) k == ixor (ishl x k) (ishl y k))
=
Vale.Def.TypesNative_s.reveal_ishl 32 x k;
Vale.Def.TypesNative_s.reveal_ishl 32 y k;
Vale.Def.TypesNative_s.reveal_ishl 32 (ixor x y) k;
Vale.Def.TypesNative_s.reveal_ixor 32 x y;
Vale.Def.TypesNative_s.reveal_ixor 32 (ishl x k) (ishl y k);
FStar.UInt.shift_left_logxor_lemma #32 x y k;
()
unfold let pow2_24 = 0x1000000
let nat24 = natN pow2_24
let nat32_xor_bytewise_1_helper1 (x0 x0':nat8) (x1 x1':nat24) (x x':nat32) : Lemma
(requires
x == x0 + 0x100 * x1 /\
x' == x0' + 0x100 * x1' /\
x * 0x1000000 % 0x100000000 == x' * 0x1000000 % 0x100000000
)
(ensures x0 == x0')
=
()
let nat32_xor_bytewise_2_helper1 (x0 x0' x1 x1':nat16) (x x':nat32) : Lemma
(requires
x == x0 + 0x10000 * x1 /\
x' == x0' + 0x10000 * x1' /\
x * 0x10000 % 0x100000000 == x' * 0x10000 % 0x100000000
)
(ensures x0 == x0')
=
()
let nat32_xor_bytewise_3_helper1 (x0 x0':nat24) (x1 x1':nat8) (x x':nat32) : Lemma
(requires
x == x0 + 0x1000000 * x1 /\
x' == x0' + 0x1000000 * x1' /\
x * 0x100 % 0x100000000 == x' * 0x100 % 0x100000000
)
(ensures x0 == x0')
=
()
let nat32_xor_bytewise_1_helper2 (x x':nat32) (t t':four nat8) : Lemma
(requires
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
x * 0x1000000 % 0x100000000 == x' * 0x1000000 % 0x100000000
)
(ensures t.lo0 == t'.lo0)
=
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
let t123 = t1 + 0x100 * t2 + 0x10000 * t3 in
let t123' = t1' + 0x100 * t2' + 0x10000 * t3' in
assert_norm (four_to_nat 8 t == four_to_nat_unfold 8 t );
assert_norm (four_to_nat 8 t' == four_to_nat_unfold 8 t');
nat32_xor_bytewise_1_helper1 t0 t0' t123 t123' x x';
()
let nat32_xor_bytewise_2_helper2 (x x':nat32) (t t':four nat8) : Lemma
(requires
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
x * 0x10000 % 0x100000000 == x' * 0x10000 % 0x100000000
)
(ensures t.lo0 == t'.lo0 /\ t.lo1 == t'.lo1)
=
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
let t01 = t0 + 0x100 * t1 in
let t23 = t2 + 0x100 * t3 in
let t01' = t0' + 0x100 * t1' in
let t23' = t2' + 0x100 * t3' in
assert_norm (four_to_nat 8 t == four_to_nat_unfold 8 t );
assert_norm (four_to_nat 8 t' == four_to_nat_unfold 8 t');
nat32_xor_bytewise_2_helper1 t01 t01' t23 t23' x x';
()
let nat32_xor_bytewise_3_helper2 (x x':nat32) (t t':four nat8) : Lemma
(requires
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
x * 0x100 % 0x100000000 == x' * 0x100 % 0x100000000
)
(ensures t.lo0 == t'.lo0 /\ t.lo1 == t'.lo1 /\ t.hi2 == t'.hi2)
=
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
let t012 = t0 + 0x100 * t1 + 0x10000 * t2 in
let t012' = t0' + 0x100 * t1' + 0x10000 * t2' in
assert_norm (four_to_nat 8 t == four_to_nat_unfold 8 t );
assert_norm (four_to_nat 8 t' == four_to_nat_unfold 8 t');
nat32_xor_bytewise_3_helper1 t012 t012' t3 t3' x x';
()
let nat32_xor_bytewise_1_helper3 (k k':nat32) (s s':four nat8) : Lemma
(requires
k == four_to_nat 8 s /\
k' == four_to_nat 8 s' /\
s.lo0 == s'.lo0
)
(ensures k * 0x1000000 % 0x100000000 == k' * 0x1000000 % 0x100000000)
=
let Mkfour _ _ _ _ = s in
let Mkfour _ _ _ _ = s' in
assert_norm (four_to_nat 8 s == four_to_nat_unfold 8 s );
assert_norm (four_to_nat 8 s' == four_to_nat_unfold 8 s');
()
let nat32_xor_bytewise_2_helper3 (k k':nat32) (s s':four nat8) : Lemma
(requires
k == four_to_nat 8 s /\
k' == four_to_nat 8 s' /\
s.lo0 == s'.lo0 /\ s.lo1 == s'.lo1
) | false | false | Vale.AES.GCTR.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 30,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val nat32_xor_bytewise_2_helper3 (k k': nat32) (s s': four nat8)
: Lemma
(requires k == four_to_nat 8 s /\ k' == four_to_nat 8 s' /\ s.lo0 == s'.lo0 /\ s.lo1 == s'.lo1
) (ensures k * 0x10000 % 0x100000000 == k' * 0x10000 % 0x100000000) | [] | Vale.AES.GCTR.nat32_xor_bytewise_2_helper3 | {
"file_name": "vale/code/crypto/aes/Vale.AES.GCTR.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} |
k: Vale.Def.Types_s.nat32 ->
k': Vale.Def.Types_s.nat32 ->
s: Vale.Def.Words_s.four Vale.Def.Types_s.nat8 ->
s': Vale.Def.Words_s.four Vale.Def.Types_s.nat8
-> FStar.Pervasives.Lemma
(requires
k == Vale.Def.Words.Four_s.four_to_nat 8 s /\ k' == Vale.Def.Words.Four_s.four_to_nat 8 s' /\
Mkfour?.lo0 s == Mkfour?.lo0 s' /\ Mkfour?.lo1 s == Mkfour?.lo1 s')
(ensures k * 0x10000 % 0x100000000 == k' * 0x10000 % 0x100000000) | {
"end_col": 4,
"end_line": 408,
"start_col": 3,
"start_line": 403
} |
FStar.Pervasives.Lemma | val gctr_encrypt_length
(icb_BE: quad32)
(plain: gctr_plain_LE)
(alg: algorithm)
(key: aes_key_LE alg)
: Lemma (length (gctr_encrypt_LE icb_BE plain alg key) == length plain)
[SMTPat (length (gctr_encrypt_LE icb_BE plain alg key))] | [
{
"abbrev": false,
"full_module": "Vale.AES.Types_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Lib.Seqs",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCM_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCTR_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words.Four_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words.Seq_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Lib.Seqs",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCM_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCTR_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let gctr_encrypt_length (icb_BE:quad32) (plain:gctr_plain_LE)
(alg:algorithm) (key:aes_key_LE alg) :
Lemma(length (gctr_encrypt_LE icb_BE plain alg key) == length plain)
[SMTPat (length (gctr_encrypt_LE icb_BE plain alg key))]
=
reveal_opaque (`%le_bytes_to_seq_quad32) le_bytes_to_seq_quad32;
gctr_encrypt_LE_reveal ();
let num_extra = (length plain) % 16 in
let result = gctr_encrypt_LE icb_BE plain alg key in
if num_extra = 0 then (
let plain_quads_LE = le_bytes_to_seq_quad32 plain in
gctr_encrypt_recursive_length icb_BE plain_quads_LE alg key 0
) else (
let full_bytes_len = (length plain) - num_extra in
let full_blocks, final_block = split plain full_bytes_len in
let full_quads_LE = le_bytes_to_seq_quad32 full_blocks in
let final_quad_LE = le_bytes_to_quad32 (pad_to_128_bits final_block) in
let cipher_quads_LE = gctr_encrypt_recursive icb_BE full_quads_LE alg key 0 in
let final_cipher_quad_LE = gctr_encrypt_block icb_BE final_quad_LE alg key (full_bytes_len / 16) in
let cipher_bytes_full_LE = le_seq_quad32_to_bytes cipher_quads_LE in
let final_cipher_bytes_LE = slice (le_quad32_to_bytes final_cipher_quad_LE) 0 num_extra in
gctr_encrypt_recursive_length icb_BE full_quads_LE alg key 0;
assert (length result == length cipher_bytes_full_LE + length final_cipher_bytes_LE);
assert (length cipher_quads_LE == length full_quads_LE);
assert (length cipher_bytes_full_LE == 16 * length cipher_quads_LE);
assert (16 * length full_quads_LE == length full_blocks);
assert (length cipher_bytes_full_LE == length full_blocks);
()
) | val gctr_encrypt_length
(icb_BE: quad32)
(plain: gctr_plain_LE)
(alg: algorithm)
(key: aes_key_LE alg)
: Lemma (length (gctr_encrypt_LE icb_BE plain alg key) == length plain)
[SMTPat (length (gctr_encrypt_LE icb_BE plain alg key))]
let gctr_encrypt_length
(icb_BE: quad32)
(plain: gctr_plain_LE)
(alg: algorithm)
(key: aes_key_LE alg)
: Lemma (length (gctr_encrypt_LE icb_BE plain alg key) == length plain)
[SMTPat (length (gctr_encrypt_LE icb_BE plain alg key))] = | false | null | true | reveal_opaque (`%le_bytes_to_seq_quad32) le_bytes_to_seq_quad32;
gctr_encrypt_LE_reveal ();
let num_extra = (length plain) % 16 in
let result = gctr_encrypt_LE icb_BE plain alg key in
if num_extra = 0
then
(let plain_quads_LE = le_bytes_to_seq_quad32 plain in
gctr_encrypt_recursive_length icb_BE plain_quads_LE alg key 0)
else
(let full_bytes_len = (length plain) - num_extra in
let full_blocks, final_block = split plain full_bytes_len in
let full_quads_LE = le_bytes_to_seq_quad32 full_blocks in
let final_quad_LE = le_bytes_to_quad32 (pad_to_128_bits final_block) in
let cipher_quads_LE = gctr_encrypt_recursive icb_BE full_quads_LE alg key 0 in
let final_cipher_quad_LE =
gctr_encrypt_block icb_BE final_quad_LE alg key (full_bytes_len / 16)
in
let cipher_bytes_full_LE = le_seq_quad32_to_bytes cipher_quads_LE in
let final_cipher_bytes_LE = slice (le_quad32_to_bytes final_cipher_quad_LE) 0 num_extra in
gctr_encrypt_recursive_length icb_BE full_quads_LE alg key 0;
assert (length result == length cipher_bytes_full_LE + length final_cipher_bytes_LE);
assert (length cipher_quads_LE == length full_quads_LE);
assert (length cipher_bytes_full_LE == 16 * length cipher_quads_LE);
assert (16 * length full_quads_LE == length full_blocks);
assert (length cipher_bytes_full_LE == length full_blocks);
()) | {
"checked_file": "Vale.AES.GCTR.fst.checked",
"dependencies": [
"Vale.Poly1305.Bitvectors.fsti.checked",
"Vale.Lib.Seqs.fsti.checked",
"Vale.Def.Words_s.fsti.checked",
"Vale.Def.Words.Seq_s.fsti.checked",
"Vale.Def.Words.Four_s.fsti.checked",
"Vale.Def.TypesNative_s.fst.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Opaque_s.fsti.checked",
"Vale.Arch.Types.fsti.checked",
"Vale.AES.Types_helpers.fsti.checked",
"Vale.AES.GCTR_s.fst.checked",
"Vale.AES.GCM_helpers.fsti.checked",
"Vale.AES.AES_s.fst.checked",
"prims.fst.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.Properties.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Classical.fsti.checked",
"FStar.Calc.fsti.checked"
],
"interface_file": true,
"source_file": "Vale.AES.GCTR.fst"
} | [
"lemma"
] | [
"Vale.Def.Types_s.quad32",
"Vale.AES.GCTR_s.gctr_plain_LE",
"Vale.AES.AES_common_s.algorithm",
"Vale.AES.AES_s.aes_key_LE",
"Prims.op_Equality",
"Prims.int",
"Vale.AES.GCTR.gctr_encrypt_recursive_length",
"FStar.Seq.Base.seq",
"Vale.Def.Types_s.le_bytes_to_seq_quad32",
"Prims.bool",
"Vale.Def.Types_s.nat8",
"Prims.unit",
"Prims._assert",
"Prims.eq2",
"Prims.nat",
"FStar.Seq.Base.length",
"FStar.Mul.op_Star",
"Prims.op_Addition",
"Vale.Def.Words_s.nat8",
"FStar.Seq.Base.slice",
"Vale.Def.Types_s.le_quad32_to_bytes",
"Vale.Def.Types_s.le_seq_quad32_to_bytes",
"Vale.AES.GCTR_s.gctr_encrypt_block",
"Prims.op_Division",
"Vale.AES.GCTR_s.gctr_encrypt_recursive",
"Vale.Def.Types_s.le_bytes_to_quad32",
"Vale.AES.GCTR_s.pad_to_128_bits",
"FStar.Pervasives.Native.tuple2",
"FStar.Seq.Properties.split",
"Prims.op_Subtraction",
"Vale.AES.GCTR_s.gctr_encrypt_LE",
"Prims.op_Modulus",
"Vale.AES.GCTR_s.gctr_encrypt_LE_reveal",
"FStar.Pervasives.reveal_opaque",
"Prims.l_True",
"Prims.squash",
"Prims.Cons",
"FStar.Pervasives.pattern",
"FStar.Pervasives.smt_pat",
"Prims.Nil"
] | [] | module Vale.AES.GCTR
open Vale.Def.Opaque_s
open Vale.Def.Words_s
open Vale.Def.Words.Seq_s
open Vale.Def.Words.Four_s
open Vale.Def.Types_s
open Vale.Arch.Types
open FStar.Mul
open FStar.Seq
open Vale.AES.AES_s
open Vale.AES.GCTR_s
open Vale.AES.GCM_helpers
open FStar.Math.Lemmas
open Vale.Lib.Seqs
open Vale.AES.Types_helpers
#set-options "--z3rlimit 20 --max_fuel 1 --max_ifuel 0"
let lemma_counter_init x low64 low8 =
Vale.Poly1305.Bitvectors.lemma_bytes_and_mod1 low64;
Vale.Def.TypesNative_s.reveal_iand 64 low64 0xff;
assert (low8 == low64 % 256);
lo64_reveal ();
assert_norm (pow2_norm 32 == pow2_32); // OBSERVE
assert (low64 == x.lo0 + x.lo1 * pow2_32); // OBSERVE
assert (low64 % 256 == x.lo0 % 256);
()
let gctr_encrypt_block_offset (icb_BE:quad32) (plain_LE:quad32) (alg:algorithm) (key:seq nat32) (i:int) =
()
let gctr_encrypt_empty (icb_BE:quad32) (plain_LE cipher_LE:seq quad32) (alg:algorithm) (key:seq nat32) =
reveal_opaque (`%le_bytes_to_seq_quad32) le_bytes_to_seq_quad32;
gctr_encrypt_LE_reveal ();
let plain = slice (le_seq_quad32_to_bytes plain_LE) 0 0 in
let cipher = slice (le_seq_quad32_to_bytes cipher_LE) 0 0 in
assert (plain == empty);
assert (cipher == empty);
assert (length plain == 0);
assert (make_gctr_plain_LE plain == empty);
let num_extra = (length (make_gctr_plain_LE plain)) % 16 in
assert (num_extra == 0);
let plain_quads_LE = le_bytes_to_seq_quad32 plain in
let cipher_quads_LE = gctr_encrypt_recursive icb_BE plain_quads_LE alg key 0 in
assert (equal plain_quads_LE empty); // OBSERVE
assert (plain_quads_LE == empty);
assert (cipher_quads_LE == empty);
assert (equal (le_seq_quad32_to_bytes cipher_quads_LE) empty); // OBSERVEs
()
let gctr_partial_opaque_init alg plain cipher key icb =
gctr_partial_reveal ();
()
#restart-solver
let lemma_gctr_partial_append alg b1 b2 p1 c1 p2 c2 key icb1 icb2 =
gctr_partial_reveal ();
()
let gctr_partial_opaque_ignores_postfix alg bound plain plain' cipher cipher' key icb =
gctr_partial_reveal ();
// OBSERVE:
assert (forall i . 0 <= i /\ i < bound ==> index plain i == index (slice plain 0 bound) i);
assert (forall i . 0 <= i /\ i < bound ==> index plain' i == index (slice plain' 0 bound) i);
assert (forall i . 0 <= i /\ i < bound ==> index cipher i == index (slice cipher 0 bound) i);
assert (forall i . 0 <= i /\ i < bound ==> index cipher' i == index (slice cipher' 0 bound) i);
()
let gctr_partial_extend6 (alg:algorithm) (bound:nat) (plain cipher:seq quad32) (key:seq nat32) (icb:quad32)
=
gctr_partial_reveal ();
()
(*
let rec seq_map_i_indexed' (#a:Type) (#b:Type) (f:int->a->b) (s:seq a) (i:int) :
Tot (s':seq b { length s' == length s /\
(forall j . {:pattern index s' j} 0 <= j /\ j < length s ==> index s' j == f (i + j) (index s j))
})
(decreases (length s))
=
if length s = 0 then empty
else cons (f i (head s)) (seq_map_i_indexed f (tail s) (i + 1))
let rec test (icb_BE:quad32) (plain_LE:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (i:int) :
Lemma (ensures
(let gctr_encrypt_block_curried (j:int) (p:quad32) = gctr_encrypt_block icb_BE p alg key j in
gctr_encrypt_recursive icb_BE plain_LE alg key i == seq_map_i_indexed' gctr_encrypt_block_curried plain_LE i))
(decreases (length plain_LE))
=
let gctr_encrypt_block_curried (j:int) (p:quad32) = gctr_encrypt_block icb_BE p alg key j in
let g = gctr_encrypt_recursive icb_BE plain_LE alg key i in
let s = seq_map_i_indexed' gctr_encrypt_block_curried plain_LE i in
if length plain_LE = 0 then (
assert(equal (g) (s));
()
) else (
test icb_BE (tail plain_LE) alg key (i+1);
assert (gctr_encrypt_recursive icb_BE (tail plain_LE) alg key (i+1) == seq_map_i_indexed' gctr_encrypt_block_curried (tail plain_LE) (i+1))
)
*)
let rec gctr_encrypt_recursive_length (icb:quad32) (plain:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (i:int) : Lemma
(requires True)
(ensures length (gctr_encrypt_recursive icb plain alg key i) == length plain)
(decreases %[length plain])
[SMTPat (length (gctr_encrypt_recursive icb plain alg key i))]
=
if length plain = 0 then ()
else gctr_encrypt_recursive_length icb (tail plain) alg key (i + 1)
#reset-options "--z3rlimit 40"
let gctr_encrypt_length (icb_BE:quad32) (plain:gctr_plain_LE)
(alg:algorithm) (key:aes_key_LE alg) :
Lemma(length (gctr_encrypt_LE icb_BE plain alg key) == length plain)
[SMTPat (length (gctr_encrypt_LE icb_BE plain alg key))] | false | false | Vale.AES.GCTR.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 40,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val gctr_encrypt_length
(icb_BE: quad32)
(plain: gctr_plain_LE)
(alg: algorithm)
(key: aes_key_LE alg)
: Lemma (length (gctr_encrypt_LE icb_BE plain alg key) == length plain)
[SMTPat (length (gctr_encrypt_LE icb_BE plain alg key))] | [] | Vale.AES.GCTR.gctr_encrypt_length | {
"file_name": "vale/code/crypto/aes/Vale.AES.GCTR.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} |
icb_BE: Vale.Def.Types_s.quad32 ->
plain: Vale.AES.GCTR_s.gctr_plain_LE ->
alg: Vale.AES.AES_common_s.algorithm ->
key: Vale.AES.AES_s.aes_key_LE alg
-> FStar.Pervasives.Lemma
(ensures
FStar.Seq.Base.length (Vale.AES.GCTR_s.gctr_encrypt_LE icb_BE plain alg key) ==
FStar.Seq.Base.length plain)
[SMTPat (FStar.Seq.Base.length (Vale.AES.GCTR_s.gctr_encrypt_LE icb_BE plain alg key))] | {
"end_col": 3,
"end_line": 148,
"start_col": 2,
"start_line": 121
} |
FStar.Pervasives.Lemma | val nat32_xor_bytewise_3_helper2 (x x': nat32) (t t': four nat8)
: Lemma
(requires
x == four_to_nat 8 t /\ x' == four_to_nat 8 t' /\
x * 0x100 % 0x100000000 == x' * 0x100 % 0x100000000)
(ensures t.lo0 == t'.lo0 /\ t.lo1 == t'.lo1 /\ t.hi2 == t'.hi2) | [
{
"abbrev": false,
"full_module": "FStar.Seq.Properties",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.Types_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Lib.Seqs",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCM_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCTR_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words.Four_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words.Seq_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Lib.Seqs",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCM_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCTR_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let nat32_xor_bytewise_3_helper2 (x x':nat32) (t t':four nat8) : Lemma
(requires
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
x * 0x100 % 0x100000000 == x' * 0x100 % 0x100000000
)
(ensures t.lo0 == t'.lo0 /\ t.lo1 == t'.lo1 /\ t.hi2 == t'.hi2)
=
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
let t012 = t0 + 0x100 * t1 + 0x10000 * t2 in
let t012' = t0' + 0x100 * t1' + 0x10000 * t2' in
assert_norm (four_to_nat 8 t == four_to_nat_unfold 8 t );
assert_norm (four_to_nat 8 t' == four_to_nat_unfold 8 t');
nat32_xor_bytewise_3_helper1 t012 t012' t3 t3' x x';
() | val nat32_xor_bytewise_3_helper2 (x x': nat32) (t t': four nat8)
: Lemma
(requires
x == four_to_nat 8 t /\ x' == four_to_nat 8 t' /\
x * 0x100 % 0x100000000 == x' * 0x100 % 0x100000000)
(ensures t.lo0 == t'.lo0 /\ t.lo1 == t'.lo1 /\ t.hi2 == t'.hi2)
let nat32_xor_bytewise_3_helper2 (x x': nat32) (t t': four nat8)
: Lemma
(requires
x == four_to_nat 8 t /\ x' == four_to_nat 8 t' /\
x * 0x100 % 0x100000000 == x' * 0x100 % 0x100000000)
(ensures t.lo0 == t'.lo0 /\ t.lo1 == t'.lo1 /\ t.hi2 == t'.hi2) = | false | null | true | let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
let t012 = t0 + 0x100 * t1 + 0x10000 * t2 in
let t012' = t0' + 0x100 * t1' + 0x10000 * t2' in
assert_norm (four_to_nat 8 t == four_to_nat_unfold 8 t);
assert_norm (four_to_nat 8 t' == four_to_nat_unfold 8 t');
nat32_xor_bytewise_3_helper1 t012 t012' t3 t3' x x';
() | {
"checked_file": "Vale.AES.GCTR.fst.checked",
"dependencies": [
"Vale.Poly1305.Bitvectors.fsti.checked",
"Vale.Lib.Seqs.fsti.checked",
"Vale.Def.Words_s.fsti.checked",
"Vale.Def.Words.Seq_s.fsti.checked",
"Vale.Def.Words.Four_s.fsti.checked",
"Vale.Def.TypesNative_s.fst.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Opaque_s.fsti.checked",
"Vale.Arch.Types.fsti.checked",
"Vale.AES.Types_helpers.fsti.checked",
"Vale.AES.GCTR_s.fst.checked",
"Vale.AES.GCM_helpers.fsti.checked",
"Vale.AES.AES_s.fst.checked",
"prims.fst.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.Properties.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Classical.fsti.checked",
"FStar.Calc.fsti.checked"
],
"interface_file": true,
"source_file": "Vale.AES.GCTR.fst"
} | [
"lemma"
] | [
"Vale.Def.Types_s.nat32",
"Vale.Def.Words_s.four",
"Vale.Def.Types_s.nat8",
"Prims.unit",
"Vale.AES.GCTR.nat32_xor_bytewise_3_helper1",
"FStar.Pervasives.assert_norm",
"Prims.eq2",
"Vale.Def.Words_s.natN",
"Prims.pow2",
"FStar.Mul.op_Star",
"Vale.Def.Words.Four_s.four_to_nat",
"Vale.Def.Words.Four_s.four_to_nat_unfold",
"Prims.int",
"Prims.op_Addition",
"Vale.Def.Words_s.nat8",
"Prims.l_and",
"Vale.Def.Words_s.pow2_32",
"Prims.op_Modulus",
"Prims.squash",
"Vale.Def.Words_s.__proj__Mkfour__item__lo0",
"Vale.Def.Words_s.__proj__Mkfour__item__lo1",
"Vale.Def.Words_s.__proj__Mkfour__item__hi2",
"Prims.Nil",
"FStar.Pervasives.pattern"
] | [] | module Vale.AES.GCTR
open Vale.Def.Opaque_s
open Vale.Def.Words_s
open Vale.Def.Words.Seq_s
open Vale.Def.Words.Four_s
open Vale.Def.Types_s
open Vale.Arch.Types
open FStar.Mul
open FStar.Seq
open Vale.AES.AES_s
open Vale.AES.GCTR_s
open Vale.AES.GCM_helpers
open FStar.Math.Lemmas
open Vale.Lib.Seqs
open Vale.AES.Types_helpers
#set-options "--z3rlimit 20 --max_fuel 1 --max_ifuel 0"
let lemma_counter_init x low64 low8 =
Vale.Poly1305.Bitvectors.lemma_bytes_and_mod1 low64;
Vale.Def.TypesNative_s.reveal_iand 64 low64 0xff;
assert (low8 == low64 % 256);
lo64_reveal ();
assert_norm (pow2_norm 32 == pow2_32); // OBSERVE
assert (low64 == x.lo0 + x.lo1 * pow2_32); // OBSERVE
assert (low64 % 256 == x.lo0 % 256);
()
let gctr_encrypt_block_offset (icb_BE:quad32) (plain_LE:quad32) (alg:algorithm) (key:seq nat32) (i:int) =
()
let gctr_encrypt_empty (icb_BE:quad32) (plain_LE cipher_LE:seq quad32) (alg:algorithm) (key:seq nat32) =
reveal_opaque (`%le_bytes_to_seq_quad32) le_bytes_to_seq_quad32;
gctr_encrypt_LE_reveal ();
let plain = slice (le_seq_quad32_to_bytes plain_LE) 0 0 in
let cipher = slice (le_seq_quad32_to_bytes cipher_LE) 0 0 in
assert (plain == empty);
assert (cipher == empty);
assert (length plain == 0);
assert (make_gctr_plain_LE plain == empty);
let num_extra = (length (make_gctr_plain_LE plain)) % 16 in
assert (num_extra == 0);
let plain_quads_LE = le_bytes_to_seq_quad32 plain in
let cipher_quads_LE = gctr_encrypt_recursive icb_BE plain_quads_LE alg key 0 in
assert (equal plain_quads_LE empty); // OBSERVE
assert (plain_quads_LE == empty);
assert (cipher_quads_LE == empty);
assert (equal (le_seq_quad32_to_bytes cipher_quads_LE) empty); // OBSERVEs
()
let gctr_partial_opaque_init alg plain cipher key icb =
gctr_partial_reveal ();
()
#restart-solver
let lemma_gctr_partial_append alg b1 b2 p1 c1 p2 c2 key icb1 icb2 =
gctr_partial_reveal ();
()
let gctr_partial_opaque_ignores_postfix alg bound plain plain' cipher cipher' key icb =
gctr_partial_reveal ();
// OBSERVE:
assert (forall i . 0 <= i /\ i < bound ==> index plain i == index (slice plain 0 bound) i);
assert (forall i . 0 <= i /\ i < bound ==> index plain' i == index (slice plain' 0 bound) i);
assert (forall i . 0 <= i /\ i < bound ==> index cipher i == index (slice cipher 0 bound) i);
assert (forall i . 0 <= i /\ i < bound ==> index cipher' i == index (slice cipher' 0 bound) i);
()
let gctr_partial_extend6 (alg:algorithm) (bound:nat) (plain cipher:seq quad32) (key:seq nat32) (icb:quad32)
=
gctr_partial_reveal ();
()
(*
let rec seq_map_i_indexed' (#a:Type) (#b:Type) (f:int->a->b) (s:seq a) (i:int) :
Tot (s':seq b { length s' == length s /\
(forall j . {:pattern index s' j} 0 <= j /\ j < length s ==> index s' j == f (i + j) (index s j))
})
(decreases (length s))
=
if length s = 0 then empty
else cons (f i (head s)) (seq_map_i_indexed f (tail s) (i + 1))
let rec test (icb_BE:quad32) (plain_LE:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (i:int) :
Lemma (ensures
(let gctr_encrypt_block_curried (j:int) (p:quad32) = gctr_encrypt_block icb_BE p alg key j in
gctr_encrypt_recursive icb_BE plain_LE alg key i == seq_map_i_indexed' gctr_encrypt_block_curried plain_LE i))
(decreases (length plain_LE))
=
let gctr_encrypt_block_curried (j:int) (p:quad32) = gctr_encrypt_block icb_BE p alg key j in
let g = gctr_encrypt_recursive icb_BE plain_LE alg key i in
let s = seq_map_i_indexed' gctr_encrypt_block_curried plain_LE i in
if length plain_LE = 0 then (
assert(equal (g) (s));
()
) else (
test icb_BE (tail plain_LE) alg key (i+1);
assert (gctr_encrypt_recursive icb_BE (tail plain_LE) alg key (i+1) == seq_map_i_indexed' gctr_encrypt_block_curried (tail plain_LE) (i+1))
)
*)
let rec gctr_encrypt_recursive_length (icb:quad32) (plain:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (i:int) : Lemma
(requires True)
(ensures length (gctr_encrypt_recursive icb plain alg key i) == length plain)
(decreases %[length plain])
[SMTPat (length (gctr_encrypt_recursive icb plain alg key i))]
=
if length plain = 0 then ()
else gctr_encrypt_recursive_length icb (tail plain) alg key (i + 1)
#reset-options "--z3rlimit 40"
let gctr_encrypt_length (icb_BE:quad32) (plain:gctr_plain_LE)
(alg:algorithm) (key:aes_key_LE alg) :
Lemma(length (gctr_encrypt_LE icb_BE plain alg key) == length plain)
[SMTPat (length (gctr_encrypt_LE icb_BE plain alg key))]
=
reveal_opaque (`%le_bytes_to_seq_quad32) le_bytes_to_seq_quad32;
gctr_encrypt_LE_reveal ();
let num_extra = (length plain) % 16 in
let result = gctr_encrypt_LE icb_BE plain alg key in
if num_extra = 0 then (
let plain_quads_LE = le_bytes_to_seq_quad32 plain in
gctr_encrypt_recursive_length icb_BE plain_quads_LE alg key 0
) else (
let full_bytes_len = (length plain) - num_extra in
let full_blocks, final_block = split plain full_bytes_len in
let full_quads_LE = le_bytes_to_seq_quad32 full_blocks in
let final_quad_LE = le_bytes_to_quad32 (pad_to_128_bits final_block) in
let cipher_quads_LE = gctr_encrypt_recursive icb_BE full_quads_LE alg key 0 in
let final_cipher_quad_LE = gctr_encrypt_block icb_BE final_quad_LE alg key (full_bytes_len / 16) in
let cipher_bytes_full_LE = le_seq_quad32_to_bytes cipher_quads_LE in
let final_cipher_bytes_LE = slice (le_quad32_to_bytes final_cipher_quad_LE) 0 num_extra in
gctr_encrypt_recursive_length icb_BE full_quads_LE alg key 0;
assert (length result == length cipher_bytes_full_LE + length final_cipher_bytes_LE);
assert (length cipher_quads_LE == length full_quads_LE);
assert (length cipher_bytes_full_LE == 16 * length cipher_quads_LE);
assert (16 * length full_quads_LE == length full_blocks);
assert (length cipher_bytes_full_LE == length full_blocks);
()
)
#reset-options
let rec gctr_indexed_helper (icb:quad32) (plain:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (i:int) : Lemma
(requires True)
(ensures (let cipher = gctr_encrypt_recursive icb plain alg key i in
length cipher == length plain /\
(forall j . {:pattern index cipher j} 0 <= j /\ j < length plain ==>
index cipher j == quad32_xor (index plain j) (aes_encrypt_BE alg key (inc32 icb (i + j)) ))))
(decreases %[length plain])
=
if length plain = 0 then ()
else
let tl = tail plain in
let cipher = gctr_encrypt_recursive icb plain alg key i in
let r_cipher = gctr_encrypt_recursive icb tl alg key (i+1) in
let helper (j:int) :
Lemma ((0 <= j /\ j < length plain) ==> (index cipher j == quad32_xor (index plain j) (aes_encrypt_BE alg key (inc32 icb (i + j)) )))
=
aes_encrypt_LE_reveal ();
if 0 < j && j < length plain then (
gctr_indexed_helper icb tl alg key (i+1);
assert(index r_cipher (j-1) == quad32_xor (index tl (j-1)) (aes_encrypt_BE alg key (inc32 icb (i + 1 + j - 1)) )) // OBSERVE
) else ()
in
FStar.Classical.forall_intro helper
let gctr_indexed (icb:quad32) (plain:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (cipher:seq quad32) : Lemma
(requires length cipher == length plain /\
(forall i . {:pattern index cipher i} 0 <= i /\ i < length cipher ==>
index cipher i == quad32_xor (index plain i) (aes_encrypt_BE alg key (inc32 icb i) )))
(ensures cipher == gctr_encrypt_recursive icb plain alg key 0)
=
gctr_indexed_helper icb plain alg key 0;
let c = gctr_encrypt_recursive icb plain alg key 0 in
assert(equal cipher c) // OBSERVE: Invoke extensionality lemmas
let gctr_partial_completed (alg:algorithm) (plain cipher:seq quad32) (key:seq nat32) (icb:quad32) =
gctr_indexed icb plain alg key cipher;
()
let gctr_partial_opaque_completed (alg:algorithm) (plain cipher:seq quad32) (key:seq nat32) (icb:quad32) : Lemma
(requires
is_aes_key_LE alg key /\
length plain == length cipher /\
length plain < pow2_32 /\
gctr_partial alg (length cipher) plain cipher key icb
)
(ensures cipher == gctr_encrypt_recursive icb plain alg key 0)
=
gctr_partial_reveal ();
gctr_partial_completed alg plain cipher key icb
let gctr_partial_to_full_basic (icb_BE:quad32) (plain:seq quad32) (alg:algorithm) (key:seq nat32) (cipher:seq quad32) =
gctr_encrypt_LE_reveal ();
let p = le_seq_quad32_to_bytes plain in
assert (length p % 16 == 0);
let plain_quads_LE = le_bytes_to_seq_quad32 p in
let cipher_quads_LE = gctr_encrypt_recursive icb_BE plain_quads_LE alg key 0 in
let cipher_bytes = le_seq_quad32_to_bytes cipher_quads_LE in
le_bytes_to_seq_quad32_to_bytes plain;
()
(*
Want to show that:
slice (le_seq_quad32_to_bytes (buffer128_as_seq(mem, out_b))) 0 num_bytes
==
gctr_encrypt_LE icb_BE (slice (le_seq_quad32_to_bytes (buffer128_as_seq(mem, in_b))) 0 num_bytes) ...
We know that
slice (buffer128_as_seq(mem, out_b) 0 num_blocks
==
gctr_encrypt_recursive icb_BE (slice buffer128_as_seq(mem, in_b) 0 num_blocks) ...
And we know that:
get_mem out_b num_blocks
==
gctr_encrypt_block(icb_BE, (get_mem inb num_blocks), alg, key, num_blocks);
Internally gctr_encrypt_LE will compute:
full_blocks, final_block = split (slice (le_seq_quad32_to_bytes (buffer128_as_seq(mem, in_b))) 0 num_bytes) (num_blocks * 16)
We'd like to show that
Step1: le_bytes_to_seq_quad32 full_blocks == slice buffer128_as_seq(mem, in_b) 0 num_blocks
and
Step2: final_block == slice (le_quad32_to_bytes (get_mem inb num_blocks)) 0 num_extra
Then we need to break down the byte-level effects of gctr_encrypt_block to show that even though the
padded version of final_block differs from (get_mem inb num_blocks), after we slice it at the end,
we end up with the same value
*)
let step1 (p:seq quad32) (num_bytes:nat{ num_bytes < 16 * length p }) : Lemma
(let num_extra = num_bytes % 16 in
let num_blocks = num_bytes / 16 in
let full_blocks, final_block = split (slice (le_seq_quad32_to_bytes p) 0 num_bytes) (num_blocks * 16) in
let full_quads_LE = le_bytes_to_seq_quad32 full_blocks in
let p_prefix = slice p 0 num_blocks in
p_prefix == full_quads_LE)
=
let num_extra = num_bytes % 16 in
let num_blocks = num_bytes / 16 in
let full_blocks, final_block = split (slice (le_seq_quad32_to_bytes p) 0 num_bytes) (num_blocks * 16) in
let full_quads_LE = le_bytes_to_seq_quad32 full_blocks in
let p_prefix = slice p 0 num_blocks in
assert (length full_blocks == num_blocks * 16);
assert (full_blocks == slice (slice (le_seq_quad32_to_bytes p) 0 num_bytes) 0 (num_blocks * 16));
assert (full_blocks == slice (le_seq_quad32_to_bytes p) 0 (num_blocks * 16));
slice_commutes_le_seq_quad32_to_bytes0 p num_blocks;
assert (full_blocks == le_seq_quad32_to_bytes (slice p 0 num_blocks));
le_bytes_to_seq_quad32_to_bytes (slice p 0 num_blocks);
assert (full_quads_LE == (slice p 0 num_blocks));
()
#reset-options "--smtencoding.elim_box true --z3rlimit 30"
let lemma_slice_orig_index (#a:Type) (s s':seq a) (m n:nat) : Lemma
(requires length s == length s' /\ m <= n /\ n <= length s /\ slice s m n == slice s' m n)
(ensures (forall (i:int).{:pattern (index s i) \/ (index s' i)} m <= i /\ i < n ==> index s i == index s' i))
=
let aux (i:nat{m <= i /\ i < n}) : Lemma (index s i == index s' i) =
lemma_index_slice s m n (i - m);
lemma_index_slice s' m n (i - m)
in Classical.forall_intro aux
let lemma_ishl_32 (x:nat32) (k:nat) : Lemma
(ensures ishl #pow2_32 x k == x * pow2 k % pow2_32)
=
Vale.Def.TypesNative_s.reveal_ishl 32 x k;
FStar.UInt.shift_left_value_lemma #32 x k;
()
let lemma_ishl_ixor_32 (x y:nat32) (k:nat) : Lemma
(ensures ishl #pow2_32 (ixor x y) k == ixor (ishl x k) (ishl y k))
=
Vale.Def.TypesNative_s.reveal_ishl 32 x k;
Vale.Def.TypesNative_s.reveal_ishl 32 y k;
Vale.Def.TypesNative_s.reveal_ishl 32 (ixor x y) k;
Vale.Def.TypesNative_s.reveal_ixor 32 x y;
Vale.Def.TypesNative_s.reveal_ixor 32 (ishl x k) (ishl y k);
FStar.UInt.shift_left_logxor_lemma #32 x y k;
()
unfold let pow2_24 = 0x1000000
let nat24 = natN pow2_24
let nat32_xor_bytewise_1_helper1 (x0 x0':nat8) (x1 x1':nat24) (x x':nat32) : Lemma
(requires
x == x0 + 0x100 * x1 /\
x' == x0' + 0x100 * x1' /\
x * 0x1000000 % 0x100000000 == x' * 0x1000000 % 0x100000000
)
(ensures x0 == x0')
=
()
let nat32_xor_bytewise_2_helper1 (x0 x0' x1 x1':nat16) (x x':nat32) : Lemma
(requires
x == x0 + 0x10000 * x1 /\
x' == x0' + 0x10000 * x1' /\
x * 0x10000 % 0x100000000 == x' * 0x10000 % 0x100000000
)
(ensures x0 == x0')
=
()
let nat32_xor_bytewise_3_helper1 (x0 x0':nat24) (x1 x1':nat8) (x x':nat32) : Lemma
(requires
x == x0 + 0x1000000 * x1 /\
x' == x0' + 0x1000000 * x1' /\
x * 0x100 % 0x100000000 == x' * 0x100 % 0x100000000
)
(ensures x0 == x0')
=
()
let nat32_xor_bytewise_1_helper2 (x x':nat32) (t t':four nat8) : Lemma
(requires
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
x * 0x1000000 % 0x100000000 == x' * 0x1000000 % 0x100000000
)
(ensures t.lo0 == t'.lo0)
=
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
let t123 = t1 + 0x100 * t2 + 0x10000 * t3 in
let t123' = t1' + 0x100 * t2' + 0x10000 * t3' in
assert_norm (four_to_nat 8 t == four_to_nat_unfold 8 t );
assert_norm (four_to_nat 8 t' == four_to_nat_unfold 8 t');
nat32_xor_bytewise_1_helper1 t0 t0' t123 t123' x x';
()
let nat32_xor_bytewise_2_helper2 (x x':nat32) (t t':four nat8) : Lemma
(requires
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
x * 0x10000 % 0x100000000 == x' * 0x10000 % 0x100000000
)
(ensures t.lo0 == t'.lo0 /\ t.lo1 == t'.lo1)
=
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
let t01 = t0 + 0x100 * t1 in
let t23 = t2 + 0x100 * t3 in
let t01' = t0' + 0x100 * t1' in
let t23' = t2' + 0x100 * t3' in
assert_norm (four_to_nat 8 t == four_to_nat_unfold 8 t );
assert_norm (four_to_nat 8 t' == four_to_nat_unfold 8 t');
nat32_xor_bytewise_2_helper1 t01 t01' t23 t23' x x';
()
let nat32_xor_bytewise_3_helper2 (x x':nat32) (t t':four nat8) : Lemma
(requires
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
x * 0x100 % 0x100000000 == x' * 0x100 % 0x100000000
) | false | false | Vale.AES.GCTR.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 30,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val nat32_xor_bytewise_3_helper2 (x x': nat32) (t t': four nat8)
: Lemma
(requires
x == four_to_nat 8 t /\ x' == four_to_nat 8 t' /\
x * 0x100 % 0x100000000 == x' * 0x100 % 0x100000000)
(ensures t.lo0 == t'.lo0 /\ t.lo1 == t'.lo1 /\ t.hi2 == t'.hi2) | [] | Vale.AES.GCTR.nat32_xor_bytewise_3_helper2 | {
"file_name": "vale/code/crypto/aes/Vale.AES.GCTR.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} |
x: Vale.Def.Types_s.nat32 ->
x': Vale.Def.Types_s.nat32 ->
t: Vale.Def.Words_s.four Vale.Def.Types_s.nat8 ->
t': Vale.Def.Words_s.four Vale.Def.Types_s.nat8
-> FStar.Pervasives.Lemma
(requires
x == Vale.Def.Words.Four_s.four_to_nat 8 t /\ x' == Vale.Def.Words.Four_s.four_to_nat 8 t' /\
x * 0x100 % 0x100000000 == x' * 0x100 % 0x100000000)
(ensures
Mkfour?.lo0 t == Mkfour?.lo0 t' /\ Mkfour?.lo1 t == Mkfour?.lo1 t' /\
Mkfour?.hi2 t == Mkfour?.hi2 t') | {
"end_col": 4,
"end_line": 380,
"start_col": 3,
"start_line": 372
} |
FStar.Pervasives.Lemma | val gctr_encrypt_one_block (icb_BE plain:quad32) (alg:algorithm) (key:seq nat32) : Lemma
(requires is_aes_key_LE alg key)
(ensures
gctr_encrypt_LE icb_BE (le_quad32_to_bytes plain) alg key ==
le_seq_quad32_to_bytes (create 1 (quad32_xor plain (aes_encrypt_BE alg key icb_BE)))
) | [
{
"abbrev": false,
"full_module": "Vale.AES.Types_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words.Four_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words.Seq_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq.Properties",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Lib.Seqs",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCM_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCTR_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let gctr_encrypt_one_block (icb_BE plain:quad32) (alg:algorithm) (key:seq nat32) =
gctr_encrypt_LE_reveal ();
assert(inc32 icb_BE 0 == icb_BE);
let encrypted_icb = aes_encrypt_BE alg key icb_BE in
let p = le_quad32_to_bytes plain in
let plain_quads_LE = le_bytes_to_seq_quad32 p in
let p_seq = create 1 plain in
assert (length p == 16);
le_bytes_to_seq_quad32_to_bytes_one_quad plain;
assert (p_seq == plain_quads_LE);
let cipher_quads_LE = gctr_encrypt_recursive icb_BE plain_quads_LE alg key 0 in
assert (cipher_quads_LE == cons (gctr_encrypt_block icb_BE (head plain_quads_LE) alg key 0) (gctr_encrypt_recursive icb_BE (tail plain_quads_LE) alg key (1)));
assert (head plain_quads_LE == plain);
assert (gctr_encrypt_block icb_BE (head plain_quads_LE) alg key 0 ==
(let icb_LE = reverse_bytes_quad32 (inc32 icb_BE 0) in
quad32_xor (head plain_quads_LE) (aes_encrypt_LE alg key icb_LE)));
assert (quad32_xor plain (aes_encrypt_LE alg key (reverse_bytes_quad32 icb_BE))
==
(let icb_LE = reverse_bytes_quad32 (inc32 icb_BE 0) in
quad32_xor (head plain_quads_LE) (aes_encrypt_LE alg key icb_LE)));
assert (gctr_encrypt_block icb_BE (head plain_quads_LE) alg key 0 == quad32_xor plain (aes_encrypt_LE alg key (reverse_bytes_quad32 icb_BE)));
aes_encrypt_LE_reveal ();
assert (gctr_encrypt_block icb_BE (head plain_quads_LE) alg key 0 == quad32_xor plain (aes_encrypt_BE alg key icb_BE));
assert (gctr_encrypt_block icb_BE (head plain_quads_LE) alg key 0 == quad32_xor plain encrypted_icb);
assert(gctr_encrypt_recursive icb_BE (tail p_seq) alg key 1 == empty); // OBSERVE
//assert(gctr_encrypt_LE icb p alg key == cons (quad32_xor plain encrypted_icb) empty);
let x = quad32_xor plain encrypted_icb in
append_empty_r (create 1 x); // This is the missing piece
() | val gctr_encrypt_one_block (icb_BE plain:quad32) (alg:algorithm) (key:seq nat32) : Lemma
(requires is_aes_key_LE alg key)
(ensures
gctr_encrypt_LE icb_BE (le_quad32_to_bytes plain) alg key ==
le_seq_quad32_to_bytes (create 1 (quad32_xor plain (aes_encrypt_BE alg key icb_BE)))
)
let gctr_encrypt_one_block (icb_BE plain: quad32) (alg: algorithm) (key: seq nat32) = | false | null | true | gctr_encrypt_LE_reveal ();
assert (inc32 icb_BE 0 == icb_BE);
let encrypted_icb = aes_encrypt_BE alg key icb_BE in
let p = le_quad32_to_bytes plain in
let plain_quads_LE = le_bytes_to_seq_quad32 p in
let p_seq = create 1 plain in
assert (length p == 16);
le_bytes_to_seq_quad32_to_bytes_one_quad plain;
assert (p_seq == plain_quads_LE);
let cipher_quads_LE = gctr_encrypt_recursive icb_BE plain_quads_LE alg key 0 in
assert (cipher_quads_LE ==
cons (gctr_encrypt_block icb_BE (head plain_quads_LE) alg key 0)
(gctr_encrypt_recursive icb_BE (tail plain_quads_LE) alg key (1)));
assert (head plain_quads_LE == plain);
assert (gctr_encrypt_block icb_BE (head plain_quads_LE) alg key 0 ==
(let icb_LE = reverse_bytes_quad32 (inc32 icb_BE 0) in
quad32_xor (head plain_quads_LE) (aes_encrypt_LE alg key icb_LE)));
assert (quad32_xor plain (aes_encrypt_LE alg key (reverse_bytes_quad32 icb_BE)) ==
(let icb_LE = reverse_bytes_quad32 (inc32 icb_BE 0) in
quad32_xor (head plain_quads_LE) (aes_encrypt_LE alg key icb_LE)));
assert (gctr_encrypt_block icb_BE (head plain_quads_LE) alg key 0 ==
quad32_xor plain (aes_encrypt_LE alg key (reverse_bytes_quad32 icb_BE)));
aes_encrypt_LE_reveal ();
assert (gctr_encrypt_block icb_BE (head plain_quads_LE) alg key 0 ==
quad32_xor plain (aes_encrypt_BE alg key icb_BE));
assert (gctr_encrypt_block icb_BE (head plain_quads_LE) alg key 0 == quad32_xor plain encrypted_icb);
assert (gctr_encrypt_recursive icb_BE (tail p_seq) alg key 1 == empty);
let x = quad32_xor plain encrypted_icb in
append_empty_r (create 1 x);
() | {
"checked_file": "Vale.AES.GCTR.fst.checked",
"dependencies": [
"Vale.Poly1305.Bitvectors.fsti.checked",
"Vale.Lib.Seqs.fsti.checked",
"Vale.Def.Words_s.fsti.checked",
"Vale.Def.Words.Seq_s.fsti.checked",
"Vale.Def.Words.Four_s.fsti.checked",
"Vale.Def.TypesNative_s.fst.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Opaque_s.fsti.checked",
"Vale.Arch.Types.fsti.checked",
"Vale.AES.Types_helpers.fsti.checked",
"Vale.AES.GCTR_s.fst.checked",
"Vale.AES.GCM_helpers.fsti.checked",
"Vale.AES.AES_s.fst.checked",
"prims.fst.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.Properties.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Classical.fsti.checked",
"FStar.Calc.fsti.checked"
],
"interface_file": true,
"source_file": "Vale.AES.GCTR.fst"
} | [
"lemma"
] | [
"Vale.Def.Types_s.quad32",
"Vale.AES.AES_common_s.algorithm",
"FStar.Seq.Base.seq",
"Vale.Def.Types_s.nat32",
"Prims.unit",
"FStar.Seq.Base.append_empty_r",
"FStar.Seq.Base.create",
"Vale.Def.Types_s.quad32_xor",
"Prims._assert",
"Prims.eq2",
"Vale.AES.GCTR_s.gctr_encrypt_recursive",
"FStar.Seq.Properties.tail",
"FStar.Seq.Base.empty",
"Vale.AES.GCTR_s.gctr_encrypt_block",
"FStar.Seq.Properties.head",
"Vale.AES.GCTR.aes_encrypt_BE",
"Vale.AES.AES_s.aes_encrypt_LE_reveal",
"Vale.AES.AES_s.aes_encrypt_LE",
"Vale.Def.Types_s.reverse_bytes_quad32",
"Vale.AES.GCTR_s.inc32",
"FStar.Seq.Properties.cons",
"Vale.Arch.Types.le_bytes_to_seq_quad32_to_bytes_one_quad",
"Prims.int",
"FStar.Seq.Base.length",
"Vale.Def.Types_s.nat8",
"Vale.Def.Types_s.le_bytes_to_seq_quad32",
"Vale.Def.Words_s.nat8",
"Vale.Def.Types_s.le_quad32_to_bytes",
"Vale.AES.GCTR_s.gctr_encrypt_LE_reveal"
] | [] | module Vale.AES.GCTR
open Vale.Def.Opaque_s
open Vale.Def.Words_s
open Vale.Def.Words.Seq_s
open Vale.Def.Words.Four_s
open Vale.Def.Types_s
open Vale.Arch.Types
open FStar.Mul
open FStar.Seq
open Vale.AES.AES_s
open Vale.AES.GCTR_s
open Vale.AES.GCM_helpers
open FStar.Math.Lemmas
open Vale.Lib.Seqs
open Vale.AES.Types_helpers
#set-options "--z3rlimit 20 --max_fuel 1 --max_ifuel 0"
let lemma_counter_init x low64 low8 =
Vale.Poly1305.Bitvectors.lemma_bytes_and_mod1 low64;
Vale.Def.TypesNative_s.reveal_iand 64 low64 0xff;
assert (low8 == low64 % 256);
lo64_reveal ();
assert_norm (pow2_norm 32 == pow2_32); // OBSERVE
assert (low64 == x.lo0 + x.lo1 * pow2_32); // OBSERVE
assert (low64 % 256 == x.lo0 % 256);
()
let gctr_encrypt_block_offset (icb_BE:quad32) (plain_LE:quad32) (alg:algorithm) (key:seq nat32) (i:int) =
()
let gctr_encrypt_empty (icb_BE:quad32) (plain_LE cipher_LE:seq quad32) (alg:algorithm) (key:seq nat32) =
reveal_opaque (`%le_bytes_to_seq_quad32) le_bytes_to_seq_quad32;
gctr_encrypt_LE_reveal ();
let plain = slice (le_seq_quad32_to_bytes plain_LE) 0 0 in
let cipher = slice (le_seq_quad32_to_bytes cipher_LE) 0 0 in
assert (plain == empty);
assert (cipher == empty);
assert (length plain == 0);
assert (make_gctr_plain_LE plain == empty);
let num_extra = (length (make_gctr_plain_LE plain)) % 16 in
assert (num_extra == 0);
let plain_quads_LE = le_bytes_to_seq_quad32 plain in
let cipher_quads_LE = gctr_encrypt_recursive icb_BE plain_quads_LE alg key 0 in
assert (equal plain_quads_LE empty); // OBSERVE
assert (plain_quads_LE == empty);
assert (cipher_quads_LE == empty);
assert (equal (le_seq_quad32_to_bytes cipher_quads_LE) empty); // OBSERVEs
()
let gctr_partial_opaque_init alg plain cipher key icb =
gctr_partial_reveal ();
()
#restart-solver
let lemma_gctr_partial_append alg b1 b2 p1 c1 p2 c2 key icb1 icb2 =
gctr_partial_reveal ();
()
let gctr_partial_opaque_ignores_postfix alg bound plain plain' cipher cipher' key icb =
gctr_partial_reveal ();
// OBSERVE:
assert (forall i . 0 <= i /\ i < bound ==> index plain i == index (slice plain 0 bound) i);
assert (forall i . 0 <= i /\ i < bound ==> index plain' i == index (slice plain' 0 bound) i);
assert (forall i . 0 <= i /\ i < bound ==> index cipher i == index (slice cipher 0 bound) i);
assert (forall i . 0 <= i /\ i < bound ==> index cipher' i == index (slice cipher' 0 bound) i);
()
let gctr_partial_extend6 (alg:algorithm) (bound:nat) (plain cipher:seq quad32) (key:seq nat32) (icb:quad32)
=
gctr_partial_reveal ();
()
(*
let rec seq_map_i_indexed' (#a:Type) (#b:Type) (f:int->a->b) (s:seq a) (i:int) :
Tot (s':seq b { length s' == length s /\
(forall j . {:pattern index s' j} 0 <= j /\ j < length s ==> index s' j == f (i + j) (index s j))
})
(decreases (length s))
=
if length s = 0 then empty
else cons (f i (head s)) (seq_map_i_indexed f (tail s) (i + 1))
let rec test (icb_BE:quad32) (plain_LE:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (i:int) :
Lemma (ensures
(let gctr_encrypt_block_curried (j:int) (p:quad32) = gctr_encrypt_block icb_BE p alg key j in
gctr_encrypt_recursive icb_BE plain_LE alg key i == seq_map_i_indexed' gctr_encrypt_block_curried plain_LE i))
(decreases (length plain_LE))
=
let gctr_encrypt_block_curried (j:int) (p:quad32) = gctr_encrypt_block icb_BE p alg key j in
let g = gctr_encrypt_recursive icb_BE plain_LE alg key i in
let s = seq_map_i_indexed' gctr_encrypt_block_curried plain_LE i in
if length plain_LE = 0 then (
assert(equal (g) (s));
()
) else (
test icb_BE (tail plain_LE) alg key (i+1);
assert (gctr_encrypt_recursive icb_BE (tail plain_LE) alg key (i+1) == seq_map_i_indexed' gctr_encrypt_block_curried (tail plain_LE) (i+1))
)
*)
let rec gctr_encrypt_recursive_length (icb:quad32) (plain:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (i:int) : Lemma
(requires True)
(ensures length (gctr_encrypt_recursive icb plain alg key i) == length plain)
(decreases %[length plain])
[SMTPat (length (gctr_encrypt_recursive icb plain alg key i))]
=
if length plain = 0 then ()
else gctr_encrypt_recursive_length icb (tail plain) alg key (i + 1)
#reset-options "--z3rlimit 40"
let gctr_encrypt_length (icb_BE:quad32) (plain:gctr_plain_LE)
(alg:algorithm) (key:aes_key_LE alg) :
Lemma(length (gctr_encrypt_LE icb_BE plain alg key) == length plain)
[SMTPat (length (gctr_encrypt_LE icb_BE plain alg key))]
=
reveal_opaque (`%le_bytes_to_seq_quad32) le_bytes_to_seq_quad32;
gctr_encrypt_LE_reveal ();
let num_extra = (length plain) % 16 in
let result = gctr_encrypt_LE icb_BE plain alg key in
if num_extra = 0 then (
let plain_quads_LE = le_bytes_to_seq_quad32 plain in
gctr_encrypt_recursive_length icb_BE plain_quads_LE alg key 0
) else (
let full_bytes_len = (length plain) - num_extra in
let full_blocks, final_block = split plain full_bytes_len in
let full_quads_LE = le_bytes_to_seq_quad32 full_blocks in
let final_quad_LE = le_bytes_to_quad32 (pad_to_128_bits final_block) in
let cipher_quads_LE = gctr_encrypt_recursive icb_BE full_quads_LE alg key 0 in
let final_cipher_quad_LE = gctr_encrypt_block icb_BE final_quad_LE alg key (full_bytes_len / 16) in
let cipher_bytes_full_LE = le_seq_quad32_to_bytes cipher_quads_LE in
let final_cipher_bytes_LE = slice (le_quad32_to_bytes final_cipher_quad_LE) 0 num_extra in
gctr_encrypt_recursive_length icb_BE full_quads_LE alg key 0;
assert (length result == length cipher_bytes_full_LE + length final_cipher_bytes_LE);
assert (length cipher_quads_LE == length full_quads_LE);
assert (length cipher_bytes_full_LE == 16 * length cipher_quads_LE);
assert (16 * length full_quads_LE == length full_blocks);
assert (length cipher_bytes_full_LE == length full_blocks);
()
)
#reset-options
let rec gctr_indexed_helper (icb:quad32) (plain:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (i:int) : Lemma
(requires True)
(ensures (let cipher = gctr_encrypt_recursive icb plain alg key i in
length cipher == length plain /\
(forall j . {:pattern index cipher j} 0 <= j /\ j < length plain ==>
index cipher j == quad32_xor (index plain j) (aes_encrypt_BE alg key (inc32 icb (i + j)) ))))
(decreases %[length plain])
=
if length plain = 0 then ()
else
let tl = tail plain in
let cipher = gctr_encrypt_recursive icb plain alg key i in
let r_cipher = gctr_encrypt_recursive icb tl alg key (i+1) in
let helper (j:int) :
Lemma ((0 <= j /\ j < length plain) ==> (index cipher j == quad32_xor (index plain j) (aes_encrypt_BE alg key (inc32 icb (i + j)) )))
=
aes_encrypt_LE_reveal ();
if 0 < j && j < length plain then (
gctr_indexed_helper icb tl alg key (i+1);
assert(index r_cipher (j-1) == quad32_xor (index tl (j-1)) (aes_encrypt_BE alg key (inc32 icb (i + 1 + j - 1)) )) // OBSERVE
) else ()
in
FStar.Classical.forall_intro helper
let gctr_indexed (icb:quad32) (plain:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (cipher:seq quad32) : Lemma
(requires length cipher == length plain /\
(forall i . {:pattern index cipher i} 0 <= i /\ i < length cipher ==>
index cipher i == quad32_xor (index plain i) (aes_encrypt_BE alg key (inc32 icb i) )))
(ensures cipher == gctr_encrypt_recursive icb plain alg key 0)
=
gctr_indexed_helper icb plain alg key 0;
let c = gctr_encrypt_recursive icb plain alg key 0 in
assert(equal cipher c) // OBSERVE: Invoke extensionality lemmas
let gctr_partial_completed (alg:algorithm) (plain cipher:seq quad32) (key:seq nat32) (icb:quad32) =
gctr_indexed icb plain alg key cipher;
()
let gctr_partial_opaque_completed (alg:algorithm) (plain cipher:seq quad32) (key:seq nat32) (icb:quad32) : Lemma
(requires
is_aes_key_LE alg key /\
length plain == length cipher /\
length plain < pow2_32 /\
gctr_partial alg (length cipher) plain cipher key icb
)
(ensures cipher == gctr_encrypt_recursive icb plain alg key 0)
=
gctr_partial_reveal ();
gctr_partial_completed alg plain cipher key icb
let gctr_partial_to_full_basic (icb_BE:quad32) (plain:seq quad32) (alg:algorithm) (key:seq nat32) (cipher:seq quad32) =
gctr_encrypt_LE_reveal ();
let p = le_seq_quad32_to_bytes plain in
assert (length p % 16 == 0);
let plain_quads_LE = le_bytes_to_seq_quad32 p in
let cipher_quads_LE = gctr_encrypt_recursive icb_BE plain_quads_LE alg key 0 in
let cipher_bytes = le_seq_quad32_to_bytes cipher_quads_LE in
le_bytes_to_seq_quad32_to_bytes plain;
()
(*
Want to show that:
slice (le_seq_quad32_to_bytes (buffer128_as_seq(mem, out_b))) 0 num_bytes
==
gctr_encrypt_LE icb_BE (slice (le_seq_quad32_to_bytes (buffer128_as_seq(mem, in_b))) 0 num_bytes) ...
We know that
slice (buffer128_as_seq(mem, out_b) 0 num_blocks
==
gctr_encrypt_recursive icb_BE (slice buffer128_as_seq(mem, in_b) 0 num_blocks) ...
And we know that:
get_mem out_b num_blocks
==
gctr_encrypt_block(icb_BE, (get_mem inb num_blocks), alg, key, num_blocks);
Internally gctr_encrypt_LE will compute:
full_blocks, final_block = split (slice (le_seq_quad32_to_bytes (buffer128_as_seq(mem, in_b))) 0 num_bytes) (num_blocks * 16)
We'd like to show that
Step1: le_bytes_to_seq_quad32 full_blocks == slice buffer128_as_seq(mem, in_b) 0 num_blocks
and
Step2: final_block == slice (le_quad32_to_bytes (get_mem inb num_blocks)) 0 num_extra
Then we need to break down the byte-level effects of gctr_encrypt_block to show that even though the
padded version of final_block differs from (get_mem inb num_blocks), after we slice it at the end,
we end up with the same value
*)
let step1 (p:seq quad32) (num_bytes:nat{ num_bytes < 16 * length p }) : Lemma
(let num_extra = num_bytes % 16 in
let num_blocks = num_bytes / 16 in
let full_blocks, final_block = split (slice (le_seq_quad32_to_bytes p) 0 num_bytes) (num_blocks * 16) in
let full_quads_LE = le_bytes_to_seq_quad32 full_blocks in
let p_prefix = slice p 0 num_blocks in
p_prefix == full_quads_LE)
=
let num_extra = num_bytes % 16 in
let num_blocks = num_bytes / 16 in
let full_blocks, final_block = split (slice (le_seq_quad32_to_bytes p) 0 num_bytes) (num_blocks * 16) in
let full_quads_LE = le_bytes_to_seq_quad32 full_blocks in
let p_prefix = slice p 0 num_blocks in
assert (length full_blocks == num_blocks * 16);
assert (full_blocks == slice (slice (le_seq_quad32_to_bytes p) 0 num_bytes) 0 (num_blocks * 16));
assert (full_blocks == slice (le_seq_quad32_to_bytes p) 0 (num_blocks * 16));
slice_commutes_le_seq_quad32_to_bytes0 p num_blocks;
assert (full_blocks == le_seq_quad32_to_bytes (slice p 0 num_blocks));
le_bytes_to_seq_quad32_to_bytes (slice p 0 num_blocks);
assert (full_quads_LE == (slice p 0 num_blocks));
()
#reset-options "--smtencoding.elim_box true --z3rlimit 30"
let lemma_slice_orig_index (#a:Type) (s s':seq a) (m n:nat) : Lemma
(requires length s == length s' /\ m <= n /\ n <= length s /\ slice s m n == slice s' m n)
(ensures (forall (i:int).{:pattern (index s i) \/ (index s' i)} m <= i /\ i < n ==> index s i == index s' i))
=
let aux (i:nat{m <= i /\ i < n}) : Lemma (index s i == index s' i) =
lemma_index_slice s m n (i - m);
lemma_index_slice s' m n (i - m)
in Classical.forall_intro aux
let lemma_ishl_32 (x:nat32) (k:nat) : Lemma
(ensures ishl #pow2_32 x k == x * pow2 k % pow2_32)
=
Vale.Def.TypesNative_s.reveal_ishl 32 x k;
FStar.UInt.shift_left_value_lemma #32 x k;
()
let lemma_ishl_ixor_32 (x y:nat32) (k:nat) : Lemma
(ensures ishl #pow2_32 (ixor x y) k == ixor (ishl x k) (ishl y k))
=
Vale.Def.TypesNative_s.reveal_ishl 32 x k;
Vale.Def.TypesNative_s.reveal_ishl 32 y k;
Vale.Def.TypesNative_s.reveal_ishl 32 (ixor x y) k;
Vale.Def.TypesNative_s.reveal_ixor 32 x y;
Vale.Def.TypesNative_s.reveal_ixor 32 (ishl x k) (ishl y k);
FStar.UInt.shift_left_logxor_lemma #32 x y k;
()
unfold let pow2_24 = 0x1000000
let nat24 = natN pow2_24
let nat32_xor_bytewise_1_helper1 (x0 x0':nat8) (x1 x1':nat24) (x x':nat32) : Lemma
(requires
x == x0 + 0x100 * x1 /\
x' == x0' + 0x100 * x1' /\
x * 0x1000000 % 0x100000000 == x' * 0x1000000 % 0x100000000
)
(ensures x0 == x0')
=
()
let nat32_xor_bytewise_2_helper1 (x0 x0' x1 x1':nat16) (x x':nat32) : Lemma
(requires
x == x0 + 0x10000 * x1 /\
x' == x0' + 0x10000 * x1' /\
x * 0x10000 % 0x100000000 == x' * 0x10000 % 0x100000000
)
(ensures x0 == x0')
=
()
let nat32_xor_bytewise_3_helper1 (x0 x0':nat24) (x1 x1':nat8) (x x':nat32) : Lemma
(requires
x == x0 + 0x1000000 * x1 /\
x' == x0' + 0x1000000 * x1' /\
x * 0x100 % 0x100000000 == x' * 0x100 % 0x100000000
)
(ensures x0 == x0')
=
()
let nat32_xor_bytewise_1_helper2 (x x':nat32) (t t':four nat8) : Lemma
(requires
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
x * 0x1000000 % 0x100000000 == x' * 0x1000000 % 0x100000000
)
(ensures t.lo0 == t'.lo0)
=
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
let t123 = t1 + 0x100 * t2 + 0x10000 * t3 in
let t123' = t1' + 0x100 * t2' + 0x10000 * t3' in
assert_norm (four_to_nat 8 t == four_to_nat_unfold 8 t );
assert_norm (four_to_nat 8 t' == four_to_nat_unfold 8 t');
nat32_xor_bytewise_1_helper1 t0 t0' t123 t123' x x';
()
let nat32_xor_bytewise_2_helper2 (x x':nat32) (t t':four nat8) : Lemma
(requires
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
x * 0x10000 % 0x100000000 == x' * 0x10000 % 0x100000000
)
(ensures t.lo0 == t'.lo0 /\ t.lo1 == t'.lo1)
=
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
let t01 = t0 + 0x100 * t1 in
let t23 = t2 + 0x100 * t3 in
let t01' = t0' + 0x100 * t1' in
let t23' = t2' + 0x100 * t3' in
assert_norm (four_to_nat 8 t == four_to_nat_unfold 8 t );
assert_norm (four_to_nat 8 t' == four_to_nat_unfold 8 t');
nat32_xor_bytewise_2_helper1 t01 t01' t23 t23' x x';
()
let nat32_xor_bytewise_3_helper2 (x x':nat32) (t t':four nat8) : Lemma
(requires
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
x * 0x100 % 0x100000000 == x' * 0x100 % 0x100000000
)
(ensures t.lo0 == t'.lo0 /\ t.lo1 == t'.lo1 /\ t.hi2 == t'.hi2)
=
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
let t012 = t0 + 0x100 * t1 + 0x10000 * t2 in
let t012' = t0' + 0x100 * t1' + 0x10000 * t2' in
assert_norm (four_to_nat 8 t == four_to_nat_unfold 8 t );
assert_norm (four_to_nat 8 t' == four_to_nat_unfold 8 t');
nat32_xor_bytewise_3_helper1 t012 t012' t3 t3' x x';
()
let nat32_xor_bytewise_1_helper3 (k k':nat32) (s s':four nat8) : Lemma
(requires
k == four_to_nat 8 s /\
k' == four_to_nat 8 s' /\
s.lo0 == s'.lo0
)
(ensures k * 0x1000000 % 0x100000000 == k' * 0x1000000 % 0x100000000)
=
let Mkfour _ _ _ _ = s in
let Mkfour _ _ _ _ = s' in
assert_norm (four_to_nat 8 s == four_to_nat_unfold 8 s );
assert_norm (four_to_nat 8 s' == four_to_nat_unfold 8 s');
()
let nat32_xor_bytewise_2_helper3 (k k':nat32) (s s':four nat8) : Lemma
(requires
k == four_to_nat 8 s /\
k' == four_to_nat 8 s' /\
s.lo0 == s'.lo0 /\ s.lo1 == s'.lo1
)
(ensures k * 0x10000 % 0x100000000 == k' * 0x10000 % 0x100000000)
=
let Mkfour _ _ _ _ = s in
let Mkfour _ _ _ _ = s' in
assert_norm (four_to_nat 8 s == four_to_nat_unfold 8 s );
assert_norm (four_to_nat 8 s' == four_to_nat_unfold 8 s');
()
let nat32_xor_bytewise_3_helper3 (k k':nat32) (s s':four nat8) : Lemma
(requires
k == four_to_nat 8 s /\
k' == four_to_nat 8 s' /\
s.lo0 == s'.lo0 /\ s.lo1 == s'.lo1 /\ s.hi2 == s'.hi2
)
(ensures k * 0x100 % 0x100000000 == k' * 0x100 % 0x100000000)
=
let Mkfour _ _ _ _ = s in
let Mkfour _ _ _ _ = s' in
assert_norm (four_to_nat 8 s == four_to_nat_unfold 8 s );
assert_norm (four_to_nat 8 s' == four_to_nat_unfold 8 s');
()
let nat32_xor_bytewise_1 (k k' x x' m:nat32) (s s' t t':four nat8) : Lemma
(requires
k == four_to_nat 8 s /\
k' == four_to_nat 8 s' /\
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
ixor k m == x /\
ixor k' m == x' /\
s.lo0 == s'.lo0
)
(ensures t.lo0 == t'.lo0)
=
let Mkfour s0 s1 s2 s3 = s in
let Mkfour s0' s1' s2' s3' = s' in
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
nat32_xor_bytewise_1_helper3 k k' s s';
lemma_ishl_32 k 24;
lemma_ishl_32 k' 24;
lemma_ishl_32 x 24;
lemma_ishl_32 x' 24;
lemma_ishl_ixor_32 k m 24;
lemma_ishl_ixor_32 k' m 24;
assert_norm (pow2 24 == pow2_24);
nat32_xor_bytewise_1_helper2 x x' t t';
()
let nat32_xor_bytewise_2 (k k' x x' m:nat32) (s s' t t':four nat8) : Lemma
(requires
k == four_to_nat 8 s /\
k' == four_to_nat 8 s' /\
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
ixor k m == x /\
ixor k' m == x' /\
s.lo0 == s'.lo0 /\ s.lo1 == s'.lo1
)
(ensures t.lo0 == t'.lo0 /\ t.lo1 == t'.lo1)
=
let Mkfour s0 s1 s2 s3 = s in
let Mkfour s0' s1' s2' s3' = s' in
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
nat32_xor_bytewise_2_helper3 k k' s s';
lemma_ishl_32 k 16;
lemma_ishl_32 k' 16;
lemma_ishl_32 x 16;
lemma_ishl_32 x' 16;
lemma_ishl_ixor_32 k m 16;
lemma_ishl_ixor_32 k' m 16;
// assert (ishl #pow2_32 k 16 == k * 0x10000 % 0x100000000);
// assert (ishl #pow2_32 k' 16 == k' * 0x10000 % 0x100000000);
// assert (ishl #pow2_32 x 16 == x * 0x10000 % 0x100000000);
// assert (ishl #pow2_32 x' 16 == x' * 0x10000 % 0x100000000);
// assert (ishl #pow2_32 x 16 == ixor (ishl k 16) (ishl m 16));
// assert (ishl #pow2_32 x' 16 == ixor (ishl k' 16) (ishl m 16));
// assert (x * 0x10000 % 0x100000000 == ixor (k * 0x10000 % 0x100000000) (ishl m 16));
// assert (x' * 0x10000 % 0x100000000 == ixor (k' * 0x10000 % 0x100000000) (ishl m 16));
nat32_xor_bytewise_2_helper2 x x' t t';
()
let nat32_xor_bytewise_3 (k k' x x' m:nat32) (s s' t t':four nat8) : Lemma
(requires
k == four_to_nat 8 s /\
k' == four_to_nat 8 s' /\
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
ixor k m == x /\
ixor k' m == x' /\
s.lo0 == s'.lo0 /\ s.lo1 == s'.lo1 /\ s.hi2 == s'.hi2
)
(ensures t.lo0 == t'.lo0 /\ t.lo1 == t'.lo1 /\ t.hi2 == t'.hi2)
=
let Mkfour s0 s1 s2 s3 = s in
let Mkfour s0' s1' s2' s3' = s' in
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
nat32_xor_bytewise_3_helper3 k k' s s';
lemma_ishl_32 k 8;
lemma_ishl_32 k' 8;
lemma_ishl_32 x 8;
lemma_ishl_32 x' 8;
lemma_ishl_ixor_32 k m 8;
lemma_ishl_ixor_32 k' m 8;
nat32_xor_bytewise_3_helper2 x x' t t';
()
#reset-options "--z3rlimit 50 --smtencoding.nl_arith_repr boxwrap --smtencoding.l_arith_repr boxwrap"
let nat32_xor_bytewise_4 (k k' x x' m:nat32) (s s' t t':four nat8) : Lemma
(requires
k == four_to_nat 8 s /\
k' == four_to_nat 8 s' /\
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
ixor k m == x /\
ixor k' m == x' /\
s == s'
)
(ensures t == t')
=
let Mkfour s0 s1 s2 s3 = s in
let Mkfour s0' s1' s2' s3' = s' in
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
assert_norm (four_to_nat 8 t' == four_to_nat_unfold 8 t');
assert_norm (four_to_nat 8 t == four_to_nat_unfold 8 t );
()
#reset-options
let nat32_xor_bytewise (k k' m:nat32) (s s' t t':seq4 nat8) (n:nat) : Lemma
(requires
n <= 4 /\
k == four_to_nat 8 (seq_to_four_LE s) /\
k' == four_to_nat 8 (seq_to_four_LE s') /\
ixor k m == four_to_nat 8 (seq_to_four_LE t) /\
ixor k' m == four_to_nat 8 (seq_to_four_LE t') /\
equal (slice s 0 n) (slice s' 0 n)
)
// (ensures equal (slice t 0 n) (slice t' 0 n))
(ensures (forall (i:nat).{:pattern (index t i) \/ (index t' i)} i < n ==> index t i == index t' i))
=
assert (n > 0 ==> index (slice s 0 n) 0 == index (slice s' 0 n) 0);
assert (n > 1 ==> index (slice s 0 n) 1 == index (slice s' 0 n) 1);
assert (n > 2 ==> index (slice s 0 n) 2 == index (slice s' 0 n) 2);
assert (n > 3 ==> index (slice s 0 n) 3 == index (slice s' 0 n) 3);
let x = ixor k m in
let x' = ixor k' m in
if n = 1 then nat32_xor_bytewise_1 k k' x x' m (seq_to_four_LE s) (seq_to_four_LE s') (seq_to_four_LE t) (seq_to_four_LE t');
if n = 2 then nat32_xor_bytewise_2 k k' x x' m (seq_to_four_LE s) (seq_to_four_LE s') (seq_to_four_LE t) (seq_to_four_LE t');
if n = 3 then nat32_xor_bytewise_3 k k' x x' m (seq_to_four_LE s) (seq_to_four_LE s') (seq_to_four_LE t) (seq_to_four_LE t');
if n = 4 then nat32_xor_bytewise_4 k k' x x' m (seq_to_four_LE s) (seq_to_four_LE s') (seq_to_four_LE t) (seq_to_four_LE t');
assert (equal (slice t 0 n) (slice t' 0 n));
lemma_slice_orig_index t t' 0 n;
()
let quad32_xor_bytewise (q q' r:quad32) (n:nat{ n <= 16 }) : Lemma
(requires (let q_bytes = le_quad32_to_bytes q in
let q'_bytes = le_quad32_to_bytes q' in
slice q_bytes 0 n == slice q'_bytes 0 n))
(ensures (let q_bytes = le_quad32_to_bytes q in
let q'_bytes = le_quad32_to_bytes q' in
let qr_bytes = le_quad32_to_bytes (quad32_xor q r) in
let q'r_bytes = le_quad32_to_bytes (quad32_xor q' r) in
slice qr_bytes 0 n == slice q'r_bytes 0 n))
=
let s = le_quad32_to_bytes q in
let s' = le_quad32_to_bytes q' in
let t = le_quad32_to_bytes (quad32_xor q r) in
let t' = le_quad32_to_bytes (quad32_xor q' r) in
lemma_slices_le_quad32_to_bytes q;
lemma_slices_le_quad32_to_bytes q';
lemma_slices_le_quad32_to_bytes (quad32_xor q r);
lemma_slices_le_quad32_to_bytes (quad32_xor q' r);
lemma_slice_orig_index s s' 0 n;
quad32_xor_reveal ();
reverse_bytes_nat32_reveal ();
if n < 4 then nat32_xor_bytewise q.lo0 q'.lo0 r.lo0 (slice s 0 4) (slice s' 0 4) (slice t 0 4) (slice t' 0 4) n
else
(
nat32_xor_bytewise q.lo0 q'.lo0 r.lo0 (slice s 0 4) (slice s' 0 4) (slice t 0 4) (slice t' 0 4) 4;
if n < 8 then nat32_xor_bytewise q.lo1 q'.lo1 r.lo1 (slice s 4 8) (slice s' 4 8) (slice t 4 8) (slice t' 4 8) (n - 4)
else
(
nat32_xor_bytewise q.lo1 q'.lo1 r.lo1 (slice s 4 8) (slice s' 4 8) (slice t 4 8) (slice t' 4 8) 4;
if n < 12 then nat32_xor_bytewise q.hi2 q'.hi2 r.hi2 (slice s 8 12) (slice s' 8 12) (slice t 8 12) (slice t' 8 12) (n - 8)
else
(
nat32_xor_bytewise q.hi2 q'.hi2 r.hi2 (slice s 8 12) (slice s' 8 12) (slice t 8 12) (slice t' 8 12) 4;
nat32_xor_bytewise q.hi3 q'.hi3 r.hi3 (slice s 12 16) (slice s' 12 16) (slice t 12 16) (slice t' 12 16) (n - 12);
()
)
)
);
assert (equal (slice t 0 n) (slice t' 0 n));
()
let slice_pad_to_128_bits (s:seq nat8 { 0 < length s /\ length s < 16 }) :
Lemma(slice (pad_to_128_bits s) 0 (length s) == s)
=
assert (length s % 16 == length s);
assert (equal s (slice (pad_to_128_bits s) 0 (length s)));
()
let step2 (s:seq nat8 { 0 < length s /\ length s < 16 }) (q:quad32) (icb_BE:quad32) (alg:algorithm) (key:aes_key_LE alg) (i:int):
Lemma(let q_bytes = le_quad32_to_bytes q in
let q_bytes_prefix = slice q_bytes 0 (length s) in
let q_cipher = gctr_encrypt_block icb_BE q alg key i in
let q_cipher_bytes = slice (le_quad32_to_bytes q_cipher) 0 (length s) in
let s_quad = le_bytes_to_quad32 (pad_to_128_bits s) in
let s_cipher = gctr_encrypt_block icb_BE s_quad alg key i in
let s_cipher_bytes = slice (le_quad32_to_bytes s_cipher) 0 (length s) in
s == q_bytes_prefix ==> s_cipher_bytes == q_cipher_bytes)
=
let q_bytes = le_quad32_to_bytes q in
let q_bytes_prefix = slice q_bytes 0 (length s) in
let q_cipher = gctr_encrypt_block icb_BE q alg key i in
let q_cipher_bytes = slice (le_quad32_to_bytes q_cipher) 0 (length s) in
let s_quad = le_bytes_to_quad32 (pad_to_128_bits s) in
let s_cipher = gctr_encrypt_block icb_BE s_quad alg key i in
let s_cipher_bytes = slice (le_quad32_to_bytes s_cipher) 0 (length s) in
let enc_ctr = aes_encrypt_LE alg key (reverse_bytes_quad32 (inc32 icb_BE i)) in
let icb_LE = reverse_bytes_quad32 (inc32 icb_BE i) in
if s = q_bytes_prefix then (
// s_cipher_bytes = slice (le_quad32_to_bytes s_cipher) 0 (length s)
// = slice (le_quad32_to_bytes (gctr_encrypt_block icb_BE s_quad alg key i)) 0 (length s)
// = slice (le_quad32_to_bytes (gctr_encrypt_block icb_BE (le_bytes_to_quad32 (pad_to_128_bits s)) alg key i)) 0 (length s)
// q_cipher_bytes = gctr_encrypt_block icb_BE q alg key i
le_quad32_to_bytes_to_quad32 (pad_to_128_bits s);
slice_pad_to_128_bits s;
quad32_xor_bytewise q (le_bytes_to_quad32 (pad_to_128_bits s)) (aes_encrypt_LE alg key icb_LE) (length s);
//assert (equal s_cipher_bytes q_cipher_bytes);
()
) else
();
()
#reset-options "--z3rlimit 30"
open FStar.Seq.Properties
let gctr_partial_to_full_advanced (icb_BE:quad32) (plain:seq quad32) (cipher:seq quad32) (alg:algorithm) (key:seq nat32) (num_bytes:nat) =
gctr_encrypt_LE_reveal ();
let num_blocks = num_bytes / 16 in
let plain_bytes = slice (le_seq_quad32_to_bytes plain) 0 num_bytes in
let cipher_bytes = slice (le_seq_quad32_to_bytes cipher) 0 num_bytes in
step1 plain num_bytes;
let s = slice (le_seq_quad32_to_bytes plain) (num_blocks * 16) num_bytes in
let final_p = index plain num_blocks in
step2 s final_p icb_BE alg key num_blocks;
let num_extra = num_bytes % 16 in
let full_bytes_len = num_bytes - num_extra in
let full_blocks, final_block = split plain_bytes full_bytes_len in
assert (full_bytes_len % 16 == 0);
assert (length full_blocks == full_bytes_len);
let full_quads_LE = le_bytes_to_seq_quad32 full_blocks in
let final_quad_LE = le_bytes_to_quad32 (pad_to_128_bits final_block) in
let cipher_quads_LE = gctr_encrypt_recursive icb_BE full_quads_LE alg key 0 in
let final_cipher_quad_LE = gctr_encrypt_block icb_BE final_quad_LE alg key (full_bytes_len / 16) in
assert (cipher_quads_LE == slice cipher 0 num_blocks); // LHS quads
let cipher_bytes_full_LE = le_seq_quad32_to_bytes cipher_quads_LE in
let final_cipher_bytes_LE = slice (le_quad32_to_bytes final_cipher_quad_LE) 0 num_extra in
assert (le_seq_quad32_to_bytes cipher_quads_LE == le_seq_quad32_to_bytes (slice cipher 0 num_blocks)); // LHS bytes
assert (length s == num_extra);
let q_prefix = slice (le_quad32_to_bytes final_p) 0 num_extra in
le_seq_quad32_to_bytes_tail_prefix plain num_bytes;
assert (q_prefix == s);
assert(final_cipher_bytes_LE == slice (le_quad32_to_bytes (index cipher num_blocks)) 0 num_extra); // RHS bytes
le_seq_quad32_to_bytes_tail_prefix cipher num_bytes;
assert (slice (le_quad32_to_bytes (index cipher num_blocks)) 0 num_extra ==
slice (le_seq_quad32_to_bytes cipher) (num_blocks * 16) num_bytes);
slice_commutes_le_seq_quad32_to_bytes0 cipher num_blocks;
assert (le_seq_quad32_to_bytes (slice cipher 0 num_blocks) == slice (le_seq_quad32_to_bytes cipher) 0 (num_blocks * 16));
assert (slice (slice (le_seq_quad32_to_bytes cipher) (num_blocks * 16) (length cipher * 16)) 0 num_extra ==
slice (le_seq_quad32_to_bytes cipher) (num_blocks * 16) num_bytes);
slice_append_adds (le_seq_quad32_to_bytes cipher) (num_blocks * 16) num_bytes;
assert (slice (le_seq_quad32_to_bytes cipher) 0 (num_blocks * 16) @|
slice (le_seq_quad32_to_bytes cipher) (num_blocks * 16) num_bytes ==
slice (le_seq_quad32_to_bytes cipher) 0 num_bytes);
assert (cipher_bytes == (le_seq_quad32_to_bytes (slice cipher 0 num_blocks)) @| slice (le_quad32_to_bytes (index cipher num_blocks)) 0 num_extra);
() | false | false | Vale.AES.GCTR.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 30,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val gctr_encrypt_one_block (icb_BE plain:quad32) (alg:algorithm) (key:seq nat32) : Lemma
(requires is_aes_key_LE alg key)
(ensures
gctr_encrypt_LE icb_BE (le_quad32_to_bytes plain) alg key ==
le_seq_quad32_to_bytes (create 1 (quad32_xor plain (aes_encrypt_BE alg key icb_BE)))
) | [] | Vale.AES.GCTR.gctr_encrypt_one_block | {
"file_name": "vale/code/crypto/aes/Vale.AES.GCTR.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} |
icb_BE: Vale.Def.Types_s.quad32 ->
plain: Vale.Def.Types_s.quad32 ->
alg: Vale.AES.AES_common_s.algorithm ->
key: FStar.Seq.Base.seq Vale.Def.Types_s.nat32
-> FStar.Pervasives.Lemma (requires Vale.AES.AES_s.is_aes_key_LE alg key)
(ensures
Vale.AES.GCTR_s.gctr_encrypt_LE icb_BE (Vale.Def.Types_s.le_quad32_to_bytes plain) alg key ==
Vale.Def.Types_s.le_seq_quad32_to_bytes (FStar.Seq.Base.create 1
(Vale.Def.Types_s.quad32_xor plain (Vale.AES.GCTR.aes_encrypt_BE alg key icb_BE)))) | {
"end_col": 4,
"end_line": 724,
"start_col": 2,
"start_line": 696
} |
FStar.Pervasives.Lemma | val lemma_length_simplifier (s bytes t: seq quad32) (num_bytes: nat)
: Lemma
(requires
t == (if num_bytes > (length s) * 16 then append s bytes else s) /\
(num_bytes <= (length s) * 16 ==> num_bytes == (length s * 16)) /\
length s * 16 <= num_bytes /\ num_bytes < length s * 16 + 16 /\ length bytes == 1)
(ensures
slice (le_seq_quad32_to_bytes t) 0 num_bytes ==
slice (le_seq_quad32_to_bytes (append s bytes)) 0 num_bytes) | [
{
"abbrev": false,
"full_module": "FStar.Seq.Properties",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq.Properties",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.Types_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Lib.Seqs",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCM_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCTR_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words.Four_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words.Seq_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Lib.Seqs",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCM_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCTR_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let lemma_length_simplifier (s bytes t:seq quad32) (num_bytes:nat) : Lemma
(requires t == (if num_bytes > (length s) * 16 then append s bytes else s) /\
(num_bytes <= (length s) * 16 ==> num_bytes == (length s * 16)) /\
length s * 16 <= num_bytes /\
num_bytes < length s * 16 + 16 /\
length bytes == 1
)
(ensures slice (le_seq_quad32_to_bytes t) 0 num_bytes ==
slice (le_seq_quad32_to_bytes (append s bytes)) 0 num_bytes)
=
if num_bytes > (length s) * 16 then (
()
) else (
calc (==) {
slice (le_seq_quad32_to_bytes (append s bytes)) 0 num_bytes;
== { append_distributes_le_seq_quad32_to_bytes s bytes }
slice (append (le_seq_quad32_to_bytes s) (le_seq_quad32_to_bytes bytes)) 0 num_bytes;
== { Vale.Lib.Seqs.lemma_slice_first_exactly_in_append (le_seq_quad32_to_bytes s) (le_seq_quad32_to_bytes bytes) }
le_seq_quad32_to_bytes s;
== { assert (length (le_seq_quad32_to_bytes s) == num_bytes) }
slice (le_seq_quad32_to_bytes s) 0 num_bytes;
};
()
) | val lemma_length_simplifier (s bytes t: seq quad32) (num_bytes: nat)
: Lemma
(requires
t == (if num_bytes > (length s) * 16 then append s bytes else s) /\
(num_bytes <= (length s) * 16 ==> num_bytes == (length s * 16)) /\
length s * 16 <= num_bytes /\ num_bytes < length s * 16 + 16 /\ length bytes == 1)
(ensures
slice (le_seq_quad32_to_bytes t) 0 num_bytes ==
slice (le_seq_quad32_to_bytes (append s bytes)) 0 num_bytes)
let lemma_length_simplifier (s bytes t: seq quad32) (num_bytes: nat)
: Lemma
(requires
t == (if num_bytes > (length s) * 16 then append s bytes else s) /\
(num_bytes <= (length s) * 16 ==> num_bytes == (length s * 16)) /\
length s * 16 <= num_bytes /\ num_bytes < length s * 16 + 16 /\ length bytes == 1)
(ensures
slice (le_seq_quad32_to_bytes t) 0 num_bytes ==
slice (le_seq_quad32_to_bytes (append s bytes)) 0 num_bytes) = | false | null | true | if num_bytes > (length s) * 16
then (())
else
(calc ( == ) {
slice (le_seq_quad32_to_bytes (append s bytes)) 0 num_bytes;
( == ) { append_distributes_le_seq_quad32_to_bytes s bytes }
slice (append (le_seq_quad32_to_bytes s) (le_seq_quad32_to_bytes bytes)) 0 num_bytes;
( == ) { Vale.Lib.Seqs.lemma_slice_first_exactly_in_append (le_seq_quad32_to_bytes s)
(le_seq_quad32_to_bytes bytes) }
le_seq_quad32_to_bytes s;
( == ) { assert (length (le_seq_quad32_to_bytes s) == num_bytes) }
slice (le_seq_quad32_to_bytes s) 0 num_bytes;
};
()) | {
"checked_file": "Vale.AES.GCTR.fst.checked",
"dependencies": [
"Vale.Poly1305.Bitvectors.fsti.checked",
"Vale.Lib.Seqs.fsti.checked",
"Vale.Def.Words_s.fsti.checked",
"Vale.Def.Words.Seq_s.fsti.checked",
"Vale.Def.Words.Four_s.fsti.checked",
"Vale.Def.TypesNative_s.fst.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Opaque_s.fsti.checked",
"Vale.Arch.Types.fsti.checked",
"Vale.AES.Types_helpers.fsti.checked",
"Vale.AES.GCTR_s.fst.checked",
"Vale.AES.GCM_helpers.fsti.checked",
"Vale.AES.AES_s.fst.checked",
"prims.fst.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.Properties.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Classical.fsti.checked",
"FStar.Calc.fsti.checked"
],
"interface_file": true,
"source_file": "Vale.AES.GCTR.fst"
} | [
"lemma"
] | [
"FStar.Seq.Base.seq",
"Vale.Def.Types_s.quad32",
"Prims.nat",
"Prims.op_GreaterThan",
"FStar.Mul.op_Star",
"FStar.Seq.Base.length",
"Prims.bool",
"Prims.unit",
"FStar.Calc.calc_finish",
"Vale.Def.Types_s.nat8",
"Prims.eq2",
"FStar.Seq.Base.slice",
"Vale.Def.Types_s.le_seq_quad32_to_bytes",
"FStar.Seq.Base.append",
"Prims.Cons",
"FStar.Preorder.relation",
"Prims.Nil",
"FStar.Calc.calc_step",
"FStar.Calc.calc_init",
"FStar.Calc.calc_pack",
"Vale.Arch.Types.append_distributes_le_seq_quad32_to_bytes",
"Prims.squash",
"Vale.Lib.Seqs.lemma_slice_first_exactly_in_append",
"Prims._assert",
"Prims.l_and",
"Prims.l_imp",
"Prims.b2t",
"Prims.op_LessThanOrEqual",
"Prims.int",
"Prims.op_LessThan",
"Prims.op_Addition",
"FStar.Pervasives.pattern"
] | [] | module Vale.AES.GCTR
open Vale.Def.Opaque_s
open Vale.Def.Words_s
open Vale.Def.Words.Seq_s
open Vale.Def.Words.Four_s
open Vale.Def.Types_s
open Vale.Arch.Types
open FStar.Mul
open FStar.Seq
open Vale.AES.AES_s
open Vale.AES.GCTR_s
open Vale.AES.GCM_helpers
open FStar.Math.Lemmas
open Vale.Lib.Seqs
open Vale.AES.Types_helpers
#set-options "--z3rlimit 20 --max_fuel 1 --max_ifuel 0"
let lemma_counter_init x low64 low8 =
Vale.Poly1305.Bitvectors.lemma_bytes_and_mod1 low64;
Vale.Def.TypesNative_s.reveal_iand 64 low64 0xff;
assert (low8 == low64 % 256);
lo64_reveal ();
assert_norm (pow2_norm 32 == pow2_32); // OBSERVE
assert (low64 == x.lo0 + x.lo1 * pow2_32); // OBSERVE
assert (low64 % 256 == x.lo0 % 256);
()
let gctr_encrypt_block_offset (icb_BE:quad32) (plain_LE:quad32) (alg:algorithm) (key:seq nat32) (i:int) =
()
let gctr_encrypt_empty (icb_BE:quad32) (plain_LE cipher_LE:seq quad32) (alg:algorithm) (key:seq nat32) =
reveal_opaque (`%le_bytes_to_seq_quad32) le_bytes_to_seq_quad32;
gctr_encrypt_LE_reveal ();
let plain = slice (le_seq_quad32_to_bytes plain_LE) 0 0 in
let cipher = slice (le_seq_quad32_to_bytes cipher_LE) 0 0 in
assert (plain == empty);
assert (cipher == empty);
assert (length plain == 0);
assert (make_gctr_plain_LE plain == empty);
let num_extra = (length (make_gctr_plain_LE plain)) % 16 in
assert (num_extra == 0);
let plain_quads_LE = le_bytes_to_seq_quad32 plain in
let cipher_quads_LE = gctr_encrypt_recursive icb_BE plain_quads_LE alg key 0 in
assert (equal plain_quads_LE empty); // OBSERVE
assert (plain_quads_LE == empty);
assert (cipher_quads_LE == empty);
assert (equal (le_seq_quad32_to_bytes cipher_quads_LE) empty); // OBSERVEs
()
let gctr_partial_opaque_init alg plain cipher key icb =
gctr_partial_reveal ();
()
#restart-solver
let lemma_gctr_partial_append alg b1 b2 p1 c1 p2 c2 key icb1 icb2 =
gctr_partial_reveal ();
()
let gctr_partial_opaque_ignores_postfix alg bound plain plain' cipher cipher' key icb =
gctr_partial_reveal ();
// OBSERVE:
assert (forall i . 0 <= i /\ i < bound ==> index plain i == index (slice plain 0 bound) i);
assert (forall i . 0 <= i /\ i < bound ==> index plain' i == index (slice plain' 0 bound) i);
assert (forall i . 0 <= i /\ i < bound ==> index cipher i == index (slice cipher 0 bound) i);
assert (forall i . 0 <= i /\ i < bound ==> index cipher' i == index (slice cipher' 0 bound) i);
()
let gctr_partial_extend6 (alg:algorithm) (bound:nat) (plain cipher:seq quad32) (key:seq nat32) (icb:quad32)
=
gctr_partial_reveal ();
()
(*
let rec seq_map_i_indexed' (#a:Type) (#b:Type) (f:int->a->b) (s:seq a) (i:int) :
Tot (s':seq b { length s' == length s /\
(forall j . {:pattern index s' j} 0 <= j /\ j < length s ==> index s' j == f (i + j) (index s j))
})
(decreases (length s))
=
if length s = 0 then empty
else cons (f i (head s)) (seq_map_i_indexed f (tail s) (i + 1))
let rec test (icb_BE:quad32) (plain_LE:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (i:int) :
Lemma (ensures
(let gctr_encrypt_block_curried (j:int) (p:quad32) = gctr_encrypt_block icb_BE p alg key j in
gctr_encrypt_recursive icb_BE plain_LE alg key i == seq_map_i_indexed' gctr_encrypt_block_curried plain_LE i))
(decreases (length plain_LE))
=
let gctr_encrypt_block_curried (j:int) (p:quad32) = gctr_encrypt_block icb_BE p alg key j in
let g = gctr_encrypt_recursive icb_BE plain_LE alg key i in
let s = seq_map_i_indexed' gctr_encrypt_block_curried plain_LE i in
if length plain_LE = 0 then (
assert(equal (g) (s));
()
) else (
test icb_BE (tail plain_LE) alg key (i+1);
assert (gctr_encrypt_recursive icb_BE (tail plain_LE) alg key (i+1) == seq_map_i_indexed' gctr_encrypt_block_curried (tail plain_LE) (i+1))
)
*)
let rec gctr_encrypt_recursive_length (icb:quad32) (plain:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (i:int) : Lemma
(requires True)
(ensures length (gctr_encrypt_recursive icb plain alg key i) == length plain)
(decreases %[length plain])
[SMTPat (length (gctr_encrypt_recursive icb plain alg key i))]
=
if length plain = 0 then ()
else gctr_encrypt_recursive_length icb (tail plain) alg key (i + 1)
#reset-options "--z3rlimit 40"
let gctr_encrypt_length (icb_BE:quad32) (plain:gctr_plain_LE)
(alg:algorithm) (key:aes_key_LE alg) :
Lemma(length (gctr_encrypt_LE icb_BE plain alg key) == length plain)
[SMTPat (length (gctr_encrypt_LE icb_BE plain alg key))]
=
reveal_opaque (`%le_bytes_to_seq_quad32) le_bytes_to_seq_quad32;
gctr_encrypt_LE_reveal ();
let num_extra = (length plain) % 16 in
let result = gctr_encrypt_LE icb_BE plain alg key in
if num_extra = 0 then (
let plain_quads_LE = le_bytes_to_seq_quad32 plain in
gctr_encrypt_recursive_length icb_BE plain_quads_LE alg key 0
) else (
let full_bytes_len = (length plain) - num_extra in
let full_blocks, final_block = split plain full_bytes_len in
let full_quads_LE = le_bytes_to_seq_quad32 full_blocks in
let final_quad_LE = le_bytes_to_quad32 (pad_to_128_bits final_block) in
let cipher_quads_LE = gctr_encrypt_recursive icb_BE full_quads_LE alg key 0 in
let final_cipher_quad_LE = gctr_encrypt_block icb_BE final_quad_LE alg key (full_bytes_len / 16) in
let cipher_bytes_full_LE = le_seq_quad32_to_bytes cipher_quads_LE in
let final_cipher_bytes_LE = slice (le_quad32_to_bytes final_cipher_quad_LE) 0 num_extra in
gctr_encrypt_recursive_length icb_BE full_quads_LE alg key 0;
assert (length result == length cipher_bytes_full_LE + length final_cipher_bytes_LE);
assert (length cipher_quads_LE == length full_quads_LE);
assert (length cipher_bytes_full_LE == 16 * length cipher_quads_LE);
assert (16 * length full_quads_LE == length full_blocks);
assert (length cipher_bytes_full_LE == length full_blocks);
()
)
#reset-options
let rec gctr_indexed_helper (icb:quad32) (plain:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (i:int) : Lemma
(requires True)
(ensures (let cipher = gctr_encrypt_recursive icb plain alg key i in
length cipher == length plain /\
(forall j . {:pattern index cipher j} 0 <= j /\ j < length plain ==>
index cipher j == quad32_xor (index plain j) (aes_encrypt_BE alg key (inc32 icb (i + j)) ))))
(decreases %[length plain])
=
if length plain = 0 then ()
else
let tl = tail plain in
let cipher = gctr_encrypt_recursive icb plain alg key i in
let r_cipher = gctr_encrypt_recursive icb tl alg key (i+1) in
let helper (j:int) :
Lemma ((0 <= j /\ j < length plain) ==> (index cipher j == quad32_xor (index plain j) (aes_encrypt_BE alg key (inc32 icb (i + j)) )))
=
aes_encrypt_LE_reveal ();
if 0 < j && j < length plain then (
gctr_indexed_helper icb tl alg key (i+1);
assert(index r_cipher (j-1) == quad32_xor (index tl (j-1)) (aes_encrypt_BE alg key (inc32 icb (i + 1 + j - 1)) )) // OBSERVE
) else ()
in
FStar.Classical.forall_intro helper
let gctr_indexed (icb:quad32) (plain:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (cipher:seq quad32) : Lemma
(requires length cipher == length plain /\
(forall i . {:pattern index cipher i} 0 <= i /\ i < length cipher ==>
index cipher i == quad32_xor (index plain i) (aes_encrypt_BE alg key (inc32 icb i) )))
(ensures cipher == gctr_encrypt_recursive icb plain alg key 0)
=
gctr_indexed_helper icb plain alg key 0;
let c = gctr_encrypt_recursive icb plain alg key 0 in
assert(equal cipher c) // OBSERVE: Invoke extensionality lemmas
let gctr_partial_completed (alg:algorithm) (plain cipher:seq quad32) (key:seq nat32) (icb:quad32) =
gctr_indexed icb plain alg key cipher;
()
let gctr_partial_opaque_completed (alg:algorithm) (plain cipher:seq quad32) (key:seq nat32) (icb:quad32) : Lemma
(requires
is_aes_key_LE alg key /\
length plain == length cipher /\
length plain < pow2_32 /\
gctr_partial alg (length cipher) plain cipher key icb
)
(ensures cipher == gctr_encrypt_recursive icb plain alg key 0)
=
gctr_partial_reveal ();
gctr_partial_completed alg plain cipher key icb
let gctr_partial_to_full_basic (icb_BE:quad32) (plain:seq quad32) (alg:algorithm) (key:seq nat32) (cipher:seq quad32) =
gctr_encrypt_LE_reveal ();
let p = le_seq_quad32_to_bytes plain in
assert (length p % 16 == 0);
let plain_quads_LE = le_bytes_to_seq_quad32 p in
let cipher_quads_LE = gctr_encrypt_recursive icb_BE plain_quads_LE alg key 0 in
let cipher_bytes = le_seq_quad32_to_bytes cipher_quads_LE in
le_bytes_to_seq_quad32_to_bytes plain;
()
(*
Want to show that:
slice (le_seq_quad32_to_bytes (buffer128_as_seq(mem, out_b))) 0 num_bytes
==
gctr_encrypt_LE icb_BE (slice (le_seq_quad32_to_bytes (buffer128_as_seq(mem, in_b))) 0 num_bytes) ...
We know that
slice (buffer128_as_seq(mem, out_b) 0 num_blocks
==
gctr_encrypt_recursive icb_BE (slice buffer128_as_seq(mem, in_b) 0 num_blocks) ...
And we know that:
get_mem out_b num_blocks
==
gctr_encrypt_block(icb_BE, (get_mem inb num_blocks), alg, key, num_blocks);
Internally gctr_encrypt_LE will compute:
full_blocks, final_block = split (slice (le_seq_quad32_to_bytes (buffer128_as_seq(mem, in_b))) 0 num_bytes) (num_blocks * 16)
We'd like to show that
Step1: le_bytes_to_seq_quad32 full_blocks == slice buffer128_as_seq(mem, in_b) 0 num_blocks
and
Step2: final_block == slice (le_quad32_to_bytes (get_mem inb num_blocks)) 0 num_extra
Then we need to break down the byte-level effects of gctr_encrypt_block to show that even though the
padded version of final_block differs from (get_mem inb num_blocks), after we slice it at the end,
we end up with the same value
*)
let step1 (p:seq quad32) (num_bytes:nat{ num_bytes < 16 * length p }) : Lemma
(let num_extra = num_bytes % 16 in
let num_blocks = num_bytes / 16 in
let full_blocks, final_block = split (slice (le_seq_quad32_to_bytes p) 0 num_bytes) (num_blocks * 16) in
let full_quads_LE = le_bytes_to_seq_quad32 full_blocks in
let p_prefix = slice p 0 num_blocks in
p_prefix == full_quads_LE)
=
let num_extra = num_bytes % 16 in
let num_blocks = num_bytes / 16 in
let full_blocks, final_block = split (slice (le_seq_quad32_to_bytes p) 0 num_bytes) (num_blocks * 16) in
let full_quads_LE = le_bytes_to_seq_quad32 full_blocks in
let p_prefix = slice p 0 num_blocks in
assert (length full_blocks == num_blocks * 16);
assert (full_blocks == slice (slice (le_seq_quad32_to_bytes p) 0 num_bytes) 0 (num_blocks * 16));
assert (full_blocks == slice (le_seq_quad32_to_bytes p) 0 (num_blocks * 16));
slice_commutes_le_seq_quad32_to_bytes0 p num_blocks;
assert (full_blocks == le_seq_quad32_to_bytes (slice p 0 num_blocks));
le_bytes_to_seq_quad32_to_bytes (slice p 0 num_blocks);
assert (full_quads_LE == (slice p 0 num_blocks));
()
#reset-options "--smtencoding.elim_box true --z3rlimit 30"
let lemma_slice_orig_index (#a:Type) (s s':seq a) (m n:nat) : Lemma
(requires length s == length s' /\ m <= n /\ n <= length s /\ slice s m n == slice s' m n)
(ensures (forall (i:int).{:pattern (index s i) \/ (index s' i)} m <= i /\ i < n ==> index s i == index s' i))
=
let aux (i:nat{m <= i /\ i < n}) : Lemma (index s i == index s' i) =
lemma_index_slice s m n (i - m);
lemma_index_slice s' m n (i - m)
in Classical.forall_intro aux
let lemma_ishl_32 (x:nat32) (k:nat) : Lemma
(ensures ishl #pow2_32 x k == x * pow2 k % pow2_32)
=
Vale.Def.TypesNative_s.reveal_ishl 32 x k;
FStar.UInt.shift_left_value_lemma #32 x k;
()
let lemma_ishl_ixor_32 (x y:nat32) (k:nat) : Lemma
(ensures ishl #pow2_32 (ixor x y) k == ixor (ishl x k) (ishl y k))
=
Vale.Def.TypesNative_s.reveal_ishl 32 x k;
Vale.Def.TypesNative_s.reveal_ishl 32 y k;
Vale.Def.TypesNative_s.reveal_ishl 32 (ixor x y) k;
Vale.Def.TypesNative_s.reveal_ixor 32 x y;
Vale.Def.TypesNative_s.reveal_ixor 32 (ishl x k) (ishl y k);
FStar.UInt.shift_left_logxor_lemma #32 x y k;
()
unfold let pow2_24 = 0x1000000
let nat24 = natN pow2_24
let nat32_xor_bytewise_1_helper1 (x0 x0':nat8) (x1 x1':nat24) (x x':nat32) : Lemma
(requires
x == x0 + 0x100 * x1 /\
x' == x0' + 0x100 * x1' /\
x * 0x1000000 % 0x100000000 == x' * 0x1000000 % 0x100000000
)
(ensures x0 == x0')
=
()
let nat32_xor_bytewise_2_helper1 (x0 x0' x1 x1':nat16) (x x':nat32) : Lemma
(requires
x == x0 + 0x10000 * x1 /\
x' == x0' + 0x10000 * x1' /\
x * 0x10000 % 0x100000000 == x' * 0x10000 % 0x100000000
)
(ensures x0 == x0')
=
()
let nat32_xor_bytewise_3_helper1 (x0 x0':nat24) (x1 x1':nat8) (x x':nat32) : Lemma
(requires
x == x0 + 0x1000000 * x1 /\
x' == x0' + 0x1000000 * x1' /\
x * 0x100 % 0x100000000 == x' * 0x100 % 0x100000000
)
(ensures x0 == x0')
=
()
let nat32_xor_bytewise_1_helper2 (x x':nat32) (t t':four nat8) : Lemma
(requires
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
x * 0x1000000 % 0x100000000 == x' * 0x1000000 % 0x100000000
)
(ensures t.lo0 == t'.lo0)
=
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
let t123 = t1 + 0x100 * t2 + 0x10000 * t3 in
let t123' = t1' + 0x100 * t2' + 0x10000 * t3' in
assert_norm (four_to_nat 8 t == four_to_nat_unfold 8 t );
assert_norm (four_to_nat 8 t' == four_to_nat_unfold 8 t');
nat32_xor_bytewise_1_helper1 t0 t0' t123 t123' x x';
()
let nat32_xor_bytewise_2_helper2 (x x':nat32) (t t':four nat8) : Lemma
(requires
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
x * 0x10000 % 0x100000000 == x' * 0x10000 % 0x100000000
)
(ensures t.lo0 == t'.lo0 /\ t.lo1 == t'.lo1)
=
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
let t01 = t0 + 0x100 * t1 in
let t23 = t2 + 0x100 * t3 in
let t01' = t0' + 0x100 * t1' in
let t23' = t2' + 0x100 * t3' in
assert_norm (four_to_nat 8 t == four_to_nat_unfold 8 t );
assert_norm (four_to_nat 8 t' == four_to_nat_unfold 8 t');
nat32_xor_bytewise_2_helper1 t01 t01' t23 t23' x x';
()
let nat32_xor_bytewise_3_helper2 (x x':nat32) (t t':four nat8) : Lemma
(requires
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
x * 0x100 % 0x100000000 == x' * 0x100 % 0x100000000
)
(ensures t.lo0 == t'.lo0 /\ t.lo1 == t'.lo1 /\ t.hi2 == t'.hi2)
=
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
let t012 = t0 + 0x100 * t1 + 0x10000 * t2 in
let t012' = t0' + 0x100 * t1' + 0x10000 * t2' in
assert_norm (four_to_nat 8 t == four_to_nat_unfold 8 t );
assert_norm (four_to_nat 8 t' == four_to_nat_unfold 8 t');
nat32_xor_bytewise_3_helper1 t012 t012' t3 t3' x x';
()
let nat32_xor_bytewise_1_helper3 (k k':nat32) (s s':four nat8) : Lemma
(requires
k == four_to_nat 8 s /\
k' == four_to_nat 8 s' /\
s.lo0 == s'.lo0
)
(ensures k * 0x1000000 % 0x100000000 == k' * 0x1000000 % 0x100000000)
=
let Mkfour _ _ _ _ = s in
let Mkfour _ _ _ _ = s' in
assert_norm (four_to_nat 8 s == four_to_nat_unfold 8 s );
assert_norm (four_to_nat 8 s' == four_to_nat_unfold 8 s');
()
let nat32_xor_bytewise_2_helper3 (k k':nat32) (s s':four nat8) : Lemma
(requires
k == four_to_nat 8 s /\
k' == four_to_nat 8 s' /\
s.lo0 == s'.lo0 /\ s.lo1 == s'.lo1
)
(ensures k * 0x10000 % 0x100000000 == k' * 0x10000 % 0x100000000)
=
let Mkfour _ _ _ _ = s in
let Mkfour _ _ _ _ = s' in
assert_norm (four_to_nat 8 s == four_to_nat_unfold 8 s );
assert_norm (four_to_nat 8 s' == four_to_nat_unfold 8 s');
()
let nat32_xor_bytewise_3_helper3 (k k':nat32) (s s':four nat8) : Lemma
(requires
k == four_to_nat 8 s /\
k' == four_to_nat 8 s' /\
s.lo0 == s'.lo0 /\ s.lo1 == s'.lo1 /\ s.hi2 == s'.hi2
)
(ensures k * 0x100 % 0x100000000 == k' * 0x100 % 0x100000000)
=
let Mkfour _ _ _ _ = s in
let Mkfour _ _ _ _ = s' in
assert_norm (four_to_nat 8 s == four_to_nat_unfold 8 s );
assert_norm (four_to_nat 8 s' == four_to_nat_unfold 8 s');
()
let nat32_xor_bytewise_1 (k k' x x' m:nat32) (s s' t t':four nat8) : Lemma
(requires
k == four_to_nat 8 s /\
k' == four_to_nat 8 s' /\
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
ixor k m == x /\
ixor k' m == x' /\
s.lo0 == s'.lo0
)
(ensures t.lo0 == t'.lo0)
=
let Mkfour s0 s1 s2 s3 = s in
let Mkfour s0' s1' s2' s3' = s' in
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
nat32_xor_bytewise_1_helper3 k k' s s';
lemma_ishl_32 k 24;
lemma_ishl_32 k' 24;
lemma_ishl_32 x 24;
lemma_ishl_32 x' 24;
lemma_ishl_ixor_32 k m 24;
lemma_ishl_ixor_32 k' m 24;
assert_norm (pow2 24 == pow2_24);
nat32_xor_bytewise_1_helper2 x x' t t';
()
let nat32_xor_bytewise_2 (k k' x x' m:nat32) (s s' t t':four nat8) : Lemma
(requires
k == four_to_nat 8 s /\
k' == four_to_nat 8 s' /\
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
ixor k m == x /\
ixor k' m == x' /\
s.lo0 == s'.lo0 /\ s.lo1 == s'.lo1
)
(ensures t.lo0 == t'.lo0 /\ t.lo1 == t'.lo1)
=
let Mkfour s0 s1 s2 s3 = s in
let Mkfour s0' s1' s2' s3' = s' in
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
nat32_xor_bytewise_2_helper3 k k' s s';
lemma_ishl_32 k 16;
lemma_ishl_32 k' 16;
lemma_ishl_32 x 16;
lemma_ishl_32 x' 16;
lemma_ishl_ixor_32 k m 16;
lemma_ishl_ixor_32 k' m 16;
// assert (ishl #pow2_32 k 16 == k * 0x10000 % 0x100000000);
// assert (ishl #pow2_32 k' 16 == k' * 0x10000 % 0x100000000);
// assert (ishl #pow2_32 x 16 == x * 0x10000 % 0x100000000);
// assert (ishl #pow2_32 x' 16 == x' * 0x10000 % 0x100000000);
// assert (ishl #pow2_32 x 16 == ixor (ishl k 16) (ishl m 16));
// assert (ishl #pow2_32 x' 16 == ixor (ishl k' 16) (ishl m 16));
// assert (x * 0x10000 % 0x100000000 == ixor (k * 0x10000 % 0x100000000) (ishl m 16));
// assert (x' * 0x10000 % 0x100000000 == ixor (k' * 0x10000 % 0x100000000) (ishl m 16));
nat32_xor_bytewise_2_helper2 x x' t t';
()
let nat32_xor_bytewise_3 (k k' x x' m:nat32) (s s' t t':four nat8) : Lemma
(requires
k == four_to_nat 8 s /\
k' == four_to_nat 8 s' /\
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
ixor k m == x /\
ixor k' m == x' /\
s.lo0 == s'.lo0 /\ s.lo1 == s'.lo1 /\ s.hi2 == s'.hi2
)
(ensures t.lo0 == t'.lo0 /\ t.lo1 == t'.lo1 /\ t.hi2 == t'.hi2)
=
let Mkfour s0 s1 s2 s3 = s in
let Mkfour s0' s1' s2' s3' = s' in
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
nat32_xor_bytewise_3_helper3 k k' s s';
lemma_ishl_32 k 8;
lemma_ishl_32 k' 8;
lemma_ishl_32 x 8;
lemma_ishl_32 x' 8;
lemma_ishl_ixor_32 k m 8;
lemma_ishl_ixor_32 k' m 8;
nat32_xor_bytewise_3_helper2 x x' t t';
()
#reset-options "--z3rlimit 50 --smtencoding.nl_arith_repr boxwrap --smtencoding.l_arith_repr boxwrap"
let nat32_xor_bytewise_4 (k k' x x' m:nat32) (s s' t t':four nat8) : Lemma
(requires
k == four_to_nat 8 s /\
k' == four_to_nat 8 s' /\
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
ixor k m == x /\
ixor k' m == x' /\
s == s'
)
(ensures t == t')
=
let Mkfour s0 s1 s2 s3 = s in
let Mkfour s0' s1' s2' s3' = s' in
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
assert_norm (four_to_nat 8 t' == four_to_nat_unfold 8 t');
assert_norm (four_to_nat 8 t == four_to_nat_unfold 8 t );
()
#reset-options
let nat32_xor_bytewise (k k' m:nat32) (s s' t t':seq4 nat8) (n:nat) : Lemma
(requires
n <= 4 /\
k == four_to_nat 8 (seq_to_four_LE s) /\
k' == four_to_nat 8 (seq_to_four_LE s') /\
ixor k m == four_to_nat 8 (seq_to_four_LE t) /\
ixor k' m == four_to_nat 8 (seq_to_four_LE t') /\
equal (slice s 0 n) (slice s' 0 n)
)
// (ensures equal (slice t 0 n) (slice t' 0 n))
(ensures (forall (i:nat).{:pattern (index t i) \/ (index t' i)} i < n ==> index t i == index t' i))
=
assert (n > 0 ==> index (slice s 0 n) 0 == index (slice s' 0 n) 0);
assert (n > 1 ==> index (slice s 0 n) 1 == index (slice s' 0 n) 1);
assert (n > 2 ==> index (slice s 0 n) 2 == index (slice s' 0 n) 2);
assert (n > 3 ==> index (slice s 0 n) 3 == index (slice s' 0 n) 3);
let x = ixor k m in
let x' = ixor k' m in
if n = 1 then nat32_xor_bytewise_1 k k' x x' m (seq_to_four_LE s) (seq_to_four_LE s') (seq_to_four_LE t) (seq_to_four_LE t');
if n = 2 then nat32_xor_bytewise_2 k k' x x' m (seq_to_four_LE s) (seq_to_four_LE s') (seq_to_four_LE t) (seq_to_four_LE t');
if n = 3 then nat32_xor_bytewise_3 k k' x x' m (seq_to_four_LE s) (seq_to_four_LE s') (seq_to_four_LE t) (seq_to_four_LE t');
if n = 4 then nat32_xor_bytewise_4 k k' x x' m (seq_to_four_LE s) (seq_to_four_LE s') (seq_to_four_LE t) (seq_to_four_LE t');
assert (equal (slice t 0 n) (slice t' 0 n));
lemma_slice_orig_index t t' 0 n;
()
let quad32_xor_bytewise (q q' r:quad32) (n:nat{ n <= 16 }) : Lemma
(requires (let q_bytes = le_quad32_to_bytes q in
let q'_bytes = le_quad32_to_bytes q' in
slice q_bytes 0 n == slice q'_bytes 0 n))
(ensures (let q_bytes = le_quad32_to_bytes q in
let q'_bytes = le_quad32_to_bytes q' in
let qr_bytes = le_quad32_to_bytes (quad32_xor q r) in
let q'r_bytes = le_quad32_to_bytes (quad32_xor q' r) in
slice qr_bytes 0 n == slice q'r_bytes 0 n))
=
let s = le_quad32_to_bytes q in
let s' = le_quad32_to_bytes q' in
let t = le_quad32_to_bytes (quad32_xor q r) in
let t' = le_quad32_to_bytes (quad32_xor q' r) in
lemma_slices_le_quad32_to_bytes q;
lemma_slices_le_quad32_to_bytes q';
lemma_slices_le_quad32_to_bytes (quad32_xor q r);
lemma_slices_le_quad32_to_bytes (quad32_xor q' r);
lemma_slice_orig_index s s' 0 n;
quad32_xor_reveal ();
reverse_bytes_nat32_reveal ();
if n < 4 then nat32_xor_bytewise q.lo0 q'.lo0 r.lo0 (slice s 0 4) (slice s' 0 4) (slice t 0 4) (slice t' 0 4) n
else
(
nat32_xor_bytewise q.lo0 q'.lo0 r.lo0 (slice s 0 4) (slice s' 0 4) (slice t 0 4) (slice t' 0 4) 4;
if n < 8 then nat32_xor_bytewise q.lo1 q'.lo1 r.lo1 (slice s 4 8) (slice s' 4 8) (slice t 4 8) (slice t' 4 8) (n - 4)
else
(
nat32_xor_bytewise q.lo1 q'.lo1 r.lo1 (slice s 4 8) (slice s' 4 8) (slice t 4 8) (slice t' 4 8) 4;
if n < 12 then nat32_xor_bytewise q.hi2 q'.hi2 r.hi2 (slice s 8 12) (slice s' 8 12) (slice t 8 12) (slice t' 8 12) (n - 8)
else
(
nat32_xor_bytewise q.hi2 q'.hi2 r.hi2 (slice s 8 12) (slice s' 8 12) (slice t 8 12) (slice t' 8 12) 4;
nat32_xor_bytewise q.hi3 q'.hi3 r.hi3 (slice s 12 16) (slice s' 12 16) (slice t 12 16) (slice t' 12 16) (n - 12);
()
)
)
);
assert (equal (slice t 0 n) (slice t' 0 n));
()
let slice_pad_to_128_bits (s:seq nat8 { 0 < length s /\ length s < 16 }) :
Lemma(slice (pad_to_128_bits s) 0 (length s) == s)
=
assert (length s % 16 == length s);
assert (equal s (slice (pad_to_128_bits s) 0 (length s)));
()
let step2 (s:seq nat8 { 0 < length s /\ length s < 16 }) (q:quad32) (icb_BE:quad32) (alg:algorithm) (key:aes_key_LE alg) (i:int):
Lemma(let q_bytes = le_quad32_to_bytes q in
let q_bytes_prefix = slice q_bytes 0 (length s) in
let q_cipher = gctr_encrypt_block icb_BE q alg key i in
let q_cipher_bytes = slice (le_quad32_to_bytes q_cipher) 0 (length s) in
let s_quad = le_bytes_to_quad32 (pad_to_128_bits s) in
let s_cipher = gctr_encrypt_block icb_BE s_quad alg key i in
let s_cipher_bytes = slice (le_quad32_to_bytes s_cipher) 0 (length s) in
s == q_bytes_prefix ==> s_cipher_bytes == q_cipher_bytes)
=
let q_bytes = le_quad32_to_bytes q in
let q_bytes_prefix = slice q_bytes 0 (length s) in
let q_cipher = gctr_encrypt_block icb_BE q alg key i in
let q_cipher_bytes = slice (le_quad32_to_bytes q_cipher) 0 (length s) in
let s_quad = le_bytes_to_quad32 (pad_to_128_bits s) in
let s_cipher = gctr_encrypt_block icb_BE s_quad alg key i in
let s_cipher_bytes = slice (le_quad32_to_bytes s_cipher) 0 (length s) in
let enc_ctr = aes_encrypt_LE alg key (reverse_bytes_quad32 (inc32 icb_BE i)) in
let icb_LE = reverse_bytes_quad32 (inc32 icb_BE i) in
if s = q_bytes_prefix then (
// s_cipher_bytes = slice (le_quad32_to_bytes s_cipher) 0 (length s)
// = slice (le_quad32_to_bytes (gctr_encrypt_block icb_BE s_quad alg key i)) 0 (length s)
// = slice (le_quad32_to_bytes (gctr_encrypt_block icb_BE (le_bytes_to_quad32 (pad_to_128_bits s)) alg key i)) 0 (length s)
// q_cipher_bytes = gctr_encrypt_block icb_BE q alg key i
le_quad32_to_bytes_to_quad32 (pad_to_128_bits s);
slice_pad_to_128_bits s;
quad32_xor_bytewise q (le_bytes_to_quad32 (pad_to_128_bits s)) (aes_encrypt_LE alg key icb_LE) (length s);
//assert (equal s_cipher_bytes q_cipher_bytes);
()
) else
();
()
#reset-options "--z3rlimit 30"
open FStar.Seq.Properties
let gctr_partial_to_full_advanced (icb_BE:quad32) (plain:seq quad32) (cipher:seq quad32) (alg:algorithm) (key:seq nat32) (num_bytes:nat) =
gctr_encrypt_LE_reveal ();
let num_blocks = num_bytes / 16 in
let plain_bytes = slice (le_seq_quad32_to_bytes plain) 0 num_bytes in
let cipher_bytes = slice (le_seq_quad32_to_bytes cipher) 0 num_bytes in
step1 plain num_bytes;
let s = slice (le_seq_quad32_to_bytes plain) (num_blocks * 16) num_bytes in
let final_p = index plain num_blocks in
step2 s final_p icb_BE alg key num_blocks;
let num_extra = num_bytes % 16 in
let full_bytes_len = num_bytes - num_extra in
let full_blocks, final_block = split plain_bytes full_bytes_len in
assert (full_bytes_len % 16 == 0);
assert (length full_blocks == full_bytes_len);
let full_quads_LE = le_bytes_to_seq_quad32 full_blocks in
let final_quad_LE = le_bytes_to_quad32 (pad_to_128_bits final_block) in
let cipher_quads_LE = gctr_encrypt_recursive icb_BE full_quads_LE alg key 0 in
let final_cipher_quad_LE = gctr_encrypt_block icb_BE final_quad_LE alg key (full_bytes_len / 16) in
assert (cipher_quads_LE == slice cipher 0 num_blocks); // LHS quads
let cipher_bytes_full_LE = le_seq_quad32_to_bytes cipher_quads_LE in
let final_cipher_bytes_LE = slice (le_quad32_to_bytes final_cipher_quad_LE) 0 num_extra in
assert (le_seq_quad32_to_bytes cipher_quads_LE == le_seq_quad32_to_bytes (slice cipher 0 num_blocks)); // LHS bytes
assert (length s == num_extra);
let q_prefix = slice (le_quad32_to_bytes final_p) 0 num_extra in
le_seq_quad32_to_bytes_tail_prefix plain num_bytes;
assert (q_prefix == s);
assert(final_cipher_bytes_LE == slice (le_quad32_to_bytes (index cipher num_blocks)) 0 num_extra); // RHS bytes
le_seq_quad32_to_bytes_tail_prefix cipher num_bytes;
assert (slice (le_quad32_to_bytes (index cipher num_blocks)) 0 num_extra ==
slice (le_seq_quad32_to_bytes cipher) (num_blocks * 16) num_bytes);
slice_commutes_le_seq_quad32_to_bytes0 cipher num_blocks;
assert (le_seq_quad32_to_bytes (slice cipher 0 num_blocks) == slice (le_seq_quad32_to_bytes cipher) 0 (num_blocks * 16));
assert (slice (slice (le_seq_quad32_to_bytes cipher) (num_blocks * 16) (length cipher * 16)) 0 num_extra ==
slice (le_seq_quad32_to_bytes cipher) (num_blocks * 16) num_bytes);
slice_append_adds (le_seq_quad32_to_bytes cipher) (num_blocks * 16) num_bytes;
assert (slice (le_seq_quad32_to_bytes cipher) 0 (num_blocks * 16) @|
slice (le_seq_quad32_to_bytes cipher) (num_blocks * 16) num_bytes ==
slice (le_seq_quad32_to_bytes cipher) 0 num_bytes);
assert (cipher_bytes == (le_seq_quad32_to_bytes (slice cipher 0 num_blocks)) @| slice (le_quad32_to_bytes (index cipher num_blocks)) 0 num_extra);
()
let gctr_encrypt_one_block (icb_BE plain:quad32) (alg:algorithm) (key:seq nat32) =
gctr_encrypt_LE_reveal ();
assert(inc32 icb_BE 0 == icb_BE);
let encrypted_icb = aes_encrypt_BE alg key icb_BE in
let p = le_quad32_to_bytes plain in
let plain_quads_LE = le_bytes_to_seq_quad32 p in
let p_seq = create 1 plain in
assert (length p == 16);
le_bytes_to_seq_quad32_to_bytes_one_quad plain;
assert (p_seq == plain_quads_LE);
let cipher_quads_LE = gctr_encrypt_recursive icb_BE plain_quads_LE alg key 0 in
assert (cipher_quads_LE == cons (gctr_encrypt_block icb_BE (head plain_quads_LE) alg key 0) (gctr_encrypt_recursive icb_BE (tail plain_quads_LE) alg key (1)));
assert (head plain_quads_LE == plain);
assert (gctr_encrypt_block icb_BE (head plain_quads_LE) alg key 0 ==
(let icb_LE = reverse_bytes_quad32 (inc32 icb_BE 0) in
quad32_xor (head plain_quads_LE) (aes_encrypt_LE alg key icb_LE)));
assert (quad32_xor plain (aes_encrypt_LE alg key (reverse_bytes_quad32 icb_BE))
==
(let icb_LE = reverse_bytes_quad32 (inc32 icb_BE 0) in
quad32_xor (head plain_quads_LE) (aes_encrypt_LE alg key icb_LE)));
assert (gctr_encrypt_block icb_BE (head plain_quads_LE) alg key 0 == quad32_xor plain (aes_encrypt_LE alg key (reverse_bytes_quad32 icb_BE)));
aes_encrypt_LE_reveal ();
assert (gctr_encrypt_block icb_BE (head plain_quads_LE) alg key 0 == quad32_xor plain (aes_encrypt_BE alg key icb_BE));
assert (gctr_encrypt_block icb_BE (head plain_quads_LE) alg key 0 == quad32_xor plain encrypted_icb);
assert(gctr_encrypt_recursive icb_BE (tail p_seq) alg key 1 == empty); // OBSERVE
//assert(gctr_encrypt_LE icb p alg key == cons (quad32_xor plain encrypted_icb) empty);
let x = quad32_xor plain encrypted_icb in
append_empty_r (create 1 x); // This is the missing piece
()
let lemma_length_simplifier (s bytes t:seq quad32) (num_bytes:nat) : Lemma
(requires t == (if num_bytes > (length s) * 16 then append s bytes else s) /\
(num_bytes <= (length s) * 16 ==> num_bytes == (length s * 16)) /\
length s * 16 <= num_bytes /\
num_bytes < length s * 16 + 16 /\
length bytes == 1
)
(ensures slice (le_seq_quad32_to_bytes t) 0 num_bytes ==
slice (le_seq_quad32_to_bytes (append s bytes)) 0 num_bytes) | false | false | Vale.AES.GCTR.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 30,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val lemma_length_simplifier (s bytes t: seq quad32) (num_bytes: nat)
: Lemma
(requires
t == (if num_bytes > (length s) * 16 then append s bytes else s) /\
(num_bytes <= (length s) * 16 ==> num_bytes == (length s * 16)) /\
length s * 16 <= num_bytes /\ num_bytes < length s * 16 + 16 /\ length bytes == 1)
(ensures
slice (le_seq_quad32_to_bytes t) 0 num_bytes ==
slice (le_seq_quad32_to_bytes (append s bytes)) 0 num_bytes) | [] | Vale.AES.GCTR.lemma_length_simplifier | {
"file_name": "vale/code/crypto/aes/Vale.AES.GCTR.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} |
s: FStar.Seq.Base.seq Vale.Def.Types_s.quad32 ->
bytes: FStar.Seq.Base.seq Vale.Def.Types_s.quad32 ->
t: FStar.Seq.Base.seq Vale.Def.Types_s.quad32 ->
num_bytes: Prims.nat
-> FStar.Pervasives.Lemma
(requires
t ==
(match num_bytes > FStar.Seq.Base.length s * 16 with
| true -> FStar.Seq.Base.append s bytes
| _ -> s) /\
(num_bytes <= FStar.Seq.Base.length s * 16 ==> num_bytes == FStar.Seq.Base.length s * 16) /\
FStar.Seq.Base.length s * 16 <= num_bytes /\ num_bytes < FStar.Seq.Base.length s * 16 + 16 /\
FStar.Seq.Base.length bytes == 1)
(ensures
FStar.Seq.Base.slice (Vale.Def.Types_s.le_seq_quad32_to_bytes t) 0 num_bytes ==
FStar.Seq.Base.slice (Vale.Def.Types_s.le_seq_quad32_to_bytes (FStar.Seq.Base.append s bytes
))
0
num_bytes) | {
"end_col": 3,
"end_line": 751,
"start_col": 2,
"start_line": 738
} |
FStar.Pervasives.Lemma | val gctr_bytes_helper (alg:algorithm) (key:seq nat32)
(p128 p_bytes c128 c_bytes:seq quad32)
(p_num_bytes:nat)
(iv_BE:quad32) : Lemma
(requires length p128 * 16 < pow2_32 /\
length p128 * 16 <= p_num_bytes /\
p_num_bytes < length p128 * 16 + 16 /\
length p128 == length c128 /\
length p_bytes == 1 /\
length c_bytes == 1 /\
is_aes_key_LE alg key /\
// Ensured by Gctr_core_opt
gctr_partial_def alg (length p128) p128 c128 key iv_BE /\
(p_num_bytes > length p128 * 16 ==>
index c_bytes 0 == gctr_encrypt_block (inc32 iv_BE (length p128)) (index p_bytes 0) alg key 0))
(ensures (let plain_raw_quads = append p128 p_bytes in
let plain_bytes = slice (le_seq_quad32_to_bytes plain_raw_quads) 0 p_num_bytes in
let cipher_raw_quads = append c128 c_bytes in
let cipher_bytes = slice (le_seq_quad32_to_bytes cipher_raw_quads) 0 p_num_bytes in
is_gctr_plain_LE plain_bytes /\
cipher_bytes == gctr_encrypt_LE iv_BE plain_bytes alg key)) | [
{
"abbrev": false,
"full_module": "Vale.AES.Types_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words.Four_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words.Seq_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq.Properties",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Lib.Seqs",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCM_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCTR_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let gctr_bytes_helper alg key p128 p_bytes c128 c_bytes p_num_bytes iv_BE =
let icb_BE_inc = inc32 iv_BE (length p128) in
assert (gctr_encrypt_block icb_BE_inc (index p_bytes 0) alg key 0 ==
gctr_encrypt_block iv_BE (index p_bytes 0) alg key (length p128));
//assert (gctr_partial_def alg 1 p_bytes c_bytes key icb_BE_inc);
gctr_partial_reveal ();
if p_num_bytes = length p128 * 16 then (
gctr_partial_completed alg p128 c128 key iv_BE;
gctr_partial_to_full_basic iv_BE p128 alg key c128;
assert (le_seq_quad32_to_bytes c128 == gctr_encrypt_LE iv_BE (le_seq_quad32_to_bytes p128) alg key);
assert (equal (slice (le_seq_quad32_to_bytes p128) 0 p_num_bytes) (le_seq_quad32_to_bytes p128));
assert (equal (slice (le_seq_quad32_to_bytes c128) 0 p_num_bytes) (le_seq_quad32_to_bytes c128));
()
) else (
aes_encrypt_LE_reveal ();
lemma_gctr_partial_append alg (length p128) 1 p128 c128 p_bytes c_bytes key iv_BE icb_BE_inc;
let plain = append p128 p_bytes in
let cipher = append c128 c_bytes in
let num_blocks = p_num_bytes / 16 in
//gctr_partial_completed alg plain cipher key iv_BE;
gctr_partial_completed alg p128 c128 key iv_BE;
assert (equal (slice plain 0 num_blocks) p128);
assert (equal (slice cipher 0 num_blocks) c128);
gctr_partial_to_full_advanced iv_BE (append p128 p_bytes) (append c128 c_bytes) alg key p_num_bytes
);
lemma_length_simplifier p128 p_bytes (if p_num_bytes > length p128 * 16 then append p128 p_bytes else p128) p_num_bytes;
lemma_length_simplifier c128 c_bytes (if p_num_bytes > length c128 * 16 then append c128 c_bytes else c128) p_num_bytes;
() | val gctr_bytes_helper (alg:algorithm) (key:seq nat32)
(p128 p_bytes c128 c_bytes:seq quad32)
(p_num_bytes:nat)
(iv_BE:quad32) : Lemma
(requires length p128 * 16 < pow2_32 /\
length p128 * 16 <= p_num_bytes /\
p_num_bytes < length p128 * 16 + 16 /\
length p128 == length c128 /\
length p_bytes == 1 /\
length c_bytes == 1 /\
is_aes_key_LE alg key /\
// Ensured by Gctr_core_opt
gctr_partial_def alg (length p128) p128 c128 key iv_BE /\
(p_num_bytes > length p128 * 16 ==>
index c_bytes 0 == gctr_encrypt_block (inc32 iv_BE (length p128)) (index p_bytes 0) alg key 0))
(ensures (let plain_raw_quads = append p128 p_bytes in
let plain_bytes = slice (le_seq_quad32_to_bytes plain_raw_quads) 0 p_num_bytes in
let cipher_raw_quads = append c128 c_bytes in
let cipher_bytes = slice (le_seq_quad32_to_bytes cipher_raw_quads) 0 p_num_bytes in
is_gctr_plain_LE plain_bytes /\
cipher_bytes == gctr_encrypt_LE iv_BE plain_bytes alg key))
let gctr_bytes_helper alg key p128 p_bytes c128 c_bytes p_num_bytes iv_BE = | false | null | true | let icb_BE_inc = inc32 iv_BE (length p128) in
assert (gctr_encrypt_block icb_BE_inc (index p_bytes 0) alg key 0 ==
gctr_encrypt_block iv_BE (index p_bytes 0) alg key (length p128));
gctr_partial_reveal ();
if p_num_bytes = length p128 * 16
then
(gctr_partial_completed alg p128 c128 key iv_BE;
gctr_partial_to_full_basic iv_BE p128 alg key c128;
assert (le_seq_quad32_to_bytes c128 ==
gctr_encrypt_LE iv_BE (le_seq_quad32_to_bytes p128) alg key);
assert (equal (slice (le_seq_quad32_to_bytes p128) 0 p_num_bytes) (le_seq_quad32_to_bytes p128));
assert (equal (slice (le_seq_quad32_to_bytes c128) 0 p_num_bytes) (le_seq_quad32_to_bytes c128));
())
else
(aes_encrypt_LE_reveal ();
lemma_gctr_partial_append alg (length p128) 1 p128 c128 p_bytes c_bytes key iv_BE icb_BE_inc;
let plain = append p128 p_bytes in
let cipher = append c128 c_bytes in
let num_blocks = p_num_bytes / 16 in
gctr_partial_completed alg p128 c128 key iv_BE;
assert (equal (slice plain 0 num_blocks) p128);
assert (equal (slice cipher 0 num_blocks) c128);
gctr_partial_to_full_advanced iv_BE
(append p128 p_bytes)
(append c128 c_bytes)
alg
key
p_num_bytes);
lemma_length_simplifier p128
p_bytes
(if p_num_bytes > length p128 * 16 then append p128 p_bytes else p128)
p_num_bytes;
lemma_length_simplifier c128
c_bytes
(if p_num_bytes > length c128 * 16 then append c128 c_bytes else c128)
p_num_bytes;
() | {
"checked_file": "Vale.AES.GCTR.fst.checked",
"dependencies": [
"Vale.Poly1305.Bitvectors.fsti.checked",
"Vale.Lib.Seqs.fsti.checked",
"Vale.Def.Words_s.fsti.checked",
"Vale.Def.Words.Seq_s.fsti.checked",
"Vale.Def.Words.Four_s.fsti.checked",
"Vale.Def.TypesNative_s.fst.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Opaque_s.fsti.checked",
"Vale.Arch.Types.fsti.checked",
"Vale.AES.Types_helpers.fsti.checked",
"Vale.AES.GCTR_s.fst.checked",
"Vale.AES.GCM_helpers.fsti.checked",
"Vale.AES.AES_s.fst.checked",
"prims.fst.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.Properties.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Classical.fsti.checked",
"FStar.Calc.fsti.checked"
],
"interface_file": true,
"source_file": "Vale.AES.GCTR.fst"
} | [
"lemma"
] | [
"Vale.AES.AES_common_s.algorithm",
"FStar.Seq.Base.seq",
"Vale.Def.Types_s.nat32",
"Vale.Def.Types_s.quad32",
"Prims.nat",
"Prims.unit",
"Vale.AES.GCTR.lemma_length_simplifier",
"Prims.op_GreaterThan",
"FStar.Mul.op_Star",
"FStar.Seq.Base.length",
"FStar.Seq.Base.append",
"Prims.bool",
"Prims.op_Equality",
"Prims.int",
"Prims._assert",
"FStar.Seq.Base.equal",
"Vale.Def.Types_s.nat8",
"FStar.Seq.Base.slice",
"Vale.Def.Types_s.le_seq_quad32_to_bytes",
"Prims.eq2",
"Vale.AES.GCTR_s.gctr_encrypt_LE",
"Vale.AES.GCTR.gctr_partial_to_full_basic",
"Vale.AES.GCTR.gctr_partial_completed",
"Vale.AES.GCTR.gctr_partial_to_full_advanced",
"Prims.op_Division",
"Vale.AES.GCTR.lemma_gctr_partial_append",
"Vale.AES.AES_s.aes_encrypt_LE_reveal",
"Vale.AES.GCTR.gctr_partial_reveal",
"Vale.AES.GCTR_s.gctr_encrypt_block",
"FStar.Seq.Base.index",
"Vale.AES.GCTR_s.inc32"
] | [] | module Vale.AES.GCTR
open Vale.Def.Opaque_s
open Vale.Def.Words_s
open Vale.Def.Words.Seq_s
open Vale.Def.Words.Four_s
open Vale.Def.Types_s
open Vale.Arch.Types
open FStar.Mul
open FStar.Seq
open Vale.AES.AES_s
open Vale.AES.GCTR_s
open Vale.AES.GCM_helpers
open FStar.Math.Lemmas
open Vale.Lib.Seqs
open Vale.AES.Types_helpers
#set-options "--z3rlimit 20 --max_fuel 1 --max_ifuel 0"
let lemma_counter_init x low64 low8 =
Vale.Poly1305.Bitvectors.lemma_bytes_and_mod1 low64;
Vale.Def.TypesNative_s.reveal_iand 64 low64 0xff;
assert (low8 == low64 % 256);
lo64_reveal ();
assert_norm (pow2_norm 32 == pow2_32); // OBSERVE
assert (low64 == x.lo0 + x.lo1 * pow2_32); // OBSERVE
assert (low64 % 256 == x.lo0 % 256);
()
let gctr_encrypt_block_offset (icb_BE:quad32) (plain_LE:quad32) (alg:algorithm) (key:seq nat32) (i:int) =
()
let gctr_encrypt_empty (icb_BE:quad32) (plain_LE cipher_LE:seq quad32) (alg:algorithm) (key:seq nat32) =
reveal_opaque (`%le_bytes_to_seq_quad32) le_bytes_to_seq_quad32;
gctr_encrypt_LE_reveal ();
let plain = slice (le_seq_quad32_to_bytes plain_LE) 0 0 in
let cipher = slice (le_seq_quad32_to_bytes cipher_LE) 0 0 in
assert (plain == empty);
assert (cipher == empty);
assert (length plain == 0);
assert (make_gctr_plain_LE plain == empty);
let num_extra = (length (make_gctr_plain_LE plain)) % 16 in
assert (num_extra == 0);
let plain_quads_LE = le_bytes_to_seq_quad32 plain in
let cipher_quads_LE = gctr_encrypt_recursive icb_BE plain_quads_LE alg key 0 in
assert (equal plain_quads_LE empty); // OBSERVE
assert (plain_quads_LE == empty);
assert (cipher_quads_LE == empty);
assert (equal (le_seq_quad32_to_bytes cipher_quads_LE) empty); // OBSERVEs
()
let gctr_partial_opaque_init alg plain cipher key icb =
gctr_partial_reveal ();
()
#restart-solver
let lemma_gctr_partial_append alg b1 b2 p1 c1 p2 c2 key icb1 icb2 =
gctr_partial_reveal ();
()
let gctr_partial_opaque_ignores_postfix alg bound plain plain' cipher cipher' key icb =
gctr_partial_reveal ();
// OBSERVE:
assert (forall i . 0 <= i /\ i < bound ==> index plain i == index (slice plain 0 bound) i);
assert (forall i . 0 <= i /\ i < bound ==> index plain' i == index (slice plain' 0 bound) i);
assert (forall i . 0 <= i /\ i < bound ==> index cipher i == index (slice cipher 0 bound) i);
assert (forall i . 0 <= i /\ i < bound ==> index cipher' i == index (slice cipher' 0 bound) i);
()
let gctr_partial_extend6 (alg:algorithm) (bound:nat) (plain cipher:seq quad32) (key:seq nat32) (icb:quad32)
=
gctr_partial_reveal ();
()
(*
let rec seq_map_i_indexed' (#a:Type) (#b:Type) (f:int->a->b) (s:seq a) (i:int) :
Tot (s':seq b { length s' == length s /\
(forall j . {:pattern index s' j} 0 <= j /\ j < length s ==> index s' j == f (i + j) (index s j))
})
(decreases (length s))
=
if length s = 0 then empty
else cons (f i (head s)) (seq_map_i_indexed f (tail s) (i + 1))
let rec test (icb_BE:quad32) (plain_LE:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (i:int) :
Lemma (ensures
(let gctr_encrypt_block_curried (j:int) (p:quad32) = gctr_encrypt_block icb_BE p alg key j in
gctr_encrypt_recursive icb_BE plain_LE alg key i == seq_map_i_indexed' gctr_encrypt_block_curried plain_LE i))
(decreases (length plain_LE))
=
let gctr_encrypt_block_curried (j:int) (p:quad32) = gctr_encrypt_block icb_BE p alg key j in
let g = gctr_encrypt_recursive icb_BE plain_LE alg key i in
let s = seq_map_i_indexed' gctr_encrypt_block_curried plain_LE i in
if length plain_LE = 0 then (
assert(equal (g) (s));
()
) else (
test icb_BE (tail plain_LE) alg key (i+1);
assert (gctr_encrypt_recursive icb_BE (tail plain_LE) alg key (i+1) == seq_map_i_indexed' gctr_encrypt_block_curried (tail plain_LE) (i+1))
)
*)
let rec gctr_encrypt_recursive_length (icb:quad32) (plain:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (i:int) : Lemma
(requires True)
(ensures length (gctr_encrypt_recursive icb plain alg key i) == length plain)
(decreases %[length plain])
[SMTPat (length (gctr_encrypt_recursive icb plain alg key i))]
=
if length plain = 0 then ()
else gctr_encrypt_recursive_length icb (tail plain) alg key (i + 1)
#reset-options "--z3rlimit 40"
let gctr_encrypt_length (icb_BE:quad32) (plain:gctr_plain_LE)
(alg:algorithm) (key:aes_key_LE alg) :
Lemma(length (gctr_encrypt_LE icb_BE plain alg key) == length plain)
[SMTPat (length (gctr_encrypt_LE icb_BE plain alg key))]
=
reveal_opaque (`%le_bytes_to_seq_quad32) le_bytes_to_seq_quad32;
gctr_encrypt_LE_reveal ();
let num_extra = (length plain) % 16 in
let result = gctr_encrypt_LE icb_BE plain alg key in
if num_extra = 0 then (
let plain_quads_LE = le_bytes_to_seq_quad32 plain in
gctr_encrypt_recursive_length icb_BE plain_quads_LE alg key 0
) else (
let full_bytes_len = (length plain) - num_extra in
let full_blocks, final_block = split plain full_bytes_len in
let full_quads_LE = le_bytes_to_seq_quad32 full_blocks in
let final_quad_LE = le_bytes_to_quad32 (pad_to_128_bits final_block) in
let cipher_quads_LE = gctr_encrypt_recursive icb_BE full_quads_LE alg key 0 in
let final_cipher_quad_LE = gctr_encrypt_block icb_BE final_quad_LE alg key (full_bytes_len / 16) in
let cipher_bytes_full_LE = le_seq_quad32_to_bytes cipher_quads_LE in
let final_cipher_bytes_LE = slice (le_quad32_to_bytes final_cipher_quad_LE) 0 num_extra in
gctr_encrypt_recursive_length icb_BE full_quads_LE alg key 0;
assert (length result == length cipher_bytes_full_LE + length final_cipher_bytes_LE);
assert (length cipher_quads_LE == length full_quads_LE);
assert (length cipher_bytes_full_LE == 16 * length cipher_quads_LE);
assert (16 * length full_quads_LE == length full_blocks);
assert (length cipher_bytes_full_LE == length full_blocks);
()
)
#reset-options
let rec gctr_indexed_helper (icb:quad32) (plain:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (i:int) : Lemma
(requires True)
(ensures (let cipher = gctr_encrypt_recursive icb plain alg key i in
length cipher == length plain /\
(forall j . {:pattern index cipher j} 0 <= j /\ j < length plain ==>
index cipher j == quad32_xor (index plain j) (aes_encrypt_BE alg key (inc32 icb (i + j)) ))))
(decreases %[length plain])
=
if length plain = 0 then ()
else
let tl = tail plain in
let cipher = gctr_encrypt_recursive icb plain alg key i in
let r_cipher = gctr_encrypt_recursive icb tl alg key (i+1) in
let helper (j:int) :
Lemma ((0 <= j /\ j < length plain) ==> (index cipher j == quad32_xor (index plain j) (aes_encrypt_BE alg key (inc32 icb (i + j)) )))
=
aes_encrypt_LE_reveal ();
if 0 < j && j < length plain then (
gctr_indexed_helper icb tl alg key (i+1);
assert(index r_cipher (j-1) == quad32_xor (index tl (j-1)) (aes_encrypt_BE alg key (inc32 icb (i + 1 + j - 1)) )) // OBSERVE
) else ()
in
FStar.Classical.forall_intro helper
let gctr_indexed (icb:quad32) (plain:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (cipher:seq quad32) : Lemma
(requires length cipher == length plain /\
(forall i . {:pattern index cipher i} 0 <= i /\ i < length cipher ==>
index cipher i == quad32_xor (index plain i) (aes_encrypt_BE alg key (inc32 icb i) )))
(ensures cipher == gctr_encrypt_recursive icb plain alg key 0)
=
gctr_indexed_helper icb plain alg key 0;
let c = gctr_encrypt_recursive icb plain alg key 0 in
assert(equal cipher c) // OBSERVE: Invoke extensionality lemmas
let gctr_partial_completed (alg:algorithm) (plain cipher:seq quad32) (key:seq nat32) (icb:quad32) =
gctr_indexed icb plain alg key cipher;
()
let gctr_partial_opaque_completed (alg:algorithm) (plain cipher:seq quad32) (key:seq nat32) (icb:quad32) : Lemma
(requires
is_aes_key_LE alg key /\
length plain == length cipher /\
length plain < pow2_32 /\
gctr_partial alg (length cipher) plain cipher key icb
)
(ensures cipher == gctr_encrypt_recursive icb plain alg key 0)
=
gctr_partial_reveal ();
gctr_partial_completed alg plain cipher key icb
let gctr_partial_to_full_basic (icb_BE:quad32) (plain:seq quad32) (alg:algorithm) (key:seq nat32) (cipher:seq quad32) =
gctr_encrypt_LE_reveal ();
let p = le_seq_quad32_to_bytes plain in
assert (length p % 16 == 0);
let plain_quads_LE = le_bytes_to_seq_quad32 p in
let cipher_quads_LE = gctr_encrypt_recursive icb_BE plain_quads_LE alg key 0 in
let cipher_bytes = le_seq_quad32_to_bytes cipher_quads_LE in
le_bytes_to_seq_quad32_to_bytes plain;
()
(*
Want to show that:
slice (le_seq_quad32_to_bytes (buffer128_as_seq(mem, out_b))) 0 num_bytes
==
gctr_encrypt_LE icb_BE (slice (le_seq_quad32_to_bytes (buffer128_as_seq(mem, in_b))) 0 num_bytes) ...
We know that
slice (buffer128_as_seq(mem, out_b) 0 num_blocks
==
gctr_encrypt_recursive icb_BE (slice buffer128_as_seq(mem, in_b) 0 num_blocks) ...
And we know that:
get_mem out_b num_blocks
==
gctr_encrypt_block(icb_BE, (get_mem inb num_blocks), alg, key, num_blocks);
Internally gctr_encrypt_LE will compute:
full_blocks, final_block = split (slice (le_seq_quad32_to_bytes (buffer128_as_seq(mem, in_b))) 0 num_bytes) (num_blocks * 16)
We'd like to show that
Step1: le_bytes_to_seq_quad32 full_blocks == slice buffer128_as_seq(mem, in_b) 0 num_blocks
and
Step2: final_block == slice (le_quad32_to_bytes (get_mem inb num_blocks)) 0 num_extra
Then we need to break down the byte-level effects of gctr_encrypt_block to show that even though the
padded version of final_block differs from (get_mem inb num_blocks), after we slice it at the end,
we end up with the same value
*)
let step1 (p:seq quad32) (num_bytes:nat{ num_bytes < 16 * length p }) : Lemma
(let num_extra = num_bytes % 16 in
let num_blocks = num_bytes / 16 in
let full_blocks, final_block = split (slice (le_seq_quad32_to_bytes p) 0 num_bytes) (num_blocks * 16) in
let full_quads_LE = le_bytes_to_seq_quad32 full_blocks in
let p_prefix = slice p 0 num_blocks in
p_prefix == full_quads_LE)
=
let num_extra = num_bytes % 16 in
let num_blocks = num_bytes / 16 in
let full_blocks, final_block = split (slice (le_seq_quad32_to_bytes p) 0 num_bytes) (num_blocks * 16) in
let full_quads_LE = le_bytes_to_seq_quad32 full_blocks in
let p_prefix = slice p 0 num_blocks in
assert (length full_blocks == num_blocks * 16);
assert (full_blocks == slice (slice (le_seq_quad32_to_bytes p) 0 num_bytes) 0 (num_blocks * 16));
assert (full_blocks == slice (le_seq_quad32_to_bytes p) 0 (num_blocks * 16));
slice_commutes_le_seq_quad32_to_bytes0 p num_blocks;
assert (full_blocks == le_seq_quad32_to_bytes (slice p 0 num_blocks));
le_bytes_to_seq_quad32_to_bytes (slice p 0 num_blocks);
assert (full_quads_LE == (slice p 0 num_blocks));
()
#reset-options "--smtencoding.elim_box true --z3rlimit 30"
let lemma_slice_orig_index (#a:Type) (s s':seq a) (m n:nat) : Lemma
(requires length s == length s' /\ m <= n /\ n <= length s /\ slice s m n == slice s' m n)
(ensures (forall (i:int).{:pattern (index s i) \/ (index s' i)} m <= i /\ i < n ==> index s i == index s' i))
=
let aux (i:nat{m <= i /\ i < n}) : Lemma (index s i == index s' i) =
lemma_index_slice s m n (i - m);
lemma_index_slice s' m n (i - m)
in Classical.forall_intro aux
let lemma_ishl_32 (x:nat32) (k:nat) : Lemma
(ensures ishl #pow2_32 x k == x * pow2 k % pow2_32)
=
Vale.Def.TypesNative_s.reveal_ishl 32 x k;
FStar.UInt.shift_left_value_lemma #32 x k;
()
let lemma_ishl_ixor_32 (x y:nat32) (k:nat) : Lemma
(ensures ishl #pow2_32 (ixor x y) k == ixor (ishl x k) (ishl y k))
=
Vale.Def.TypesNative_s.reveal_ishl 32 x k;
Vale.Def.TypesNative_s.reveal_ishl 32 y k;
Vale.Def.TypesNative_s.reveal_ishl 32 (ixor x y) k;
Vale.Def.TypesNative_s.reveal_ixor 32 x y;
Vale.Def.TypesNative_s.reveal_ixor 32 (ishl x k) (ishl y k);
FStar.UInt.shift_left_logxor_lemma #32 x y k;
()
unfold let pow2_24 = 0x1000000
let nat24 = natN pow2_24
let nat32_xor_bytewise_1_helper1 (x0 x0':nat8) (x1 x1':nat24) (x x':nat32) : Lemma
(requires
x == x0 + 0x100 * x1 /\
x' == x0' + 0x100 * x1' /\
x * 0x1000000 % 0x100000000 == x' * 0x1000000 % 0x100000000
)
(ensures x0 == x0')
=
()
let nat32_xor_bytewise_2_helper1 (x0 x0' x1 x1':nat16) (x x':nat32) : Lemma
(requires
x == x0 + 0x10000 * x1 /\
x' == x0' + 0x10000 * x1' /\
x * 0x10000 % 0x100000000 == x' * 0x10000 % 0x100000000
)
(ensures x0 == x0')
=
()
let nat32_xor_bytewise_3_helper1 (x0 x0':nat24) (x1 x1':nat8) (x x':nat32) : Lemma
(requires
x == x0 + 0x1000000 * x1 /\
x' == x0' + 0x1000000 * x1' /\
x * 0x100 % 0x100000000 == x' * 0x100 % 0x100000000
)
(ensures x0 == x0')
=
()
let nat32_xor_bytewise_1_helper2 (x x':nat32) (t t':four nat8) : Lemma
(requires
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
x * 0x1000000 % 0x100000000 == x' * 0x1000000 % 0x100000000
)
(ensures t.lo0 == t'.lo0)
=
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
let t123 = t1 + 0x100 * t2 + 0x10000 * t3 in
let t123' = t1' + 0x100 * t2' + 0x10000 * t3' in
assert_norm (four_to_nat 8 t == four_to_nat_unfold 8 t );
assert_norm (four_to_nat 8 t' == four_to_nat_unfold 8 t');
nat32_xor_bytewise_1_helper1 t0 t0' t123 t123' x x';
()
let nat32_xor_bytewise_2_helper2 (x x':nat32) (t t':four nat8) : Lemma
(requires
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
x * 0x10000 % 0x100000000 == x' * 0x10000 % 0x100000000
)
(ensures t.lo0 == t'.lo0 /\ t.lo1 == t'.lo1)
=
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
let t01 = t0 + 0x100 * t1 in
let t23 = t2 + 0x100 * t3 in
let t01' = t0' + 0x100 * t1' in
let t23' = t2' + 0x100 * t3' in
assert_norm (four_to_nat 8 t == four_to_nat_unfold 8 t );
assert_norm (four_to_nat 8 t' == four_to_nat_unfold 8 t');
nat32_xor_bytewise_2_helper1 t01 t01' t23 t23' x x';
()
let nat32_xor_bytewise_3_helper2 (x x':nat32) (t t':four nat8) : Lemma
(requires
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
x * 0x100 % 0x100000000 == x' * 0x100 % 0x100000000
)
(ensures t.lo0 == t'.lo0 /\ t.lo1 == t'.lo1 /\ t.hi2 == t'.hi2)
=
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
let t012 = t0 + 0x100 * t1 + 0x10000 * t2 in
let t012' = t0' + 0x100 * t1' + 0x10000 * t2' in
assert_norm (four_to_nat 8 t == four_to_nat_unfold 8 t );
assert_norm (four_to_nat 8 t' == four_to_nat_unfold 8 t');
nat32_xor_bytewise_3_helper1 t012 t012' t3 t3' x x';
()
let nat32_xor_bytewise_1_helper3 (k k':nat32) (s s':four nat8) : Lemma
(requires
k == four_to_nat 8 s /\
k' == four_to_nat 8 s' /\
s.lo0 == s'.lo0
)
(ensures k * 0x1000000 % 0x100000000 == k' * 0x1000000 % 0x100000000)
=
let Mkfour _ _ _ _ = s in
let Mkfour _ _ _ _ = s' in
assert_norm (four_to_nat 8 s == four_to_nat_unfold 8 s );
assert_norm (four_to_nat 8 s' == four_to_nat_unfold 8 s');
()
let nat32_xor_bytewise_2_helper3 (k k':nat32) (s s':four nat8) : Lemma
(requires
k == four_to_nat 8 s /\
k' == four_to_nat 8 s' /\
s.lo0 == s'.lo0 /\ s.lo1 == s'.lo1
)
(ensures k * 0x10000 % 0x100000000 == k' * 0x10000 % 0x100000000)
=
let Mkfour _ _ _ _ = s in
let Mkfour _ _ _ _ = s' in
assert_norm (four_to_nat 8 s == four_to_nat_unfold 8 s );
assert_norm (four_to_nat 8 s' == four_to_nat_unfold 8 s');
()
let nat32_xor_bytewise_3_helper3 (k k':nat32) (s s':four nat8) : Lemma
(requires
k == four_to_nat 8 s /\
k' == four_to_nat 8 s' /\
s.lo0 == s'.lo0 /\ s.lo1 == s'.lo1 /\ s.hi2 == s'.hi2
)
(ensures k * 0x100 % 0x100000000 == k' * 0x100 % 0x100000000)
=
let Mkfour _ _ _ _ = s in
let Mkfour _ _ _ _ = s' in
assert_norm (four_to_nat 8 s == four_to_nat_unfold 8 s );
assert_norm (four_to_nat 8 s' == four_to_nat_unfold 8 s');
()
let nat32_xor_bytewise_1 (k k' x x' m:nat32) (s s' t t':four nat8) : Lemma
(requires
k == four_to_nat 8 s /\
k' == four_to_nat 8 s' /\
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
ixor k m == x /\
ixor k' m == x' /\
s.lo0 == s'.lo0
)
(ensures t.lo0 == t'.lo0)
=
let Mkfour s0 s1 s2 s3 = s in
let Mkfour s0' s1' s2' s3' = s' in
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
nat32_xor_bytewise_1_helper3 k k' s s';
lemma_ishl_32 k 24;
lemma_ishl_32 k' 24;
lemma_ishl_32 x 24;
lemma_ishl_32 x' 24;
lemma_ishl_ixor_32 k m 24;
lemma_ishl_ixor_32 k' m 24;
assert_norm (pow2 24 == pow2_24);
nat32_xor_bytewise_1_helper2 x x' t t';
()
let nat32_xor_bytewise_2 (k k' x x' m:nat32) (s s' t t':four nat8) : Lemma
(requires
k == four_to_nat 8 s /\
k' == four_to_nat 8 s' /\
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
ixor k m == x /\
ixor k' m == x' /\
s.lo0 == s'.lo0 /\ s.lo1 == s'.lo1
)
(ensures t.lo0 == t'.lo0 /\ t.lo1 == t'.lo1)
=
let Mkfour s0 s1 s2 s3 = s in
let Mkfour s0' s1' s2' s3' = s' in
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
nat32_xor_bytewise_2_helper3 k k' s s';
lemma_ishl_32 k 16;
lemma_ishl_32 k' 16;
lemma_ishl_32 x 16;
lemma_ishl_32 x' 16;
lemma_ishl_ixor_32 k m 16;
lemma_ishl_ixor_32 k' m 16;
// assert (ishl #pow2_32 k 16 == k * 0x10000 % 0x100000000);
// assert (ishl #pow2_32 k' 16 == k' * 0x10000 % 0x100000000);
// assert (ishl #pow2_32 x 16 == x * 0x10000 % 0x100000000);
// assert (ishl #pow2_32 x' 16 == x' * 0x10000 % 0x100000000);
// assert (ishl #pow2_32 x 16 == ixor (ishl k 16) (ishl m 16));
// assert (ishl #pow2_32 x' 16 == ixor (ishl k' 16) (ishl m 16));
// assert (x * 0x10000 % 0x100000000 == ixor (k * 0x10000 % 0x100000000) (ishl m 16));
// assert (x' * 0x10000 % 0x100000000 == ixor (k' * 0x10000 % 0x100000000) (ishl m 16));
nat32_xor_bytewise_2_helper2 x x' t t';
()
let nat32_xor_bytewise_3 (k k' x x' m:nat32) (s s' t t':four nat8) : Lemma
(requires
k == four_to_nat 8 s /\
k' == four_to_nat 8 s' /\
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
ixor k m == x /\
ixor k' m == x' /\
s.lo0 == s'.lo0 /\ s.lo1 == s'.lo1 /\ s.hi2 == s'.hi2
)
(ensures t.lo0 == t'.lo0 /\ t.lo1 == t'.lo1 /\ t.hi2 == t'.hi2)
=
let Mkfour s0 s1 s2 s3 = s in
let Mkfour s0' s1' s2' s3' = s' in
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
nat32_xor_bytewise_3_helper3 k k' s s';
lemma_ishl_32 k 8;
lemma_ishl_32 k' 8;
lemma_ishl_32 x 8;
lemma_ishl_32 x' 8;
lemma_ishl_ixor_32 k m 8;
lemma_ishl_ixor_32 k' m 8;
nat32_xor_bytewise_3_helper2 x x' t t';
()
#reset-options "--z3rlimit 50 --smtencoding.nl_arith_repr boxwrap --smtencoding.l_arith_repr boxwrap"
let nat32_xor_bytewise_4 (k k' x x' m:nat32) (s s' t t':four nat8) : Lemma
(requires
k == four_to_nat 8 s /\
k' == four_to_nat 8 s' /\
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
ixor k m == x /\
ixor k' m == x' /\
s == s'
)
(ensures t == t')
=
let Mkfour s0 s1 s2 s3 = s in
let Mkfour s0' s1' s2' s3' = s' in
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
assert_norm (four_to_nat 8 t' == four_to_nat_unfold 8 t');
assert_norm (four_to_nat 8 t == four_to_nat_unfold 8 t );
()
#reset-options
let nat32_xor_bytewise (k k' m:nat32) (s s' t t':seq4 nat8) (n:nat) : Lemma
(requires
n <= 4 /\
k == four_to_nat 8 (seq_to_four_LE s) /\
k' == four_to_nat 8 (seq_to_four_LE s') /\
ixor k m == four_to_nat 8 (seq_to_four_LE t) /\
ixor k' m == four_to_nat 8 (seq_to_four_LE t') /\
equal (slice s 0 n) (slice s' 0 n)
)
// (ensures equal (slice t 0 n) (slice t' 0 n))
(ensures (forall (i:nat).{:pattern (index t i) \/ (index t' i)} i < n ==> index t i == index t' i))
=
assert (n > 0 ==> index (slice s 0 n) 0 == index (slice s' 0 n) 0);
assert (n > 1 ==> index (slice s 0 n) 1 == index (slice s' 0 n) 1);
assert (n > 2 ==> index (slice s 0 n) 2 == index (slice s' 0 n) 2);
assert (n > 3 ==> index (slice s 0 n) 3 == index (slice s' 0 n) 3);
let x = ixor k m in
let x' = ixor k' m in
if n = 1 then nat32_xor_bytewise_1 k k' x x' m (seq_to_four_LE s) (seq_to_four_LE s') (seq_to_four_LE t) (seq_to_four_LE t');
if n = 2 then nat32_xor_bytewise_2 k k' x x' m (seq_to_four_LE s) (seq_to_four_LE s') (seq_to_four_LE t) (seq_to_four_LE t');
if n = 3 then nat32_xor_bytewise_3 k k' x x' m (seq_to_four_LE s) (seq_to_four_LE s') (seq_to_four_LE t) (seq_to_four_LE t');
if n = 4 then nat32_xor_bytewise_4 k k' x x' m (seq_to_four_LE s) (seq_to_four_LE s') (seq_to_four_LE t) (seq_to_four_LE t');
assert (equal (slice t 0 n) (slice t' 0 n));
lemma_slice_orig_index t t' 0 n;
()
let quad32_xor_bytewise (q q' r:quad32) (n:nat{ n <= 16 }) : Lemma
(requires (let q_bytes = le_quad32_to_bytes q in
let q'_bytes = le_quad32_to_bytes q' in
slice q_bytes 0 n == slice q'_bytes 0 n))
(ensures (let q_bytes = le_quad32_to_bytes q in
let q'_bytes = le_quad32_to_bytes q' in
let qr_bytes = le_quad32_to_bytes (quad32_xor q r) in
let q'r_bytes = le_quad32_to_bytes (quad32_xor q' r) in
slice qr_bytes 0 n == slice q'r_bytes 0 n))
=
let s = le_quad32_to_bytes q in
let s' = le_quad32_to_bytes q' in
let t = le_quad32_to_bytes (quad32_xor q r) in
let t' = le_quad32_to_bytes (quad32_xor q' r) in
lemma_slices_le_quad32_to_bytes q;
lemma_slices_le_quad32_to_bytes q';
lemma_slices_le_quad32_to_bytes (quad32_xor q r);
lemma_slices_le_quad32_to_bytes (quad32_xor q' r);
lemma_slice_orig_index s s' 0 n;
quad32_xor_reveal ();
reverse_bytes_nat32_reveal ();
if n < 4 then nat32_xor_bytewise q.lo0 q'.lo0 r.lo0 (slice s 0 4) (slice s' 0 4) (slice t 0 4) (slice t' 0 4) n
else
(
nat32_xor_bytewise q.lo0 q'.lo0 r.lo0 (slice s 0 4) (slice s' 0 4) (slice t 0 4) (slice t' 0 4) 4;
if n < 8 then nat32_xor_bytewise q.lo1 q'.lo1 r.lo1 (slice s 4 8) (slice s' 4 8) (slice t 4 8) (slice t' 4 8) (n - 4)
else
(
nat32_xor_bytewise q.lo1 q'.lo1 r.lo1 (slice s 4 8) (slice s' 4 8) (slice t 4 8) (slice t' 4 8) 4;
if n < 12 then nat32_xor_bytewise q.hi2 q'.hi2 r.hi2 (slice s 8 12) (slice s' 8 12) (slice t 8 12) (slice t' 8 12) (n - 8)
else
(
nat32_xor_bytewise q.hi2 q'.hi2 r.hi2 (slice s 8 12) (slice s' 8 12) (slice t 8 12) (slice t' 8 12) 4;
nat32_xor_bytewise q.hi3 q'.hi3 r.hi3 (slice s 12 16) (slice s' 12 16) (slice t 12 16) (slice t' 12 16) (n - 12);
()
)
)
);
assert (equal (slice t 0 n) (slice t' 0 n));
()
let slice_pad_to_128_bits (s:seq nat8 { 0 < length s /\ length s < 16 }) :
Lemma(slice (pad_to_128_bits s) 0 (length s) == s)
=
assert (length s % 16 == length s);
assert (equal s (slice (pad_to_128_bits s) 0 (length s)));
()
let step2 (s:seq nat8 { 0 < length s /\ length s < 16 }) (q:quad32) (icb_BE:quad32) (alg:algorithm) (key:aes_key_LE alg) (i:int):
Lemma(let q_bytes = le_quad32_to_bytes q in
let q_bytes_prefix = slice q_bytes 0 (length s) in
let q_cipher = gctr_encrypt_block icb_BE q alg key i in
let q_cipher_bytes = slice (le_quad32_to_bytes q_cipher) 0 (length s) in
let s_quad = le_bytes_to_quad32 (pad_to_128_bits s) in
let s_cipher = gctr_encrypt_block icb_BE s_quad alg key i in
let s_cipher_bytes = slice (le_quad32_to_bytes s_cipher) 0 (length s) in
s == q_bytes_prefix ==> s_cipher_bytes == q_cipher_bytes)
=
let q_bytes = le_quad32_to_bytes q in
let q_bytes_prefix = slice q_bytes 0 (length s) in
let q_cipher = gctr_encrypt_block icb_BE q alg key i in
let q_cipher_bytes = slice (le_quad32_to_bytes q_cipher) 0 (length s) in
let s_quad = le_bytes_to_quad32 (pad_to_128_bits s) in
let s_cipher = gctr_encrypt_block icb_BE s_quad alg key i in
let s_cipher_bytes = slice (le_quad32_to_bytes s_cipher) 0 (length s) in
let enc_ctr = aes_encrypt_LE alg key (reverse_bytes_quad32 (inc32 icb_BE i)) in
let icb_LE = reverse_bytes_quad32 (inc32 icb_BE i) in
if s = q_bytes_prefix then (
// s_cipher_bytes = slice (le_quad32_to_bytes s_cipher) 0 (length s)
// = slice (le_quad32_to_bytes (gctr_encrypt_block icb_BE s_quad alg key i)) 0 (length s)
// = slice (le_quad32_to_bytes (gctr_encrypt_block icb_BE (le_bytes_to_quad32 (pad_to_128_bits s)) alg key i)) 0 (length s)
// q_cipher_bytes = gctr_encrypt_block icb_BE q alg key i
le_quad32_to_bytes_to_quad32 (pad_to_128_bits s);
slice_pad_to_128_bits s;
quad32_xor_bytewise q (le_bytes_to_quad32 (pad_to_128_bits s)) (aes_encrypt_LE alg key icb_LE) (length s);
//assert (equal s_cipher_bytes q_cipher_bytes);
()
) else
();
()
#reset-options "--z3rlimit 30"
open FStar.Seq.Properties
let gctr_partial_to_full_advanced (icb_BE:quad32) (plain:seq quad32) (cipher:seq quad32) (alg:algorithm) (key:seq nat32) (num_bytes:nat) =
gctr_encrypt_LE_reveal ();
let num_blocks = num_bytes / 16 in
let plain_bytes = slice (le_seq_quad32_to_bytes plain) 0 num_bytes in
let cipher_bytes = slice (le_seq_quad32_to_bytes cipher) 0 num_bytes in
step1 plain num_bytes;
let s = slice (le_seq_quad32_to_bytes plain) (num_blocks * 16) num_bytes in
let final_p = index plain num_blocks in
step2 s final_p icb_BE alg key num_blocks;
let num_extra = num_bytes % 16 in
let full_bytes_len = num_bytes - num_extra in
let full_blocks, final_block = split plain_bytes full_bytes_len in
assert (full_bytes_len % 16 == 0);
assert (length full_blocks == full_bytes_len);
let full_quads_LE = le_bytes_to_seq_quad32 full_blocks in
let final_quad_LE = le_bytes_to_quad32 (pad_to_128_bits final_block) in
let cipher_quads_LE = gctr_encrypt_recursive icb_BE full_quads_LE alg key 0 in
let final_cipher_quad_LE = gctr_encrypt_block icb_BE final_quad_LE alg key (full_bytes_len / 16) in
assert (cipher_quads_LE == slice cipher 0 num_blocks); // LHS quads
let cipher_bytes_full_LE = le_seq_quad32_to_bytes cipher_quads_LE in
let final_cipher_bytes_LE = slice (le_quad32_to_bytes final_cipher_quad_LE) 0 num_extra in
assert (le_seq_quad32_to_bytes cipher_quads_LE == le_seq_quad32_to_bytes (slice cipher 0 num_blocks)); // LHS bytes
assert (length s == num_extra);
let q_prefix = slice (le_quad32_to_bytes final_p) 0 num_extra in
le_seq_quad32_to_bytes_tail_prefix plain num_bytes;
assert (q_prefix == s);
assert(final_cipher_bytes_LE == slice (le_quad32_to_bytes (index cipher num_blocks)) 0 num_extra); // RHS bytes
le_seq_quad32_to_bytes_tail_prefix cipher num_bytes;
assert (slice (le_quad32_to_bytes (index cipher num_blocks)) 0 num_extra ==
slice (le_seq_quad32_to_bytes cipher) (num_blocks * 16) num_bytes);
slice_commutes_le_seq_quad32_to_bytes0 cipher num_blocks;
assert (le_seq_quad32_to_bytes (slice cipher 0 num_blocks) == slice (le_seq_quad32_to_bytes cipher) 0 (num_blocks * 16));
assert (slice (slice (le_seq_quad32_to_bytes cipher) (num_blocks * 16) (length cipher * 16)) 0 num_extra ==
slice (le_seq_quad32_to_bytes cipher) (num_blocks * 16) num_bytes);
slice_append_adds (le_seq_quad32_to_bytes cipher) (num_blocks * 16) num_bytes;
assert (slice (le_seq_quad32_to_bytes cipher) 0 (num_blocks * 16) @|
slice (le_seq_quad32_to_bytes cipher) (num_blocks * 16) num_bytes ==
slice (le_seq_quad32_to_bytes cipher) 0 num_bytes);
assert (cipher_bytes == (le_seq_quad32_to_bytes (slice cipher 0 num_blocks)) @| slice (le_quad32_to_bytes (index cipher num_blocks)) 0 num_extra);
()
let gctr_encrypt_one_block (icb_BE plain:quad32) (alg:algorithm) (key:seq nat32) =
gctr_encrypt_LE_reveal ();
assert(inc32 icb_BE 0 == icb_BE);
let encrypted_icb = aes_encrypt_BE alg key icb_BE in
let p = le_quad32_to_bytes plain in
let plain_quads_LE = le_bytes_to_seq_quad32 p in
let p_seq = create 1 plain in
assert (length p == 16);
le_bytes_to_seq_quad32_to_bytes_one_quad plain;
assert (p_seq == plain_quads_LE);
let cipher_quads_LE = gctr_encrypt_recursive icb_BE plain_quads_LE alg key 0 in
assert (cipher_quads_LE == cons (gctr_encrypt_block icb_BE (head plain_quads_LE) alg key 0) (gctr_encrypt_recursive icb_BE (tail plain_quads_LE) alg key (1)));
assert (head plain_quads_LE == plain);
assert (gctr_encrypt_block icb_BE (head plain_quads_LE) alg key 0 ==
(let icb_LE = reverse_bytes_quad32 (inc32 icb_BE 0) in
quad32_xor (head plain_quads_LE) (aes_encrypt_LE alg key icb_LE)));
assert (quad32_xor plain (aes_encrypt_LE alg key (reverse_bytes_quad32 icb_BE))
==
(let icb_LE = reverse_bytes_quad32 (inc32 icb_BE 0) in
quad32_xor (head plain_quads_LE) (aes_encrypt_LE alg key icb_LE)));
assert (gctr_encrypt_block icb_BE (head plain_quads_LE) alg key 0 == quad32_xor plain (aes_encrypt_LE alg key (reverse_bytes_quad32 icb_BE)));
aes_encrypt_LE_reveal ();
assert (gctr_encrypt_block icb_BE (head plain_quads_LE) alg key 0 == quad32_xor plain (aes_encrypt_BE alg key icb_BE));
assert (gctr_encrypt_block icb_BE (head plain_quads_LE) alg key 0 == quad32_xor plain encrypted_icb);
assert(gctr_encrypt_recursive icb_BE (tail p_seq) alg key 1 == empty); // OBSERVE
//assert(gctr_encrypt_LE icb p alg key == cons (quad32_xor plain encrypted_icb) empty);
let x = quad32_xor plain encrypted_icb in
append_empty_r (create 1 x); // This is the missing piece
()
let lemma_length_simplifier (s bytes t:seq quad32) (num_bytes:nat) : Lemma
(requires t == (if num_bytes > (length s) * 16 then append s bytes else s) /\
(num_bytes <= (length s) * 16 ==> num_bytes == (length s * 16)) /\
length s * 16 <= num_bytes /\
num_bytes < length s * 16 + 16 /\
length bytes == 1
)
(ensures slice (le_seq_quad32_to_bytes t) 0 num_bytes ==
slice (le_seq_quad32_to_bytes (append s bytes)) 0 num_bytes)
=
if num_bytes > (length s) * 16 then (
()
) else (
calc (==) {
slice (le_seq_quad32_to_bytes (append s bytes)) 0 num_bytes;
== { append_distributes_le_seq_quad32_to_bytes s bytes }
slice (append (le_seq_quad32_to_bytes s) (le_seq_quad32_to_bytes bytes)) 0 num_bytes;
== { Vale.Lib.Seqs.lemma_slice_first_exactly_in_append (le_seq_quad32_to_bytes s) (le_seq_quad32_to_bytes bytes) }
le_seq_quad32_to_bytes s;
== { assert (length (le_seq_quad32_to_bytes s) == num_bytes) }
slice (le_seq_quad32_to_bytes s) 0 num_bytes;
};
()
) | false | false | Vale.AES.GCTR.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 30,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val gctr_bytes_helper (alg:algorithm) (key:seq nat32)
(p128 p_bytes c128 c_bytes:seq quad32)
(p_num_bytes:nat)
(iv_BE:quad32) : Lemma
(requires length p128 * 16 < pow2_32 /\
length p128 * 16 <= p_num_bytes /\
p_num_bytes < length p128 * 16 + 16 /\
length p128 == length c128 /\
length p_bytes == 1 /\
length c_bytes == 1 /\
is_aes_key_LE alg key /\
// Ensured by Gctr_core_opt
gctr_partial_def alg (length p128) p128 c128 key iv_BE /\
(p_num_bytes > length p128 * 16 ==>
index c_bytes 0 == gctr_encrypt_block (inc32 iv_BE (length p128)) (index p_bytes 0) alg key 0))
(ensures (let plain_raw_quads = append p128 p_bytes in
let plain_bytes = slice (le_seq_quad32_to_bytes plain_raw_quads) 0 p_num_bytes in
let cipher_raw_quads = append c128 c_bytes in
let cipher_bytes = slice (le_seq_quad32_to_bytes cipher_raw_quads) 0 p_num_bytes in
is_gctr_plain_LE plain_bytes /\
cipher_bytes == gctr_encrypt_LE iv_BE plain_bytes alg key)) | [] | Vale.AES.GCTR.gctr_bytes_helper | {
"file_name": "vale/code/crypto/aes/Vale.AES.GCTR.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} |
alg: Vale.AES.AES_common_s.algorithm ->
key: FStar.Seq.Base.seq Vale.Def.Types_s.nat32 ->
p128: FStar.Seq.Base.seq Vale.Def.Types_s.quad32 ->
p_bytes: FStar.Seq.Base.seq Vale.Def.Types_s.quad32 ->
c128: FStar.Seq.Base.seq Vale.Def.Types_s.quad32 ->
c_bytes: FStar.Seq.Base.seq Vale.Def.Types_s.quad32 ->
p_num_bytes: Prims.nat ->
iv_BE: Vale.Def.Types_s.quad32
-> FStar.Pervasives.Lemma
(requires
FStar.Seq.Base.length p128 * 16 < Vale.Def.Words_s.pow2_32 /\
FStar.Seq.Base.length p128 * 16 <= p_num_bytes /\
p_num_bytes < FStar.Seq.Base.length p128 * 16 + 16 /\
FStar.Seq.Base.length p128 == FStar.Seq.Base.length c128 /\
FStar.Seq.Base.length p_bytes == 1 /\ FStar.Seq.Base.length c_bytes == 1 /\
Vale.AES.AES_s.is_aes_key_LE alg key /\
Vale.AES.GCTR.gctr_partial_def alg (FStar.Seq.Base.length p128) p128 c128 key iv_BE /\
(p_num_bytes > FStar.Seq.Base.length p128 * 16 ==>
FStar.Seq.Base.index c_bytes 0 ==
Vale.AES.GCTR_s.gctr_encrypt_block (Vale.AES.GCTR_s.inc32 iv_BE
(FStar.Seq.Base.length p128))
(FStar.Seq.Base.index p_bytes 0)
alg
key
0))
(ensures
(let plain_raw_quads = FStar.Seq.Base.append p128 p_bytes in
let plain_bytes =
FStar.Seq.Base.slice (Vale.Def.Types_s.le_seq_quad32_to_bytes plain_raw_quads)
0
p_num_bytes
in
let cipher_raw_quads = FStar.Seq.Base.append c128 c_bytes in
let cipher_bytes =
FStar.Seq.Base.slice (Vale.Def.Types_s.le_seq_quad32_to_bytes cipher_raw_quads)
0
p_num_bytes
in
Vale.AES.GCTR_s.is_gctr_plain_LE plain_bytes /\
cipher_bytes == Vale.AES.GCTR_s.gctr_encrypt_LE iv_BE plain_bytes alg key)) | {
"end_col": 4,
"end_line": 781,
"start_col": 75,
"start_line": 753
} |
FStar.Pervasives.Lemma | val gctr_partial_opaque_ignores_postfix (alg:algorithm) (bound:nat32) (plain plain' cipher cipher':seq quad32) (key:seq nat32) (icb:quad32) : Lemma
(requires is_aes_key_LE alg key /\
length plain >= bound /\
length cipher >= bound /\
length plain' >= bound /\
length cipher' >= bound /\
slice plain 0 bound == slice plain' 0 bound /\
slice cipher 0 bound == slice cipher' 0 bound)
(ensures gctr_partial alg bound plain cipher key icb <==> gctr_partial alg bound plain' cipher' key icb) | [
{
"abbrev": false,
"full_module": "Vale.AES.Types_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words.Four_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words.Seq_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Lib.Seqs",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCM_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCTR_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let gctr_partial_opaque_ignores_postfix alg bound plain plain' cipher cipher' key icb =
gctr_partial_reveal ();
// OBSERVE:
assert (forall i . 0 <= i /\ i < bound ==> index plain i == index (slice plain 0 bound) i);
assert (forall i . 0 <= i /\ i < bound ==> index plain' i == index (slice plain' 0 bound) i);
assert (forall i . 0 <= i /\ i < bound ==> index cipher i == index (slice cipher 0 bound) i);
assert (forall i . 0 <= i /\ i < bound ==> index cipher' i == index (slice cipher' 0 bound) i);
() | val gctr_partial_opaque_ignores_postfix (alg:algorithm) (bound:nat32) (plain plain' cipher cipher':seq quad32) (key:seq nat32) (icb:quad32) : Lemma
(requires is_aes_key_LE alg key /\
length plain >= bound /\
length cipher >= bound /\
length plain' >= bound /\
length cipher' >= bound /\
slice plain 0 bound == slice plain' 0 bound /\
slice cipher 0 bound == slice cipher' 0 bound)
(ensures gctr_partial alg bound plain cipher key icb <==> gctr_partial alg bound plain' cipher' key icb)
let gctr_partial_opaque_ignores_postfix alg bound plain plain' cipher cipher' key icb = | false | null | true | gctr_partial_reveal ();
assert (forall i. 0 <= i /\ i < bound ==> index plain i == index (slice plain 0 bound) i);
assert (forall i. 0 <= i /\ i < bound ==> index plain' i == index (slice plain' 0 bound) i);
assert (forall i. 0 <= i /\ i < bound ==> index cipher i == index (slice cipher 0 bound) i);
assert (forall i. 0 <= i /\ i < bound ==> index cipher' i == index (slice cipher' 0 bound) i);
() | {
"checked_file": "Vale.AES.GCTR.fst.checked",
"dependencies": [
"Vale.Poly1305.Bitvectors.fsti.checked",
"Vale.Lib.Seqs.fsti.checked",
"Vale.Def.Words_s.fsti.checked",
"Vale.Def.Words.Seq_s.fsti.checked",
"Vale.Def.Words.Four_s.fsti.checked",
"Vale.Def.TypesNative_s.fst.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Opaque_s.fsti.checked",
"Vale.Arch.Types.fsti.checked",
"Vale.AES.Types_helpers.fsti.checked",
"Vale.AES.GCTR_s.fst.checked",
"Vale.AES.GCM_helpers.fsti.checked",
"Vale.AES.AES_s.fst.checked",
"prims.fst.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.Properties.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Classical.fsti.checked",
"FStar.Calc.fsti.checked"
],
"interface_file": true,
"source_file": "Vale.AES.GCTR.fst"
} | [
"lemma"
] | [
"Vale.AES.AES_common_s.algorithm",
"Vale.Def.Types_s.nat32",
"FStar.Seq.Base.seq",
"Vale.Def.Types_s.quad32",
"Prims.unit",
"Prims._assert",
"Prims.l_Forall",
"Prims.int",
"Prims.l_and",
"Prims.b2t",
"Prims.op_GreaterThanOrEqual",
"Prims.op_LessThan",
"FStar.Seq.Base.length",
"FStar.Seq.Base.slice",
"Prims.l_imp",
"Prims.op_LessThanOrEqual",
"Prims.eq2",
"FStar.Seq.Base.index",
"Vale.AES.GCTR.gctr_partial_reveal"
] | [] | module Vale.AES.GCTR
open Vale.Def.Opaque_s
open Vale.Def.Words_s
open Vale.Def.Words.Seq_s
open Vale.Def.Words.Four_s
open Vale.Def.Types_s
open Vale.Arch.Types
open FStar.Mul
open FStar.Seq
open Vale.AES.AES_s
open Vale.AES.GCTR_s
open Vale.AES.GCM_helpers
open FStar.Math.Lemmas
open Vale.Lib.Seqs
open Vale.AES.Types_helpers
#set-options "--z3rlimit 20 --max_fuel 1 --max_ifuel 0"
let lemma_counter_init x low64 low8 =
Vale.Poly1305.Bitvectors.lemma_bytes_and_mod1 low64;
Vale.Def.TypesNative_s.reveal_iand 64 low64 0xff;
assert (low8 == low64 % 256);
lo64_reveal ();
assert_norm (pow2_norm 32 == pow2_32); // OBSERVE
assert (low64 == x.lo0 + x.lo1 * pow2_32); // OBSERVE
assert (low64 % 256 == x.lo0 % 256);
()
let gctr_encrypt_block_offset (icb_BE:quad32) (plain_LE:quad32) (alg:algorithm) (key:seq nat32) (i:int) =
()
let gctr_encrypt_empty (icb_BE:quad32) (plain_LE cipher_LE:seq quad32) (alg:algorithm) (key:seq nat32) =
reveal_opaque (`%le_bytes_to_seq_quad32) le_bytes_to_seq_quad32;
gctr_encrypt_LE_reveal ();
let plain = slice (le_seq_quad32_to_bytes plain_LE) 0 0 in
let cipher = slice (le_seq_quad32_to_bytes cipher_LE) 0 0 in
assert (plain == empty);
assert (cipher == empty);
assert (length plain == 0);
assert (make_gctr_plain_LE plain == empty);
let num_extra = (length (make_gctr_plain_LE plain)) % 16 in
assert (num_extra == 0);
let plain_quads_LE = le_bytes_to_seq_quad32 plain in
let cipher_quads_LE = gctr_encrypt_recursive icb_BE plain_quads_LE alg key 0 in
assert (equal plain_quads_LE empty); // OBSERVE
assert (plain_quads_LE == empty);
assert (cipher_quads_LE == empty);
assert (equal (le_seq_quad32_to_bytes cipher_quads_LE) empty); // OBSERVEs
()
let gctr_partial_opaque_init alg plain cipher key icb =
gctr_partial_reveal ();
()
#restart-solver
let lemma_gctr_partial_append alg b1 b2 p1 c1 p2 c2 key icb1 icb2 =
gctr_partial_reveal ();
() | false | false | Vale.AES.GCTR.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 20,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val gctr_partial_opaque_ignores_postfix (alg:algorithm) (bound:nat32) (plain plain' cipher cipher':seq quad32) (key:seq nat32) (icb:quad32) : Lemma
(requires is_aes_key_LE alg key /\
length plain >= bound /\
length cipher >= bound /\
length plain' >= bound /\
length cipher' >= bound /\
slice plain 0 bound == slice plain' 0 bound /\
slice cipher 0 bound == slice cipher' 0 bound)
(ensures gctr_partial alg bound plain cipher key icb <==> gctr_partial alg bound plain' cipher' key icb) | [] | Vale.AES.GCTR.gctr_partial_opaque_ignores_postfix | {
"file_name": "vale/code/crypto/aes/Vale.AES.GCTR.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} |
alg: Vale.AES.AES_common_s.algorithm ->
bound: Vale.Def.Types_s.nat32 ->
plain: FStar.Seq.Base.seq Vale.Def.Types_s.quad32 ->
plain': FStar.Seq.Base.seq Vale.Def.Types_s.quad32 ->
cipher: FStar.Seq.Base.seq Vale.Def.Types_s.quad32 ->
cipher': FStar.Seq.Base.seq Vale.Def.Types_s.quad32 ->
key: FStar.Seq.Base.seq Vale.Def.Types_s.nat32 ->
icb: Vale.Def.Types_s.quad32
-> FStar.Pervasives.Lemma
(requires
Vale.AES.AES_s.is_aes_key_LE alg key /\ FStar.Seq.Base.length plain >= bound /\
FStar.Seq.Base.length cipher >= bound /\ FStar.Seq.Base.length plain' >= bound /\
FStar.Seq.Base.length cipher' >= bound /\
FStar.Seq.Base.slice plain 0 bound == FStar.Seq.Base.slice plain' 0 bound /\
FStar.Seq.Base.slice cipher 0 bound == FStar.Seq.Base.slice cipher' 0 bound)
(ensures
Vale.AES.GCTR.gctr_partial alg bound plain cipher key icb <==>
Vale.AES.GCTR.gctr_partial alg bound plain' cipher' key icb) | {
"end_col": 4,
"end_line": 68,
"start_col": 2,
"start_line": 62
} |
FStar.Pervasives.Lemma | val gctr_indexed_helper
(icb: quad32)
(plain: gctr_plain_internal_LE)
(alg: algorithm)
(key: aes_key_LE alg)
(i: int)
: Lemma (requires True)
(ensures
(let cipher = gctr_encrypt_recursive icb plain alg key i in
length cipher == length plain /\
(forall j. {:pattern index cipher j}
0 <= j /\ j < length plain ==>
index cipher j ==
quad32_xor (index plain j) (aes_encrypt_BE alg key (inc32 icb (i + j))))))
(decreases %[length plain]) | [
{
"abbrev": false,
"full_module": "Vale.AES.Types_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Lib.Seqs",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCM_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCTR_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words.Four_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words.Seq_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Lib.Seqs",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCM_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCTR_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let rec gctr_indexed_helper (icb:quad32) (plain:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (i:int) : Lemma
(requires True)
(ensures (let cipher = gctr_encrypt_recursive icb plain alg key i in
length cipher == length plain /\
(forall j . {:pattern index cipher j} 0 <= j /\ j < length plain ==>
index cipher j == quad32_xor (index plain j) (aes_encrypt_BE alg key (inc32 icb (i + j)) ))))
(decreases %[length plain])
=
if length plain = 0 then ()
else
let tl = tail plain in
let cipher = gctr_encrypt_recursive icb plain alg key i in
let r_cipher = gctr_encrypt_recursive icb tl alg key (i+1) in
let helper (j:int) :
Lemma ((0 <= j /\ j < length plain) ==> (index cipher j == quad32_xor (index plain j) (aes_encrypt_BE alg key (inc32 icb (i + j)) )))
=
aes_encrypt_LE_reveal ();
if 0 < j && j < length plain then (
gctr_indexed_helper icb tl alg key (i+1);
assert(index r_cipher (j-1) == quad32_xor (index tl (j-1)) (aes_encrypt_BE alg key (inc32 icb (i + 1 + j - 1)) )) // OBSERVE
) else ()
in
FStar.Classical.forall_intro helper | val gctr_indexed_helper
(icb: quad32)
(plain: gctr_plain_internal_LE)
(alg: algorithm)
(key: aes_key_LE alg)
(i: int)
: Lemma (requires True)
(ensures
(let cipher = gctr_encrypt_recursive icb plain alg key i in
length cipher == length plain /\
(forall j. {:pattern index cipher j}
0 <= j /\ j < length plain ==>
index cipher j ==
quad32_xor (index plain j) (aes_encrypt_BE alg key (inc32 icb (i + j))))))
(decreases %[length plain])
let rec gctr_indexed_helper
(icb: quad32)
(plain: gctr_plain_internal_LE)
(alg: algorithm)
(key: aes_key_LE alg)
(i: int)
: Lemma (requires True)
(ensures
(let cipher = gctr_encrypt_recursive icb plain alg key i in
length cipher == length plain /\
(forall j. {:pattern index cipher j}
0 <= j /\ j < length plain ==>
index cipher j ==
quad32_xor (index plain j) (aes_encrypt_BE alg key (inc32 icb (i + j))))))
(decreases %[length plain]) = | false | null | true | if length plain = 0
then ()
else
let tl = tail plain in
let cipher = gctr_encrypt_recursive icb plain alg key i in
let r_cipher = gctr_encrypt_recursive icb tl alg key (i + 1) in
let helper (j: int)
: Lemma
((0 <= j /\ j < length plain) ==>
(index cipher j == quad32_xor (index plain j) (aes_encrypt_BE alg key (inc32 icb (i + j)))))
=
aes_encrypt_LE_reveal ();
if 0 < j && j < length plain
then
(gctr_indexed_helper icb tl alg key (i + 1);
assert (index r_cipher (j - 1) ==
quad32_xor (index tl (j - 1)) (aes_encrypt_BE alg key (inc32 icb (i + 1 + j - 1)))))
in
FStar.Classical.forall_intro helper | {
"checked_file": "Vale.AES.GCTR.fst.checked",
"dependencies": [
"Vale.Poly1305.Bitvectors.fsti.checked",
"Vale.Lib.Seqs.fsti.checked",
"Vale.Def.Words_s.fsti.checked",
"Vale.Def.Words.Seq_s.fsti.checked",
"Vale.Def.Words.Four_s.fsti.checked",
"Vale.Def.TypesNative_s.fst.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Opaque_s.fsti.checked",
"Vale.Arch.Types.fsti.checked",
"Vale.AES.Types_helpers.fsti.checked",
"Vale.AES.GCTR_s.fst.checked",
"Vale.AES.GCM_helpers.fsti.checked",
"Vale.AES.AES_s.fst.checked",
"prims.fst.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.Properties.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Classical.fsti.checked",
"FStar.Calc.fsti.checked"
],
"interface_file": true,
"source_file": "Vale.AES.GCTR.fst"
} | [
"lemma",
""
] | [
"Vale.Def.Types_s.quad32",
"Vale.AES.GCTR_s.gctr_plain_internal_LE",
"Vale.AES.AES_common_s.algorithm",
"Vale.AES.AES_s.aes_key_LE",
"Prims.int",
"Prims.op_Equality",
"FStar.Seq.Base.length",
"Prims.bool",
"FStar.Classical.forall_intro",
"Prims.l_imp",
"Prims.l_and",
"Prims.b2t",
"Prims.op_LessThanOrEqual",
"Prims.op_LessThan",
"Prims.eq2",
"FStar.Seq.Base.index",
"Vale.Def.Types_s.quad32_xor",
"Vale.AES.GCTR.aes_encrypt_BE",
"Vale.AES.GCTR_s.inc32",
"Prims.op_Addition",
"Prims.unit",
"Prims.l_True",
"Prims.squash",
"Prims.Nil",
"FStar.Pervasives.pattern",
"Prims.op_AmpAmp",
"Prims._assert",
"Prims.op_Subtraction",
"Vale.AES.GCTR.gctr_indexed_helper",
"Vale.AES.AES_s.aes_encrypt_LE_reveal",
"FStar.Seq.Base.seq",
"Vale.AES.GCTR_s.gctr_encrypt_recursive",
"FStar.Seq.Properties.tail",
"Prims.nat",
"Prims.l_Forall",
"Prims.op_GreaterThanOrEqual"
] | [] | module Vale.AES.GCTR
open Vale.Def.Opaque_s
open Vale.Def.Words_s
open Vale.Def.Words.Seq_s
open Vale.Def.Words.Four_s
open Vale.Def.Types_s
open Vale.Arch.Types
open FStar.Mul
open FStar.Seq
open Vale.AES.AES_s
open Vale.AES.GCTR_s
open Vale.AES.GCM_helpers
open FStar.Math.Lemmas
open Vale.Lib.Seqs
open Vale.AES.Types_helpers
#set-options "--z3rlimit 20 --max_fuel 1 --max_ifuel 0"
let lemma_counter_init x low64 low8 =
Vale.Poly1305.Bitvectors.lemma_bytes_and_mod1 low64;
Vale.Def.TypesNative_s.reveal_iand 64 low64 0xff;
assert (low8 == low64 % 256);
lo64_reveal ();
assert_norm (pow2_norm 32 == pow2_32); // OBSERVE
assert (low64 == x.lo0 + x.lo1 * pow2_32); // OBSERVE
assert (low64 % 256 == x.lo0 % 256);
()
let gctr_encrypt_block_offset (icb_BE:quad32) (plain_LE:quad32) (alg:algorithm) (key:seq nat32) (i:int) =
()
let gctr_encrypt_empty (icb_BE:quad32) (plain_LE cipher_LE:seq quad32) (alg:algorithm) (key:seq nat32) =
reveal_opaque (`%le_bytes_to_seq_quad32) le_bytes_to_seq_quad32;
gctr_encrypt_LE_reveal ();
let plain = slice (le_seq_quad32_to_bytes plain_LE) 0 0 in
let cipher = slice (le_seq_quad32_to_bytes cipher_LE) 0 0 in
assert (plain == empty);
assert (cipher == empty);
assert (length plain == 0);
assert (make_gctr_plain_LE plain == empty);
let num_extra = (length (make_gctr_plain_LE plain)) % 16 in
assert (num_extra == 0);
let plain_quads_LE = le_bytes_to_seq_quad32 plain in
let cipher_quads_LE = gctr_encrypt_recursive icb_BE plain_quads_LE alg key 0 in
assert (equal plain_quads_LE empty); // OBSERVE
assert (plain_quads_LE == empty);
assert (cipher_quads_LE == empty);
assert (equal (le_seq_quad32_to_bytes cipher_quads_LE) empty); // OBSERVEs
()
let gctr_partial_opaque_init alg plain cipher key icb =
gctr_partial_reveal ();
()
#restart-solver
let lemma_gctr_partial_append alg b1 b2 p1 c1 p2 c2 key icb1 icb2 =
gctr_partial_reveal ();
()
let gctr_partial_opaque_ignores_postfix alg bound plain plain' cipher cipher' key icb =
gctr_partial_reveal ();
// OBSERVE:
assert (forall i . 0 <= i /\ i < bound ==> index plain i == index (slice plain 0 bound) i);
assert (forall i . 0 <= i /\ i < bound ==> index plain' i == index (slice plain' 0 bound) i);
assert (forall i . 0 <= i /\ i < bound ==> index cipher i == index (slice cipher 0 bound) i);
assert (forall i . 0 <= i /\ i < bound ==> index cipher' i == index (slice cipher' 0 bound) i);
()
let gctr_partial_extend6 (alg:algorithm) (bound:nat) (plain cipher:seq quad32) (key:seq nat32) (icb:quad32)
=
gctr_partial_reveal ();
()
(*
let rec seq_map_i_indexed' (#a:Type) (#b:Type) (f:int->a->b) (s:seq a) (i:int) :
Tot (s':seq b { length s' == length s /\
(forall j . {:pattern index s' j} 0 <= j /\ j < length s ==> index s' j == f (i + j) (index s j))
})
(decreases (length s))
=
if length s = 0 then empty
else cons (f i (head s)) (seq_map_i_indexed f (tail s) (i + 1))
let rec test (icb_BE:quad32) (plain_LE:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (i:int) :
Lemma (ensures
(let gctr_encrypt_block_curried (j:int) (p:quad32) = gctr_encrypt_block icb_BE p alg key j in
gctr_encrypt_recursive icb_BE plain_LE alg key i == seq_map_i_indexed' gctr_encrypt_block_curried plain_LE i))
(decreases (length plain_LE))
=
let gctr_encrypt_block_curried (j:int) (p:quad32) = gctr_encrypt_block icb_BE p alg key j in
let g = gctr_encrypt_recursive icb_BE plain_LE alg key i in
let s = seq_map_i_indexed' gctr_encrypt_block_curried plain_LE i in
if length plain_LE = 0 then (
assert(equal (g) (s));
()
) else (
test icb_BE (tail plain_LE) alg key (i+1);
assert (gctr_encrypt_recursive icb_BE (tail plain_LE) alg key (i+1) == seq_map_i_indexed' gctr_encrypt_block_curried (tail plain_LE) (i+1))
)
*)
let rec gctr_encrypt_recursive_length (icb:quad32) (plain:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (i:int) : Lemma
(requires True)
(ensures length (gctr_encrypt_recursive icb plain alg key i) == length plain)
(decreases %[length plain])
[SMTPat (length (gctr_encrypt_recursive icb plain alg key i))]
=
if length plain = 0 then ()
else gctr_encrypt_recursive_length icb (tail plain) alg key (i + 1)
#reset-options "--z3rlimit 40"
let gctr_encrypt_length (icb_BE:quad32) (plain:gctr_plain_LE)
(alg:algorithm) (key:aes_key_LE alg) :
Lemma(length (gctr_encrypt_LE icb_BE plain alg key) == length plain)
[SMTPat (length (gctr_encrypt_LE icb_BE plain alg key))]
=
reveal_opaque (`%le_bytes_to_seq_quad32) le_bytes_to_seq_quad32;
gctr_encrypt_LE_reveal ();
let num_extra = (length plain) % 16 in
let result = gctr_encrypt_LE icb_BE plain alg key in
if num_extra = 0 then (
let plain_quads_LE = le_bytes_to_seq_quad32 plain in
gctr_encrypt_recursive_length icb_BE plain_quads_LE alg key 0
) else (
let full_bytes_len = (length plain) - num_extra in
let full_blocks, final_block = split plain full_bytes_len in
let full_quads_LE = le_bytes_to_seq_quad32 full_blocks in
let final_quad_LE = le_bytes_to_quad32 (pad_to_128_bits final_block) in
let cipher_quads_LE = gctr_encrypt_recursive icb_BE full_quads_LE alg key 0 in
let final_cipher_quad_LE = gctr_encrypt_block icb_BE final_quad_LE alg key (full_bytes_len / 16) in
let cipher_bytes_full_LE = le_seq_quad32_to_bytes cipher_quads_LE in
let final_cipher_bytes_LE = slice (le_quad32_to_bytes final_cipher_quad_LE) 0 num_extra in
gctr_encrypt_recursive_length icb_BE full_quads_LE alg key 0;
assert (length result == length cipher_bytes_full_LE + length final_cipher_bytes_LE);
assert (length cipher_quads_LE == length full_quads_LE);
assert (length cipher_bytes_full_LE == 16 * length cipher_quads_LE);
assert (16 * length full_quads_LE == length full_blocks);
assert (length cipher_bytes_full_LE == length full_blocks);
()
)
#reset-options
let rec gctr_indexed_helper (icb:quad32) (plain:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (i:int) : Lemma
(requires True)
(ensures (let cipher = gctr_encrypt_recursive icb plain alg key i in
length cipher == length plain /\
(forall j . {:pattern index cipher j} 0 <= j /\ j < length plain ==>
index cipher j == quad32_xor (index plain j) (aes_encrypt_BE alg key (inc32 icb (i + j)) ))))
(decreases %[length plain]) | false | false | Vale.AES.GCTR.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val gctr_indexed_helper
(icb: quad32)
(plain: gctr_plain_internal_LE)
(alg: algorithm)
(key: aes_key_LE alg)
(i: int)
: Lemma (requires True)
(ensures
(let cipher = gctr_encrypt_recursive icb plain alg key i in
length cipher == length plain /\
(forall j. {:pattern index cipher j}
0 <= j /\ j < length plain ==>
index cipher j ==
quad32_xor (index plain j) (aes_encrypt_BE alg key (inc32 icb (i + j))))))
(decreases %[length plain]) | [
"recursion"
] | Vale.AES.GCTR.gctr_indexed_helper | {
"file_name": "vale/code/crypto/aes/Vale.AES.GCTR.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} |
icb: Vale.Def.Types_s.quad32 ->
plain: Vale.AES.GCTR_s.gctr_plain_internal_LE ->
alg: Vale.AES.AES_common_s.algorithm ->
key: Vale.AES.AES_s.aes_key_LE alg ->
i: Prims.int
-> FStar.Pervasives.Lemma
(ensures
(let cipher = Vale.AES.GCTR_s.gctr_encrypt_recursive icb plain alg key i in
FStar.Seq.Base.length cipher == FStar.Seq.Base.length plain /\
(forall (j:
i:
Prims.int
{ i >= 0 /\ i < FStar.Seq.Base.length plain /\
(i >= 0) /\ (i < FStar.Seq.Base.length cipher) }).
{:pattern FStar.Seq.Base.index cipher j}
0 <= j /\ j < FStar.Seq.Base.length plain ==>
FStar.Seq.Base.index cipher j ==
Vale.Def.Types_s.quad32_xor (FStar.Seq.Base.index plain j)
(Vale.AES.GCTR.aes_encrypt_BE alg key (Vale.AES.GCTR_s.inc32 icb (i + j))))))
(decreases FStar.Seq.Base.length plain) | {
"end_col": 41,
"end_line": 174,
"start_col": 2,
"start_line": 160
} |
FStar.Pervasives.Lemma | val nat32_xor_bytewise_1_helper3 (k k': nat32) (s s': four nat8)
: Lemma (requires k == four_to_nat 8 s /\ k' == four_to_nat 8 s' /\ s.lo0 == s'.lo0)
(ensures k * 0x1000000 % 0x100000000 == k' * 0x1000000 % 0x100000000) | [
{
"abbrev": false,
"full_module": "FStar.Seq.Properties",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.Types_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Lib.Seqs",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCM_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCTR_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words.Four_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words.Seq_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Lib.Seqs",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCM_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCTR_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let nat32_xor_bytewise_1_helper3 (k k':nat32) (s s':four nat8) : Lemma
(requires
k == four_to_nat 8 s /\
k' == four_to_nat 8 s' /\
s.lo0 == s'.lo0
)
(ensures k * 0x1000000 % 0x100000000 == k' * 0x1000000 % 0x100000000)
=
let Mkfour _ _ _ _ = s in
let Mkfour _ _ _ _ = s' in
assert_norm (four_to_nat 8 s == four_to_nat_unfold 8 s );
assert_norm (four_to_nat 8 s' == four_to_nat_unfold 8 s');
() | val nat32_xor_bytewise_1_helper3 (k k': nat32) (s s': four nat8)
: Lemma (requires k == four_to_nat 8 s /\ k' == four_to_nat 8 s' /\ s.lo0 == s'.lo0)
(ensures k * 0x1000000 % 0x100000000 == k' * 0x1000000 % 0x100000000)
let nat32_xor_bytewise_1_helper3 (k k': nat32) (s s': four nat8)
: Lemma (requires k == four_to_nat 8 s /\ k' == four_to_nat 8 s' /\ s.lo0 == s'.lo0)
(ensures k * 0x1000000 % 0x100000000 == k' * 0x1000000 % 0x100000000) = | false | null | true | let Mkfour _ _ _ _ = s in
let Mkfour _ _ _ _ = s' in
assert_norm (four_to_nat 8 s == four_to_nat_unfold 8 s);
assert_norm (four_to_nat 8 s' == four_to_nat_unfold 8 s');
() | {
"checked_file": "Vale.AES.GCTR.fst.checked",
"dependencies": [
"Vale.Poly1305.Bitvectors.fsti.checked",
"Vale.Lib.Seqs.fsti.checked",
"Vale.Def.Words_s.fsti.checked",
"Vale.Def.Words.Seq_s.fsti.checked",
"Vale.Def.Words.Four_s.fsti.checked",
"Vale.Def.TypesNative_s.fst.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Opaque_s.fsti.checked",
"Vale.Arch.Types.fsti.checked",
"Vale.AES.Types_helpers.fsti.checked",
"Vale.AES.GCTR_s.fst.checked",
"Vale.AES.GCM_helpers.fsti.checked",
"Vale.AES.AES_s.fst.checked",
"prims.fst.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.Properties.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Classical.fsti.checked",
"FStar.Calc.fsti.checked"
],
"interface_file": true,
"source_file": "Vale.AES.GCTR.fst"
} | [
"lemma"
] | [
"Vale.Def.Types_s.nat32",
"Vale.Def.Words_s.four",
"Vale.Def.Types_s.nat8",
"Prims.unit",
"FStar.Pervasives.assert_norm",
"Prims.eq2",
"Vale.Def.Words_s.natN",
"Prims.pow2",
"FStar.Mul.op_Star",
"Vale.Def.Words.Four_s.four_to_nat",
"Vale.Def.Words.Four_s.four_to_nat_unfold",
"Vale.Def.Words_s.nat8",
"Prims.l_and",
"Vale.Def.Words_s.pow2_32",
"Vale.Def.Words_s.__proj__Mkfour__item__lo0",
"Prims.squash",
"Prims.int",
"Prims.op_Modulus",
"Prims.Nil",
"FStar.Pervasives.pattern"
] | [] | module Vale.AES.GCTR
open Vale.Def.Opaque_s
open Vale.Def.Words_s
open Vale.Def.Words.Seq_s
open Vale.Def.Words.Four_s
open Vale.Def.Types_s
open Vale.Arch.Types
open FStar.Mul
open FStar.Seq
open Vale.AES.AES_s
open Vale.AES.GCTR_s
open Vale.AES.GCM_helpers
open FStar.Math.Lemmas
open Vale.Lib.Seqs
open Vale.AES.Types_helpers
#set-options "--z3rlimit 20 --max_fuel 1 --max_ifuel 0"
let lemma_counter_init x low64 low8 =
Vale.Poly1305.Bitvectors.lemma_bytes_and_mod1 low64;
Vale.Def.TypesNative_s.reveal_iand 64 low64 0xff;
assert (low8 == low64 % 256);
lo64_reveal ();
assert_norm (pow2_norm 32 == pow2_32); // OBSERVE
assert (low64 == x.lo0 + x.lo1 * pow2_32); // OBSERVE
assert (low64 % 256 == x.lo0 % 256);
()
let gctr_encrypt_block_offset (icb_BE:quad32) (plain_LE:quad32) (alg:algorithm) (key:seq nat32) (i:int) =
()
let gctr_encrypt_empty (icb_BE:quad32) (plain_LE cipher_LE:seq quad32) (alg:algorithm) (key:seq nat32) =
reveal_opaque (`%le_bytes_to_seq_quad32) le_bytes_to_seq_quad32;
gctr_encrypt_LE_reveal ();
let plain = slice (le_seq_quad32_to_bytes plain_LE) 0 0 in
let cipher = slice (le_seq_quad32_to_bytes cipher_LE) 0 0 in
assert (plain == empty);
assert (cipher == empty);
assert (length plain == 0);
assert (make_gctr_plain_LE plain == empty);
let num_extra = (length (make_gctr_plain_LE plain)) % 16 in
assert (num_extra == 0);
let plain_quads_LE = le_bytes_to_seq_quad32 plain in
let cipher_quads_LE = gctr_encrypt_recursive icb_BE plain_quads_LE alg key 0 in
assert (equal plain_quads_LE empty); // OBSERVE
assert (plain_quads_LE == empty);
assert (cipher_quads_LE == empty);
assert (equal (le_seq_quad32_to_bytes cipher_quads_LE) empty); // OBSERVEs
()
let gctr_partial_opaque_init alg plain cipher key icb =
gctr_partial_reveal ();
()
#restart-solver
let lemma_gctr_partial_append alg b1 b2 p1 c1 p2 c2 key icb1 icb2 =
gctr_partial_reveal ();
()
let gctr_partial_opaque_ignores_postfix alg bound plain plain' cipher cipher' key icb =
gctr_partial_reveal ();
// OBSERVE:
assert (forall i . 0 <= i /\ i < bound ==> index plain i == index (slice plain 0 bound) i);
assert (forall i . 0 <= i /\ i < bound ==> index plain' i == index (slice plain' 0 bound) i);
assert (forall i . 0 <= i /\ i < bound ==> index cipher i == index (slice cipher 0 bound) i);
assert (forall i . 0 <= i /\ i < bound ==> index cipher' i == index (slice cipher' 0 bound) i);
()
let gctr_partial_extend6 (alg:algorithm) (bound:nat) (plain cipher:seq quad32) (key:seq nat32) (icb:quad32)
=
gctr_partial_reveal ();
()
(*
let rec seq_map_i_indexed' (#a:Type) (#b:Type) (f:int->a->b) (s:seq a) (i:int) :
Tot (s':seq b { length s' == length s /\
(forall j . {:pattern index s' j} 0 <= j /\ j < length s ==> index s' j == f (i + j) (index s j))
})
(decreases (length s))
=
if length s = 0 then empty
else cons (f i (head s)) (seq_map_i_indexed f (tail s) (i + 1))
let rec test (icb_BE:quad32) (plain_LE:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (i:int) :
Lemma (ensures
(let gctr_encrypt_block_curried (j:int) (p:quad32) = gctr_encrypt_block icb_BE p alg key j in
gctr_encrypt_recursive icb_BE plain_LE alg key i == seq_map_i_indexed' gctr_encrypt_block_curried plain_LE i))
(decreases (length plain_LE))
=
let gctr_encrypt_block_curried (j:int) (p:quad32) = gctr_encrypt_block icb_BE p alg key j in
let g = gctr_encrypt_recursive icb_BE plain_LE alg key i in
let s = seq_map_i_indexed' gctr_encrypt_block_curried plain_LE i in
if length plain_LE = 0 then (
assert(equal (g) (s));
()
) else (
test icb_BE (tail plain_LE) alg key (i+1);
assert (gctr_encrypt_recursive icb_BE (tail plain_LE) alg key (i+1) == seq_map_i_indexed' gctr_encrypt_block_curried (tail plain_LE) (i+1))
)
*)
let rec gctr_encrypt_recursive_length (icb:quad32) (plain:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (i:int) : Lemma
(requires True)
(ensures length (gctr_encrypt_recursive icb plain alg key i) == length plain)
(decreases %[length plain])
[SMTPat (length (gctr_encrypt_recursive icb plain alg key i))]
=
if length plain = 0 then ()
else gctr_encrypt_recursive_length icb (tail plain) alg key (i + 1)
#reset-options "--z3rlimit 40"
let gctr_encrypt_length (icb_BE:quad32) (plain:gctr_plain_LE)
(alg:algorithm) (key:aes_key_LE alg) :
Lemma(length (gctr_encrypt_LE icb_BE plain alg key) == length plain)
[SMTPat (length (gctr_encrypt_LE icb_BE plain alg key))]
=
reveal_opaque (`%le_bytes_to_seq_quad32) le_bytes_to_seq_quad32;
gctr_encrypt_LE_reveal ();
let num_extra = (length plain) % 16 in
let result = gctr_encrypt_LE icb_BE plain alg key in
if num_extra = 0 then (
let plain_quads_LE = le_bytes_to_seq_quad32 plain in
gctr_encrypt_recursive_length icb_BE plain_quads_LE alg key 0
) else (
let full_bytes_len = (length plain) - num_extra in
let full_blocks, final_block = split plain full_bytes_len in
let full_quads_LE = le_bytes_to_seq_quad32 full_blocks in
let final_quad_LE = le_bytes_to_quad32 (pad_to_128_bits final_block) in
let cipher_quads_LE = gctr_encrypt_recursive icb_BE full_quads_LE alg key 0 in
let final_cipher_quad_LE = gctr_encrypt_block icb_BE final_quad_LE alg key (full_bytes_len / 16) in
let cipher_bytes_full_LE = le_seq_quad32_to_bytes cipher_quads_LE in
let final_cipher_bytes_LE = slice (le_quad32_to_bytes final_cipher_quad_LE) 0 num_extra in
gctr_encrypt_recursive_length icb_BE full_quads_LE alg key 0;
assert (length result == length cipher_bytes_full_LE + length final_cipher_bytes_LE);
assert (length cipher_quads_LE == length full_quads_LE);
assert (length cipher_bytes_full_LE == 16 * length cipher_quads_LE);
assert (16 * length full_quads_LE == length full_blocks);
assert (length cipher_bytes_full_LE == length full_blocks);
()
)
#reset-options
let rec gctr_indexed_helper (icb:quad32) (plain:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (i:int) : Lemma
(requires True)
(ensures (let cipher = gctr_encrypt_recursive icb plain alg key i in
length cipher == length plain /\
(forall j . {:pattern index cipher j} 0 <= j /\ j < length plain ==>
index cipher j == quad32_xor (index plain j) (aes_encrypt_BE alg key (inc32 icb (i + j)) ))))
(decreases %[length plain])
=
if length plain = 0 then ()
else
let tl = tail plain in
let cipher = gctr_encrypt_recursive icb plain alg key i in
let r_cipher = gctr_encrypt_recursive icb tl alg key (i+1) in
let helper (j:int) :
Lemma ((0 <= j /\ j < length plain) ==> (index cipher j == quad32_xor (index plain j) (aes_encrypt_BE alg key (inc32 icb (i + j)) )))
=
aes_encrypt_LE_reveal ();
if 0 < j && j < length plain then (
gctr_indexed_helper icb tl alg key (i+1);
assert(index r_cipher (j-1) == quad32_xor (index tl (j-1)) (aes_encrypt_BE alg key (inc32 icb (i + 1 + j - 1)) )) // OBSERVE
) else ()
in
FStar.Classical.forall_intro helper
let gctr_indexed (icb:quad32) (plain:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (cipher:seq quad32) : Lemma
(requires length cipher == length plain /\
(forall i . {:pattern index cipher i} 0 <= i /\ i < length cipher ==>
index cipher i == quad32_xor (index plain i) (aes_encrypt_BE alg key (inc32 icb i) )))
(ensures cipher == gctr_encrypt_recursive icb plain alg key 0)
=
gctr_indexed_helper icb plain alg key 0;
let c = gctr_encrypt_recursive icb plain alg key 0 in
assert(equal cipher c) // OBSERVE: Invoke extensionality lemmas
let gctr_partial_completed (alg:algorithm) (plain cipher:seq quad32) (key:seq nat32) (icb:quad32) =
gctr_indexed icb plain alg key cipher;
()
let gctr_partial_opaque_completed (alg:algorithm) (plain cipher:seq quad32) (key:seq nat32) (icb:quad32) : Lemma
(requires
is_aes_key_LE alg key /\
length plain == length cipher /\
length plain < pow2_32 /\
gctr_partial alg (length cipher) plain cipher key icb
)
(ensures cipher == gctr_encrypt_recursive icb plain alg key 0)
=
gctr_partial_reveal ();
gctr_partial_completed alg plain cipher key icb
let gctr_partial_to_full_basic (icb_BE:quad32) (plain:seq quad32) (alg:algorithm) (key:seq nat32) (cipher:seq quad32) =
gctr_encrypt_LE_reveal ();
let p = le_seq_quad32_to_bytes plain in
assert (length p % 16 == 0);
let plain_quads_LE = le_bytes_to_seq_quad32 p in
let cipher_quads_LE = gctr_encrypt_recursive icb_BE plain_quads_LE alg key 0 in
let cipher_bytes = le_seq_quad32_to_bytes cipher_quads_LE in
le_bytes_to_seq_quad32_to_bytes plain;
()
(*
Want to show that:
slice (le_seq_quad32_to_bytes (buffer128_as_seq(mem, out_b))) 0 num_bytes
==
gctr_encrypt_LE icb_BE (slice (le_seq_quad32_to_bytes (buffer128_as_seq(mem, in_b))) 0 num_bytes) ...
We know that
slice (buffer128_as_seq(mem, out_b) 0 num_blocks
==
gctr_encrypt_recursive icb_BE (slice buffer128_as_seq(mem, in_b) 0 num_blocks) ...
And we know that:
get_mem out_b num_blocks
==
gctr_encrypt_block(icb_BE, (get_mem inb num_blocks), alg, key, num_blocks);
Internally gctr_encrypt_LE will compute:
full_blocks, final_block = split (slice (le_seq_quad32_to_bytes (buffer128_as_seq(mem, in_b))) 0 num_bytes) (num_blocks * 16)
We'd like to show that
Step1: le_bytes_to_seq_quad32 full_blocks == slice buffer128_as_seq(mem, in_b) 0 num_blocks
and
Step2: final_block == slice (le_quad32_to_bytes (get_mem inb num_blocks)) 0 num_extra
Then we need to break down the byte-level effects of gctr_encrypt_block to show that even though the
padded version of final_block differs from (get_mem inb num_blocks), after we slice it at the end,
we end up with the same value
*)
let step1 (p:seq quad32) (num_bytes:nat{ num_bytes < 16 * length p }) : Lemma
(let num_extra = num_bytes % 16 in
let num_blocks = num_bytes / 16 in
let full_blocks, final_block = split (slice (le_seq_quad32_to_bytes p) 0 num_bytes) (num_blocks * 16) in
let full_quads_LE = le_bytes_to_seq_quad32 full_blocks in
let p_prefix = slice p 0 num_blocks in
p_prefix == full_quads_LE)
=
let num_extra = num_bytes % 16 in
let num_blocks = num_bytes / 16 in
let full_blocks, final_block = split (slice (le_seq_quad32_to_bytes p) 0 num_bytes) (num_blocks * 16) in
let full_quads_LE = le_bytes_to_seq_quad32 full_blocks in
let p_prefix = slice p 0 num_blocks in
assert (length full_blocks == num_blocks * 16);
assert (full_blocks == slice (slice (le_seq_quad32_to_bytes p) 0 num_bytes) 0 (num_blocks * 16));
assert (full_blocks == slice (le_seq_quad32_to_bytes p) 0 (num_blocks * 16));
slice_commutes_le_seq_quad32_to_bytes0 p num_blocks;
assert (full_blocks == le_seq_quad32_to_bytes (slice p 0 num_blocks));
le_bytes_to_seq_quad32_to_bytes (slice p 0 num_blocks);
assert (full_quads_LE == (slice p 0 num_blocks));
()
#reset-options "--smtencoding.elim_box true --z3rlimit 30"
let lemma_slice_orig_index (#a:Type) (s s':seq a) (m n:nat) : Lemma
(requires length s == length s' /\ m <= n /\ n <= length s /\ slice s m n == slice s' m n)
(ensures (forall (i:int).{:pattern (index s i) \/ (index s' i)} m <= i /\ i < n ==> index s i == index s' i))
=
let aux (i:nat{m <= i /\ i < n}) : Lemma (index s i == index s' i) =
lemma_index_slice s m n (i - m);
lemma_index_slice s' m n (i - m)
in Classical.forall_intro aux
let lemma_ishl_32 (x:nat32) (k:nat) : Lemma
(ensures ishl #pow2_32 x k == x * pow2 k % pow2_32)
=
Vale.Def.TypesNative_s.reveal_ishl 32 x k;
FStar.UInt.shift_left_value_lemma #32 x k;
()
let lemma_ishl_ixor_32 (x y:nat32) (k:nat) : Lemma
(ensures ishl #pow2_32 (ixor x y) k == ixor (ishl x k) (ishl y k))
=
Vale.Def.TypesNative_s.reveal_ishl 32 x k;
Vale.Def.TypesNative_s.reveal_ishl 32 y k;
Vale.Def.TypesNative_s.reveal_ishl 32 (ixor x y) k;
Vale.Def.TypesNative_s.reveal_ixor 32 x y;
Vale.Def.TypesNative_s.reveal_ixor 32 (ishl x k) (ishl y k);
FStar.UInt.shift_left_logxor_lemma #32 x y k;
()
unfold let pow2_24 = 0x1000000
let nat24 = natN pow2_24
let nat32_xor_bytewise_1_helper1 (x0 x0':nat8) (x1 x1':nat24) (x x':nat32) : Lemma
(requires
x == x0 + 0x100 * x1 /\
x' == x0' + 0x100 * x1' /\
x * 0x1000000 % 0x100000000 == x' * 0x1000000 % 0x100000000
)
(ensures x0 == x0')
=
()
let nat32_xor_bytewise_2_helper1 (x0 x0' x1 x1':nat16) (x x':nat32) : Lemma
(requires
x == x0 + 0x10000 * x1 /\
x' == x0' + 0x10000 * x1' /\
x * 0x10000 % 0x100000000 == x' * 0x10000 % 0x100000000
)
(ensures x0 == x0')
=
()
let nat32_xor_bytewise_3_helper1 (x0 x0':nat24) (x1 x1':nat8) (x x':nat32) : Lemma
(requires
x == x0 + 0x1000000 * x1 /\
x' == x0' + 0x1000000 * x1' /\
x * 0x100 % 0x100000000 == x' * 0x100 % 0x100000000
)
(ensures x0 == x0')
=
()
let nat32_xor_bytewise_1_helper2 (x x':nat32) (t t':four nat8) : Lemma
(requires
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
x * 0x1000000 % 0x100000000 == x' * 0x1000000 % 0x100000000
)
(ensures t.lo0 == t'.lo0)
=
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
let t123 = t1 + 0x100 * t2 + 0x10000 * t3 in
let t123' = t1' + 0x100 * t2' + 0x10000 * t3' in
assert_norm (four_to_nat 8 t == four_to_nat_unfold 8 t );
assert_norm (four_to_nat 8 t' == four_to_nat_unfold 8 t');
nat32_xor_bytewise_1_helper1 t0 t0' t123 t123' x x';
()
let nat32_xor_bytewise_2_helper2 (x x':nat32) (t t':four nat8) : Lemma
(requires
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
x * 0x10000 % 0x100000000 == x' * 0x10000 % 0x100000000
)
(ensures t.lo0 == t'.lo0 /\ t.lo1 == t'.lo1)
=
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
let t01 = t0 + 0x100 * t1 in
let t23 = t2 + 0x100 * t3 in
let t01' = t0' + 0x100 * t1' in
let t23' = t2' + 0x100 * t3' in
assert_norm (four_to_nat 8 t == four_to_nat_unfold 8 t );
assert_norm (four_to_nat 8 t' == four_to_nat_unfold 8 t');
nat32_xor_bytewise_2_helper1 t01 t01' t23 t23' x x';
()
let nat32_xor_bytewise_3_helper2 (x x':nat32) (t t':four nat8) : Lemma
(requires
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
x * 0x100 % 0x100000000 == x' * 0x100 % 0x100000000
)
(ensures t.lo0 == t'.lo0 /\ t.lo1 == t'.lo1 /\ t.hi2 == t'.hi2)
=
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
let t012 = t0 + 0x100 * t1 + 0x10000 * t2 in
let t012' = t0' + 0x100 * t1' + 0x10000 * t2' in
assert_norm (four_to_nat 8 t == four_to_nat_unfold 8 t );
assert_norm (four_to_nat 8 t' == four_to_nat_unfold 8 t');
nat32_xor_bytewise_3_helper1 t012 t012' t3 t3' x x';
()
let nat32_xor_bytewise_1_helper3 (k k':nat32) (s s':four nat8) : Lemma
(requires
k == four_to_nat 8 s /\
k' == four_to_nat 8 s' /\
s.lo0 == s'.lo0
) | false | false | Vale.AES.GCTR.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 30,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val nat32_xor_bytewise_1_helper3 (k k': nat32) (s s': four nat8)
: Lemma (requires k == four_to_nat 8 s /\ k' == four_to_nat 8 s' /\ s.lo0 == s'.lo0)
(ensures k * 0x1000000 % 0x100000000 == k' * 0x1000000 % 0x100000000) | [] | Vale.AES.GCTR.nat32_xor_bytewise_1_helper3 | {
"file_name": "vale/code/crypto/aes/Vale.AES.GCTR.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} |
k: Vale.Def.Types_s.nat32 ->
k': Vale.Def.Types_s.nat32 ->
s: Vale.Def.Words_s.four Vale.Def.Types_s.nat8 ->
s': Vale.Def.Words_s.four Vale.Def.Types_s.nat8
-> FStar.Pervasives.Lemma
(requires
k == Vale.Def.Words.Four_s.four_to_nat 8 s /\ k' == Vale.Def.Words.Four_s.four_to_nat 8 s' /\
Mkfour?.lo0 s == Mkfour?.lo0 s')
(ensures k * 0x1000000 % 0x100000000 == k' * 0x1000000 % 0x100000000) | {
"end_col": 4,
"end_line": 394,
"start_col": 3,
"start_line": 389
} |
FStar.Pervasives.Lemma | val nat32_xor_bytewise (k k' m: nat32) (s s' t t': seq4 nat8) (n: nat)
: Lemma
(requires
n <= 4 /\ k == four_to_nat 8 (seq_to_four_LE s) /\ k' == four_to_nat 8 (seq_to_four_LE s') /\
ixor k m == four_to_nat 8 (seq_to_four_LE t) /\
ixor k' m == four_to_nat 8 (seq_to_four_LE t') /\ equal (slice s 0 n) (slice s' 0 n))
(ensures
(forall (i: nat). {:pattern (index t i)\/(index t' i)} i < n ==> index t i == index t' i)) | [
{
"abbrev": false,
"full_module": "FStar.Seq.Properties",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.Types_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Lib.Seqs",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCM_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCTR_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words.Four_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words.Seq_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Lib.Seqs",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCM_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCTR_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let nat32_xor_bytewise (k k' m:nat32) (s s' t t':seq4 nat8) (n:nat) : Lemma
(requires
n <= 4 /\
k == four_to_nat 8 (seq_to_four_LE s) /\
k' == four_to_nat 8 (seq_to_four_LE s') /\
ixor k m == four_to_nat 8 (seq_to_four_LE t) /\
ixor k' m == four_to_nat 8 (seq_to_four_LE t') /\
equal (slice s 0 n) (slice s' 0 n)
)
// (ensures equal (slice t 0 n) (slice t' 0 n))
(ensures (forall (i:nat).{:pattern (index t i) \/ (index t' i)} i < n ==> index t i == index t' i))
=
assert (n > 0 ==> index (slice s 0 n) 0 == index (slice s' 0 n) 0);
assert (n > 1 ==> index (slice s 0 n) 1 == index (slice s' 0 n) 1);
assert (n > 2 ==> index (slice s 0 n) 2 == index (slice s' 0 n) 2);
assert (n > 3 ==> index (slice s 0 n) 3 == index (slice s' 0 n) 3);
let x = ixor k m in
let x' = ixor k' m in
if n = 1 then nat32_xor_bytewise_1 k k' x x' m (seq_to_four_LE s) (seq_to_four_LE s') (seq_to_four_LE t) (seq_to_four_LE t');
if n = 2 then nat32_xor_bytewise_2 k k' x x' m (seq_to_four_LE s) (seq_to_four_LE s') (seq_to_four_LE t) (seq_to_four_LE t');
if n = 3 then nat32_xor_bytewise_3 k k' x x' m (seq_to_four_LE s) (seq_to_four_LE s') (seq_to_four_LE t) (seq_to_four_LE t');
if n = 4 then nat32_xor_bytewise_4 k k' x x' m (seq_to_four_LE s) (seq_to_four_LE s') (seq_to_four_LE t) (seq_to_four_LE t');
assert (equal (slice t 0 n) (slice t' 0 n));
lemma_slice_orig_index t t' 0 n;
() | val nat32_xor_bytewise (k k' m: nat32) (s s' t t': seq4 nat8) (n: nat)
: Lemma
(requires
n <= 4 /\ k == four_to_nat 8 (seq_to_four_LE s) /\ k' == four_to_nat 8 (seq_to_four_LE s') /\
ixor k m == four_to_nat 8 (seq_to_four_LE t) /\
ixor k' m == four_to_nat 8 (seq_to_four_LE t') /\ equal (slice s 0 n) (slice s' 0 n))
(ensures
(forall (i: nat). {:pattern (index t i)\/(index t' i)} i < n ==> index t i == index t' i))
let nat32_xor_bytewise (k k' m: nat32) (s s' t t': seq4 nat8) (n: nat)
: Lemma
(requires
n <= 4 /\ k == four_to_nat 8 (seq_to_four_LE s) /\ k' == four_to_nat 8 (seq_to_four_LE s') /\
ixor k m == four_to_nat 8 (seq_to_four_LE t) /\
ixor k' m == four_to_nat 8 (seq_to_four_LE t') /\ equal (slice s 0 n) (slice s' 0 n))
(ensures
(forall (i: nat). {:pattern (index t i)\/(index t' i)} i < n ==> index t i == index t' i)) = | false | null | true | assert (n > 0 ==> index (slice s 0 n) 0 == index (slice s' 0 n) 0);
assert (n > 1 ==> index (slice s 0 n) 1 == index (slice s' 0 n) 1);
assert (n > 2 ==> index (slice s 0 n) 2 == index (slice s' 0 n) 2);
assert (n > 3 ==> index (slice s 0 n) 3 == index (slice s' 0 n) 3);
let x = ixor k m in
let x' = ixor k' m in
if n = 1
then
nat32_xor_bytewise_1 k
k'
x
x'
m
(seq_to_four_LE s)
(seq_to_four_LE s')
(seq_to_four_LE t)
(seq_to_four_LE t');
if n = 2
then
nat32_xor_bytewise_2 k
k'
x
x'
m
(seq_to_four_LE s)
(seq_to_four_LE s')
(seq_to_four_LE t)
(seq_to_four_LE t');
if n = 3
then
nat32_xor_bytewise_3 k
k'
x
x'
m
(seq_to_four_LE s)
(seq_to_four_LE s')
(seq_to_four_LE t)
(seq_to_four_LE t');
if n = 4
then
nat32_xor_bytewise_4 k
k'
x
x'
m
(seq_to_four_LE s)
(seq_to_four_LE s')
(seq_to_four_LE t)
(seq_to_four_LE t');
assert (equal (slice t 0 n) (slice t' 0 n));
lemma_slice_orig_index t t' 0 n;
() | {
"checked_file": "Vale.AES.GCTR.fst.checked",
"dependencies": [
"Vale.Poly1305.Bitvectors.fsti.checked",
"Vale.Lib.Seqs.fsti.checked",
"Vale.Def.Words_s.fsti.checked",
"Vale.Def.Words.Seq_s.fsti.checked",
"Vale.Def.Words.Four_s.fsti.checked",
"Vale.Def.TypesNative_s.fst.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Opaque_s.fsti.checked",
"Vale.Arch.Types.fsti.checked",
"Vale.AES.Types_helpers.fsti.checked",
"Vale.AES.GCTR_s.fst.checked",
"Vale.AES.GCM_helpers.fsti.checked",
"Vale.AES.AES_s.fst.checked",
"prims.fst.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.Properties.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Classical.fsti.checked",
"FStar.Calc.fsti.checked"
],
"interface_file": true,
"source_file": "Vale.AES.GCTR.fst"
} | [
"lemma"
] | [
"Vale.Def.Types_s.nat32",
"Vale.Def.Words.Seq_s.seq4",
"Vale.Def.Types_s.nat8",
"Prims.nat",
"Prims.unit",
"Vale.AES.GCTR.lemma_slice_orig_index",
"Prims._assert",
"FStar.Seq.Base.equal",
"FStar.Seq.Base.slice",
"Prims.op_Equality",
"Prims.int",
"Vale.AES.GCTR.nat32_xor_bytewise_4",
"Vale.Def.Words.Seq_s.seq_to_four_LE",
"Prims.bool",
"Vale.AES.GCTR.nat32_xor_bytewise_3",
"Vale.AES.GCTR.nat32_xor_bytewise_2",
"Vale.AES.GCTR.nat32_xor_bytewise_1",
"Vale.Def.Words_s.natN",
"Vale.Def.Types_s.ixor",
"Vale.Def.Words_s.pow2_32",
"Prims.l_imp",
"Prims.b2t",
"Prims.op_GreaterThan",
"Prims.eq2",
"FStar.Seq.Base.index",
"Prims.l_and",
"Prims.op_LessThanOrEqual",
"Vale.Def.Words.Four_s.four_to_nat",
"Prims.pow2",
"FStar.Mul.op_Star",
"Prims.squash",
"Prims.l_Forall",
"Prims.op_LessThan",
"Prims.Nil",
"FStar.Pervasives.pattern"
] | [] | module Vale.AES.GCTR
open Vale.Def.Opaque_s
open Vale.Def.Words_s
open Vale.Def.Words.Seq_s
open Vale.Def.Words.Four_s
open Vale.Def.Types_s
open Vale.Arch.Types
open FStar.Mul
open FStar.Seq
open Vale.AES.AES_s
open Vale.AES.GCTR_s
open Vale.AES.GCM_helpers
open FStar.Math.Lemmas
open Vale.Lib.Seqs
open Vale.AES.Types_helpers
#set-options "--z3rlimit 20 --max_fuel 1 --max_ifuel 0"
let lemma_counter_init x low64 low8 =
Vale.Poly1305.Bitvectors.lemma_bytes_and_mod1 low64;
Vale.Def.TypesNative_s.reveal_iand 64 low64 0xff;
assert (low8 == low64 % 256);
lo64_reveal ();
assert_norm (pow2_norm 32 == pow2_32); // OBSERVE
assert (low64 == x.lo0 + x.lo1 * pow2_32); // OBSERVE
assert (low64 % 256 == x.lo0 % 256);
()
let gctr_encrypt_block_offset (icb_BE:quad32) (plain_LE:quad32) (alg:algorithm) (key:seq nat32) (i:int) =
()
let gctr_encrypt_empty (icb_BE:quad32) (plain_LE cipher_LE:seq quad32) (alg:algorithm) (key:seq nat32) =
reveal_opaque (`%le_bytes_to_seq_quad32) le_bytes_to_seq_quad32;
gctr_encrypt_LE_reveal ();
let plain = slice (le_seq_quad32_to_bytes plain_LE) 0 0 in
let cipher = slice (le_seq_quad32_to_bytes cipher_LE) 0 0 in
assert (plain == empty);
assert (cipher == empty);
assert (length plain == 0);
assert (make_gctr_plain_LE plain == empty);
let num_extra = (length (make_gctr_plain_LE plain)) % 16 in
assert (num_extra == 0);
let plain_quads_LE = le_bytes_to_seq_quad32 plain in
let cipher_quads_LE = gctr_encrypt_recursive icb_BE plain_quads_LE alg key 0 in
assert (equal plain_quads_LE empty); // OBSERVE
assert (plain_quads_LE == empty);
assert (cipher_quads_LE == empty);
assert (equal (le_seq_quad32_to_bytes cipher_quads_LE) empty); // OBSERVEs
()
let gctr_partial_opaque_init alg plain cipher key icb =
gctr_partial_reveal ();
()
#restart-solver
let lemma_gctr_partial_append alg b1 b2 p1 c1 p2 c2 key icb1 icb2 =
gctr_partial_reveal ();
()
let gctr_partial_opaque_ignores_postfix alg bound plain plain' cipher cipher' key icb =
gctr_partial_reveal ();
// OBSERVE:
assert (forall i . 0 <= i /\ i < bound ==> index plain i == index (slice plain 0 bound) i);
assert (forall i . 0 <= i /\ i < bound ==> index plain' i == index (slice plain' 0 bound) i);
assert (forall i . 0 <= i /\ i < bound ==> index cipher i == index (slice cipher 0 bound) i);
assert (forall i . 0 <= i /\ i < bound ==> index cipher' i == index (slice cipher' 0 bound) i);
()
let gctr_partial_extend6 (alg:algorithm) (bound:nat) (plain cipher:seq quad32) (key:seq nat32) (icb:quad32)
=
gctr_partial_reveal ();
()
(*
let rec seq_map_i_indexed' (#a:Type) (#b:Type) (f:int->a->b) (s:seq a) (i:int) :
Tot (s':seq b { length s' == length s /\
(forall j . {:pattern index s' j} 0 <= j /\ j < length s ==> index s' j == f (i + j) (index s j))
})
(decreases (length s))
=
if length s = 0 then empty
else cons (f i (head s)) (seq_map_i_indexed f (tail s) (i + 1))
let rec test (icb_BE:quad32) (plain_LE:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (i:int) :
Lemma (ensures
(let gctr_encrypt_block_curried (j:int) (p:quad32) = gctr_encrypt_block icb_BE p alg key j in
gctr_encrypt_recursive icb_BE plain_LE alg key i == seq_map_i_indexed' gctr_encrypt_block_curried plain_LE i))
(decreases (length plain_LE))
=
let gctr_encrypt_block_curried (j:int) (p:quad32) = gctr_encrypt_block icb_BE p alg key j in
let g = gctr_encrypt_recursive icb_BE plain_LE alg key i in
let s = seq_map_i_indexed' gctr_encrypt_block_curried plain_LE i in
if length plain_LE = 0 then (
assert(equal (g) (s));
()
) else (
test icb_BE (tail plain_LE) alg key (i+1);
assert (gctr_encrypt_recursive icb_BE (tail plain_LE) alg key (i+1) == seq_map_i_indexed' gctr_encrypt_block_curried (tail plain_LE) (i+1))
)
*)
let rec gctr_encrypt_recursive_length (icb:quad32) (plain:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (i:int) : Lemma
(requires True)
(ensures length (gctr_encrypt_recursive icb plain alg key i) == length plain)
(decreases %[length plain])
[SMTPat (length (gctr_encrypt_recursive icb plain alg key i))]
=
if length plain = 0 then ()
else gctr_encrypt_recursive_length icb (tail plain) alg key (i + 1)
#reset-options "--z3rlimit 40"
let gctr_encrypt_length (icb_BE:quad32) (plain:gctr_plain_LE)
(alg:algorithm) (key:aes_key_LE alg) :
Lemma(length (gctr_encrypt_LE icb_BE plain alg key) == length plain)
[SMTPat (length (gctr_encrypt_LE icb_BE plain alg key))]
=
reveal_opaque (`%le_bytes_to_seq_quad32) le_bytes_to_seq_quad32;
gctr_encrypt_LE_reveal ();
let num_extra = (length plain) % 16 in
let result = gctr_encrypt_LE icb_BE plain alg key in
if num_extra = 0 then (
let plain_quads_LE = le_bytes_to_seq_quad32 plain in
gctr_encrypt_recursive_length icb_BE plain_quads_LE alg key 0
) else (
let full_bytes_len = (length plain) - num_extra in
let full_blocks, final_block = split plain full_bytes_len in
let full_quads_LE = le_bytes_to_seq_quad32 full_blocks in
let final_quad_LE = le_bytes_to_quad32 (pad_to_128_bits final_block) in
let cipher_quads_LE = gctr_encrypt_recursive icb_BE full_quads_LE alg key 0 in
let final_cipher_quad_LE = gctr_encrypt_block icb_BE final_quad_LE alg key (full_bytes_len / 16) in
let cipher_bytes_full_LE = le_seq_quad32_to_bytes cipher_quads_LE in
let final_cipher_bytes_LE = slice (le_quad32_to_bytes final_cipher_quad_LE) 0 num_extra in
gctr_encrypt_recursive_length icb_BE full_quads_LE alg key 0;
assert (length result == length cipher_bytes_full_LE + length final_cipher_bytes_LE);
assert (length cipher_quads_LE == length full_quads_LE);
assert (length cipher_bytes_full_LE == 16 * length cipher_quads_LE);
assert (16 * length full_quads_LE == length full_blocks);
assert (length cipher_bytes_full_LE == length full_blocks);
()
)
#reset-options
let rec gctr_indexed_helper (icb:quad32) (plain:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (i:int) : Lemma
(requires True)
(ensures (let cipher = gctr_encrypt_recursive icb plain alg key i in
length cipher == length plain /\
(forall j . {:pattern index cipher j} 0 <= j /\ j < length plain ==>
index cipher j == quad32_xor (index plain j) (aes_encrypt_BE alg key (inc32 icb (i + j)) ))))
(decreases %[length plain])
=
if length plain = 0 then ()
else
let tl = tail plain in
let cipher = gctr_encrypt_recursive icb plain alg key i in
let r_cipher = gctr_encrypt_recursive icb tl alg key (i+1) in
let helper (j:int) :
Lemma ((0 <= j /\ j < length plain) ==> (index cipher j == quad32_xor (index plain j) (aes_encrypt_BE alg key (inc32 icb (i + j)) )))
=
aes_encrypt_LE_reveal ();
if 0 < j && j < length plain then (
gctr_indexed_helper icb tl alg key (i+1);
assert(index r_cipher (j-1) == quad32_xor (index tl (j-1)) (aes_encrypt_BE alg key (inc32 icb (i + 1 + j - 1)) )) // OBSERVE
) else ()
in
FStar.Classical.forall_intro helper
let gctr_indexed (icb:quad32) (plain:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (cipher:seq quad32) : Lemma
(requires length cipher == length plain /\
(forall i . {:pattern index cipher i} 0 <= i /\ i < length cipher ==>
index cipher i == quad32_xor (index plain i) (aes_encrypt_BE alg key (inc32 icb i) )))
(ensures cipher == gctr_encrypt_recursive icb plain alg key 0)
=
gctr_indexed_helper icb plain alg key 0;
let c = gctr_encrypt_recursive icb plain alg key 0 in
assert(equal cipher c) // OBSERVE: Invoke extensionality lemmas
let gctr_partial_completed (alg:algorithm) (plain cipher:seq quad32) (key:seq nat32) (icb:quad32) =
gctr_indexed icb plain alg key cipher;
()
let gctr_partial_opaque_completed (alg:algorithm) (plain cipher:seq quad32) (key:seq nat32) (icb:quad32) : Lemma
(requires
is_aes_key_LE alg key /\
length plain == length cipher /\
length plain < pow2_32 /\
gctr_partial alg (length cipher) plain cipher key icb
)
(ensures cipher == gctr_encrypt_recursive icb plain alg key 0)
=
gctr_partial_reveal ();
gctr_partial_completed alg plain cipher key icb
let gctr_partial_to_full_basic (icb_BE:quad32) (plain:seq quad32) (alg:algorithm) (key:seq nat32) (cipher:seq quad32) =
gctr_encrypt_LE_reveal ();
let p = le_seq_quad32_to_bytes plain in
assert (length p % 16 == 0);
let plain_quads_LE = le_bytes_to_seq_quad32 p in
let cipher_quads_LE = gctr_encrypt_recursive icb_BE plain_quads_LE alg key 0 in
let cipher_bytes = le_seq_quad32_to_bytes cipher_quads_LE in
le_bytes_to_seq_quad32_to_bytes plain;
()
(*
Want to show that:
slice (le_seq_quad32_to_bytes (buffer128_as_seq(mem, out_b))) 0 num_bytes
==
gctr_encrypt_LE icb_BE (slice (le_seq_quad32_to_bytes (buffer128_as_seq(mem, in_b))) 0 num_bytes) ...
We know that
slice (buffer128_as_seq(mem, out_b) 0 num_blocks
==
gctr_encrypt_recursive icb_BE (slice buffer128_as_seq(mem, in_b) 0 num_blocks) ...
And we know that:
get_mem out_b num_blocks
==
gctr_encrypt_block(icb_BE, (get_mem inb num_blocks), alg, key, num_blocks);
Internally gctr_encrypt_LE will compute:
full_blocks, final_block = split (slice (le_seq_quad32_to_bytes (buffer128_as_seq(mem, in_b))) 0 num_bytes) (num_blocks * 16)
We'd like to show that
Step1: le_bytes_to_seq_quad32 full_blocks == slice buffer128_as_seq(mem, in_b) 0 num_blocks
and
Step2: final_block == slice (le_quad32_to_bytes (get_mem inb num_blocks)) 0 num_extra
Then we need to break down the byte-level effects of gctr_encrypt_block to show that even though the
padded version of final_block differs from (get_mem inb num_blocks), after we slice it at the end,
we end up with the same value
*)
let step1 (p:seq quad32) (num_bytes:nat{ num_bytes < 16 * length p }) : Lemma
(let num_extra = num_bytes % 16 in
let num_blocks = num_bytes / 16 in
let full_blocks, final_block = split (slice (le_seq_quad32_to_bytes p) 0 num_bytes) (num_blocks * 16) in
let full_quads_LE = le_bytes_to_seq_quad32 full_blocks in
let p_prefix = slice p 0 num_blocks in
p_prefix == full_quads_LE)
=
let num_extra = num_bytes % 16 in
let num_blocks = num_bytes / 16 in
let full_blocks, final_block = split (slice (le_seq_quad32_to_bytes p) 0 num_bytes) (num_blocks * 16) in
let full_quads_LE = le_bytes_to_seq_quad32 full_blocks in
let p_prefix = slice p 0 num_blocks in
assert (length full_blocks == num_blocks * 16);
assert (full_blocks == slice (slice (le_seq_quad32_to_bytes p) 0 num_bytes) 0 (num_blocks * 16));
assert (full_blocks == slice (le_seq_quad32_to_bytes p) 0 (num_blocks * 16));
slice_commutes_le_seq_quad32_to_bytes0 p num_blocks;
assert (full_blocks == le_seq_quad32_to_bytes (slice p 0 num_blocks));
le_bytes_to_seq_quad32_to_bytes (slice p 0 num_blocks);
assert (full_quads_LE == (slice p 0 num_blocks));
()
#reset-options "--smtencoding.elim_box true --z3rlimit 30"
let lemma_slice_orig_index (#a:Type) (s s':seq a) (m n:nat) : Lemma
(requires length s == length s' /\ m <= n /\ n <= length s /\ slice s m n == slice s' m n)
(ensures (forall (i:int).{:pattern (index s i) \/ (index s' i)} m <= i /\ i < n ==> index s i == index s' i))
=
let aux (i:nat{m <= i /\ i < n}) : Lemma (index s i == index s' i) =
lemma_index_slice s m n (i - m);
lemma_index_slice s' m n (i - m)
in Classical.forall_intro aux
let lemma_ishl_32 (x:nat32) (k:nat) : Lemma
(ensures ishl #pow2_32 x k == x * pow2 k % pow2_32)
=
Vale.Def.TypesNative_s.reveal_ishl 32 x k;
FStar.UInt.shift_left_value_lemma #32 x k;
()
let lemma_ishl_ixor_32 (x y:nat32) (k:nat) : Lemma
(ensures ishl #pow2_32 (ixor x y) k == ixor (ishl x k) (ishl y k))
=
Vale.Def.TypesNative_s.reveal_ishl 32 x k;
Vale.Def.TypesNative_s.reveal_ishl 32 y k;
Vale.Def.TypesNative_s.reveal_ishl 32 (ixor x y) k;
Vale.Def.TypesNative_s.reveal_ixor 32 x y;
Vale.Def.TypesNative_s.reveal_ixor 32 (ishl x k) (ishl y k);
FStar.UInt.shift_left_logxor_lemma #32 x y k;
()
unfold let pow2_24 = 0x1000000
let nat24 = natN pow2_24
let nat32_xor_bytewise_1_helper1 (x0 x0':nat8) (x1 x1':nat24) (x x':nat32) : Lemma
(requires
x == x0 + 0x100 * x1 /\
x' == x0' + 0x100 * x1' /\
x * 0x1000000 % 0x100000000 == x' * 0x1000000 % 0x100000000
)
(ensures x0 == x0')
=
()
let nat32_xor_bytewise_2_helper1 (x0 x0' x1 x1':nat16) (x x':nat32) : Lemma
(requires
x == x0 + 0x10000 * x1 /\
x' == x0' + 0x10000 * x1' /\
x * 0x10000 % 0x100000000 == x' * 0x10000 % 0x100000000
)
(ensures x0 == x0')
=
()
let nat32_xor_bytewise_3_helper1 (x0 x0':nat24) (x1 x1':nat8) (x x':nat32) : Lemma
(requires
x == x0 + 0x1000000 * x1 /\
x' == x0' + 0x1000000 * x1' /\
x * 0x100 % 0x100000000 == x' * 0x100 % 0x100000000
)
(ensures x0 == x0')
=
()
let nat32_xor_bytewise_1_helper2 (x x':nat32) (t t':four nat8) : Lemma
(requires
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
x * 0x1000000 % 0x100000000 == x' * 0x1000000 % 0x100000000
)
(ensures t.lo0 == t'.lo0)
=
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
let t123 = t1 + 0x100 * t2 + 0x10000 * t3 in
let t123' = t1' + 0x100 * t2' + 0x10000 * t3' in
assert_norm (four_to_nat 8 t == four_to_nat_unfold 8 t );
assert_norm (four_to_nat 8 t' == four_to_nat_unfold 8 t');
nat32_xor_bytewise_1_helper1 t0 t0' t123 t123' x x';
()
let nat32_xor_bytewise_2_helper2 (x x':nat32) (t t':four nat8) : Lemma
(requires
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
x * 0x10000 % 0x100000000 == x' * 0x10000 % 0x100000000
)
(ensures t.lo0 == t'.lo0 /\ t.lo1 == t'.lo1)
=
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
let t01 = t0 + 0x100 * t1 in
let t23 = t2 + 0x100 * t3 in
let t01' = t0' + 0x100 * t1' in
let t23' = t2' + 0x100 * t3' in
assert_norm (four_to_nat 8 t == four_to_nat_unfold 8 t );
assert_norm (four_to_nat 8 t' == four_to_nat_unfold 8 t');
nat32_xor_bytewise_2_helper1 t01 t01' t23 t23' x x';
()
let nat32_xor_bytewise_3_helper2 (x x':nat32) (t t':four nat8) : Lemma
(requires
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
x * 0x100 % 0x100000000 == x' * 0x100 % 0x100000000
)
(ensures t.lo0 == t'.lo0 /\ t.lo1 == t'.lo1 /\ t.hi2 == t'.hi2)
=
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
let t012 = t0 + 0x100 * t1 + 0x10000 * t2 in
let t012' = t0' + 0x100 * t1' + 0x10000 * t2' in
assert_norm (four_to_nat 8 t == four_to_nat_unfold 8 t );
assert_norm (four_to_nat 8 t' == four_to_nat_unfold 8 t');
nat32_xor_bytewise_3_helper1 t012 t012' t3 t3' x x';
()
let nat32_xor_bytewise_1_helper3 (k k':nat32) (s s':four nat8) : Lemma
(requires
k == four_to_nat 8 s /\
k' == four_to_nat 8 s' /\
s.lo0 == s'.lo0
)
(ensures k * 0x1000000 % 0x100000000 == k' * 0x1000000 % 0x100000000)
=
let Mkfour _ _ _ _ = s in
let Mkfour _ _ _ _ = s' in
assert_norm (four_to_nat 8 s == four_to_nat_unfold 8 s );
assert_norm (four_to_nat 8 s' == four_to_nat_unfold 8 s');
()
let nat32_xor_bytewise_2_helper3 (k k':nat32) (s s':four nat8) : Lemma
(requires
k == four_to_nat 8 s /\
k' == four_to_nat 8 s' /\
s.lo0 == s'.lo0 /\ s.lo1 == s'.lo1
)
(ensures k * 0x10000 % 0x100000000 == k' * 0x10000 % 0x100000000)
=
let Mkfour _ _ _ _ = s in
let Mkfour _ _ _ _ = s' in
assert_norm (four_to_nat 8 s == four_to_nat_unfold 8 s );
assert_norm (four_to_nat 8 s' == four_to_nat_unfold 8 s');
()
let nat32_xor_bytewise_3_helper3 (k k':nat32) (s s':four nat8) : Lemma
(requires
k == four_to_nat 8 s /\
k' == four_to_nat 8 s' /\
s.lo0 == s'.lo0 /\ s.lo1 == s'.lo1 /\ s.hi2 == s'.hi2
)
(ensures k * 0x100 % 0x100000000 == k' * 0x100 % 0x100000000)
=
let Mkfour _ _ _ _ = s in
let Mkfour _ _ _ _ = s' in
assert_norm (four_to_nat 8 s == four_to_nat_unfold 8 s );
assert_norm (four_to_nat 8 s' == four_to_nat_unfold 8 s');
()
let nat32_xor_bytewise_1 (k k' x x' m:nat32) (s s' t t':four nat8) : Lemma
(requires
k == four_to_nat 8 s /\
k' == four_to_nat 8 s' /\
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
ixor k m == x /\
ixor k' m == x' /\
s.lo0 == s'.lo0
)
(ensures t.lo0 == t'.lo0)
=
let Mkfour s0 s1 s2 s3 = s in
let Mkfour s0' s1' s2' s3' = s' in
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
nat32_xor_bytewise_1_helper3 k k' s s';
lemma_ishl_32 k 24;
lemma_ishl_32 k' 24;
lemma_ishl_32 x 24;
lemma_ishl_32 x' 24;
lemma_ishl_ixor_32 k m 24;
lemma_ishl_ixor_32 k' m 24;
assert_norm (pow2 24 == pow2_24);
nat32_xor_bytewise_1_helper2 x x' t t';
()
let nat32_xor_bytewise_2 (k k' x x' m:nat32) (s s' t t':four nat8) : Lemma
(requires
k == four_to_nat 8 s /\
k' == four_to_nat 8 s' /\
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
ixor k m == x /\
ixor k' m == x' /\
s.lo0 == s'.lo0 /\ s.lo1 == s'.lo1
)
(ensures t.lo0 == t'.lo0 /\ t.lo1 == t'.lo1)
=
let Mkfour s0 s1 s2 s3 = s in
let Mkfour s0' s1' s2' s3' = s' in
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
nat32_xor_bytewise_2_helper3 k k' s s';
lemma_ishl_32 k 16;
lemma_ishl_32 k' 16;
lemma_ishl_32 x 16;
lemma_ishl_32 x' 16;
lemma_ishl_ixor_32 k m 16;
lemma_ishl_ixor_32 k' m 16;
// assert (ishl #pow2_32 k 16 == k * 0x10000 % 0x100000000);
// assert (ishl #pow2_32 k' 16 == k' * 0x10000 % 0x100000000);
// assert (ishl #pow2_32 x 16 == x * 0x10000 % 0x100000000);
// assert (ishl #pow2_32 x' 16 == x' * 0x10000 % 0x100000000);
// assert (ishl #pow2_32 x 16 == ixor (ishl k 16) (ishl m 16));
// assert (ishl #pow2_32 x' 16 == ixor (ishl k' 16) (ishl m 16));
// assert (x * 0x10000 % 0x100000000 == ixor (k * 0x10000 % 0x100000000) (ishl m 16));
// assert (x' * 0x10000 % 0x100000000 == ixor (k' * 0x10000 % 0x100000000) (ishl m 16));
nat32_xor_bytewise_2_helper2 x x' t t';
()
let nat32_xor_bytewise_3 (k k' x x' m:nat32) (s s' t t':four nat8) : Lemma
(requires
k == four_to_nat 8 s /\
k' == four_to_nat 8 s' /\
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
ixor k m == x /\
ixor k' m == x' /\
s.lo0 == s'.lo0 /\ s.lo1 == s'.lo1 /\ s.hi2 == s'.hi2
)
(ensures t.lo0 == t'.lo0 /\ t.lo1 == t'.lo1 /\ t.hi2 == t'.hi2)
=
let Mkfour s0 s1 s2 s3 = s in
let Mkfour s0' s1' s2' s3' = s' in
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
nat32_xor_bytewise_3_helper3 k k' s s';
lemma_ishl_32 k 8;
lemma_ishl_32 k' 8;
lemma_ishl_32 x 8;
lemma_ishl_32 x' 8;
lemma_ishl_ixor_32 k m 8;
lemma_ishl_ixor_32 k' m 8;
nat32_xor_bytewise_3_helper2 x x' t t';
()
// NOTE(review): this proof appears to need a larger rlimit and the boxwrap
// arithmetic encodings — presumably because unfolding four_to_nat introduces
// nonlinear arithmetic; confirm before changing these options.
#reset-options "--z3rlimit 50 --smtencoding.nl_arith_repr boxwrap --smtencoding.l_arith_repr boxwrap"
// The 4-byte (full-word) case: if k and k' have identical byte
// decompositions (s == s'), then k == k', hence x == x', and the result
// decompositions t and t' are equal as well.  Unfolding four_to_nat on t and
// t' is enough for the solver to conclude t == t'.
let nat32_xor_bytewise_4 (k k' x x' m:nat32) (s s' t t':four nat8) : Lemma
  (requires
    k == four_to_nat 8 s /\
    k' == four_to_nat 8 s' /\
    x == four_to_nat 8 t /\
    x' == four_to_nat 8 t' /\
    ixor k m == x /\
    ixor k' m == x' /\
    s == s'
  )
  (ensures t == t')
  =
  // Destructure the quads so the byte fields are individual SMT terms.
  let Mkfour s0 s1 s2 s3 = s in
  let Mkfour s0' s1' s2' s3' = s' in
  let Mkfour t0 t1 t2 t3 = t in
  let Mkfour t0' t1' t2' t3' = t' in
  // Expose the definition of four_to_nat on t and t'; injectivity of the
  // byte decomposition then yields t == t'.
  assert_norm (four_to_nat 8 t' == four_to_nat_unfold 8 t');
  assert_norm (four_to_nat 8 t  == four_to_nat_unfold 8 t );
  ()
#reset-options
let nat32_xor_bytewise (k k' m:nat32) (s s' t t':seq4 nat8) (n:nat) : Lemma
(requires
n <= 4 /\
k == four_to_nat 8 (seq_to_four_LE s) /\
k' == four_to_nat 8 (seq_to_four_LE s') /\
ixor k m == four_to_nat 8 (seq_to_four_LE t) /\
ixor k' m == four_to_nat 8 (seq_to_four_LE t') /\
equal (slice s 0 n) (slice s' 0 n)
)
// (ensures equal (slice t 0 n) (slice t' 0 n))
(ensures (forall (i:nat).{:pattern (index t i) \/ (index t' i)} i < n ==> index t i == index t' i)) | false | false | Vale.AES.GCTR.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val nat32_xor_bytewise (k k' m: nat32) (s s' t t': seq4 nat8) (n: nat)
: Lemma
(requires
n <= 4 /\ k == four_to_nat 8 (seq_to_four_LE s) /\ k' == four_to_nat 8 (seq_to_four_LE s') /\
ixor k m == four_to_nat 8 (seq_to_four_LE t) /\
ixor k' m == four_to_nat 8 (seq_to_four_LE t') /\ equal (slice s 0 n) (slice s' 0 n))
(ensures
(forall (i: nat). {:pattern (index t i)\/(index t' i)} i < n ==> index t i == index t' i)) | [] | Vale.AES.GCTR.nat32_xor_bytewise | {
"file_name": "vale/code/crypto/aes/Vale.AES.GCTR.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} |
k: Vale.Def.Types_s.nat32 ->
k': Vale.Def.Types_s.nat32 ->
m: Vale.Def.Types_s.nat32 ->
s: Vale.Def.Words.Seq_s.seq4 Vale.Def.Types_s.nat8 ->
s': Vale.Def.Words.Seq_s.seq4 Vale.Def.Types_s.nat8 ->
t: Vale.Def.Words.Seq_s.seq4 Vale.Def.Types_s.nat8 ->
t': Vale.Def.Words.Seq_s.seq4 Vale.Def.Types_s.nat8 ->
n: Prims.nat
-> FStar.Pervasives.Lemma
(requires
n <= 4 /\ k == Vale.Def.Words.Four_s.four_to_nat 8 (Vale.Def.Words.Seq_s.seq_to_four_LE s) /\
k' == Vale.Def.Words.Four_s.four_to_nat 8 (Vale.Def.Words.Seq_s.seq_to_four_LE s') /\
Vale.Def.Types_s.ixor k m ==
Vale.Def.Words.Four_s.four_to_nat 8 (Vale.Def.Words.Seq_s.seq_to_four_LE t) /\
Vale.Def.Types_s.ixor k' m ==
Vale.Def.Words.Four_s.four_to_nat 8 (Vale.Def.Words.Seq_s.seq_to_four_LE t') /\
FStar.Seq.Base.equal (FStar.Seq.Base.slice s 0 n) (FStar.Seq.Base.slice s' 0 n))
(ensures
forall (i: Prims.nat). {:pattern FStar.Seq.Base.index t i\/FStar.Seq.Base.index t' i}
i < n ==> FStar.Seq.Base.index t i == FStar.Seq.Base.index t' i) | {
"end_col": 4,
"end_line": 557,
"start_col": 2,
"start_line": 545
} |
FStar.Pervasives.Lemma | val gctr_partial_to_full_advanced (icb_BE:quad32) (plain:seq quad32) (cipher:seq quad32) (alg:algorithm) (key:seq nat32) (num_bytes:nat) : Lemma
(requires
is_aes_key_LE alg key /\
1 <= num_bytes /\
num_bytes < 16 * length plain /\
16 * (length plain - 1) < num_bytes /\
num_bytes % 16 <> 0 /\ num_bytes < pow2_32 /\
length plain == length cipher /\
( let num_blocks = num_bytes / 16 in
slice cipher 0 num_blocks == gctr_encrypt_recursive icb_BE (slice plain 0 num_blocks) alg key 0 /\
index cipher num_blocks == gctr_encrypt_block icb_BE (index plain num_blocks) alg key num_blocks)
)
(ensures (
let plain_bytes = slice (le_seq_quad32_to_bytes plain) 0 num_bytes in
let cipher_bytes = slice (le_seq_quad32_to_bytes cipher) 0 num_bytes in
cipher_bytes == gctr_encrypt_LE icb_BE plain_bytes alg key
)) | [
{
"abbrev": false,
"full_module": "Vale.AES.Types_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words.Four_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words.Seq_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq.Properties",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Lib.Seqs",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCM_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCTR_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let gctr_partial_to_full_advanced (icb_BE:quad32) (plain:seq quad32) (cipher:seq quad32) (alg:algorithm) (key:seq nat32) (num_bytes:nat) =
gctr_encrypt_LE_reveal ();
let num_blocks = num_bytes / 16 in
let plain_bytes = slice (le_seq_quad32_to_bytes plain) 0 num_bytes in
let cipher_bytes = slice (le_seq_quad32_to_bytes cipher) 0 num_bytes in
step1 plain num_bytes;
let s = slice (le_seq_quad32_to_bytes plain) (num_blocks * 16) num_bytes in
let final_p = index plain num_blocks in
step2 s final_p icb_BE alg key num_blocks;
let num_extra = num_bytes % 16 in
let full_bytes_len = num_bytes - num_extra in
let full_blocks, final_block = split plain_bytes full_bytes_len in
assert (full_bytes_len % 16 == 0);
assert (length full_blocks == full_bytes_len);
let full_quads_LE = le_bytes_to_seq_quad32 full_blocks in
let final_quad_LE = le_bytes_to_quad32 (pad_to_128_bits final_block) in
let cipher_quads_LE = gctr_encrypt_recursive icb_BE full_quads_LE alg key 0 in
let final_cipher_quad_LE = gctr_encrypt_block icb_BE final_quad_LE alg key (full_bytes_len / 16) in
assert (cipher_quads_LE == slice cipher 0 num_blocks); // LHS quads
let cipher_bytes_full_LE = le_seq_quad32_to_bytes cipher_quads_LE in
let final_cipher_bytes_LE = slice (le_quad32_to_bytes final_cipher_quad_LE) 0 num_extra in
assert (le_seq_quad32_to_bytes cipher_quads_LE == le_seq_quad32_to_bytes (slice cipher 0 num_blocks)); // LHS bytes
assert (length s == num_extra);
let q_prefix = slice (le_quad32_to_bytes final_p) 0 num_extra in
le_seq_quad32_to_bytes_tail_prefix plain num_bytes;
assert (q_prefix == s);
assert(final_cipher_bytes_LE == slice (le_quad32_to_bytes (index cipher num_blocks)) 0 num_extra); // RHS bytes
le_seq_quad32_to_bytes_tail_prefix cipher num_bytes;
assert (slice (le_quad32_to_bytes (index cipher num_blocks)) 0 num_extra ==
slice (le_seq_quad32_to_bytes cipher) (num_blocks * 16) num_bytes);
slice_commutes_le_seq_quad32_to_bytes0 cipher num_blocks;
assert (le_seq_quad32_to_bytes (slice cipher 0 num_blocks) == slice (le_seq_quad32_to_bytes cipher) 0 (num_blocks * 16));
assert (slice (slice (le_seq_quad32_to_bytes cipher) (num_blocks * 16) (length cipher * 16)) 0 num_extra ==
slice (le_seq_quad32_to_bytes cipher) (num_blocks * 16) num_bytes);
slice_append_adds (le_seq_quad32_to_bytes cipher) (num_blocks * 16) num_bytes;
assert (slice (le_seq_quad32_to_bytes cipher) 0 (num_blocks * 16) @|
slice (le_seq_quad32_to_bytes cipher) (num_blocks * 16) num_bytes ==
slice (le_seq_quad32_to_bytes cipher) 0 num_bytes);
assert (cipher_bytes == (le_seq_quad32_to_bytes (slice cipher 0 num_blocks)) @| slice (le_quad32_to_bytes (index cipher num_blocks)) 0 num_extra);
() | val gctr_partial_to_full_advanced (icb_BE:quad32) (plain:seq quad32) (cipher:seq quad32) (alg:algorithm) (key:seq nat32) (num_bytes:nat) : Lemma
(requires
is_aes_key_LE alg key /\
1 <= num_bytes /\
num_bytes < 16 * length plain /\
16 * (length plain - 1) < num_bytes /\
num_bytes % 16 <> 0 /\ num_bytes < pow2_32 /\
length plain == length cipher /\
( let num_blocks = num_bytes / 16 in
slice cipher 0 num_blocks == gctr_encrypt_recursive icb_BE (slice plain 0 num_blocks) alg key 0 /\
index cipher num_blocks == gctr_encrypt_block icb_BE (index plain num_blocks) alg key num_blocks)
)
(ensures (
let plain_bytes = slice (le_seq_quad32_to_bytes plain) 0 num_bytes in
let cipher_bytes = slice (le_seq_quad32_to_bytes cipher) 0 num_bytes in
cipher_bytes == gctr_encrypt_LE icb_BE plain_bytes alg key
))
let gctr_partial_to_full_advanced
(icb_BE: quad32)
(plain cipher: seq quad32)
(alg: algorithm)
(key: seq nat32)
(num_bytes: nat)
= | false | null | true | gctr_encrypt_LE_reveal ();
let num_blocks = num_bytes / 16 in
let plain_bytes = slice (le_seq_quad32_to_bytes plain) 0 num_bytes in
let cipher_bytes = slice (le_seq_quad32_to_bytes cipher) 0 num_bytes in
step1 plain num_bytes;
let s = slice (le_seq_quad32_to_bytes plain) (num_blocks * 16) num_bytes in
let final_p = index plain num_blocks in
step2 s final_p icb_BE alg key num_blocks;
let num_extra = num_bytes % 16 in
let full_bytes_len = num_bytes - num_extra in
let full_blocks, final_block = split plain_bytes full_bytes_len in
assert (full_bytes_len % 16 == 0);
assert (length full_blocks == full_bytes_len);
let full_quads_LE = le_bytes_to_seq_quad32 full_blocks in
let final_quad_LE = le_bytes_to_quad32 (pad_to_128_bits final_block) in
let cipher_quads_LE = gctr_encrypt_recursive icb_BE full_quads_LE alg key 0 in
let final_cipher_quad_LE = gctr_encrypt_block icb_BE final_quad_LE alg key (full_bytes_len / 16) in
assert (cipher_quads_LE == slice cipher 0 num_blocks);
let cipher_bytes_full_LE = le_seq_quad32_to_bytes cipher_quads_LE in
let final_cipher_bytes_LE = slice (le_quad32_to_bytes final_cipher_quad_LE) 0 num_extra in
assert (le_seq_quad32_to_bytes cipher_quads_LE == le_seq_quad32_to_bytes (slice cipher 0 num_blocks)
);
assert (length s == num_extra);
let q_prefix = slice (le_quad32_to_bytes final_p) 0 num_extra in
le_seq_quad32_to_bytes_tail_prefix plain num_bytes;
assert (q_prefix == s);
assert (final_cipher_bytes_LE == slice (le_quad32_to_bytes (index cipher num_blocks)) 0 num_extra);
le_seq_quad32_to_bytes_tail_prefix cipher num_bytes;
assert (slice (le_quad32_to_bytes (index cipher num_blocks)) 0 num_extra ==
slice (le_seq_quad32_to_bytes cipher) (num_blocks * 16) num_bytes);
slice_commutes_le_seq_quad32_to_bytes0 cipher num_blocks;
assert (le_seq_quad32_to_bytes (slice cipher 0 num_blocks) ==
slice (le_seq_quad32_to_bytes cipher) 0 (num_blocks * 16));
assert (slice (slice (le_seq_quad32_to_bytes cipher) (num_blocks * 16) (length cipher * 16))
0
num_extra ==
slice (le_seq_quad32_to_bytes cipher) (num_blocks * 16) num_bytes);
slice_append_adds (le_seq_quad32_to_bytes cipher) (num_blocks * 16) num_bytes;
assert (slice (le_seq_quad32_to_bytes cipher) 0 (num_blocks * 16) @|
slice (le_seq_quad32_to_bytes cipher) (num_blocks * 16) num_bytes ==
slice (le_seq_quad32_to_bytes cipher) 0 num_bytes);
assert (cipher_bytes ==
(le_seq_quad32_to_bytes (slice cipher 0 num_blocks)) @|
slice (le_quad32_to_bytes (index cipher num_blocks)) 0 num_extra);
() | {
"checked_file": "Vale.AES.GCTR.fst.checked",
"dependencies": [
"Vale.Poly1305.Bitvectors.fsti.checked",
"Vale.Lib.Seqs.fsti.checked",
"Vale.Def.Words_s.fsti.checked",
"Vale.Def.Words.Seq_s.fsti.checked",
"Vale.Def.Words.Four_s.fsti.checked",
"Vale.Def.TypesNative_s.fst.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Opaque_s.fsti.checked",
"Vale.Arch.Types.fsti.checked",
"Vale.AES.Types_helpers.fsti.checked",
"Vale.AES.GCTR_s.fst.checked",
"Vale.AES.GCM_helpers.fsti.checked",
"Vale.AES.AES_s.fst.checked",
"prims.fst.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.Properties.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Classical.fsti.checked",
"FStar.Calc.fsti.checked"
],
"interface_file": true,
"source_file": "Vale.AES.GCTR.fst"
} | [
"lemma"
] | [
"Vale.Def.Types_s.quad32",
"FStar.Seq.Base.seq",
"Vale.AES.AES_common_s.algorithm",
"Vale.Def.Types_s.nat32",
"Prims.nat",
"Vale.Def.Types_s.nat8",
"Prims.unit",
"Prims._assert",
"Prims.eq2",
"FStar.Seq.Base.op_At_Bar",
"Vale.Def.Types_s.le_seq_quad32_to_bytes",
"FStar.Seq.Base.slice",
"Vale.Def.Types_s.le_quad32_to_bytes",
"FStar.Seq.Base.index",
"FStar.Mul.op_Star",
"Vale.Lib.Seqs.slice_append_adds",
"FStar.Seq.Base.length",
"Vale.Arch.Types.slice_commutes_le_seq_quad32_to_bytes0",
"Vale.AES.GCM_helpers.le_seq_quad32_to_bytes_tail_prefix",
"Vale.Def.Words_s.nat8",
"Prims.int",
"Vale.AES.GCTR_s.gctr_encrypt_block",
"Prims.op_Division",
"Vale.AES.GCTR_s.gctr_encrypt_recursive",
"Vale.Def.Types_s.le_bytes_to_quad32",
"Vale.AES.GCTR_s.pad_to_128_bits",
"Vale.Def.Types_s.le_bytes_to_seq_quad32",
"Prims.op_Modulus",
"FStar.Pervasives.Native.tuple2",
"FStar.Seq.Properties.split",
"Prims.op_Subtraction",
"Vale.AES.GCTR.step2",
"Vale.AES.GCTR.step1",
"Vale.AES.GCTR_s.gctr_encrypt_LE_reveal"
] | [] | module Vale.AES.GCTR
open Vale.Def.Opaque_s
open Vale.Def.Words_s
open Vale.Def.Words.Seq_s
open Vale.Def.Words.Four_s
open Vale.Def.Types_s
open Vale.Arch.Types
open FStar.Mul
open FStar.Seq
open Vale.AES.AES_s
open Vale.AES.GCTR_s
open Vale.AES.GCM_helpers
open FStar.Math.Lemmas
open Vale.Lib.Seqs
open Vale.AES.Types_helpers
#set-options "--z3rlimit 20 --max_fuel 1 --max_ifuel 0"
let lemma_counter_init x low64 low8 =
Vale.Poly1305.Bitvectors.lemma_bytes_and_mod1 low64;
Vale.Def.TypesNative_s.reveal_iand 64 low64 0xff;
assert (low8 == low64 % 256);
lo64_reveal ();
assert_norm (pow2_norm 32 == pow2_32); // OBSERVE
assert (low64 == x.lo0 + x.lo1 * pow2_32); // OBSERVE
assert (low64 % 256 == x.lo0 % 256);
()
let gctr_encrypt_block_offset (icb_BE:quad32) (plain_LE:quad32) (alg:algorithm) (key:seq nat32) (i:int) =
()
let gctr_encrypt_empty (icb_BE:quad32) (plain_LE cipher_LE:seq quad32) (alg:algorithm) (key:seq nat32) =
reveal_opaque (`%le_bytes_to_seq_quad32) le_bytes_to_seq_quad32;
gctr_encrypt_LE_reveal ();
let plain = slice (le_seq_quad32_to_bytes plain_LE) 0 0 in
let cipher = slice (le_seq_quad32_to_bytes cipher_LE) 0 0 in
assert (plain == empty);
assert (cipher == empty);
assert (length plain == 0);
assert (make_gctr_plain_LE plain == empty);
let num_extra = (length (make_gctr_plain_LE plain)) % 16 in
assert (num_extra == 0);
let plain_quads_LE = le_bytes_to_seq_quad32 plain in
let cipher_quads_LE = gctr_encrypt_recursive icb_BE plain_quads_LE alg key 0 in
assert (equal plain_quads_LE empty); // OBSERVE
assert (plain_quads_LE == empty);
assert (cipher_quads_LE == empty);
assert (equal (le_seq_quad32_to_bytes cipher_quads_LE) empty); // OBSERVEs
()
let gctr_partial_opaque_init alg plain cipher key icb =
gctr_partial_reveal ();
()
#restart-solver
let lemma_gctr_partial_append alg b1 b2 p1 c1 p2 c2 key icb1 icb2 =
gctr_partial_reveal ();
()
let gctr_partial_opaque_ignores_postfix alg bound plain plain' cipher cipher' key icb =
gctr_partial_reveal ();
// OBSERVE:
assert (forall i . 0 <= i /\ i < bound ==> index plain i == index (slice plain 0 bound) i);
assert (forall i . 0 <= i /\ i < bound ==> index plain' i == index (slice plain' 0 bound) i);
assert (forall i . 0 <= i /\ i < bound ==> index cipher i == index (slice cipher 0 bound) i);
assert (forall i . 0 <= i /\ i < bound ==> index cipher' i == index (slice cipher' 0 bound) i);
()
let gctr_partial_extend6 (alg:algorithm) (bound:nat) (plain cipher:seq quad32) (key:seq nat32) (icb:quad32)
=
gctr_partial_reveal ();
()
(*
let rec seq_map_i_indexed' (#a:Type) (#b:Type) (f:int->a->b) (s:seq a) (i:int) :
Tot (s':seq b { length s' == length s /\
(forall j . {:pattern index s' j} 0 <= j /\ j < length s ==> index s' j == f (i + j) (index s j))
})
(decreases (length s))
=
if length s = 0 then empty
else cons (f i (head s)) (seq_map_i_indexed f (tail s) (i + 1))
let rec test (icb_BE:quad32) (plain_LE:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (i:int) :
Lemma (ensures
(let gctr_encrypt_block_curried (j:int) (p:quad32) = gctr_encrypt_block icb_BE p alg key j in
gctr_encrypt_recursive icb_BE plain_LE alg key i == seq_map_i_indexed' gctr_encrypt_block_curried plain_LE i))
(decreases (length plain_LE))
=
let gctr_encrypt_block_curried (j:int) (p:quad32) = gctr_encrypt_block icb_BE p alg key j in
let g = gctr_encrypt_recursive icb_BE plain_LE alg key i in
let s = seq_map_i_indexed' gctr_encrypt_block_curried plain_LE i in
if length plain_LE = 0 then (
assert(equal (g) (s));
()
) else (
test icb_BE (tail plain_LE) alg key (i+1);
assert (gctr_encrypt_recursive icb_BE (tail plain_LE) alg key (i+1) == seq_map_i_indexed' gctr_encrypt_block_curried (tail plain_LE) (i+1))
)
*)
let rec gctr_encrypt_recursive_length (icb:quad32) (plain:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (i:int) : Lemma
(requires True)
(ensures length (gctr_encrypt_recursive icb plain alg key i) == length plain)
(decreases %[length plain])
[SMTPat (length (gctr_encrypt_recursive icb plain alg key i))]
=
if length plain = 0 then ()
else gctr_encrypt_recursive_length icb (tail plain) alg key (i + 1)
#reset-options "--z3rlimit 40"
let gctr_encrypt_length (icb_BE:quad32) (plain:gctr_plain_LE)
(alg:algorithm) (key:aes_key_LE alg) :
Lemma(length (gctr_encrypt_LE icb_BE plain alg key) == length plain)
[SMTPat (length (gctr_encrypt_LE icb_BE plain alg key))]
=
reveal_opaque (`%le_bytes_to_seq_quad32) le_bytes_to_seq_quad32;
gctr_encrypt_LE_reveal ();
let num_extra = (length plain) % 16 in
let result = gctr_encrypt_LE icb_BE plain alg key in
if num_extra = 0 then (
let plain_quads_LE = le_bytes_to_seq_quad32 plain in
gctr_encrypt_recursive_length icb_BE plain_quads_LE alg key 0
) else (
let full_bytes_len = (length plain) - num_extra in
let full_blocks, final_block = split plain full_bytes_len in
let full_quads_LE = le_bytes_to_seq_quad32 full_blocks in
let final_quad_LE = le_bytes_to_quad32 (pad_to_128_bits final_block) in
let cipher_quads_LE = gctr_encrypt_recursive icb_BE full_quads_LE alg key 0 in
let final_cipher_quad_LE = gctr_encrypt_block icb_BE final_quad_LE alg key (full_bytes_len / 16) in
let cipher_bytes_full_LE = le_seq_quad32_to_bytes cipher_quads_LE in
let final_cipher_bytes_LE = slice (le_quad32_to_bytes final_cipher_quad_LE) 0 num_extra in
gctr_encrypt_recursive_length icb_BE full_quads_LE alg key 0;
assert (length result == length cipher_bytes_full_LE + length final_cipher_bytes_LE);
assert (length cipher_quads_LE == length full_quads_LE);
assert (length cipher_bytes_full_LE == 16 * length cipher_quads_LE);
assert (16 * length full_quads_LE == length full_blocks);
assert (length cipher_bytes_full_LE == length full_blocks);
()
)
#reset-options
let rec gctr_indexed_helper (icb:quad32) (plain:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (i:int) : Lemma
(requires True)
(ensures (let cipher = gctr_encrypt_recursive icb plain alg key i in
length cipher == length plain /\
(forall j . {:pattern index cipher j} 0 <= j /\ j < length plain ==>
index cipher j == quad32_xor (index plain j) (aes_encrypt_BE alg key (inc32 icb (i + j)) ))))
(decreases %[length plain])
=
if length plain = 0 then ()
else
let tl = tail plain in
let cipher = gctr_encrypt_recursive icb plain alg key i in
let r_cipher = gctr_encrypt_recursive icb tl alg key (i+1) in
let helper (j:int) :
Lemma ((0 <= j /\ j < length plain) ==> (index cipher j == quad32_xor (index plain j) (aes_encrypt_BE alg key (inc32 icb (i + j)) )))
=
aes_encrypt_LE_reveal ();
if 0 < j && j < length plain then (
gctr_indexed_helper icb tl alg key (i+1);
assert(index r_cipher (j-1) == quad32_xor (index tl (j-1)) (aes_encrypt_BE alg key (inc32 icb (i + 1 + j - 1)) )) // OBSERVE
) else ()
in
FStar.Classical.forall_intro helper
let gctr_indexed (icb:quad32) (plain:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (cipher:seq quad32) : Lemma
(requires length cipher == length plain /\
(forall i . {:pattern index cipher i} 0 <= i /\ i < length cipher ==>
index cipher i == quad32_xor (index plain i) (aes_encrypt_BE alg key (inc32 icb i) )))
(ensures cipher == gctr_encrypt_recursive icb plain alg key 0)
=
gctr_indexed_helper icb plain alg key 0;
let c = gctr_encrypt_recursive icb plain alg key 0 in
assert(equal cipher c) // OBSERVE: Invoke extensionality lemmas
let gctr_partial_completed (alg:algorithm) (plain cipher:seq quad32) (key:seq nat32) (icb:quad32) =
gctr_indexed icb plain alg key cipher;
()
let gctr_partial_opaque_completed (alg:algorithm) (plain cipher:seq quad32) (key:seq nat32) (icb:quad32) : Lemma
(requires
is_aes_key_LE alg key /\
length plain == length cipher /\
length plain < pow2_32 /\
gctr_partial alg (length cipher) plain cipher key icb
)
(ensures cipher == gctr_encrypt_recursive icb plain alg key 0)
=
gctr_partial_reveal ();
gctr_partial_completed alg plain cipher key icb
let gctr_partial_to_full_basic (icb_BE:quad32) (plain:seq quad32) (alg:algorithm) (key:seq nat32) (cipher:seq quad32) =
gctr_encrypt_LE_reveal ();
let p = le_seq_quad32_to_bytes plain in
assert (length p % 16 == 0);
let plain_quads_LE = le_bytes_to_seq_quad32 p in
let cipher_quads_LE = gctr_encrypt_recursive icb_BE plain_quads_LE alg key 0 in
let cipher_bytes = le_seq_quad32_to_bytes cipher_quads_LE in
le_bytes_to_seq_quad32_to_bytes plain;
()
(*
Want to show that:
slice (le_seq_quad32_to_bytes (buffer128_as_seq(mem, out_b))) 0 num_bytes
==
gctr_encrypt_LE icb_BE (slice (le_seq_quad32_to_bytes (buffer128_as_seq(mem, in_b))) 0 num_bytes) ...
We know that
slice (buffer128_as_seq(mem, out_b) 0 num_blocks
==
gctr_encrypt_recursive icb_BE (slice buffer128_as_seq(mem, in_b) 0 num_blocks) ...
And we know that:
get_mem out_b num_blocks
==
gctr_encrypt_block(icb_BE, (get_mem inb num_blocks), alg, key, num_blocks);
Internally gctr_encrypt_LE will compute:
full_blocks, final_block = split (slice (le_seq_quad32_to_bytes (buffer128_as_seq(mem, in_b))) 0 num_bytes) (num_blocks * 16)
We'd like to show that
Step1: le_bytes_to_seq_quad32 full_blocks == slice buffer128_as_seq(mem, in_b) 0 num_blocks
and
Step2: final_block == slice (le_quad32_to_bytes (get_mem inb num_blocks)) 0 num_extra
Then we need to break down the byte-level effects of gctr_encrypt_block to show that even though the
padded version of final_block differs from (get_mem inb num_blocks), after we slice it at the end,
we end up with the same value
*)
let step1 (p:seq quad32) (num_bytes:nat{ num_bytes < 16 * length p }) : Lemma
(let num_extra = num_bytes % 16 in
let num_blocks = num_bytes / 16 in
let full_blocks, final_block = split (slice (le_seq_quad32_to_bytes p) 0 num_bytes) (num_blocks * 16) in
let full_quads_LE = le_bytes_to_seq_quad32 full_blocks in
let p_prefix = slice p 0 num_blocks in
p_prefix == full_quads_LE)
=
let num_extra = num_bytes % 16 in
let num_blocks = num_bytes / 16 in
let full_blocks, final_block = split (slice (le_seq_quad32_to_bytes p) 0 num_bytes) (num_blocks * 16) in
let full_quads_LE = le_bytes_to_seq_quad32 full_blocks in
let p_prefix = slice p 0 num_blocks in
assert (length full_blocks == num_blocks * 16);
assert (full_blocks == slice (slice (le_seq_quad32_to_bytes p) 0 num_bytes) 0 (num_blocks * 16));
assert (full_blocks == slice (le_seq_quad32_to_bytes p) 0 (num_blocks * 16));
slice_commutes_le_seq_quad32_to_bytes0 p num_blocks;
assert (full_blocks == le_seq_quad32_to_bytes (slice p 0 num_blocks));
le_bytes_to_seq_quad32_to_bytes (slice p 0 num_blocks);
assert (full_quads_LE == (slice p 0 num_blocks));
()
#reset-options "--smtencoding.elim_box true --z3rlimit 30"
let lemma_slice_orig_index (#a:Type) (s s':seq a) (m n:nat) : Lemma
(requires length s == length s' /\ m <= n /\ n <= length s /\ slice s m n == slice s' m n)
(ensures (forall (i:int).{:pattern (index s i) \/ (index s' i)} m <= i /\ i < n ==> index s i == index s' i))
=
let aux (i:nat{m <= i /\ i < n}) : Lemma (index s i == index s' i) =
lemma_index_slice s m n (i - m);
lemma_index_slice s' m n (i - m)
in Classical.forall_intro aux
let lemma_ishl_32 (x:nat32) (k:nat) : Lemma
(ensures ishl #pow2_32 x k == x * pow2 k % pow2_32)
=
Vale.Def.TypesNative_s.reveal_ishl 32 x k;
FStar.UInt.shift_left_value_lemma #32 x k;
()
let lemma_ishl_ixor_32 (x y:nat32) (k:nat) : Lemma
(ensures ishl #pow2_32 (ixor x y) k == ixor (ishl x k) (ishl y k))
=
Vale.Def.TypesNative_s.reveal_ishl 32 x k;
Vale.Def.TypesNative_s.reveal_ishl 32 y k;
Vale.Def.TypesNative_s.reveal_ishl 32 (ixor x y) k;
Vale.Def.TypesNative_s.reveal_ixor 32 x y;
Vale.Def.TypesNative_s.reveal_ixor 32 (ishl x k) (ishl y k);
FStar.UInt.shift_left_logxor_lemma #32 x y k;
()
unfold let pow2_24 = 0x1000000
let nat24 = natN pow2_24
let nat32_xor_bytewise_1_helper1 (x0 x0':nat8) (x1 x1':nat24) (x x':nat32) : Lemma
(requires
x == x0 + 0x100 * x1 /\
x' == x0' + 0x100 * x1' /\
x * 0x1000000 % 0x100000000 == x' * 0x1000000 % 0x100000000
)
(ensures x0 == x0')
=
()
let nat32_xor_bytewise_2_helper1 (x0 x0' x1 x1':nat16) (x x':nat32) : Lemma
(requires
x == x0 + 0x10000 * x1 /\
x' == x0' + 0x10000 * x1' /\
x * 0x10000 % 0x100000000 == x' * 0x10000 % 0x100000000
)
(ensures x0 == x0')
=
()
let nat32_xor_bytewise_3_helper1 (x0 x0':nat24) (x1 x1':nat8) (x x':nat32) : Lemma
(requires
x == x0 + 0x1000000 * x1 /\
x' == x0' + 0x1000000 * x1' /\
x * 0x100 % 0x100000000 == x' * 0x100 % 0x100000000
)
(ensures x0 == x0')
=
()
let nat32_xor_bytewise_1_helper2 (x x':nat32) (t t':four nat8) : Lemma
(requires
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
x * 0x1000000 % 0x100000000 == x' * 0x1000000 % 0x100000000
)
(ensures t.lo0 == t'.lo0)
=
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
let t123 = t1 + 0x100 * t2 + 0x10000 * t3 in
let t123' = t1' + 0x100 * t2' + 0x10000 * t3' in
assert_norm (four_to_nat 8 t == four_to_nat_unfold 8 t );
assert_norm (four_to_nat 8 t' == four_to_nat_unfold 8 t');
nat32_xor_bytewise_1_helper1 t0 t0' t123 t123' x x';
()
let nat32_xor_bytewise_2_helper2 (x x':nat32) (t t':four nat8) : Lemma
(requires
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
x * 0x10000 % 0x100000000 == x' * 0x10000 % 0x100000000
)
(ensures t.lo0 == t'.lo0 /\ t.lo1 == t'.lo1)
=
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
let t01 = t0 + 0x100 * t1 in
let t23 = t2 + 0x100 * t3 in
let t01' = t0' + 0x100 * t1' in
let t23' = t2' + 0x100 * t3' in
assert_norm (four_to_nat 8 t == four_to_nat_unfold 8 t );
assert_norm (four_to_nat 8 t' == four_to_nat_unfold 8 t');
nat32_xor_bytewise_2_helper1 t01 t01' t23 t23' x x';
()
let nat32_xor_bytewise_3_helper2 (x x':nat32) (t t':four nat8) : Lemma
(requires
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
x * 0x100 % 0x100000000 == x' * 0x100 % 0x100000000
)
(ensures t.lo0 == t'.lo0 /\ t.lo1 == t'.lo1 /\ t.hi2 == t'.hi2)
=
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
let t012 = t0 + 0x100 * t1 + 0x10000 * t2 in
let t012' = t0' + 0x100 * t1' + 0x10000 * t2' in
assert_norm (four_to_nat 8 t == four_to_nat_unfold 8 t );
assert_norm (four_to_nat 8 t' == four_to_nat_unfold 8 t');
nat32_xor_bytewise_3_helper1 t012 t012' t3 t3' x x';
()
let nat32_xor_bytewise_1_helper3 (k k':nat32) (s s':four nat8) : Lemma
(requires
k == four_to_nat 8 s /\
k' == four_to_nat 8 s' /\
s.lo0 == s'.lo0
)
(ensures k * 0x1000000 % 0x100000000 == k' * 0x1000000 % 0x100000000)
=
let Mkfour _ _ _ _ = s in
let Mkfour _ _ _ _ = s' in
assert_norm (four_to_nat 8 s == four_to_nat_unfold 8 s );
assert_norm (four_to_nat 8 s' == four_to_nat_unfold 8 s');
()
let nat32_xor_bytewise_2_helper3 (k k':nat32) (s s':four nat8) : Lemma
(requires
k == four_to_nat 8 s /\
k' == four_to_nat 8 s' /\
s.lo0 == s'.lo0 /\ s.lo1 == s'.lo1
)
(ensures k * 0x10000 % 0x100000000 == k' * 0x10000 % 0x100000000)
=
let Mkfour _ _ _ _ = s in
let Mkfour _ _ _ _ = s' in
assert_norm (four_to_nat 8 s == four_to_nat_unfold 8 s );
assert_norm (four_to_nat 8 s' == four_to_nat_unfold 8 s');
()
let nat32_xor_bytewise_3_helper3 (k k':nat32) (s s':four nat8) : Lemma
(requires
k == four_to_nat 8 s /\
k' == four_to_nat 8 s' /\
s.lo0 == s'.lo0 /\ s.lo1 == s'.lo1 /\ s.hi2 == s'.hi2
)
(ensures k * 0x100 % 0x100000000 == k' * 0x100 % 0x100000000)
=
let Mkfour _ _ _ _ = s in
let Mkfour _ _ _ _ = s' in
assert_norm (four_to_nat 8 s == four_to_nat_unfold 8 s );
assert_norm (four_to_nat 8 s' == four_to_nat_unfold 8 s');
()
let nat32_xor_bytewise_1 (k k' x x' m:nat32) (s s' t t':four nat8) : Lemma
(requires
k == four_to_nat 8 s /\
k' == four_to_nat 8 s' /\
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
ixor k m == x /\
ixor k' m == x' /\
s.lo0 == s'.lo0
)
(ensures t.lo0 == t'.lo0)
=
let Mkfour s0 s1 s2 s3 = s in
let Mkfour s0' s1' s2' s3' = s' in
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
nat32_xor_bytewise_1_helper3 k k' s s';
lemma_ishl_32 k 24;
lemma_ishl_32 k' 24;
lemma_ishl_32 x 24;
lemma_ishl_32 x' 24;
lemma_ishl_ixor_32 k m 24;
lemma_ishl_ixor_32 k' m 24;
assert_norm (pow2 24 == pow2_24);
nat32_xor_bytewise_1_helper2 x x' t t';
()
let nat32_xor_bytewise_2 (k k' x x' m:nat32) (s s' t t':four nat8) : Lemma
(requires
k == four_to_nat 8 s /\
k' == four_to_nat 8 s' /\
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
ixor k m == x /\
ixor k' m == x' /\
s.lo0 == s'.lo0 /\ s.lo1 == s'.lo1
)
(ensures t.lo0 == t'.lo0 /\ t.lo1 == t'.lo1)
=
let Mkfour s0 s1 s2 s3 = s in
let Mkfour s0' s1' s2' s3' = s' in
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
nat32_xor_bytewise_2_helper3 k k' s s';
lemma_ishl_32 k 16;
lemma_ishl_32 k' 16;
lemma_ishl_32 x 16;
lemma_ishl_32 x' 16;
lemma_ishl_ixor_32 k m 16;
lemma_ishl_ixor_32 k' m 16;
// assert (ishl #pow2_32 k 16 == k * 0x10000 % 0x100000000);
// assert (ishl #pow2_32 k' 16 == k' * 0x10000 % 0x100000000);
// assert (ishl #pow2_32 x 16 == x * 0x10000 % 0x100000000);
// assert (ishl #pow2_32 x' 16 == x' * 0x10000 % 0x100000000);
// assert (ishl #pow2_32 x 16 == ixor (ishl k 16) (ishl m 16));
// assert (ishl #pow2_32 x' 16 == ixor (ishl k' 16) (ishl m 16));
// assert (x * 0x10000 % 0x100000000 == ixor (k * 0x10000 % 0x100000000) (ishl m 16));
// assert (x' * 0x10000 % 0x100000000 == ixor (k' * 0x10000 % 0x100000000) (ishl m 16));
nat32_xor_bytewise_2_helper2 x x' t t';
()
let nat32_xor_bytewise_3 (k k' x x' m:nat32) (s s' t t':four nat8) : Lemma
(requires
k == four_to_nat 8 s /\
k' == four_to_nat 8 s' /\
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
ixor k m == x /\
ixor k' m == x' /\
s.lo0 == s'.lo0 /\ s.lo1 == s'.lo1 /\ s.hi2 == s'.hi2
)
(ensures t.lo0 == t'.lo0 /\ t.lo1 == t'.lo1 /\ t.hi2 == t'.hi2)
=
let Mkfour s0 s1 s2 s3 = s in
let Mkfour s0' s1' s2' s3' = s' in
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
nat32_xor_bytewise_3_helper3 k k' s s';
lemma_ishl_32 k 8;
lemma_ishl_32 k' 8;
lemma_ishl_32 x 8;
lemma_ishl_32 x' 8;
lemma_ishl_ixor_32 k m 8;
lemma_ishl_ixor_32 k' m 8;
nat32_xor_bytewise_3_helper2 x x' t t';
()
#reset-options "--z3rlimit 50 --smtencoding.nl_arith_repr boxwrap --smtencoding.l_arith_repr boxwrap"
let nat32_xor_bytewise_4 (k k' x x' m:nat32) (s s' t t':four nat8) : Lemma
(requires
k == four_to_nat 8 s /\
k' == four_to_nat 8 s' /\
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
ixor k m == x /\
ixor k' m == x' /\
s == s'
)
(ensures t == t')
=
let Mkfour s0 s1 s2 s3 = s in
let Mkfour s0' s1' s2' s3' = s' in
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
assert_norm (four_to_nat 8 t' == four_to_nat_unfold 8 t');
assert_norm (four_to_nat 8 t == four_to_nat_unfold 8 t );
()
#reset-options
let nat32_xor_bytewise (k k' m:nat32) (s s' t t':seq4 nat8) (n:nat) : Lemma
(requires
n <= 4 /\
k == four_to_nat 8 (seq_to_four_LE s) /\
k' == four_to_nat 8 (seq_to_four_LE s') /\
ixor k m == four_to_nat 8 (seq_to_four_LE t) /\
ixor k' m == four_to_nat 8 (seq_to_four_LE t') /\
equal (slice s 0 n) (slice s' 0 n)
)
// (ensures equal (slice t 0 n) (slice t' 0 n))
(ensures (forall (i:nat).{:pattern (index t i) \/ (index t' i)} i < n ==> index t i == index t' i))
=
assert (n > 0 ==> index (slice s 0 n) 0 == index (slice s' 0 n) 0);
assert (n > 1 ==> index (slice s 0 n) 1 == index (slice s' 0 n) 1);
assert (n > 2 ==> index (slice s 0 n) 2 == index (slice s' 0 n) 2);
assert (n > 3 ==> index (slice s 0 n) 3 == index (slice s' 0 n) 3);
let x = ixor k m in
let x' = ixor k' m in
if n = 1 then nat32_xor_bytewise_1 k k' x x' m (seq_to_four_LE s) (seq_to_four_LE s') (seq_to_four_LE t) (seq_to_four_LE t');
if n = 2 then nat32_xor_bytewise_2 k k' x x' m (seq_to_four_LE s) (seq_to_four_LE s') (seq_to_four_LE t) (seq_to_four_LE t');
if n = 3 then nat32_xor_bytewise_3 k k' x x' m (seq_to_four_LE s) (seq_to_four_LE s') (seq_to_four_LE t) (seq_to_four_LE t');
if n = 4 then nat32_xor_bytewise_4 k k' x x' m (seq_to_four_LE s) (seq_to_four_LE s') (seq_to_four_LE t) (seq_to_four_LE t');
assert (equal (slice t 0 n) (slice t' 0 n));
lemma_slice_orig_index t t' 0 n;
()
let quad32_xor_bytewise (q q' r:quad32) (n:nat{ n <= 16 }) : Lemma
(requires (let q_bytes = le_quad32_to_bytes q in
let q'_bytes = le_quad32_to_bytes q' in
slice q_bytes 0 n == slice q'_bytes 0 n))
(ensures (let q_bytes = le_quad32_to_bytes q in
let q'_bytes = le_quad32_to_bytes q' in
let qr_bytes = le_quad32_to_bytes (quad32_xor q r) in
let q'r_bytes = le_quad32_to_bytes (quad32_xor q' r) in
slice qr_bytes 0 n == slice q'r_bytes 0 n))
=
let s = le_quad32_to_bytes q in
let s' = le_quad32_to_bytes q' in
let t = le_quad32_to_bytes (quad32_xor q r) in
let t' = le_quad32_to_bytes (quad32_xor q' r) in
lemma_slices_le_quad32_to_bytes q;
lemma_slices_le_quad32_to_bytes q';
lemma_slices_le_quad32_to_bytes (quad32_xor q r);
lemma_slices_le_quad32_to_bytes (quad32_xor q' r);
lemma_slice_orig_index s s' 0 n;
quad32_xor_reveal ();
reverse_bytes_nat32_reveal ();
if n < 4 then nat32_xor_bytewise q.lo0 q'.lo0 r.lo0 (slice s 0 4) (slice s' 0 4) (slice t 0 4) (slice t' 0 4) n
else
(
nat32_xor_bytewise q.lo0 q'.lo0 r.lo0 (slice s 0 4) (slice s' 0 4) (slice t 0 4) (slice t' 0 4) 4;
if n < 8 then nat32_xor_bytewise q.lo1 q'.lo1 r.lo1 (slice s 4 8) (slice s' 4 8) (slice t 4 8) (slice t' 4 8) (n - 4)
else
(
nat32_xor_bytewise q.lo1 q'.lo1 r.lo1 (slice s 4 8) (slice s' 4 8) (slice t 4 8) (slice t' 4 8) 4;
if n < 12 then nat32_xor_bytewise q.hi2 q'.hi2 r.hi2 (slice s 8 12) (slice s' 8 12) (slice t 8 12) (slice t' 8 12) (n - 8)
else
(
nat32_xor_bytewise q.hi2 q'.hi2 r.hi2 (slice s 8 12) (slice s' 8 12) (slice t 8 12) (slice t' 8 12) 4;
nat32_xor_bytewise q.hi3 q'.hi3 r.hi3 (slice s 12 16) (slice s' 12 16) (slice t 12 16) (slice t' 12 16) (n - 12);
()
)
)
);
assert (equal (slice t 0 n) (slice t' 0 n));
()
let slice_pad_to_128_bits (s:seq nat8 { 0 < length s /\ length s < 16 }) :
Lemma(slice (pad_to_128_bits s) 0 (length s) == s)
=
assert (length s % 16 == length s);
assert (equal s (slice (pad_to_128_bits s) 0 (length s)));
()
let step2 (s:seq nat8 { 0 < length s /\ length s < 16 }) (q:quad32) (icb_BE:quad32) (alg:algorithm) (key:aes_key_LE alg) (i:int):
Lemma(let q_bytes = le_quad32_to_bytes q in
let q_bytes_prefix = slice q_bytes 0 (length s) in
let q_cipher = gctr_encrypt_block icb_BE q alg key i in
let q_cipher_bytes = slice (le_quad32_to_bytes q_cipher) 0 (length s) in
let s_quad = le_bytes_to_quad32 (pad_to_128_bits s) in
let s_cipher = gctr_encrypt_block icb_BE s_quad alg key i in
let s_cipher_bytes = slice (le_quad32_to_bytes s_cipher) 0 (length s) in
s == q_bytes_prefix ==> s_cipher_bytes == q_cipher_bytes)
=
let q_bytes = le_quad32_to_bytes q in
let q_bytes_prefix = slice q_bytes 0 (length s) in
let q_cipher = gctr_encrypt_block icb_BE q alg key i in
let q_cipher_bytes = slice (le_quad32_to_bytes q_cipher) 0 (length s) in
let s_quad = le_bytes_to_quad32 (pad_to_128_bits s) in
let s_cipher = gctr_encrypt_block icb_BE s_quad alg key i in
let s_cipher_bytes = slice (le_quad32_to_bytes s_cipher) 0 (length s) in
let enc_ctr = aes_encrypt_LE alg key (reverse_bytes_quad32 (inc32 icb_BE i)) in
let icb_LE = reverse_bytes_quad32 (inc32 icb_BE i) in
if s = q_bytes_prefix then (
// s_cipher_bytes = slice (le_quad32_to_bytes s_cipher) 0 (length s)
// = slice (le_quad32_to_bytes (gctr_encrypt_block icb_BE s_quad alg key i)) 0 (length s)
// = slice (le_quad32_to_bytes (gctr_encrypt_block icb_BE (le_bytes_to_quad32 (pad_to_128_bits s)) alg key i)) 0 (length s)
// q_cipher_bytes = gctr_encrypt_block icb_BE q alg key i
le_quad32_to_bytes_to_quad32 (pad_to_128_bits s);
slice_pad_to_128_bits s;
quad32_xor_bytewise q (le_bytes_to_quad32 (pad_to_128_bits s)) (aes_encrypt_LE alg key icb_LE) (length s);
//assert (equal s_cipher_bytes q_cipher_bytes);
()
) else
();
()
#reset-options "--z3rlimit 30"
open FStar.Seq.Properties | false | false | Vale.AES.GCTR.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 30,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val gctr_partial_to_full_advanced (icb_BE:quad32) (plain:seq quad32) (cipher:seq quad32) (alg:algorithm) (key:seq nat32) (num_bytes:nat) : Lemma
(requires
is_aes_key_LE alg key /\
1 <= num_bytes /\
num_bytes < 16 * length plain /\
16 * (length plain - 1) < num_bytes /\
num_bytes % 16 <> 0 /\ num_bytes < pow2_32 /\
length plain == length cipher /\
( let num_blocks = num_bytes / 16 in
slice cipher 0 num_blocks == gctr_encrypt_recursive icb_BE (slice plain 0 num_blocks) alg key 0 /\
index cipher num_blocks == gctr_encrypt_block icb_BE (index plain num_blocks) alg key num_blocks)
)
(ensures (
let plain_bytes = slice (le_seq_quad32_to_bytes plain) 0 num_bytes in
let cipher_bytes = slice (le_seq_quad32_to_bytes cipher) 0 num_bytes in
cipher_bytes == gctr_encrypt_LE icb_BE plain_bytes alg key
)) | [] | Vale.AES.GCTR.gctr_partial_to_full_advanced | {
"file_name": "vale/code/crypto/aes/Vale.AES.GCTR.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} |
icb_BE: Vale.Def.Types_s.quad32 ->
plain: FStar.Seq.Base.seq Vale.Def.Types_s.quad32 ->
cipher: FStar.Seq.Base.seq Vale.Def.Types_s.quad32 ->
alg: Vale.AES.AES_common_s.algorithm ->
key: FStar.Seq.Base.seq Vale.Def.Types_s.nat32 ->
num_bytes: Prims.nat
-> FStar.Pervasives.Lemma
(requires
Vale.AES.AES_s.is_aes_key_LE alg key /\ 1 <= num_bytes /\
num_bytes < 16 * FStar.Seq.Base.length plain /\
16 * (FStar.Seq.Base.length plain - 1) < num_bytes /\ num_bytes % 16 <> 0 /\
num_bytes < Vale.Def.Words_s.pow2_32 /\
FStar.Seq.Base.length plain == FStar.Seq.Base.length cipher /\
(let num_blocks = num_bytes / 16 in
FStar.Seq.Base.slice cipher 0 num_blocks ==
Vale.AES.GCTR_s.gctr_encrypt_recursive icb_BE
(FStar.Seq.Base.slice plain 0 num_blocks)
alg
key
0 /\
FStar.Seq.Base.index cipher num_blocks ==
Vale.AES.GCTR_s.gctr_encrypt_block icb_BE
(FStar.Seq.Base.index plain num_blocks)
alg
key
num_blocks))
(ensures
(let plain_bytes =
FStar.Seq.Base.slice (Vale.Def.Types_s.le_seq_quad32_to_bytes plain) 0 num_bytes
in
let cipher_bytes =
FStar.Seq.Base.slice (Vale.Def.Types_s.le_seq_quad32_to_bytes cipher) 0 num_bytes
in
cipher_bytes == Vale.AES.GCTR_s.gctr_encrypt_LE icb_BE plain_bytes alg key)) | {
"end_col": 4,
"end_line": 692,
"start_col": 2,
"start_line": 646
} |
FStar.Pervasives.Lemma | val gctr_partial_extend6 (alg:algorithm) (bound:nat) (plain cipher:seq quad32) (key:seq nat32) (icb:quad32) : Lemma
(requires length plain >= bound + 6 /\
length cipher >= bound + 6 /\
is_aes_key_LE alg key /\
bound + 6 < pow2_32 /\
gctr_partial alg bound plain cipher key icb /\
index cipher (bound + 0) == quad32_xor (index plain (bound + 0)) (aes_encrypt_BE alg key (inc32lite icb (bound + 0))) /\
index cipher (bound + 1) == quad32_xor (index plain (bound + 1)) (aes_encrypt_BE alg key (inc32lite icb (bound + 1))) /\
index cipher (bound + 2) == quad32_xor (index plain (bound + 2)) (aes_encrypt_BE alg key (inc32lite icb (bound + 2))) /\
index cipher (bound + 3) == quad32_xor (index plain (bound + 3)) (aes_encrypt_BE alg key (inc32lite icb (bound + 3))) /\
index cipher (bound + 4) == quad32_xor (index plain (bound + 4)) (aes_encrypt_BE alg key (inc32lite icb (bound + 4))) /\
index cipher (bound + 5) == quad32_xor (index plain (bound + 5)) (aes_encrypt_BE alg key (inc32lite icb (bound + 5)))
)
(ensures gctr_partial alg (bound + 6) plain cipher key icb) | [
{
"abbrev": false,
"full_module": "Vale.AES.Types_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words.Four_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words.Seq_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Lib.Seqs",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCM_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCTR_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let gctr_partial_extend6 (alg:algorithm) (bound:nat) (plain cipher:seq quad32) (key:seq nat32) (icb:quad32)
=
gctr_partial_reveal ();
() | val gctr_partial_extend6 (alg:algorithm) (bound:nat) (plain cipher:seq quad32) (key:seq nat32) (icb:quad32) : Lemma
(requires length plain >= bound + 6 /\
length cipher >= bound + 6 /\
is_aes_key_LE alg key /\
bound + 6 < pow2_32 /\
gctr_partial alg bound plain cipher key icb /\
index cipher (bound + 0) == quad32_xor (index plain (bound + 0)) (aes_encrypt_BE alg key (inc32lite icb (bound + 0))) /\
index cipher (bound + 1) == quad32_xor (index plain (bound + 1)) (aes_encrypt_BE alg key (inc32lite icb (bound + 1))) /\
index cipher (bound + 2) == quad32_xor (index plain (bound + 2)) (aes_encrypt_BE alg key (inc32lite icb (bound + 2))) /\
index cipher (bound + 3) == quad32_xor (index plain (bound + 3)) (aes_encrypt_BE alg key (inc32lite icb (bound + 3))) /\
index cipher (bound + 4) == quad32_xor (index plain (bound + 4)) (aes_encrypt_BE alg key (inc32lite icb (bound + 4))) /\
index cipher (bound + 5) == quad32_xor (index plain (bound + 5)) (aes_encrypt_BE alg key (inc32lite icb (bound + 5)))
)
(ensures gctr_partial alg (bound + 6) plain cipher key icb)
let gctr_partial_extend6
(alg: algorithm)
(bound: nat)
(plain cipher: seq quad32)
(key: seq nat32)
(icb: quad32)
= | false | null | true | gctr_partial_reveal ();
() | {
"checked_file": "Vale.AES.GCTR.fst.checked",
"dependencies": [
"Vale.Poly1305.Bitvectors.fsti.checked",
"Vale.Lib.Seqs.fsti.checked",
"Vale.Def.Words_s.fsti.checked",
"Vale.Def.Words.Seq_s.fsti.checked",
"Vale.Def.Words.Four_s.fsti.checked",
"Vale.Def.TypesNative_s.fst.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Opaque_s.fsti.checked",
"Vale.Arch.Types.fsti.checked",
"Vale.AES.Types_helpers.fsti.checked",
"Vale.AES.GCTR_s.fst.checked",
"Vale.AES.GCM_helpers.fsti.checked",
"Vale.AES.AES_s.fst.checked",
"prims.fst.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.Properties.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Classical.fsti.checked",
"FStar.Calc.fsti.checked"
],
"interface_file": true,
"source_file": "Vale.AES.GCTR.fst"
} | [
"lemma"
] | [
"Vale.AES.AES_common_s.algorithm",
"Prims.nat",
"FStar.Seq.Base.seq",
"Vale.Def.Types_s.quad32",
"Vale.Def.Types_s.nat32",
"Prims.unit",
"Vale.AES.GCTR.gctr_partial_reveal"
] | [] | module Vale.AES.GCTR
open Vale.Def.Opaque_s
open Vale.Def.Words_s
open Vale.Def.Words.Seq_s
open Vale.Def.Words.Four_s
open Vale.Def.Types_s
open Vale.Arch.Types
open FStar.Mul
open FStar.Seq
open Vale.AES.AES_s
open Vale.AES.GCTR_s
open Vale.AES.GCM_helpers
open FStar.Math.Lemmas
open Vale.Lib.Seqs
open Vale.AES.Types_helpers
#set-options "--z3rlimit 20 --max_fuel 1 --max_ifuel 0"
let lemma_counter_init x low64 low8 =
Vale.Poly1305.Bitvectors.lemma_bytes_and_mod1 low64;
Vale.Def.TypesNative_s.reveal_iand 64 low64 0xff;
assert (low8 == low64 % 256);
lo64_reveal ();
assert_norm (pow2_norm 32 == pow2_32); // OBSERVE
assert (low64 == x.lo0 + x.lo1 * pow2_32); // OBSERVE
assert (low64 % 256 == x.lo0 % 256);
()
let gctr_encrypt_block_offset (icb_BE:quad32) (plain_LE:quad32) (alg:algorithm) (key:seq nat32) (i:int) =
()
let gctr_encrypt_empty (icb_BE:quad32) (plain_LE cipher_LE:seq quad32) (alg:algorithm) (key:seq nat32) =
reveal_opaque (`%le_bytes_to_seq_quad32) le_bytes_to_seq_quad32;
gctr_encrypt_LE_reveal ();
let plain = slice (le_seq_quad32_to_bytes plain_LE) 0 0 in
let cipher = slice (le_seq_quad32_to_bytes cipher_LE) 0 0 in
assert (plain == empty);
assert (cipher == empty);
assert (length plain == 0);
assert (make_gctr_plain_LE plain == empty);
let num_extra = (length (make_gctr_plain_LE plain)) % 16 in
assert (num_extra == 0);
let plain_quads_LE = le_bytes_to_seq_quad32 plain in
let cipher_quads_LE = gctr_encrypt_recursive icb_BE plain_quads_LE alg key 0 in
assert (equal plain_quads_LE empty); // OBSERVE
assert (plain_quads_LE == empty);
assert (cipher_quads_LE == empty);
assert (equal (le_seq_quad32_to_bytes cipher_quads_LE) empty); // OBSERVEs
()
let gctr_partial_opaque_init alg plain cipher key icb =
gctr_partial_reveal ();
()
#restart-solver
let lemma_gctr_partial_append alg b1 b2 p1 c1 p2 c2 key icb1 icb2 =
gctr_partial_reveal ();
()
let gctr_partial_opaque_ignores_postfix alg bound plain plain' cipher cipher' key icb =
gctr_partial_reveal ();
// OBSERVE:
assert (forall i . 0 <= i /\ i < bound ==> index plain i == index (slice plain 0 bound) i);
assert (forall i . 0 <= i /\ i < bound ==> index plain' i == index (slice plain' 0 bound) i);
assert (forall i . 0 <= i /\ i < bound ==> index cipher i == index (slice cipher 0 bound) i);
assert (forall i . 0 <= i /\ i < bound ==> index cipher' i == index (slice cipher' 0 bound) i);
()
let gctr_partial_extend6 (alg:algorithm) (bound:nat) (plain cipher:seq quad32) (key:seq nat32) (icb:quad32) | false | false | Vale.AES.GCTR.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 0,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 20,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val gctr_partial_extend6 (alg:algorithm) (bound:nat) (plain cipher:seq quad32) (key:seq nat32) (icb:quad32) : Lemma
(requires length plain >= bound + 6 /\
length cipher >= bound + 6 /\
is_aes_key_LE alg key /\
bound + 6 < pow2_32 /\
gctr_partial alg bound plain cipher key icb /\
index cipher (bound + 0) == quad32_xor (index plain (bound + 0)) (aes_encrypt_BE alg key (inc32lite icb (bound + 0))) /\
index cipher (bound + 1) == quad32_xor (index plain (bound + 1)) (aes_encrypt_BE alg key (inc32lite icb (bound + 1))) /\
index cipher (bound + 2) == quad32_xor (index plain (bound + 2)) (aes_encrypt_BE alg key (inc32lite icb (bound + 2))) /\
index cipher (bound + 3) == quad32_xor (index plain (bound + 3)) (aes_encrypt_BE alg key (inc32lite icb (bound + 3))) /\
index cipher (bound + 4) == quad32_xor (index plain (bound + 4)) (aes_encrypt_BE alg key (inc32lite icb (bound + 4))) /\
index cipher (bound + 5) == quad32_xor (index plain (bound + 5)) (aes_encrypt_BE alg key (inc32lite icb (bound + 5)))
)
(ensures gctr_partial alg (bound + 6) plain cipher key icb) | [] | Vale.AES.GCTR.gctr_partial_extend6 | {
"file_name": "vale/code/crypto/aes/Vale.AES.GCTR.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} |
alg: Vale.AES.AES_common_s.algorithm ->
bound: Prims.nat ->
plain: FStar.Seq.Base.seq Vale.Def.Types_s.quad32 ->
cipher: FStar.Seq.Base.seq Vale.Def.Types_s.quad32 ->
key: FStar.Seq.Base.seq Vale.Def.Types_s.nat32 ->
icb: Vale.Def.Types_s.quad32
-> FStar.Pervasives.Lemma
(requires
FStar.Seq.Base.length plain >= bound + 6 /\ FStar.Seq.Base.length cipher >= bound + 6 /\
Vale.AES.AES_s.is_aes_key_LE alg key /\ bound + 6 < Vale.Def.Words_s.pow2_32 /\
Vale.AES.GCTR.gctr_partial alg bound plain cipher key icb /\
FStar.Seq.Base.index cipher (bound + 0) ==
Vale.Def.Types_s.quad32_xor (FStar.Seq.Base.index plain (bound + 0))
(Vale.AES.GCTR.aes_encrypt_BE alg key (Vale.AES.GCTR.inc32lite icb (bound + 0))) /\
FStar.Seq.Base.index cipher (bound + 1) ==
Vale.Def.Types_s.quad32_xor (FStar.Seq.Base.index plain (bound + 1))
(Vale.AES.GCTR.aes_encrypt_BE alg key (Vale.AES.GCTR.inc32lite icb (bound + 1))) /\
FStar.Seq.Base.index cipher (bound + 2) ==
Vale.Def.Types_s.quad32_xor (FStar.Seq.Base.index plain (bound + 2))
(Vale.AES.GCTR.aes_encrypt_BE alg key (Vale.AES.GCTR.inc32lite icb (bound + 2))) /\
FStar.Seq.Base.index cipher (bound + 3) ==
Vale.Def.Types_s.quad32_xor (FStar.Seq.Base.index plain (bound + 3))
(Vale.AES.GCTR.aes_encrypt_BE alg key (Vale.AES.GCTR.inc32lite icb (bound + 3))) /\
FStar.Seq.Base.index cipher (bound + 4) ==
Vale.Def.Types_s.quad32_xor (FStar.Seq.Base.index plain (bound + 4))
(Vale.AES.GCTR.aes_encrypt_BE alg key (Vale.AES.GCTR.inc32lite icb (bound + 4))) /\
FStar.Seq.Base.index cipher (bound + 5) ==
Vale.Def.Types_s.quad32_xor (FStar.Seq.Base.index plain (bound + 5))
(Vale.AES.GCTR.aes_encrypt_BE alg key (Vale.AES.GCTR.inc32lite icb (bound + 5))))
(ensures Vale.AES.GCTR.gctr_partial alg (bound + 6) plain cipher key icb) | {
"end_col": 4,
"end_line": 73,
"start_col": 2,
"start_line": 72
} |
FStar.Pervasives.Lemma | val nat32_xor_bytewise_4 (k k' x x' m: nat32) (s s' t t': four nat8)
: Lemma
(requires
k == four_to_nat 8 s /\ k' == four_to_nat 8 s' /\ x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\ ixor k m == x /\ ixor k' m == x' /\ s == s') (ensures t == t') | [
{
"abbrev": false,
"full_module": "FStar.Seq.Properties",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.Types_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Lib.Seqs",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCM_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCTR_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words.Four_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words.Seq_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Lib.Seqs",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCM_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCTR_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let nat32_xor_bytewise_4 (k k' x x' m:nat32) (s s' t t':four nat8) : Lemma
(requires
k == four_to_nat 8 s /\
k' == four_to_nat 8 s' /\
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
ixor k m == x /\
ixor k' m == x' /\
s == s'
)
(ensures t == t')
=
let Mkfour s0 s1 s2 s3 = s in
let Mkfour s0' s1' s2' s3' = s' in
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
assert_norm (four_to_nat 8 t' == four_to_nat_unfold 8 t');
assert_norm (four_to_nat 8 t == four_to_nat_unfold 8 t );
() | val nat32_xor_bytewise_4 (k k' x x' m: nat32) (s s' t t': four nat8)
: Lemma
(requires
k == four_to_nat 8 s /\ k' == four_to_nat 8 s' /\ x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\ ixor k m == x /\ ixor k' m == x' /\ s == s') (ensures t == t')
let nat32_xor_bytewise_4 (k k' x x' m: nat32) (s s' t t': four nat8)
: Lemma
(requires
k == four_to_nat 8 s /\ k' == four_to_nat 8 s' /\ x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\ ixor k m == x /\ ixor k' m == x' /\ s == s') (ensures t == t') = | false | null | true | let Mkfour s0 s1 s2 s3 = s in
let Mkfour s0' s1' s2' s3' = s' in
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
assert_norm (four_to_nat 8 t' == four_to_nat_unfold 8 t');
assert_norm (four_to_nat 8 t == four_to_nat_unfold 8 t);
() | {
"checked_file": "Vale.AES.GCTR.fst.checked",
"dependencies": [
"Vale.Poly1305.Bitvectors.fsti.checked",
"Vale.Lib.Seqs.fsti.checked",
"Vale.Def.Words_s.fsti.checked",
"Vale.Def.Words.Seq_s.fsti.checked",
"Vale.Def.Words.Four_s.fsti.checked",
"Vale.Def.TypesNative_s.fst.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Opaque_s.fsti.checked",
"Vale.Arch.Types.fsti.checked",
"Vale.AES.Types_helpers.fsti.checked",
"Vale.AES.GCTR_s.fst.checked",
"Vale.AES.GCM_helpers.fsti.checked",
"Vale.AES.AES_s.fst.checked",
"prims.fst.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.Properties.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Classical.fsti.checked",
"FStar.Calc.fsti.checked"
],
"interface_file": true,
"source_file": "Vale.AES.GCTR.fst"
} | [
"lemma"
] | [
"Vale.Def.Types_s.nat32",
"Vale.Def.Words_s.four",
"Vale.Def.Types_s.nat8",
"Prims.unit",
"FStar.Pervasives.assert_norm",
"Prims.eq2",
"Vale.Def.Words_s.natN",
"Prims.pow2",
"FStar.Mul.op_Star",
"Vale.Def.Words.Four_s.four_to_nat",
"Vale.Def.Words.Four_s.four_to_nat_unfold",
"Vale.Def.Words_s.nat8",
"Prims.l_and",
"Vale.Def.Words_s.pow2_32",
"Vale.Def.Types_s.ixor",
"Prims.squash",
"Prims.Nil",
"FStar.Pervasives.pattern"
] | [] | module Vale.AES.GCTR
open Vale.Def.Opaque_s
open Vale.Def.Words_s
open Vale.Def.Words.Seq_s
open Vale.Def.Words.Four_s
open Vale.Def.Types_s
open Vale.Arch.Types
open FStar.Mul
open FStar.Seq
open Vale.AES.AES_s
open Vale.AES.GCTR_s
open Vale.AES.GCM_helpers
open FStar.Math.Lemmas
open Vale.Lib.Seqs
open Vale.AES.Types_helpers
#set-options "--z3rlimit 20 --max_fuel 1 --max_ifuel 0"
let lemma_counter_init x low64 low8 =
Vale.Poly1305.Bitvectors.lemma_bytes_and_mod1 low64;
Vale.Def.TypesNative_s.reveal_iand 64 low64 0xff;
assert (low8 == low64 % 256);
lo64_reveal ();
assert_norm (pow2_norm 32 == pow2_32); // OBSERVE
assert (low64 == x.lo0 + x.lo1 * pow2_32); // OBSERVE
assert (low64 % 256 == x.lo0 % 256);
()
let gctr_encrypt_block_offset (icb_BE:quad32) (plain_LE:quad32) (alg:algorithm) (key:seq nat32) (i:int) =
()
let gctr_encrypt_empty (icb_BE:quad32) (plain_LE cipher_LE:seq quad32) (alg:algorithm) (key:seq nat32) =
reveal_opaque (`%le_bytes_to_seq_quad32) le_bytes_to_seq_quad32;
gctr_encrypt_LE_reveal ();
let plain = slice (le_seq_quad32_to_bytes plain_LE) 0 0 in
let cipher = slice (le_seq_quad32_to_bytes cipher_LE) 0 0 in
assert (plain == empty);
assert (cipher == empty);
assert (length plain == 0);
assert (make_gctr_plain_LE plain == empty);
let num_extra = (length (make_gctr_plain_LE plain)) % 16 in
assert (num_extra == 0);
let plain_quads_LE = le_bytes_to_seq_quad32 plain in
let cipher_quads_LE = gctr_encrypt_recursive icb_BE plain_quads_LE alg key 0 in
assert (equal plain_quads_LE empty); // OBSERVE
assert (plain_quads_LE == empty);
assert (cipher_quads_LE == empty);
assert (equal (le_seq_quad32_to_bytes cipher_quads_LE) empty); // OBSERVEs
()
let gctr_partial_opaque_init alg plain cipher key icb =
gctr_partial_reveal ();
()
#restart-solver
let lemma_gctr_partial_append alg b1 b2 p1 c1 p2 c2 key icb1 icb2 =
gctr_partial_reveal ();
()
let gctr_partial_opaque_ignores_postfix alg bound plain plain' cipher cipher' key icb =
gctr_partial_reveal ();
// OBSERVE:
assert (forall i . 0 <= i /\ i < bound ==> index plain i == index (slice plain 0 bound) i);
assert (forall i . 0 <= i /\ i < bound ==> index plain' i == index (slice plain' 0 bound) i);
assert (forall i . 0 <= i /\ i < bound ==> index cipher i == index (slice cipher 0 bound) i);
assert (forall i . 0 <= i /\ i < bound ==> index cipher' i == index (slice cipher' 0 bound) i);
()
let gctr_partial_extend6 (alg:algorithm) (bound:nat) (plain cipher:seq quad32) (key:seq nat32) (icb:quad32)
=
gctr_partial_reveal ();
()
(*
let rec seq_map_i_indexed' (#a:Type) (#b:Type) (f:int->a->b) (s:seq a) (i:int) :
Tot (s':seq b { length s' == length s /\
(forall j . {:pattern index s' j} 0 <= j /\ j < length s ==> index s' j == f (i + j) (index s j))
})
(decreases (length s))
=
if length s = 0 then empty
else cons (f i (head s)) (seq_map_i_indexed f (tail s) (i + 1))
let rec test (icb_BE:quad32) (plain_LE:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (i:int) :
Lemma (ensures
(let gctr_encrypt_block_curried (j:int) (p:quad32) = gctr_encrypt_block icb_BE p alg key j in
gctr_encrypt_recursive icb_BE plain_LE alg key i == seq_map_i_indexed' gctr_encrypt_block_curried plain_LE i))
(decreases (length plain_LE))
=
let gctr_encrypt_block_curried (j:int) (p:quad32) = gctr_encrypt_block icb_BE p alg key j in
let g = gctr_encrypt_recursive icb_BE plain_LE alg key i in
let s = seq_map_i_indexed' gctr_encrypt_block_curried plain_LE i in
if length plain_LE = 0 then (
assert(equal (g) (s));
()
) else (
test icb_BE (tail plain_LE) alg key (i+1);
assert (gctr_encrypt_recursive icb_BE (tail plain_LE) alg key (i+1) == seq_map_i_indexed' gctr_encrypt_block_curried (tail plain_LE) (i+1))
)
*)
let rec gctr_encrypt_recursive_length (icb:quad32) (plain:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (i:int) : Lemma
(requires True)
(ensures length (gctr_encrypt_recursive icb plain alg key i) == length plain)
(decreases %[length plain])
[SMTPat (length (gctr_encrypt_recursive icb plain alg key i))]
=
if length plain = 0 then ()
else gctr_encrypt_recursive_length icb (tail plain) alg key (i + 1)
#reset-options "--z3rlimit 40"
let gctr_encrypt_length (icb_BE:quad32) (plain:gctr_plain_LE)
(alg:algorithm) (key:aes_key_LE alg) :
Lemma(length (gctr_encrypt_LE icb_BE plain alg key) == length plain)
[SMTPat (length (gctr_encrypt_LE icb_BE plain alg key))]
=
reveal_opaque (`%le_bytes_to_seq_quad32) le_bytes_to_seq_quad32;
gctr_encrypt_LE_reveal ();
let num_extra = (length plain) % 16 in
let result = gctr_encrypt_LE icb_BE plain alg key in
if num_extra = 0 then (
let plain_quads_LE = le_bytes_to_seq_quad32 plain in
gctr_encrypt_recursive_length icb_BE plain_quads_LE alg key 0
) else (
let full_bytes_len = (length plain) - num_extra in
let full_blocks, final_block = split plain full_bytes_len in
let full_quads_LE = le_bytes_to_seq_quad32 full_blocks in
let final_quad_LE = le_bytes_to_quad32 (pad_to_128_bits final_block) in
let cipher_quads_LE = gctr_encrypt_recursive icb_BE full_quads_LE alg key 0 in
let final_cipher_quad_LE = gctr_encrypt_block icb_BE final_quad_LE alg key (full_bytes_len / 16) in
let cipher_bytes_full_LE = le_seq_quad32_to_bytes cipher_quads_LE in
let final_cipher_bytes_LE = slice (le_quad32_to_bytes final_cipher_quad_LE) 0 num_extra in
gctr_encrypt_recursive_length icb_BE full_quads_LE alg key 0;
assert (length result == length cipher_bytes_full_LE + length final_cipher_bytes_LE);
assert (length cipher_quads_LE == length full_quads_LE);
assert (length cipher_bytes_full_LE == 16 * length cipher_quads_LE);
assert (16 * length full_quads_LE == length full_blocks);
assert (length cipher_bytes_full_LE == length full_blocks);
()
)
#reset-options
let rec gctr_indexed_helper (icb:quad32) (plain:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (i:int) : Lemma
(requires True)
(ensures (let cipher = gctr_encrypt_recursive icb plain alg key i in
length cipher == length plain /\
(forall j . {:pattern index cipher j} 0 <= j /\ j < length plain ==>
index cipher j == quad32_xor (index plain j) (aes_encrypt_BE alg key (inc32 icb (i + j)) ))))
(decreases %[length plain])
=
if length plain = 0 then ()
else
let tl = tail plain in
let cipher = gctr_encrypt_recursive icb plain alg key i in
let r_cipher = gctr_encrypt_recursive icb tl alg key (i+1) in
let helper (j:int) :
Lemma ((0 <= j /\ j < length plain) ==> (index cipher j == quad32_xor (index plain j) (aes_encrypt_BE alg key (inc32 icb (i + j)) )))
=
aes_encrypt_LE_reveal ();
if 0 < j && j < length plain then (
gctr_indexed_helper icb tl alg key (i+1);
assert(index r_cipher (j-1) == quad32_xor (index tl (j-1)) (aes_encrypt_BE alg key (inc32 icb (i + 1 + j - 1)) )) // OBSERVE
) else ()
in
FStar.Classical.forall_intro helper
let gctr_indexed (icb:quad32) (plain:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (cipher:seq quad32) : Lemma
(requires length cipher == length plain /\
(forall i . {:pattern index cipher i} 0 <= i /\ i < length cipher ==>
index cipher i == quad32_xor (index plain i) (aes_encrypt_BE alg key (inc32 icb i) )))
(ensures cipher == gctr_encrypt_recursive icb plain alg key 0)
=
gctr_indexed_helper icb plain alg key 0;
let c = gctr_encrypt_recursive icb plain alg key 0 in
assert(equal cipher c) // OBSERVE: Invoke extensionality lemmas
let gctr_partial_completed (alg:algorithm) (plain cipher:seq quad32) (key:seq nat32) (icb:quad32) =
gctr_indexed icb plain alg key cipher;
()
let gctr_partial_opaque_completed (alg:algorithm) (plain cipher:seq quad32) (key:seq nat32) (icb:quad32) : Lemma
(requires
is_aes_key_LE alg key /\
length plain == length cipher /\
length plain < pow2_32 /\
gctr_partial alg (length cipher) plain cipher key icb
)
(ensures cipher == gctr_encrypt_recursive icb plain alg key 0)
=
gctr_partial_reveal ();
gctr_partial_completed alg plain cipher key icb
let gctr_partial_to_full_basic (icb_BE:quad32) (plain:seq quad32) (alg:algorithm) (key:seq nat32) (cipher:seq quad32) =
gctr_encrypt_LE_reveal ();
let p = le_seq_quad32_to_bytes plain in
assert (length p % 16 == 0);
let plain_quads_LE = le_bytes_to_seq_quad32 p in
let cipher_quads_LE = gctr_encrypt_recursive icb_BE plain_quads_LE alg key 0 in
let cipher_bytes = le_seq_quad32_to_bytes cipher_quads_LE in
le_bytes_to_seq_quad32_to_bytes plain;
()
(*
Want to show that:
slice (le_seq_quad32_to_bytes (buffer128_as_seq(mem, out_b))) 0 num_bytes
==
gctr_encrypt_LE icb_BE (slice (le_seq_quad32_to_bytes (buffer128_as_seq(mem, in_b))) 0 num_bytes) ...
We know that
slice (buffer128_as_seq(mem, out_b) 0 num_blocks
==
gctr_encrypt_recursive icb_BE (slice buffer128_as_seq(mem, in_b) 0 num_blocks) ...
And we know that:
get_mem out_b num_blocks
==
gctr_encrypt_block(icb_BE, (get_mem inb num_blocks), alg, key, num_blocks);
Internally gctr_encrypt_LE will compute:
full_blocks, final_block = split (slice (le_seq_quad32_to_bytes (buffer128_as_seq(mem, in_b))) 0 num_bytes) (num_blocks * 16)
We'd like to show that
Step1: le_bytes_to_seq_quad32 full_blocks == slice buffer128_as_seq(mem, in_b) 0 num_blocks
and
Step2: final_block == slice (le_quad32_to_bytes (get_mem inb num_blocks)) 0 num_extra
Then we need to break down the byte-level effects of gctr_encrypt_block to show that even though the
padded version of final_block differs from (get_mem inb num_blocks), after we slice it at the end,
we end up with the same value
*)
let step1 (p:seq quad32) (num_bytes:nat{ num_bytes < 16 * length p }) : Lemma
(let num_extra = num_bytes % 16 in
let num_blocks = num_bytes / 16 in
let full_blocks, final_block = split (slice (le_seq_quad32_to_bytes p) 0 num_bytes) (num_blocks * 16) in
let full_quads_LE = le_bytes_to_seq_quad32 full_blocks in
let p_prefix = slice p 0 num_blocks in
p_prefix == full_quads_LE)
=
let num_extra = num_bytes % 16 in
let num_blocks = num_bytes / 16 in
let full_blocks, final_block = split (slice (le_seq_quad32_to_bytes p) 0 num_bytes) (num_blocks * 16) in
let full_quads_LE = le_bytes_to_seq_quad32 full_blocks in
let p_prefix = slice p 0 num_blocks in
assert (length full_blocks == num_blocks * 16);
assert (full_blocks == slice (slice (le_seq_quad32_to_bytes p) 0 num_bytes) 0 (num_blocks * 16));
assert (full_blocks == slice (le_seq_quad32_to_bytes p) 0 (num_blocks * 16));
slice_commutes_le_seq_quad32_to_bytes0 p num_blocks;
assert (full_blocks == le_seq_quad32_to_bytes (slice p 0 num_blocks));
le_bytes_to_seq_quad32_to_bytes (slice p 0 num_blocks);
assert (full_quads_LE == (slice p 0 num_blocks));
()
#reset-options "--smtencoding.elim_box true --z3rlimit 30"
let lemma_slice_orig_index (#a:Type) (s s':seq a) (m n:nat) : Lemma
(requires length s == length s' /\ m <= n /\ n <= length s /\ slice s m n == slice s' m n)
(ensures (forall (i:int).{:pattern (index s i) \/ (index s' i)} m <= i /\ i < n ==> index s i == index s' i))
=
let aux (i:nat{m <= i /\ i < n}) : Lemma (index s i == index s' i) =
lemma_index_slice s m n (i - m);
lemma_index_slice s' m n (i - m)
in Classical.forall_intro aux
let lemma_ishl_32 (x:nat32) (k:nat) : Lemma
(ensures ishl #pow2_32 x k == x * pow2 k % pow2_32)
=
Vale.Def.TypesNative_s.reveal_ishl 32 x k;
FStar.UInt.shift_left_value_lemma #32 x k;
()
let lemma_ishl_ixor_32 (x y:nat32) (k:nat) : Lemma
(ensures ishl #pow2_32 (ixor x y) k == ixor (ishl x k) (ishl y k))
=
Vale.Def.TypesNative_s.reveal_ishl 32 x k;
Vale.Def.TypesNative_s.reveal_ishl 32 y k;
Vale.Def.TypesNative_s.reveal_ishl 32 (ixor x y) k;
Vale.Def.TypesNative_s.reveal_ixor 32 x y;
Vale.Def.TypesNative_s.reveal_ixor 32 (ishl x k) (ishl y k);
FStar.UInt.shift_left_logxor_lemma #32 x y k;
()
unfold let pow2_24 = 0x1000000
let nat24 = natN pow2_24
let nat32_xor_bytewise_1_helper1 (x0 x0':nat8) (x1 x1':nat24) (x x':nat32) : Lemma
(requires
x == x0 + 0x100 * x1 /\
x' == x0' + 0x100 * x1' /\
x * 0x1000000 % 0x100000000 == x' * 0x1000000 % 0x100000000
)
(ensures x0 == x0')
=
()
let nat32_xor_bytewise_2_helper1 (x0 x0' x1 x1':nat16) (x x':nat32) : Lemma
(requires
x == x0 + 0x10000 * x1 /\
x' == x0' + 0x10000 * x1' /\
x * 0x10000 % 0x100000000 == x' * 0x10000 % 0x100000000
)
(ensures x0 == x0')
=
()
let nat32_xor_bytewise_3_helper1 (x0 x0':nat24) (x1 x1':nat8) (x x':nat32) : Lemma
(requires
x == x0 + 0x1000000 * x1 /\
x' == x0' + 0x1000000 * x1' /\
x * 0x100 % 0x100000000 == x' * 0x100 % 0x100000000
)
(ensures x0 == x0')
=
()
let nat32_xor_bytewise_1_helper2 (x x':nat32) (t t':four nat8) : Lemma
(requires
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
x * 0x1000000 % 0x100000000 == x' * 0x1000000 % 0x100000000
)
(ensures t.lo0 == t'.lo0)
=
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
let t123 = t1 + 0x100 * t2 + 0x10000 * t3 in
let t123' = t1' + 0x100 * t2' + 0x10000 * t3' in
assert_norm (four_to_nat 8 t == four_to_nat_unfold 8 t );
assert_norm (four_to_nat 8 t' == four_to_nat_unfold 8 t');
nat32_xor_bytewise_1_helper1 t0 t0' t123 t123' x x';
()
let nat32_xor_bytewise_2_helper2 (x x':nat32) (t t':four nat8) : Lemma
(requires
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
x * 0x10000 % 0x100000000 == x' * 0x10000 % 0x100000000
)
(ensures t.lo0 == t'.lo0 /\ t.lo1 == t'.lo1)
=
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
let t01 = t0 + 0x100 * t1 in
let t23 = t2 + 0x100 * t3 in
let t01' = t0' + 0x100 * t1' in
let t23' = t2' + 0x100 * t3' in
assert_norm (four_to_nat 8 t == four_to_nat_unfold 8 t );
assert_norm (four_to_nat 8 t' == four_to_nat_unfold 8 t');
nat32_xor_bytewise_2_helper1 t01 t01' t23 t23' x x';
()
let nat32_xor_bytewise_3_helper2 (x x':nat32) (t t':four nat8) : Lemma
(requires
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
x * 0x100 % 0x100000000 == x' * 0x100 % 0x100000000
)
(ensures t.lo0 == t'.lo0 /\ t.lo1 == t'.lo1 /\ t.hi2 == t'.hi2)
=
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
let t012 = t0 + 0x100 * t1 + 0x10000 * t2 in
let t012' = t0' + 0x100 * t1' + 0x10000 * t2' in
assert_norm (four_to_nat 8 t == four_to_nat_unfold 8 t );
assert_norm (four_to_nat 8 t' == four_to_nat_unfold 8 t');
nat32_xor_bytewise_3_helper1 t012 t012' t3 t3' x x';
()
let nat32_xor_bytewise_1_helper3 (k k':nat32) (s s':four nat8) : Lemma
(requires
k == four_to_nat 8 s /\
k' == four_to_nat 8 s' /\
s.lo0 == s'.lo0
)
(ensures k * 0x1000000 % 0x100000000 == k' * 0x1000000 % 0x100000000)
=
let Mkfour _ _ _ _ = s in
let Mkfour _ _ _ _ = s' in
assert_norm (four_to_nat 8 s == four_to_nat_unfold 8 s );
assert_norm (four_to_nat 8 s' == four_to_nat_unfold 8 s');
()
let nat32_xor_bytewise_2_helper3 (k k':nat32) (s s':four nat8) : Lemma
(requires
k == four_to_nat 8 s /\
k' == four_to_nat 8 s' /\
s.lo0 == s'.lo0 /\ s.lo1 == s'.lo1
)
(ensures k * 0x10000 % 0x100000000 == k' * 0x10000 % 0x100000000)
=
let Mkfour _ _ _ _ = s in
let Mkfour _ _ _ _ = s' in
assert_norm (four_to_nat 8 s == four_to_nat_unfold 8 s );
assert_norm (four_to_nat 8 s' == four_to_nat_unfold 8 s');
()
let nat32_xor_bytewise_3_helper3 (k k':nat32) (s s':four nat8) : Lemma
(requires
k == four_to_nat 8 s /\
k' == four_to_nat 8 s' /\
s.lo0 == s'.lo0 /\ s.lo1 == s'.lo1 /\ s.hi2 == s'.hi2
)
(ensures k * 0x100 % 0x100000000 == k' * 0x100 % 0x100000000)
=
let Mkfour _ _ _ _ = s in
let Mkfour _ _ _ _ = s' in
assert_norm (four_to_nat 8 s == four_to_nat_unfold 8 s );
assert_norm (four_to_nat 8 s' == four_to_nat_unfold 8 s');
()
let nat32_xor_bytewise_1 (k k' x x' m:nat32) (s s' t t':four nat8) : Lemma
(requires
k == four_to_nat 8 s /\
k' == four_to_nat 8 s' /\
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
ixor k m == x /\
ixor k' m == x' /\
s.lo0 == s'.lo0
)
(ensures t.lo0 == t'.lo0)
=
let Mkfour s0 s1 s2 s3 = s in
let Mkfour s0' s1' s2' s3' = s' in
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
nat32_xor_bytewise_1_helper3 k k' s s';
lemma_ishl_32 k 24;
lemma_ishl_32 k' 24;
lemma_ishl_32 x 24;
lemma_ishl_32 x' 24;
lemma_ishl_ixor_32 k m 24;
lemma_ishl_ixor_32 k' m 24;
assert_norm (pow2 24 == pow2_24);
nat32_xor_bytewise_1_helper2 x x' t t';
()
let nat32_xor_bytewise_2 (k k' x x' m:nat32) (s s' t t':four nat8) : Lemma
(requires
k == four_to_nat 8 s /\
k' == four_to_nat 8 s' /\
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
ixor k m == x /\
ixor k' m == x' /\
s.lo0 == s'.lo0 /\ s.lo1 == s'.lo1
)
(ensures t.lo0 == t'.lo0 /\ t.lo1 == t'.lo1)
=
let Mkfour s0 s1 s2 s3 = s in
let Mkfour s0' s1' s2' s3' = s' in
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
nat32_xor_bytewise_2_helper3 k k' s s';
lemma_ishl_32 k 16;
lemma_ishl_32 k' 16;
lemma_ishl_32 x 16;
lemma_ishl_32 x' 16;
lemma_ishl_ixor_32 k m 16;
lemma_ishl_ixor_32 k' m 16;
// assert (ishl #pow2_32 k 16 == k * 0x10000 % 0x100000000);
// assert (ishl #pow2_32 k' 16 == k' * 0x10000 % 0x100000000);
// assert (ishl #pow2_32 x 16 == x * 0x10000 % 0x100000000);
// assert (ishl #pow2_32 x' 16 == x' * 0x10000 % 0x100000000);
// assert (ishl #pow2_32 x 16 == ixor (ishl k 16) (ishl m 16));
// assert (ishl #pow2_32 x' 16 == ixor (ishl k' 16) (ishl m 16));
// assert (x * 0x10000 % 0x100000000 == ixor (k * 0x10000 % 0x100000000) (ishl m 16));
// assert (x' * 0x10000 % 0x100000000 == ixor (k' * 0x10000 % 0x100000000) (ishl m 16));
nat32_xor_bytewise_2_helper2 x x' t t';
()
let nat32_xor_bytewise_3 (k k' x x' m:nat32) (s s' t t':four nat8) : Lemma
(requires
k == four_to_nat 8 s /\
k' == four_to_nat 8 s' /\
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
ixor k m == x /\
ixor k' m == x' /\
s.lo0 == s'.lo0 /\ s.lo1 == s'.lo1 /\ s.hi2 == s'.hi2
)
(ensures t.lo0 == t'.lo0 /\ t.lo1 == t'.lo1 /\ t.hi2 == t'.hi2)
=
let Mkfour s0 s1 s2 s3 = s in
let Mkfour s0' s1' s2' s3' = s' in
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
nat32_xor_bytewise_3_helper3 k k' s s';
lemma_ishl_32 k 8;
lemma_ishl_32 k' 8;
lemma_ishl_32 x 8;
lemma_ishl_32 x' 8;
lemma_ishl_ixor_32 k m 8;
lemma_ishl_ixor_32 k' m 8;
nat32_xor_bytewise_3_helper2 x x' t t';
()
#reset-options "--z3rlimit 50 --smtencoding.nl_arith_repr boxwrap --smtencoding.l_arith_repr boxwrap"
let nat32_xor_bytewise_4 (k k' x x' m:nat32) (s s' t t':four nat8) : Lemma
(requires
k == four_to_nat 8 s /\
k' == four_to_nat 8 s' /\
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
ixor k m == x /\
ixor k' m == x' /\
s == s'
) | false | false | Vale.AES.GCTR.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 50,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val nat32_xor_bytewise_4 (k k' x x' m: nat32) (s s' t t': four nat8)
: Lemma
(requires
k == four_to_nat 8 s /\ k' == four_to_nat 8 s' /\ x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\ ixor k m == x /\ ixor k' m == x' /\ s == s') (ensures t == t') | [] | Vale.AES.GCTR.nat32_xor_bytewise_4 | {
"file_name": "vale/code/crypto/aes/Vale.AES.GCTR.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} |
k: Vale.Def.Types_s.nat32 ->
k': Vale.Def.Types_s.nat32 ->
x: Vale.Def.Types_s.nat32 ->
x': Vale.Def.Types_s.nat32 ->
m: Vale.Def.Types_s.nat32 ->
s: Vale.Def.Words_s.four Vale.Def.Types_s.nat8 ->
s': Vale.Def.Words_s.four Vale.Def.Types_s.nat8 ->
t: Vale.Def.Words_s.four Vale.Def.Types_s.nat8 ->
t': Vale.Def.Words_s.four Vale.Def.Types_s.nat8
-> FStar.Pervasives.Lemma
(requires
k == Vale.Def.Words.Four_s.four_to_nat 8 s /\ k' == Vale.Def.Words.Four_s.four_to_nat 8 s' /\
x == Vale.Def.Words.Four_s.four_to_nat 8 t /\ x' == Vale.Def.Words.Four_s.four_to_nat 8 t' /\
Vale.Def.Types_s.ixor k m == x /\ Vale.Def.Types_s.ixor k' m == x' /\ s == s')
(ensures t == t') | {
"end_col": 4,
"end_line": 530,
"start_col": 3,
"start_line": 523
} |
FStar.Pervasives.Lemma | val quad32_xor_bytewise (q q' r: quad32) (n: nat{n <= 16})
: Lemma
(requires
(let q_bytes = le_quad32_to_bytes q in
let q'_bytes = le_quad32_to_bytes q' in
slice q_bytes 0 n == slice q'_bytes 0 n))
(ensures
(let q_bytes = le_quad32_to_bytes q in
let q'_bytes = le_quad32_to_bytes q' in
let qr_bytes = le_quad32_to_bytes (quad32_xor q r) in
let q'r_bytes = le_quad32_to_bytes (quad32_xor q' r) in
slice qr_bytes 0 n == slice q'r_bytes 0 n)) | [
{
"abbrev": false,
"full_module": "FStar.Seq.Properties",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.Types_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Lib.Seqs",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCM_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCTR_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words.Four_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words.Seq_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Lib.Seqs",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCM_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.GCTR_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Words_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Prop_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let quad32_xor_bytewise (q q' r:quad32) (n:nat{ n <= 16 }) : Lemma
(requires (let q_bytes = le_quad32_to_bytes q in
let q'_bytes = le_quad32_to_bytes q' in
slice q_bytes 0 n == slice q'_bytes 0 n))
(ensures (let q_bytes = le_quad32_to_bytes q in
let q'_bytes = le_quad32_to_bytes q' in
let qr_bytes = le_quad32_to_bytes (quad32_xor q r) in
let q'r_bytes = le_quad32_to_bytes (quad32_xor q' r) in
slice qr_bytes 0 n == slice q'r_bytes 0 n))
=
let s = le_quad32_to_bytes q in
let s' = le_quad32_to_bytes q' in
let t = le_quad32_to_bytes (quad32_xor q r) in
let t' = le_quad32_to_bytes (quad32_xor q' r) in
lemma_slices_le_quad32_to_bytes q;
lemma_slices_le_quad32_to_bytes q';
lemma_slices_le_quad32_to_bytes (quad32_xor q r);
lemma_slices_le_quad32_to_bytes (quad32_xor q' r);
lemma_slice_orig_index s s' 0 n;
quad32_xor_reveal ();
reverse_bytes_nat32_reveal ();
if n < 4 then nat32_xor_bytewise q.lo0 q'.lo0 r.lo0 (slice s 0 4) (slice s' 0 4) (slice t 0 4) (slice t' 0 4) n
else
(
nat32_xor_bytewise q.lo0 q'.lo0 r.lo0 (slice s 0 4) (slice s' 0 4) (slice t 0 4) (slice t' 0 4) 4;
if n < 8 then nat32_xor_bytewise q.lo1 q'.lo1 r.lo1 (slice s 4 8) (slice s' 4 8) (slice t 4 8) (slice t' 4 8) (n - 4)
else
(
nat32_xor_bytewise q.lo1 q'.lo1 r.lo1 (slice s 4 8) (slice s' 4 8) (slice t 4 8) (slice t' 4 8) 4;
if n < 12 then nat32_xor_bytewise q.hi2 q'.hi2 r.hi2 (slice s 8 12) (slice s' 8 12) (slice t 8 12) (slice t' 8 12) (n - 8)
else
(
nat32_xor_bytewise q.hi2 q'.hi2 r.hi2 (slice s 8 12) (slice s' 8 12) (slice t 8 12) (slice t' 8 12) 4;
nat32_xor_bytewise q.hi3 q'.hi3 r.hi3 (slice s 12 16) (slice s' 12 16) (slice t 12 16) (slice t' 12 16) (n - 12);
()
)
)
);
assert (equal (slice t 0 n) (slice t' 0 n));
() | val quad32_xor_bytewise (q q' r: quad32) (n: nat{n <= 16})
: Lemma
(requires
(let q_bytes = le_quad32_to_bytes q in
let q'_bytes = le_quad32_to_bytes q' in
slice q_bytes 0 n == slice q'_bytes 0 n))
(ensures
(let q_bytes = le_quad32_to_bytes q in
let q'_bytes = le_quad32_to_bytes q' in
let qr_bytes = le_quad32_to_bytes (quad32_xor q r) in
let q'r_bytes = le_quad32_to_bytes (quad32_xor q' r) in
slice qr_bytes 0 n == slice q'r_bytes 0 n))
let quad32_xor_bytewise (q q' r: quad32) (n: nat{n <= 16})
: Lemma
(requires
(let q_bytes = le_quad32_to_bytes q in
let q'_bytes = le_quad32_to_bytes q' in
slice q_bytes 0 n == slice q'_bytes 0 n))
(ensures
(let q_bytes = le_quad32_to_bytes q in
let q'_bytes = le_quad32_to_bytes q' in
let qr_bytes = le_quad32_to_bytes (quad32_xor q r) in
let q'r_bytes = le_quad32_to_bytes (quad32_xor q' r) in
slice qr_bytes 0 n == slice q'r_bytes 0 n)) = | false | null | true | let s = le_quad32_to_bytes q in
let s' = le_quad32_to_bytes q' in
let t = le_quad32_to_bytes (quad32_xor q r) in
let t' = le_quad32_to_bytes (quad32_xor q' r) in
lemma_slices_le_quad32_to_bytes q;
lemma_slices_le_quad32_to_bytes q';
lemma_slices_le_quad32_to_bytes (quad32_xor q r);
lemma_slices_le_quad32_to_bytes (quad32_xor q' r);
lemma_slice_orig_index s s' 0 n;
quad32_xor_reveal ();
reverse_bytes_nat32_reveal ();
if n < 4
then
nat32_xor_bytewise q.lo0 q'.lo0 r.lo0 (slice s 0 4) (slice s' 0 4) (slice t 0 4) (slice t' 0 4) n
else
(nat32_xor_bytewise q.lo0 q'.lo0 r.lo0 (slice s 0 4) (slice s' 0 4) (slice t 0 4) (slice t' 0 4) 4;
if n < 8
then
nat32_xor_bytewise q.lo1
q'.lo1
r.lo1
(slice s 4 8)
(slice s' 4 8)
(slice t 4 8)
(slice t' 4 8)
(n - 4)
else
(nat32_xor_bytewise q.lo1
q'.lo1
r.lo1
(slice s 4 8)
(slice s' 4 8)
(slice t 4 8)
(slice t' 4 8)
4;
if n < 12
then
nat32_xor_bytewise q.hi2
q'.hi2
r.hi2
(slice s 8 12)
(slice s' 8 12)
(slice t 8 12)
(slice t' 8 12)
(n - 8)
else
(nat32_xor_bytewise q.hi2
q'.hi2
r.hi2
(slice s 8 12)
(slice s' 8 12)
(slice t 8 12)
(slice t' 8 12)
4;
nat32_xor_bytewise q.hi3
q'.hi3
r.hi3
(slice s 12 16)
(slice s' 12 16)
(slice t 12 16)
(slice t' 12 16)
(n - 12);
())));
assert (equal (slice t 0 n) (slice t' 0 n));
() | {
"checked_file": "Vale.AES.GCTR.fst.checked",
"dependencies": [
"Vale.Poly1305.Bitvectors.fsti.checked",
"Vale.Lib.Seqs.fsti.checked",
"Vale.Def.Words_s.fsti.checked",
"Vale.Def.Words.Seq_s.fsti.checked",
"Vale.Def.Words.Four_s.fsti.checked",
"Vale.Def.TypesNative_s.fst.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Opaque_s.fsti.checked",
"Vale.Arch.Types.fsti.checked",
"Vale.AES.Types_helpers.fsti.checked",
"Vale.AES.GCTR_s.fst.checked",
"Vale.AES.GCM_helpers.fsti.checked",
"Vale.AES.AES_s.fst.checked",
"prims.fst.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.Properties.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Mul.fst.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Classical.fsti.checked",
"FStar.Calc.fsti.checked"
],
"interface_file": true,
"source_file": "Vale.AES.GCTR.fst"
} | [
"lemma"
] | [
"Vale.Def.Types_s.quad32",
"Prims.nat",
"Prims.b2t",
"Prims.op_LessThanOrEqual",
"Prims.unit",
"Prims._assert",
"FStar.Seq.Base.equal",
"Vale.Def.Types_s.nat8",
"FStar.Seq.Base.slice",
"Prims.op_LessThan",
"Vale.AES.GCTR.nat32_xor_bytewise",
"Vale.Def.Words_s.__proj__Mkfour__item__lo0",
"Vale.Def.Types_s.nat32",
"Prims.bool",
"Vale.Def.Words_s.__proj__Mkfour__item__lo1",
"Prims.op_Subtraction",
"Vale.Def.Words_s.__proj__Mkfour__item__hi2",
"Vale.Def.Words_s.__proj__Mkfour__item__hi3",
"Vale.Def.Types_s.reverse_bytes_nat32_reveal",
"Vale.Def.Types_s.quad32_xor_reveal",
"Vale.AES.GCTR.lemma_slice_orig_index",
"Vale.AES.Types_helpers.lemma_slices_le_quad32_to_bytes",
"Vale.Def.Types_s.quad32_xor",
"FStar.Seq.Base.seq",
"Vale.Def.Words_s.nat8",
"Vale.Def.Types_s.le_quad32_to_bytes",
"Prims.eq2",
"Prims.squash",
"Prims.Nil",
"FStar.Pervasives.pattern"
] | [] | module Vale.AES.GCTR
open Vale.Def.Opaque_s
open Vale.Def.Words_s
open Vale.Def.Words.Seq_s
open Vale.Def.Words.Four_s
open Vale.Def.Types_s
open Vale.Arch.Types
open FStar.Mul
open FStar.Seq
open Vale.AES.AES_s
open Vale.AES.GCTR_s
open Vale.AES.GCM_helpers
open FStar.Math.Lemmas
open Vale.Lib.Seqs
open Vale.AES.Types_helpers
#set-options "--z3rlimit 20 --max_fuel 1 --max_ifuel 0"
let lemma_counter_init x low64 low8 =
Vale.Poly1305.Bitvectors.lemma_bytes_and_mod1 low64;
Vale.Def.TypesNative_s.reveal_iand 64 low64 0xff;
assert (low8 == low64 % 256);
lo64_reveal ();
assert_norm (pow2_norm 32 == pow2_32); // OBSERVE
assert (low64 == x.lo0 + x.lo1 * pow2_32); // OBSERVE
assert (low64 % 256 == x.lo0 % 256);
()
let gctr_encrypt_block_offset (icb_BE:quad32) (plain_LE:quad32) (alg:algorithm) (key:seq nat32) (i:int) =
()
let gctr_encrypt_empty (icb_BE:quad32) (plain_LE cipher_LE:seq quad32) (alg:algorithm) (key:seq nat32) =
reveal_opaque (`%le_bytes_to_seq_quad32) le_bytes_to_seq_quad32;
gctr_encrypt_LE_reveal ();
let plain = slice (le_seq_quad32_to_bytes plain_LE) 0 0 in
let cipher = slice (le_seq_quad32_to_bytes cipher_LE) 0 0 in
assert (plain == empty);
assert (cipher == empty);
assert (length plain == 0);
assert (make_gctr_plain_LE plain == empty);
let num_extra = (length (make_gctr_plain_LE plain)) % 16 in
assert (num_extra == 0);
let plain_quads_LE = le_bytes_to_seq_quad32 plain in
let cipher_quads_LE = gctr_encrypt_recursive icb_BE plain_quads_LE alg key 0 in
assert (equal plain_quads_LE empty); // OBSERVE
assert (plain_quads_LE == empty);
assert (cipher_quads_LE == empty);
assert (equal (le_seq_quad32_to_bytes cipher_quads_LE) empty); // OBSERVEs
()
let gctr_partial_opaque_init alg plain cipher key icb =
gctr_partial_reveal ();
()
#restart-solver
let lemma_gctr_partial_append alg b1 b2 p1 c1 p2 c2 key icb1 icb2 =
gctr_partial_reveal ();
()
let gctr_partial_opaque_ignores_postfix alg bound plain plain' cipher cipher' key icb =
gctr_partial_reveal ();
// OBSERVE:
assert (forall i . 0 <= i /\ i < bound ==> index plain i == index (slice plain 0 bound) i);
assert (forall i . 0 <= i /\ i < bound ==> index plain' i == index (slice plain' 0 bound) i);
assert (forall i . 0 <= i /\ i < bound ==> index cipher i == index (slice cipher 0 bound) i);
assert (forall i . 0 <= i /\ i < bound ==> index cipher' i == index (slice cipher' 0 bound) i);
()
let gctr_partial_extend6 (alg:algorithm) (bound:nat) (plain cipher:seq quad32) (key:seq nat32) (icb:quad32)
=
gctr_partial_reveal ();
()
(*
let rec seq_map_i_indexed' (#a:Type) (#b:Type) (f:int->a->b) (s:seq a) (i:int) :
Tot (s':seq b { length s' == length s /\
(forall j . {:pattern index s' j} 0 <= j /\ j < length s ==> index s' j == f (i + j) (index s j))
})
(decreases (length s))
=
if length s = 0 then empty
else cons (f i (head s)) (seq_map_i_indexed f (tail s) (i + 1))
let rec test (icb_BE:quad32) (plain_LE:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (i:int) :
Lemma (ensures
(let gctr_encrypt_block_curried (j:int) (p:quad32) = gctr_encrypt_block icb_BE p alg key j in
gctr_encrypt_recursive icb_BE plain_LE alg key i == seq_map_i_indexed' gctr_encrypt_block_curried plain_LE i))
(decreases (length plain_LE))
=
let gctr_encrypt_block_curried (j:int) (p:quad32) = gctr_encrypt_block icb_BE p alg key j in
let g = gctr_encrypt_recursive icb_BE plain_LE alg key i in
let s = seq_map_i_indexed' gctr_encrypt_block_curried plain_LE i in
if length plain_LE = 0 then (
assert(equal (g) (s));
()
) else (
test icb_BE (tail plain_LE) alg key (i+1);
assert (gctr_encrypt_recursive icb_BE (tail plain_LE) alg key (i+1) == seq_map_i_indexed' gctr_encrypt_block_curried (tail plain_LE) (i+1))
)
*)
let rec gctr_encrypt_recursive_length (icb:quad32) (plain:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (i:int) : Lemma
(requires True)
(ensures length (gctr_encrypt_recursive icb plain alg key i) == length plain)
(decreases %[length plain])
[SMTPat (length (gctr_encrypt_recursive icb plain alg key i))]
=
if length plain = 0 then ()
else gctr_encrypt_recursive_length icb (tail plain) alg key (i + 1)
#reset-options "--z3rlimit 40"
let gctr_encrypt_length (icb_BE:quad32) (plain:gctr_plain_LE)
(alg:algorithm) (key:aes_key_LE alg) :
Lemma(length (gctr_encrypt_LE icb_BE plain alg key) == length plain)
[SMTPat (length (gctr_encrypt_LE icb_BE plain alg key))]
=
reveal_opaque (`%le_bytes_to_seq_quad32) le_bytes_to_seq_quad32;
gctr_encrypt_LE_reveal ();
let num_extra = (length plain) % 16 in
let result = gctr_encrypt_LE icb_BE plain alg key in
if num_extra = 0 then (
let plain_quads_LE = le_bytes_to_seq_quad32 plain in
gctr_encrypt_recursive_length icb_BE plain_quads_LE alg key 0
) else (
let full_bytes_len = (length plain) - num_extra in
let full_blocks, final_block = split plain full_bytes_len in
let full_quads_LE = le_bytes_to_seq_quad32 full_blocks in
let final_quad_LE = le_bytes_to_quad32 (pad_to_128_bits final_block) in
let cipher_quads_LE = gctr_encrypt_recursive icb_BE full_quads_LE alg key 0 in
let final_cipher_quad_LE = gctr_encrypt_block icb_BE final_quad_LE alg key (full_bytes_len / 16) in
let cipher_bytes_full_LE = le_seq_quad32_to_bytes cipher_quads_LE in
let final_cipher_bytes_LE = slice (le_quad32_to_bytes final_cipher_quad_LE) 0 num_extra in
gctr_encrypt_recursive_length icb_BE full_quads_LE alg key 0;
assert (length result == length cipher_bytes_full_LE + length final_cipher_bytes_LE);
assert (length cipher_quads_LE == length full_quads_LE);
assert (length cipher_bytes_full_LE == 16 * length cipher_quads_LE);
assert (16 * length full_quads_LE == length full_blocks);
assert (length cipher_bytes_full_LE == length full_blocks);
()
)
#reset-options
let rec gctr_indexed_helper (icb:quad32) (plain:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (i:int) : Lemma
(requires True)
(ensures (let cipher = gctr_encrypt_recursive icb plain alg key i in
length cipher == length plain /\
(forall j . {:pattern index cipher j} 0 <= j /\ j < length plain ==>
index cipher j == quad32_xor (index plain j) (aes_encrypt_BE alg key (inc32 icb (i + j)) ))))
(decreases %[length plain])
=
if length plain = 0 then ()
else
let tl = tail plain in
let cipher = gctr_encrypt_recursive icb plain alg key i in
let r_cipher = gctr_encrypt_recursive icb tl alg key (i+1) in
let helper (j:int) :
Lemma ((0 <= j /\ j < length plain) ==> (index cipher j == quad32_xor (index plain j) (aes_encrypt_BE alg key (inc32 icb (i + j)) )))
=
aes_encrypt_LE_reveal ();
if 0 < j && j < length plain then (
gctr_indexed_helper icb tl alg key (i+1);
assert(index r_cipher (j-1) == quad32_xor (index tl (j-1)) (aes_encrypt_BE alg key (inc32 icb (i + 1 + j - 1)) )) // OBSERVE
) else ()
in
FStar.Classical.forall_intro helper
let gctr_indexed (icb:quad32) (plain:gctr_plain_internal_LE)
(alg:algorithm) (key:aes_key_LE alg) (cipher:seq quad32) : Lemma
(requires length cipher == length plain /\
(forall i . {:pattern index cipher i} 0 <= i /\ i < length cipher ==>
index cipher i == quad32_xor (index plain i) (aes_encrypt_BE alg key (inc32 icb i) )))
(ensures cipher == gctr_encrypt_recursive icb plain alg key 0)
=
gctr_indexed_helper icb plain alg key 0;
let c = gctr_encrypt_recursive icb plain alg key 0 in
assert(equal cipher c) // OBSERVE: Invoke extensionality lemmas
let gctr_partial_completed (alg:algorithm) (plain cipher:seq quad32) (key:seq nat32) (icb:quad32) =
gctr_indexed icb plain alg key cipher;
()
let gctr_partial_opaque_completed (alg:algorithm) (plain cipher:seq quad32) (key:seq nat32) (icb:quad32) : Lemma
(requires
is_aes_key_LE alg key /\
length plain == length cipher /\
length plain < pow2_32 /\
gctr_partial alg (length cipher) plain cipher key icb
)
(ensures cipher == gctr_encrypt_recursive icb plain alg key 0)
=
gctr_partial_reveal ();
gctr_partial_completed alg plain cipher key icb
let gctr_partial_to_full_basic (icb_BE:quad32) (plain:seq quad32) (alg:algorithm) (key:seq nat32) (cipher:seq quad32) =
gctr_encrypt_LE_reveal ();
let p = le_seq_quad32_to_bytes plain in
assert (length p % 16 == 0);
let plain_quads_LE = le_bytes_to_seq_quad32 p in
let cipher_quads_LE = gctr_encrypt_recursive icb_BE plain_quads_LE alg key 0 in
let cipher_bytes = le_seq_quad32_to_bytes cipher_quads_LE in
le_bytes_to_seq_quad32_to_bytes plain;
()
(*
Want to show that:
slice (le_seq_quad32_to_bytes (buffer128_as_seq(mem, out_b))) 0 num_bytes
==
gctr_encrypt_LE icb_BE (slice (le_seq_quad32_to_bytes (buffer128_as_seq(mem, in_b))) 0 num_bytes) ...
We know that
slice (buffer128_as_seq(mem, out_b) 0 num_blocks
==
gctr_encrypt_recursive icb_BE (slice buffer128_as_seq(mem, in_b) 0 num_blocks) ...
And we know that:
get_mem out_b num_blocks
==
gctr_encrypt_block(icb_BE, (get_mem inb num_blocks), alg, key, num_blocks);
Internally gctr_encrypt_LE will compute:
full_blocks, final_block = split (slice (le_seq_quad32_to_bytes (buffer128_as_seq(mem, in_b))) 0 num_bytes) (num_blocks * 16)
We'd like to show that
Step1: le_bytes_to_seq_quad32 full_blocks == slice buffer128_as_seq(mem, in_b) 0 num_blocks
and
Step2: final_block == slice (le_quad32_to_bytes (get_mem inb num_blocks)) 0 num_extra
Then we need to break down the byte-level effects of gctr_encrypt_block to show that even though the
padded version of final_block differs from (get_mem inb num_blocks), after we slice it at the end,
we end up with the same value
*)
let step1 (p:seq quad32) (num_bytes:nat{ num_bytes < 16 * length p }) : Lemma
(let num_extra = num_bytes % 16 in
let num_blocks = num_bytes / 16 in
let full_blocks, final_block = split (slice (le_seq_quad32_to_bytes p) 0 num_bytes) (num_blocks * 16) in
let full_quads_LE = le_bytes_to_seq_quad32 full_blocks in
let p_prefix = slice p 0 num_blocks in
p_prefix == full_quads_LE)
=
let num_extra = num_bytes % 16 in
let num_blocks = num_bytes / 16 in
let full_blocks, final_block = split (slice (le_seq_quad32_to_bytes p) 0 num_bytes) (num_blocks * 16) in
let full_quads_LE = le_bytes_to_seq_quad32 full_blocks in
let p_prefix = slice p 0 num_blocks in
assert (length full_blocks == num_blocks * 16);
assert (full_blocks == slice (slice (le_seq_quad32_to_bytes p) 0 num_bytes) 0 (num_blocks * 16));
assert (full_blocks == slice (le_seq_quad32_to_bytes p) 0 (num_blocks * 16));
slice_commutes_le_seq_quad32_to_bytes0 p num_blocks;
assert (full_blocks == le_seq_quad32_to_bytes (slice p 0 num_blocks));
le_bytes_to_seq_quad32_to_bytes (slice p 0 num_blocks);
assert (full_quads_LE == (slice p 0 num_blocks));
()
#reset-options "--smtencoding.elim_box true --z3rlimit 30"
let lemma_slice_orig_index (#a:Type) (s s':seq a) (m n:nat) : Lemma
(requires length s == length s' /\ m <= n /\ n <= length s /\ slice s m n == slice s' m n)
(ensures (forall (i:int).{:pattern (index s i) \/ (index s' i)} m <= i /\ i < n ==> index s i == index s' i))
=
let aux (i:nat{m <= i /\ i < n}) : Lemma (index s i == index s' i) =
lemma_index_slice s m n (i - m);
lemma_index_slice s' m n (i - m)
in Classical.forall_intro aux
let lemma_ishl_32 (x:nat32) (k:nat) : Lemma
(ensures ishl #pow2_32 x k == x * pow2 k % pow2_32)
=
Vale.Def.TypesNative_s.reveal_ishl 32 x k;
FStar.UInt.shift_left_value_lemma #32 x k;
()
let lemma_ishl_ixor_32 (x y:nat32) (k:nat) : Lemma
(ensures ishl #pow2_32 (ixor x y) k == ixor (ishl x k) (ishl y k))
=
Vale.Def.TypesNative_s.reveal_ishl 32 x k;
Vale.Def.TypesNative_s.reveal_ishl 32 y k;
Vale.Def.TypesNative_s.reveal_ishl 32 (ixor x y) k;
Vale.Def.TypesNative_s.reveal_ixor 32 x y;
Vale.Def.TypesNative_s.reveal_ixor 32 (ishl x k) (ishl y k);
FStar.UInt.shift_left_logxor_lemma #32 x y k;
()
unfold let pow2_24 = 0x1000000
let nat24 = natN pow2_24
let nat32_xor_bytewise_1_helper1 (x0 x0':nat8) (x1 x1':nat24) (x x':nat32) : Lemma
(requires
x == x0 + 0x100 * x1 /\
x' == x0' + 0x100 * x1' /\
x * 0x1000000 % 0x100000000 == x' * 0x1000000 % 0x100000000
)
(ensures x0 == x0')
=
()
let nat32_xor_bytewise_2_helper1 (x0 x0' x1 x1':nat16) (x x':nat32) : Lemma
(requires
x == x0 + 0x10000 * x1 /\
x' == x0' + 0x10000 * x1' /\
x * 0x10000 % 0x100000000 == x' * 0x10000 % 0x100000000
)
(ensures x0 == x0')
=
()
let nat32_xor_bytewise_3_helper1 (x0 x0':nat24) (x1 x1':nat8) (x x':nat32) : Lemma
(requires
x == x0 + 0x1000000 * x1 /\
x' == x0' + 0x1000000 * x1' /\
x * 0x100 % 0x100000000 == x' * 0x100 % 0x100000000
)
(ensures x0 == x0')
=
()
let nat32_xor_bytewise_1_helper2 (x x':nat32) (t t':four nat8) : Lemma
(requires
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
x * 0x1000000 % 0x100000000 == x' * 0x1000000 % 0x100000000
)
(ensures t.lo0 == t'.lo0)
=
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
let t123 = t1 + 0x100 * t2 + 0x10000 * t3 in
let t123' = t1' + 0x100 * t2' + 0x10000 * t3' in
assert_norm (four_to_nat 8 t == four_to_nat_unfold 8 t );
assert_norm (four_to_nat 8 t' == four_to_nat_unfold 8 t');
nat32_xor_bytewise_1_helper1 t0 t0' t123 t123' x x';
()
let nat32_xor_bytewise_2_helper2 (x x':nat32) (t t':four nat8) : Lemma
(requires
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
x * 0x10000 % 0x100000000 == x' * 0x10000 % 0x100000000
)
(ensures t.lo0 == t'.lo0 /\ t.lo1 == t'.lo1)
=
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
let t01 = t0 + 0x100 * t1 in
let t23 = t2 + 0x100 * t3 in
let t01' = t0' + 0x100 * t1' in
let t23' = t2' + 0x100 * t3' in
assert_norm (four_to_nat 8 t == four_to_nat_unfold 8 t );
assert_norm (four_to_nat 8 t' == four_to_nat_unfold 8 t');
nat32_xor_bytewise_2_helper1 t01 t01' t23 t23' x x';
()
let nat32_xor_bytewise_3_helper2 (x x':nat32) (t t':four nat8) : Lemma
(requires
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
x * 0x100 % 0x100000000 == x' * 0x100 % 0x100000000
)
(ensures t.lo0 == t'.lo0 /\ t.lo1 == t'.lo1 /\ t.hi2 == t'.hi2)
=
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
let t012 = t0 + 0x100 * t1 + 0x10000 * t2 in
let t012' = t0' + 0x100 * t1' + 0x10000 * t2' in
assert_norm (four_to_nat 8 t == four_to_nat_unfold 8 t );
assert_norm (four_to_nat 8 t' == four_to_nat_unfold 8 t');
nat32_xor_bytewise_3_helper1 t012 t012' t3 t3' x x';
()
let nat32_xor_bytewise_1_helper3 (k k':nat32) (s s':four nat8) : Lemma
(requires
k == four_to_nat 8 s /\
k' == four_to_nat 8 s' /\
s.lo0 == s'.lo0
)
(ensures k * 0x1000000 % 0x100000000 == k' * 0x1000000 % 0x100000000)
=
let Mkfour _ _ _ _ = s in
let Mkfour _ _ _ _ = s' in
assert_norm (four_to_nat 8 s == four_to_nat_unfold 8 s );
assert_norm (four_to_nat 8 s' == four_to_nat_unfold 8 s');
()
let nat32_xor_bytewise_2_helper3 (k k':nat32) (s s':four nat8) : Lemma
(requires
k == four_to_nat 8 s /\
k' == four_to_nat 8 s' /\
s.lo0 == s'.lo0 /\ s.lo1 == s'.lo1
)
(ensures k * 0x10000 % 0x100000000 == k' * 0x10000 % 0x100000000)
=
let Mkfour _ _ _ _ = s in
let Mkfour _ _ _ _ = s' in
assert_norm (four_to_nat 8 s == four_to_nat_unfold 8 s );
assert_norm (four_to_nat 8 s' == four_to_nat_unfold 8 s');
()
let nat32_xor_bytewise_3_helper3 (k k':nat32) (s s':four nat8) : Lemma
(requires
k == four_to_nat 8 s /\
k' == four_to_nat 8 s' /\
s.lo0 == s'.lo0 /\ s.lo1 == s'.lo1 /\ s.hi2 == s'.hi2
)
(ensures k * 0x100 % 0x100000000 == k' * 0x100 % 0x100000000)
=
let Mkfour _ _ _ _ = s in
let Mkfour _ _ _ _ = s' in
assert_norm (four_to_nat 8 s == four_to_nat_unfold 8 s );
assert_norm (four_to_nat 8 s' == four_to_nat_unfold 8 s');
()
let nat32_xor_bytewise_1 (k k' x x' m:nat32) (s s' t t':four nat8) : Lemma
(requires
k == four_to_nat 8 s /\
k' == four_to_nat 8 s' /\
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
ixor k m == x /\
ixor k' m == x' /\
s.lo0 == s'.lo0
)
(ensures t.lo0 == t'.lo0)
=
let Mkfour s0 s1 s2 s3 = s in
let Mkfour s0' s1' s2' s3' = s' in
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
nat32_xor_bytewise_1_helper3 k k' s s';
lemma_ishl_32 k 24;
lemma_ishl_32 k' 24;
lemma_ishl_32 x 24;
lemma_ishl_32 x' 24;
lemma_ishl_ixor_32 k m 24;
lemma_ishl_ixor_32 k' m 24;
assert_norm (pow2 24 == pow2_24);
nat32_xor_bytewise_1_helper2 x x' t t';
()
let nat32_xor_bytewise_2 (k k' x x' m:nat32) (s s' t t':four nat8) : Lemma
(requires
k == four_to_nat 8 s /\
k' == four_to_nat 8 s' /\
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
ixor k m == x /\
ixor k' m == x' /\
s.lo0 == s'.lo0 /\ s.lo1 == s'.lo1
)
(ensures t.lo0 == t'.lo0 /\ t.lo1 == t'.lo1)
=
let Mkfour s0 s1 s2 s3 = s in
let Mkfour s0' s1' s2' s3' = s' in
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
nat32_xor_bytewise_2_helper3 k k' s s';
lemma_ishl_32 k 16;
lemma_ishl_32 k' 16;
lemma_ishl_32 x 16;
lemma_ishl_32 x' 16;
lemma_ishl_ixor_32 k m 16;
lemma_ishl_ixor_32 k' m 16;
// assert (ishl #pow2_32 k 16 == k * 0x10000 % 0x100000000);
// assert (ishl #pow2_32 k' 16 == k' * 0x10000 % 0x100000000);
// assert (ishl #pow2_32 x 16 == x * 0x10000 % 0x100000000);
// assert (ishl #pow2_32 x' 16 == x' * 0x10000 % 0x100000000);
// assert (ishl #pow2_32 x 16 == ixor (ishl k 16) (ishl m 16));
// assert (ishl #pow2_32 x' 16 == ixor (ishl k' 16) (ishl m 16));
// assert (x * 0x10000 % 0x100000000 == ixor (k * 0x10000 % 0x100000000) (ishl m 16));
// assert (x' * 0x10000 % 0x100000000 == ixor (k' * 0x10000 % 0x100000000) (ishl m 16));
nat32_xor_bytewise_2_helper2 x x' t t';
()
let nat32_xor_bytewise_3 (k k' x x' m:nat32) (s s' t t':four nat8) : Lemma
(requires
k == four_to_nat 8 s /\
k' == four_to_nat 8 s' /\
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
ixor k m == x /\
ixor k' m == x' /\
s.lo0 == s'.lo0 /\ s.lo1 == s'.lo1 /\ s.hi2 == s'.hi2
)
(ensures t.lo0 == t'.lo0 /\ t.lo1 == t'.lo1 /\ t.hi2 == t'.hi2)
=
let Mkfour s0 s1 s2 s3 = s in
let Mkfour s0' s1' s2' s3' = s' in
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
nat32_xor_bytewise_3_helper3 k k' s s';
lemma_ishl_32 k 8;
lemma_ishl_32 k' 8;
lemma_ishl_32 x 8;
lemma_ishl_32 x' 8;
lemma_ishl_ixor_32 k m 8;
lemma_ishl_ixor_32 k' m 8;
nat32_xor_bytewise_3_helper2 x x' t t';
()
#reset-options "--z3rlimit 50 --smtencoding.nl_arith_repr boxwrap --smtencoding.l_arith_repr boxwrap"
let nat32_xor_bytewise_4 (k k' x x' m:nat32) (s s' t t':four nat8) : Lemma
(requires
k == four_to_nat 8 s /\
k' == four_to_nat 8 s' /\
x == four_to_nat 8 t /\
x' == four_to_nat 8 t' /\
ixor k m == x /\
ixor k' m == x' /\
s == s'
)
(ensures t == t')
=
let Mkfour s0 s1 s2 s3 = s in
let Mkfour s0' s1' s2' s3' = s' in
let Mkfour t0 t1 t2 t3 = t in
let Mkfour t0' t1' t2' t3' = t' in
assert_norm (four_to_nat 8 t' == four_to_nat_unfold 8 t');
assert_norm (four_to_nat 8 t == four_to_nat_unfold 8 t );
()
#reset-options
let nat32_xor_bytewise (k k' m:nat32) (s s' t t':seq4 nat8) (n:nat) : Lemma
(requires
n <= 4 /\
k == four_to_nat 8 (seq_to_four_LE s) /\
k' == four_to_nat 8 (seq_to_four_LE s') /\
ixor k m == four_to_nat 8 (seq_to_four_LE t) /\
ixor k' m == four_to_nat 8 (seq_to_four_LE t') /\
equal (slice s 0 n) (slice s' 0 n)
)
// (ensures equal (slice t 0 n) (slice t' 0 n))
(ensures (forall (i:nat).{:pattern (index t i) \/ (index t' i)} i < n ==> index t i == index t' i))
=
assert (n > 0 ==> index (slice s 0 n) 0 == index (slice s' 0 n) 0);
assert (n > 1 ==> index (slice s 0 n) 1 == index (slice s' 0 n) 1);
assert (n > 2 ==> index (slice s 0 n) 2 == index (slice s' 0 n) 2);
assert (n > 3 ==> index (slice s 0 n) 3 == index (slice s' 0 n) 3);
let x = ixor k m in
let x' = ixor k' m in
if n = 1 then nat32_xor_bytewise_1 k k' x x' m (seq_to_four_LE s) (seq_to_four_LE s') (seq_to_four_LE t) (seq_to_four_LE t');
if n = 2 then nat32_xor_bytewise_2 k k' x x' m (seq_to_four_LE s) (seq_to_four_LE s') (seq_to_four_LE t) (seq_to_four_LE t');
if n = 3 then nat32_xor_bytewise_3 k k' x x' m (seq_to_four_LE s) (seq_to_four_LE s') (seq_to_four_LE t) (seq_to_four_LE t');
if n = 4 then nat32_xor_bytewise_4 k k' x x' m (seq_to_four_LE s) (seq_to_four_LE s') (seq_to_four_LE t) (seq_to_four_LE t');
assert (equal (slice t 0 n) (slice t' 0 n));
lemma_slice_orig_index t t' 0 n;
()
let quad32_xor_bytewise (q q' r:quad32) (n:nat{ n <= 16 }) : Lemma
(requires (let q_bytes = le_quad32_to_bytes q in
let q'_bytes = le_quad32_to_bytes q' in
slice q_bytes 0 n == slice q'_bytes 0 n))
(ensures (let q_bytes = le_quad32_to_bytes q in
let q'_bytes = le_quad32_to_bytes q' in
let qr_bytes = le_quad32_to_bytes (quad32_xor q r) in
let q'r_bytes = le_quad32_to_bytes (quad32_xor q' r) in | false | false | Vale.AES.GCTR.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 5,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val quad32_xor_bytewise (q q' r: quad32) (n: nat{n <= 16})
: Lemma
(requires
(let q_bytes = le_quad32_to_bytes q in
let q'_bytes = le_quad32_to_bytes q' in
slice q_bytes 0 n == slice q'_bytes 0 n))
(ensures
(let q_bytes = le_quad32_to_bytes q in
let q'_bytes = le_quad32_to_bytes q' in
let qr_bytes = le_quad32_to_bytes (quad32_xor q r) in
let q'r_bytes = le_quad32_to_bytes (quad32_xor q' r) in
slice qr_bytes 0 n == slice q'r_bytes 0 n)) | [] | Vale.AES.GCTR.quad32_xor_bytewise | {
"file_name": "vale/code/crypto/aes/Vale.AES.GCTR.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} |
q: Vale.Def.Types_s.quad32 ->
q': Vale.Def.Types_s.quad32 ->
r: Vale.Def.Types_s.quad32 ->
n: Prims.nat{n <= 16}
-> FStar.Pervasives.Lemma
(requires
(let q_bytes = Vale.Def.Types_s.le_quad32_to_bytes q in
let q'_bytes = Vale.Def.Types_s.le_quad32_to_bytes q' in
FStar.Seq.Base.slice q_bytes 0 n == FStar.Seq.Base.slice q'_bytes 0 n))
(ensures
(let q_bytes = Vale.Def.Types_s.le_quad32_to_bytes q in
let q'_bytes = Vale.Def.Types_s.le_quad32_to_bytes q' in
let qr_bytes = Vale.Def.Types_s.le_quad32_to_bytes (Vale.Def.Types_s.quad32_xor q r) in
let q'r_bytes = Vale.Def.Types_s.le_quad32_to_bytes (Vale.Def.Types_s.quad32_xor q' r) in
FStar.Seq.Base.slice qr_bytes 0 n == FStar.Seq.Base.slice q'r_bytes 0 n)) | {
"end_col": 4,
"end_line": 598,
"start_col": 3,
"start_line": 568
} |
Prims.Tot | val va_quick_KeyExpansion256Stdcall (win: bool) (input_key_b output_key_expansion_b: buffer128)
: (va_quickCode unit (va_code_KeyExpansion256Stdcall win)) | [
{
"abbrev": false,
"full_module": "Vale.X64.CPU_Features_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES256_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCodes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCode",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsAes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsMem",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsBasic",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Decls",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.CPU_Features_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES256_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCodes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCode",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsAes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsMem",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsBasic",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Decls",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let va_quick_KeyExpansion256Stdcall (win:bool) (input_key_b:buffer128)
(output_key_expansion_b:buffer128) : (va_quickCode unit (va_code_KeyExpansion256Stdcall win)) =
(va_QProc (va_code_KeyExpansion256Stdcall win) ([va_Mod_flags; va_Mod_xmm 4; va_Mod_xmm 3;
va_Mod_xmm 2; va_Mod_xmm 1; va_Mod_mem_heaplet 1; va_Mod_reg64 rRdx; va_Mod_mem])
(va_wp_KeyExpansion256Stdcall win input_key_b output_key_expansion_b)
(va_wpProof_KeyExpansion256Stdcall win input_key_b output_key_expansion_b)) | val va_quick_KeyExpansion256Stdcall (win: bool) (input_key_b output_key_expansion_b: buffer128)
: (va_quickCode unit (va_code_KeyExpansion256Stdcall win))
let va_quick_KeyExpansion256Stdcall (win: bool) (input_key_b output_key_expansion_b: buffer128)
: (va_quickCode unit (va_code_KeyExpansion256Stdcall win)) = | false | null | false | (va_QProc (va_code_KeyExpansion256Stdcall win)
([
va_Mod_flags;
va_Mod_xmm 4;
va_Mod_xmm 3;
va_Mod_xmm 2;
va_Mod_xmm 1;
va_Mod_mem_heaplet 1;
va_Mod_reg64 rRdx;
va_Mod_mem
])
(va_wp_KeyExpansion256Stdcall win input_key_b output_key_expansion_b)
(va_wpProof_KeyExpansion256Stdcall win input_key_b output_key_expansion_b)) | {
"checked_file": "Vale.AES.X64.AES256.fsti.checked",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.QuickCodes.fsti.checked",
"Vale.X64.QuickCode.fst.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.InsVector.fsti.checked",
"Vale.X64.InsMem.fsti.checked",
"Vale.X64.InsBasic.fsti.checked",
"Vale.X64.InsAes.fsti.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.X64.Decls.fsti.checked",
"Vale.X64.CPU_Features_s.fst.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Opaque_s.fsti.checked",
"Vale.Arch.Types.fsti.checked",
"Vale.AES.AES_s.fst.checked",
"Vale.AES.AES256_helpers.fsti.checked",
"prims.fst.checked",
"FStar.Seq.Base.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": false,
"source_file": "Vale.AES.X64.AES256.fsti"
} | [
"total"
] | [
"Prims.bool",
"Vale.X64.Memory.buffer128",
"Vale.X64.QuickCode.va_QProc",
"Prims.unit",
"Vale.AES.X64.AES256.va_code_KeyExpansion256Stdcall",
"Prims.Cons",
"Vale.X64.QuickCode.mod_t",
"Vale.X64.QuickCode.va_Mod_flags",
"Vale.X64.QuickCode.va_Mod_xmm",
"Vale.X64.QuickCode.va_Mod_mem_heaplet",
"Vale.X64.QuickCode.va_Mod_reg64",
"Vale.X64.Machine_s.rRdx",
"Vale.X64.QuickCode.va_Mod_mem",
"Prims.Nil",
"Vale.AES.X64.AES256.va_wp_KeyExpansion256Stdcall",
"Vale.AES.X64.AES256.va_wpProof_KeyExpansion256Stdcall",
"Vale.X64.QuickCode.va_quickCode"
] | [] | module Vale.AES.X64.AES256
open Vale.Def.Opaque_s
open Vale.Def.Types_s
open FStar.Seq
open Vale.AES.AES_s
open Vale.X64.Machine_s
open Vale.X64.Memory
open Vale.X64.State
open Vale.X64.Decls
open Vale.X64.InsBasic
open Vale.X64.InsMem
open Vale.X64.InsVector
open Vale.X64.InsAes
open Vale.X64.QuickCode
open Vale.X64.QuickCodes
open Vale.Arch.Types
open Vale.AES.AES256_helpers
open Vale.X64.CPU_Features_s
#reset-options "--z3rlimit 20"
//-- KeyExpansion256Stdcall
val va_code_KeyExpansion256Stdcall : win:bool -> Tot va_code
val va_codegen_success_KeyExpansion256Stdcall : win:bool -> Tot va_pbool
val va_lemma_KeyExpansion256Stdcall : va_b0:va_code -> va_s0:va_state -> win:bool ->
input_key_b:buffer128 -> output_key_expansion_b:buffer128
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_KeyExpansion256Stdcall win) va_s0 /\ va_get_ok va_s0
/\ (let (key_ptr:(va_int_range 0 18446744073709551615)) = (if win then va_get_reg64 rRcx va_s0
else va_get_reg64 rRdi va_s0) in let (key_expansion_ptr:(va_int_range 0 18446744073709551615))
= (if win then va_get_reg64 rRdx va_s0 else va_get_reg64 rRsi va_s0) in let
(key:(FStar.Seq.Base.seq Vale.Def.Types_s.nat32)) = Vale.AES.AES256_helpers.make_AES256_key
(Vale.X64.Decls.buffer128_read input_key_b 0 (va_get_mem_heaplet 0 va_s0))
(Vale.X64.Decls.buffer128_read input_key_b 1 (va_get_mem_heaplet 0 va_s0)) in aesni_enabled /\
avx_enabled /\ sse_enabled /\ Vale.X64.Decls.validSrcAddrs128 (va_get_mem_heaplet 0 va_s0)
key_ptr input_key_b 2 (va_get_mem_layout va_s0) Secret /\ Vale.X64.Decls.validDstAddrs128
(va_get_mem_heaplet 1 va_s0) key_expansion_ptr output_key_expansion_b 15 (va_get_mem_layout
va_s0) Secret)))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
(let (key_ptr:(va_int_range 0 18446744073709551615)) = (if win then va_get_reg64 rRcx va_s0
else va_get_reg64 rRdi va_s0) in let (key_expansion_ptr:(va_int_range 0 18446744073709551615))
= (if win then va_get_reg64 rRdx va_s0 else va_get_reg64 rRsi va_s0) in let
(key:(FStar.Seq.Base.seq Vale.Def.Types_s.nat32)) = Vale.AES.AES256_helpers.make_AES256_key
(Vale.X64.Decls.buffer128_read input_key_b 0 (va_get_mem_heaplet 0 va_s0))
(Vale.X64.Decls.buffer128_read input_key_b 1 (va_get_mem_heaplet 0 va_s0)) in aesni_enabled /\
avx_enabled /\ sse_enabled /\ Vale.X64.Decls.validSrcAddrs128 (va_get_mem_heaplet 0 va_sM)
key_ptr input_key_b 2 (va_get_mem_layout va_sM) Secret /\ Vale.X64.Decls.validDstAddrs128
(va_get_mem_heaplet 1 va_sM) key_expansion_ptr output_key_expansion_b 15 (va_get_mem_layout
va_sM) Secret) /\ (let (key_ptr:(va_int_range 0 18446744073709551615)) = (if win then
va_get_reg64 rRcx va_s0 else va_get_reg64 rRdi va_s0) in let (key_expansion_ptr:(va_int_range 0
18446744073709551615)) = (if win then va_get_reg64 rRdx va_s0 else va_get_reg64 rRsi va_s0) in
let (key:(FStar.Seq.Base.seq Vale.Def.Types_s.nat32)) = Vale.AES.AES256_helpers.make_AES256_key
(Vale.X64.Decls.buffer128_read input_key_b 0 (va_get_mem_heaplet 0 va_s0))
(Vale.X64.Decls.buffer128_read input_key_b 1 (va_get_mem_heaplet 0 va_s0)) in
Vale.X64.Decls.modifies_buffer128 output_key_expansion_b (va_get_mem_heaplet 1 va_s0)
(va_get_mem_heaplet 1 va_sM) /\ (forall (j:nat) . {:pattern(buffer128_read
output_key_expansion_b j (va_get_mem_heaplet 1 va_sM))}j <= 14 ==>
Vale.X64.Decls.buffer128_read output_key_expansion_b j (va_get_mem_heaplet 1 va_sM) ==
FStar.Seq.Base.index #Vale.Def.Types_s.quad32 (Vale.AES.AES_s.key_to_round_keys_LE AES_256 key)
j)) /\ va_state_eq va_sM (va_update_flags va_sM (va_update_xmm 4 va_sM (va_update_xmm 3 va_sM
(va_update_xmm 2 va_sM (va_update_xmm 1 va_sM (va_update_mem_heaplet 1 va_sM (va_update_reg64
rRdx va_sM (va_update_ok va_sM (va_update_mem va_sM va_s0)))))))))))
[@ va_qattr]
let va_wp_KeyExpansion256Stdcall (win:bool) (input_key_b:buffer128)
(output_key_expansion_b:buffer128) (va_s0:va_state) (va_k:(va_state -> unit -> Type0)) : Type0 =
(va_get_ok va_s0 /\ (let (key_ptr:(va_int_range 0 18446744073709551615)) = va_if win (fun _ ->
va_get_reg64 rRcx va_s0) (fun _ -> va_get_reg64 rRdi va_s0) in let
(key_expansion_ptr:(va_int_range 0 18446744073709551615)) = va_if win (fun _ -> va_get_reg64
rRdx va_s0) (fun _ -> va_get_reg64 rRsi va_s0) in let (key:(FStar.Seq.Base.seq
Vale.Def.Types_s.nat32)) = Vale.AES.AES256_helpers.make_AES256_key
(Vale.X64.Decls.buffer128_read input_key_b 0 (va_get_mem_heaplet 0 va_s0))
(Vale.X64.Decls.buffer128_read input_key_b 1 (va_get_mem_heaplet 0 va_s0)) in aesni_enabled /\
avx_enabled /\ sse_enabled /\ Vale.X64.Decls.validSrcAddrs128 (va_get_mem_heaplet 0 va_s0)
key_ptr input_key_b 2 (va_get_mem_layout va_s0) Secret /\ Vale.X64.Decls.validDstAddrs128
(va_get_mem_heaplet 1 va_s0) key_expansion_ptr output_key_expansion_b 15 (va_get_mem_layout
va_s0) Secret) /\ (forall (va_x_mem:vale_heap) (va_x_rdx:nat64) (va_x_heap1:vale_heap)
(va_x_xmm1:quad32) (va_x_xmm2:quad32) (va_x_xmm3:quad32) (va_x_xmm4:quad32)
(va_x_efl:Vale.X64.Flags.t) . let va_sM = va_upd_flags va_x_efl (va_upd_xmm 4 va_x_xmm4
(va_upd_xmm 3 va_x_xmm3 (va_upd_xmm 2 va_x_xmm2 (va_upd_xmm 1 va_x_xmm1 (va_upd_mem_heaplet 1
va_x_heap1 (va_upd_reg64 rRdx va_x_rdx (va_upd_mem va_x_mem va_s0))))))) in va_get_ok va_sM /\
(let (key_ptr:(va_int_range 0 18446744073709551615)) = va_if win (fun _ -> va_get_reg64 rRcx
va_s0) (fun _ -> va_get_reg64 rRdi va_s0) in let (key_expansion_ptr:(va_int_range 0
18446744073709551615)) = va_if win (fun _ -> va_get_reg64 rRdx va_s0) (fun _ -> va_get_reg64
rRsi va_s0) in let (key:(FStar.Seq.Base.seq Vale.Def.Types_s.nat32)) =
Vale.AES.AES256_helpers.make_AES256_key (Vale.X64.Decls.buffer128_read input_key_b 0
(va_get_mem_heaplet 0 va_s0)) (Vale.X64.Decls.buffer128_read input_key_b 1 (va_get_mem_heaplet
0 va_s0)) in aesni_enabled /\ avx_enabled /\ sse_enabled /\ Vale.X64.Decls.validSrcAddrs128
(va_get_mem_heaplet 0 va_sM) key_ptr input_key_b 2 (va_get_mem_layout va_sM) Secret /\
Vale.X64.Decls.validDstAddrs128 (va_get_mem_heaplet 1 va_sM) key_expansion_ptr
output_key_expansion_b 15 (va_get_mem_layout va_sM) Secret) /\ (let (key_ptr:(va_int_range 0
18446744073709551615)) = va_if win (fun _ -> va_get_reg64 rRcx va_s0) (fun _ -> va_get_reg64
rRdi va_s0) in let (key_expansion_ptr:(va_int_range 0 18446744073709551615)) = va_if win (fun _
-> va_get_reg64 rRdx va_s0) (fun _ -> va_get_reg64 rRsi va_s0) in let (key:(FStar.Seq.Base.seq
Vale.Def.Types_s.nat32)) = Vale.AES.AES256_helpers.make_AES256_key
(Vale.X64.Decls.buffer128_read input_key_b 0 (va_get_mem_heaplet 0 va_s0))
(Vale.X64.Decls.buffer128_read input_key_b 1 (va_get_mem_heaplet 0 va_s0)) in
Vale.X64.Decls.modifies_buffer128 output_key_expansion_b (va_get_mem_heaplet 1 va_s0)
(va_get_mem_heaplet 1 va_sM) /\ (forall (j:nat) . {:pattern(buffer128_read
output_key_expansion_b j (va_get_mem_heaplet 1 va_sM))}j <= 14 ==>
Vale.X64.Decls.buffer128_read output_key_expansion_b j (va_get_mem_heaplet 1 va_sM) ==
FStar.Seq.Base.index #Vale.Def.Types_s.quad32 (Vale.AES.AES_s.key_to_round_keys_LE AES_256 key)
j)) ==> va_k va_sM (())))
val va_wpProof_KeyExpansion256Stdcall : win:bool -> input_key_b:buffer128 ->
output_key_expansion_b:buffer128 -> va_s0:va_state -> va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_KeyExpansion256Stdcall win input_key_b
output_key_expansion_b va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_KeyExpansion256Stdcall win)
([va_Mod_flags; va_Mod_xmm 4; va_Mod_xmm 3; va_Mod_xmm 2; va_Mod_xmm 1; va_Mod_mem_heaplet 1;
va_Mod_reg64 rRdx; va_Mod_mem]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_KeyExpansion256Stdcall (win:bool) (input_key_b:buffer128) | false | false | Vale.AES.X64.AES256.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 20,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val va_quick_KeyExpansion256Stdcall (win: bool) (input_key_b output_key_expansion_b: buffer128)
: (va_quickCode unit (va_code_KeyExpansion256Stdcall win)) | [] | Vale.AES.X64.AES256.va_quick_KeyExpansion256Stdcall | {
"file_name": "obj/Vale.AES.X64.AES256.fsti",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} |
win: Prims.bool ->
input_key_b: Vale.X64.Memory.buffer128 ->
output_key_expansion_b: Vale.X64.Memory.buffer128
-> Vale.X64.QuickCode.va_quickCode Prims.unit
(Vale.AES.X64.AES256.va_code_KeyExpansion256Stdcall win) | {
"end_col": 79,
"end_line": 119,
"start_col": 2,
"start_line": 116
} |
Prims.Tot | val va_quick_AES256EncryptBlock
(input: quad32)
(key: (seq nat32))
(round_keys: (seq quad32))
(keys_buffer: buffer128)
: (va_quickCode unit (va_code_AES256EncryptBlock ())) | [
{
"abbrev": false,
"full_module": "Vale.X64.CPU_Features_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES256_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCodes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCode",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsAes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsMem",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsBasic",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Decls",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.CPU_Features_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES256_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCodes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCode",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsAes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsMem",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsBasic",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Decls",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let va_quick_AES256EncryptBlock (input:quad32) (key:(seq nat32)) (round_keys:(seq quad32))
(keys_buffer:buffer128) : (va_quickCode unit (va_code_AES256EncryptBlock ())) =
(va_QProc (va_code_AES256EncryptBlock ()) ([va_Mod_flags; va_Mod_xmm 2; va_Mod_xmm 0])
(va_wp_AES256EncryptBlock input key round_keys keys_buffer) (va_wpProof_AES256EncryptBlock
input key round_keys keys_buffer)) | val va_quick_AES256EncryptBlock
(input: quad32)
(key: (seq nat32))
(round_keys: (seq quad32))
(keys_buffer: buffer128)
: (va_quickCode unit (va_code_AES256EncryptBlock ()))
let va_quick_AES256EncryptBlock
(input: quad32)
(key: (seq nat32))
(round_keys: (seq quad32))
(keys_buffer: buffer128)
: (va_quickCode unit (va_code_AES256EncryptBlock ())) = | false | null | false | (va_QProc (va_code_AES256EncryptBlock ())
([va_Mod_flags; va_Mod_xmm 2; va_Mod_xmm 0])
(va_wp_AES256EncryptBlock input key round_keys keys_buffer)
(va_wpProof_AES256EncryptBlock input key round_keys keys_buffer)) | {
"checked_file": "Vale.AES.X64.AES256.fsti.checked",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.QuickCodes.fsti.checked",
"Vale.X64.QuickCode.fst.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.InsVector.fsti.checked",
"Vale.X64.InsMem.fsti.checked",
"Vale.X64.InsBasic.fsti.checked",
"Vale.X64.InsAes.fsti.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.X64.Decls.fsti.checked",
"Vale.X64.CPU_Features_s.fst.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Opaque_s.fsti.checked",
"Vale.Arch.Types.fsti.checked",
"Vale.AES.AES_s.fst.checked",
"Vale.AES.AES256_helpers.fsti.checked",
"prims.fst.checked",
"FStar.Seq.Base.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": false,
"source_file": "Vale.AES.X64.AES256.fsti"
} | [
"total"
] | [
"Vale.X64.Decls.quad32",
"FStar.Seq.Base.seq",
"Vale.X64.Memory.nat32",
"Vale.X64.Memory.buffer128",
"Vale.X64.QuickCode.va_QProc",
"Prims.unit",
"Vale.AES.X64.AES256.va_code_AES256EncryptBlock",
"Prims.Cons",
"Vale.X64.QuickCode.mod_t",
"Vale.X64.QuickCode.va_Mod_flags",
"Vale.X64.QuickCode.va_Mod_xmm",
"Prims.Nil",
"Vale.AES.X64.AES256.va_wp_AES256EncryptBlock",
"Vale.AES.X64.AES256.va_wpProof_AES256EncryptBlock",
"Vale.X64.QuickCode.va_quickCode"
] | [] | module Vale.AES.X64.AES256
open Vale.Def.Opaque_s
open Vale.Def.Types_s
open FStar.Seq
open Vale.AES.AES_s
open Vale.X64.Machine_s
open Vale.X64.Memory
open Vale.X64.State
open Vale.X64.Decls
open Vale.X64.InsBasic
open Vale.X64.InsMem
open Vale.X64.InsVector
open Vale.X64.InsAes
open Vale.X64.QuickCode
open Vale.X64.QuickCodes
open Vale.Arch.Types
open Vale.AES.AES256_helpers
open Vale.X64.CPU_Features_s
#reset-options "--z3rlimit 20"
//-- KeyExpansion256Stdcall
val va_code_KeyExpansion256Stdcall : win:bool -> Tot va_code
val va_codegen_success_KeyExpansion256Stdcall : win:bool -> Tot va_pbool
val va_lemma_KeyExpansion256Stdcall : va_b0:va_code -> va_s0:va_state -> win:bool ->
input_key_b:buffer128 -> output_key_expansion_b:buffer128
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_KeyExpansion256Stdcall win) va_s0 /\ va_get_ok va_s0
/\ (let (key_ptr:(va_int_range 0 18446744073709551615)) = (if win then va_get_reg64 rRcx va_s0
else va_get_reg64 rRdi va_s0) in let (key_expansion_ptr:(va_int_range 0 18446744073709551615))
= (if win then va_get_reg64 rRdx va_s0 else va_get_reg64 rRsi va_s0) in let
(key:(FStar.Seq.Base.seq Vale.Def.Types_s.nat32)) = Vale.AES.AES256_helpers.make_AES256_key
(Vale.X64.Decls.buffer128_read input_key_b 0 (va_get_mem_heaplet 0 va_s0))
(Vale.X64.Decls.buffer128_read input_key_b 1 (va_get_mem_heaplet 0 va_s0)) in aesni_enabled /\
avx_enabled /\ sse_enabled /\ Vale.X64.Decls.validSrcAddrs128 (va_get_mem_heaplet 0 va_s0)
key_ptr input_key_b 2 (va_get_mem_layout va_s0) Secret /\ Vale.X64.Decls.validDstAddrs128
(va_get_mem_heaplet 1 va_s0) key_expansion_ptr output_key_expansion_b 15 (va_get_mem_layout
va_s0) Secret)))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
(let (key_ptr:(va_int_range 0 18446744073709551615)) = (if win then va_get_reg64 rRcx va_s0
else va_get_reg64 rRdi va_s0) in let (key_expansion_ptr:(va_int_range 0 18446744073709551615))
= (if win then va_get_reg64 rRdx va_s0 else va_get_reg64 rRsi va_s0) in let
(key:(FStar.Seq.Base.seq Vale.Def.Types_s.nat32)) = Vale.AES.AES256_helpers.make_AES256_key
(Vale.X64.Decls.buffer128_read input_key_b 0 (va_get_mem_heaplet 0 va_s0))
(Vale.X64.Decls.buffer128_read input_key_b 1 (va_get_mem_heaplet 0 va_s0)) in aesni_enabled /\
avx_enabled /\ sse_enabled /\ Vale.X64.Decls.validSrcAddrs128 (va_get_mem_heaplet 0 va_sM)
key_ptr input_key_b 2 (va_get_mem_layout va_sM) Secret /\ Vale.X64.Decls.validDstAddrs128
(va_get_mem_heaplet 1 va_sM) key_expansion_ptr output_key_expansion_b 15 (va_get_mem_layout
va_sM) Secret) /\ (let (key_ptr:(va_int_range 0 18446744073709551615)) = (if win then
va_get_reg64 rRcx va_s0 else va_get_reg64 rRdi va_s0) in let (key_expansion_ptr:(va_int_range 0
18446744073709551615)) = (if win then va_get_reg64 rRdx va_s0 else va_get_reg64 rRsi va_s0) in
let (key:(FStar.Seq.Base.seq Vale.Def.Types_s.nat32)) = Vale.AES.AES256_helpers.make_AES256_key
(Vale.X64.Decls.buffer128_read input_key_b 0 (va_get_mem_heaplet 0 va_s0))
(Vale.X64.Decls.buffer128_read input_key_b 1 (va_get_mem_heaplet 0 va_s0)) in
Vale.X64.Decls.modifies_buffer128 output_key_expansion_b (va_get_mem_heaplet 1 va_s0)
(va_get_mem_heaplet 1 va_sM) /\ (forall (j:nat) . {:pattern(buffer128_read
output_key_expansion_b j (va_get_mem_heaplet 1 va_sM))}j <= 14 ==>
Vale.X64.Decls.buffer128_read output_key_expansion_b j (va_get_mem_heaplet 1 va_sM) ==
FStar.Seq.Base.index #Vale.Def.Types_s.quad32 (Vale.AES.AES_s.key_to_round_keys_LE AES_256 key)
j)) /\ va_state_eq va_sM (va_update_flags va_sM (va_update_xmm 4 va_sM (va_update_xmm 3 va_sM
(va_update_xmm 2 va_sM (va_update_xmm 1 va_sM (va_update_mem_heaplet 1 va_sM (va_update_reg64
rRdx va_sM (va_update_ok va_sM (va_update_mem va_sM va_s0)))))))))))
[@ va_qattr]
let va_wp_KeyExpansion256Stdcall (win:bool) (input_key_b:buffer128)
(output_key_expansion_b:buffer128) (va_s0:va_state) (va_k:(va_state -> unit -> Type0)) : Type0 =
(va_get_ok va_s0 /\ (let (key_ptr:(va_int_range 0 18446744073709551615)) = va_if win (fun _ ->
va_get_reg64 rRcx va_s0) (fun _ -> va_get_reg64 rRdi va_s0) in let
(key_expansion_ptr:(va_int_range 0 18446744073709551615)) = va_if win (fun _ -> va_get_reg64
rRdx va_s0) (fun _ -> va_get_reg64 rRsi va_s0) in let (key:(FStar.Seq.Base.seq
Vale.Def.Types_s.nat32)) = Vale.AES.AES256_helpers.make_AES256_key
(Vale.X64.Decls.buffer128_read input_key_b 0 (va_get_mem_heaplet 0 va_s0))
(Vale.X64.Decls.buffer128_read input_key_b 1 (va_get_mem_heaplet 0 va_s0)) in aesni_enabled /\
avx_enabled /\ sse_enabled /\ Vale.X64.Decls.validSrcAddrs128 (va_get_mem_heaplet 0 va_s0)
key_ptr input_key_b 2 (va_get_mem_layout va_s0) Secret /\ Vale.X64.Decls.validDstAddrs128
(va_get_mem_heaplet 1 va_s0) key_expansion_ptr output_key_expansion_b 15 (va_get_mem_layout
va_s0) Secret) /\ (forall (va_x_mem:vale_heap) (va_x_rdx:nat64) (va_x_heap1:vale_heap)
(va_x_xmm1:quad32) (va_x_xmm2:quad32) (va_x_xmm3:quad32) (va_x_xmm4:quad32)
(va_x_efl:Vale.X64.Flags.t) . let va_sM = va_upd_flags va_x_efl (va_upd_xmm 4 va_x_xmm4
(va_upd_xmm 3 va_x_xmm3 (va_upd_xmm 2 va_x_xmm2 (va_upd_xmm 1 va_x_xmm1 (va_upd_mem_heaplet 1
va_x_heap1 (va_upd_reg64 rRdx va_x_rdx (va_upd_mem va_x_mem va_s0))))))) in va_get_ok va_sM /\
(let (key_ptr:(va_int_range 0 18446744073709551615)) = va_if win (fun _ -> va_get_reg64 rRcx
va_s0) (fun _ -> va_get_reg64 rRdi va_s0) in let (key_expansion_ptr:(va_int_range 0
18446744073709551615)) = va_if win (fun _ -> va_get_reg64 rRdx va_s0) (fun _ -> va_get_reg64
rRsi va_s0) in let (key:(FStar.Seq.Base.seq Vale.Def.Types_s.nat32)) =
Vale.AES.AES256_helpers.make_AES256_key (Vale.X64.Decls.buffer128_read input_key_b 0
(va_get_mem_heaplet 0 va_s0)) (Vale.X64.Decls.buffer128_read input_key_b 1 (va_get_mem_heaplet
0 va_s0)) in aesni_enabled /\ avx_enabled /\ sse_enabled /\ Vale.X64.Decls.validSrcAddrs128
(va_get_mem_heaplet 0 va_sM) key_ptr input_key_b 2 (va_get_mem_layout va_sM) Secret /\
Vale.X64.Decls.validDstAddrs128 (va_get_mem_heaplet 1 va_sM) key_expansion_ptr
output_key_expansion_b 15 (va_get_mem_layout va_sM) Secret) /\ (let (key_ptr:(va_int_range 0
18446744073709551615)) = va_if win (fun _ -> va_get_reg64 rRcx va_s0) (fun _ -> va_get_reg64
rRdi va_s0) in let (key_expansion_ptr:(va_int_range 0 18446744073709551615)) = va_if win (fun _
-> va_get_reg64 rRdx va_s0) (fun _ -> va_get_reg64 rRsi va_s0) in let (key:(FStar.Seq.Base.seq
Vale.Def.Types_s.nat32)) = Vale.AES.AES256_helpers.make_AES256_key
(Vale.X64.Decls.buffer128_read input_key_b 0 (va_get_mem_heaplet 0 va_s0))
(Vale.X64.Decls.buffer128_read input_key_b 1 (va_get_mem_heaplet 0 va_s0)) in
Vale.X64.Decls.modifies_buffer128 output_key_expansion_b (va_get_mem_heaplet 1 va_s0)
(va_get_mem_heaplet 1 va_sM) /\ (forall (j:nat) . {:pattern(buffer128_read
output_key_expansion_b j (va_get_mem_heaplet 1 va_sM))}j <= 14 ==>
Vale.X64.Decls.buffer128_read output_key_expansion_b j (va_get_mem_heaplet 1 va_sM) ==
FStar.Seq.Base.index #Vale.Def.Types_s.quad32 (Vale.AES.AES_s.key_to_round_keys_LE AES_256 key)
j)) ==> va_k va_sM (())))
val va_wpProof_KeyExpansion256Stdcall : win:bool -> input_key_b:buffer128 ->
output_key_expansion_b:buffer128 -> va_s0:va_state -> va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_KeyExpansion256Stdcall win input_key_b
output_key_expansion_b va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_KeyExpansion256Stdcall win)
([va_Mod_flags; va_Mod_xmm 4; va_Mod_xmm 3; va_Mod_xmm 2; va_Mod_xmm 1; va_Mod_mem_heaplet 1;
va_Mod_reg64 rRdx; va_Mod_mem]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_KeyExpansion256Stdcall (win:bool) (input_key_b:buffer128)
(output_key_expansion_b:buffer128) : (va_quickCode unit (va_code_KeyExpansion256Stdcall win)) =
(va_QProc (va_code_KeyExpansion256Stdcall win) ([va_Mod_flags; va_Mod_xmm 4; va_Mod_xmm 3;
va_Mod_xmm 2; va_Mod_xmm 1; va_Mod_mem_heaplet 1; va_Mod_reg64 rRdx; va_Mod_mem])
(va_wp_KeyExpansion256Stdcall win input_key_b output_key_expansion_b)
(va_wpProof_KeyExpansion256Stdcall win input_key_b output_key_expansion_b))
//--
//-- AES256EncryptBlock
val va_code_AES256EncryptBlock : va_dummy:unit -> Tot va_code
val va_codegen_success_AES256EncryptBlock : va_dummy:unit -> Tot va_pbool
val va_lemma_AES256EncryptBlock : va_b0:va_code -> va_s0:va_state -> input:quad32 -> key:(seq
nat32) -> round_keys:(seq quad32) -> keys_buffer:buffer128
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_AES256EncryptBlock ()) va_s0 /\ va_get_ok va_s0 /\
(aesni_enabled /\ sse_enabled) /\ Vale.AES.AES_s.is_aes_key_LE AES_256 key /\
FStar.Seq.Base.length #quad32 round_keys == 15 /\ round_keys ==
Vale.AES.AES_s.key_to_round_keys_LE AES_256 key /\ va_get_xmm 0 va_s0 == input /\ va_get_reg64
rR8 va_s0 == Vale.X64.Memory.buffer_addr #Vale.X64.Memory.vuint128 keys_buffer
(va_get_mem_heaplet 0 va_s0) /\ Vale.X64.Decls.validSrcAddrs128 (va_get_mem_heaplet 0 va_s0)
(va_get_reg64 rR8 va_s0) keys_buffer 15 (va_get_mem_layout va_s0) Secret /\ (forall (i:nat) . i
< 15 ==> Vale.X64.Decls.buffer128_read keys_buffer i (va_get_mem_heaplet 0 va_s0) ==
FStar.Seq.Base.index #quad32 round_keys i)))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_get_xmm 0 va_sM == Vale.AES.AES_s.aes_encrypt_LE AES_256 key input /\ va_state_eq va_sM
(va_update_flags va_sM (va_update_xmm 2 va_sM (va_update_xmm 0 va_sM (va_update_ok va_sM
va_s0))))))
[@ va_qattr]
let va_wp_AES256EncryptBlock (input:quad32) (key:(seq nat32)) (round_keys:(seq quad32))
(keys_buffer:buffer128) (va_s0:va_state) (va_k:(va_state -> unit -> Type0)) : Type0 =
(va_get_ok va_s0 /\ (aesni_enabled /\ sse_enabled) /\ Vale.AES.AES_s.is_aes_key_LE AES_256 key /\
FStar.Seq.Base.length #quad32 round_keys == 15 /\ round_keys ==
Vale.AES.AES_s.key_to_round_keys_LE AES_256 key /\ va_get_xmm 0 va_s0 == input /\ va_get_reg64
rR8 va_s0 == Vale.X64.Memory.buffer_addr #Vale.X64.Memory.vuint128 keys_buffer
(va_get_mem_heaplet 0 va_s0) /\ Vale.X64.Decls.validSrcAddrs128 (va_get_mem_heaplet 0 va_s0)
(va_get_reg64 rR8 va_s0) keys_buffer 15 (va_get_mem_layout va_s0) Secret /\ (forall (i:nat) . i
< 15 ==> Vale.X64.Decls.buffer128_read keys_buffer i (va_get_mem_heaplet 0 va_s0) ==
FStar.Seq.Base.index #quad32 round_keys i) /\ (forall (va_x_xmm0:quad32) (va_x_xmm2:quad32)
(va_x_efl:Vale.X64.Flags.t) . let va_sM = va_upd_flags va_x_efl (va_upd_xmm 2 va_x_xmm2
(va_upd_xmm 0 va_x_xmm0 va_s0)) in va_get_ok va_sM /\ va_get_xmm 0 va_sM ==
Vale.AES.AES_s.aes_encrypt_LE AES_256 key input ==> va_k va_sM (())))
val va_wpProof_AES256EncryptBlock : input:quad32 -> key:(seq nat32) -> round_keys:(seq quad32) ->
keys_buffer:buffer128 -> va_s0:va_state -> va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_AES256EncryptBlock input key round_keys keys_buffer va_s0
va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_AES256EncryptBlock ()) ([va_Mod_flags;
va_Mod_xmm 2; va_Mod_xmm 0]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_AES256EncryptBlock (input:quad32) (key:(seq nat32)) (round_keys:(seq quad32)) | false | false | Vale.AES.X64.AES256.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 20,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val va_quick_AES256EncryptBlock
(input: quad32)
(key: (seq nat32))
(round_keys: (seq quad32))
(keys_buffer: buffer128)
: (va_quickCode unit (va_code_AES256EncryptBlock ())) | [] | Vale.AES.X64.AES256.va_quick_AES256EncryptBlock | {
"file_name": "obj/Vale.AES.X64.AES256.fsti",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} |
input: Vale.X64.Decls.quad32 ->
key: FStar.Seq.Base.seq Vale.X64.Memory.nat32 ->
round_keys: FStar.Seq.Base.seq Vale.X64.Decls.quad32 ->
keys_buffer: Vale.X64.Memory.buffer128
-> Vale.X64.QuickCode.va_quickCode Prims.unit (Vale.AES.X64.AES256.va_code_AES256EncryptBlock ()) | {
"end_col": 38,
"end_line": 170,
"start_col": 2,
"start_line": 168
} |
Prims.Tot | val va_wp_AES256EncryptBlock
(input: quad32)
(key: (seq nat32))
(round_keys: (seq quad32))
(keys_buffer: buffer128)
(va_s0: va_state)
(va_k: (va_state -> unit -> Type0))
: Type0 | [
{
"abbrev": false,
"full_module": "Vale.X64.CPU_Features_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES256_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCodes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCode",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsAes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsMem",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsBasic",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Decls",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.CPU_Features_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES256_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCodes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCode",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsAes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsMem",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsBasic",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Decls",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let va_wp_AES256EncryptBlock (input:quad32) (key:(seq nat32)) (round_keys:(seq quad32))
(keys_buffer:buffer128) (va_s0:va_state) (va_k:(va_state -> unit -> Type0)) : Type0 =
(va_get_ok va_s0 /\ (aesni_enabled /\ sse_enabled) /\ Vale.AES.AES_s.is_aes_key_LE AES_256 key /\
FStar.Seq.Base.length #quad32 round_keys == 15 /\ round_keys ==
Vale.AES.AES_s.key_to_round_keys_LE AES_256 key /\ va_get_xmm 0 va_s0 == input /\ va_get_reg64
rR8 va_s0 == Vale.X64.Memory.buffer_addr #Vale.X64.Memory.vuint128 keys_buffer
(va_get_mem_heaplet 0 va_s0) /\ Vale.X64.Decls.validSrcAddrs128 (va_get_mem_heaplet 0 va_s0)
(va_get_reg64 rR8 va_s0) keys_buffer 15 (va_get_mem_layout va_s0) Secret /\ (forall (i:nat) . i
< 15 ==> Vale.X64.Decls.buffer128_read keys_buffer i (va_get_mem_heaplet 0 va_s0) ==
FStar.Seq.Base.index #quad32 round_keys i) /\ (forall (va_x_xmm0:quad32) (va_x_xmm2:quad32)
(va_x_efl:Vale.X64.Flags.t) . let va_sM = va_upd_flags va_x_efl (va_upd_xmm 2 va_x_xmm2
(va_upd_xmm 0 va_x_xmm0 va_s0)) in va_get_ok va_sM /\ va_get_xmm 0 va_sM ==
Vale.AES.AES_s.aes_encrypt_LE AES_256 key input ==> va_k va_sM (()))) | val va_wp_AES256EncryptBlock
(input: quad32)
(key: (seq nat32))
(round_keys: (seq quad32))
(keys_buffer: buffer128)
(va_s0: va_state)
(va_k: (va_state -> unit -> Type0))
: Type0
let va_wp_AES256EncryptBlock
(input: quad32)
(key: (seq nat32))
(round_keys: (seq quad32))
(keys_buffer: buffer128)
(va_s0: va_state)
(va_k: (va_state -> unit -> Type0))
: Type0 = | false | null | false | (va_get_ok va_s0 /\ (aesni_enabled /\ sse_enabled) /\ Vale.AES.AES_s.is_aes_key_LE AES_256 key /\
FStar.Seq.Base.length #quad32 round_keys == 15 /\
round_keys == Vale.AES.AES_s.key_to_round_keys_LE AES_256 key /\ va_get_xmm 0 va_s0 == input /\
va_get_reg64 rR8 va_s0 ==
Vale.X64.Memory.buffer_addr #Vale.X64.Memory.vuint128 keys_buffer (va_get_mem_heaplet 0 va_s0) /\
Vale.X64.Decls.validSrcAddrs128 (va_get_mem_heaplet 0 va_s0)
(va_get_reg64 rR8 va_s0)
keys_buffer
15
(va_get_mem_layout va_s0)
Secret /\
(forall (i: nat).
i < 15 ==>
Vale.X64.Decls.buffer128_read keys_buffer i (va_get_mem_heaplet 0 va_s0) ==
FStar.Seq.Base.index #quad32 round_keys i) /\
(forall (va_x_xmm0: quad32) (va_x_xmm2: quad32) (va_x_efl: Vale.X64.Flags.t).
let va_sM = va_upd_flags va_x_efl (va_upd_xmm 2 va_x_xmm2 (va_upd_xmm 0 va_x_xmm0 va_s0)) in
va_get_ok va_sM /\ va_get_xmm 0 va_sM == Vale.AES.AES_s.aes_encrypt_LE AES_256 key input ==>
va_k va_sM (()))) | {
"checked_file": "Vale.AES.X64.AES256.fsti.checked",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.QuickCodes.fsti.checked",
"Vale.X64.QuickCode.fst.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.InsVector.fsti.checked",
"Vale.X64.InsMem.fsti.checked",
"Vale.X64.InsBasic.fsti.checked",
"Vale.X64.InsAes.fsti.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.X64.Decls.fsti.checked",
"Vale.X64.CPU_Features_s.fst.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Opaque_s.fsti.checked",
"Vale.Arch.Types.fsti.checked",
"Vale.AES.AES_s.fst.checked",
"Vale.AES.AES256_helpers.fsti.checked",
"prims.fst.checked",
"FStar.Seq.Base.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": false,
"source_file": "Vale.AES.X64.AES256.fsti"
} | [
"total"
] | [
"Vale.X64.Decls.quad32",
"FStar.Seq.Base.seq",
"Vale.X64.Memory.nat32",
"Vale.X64.Memory.buffer128",
"Vale.X64.Decls.va_state",
"Prims.unit",
"Prims.l_and",
"Prims.b2t",
"Vale.X64.Decls.va_get_ok",
"Vale.X64.CPU_Features_s.aesni_enabled",
"Vale.X64.CPU_Features_s.sse_enabled",
"Vale.AES.AES_s.is_aes_key_LE",
"Vale.AES.AES_common_s.AES_256",
"Prims.eq2",
"Prims.int",
"FStar.Seq.Base.length",
"Vale.AES.AES_s.key_to_round_keys_LE",
"Vale.X64.Decls.va_get_xmm",
"Vale.X64.Decls.va_get_reg64",
"Vale.X64.Machine_s.rR8",
"Vale.X64.Memory.buffer_addr",
"Vale.X64.Memory.vuint128",
"Vale.X64.Decls.va_get_mem_heaplet",
"Vale.X64.Decls.validSrcAddrs128",
"Vale.X64.Decls.va_get_mem_layout",
"Vale.Arch.HeapTypes_s.Secret",
"Prims.l_Forall",
"Prims.nat",
"Prims.l_imp",
"Prims.op_LessThan",
"Vale.X64.Decls.buffer128_read",
"FStar.Seq.Base.index",
"Vale.X64.Flags.t",
"Vale.Def.Types_s.quad32",
"Vale.AES.AES_s.aes_encrypt_LE",
"Vale.X64.State.vale_state",
"Vale.X64.Decls.va_upd_flags",
"Vale.X64.Decls.va_upd_xmm"
] | [] | module Vale.AES.X64.AES256
open Vale.Def.Opaque_s
open Vale.Def.Types_s
open FStar.Seq
open Vale.AES.AES_s
open Vale.X64.Machine_s
open Vale.X64.Memory
open Vale.X64.State
open Vale.X64.Decls
open Vale.X64.InsBasic
open Vale.X64.InsMem
open Vale.X64.InsVector
open Vale.X64.InsAes
open Vale.X64.QuickCode
open Vale.X64.QuickCodes
open Vale.Arch.Types
open Vale.AES.AES256_helpers
open Vale.X64.CPU_Features_s
#reset-options "--z3rlimit 20"
//-- KeyExpansion256Stdcall
val va_code_KeyExpansion256Stdcall : win:bool -> Tot va_code
val va_codegen_success_KeyExpansion256Stdcall : win:bool -> Tot va_pbool
val va_lemma_KeyExpansion256Stdcall : va_b0:va_code -> va_s0:va_state -> win:bool ->
input_key_b:buffer128 -> output_key_expansion_b:buffer128
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_KeyExpansion256Stdcall win) va_s0 /\ va_get_ok va_s0
/\ (let (key_ptr:(va_int_range 0 18446744073709551615)) = (if win then va_get_reg64 rRcx va_s0
else va_get_reg64 rRdi va_s0) in let (key_expansion_ptr:(va_int_range 0 18446744073709551615))
= (if win then va_get_reg64 rRdx va_s0 else va_get_reg64 rRsi va_s0) in let
(key:(FStar.Seq.Base.seq Vale.Def.Types_s.nat32)) = Vale.AES.AES256_helpers.make_AES256_key
(Vale.X64.Decls.buffer128_read input_key_b 0 (va_get_mem_heaplet 0 va_s0))
(Vale.X64.Decls.buffer128_read input_key_b 1 (va_get_mem_heaplet 0 va_s0)) in aesni_enabled /\
avx_enabled /\ sse_enabled /\ Vale.X64.Decls.validSrcAddrs128 (va_get_mem_heaplet 0 va_s0)
key_ptr input_key_b 2 (va_get_mem_layout va_s0) Secret /\ Vale.X64.Decls.validDstAddrs128
(va_get_mem_heaplet 1 va_s0) key_expansion_ptr output_key_expansion_b 15 (va_get_mem_layout
va_s0) Secret)))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
(let (key_ptr:(va_int_range 0 18446744073709551615)) = (if win then va_get_reg64 rRcx va_s0
else va_get_reg64 rRdi va_s0) in let (key_expansion_ptr:(va_int_range 0 18446744073709551615))
= (if win then va_get_reg64 rRdx va_s0 else va_get_reg64 rRsi va_s0) in let
(key:(FStar.Seq.Base.seq Vale.Def.Types_s.nat32)) = Vale.AES.AES256_helpers.make_AES256_key
(Vale.X64.Decls.buffer128_read input_key_b 0 (va_get_mem_heaplet 0 va_s0))
(Vale.X64.Decls.buffer128_read input_key_b 1 (va_get_mem_heaplet 0 va_s0)) in aesni_enabled /\
avx_enabled /\ sse_enabled /\ Vale.X64.Decls.validSrcAddrs128 (va_get_mem_heaplet 0 va_sM)
key_ptr input_key_b 2 (va_get_mem_layout va_sM) Secret /\ Vale.X64.Decls.validDstAddrs128
(va_get_mem_heaplet 1 va_sM) key_expansion_ptr output_key_expansion_b 15 (va_get_mem_layout
va_sM) Secret) /\ (let (key_ptr:(va_int_range 0 18446744073709551615)) = (if win then
va_get_reg64 rRcx va_s0 else va_get_reg64 rRdi va_s0) in let (key_expansion_ptr:(va_int_range 0
18446744073709551615)) = (if win then va_get_reg64 rRdx va_s0 else va_get_reg64 rRsi va_s0) in
let (key:(FStar.Seq.Base.seq Vale.Def.Types_s.nat32)) = Vale.AES.AES256_helpers.make_AES256_key
(Vale.X64.Decls.buffer128_read input_key_b 0 (va_get_mem_heaplet 0 va_s0))
(Vale.X64.Decls.buffer128_read input_key_b 1 (va_get_mem_heaplet 0 va_s0)) in
Vale.X64.Decls.modifies_buffer128 output_key_expansion_b (va_get_mem_heaplet 1 va_s0)
(va_get_mem_heaplet 1 va_sM) /\ (forall (j:nat) . {:pattern(buffer128_read
output_key_expansion_b j (va_get_mem_heaplet 1 va_sM))}j <= 14 ==>
Vale.X64.Decls.buffer128_read output_key_expansion_b j (va_get_mem_heaplet 1 va_sM) ==
FStar.Seq.Base.index #Vale.Def.Types_s.quad32 (Vale.AES.AES_s.key_to_round_keys_LE AES_256 key)
j)) /\ va_state_eq va_sM (va_update_flags va_sM (va_update_xmm 4 va_sM (va_update_xmm 3 va_sM
(va_update_xmm 2 va_sM (va_update_xmm 1 va_sM (va_update_mem_heaplet 1 va_sM (va_update_reg64
rRdx va_sM (va_update_ok va_sM (va_update_mem va_sM va_s0)))))))))))
[@ va_qattr]
let va_wp_KeyExpansion256Stdcall (win:bool) (input_key_b:buffer128)
(output_key_expansion_b:buffer128) (va_s0:va_state) (va_k:(va_state -> unit -> Type0)) : Type0 =
(va_get_ok va_s0 /\ (let (key_ptr:(va_int_range 0 18446744073709551615)) = va_if win (fun _ ->
va_get_reg64 rRcx va_s0) (fun _ -> va_get_reg64 rRdi va_s0) in let
(key_expansion_ptr:(va_int_range 0 18446744073709551615)) = va_if win (fun _ -> va_get_reg64
rRdx va_s0) (fun _ -> va_get_reg64 rRsi va_s0) in let (key:(FStar.Seq.Base.seq
Vale.Def.Types_s.nat32)) = Vale.AES.AES256_helpers.make_AES256_key
(Vale.X64.Decls.buffer128_read input_key_b 0 (va_get_mem_heaplet 0 va_s0))
(Vale.X64.Decls.buffer128_read input_key_b 1 (va_get_mem_heaplet 0 va_s0)) in aesni_enabled /\
avx_enabled /\ sse_enabled /\ Vale.X64.Decls.validSrcAddrs128 (va_get_mem_heaplet 0 va_s0)
key_ptr input_key_b 2 (va_get_mem_layout va_s0) Secret /\ Vale.X64.Decls.validDstAddrs128
(va_get_mem_heaplet 1 va_s0) key_expansion_ptr output_key_expansion_b 15 (va_get_mem_layout
va_s0) Secret) /\ (forall (va_x_mem:vale_heap) (va_x_rdx:nat64) (va_x_heap1:vale_heap)
(va_x_xmm1:quad32) (va_x_xmm2:quad32) (va_x_xmm3:quad32) (va_x_xmm4:quad32)
(va_x_efl:Vale.X64.Flags.t) . let va_sM = va_upd_flags va_x_efl (va_upd_xmm 4 va_x_xmm4
(va_upd_xmm 3 va_x_xmm3 (va_upd_xmm 2 va_x_xmm2 (va_upd_xmm 1 va_x_xmm1 (va_upd_mem_heaplet 1
va_x_heap1 (va_upd_reg64 rRdx va_x_rdx (va_upd_mem va_x_mem va_s0))))))) in va_get_ok va_sM /\
(let (key_ptr:(va_int_range 0 18446744073709551615)) = va_if win (fun _ -> va_get_reg64 rRcx
va_s0) (fun _ -> va_get_reg64 rRdi va_s0) in let (key_expansion_ptr:(va_int_range 0
18446744073709551615)) = va_if win (fun _ -> va_get_reg64 rRdx va_s0) (fun _ -> va_get_reg64
rRsi va_s0) in let (key:(FStar.Seq.Base.seq Vale.Def.Types_s.nat32)) =
Vale.AES.AES256_helpers.make_AES256_key (Vale.X64.Decls.buffer128_read input_key_b 0
(va_get_mem_heaplet 0 va_s0)) (Vale.X64.Decls.buffer128_read input_key_b 1 (va_get_mem_heaplet
0 va_s0)) in aesni_enabled /\ avx_enabled /\ sse_enabled /\ Vale.X64.Decls.validSrcAddrs128
(va_get_mem_heaplet 0 va_sM) key_ptr input_key_b 2 (va_get_mem_layout va_sM) Secret /\
Vale.X64.Decls.validDstAddrs128 (va_get_mem_heaplet 1 va_sM) key_expansion_ptr
output_key_expansion_b 15 (va_get_mem_layout va_sM) Secret) /\ (let (key_ptr:(va_int_range 0
18446744073709551615)) = va_if win (fun _ -> va_get_reg64 rRcx va_s0) (fun _ -> va_get_reg64
rRdi va_s0) in let (key_expansion_ptr:(va_int_range 0 18446744073709551615)) = va_if win (fun _
-> va_get_reg64 rRdx va_s0) (fun _ -> va_get_reg64 rRsi va_s0) in let (key:(FStar.Seq.Base.seq
Vale.Def.Types_s.nat32)) = Vale.AES.AES256_helpers.make_AES256_key
(Vale.X64.Decls.buffer128_read input_key_b 0 (va_get_mem_heaplet 0 va_s0))
(Vale.X64.Decls.buffer128_read input_key_b 1 (va_get_mem_heaplet 0 va_s0)) in
Vale.X64.Decls.modifies_buffer128 output_key_expansion_b (va_get_mem_heaplet 1 va_s0)
(va_get_mem_heaplet 1 va_sM) /\ (forall (j:nat) . {:pattern(buffer128_read
output_key_expansion_b j (va_get_mem_heaplet 1 va_sM))}j <= 14 ==>
Vale.X64.Decls.buffer128_read output_key_expansion_b j (va_get_mem_heaplet 1 va_sM) ==
FStar.Seq.Base.index #Vale.Def.Types_s.quad32 (Vale.AES.AES_s.key_to_round_keys_LE AES_256 key)
j)) ==> va_k va_sM (())))
val va_wpProof_KeyExpansion256Stdcall : win:bool -> input_key_b:buffer128 ->
output_key_expansion_b:buffer128 -> va_s0:va_state -> va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_KeyExpansion256Stdcall win input_key_b
output_key_expansion_b va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_KeyExpansion256Stdcall win)
([va_Mod_flags; va_Mod_xmm 4; va_Mod_xmm 3; va_Mod_xmm 2; va_Mod_xmm 1; va_Mod_mem_heaplet 1;
va_Mod_reg64 rRdx; va_Mod_mem]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_KeyExpansion256Stdcall (win:bool) (input_key_b:buffer128)
(output_key_expansion_b:buffer128) : (va_quickCode unit (va_code_KeyExpansion256Stdcall win)) =
(va_QProc (va_code_KeyExpansion256Stdcall win) ([va_Mod_flags; va_Mod_xmm 4; va_Mod_xmm 3;
va_Mod_xmm 2; va_Mod_xmm 1; va_Mod_mem_heaplet 1; va_Mod_reg64 rRdx; va_Mod_mem])
(va_wp_KeyExpansion256Stdcall win input_key_b output_key_expansion_b)
(va_wpProof_KeyExpansion256Stdcall win input_key_b output_key_expansion_b))
//--
//-- AES256EncryptBlock
val va_code_AES256EncryptBlock : va_dummy:unit -> Tot va_code
val va_codegen_success_AES256EncryptBlock : va_dummy:unit -> Tot va_pbool
val va_lemma_AES256EncryptBlock : va_b0:va_code -> va_s0:va_state -> input:quad32 -> key:(seq
nat32) -> round_keys:(seq quad32) -> keys_buffer:buffer128
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_AES256EncryptBlock ()) va_s0 /\ va_get_ok va_s0 /\
(aesni_enabled /\ sse_enabled) /\ Vale.AES.AES_s.is_aes_key_LE AES_256 key /\
FStar.Seq.Base.length #quad32 round_keys == 15 /\ round_keys ==
Vale.AES.AES_s.key_to_round_keys_LE AES_256 key /\ va_get_xmm 0 va_s0 == input /\ va_get_reg64
rR8 va_s0 == Vale.X64.Memory.buffer_addr #Vale.X64.Memory.vuint128 keys_buffer
(va_get_mem_heaplet 0 va_s0) /\ Vale.X64.Decls.validSrcAddrs128 (va_get_mem_heaplet 0 va_s0)
(va_get_reg64 rR8 va_s0) keys_buffer 15 (va_get_mem_layout va_s0) Secret /\ (forall (i:nat) . i
< 15 ==> Vale.X64.Decls.buffer128_read keys_buffer i (va_get_mem_heaplet 0 va_s0) ==
FStar.Seq.Base.index #quad32 round_keys i)))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_get_xmm 0 va_sM == Vale.AES.AES_s.aes_encrypt_LE AES_256 key input /\ va_state_eq va_sM
(va_update_flags va_sM (va_update_xmm 2 va_sM (va_update_xmm 0 va_sM (va_update_ok va_sM
va_s0))))))
[@ va_qattr]
let va_wp_AES256EncryptBlock (input:quad32) (key:(seq nat32)) (round_keys:(seq quad32)) | false | true | Vale.AES.X64.AES256.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 20,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val va_wp_AES256EncryptBlock
(input: quad32)
(key: (seq nat32))
(round_keys: (seq quad32))
(keys_buffer: buffer128)
(va_s0: va_state)
(va_k: (va_state -> unit -> Type0))
: Type0 | [] | Vale.AES.X64.AES256.va_wp_AES256EncryptBlock | {
"file_name": "obj/Vale.AES.X64.AES256.fsti",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} |
input: Vale.X64.Decls.quad32 ->
key: FStar.Seq.Base.seq Vale.X64.Memory.nat32 ->
round_keys: FStar.Seq.Base.seq Vale.X64.Decls.quad32 ->
keys_buffer: Vale.X64.Memory.buffer128 ->
va_s0: Vale.X64.Decls.va_state ->
va_k: (_: Vale.X64.Decls.va_state -> _: Prims.unit -> Type0)
-> Type0 | {
"end_col": 73,
"end_line": 156,
"start_col": 2,
"start_line": 146
} |
Prims.Tot | val va_quick_AES256EncryptBlockStdcall
(win: bool)
(input: quad32)
(key: (seq nat32))
(input_buffer output_buffer keys_buffer: buffer128)
: (va_quickCode unit (va_code_AES256EncryptBlockStdcall win)) | [
{
"abbrev": false,
"full_module": "Vale.X64.CPU_Features_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES256_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCodes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCode",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsAes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsMem",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsBasic",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Decls",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.CPU_Features_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES256_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCodes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCode",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsAes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsMem",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsBasic",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Decls",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let va_quick_AES256EncryptBlockStdcall (win:bool) (input:quad32) (key:(seq nat32))
(input_buffer:buffer128) (output_buffer:buffer128) (keys_buffer:buffer128) : (va_quickCode unit
(va_code_AES256EncryptBlockStdcall win)) =
(va_QProc (va_code_AES256EncryptBlockStdcall win) ([va_Mod_flags; va_Mod_xmm 2; va_Mod_xmm 0;
va_Mod_mem_heaplet 1; va_Mod_reg64 rR8; va_Mod_mem]) (va_wp_AES256EncryptBlockStdcall win input
key input_buffer output_buffer keys_buffer) (va_wpProof_AES256EncryptBlockStdcall win input key
input_buffer output_buffer keys_buffer)) | val va_quick_AES256EncryptBlockStdcall
(win: bool)
(input: quad32)
(key: (seq nat32))
(input_buffer output_buffer keys_buffer: buffer128)
: (va_quickCode unit (va_code_AES256EncryptBlockStdcall win))
let va_quick_AES256EncryptBlockStdcall
(win: bool)
(input: quad32)
(key: (seq nat32))
(input_buffer output_buffer keys_buffer: buffer128)
: (va_quickCode unit (va_code_AES256EncryptBlockStdcall win)) = | false | null | false | (va_QProc (va_code_AES256EncryptBlockStdcall win)
([va_Mod_flags; va_Mod_xmm 2; va_Mod_xmm 0; va_Mod_mem_heaplet 1; va_Mod_reg64 rR8; va_Mod_mem])
(va_wp_AES256EncryptBlockStdcall win input key input_buffer output_buffer keys_buffer)
(va_wpProof_AES256EncryptBlockStdcall win input key input_buffer output_buffer keys_buffer)) | {
"checked_file": "Vale.AES.X64.AES256.fsti.checked",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.QuickCodes.fsti.checked",
"Vale.X64.QuickCode.fst.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.InsVector.fsti.checked",
"Vale.X64.InsMem.fsti.checked",
"Vale.X64.InsBasic.fsti.checked",
"Vale.X64.InsAes.fsti.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.X64.Decls.fsti.checked",
"Vale.X64.CPU_Features_s.fst.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Opaque_s.fsti.checked",
"Vale.Arch.Types.fsti.checked",
"Vale.AES.AES_s.fst.checked",
"Vale.AES.AES256_helpers.fsti.checked",
"prims.fst.checked",
"FStar.Seq.Base.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": false,
"source_file": "Vale.AES.X64.AES256.fsti"
} | [
"total"
] | [
"Prims.bool",
"Vale.X64.Decls.quad32",
"FStar.Seq.Base.seq",
"Vale.X64.Memory.nat32",
"Vale.X64.Memory.buffer128",
"Vale.X64.QuickCode.va_QProc",
"Prims.unit",
"Vale.AES.X64.AES256.va_code_AES256EncryptBlockStdcall",
"Prims.Cons",
"Vale.X64.QuickCode.mod_t",
"Vale.X64.QuickCode.va_Mod_flags",
"Vale.X64.QuickCode.va_Mod_xmm",
"Vale.X64.QuickCode.va_Mod_mem_heaplet",
"Vale.X64.QuickCode.va_Mod_reg64",
"Vale.X64.Machine_s.rR8",
"Vale.X64.QuickCode.va_Mod_mem",
"Prims.Nil",
"Vale.AES.X64.AES256.va_wp_AES256EncryptBlockStdcall",
"Vale.AES.X64.AES256.va_wpProof_AES256EncryptBlockStdcall",
"Vale.X64.QuickCode.va_quickCode"
] | [] | module Vale.AES.X64.AES256
open Vale.Def.Opaque_s
open Vale.Def.Types_s
open FStar.Seq
open Vale.AES.AES_s
open Vale.X64.Machine_s
open Vale.X64.Memory
open Vale.X64.State
open Vale.X64.Decls
open Vale.X64.InsBasic
open Vale.X64.InsMem
open Vale.X64.InsVector
open Vale.X64.InsAes
open Vale.X64.QuickCode
open Vale.X64.QuickCodes
open Vale.Arch.Types
open Vale.AES.AES256_helpers
open Vale.X64.CPU_Features_s
#reset-options "--z3rlimit 20"
//-- KeyExpansion256Stdcall
val va_code_KeyExpansion256Stdcall : win:bool -> Tot va_code
val va_codegen_success_KeyExpansion256Stdcall : win:bool -> Tot va_pbool
val va_lemma_KeyExpansion256Stdcall : va_b0:va_code -> va_s0:va_state -> win:bool ->
input_key_b:buffer128 -> output_key_expansion_b:buffer128
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_KeyExpansion256Stdcall win) va_s0 /\ va_get_ok va_s0
/\ (let (key_ptr:(va_int_range 0 18446744073709551615)) = (if win then va_get_reg64 rRcx va_s0
else va_get_reg64 rRdi va_s0) in let (key_expansion_ptr:(va_int_range 0 18446744073709551615))
= (if win then va_get_reg64 rRdx va_s0 else va_get_reg64 rRsi va_s0) in let
(key:(FStar.Seq.Base.seq Vale.Def.Types_s.nat32)) = Vale.AES.AES256_helpers.make_AES256_key
(Vale.X64.Decls.buffer128_read input_key_b 0 (va_get_mem_heaplet 0 va_s0))
(Vale.X64.Decls.buffer128_read input_key_b 1 (va_get_mem_heaplet 0 va_s0)) in aesni_enabled /\
avx_enabled /\ sse_enabled /\ Vale.X64.Decls.validSrcAddrs128 (va_get_mem_heaplet 0 va_s0)
key_ptr input_key_b 2 (va_get_mem_layout va_s0) Secret /\ Vale.X64.Decls.validDstAddrs128
(va_get_mem_heaplet 1 va_s0) key_expansion_ptr output_key_expansion_b 15 (va_get_mem_layout
va_s0) Secret)))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
(let (key_ptr:(va_int_range 0 18446744073709551615)) = (if win then va_get_reg64 rRcx va_s0
else va_get_reg64 rRdi va_s0) in let (key_expansion_ptr:(va_int_range 0 18446744073709551615))
= (if win then va_get_reg64 rRdx va_s0 else va_get_reg64 rRsi va_s0) in let
(key:(FStar.Seq.Base.seq Vale.Def.Types_s.nat32)) = Vale.AES.AES256_helpers.make_AES256_key
(Vale.X64.Decls.buffer128_read input_key_b 0 (va_get_mem_heaplet 0 va_s0))
(Vale.X64.Decls.buffer128_read input_key_b 1 (va_get_mem_heaplet 0 va_s0)) in aesni_enabled /\
avx_enabled /\ sse_enabled /\ Vale.X64.Decls.validSrcAddrs128 (va_get_mem_heaplet 0 va_sM)
key_ptr input_key_b 2 (va_get_mem_layout va_sM) Secret /\ Vale.X64.Decls.validDstAddrs128
(va_get_mem_heaplet 1 va_sM) key_expansion_ptr output_key_expansion_b 15 (va_get_mem_layout
va_sM) Secret) /\ (let (key_ptr:(va_int_range 0 18446744073709551615)) = (if win then
va_get_reg64 rRcx va_s0 else va_get_reg64 rRdi va_s0) in let (key_expansion_ptr:(va_int_range 0
18446744073709551615)) = (if win then va_get_reg64 rRdx va_s0 else va_get_reg64 rRsi va_s0) in
let (key:(FStar.Seq.Base.seq Vale.Def.Types_s.nat32)) = Vale.AES.AES256_helpers.make_AES256_key
(Vale.X64.Decls.buffer128_read input_key_b 0 (va_get_mem_heaplet 0 va_s0))
(Vale.X64.Decls.buffer128_read input_key_b 1 (va_get_mem_heaplet 0 va_s0)) in
Vale.X64.Decls.modifies_buffer128 output_key_expansion_b (va_get_mem_heaplet 1 va_s0)
(va_get_mem_heaplet 1 va_sM) /\ (forall (j:nat) . {:pattern(buffer128_read
output_key_expansion_b j (va_get_mem_heaplet 1 va_sM))}j <= 14 ==>
Vale.X64.Decls.buffer128_read output_key_expansion_b j (va_get_mem_heaplet 1 va_sM) ==
FStar.Seq.Base.index #Vale.Def.Types_s.quad32 (Vale.AES.AES_s.key_to_round_keys_LE AES_256 key)
j)) /\ va_state_eq va_sM (va_update_flags va_sM (va_update_xmm 4 va_sM (va_update_xmm 3 va_sM
(va_update_xmm 2 va_sM (va_update_xmm 1 va_sM (va_update_mem_heaplet 1 va_sM (va_update_reg64
rRdx va_sM (va_update_ok va_sM (va_update_mem va_sM va_s0)))))))))))
[@ va_qattr]
let va_wp_KeyExpansion256Stdcall (win:bool) (input_key_b:buffer128)
(output_key_expansion_b:buffer128) (va_s0:va_state) (va_k:(va_state -> unit -> Type0)) : Type0 =
(va_get_ok va_s0 /\ (let (key_ptr:(va_int_range 0 18446744073709551615)) = va_if win (fun _ ->
va_get_reg64 rRcx va_s0) (fun _ -> va_get_reg64 rRdi va_s0) in let
(key_expansion_ptr:(va_int_range 0 18446744073709551615)) = va_if win (fun _ -> va_get_reg64
rRdx va_s0) (fun _ -> va_get_reg64 rRsi va_s0) in let (key:(FStar.Seq.Base.seq
Vale.Def.Types_s.nat32)) = Vale.AES.AES256_helpers.make_AES256_key
(Vale.X64.Decls.buffer128_read input_key_b 0 (va_get_mem_heaplet 0 va_s0))
(Vale.X64.Decls.buffer128_read input_key_b 1 (va_get_mem_heaplet 0 va_s0)) in aesni_enabled /\
avx_enabled /\ sse_enabled /\ Vale.X64.Decls.validSrcAddrs128 (va_get_mem_heaplet 0 va_s0)
key_ptr input_key_b 2 (va_get_mem_layout va_s0) Secret /\ Vale.X64.Decls.validDstAddrs128
(va_get_mem_heaplet 1 va_s0) key_expansion_ptr output_key_expansion_b 15 (va_get_mem_layout
va_s0) Secret) /\ (forall (va_x_mem:vale_heap) (va_x_rdx:nat64) (va_x_heap1:vale_heap)
(va_x_xmm1:quad32) (va_x_xmm2:quad32) (va_x_xmm3:quad32) (va_x_xmm4:quad32)
(va_x_efl:Vale.X64.Flags.t) . let va_sM = va_upd_flags va_x_efl (va_upd_xmm 4 va_x_xmm4
(va_upd_xmm 3 va_x_xmm3 (va_upd_xmm 2 va_x_xmm2 (va_upd_xmm 1 va_x_xmm1 (va_upd_mem_heaplet 1
va_x_heap1 (va_upd_reg64 rRdx va_x_rdx (va_upd_mem va_x_mem va_s0))))))) in va_get_ok va_sM /\
(let (key_ptr:(va_int_range 0 18446744073709551615)) = va_if win (fun _ -> va_get_reg64 rRcx
va_s0) (fun _ -> va_get_reg64 rRdi va_s0) in let (key_expansion_ptr:(va_int_range 0
18446744073709551615)) = va_if win (fun _ -> va_get_reg64 rRdx va_s0) (fun _ -> va_get_reg64
rRsi va_s0) in let (key:(FStar.Seq.Base.seq Vale.Def.Types_s.nat32)) =
Vale.AES.AES256_helpers.make_AES256_key (Vale.X64.Decls.buffer128_read input_key_b 0
(va_get_mem_heaplet 0 va_s0)) (Vale.X64.Decls.buffer128_read input_key_b 1 (va_get_mem_heaplet
0 va_s0)) in aesni_enabled /\ avx_enabled /\ sse_enabled /\ Vale.X64.Decls.validSrcAddrs128
(va_get_mem_heaplet 0 va_sM) key_ptr input_key_b 2 (va_get_mem_layout va_sM) Secret /\
Vale.X64.Decls.validDstAddrs128 (va_get_mem_heaplet 1 va_sM) key_expansion_ptr
output_key_expansion_b 15 (va_get_mem_layout va_sM) Secret) /\ (let (key_ptr:(va_int_range 0
18446744073709551615)) = va_if win (fun _ -> va_get_reg64 rRcx va_s0) (fun _ -> va_get_reg64
rRdi va_s0) in let (key_expansion_ptr:(va_int_range 0 18446744073709551615)) = va_if win (fun _
-> va_get_reg64 rRdx va_s0) (fun _ -> va_get_reg64 rRsi va_s0) in let (key:(FStar.Seq.Base.seq
Vale.Def.Types_s.nat32)) = Vale.AES.AES256_helpers.make_AES256_key
(Vale.X64.Decls.buffer128_read input_key_b 0 (va_get_mem_heaplet 0 va_s0))
(Vale.X64.Decls.buffer128_read input_key_b 1 (va_get_mem_heaplet 0 va_s0)) in
Vale.X64.Decls.modifies_buffer128 output_key_expansion_b (va_get_mem_heaplet 1 va_s0)
(va_get_mem_heaplet 1 va_sM) /\ (forall (j:nat) . {:pattern(buffer128_read
output_key_expansion_b j (va_get_mem_heaplet 1 va_sM))}j <= 14 ==>
Vale.X64.Decls.buffer128_read output_key_expansion_b j (va_get_mem_heaplet 1 va_sM) ==
FStar.Seq.Base.index #Vale.Def.Types_s.quad32 (Vale.AES.AES_s.key_to_round_keys_LE AES_256 key)
j)) ==> va_k va_sM (())))
val va_wpProof_KeyExpansion256Stdcall : win:bool -> input_key_b:buffer128 ->
output_key_expansion_b:buffer128 -> va_s0:va_state -> va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_KeyExpansion256Stdcall win input_key_b
output_key_expansion_b va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_KeyExpansion256Stdcall win)
([va_Mod_flags; va_Mod_xmm 4; va_Mod_xmm 3; va_Mod_xmm 2; va_Mod_xmm 1; va_Mod_mem_heaplet 1;
va_Mod_reg64 rRdx; va_Mod_mem]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_KeyExpansion256Stdcall (win:bool) (input_key_b:buffer128)
(output_key_expansion_b:buffer128) : (va_quickCode unit (va_code_KeyExpansion256Stdcall win)) =
(va_QProc (va_code_KeyExpansion256Stdcall win) ([va_Mod_flags; va_Mod_xmm 4; va_Mod_xmm 3;
va_Mod_xmm 2; va_Mod_xmm 1; va_Mod_mem_heaplet 1; va_Mod_reg64 rRdx; va_Mod_mem])
(va_wp_KeyExpansion256Stdcall win input_key_b output_key_expansion_b)
(va_wpProof_KeyExpansion256Stdcall win input_key_b output_key_expansion_b))
//--
//-- AES256EncryptBlock
val va_code_AES256EncryptBlock : va_dummy:unit -> Tot va_code
val va_codegen_success_AES256EncryptBlock : va_dummy:unit -> Tot va_pbool
val va_lemma_AES256EncryptBlock : va_b0:va_code -> va_s0:va_state -> input:quad32 -> key:(seq
nat32) -> round_keys:(seq quad32) -> keys_buffer:buffer128
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_AES256EncryptBlock ()) va_s0 /\ va_get_ok va_s0 /\
(aesni_enabled /\ sse_enabled) /\ Vale.AES.AES_s.is_aes_key_LE AES_256 key /\
FStar.Seq.Base.length #quad32 round_keys == 15 /\ round_keys ==
Vale.AES.AES_s.key_to_round_keys_LE AES_256 key /\ va_get_xmm 0 va_s0 == input /\ va_get_reg64
rR8 va_s0 == Vale.X64.Memory.buffer_addr #Vale.X64.Memory.vuint128 keys_buffer
(va_get_mem_heaplet 0 va_s0) /\ Vale.X64.Decls.validSrcAddrs128 (va_get_mem_heaplet 0 va_s0)
(va_get_reg64 rR8 va_s0) keys_buffer 15 (va_get_mem_layout va_s0) Secret /\ (forall (i:nat) . i
< 15 ==> Vale.X64.Decls.buffer128_read keys_buffer i (va_get_mem_heaplet 0 va_s0) ==
FStar.Seq.Base.index #quad32 round_keys i)))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_get_xmm 0 va_sM == Vale.AES.AES_s.aes_encrypt_LE AES_256 key input /\ va_state_eq va_sM
(va_update_flags va_sM (va_update_xmm 2 va_sM (va_update_xmm 0 va_sM (va_update_ok va_sM
va_s0))))))
[@ va_qattr]
let va_wp_AES256EncryptBlock (input:quad32) (key:(seq nat32)) (round_keys:(seq quad32))
(keys_buffer:buffer128) (va_s0:va_state) (va_k:(va_state -> unit -> Type0)) : Type0 =
(va_get_ok va_s0 /\ (aesni_enabled /\ sse_enabled) /\ Vale.AES.AES_s.is_aes_key_LE AES_256 key /\
FStar.Seq.Base.length #quad32 round_keys == 15 /\ round_keys ==
Vale.AES.AES_s.key_to_round_keys_LE AES_256 key /\ va_get_xmm 0 va_s0 == input /\ va_get_reg64
rR8 va_s0 == Vale.X64.Memory.buffer_addr #Vale.X64.Memory.vuint128 keys_buffer
(va_get_mem_heaplet 0 va_s0) /\ Vale.X64.Decls.validSrcAddrs128 (va_get_mem_heaplet 0 va_s0)
(va_get_reg64 rR8 va_s0) keys_buffer 15 (va_get_mem_layout va_s0) Secret /\ (forall (i:nat) . i
< 15 ==> Vale.X64.Decls.buffer128_read keys_buffer i (va_get_mem_heaplet 0 va_s0) ==
FStar.Seq.Base.index #quad32 round_keys i) /\ (forall (va_x_xmm0:quad32) (va_x_xmm2:quad32)
(va_x_efl:Vale.X64.Flags.t) . let va_sM = va_upd_flags va_x_efl (va_upd_xmm 2 va_x_xmm2
(va_upd_xmm 0 va_x_xmm0 va_s0)) in va_get_ok va_sM /\ va_get_xmm 0 va_sM ==
Vale.AES.AES_s.aes_encrypt_LE AES_256 key input ==> va_k va_sM (())))
val va_wpProof_AES256EncryptBlock : input:quad32 -> key:(seq nat32) -> round_keys:(seq quad32) ->
keys_buffer:buffer128 -> va_s0:va_state -> va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_AES256EncryptBlock input key round_keys keys_buffer va_s0
va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_AES256EncryptBlock ()) ([va_Mod_flags;
va_Mod_xmm 2; va_Mod_xmm 0]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_AES256EncryptBlock (input:quad32) (key:(seq nat32)) (round_keys:(seq quad32))
(keys_buffer:buffer128) : (va_quickCode unit (va_code_AES256EncryptBlock ())) =
(va_QProc (va_code_AES256EncryptBlock ()) ([va_Mod_flags; va_Mod_xmm 2; va_Mod_xmm 0])
(va_wp_AES256EncryptBlock input key round_keys keys_buffer) (va_wpProof_AES256EncryptBlock
input key round_keys keys_buffer))
//--
//-- AES256EncryptBlockStdcall
val va_code_AES256EncryptBlockStdcall : win:bool -> Tot va_code
val va_codegen_success_AES256EncryptBlockStdcall : win:bool -> Tot va_pbool
val va_lemma_AES256EncryptBlockStdcall : va_b0:va_code -> va_s0:va_state -> win:bool ->
input:quad32 -> key:(seq nat32) -> input_buffer:buffer128 -> output_buffer:buffer128 ->
keys_buffer:buffer128
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_AES256EncryptBlockStdcall win) va_s0 /\ va_get_ok
va_s0 /\ (let (output_ptr:(va_int_range 0 18446744073709551615)) = (if win then va_get_reg64
rRcx va_s0 else va_get_reg64 rRdi va_s0) in let (input_ptr:(va_int_range 0
18446744073709551615)) = (if win then va_get_reg64 rRdx va_s0 else va_get_reg64 rRsi va_s0) in
let (expanded_key_ptr:(va_int_range 0 18446744073709551615)) = (if win then va_get_reg64 rR8
va_s0 else va_get_reg64 rRdx va_s0) in aesni_enabled /\ sse_enabled /\
Vale.AES.AES_s.is_aes_key_LE AES_256 key /\ Vale.X64.Decls.buffer128_read input_buffer 0
(va_get_mem_heaplet 0 va_s0) == input /\ expanded_key_ptr == Vale.X64.Memory.buffer_addr
#Vale.X64.Memory.vuint128 keys_buffer (va_get_mem_heaplet 0 va_s0) /\
Vale.X64.Decls.validSrcAddrs128 (va_get_mem_heaplet 0 va_s0) input_ptr input_buffer 1
(va_get_mem_layout va_s0) Secret /\ Vale.X64.Decls.validDstAddrs128 (va_get_mem_heaplet 1
va_s0) output_ptr output_buffer 1 (va_get_mem_layout va_s0) Secret /\
Vale.X64.Decls.validSrcAddrs128 (va_get_mem_heaplet 0 va_s0) expanded_key_ptr keys_buffer 15
(va_get_mem_layout va_s0) Secret /\ (forall (i:nat) . i < 15 ==> Vale.X64.Decls.buffer128_read
keys_buffer i (va_get_mem_heaplet 0 va_s0) == FStar.Seq.Base.index #Vale.Def.Types_s.quad32
(Vale.AES.AES_s.key_to_round_keys_LE AES_256 key) i))))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
(let (output_ptr:(va_int_range 0 18446744073709551615)) = (if win then va_get_reg64 rRcx va_s0
else va_get_reg64 rRdi va_s0) in let (input_ptr:(va_int_range 0 18446744073709551615)) = (if
win then va_get_reg64 rRdx va_s0 else va_get_reg64 rRsi va_s0) in let
(expanded_key_ptr:(va_int_range 0 18446744073709551615)) = (if win then va_get_reg64 rR8 va_s0
else va_get_reg64 rRdx va_s0) in Vale.X64.Decls.modifies_mem (Vale.X64.Decls.loc_buffer
#Vale.X64.Memory.vuint128 output_buffer) (va_get_mem_heaplet 1 va_s0) (va_get_mem_heaplet 1
va_sM) /\ Vale.X64.Decls.buffer128_read output_buffer 0 (va_get_mem_heaplet 1 va_sM) ==
Vale.AES.AES_s.aes_encrypt_LE AES_256 key input) /\ va_state_eq va_sM (va_update_flags va_sM
(va_update_xmm 2 va_sM (va_update_xmm 0 va_sM (va_update_mem_heaplet 1 va_sM (va_update_reg64
rR8 va_sM (va_update_ok va_sM (va_update_mem va_sM va_s0)))))))))
[@ va_qattr]
let va_wp_AES256EncryptBlockStdcall (win:bool) (input:quad32) (key:(seq nat32))
(input_buffer:buffer128) (output_buffer:buffer128) (keys_buffer:buffer128) (va_s0:va_state)
(va_k:(va_state -> unit -> Type0)) : Type0 =
(va_get_ok va_s0 /\ (let (output_ptr:(va_int_range 0 18446744073709551615)) = va_if win (fun _ ->
va_get_reg64 rRcx va_s0) (fun _ -> va_get_reg64 rRdi va_s0) in let (input_ptr:(va_int_range 0
18446744073709551615)) = va_if win (fun _ -> va_get_reg64 rRdx va_s0) (fun _ -> va_get_reg64
rRsi va_s0) in let (expanded_key_ptr:(va_int_range 0 18446744073709551615)) = va_if win (fun _
-> va_get_reg64 rR8 va_s0) (fun _ -> va_get_reg64 rRdx va_s0) in aesni_enabled /\ sse_enabled
/\ Vale.AES.AES_s.is_aes_key_LE AES_256 key /\ Vale.X64.Decls.buffer128_read input_buffer 0
(va_get_mem_heaplet 0 va_s0) == input /\ expanded_key_ptr == Vale.X64.Memory.buffer_addr
#Vale.X64.Memory.vuint128 keys_buffer (va_get_mem_heaplet 0 va_s0) /\
Vale.X64.Decls.validSrcAddrs128 (va_get_mem_heaplet 0 va_s0) input_ptr input_buffer 1
(va_get_mem_layout va_s0) Secret /\ Vale.X64.Decls.validDstAddrs128 (va_get_mem_heaplet 1
va_s0) output_ptr output_buffer 1 (va_get_mem_layout va_s0) Secret /\
Vale.X64.Decls.validSrcAddrs128 (va_get_mem_heaplet 0 va_s0) expanded_key_ptr keys_buffer 15
(va_get_mem_layout va_s0) Secret /\ (forall (i:nat) . i < 15 ==> Vale.X64.Decls.buffer128_read
keys_buffer i (va_get_mem_heaplet 0 va_s0) == FStar.Seq.Base.index #Vale.Def.Types_s.quad32
(Vale.AES.AES_s.key_to_round_keys_LE AES_256 key) i)) /\ (forall (va_x_mem:vale_heap)
(va_x_r8:nat64) (va_x_heap1:vale_heap) (va_x_xmm0:quad32) (va_x_xmm2:quad32)
(va_x_efl:Vale.X64.Flags.t) . let va_sM = va_upd_flags va_x_efl (va_upd_xmm 2 va_x_xmm2
(va_upd_xmm 0 va_x_xmm0 (va_upd_mem_heaplet 1 va_x_heap1 (va_upd_reg64 rR8 va_x_r8 (va_upd_mem
va_x_mem va_s0))))) in va_get_ok va_sM /\ (let (output_ptr:(va_int_range 0
18446744073709551615)) = va_if win (fun _ -> va_get_reg64 rRcx va_s0) (fun _ -> va_get_reg64
rRdi va_s0) in let (input_ptr:(va_int_range 0 18446744073709551615)) = va_if win (fun _ ->
va_get_reg64 rRdx va_s0) (fun _ -> va_get_reg64 rRsi va_s0) in let
(expanded_key_ptr:(va_int_range 0 18446744073709551615)) = va_if win (fun _ -> va_get_reg64 rR8
va_s0) (fun _ -> va_get_reg64 rRdx va_s0) in Vale.X64.Decls.modifies_mem
(Vale.X64.Decls.loc_buffer #Vale.X64.Memory.vuint128 output_buffer) (va_get_mem_heaplet 1
va_s0) (va_get_mem_heaplet 1 va_sM) /\ Vale.X64.Decls.buffer128_read output_buffer 0
(va_get_mem_heaplet 1 va_sM) == Vale.AES.AES_s.aes_encrypt_LE AES_256 key input) ==> va_k va_sM
(())))
val va_wpProof_AES256EncryptBlockStdcall : win:bool -> input:quad32 -> key:(seq nat32) ->
input_buffer:buffer128 -> output_buffer:buffer128 -> keys_buffer:buffer128 -> va_s0:va_state ->
va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_AES256EncryptBlockStdcall win input key input_buffer
output_buffer keys_buffer va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_AES256EncryptBlockStdcall win)
([va_Mod_flags; va_Mod_xmm 2; va_Mod_xmm 0; va_Mod_mem_heaplet 1; va_Mod_reg64 rR8;
va_Mod_mem]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_AES256EncryptBlockStdcall (win:bool) (input:quad32) (key:(seq nat32))
(input_buffer:buffer128) (output_buffer:buffer128) (keys_buffer:buffer128) : (va_quickCode unit | false | false | Vale.AES.X64.AES256.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 20,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val va_quick_AES256EncryptBlockStdcall
(win: bool)
(input: quad32)
(key: (seq nat32))
(input_buffer output_buffer keys_buffer: buffer128)
: (va_quickCode unit (va_code_AES256EncryptBlockStdcall win)) | [] | Vale.AES.X64.AES256.va_quick_AES256EncryptBlockStdcall | {
"file_name": "obj/Vale.AES.X64.AES256.fsti",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} |
win: Prims.bool ->
input: Vale.X64.Decls.quad32 ->
key: FStar.Seq.Base.seq Vale.X64.Memory.nat32 ->
input_buffer: Vale.X64.Memory.buffer128 ->
output_buffer: Vale.X64.Memory.buffer128 ->
keys_buffer: Vale.X64.Memory.buffer128
-> Vale.X64.QuickCode.va_quickCode Prims.unit
(Vale.AES.X64.AES256.va_code_AES256EncryptBlockStdcall win) | {
"end_col": 44,
"end_line": 258,
"start_col": 2,
"start_line": 255
} |
Prims.Tot | val va_wp_AES256EncryptBlockStdcall
(win: bool)
(input: quad32)
(key: (seq nat32))
(input_buffer output_buffer keys_buffer: buffer128)
(va_s0: va_state)
(va_k: (va_state -> unit -> Type0))
: Type0 | [
{
"abbrev": false,
"full_module": "Vale.X64.CPU_Features_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES256_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCodes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCode",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsAes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsMem",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsBasic",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Decls",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.CPU_Features_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES256_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCodes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCode",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsAes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsMem",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsBasic",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Decls",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let va_wp_AES256EncryptBlockStdcall (win:bool) (input:quad32) (key:(seq nat32))
(input_buffer:buffer128) (output_buffer:buffer128) (keys_buffer:buffer128) (va_s0:va_state)
(va_k:(va_state -> unit -> Type0)) : Type0 =
(va_get_ok va_s0 /\ (let (output_ptr:(va_int_range 0 18446744073709551615)) = va_if win (fun _ ->
va_get_reg64 rRcx va_s0) (fun _ -> va_get_reg64 rRdi va_s0) in let (input_ptr:(va_int_range 0
18446744073709551615)) = va_if win (fun _ -> va_get_reg64 rRdx va_s0) (fun _ -> va_get_reg64
rRsi va_s0) in let (expanded_key_ptr:(va_int_range 0 18446744073709551615)) = va_if win (fun _
-> va_get_reg64 rR8 va_s0) (fun _ -> va_get_reg64 rRdx va_s0) in aesni_enabled /\ sse_enabled
/\ Vale.AES.AES_s.is_aes_key_LE AES_256 key /\ Vale.X64.Decls.buffer128_read input_buffer 0
(va_get_mem_heaplet 0 va_s0) == input /\ expanded_key_ptr == Vale.X64.Memory.buffer_addr
#Vale.X64.Memory.vuint128 keys_buffer (va_get_mem_heaplet 0 va_s0) /\
Vale.X64.Decls.validSrcAddrs128 (va_get_mem_heaplet 0 va_s0) input_ptr input_buffer 1
(va_get_mem_layout va_s0) Secret /\ Vale.X64.Decls.validDstAddrs128 (va_get_mem_heaplet 1
va_s0) output_ptr output_buffer 1 (va_get_mem_layout va_s0) Secret /\
Vale.X64.Decls.validSrcAddrs128 (va_get_mem_heaplet 0 va_s0) expanded_key_ptr keys_buffer 15
(va_get_mem_layout va_s0) Secret /\ (forall (i:nat) . i < 15 ==> Vale.X64.Decls.buffer128_read
keys_buffer i (va_get_mem_heaplet 0 va_s0) == FStar.Seq.Base.index #Vale.Def.Types_s.quad32
(Vale.AES.AES_s.key_to_round_keys_LE AES_256 key) i)) /\ (forall (va_x_mem:vale_heap)
(va_x_r8:nat64) (va_x_heap1:vale_heap) (va_x_xmm0:quad32) (va_x_xmm2:quad32)
(va_x_efl:Vale.X64.Flags.t) . let va_sM = va_upd_flags va_x_efl (va_upd_xmm 2 va_x_xmm2
(va_upd_xmm 0 va_x_xmm0 (va_upd_mem_heaplet 1 va_x_heap1 (va_upd_reg64 rR8 va_x_r8 (va_upd_mem
va_x_mem va_s0))))) in va_get_ok va_sM /\ (let (output_ptr:(va_int_range 0
18446744073709551615)) = va_if win (fun _ -> va_get_reg64 rRcx va_s0) (fun _ -> va_get_reg64
rRdi va_s0) in let (input_ptr:(va_int_range 0 18446744073709551615)) = va_if win (fun _ ->
va_get_reg64 rRdx va_s0) (fun _ -> va_get_reg64 rRsi va_s0) in let
(expanded_key_ptr:(va_int_range 0 18446744073709551615)) = va_if win (fun _ -> va_get_reg64 rR8
va_s0) (fun _ -> va_get_reg64 rRdx va_s0) in Vale.X64.Decls.modifies_mem
(Vale.X64.Decls.loc_buffer #Vale.X64.Memory.vuint128 output_buffer) (va_get_mem_heaplet 1
va_s0) (va_get_mem_heaplet 1 va_sM) /\ Vale.X64.Decls.buffer128_read output_buffer 0
(va_get_mem_heaplet 1 va_sM) == Vale.AES.AES_s.aes_encrypt_LE AES_256 key input) ==> va_k va_sM
(()))) | val va_wp_AES256EncryptBlockStdcall
(win: bool)
(input: quad32)
(key: (seq nat32))
(input_buffer output_buffer keys_buffer: buffer128)
(va_s0: va_state)
(va_k: (va_state -> unit -> Type0))
: Type0
let va_wp_AES256EncryptBlockStdcall
(win: bool)
(input: quad32)
(key: (seq nat32))
(input_buffer output_buffer keys_buffer: buffer128)
(va_s0: va_state)
(va_k: (va_state -> unit -> Type0))
: Type0 = | false | null | false | (va_get_ok va_s0 /\
(let output_ptr:(va_int_range 0 18446744073709551615) =
va_if win (fun _ -> va_get_reg64 rRcx va_s0) (fun _ -> va_get_reg64 rRdi va_s0)
in
let input_ptr:(va_int_range 0 18446744073709551615) =
va_if win (fun _ -> va_get_reg64 rRdx va_s0) (fun _ -> va_get_reg64 rRsi va_s0)
in
let expanded_key_ptr:(va_int_range 0 18446744073709551615) =
va_if win (fun _ -> va_get_reg64 rR8 va_s0) (fun _ -> va_get_reg64 rRdx va_s0)
in
aesni_enabled /\ sse_enabled /\ Vale.AES.AES_s.is_aes_key_LE AES_256 key /\
Vale.X64.Decls.buffer128_read input_buffer 0 (va_get_mem_heaplet 0 va_s0) == input /\
expanded_key_ptr ==
Vale.X64.Memory.buffer_addr #Vale.X64.Memory.vuint128 keys_buffer (va_get_mem_heaplet 0 va_s0) /\
Vale.X64.Decls.validSrcAddrs128 (va_get_mem_heaplet 0 va_s0)
input_ptr
input_buffer
1
(va_get_mem_layout va_s0)
Secret /\
Vale.X64.Decls.validDstAddrs128 (va_get_mem_heaplet 1 va_s0)
output_ptr
output_buffer
1
(va_get_mem_layout va_s0)
Secret /\
Vale.X64.Decls.validSrcAddrs128 (va_get_mem_heaplet 0 va_s0)
expanded_key_ptr
keys_buffer
15
(va_get_mem_layout va_s0)
Secret /\
(forall (i: nat).
i < 15 ==>
Vale.X64.Decls.buffer128_read keys_buffer i (va_get_mem_heaplet 0 va_s0) ==
FStar.Seq.Base.index #Vale.Def.Types_s.quad32
(Vale.AES.AES_s.key_to_round_keys_LE AES_256 key)
i)) /\
(forall (va_x_mem: vale_heap)
(va_x_r8: nat64)
(va_x_heap1: vale_heap)
(va_x_xmm0: quad32)
(va_x_xmm2: quad32)
(va_x_efl: Vale.X64.Flags.t).
let va_sM =
va_upd_flags va_x_efl
(va_upd_xmm 2
va_x_xmm2
(va_upd_xmm 0
va_x_xmm0
(va_upd_mem_heaplet 1
va_x_heap1
(va_upd_reg64 rR8 va_x_r8 (va_upd_mem va_x_mem va_s0)))))
in
va_get_ok va_sM /\
(let output_ptr:(va_int_range 0 18446744073709551615) =
va_if win (fun _ -> va_get_reg64 rRcx va_s0) (fun _ -> va_get_reg64 rRdi va_s0)
in
let input_ptr:(va_int_range 0 18446744073709551615) =
va_if win (fun _ -> va_get_reg64 rRdx va_s0) (fun _ -> va_get_reg64 rRsi va_s0)
in
let expanded_key_ptr:(va_int_range 0 18446744073709551615) =
va_if win (fun _ -> va_get_reg64 rR8 va_s0) (fun _ -> va_get_reg64 rRdx va_s0)
in
Vale.X64.Decls.modifies_mem (Vale.X64.Decls.loc_buffer #Vale.X64.Memory.vuint128
output_buffer)
(va_get_mem_heaplet 1 va_s0)
(va_get_mem_heaplet 1 va_sM) /\
Vale.X64.Decls.buffer128_read output_buffer 0 (va_get_mem_heaplet 1 va_sM) ==
Vale.AES.AES_s.aes_encrypt_LE AES_256 key input) ==>
va_k va_sM (()))) | {
"checked_file": "Vale.AES.X64.AES256.fsti.checked",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.QuickCodes.fsti.checked",
"Vale.X64.QuickCode.fst.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.InsVector.fsti.checked",
"Vale.X64.InsMem.fsti.checked",
"Vale.X64.InsBasic.fsti.checked",
"Vale.X64.InsAes.fsti.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.X64.Decls.fsti.checked",
"Vale.X64.CPU_Features_s.fst.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Opaque_s.fsti.checked",
"Vale.Arch.Types.fsti.checked",
"Vale.AES.AES_s.fst.checked",
"Vale.AES.AES256_helpers.fsti.checked",
"prims.fst.checked",
"FStar.Seq.Base.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": false,
"source_file": "Vale.AES.X64.AES256.fsti"
} | [
"total"
] | [
"Prims.bool",
"Vale.X64.Decls.quad32",
"FStar.Seq.Base.seq",
"Vale.X64.Memory.nat32",
"Vale.X64.Memory.buffer128",
"Vale.X64.Decls.va_state",
"Prims.unit",
"Prims.l_and",
"Prims.b2t",
"Vale.X64.Decls.va_get_ok",
"Vale.X64.CPU_Features_s.aesni_enabled",
"Vale.X64.CPU_Features_s.sse_enabled",
"Vale.AES.AES_s.is_aes_key_LE",
"Vale.AES.AES_common_s.AES_256",
"Prims.eq2",
"Vale.X64.Decls.buffer128_read",
"Vale.X64.Decls.va_get_mem_heaplet",
"Prims.int",
"Vale.X64.Memory.buffer_addr",
"Vale.X64.Memory.vuint128",
"Vale.X64.Decls.validSrcAddrs128",
"Vale.X64.Decls.va_get_mem_layout",
"Vale.Arch.HeapTypes_s.Secret",
"Vale.X64.Decls.validDstAddrs128",
"Prims.l_Forall",
"Prims.nat",
"Prims.l_imp",
"Prims.op_LessThan",
"Vale.Def.Types_s.quad32",
"FStar.Seq.Base.index",
"Vale.AES.AES_s.key_to_round_keys_LE",
"Vale.X64.Decls.va_int_range",
"Vale.X64.Decls.va_if",
"Vale.Def.Types_s.nat64",
"Vale.X64.Decls.va_get_reg64",
"Vale.X64.Machine_s.rR8",
"Prims.l_not",
"Vale.X64.Machine_s.rRdx",
"Vale.X64.Machine_s.rRsi",
"Vale.X64.Machine_s.rRcx",
"Vale.X64.Machine_s.rRdi",
"Vale.X64.InsBasic.vale_heap",
"Vale.X64.Memory.nat64",
"Vale.X64.Flags.t",
"Vale.X64.Decls.modifies_mem",
"Vale.X64.Decls.loc_buffer",
"Vale.AES.AES_s.aes_encrypt_LE",
"Vale.X64.State.vale_state",
"Vale.X64.Decls.va_upd_flags",
"Vale.X64.Decls.va_upd_xmm",
"Vale.X64.Decls.va_upd_mem_heaplet",
"Vale.X64.Decls.va_upd_reg64",
"Vale.X64.Decls.va_upd_mem"
] | [] | module Vale.AES.X64.AES256
open Vale.Def.Opaque_s
open Vale.Def.Types_s
open FStar.Seq
open Vale.AES.AES_s
open Vale.X64.Machine_s
open Vale.X64.Memory
open Vale.X64.State
open Vale.X64.Decls
open Vale.X64.InsBasic
open Vale.X64.InsMem
open Vale.X64.InsVector
open Vale.X64.InsAes
open Vale.X64.QuickCode
open Vale.X64.QuickCodes
open Vale.Arch.Types
open Vale.AES.AES256_helpers
open Vale.X64.CPU_Features_s
#reset-options "--z3rlimit 20"
//-- KeyExpansion256Stdcall
val va_code_KeyExpansion256Stdcall : win:bool -> Tot va_code
val va_codegen_success_KeyExpansion256Stdcall : win:bool -> Tot va_pbool
val va_lemma_KeyExpansion256Stdcall : va_b0:va_code -> va_s0:va_state -> win:bool ->
input_key_b:buffer128 -> output_key_expansion_b:buffer128
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_KeyExpansion256Stdcall win) va_s0 /\ va_get_ok va_s0
/\ (let (key_ptr:(va_int_range 0 18446744073709551615)) = (if win then va_get_reg64 rRcx va_s0
else va_get_reg64 rRdi va_s0) in let (key_expansion_ptr:(va_int_range 0 18446744073709551615))
= (if win then va_get_reg64 rRdx va_s0 else va_get_reg64 rRsi va_s0) in let
(key:(FStar.Seq.Base.seq Vale.Def.Types_s.nat32)) = Vale.AES.AES256_helpers.make_AES256_key
(Vale.X64.Decls.buffer128_read input_key_b 0 (va_get_mem_heaplet 0 va_s0))
(Vale.X64.Decls.buffer128_read input_key_b 1 (va_get_mem_heaplet 0 va_s0)) in aesni_enabled /\
avx_enabled /\ sse_enabled /\ Vale.X64.Decls.validSrcAddrs128 (va_get_mem_heaplet 0 va_s0)
key_ptr input_key_b 2 (va_get_mem_layout va_s0) Secret /\ Vale.X64.Decls.validDstAddrs128
(va_get_mem_heaplet 1 va_s0) key_expansion_ptr output_key_expansion_b 15 (va_get_mem_layout
va_s0) Secret)))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
(let (key_ptr:(va_int_range 0 18446744073709551615)) = (if win then va_get_reg64 rRcx va_s0
else va_get_reg64 rRdi va_s0) in let (key_expansion_ptr:(va_int_range 0 18446744073709551615))
= (if win then va_get_reg64 rRdx va_s0 else va_get_reg64 rRsi va_s0) in let
(key:(FStar.Seq.Base.seq Vale.Def.Types_s.nat32)) = Vale.AES.AES256_helpers.make_AES256_key
(Vale.X64.Decls.buffer128_read input_key_b 0 (va_get_mem_heaplet 0 va_s0))
(Vale.X64.Decls.buffer128_read input_key_b 1 (va_get_mem_heaplet 0 va_s0)) in aesni_enabled /\
avx_enabled /\ sse_enabled /\ Vale.X64.Decls.validSrcAddrs128 (va_get_mem_heaplet 0 va_sM)
key_ptr input_key_b 2 (va_get_mem_layout va_sM) Secret /\ Vale.X64.Decls.validDstAddrs128
(va_get_mem_heaplet 1 va_sM) key_expansion_ptr output_key_expansion_b 15 (va_get_mem_layout
va_sM) Secret) /\ (let (key_ptr:(va_int_range 0 18446744073709551615)) = (if win then
va_get_reg64 rRcx va_s0 else va_get_reg64 rRdi va_s0) in let (key_expansion_ptr:(va_int_range 0
18446744073709551615)) = (if win then va_get_reg64 rRdx va_s0 else va_get_reg64 rRsi va_s0) in
let (key:(FStar.Seq.Base.seq Vale.Def.Types_s.nat32)) = Vale.AES.AES256_helpers.make_AES256_key
(Vale.X64.Decls.buffer128_read input_key_b 0 (va_get_mem_heaplet 0 va_s0))
(Vale.X64.Decls.buffer128_read input_key_b 1 (va_get_mem_heaplet 0 va_s0)) in
Vale.X64.Decls.modifies_buffer128 output_key_expansion_b (va_get_mem_heaplet 1 va_s0)
(va_get_mem_heaplet 1 va_sM) /\ (forall (j:nat) . {:pattern(buffer128_read
output_key_expansion_b j (va_get_mem_heaplet 1 va_sM))}j <= 14 ==>
Vale.X64.Decls.buffer128_read output_key_expansion_b j (va_get_mem_heaplet 1 va_sM) ==
FStar.Seq.Base.index #Vale.Def.Types_s.quad32 (Vale.AES.AES_s.key_to_round_keys_LE AES_256 key)
j)) /\ va_state_eq va_sM (va_update_flags va_sM (va_update_xmm 4 va_sM (va_update_xmm 3 va_sM
(va_update_xmm 2 va_sM (va_update_xmm 1 va_sM (va_update_mem_heaplet 1 va_sM (va_update_reg64
rRdx va_sM (va_update_ok va_sM (va_update_mem va_sM va_s0)))))))))))
[@ va_qattr]
let va_wp_KeyExpansion256Stdcall (win:bool) (input_key_b:buffer128)
(output_key_expansion_b:buffer128) (va_s0:va_state) (va_k:(va_state -> unit -> Type0)) : Type0 =
(va_get_ok va_s0 /\ (let (key_ptr:(va_int_range 0 18446744073709551615)) = va_if win (fun _ ->
va_get_reg64 rRcx va_s0) (fun _ -> va_get_reg64 rRdi va_s0) in let
(key_expansion_ptr:(va_int_range 0 18446744073709551615)) = va_if win (fun _ -> va_get_reg64
rRdx va_s0) (fun _ -> va_get_reg64 rRsi va_s0) in let (key:(FStar.Seq.Base.seq
Vale.Def.Types_s.nat32)) = Vale.AES.AES256_helpers.make_AES256_key
(Vale.X64.Decls.buffer128_read input_key_b 0 (va_get_mem_heaplet 0 va_s0))
(Vale.X64.Decls.buffer128_read input_key_b 1 (va_get_mem_heaplet 0 va_s0)) in aesni_enabled /\
avx_enabled /\ sse_enabled /\ Vale.X64.Decls.validSrcAddrs128 (va_get_mem_heaplet 0 va_s0)
key_ptr input_key_b 2 (va_get_mem_layout va_s0) Secret /\ Vale.X64.Decls.validDstAddrs128
(va_get_mem_heaplet 1 va_s0) key_expansion_ptr output_key_expansion_b 15 (va_get_mem_layout
va_s0) Secret) /\ (forall (va_x_mem:vale_heap) (va_x_rdx:nat64) (va_x_heap1:vale_heap)
(va_x_xmm1:quad32) (va_x_xmm2:quad32) (va_x_xmm3:quad32) (va_x_xmm4:quad32)
(va_x_efl:Vale.X64.Flags.t) . let va_sM = va_upd_flags va_x_efl (va_upd_xmm 4 va_x_xmm4
(va_upd_xmm 3 va_x_xmm3 (va_upd_xmm 2 va_x_xmm2 (va_upd_xmm 1 va_x_xmm1 (va_upd_mem_heaplet 1
va_x_heap1 (va_upd_reg64 rRdx va_x_rdx (va_upd_mem va_x_mem va_s0))))))) in va_get_ok va_sM /\
(let (key_ptr:(va_int_range 0 18446744073709551615)) = va_if win (fun _ -> va_get_reg64 rRcx
va_s0) (fun _ -> va_get_reg64 rRdi va_s0) in let (key_expansion_ptr:(va_int_range 0
18446744073709551615)) = va_if win (fun _ -> va_get_reg64 rRdx va_s0) (fun _ -> va_get_reg64
rRsi va_s0) in let (key:(FStar.Seq.Base.seq Vale.Def.Types_s.nat32)) =
Vale.AES.AES256_helpers.make_AES256_key (Vale.X64.Decls.buffer128_read input_key_b 0
(va_get_mem_heaplet 0 va_s0)) (Vale.X64.Decls.buffer128_read input_key_b 1 (va_get_mem_heaplet
0 va_s0)) in aesni_enabled /\ avx_enabled /\ sse_enabled /\ Vale.X64.Decls.validSrcAddrs128
(va_get_mem_heaplet 0 va_sM) key_ptr input_key_b 2 (va_get_mem_layout va_sM) Secret /\
Vale.X64.Decls.validDstAddrs128 (va_get_mem_heaplet 1 va_sM) key_expansion_ptr
output_key_expansion_b 15 (va_get_mem_layout va_sM) Secret) /\ (let (key_ptr:(va_int_range 0
18446744073709551615)) = va_if win (fun _ -> va_get_reg64 rRcx va_s0) (fun _ -> va_get_reg64
rRdi va_s0) in let (key_expansion_ptr:(va_int_range 0 18446744073709551615)) = va_if win (fun _
-> va_get_reg64 rRdx va_s0) (fun _ -> va_get_reg64 rRsi va_s0) in let (key:(FStar.Seq.Base.seq
Vale.Def.Types_s.nat32)) = Vale.AES.AES256_helpers.make_AES256_key
(Vale.X64.Decls.buffer128_read input_key_b 0 (va_get_mem_heaplet 0 va_s0))
(Vale.X64.Decls.buffer128_read input_key_b 1 (va_get_mem_heaplet 0 va_s0)) in
Vale.X64.Decls.modifies_buffer128 output_key_expansion_b (va_get_mem_heaplet 1 va_s0)
(va_get_mem_heaplet 1 va_sM) /\ (forall (j:nat) . {:pattern(buffer128_read
output_key_expansion_b j (va_get_mem_heaplet 1 va_sM))}j <= 14 ==>
Vale.X64.Decls.buffer128_read output_key_expansion_b j (va_get_mem_heaplet 1 va_sM) ==
FStar.Seq.Base.index #Vale.Def.Types_s.quad32 (Vale.AES.AES_s.key_to_round_keys_LE AES_256 key)
j)) ==> va_k va_sM (())))
val va_wpProof_KeyExpansion256Stdcall : win:bool -> input_key_b:buffer128 ->
output_key_expansion_b:buffer128 -> va_s0:va_state -> va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_KeyExpansion256Stdcall win input_key_b
output_key_expansion_b va_s0 va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_KeyExpansion256Stdcall win)
([va_Mod_flags; va_Mod_xmm 4; va_Mod_xmm 3; va_Mod_xmm 2; va_Mod_xmm 1; va_Mod_mem_heaplet 1;
va_Mod_reg64 rRdx; va_Mod_mem]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_KeyExpansion256Stdcall (win:bool) (input_key_b:buffer128)
(output_key_expansion_b:buffer128) : (va_quickCode unit (va_code_KeyExpansion256Stdcall win)) =
(va_QProc (va_code_KeyExpansion256Stdcall win) ([va_Mod_flags; va_Mod_xmm 4; va_Mod_xmm 3;
va_Mod_xmm 2; va_Mod_xmm 1; va_Mod_mem_heaplet 1; va_Mod_reg64 rRdx; va_Mod_mem])
(va_wp_KeyExpansion256Stdcall win input_key_b output_key_expansion_b)
(va_wpProof_KeyExpansion256Stdcall win input_key_b output_key_expansion_b))
//--
//-- AES256EncryptBlock
val va_code_AES256EncryptBlock : va_dummy:unit -> Tot va_code
val va_codegen_success_AES256EncryptBlock : va_dummy:unit -> Tot va_pbool
val va_lemma_AES256EncryptBlock : va_b0:va_code -> va_s0:va_state -> input:quad32 -> key:(seq
nat32) -> round_keys:(seq quad32) -> keys_buffer:buffer128
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_AES256EncryptBlock ()) va_s0 /\ va_get_ok va_s0 /\
(aesni_enabled /\ sse_enabled) /\ Vale.AES.AES_s.is_aes_key_LE AES_256 key /\
FStar.Seq.Base.length #quad32 round_keys == 15 /\ round_keys ==
Vale.AES.AES_s.key_to_round_keys_LE AES_256 key /\ va_get_xmm 0 va_s0 == input /\ va_get_reg64
rR8 va_s0 == Vale.X64.Memory.buffer_addr #Vale.X64.Memory.vuint128 keys_buffer
(va_get_mem_heaplet 0 va_s0) /\ Vale.X64.Decls.validSrcAddrs128 (va_get_mem_heaplet 0 va_s0)
(va_get_reg64 rR8 va_s0) keys_buffer 15 (va_get_mem_layout va_s0) Secret /\ (forall (i:nat) . i
< 15 ==> Vale.X64.Decls.buffer128_read keys_buffer i (va_get_mem_heaplet 0 va_s0) ==
FStar.Seq.Base.index #quad32 round_keys i)))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
va_get_xmm 0 va_sM == Vale.AES.AES_s.aes_encrypt_LE AES_256 key input /\ va_state_eq va_sM
(va_update_flags va_sM (va_update_xmm 2 va_sM (va_update_xmm 0 va_sM (va_update_ok va_sM
va_s0))))))
[@ va_qattr]
let va_wp_AES256EncryptBlock (input:quad32) (key:(seq nat32)) (round_keys:(seq quad32))
(keys_buffer:buffer128) (va_s0:va_state) (va_k:(va_state -> unit -> Type0)) : Type0 =
(va_get_ok va_s0 /\ (aesni_enabled /\ sse_enabled) /\ Vale.AES.AES_s.is_aes_key_LE AES_256 key /\
FStar.Seq.Base.length #quad32 round_keys == 15 /\ round_keys ==
Vale.AES.AES_s.key_to_round_keys_LE AES_256 key /\ va_get_xmm 0 va_s0 == input /\ va_get_reg64
rR8 va_s0 == Vale.X64.Memory.buffer_addr #Vale.X64.Memory.vuint128 keys_buffer
(va_get_mem_heaplet 0 va_s0) /\ Vale.X64.Decls.validSrcAddrs128 (va_get_mem_heaplet 0 va_s0)
(va_get_reg64 rR8 va_s0) keys_buffer 15 (va_get_mem_layout va_s0) Secret /\ (forall (i:nat) . i
< 15 ==> Vale.X64.Decls.buffer128_read keys_buffer i (va_get_mem_heaplet 0 va_s0) ==
FStar.Seq.Base.index #quad32 round_keys i) /\ (forall (va_x_xmm0:quad32) (va_x_xmm2:quad32)
(va_x_efl:Vale.X64.Flags.t) . let va_sM = va_upd_flags va_x_efl (va_upd_xmm 2 va_x_xmm2
(va_upd_xmm 0 va_x_xmm0 va_s0)) in va_get_ok va_sM /\ va_get_xmm 0 va_sM ==
Vale.AES.AES_s.aes_encrypt_LE AES_256 key input ==> va_k va_sM (())))
val va_wpProof_AES256EncryptBlock : input:quad32 -> key:(seq nat32) -> round_keys:(seq quad32) ->
keys_buffer:buffer128 -> va_s0:va_state -> va_k:(va_state -> unit -> Type0)
-> Ghost (va_state & va_fuel & unit)
(requires (va_t_require va_s0 /\ va_wp_AES256EncryptBlock input key round_keys keys_buffer va_s0
va_k))
(ensures (fun (va_sM, va_f0, va_g) -> va_t_ensure (va_code_AES256EncryptBlock ()) ([va_Mod_flags;
va_Mod_xmm 2; va_Mod_xmm 0]) va_s0 va_k ((va_sM, va_f0, va_g))))
[@ "opaque_to_smt" va_qattr]
let va_quick_AES256EncryptBlock (input:quad32) (key:(seq nat32)) (round_keys:(seq quad32))
(keys_buffer:buffer128) : (va_quickCode unit (va_code_AES256EncryptBlock ())) =
(va_QProc (va_code_AES256EncryptBlock ()) ([va_Mod_flags; va_Mod_xmm 2; va_Mod_xmm 0])
(va_wp_AES256EncryptBlock input key round_keys keys_buffer) (va_wpProof_AES256EncryptBlock
input key round_keys keys_buffer))
//--
//-- AES256EncryptBlockStdcall
val va_code_AES256EncryptBlockStdcall : win:bool -> Tot va_code
val va_codegen_success_AES256EncryptBlockStdcall : win:bool -> Tot va_pbool
val va_lemma_AES256EncryptBlockStdcall : va_b0:va_code -> va_s0:va_state -> win:bool ->
input:quad32 -> key:(seq nat32) -> input_buffer:buffer128 -> output_buffer:buffer128 ->
keys_buffer:buffer128
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_AES256EncryptBlockStdcall win) va_s0 /\ va_get_ok
va_s0 /\ (let (output_ptr:(va_int_range 0 18446744073709551615)) = (if win then va_get_reg64
rRcx va_s0 else va_get_reg64 rRdi va_s0) in let (input_ptr:(va_int_range 0
18446744073709551615)) = (if win then va_get_reg64 rRdx va_s0 else va_get_reg64 rRsi va_s0) in
let (expanded_key_ptr:(va_int_range 0 18446744073709551615)) = (if win then va_get_reg64 rR8
va_s0 else va_get_reg64 rRdx va_s0) in aesni_enabled /\ sse_enabled /\
Vale.AES.AES_s.is_aes_key_LE AES_256 key /\ Vale.X64.Decls.buffer128_read input_buffer 0
(va_get_mem_heaplet 0 va_s0) == input /\ expanded_key_ptr == Vale.X64.Memory.buffer_addr
#Vale.X64.Memory.vuint128 keys_buffer (va_get_mem_heaplet 0 va_s0) /\
Vale.X64.Decls.validSrcAddrs128 (va_get_mem_heaplet 0 va_s0) input_ptr input_buffer 1
(va_get_mem_layout va_s0) Secret /\ Vale.X64.Decls.validDstAddrs128 (va_get_mem_heaplet 1
va_s0) output_ptr output_buffer 1 (va_get_mem_layout va_s0) Secret /\
Vale.X64.Decls.validSrcAddrs128 (va_get_mem_heaplet 0 va_s0) expanded_key_ptr keys_buffer 15
(va_get_mem_layout va_s0) Secret /\ (forall (i:nat) . i < 15 ==> Vale.X64.Decls.buffer128_read
keys_buffer i (va_get_mem_heaplet 0 va_s0) == FStar.Seq.Base.index #Vale.Def.Types_s.quad32
(Vale.AES.AES_s.key_to_round_keys_LE AES_256 key) i))))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
(let (output_ptr:(va_int_range 0 18446744073709551615)) = (if win then va_get_reg64 rRcx va_s0
else va_get_reg64 rRdi va_s0) in let (input_ptr:(va_int_range 0 18446744073709551615)) = (if
win then va_get_reg64 rRdx va_s0 else va_get_reg64 rRsi va_s0) in let
(expanded_key_ptr:(va_int_range 0 18446744073709551615)) = (if win then va_get_reg64 rR8 va_s0
else va_get_reg64 rRdx va_s0) in Vale.X64.Decls.modifies_mem (Vale.X64.Decls.loc_buffer
#Vale.X64.Memory.vuint128 output_buffer) (va_get_mem_heaplet 1 va_s0) (va_get_mem_heaplet 1
va_sM) /\ Vale.X64.Decls.buffer128_read output_buffer 0 (va_get_mem_heaplet 1 va_sM) ==
Vale.AES.AES_s.aes_encrypt_LE AES_256 key input) /\ va_state_eq va_sM (va_update_flags va_sM
(va_update_xmm 2 va_sM (va_update_xmm 0 va_sM (va_update_mem_heaplet 1 va_sM (va_update_reg64
rR8 va_sM (va_update_ok va_sM (va_update_mem va_sM va_s0)))))))))
[@ va_qattr]
let va_wp_AES256EncryptBlockStdcall (win:bool) (input:quad32) (key:(seq nat32))
(input_buffer:buffer128) (output_buffer:buffer128) (keys_buffer:buffer128) (va_s0:va_state) | false | true | Vale.AES.X64.AES256.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 20,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val va_wp_AES256EncryptBlockStdcall
(win: bool)
(input: quad32)
(key: (seq nat32))
(input_buffer output_buffer keys_buffer: buffer128)
(va_s0: va_state)
(va_k: (va_state -> unit -> Type0))
: Type0 | [] | Vale.AES.X64.AES256.va_wp_AES256EncryptBlockStdcall | {
"file_name": "obj/Vale.AES.X64.AES256.fsti",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} |
win: Prims.bool ->
input: Vale.X64.Decls.quad32 ->
key: FStar.Seq.Base.seq Vale.X64.Memory.nat32 ->
input_buffer: Vale.X64.Memory.buffer128 ->
output_buffer: Vale.X64.Memory.buffer128 ->
keys_buffer: Vale.X64.Memory.buffer128 ->
va_s0: Vale.X64.Decls.va_state ->
va_k: (_: Vale.X64.Decls.va_state -> _: Prims.unit -> Type0)
-> Type0 | {
"end_col": 10,
"end_line": 240,
"start_col": 2,
"start_line": 213
} |
Prims.Tot | val va_wp_KeyExpansion256Stdcall
(win: bool)
(input_key_b output_key_expansion_b: buffer128)
(va_s0: va_state)
(va_k: (va_state -> unit -> Type0))
: Type0 | [
{
"abbrev": false,
"full_module": "Vale.X64.CPU_Features_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES256_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCodes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCode",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsAes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsMem",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsBasic",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Decls",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.CPU_Features_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES256_helpers",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Arch.Types",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCodes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.QuickCode",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsAes",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsVector",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsMem",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.InsBasic",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Decls",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.State",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Memory",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.X64.Machine_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.AES_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Seq",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Types_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.Def.Opaque_s",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "Vale.AES.X64",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let va_wp_KeyExpansion256Stdcall (win:bool) (input_key_b:buffer128)
(output_key_expansion_b:buffer128) (va_s0:va_state) (va_k:(va_state -> unit -> Type0)) : Type0 =
(va_get_ok va_s0 /\ (let (key_ptr:(va_int_range 0 18446744073709551615)) = va_if win (fun _ ->
va_get_reg64 rRcx va_s0) (fun _ -> va_get_reg64 rRdi va_s0) in let
(key_expansion_ptr:(va_int_range 0 18446744073709551615)) = va_if win (fun _ -> va_get_reg64
rRdx va_s0) (fun _ -> va_get_reg64 rRsi va_s0) in let (key:(FStar.Seq.Base.seq
Vale.Def.Types_s.nat32)) = Vale.AES.AES256_helpers.make_AES256_key
(Vale.X64.Decls.buffer128_read input_key_b 0 (va_get_mem_heaplet 0 va_s0))
(Vale.X64.Decls.buffer128_read input_key_b 1 (va_get_mem_heaplet 0 va_s0)) in aesni_enabled /\
avx_enabled /\ sse_enabled /\ Vale.X64.Decls.validSrcAddrs128 (va_get_mem_heaplet 0 va_s0)
key_ptr input_key_b 2 (va_get_mem_layout va_s0) Secret /\ Vale.X64.Decls.validDstAddrs128
(va_get_mem_heaplet 1 va_s0) key_expansion_ptr output_key_expansion_b 15 (va_get_mem_layout
va_s0) Secret) /\ (forall (va_x_mem:vale_heap) (va_x_rdx:nat64) (va_x_heap1:vale_heap)
(va_x_xmm1:quad32) (va_x_xmm2:quad32) (va_x_xmm3:quad32) (va_x_xmm4:quad32)
(va_x_efl:Vale.X64.Flags.t) . let va_sM = va_upd_flags va_x_efl (va_upd_xmm 4 va_x_xmm4
(va_upd_xmm 3 va_x_xmm3 (va_upd_xmm 2 va_x_xmm2 (va_upd_xmm 1 va_x_xmm1 (va_upd_mem_heaplet 1
va_x_heap1 (va_upd_reg64 rRdx va_x_rdx (va_upd_mem va_x_mem va_s0))))))) in va_get_ok va_sM /\
(let (key_ptr:(va_int_range 0 18446744073709551615)) = va_if win (fun _ -> va_get_reg64 rRcx
va_s0) (fun _ -> va_get_reg64 rRdi va_s0) in let (key_expansion_ptr:(va_int_range 0
18446744073709551615)) = va_if win (fun _ -> va_get_reg64 rRdx va_s0) (fun _ -> va_get_reg64
rRsi va_s0) in let (key:(FStar.Seq.Base.seq Vale.Def.Types_s.nat32)) =
Vale.AES.AES256_helpers.make_AES256_key (Vale.X64.Decls.buffer128_read input_key_b 0
(va_get_mem_heaplet 0 va_s0)) (Vale.X64.Decls.buffer128_read input_key_b 1 (va_get_mem_heaplet
0 va_s0)) in aesni_enabled /\ avx_enabled /\ sse_enabled /\ Vale.X64.Decls.validSrcAddrs128
(va_get_mem_heaplet 0 va_sM) key_ptr input_key_b 2 (va_get_mem_layout va_sM) Secret /\
Vale.X64.Decls.validDstAddrs128 (va_get_mem_heaplet 1 va_sM) key_expansion_ptr
output_key_expansion_b 15 (va_get_mem_layout va_sM) Secret) /\ (let (key_ptr:(va_int_range 0
18446744073709551615)) = va_if win (fun _ -> va_get_reg64 rRcx va_s0) (fun _ -> va_get_reg64
rRdi va_s0) in let (key_expansion_ptr:(va_int_range 0 18446744073709551615)) = va_if win (fun _
-> va_get_reg64 rRdx va_s0) (fun _ -> va_get_reg64 rRsi va_s0) in let (key:(FStar.Seq.Base.seq
Vale.Def.Types_s.nat32)) = Vale.AES.AES256_helpers.make_AES256_key
(Vale.X64.Decls.buffer128_read input_key_b 0 (va_get_mem_heaplet 0 va_s0))
(Vale.X64.Decls.buffer128_read input_key_b 1 (va_get_mem_heaplet 0 va_s0)) in
Vale.X64.Decls.modifies_buffer128 output_key_expansion_b (va_get_mem_heaplet 1 va_s0)
(va_get_mem_heaplet 1 va_sM) /\ (forall (j:nat) . {:pattern(buffer128_read
output_key_expansion_b j (va_get_mem_heaplet 1 va_sM))}j <= 14 ==>
Vale.X64.Decls.buffer128_read output_key_expansion_b j (va_get_mem_heaplet 1 va_sM) ==
FStar.Seq.Base.index #Vale.Def.Types_s.quad32 (Vale.AES.AES_s.key_to_round_keys_LE AES_256 key)
j)) ==> va_k va_sM (()))) | val va_wp_KeyExpansion256Stdcall
(win: bool)
(input_key_b output_key_expansion_b: buffer128)
(va_s0: va_state)
(va_k: (va_state -> unit -> Type0))
: Type0
let va_wp_KeyExpansion256Stdcall
(win: bool)
(input_key_b output_key_expansion_b: buffer128)
(va_s0: va_state)
(va_k: (va_state -> unit -> Type0))
: Type0 = | false | null | false | (va_get_ok va_s0 /\
(let key_ptr:(va_int_range 0 18446744073709551615) =
va_if win (fun _ -> va_get_reg64 rRcx va_s0) (fun _ -> va_get_reg64 rRdi va_s0)
in
let key_expansion_ptr:(va_int_range 0 18446744073709551615) =
va_if win (fun _ -> va_get_reg64 rRdx va_s0) (fun _ -> va_get_reg64 rRsi va_s0)
in
let key:(FStar.Seq.Base.seq Vale.Def.Types_s.nat32) =
Vale.AES.AES256_helpers.make_AES256_key (Vale.X64.Decls.buffer128_read input_key_b
0
(va_get_mem_heaplet 0 va_s0))
(Vale.X64.Decls.buffer128_read input_key_b 1 (va_get_mem_heaplet 0 va_s0))
in
aesni_enabled /\ avx_enabled /\ sse_enabled /\
Vale.X64.Decls.validSrcAddrs128 (va_get_mem_heaplet 0 va_s0)
key_ptr
input_key_b
2
(va_get_mem_layout va_s0)
Secret /\
Vale.X64.Decls.validDstAddrs128 (va_get_mem_heaplet 1 va_s0)
key_expansion_ptr
output_key_expansion_b
15
(va_get_mem_layout va_s0)
Secret) /\
(forall (va_x_mem: vale_heap)
(va_x_rdx: nat64)
(va_x_heap1: vale_heap)
(va_x_xmm1: quad32)
(va_x_xmm2: quad32)
(va_x_xmm3: quad32)
(va_x_xmm4: quad32)
(va_x_efl: Vale.X64.Flags.t).
let va_sM =
va_upd_flags va_x_efl
(va_upd_xmm 4
va_x_xmm4
(va_upd_xmm 3
va_x_xmm3
(va_upd_xmm 2
va_x_xmm2
(va_upd_xmm 1
va_x_xmm1
(va_upd_mem_heaplet 1
va_x_heap1
(va_upd_reg64 rRdx va_x_rdx (va_upd_mem va_x_mem va_s0)))))))
in
va_get_ok va_sM /\
(let key_ptr:(va_int_range 0 18446744073709551615) =
va_if win (fun _ -> va_get_reg64 rRcx va_s0) (fun _ -> va_get_reg64 rRdi va_s0)
in
let key_expansion_ptr:(va_int_range 0 18446744073709551615) =
va_if win (fun _ -> va_get_reg64 rRdx va_s0) (fun _ -> va_get_reg64 rRsi va_s0)
in
let key:(FStar.Seq.Base.seq Vale.Def.Types_s.nat32) =
Vale.AES.AES256_helpers.make_AES256_key (Vale.X64.Decls.buffer128_read input_key_b
0
(va_get_mem_heaplet 0 va_s0))
(Vale.X64.Decls.buffer128_read input_key_b 1 (va_get_mem_heaplet 0 va_s0))
in
aesni_enabled /\ avx_enabled /\ sse_enabled /\
Vale.X64.Decls.validSrcAddrs128 (va_get_mem_heaplet 0 va_sM)
key_ptr
input_key_b
2
(va_get_mem_layout va_sM)
Secret /\
Vale.X64.Decls.validDstAddrs128 (va_get_mem_heaplet 1 va_sM)
key_expansion_ptr
output_key_expansion_b
15
(va_get_mem_layout va_sM)
Secret) /\
(let key_ptr:(va_int_range 0 18446744073709551615) =
va_if win (fun _ -> va_get_reg64 rRcx va_s0) (fun _ -> va_get_reg64 rRdi va_s0)
in
let key_expansion_ptr:(va_int_range 0 18446744073709551615) =
va_if win (fun _ -> va_get_reg64 rRdx va_s0) (fun _ -> va_get_reg64 rRsi va_s0)
in
let key:(FStar.Seq.Base.seq Vale.Def.Types_s.nat32) =
Vale.AES.AES256_helpers.make_AES256_key (Vale.X64.Decls.buffer128_read input_key_b
0
(va_get_mem_heaplet 0 va_s0))
(Vale.X64.Decls.buffer128_read input_key_b 1 (va_get_mem_heaplet 0 va_s0))
in
Vale.X64.Decls.modifies_buffer128 output_key_expansion_b
(va_get_mem_heaplet 1 va_s0)
(va_get_mem_heaplet 1 va_sM) /\
(forall (j: nat).
{:pattern (buffer128_read output_key_expansion_b j (va_get_mem_heaplet 1 va_sM))}
j <= 14 ==>
Vale.X64.Decls.buffer128_read output_key_expansion_b j (va_get_mem_heaplet 1 va_sM) ==
FStar.Seq.Base.index #Vale.Def.Types_s.quad32
(Vale.AES.AES_s.key_to_round_keys_LE AES_256 key)
j)) ==>
va_k va_sM (()))) | {
"checked_file": "Vale.AES.X64.AES256.fsti.checked",
"dependencies": [
"Vale.X64.State.fsti.checked",
"Vale.X64.QuickCodes.fsti.checked",
"Vale.X64.QuickCode.fst.checked",
"Vale.X64.Memory.fsti.checked",
"Vale.X64.Machine_s.fst.checked",
"Vale.X64.InsVector.fsti.checked",
"Vale.X64.InsMem.fsti.checked",
"Vale.X64.InsBasic.fsti.checked",
"Vale.X64.InsAes.fsti.checked",
"Vale.X64.Flags.fsti.checked",
"Vale.X64.Decls.fsti.checked",
"Vale.X64.CPU_Features_s.fst.checked",
"Vale.Def.Types_s.fst.checked",
"Vale.Def.Opaque_s.fsti.checked",
"Vale.Arch.Types.fsti.checked",
"Vale.AES.AES_s.fst.checked",
"Vale.AES.AES256_helpers.fsti.checked",
"prims.fst.checked",
"FStar.Seq.Base.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked"
],
"interface_file": false,
"source_file": "Vale.AES.X64.AES256.fsti"
} | [
"total"
] | [
"Prims.bool",
"Vale.X64.Memory.buffer128",
"Vale.X64.Decls.va_state",
"Prims.unit",
"Prims.l_and",
"Prims.b2t",
"Vale.X64.Decls.va_get_ok",
"Vale.X64.CPU_Features_s.aesni_enabled",
"Vale.X64.CPU_Features_s.avx_enabled",
"Vale.X64.CPU_Features_s.sse_enabled",
"Vale.X64.Decls.validSrcAddrs128",
"Vale.X64.Decls.va_get_mem_heaplet",
"Vale.X64.Decls.va_get_mem_layout",
"Vale.Arch.HeapTypes_s.Secret",
"Vale.X64.Decls.validDstAddrs128",
"FStar.Seq.Base.seq",
"Vale.Def.Words_s.nat32",
"Vale.AES.AES256_helpers.make_AES256_key",
"Vale.X64.Decls.buffer128_read",
"Vale.X64.Decls.va_int_range",
"Vale.X64.Decls.va_if",
"Vale.Def.Types_s.nat64",
"Vale.X64.Decls.va_get_reg64",
"Vale.X64.Machine_s.rRdx",
"Prims.l_not",
"Vale.X64.Machine_s.rRsi",
"Vale.X64.Machine_s.rRcx",
"Vale.X64.Machine_s.rRdi",
"Prims.l_Forall",
"Vale.X64.InsBasic.vale_heap",
"Vale.X64.Memory.nat64",
"Vale.X64.Decls.quad32",
"Vale.X64.Flags.t",
"Prims.l_imp",
"Vale.X64.Decls.modifies_buffer128",
"Prims.nat",
"Prims.op_LessThanOrEqual",
"Prims.eq2",
"Vale.Def.Types_s.quad32",
"FStar.Seq.Base.index",
"Vale.AES.AES_s.key_to_round_keys_LE",
"Vale.AES.AES_common_s.AES_256",
"Vale.X64.State.vale_state",
"Vale.X64.Decls.va_upd_flags",
"Vale.X64.Decls.va_upd_xmm",
"Vale.X64.Decls.va_upd_mem_heaplet",
"Vale.X64.Decls.va_upd_reg64",
"Vale.X64.Decls.va_upd_mem"
] | [] | module Vale.AES.X64.AES256
open Vale.Def.Opaque_s
open Vale.Def.Types_s
open FStar.Seq
open Vale.AES.AES_s
open Vale.X64.Machine_s
open Vale.X64.Memory
open Vale.X64.State
open Vale.X64.Decls
open Vale.X64.InsBasic
open Vale.X64.InsMem
open Vale.X64.InsVector
open Vale.X64.InsAes
open Vale.X64.QuickCode
open Vale.X64.QuickCodes
open Vale.Arch.Types
open Vale.AES.AES256_helpers
open Vale.X64.CPU_Features_s
#reset-options "--z3rlimit 20"
//-- KeyExpansion256Stdcall
val va_code_KeyExpansion256Stdcall : win:bool -> Tot va_code
val va_codegen_success_KeyExpansion256Stdcall : win:bool -> Tot va_pbool
val va_lemma_KeyExpansion256Stdcall : va_b0:va_code -> va_s0:va_state -> win:bool ->
input_key_b:buffer128 -> output_key_expansion_b:buffer128
-> Ghost (va_state & va_fuel)
(requires (va_require_total va_b0 (va_code_KeyExpansion256Stdcall win) va_s0 /\ va_get_ok va_s0
/\ (let (key_ptr:(va_int_range 0 18446744073709551615)) = (if win then va_get_reg64 rRcx va_s0
else va_get_reg64 rRdi va_s0) in let (key_expansion_ptr:(va_int_range 0 18446744073709551615))
= (if win then va_get_reg64 rRdx va_s0 else va_get_reg64 rRsi va_s0) in let
(key:(FStar.Seq.Base.seq Vale.Def.Types_s.nat32)) = Vale.AES.AES256_helpers.make_AES256_key
(Vale.X64.Decls.buffer128_read input_key_b 0 (va_get_mem_heaplet 0 va_s0))
(Vale.X64.Decls.buffer128_read input_key_b 1 (va_get_mem_heaplet 0 va_s0)) in aesni_enabled /\
avx_enabled /\ sse_enabled /\ Vale.X64.Decls.validSrcAddrs128 (va_get_mem_heaplet 0 va_s0)
key_ptr input_key_b 2 (va_get_mem_layout va_s0) Secret /\ Vale.X64.Decls.validDstAddrs128
(va_get_mem_heaplet 1 va_s0) key_expansion_ptr output_key_expansion_b 15 (va_get_mem_layout
va_s0) Secret)))
(ensures (fun (va_sM, va_fM) -> va_ensure_total va_b0 va_s0 va_sM va_fM /\ va_get_ok va_sM /\
(let (key_ptr:(va_int_range 0 18446744073709551615)) = (if win then va_get_reg64 rRcx va_s0
else va_get_reg64 rRdi va_s0) in let (key_expansion_ptr:(va_int_range 0 18446744073709551615))
= (if win then va_get_reg64 rRdx va_s0 else va_get_reg64 rRsi va_s0) in let
(key:(FStar.Seq.Base.seq Vale.Def.Types_s.nat32)) = Vale.AES.AES256_helpers.make_AES256_key
(Vale.X64.Decls.buffer128_read input_key_b 0 (va_get_mem_heaplet 0 va_s0))
(Vale.X64.Decls.buffer128_read input_key_b 1 (va_get_mem_heaplet 0 va_s0)) in aesni_enabled /\
avx_enabled /\ sse_enabled /\ Vale.X64.Decls.validSrcAddrs128 (va_get_mem_heaplet 0 va_sM)
key_ptr input_key_b 2 (va_get_mem_layout va_sM) Secret /\ Vale.X64.Decls.validDstAddrs128
(va_get_mem_heaplet 1 va_sM) key_expansion_ptr output_key_expansion_b 15 (va_get_mem_layout
va_sM) Secret) /\ (let (key_ptr:(va_int_range 0 18446744073709551615)) = (if win then
va_get_reg64 rRcx va_s0 else va_get_reg64 rRdi va_s0) in let (key_expansion_ptr:(va_int_range 0
18446744073709551615)) = (if win then va_get_reg64 rRdx va_s0 else va_get_reg64 rRsi va_s0) in
let (key:(FStar.Seq.Base.seq Vale.Def.Types_s.nat32)) = Vale.AES.AES256_helpers.make_AES256_key
(Vale.X64.Decls.buffer128_read input_key_b 0 (va_get_mem_heaplet 0 va_s0))
(Vale.X64.Decls.buffer128_read input_key_b 1 (va_get_mem_heaplet 0 va_s0)) in
Vale.X64.Decls.modifies_buffer128 output_key_expansion_b (va_get_mem_heaplet 1 va_s0)
(va_get_mem_heaplet 1 va_sM) /\ (forall (j:nat) . {:pattern(buffer128_read
output_key_expansion_b j (va_get_mem_heaplet 1 va_sM))}j <= 14 ==>
Vale.X64.Decls.buffer128_read output_key_expansion_b j (va_get_mem_heaplet 1 va_sM) ==
FStar.Seq.Base.index #Vale.Def.Types_s.quad32 (Vale.AES.AES_s.key_to_round_keys_LE AES_256 key)
j)) /\ va_state_eq va_sM (va_update_flags va_sM (va_update_xmm 4 va_sM (va_update_xmm 3 va_sM
(va_update_xmm 2 va_sM (va_update_xmm 1 va_sM (va_update_mem_heaplet 1 va_sM (va_update_reg64
rRdx va_sM (va_update_ok va_sM (va_update_mem va_sM va_s0)))))))))))
[@ va_qattr]
let va_wp_KeyExpansion256Stdcall (win:bool) (input_key_b:buffer128) | false | true | Vale.AES.X64.AES256.fsti | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 0,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": true,
"smtencoding_l_arith_repr": "native",
"smtencoding_nl_arith_repr": "wrapped",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [
"smt.arith.nl=false",
"smt.QI.EAGER_THRESHOLD=100",
"smt.CASE_SPLIT=3"
],
"z3refresh": false,
"z3rlimit": 20,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val va_wp_KeyExpansion256Stdcall
(win: bool)
(input_key_b output_key_expansion_b: buffer128)
(va_s0: va_state)
(va_k: (va_state -> unit -> Type0))
: Type0 | [] | Vale.AES.X64.AES256.va_wp_KeyExpansion256Stdcall | {
"file_name": "obj/Vale.AES.X64.AES256.fsti",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} |
win: Prims.bool ->
input_key_b: Vale.X64.Memory.buffer128 ->
output_key_expansion_b: Vale.X64.Memory.buffer128 ->
va_s0: Vale.X64.Decls.va_state ->
va_k: (_: Vale.X64.Decls.va_state -> _: Prims.unit -> Type0)
-> Type0 | {
"end_col": 29,
"end_line": 103,
"start_col": 2,
"start_line": 67
} |
FStar.Pervasives.Lemma | val pow2_127: n:nat -> Lemma (pow2 127 = 0x80000000000000000000000000000000) [SMTPat (pow2 n)] | [
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let pow2_127 _ = assert_norm (pow2 127 = 0x80000000000000000000000000000000) | val pow2_127: n:nat -> Lemma (pow2 127 = 0x80000000000000000000000000000000) [SMTPat (pow2 n)]
let pow2_127 _ = | false | null | true | assert_norm (pow2 127 = 0x80000000000000000000000000000000) | {
"checked_file": "Lib.IntTypes.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt8.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt16.fsti.checked",
"FStar.UInt128.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int8.fsti.checked",
"FStar.Int64.fsti.checked",
"FStar.Int32.fsti.checked",
"FStar.Int16.fsti.checked",
"FStar.Int128.fsti.checked",
"FStar.Int.Cast.Full.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Int.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "Lib.IntTypes.fst"
} | [
"lemma"
] | [
"Prims.nat",
"FStar.Pervasives.assert_norm",
"Prims.b2t",
"Prims.op_Equality",
"Prims.int",
"Prims.pow2",
"Prims.unit"
] | [] | module Lib.IntTypes
open FStar.Math.Lemmas
#push-options "--max_fuel 0 --max_ifuel 1 --z3rlimit 200"
let pow2_2 _ = assert_norm (pow2 2 = 4)
let pow2_3 _ = assert_norm (pow2 3 = 8) | false | false | Lib.IntTypes.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 200,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val pow2_127: n:nat -> Lemma (pow2 127 = 0x80000000000000000000000000000000) [SMTPat (pow2 n)] | [] | Lib.IntTypes.pow2_127 | {
"file_name": "lib/Lib.IntTypes.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | n: Prims.nat
-> FStar.Pervasives.Lemma (ensures Prims.pow2 127 = 0x80000000000000000000000000000000)
[SMTPat (Prims.pow2 n)] | {
"end_col": 76,
"end_line": 10,
"start_col": 17,
"start_line": 10
} |
Prims.Tot | val sec_int_v: #t:inttype -> sec_int_t t -> range_t t | [
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let sec_int_v #t u = pub_int_v u | val sec_int_v: #t:inttype -> sec_int_t t -> range_t t
let sec_int_v #t u = | false | null | false | pub_int_v u | {
"checked_file": "Lib.IntTypes.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt8.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt16.fsti.checked",
"FStar.UInt128.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int8.fsti.checked",
"FStar.Int64.fsti.checked",
"FStar.Int32.fsti.checked",
"FStar.Int16.fsti.checked",
"FStar.Int128.fsti.checked",
"FStar.Int.Cast.Full.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Int.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "Lib.IntTypes.fst"
} | [
"total"
] | [
"Lib.IntTypes.inttype",
"Lib.IntTypes.sec_int_t",
"Lib.IntTypes.pub_int_v",
"Lib.IntTypes.range_t"
] | [] | module Lib.IntTypes
open FStar.Math.Lemmas
#push-options "--max_fuel 0 --max_ifuel 1 --z3rlimit 200"
let pow2_2 _ = assert_norm (pow2 2 = 4)
let pow2_3 _ = assert_norm (pow2 3 = 8)
let pow2_4 _ = assert_norm (pow2 4 = 16)
let pow2_127 _ = assert_norm (pow2 127 = 0x80000000000000000000000000000000)
let bits_numbytes t = ()
let sec_int_t t = pub_int_t t | false | false | Lib.IntTypes.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 200,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val sec_int_v: #t:inttype -> sec_int_t t -> range_t t | [] | Lib.IntTypes.sec_int_v | {
"file_name": "lib/Lib.IntTypes.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | u40: Lib.IntTypes.sec_int_t t -> Lib.IntTypes.range_t t | {
"end_col": 32,
"end_line": 16,
"start_col": 21,
"start_line": 16
} |
FStar.Pervasives.Lemma | val pow2_2: n:nat -> Lemma (pow2 2 = 4) [SMTPat (pow2 n)] | [
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let pow2_2 _ = assert_norm (pow2 2 = 4) | val pow2_2: n:nat -> Lemma (pow2 2 = 4) [SMTPat (pow2 n)]
let pow2_2 _ = | false | null | true | assert_norm (pow2 2 = 4) | {
"checked_file": "Lib.IntTypes.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt8.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt16.fsti.checked",
"FStar.UInt128.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int8.fsti.checked",
"FStar.Int64.fsti.checked",
"FStar.Int32.fsti.checked",
"FStar.Int16.fsti.checked",
"FStar.Int128.fsti.checked",
"FStar.Int.Cast.Full.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Int.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "Lib.IntTypes.fst"
} | [
"lemma"
] | [
"Prims.nat",
"FStar.Pervasives.assert_norm",
"Prims.b2t",
"Prims.op_Equality",
"Prims.int",
"Prims.pow2",
"Prims.unit"
] | [] | module Lib.IntTypes
open FStar.Math.Lemmas
#push-options "--max_fuel 0 --max_ifuel 1 --z3rlimit 200" | false | false | Lib.IntTypes.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 200,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val pow2_2: n:nat -> Lemma (pow2 2 = 4) [SMTPat (pow2 n)] | [] | Lib.IntTypes.pow2_2 | {
"file_name": "lib/Lib.IntTypes.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | n: Prims.nat -> FStar.Pervasives.Lemma (ensures Prims.pow2 2 = 4) [SMTPat (Prims.pow2 n)] | {
"end_col": 41,
"end_line": 7,
"start_col": 17,
"start_line": 7
} |
Prims.Tot | val mul_s64_wide: int64 -> int64 -> int128 | [
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let mul_s64_wide a b = Int128.mul_wide a b | val mul_s64_wide: int64 -> int64 -> int128
let mul_s64_wide a b = | false | null | false | Int128.mul_wide a b | {
"checked_file": "Lib.IntTypes.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt8.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt16.fsti.checked",
"FStar.UInt128.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int8.fsti.checked",
"FStar.Int64.fsti.checked",
"FStar.Int32.fsti.checked",
"FStar.Int16.fsti.checked",
"FStar.Int128.fsti.checked",
"FStar.Int.Cast.Full.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Int.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "Lib.IntTypes.fst"
} | [
"total"
] | [
"Lib.IntTypes.int64",
"FStar.Int128.mul_wide",
"Lib.IntTypes.int128"
] | [] | module Lib.IntTypes
open FStar.Math.Lemmas
#push-options "--max_fuel 0 --max_ifuel 1 --z3rlimit 200"
let pow2_2 _ = assert_norm (pow2 2 = 4)
let pow2_3 _ = assert_norm (pow2 3 = 8)
let pow2_4 _ = assert_norm (pow2 4 = 16)
let pow2_127 _ = assert_norm (pow2 127 = 0x80000000000000000000000000000000)
let bits_numbytes t = ()
let sec_int_t t = pub_int_t t
let sec_int_v #t u = pub_int_v u
let secret #t x = x
[@(strict_on_arguments [0])]
let mk_int #t #l x =
match t with
| U1 -> UInt8.uint_to_t x
| U8 -> UInt8.uint_to_t x
| U16 -> UInt16.uint_to_t x
| U32 -> UInt32.uint_to_t x
| U64 -> UInt64.uint_to_t x
| U128 -> UInt128.uint_to_t x
| S8 -> Int8.int_to_t x
| S16 -> Int16.int_to_t x
| S32 -> Int32.int_to_t x
| S64 -> Int64.int_to_t x
| S128 -> Int128.int_to_t x
val v_extensionality:
#t:inttype
-> #l:secrecy_level
-> a:int_t t l
-> b:int_t t l
-> Lemma
(requires v a == v b)
(ensures a == b)
let v_extensionality #t #l a b =
match t with
| U1 -> ()
| U8 -> UInt8.v_inj a b
| U16 -> UInt16.v_inj a b
| U32 -> UInt32.v_inj a b
| U64 -> UInt64.v_inj a b
| U128 -> UInt128.v_inj a b
| S8 -> Int8.v_inj a b
| S16 -> Int16.v_inj a b
| S32 -> Int32.v_inj a b
| S64 -> Int64.v_inj a b
| S128 -> Int128.v_inj a b
let v_injective #t #l a =
v_extensionality a (mk_int (v a))
let v_mk_int #t #l n = ()
let u128 n = FStar.UInt128.uint64_to_uint128 (u64 n)
// KaRaMeL will extract this to FStar_Int128_int_to_t, which isn't provided
// We'll need to have FStar.Int128.int64_to_int128 to support int128_t literals
let i128 n =
assert_norm (pow2 (bits S64 - 1) <= pow2 (bits S128 - 1));
sint #S128 #SEC n
let size_to_uint32 x = x
let size_to_uint64 x = Int.Cast.uint32_to_uint64 x
let byte_to_uint8 x = x
let byte_to_int8 x = Int.Cast.uint8_to_int8 x
let op_At_Percent = Int.op_At_Percent
// FStar.UInt128 gets special treatment in KaRaMeL. There is no
// equivalent for FStar.Int128 at the moment, so we use the three
// assumed cast operators below.
//
// Using them will fail at runtime with an informative message.
// The commented-out implementations show that they are realizable.
//
// When support for `FStar.Int128` is added KaRaMeL, these casts must
// be added as special cases. When using builtin compiler support for
// `int128_t`, they can be implemented directly as C casts without
// undefined or implementation-defined behaviour.
assume
val uint128_to_int128: a:UInt128.t{v a <= maxint S128} -> b:Int128.t{Int128.v b == UInt128.v a}
//let uint128_to_int128 a = Int128.int_to_t (v a)
assume
val int128_to_uint128: a:Int128.t -> b:UInt128.t{UInt128.v b == Int128.v a % pow2 128}
//let int128_to_uint128 a = mk_int (v a % pow2 128)
assume
val int64_to_int128: a:Int64.t -> b:Int128.t{Int128.v b == Int64.v a}
//let int64_to_int128 a = Int128.int_to_t (v a)
val uint64_to_int128: a:UInt64.t -> b:Int128.t{Int128.v b == UInt64.v a}
let uint64_to_int128 a = uint128_to_int128 (Int.Cast.Full.uint64_to_uint128 a)
val int64_to_uint128: a:Int64.t -> b:UInt128.t{UInt128.v b == Int64.v a % pow2 128}
let int64_to_uint128 a = int128_to_uint128 (int64_to_int128 a)
val int128_to_uint64: a:Int128.t -> b:UInt64.t{UInt64.v b == Int128.v a % pow2 64}
let int128_to_uint64 a = Int.Cast.Full.uint128_to_uint64 (int128_to_uint128 a)
#push-options "--z3rlimit 1000"
[@(strict_on_arguments [0;2])]
let cast #t #l t' l' u =
assert_norm (pow2 8 = 2 * pow2 7);
assert_norm (pow2 16 = 2 * pow2 15);
assert_norm (pow2 64 * pow2 64 = pow2 128);
assert_norm (pow2 16 * pow2 48 = pow2 64);
assert_norm (pow2 8 * pow2 56 = pow2 64);
assert_norm (pow2 32 * pow2 32 = pow2 64);
modulo_modulo_lemma (v u) (pow2 32) (pow2 32);
modulo_modulo_lemma (v u) (pow2 64) (pow2 64);
modulo_modulo_lemma (v u) (pow2 128) (pow2 64);
modulo_modulo_lemma (v u) (pow2 16) (pow2 48);
modulo_modulo_lemma (v u) (pow2 8) (pow2 56);
let open FStar.Int.Cast in
let open FStar.Int.Cast.Full in
match t, t' with
| U1, U1 -> u
| U1, U8 -> u
| U1, U16 -> uint8_to_uint16 u
| U1, U32 -> uint8_to_uint32 u
| U1, U64 -> uint8_to_uint64 u
| U1, U128 -> UInt128.uint64_to_uint128 (uint8_to_uint64 u)
| U1, S8 -> uint8_to_int8 u
| U1, S16 -> uint8_to_int16 u
| U1, S32 -> uint8_to_int32 u
| U1, S64 -> uint8_to_int64 u
| U1, S128 -> uint64_to_int128 (uint8_to_uint64 u)
| U8, U1 -> UInt8.rem u 2uy
| U8, U8 -> u
| U8, U16 -> uint8_to_uint16 u
| U8, U32 -> uint8_to_uint32 u
| U8, U64 -> uint8_to_uint64 u
| U8, U128 -> UInt128.uint64_to_uint128 (uint8_to_uint64 u)
| U8, S8 -> uint8_to_int8 u
| U8, S16 -> uint8_to_int16 u
| U8, S32 -> uint8_to_int32 u
| U8, S64 -> uint8_to_int64 u
| U8, S128 -> uint64_to_int128 (uint8_to_uint64 u)
| U16, U1 -> UInt8.rem (uint16_to_uint8 u) 2uy
| U16, U8 -> uint16_to_uint8 u
| U16, U16 -> u
| U16, U32 -> uint16_to_uint32 u
| U16, U64 -> uint16_to_uint64 u
| U16, U128 -> UInt128.uint64_to_uint128 (uint16_to_uint64 u)
| U16, S8 -> uint16_to_int8 u
| U16, S16 -> uint16_to_int16 u
| U16, S32 -> uint16_to_int32 u
| U16, S64 -> uint16_to_int64 u
| U16, S128 -> uint64_to_int128 (uint16_to_uint64 u)
| U32, U1 -> UInt8.rem (uint32_to_uint8 u) 2uy
| U32, U8 -> uint32_to_uint8 u
| U32, U16 -> uint32_to_uint16 u
| U32, U32 -> u
| U32, U64 -> uint32_to_uint64 u
| U32, U128 -> UInt128.uint64_to_uint128 (uint32_to_uint64 u)
| U32, S8 -> uint32_to_int8 u
| U32, S16 -> uint32_to_int16 u
| U32, S32 -> uint32_to_int32 u
| U32, S64 -> uint32_to_int64 u
| U32, S128 -> uint64_to_int128 (uint32_to_uint64 u)
| U64, U1 -> UInt8.rem (uint64_to_uint8 u) 2uy
| U64, U8 -> uint64_to_uint8 u
| U64, U16 -> uint64_to_uint16 u
| U64, U32 -> uint64_to_uint32 u
| U64, U64 -> u
| U64, U128 -> UInt128.uint64_to_uint128 u
| U64, S8 -> uint64_to_int8 u
| U64, S16 -> uint64_to_int16 u
| U64, S32 -> uint64_to_int32 u
| U64, S64 -> uint64_to_int64 u
| U64, S128 -> uint64_to_int128 u
| U128, U1 -> UInt8.rem (uint64_to_uint8 (uint128_to_uint64 u)) 2uy
| U128, U8 -> uint64_to_uint8 (UInt128.uint128_to_uint64 u)
| U128, U16 -> uint64_to_uint16 (UInt128.uint128_to_uint64 u)
| U128, U32 -> uint64_to_uint32 (UInt128.uint128_to_uint64 u)
| U128, U64 -> UInt128.uint128_to_uint64 u
| U128, U128 -> u
| U128, S8 -> uint64_to_int8 (UInt128.uint128_to_uint64 u)
| U128, S16 -> uint64_to_int16 (UInt128.uint128_to_uint64 u)
| U128, S32 -> uint64_to_int32 (UInt128.uint128_to_uint64 u)
| U128, S64 -> uint64_to_int64 (UInt128.uint128_to_uint64 u)
| U128, S128 -> uint128_to_int128 u
| S8, U1 -> UInt8.rem (int8_to_uint8 u) 2uy
| S8, U8 -> int8_to_uint8 u
| S8, U16 -> int8_to_uint16 u
| S8, U32 -> int8_to_uint32 u
| S8, U64 -> int8_to_uint64 u
| S8, U128 -> int64_to_uint128 (int8_to_int64 u)
| S8, S8 -> u
| S8, S16 -> int8_to_int16 u
| S8, S32 -> int8_to_int32 u
| S8, S64 -> int8_to_int64 u
| S8, S128 -> int64_to_int128 (int8_to_int64 u)
| S16, U1 -> UInt8.rem (int16_to_uint8 u) 2uy
| S16, U8 -> int16_to_uint8 u
| S16, U16 -> int16_to_uint16 u
| S16, U32 -> int16_to_uint32 u
| S16, U64 -> int16_to_uint64 u
| S16, U128 -> int64_to_uint128 (int16_to_int64 u)
| S16, S8 -> int16_to_int8 u
| S16, S16 -> u
| S16, S32 -> int16_to_int32 u
| S16, S64 -> int16_to_int64 u
| S16, S128 -> int64_to_int128 (int16_to_int64 u)
| S32, U1 -> UInt8.rem (int32_to_uint8 u) 2uy
| S32, U8 -> int32_to_uint8 u
| S32, U16 -> int32_to_uint16 u
| S32, U32 -> int32_to_uint32 u
| S32, U64 -> int32_to_uint64 u
| S32, U128 -> int64_to_uint128 (int32_to_int64 u)
| S32, S8 -> int32_to_int8 u
| S32, S16 -> int32_to_int16 u
| S32, S32 -> u
| S32, S64 -> int32_to_int64 u
| S32, S128 -> int64_to_int128 (int32_to_int64 u)
| S64, U1 -> UInt8.rem (int64_to_uint8 u) 2uy
| S64, U8 -> int64_to_uint8 u
| S64, U16 -> int64_to_uint16 u
| S64, U32 -> int64_to_uint32 u
| S64, U64 -> int64_to_uint64 u
| S64, U128 -> int64_to_uint128 u
| S64, S8 -> int64_to_int8 u
| S64, S16 -> int64_to_int16 u
| S64, S32 -> int64_to_int32 u
| S64, S64 -> u
| S64, S128 -> int64_to_int128 u
| S128, U1 -> UInt8.rem (uint64_to_uint8 (int128_to_uint64 u)) 2uy
| S128, U8 -> uint64_to_uint8 (int128_to_uint64 u)
| S128, U16 -> uint64_to_uint16 (int128_to_uint64 u)
| S128, U32 -> uint64_to_uint32 (int128_to_uint64 u)
| S128, U64 -> int128_to_uint64 u
| S128, U128 -> int128_to_uint128 u
| S128, S8 -> uint64_to_int8 (int128_to_uint64 u)
| S128, S16 -> uint64_to_int16 (int128_to_uint64 u)
| S128, S32 -> uint64_to_int32 (int128_to_uint64 u)
| S128, S64 -> uint64_to_int64 (int128_to_uint64 u)
| S128, S128 -> u
#pop-options
[@(strict_on_arguments [0])]
let ones t l =
match t with
| U1 -> 0x1uy
| U8 -> 0xFFuy
| U16 -> 0xFFFFus
| U32 -> 0xFFFFFFFFul
| U64 -> 0xFFFFFFFFFFFFFFFFuL
| U128 ->
let x = UInt128.uint64_to_uint128 0xFFFFFFFFFFFFFFFFuL in
let y = (UInt128.shift_left x 64ul) `UInt128.add` x in
assert_norm (UInt128.v y == pow2 128 - 1);
y
| _ -> mk_int (-1)
let zeros t l = mk_int 0
[@(strict_on_arguments [0])]
let add_mod #t #l a b =
match t with
| U1 -> UInt8.rem (UInt8.add_mod a b) 2uy
| U8 -> UInt8.add_mod a b
| U16 -> UInt16.add_mod a b
| U32 -> UInt32.add_mod a b
| U64 -> UInt64.add_mod a b
| U128 -> UInt128.add_mod a b
let add_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let add #t #l a b =
match t with
| U1 -> UInt8.add a b
| U8 -> UInt8.add a b
| U16 -> UInt16.add a b
| U32 -> UInt32.add a b
| U64 -> UInt64.add a b
| U128 -> UInt128.add a b
| S8 -> Int8.add a b
| S16 -> Int16.add a b
| S32 -> Int32.add a b
| S64 -> Int64.add a b
| S128 -> Int128.add a b
let add_lemma #t #l a b = ()
#push-options "--max_fuel 1"
[@(strict_on_arguments [0])]
let incr #t #l a =
match t with
| U1 -> UInt8.add a 1uy
| U8 -> UInt8.add a 1uy
| U16 -> UInt16.add a 1us
| U32 -> UInt32.add a 1ul
| U64 -> UInt64.add a 1uL
| U128 -> UInt128.add a (UInt128.uint_to_t 1)
| S8 -> Int8.add a 1y
| S16 -> Int16.add a 1s
| S32 -> Int32.add a 1l
| S64 -> Int64.add a 1L
| S128 -> Int128.add a (Int128.int_to_t 1)
let incr_lemma #t #l a = ()
#pop-options
[@(strict_on_arguments [0])]
let mul_mod #t #l a b =
match t with
| U1 -> UInt8.mul_mod a b
| U8 -> UInt8.mul_mod a b
| U16 -> UInt16.mul_mod a b
| U32 -> UInt32.mul_mod a b
| U64 -> UInt64.mul_mod a b
let mul_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let mul #t #l a b =
match t with
| U1 -> UInt8.mul a b
| U8 -> UInt8.mul a b
| U16 -> UInt16.mul a b
| U32 -> UInt32.mul a b
| U64 -> UInt64.mul a b
| S8 -> Int8.mul a b
| S16 -> Int16.mul a b
| S32 -> Int32.mul a b
| S64 -> Int64.mul a b
let mul_lemma #t #l a b = ()
let mul64_wide a b = UInt128.mul_wide a b
let mul64_wide_lemma a b = () | false | true | Lib.IntTypes.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 200,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val mul_s64_wide: int64 -> int64 -> int128 | [] | Lib.IntTypes.mul_s64_wide | {
"file_name": "lib/Lib.IntTypes.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | a: Lib.IntTypes.int64 -> b: Lib.IntTypes.int64 -> Lib.IntTypes.int128 | {
"end_col": 42,
"end_line": 362,
"start_col": 23,
"start_line": 362
} |
FStar.Pervasives.Lemma | val pow2_3: n:nat -> Lemma (pow2 3 = 8) [SMTPat (pow2 n)] | [
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let pow2_3 _ = assert_norm (pow2 3 = 8) | val pow2_3: n:nat -> Lemma (pow2 3 = 8) [SMTPat (pow2 n)]
let pow2_3 _ = | false | null | true | assert_norm (pow2 3 = 8) | {
"checked_file": "Lib.IntTypes.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt8.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt16.fsti.checked",
"FStar.UInt128.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int8.fsti.checked",
"FStar.Int64.fsti.checked",
"FStar.Int32.fsti.checked",
"FStar.Int16.fsti.checked",
"FStar.Int128.fsti.checked",
"FStar.Int.Cast.Full.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Int.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "Lib.IntTypes.fst"
} | [
"lemma"
] | [
"Prims.nat",
"FStar.Pervasives.assert_norm",
"Prims.b2t",
"Prims.op_Equality",
"Prims.int",
"Prims.pow2",
"Prims.unit"
] | [] | module Lib.IntTypes
open FStar.Math.Lemmas
#push-options "--max_fuel 0 --max_ifuel 1 --z3rlimit 200" | false | false | Lib.IntTypes.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 200,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val pow2_3: n:nat -> Lemma (pow2 3 = 8) [SMTPat (pow2 n)] | [] | Lib.IntTypes.pow2_3 | {
"file_name": "lib/Lib.IntTypes.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | n: Prims.nat -> FStar.Pervasives.Lemma (ensures Prims.pow2 3 = 8) [SMTPat (Prims.pow2 n)] | {
"end_col": 41,
"end_line": 8,
"start_col": 17,
"start_line": 8
} |
Prims.Tot | val sec_int_t: inttype -> Type0 | [
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let sec_int_t t = pub_int_t t | val sec_int_t: inttype -> Type0
let sec_int_t t = | false | null | false | pub_int_t t | {
"checked_file": "Lib.IntTypes.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt8.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt16.fsti.checked",
"FStar.UInt128.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int8.fsti.checked",
"FStar.Int64.fsti.checked",
"FStar.Int32.fsti.checked",
"FStar.Int16.fsti.checked",
"FStar.Int128.fsti.checked",
"FStar.Int.Cast.Full.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Int.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "Lib.IntTypes.fst"
} | [
"total"
] | [
"Lib.IntTypes.inttype",
"Lib.IntTypes.pub_int_t"
] | [] | module Lib.IntTypes
open FStar.Math.Lemmas
#push-options "--max_fuel 0 --max_ifuel 1 --z3rlimit 200"
let pow2_2 _ = assert_norm (pow2 2 = 4)
let pow2_3 _ = assert_norm (pow2 3 = 8)
let pow2_4 _ = assert_norm (pow2 4 = 16)
let pow2_127 _ = assert_norm (pow2 127 = 0x80000000000000000000000000000000)
let bits_numbytes t = () | false | true | Lib.IntTypes.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 200,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val sec_int_t: inttype -> Type0 | [] | Lib.IntTypes.sec_int_t | {
"file_name": "lib/Lib.IntTypes.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | t: Lib.IntTypes.inttype -> Type0 | {
"end_col": 29,
"end_line": 14,
"start_col": 18,
"start_line": 14
} |
FStar.Pervasives.Lemma | val pow2_4: n:nat -> Lemma (pow2 4 = 16) [SMTPat (pow2 n)] | [
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let pow2_4 _ = assert_norm (pow2 4 = 16) | val pow2_4: n:nat -> Lemma (pow2 4 = 16) [SMTPat (pow2 n)]
let pow2_4 _ = | false | null | true | assert_norm (pow2 4 = 16) | {
"checked_file": "Lib.IntTypes.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt8.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt16.fsti.checked",
"FStar.UInt128.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int8.fsti.checked",
"FStar.Int64.fsti.checked",
"FStar.Int32.fsti.checked",
"FStar.Int16.fsti.checked",
"FStar.Int128.fsti.checked",
"FStar.Int.Cast.Full.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Int.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "Lib.IntTypes.fst"
} | [
"lemma"
] | [
"Prims.nat",
"FStar.Pervasives.assert_norm",
"Prims.b2t",
"Prims.op_Equality",
"Prims.int",
"Prims.pow2",
"Prims.unit"
] | [] | module Lib.IntTypes
open FStar.Math.Lemmas
#push-options "--max_fuel 0 --max_ifuel 1 --z3rlimit 200"
let pow2_2 _ = assert_norm (pow2 2 = 4) | false | false | Lib.IntTypes.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 200,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val pow2_4: n:nat -> Lemma (pow2 4 = 16) [SMTPat (pow2 n)] | [] | Lib.IntTypes.pow2_4 | {
"file_name": "lib/Lib.IntTypes.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | n: Prims.nat -> FStar.Pervasives.Lemma (ensures Prims.pow2 4 = 16) [SMTPat (Prims.pow2 n)] | {
"end_col": 42,
"end_line": 9,
"start_col": 17,
"start_line": 9
} |
FStar.Pervasives.Lemma | val v_injective: #t:inttype -> #l:secrecy_level -> a:int_t t l -> Lemma
(mk_int (v #t #l a) == a)
[SMTPat (v #t #l a)] | [
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let v_injective #t #l a =
v_extensionality a (mk_int (v a)) | val v_injective: #t:inttype -> #l:secrecy_level -> a:int_t t l -> Lemma
(mk_int (v #t #l a) == a)
[SMTPat (v #t #l a)]
let v_injective #t #l a = | false | null | true | v_extensionality a (mk_int (v a)) | {
"checked_file": "Lib.IntTypes.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt8.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt16.fsti.checked",
"FStar.UInt128.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int8.fsti.checked",
"FStar.Int64.fsti.checked",
"FStar.Int32.fsti.checked",
"FStar.Int16.fsti.checked",
"FStar.Int128.fsti.checked",
"FStar.Int.Cast.Full.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Int.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "Lib.IntTypes.fst"
} | [
"lemma"
] | [
"Lib.IntTypes.inttype",
"Lib.IntTypes.secrecy_level",
"Lib.IntTypes.int_t",
"Lib.IntTypes.v_extensionality",
"Lib.IntTypes.mk_int",
"Lib.IntTypes.v",
"Prims.unit"
] | [] | module Lib.IntTypes
open FStar.Math.Lemmas
#push-options "--max_fuel 0 --max_ifuel 1 --z3rlimit 200"
let pow2_2 _ = assert_norm (pow2 2 = 4)
let pow2_3 _ = assert_norm (pow2 3 = 8)
let pow2_4 _ = assert_norm (pow2 4 = 16)
let pow2_127 _ = assert_norm (pow2 127 = 0x80000000000000000000000000000000)
let bits_numbytes t = ()
let sec_int_t t = pub_int_t t
let sec_int_v #t u = pub_int_v u
let secret #t x = x
[@(strict_on_arguments [0])]
let mk_int #t #l x =
match t with
| U1 -> UInt8.uint_to_t x
| U8 -> UInt8.uint_to_t x
| U16 -> UInt16.uint_to_t x
| U32 -> UInt32.uint_to_t x
| U64 -> UInt64.uint_to_t x
| U128 -> UInt128.uint_to_t x
| S8 -> Int8.int_to_t x
| S16 -> Int16.int_to_t x
| S32 -> Int32.int_to_t x
| S64 -> Int64.int_to_t x
| S128 -> Int128.int_to_t x
val v_extensionality:
#t:inttype
-> #l:secrecy_level
-> a:int_t t l
-> b:int_t t l
-> Lemma
(requires v a == v b)
(ensures a == b)
let v_extensionality #t #l a b =
match t with
| U1 -> ()
| U8 -> UInt8.v_inj a b
| U16 -> UInt16.v_inj a b
| U32 -> UInt32.v_inj a b
| U64 -> UInt64.v_inj a b
| U128 -> UInt128.v_inj a b
| S8 -> Int8.v_inj a b
| S16 -> Int16.v_inj a b
| S32 -> Int32.v_inj a b
| S64 -> Int64.v_inj a b
| S128 -> Int128.v_inj a b | false | false | Lib.IntTypes.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 200,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val v_injective: #t:inttype -> #l:secrecy_level -> a:int_t t l -> Lemma
(mk_int (v #t #l a) == a)
[SMTPat (v #t #l a)] | [] | Lib.IntTypes.v_injective | {
"file_name": "lib/Lib.IntTypes.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | a: Lib.IntTypes.int_t t l
-> FStar.Pervasives.Lemma (ensures Lib.IntTypes.mk_int (Lib.IntTypes.v a) == a)
[SMTPat (Lib.IntTypes.v a)] | {
"end_col": 35,
"end_line": 58,
"start_col": 2,
"start_line": 58
} |
Prims.Tot | val size_to_uint64: s:size_t -> u:uint64{u == u64 (v s)} | [
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let size_to_uint64 x = Int.Cast.uint32_to_uint64 x | val size_to_uint64: s:size_t -> u:uint64{u == u64 (v s)}
let size_to_uint64 x = | false | null | false | Int.Cast.uint32_to_uint64 x | {
"checked_file": "Lib.IntTypes.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt8.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt16.fsti.checked",
"FStar.UInt128.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int8.fsti.checked",
"FStar.Int64.fsti.checked",
"FStar.Int32.fsti.checked",
"FStar.Int16.fsti.checked",
"FStar.Int128.fsti.checked",
"FStar.Int.Cast.Full.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Int.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "Lib.IntTypes.fst"
} | [
"total"
] | [
"Lib.IntTypes.size_t",
"FStar.Int.Cast.uint32_to_uint64",
"Lib.IntTypes.uint64",
"Prims.eq2",
"Lib.IntTypes.u64",
"Lib.IntTypes.v",
"Lib.IntTypes.U32",
"Lib.IntTypes.PUB"
] | [] | module Lib.IntTypes
open FStar.Math.Lemmas
#push-options "--max_fuel 0 --max_ifuel 1 --z3rlimit 200"
let pow2_2 _ = assert_norm (pow2 2 = 4)
let pow2_3 _ = assert_norm (pow2 3 = 8)
let pow2_4 _ = assert_norm (pow2 4 = 16)
let pow2_127 _ = assert_norm (pow2 127 = 0x80000000000000000000000000000000)
let bits_numbytes t = ()
let sec_int_t t = pub_int_t t
let sec_int_v #t u = pub_int_v u
let secret #t x = x
[@(strict_on_arguments [0])]
let mk_int #t #l x =
match t with
| U1 -> UInt8.uint_to_t x
| U8 -> UInt8.uint_to_t x
| U16 -> UInt16.uint_to_t x
| U32 -> UInt32.uint_to_t x
| U64 -> UInt64.uint_to_t x
| U128 -> UInt128.uint_to_t x
| S8 -> Int8.int_to_t x
| S16 -> Int16.int_to_t x
| S32 -> Int32.int_to_t x
| S64 -> Int64.int_to_t x
| S128 -> Int128.int_to_t x
val v_extensionality:
#t:inttype
-> #l:secrecy_level
-> a:int_t t l
-> b:int_t t l
-> Lemma
(requires v a == v b)
(ensures a == b)
let v_extensionality #t #l a b =
match t with
| U1 -> ()
| U8 -> UInt8.v_inj a b
| U16 -> UInt16.v_inj a b
| U32 -> UInt32.v_inj a b
| U64 -> UInt64.v_inj a b
| U128 -> UInt128.v_inj a b
| S8 -> Int8.v_inj a b
| S16 -> Int16.v_inj a b
| S32 -> Int32.v_inj a b
| S64 -> Int64.v_inj a b
| S128 -> Int128.v_inj a b
let v_injective #t #l a =
v_extensionality a (mk_int (v a))
let v_mk_int #t #l n = ()
let u128 n = FStar.UInt128.uint64_to_uint128 (u64 n)
// KaRaMeL will extract this to FStar_Int128_int_to_t, which isn't provided
// We'll need to have FStar.Int128.int64_to_int128 to support int128_t literals
let i128 n =
assert_norm (pow2 (bits S64 - 1) <= pow2 (bits S128 - 1));
sint #S128 #SEC n
let size_to_uint32 x = x | false | false | Lib.IntTypes.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 200,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val size_to_uint64: s:size_t -> u:uint64{u == u64 (v s)} | [] | Lib.IntTypes.size_to_uint64 | {
"file_name": "lib/Lib.IntTypes.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | s: Lib.IntTypes.size_t -> u93: Lib.IntTypes.uint64{u93 == Lib.IntTypes.u64 (Lib.IntTypes.v s)} | {
"end_col": 50,
"end_line": 72,
"start_col": 23,
"start_line": 72
} |
Prims.Tot | val sub_mod: #t:inttype{unsigned t} -> #l:secrecy_level
-> int_t t l
-> int_t t l
-> int_t t l | [
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let sub_mod #t #l a b =
match t with
| U1 -> UInt8.rem (UInt8.sub_mod a b) 2uy
| U8 -> UInt8.sub_mod a b
| U16 -> UInt16.sub_mod a b
| U32 -> UInt32.sub_mod a b
| U64 -> UInt64.sub_mod a b
| U128 -> UInt128.sub_mod a b | val sub_mod: #t:inttype{unsigned t} -> #l:secrecy_level
-> int_t t l
-> int_t t l
-> int_t t l
let sub_mod #t #l a b = | false | null | false | match t with
| U1 -> UInt8.rem (UInt8.sub_mod a b) 2uy
| U8 -> UInt8.sub_mod a b
| U16 -> UInt16.sub_mod a b
| U32 -> UInt32.sub_mod a b
| U64 -> UInt64.sub_mod a b
| U128 -> UInt128.sub_mod a b | {
"checked_file": "Lib.IntTypes.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt8.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt16.fsti.checked",
"FStar.UInt128.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int8.fsti.checked",
"FStar.Int64.fsti.checked",
"FStar.Int32.fsti.checked",
"FStar.Int16.fsti.checked",
"FStar.Int128.fsti.checked",
"FStar.Int.Cast.Full.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Int.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "Lib.IntTypes.fst"
} | [
"total"
] | [
"Lib.IntTypes.inttype",
"Prims.b2t",
"Lib.IntTypes.unsigned",
"Lib.IntTypes.secrecy_level",
"Lib.IntTypes.int_t",
"FStar.UInt8.rem",
"FStar.UInt8.sub_mod",
"FStar.UInt8.__uint_to_t",
"FStar.UInt16.sub_mod",
"FStar.UInt32.sub_mod",
"FStar.UInt64.sub_mod",
"FStar.UInt128.sub_mod"
] | [] | module Lib.IntTypes
open FStar.Math.Lemmas
#push-options "--max_fuel 0 --max_ifuel 1 --z3rlimit 200"
let pow2_2 _ = assert_norm (pow2 2 = 4)
let pow2_3 _ = assert_norm (pow2 3 = 8)
let pow2_4 _ = assert_norm (pow2 4 = 16)
let pow2_127 _ = assert_norm (pow2 127 = 0x80000000000000000000000000000000)
let bits_numbytes t = ()
let sec_int_t t = pub_int_t t
let sec_int_v #t u = pub_int_v u
let secret #t x = x
[@(strict_on_arguments [0])]
let mk_int #t #l x =
match t with
| U1 -> UInt8.uint_to_t x
| U8 -> UInt8.uint_to_t x
| U16 -> UInt16.uint_to_t x
| U32 -> UInt32.uint_to_t x
| U64 -> UInt64.uint_to_t x
| U128 -> UInt128.uint_to_t x
| S8 -> Int8.int_to_t x
| S16 -> Int16.int_to_t x
| S32 -> Int32.int_to_t x
| S64 -> Int64.int_to_t x
| S128 -> Int128.int_to_t x
val v_extensionality:
#t:inttype
-> #l:secrecy_level
-> a:int_t t l
-> b:int_t t l
-> Lemma
(requires v a == v b)
(ensures a == b)
let v_extensionality #t #l a b =
match t with
| U1 -> ()
| U8 -> UInt8.v_inj a b
| U16 -> UInt16.v_inj a b
| U32 -> UInt32.v_inj a b
| U64 -> UInt64.v_inj a b
| U128 -> UInt128.v_inj a b
| S8 -> Int8.v_inj a b
| S16 -> Int16.v_inj a b
| S32 -> Int32.v_inj a b
| S64 -> Int64.v_inj a b
| S128 -> Int128.v_inj a b
let v_injective #t #l a =
v_extensionality a (mk_int (v a))
let v_mk_int #t #l n = ()
let u128 n = FStar.UInt128.uint64_to_uint128 (u64 n)
// KaRaMeL will extract this to FStar_Int128_int_to_t, which isn't provided
// We'll need to have FStar.Int128.int64_to_int128 to support int128_t literals
let i128 n =
assert_norm (pow2 (bits S64 - 1) <= pow2 (bits S128 - 1));
sint #S128 #SEC n
let size_to_uint32 x = x
let size_to_uint64 x = Int.Cast.uint32_to_uint64 x
let byte_to_uint8 x = x
let byte_to_int8 x = Int.Cast.uint8_to_int8 x
let op_At_Percent = Int.op_At_Percent
// FStar.UInt128 gets special treatment in KaRaMeL. There is no
// equivalent for FStar.Int128 at the moment, so we use the three
// assumed cast operators below.
//
// Using them will fail at runtime with an informative message.
// The commented-out implementations show that they are realizable.
//
// When support for `FStar.Int128` is added KaRaMeL, these casts must
// be added as special cases. When using builtin compiler support for
// `int128_t`, they can be implemented directly as C casts without
// undefined or implementation-defined behaviour.
assume
val uint128_to_int128: a:UInt128.t{v a <= maxint S128} -> b:Int128.t{Int128.v b == UInt128.v a}
//let uint128_to_int128 a = Int128.int_to_t (v a)
assume
val int128_to_uint128: a:Int128.t -> b:UInt128.t{UInt128.v b == Int128.v a % pow2 128}
//let int128_to_uint128 a = mk_int (v a % pow2 128)
assume
val int64_to_int128: a:Int64.t -> b:Int128.t{Int128.v b == Int64.v a}
//let int64_to_int128 a = Int128.int_to_t (v a)
val uint64_to_int128: a:UInt64.t -> b:Int128.t{Int128.v b == UInt64.v a}
let uint64_to_int128 a = uint128_to_int128 (Int.Cast.Full.uint64_to_uint128 a)
val int64_to_uint128: a:Int64.t -> b:UInt128.t{UInt128.v b == Int64.v a % pow2 128}
let int64_to_uint128 a = int128_to_uint128 (int64_to_int128 a)
val int128_to_uint64: a:Int128.t -> b:UInt64.t{UInt64.v b == Int128.v a % pow2 64}
let int128_to_uint64 a = Int.Cast.Full.uint128_to_uint64 (int128_to_uint128 a)
#push-options "--z3rlimit 1000"
[@(strict_on_arguments [0;2])]
let cast #t #l t' l' u =
assert_norm (pow2 8 = 2 * pow2 7);
assert_norm (pow2 16 = 2 * pow2 15);
assert_norm (pow2 64 * pow2 64 = pow2 128);
assert_norm (pow2 16 * pow2 48 = pow2 64);
assert_norm (pow2 8 * pow2 56 = pow2 64);
assert_norm (pow2 32 * pow2 32 = pow2 64);
modulo_modulo_lemma (v u) (pow2 32) (pow2 32);
modulo_modulo_lemma (v u) (pow2 64) (pow2 64);
modulo_modulo_lemma (v u) (pow2 128) (pow2 64);
modulo_modulo_lemma (v u) (pow2 16) (pow2 48);
modulo_modulo_lemma (v u) (pow2 8) (pow2 56);
let open FStar.Int.Cast in
let open FStar.Int.Cast.Full in
match t, t' with
| U1, U1 -> u
| U1, U8 -> u
| U1, U16 -> uint8_to_uint16 u
| U1, U32 -> uint8_to_uint32 u
| U1, U64 -> uint8_to_uint64 u
| U1, U128 -> UInt128.uint64_to_uint128 (uint8_to_uint64 u)
| U1, S8 -> uint8_to_int8 u
| U1, S16 -> uint8_to_int16 u
| U1, S32 -> uint8_to_int32 u
| U1, S64 -> uint8_to_int64 u
| U1, S128 -> uint64_to_int128 (uint8_to_uint64 u)
| U8, U1 -> UInt8.rem u 2uy
| U8, U8 -> u
| U8, U16 -> uint8_to_uint16 u
| U8, U32 -> uint8_to_uint32 u
| U8, U64 -> uint8_to_uint64 u
| U8, U128 -> UInt128.uint64_to_uint128 (uint8_to_uint64 u)
| U8, S8 -> uint8_to_int8 u
| U8, S16 -> uint8_to_int16 u
| U8, S32 -> uint8_to_int32 u
| U8, S64 -> uint8_to_int64 u
| U8, S128 -> uint64_to_int128 (uint8_to_uint64 u)
| U16, U1 -> UInt8.rem (uint16_to_uint8 u) 2uy
| U16, U8 -> uint16_to_uint8 u
| U16, U16 -> u
| U16, U32 -> uint16_to_uint32 u
| U16, U64 -> uint16_to_uint64 u
| U16, U128 -> UInt128.uint64_to_uint128 (uint16_to_uint64 u)
| U16, S8 -> uint16_to_int8 u
| U16, S16 -> uint16_to_int16 u
| U16, S32 -> uint16_to_int32 u
| U16, S64 -> uint16_to_int64 u
| U16, S128 -> uint64_to_int128 (uint16_to_uint64 u)
| U32, U1 -> UInt8.rem (uint32_to_uint8 u) 2uy
| U32, U8 -> uint32_to_uint8 u
| U32, U16 -> uint32_to_uint16 u
| U32, U32 -> u
| U32, U64 -> uint32_to_uint64 u
| U32, U128 -> UInt128.uint64_to_uint128 (uint32_to_uint64 u)
| U32, S8 -> uint32_to_int8 u
| U32, S16 -> uint32_to_int16 u
| U32, S32 -> uint32_to_int32 u
| U32, S64 -> uint32_to_int64 u
| U32, S128 -> uint64_to_int128 (uint32_to_uint64 u)
| U64, U1 -> UInt8.rem (uint64_to_uint8 u) 2uy
| U64, U8 -> uint64_to_uint8 u
| U64, U16 -> uint64_to_uint16 u
| U64, U32 -> uint64_to_uint32 u
| U64, U64 -> u
| U64, U128 -> UInt128.uint64_to_uint128 u
| U64, S8 -> uint64_to_int8 u
| U64, S16 -> uint64_to_int16 u
| U64, S32 -> uint64_to_int32 u
| U64, S64 -> uint64_to_int64 u
| U64, S128 -> uint64_to_int128 u
| U128, U1 -> UInt8.rem (uint64_to_uint8 (uint128_to_uint64 u)) 2uy
| U128, U8 -> uint64_to_uint8 (UInt128.uint128_to_uint64 u)
| U128, U16 -> uint64_to_uint16 (UInt128.uint128_to_uint64 u)
| U128, U32 -> uint64_to_uint32 (UInt128.uint128_to_uint64 u)
| U128, U64 -> UInt128.uint128_to_uint64 u
| U128, U128 -> u
| U128, S8 -> uint64_to_int8 (UInt128.uint128_to_uint64 u)
| U128, S16 -> uint64_to_int16 (UInt128.uint128_to_uint64 u)
| U128, S32 -> uint64_to_int32 (UInt128.uint128_to_uint64 u)
| U128, S64 -> uint64_to_int64 (UInt128.uint128_to_uint64 u)
| U128, S128 -> uint128_to_int128 u
| S8, U1 -> UInt8.rem (int8_to_uint8 u) 2uy
| S8, U8 -> int8_to_uint8 u
| S8, U16 -> int8_to_uint16 u
| S8, U32 -> int8_to_uint32 u
| S8, U64 -> int8_to_uint64 u
| S8, U128 -> int64_to_uint128 (int8_to_int64 u)
| S8, S8 -> u
| S8, S16 -> int8_to_int16 u
| S8, S32 -> int8_to_int32 u
| S8, S64 -> int8_to_int64 u
| S8, S128 -> int64_to_int128 (int8_to_int64 u)
| S16, U1 -> UInt8.rem (int16_to_uint8 u) 2uy
| S16, U8 -> int16_to_uint8 u
| S16, U16 -> int16_to_uint16 u
| S16, U32 -> int16_to_uint32 u
| S16, U64 -> int16_to_uint64 u
| S16, U128 -> int64_to_uint128 (int16_to_int64 u)
| S16, S8 -> int16_to_int8 u
| S16, S16 -> u
| S16, S32 -> int16_to_int32 u
| S16, S64 -> int16_to_int64 u
| S16, S128 -> int64_to_int128 (int16_to_int64 u)
| S32, U1 -> UInt8.rem (int32_to_uint8 u) 2uy
| S32, U8 -> int32_to_uint8 u
| S32, U16 -> int32_to_uint16 u
| S32, U32 -> int32_to_uint32 u
| S32, U64 -> int32_to_uint64 u
| S32, U128 -> int64_to_uint128 (int32_to_int64 u)
| S32, S8 -> int32_to_int8 u
| S32, S16 -> int32_to_int16 u
| S32, S32 -> u
| S32, S64 -> int32_to_int64 u
| S32, S128 -> int64_to_int128 (int32_to_int64 u)
| S64, U1 -> UInt8.rem (int64_to_uint8 u) 2uy
| S64, U8 -> int64_to_uint8 u
| S64, U16 -> int64_to_uint16 u
| S64, U32 -> int64_to_uint32 u
| S64, U64 -> int64_to_uint64 u
| S64, U128 -> int64_to_uint128 u
| S64, S8 -> int64_to_int8 u
| S64, S16 -> int64_to_int16 u
| S64, S32 -> int64_to_int32 u
| S64, S64 -> u
| S64, S128 -> int64_to_int128 u
| S128, U1 -> UInt8.rem (uint64_to_uint8 (int128_to_uint64 u)) 2uy
| S128, U8 -> uint64_to_uint8 (int128_to_uint64 u)
| S128, U16 -> uint64_to_uint16 (int128_to_uint64 u)
| S128, U32 -> uint64_to_uint32 (int128_to_uint64 u)
| S128, U64 -> int128_to_uint64 u
| S128, U128 -> int128_to_uint128 u
| S128, S8 -> uint64_to_int8 (int128_to_uint64 u)
| S128, S16 -> uint64_to_int16 (int128_to_uint64 u)
| S128, S32 -> uint64_to_int32 (int128_to_uint64 u)
| S128, S64 -> uint64_to_int64 (int128_to_uint64 u)
| S128, S128 -> u
#pop-options
[@(strict_on_arguments [0])]
let ones t l =
match t with
| U1 -> 0x1uy
| U8 -> 0xFFuy
| U16 -> 0xFFFFus
| U32 -> 0xFFFFFFFFul
| U64 -> 0xFFFFFFFFFFFFFFFFuL
| U128 ->
let x = UInt128.uint64_to_uint128 0xFFFFFFFFFFFFFFFFuL in
let y = (UInt128.shift_left x 64ul) `UInt128.add` x in
assert_norm (UInt128.v y == pow2 128 - 1);
y
| _ -> mk_int (-1)
let zeros t l = mk_int 0
[@(strict_on_arguments [0])]
let add_mod #t #l a b =
match t with
| U1 -> UInt8.rem (UInt8.add_mod a b) 2uy
| U8 -> UInt8.add_mod a b
| U16 -> UInt16.add_mod a b
| U32 -> UInt32.add_mod a b
| U64 -> UInt64.add_mod a b
| U128 -> UInt128.add_mod a b
let add_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let add #t #l a b =
match t with
| U1 -> UInt8.add a b
| U8 -> UInt8.add a b
| U16 -> UInt16.add a b
| U32 -> UInt32.add a b
| U64 -> UInt64.add a b
| U128 -> UInt128.add a b
| S8 -> Int8.add a b
| S16 -> Int16.add a b
| S32 -> Int32.add a b
| S64 -> Int64.add a b
| S128 -> Int128.add a b
let add_lemma #t #l a b = ()
#push-options "--max_fuel 1"
[@(strict_on_arguments [0])]
let incr #t #l a =
match t with
| U1 -> UInt8.add a 1uy
| U8 -> UInt8.add a 1uy
| U16 -> UInt16.add a 1us
| U32 -> UInt32.add a 1ul
| U64 -> UInt64.add a 1uL
| U128 -> UInt128.add a (UInt128.uint_to_t 1)
| S8 -> Int8.add a 1y
| S16 -> Int16.add a 1s
| S32 -> Int32.add a 1l
| S64 -> Int64.add a 1L
| S128 -> Int128.add a (Int128.int_to_t 1)
let incr_lemma #t #l a = ()
#pop-options
[@(strict_on_arguments [0])]
let mul_mod #t #l a b =
match t with
| U1 -> UInt8.mul_mod a b
| U8 -> UInt8.mul_mod a b
| U16 -> UInt16.mul_mod a b
| U32 -> UInt32.mul_mod a b
| U64 -> UInt64.mul_mod a b
let mul_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let mul #t #l a b =
match t with
| U1 -> UInt8.mul a b
| U8 -> UInt8.mul a b
| U16 -> UInt16.mul a b
| U32 -> UInt32.mul a b
| U64 -> UInt64.mul a b
| S8 -> Int8.mul a b
| S16 -> Int16.mul a b
| S32 -> Int32.mul a b
| S64 -> Int64.mul a b
let mul_lemma #t #l a b = ()
let mul64_wide a b = UInt128.mul_wide a b
let mul64_wide_lemma a b = ()
let mul_s64_wide a b = Int128.mul_wide a b
let mul_s64_wide_lemma a b = ()
[@(strict_on_arguments [0])] | false | false | Lib.IntTypes.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 200,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val sub_mod: #t:inttype{unsigned t} -> #l:secrecy_level
-> int_t t l
-> int_t t l
-> int_t t l | [] | Lib.IntTypes.sub_mod | {
"file_name": "lib/Lib.IntTypes.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | a: Lib.IntTypes.int_t t l -> b: Lib.IntTypes.int_t t l -> Lib.IntTypes.int_t t l | {
"end_col": 31,
"end_line": 374,
"start_col": 2,
"start_line": 368
} |
Prims.Tot | val gte_mask: #t:inttype{unsigned t} -> int_t t SEC -> b:int_t t SEC -> int_t t SEC | [
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let gte_mask #t a b =
match t with
| U1 -> logor a (lognot b)
| U8 -> UInt8.gte_mask a b
| U16 -> UInt16.gte_mask a b
| U32 -> UInt32.gte_mask a b
| U64 -> UInt64.gte_mask a b
| U128 -> UInt128.gte_mask a b | val gte_mask: #t:inttype{unsigned t} -> int_t t SEC -> b:int_t t SEC -> int_t t SEC
let gte_mask #t a b = | false | null | false | match t with
| U1 -> logor a (lognot b)
| U8 -> UInt8.gte_mask a b
| U16 -> UInt16.gte_mask a b
| U32 -> UInt32.gte_mask a b
| U64 -> UInt64.gte_mask a b
| U128 -> UInt128.gte_mask a b | {
"checked_file": "Lib.IntTypes.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt8.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt16.fsti.checked",
"FStar.UInt128.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int8.fsti.checked",
"FStar.Int64.fsti.checked",
"FStar.Int32.fsti.checked",
"FStar.Int16.fsti.checked",
"FStar.Int128.fsti.checked",
"FStar.Int.Cast.Full.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Int.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "Lib.IntTypes.fst"
} | [
"total"
] | [
"Lib.IntTypes.inttype",
"Prims.b2t",
"Lib.IntTypes.unsigned",
"Lib.IntTypes.int_t",
"Lib.IntTypes.SEC",
"Lib.IntTypes.logor",
"Lib.IntTypes.lognot",
"FStar.UInt8.gte_mask",
"FStar.UInt16.gte_mask",
"FStar.UInt32.gte_mask",
"FStar.UInt64.gte_mask",
"FStar.UInt128.gte_mask"
] | [] | module Lib.IntTypes
open FStar.Math.Lemmas
#push-options "--max_fuel 0 --max_ifuel 1 --z3rlimit 200"
let pow2_2 _ = assert_norm (pow2 2 = 4)
let pow2_3 _ = assert_norm (pow2 3 = 8)
let pow2_4 _ = assert_norm (pow2 4 = 16)
let pow2_127 _ = assert_norm (pow2 127 = 0x80000000000000000000000000000000)
let bits_numbytes t = ()
let sec_int_t t = pub_int_t t
let sec_int_v #t u = pub_int_v u
let secret #t x = x
[@(strict_on_arguments [0])]
let mk_int #t #l x =
match t with
| U1 -> UInt8.uint_to_t x
| U8 -> UInt8.uint_to_t x
| U16 -> UInt16.uint_to_t x
| U32 -> UInt32.uint_to_t x
| U64 -> UInt64.uint_to_t x
| U128 -> UInt128.uint_to_t x
| S8 -> Int8.int_to_t x
| S16 -> Int16.int_to_t x
| S32 -> Int32.int_to_t x
| S64 -> Int64.int_to_t x
| S128 -> Int128.int_to_t x
val v_extensionality:
#t:inttype
-> #l:secrecy_level
-> a:int_t t l
-> b:int_t t l
-> Lemma
(requires v a == v b)
(ensures a == b)
let v_extensionality #t #l a b =
match t with
| U1 -> ()
| U8 -> UInt8.v_inj a b
| U16 -> UInt16.v_inj a b
| U32 -> UInt32.v_inj a b
| U64 -> UInt64.v_inj a b
| U128 -> UInt128.v_inj a b
| S8 -> Int8.v_inj a b
| S16 -> Int16.v_inj a b
| S32 -> Int32.v_inj a b
| S64 -> Int64.v_inj a b
| S128 -> Int128.v_inj a b
let v_injective #t #l a =
v_extensionality a (mk_int (v a))
let v_mk_int #t #l n = ()
let u128 n = FStar.UInt128.uint64_to_uint128 (u64 n)
// KaRaMeL will extract this to FStar_Int128_int_to_t, which isn't provided
// We'll need to have FStar.Int128.int64_to_int128 to support int128_t literals
let i128 n =
assert_norm (pow2 (bits S64 - 1) <= pow2 (bits S128 - 1));
sint #S128 #SEC n
let size_to_uint32 x = x
let size_to_uint64 x = Int.Cast.uint32_to_uint64 x
let byte_to_uint8 x = x
let byte_to_int8 x = Int.Cast.uint8_to_int8 x
let op_At_Percent = Int.op_At_Percent
// FStar.UInt128 gets special treatment in KaRaMeL. There is no
// equivalent for FStar.Int128 at the moment, so we use the three
// assumed cast operators below.
//
// Using them will fail at runtime with an informative message.
// The commented-out implementations show that they are realizable.
//
// When support for `FStar.Int128` is added KaRaMeL, these casts must
// be added as special cases. When using builtin compiler support for
// `int128_t`, they can be implemented directly as C casts without
// undefined or implementation-defined behaviour.
assume
val uint128_to_int128: a:UInt128.t{v a <= maxint S128} -> b:Int128.t{Int128.v b == UInt128.v a}
//let uint128_to_int128 a = Int128.int_to_t (v a)
assume
val int128_to_uint128: a:Int128.t -> b:UInt128.t{UInt128.v b == Int128.v a % pow2 128}
//let int128_to_uint128 a = mk_int (v a % pow2 128)
assume
val int64_to_int128: a:Int64.t -> b:Int128.t{Int128.v b == Int64.v a}
//let int64_to_int128 a = Int128.int_to_t (v a)
val uint64_to_int128: a:UInt64.t -> b:Int128.t{Int128.v b == UInt64.v a}
let uint64_to_int128 a = uint128_to_int128 (Int.Cast.Full.uint64_to_uint128 a)
val int64_to_uint128: a:Int64.t -> b:UInt128.t{UInt128.v b == Int64.v a % pow2 128}
let int64_to_uint128 a = int128_to_uint128 (int64_to_int128 a)
val int128_to_uint64: a:Int128.t -> b:UInt64.t{UInt64.v b == Int128.v a % pow2 64}
let int128_to_uint64 a = Int.Cast.Full.uint128_to_uint64 (int128_to_uint128 a)
#push-options "--z3rlimit 1000"
[@(strict_on_arguments [0;2])]
let cast #t #l t' l' u =
assert_norm (pow2 8 = 2 * pow2 7);
assert_norm (pow2 16 = 2 * pow2 15);
assert_norm (pow2 64 * pow2 64 = pow2 128);
assert_norm (pow2 16 * pow2 48 = pow2 64);
assert_norm (pow2 8 * pow2 56 = pow2 64);
assert_norm (pow2 32 * pow2 32 = pow2 64);
modulo_modulo_lemma (v u) (pow2 32) (pow2 32);
modulo_modulo_lemma (v u) (pow2 64) (pow2 64);
modulo_modulo_lemma (v u) (pow2 128) (pow2 64);
modulo_modulo_lemma (v u) (pow2 16) (pow2 48);
modulo_modulo_lemma (v u) (pow2 8) (pow2 56);
let open FStar.Int.Cast in
let open FStar.Int.Cast.Full in
match t, t' with
| U1, U1 -> u
| U1, U8 -> u
| U1, U16 -> uint8_to_uint16 u
| U1, U32 -> uint8_to_uint32 u
| U1, U64 -> uint8_to_uint64 u
| U1, U128 -> UInt128.uint64_to_uint128 (uint8_to_uint64 u)
| U1, S8 -> uint8_to_int8 u
| U1, S16 -> uint8_to_int16 u
| U1, S32 -> uint8_to_int32 u
| U1, S64 -> uint8_to_int64 u
| U1, S128 -> uint64_to_int128 (uint8_to_uint64 u)
| U8, U1 -> UInt8.rem u 2uy
| U8, U8 -> u
| U8, U16 -> uint8_to_uint16 u
| U8, U32 -> uint8_to_uint32 u
| U8, U64 -> uint8_to_uint64 u
| U8, U128 -> UInt128.uint64_to_uint128 (uint8_to_uint64 u)
| U8, S8 -> uint8_to_int8 u
| U8, S16 -> uint8_to_int16 u
| U8, S32 -> uint8_to_int32 u
| U8, S64 -> uint8_to_int64 u
| U8, S128 -> uint64_to_int128 (uint8_to_uint64 u)
| U16, U1 -> UInt8.rem (uint16_to_uint8 u) 2uy
| U16, U8 -> uint16_to_uint8 u
| U16, U16 -> u
| U16, U32 -> uint16_to_uint32 u
| U16, U64 -> uint16_to_uint64 u
| U16, U128 -> UInt128.uint64_to_uint128 (uint16_to_uint64 u)
| U16, S8 -> uint16_to_int8 u
| U16, S16 -> uint16_to_int16 u
| U16, S32 -> uint16_to_int32 u
| U16, S64 -> uint16_to_int64 u
| U16, S128 -> uint64_to_int128 (uint16_to_uint64 u)
| U32, U1 -> UInt8.rem (uint32_to_uint8 u) 2uy
| U32, U8 -> uint32_to_uint8 u
| U32, U16 -> uint32_to_uint16 u
| U32, U32 -> u
| U32, U64 -> uint32_to_uint64 u
| U32, U128 -> UInt128.uint64_to_uint128 (uint32_to_uint64 u)
| U32, S8 -> uint32_to_int8 u
| U32, S16 -> uint32_to_int16 u
| U32, S32 -> uint32_to_int32 u
| U32, S64 -> uint32_to_int64 u
| U32, S128 -> uint64_to_int128 (uint32_to_uint64 u)
| U64, U1 -> UInt8.rem (uint64_to_uint8 u) 2uy
| U64, U8 -> uint64_to_uint8 u
| U64, U16 -> uint64_to_uint16 u
| U64, U32 -> uint64_to_uint32 u
| U64, U64 -> u
| U64, U128 -> UInt128.uint64_to_uint128 u
| U64, S8 -> uint64_to_int8 u
| U64, S16 -> uint64_to_int16 u
| U64, S32 -> uint64_to_int32 u
| U64, S64 -> uint64_to_int64 u
| U64, S128 -> uint64_to_int128 u
| U128, U1 -> UInt8.rem (uint64_to_uint8 (uint128_to_uint64 u)) 2uy
| U128, U8 -> uint64_to_uint8 (UInt128.uint128_to_uint64 u)
| U128, U16 -> uint64_to_uint16 (UInt128.uint128_to_uint64 u)
| U128, U32 -> uint64_to_uint32 (UInt128.uint128_to_uint64 u)
| U128, U64 -> UInt128.uint128_to_uint64 u
| U128, U128 -> u
| U128, S8 -> uint64_to_int8 (UInt128.uint128_to_uint64 u)
| U128, S16 -> uint64_to_int16 (UInt128.uint128_to_uint64 u)
| U128, S32 -> uint64_to_int32 (UInt128.uint128_to_uint64 u)
| U128, S64 -> uint64_to_int64 (UInt128.uint128_to_uint64 u)
| U128, S128 -> uint128_to_int128 u
| S8, U1 -> UInt8.rem (int8_to_uint8 u) 2uy
| S8, U8 -> int8_to_uint8 u
| S8, U16 -> int8_to_uint16 u
| S8, U32 -> int8_to_uint32 u
| S8, U64 -> int8_to_uint64 u
| S8, U128 -> int64_to_uint128 (int8_to_int64 u)
| S8, S8 -> u
| S8, S16 -> int8_to_int16 u
| S8, S32 -> int8_to_int32 u
| S8, S64 -> int8_to_int64 u
| S8, S128 -> int64_to_int128 (int8_to_int64 u)
| S16, U1 -> UInt8.rem (int16_to_uint8 u) 2uy
| S16, U8 -> int16_to_uint8 u
| S16, U16 -> int16_to_uint16 u
| S16, U32 -> int16_to_uint32 u
| S16, U64 -> int16_to_uint64 u
| S16, U128 -> int64_to_uint128 (int16_to_int64 u)
| S16, S8 -> int16_to_int8 u
| S16, S16 -> u
| S16, S32 -> int16_to_int32 u
| S16, S64 -> int16_to_int64 u
| S16, S128 -> int64_to_int128 (int16_to_int64 u)
| S32, U1 -> UInt8.rem (int32_to_uint8 u) 2uy
| S32, U8 -> int32_to_uint8 u
| S32, U16 -> int32_to_uint16 u
| S32, U32 -> int32_to_uint32 u
| S32, U64 -> int32_to_uint64 u
| S32, U128 -> int64_to_uint128 (int32_to_int64 u)
| S32, S8 -> int32_to_int8 u
| S32, S16 -> int32_to_int16 u
| S32, S32 -> u
| S32, S64 -> int32_to_int64 u
| S32, S128 -> int64_to_int128 (int32_to_int64 u)
| S64, U1 -> UInt8.rem (int64_to_uint8 u) 2uy
| S64, U8 -> int64_to_uint8 u
| S64, U16 -> int64_to_uint16 u
| S64, U32 -> int64_to_uint32 u
| S64, U64 -> int64_to_uint64 u
| S64, U128 -> int64_to_uint128 u
| S64, S8 -> int64_to_int8 u
| S64, S16 -> int64_to_int16 u
| S64, S32 -> int64_to_int32 u
| S64, S64 -> u
| S64, S128 -> int64_to_int128 u
| S128, U1 -> UInt8.rem (uint64_to_uint8 (int128_to_uint64 u)) 2uy
| S128, U8 -> uint64_to_uint8 (int128_to_uint64 u)
| S128, U16 -> uint64_to_uint16 (int128_to_uint64 u)
| S128, U32 -> uint64_to_uint32 (int128_to_uint64 u)
| S128, U64 -> int128_to_uint64 u
| S128, U128 -> int128_to_uint128 u
| S128, S8 -> uint64_to_int8 (int128_to_uint64 u)
| S128, S16 -> uint64_to_int16 (int128_to_uint64 u)
| S128, S32 -> uint64_to_int32 (int128_to_uint64 u)
| S128, S64 -> uint64_to_int64 (int128_to_uint64 u)
| S128, S128 -> u
#pop-options
[@(strict_on_arguments [0])]
let ones t l =
match t with
| U1 -> 0x1uy
| U8 -> 0xFFuy
| U16 -> 0xFFFFus
| U32 -> 0xFFFFFFFFul
| U64 -> 0xFFFFFFFFFFFFFFFFuL
| U128 ->
let x = UInt128.uint64_to_uint128 0xFFFFFFFFFFFFFFFFuL in
let y = (UInt128.shift_left x 64ul) `UInt128.add` x in
assert_norm (UInt128.v y == pow2 128 - 1);
y
| _ -> mk_int (-1)
let zeros t l = mk_int 0
[@(strict_on_arguments [0])]
let add_mod #t #l a b =
match t with
| U1 -> UInt8.rem (UInt8.add_mod a b) 2uy
| U8 -> UInt8.add_mod a b
| U16 -> UInt16.add_mod a b
| U32 -> UInt32.add_mod a b
| U64 -> UInt64.add_mod a b
| U128 -> UInt128.add_mod a b
let add_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let add #t #l a b =
match t with
| U1 -> UInt8.add a b
| U8 -> UInt8.add a b
| U16 -> UInt16.add a b
| U32 -> UInt32.add a b
| U64 -> UInt64.add a b
| U128 -> UInt128.add a b
| S8 -> Int8.add a b
| S16 -> Int16.add a b
| S32 -> Int32.add a b
| S64 -> Int64.add a b
| S128 -> Int128.add a b
let add_lemma #t #l a b = ()
#push-options "--max_fuel 1"
[@(strict_on_arguments [0])]
let incr #t #l a =
match t with
| U1 -> UInt8.add a 1uy
| U8 -> UInt8.add a 1uy
| U16 -> UInt16.add a 1us
| U32 -> UInt32.add a 1ul
| U64 -> UInt64.add a 1uL
| U128 -> UInt128.add a (UInt128.uint_to_t 1)
| S8 -> Int8.add a 1y
| S16 -> Int16.add a 1s
| S32 -> Int32.add a 1l
| S64 -> Int64.add a 1L
| S128 -> Int128.add a (Int128.int_to_t 1)
let incr_lemma #t #l a = ()
#pop-options
[@(strict_on_arguments [0])]
let mul_mod #t #l a b =
match t with
| U1 -> UInt8.mul_mod a b
| U8 -> UInt8.mul_mod a b
| U16 -> UInt16.mul_mod a b
| U32 -> UInt32.mul_mod a b
| U64 -> UInt64.mul_mod a b
let mul_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let mul #t #l a b =
match t with
| U1 -> UInt8.mul a b
| U8 -> UInt8.mul a b
| U16 -> UInt16.mul a b
| U32 -> UInt32.mul a b
| U64 -> UInt64.mul a b
| S8 -> Int8.mul a b
| S16 -> Int16.mul a b
| S32 -> Int32.mul a b
| S64 -> Int64.mul a b
let mul_lemma #t #l a b = ()
let mul64_wide a b = UInt128.mul_wide a b
let mul64_wide_lemma a b = ()
let mul_s64_wide a b = Int128.mul_wide a b
let mul_s64_wide_lemma a b = ()
[@(strict_on_arguments [0])]
let sub_mod #t #l a b =
match t with
| U1 -> UInt8.rem (UInt8.sub_mod a b) 2uy
| U8 -> UInt8.sub_mod a b
| U16 -> UInt16.sub_mod a b
| U32 -> UInt32.sub_mod a b
| U64 -> UInt64.sub_mod a b
| U128 -> UInt128.sub_mod a b
let sub_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let sub #t #l a b =
match t with
| U1 -> UInt8.sub a b
| U8 -> UInt8.sub a b
| U16 -> UInt16.sub a b
| U32 -> UInt32.sub a b
| U64 -> UInt64.sub a b
| U128 -> UInt128.sub a b
| S8 -> Int8.sub a b
| S16 -> Int16.sub a b
| S32 -> Int32.sub a b
| S64 -> Int64.sub a b
| S128 -> Int128.sub a b
let sub_lemma #t #l a b = ()
#push-options "--max_fuel 1"
[@(strict_on_arguments [0])]
let decr #t #l a =
match t with
| U1 -> UInt8.sub a 1uy
| U8 -> UInt8.sub a 1uy
| U16 -> UInt16.sub a 1us
| U32 -> UInt32.sub a 1ul
| U64 -> UInt64.sub a 1uL
| U128 -> UInt128.sub a (UInt128.uint_to_t 1)
| S8 -> Int8.sub a 1y
| S16 -> Int16.sub a 1s
| S32 -> Int32.sub a 1l
| S64 -> Int64.sub a 1L
| S128 -> Int128.sub a (Int128.int_to_t 1)
let decr_lemma #t #l a = ()
#pop-options
[@(strict_on_arguments [0])]
let logxor #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logxor 0uy 0uy == 0uy);
assert_norm (UInt8.logxor 0uy 1uy == 1uy);
assert_norm (UInt8.logxor 1uy 0uy == 1uy);
assert_norm (UInt8.logxor 1uy 1uy == 0uy);
UInt8.logxor a b
| U8 -> UInt8.logxor a b
| U16 -> UInt16.logxor a b
| U32 -> UInt32.logxor a b
| U64 -> UInt64.logxor a b
| U128 -> UInt128.logxor a b
| S8 -> Int8.logxor a b
| S16 -> Int16.logxor a b
| S32 -> Int32.logxor a b
| S64 -> Int64.logxor a b
| S128 -> Int128.logxor a b
#push-options "--max_fuel 1"
val logxor_lemma_: #t:inttype -> #l:secrecy_level -> a:int_t t l -> b:int_t t l -> Lemma
(v (a `logxor` (a `logxor` b)) == v b)
let logxor_lemma_ #t #l a b =
match t with
| U1 | U8 | U16 | U32 | U64 | U128 ->
UInt.logxor_associative #(bits t) (v a) (v a) (v b);
UInt.logxor_self #(bits t) (v a);
UInt.logxor_commutative #(bits t) 0 (v b);
UInt.logxor_lemma_1 #(bits t) (v b)
| S8 | S16 | S32 | S64 | S128 ->
Int.logxor_associative #(bits t) (v a) (v a) (v b);
Int.logxor_self #(bits t) (v a);
Int.logxor_commutative #(bits t) 0 (v b);
Int.logxor_lemma_1 #(bits t) (v b)
let logxor_lemma #t #l a b =
logxor_lemma_ #t a b;
v_extensionality (logxor a (logxor a b)) b;
begin
match t with
| U1 | U8 | U16 | U32 | U64 | U128 -> UInt.logxor_commutative #(bits t) (v a) (v b)
| S8 | S16 | S32 | S64 | S128 -> Int.logxor_commutative #(bits t) (v a) (v b)
end;
v_extensionality (logxor a (logxor b a)) b;
begin
match t with
| U1 | U8 | U16 | U32 | U64 | U128 -> UInt.logxor_lemma_1 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.logxor_lemma_1 #(bits t) (v a)
end;
v_extensionality (logxor a (mk_int #t #l 0)) a
let logxor_lemma1 #t #l a b =
match v a, v b with
| _, 0 ->
UInt.logxor_lemma_1 #(bits t) (v a)
| 0, _ ->
UInt.logxor_commutative #(bits t) (v a) (v b);
UInt.logxor_lemma_1 #(bits t) (v b)
| 1, 1 ->
v_extensionality a b;
UInt.logxor_self #(bits t) (v a)
let logxor_spec #t #l a b =
match t with
| U1 ->
assert_norm (u1 0 `logxor` u1 0 == u1 0 /\ u1 0 `logxor` u1 1 == u1 1);
assert_norm (u1 1 `logxor` u1 0 == u1 1 /\ u1 1 `logxor` u1 1 == u1 0);
assert_norm (0 `logxor_v #U1` 0 == 0 /\ 0 `logxor_v #U1` 1 == 1);
assert_norm (1 `logxor_v #U1` 0 == 1 /\ 1 `logxor_v #U1` 1 == 0)
| _ -> ()
#pop-options
[@(strict_on_arguments [0])]
let logand #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logand 0uy 0uy == 0uy);
assert_norm (UInt8.logand 0uy 1uy == 0uy);
assert_norm (UInt8.logand 1uy 0uy == 0uy);
assert_norm (UInt8.logand 1uy 1uy == 1uy);
UInt8.logand a b
| U8 -> UInt8.logand a b
| U16 -> UInt16.logand a b
| U32 -> UInt32.logand a b
| U64 -> UInt64.logand a b
| U128 -> UInt128.logand a b
| S8 -> Int8.logand a b
| S16 -> Int16.logand a b
| S32 -> Int32.logand a b
| S64 -> Int64.logand a b
| S128 -> Int128.logand a b
let logand_zeros #t #l a =
match t with
| U1 -> assert_norm (u1 0 `logand` zeros U1 l == u1 0 /\ u1 1 `logand` zeros U1 l == u1 0)
| U8 | U16 | U32 | U64 | U128 -> UInt.logand_lemma_1 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.logand_lemma_1 #(bits t) (v a)
let logand_ones #t #l a =
match t with
| U1 -> assert_norm (u1 0 `logand` ones U1 l == u1 0 /\ u1 1 `logand` ones U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logand_lemma_2 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.logand_lemma_2 #(bits t) (v a)
let logand_lemma #t #l a b =
logand_zeros #t #l b;
logand_ones #t #l b;
match t with
| U1 ->
assert_norm (u1 0 `logand` zeros U1 l == u1 0 /\ u1 1 `logand` zeros U1 l == u1 0);
assert_norm (u1 0 `logand` ones U1 l == u1 0 /\ u1 1 `logand` ones U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logand_commutative #(bits t) (v a) (v b)
| S8 | S16 | S32 | S64 | S128 -> Int.logand_commutative #(bits t) (v a) (v b)
let logand_spec #t #l a b =
match t with
| U1 ->
assert_norm (u1 0 `logand` u1 0 == u1 0 /\ u1 0 `logand` u1 1 == u1 0);
assert_norm (u1 1 `logand` u1 0 == u1 0 /\ u1 1 `logand` u1 1 == u1 1);
assert_norm (0 `logand_v #U1` 0 == 0 /\ 0 `logand_v #U1` 1 == 0);
assert_norm (1 `logand_v #U1` 0 == 0 /\ 1 `logand_v #U1` 1 == 1)
| _ -> ()
let logand_le #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logand 0uy 0uy == 0uy);
assert_norm (UInt8.logand 0uy 1uy == 0uy);
assert_norm (UInt8.logand 1uy 0uy == 0uy);
assert_norm (UInt8.logand 1uy 1uy == 1uy)
| U8 -> UInt.logand_le (UInt.to_uint_t 8 (v a)) (UInt.to_uint_t 8 (v b))
| U16 -> UInt.logand_le (UInt.to_uint_t 16 (v a)) (UInt.to_uint_t 16 (v b))
| U32 -> UInt.logand_le (UInt.to_uint_t 32 (v a)) (UInt.to_uint_t 32 (v b))
| U64 -> UInt.logand_le (UInt.to_uint_t 64 (v a)) (UInt.to_uint_t 64 (v b))
| U128 -> UInt.logand_le (UInt.to_uint_t 128 (v a)) (UInt.to_uint_t 128 (v b))
let logand_mask #t #l a b m =
match t with
| U1 ->
assert_norm (UInt8.logand 0uy 0uy == 0uy);
assert_norm (UInt8.logand 0uy 1uy == 0uy);
assert_norm (UInt8.logand 1uy 0uy == 0uy);
assert_norm (UInt8.logand 1uy 1uy == 1uy)
| U8 -> UInt.logand_mask (UInt.to_uint_t 8 (v a)) m
| U16 -> UInt.logand_mask (UInt.to_uint_t 16 (v a)) m
| U32 -> UInt.logand_mask (UInt.to_uint_t 32 (v a)) m
| U64 -> UInt.logand_mask (UInt.to_uint_t 64 (v a)) m
| U128 -> UInt.logand_mask (UInt.to_uint_t 128 (v a)) m
[@(strict_on_arguments [0])]
let logor #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logor 0uy 0uy == 0uy);
assert_norm (UInt8.logor 0uy 1uy == 1uy);
assert_norm (UInt8.logor 1uy 0uy == 1uy);
assert_norm (UInt8.logor 1uy 1uy == 1uy);
UInt8.logor a b
| U8 -> UInt8.logor a b
| U16 -> UInt16.logor a b
| U32 -> UInt32.logor a b
| U64 -> UInt64.logor a b
| U128 -> UInt128.logor a b
| S8 -> Int8.logor a b
| S16 -> Int16.logor a b
| S32 -> Int32.logor a b
| S64 -> Int64.logor a b
| S128 -> Int128.logor a b
#push-options "--max_fuel 1"
let logor_disjoint #t #l a b m =
if m > 0 then
begin
UInt.logor_disjoint #(bits t) (v b) (v a) m;
UInt.logor_commutative #(bits t) (v b) (v a)
end
else
begin
UInt.logor_commutative #(bits t) (v a) (v b);
UInt.logor_lemma_1 #(bits t) (v b)
end
#pop-options
let logor_zeros #t #l a =
match t with
|U1 -> assert_norm(u1 0 `logor` zeros U1 l == u1 0 /\ u1 1 `logor` zeros U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logor_lemma_1 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.nth_lemma #(bits t) (Int.logor #(bits t) (v a) (Int.zero (bits t))) (v a)
let logor_ones #t #l a =
match t with
|U1 -> assert_norm(u1 0 `logor` ones U1 l == u1 1 /\ u1 1 `logor` ones U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logor_lemma_2 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.nth_lemma (Int.logor #(bits t) (v a) (Int.ones (bits t))) (Int.ones (bits t))
let logor_lemma #t #l a b =
logor_zeros #t #l b;
logor_ones #t #l b;
match t with
| U1 ->
assert_norm(u1 0 `logor` ones U1 l == u1 1 /\ u1 1 `logor` ones U1 l == u1 1);
assert_norm(u1 0 `logor` zeros U1 l == u1 0 /\ u1 1 `logor` zeros U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logor_commutative #(bits t) (v a) (v b)
| S8 | S16 | S32 | S64 | S128 -> Int.nth_lemma #(bits t) (Int.logor #(bits t) (v a) (v b)) (Int.logor #(bits t) (v b) (v a))
let logor_spec #t #l a b =
match t with
| U1 ->
assert_norm(u1 0 `logor` ones U1 l == u1 1 /\ u1 1 `logor` ones U1 l == u1 1);
assert_norm(u1 0 `logor` zeros U1 l == u1 0 /\ u1 1 `logor` zeros U1 l == u1 1);
assert_norm (0 `logor_v #U1` 0 == 0 /\ 0 `logor_v #U1` 1 == 1);
assert_norm (1 `logor_v #U1` 0 == 1 /\ 1 `logor_v #U1` 1 == 1)
| _ -> ()
[@(strict_on_arguments [0])]
let lognot #t #l a =
match t with
| U1 -> UInt8.rem (UInt8.lognot a) 2uy
| U8 -> UInt8.lognot a
| U16 -> UInt16.lognot a
| U32 -> UInt32.lognot a
| U64 -> UInt64.lognot a
| U128 -> UInt128.lognot a
| S8 -> Int8.lognot a
| S16 -> Int16.lognot a
| S32 -> Int32.lognot a
| S64 -> Int64.lognot a
| S128 -> Int128.lognot a
let lognot_lemma #t #l a =
match t with
|U1 -> assert_norm(lognot (u1 0) == u1 1 /\ lognot (u1 1) == u1 0)
| U8 | U16 | U32 | U64 | U128 ->
FStar.UInt.lognot_lemma_1 #(bits t);
UInt.nth_lemma (FStar.UInt.lognot #(bits t) (UInt.ones (bits t))) (UInt.zero (bits t))
| S8 | S16 | S32 | S64 | S128 ->
Int.nth_lemma (FStar.Int.lognot #(bits t) (Int.zero (bits t))) (Int.ones (bits t));
Int.nth_lemma (FStar.Int.lognot #(bits t) (Int.ones (bits t))) (Int.zero (bits t))
let lognot_spec #t #l a =
match t with
| U1 ->
assert_norm(lognot (u1 0) == u1 1 /\ lognot (u1 1) == u1 0);
assert_norm(lognot_v #U1 0 == 1 /\ lognot_v #U1 1 == 0)
| _ -> ()
[@(strict_on_arguments [0])]
let shift_right #t #l a b =
match t with
| U1 -> UInt8.shift_right a b
| U8 -> UInt8.shift_right a b
| U16 -> UInt16.shift_right a b
| U32 -> UInt32.shift_right a b
| U64 -> UInt64.shift_right a b
| U128 -> UInt128.shift_right a b
| S8 -> Int8.shift_arithmetic_right a b
| S16 -> Int16.shift_arithmetic_right a b
| S32 -> Int32.shift_arithmetic_right a b
| S64 -> Int64.shift_arithmetic_right a b
| S128 -> Int128.shift_arithmetic_right a b
val shift_right_value_aux_1: #n:pos{1 < n} -> a:Int.int_t n -> s:nat{n <= s} ->
Lemma (Int.shift_arithmetic_right #n a s = a / pow2 s)
let shift_right_value_aux_1 #n a s =
pow2_le_compat s n;
if a >= 0 then Int.sign_bit_positive a else Int.sign_bit_negative a
#push-options "--z3rlimit 200"
val shift_right_value_aux_2: #n:pos{1 < n} -> a:Int.int_t n ->
Lemma (Int.shift_arithmetic_right #n a 1 = a / 2)
let shift_right_value_aux_2 #n a =
if a >= 0 then
begin
Int.sign_bit_positive a;
UInt.shift_right_value_aux_3 #n a 1
end
else
begin
Int.sign_bit_negative a;
let a1 = Int.to_vec a in
let au = Int.to_uint a in
let sar = Int.shift_arithmetic_right #n a 1 in
let sar1 = Int.to_vec sar in
let sr = UInt.shift_right #n au 1 in
let sr1 = UInt.to_vec sr in
assert (Seq.equal (Seq.slice sar1 1 n) (Seq.slice sr1 1 n));
assert (Seq.equal sar1 (Seq.append (BitVector.ones_vec #1) (Seq.slice sr1 1 n)));
UInt.append_lemma #1 #(n-1) (BitVector.ones_vec #1) (Seq.slice sr1 1 n);
assert (Seq.equal (Seq.slice a1 0 (n-1)) (Seq.slice sar1 1 n));
UInt.slice_left_lemma a1 (n-1);
assert (sar + pow2 n = pow2 (n-1) + (au / 2));
pow2_double_sum (n-1);
assert (sar + pow2 (n-1) = (a + pow2 n) / 2);
pow2_double_mult (n-1);
lemma_div_plus a (pow2 (n-1)) 2;
assert (sar = a / 2)
end
val shift_right_value_aux_3: #n:pos -> a:Int.int_t n -> s:pos{s < n} ->
Lemma (ensures Int.shift_arithmetic_right #n a s = a / pow2 s)
(decreases s)
let rec shift_right_value_aux_3 #n a s =
if s = 1 then
shift_right_value_aux_2 #n a
else
begin
let a1 = Int.to_vec a in
assert (Seq.equal (BitVector.shift_arithmetic_right_vec #n a1 s)
(BitVector.shift_arithmetic_right_vec #n
(BitVector.shift_arithmetic_right_vec #n a1 (s-1)) 1));
assert (Int.shift_arithmetic_right #n a s =
Int.shift_arithmetic_right #n (Int.shift_arithmetic_right #n a (s-1)) 1);
shift_right_value_aux_3 #n a (s-1);
shift_right_value_aux_2 #n (Int.shift_arithmetic_right #n a (s-1));
assert (Int.shift_arithmetic_right #n a s = (a / pow2 (s-1)) / 2);
pow2_double_mult (s-1);
division_multiplication_lemma a (pow2 (s-1)) 2
end
let shift_right_lemma #t #l a b =
match t with
| U1 | U8 | U16 | U32 | U64 | U128 -> ()
| S8 | S16 | S32 | S64 | S128 ->
if v b = 0 then ()
else if v b >= bits t then
shift_right_value_aux_1 #(bits t) (v a) (v b)
else
shift_right_value_aux_3 #(bits t) (v a) (v b)
[@(strict_on_arguments [0])]
let shift_left #t #l a b =
match t with
| U1 -> UInt8.shift_left a b
| U8 -> UInt8.shift_left a b
| U16 -> UInt16.shift_left a b
| U32 -> UInt32.shift_left a b
| U64 -> UInt64.shift_left a b
| U128 -> UInt128.shift_left a b
| S8 -> Int8.shift_left a b
| S16 -> Int16.shift_left a b
| S32 -> Int32.shift_left a b
| S64 -> Int64.shift_left a b
| S128 -> Int128.shift_left a b
#push-options "--max_fuel 1"
let shift_left_lemma #t #l a b = ()
let rotate_right #t #l a b =
logor (shift_right a b) (shift_left a (sub #U32 (size (bits t)) b))
let rotate_left #t #l a b =
logor (shift_left a b) (shift_right a (sub #U32 (size (bits t)) b))
[@(strict_on_arguments [0])]
let ct_abs #t #l a =
match t with
| S8 -> Int8.ct_abs a
| S16 -> Int16.ct_abs a
| S32 -> Int32.ct_abs a
| S64 -> Int64.ct_abs a
#pop-options
[@(strict_on_arguments [0])]
let eq_mask #t a b =
match t with
| U1 -> lognot (logxor a b)
| U8 -> UInt8.eq_mask a b
| U16 -> UInt16.eq_mask a b
| U32 -> UInt32.eq_mask a b
| U64 -> UInt64.eq_mask a b
| U128 -> UInt128.eq_mask a b
| S8 -> Int.Cast.uint8_to_int8 (UInt8.eq_mask (to_u8 a) (to_u8 b))
| S16 -> Int.Cast.uint16_to_int16 (UInt16.eq_mask (to_u16 a) (to_u16 b))
| S32 -> Int.Cast.uint32_to_int32 (UInt32.eq_mask (to_u32 a) (to_u32 b))
| S64 -> Int.Cast.uint64_to_int64 (UInt64.eq_mask (to_u64 a) (to_u64 b))
val eq_mask_lemma_unsigned: #t:inttype{unsigned t} -> a:int_t t SEC -> b:int_t t SEC -> Lemma
(if v a = v b then v (eq_mask a b) == ones_v t
else v (eq_mask a b) == 0)
let eq_mask_lemma_unsigned #t a b =
match t with
| U1 ->
assert_norm (
logxor (u1 0) (u1 0) == u1 0 /\ logxor (u1 0) (u1 1) == u1 1 /\
logxor (u1 1) (u1 0) == u1 1 /\ logxor (u1 1) (u1 1) == u1 0 /\
lognot (u1 1) == u1 0 /\ lognot (u1 0) == u1 1)
| U8 | U16 | U32 | U64 | U128 -> ()
#push-options "--z3rlimit 200"
val eq_mask_lemma_signed: #t:inttype{signed t /\ ~(S128? t)} -> a:int_t t SEC -> b:int_t t SEC -> Lemma
(if v a = v b then v (eq_mask a b) == ones_v t
else v (eq_mask a b) == 0)
let eq_mask_lemma_signed #t a b =
match t with
| S8 ->
begin
assert_norm (pow2 8 = 2 * pow2 7);
if 0 <= v a then modulo_lemma (v a) (pow2 8)
else
begin
modulo_addition_lemma (v a) 1 (pow2 8);
modulo_lemma (v a + pow2 8) (pow2 8)
end
end
| S16 ->
begin
assert_norm (pow2 16 = 2 * pow2 15);
if 0 <= v a then modulo_lemma (v a) (pow2 16)
else
begin
modulo_addition_lemma (v a) 1 (pow2 16);
modulo_lemma (v a + pow2 16) (pow2 16)
end
end
| S32 ->
begin
if 0 <= v a then modulo_lemma (v a) (pow2 32)
else
begin
modulo_addition_lemma (v a) 1 (pow2 32);
modulo_lemma (v a + pow2 32) (pow2 32)
end
end
| S64 ->
begin
if 0 <= v a then modulo_lemma (v a) (pow2 64)
else
begin
modulo_addition_lemma (v a) 1 (pow2 64);
modulo_lemma (v a + pow2 64) (pow2 64)
end
end
#pop-options
let eq_mask_lemma #t a b =
if signed t then eq_mask_lemma_signed a b
else eq_mask_lemma_unsigned a b
let eq_mask_logand_lemma #t a b c =
eq_mask_lemma a b;
logand_zeros c;
logand_ones c;
match t with
| U1 | U8 | U16 | U32 | U64 | U128 -> UInt.logand_commutative #(bits t) (v (eq_mask a b)) (v c)
| S8 | S16 | S32 | S64 -> Int.logand_commutative #(bits t) (v (eq_mask a b)) (v c)
[@(strict_on_arguments [0])]
let neq_mask #t a b = lognot (eq_mask #t a b)
let neq_mask_lemma #t a b =
match t with
| U1 -> assert_norm (lognot (u1 1) == u1 0 /\ lognot (u1 0) == u1 1)
| _ ->
UInt.lognot_lemma_1 #(bits t);
UInt.lognot_self #(bits t) 0
[@(strict_on_arguments [0])] | false | false | Lib.IntTypes.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 200,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val gte_mask: #t:inttype{unsigned t} -> int_t t SEC -> b:int_t t SEC -> int_t t SEC | [] | Lib.IntTypes.gte_mask | {
"file_name": "lib/Lib.IntTypes.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | a: Lib.IntTypes.int_t t Lib.IntTypes.SEC -> b: Lib.IntTypes.int_t t Lib.IntTypes.SEC
-> Lib.IntTypes.int_t t Lib.IntTypes.SEC | {
"end_col": 32,
"end_line": 894,
"start_col": 2,
"start_line": 888
} |
FStar.Pervasives.Lemma | val eq_mask_lemma: #t:inttype{~(S128? t)} -> a:int_t t SEC -> b:int_t t SEC -> Lemma
(if v a = v b then v (eq_mask a b) == ones_v t
else v (eq_mask a b) == 0)
[SMTPat (eq_mask #t a b)] | [
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let eq_mask_lemma #t a b =
if signed t then eq_mask_lemma_signed a b
else eq_mask_lemma_unsigned a b | val eq_mask_lemma: #t:inttype{~(S128? t)} -> a:int_t t SEC -> b:int_t t SEC -> Lemma
(if v a = v b then v (eq_mask a b) == ones_v t
else v (eq_mask a b) == 0)
[SMTPat (eq_mask #t a b)]
let eq_mask_lemma #t a b = | false | null | true | if signed t then eq_mask_lemma_signed a b else eq_mask_lemma_unsigned a b | {
"checked_file": "Lib.IntTypes.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt8.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt16.fsti.checked",
"FStar.UInt128.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int8.fsti.checked",
"FStar.Int64.fsti.checked",
"FStar.Int32.fsti.checked",
"FStar.Int16.fsti.checked",
"FStar.Int128.fsti.checked",
"FStar.Int.Cast.Full.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Int.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "Lib.IntTypes.fst"
} | [
"lemma"
] | [
"Lib.IntTypes.inttype",
"Prims.l_not",
"Prims.b2t",
"Lib.IntTypes.uu___is_S128",
"Lib.IntTypes.int_t",
"Lib.IntTypes.SEC",
"Lib.IntTypes.signed",
"Lib.IntTypes.eq_mask_lemma_signed",
"Prims.bool",
"Lib.IntTypes.eq_mask_lemma_unsigned",
"Prims.unit"
] | [] | module Lib.IntTypes
open FStar.Math.Lemmas
#push-options "--max_fuel 0 --max_ifuel 1 --z3rlimit 200"
let pow2_2 _ = assert_norm (pow2 2 = 4)
let pow2_3 _ = assert_norm (pow2 3 = 8)
let pow2_4 _ = assert_norm (pow2 4 = 16)
let pow2_127 _ = assert_norm (pow2 127 = 0x80000000000000000000000000000000)
let bits_numbytes t = ()
let sec_int_t t = pub_int_t t
let sec_int_v #t u = pub_int_v u
let secret #t x = x
[@(strict_on_arguments [0])]
let mk_int #t #l x =
match t with
| U1 -> UInt8.uint_to_t x
| U8 -> UInt8.uint_to_t x
| U16 -> UInt16.uint_to_t x
| U32 -> UInt32.uint_to_t x
| U64 -> UInt64.uint_to_t x
| U128 -> UInt128.uint_to_t x
| S8 -> Int8.int_to_t x
| S16 -> Int16.int_to_t x
| S32 -> Int32.int_to_t x
| S64 -> Int64.int_to_t x
| S128 -> Int128.int_to_t x
val v_extensionality:
#t:inttype
-> #l:secrecy_level
-> a:int_t t l
-> b:int_t t l
-> Lemma
(requires v a == v b)
(ensures a == b)
let v_extensionality #t #l a b =
match t with
| U1 -> ()
| U8 -> UInt8.v_inj a b
| U16 -> UInt16.v_inj a b
| U32 -> UInt32.v_inj a b
| U64 -> UInt64.v_inj a b
| U128 -> UInt128.v_inj a b
| S8 -> Int8.v_inj a b
| S16 -> Int16.v_inj a b
| S32 -> Int32.v_inj a b
| S64 -> Int64.v_inj a b
| S128 -> Int128.v_inj a b
let v_injective #t #l a =
v_extensionality a (mk_int (v a))
let v_mk_int #t #l n = ()
let u128 n = FStar.UInt128.uint64_to_uint128 (u64 n)
// KaRaMeL will extract this to FStar_Int128_int_to_t, which isn't provided
// We'll need to have FStar.Int128.int64_to_int128 to support int128_t literals
let i128 n =
assert_norm (pow2 (bits S64 - 1) <= pow2 (bits S128 - 1));
sint #S128 #SEC n
let size_to_uint32 x = x
let size_to_uint64 x = Int.Cast.uint32_to_uint64 x
let byte_to_uint8 x = x
let byte_to_int8 x = Int.Cast.uint8_to_int8 x
let op_At_Percent = Int.op_At_Percent
// FStar.UInt128 gets special treatment in KaRaMeL. There is no
// equivalent for FStar.Int128 at the moment, so we use the three
// assumed cast operators below.
//
// Using them will fail at runtime with an informative message.
// The commented-out implementations show that they are realizable.
//
// When support for `FStar.Int128` is added KaRaMeL, these casts must
// be added as special cases. When using builtin compiler support for
// `int128_t`, they can be implemented directly as C casts without
// undefined or implementation-defined behaviour.
assume
val uint128_to_int128: a:UInt128.t{v a <= maxint S128} -> b:Int128.t{Int128.v b == UInt128.v a}
//let uint128_to_int128 a = Int128.int_to_t (v a)
assume
val int128_to_uint128: a:Int128.t -> b:UInt128.t{UInt128.v b == Int128.v a % pow2 128}
//let int128_to_uint128 a = mk_int (v a % pow2 128)
assume
val int64_to_int128: a:Int64.t -> b:Int128.t{Int128.v b == Int64.v a}
//let int64_to_int128 a = Int128.int_to_t (v a)
val uint64_to_int128: a:UInt64.t -> b:Int128.t{Int128.v b == UInt64.v a}
let uint64_to_int128 a = uint128_to_int128 (Int.Cast.Full.uint64_to_uint128 a)
val int64_to_uint128: a:Int64.t -> b:UInt128.t{UInt128.v b == Int64.v a % pow2 128}
let int64_to_uint128 a = int128_to_uint128 (int64_to_int128 a)
val int128_to_uint64: a:Int128.t -> b:UInt64.t{UInt64.v b == Int128.v a % pow2 64}
let int128_to_uint64 a = Int.Cast.Full.uint128_to_uint64 (int128_to_uint128 a)
#push-options "--z3rlimit 1000"
[@(strict_on_arguments [0;2])]
let cast #t #l t' l' u =
assert_norm (pow2 8 = 2 * pow2 7);
assert_norm (pow2 16 = 2 * pow2 15);
assert_norm (pow2 64 * pow2 64 = pow2 128);
assert_norm (pow2 16 * pow2 48 = pow2 64);
assert_norm (pow2 8 * pow2 56 = pow2 64);
assert_norm (pow2 32 * pow2 32 = pow2 64);
modulo_modulo_lemma (v u) (pow2 32) (pow2 32);
modulo_modulo_lemma (v u) (pow2 64) (pow2 64);
modulo_modulo_lemma (v u) (pow2 128) (pow2 64);
modulo_modulo_lemma (v u) (pow2 16) (pow2 48);
modulo_modulo_lemma (v u) (pow2 8) (pow2 56);
let open FStar.Int.Cast in
let open FStar.Int.Cast.Full in
match t, t' with
| U1, U1 -> u
| U1, U8 -> u
| U1, U16 -> uint8_to_uint16 u
| U1, U32 -> uint8_to_uint32 u
| U1, U64 -> uint8_to_uint64 u
| U1, U128 -> UInt128.uint64_to_uint128 (uint8_to_uint64 u)
| U1, S8 -> uint8_to_int8 u
| U1, S16 -> uint8_to_int16 u
| U1, S32 -> uint8_to_int32 u
| U1, S64 -> uint8_to_int64 u
| U1, S128 -> uint64_to_int128 (uint8_to_uint64 u)
| U8, U1 -> UInt8.rem u 2uy
| U8, U8 -> u
| U8, U16 -> uint8_to_uint16 u
| U8, U32 -> uint8_to_uint32 u
| U8, U64 -> uint8_to_uint64 u
| U8, U128 -> UInt128.uint64_to_uint128 (uint8_to_uint64 u)
| U8, S8 -> uint8_to_int8 u
| U8, S16 -> uint8_to_int16 u
| U8, S32 -> uint8_to_int32 u
| U8, S64 -> uint8_to_int64 u
| U8, S128 -> uint64_to_int128 (uint8_to_uint64 u)
| U16, U1 -> UInt8.rem (uint16_to_uint8 u) 2uy
| U16, U8 -> uint16_to_uint8 u
| U16, U16 -> u
| U16, U32 -> uint16_to_uint32 u
| U16, U64 -> uint16_to_uint64 u
| U16, U128 -> UInt128.uint64_to_uint128 (uint16_to_uint64 u)
| U16, S8 -> uint16_to_int8 u
| U16, S16 -> uint16_to_int16 u
| U16, S32 -> uint16_to_int32 u
| U16, S64 -> uint16_to_int64 u
| U16, S128 -> uint64_to_int128 (uint16_to_uint64 u)
| U32, U1 -> UInt8.rem (uint32_to_uint8 u) 2uy
| U32, U8 -> uint32_to_uint8 u
| U32, U16 -> uint32_to_uint16 u
| U32, U32 -> u
| U32, U64 -> uint32_to_uint64 u
| U32, U128 -> UInt128.uint64_to_uint128 (uint32_to_uint64 u)
| U32, S8 -> uint32_to_int8 u
| U32, S16 -> uint32_to_int16 u
| U32, S32 -> uint32_to_int32 u
| U32, S64 -> uint32_to_int64 u
| U32, S128 -> uint64_to_int128 (uint32_to_uint64 u)
| U64, U1 -> UInt8.rem (uint64_to_uint8 u) 2uy
| U64, U8 -> uint64_to_uint8 u
| U64, U16 -> uint64_to_uint16 u
| U64, U32 -> uint64_to_uint32 u
| U64, U64 -> u
| U64, U128 -> UInt128.uint64_to_uint128 u
| U64, S8 -> uint64_to_int8 u
| U64, S16 -> uint64_to_int16 u
| U64, S32 -> uint64_to_int32 u
| U64, S64 -> uint64_to_int64 u
| U64, S128 -> uint64_to_int128 u
| U128, U1 -> UInt8.rem (uint64_to_uint8 (uint128_to_uint64 u)) 2uy
| U128, U8 -> uint64_to_uint8 (UInt128.uint128_to_uint64 u)
| U128, U16 -> uint64_to_uint16 (UInt128.uint128_to_uint64 u)
| U128, U32 -> uint64_to_uint32 (UInt128.uint128_to_uint64 u)
| U128, U64 -> UInt128.uint128_to_uint64 u
| U128, U128 -> u
| U128, S8 -> uint64_to_int8 (UInt128.uint128_to_uint64 u)
| U128, S16 -> uint64_to_int16 (UInt128.uint128_to_uint64 u)
| U128, S32 -> uint64_to_int32 (UInt128.uint128_to_uint64 u)
| U128, S64 -> uint64_to_int64 (UInt128.uint128_to_uint64 u)
| U128, S128 -> uint128_to_int128 u
| S8, U1 -> UInt8.rem (int8_to_uint8 u) 2uy
| S8, U8 -> int8_to_uint8 u
| S8, U16 -> int8_to_uint16 u
| S8, U32 -> int8_to_uint32 u
| S8, U64 -> int8_to_uint64 u
| S8, U128 -> int64_to_uint128 (int8_to_int64 u)
| S8, S8 -> u
| S8, S16 -> int8_to_int16 u
| S8, S32 -> int8_to_int32 u
| S8, S64 -> int8_to_int64 u
| S8, S128 -> int64_to_int128 (int8_to_int64 u)
| S16, U1 -> UInt8.rem (int16_to_uint8 u) 2uy
| S16, U8 -> int16_to_uint8 u
| S16, U16 -> int16_to_uint16 u
| S16, U32 -> int16_to_uint32 u
| S16, U64 -> int16_to_uint64 u
| S16, U128 -> int64_to_uint128 (int16_to_int64 u)
| S16, S8 -> int16_to_int8 u
| S16, S16 -> u
| S16, S32 -> int16_to_int32 u
| S16, S64 -> int16_to_int64 u
| S16, S128 -> int64_to_int128 (int16_to_int64 u)
| S32, U1 -> UInt8.rem (int32_to_uint8 u) 2uy
| S32, U8 -> int32_to_uint8 u
| S32, U16 -> int32_to_uint16 u
| S32, U32 -> int32_to_uint32 u
| S32, U64 -> int32_to_uint64 u
| S32, U128 -> int64_to_uint128 (int32_to_int64 u)
| S32, S8 -> int32_to_int8 u
| S32, S16 -> int32_to_int16 u
| S32, S32 -> u
| S32, S64 -> int32_to_int64 u
| S32, S128 -> int64_to_int128 (int32_to_int64 u)
| S64, U1 -> UInt8.rem (int64_to_uint8 u) 2uy
| S64, U8 -> int64_to_uint8 u
| S64, U16 -> int64_to_uint16 u
| S64, U32 -> int64_to_uint32 u
| S64, U64 -> int64_to_uint64 u
| S64, U128 -> int64_to_uint128 u
| S64, S8 -> int64_to_int8 u
| S64, S16 -> int64_to_int16 u
| S64, S32 -> int64_to_int32 u
| S64, S64 -> u
| S64, S128 -> int64_to_int128 u
| S128, U1 -> UInt8.rem (uint64_to_uint8 (int128_to_uint64 u)) 2uy
| S128, U8 -> uint64_to_uint8 (int128_to_uint64 u)
| S128, U16 -> uint64_to_uint16 (int128_to_uint64 u)
| S128, U32 -> uint64_to_uint32 (int128_to_uint64 u)
| S128, U64 -> int128_to_uint64 u
| S128, U128 -> int128_to_uint128 u
| S128, S8 -> uint64_to_int8 (int128_to_uint64 u)
| S128, S16 -> uint64_to_int16 (int128_to_uint64 u)
| S128, S32 -> uint64_to_int32 (int128_to_uint64 u)
| S128, S64 -> uint64_to_int64 (int128_to_uint64 u)
| S128, S128 -> u
#pop-options
[@(strict_on_arguments [0])]
let ones t l =
match t with
| U1 -> 0x1uy
| U8 -> 0xFFuy
| U16 -> 0xFFFFus
| U32 -> 0xFFFFFFFFul
| U64 -> 0xFFFFFFFFFFFFFFFFuL
| U128 ->
let x = UInt128.uint64_to_uint128 0xFFFFFFFFFFFFFFFFuL in
let y = (UInt128.shift_left x 64ul) `UInt128.add` x in
assert_norm (UInt128.v y == pow2 128 - 1);
y
| _ -> mk_int (-1)
let zeros t l = mk_int 0
[@(strict_on_arguments [0])]
let add_mod #t #l a b =
match t with
| U1 -> UInt8.rem (UInt8.add_mod a b) 2uy
| U8 -> UInt8.add_mod a b
| U16 -> UInt16.add_mod a b
| U32 -> UInt32.add_mod a b
| U64 -> UInt64.add_mod a b
| U128 -> UInt128.add_mod a b
let add_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let add #t #l a b =
match t with
| U1 -> UInt8.add a b
| U8 -> UInt8.add a b
| U16 -> UInt16.add a b
| U32 -> UInt32.add a b
| U64 -> UInt64.add a b
| U128 -> UInt128.add a b
| S8 -> Int8.add a b
| S16 -> Int16.add a b
| S32 -> Int32.add a b
| S64 -> Int64.add a b
| S128 -> Int128.add a b
let add_lemma #t #l a b = ()
#push-options "--max_fuel 1"
[@(strict_on_arguments [0])]
let incr #t #l a =
match t with
| U1 -> UInt8.add a 1uy
| U8 -> UInt8.add a 1uy
| U16 -> UInt16.add a 1us
| U32 -> UInt32.add a 1ul
| U64 -> UInt64.add a 1uL
| U128 -> UInt128.add a (UInt128.uint_to_t 1)
| S8 -> Int8.add a 1y
| S16 -> Int16.add a 1s
| S32 -> Int32.add a 1l
| S64 -> Int64.add a 1L
| S128 -> Int128.add a (Int128.int_to_t 1)
let incr_lemma #t #l a = ()
#pop-options
[@(strict_on_arguments [0])]
let mul_mod #t #l a b =
match t with
| U1 -> UInt8.mul_mod a b
| U8 -> UInt8.mul_mod a b
| U16 -> UInt16.mul_mod a b
| U32 -> UInt32.mul_mod a b
| U64 -> UInt64.mul_mod a b
let mul_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let mul #t #l a b =
match t with
| U1 -> UInt8.mul a b
| U8 -> UInt8.mul a b
| U16 -> UInt16.mul a b
| U32 -> UInt32.mul a b
| U64 -> UInt64.mul a b
| S8 -> Int8.mul a b
| S16 -> Int16.mul a b
| S32 -> Int32.mul a b
| S64 -> Int64.mul a b
let mul_lemma #t #l a b = ()
let mul64_wide a b = UInt128.mul_wide a b
let mul64_wide_lemma a b = ()
let mul_s64_wide a b = Int128.mul_wide a b
let mul_s64_wide_lemma a b = ()
[@(strict_on_arguments [0])]
let sub_mod #t #l a b =
match t with
| U1 -> UInt8.rem (UInt8.sub_mod a b) 2uy
| U8 -> UInt8.sub_mod a b
| U16 -> UInt16.sub_mod a b
| U32 -> UInt32.sub_mod a b
| U64 -> UInt64.sub_mod a b
| U128 -> UInt128.sub_mod a b
let sub_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let sub #t #l a b =
match t with
| U1 -> UInt8.sub a b
| U8 -> UInt8.sub a b
| U16 -> UInt16.sub a b
| U32 -> UInt32.sub a b
| U64 -> UInt64.sub a b
| U128 -> UInt128.sub a b
| S8 -> Int8.sub a b
| S16 -> Int16.sub a b
| S32 -> Int32.sub a b
| S64 -> Int64.sub a b
| S128 -> Int128.sub a b
let sub_lemma #t #l a b = ()
#push-options "--max_fuel 1"
[@(strict_on_arguments [0])]
let decr #t #l a =
match t with
| U1 -> UInt8.sub a 1uy
| U8 -> UInt8.sub a 1uy
| U16 -> UInt16.sub a 1us
| U32 -> UInt32.sub a 1ul
| U64 -> UInt64.sub a 1uL
| U128 -> UInt128.sub a (UInt128.uint_to_t 1)
| S8 -> Int8.sub a 1y
| S16 -> Int16.sub a 1s
| S32 -> Int32.sub a 1l
| S64 -> Int64.sub a 1L
| S128 -> Int128.sub a (Int128.int_to_t 1)
let decr_lemma #t #l a = ()
#pop-options
[@(strict_on_arguments [0])]
let logxor #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logxor 0uy 0uy == 0uy);
assert_norm (UInt8.logxor 0uy 1uy == 1uy);
assert_norm (UInt8.logxor 1uy 0uy == 1uy);
assert_norm (UInt8.logxor 1uy 1uy == 0uy);
UInt8.logxor a b
| U8 -> UInt8.logxor a b
| U16 -> UInt16.logxor a b
| U32 -> UInt32.logxor a b
| U64 -> UInt64.logxor a b
| U128 -> UInt128.logxor a b
| S8 -> Int8.logxor a b
| S16 -> Int16.logxor a b
| S32 -> Int32.logxor a b
| S64 -> Int64.logxor a b
| S128 -> Int128.logxor a b
#push-options "--max_fuel 1"
val logxor_lemma_: #t:inttype -> #l:secrecy_level -> a:int_t t l -> b:int_t t l -> Lemma
(v (a `logxor` (a `logxor` b)) == v b)
let logxor_lemma_ #t #l a b =
match t with
| U1 | U8 | U16 | U32 | U64 | U128 ->
UInt.logxor_associative #(bits t) (v a) (v a) (v b);
UInt.logxor_self #(bits t) (v a);
UInt.logxor_commutative #(bits t) 0 (v b);
UInt.logxor_lemma_1 #(bits t) (v b)
| S8 | S16 | S32 | S64 | S128 ->
Int.logxor_associative #(bits t) (v a) (v a) (v b);
Int.logxor_self #(bits t) (v a);
Int.logxor_commutative #(bits t) 0 (v b);
Int.logxor_lemma_1 #(bits t) (v b)
let logxor_lemma #t #l a b =
logxor_lemma_ #t a b;
v_extensionality (logxor a (logxor a b)) b;
begin
match t with
| U1 | U8 | U16 | U32 | U64 | U128 -> UInt.logxor_commutative #(bits t) (v a) (v b)
| S8 | S16 | S32 | S64 | S128 -> Int.logxor_commutative #(bits t) (v a) (v b)
end;
v_extensionality (logxor a (logxor b a)) b;
begin
match t with
| U1 | U8 | U16 | U32 | U64 | U128 -> UInt.logxor_lemma_1 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.logxor_lemma_1 #(bits t) (v a)
end;
v_extensionality (logxor a (mk_int #t #l 0)) a
let logxor_lemma1 #t #l a b =
match v a, v b with
| _, 0 ->
UInt.logxor_lemma_1 #(bits t) (v a)
| 0, _ ->
UInt.logxor_commutative #(bits t) (v a) (v b);
UInt.logxor_lemma_1 #(bits t) (v b)
| 1, 1 ->
v_extensionality a b;
UInt.logxor_self #(bits t) (v a)
let logxor_spec #t #l a b =
match t with
| U1 ->
assert_norm (u1 0 `logxor` u1 0 == u1 0 /\ u1 0 `logxor` u1 1 == u1 1);
assert_norm (u1 1 `logxor` u1 0 == u1 1 /\ u1 1 `logxor` u1 1 == u1 0);
assert_norm (0 `logxor_v #U1` 0 == 0 /\ 0 `logxor_v #U1` 1 == 1);
assert_norm (1 `logxor_v #U1` 0 == 1 /\ 1 `logxor_v #U1` 1 == 0)
| _ -> ()
#pop-options
[@(strict_on_arguments [0])]
let logand #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logand 0uy 0uy == 0uy);
assert_norm (UInt8.logand 0uy 1uy == 0uy);
assert_norm (UInt8.logand 1uy 0uy == 0uy);
assert_norm (UInt8.logand 1uy 1uy == 1uy);
UInt8.logand a b
| U8 -> UInt8.logand a b
| U16 -> UInt16.logand a b
| U32 -> UInt32.logand a b
| U64 -> UInt64.logand a b
| U128 -> UInt128.logand a b
| S8 -> Int8.logand a b
| S16 -> Int16.logand a b
| S32 -> Int32.logand a b
| S64 -> Int64.logand a b
| S128 -> Int128.logand a b
let logand_zeros #t #l a =
match t with
| U1 -> assert_norm (u1 0 `logand` zeros U1 l == u1 0 /\ u1 1 `logand` zeros U1 l == u1 0)
| U8 | U16 | U32 | U64 | U128 -> UInt.logand_lemma_1 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.logand_lemma_1 #(bits t) (v a)
let logand_ones #t #l a =
match t with
| U1 -> assert_norm (u1 0 `logand` ones U1 l == u1 0 /\ u1 1 `logand` ones U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logand_lemma_2 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.logand_lemma_2 #(bits t) (v a)
let logand_lemma #t #l a b =
logand_zeros #t #l b;
logand_ones #t #l b;
match t with
| U1 ->
assert_norm (u1 0 `logand` zeros U1 l == u1 0 /\ u1 1 `logand` zeros U1 l == u1 0);
assert_norm (u1 0 `logand` ones U1 l == u1 0 /\ u1 1 `logand` ones U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logand_commutative #(bits t) (v a) (v b)
| S8 | S16 | S32 | S64 | S128 -> Int.logand_commutative #(bits t) (v a) (v b)
let logand_spec #t #l a b =
match t with
| U1 ->
assert_norm (u1 0 `logand` u1 0 == u1 0 /\ u1 0 `logand` u1 1 == u1 0);
assert_norm (u1 1 `logand` u1 0 == u1 0 /\ u1 1 `logand` u1 1 == u1 1);
assert_norm (0 `logand_v #U1` 0 == 0 /\ 0 `logand_v #U1` 1 == 0);
assert_norm (1 `logand_v #U1` 0 == 0 /\ 1 `logand_v #U1` 1 == 1)
| _ -> ()
let logand_le #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logand 0uy 0uy == 0uy);
assert_norm (UInt8.logand 0uy 1uy == 0uy);
assert_norm (UInt8.logand 1uy 0uy == 0uy);
assert_norm (UInt8.logand 1uy 1uy == 1uy)
| U8 -> UInt.logand_le (UInt.to_uint_t 8 (v a)) (UInt.to_uint_t 8 (v b))
| U16 -> UInt.logand_le (UInt.to_uint_t 16 (v a)) (UInt.to_uint_t 16 (v b))
| U32 -> UInt.logand_le (UInt.to_uint_t 32 (v a)) (UInt.to_uint_t 32 (v b))
| U64 -> UInt.logand_le (UInt.to_uint_t 64 (v a)) (UInt.to_uint_t 64 (v b))
| U128 -> UInt.logand_le (UInt.to_uint_t 128 (v a)) (UInt.to_uint_t 128 (v b))
let logand_mask #t #l a b m =
match t with
| U1 ->
assert_norm (UInt8.logand 0uy 0uy == 0uy);
assert_norm (UInt8.logand 0uy 1uy == 0uy);
assert_norm (UInt8.logand 1uy 0uy == 0uy);
assert_norm (UInt8.logand 1uy 1uy == 1uy)
| U8 -> UInt.logand_mask (UInt.to_uint_t 8 (v a)) m
| U16 -> UInt.logand_mask (UInt.to_uint_t 16 (v a)) m
| U32 -> UInt.logand_mask (UInt.to_uint_t 32 (v a)) m
| U64 -> UInt.logand_mask (UInt.to_uint_t 64 (v a)) m
| U128 -> UInt.logand_mask (UInt.to_uint_t 128 (v a)) m
[@(strict_on_arguments [0])]
let logor #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logor 0uy 0uy == 0uy);
assert_norm (UInt8.logor 0uy 1uy == 1uy);
assert_norm (UInt8.logor 1uy 0uy == 1uy);
assert_norm (UInt8.logor 1uy 1uy == 1uy);
UInt8.logor a b
| U8 -> UInt8.logor a b
| U16 -> UInt16.logor a b
| U32 -> UInt32.logor a b
| U64 -> UInt64.logor a b
| U128 -> UInt128.logor a b
| S8 -> Int8.logor a b
| S16 -> Int16.logor a b
| S32 -> Int32.logor a b
| S64 -> Int64.logor a b
| S128 -> Int128.logor a b
#push-options "--max_fuel 1"
let logor_disjoint #t #l a b m =
if m > 0 then
begin
UInt.logor_disjoint #(bits t) (v b) (v a) m;
UInt.logor_commutative #(bits t) (v b) (v a)
end
else
begin
UInt.logor_commutative #(bits t) (v a) (v b);
UInt.logor_lemma_1 #(bits t) (v b)
end
#pop-options
let logor_zeros #t #l a =
match t with
|U1 -> assert_norm(u1 0 `logor` zeros U1 l == u1 0 /\ u1 1 `logor` zeros U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logor_lemma_1 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.nth_lemma #(bits t) (Int.logor #(bits t) (v a) (Int.zero (bits t))) (v a)
let logor_ones #t #l a =
match t with
|U1 -> assert_norm(u1 0 `logor` ones U1 l == u1 1 /\ u1 1 `logor` ones U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logor_lemma_2 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.nth_lemma (Int.logor #(bits t) (v a) (Int.ones (bits t))) (Int.ones (bits t))
let logor_lemma #t #l a b =
logor_zeros #t #l b;
logor_ones #t #l b;
match t with
| U1 ->
assert_norm(u1 0 `logor` ones U1 l == u1 1 /\ u1 1 `logor` ones U1 l == u1 1);
assert_norm(u1 0 `logor` zeros U1 l == u1 0 /\ u1 1 `logor` zeros U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logor_commutative #(bits t) (v a) (v b)
| S8 | S16 | S32 | S64 | S128 -> Int.nth_lemma #(bits t) (Int.logor #(bits t) (v a) (v b)) (Int.logor #(bits t) (v b) (v a))
let logor_spec #t #l a b =
match t with
| U1 ->
assert_norm(u1 0 `logor` ones U1 l == u1 1 /\ u1 1 `logor` ones U1 l == u1 1);
assert_norm(u1 0 `logor` zeros U1 l == u1 0 /\ u1 1 `logor` zeros U1 l == u1 1);
assert_norm (0 `logor_v #U1` 0 == 0 /\ 0 `logor_v #U1` 1 == 1);
assert_norm (1 `logor_v #U1` 0 == 1 /\ 1 `logor_v #U1` 1 == 1)
| _ -> ()
[@(strict_on_arguments [0])]
let lognot #t #l a =
match t with
| U1 -> UInt8.rem (UInt8.lognot a) 2uy
| U8 -> UInt8.lognot a
| U16 -> UInt16.lognot a
| U32 -> UInt32.lognot a
| U64 -> UInt64.lognot a
| U128 -> UInt128.lognot a
| S8 -> Int8.lognot a
| S16 -> Int16.lognot a
| S32 -> Int32.lognot a
| S64 -> Int64.lognot a
| S128 -> Int128.lognot a
let lognot_lemma #t #l a =
match t with
|U1 -> assert_norm(lognot (u1 0) == u1 1 /\ lognot (u1 1) == u1 0)
| U8 | U16 | U32 | U64 | U128 ->
FStar.UInt.lognot_lemma_1 #(bits t);
UInt.nth_lemma (FStar.UInt.lognot #(bits t) (UInt.ones (bits t))) (UInt.zero (bits t))
| S8 | S16 | S32 | S64 | S128 ->
Int.nth_lemma (FStar.Int.lognot #(bits t) (Int.zero (bits t))) (Int.ones (bits t));
Int.nth_lemma (FStar.Int.lognot #(bits t) (Int.ones (bits t))) (Int.zero (bits t))
let lognot_spec #t #l a =
match t with
| U1 ->
assert_norm(lognot (u1 0) == u1 1 /\ lognot (u1 1) == u1 0);
assert_norm(lognot_v #U1 0 == 1 /\ lognot_v #U1 1 == 0)
| _ -> ()
[@(strict_on_arguments [0])]
let shift_right #t #l a b =
match t with
| U1 -> UInt8.shift_right a b
| U8 -> UInt8.shift_right a b
| U16 -> UInt16.shift_right a b
| U32 -> UInt32.shift_right a b
| U64 -> UInt64.shift_right a b
| U128 -> UInt128.shift_right a b
| S8 -> Int8.shift_arithmetic_right a b
| S16 -> Int16.shift_arithmetic_right a b
| S32 -> Int32.shift_arithmetic_right a b
| S64 -> Int64.shift_arithmetic_right a b
| S128 -> Int128.shift_arithmetic_right a b
val shift_right_value_aux_1: #n:pos{1 < n} -> a:Int.int_t n -> s:nat{n <= s} ->
Lemma (Int.shift_arithmetic_right #n a s = a / pow2 s)
let shift_right_value_aux_1 #n a s =
pow2_le_compat s n;
if a >= 0 then Int.sign_bit_positive a else Int.sign_bit_negative a
#push-options "--z3rlimit 200"
val shift_right_value_aux_2: #n:pos{1 < n} -> a:Int.int_t n ->
Lemma (Int.shift_arithmetic_right #n a 1 = a / 2)
let shift_right_value_aux_2 #n a =
if a >= 0 then
begin
Int.sign_bit_positive a;
UInt.shift_right_value_aux_3 #n a 1
end
else
begin
Int.sign_bit_negative a;
let a1 = Int.to_vec a in
let au = Int.to_uint a in
let sar = Int.shift_arithmetic_right #n a 1 in
let sar1 = Int.to_vec sar in
let sr = UInt.shift_right #n au 1 in
let sr1 = UInt.to_vec sr in
assert (Seq.equal (Seq.slice sar1 1 n) (Seq.slice sr1 1 n));
assert (Seq.equal sar1 (Seq.append (BitVector.ones_vec #1) (Seq.slice sr1 1 n)));
UInt.append_lemma #1 #(n-1) (BitVector.ones_vec #1) (Seq.slice sr1 1 n);
assert (Seq.equal (Seq.slice a1 0 (n-1)) (Seq.slice sar1 1 n));
UInt.slice_left_lemma a1 (n-1);
assert (sar + pow2 n = pow2 (n-1) + (au / 2));
pow2_double_sum (n-1);
assert (sar + pow2 (n-1) = (a + pow2 n) / 2);
pow2_double_mult (n-1);
lemma_div_plus a (pow2 (n-1)) 2;
assert (sar = a / 2)
end
val shift_right_value_aux_3: #n:pos -> a:Int.int_t n -> s:pos{s < n} ->
Lemma (ensures Int.shift_arithmetic_right #n a s = a / pow2 s)
(decreases s)
let rec shift_right_value_aux_3 #n a s =
if s = 1 then
shift_right_value_aux_2 #n a
else
begin
let a1 = Int.to_vec a in
assert (Seq.equal (BitVector.shift_arithmetic_right_vec #n a1 s)
(BitVector.shift_arithmetic_right_vec #n
(BitVector.shift_arithmetic_right_vec #n a1 (s-1)) 1));
assert (Int.shift_arithmetic_right #n a s =
Int.shift_arithmetic_right #n (Int.shift_arithmetic_right #n a (s-1)) 1);
shift_right_value_aux_3 #n a (s-1);
shift_right_value_aux_2 #n (Int.shift_arithmetic_right #n a (s-1));
assert (Int.shift_arithmetic_right #n a s = (a / pow2 (s-1)) / 2);
pow2_double_mult (s-1);
division_multiplication_lemma a (pow2 (s-1)) 2
end
let shift_right_lemma #t #l a b =
match t with
| U1 | U8 | U16 | U32 | U64 | U128 -> ()
| S8 | S16 | S32 | S64 | S128 ->
if v b = 0 then ()
else if v b >= bits t then
shift_right_value_aux_1 #(bits t) (v a) (v b)
else
shift_right_value_aux_3 #(bits t) (v a) (v b)
[@(strict_on_arguments [0])]
let shift_left #t #l a b =
match t with
| U1 -> UInt8.shift_left a b
| U8 -> UInt8.shift_left a b
| U16 -> UInt16.shift_left a b
| U32 -> UInt32.shift_left a b
| U64 -> UInt64.shift_left a b
| U128 -> UInt128.shift_left a b
| S8 -> Int8.shift_left a b
| S16 -> Int16.shift_left a b
| S32 -> Int32.shift_left a b
| S64 -> Int64.shift_left a b
| S128 -> Int128.shift_left a b
#push-options "--max_fuel 1"
let shift_left_lemma #t #l a b = ()
let rotate_right #t #l a b =
logor (shift_right a b) (shift_left a (sub #U32 (size (bits t)) b))
let rotate_left #t #l a b =
logor (shift_left a b) (shift_right a (sub #U32 (size (bits t)) b))
[@(strict_on_arguments [0])]
let ct_abs #t #l a =
match t with
| S8 -> Int8.ct_abs a
| S16 -> Int16.ct_abs a
| S32 -> Int32.ct_abs a
| S64 -> Int64.ct_abs a
#pop-options
[@(strict_on_arguments [0])]
let eq_mask #t a b =
match t with
| U1 -> lognot (logxor a b)
| U8 -> UInt8.eq_mask a b
| U16 -> UInt16.eq_mask a b
| U32 -> UInt32.eq_mask a b
| U64 -> UInt64.eq_mask a b
| U128 -> UInt128.eq_mask a b
| S8 -> Int.Cast.uint8_to_int8 (UInt8.eq_mask (to_u8 a) (to_u8 b))
| S16 -> Int.Cast.uint16_to_int16 (UInt16.eq_mask (to_u16 a) (to_u16 b))
| S32 -> Int.Cast.uint32_to_int32 (UInt32.eq_mask (to_u32 a) (to_u32 b))
| S64 -> Int.Cast.uint64_to_int64 (UInt64.eq_mask (to_u64 a) (to_u64 b))
val eq_mask_lemma_unsigned: #t:inttype{unsigned t} -> a:int_t t SEC -> b:int_t t SEC -> Lemma
(if v a = v b then v (eq_mask a b) == ones_v t
else v (eq_mask a b) == 0)
let eq_mask_lemma_unsigned #t a b =
match t with
| U1 ->
assert_norm (
logxor (u1 0) (u1 0) == u1 0 /\ logxor (u1 0) (u1 1) == u1 1 /\
logxor (u1 1) (u1 0) == u1 1 /\ logxor (u1 1) (u1 1) == u1 0 /\
lognot (u1 1) == u1 0 /\ lognot (u1 0) == u1 1)
| U8 | U16 | U32 | U64 | U128 -> ()
#push-options "--z3rlimit 200"
val eq_mask_lemma_signed: #t:inttype{signed t /\ ~(S128? t)} -> a:int_t t SEC -> b:int_t t SEC -> Lemma
(if v a = v b then v (eq_mask a b) == ones_v t
else v (eq_mask a b) == 0)
let eq_mask_lemma_signed #t a b =
match t with
| S8 ->
begin
assert_norm (pow2 8 = 2 * pow2 7);
if 0 <= v a then modulo_lemma (v a) (pow2 8)
else
begin
modulo_addition_lemma (v a) 1 (pow2 8);
modulo_lemma (v a + pow2 8) (pow2 8)
end
end
| S16 ->
begin
assert_norm (pow2 16 = 2 * pow2 15);
if 0 <= v a then modulo_lemma (v a) (pow2 16)
else
begin
modulo_addition_lemma (v a) 1 (pow2 16);
modulo_lemma (v a + pow2 16) (pow2 16)
end
end
| S32 ->
begin
if 0 <= v a then modulo_lemma (v a) (pow2 32)
else
begin
modulo_addition_lemma (v a) 1 (pow2 32);
modulo_lemma (v a + pow2 32) (pow2 32)
end
end
| S64 ->
begin
if 0 <= v a then modulo_lemma (v a) (pow2 64)
else
begin
modulo_addition_lemma (v a) 1 (pow2 64);
modulo_lemma (v a + pow2 64) (pow2 64)
end
end
#pop-options | false | false | Lib.IntTypes.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 200,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val eq_mask_lemma: #t:inttype{~(S128? t)} -> a:int_t t SEC -> b:int_t t SEC -> Lemma
(if v a = v b then v (eq_mask a b) == ones_v t
else v (eq_mask a b) == 0)
[SMTPat (eq_mask #t a b)] | [] | Lib.IntTypes.eq_mask_lemma | {
"file_name": "lib/Lib.IntTypes.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | a: Lib.IntTypes.int_t t Lib.IntTypes.SEC -> b: Lib.IntTypes.int_t t Lib.IntTypes.SEC
-> FStar.Pervasives.Lemma
(ensures
((match Lib.IntTypes.v a = Lib.IntTypes.v b with
| true -> Lib.IntTypes.v (Lib.IntTypes.eq_mask a b) == Lib.IntTypes.ones_v t
| _ -> Lib.IntTypes.v (Lib.IntTypes.eq_mask a b) == 0)
<:
Type0)) [SMTPat (Lib.IntTypes.eq_mask a b)] | {
"end_col": 33,
"end_line": 866,
"start_col": 2,
"start_line": 865
} |
Prims.Tot | val zeros: t:inttype -> l:secrecy_level -> n:int_t t l{v n = 0} | [
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let zeros t l = mk_int 0 | val zeros: t:inttype -> l:secrecy_level -> n:int_t t l{v n = 0}
let zeros t l = | false | null | false | mk_int 0 | {
"checked_file": "Lib.IntTypes.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt8.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt16.fsti.checked",
"FStar.UInt128.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int8.fsti.checked",
"FStar.Int64.fsti.checked",
"FStar.Int32.fsti.checked",
"FStar.Int16.fsti.checked",
"FStar.Int128.fsti.checked",
"FStar.Int.Cast.Full.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Int.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "Lib.IntTypes.fst"
} | [
"total"
] | [
"Lib.IntTypes.inttype",
"Lib.IntTypes.secrecy_level",
"Lib.IntTypes.mk_int",
"Lib.IntTypes.int_t",
"Prims.b2t",
"Prims.op_Equality",
"Prims.int",
"Lib.IntTypes.v"
] | [] | module Lib.IntTypes
open FStar.Math.Lemmas
#push-options "--max_fuel 0 --max_ifuel 1 --z3rlimit 200"
let pow2_2 _ = assert_norm (pow2 2 = 4)
let pow2_3 _ = assert_norm (pow2 3 = 8)
let pow2_4 _ = assert_norm (pow2 4 = 16)
let pow2_127 _ = assert_norm (pow2 127 = 0x80000000000000000000000000000000)
let bits_numbytes t = ()
let sec_int_t t = pub_int_t t
let sec_int_v #t u = pub_int_v u
let secret #t x = x
[@(strict_on_arguments [0])]
let mk_int #t #l x =
match t with
| U1 -> UInt8.uint_to_t x
| U8 -> UInt8.uint_to_t x
| U16 -> UInt16.uint_to_t x
| U32 -> UInt32.uint_to_t x
| U64 -> UInt64.uint_to_t x
| U128 -> UInt128.uint_to_t x
| S8 -> Int8.int_to_t x
| S16 -> Int16.int_to_t x
| S32 -> Int32.int_to_t x
| S64 -> Int64.int_to_t x
| S128 -> Int128.int_to_t x
val v_extensionality:
#t:inttype
-> #l:secrecy_level
-> a:int_t t l
-> b:int_t t l
-> Lemma
(requires v a == v b)
(ensures a == b)
let v_extensionality #t #l a b =
match t with
| U1 -> ()
| U8 -> UInt8.v_inj a b
| U16 -> UInt16.v_inj a b
| U32 -> UInt32.v_inj a b
| U64 -> UInt64.v_inj a b
| U128 -> UInt128.v_inj a b
| S8 -> Int8.v_inj a b
| S16 -> Int16.v_inj a b
| S32 -> Int32.v_inj a b
| S64 -> Int64.v_inj a b
| S128 -> Int128.v_inj a b
let v_injective #t #l a =
v_extensionality a (mk_int (v a))
let v_mk_int #t #l n = ()
let u128 n = FStar.UInt128.uint64_to_uint128 (u64 n)
// KaRaMeL will extract this to FStar_Int128_int_to_t, which isn't provided
// We'll need to have FStar.Int128.int64_to_int128 to support int128_t literals
let i128 n =
assert_norm (pow2 (bits S64 - 1) <= pow2 (bits S128 - 1));
sint #S128 #SEC n
let size_to_uint32 x = x
let size_to_uint64 x = Int.Cast.uint32_to_uint64 x
let byte_to_uint8 x = x
let byte_to_int8 x = Int.Cast.uint8_to_int8 x
let op_At_Percent = Int.op_At_Percent
// FStar.UInt128 gets special treatment in KaRaMeL. There is no
// equivalent for FStar.Int128 at the moment, so we use the three
// assumed cast operators below.
//
// Using them will fail at runtime with an informative message.
// The commented-out implementations show that they are realizable.
//
// When support for `FStar.Int128` is added KaRaMeL, these casts must
// be added as special cases. When using builtin compiler support for
// `int128_t`, they can be implemented directly as C casts without
// undefined or implementation-defined behaviour.
assume
val uint128_to_int128: a:UInt128.t{v a <= maxint S128} -> b:Int128.t{Int128.v b == UInt128.v a}
//let uint128_to_int128 a = Int128.int_to_t (v a)
assume
val int128_to_uint128: a:Int128.t -> b:UInt128.t{UInt128.v b == Int128.v a % pow2 128}
//let int128_to_uint128 a = mk_int (v a % pow2 128)
assume
val int64_to_int128: a:Int64.t -> b:Int128.t{Int128.v b == Int64.v a}
//let int64_to_int128 a = Int128.int_to_t (v a)
val uint64_to_int128: a:UInt64.t -> b:Int128.t{Int128.v b == UInt64.v a}
let uint64_to_int128 a = uint128_to_int128 (Int.Cast.Full.uint64_to_uint128 a)
val int64_to_uint128: a:Int64.t -> b:UInt128.t{UInt128.v b == Int64.v a % pow2 128}
let int64_to_uint128 a = int128_to_uint128 (int64_to_int128 a)
val int128_to_uint64: a:Int128.t -> b:UInt64.t{UInt64.v b == Int128.v a % pow2 64}
let int128_to_uint64 a = Int.Cast.Full.uint128_to_uint64 (int128_to_uint128 a)
#push-options "--z3rlimit 1000"
[@(strict_on_arguments [0;2])]
let cast #t #l t' l' u =
assert_norm (pow2 8 = 2 * pow2 7);
assert_norm (pow2 16 = 2 * pow2 15);
assert_norm (pow2 64 * pow2 64 = pow2 128);
assert_norm (pow2 16 * pow2 48 = pow2 64);
assert_norm (pow2 8 * pow2 56 = pow2 64);
assert_norm (pow2 32 * pow2 32 = pow2 64);
modulo_modulo_lemma (v u) (pow2 32) (pow2 32);
modulo_modulo_lemma (v u) (pow2 64) (pow2 64);
modulo_modulo_lemma (v u) (pow2 128) (pow2 64);
modulo_modulo_lemma (v u) (pow2 16) (pow2 48);
modulo_modulo_lemma (v u) (pow2 8) (pow2 56);
let open FStar.Int.Cast in
let open FStar.Int.Cast.Full in
match t, t' with
| U1, U1 -> u
| U1, U8 -> u
| U1, U16 -> uint8_to_uint16 u
| U1, U32 -> uint8_to_uint32 u
| U1, U64 -> uint8_to_uint64 u
| U1, U128 -> UInt128.uint64_to_uint128 (uint8_to_uint64 u)
| U1, S8 -> uint8_to_int8 u
| U1, S16 -> uint8_to_int16 u
| U1, S32 -> uint8_to_int32 u
| U1, S64 -> uint8_to_int64 u
| U1, S128 -> uint64_to_int128 (uint8_to_uint64 u)
| U8, U1 -> UInt8.rem u 2uy
| U8, U8 -> u
| U8, U16 -> uint8_to_uint16 u
| U8, U32 -> uint8_to_uint32 u
| U8, U64 -> uint8_to_uint64 u
| U8, U128 -> UInt128.uint64_to_uint128 (uint8_to_uint64 u)
| U8, S8 -> uint8_to_int8 u
| U8, S16 -> uint8_to_int16 u
| U8, S32 -> uint8_to_int32 u
| U8, S64 -> uint8_to_int64 u
| U8, S128 -> uint64_to_int128 (uint8_to_uint64 u)
| U16, U1 -> UInt8.rem (uint16_to_uint8 u) 2uy
| U16, U8 -> uint16_to_uint8 u
| U16, U16 -> u
| U16, U32 -> uint16_to_uint32 u
| U16, U64 -> uint16_to_uint64 u
| U16, U128 -> UInt128.uint64_to_uint128 (uint16_to_uint64 u)
| U16, S8 -> uint16_to_int8 u
| U16, S16 -> uint16_to_int16 u
| U16, S32 -> uint16_to_int32 u
| U16, S64 -> uint16_to_int64 u
| U16, S128 -> uint64_to_int128 (uint16_to_uint64 u)
| U32, U1 -> UInt8.rem (uint32_to_uint8 u) 2uy
| U32, U8 -> uint32_to_uint8 u
| U32, U16 -> uint32_to_uint16 u
| U32, U32 -> u
| U32, U64 -> uint32_to_uint64 u
| U32, U128 -> UInt128.uint64_to_uint128 (uint32_to_uint64 u)
| U32, S8 -> uint32_to_int8 u
| U32, S16 -> uint32_to_int16 u
| U32, S32 -> uint32_to_int32 u
| U32, S64 -> uint32_to_int64 u
| U32, S128 -> uint64_to_int128 (uint32_to_uint64 u)
| U64, U1 -> UInt8.rem (uint64_to_uint8 u) 2uy
| U64, U8 -> uint64_to_uint8 u
| U64, U16 -> uint64_to_uint16 u
| U64, U32 -> uint64_to_uint32 u
| U64, U64 -> u
| U64, U128 -> UInt128.uint64_to_uint128 u
| U64, S8 -> uint64_to_int8 u
| U64, S16 -> uint64_to_int16 u
| U64, S32 -> uint64_to_int32 u
| U64, S64 -> uint64_to_int64 u
| U64, S128 -> uint64_to_int128 u
| U128, U1 -> UInt8.rem (uint64_to_uint8 (uint128_to_uint64 u)) 2uy
| U128, U8 -> uint64_to_uint8 (UInt128.uint128_to_uint64 u)
| U128, U16 -> uint64_to_uint16 (UInt128.uint128_to_uint64 u)
| U128, U32 -> uint64_to_uint32 (UInt128.uint128_to_uint64 u)
| U128, U64 -> UInt128.uint128_to_uint64 u
| U128, U128 -> u
| U128, S8 -> uint64_to_int8 (UInt128.uint128_to_uint64 u)
| U128, S16 -> uint64_to_int16 (UInt128.uint128_to_uint64 u)
| U128, S32 -> uint64_to_int32 (UInt128.uint128_to_uint64 u)
| U128, S64 -> uint64_to_int64 (UInt128.uint128_to_uint64 u)
| U128, S128 -> uint128_to_int128 u
| S8, U1 -> UInt8.rem (int8_to_uint8 u) 2uy
| S8, U8 -> int8_to_uint8 u
| S8, U16 -> int8_to_uint16 u
| S8, U32 -> int8_to_uint32 u
| S8, U64 -> int8_to_uint64 u
| S8, U128 -> int64_to_uint128 (int8_to_int64 u)
| S8, S8 -> u
| S8, S16 -> int8_to_int16 u
| S8, S32 -> int8_to_int32 u
| S8, S64 -> int8_to_int64 u
| S8, S128 -> int64_to_int128 (int8_to_int64 u)
| S16, U1 -> UInt8.rem (int16_to_uint8 u) 2uy
| S16, U8 -> int16_to_uint8 u
| S16, U16 -> int16_to_uint16 u
| S16, U32 -> int16_to_uint32 u
| S16, U64 -> int16_to_uint64 u
| S16, U128 -> int64_to_uint128 (int16_to_int64 u)
| S16, S8 -> int16_to_int8 u
| S16, S16 -> u
| S16, S32 -> int16_to_int32 u
| S16, S64 -> int16_to_int64 u
| S16, S128 -> int64_to_int128 (int16_to_int64 u)
| S32, U1 -> UInt8.rem (int32_to_uint8 u) 2uy
| S32, U8 -> int32_to_uint8 u
| S32, U16 -> int32_to_uint16 u
| S32, U32 -> int32_to_uint32 u
| S32, U64 -> int32_to_uint64 u
| S32, U128 -> int64_to_uint128 (int32_to_int64 u)
| S32, S8 -> int32_to_int8 u
| S32, S16 -> int32_to_int16 u
| S32, S32 -> u
| S32, S64 -> int32_to_int64 u
| S32, S128 -> int64_to_int128 (int32_to_int64 u)
| S64, U1 -> UInt8.rem (int64_to_uint8 u) 2uy
| S64, U8 -> int64_to_uint8 u
| S64, U16 -> int64_to_uint16 u
| S64, U32 -> int64_to_uint32 u
| S64, U64 -> int64_to_uint64 u
| S64, U128 -> int64_to_uint128 u
| S64, S8 -> int64_to_int8 u
| S64, S16 -> int64_to_int16 u
| S64, S32 -> int64_to_int32 u
| S64, S64 -> u
| S64, S128 -> int64_to_int128 u
| S128, U1 -> UInt8.rem (uint64_to_uint8 (int128_to_uint64 u)) 2uy
| S128, U8 -> uint64_to_uint8 (int128_to_uint64 u)
| S128, U16 -> uint64_to_uint16 (int128_to_uint64 u)
| S128, U32 -> uint64_to_uint32 (int128_to_uint64 u)
| S128, U64 -> int128_to_uint64 u
| S128, U128 -> int128_to_uint128 u
| S128, S8 -> uint64_to_int8 (int128_to_uint64 u)
| S128, S16 -> uint64_to_int16 (int128_to_uint64 u)
| S128, S32 -> uint64_to_int32 (int128_to_uint64 u)
| S128, S64 -> uint64_to_int64 (int128_to_uint64 u)
| S128, S128 -> u
#pop-options
[@(strict_on_arguments [0])]
let ones t l =
match t with
| U1 -> 0x1uy
| U8 -> 0xFFuy
| U16 -> 0xFFFFus
| U32 -> 0xFFFFFFFFul
| U64 -> 0xFFFFFFFFFFFFFFFFuL
| U128 ->
let x = UInt128.uint64_to_uint128 0xFFFFFFFFFFFFFFFFuL in
let y = (UInt128.shift_left x 64ul) `UInt128.add` x in
assert_norm (UInt128.v y == pow2 128 - 1);
y
| _ -> mk_int (-1) | false | false | Lib.IntTypes.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 200,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val zeros: t:inttype -> l:secrecy_level -> n:int_t t l{v n = 0} | [] | Lib.IntTypes.zeros | {
"file_name": "lib/Lib.IntTypes.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | t: Lib.IntTypes.inttype -> l: Lib.IntTypes.secrecy_level
-> n: Lib.IntTypes.int_t t l {Lib.IntTypes.v n = 0} | {
"end_col": 24,
"end_line": 280,
"start_col": 16,
"start_line": 280
} |
Prims.Tot | val eq: #t:inttype -> int_t t PUB -> int_t t PUB -> bool | [
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let eq #t x y =
x = y | val eq: #t:inttype -> int_t t PUB -> int_t t PUB -> bool
let eq #t x y = | false | null | false | x = y | {
"checked_file": "Lib.IntTypes.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt8.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt16.fsti.checked",
"FStar.UInt128.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int8.fsti.checked",
"FStar.Int64.fsti.checked",
"FStar.Int32.fsti.checked",
"FStar.Int16.fsti.checked",
"FStar.Int128.fsti.checked",
"FStar.Int.Cast.Full.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Int.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "Lib.IntTypes.fst"
} | [
"total"
] | [
"Lib.IntTypes.inttype",
"Lib.IntTypes.int_t",
"Lib.IntTypes.PUB",
"Prims.op_Equality",
"Prims.bool"
] | [] | module Lib.IntTypes
open FStar.Math.Lemmas
#push-options "--max_fuel 0 --max_ifuel 1 --z3rlimit 200"
let pow2_2 _ = assert_norm (pow2 2 = 4)
let pow2_3 _ = assert_norm (pow2 3 = 8)
let pow2_4 _ = assert_norm (pow2 4 = 16)
let pow2_127 _ = assert_norm (pow2 127 = 0x80000000000000000000000000000000)
let bits_numbytes t = ()
let sec_int_t t = pub_int_t t
let sec_int_v #t u = pub_int_v u
let secret #t x = x
[@(strict_on_arguments [0])]
let mk_int #t #l x =
match t with
| U1 -> UInt8.uint_to_t x
| U8 -> UInt8.uint_to_t x
| U16 -> UInt16.uint_to_t x
| U32 -> UInt32.uint_to_t x
| U64 -> UInt64.uint_to_t x
| U128 -> UInt128.uint_to_t x
| S8 -> Int8.int_to_t x
| S16 -> Int16.int_to_t x
| S32 -> Int32.int_to_t x
| S64 -> Int64.int_to_t x
| S128 -> Int128.int_to_t x
val v_extensionality:
#t:inttype
-> #l:secrecy_level
-> a:int_t t l
-> b:int_t t l
-> Lemma
(requires v a == v b)
(ensures a == b)
let v_extensionality #t #l a b =
match t with
| U1 -> ()
| U8 -> UInt8.v_inj a b
| U16 -> UInt16.v_inj a b
| U32 -> UInt32.v_inj a b
| U64 -> UInt64.v_inj a b
| U128 -> UInt128.v_inj a b
| S8 -> Int8.v_inj a b
| S16 -> Int16.v_inj a b
| S32 -> Int32.v_inj a b
| S64 -> Int64.v_inj a b
| S128 -> Int128.v_inj a b
let v_injective #t #l a =
v_extensionality a (mk_int (v a))
let v_mk_int #t #l n = ()
let u128 n = FStar.UInt128.uint64_to_uint128 (u64 n)
// KaRaMeL will extract this to FStar_Int128_int_to_t, which isn't provided
// We'll need to have FStar.Int128.int64_to_int128 to support int128_t literals
let i128 n =
assert_norm (pow2 (bits S64 - 1) <= pow2 (bits S128 - 1));
sint #S128 #SEC n
let size_to_uint32 x = x
let size_to_uint64 x = Int.Cast.uint32_to_uint64 x
let byte_to_uint8 x = x
let byte_to_int8 x = Int.Cast.uint8_to_int8 x
let op_At_Percent = Int.op_At_Percent
// FStar.UInt128 gets special treatment in KaRaMeL. There is no
// equivalent for FStar.Int128 at the moment, so we use the three
// assumed cast operators below.
//
// Using them will fail at runtime with an informative message.
// The commented-out implementations show that they are realizable.
//
// When support for `FStar.Int128` is added KaRaMeL, these casts must
// be added as special cases. When using builtin compiler support for
// `int128_t`, they can be implemented directly as C casts without
// undefined or implementation-defined behaviour.
assume
val uint128_to_int128: a:UInt128.t{v a <= maxint S128} -> b:Int128.t{Int128.v b == UInt128.v a}
//let uint128_to_int128 a = Int128.int_to_t (v a)
assume
val int128_to_uint128: a:Int128.t -> b:UInt128.t{UInt128.v b == Int128.v a % pow2 128}
//let int128_to_uint128 a = mk_int (v a % pow2 128)
assume
val int64_to_int128: a:Int64.t -> b:Int128.t{Int128.v b == Int64.v a}
//let int64_to_int128 a = Int128.int_to_t (v a)
val uint64_to_int128: a:UInt64.t -> b:Int128.t{Int128.v b == UInt64.v a}
let uint64_to_int128 a = uint128_to_int128 (Int.Cast.Full.uint64_to_uint128 a)
val int64_to_uint128: a:Int64.t -> b:UInt128.t{UInt128.v b == Int64.v a % pow2 128}
let int64_to_uint128 a = int128_to_uint128 (int64_to_int128 a)
val int128_to_uint64: a:Int128.t -> b:UInt64.t{UInt64.v b == Int128.v a % pow2 64}
let int128_to_uint64 a = Int.Cast.Full.uint128_to_uint64 (int128_to_uint128 a)
#push-options "--z3rlimit 1000"
[@(strict_on_arguments [0;2])]
let cast #t #l t' l' u =
assert_norm (pow2 8 = 2 * pow2 7);
assert_norm (pow2 16 = 2 * pow2 15);
assert_norm (pow2 64 * pow2 64 = pow2 128);
assert_norm (pow2 16 * pow2 48 = pow2 64);
assert_norm (pow2 8 * pow2 56 = pow2 64);
assert_norm (pow2 32 * pow2 32 = pow2 64);
modulo_modulo_lemma (v u) (pow2 32) (pow2 32);
modulo_modulo_lemma (v u) (pow2 64) (pow2 64);
modulo_modulo_lemma (v u) (pow2 128) (pow2 64);
modulo_modulo_lemma (v u) (pow2 16) (pow2 48);
modulo_modulo_lemma (v u) (pow2 8) (pow2 56);
let open FStar.Int.Cast in
let open FStar.Int.Cast.Full in
match t, t' with
| U1, U1 -> u
| U1, U8 -> u
| U1, U16 -> uint8_to_uint16 u
| U1, U32 -> uint8_to_uint32 u
| U1, U64 -> uint8_to_uint64 u
| U1, U128 -> UInt128.uint64_to_uint128 (uint8_to_uint64 u)
| U1, S8 -> uint8_to_int8 u
| U1, S16 -> uint8_to_int16 u
| U1, S32 -> uint8_to_int32 u
| U1, S64 -> uint8_to_int64 u
| U1, S128 -> uint64_to_int128 (uint8_to_uint64 u)
| U8, U1 -> UInt8.rem u 2uy
| U8, U8 -> u
| U8, U16 -> uint8_to_uint16 u
| U8, U32 -> uint8_to_uint32 u
| U8, U64 -> uint8_to_uint64 u
| U8, U128 -> UInt128.uint64_to_uint128 (uint8_to_uint64 u)
| U8, S8 -> uint8_to_int8 u
| U8, S16 -> uint8_to_int16 u
| U8, S32 -> uint8_to_int32 u
| U8, S64 -> uint8_to_int64 u
| U8, S128 -> uint64_to_int128 (uint8_to_uint64 u)
| U16, U1 -> UInt8.rem (uint16_to_uint8 u) 2uy
| U16, U8 -> uint16_to_uint8 u
| U16, U16 -> u
| U16, U32 -> uint16_to_uint32 u
| U16, U64 -> uint16_to_uint64 u
| U16, U128 -> UInt128.uint64_to_uint128 (uint16_to_uint64 u)
| U16, S8 -> uint16_to_int8 u
| U16, S16 -> uint16_to_int16 u
| U16, S32 -> uint16_to_int32 u
| U16, S64 -> uint16_to_int64 u
| U16, S128 -> uint64_to_int128 (uint16_to_uint64 u)
| U32, U1 -> UInt8.rem (uint32_to_uint8 u) 2uy
| U32, U8 -> uint32_to_uint8 u
| U32, U16 -> uint32_to_uint16 u
| U32, U32 -> u
| U32, U64 -> uint32_to_uint64 u
| U32, U128 -> UInt128.uint64_to_uint128 (uint32_to_uint64 u)
| U32, S8 -> uint32_to_int8 u
| U32, S16 -> uint32_to_int16 u
| U32, S32 -> uint32_to_int32 u
| U32, S64 -> uint32_to_int64 u
| U32, S128 -> uint64_to_int128 (uint32_to_uint64 u)
| U64, U1 -> UInt8.rem (uint64_to_uint8 u) 2uy
| U64, U8 -> uint64_to_uint8 u
| U64, U16 -> uint64_to_uint16 u
| U64, U32 -> uint64_to_uint32 u
| U64, U64 -> u
| U64, U128 -> UInt128.uint64_to_uint128 u
| U64, S8 -> uint64_to_int8 u
| U64, S16 -> uint64_to_int16 u
| U64, S32 -> uint64_to_int32 u
| U64, S64 -> uint64_to_int64 u
| U64, S128 -> uint64_to_int128 u
| U128, U1 -> UInt8.rem (uint64_to_uint8 (uint128_to_uint64 u)) 2uy
| U128, U8 -> uint64_to_uint8 (UInt128.uint128_to_uint64 u)
| U128, U16 -> uint64_to_uint16 (UInt128.uint128_to_uint64 u)
| U128, U32 -> uint64_to_uint32 (UInt128.uint128_to_uint64 u)
| U128, U64 -> UInt128.uint128_to_uint64 u
| U128, U128 -> u
| U128, S8 -> uint64_to_int8 (UInt128.uint128_to_uint64 u)
| U128, S16 -> uint64_to_int16 (UInt128.uint128_to_uint64 u)
| U128, S32 -> uint64_to_int32 (UInt128.uint128_to_uint64 u)
| U128, S64 -> uint64_to_int64 (UInt128.uint128_to_uint64 u)
| U128, S128 -> uint128_to_int128 u
| S8, U1 -> UInt8.rem (int8_to_uint8 u) 2uy
| S8, U8 -> int8_to_uint8 u
| S8, U16 -> int8_to_uint16 u
| S8, U32 -> int8_to_uint32 u
| S8, U64 -> int8_to_uint64 u
| S8, U128 -> int64_to_uint128 (int8_to_int64 u)
| S8, S8 -> u
| S8, S16 -> int8_to_int16 u
| S8, S32 -> int8_to_int32 u
| S8, S64 -> int8_to_int64 u
| S8, S128 -> int64_to_int128 (int8_to_int64 u)
| S16, U1 -> UInt8.rem (int16_to_uint8 u) 2uy
| S16, U8 -> int16_to_uint8 u
| S16, U16 -> int16_to_uint16 u
| S16, U32 -> int16_to_uint32 u
| S16, U64 -> int16_to_uint64 u
| S16, U128 -> int64_to_uint128 (int16_to_int64 u)
| S16, S8 -> int16_to_int8 u
| S16, S16 -> u
| S16, S32 -> int16_to_int32 u
| S16, S64 -> int16_to_int64 u
| S16, S128 -> int64_to_int128 (int16_to_int64 u)
| S32, U1 -> UInt8.rem (int32_to_uint8 u) 2uy
| S32, U8 -> int32_to_uint8 u
| S32, U16 -> int32_to_uint16 u
| S32, U32 -> int32_to_uint32 u
| S32, U64 -> int32_to_uint64 u
| S32, U128 -> int64_to_uint128 (int32_to_int64 u)
| S32, S8 -> int32_to_int8 u
| S32, S16 -> int32_to_int16 u
| S32, S32 -> u
| S32, S64 -> int32_to_int64 u
| S32, S128 -> int64_to_int128 (int32_to_int64 u)
| S64, U1 -> UInt8.rem (int64_to_uint8 u) 2uy
| S64, U8 -> int64_to_uint8 u
| S64, U16 -> int64_to_uint16 u
| S64, U32 -> int64_to_uint32 u
| S64, U64 -> int64_to_uint64 u
| S64, U128 -> int64_to_uint128 u
| S64, S8 -> int64_to_int8 u
| S64, S16 -> int64_to_int16 u
| S64, S32 -> int64_to_int32 u
| S64, S64 -> u
| S64, S128 -> int64_to_int128 u
| S128, U1 -> UInt8.rem (uint64_to_uint8 (int128_to_uint64 u)) 2uy
| S128, U8 -> uint64_to_uint8 (int128_to_uint64 u)
| S128, U16 -> uint64_to_uint16 (int128_to_uint64 u)
| S128, U32 -> uint64_to_uint32 (int128_to_uint64 u)
| S128, U64 -> int128_to_uint64 u
| S128, U128 -> int128_to_uint128 u
| S128, S8 -> uint64_to_int8 (int128_to_uint64 u)
| S128, S16 -> uint64_to_int16 (int128_to_uint64 u)
| S128, S32 -> uint64_to_int32 (int128_to_uint64 u)
| S128, S64 -> uint64_to_int64 (int128_to_uint64 u)
| S128, S128 -> u
#pop-options
[@(strict_on_arguments [0])]
let ones t l =
match t with
| U1 -> 0x1uy
| U8 -> 0xFFuy
| U16 -> 0xFFFFus
| U32 -> 0xFFFFFFFFul
| U64 -> 0xFFFFFFFFFFFFFFFFuL
| U128 ->
let x = UInt128.uint64_to_uint128 0xFFFFFFFFFFFFFFFFuL in
let y = (UInt128.shift_left x 64ul) `UInt128.add` x in
assert_norm (UInt128.v y == pow2 128 - 1);
y
| _ -> mk_int (-1)
let zeros t l = mk_int 0
[@(strict_on_arguments [0])]
let add_mod #t #l a b =
match t with
| U1 -> UInt8.rem (UInt8.add_mod a b) 2uy
| U8 -> UInt8.add_mod a b
| U16 -> UInt16.add_mod a b
| U32 -> UInt32.add_mod a b
| U64 -> UInt64.add_mod a b
| U128 -> UInt128.add_mod a b
let add_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let add #t #l a b =
match t with
| U1 -> UInt8.add a b
| U8 -> UInt8.add a b
| U16 -> UInt16.add a b
| U32 -> UInt32.add a b
| U64 -> UInt64.add a b
| U128 -> UInt128.add a b
| S8 -> Int8.add a b
| S16 -> Int16.add a b
| S32 -> Int32.add a b
| S64 -> Int64.add a b
| S128 -> Int128.add a b
let add_lemma #t #l a b = ()
#push-options "--max_fuel 1"
[@(strict_on_arguments [0])]
let incr #t #l a =
match t with
| U1 -> UInt8.add a 1uy
| U8 -> UInt8.add a 1uy
| U16 -> UInt16.add a 1us
| U32 -> UInt32.add a 1ul
| U64 -> UInt64.add a 1uL
| U128 -> UInt128.add a (UInt128.uint_to_t 1)
| S8 -> Int8.add a 1y
| S16 -> Int16.add a 1s
| S32 -> Int32.add a 1l
| S64 -> Int64.add a 1L
| S128 -> Int128.add a (Int128.int_to_t 1)
let incr_lemma #t #l a = ()
#pop-options
[@(strict_on_arguments [0])]
let mul_mod #t #l a b =
match t with
| U1 -> UInt8.mul_mod a b
| U8 -> UInt8.mul_mod a b
| U16 -> UInt16.mul_mod a b
| U32 -> UInt32.mul_mod a b
| U64 -> UInt64.mul_mod a b
let mul_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let mul #t #l a b =
match t with
| U1 -> UInt8.mul a b
| U8 -> UInt8.mul a b
| U16 -> UInt16.mul a b
| U32 -> UInt32.mul a b
| U64 -> UInt64.mul a b
| S8 -> Int8.mul a b
| S16 -> Int16.mul a b
| S32 -> Int32.mul a b
| S64 -> Int64.mul a b
let mul_lemma #t #l a b = ()
let mul64_wide a b = UInt128.mul_wide a b
let mul64_wide_lemma a b = ()
let mul_s64_wide a b = Int128.mul_wide a b
let mul_s64_wide_lemma a b = ()
[@(strict_on_arguments [0])]
let sub_mod #t #l a b =
match t with
| U1 -> UInt8.rem (UInt8.sub_mod a b) 2uy
| U8 -> UInt8.sub_mod a b
| U16 -> UInt16.sub_mod a b
| U32 -> UInt32.sub_mod a b
| U64 -> UInt64.sub_mod a b
| U128 -> UInt128.sub_mod a b
let sub_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let sub #t #l a b =
match t with
| U1 -> UInt8.sub a b
| U8 -> UInt8.sub a b
| U16 -> UInt16.sub a b
| U32 -> UInt32.sub a b
| U64 -> UInt64.sub a b
| U128 -> UInt128.sub a b
| S8 -> Int8.sub a b
| S16 -> Int16.sub a b
| S32 -> Int32.sub a b
| S64 -> Int64.sub a b
| S128 -> Int128.sub a b
let sub_lemma #t #l a b = ()
#push-options "--max_fuel 1"
[@(strict_on_arguments [0])]
let decr #t #l a =
match t with
| U1 -> UInt8.sub a 1uy
| U8 -> UInt8.sub a 1uy
| U16 -> UInt16.sub a 1us
| U32 -> UInt32.sub a 1ul
| U64 -> UInt64.sub a 1uL
| U128 -> UInt128.sub a (UInt128.uint_to_t 1)
| S8 -> Int8.sub a 1y
| S16 -> Int16.sub a 1s
| S32 -> Int32.sub a 1l
| S64 -> Int64.sub a 1L
| S128 -> Int128.sub a (Int128.int_to_t 1)
let decr_lemma #t #l a = ()
#pop-options
[@(strict_on_arguments [0])]
let logxor #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logxor 0uy 0uy == 0uy);
assert_norm (UInt8.logxor 0uy 1uy == 1uy);
assert_norm (UInt8.logxor 1uy 0uy == 1uy);
assert_norm (UInt8.logxor 1uy 1uy == 0uy);
UInt8.logxor a b
| U8 -> UInt8.logxor a b
| U16 -> UInt16.logxor a b
| U32 -> UInt32.logxor a b
| U64 -> UInt64.logxor a b
| U128 -> UInt128.logxor a b
| S8 -> Int8.logxor a b
| S16 -> Int16.logxor a b
| S32 -> Int32.logxor a b
| S64 -> Int64.logxor a b
| S128 -> Int128.logxor a b
#push-options "--max_fuel 1"
val logxor_lemma_: #t:inttype -> #l:secrecy_level -> a:int_t t l -> b:int_t t l -> Lemma
(v (a `logxor` (a `logxor` b)) == v b)
let logxor_lemma_ #t #l a b =
match t with
| U1 | U8 | U16 | U32 | U64 | U128 ->
UInt.logxor_associative #(bits t) (v a) (v a) (v b);
UInt.logxor_self #(bits t) (v a);
UInt.logxor_commutative #(bits t) 0 (v b);
UInt.logxor_lemma_1 #(bits t) (v b)
| S8 | S16 | S32 | S64 | S128 ->
Int.logxor_associative #(bits t) (v a) (v a) (v b);
Int.logxor_self #(bits t) (v a);
Int.logxor_commutative #(bits t) 0 (v b);
Int.logxor_lemma_1 #(bits t) (v b)
let logxor_lemma #t #l a b =
logxor_lemma_ #t a b;
v_extensionality (logxor a (logxor a b)) b;
begin
match t with
| U1 | U8 | U16 | U32 | U64 | U128 -> UInt.logxor_commutative #(bits t) (v a) (v b)
| S8 | S16 | S32 | S64 | S128 -> Int.logxor_commutative #(bits t) (v a) (v b)
end;
v_extensionality (logxor a (logxor b a)) b;
begin
match t with
| U1 | U8 | U16 | U32 | U64 | U128 -> UInt.logxor_lemma_1 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.logxor_lemma_1 #(bits t) (v a)
end;
v_extensionality (logxor a (mk_int #t #l 0)) a
let logxor_lemma1 #t #l a b =
match v a, v b with
| _, 0 ->
UInt.logxor_lemma_1 #(bits t) (v a)
| 0, _ ->
UInt.logxor_commutative #(bits t) (v a) (v b);
UInt.logxor_lemma_1 #(bits t) (v b)
| 1, 1 ->
v_extensionality a b;
UInt.logxor_self #(bits t) (v a)
let logxor_spec #t #l a b =
match t with
| U1 ->
assert_norm (u1 0 `logxor` u1 0 == u1 0 /\ u1 0 `logxor` u1 1 == u1 1);
assert_norm (u1 1 `logxor` u1 0 == u1 1 /\ u1 1 `logxor` u1 1 == u1 0);
assert_norm (0 `logxor_v #U1` 0 == 0 /\ 0 `logxor_v #U1` 1 == 1);
assert_norm (1 `logxor_v #U1` 0 == 1 /\ 1 `logxor_v #U1` 1 == 0)
| _ -> ()
#pop-options
[@(strict_on_arguments [0])]
let logand #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logand 0uy 0uy == 0uy);
assert_norm (UInt8.logand 0uy 1uy == 0uy);
assert_norm (UInt8.logand 1uy 0uy == 0uy);
assert_norm (UInt8.logand 1uy 1uy == 1uy);
UInt8.logand a b
| U8 -> UInt8.logand a b
| U16 -> UInt16.logand a b
| U32 -> UInt32.logand a b
| U64 -> UInt64.logand a b
| U128 -> UInt128.logand a b
| S8 -> Int8.logand a b
| S16 -> Int16.logand a b
| S32 -> Int32.logand a b
| S64 -> Int64.logand a b
| S128 -> Int128.logand a b
let logand_zeros #t #l a =
match t with
| U1 -> assert_norm (u1 0 `logand` zeros U1 l == u1 0 /\ u1 1 `logand` zeros U1 l == u1 0)
| U8 | U16 | U32 | U64 | U128 -> UInt.logand_lemma_1 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.logand_lemma_1 #(bits t) (v a)
let logand_ones #t #l a =
match t with
| U1 -> assert_norm (u1 0 `logand` ones U1 l == u1 0 /\ u1 1 `logand` ones U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logand_lemma_2 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.logand_lemma_2 #(bits t) (v a)
let logand_lemma #t #l a b =
logand_zeros #t #l b;
logand_ones #t #l b;
match t with
| U1 ->
assert_norm (u1 0 `logand` zeros U1 l == u1 0 /\ u1 1 `logand` zeros U1 l == u1 0);
assert_norm (u1 0 `logand` ones U1 l == u1 0 /\ u1 1 `logand` ones U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logand_commutative #(bits t) (v a) (v b)
| S8 | S16 | S32 | S64 | S128 -> Int.logand_commutative #(bits t) (v a) (v b)
let logand_spec #t #l a b =
match t with
| U1 ->
assert_norm (u1 0 `logand` u1 0 == u1 0 /\ u1 0 `logand` u1 1 == u1 0);
assert_norm (u1 1 `logand` u1 0 == u1 0 /\ u1 1 `logand` u1 1 == u1 1);
assert_norm (0 `logand_v #U1` 0 == 0 /\ 0 `logand_v #U1` 1 == 0);
assert_norm (1 `logand_v #U1` 0 == 0 /\ 1 `logand_v #U1` 1 == 1)
| _ -> ()
let logand_le #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logand 0uy 0uy == 0uy);
assert_norm (UInt8.logand 0uy 1uy == 0uy);
assert_norm (UInt8.logand 1uy 0uy == 0uy);
assert_norm (UInt8.logand 1uy 1uy == 1uy)
| U8 -> UInt.logand_le (UInt.to_uint_t 8 (v a)) (UInt.to_uint_t 8 (v b))
| U16 -> UInt.logand_le (UInt.to_uint_t 16 (v a)) (UInt.to_uint_t 16 (v b))
| U32 -> UInt.logand_le (UInt.to_uint_t 32 (v a)) (UInt.to_uint_t 32 (v b))
| U64 -> UInt.logand_le (UInt.to_uint_t 64 (v a)) (UInt.to_uint_t 64 (v b))
| U128 -> UInt.logand_le (UInt.to_uint_t 128 (v a)) (UInt.to_uint_t 128 (v b))
let logand_mask #t #l a b m =
match t with
| U1 ->
assert_norm (UInt8.logand 0uy 0uy == 0uy);
assert_norm (UInt8.logand 0uy 1uy == 0uy);
assert_norm (UInt8.logand 1uy 0uy == 0uy);
assert_norm (UInt8.logand 1uy 1uy == 1uy)
| U8 -> UInt.logand_mask (UInt.to_uint_t 8 (v a)) m
| U16 -> UInt.logand_mask (UInt.to_uint_t 16 (v a)) m
| U32 -> UInt.logand_mask (UInt.to_uint_t 32 (v a)) m
| U64 -> UInt.logand_mask (UInt.to_uint_t 64 (v a)) m
| U128 -> UInt.logand_mask (UInt.to_uint_t 128 (v a)) m
[@(strict_on_arguments [0])]
let logor #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logor 0uy 0uy == 0uy);
assert_norm (UInt8.logor 0uy 1uy == 1uy);
assert_norm (UInt8.logor 1uy 0uy == 1uy);
assert_norm (UInt8.logor 1uy 1uy == 1uy);
UInt8.logor a b
| U8 -> UInt8.logor a b
| U16 -> UInt16.logor a b
| U32 -> UInt32.logor a b
| U64 -> UInt64.logor a b
| U128 -> UInt128.logor a b
| S8 -> Int8.logor a b
| S16 -> Int16.logor a b
| S32 -> Int32.logor a b
| S64 -> Int64.logor a b
| S128 -> Int128.logor a b
#push-options "--max_fuel 1"
let logor_disjoint #t #l a b m =
if m > 0 then
begin
UInt.logor_disjoint #(bits t) (v b) (v a) m;
UInt.logor_commutative #(bits t) (v b) (v a)
end
else
begin
UInt.logor_commutative #(bits t) (v a) (v b);
UInt.logor_lemma_1 #(bits t) (v b)
end
#pop-options
let logor_zeros #t #l a =
match t with
|U1 -> assert_norm(u1 0 `logor` zeros U1 l == u1 0 /\ u1 1 `logor` zeros U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logor_lemma_1 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.nth_lemma #(bits t) (Int.logor #(bits t) (v a) (Int.zero (bits t))) (v a)
let logor_ones #t #l a =
match t with
|U1 -> assert_norm(u1 0 `logor` ones U1 l == u1 1 /\ u1 1 `logor` ones U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logor_lemma_2 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.nth_lemma (Int.logor #(bits t) (v a) (Int.ones (bits t))) (Int.ones (bits t))
let logor_lemma #t #l a b =
logor_zeros #t #l b;
logor_ones #t #l b;
match t with
| U1 ->
assert_norm(u1 0 `logor` ones U1 l == u1 1 /\ u1 1 `logor` ones U1 l == u1 1);
assert_norm(u1 0 `logor` zeros U1 l == u1 0 /\ u1 1 `logor` zeros U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logor_commutative #(bits t) (v a) (v b)
| S8 | S16 | S32 | S64 | S128 -> Int.nth_lemma #(bits t) (Int.logor #(bits t) (v a) (v b)) (Int.logor #(bits t) (v b) (v a))
let logor_spec #t #l a b =
match t with
| U1 ->
assert_norm(u1 0 `logor` ones U1 l == u1 1 /\ u1 1 `logor` ones U1 l == u1 1);
assert_norm(u1 0 `logor` zeros U1 l == u1 0 /\ u1 1 `logor` zeros U1 l == u1 1);
assert_norm (0 `logor_v #U1` 0 == 0 /\ 0 `logor_v #U1` 1 == 1);
assert_norm (1 `logor_v #U1` 0 == 1 /\ 1 `logor_v #U1` 1 == 1)
| _ -> ()
[@(strict_on_arguments [0])]
let lognot #t #l a =
match t with
| U1 -> UInt8.rem (UInt8.lognot a) 2uy
| U8 -> UInt8.lognot a
| U16 -> UInt16.lognot a
| U32 -> UInt32.lognot a
| U64 -> UInt64.lognot a
| U128 -> UInt128.lognot a
| S8 -> Int8.lognot a
| S16 -> Int16.lognot a
| S32 -> Int32.lognot a
| S64 -> Int64.lognot a
| S128 -> Int128.lognot a
let lognot_lemma #t #l a =
match t with
|U1 -> assert_norm(lognot (u1 0) == u1 1 /\ lognot (u1 1) == u1 0)
| U8 | U16 | U32 | U64 | U128 ->
FStar.UInt.lognot_lemma_1 #(bits t);
UInt.nth_lemma (FStar.UInt.lognot #(bits t) (UInt.ones (bits t))) (UInt.zero (bits t))
| S8 | S16 | S32 | S64 | S128 ->
Int.nth_lemma (FStar.Int.lognot #(bits t) (Int.zero (bits t))) (Int.ones (bits t));
Int.nth_lemma (FStar.Int.lognot #(bits t) (Int.ones (bits t))) (Int.zero (bits t))
let lognot_spec #t #l a =
match t with
| U1 ->
assert_norm(lognot (u1 0) == u1 1 /\ lognot (u1 1) == u1 0);
assert_norm(lognot_v #U1 0 == 1 /\ lognot_v #U1 1 == 0)
| _ -> ()
[@(strict_on_arguments [0])]
let shift_right #t #l a b =
match t with
| U1 -> UInt8.shift_right a b
| U8 -> UInt8.shift_right a b
| U16 -> UInt16.shift_right a b
| U32 -> UInt32.shift_right a b
| U64 -> UInt64.shift_right a b
| U128 -> UInt128.shift_right a b
| S8 -> Int8.shift_arithmetic_right a b
| S16 -> Int16.shift_arithmetic_right a b
| S32 -> Int32.shift_arithmetic_right a b
| S64 -> Int64.shift_arithmetic_right a b
| S128 -> Int128.shift_arithmetic_right a b
val shift_right_value_aux_1: #n:pos{1 < n} -> a:Int.int_t n -> s:nat{n <= s} ->
Lemma (Int.shift_arithmetic_right #n a s = a / pow2 s)
let shift_right_value_aux_1 #n a s =
pow2_le_compat s n;
if a >= 0 then Int.sign_bit_positive a else Int.sign_bit_negative a
#push-options "--z3rlimit 200"
val shift_right_value_aux_2: #n:pos{1 < n} -> a:Int.int_t n ->
Lemma (Int.shift_arithmetic_right #n a 1 = a / 2)
let shift_right_value_aux_2 #n a =
if a >= 0 then
begin
Int.sign_bit_positive a;
UInt.shift_right_value_aux_3 #n a 1
end
else
begin
Int.sign_bit_negative a;
let a1 = Int.to_vec a in
let au = Int.to_uint a in
let sar = Int.shift_arithmetic_right #n a 1 in
let sar1 = Int.to_vec sar in
let sr = UInt.shift_right #n au 1 in
let sr1 = UInt.to_vec sr in
assert (Seq.equal (Seq.slice sar1 1 n) (Seq.slice sr1 1 n));
assert (Seq.equal sar1 (Seq.append (BitVector.ones_vec #1) (Seq.slice sr1 1 n)));
UInt.append_lemma #1 #(n-1) (BitVector.ones_vec #1) (Seq.slice sr1 1 n);
assert (Seq.equal (Seq.slice a1 0 (n-1)) (Seq.slice sar1 1 n));
UInt.slice_left_lemma a1 (n-1);
assert (sar + pow2 n = pow2 (n-1) + (au / 2));
pow2_double_sum (n-1);
assert (sar + pow2 (n-1) = (a + pow2 n) / 2);
pow2_double_mult (n-1);
lemma_div_plus a (pow2 (n-1)) 2;
assert (sar = a / 2)
end
val shift_right_value_aux_3: #n:pos -> a:Int.int_t n -> s:pos{s < n} ->
Lemma (ensures Int.shift_arithmetic_right #n a s = a / pow2 s)
(decreases s)
let rec shift_right_value_aux_3 #n a s =
if s = 1 then
shift_right_value_aux_2 #n a
else
begin
let a1 = Int.to_vec a in
assert (Seq.equal (BitVector.shift_arithmetic_right_vec #n a1 s)
(BitVector.shift_arithmetic_right_vec #n
(BitVector.shift_arithmetic_right_vec #n a1 (s-1)) 1));
assert (Int.shift_arithmetic_right #n a s =
Int.shift_arithmetic_right #n (Int.shift_arithmetic_right #n a (s-1)) 1);
shift_right_value_aux_3 #n a (s-1);
shift_right_value_aux_2 #n (Int.shift_arithmetic_right #n a (s-1));
assert (Int.shift_arithmetic_right #n a s = (a / pow2 (s-1)) / 2);
pow2_double_mult (s-1);
division_multiplication_lemma a (pow2 (s-1)) 2
end
let shift_right_lemma #t #l a b =
match t with
| U1 | U8 | U16 | U32 | U64 | U128 -> ()
| S8 | S16 | S32 | S64 | S128 ->
if v b = 0 then ()
else if v b >= bits t then
shift_right_value_aux_1 #(bits t) (v a) (v b)
else
shift_right_value_aux_3 #(bits t) (v a) (v b)
[@(strict_on_arguments [0])]
let shift_left #t #l a b =
match t with
| U1 -> UInt8.shift_left a b
| U8 -> UInt8.shift_left a b
| U16 -> UInt16.shift_left a b
| U32 -> UInt32.shift_left a b
| U64 -> UInt64.shift_left a b
| U128 -> UInt128.shift_left a b
| S8 -> Int8.shift_left a b
| S16 -> Int16.shift_left a b
| S32 -> Int32.shift_left a b
| S64 -> Int64.shift_left a b
| S128 -> Int128.shift_left a b
#push-options "--max_fuel 1"
let shift_left_lemma #t #l a b = ()
let rotate_right #t #l a b =
logor (shift_right a b) (shift_left a (sub #U32 (size (bits t)) b))
let rotate_left #t #l a b =
logor (shift_left a b) (shift_right a (sub #U32 (size (bits t)) b))
[@(strict_on_arguments [0])]
let ct_abs #t #l a =
match t with
| S8 -> Int8.ct_abs a
| S16 -> Int16.ct_abs a
| S32 -> Int32.ct_abs a
| S64 -> Int64.ct_abs a
#pop-options
[@(strict_on_arguments [0])]
let eq_mask #t a b =
match t with
| U1 -> lognot (logxor a b)
| U8 -> UInt8.eq_mask a b
| U16 -> UInt16.eq_mask a b
| U32 -> UInt32.eq_mask a b
| U64 -> UInt64.eq_mask a b
| U128 -> UInt128.eq_mask a b
| S8 -> Int.Cast.uint8_to_int8 (UInt8.eq_mask (to_u8 a) (to_u8 b))
| S16 -> Int.Cast.uint16_to_int16 (UInt16.eq_mask (to_u16 a) (to_u16 b))
| S32 -> Int.Cast.uint32_to_int32 (UInt32.eq_mask (to_u32 a) (to_u32 b))
| S64 -> Int.Cast.uint64_to_int64 (UInt64.eq_mask (to_u64 a) (to_u64 b))
val eq_mask_lemma_unsigned: #t:inttype{unsigned t} -> a:int_t t SEC -> b:int_t t SEC -> Lemma
(if v a = v b then v (eq_mask a b) == ones_v t
else v (eq_mask a b) == 0)
let eq_mask_lemma_unsigned #t a b =
match t with
| U1 ->
assert_norm (
logxor (u1 0) (u1 0) == u1 0 /\ logxor (u1 0) (u1 1) == u1 1 /\
logxor (u1 1) (u1 0) == u1 1 /\ logxor (u1 1) (u1 1) == u1 0 /\
lognot (u1 1) == u1 0 /\ lognot (u1 0) == u1 1)
| U8 | U16 | U32 | U64 | U128 -> ()
#push-options "--z3rlimit 200"
val eq_mask_lemma_signed: #t:inttype{signed t /\ ~(S128? t)} -> a:int_t t SEC -> b:int_t t SEC -> Lemma
(if v a = v b then v (eq_mask a b) == ones_v t
else v (eq_mask a b) == 0)
let eq_mask_lemma_signed #t a b =
match t with
| S8 ->
begin
assert_norm (pow2 8 = 2 * pow2 7);
if 0 <= v a then modulo_lemma (v a) (pow2 8)
else
begin
modulo_addition_lemma (v a) 1 (pow2 8);
modulo_lemma (v a + pow2 8) (pow2 8)
end
end
| S16 ->
begin
assert_norm (pow2 16 = 2 * pow2 15);
if 0 <= v a then modulo_lemma (v a) (pow2 16)
else
begin
modulo_addition_lemma (v a) 1 (pow2 16);
modulo_lemma (v a + pow2 16) (pow2 16)
end
end
| S32 ->
begin
if 0 <= v a then modulo_lemma (v a) (pow2 32)
else
begin
modulo_addition_lemma (v a) 1 (pow2 32);
modulo_lemma (v a + pow2 32) (pow2 32)
end
end
| S64 ->
begin
if 0 <= v a then modulo_lemma (v a) (pow2 64)
else
begin
modulo_addition_lemma (v a) 1 (pow2 64);
modulo_lemma (v a + pow2 64) (pow2 64)
end
end
#pop-options
let eq_mask_lemma #t a b =
if signed t then eq_mask_lemma_signed a b
else eq_mask_lemma_unsigned a b
let eq_mask_logand_lemma #t a b c =
eq_mask_lemma a b;
logand_zeros c;
logand_ones c;
match t with
| U1 | U8 | U16 | U32 | U64 | U128 -> UInt.logand_commutative #(bits t) (v (eq_mask a b)) (v c)
| S8 | S16 | S32 | S64 -> Int.logand_commutative #(bits t) (v (eq_mask a b)) (v c)
[@(strict_on_arguments [0])]
let neq_mask #t a b = lognot (eq_mask #t a b)
let neq_mask_lemma #t a b =
match t with
| U1 -> assert_norm (lognot (u1 1) == u1 0 /\ lognot (u1 0) == u1 1)
| _ ->
UInt.lognot_lemma_1 #(bits t);
UInt.lognot_self #(bits t) 0
[@(strict_on_arguments [0])]
let gte_mask #t a b =
match t with
| U1 -> logor a (lognot b)
| U8 -> UInt8.gte_mask a b
| U16 -> UInt16.gte_mask a b
| U32 -> UInt32.gte_mask a b
| U64 -> UInt64.gte_mask a b
| U128 -> UInt128.gte_mask a b
let gte_mask_lemma #t a b =
match t with
| U1 ->
begin
assert_norm (
logor (u1 0) (u1 0) == u1 0 /\ logor (u1 1) (u1 1) == u1 1 /\
logor (u1 0) (u1 1) == u1 1 /\ logor (u1 1) (u1 0) == u1 1 /\
lognot (u1 1) == u1 0 /\ lognot (u1 0) == u1 1)
end
| _ -> ()
let gte_mask_logand_lemma #t a b c =
logand_zeros c;
logand_ones c;
match t with
| U1 ->
assert_norm (
logor (u1 0) (u1 0) == u1 0 /\ logor (u1 1) (u1 1) == u1 1 /\
logor (u1 0) (u1 1) == u1 1 /\ logor (u1 1) (u1 0) == u1 1 /\
lognot (u1 1) == u1 0 /\ lognot (u1 0) == u1 1)
| _ -> UInt.logand_commutative #(bits t) (v (gte_mask a b)) (v c)
let lt_mask #t a b = lognot (gte_mask a b)
let lt_mask_lemma #t a b =
assert_norm (lognot (u1 1) == u1 0 /\ lognot (u1 0) == u1 1);
UInt.lognot_lemma_1 #(bits t);
UInt.lognot_self #(bits t) 0
let gt_mask #t a b = logand (gte_mask a b) (neq_mask a b)
let gt_mask_lemma #t a b =
logand_zeros (gte_mask a b);
logand_ones (gte_mask a b)
let lte_mask #t a b = logor (lt_mask a b) (eq_mask a b)
let lte_mask_lemma #t a b =
match t with
| U1 ->
assert_norm (
logor (u1 0) (u1 0) == u1 0 /\ logor (u1 1) (u1 1) == u1 1 /\
logor (u1 0) (u1 1) == u1 1 /\ logor (u1 1) (u1 0) == u1 1)
| U8 | U16 | U32 | U64 | U128 ->
if v a > v b then
UInt.logor_lemma_1 #(bits t) (v (lt_mask a b))
else if v a = v b then
UInt.logor_lemma_2 #(bits t) (v (lt_mask a b))
else
UInt.logor_lemma_1 #(bits t) (v (lt_mask a b))
#push-options "--max_fuel 1"
val mod_mask_value: #t:inttype -> #l:secrecy_level -> m:shiftval t{pow2 (uint_v m) <= maxint t} ->
Lemma (v (mod_mask #t #l m) == pow2 (v m) - 1)
let mod_mask_value #t #l m =
shift_left_lemma (mk_int #t #l 1) m;
pow2_double_mult (bits t - 1);
pow2_lt_compat (bits t) (v m);
small_modulo_lemma_1 (pow2 (v m)) (pow2 (bits t));
small_modulo_lemma_1 (pow2 (v m) - 1) (pow2 (bits t))
let mod_mask_lemma #t #l a m =
mod_mask_value #t #l m;
if unsigned t || 0 <= v a then
if v m = 0 then
UInt.logand_lemma_1 #(bits t) (v a)
else
UInt.logand_mask #(bits t) (v a) (v m)
else
begin
let a1 = v a in
let a2 = v a + pow2 (bits t) in
pow2_plus (bits t - v m) (v m);
pow2_le_compat (bits t - 1) (v m);
lemma_mod_plus a1 (pow2 (bits t - v m)) (pow2 (v m));
if v m = 0 then
UInt.logand_lemma_1 #(bits t) a2
else
UInt.logand_mask #(bits t) a2 (v m)
end
#pop-options
#push-options "--max_fuel 0 --max_ifuel 0 --z3rlimit 1000"
(**
Conditionally subtracts 2^(bits t') from a in constant-time,
so that the result fits in t'; i.e.
b = if a >= 2^(bits t' - 1) then a - 2^(bits t') else a
*)
inline_for_extraction
val conditional_subtract:
#t:inttype{signed t}
-> #l:secrecy_level
-> t':inttype{signed t' /\ bits t' < bits t}
-> a:int_t t l{0 <= v a /\ v a <= pow2 (bits t') - 1}
-> b:int_t t l{v b = v a @%. t'}
let conditional_subtract #t #l t' a =
assert_norm (pow2 7 = 128);
assert_norm (pow2 15 = 32768);
let pow2_bits = shift_left #t #l (mk_int 1) (size (bits t')) in
shift_left_lemma #t #l (mk_int 1) (size (bits t'));
let pow2_bits_minus_one = shift_left #t #l (mk_int 1) (size (bits t' - 1)) in
shift_left_lemma #t #l (mk_int 1) (size (bits t' - 1));
// assert (v pow2_bits == pow2 (bits t'));
// assert (v pow2_bits_minus_one == pow2 (bits t' - 1));
let a2 = a `sub` pow2_bits_minus_one in
let mask = shift_right a2 (size (bits t - 1)) in
shift_right_lemma a2 (size (bits t - 1));
// assert (if v a2 < 0 then v mask = -1 else v mask = 0);
let a3 = a `sub` pow2_bits in
logand_lemma mask pow2_bits;
a3 `add` (mask `logand` pow2_bits)
let cast_mod #t #l t' l' a =
assert_norm (pow2 7 = 128);
assert_norm (pow2 15 = 32768);
if bits t' >= bits t then
cast t' l' a
else
begin
let m = size (bits t') in
mod_mask_lemma a m;
let b = conditional_subtract t' (a `logand` mod_mask m) in
cast t' l' b
end
#pop-options
[@(strict_on_arguments [0])]
let div #t x y =
match t with
| U1 -> UInt8.div x y
| U8 -> UInt8.div x y
| U16 -> UInt16.div x y
| U32 -> UInt32.div x y
| U64 -> UInt64.div x y
| S8 -> Int.pow2_values 8; Int8.div x y
| S16 -> Int.pow2_values 16; Int16.div x y
| S32 -> Int.pow2_values 32; Int32.div x y
| S64 -> Int.pow2_values 64; Int64.div x y
let div_lemma #t a b =
match t with
| U1 | U8 | U16 | U32 | U64 -> ()
| S8 -> Int.pow2_values 8
| S16 -> Int.pow2_values 16
| S32 -> Int.pow2_values 32
| S64 -> Int.pow2_values 64
let mod #t x y =
match t with
| U1 -> UInt8.rem x y
| U8 -> UInt8.rem x y
| U16 -> UInt16.rem x y
| U32 -> UInt32.rem x y
| U64 -> UInt64.rem x y
| S8 -> Int.pow2_values 8; Int8.rem x y
| S16 -> Int.pow2_values 16; Int16.rem x y
| S32 -> Int.pow2_values 32; Int32.rem x y
| S64 -> Int.pow2_values 64; Int64.rem x y
let mod_lemma #t a b =
match t with
| U1 | U8 | U16 | U32 | U64 -> ()
| S8 -> Int.pow2_values 8
| S16 -> Int.pow2_values 16
| S32 -> Int.pow2_values 32
| S64 -> Int.pow2_values 64 | false | false | Lib.IntTypes.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 200,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val eq: #t:inttype -> int_t t PUB -> int_t t PUB -> bool | [] | Lib.IntTypes.eq | {
"file_name": "lib/Lib.IntTypes.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | x: Lib.IntTypes.int_t t Lib.IntTypes.PUB -> y: Lib.IntTypes.int_t t Lib.IntTypes.PUB -> Prims.bool | {
"end_col": 7,
"end_line": 1068,
"start_col": 2,
"start_line": 1068
} |
Prims.Tot | val rotate_left: #t:inttype -> #l:secrecy_level
-> a:int_t t l{unsigned t}
-> rotval t
-> int_t t l | [
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let rotate_left #t #l a b =
logor (shift_left a b) (shift_right a (sub #U32 (size (bits t)) b)) | val rotate_left: #t:inttype -> #l:secrecy_level
-> a:int_t t l{unsigned t}
-> rotval t
-> int_t t l
let rotate_left #t #l a b = | false | null | false | logor (shift_left a b) (shift_right a (sub #U32 (size (bits t)) b)) | {
"checked_file": "Lib.IntTypes.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt8.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt16.fsti.checked",
"FStar.UInt128.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int8.fsti.checked",
"FStar.Int64.fsti.checked",
"FStar.Int32.fsti.checked",
"FStar.Int16.fsti.checked",
"FStar.Int128.fsti.checked",
"FStar.Int.Cast.Full.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Int.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "Lib.IntTypes.fst"
} | [
"total"
] | [
"Lib.IntTypes.inttype",
"Lib.IntTypes.secrecy_level",
"Lib.IntTypes.int_t",
"Prims.b2t",
"Lib.IntTypes.unsigned",
"Lib.IntTypes.rotval",
"Lib.IntTypes.logor",
"Lib.IntTypes.shift_left",
"Lib.IntTypes.shift_right",
"Lib.IntTypes.sub",
"Lib.IntTypes.U32",
"Lib.IntTypes.PUB",
"Lib.IntTypes.size",
"Lib.IntTypes.bits"
] | [] | module Lib.IntTypes
open FStar.Math.Lemmas
#push-options "--max_fuel 0 --max_ifuel 1 --z3rlimit 200"
let pow2_2 _ = assert_norm (pow2 2 = 4)
let pow2_3 _ = assert_norm (pow2 3 = 8)
let pow2_4 _ = assert_norm (pow2 4 = 16)
let pow2_127 _ = assert_norm (pow2 127 = 0x80000000000000000000000000000000)
let bits_numbytes t = ()
let sec_int_t t = pub_int_t t
let sec_int_v #t u = pub_int_v u
let secret #t x = x
[@(strict_on_arguments [0])]
let mk_int #t #l x =
match t with
| U1 -> UInt8.uint_to_t x
| U8 -> UInt8.uint_to_t x
| U16 -> UInt16.uint_to_t x
| U32 -> UInt32.uint_to_t x
| U64 -> UInt64.uint_to_t x
| U128 -> UInt128.uint_to_t x
| S8 -> Int8.int_to_t x
| S16 -> Int16.int_to_t x
| S32 -> Int32.int_to_t x
| S64 -> Int64.int_to_t x
| S128 -> Int128.int_to_t x
val v_extensionality:
#t:inttype
-> #l:secrecy_level
-> a:int_t t l
-> b:int_t t l
-> Lemma
(requires v a == v b)
(ensures a == b)
let v_extensionality #t #l a b =
match t with
| U1 -> ()
| U8 -> UInt8.v_inj a b
| U16 -> UInt16.v_inj a b
| U32 -> UInt32.v_inj a b
| U64 -> UInt64.v_inj a b
| U128 -> UInt128.v_inj a b
| S8 -> Int8.v_inj a b
| S16 -> Int16.v_inj a b
| S32 -> Int32.v_inj a b
| S64 -> Int64.v_inj a b
| S128 -> Int128.v_inj a b
let v_injective #t #l a =
v_extensionality a (mk_int (v a))
let v_mk_int #t #l n = ()
let u128 n = FStar.UInt128.uint64_to_uint128 (u64 n)
// KaRaMeL will extract this to FStar_Int128_int_to_t, which isn't provided
// We'll need to have FStar.Int128.int64_to_int128 to support int128_t literals
let i128 n =
assert_norm (pow2 (bits S64 - 1) <= pow2 (bits S128 - 1));
sint #S128 #SEC n
let size_to_uint32 x = x
let size_to_uint64 x = Int.Cast.uint32_to_uint64 x
let byte_to_uint8 x = x
let byte_to_int8 x = Int.Cast.uint8_to_int8 x
let op_At_Percent = Int.op_At_Percent
// FStar.UInt128 gets special treatment in KaRaMeL. There is no
// equivalent for FStar.Int128 at the moment, so we use the three
// assumed cast operators below.
//
// Using them will fail at runtime with an informative message.
// The commented-out implementations show that they are realizable.
//
// When support for `FStar.Int128` is added KaRaMeL, these casts must
// be added as special cases. When using builtin compiler support for
// `int128_t`, they can be implemented directly as C casts without
// undefined or implementation-defined behaviour.
assume
val uint128_to_int128: a:UInt128.t{v a <= maxint S128} -> b:Int128.t{Int128.v b == UInt128.v a}
//let uint128_to_int128 a = Int128.int_to_t (v a)
assume
val int128_to_uint128: a:Int128.t -> b:UInt128.t{UInt128.v b == Int128.v a % pow2 128}
//let int128_to_uint128 a = mk_int (v a % pow2 128)
assume
val int64_to_int128: a:Int64.t -> b:Int128.t{Int128.v b == Int64.v a}
//let int64_to_int128 a = Int128.int_to_t (v a)
val uint64_to_int128: a:UInt64.t -> b:Int128.t{Int128.v b == UInt64.v a}
let uint64_to_int128 a = uint128_to_int128 (Int.Cast.Full.uint64_to_uint128 a)
val int64_to_uint128: a:Int64.t -> b:UInt128.t{UInt128.v b == Int64.v a % pow2 128}
let int64_to_uint128 a = int128_to_uint128 (int64_to_int128 a)
val int128_to_uint64: a:Int128.t -> b:UInt64.t{UInt64.v b == Int128.v a % pow2 64}
let int128_to_uint64 a = Int.Cast.Full.uint128_to_uint64 (int128_to_uint128 a)
#push-options "--z3rlimit 1000"
[@(strict_on_arguments [0;2])]
let cast #t #l t' l' u =
assert_norm (pow2 8 = 2 * pow2 7);
assert_norm (pow2 16 = 2 * pow2 15);
assert_norm (pow2 64 * pow2 64 = pow2 128);
assert_norm (pow2 16 * pow2 48 = pow2 64);
assert_norm (pow2 8 * pow2 56 = pow2 64);
assert_norm (pow2 32 * pow2 32 = pow2 64);
modulo_modulo_lemma (v u) (pow2 32) (pow2 32);
modulo_modulo_lemma (v u) (pow2 64) (pow2 64);
modulo_modulo_lemma (v u) (pow2 128) (pow2 64);
modulo_modulo_lemma (v u) (pow2 16) (pow2 48);
modulo_modulo_lemma (v u) (pow2 8) (pow2 56);
let open FStar.Int.Cast in
let open FStar.Int.Cast.Full in
match t, t' with
| U1, U1 -> u
| U1, U8 -> u
| U1, U16 -> uint8_to_uint16 u
| U1, U32 -> uint8_to_uint32 u
| U1, U64 -> uint8_to_uint64 u
| U1, U128 -> UInt128.uint64_to_uint128 (uint8_to_uint64 u)
| U1, S8 -> uint8_to_int8 u
| U1, S16 -> uint8_to_int16 u
| U1, S32 -> uint8_to_int32 u
| U1, S64 -> uint8_to_int64 u
| U1, S128 -> uint64_to_int128 (uint8_to_uint64 u)
| U8, U1 -> UInt8.rem u 2uy
| U8, U8 -> u
| U8, U16 -> uint8_to_uint16 u
| U8, U32 -> uint8_to_uint32 u
| U8, U64 -> uint8_to_uint64 u
| U8, U128 -> UInt128.uint64_to_uint128 (uint8_to_uint64 u)
| U8, S8 -> uint8_to_int8 u
| U8, S16 -> uint8_to_int16 u
| U8, S32 -> uint8_to_int32 u
| U8, S64 -> uint8_to_int64 u
| U8, S128 -> uint64_to_int128 (uint8_to_uint64 u)
| U16, U1 -> UInt8.rem (uint16_to_uint8 u) 2uy
| U16, U8 -> uint16_to_uint8 u
| U16, U16 -> u
| U16, U32 -> uint16_to_uint32 u
| U16, U64 -> uint16_to_uint64 u
| U16, U128 -> UInt128.uint64_to_uint128 (uint16_to_uint64 u)
| U16, S8 -> uint16_to_int8 u
| U16, S16 -> uint16_to_int16 u
| U16, S32 -> uint16_to_int32 u
| U16, S64 -> uint16_to_int64 u
| U16, S128 -> uint64_to_int128 (uint16_to_uint64 u)
| U32, U1 -> UInt8.rem (uint32_to_uint8 u) 2uy
| U32, U8 -> uint32_to_uint8 u
| U32, U16 -> uint32_to_uint16 u
| U32, U32 -> u
| U32, U64 -> uint32_to_uint64 u
| U32, U128 -> UInt128.uint64_to_uint128 (uint32_to_uint64 u)
| U32, S8 -> uint32_to_int8 u
| U32, S16 -> uint32_to_int16 u
| U32, S32 -> uint32_to_int32 u
| U32, S64 -> uint32_to_int64 u
| U32, S128 -> uint64_to_int128 (uint32_to_uint64 u)
| U64, U1 -> UInt8.rem (uint64_to_uint8 u) 2uy
| U64, U8 -> uint64_to_uint8 u
| U64, U16 -> uint64_to_uint16 u
| U64, U32 -> uint64_to_uint32 u
| U64, U64 -> u
| U64, U128 -> UInt128.uint64_to_uint128 u
| U64, S8 -> uint64_to_int8 u
| U64, S16 -> uint64_to_int16 u
| U64, S32 -> uint64_to_int32 u
| U64, S64 -> uint64_to_int64 u
| U64, S128 -> uint64_to_int128 u
| U128, U1 -> UInt8.rem (uint64_to_uint8 (uint128_to_uint64 u)) 2uy
| U128, U8 -> uint64_to_uint8 (UInt128.uint128_to_uint64 u)
| U128, U16 -> uint64_to_uint16 (UInt128.uint128_to_uint64 u)
| U128, U32 -> uint64_to_uint32 (UInt128.uint128_to_uint64 u)
| U128, U64 -> UInt128.uint128_to_uint64 u
| U128, U128 -> u
| U128, S8 -> uint64_to_int8 (UInt128.uint128_to_uint64 u)
| U128, S16 -> uint64_to_int16 (UInt128.uint128_to_uint64 u)
| U128, S32 -> uint64_to_int32 (UInt128.uint128_to_uint64 u)
| U128, S64 -> uint64_to_int64 (UInt128.uint128_to_uint64 u)
| U128, S128 -> uint128_to_int128 u
| S8, U1 -> UInt8.rem (int8_to_uint8 u) 2uy
| S8, U8 -> int8_to_uint8 u
| S8, U16 -> int8_to_uint16 u
| S8, U32 -> int8_to_uint32 u
| S8, U64 -> int8_to_uint64 u
| S8, U128 -> int64_to_uint128 (int8_to_int64 u)
| S8, S8 -> u
| S8, S16 -> int8_to_int16 u
| S8, S32 -> int8_to_int32 u
| S8, S64 -> int8_to_int64 u
| S8, S128 -> int64_to_int128 (int8_to_int64 u)
| S16, U1 -> UInt8.rem (int16_to_uint8 u) 2uy
| S16, U8 -> int16_to_uint8 u
| S16, U16 -> int16_to_uint16 u
| S16, U32 -> int16_to_uint32 u
| S16, U64 -> int16_to_uint64 u
| S16, U128 -> int64_to_uint128 (int16_to_int64 u)
| S16, S8 -> int16_to_int8 u
| S16, S16 -> u
| S16, S32 -> int16_to_int32 u
| S16, S64 -> int16_to_int64 u
| S16, S128 -> int64_to_int128 (int16_to_int64 u)
| S32, U1 -> UInt8.rem (int32_to_uint8 u) 2uy
| S32, U8 -> int32_to_uint8 u
| S32, U16 -> int32_to_uint16 u
| S32, U32 -> int32_to_uint32 u
| S32, U64 -> int32_to_uint64 u
| S32, U128 -> int64_to_uint128 (int32_to_int64 u)
| S32, S8 -> int32_to_int8 u
| S32, S16 -> int32_to_int16 u
| S32, S32 -> u
| S32, S64 -> int32_to_int64 u
| S32, S128 -> int64_to_int128 (int32_to_int64 u)
| S64, U1 -> UInt8.rem (int64_to_uint8 u) 2uy
| S64, U8 -> int64_to_uint8 u
| S64, U16 -> int64_to_uint16 u
| S64, U32 -> int64_to_uint32 u
| S64, U64 -> int64_to_uint64 u
| S64, U128 -> int64_to_uint128 u
| S64, S8 -> int64_to_int8 u
| S64, S16 -> int64_to_int16 u
| S64, S32 -> int64_to_int32 u
| S64, S64 -> u
| S64, S128 -> int64_to_int128 u
| S128, U1 -> UInt8.rem (uint64_to_uint8 (int128_to_uint64 u)) 2uy
| S128, U8 -> uint64_to_uint8 (int128_to_uint64 u)
| S128, U16 -> uint64_to_uint16 (int128_to_uint64 u)
| S128, U32 -> uint64_to_uint32 (int128_to_uint64 u)
| S128, U64 -> int128_to_uint64 u
| S128, U128 -> int128_to_uint128 u
| S128, S8 -> uint64_to_int8 (int128_to_uint64 u)
| S128, S16 -> uint64_to_int16 (int128_to_uint64 u)
| S128, S32 -> uint64_to_int32 (int128_to_uint64 u)
| S128, S64 -> uint64_to_int64 (int128_to_uint64 u)
| S128, S128 -> u
#pop-options
[@(strict_on_arguments [0])]
let ones t l =
match t with
| U1 -> 0x1uy
| U8 -> 0xFFuy
| U16 -> 0xFFFFus
| U32 -> 0xFFFFFFFFul
| U64 -> 0xFFFFFFFFFFFFFFFFuL
| U128 ->
let x = UInt128.uint64_to_uint128 0xFFFFFFFFFFFFFFFFuL in
let y = (UInt128.shift_left x 64ul) `UInt128.add` x in
assert_norm (UInt128.v y == pow2 128 - 1);
y
| _ -> mk_int (-1)
let zeros t l = mk_int 0
[@(strict_on_arguments [0])]
let add_mod #t #l a b =
match t with
| U1 -> UInt8.rem (UInt8.add_mod a b) 2uy
| U8 -> UInt8.add_mod a b
| U16 -> UInt16.add_mod a b
| U32 -> UInt32.add_mod a b
| U64 -> UInt64.add_mod a b
| U128 -> UInt128.add_mod a b
let add_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let add #t #l a b =
match t with
| U1 -> UInt8.add a b
| U8 -> UInt8.add a b
| U16 -> UInt16.add a b
| U32 -> UInt32.add a b
| U64 -> UInt64.add a b
| U128 -> UInt128.add a b
| S8 -> Int8.add a b
| S16 -> Int16.add a b
| S32 -> Int32.add a b
| S64 -> Int64.add a b
| S128 -> Int128.add a b
let add_lemma #t #l a b = ()
#push-options "--max_fuel 1"
[@(strict_on_arguments [0])]
let incr #t #l a =
match t with
| U1 -> UInt8.add a 1uy
| U8 -> UInt8.add a 1uy
| U16 -> UInt16.add a 1us
| U32 -> UInt32.add a 1ul
| U64 -> UInt64.add a 1uL
| U128 -> UInt128.add a (UInt128.uint_to_t 1)
| S8 -> Int8.add a 1y
| S16 -> Int16.add a 1s
| S32 -> Int32.add a 1l
| S64 -> Int64.add a 1L
| S128 -> Int128.add a (Int128.int_to_t 1)
let incr_lemma #t #l a = ()
#pop-options
[@(strict_on_arguments [0])]
let mul_mod #t #l a b =
match t with
| U1 -> UInt8.mul_mod a b
| U8 -> UInt8.mul_mod a b
| U16 -> UInt16.mul_mod a b
| U32 -> UInt32.mul_mod a b
| U64 -> UInt64.mul_mod a b
let mul_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let mul #t #l a b =
match t with
| U1 -> UInt8.mul a b
| U8 -> UInt8.mul a b
| U16 -> UInt16.mul a b
| U32 -> UInt32.mul a b
| U64 -> UInt64.mul a b
| S8 -> Int8.mul a b
| S16 -> Int16.mul a b
| S32 -> Int32.mul a b
| S64 -> Int64.mul a b
let mul_lemma #t #l a b = ()
let mul64_wide a b = UInt128.mul_wide a b
let mul64_wide_lemma a b = ()
let mul_s64_wide a b = Int128.mul_wide a b
let mul_s64_wide_lemma a b = ()
[@(strict_on_arguments [0])]
let sub_mod #t #l a b =
match t with
| U1 -> UInt8.rem (UInt8.sub_mod a b) 2uy
| U8 -> UInt8.sub_mod a b
| U16 -> UInt16.sub_mod a b
| U32 -> UInt32.sub_mod a b
| U64 -> UInt64.sub_mod a b
| U128 -> UInt128.sub_mod a b
let sub_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let sub #t #l a b =
match t with
| U1 -> UInt8.sub a b
| U8 -> UInt8.sub a b
| U16 -> UInt16.sub a b
| U32 -> UInt32.sub a b
| U64 -> UInt64.sub a b
| U128 -> UInt128.sub a b
| S8 -> Int8.sub a b
| S16 -> Int16.sub a b
| S32 -> Int32.sub a b
| S64 -> Int64.sub a b
| S128 -> Int128.sub a b
let sub_lemma #t #l a b = ()
#push-options "--max_fuel 1"
[@(strict_on_arguments [0])]
let decr #t #l a =
match t with
| U1 -> UInt8.sub a 1uy
| U8 -> UInt8.sub a 1uy
| U16 -> UInt16.sub a 1us
| U32 -> UInt32.sub a 1ul
| U64 -> UInt64.sub a 1uL
| U128 -> UInt128.sub a (UInt128.uint_to_t 1)
| S8 -> Int8.sub a 1y
| S16 -> Int16.sub a 1s
| S32 -> Int32.sub a 1l
| S64 -> Int64.sub a 1L
| S128 -> Int128.sub a (Int128.int_to_t 1)
let decr_lemma #t #l a = ()
#pop-options
[@(strict_on_arguments [0])]
let logxor #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logxor 0uy 0uy == 0uy);
assert_norm (UInt8.logxor 0uy 1uy == 1uy);
assert_norm (UInt8.logxor 1uy 0uy == 1uy);
assert_norm (UInt8.logxor 1uy 1uy == 0uy);
UInt8.logxor a b
| U8 -> UInt8.logxor a b
| U16 -> UInt16.logxor a b
| U32 -> UInt32.logxor a b
| U64 -> UInt64.logxor a b
| U128 -> UInt128.logxor a b
| S8 -> Int8.logxor a b
| S16 -> Int16.logxor a b
| S32 -> Int32.logxor a b
| S64 -> Int64.logxor a b
| S128 -> Int128.logxor a b
#push-options "--max_fuel 1"
val logxor_lemma_: #t:inttype -> #l:secrecy_level -> a:int_t t l -> b:int_t t l -> Lemma
(v (a `logxor` (a `logxor` b)) == v b)
let logxor_lemma_ #t #l a b =
match t with
| U1 | U8 | U16 | U32 | U64 | U128 ->
UInt.logxor_associative #(bits t) (v a) (v a) (v b);
UInt.logxor_self #(bits t) (v a);
UInt.logxor_commutative #(bits t) 0 (v b);
UInt.logxor_lemma_1 #(bits t) (v b)
| S8 | S16 | S32 | S64 | S128 ->
Int.logxor_associative #(bits t) (v a) (v a) (v b);
Int.logxor_self #(bits t) (v a);
Int.logxor_commutative #(bits t) 0 (v b);
Int.logxor_lemma_1 #(bits t) (v b)
let logxor_lemma #t #l a b =
logxor_lemma_ #t a b;
v_extensionality (logxor a (logxor a b)) b;
begin
match t with
| U1 | U8 | U16 | U32 | U64 | U128 -> UInt.logxor_commutative #(bits t) (v a) (v b)
| S8 | S16 | S32 | S64 | S128 -> Int.logxor_commutative #(bits t) (v a) (v b)
end;
v_extensionality (logxor a (logxor b a)) b;
begin
match t with
| U1 | U8 | U16 | U32 | U64 | U128 -> UInt.logxor_lemma_1 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.logxor_lemma_1 #(bits t) (v a)
end;
v_extensionality (logxor a (mk_int #t #l 0)) a
let logxor_lemma1 #t #l a b =
match v a, v b with
| _, 0 ->
UInt.logxor_lemma_1 #(bits t) (v a)
| 0, _ ->
UInt.logxor_commutative #(bits t) (v a) (v b);
UInt.logxor_lemma_1 #(bits t) (v b)
| 1, 1 ->
v_extensionality a b;
UInt.logxor_self #(bits t) (v a)
let logxor_spec #t #l a b =
match t with
| U1 ->
assert_norm (u1 0 `logxor` u1 0 == u1 0 /\ u1 0 `logxor` u1 1 == u1 1);
assert_norm (u1 1 `logxor` u1 0 == u1 1 /\ u1 1 `logxor` u1 1 == u1 0);
assert_norm (0 `logxor_v #U1` 0 == 0 /\ 0 `logxor_v #U1` 1 == 1);
assert_norm (1 `logxor_v #U1` 0 == 1 /\ 1 `logxor_v #U1` 1 == 0)
| _ -> ()
#pop-options
[@(strict_on_arguments [0])]
let logand #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logand 0uy 0uy == 0uy);
assert_norm (UInt8.logand 0uy 1uy == 0uy);
assert_norm (UInt8.logand 1uy 0uy == 0uy);
assert_norm (UInt8.logand 1uy 1uy == 1uy);
UInt8.logand a b
| U8 -> UInt8.logand a b
| U16 -> UInt16.logand a b
| U32 -> UInt32.logand a b
| U64 -> UInt64.logand a b
| U128 -> UInt128.logand a b
| S8 -> Int8.logand a b
| S16 -> Int16.logand a b
| S32 -> Int32.logand a b
| S64 -> Int64.logand a b
| S128 -> Int128.logand a b
let logand_zeros #t #l a =
match t with
| U1 -> assert_norm (u1 0 `logand` zeros U1 l == u1 0 /\ u1 1 `logand` zeros U1 l == u1 0)
| U8 | U16 | U32 | U64 | U128 -> UInt.logand_lemma_1 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.logand_lemma_1 #(bits t) (v a)
let logand_ones #t #l a =
match t with
| U1 -> assert_norm (u1 0 `logand` ones U1 l == u1 0 /\ u1 1 `logand` ones U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logand_lemma_2 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.logand_lemma_2 #(bits t) (v a)
let logand_lemma #t #l a b =
logand_zeros #t #l b;
logand_ones #t #l b;
match t with
| U1 ->
assert_norm (u1 0 `logand` zeros U1 l == u1 0 /\ u1 1 `logand` zeros U1 l == u1 0);
assert_norm (u1 0 `logand` ones U1 l == u1 0 /\ u1 1 `logand` ones U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logand_commutative #(bits t) (v a) (v b)
| S8 | S16 | S32 | S64 | S128 -> Int.logand_commutative #(bits t) (v a) (v b)
let logand_spec #t #l a b =
match t with
| U1 ->
assert_norm (u1 0 `logand` u1 0 == u1 0 /\ u1 0 `logand` u1 1 == u1 0);
assert_norm (u1 1 `logand` u1 0 == u1 0 /\ u1 1 `logand` u1 1 == u1 1);
assert_norm (0 `logand_v #U1` 0 == 0 /\ 0 `logand_v #U1` 1 == 0);
assert_norm (1 `logand_v #U1` 0 == 0 /\ 1 `logand_v #U1` 1 == 1)
| _ -> ()
let logand_le #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logand 0uy 0uy == 0uy);
assert_norm (UInt8.logand 0uy 1uy == 0uy);
assert_norm (UInt8.logand 1uy 0uy == 0uy);
assert_norm (UInt8.logand 1uy 1uy == 1uy)
| U8 -> UInt.logand_le (UInt.to_uint_t 8 (v a)) (UInt.to_uint_t 8 (v b))
| U16 -> UInt.logand_le (UInt.to_uint_t 16 (v a)) (UInt.to_uint_t 16 (v b))
| U32 -> UInt.logand_le (UInt.to_uint_t 32 (v a)) (UInt.to_uint_t 32 (v b))
| U64 -> UInt.logand_le (UInt.to_uint_t 64 (v a)) (UInt.to_uint_t 64 (v b))
| U128 -> UInt.logand_le (UInt.to_uint_t 128 (v a)) (UInt.to_uint_t 128 (v b))
let logand_mask #t #l a b m =
match t with
| U1 ->
assert_norm (UInt8.logand 0uy 0uy == 0uy);
assert_norm (UInt8.logand 0uy 1uy == 0uy);
assert_norm (UInt8.logand 1uy 0uy == 0uy);
assert_norm (UInt8.logand 1uy 1uy == 1uy)
| U8 -> UInt.logand_mask (UInt.to_uint_t 8 (v a)) m
| U16 -> UInt.logand_mask (UInt.to_uint_t 16 (v a)) m
| U32 -> UInt.logand_mask (UInt.to_uint_t 32 (v a)) m
| U64 -> UInt.logand_mask (UInt.to_uint_t 64 (v a)) m
| U128 -> UInt.logand_mask (UInt.to_uint_t 128 (v a)) m
[@(strict_on_arguments [0])]
let logor #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logor 0uy 0uy == 0uy);
assert_norm (UInt8.logor 0uy 1uy == 1uy);
assert_norm (UInt8.logor 1uy 0uy == 1uy);
assert_norm (UInt8.logor 1uy 1uy == 1uy);
UInt8.logor a b
| U8 -> UInt8.logor a b
| U16 -> UInt16.logor a b
| U32 -> UInt32.logor a b
| U64 -> UInt64.logor a b
| U128 -> UInt128.logor a b
| S8 -> Int8.logor a b
| S16 -> Int16.logor a b
| S32 -> Int32.logor a b
| S64 -> Int64.logor a b
| S128 -> Int128.logor a b
#push-options "--max_fuel 1"
let logor_disjoint #t #l a b m =
if m > 0 then
begin
UInt.logor_disjoint #(bits t) (v b) (v a) m;
UInt.logor_commutative #(bits t) (v b) (v a)
end
else
begin
UInt.logor_commutative #(bits t) (v a) (v b);
UInt.logor_lemma_1 #(bits t) (v b)
end
#pop-options
let logor_zeros #t #l a =
match t with
|U1 -> assert_norm(u1 0 `logor` zeros U1 l == u1 0 /\ u1 1 `logor` zeros U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logor_lemma_1 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.nth_lemma #(bits t) (Int.logor #(bits t) (v a) (Int.zero (bits t))) (v a)
let logor_ones #t #l a =
match t with
|U1 -> assert_norm(u1 0 `logor` ones U1 l == u1 1 /\ u1 1 `logor` ones U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logor_lemma_2 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.nth_lemma (Int.logor #(bits t) (v a) (Int.ones (bits t))) (Int.ones (bits t))
let logor_lemma #t #l a b =
logor_zeros #t #l b;
logor_ones #t #l b;
match t with
| U1 ->
assert_norm(u1 0 `logor` ones U1 l == u1 1 /\ u1 1 `logor` ones U1 l == u1 1);
assert_norm(u1 0 `logor` zeros U1 l == u1 0 /\ u1 1 `logor` zeros U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logor_commutative #(bits t) (v a) (v b)
| S8 | S16 | S32 | S64 | S128 -> Int.nth_lemma #(bits t) (Int.logor #(bits t) (v a) (v b)) (Int.logor #(bits t) (v b) (v a))
let logor_spec #t #l a b =
match t with
| U1 ->
assert_norm(u1 0 `logor` ones U1 l == u1 1 /\ u1 1 `logor` ones U1 l == u1 1);
assert_norm(u1 0 `logor` zeros U1 l == u1 0 /\ u1 1 `logor` zeros U1 l == u1 1);
assert_norm (0 `logor_v #U1` 0 == 0 /\ 0 `logor_v #U1` 1 == 1);
assert_norm (1 `logor_v #U1` 0 == 1 /\ 1 `logor_v #U1` 1 == 1)
| _ -> ()
[@(strict_on_arguments [0])]
let lognot #t #l a =
match t with
| U1 -> UInt8.rem (UInt8.lognot a) 2uy
| U8 -> UInt8.lognot a
| U16 -> UInt16.lognot a
| U32 -> UInt32.lognot a
| U64 -> UInt64.lognot a
| U128 -> UInt128.lognot a
| S8 -> Int8.lognot a
| S16 -> Int16.lognot a
| S32 -> Int32.lognot a
| S64 -> Int64.lognot a
| S128 -> Int128.lognot a
let lognot_lemma #t #l a =
match t with
|U1 -> assert_norm(lognot (u1 0) == u1 1 /\ lognot (u1 1) == u1 0)
| U8 | U16 | U32 | U64 | U128 ->
FStar.UInt.lognot_lemma_1 #(bits t);
UInt.nth_lemma (FStar.UInt.lognot #(bits t) (UInt.ones (bits t))) (UInt.zero (bits t))
| S8 | S16 | S32 | S64 | S128 ->
Int.nth_lemma (FStar.Int.lognot #(bits t) (Int.zero (bits t))) (Int.ones (bits t));
Int.nth_lemma (FStar.Int.lognot #(bits t) (Int.ones (bits t))) (Int.zero (bits t))
let lognot_spec #t #l a =
match t with
| U1 ->
assert_norm(lognot (u1 0) == u1 1 /\ lognot (u1 1) == u1 0);
assert_norm(lognot_v #U1 0 == 1 /\ lognot_v #U1 1 == 0)
| _ -> ()
[@(strict_on_arguments [0])]
let shift_right #t #l a b =
match t with
| U1 -> UInt8.shift_right a b
| U8 -> UInt8.shift_right a b
| U16 -> UInt16.shift_right a b
| U32 -> UInt32.shift_right a b
| U64 -> UInt64.shift_right a b
| U128 -> UInt128.shift_right a b
| S8 -> Int8.shift_arithmetic_right a b
| S16 -> Int16.shift_arithmetic_right a b
| S32 -> Int32.shift_arithmetic_right a b
| S64 -> Int64.shift_arithmetic_right a b
| S128 -> Int128.shift_arithmetic_right a b
val shift_right_value_aux_1: #n:pos{1 < n} -> a:Int.int_t n -> s:nat{n <= s} ->
Lemma (Int.shift_arithmetic_right #n a s = a / pow2 s)
let shift_right_value_aux_1 #n a s =
pow2_le_compat s n;
if a >= 0 then Int.sign_bit_positive a else Int.sign_bit_negative a
#push-options "--z3rlimit 200"
val shift_right_value_aux_2: #n:pos{1 < n} -> a:Int.int_t n ->
Lemma (Int.shift_arithmetic_right #n a 1 = a / 2)
let shift_right_value_aux_2 #n a =
if a >= 0 then
begin
Int.sign_bit_positive a;
UInt.shift_right_value_aux_3 #n a 1
end
else
begin
Int.sign_bit_negative a;
let a1 = Int.to_vec a in
let au = Int.to_uint a in
let sar = Int.shift_arithmetic_right #n a 1 in
let sar1 = Int.to_vec sar in
let sr = UInt.shift_right #n au 1 in
let sr1 = UInt.to_vec sr in
assert (Seq.equal (Seq.slice sar1 1 n) (Seq.slice sr1 1 n));
assert (Seq.equal sar1 (Seq.append (BitVector.ones_vec #1) (Seq.slice sr1 1 n)));
UInt.append_lemma #1 #(n-1) (BitVector.ones_vec #1) (Seq.slice sr1 1 n);
assert (Seq.equal (Seq.slice a1 0 (n-1)) (Seq.slice sar1 1 n));
UInt.slice_left_lemma a1 (n-1);
assert (sar + pow2 n = pow2 (n-1) + (au / 2));
pow2_double_sum (n-1);
assert (sar + pow2 (n-1) = (a + pow2 n) / 2);
pow2_double_mult (n-1);
lemma_div_plus a (pow2 (n-1)) 2;
assert (sar = a / 2)
end
val shift_right_value_aux_3: #n:pos -> a:Int.int_t n -> s:pos{s < n} ->
Lemma (ensures Int.shift_arithmetic_right #n a s = a / pow2 s)
(decreases s)
let rec shift_right_value_aux_3 #n a s =
if s = 1 then
shift_right_value_aux_2 #n a
else
begin
let a1 = Int.to_vec a in
assert (Seq.equal (BitVector.shift_arithmetic_right_vec #n a1 s)
(BitVector.shift_arithmetic_right_vec #n
(BitVector.shift_arithmetic_right_vec #n a1 (s-1)) 1));
assert (Int.shift_arithmetic_right #n a s =
Int.shift_arithmetic_right #n (Int.shift_arithmetic_right #n a (s-1)) 1);
shift_right_value_aux_3 #n a (s-1);
shift_right_value_aux_2 #n (Int.shift_arithmetic_right #n a (s-1));
assert (Int.shift_arithmetic_right #n a s = (a / pow2 (s-1)) / 2);
pow2_double_mult (s-1);
division_multiplication_lemma a (pow2 (s-1)) 2
end
let shift_right_lemma #t #l a b =
match t with
| U1 | U8 | U16 | U32 | U64 | U128 -> ()
| S8 | S16 | S32 | S64 | S128 ->
if v b = 0 then ()
else if v b >= bits t then
shift_right_value_aux_1 #(bits t) (v a) (v b)
else
shift_right_value_aux_3 #(bits t) (v a) (v b)
[@(strict_on_arguments [0])]
let shift_left #t #l a b =
match t with
| U1 -> UInt8.shift_left a b
| U8 -> UInt8.shift_left a b
| U16 -> UInt16.shift_left a b
| U32 -> UInt32.shift_left a b
| U64 -> UInt64.shift_left a b
| U128 -> UInt128.shift_left a b
| S8 -> Int8.shift_left a b
| S16 -> Int16.shift_left a b
| S32 -> Int32.shift_left a b
| S64 -> Int64.shift_left a b
| S128 -> Int128.shift_left a b
#push-options "--max_fuel 1"
let shift_left_lemma #t #l a b = ()
let rotate_right #t #l a b =
logor (shift_right a b) (shift_left a (sub #U32 (size (bits t)) b)) | false | false | Lib.IntTypes.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 200,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val rotate_left: #t:inttype -> #l:secrecy_level
-> a:int_t t l{unsigned t}
-> rotval t
-> int_t t l | [] | Lib.IntTypes.rotate_left | {
"file_name": "lib/Lib.IntTypes.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | a: Lib.IntTypes.int_t t l {Lib.IntTypes.unsigned t} -> b: Lib.IntTypes.rotval t
-> Lib.IntTypes.int_t t l | {
"end_col": 69,
"end_line": 778,
"start_col": 2,
"start_line": 778
} |
Prims.Tot | val gt: #t:inttype -> int_t t PUB -> int_t t PUB -> bool | [
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let gt #t x y =
match t with
| U1 -> UInt8.gt x y
| U8 -> UInt8.gt x y
| U16 -> UInt16.gt x y
| U32 -> UInt32.gt x y
| U64 -> UInt64.gt x y
| U128 -> UInt128.gt x y
| S8 -> Int8.gt x y
| S16 -> Int16.gt x y
| S32 -> Int32.gt x y
| S64 -> Int64.gt x y
| S128 -> Int128.gt x y | val gt: #t:inttype -> int_t t PUB -> int_t t PUB -> bool
let gt #t x y = | false | null | false | match t with
| U1 -> UInt8.gt x y
| U8 -> UInt8.gt x y
| U16 -> UInt16.gt x y
| U32 -> UInt32.gt x y
| U64 -> UInt64.gt x y
| U128 -> UInt128.gt x y
| S8 -> Int8.gt x y
| S16 -> Int16.gt x y
| S32 -> Int32.gt x y
| S64 -> Int64.gt x y
| S128 -> Int128.gt x y | {
"checked_file": "Lib.IntTypes.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt8.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt16.fsti.checked",
"FStar.UInt128.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int8.fsti.checked",
"FStar.Int64.fsti.checked",
"FStar.Int32.fsti.checked",
"FStar.Int16.fsti.checked",
"FStar.Int128.fsti.checked",
"FStar.Int.Cast.Full.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Int.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "Lib.IntTypes.fst"
} | [
"total"
] | [
"Lib.IntTypes.inttype",
"Lib.IntTypes.int_t",
"Lib.IntTypes.PUB",
"FStar.UInt8.gt",
"FStar.UInt16.gt",
"FStar.UInt32.gt",
"FStar.UInt64.gt",
"FStar.UInt128.gt",
"FStar.Int8.gt",
"FStar.Int16.gt",
"FStar.Int32.gt",
"FStar.Int64.gt",
"FStar.Int128.gt",
"Prims.bool"
] | [] | module Lib.IntTypes
open FStar.Math.Lemmas
#push-options "--max_fuel 0 --max_ifuel 1 --z3rlimit 200"
let pow2_2 _ = assert_norm (pow2 2 = 4)
let pow2_3 _ = assert_norm (pow2 3 = 8)
let pow2_4 _ = assert_norm (pow2 4 = 16)
let pow2_127 _ = assert_norm (pow2 127 = 0x80000000000000000000000000000000)
let bits_numbytes t = ()
let sec_int_t t = pub_int_t t
let sec_int_v #t u = pub_int_v u
let secret #t x = x
[@(strict_on_arguments [0])]
let mk_int #t #l x =
match t with
| U1 -> UInt8.uint_to_t x
| U8 -> UInt8.uint_to_t x
| U16 -> UInt16.uint_to_t x
| U32 -> UInt32.uint_to_t x
| U64 -> UInt64.uint_to_t x
| U128 -> UInt128.uint_to_t x
| S8 -> Int8.int_to_t x
| S16 -> Int16.int_to_t x
| S32 -> Int32.int_to_t x
| S64 -> Int64.int_to_t x
| S128 -> Int128.int_to_t x
val v_extensionality:
#t:inttype
-> #l:secrecy_level
-> a:int_t t l
-> b:int_t t l
-> Lemma
(requires v a == v b)
(ensures a == b)
let v_extensionality #t #l a b =
match t with
| U1 -> ()
| U8 -> UInt8.v_inj a b
| U16 -> UInt16.v_inj a b
| U32 -> UInt32.v_inj a b
| U64 -> UInt64.v_inj a b
| U128 -> UInt128.v_inj a b
| S8 -> Int8.v_inj a b
| S16 -> Int16.v_inj a b
| S32 -> Int32.v_inj a b
| S64 -> Int64.v_inj a b
| S128 -> Int128.v_inj a b
let v_injective #t #l a =
v_extensionality a (mk_int (v a))
let v_mk_int #t #l n = ()
let u128 n = FStar.UInt128.uint64_to_uint128 (u64 n)
// KaRaMeL will extract this to FStar_Int128_int_to_t, which isn't provided
// We'll need to have FStar.Int128.int64_to_int128 to support int128_t literals
let i128 n =
assert_norm (pow2 (bits S64 - 1) <= pow2 (bits S128 - 1));
sint #S128 #SEC n
let size_to_uint32 x = x
let size_to_uint64 x = Int.Cast.uint32_to_uint64 x
let byte_to_uint8 x = x
let byte_to_int8 x = Int.Cast.uint8_to_int8 x
let op_At_Percent = Int.op_At_Percent
// FStar.UInt128 gets special treatment in KaRaMeL. There is no
// equivalent for FStar.Int128 at the moment, so we use the three
// assumed cast operators below.
//
// Using them will fail at runtime with an informative message.
// The commented-out implementations show that they are realizable.
//
// When support for `FStar.Int128` is added KaRaMeL, these casts must
// be added as special cases. When using builtin compiler support for
// `int128_t`, they can be implemented directly as C casts without
// undefined or implementation-defined behaviour.
assume
val uint128_to_int128: a:UInt128.t{v a <= maxint S128} -> b:Int128.t{Int128.v b == UInt128.v a}
//let uint128_to_int128 a = Int128.int_to_t (v a)
assume
val int128_to_uint128: a:Int128.t -> b:UInt128.t{UInt128.v b == Int128.v a % pow2 128}
//let int128_to_uint128 a = mk_int (v a % pow2 128)
assume
val int64_to_int128: a:Int64.t -> b:Int128.t{Int128.v b == Int64.v a}
//let int64_to_int128 a = Int128.int_to_t (v a)
val uint64_to_int128: a:UInt64.t -> b:Int128.t{Int128.v b == UInt64.v a}
let uint64_to_int128 a = uint128_to_int128 (Int.Cast.Full.uint64_to_uint128 a)
val int64_to_uint128: a:Int64.t -> b:UInt128.t{UInt128.v b == Int64.v a % pow2 128}
let int64_to_uint128 a = int128_to_uint128 (int64_to_int128 a)
val int128_to_uint64: a:Int128.t -> b:UInt64.t{UInt64.v b == Int128.v a % pow2 64}
let int128_to_uint64 a = Int.Cast.Full.uint128_to_uint64 (int128_to_uint128 a)
#push-options "--z3rlimit 1000"
[@(strict_on_arguments [0;2])]
let cast #t #l t' l' u =
assert_norm (pow2 8 = 2 * pow2 7);
assert_norm (pow2 16 = 2 * pow2 15);
assert_norm (pow2 64 * pow2 64 = pow2 128);
assert_norm (pow2 16 * pow2 48 = pow2 64);
assert_norm (pow2 8 * pow2 56 = pow2 64);
assert_norm (pow2 32 * pow2 32 = pow2 64);
modulo_modulo_lemma (v u) (pow2 32) (pow2 32);
modulo_modulo_lemma (v u) (pow2 64) (pow2 64);
modulo_modulo_lemma (v u) (pow2 128) (pow2 64);
modulo_modulo_lemma (v u) (pow2 16) (pow2 48);
modulo_modulo_lemma (v u) (pow2 8) (pow2 56);
let open FStar.Int.Cast in
let open FStar.Int.Cast.Full in
match t, t' with
| U1, U1 -> u
| U1, U8 -> u
| U1, U16 -> uint8_to_uint16 u
| U1, U32 -> uint8_to_uint32 u
| U1, U64 -> uint8_to_uint64 u
| U1, U128 -> UInt128.uint64_to_uint128 (uint8_to_uint64 u)
| U1, S8 -> uint8_to_int8 u
| U1, S16 -> uint8_to_int16 u
| U1, S32 -> uint8_to_int32 u
| U1, S64 -> uint8_to_int64 u
| U1, S128 -> uint64_to_int128 (uint8_to_uint64 u)
| U8, U1 -> UInt8.rem u 2uy
| U8, U8 -> u
| U8, U16 -> uint8_to_uint16 u
| U8, U32 -> uint8_to_uint32 u
| U8, U64 -> uint8_to_uint64 u
| U8, U128 -> UInt128.uint64_to_uint128 (uint8_to_uint64 u)
| U8, S8 -> uint8_to_int8 u
| U8, S16 -> uint8_to_int16 u
| U8, S32 -> uint8_to_int32 u
| U8, S64 -> uint8_to_int64 u
| U8, S128 -> uint64_to_int128 (uint8_to_uint64 u)
| U16, U1 -> UInt8.rem (uint16_to_uint8 u) 2uy
| U16, U8 -> uint16_to_uint8 u
| U16, U16 -> u
| U16, U32 -> uint16_to_uint32 u
| U16, U64 -> uint16_to_uint64 u
| U16, U128 -> UInt128.uint64_to_uint128 (uint16_to_uint64 u)
| U16, S8 -> uint16_to_int8 u
| U16, S16 -> uint16_to_int16 u
| U16, S32 -> uint16_to_int32 u
| U16, S64 -> uint16_to_int64 u
| U16, S128 -> uint64_to_int128 (uint16_to_uint64 u)
| U32, U1 -> UInt8.rem (uint32_to_uint8 u) 2uy
| U32, U8 -> uint32_to_uint8 u
| U32, U16 -> uint32_to_uint16 u
| U32, U32 -> u
| U32, U64 -> uint32_to_uint64 u
| U32, U128 -> UInt128.uint64_to_uint128 (uint32_to_uint64 u)
| U32, S8 -> uint32_to_int8 u
| U32, S16 -> uint32_to_int16 u
| U32, S32 -> uint32_to_int32 u
| U32, S64 -> uint32_to_int64 u
| U32, S128 -> uint64_to_int128 (uint32_to_uint64 u)
| U64, U1 -> UInt8.rem (uint64_to_uint8 u) 2uy
| U64, U8 -> uint64_to_uint8 u
| U64, U16 -> uint64_to_uint16 u
| U64, U32 -> uint64_to_uint32 u
| U64, U64 -> u
| U64, U128 -> UInt128.uint64_to_uint128 u
| U64, S8 -> uint64_to_int8 u
| U64, S16 -> uint64_to_int16 u
| U64, S32 -> uint64_to_int32 u
| U64, S64 -> uint64_to_int64 u
| U64, S128 -> uint64_to_int128 u
| U128, U1 -> UInt8.rem (uint64_to_uint8 (uint128_to_uint64 u)) 2uy
| U128, U8 -> uint64_to_uint8 (UInt128.uint128_to_uint64 u)
| U128, U16 -> uint64_to_uint16 (UInt128.uint128_to_uint64 u)
| U128, U32 -> uint64_to_uint32 (UInt128.uint128_to_uint64 u)
| U128, U64 -> UInt128.uint128_to_uint64 u
| U128, U128 -> u
| U128, S8 -> uint64_to_int8 (UInt128.uint128_to_uint64 u)
| U128, S16 -> uint64_to_int16 (UInt128.uint128_to_uint64 u)
| U128, S32 -> uint64_to_int32 (UInt128.uint128_to_uint64 u)
| U128, S64 -> uint64_to_int64 (UInt128.uint128_to_uint64 u)
| U128, S128 -> uint128_to_int128 u
| S8, U1 -> UInt8.rem (int8_to_uint8 u) 2uy
| S8, U8 -> int8_to_uint8 u
| S8, U16 -> int8_to_uint16 u
| S8, U32 -> int8_to_uint32 u
| S8, U64 -> int8_to_uint64 u
| S8, U128 -> int64_to_uint128 (int8_to_int64 u)
| S8, S8 -> u
| S8, S16 -> int8_to_int16 u
| S8, S32 -> int8_to_int32 u
| S8, S64 -> int8_to_int64 u
| S8, S128 -> int64_to_int128 (int8_to_int64 u)
| S16, U1 -> UInt8.rem (int16_to_uint8 u) 2uy
| S16, U8 -> int16_to_uint8 u
| S16, U16 -> int16_to_uint16 u
| S16, U32 -> int16_to_uint32 u
| S16, U64 -> int16_to_uint64 u
| S16, U128 -> int64_to_uint128 (int16_to_int64 u)
| S16, S8 -> int16_to_int8 u
| S16, S16 -> u
| S16, S32 -> int16_to_int32 u
| S16, S64 -> int16_to_int64 u
| S16, S128 -> int64_to_int128 (int16_to_int64 u)
| S32, U1 -> UInt8.rem (int32_to_uint8 u) 2uy
| S32, U8 -> int32_to_uint8 u
| S32, U16 -> int32_to_uint16 u
| S32, U32 -> int32_to_uint32 u
| S32, U64 -> int32_to_uint64 u
| S32, U128 -> int64_to_uint128 (int32_to_int64 u)
| S32, S8 -> int32_to_int8 u
| S32, S16 -> int32_to_int16 u
| S32, S32 -> u
| S32, S64 -> int32_to_int64 u
| S32, S128 -> int64_to_int128 (int32_to_int64 u)
| S64, U1 -> UInt8.rem (int64_to_uint8 u) 2uy
| S64, U8 -> int64_to_uint8 u
| S64, U16 -> int64_to_uint16 u
| S64, U32 -> int64_to_uint32 u
| S64, U64 -> int64_to_uint64 u
| S64, U128 -> int64_to_uint128 u
| S64, S8 -> int64_to_int8 u
| S64, S16 -> int64_to_int16 u
| S64, S32 -> int64_to_int32 u
| S64, S64 -> u
| S64, S128 -> int64_to_int128 u
| S128, U1 -> UInt8.rem (uint64_to_uint8 (int128_to_uint64 u)) 2uy
| S128, U8 -> uint64_to_uint8 (int128_to_uint64 u)
| S128, U16 -> uint64_to_uint16 (int128_to_uint64 u)
| S128, U32 -> uint64_to_uint32 (int128_to_uint64 u)
| S128, U64 -> int128_to_uint64 u
| S128, U128 -> int128_to_uint128 u
| S128, S8 -> uint64_to_int8 (int128_to_uint64 u)
| S128, S16 -> uint64_to_int16 (int128_to_uint64 u)
| S128, S32 -> uint64_to_int32 (int128_to_uint64 u)
| S128, S64 -> uint64_to_int64 (int128_to_uint64 u)
| S128, S128 -> u
#pop-options
[@(strict_on_arguments [0])]
let ones t l =
match t with
| U1 -> 0x1uy
| U8 -> 0xFFuy
| U16 -> 0xFFFFus
| U32 -> 0xFFFFFFFFul
| U64 -> 0xFFFFFFFFFFFFFFFFuL
| U128 ->
let x = UInt128.uint64_to_uint128 0xFFFFFFFFFFFFFFFFuL in
let y = (UInt128.shift_left x 64ul) `UInt128.add` x in
assert_norm (UInt128.v y == pow2 128 - 1);
y
| _ -> mk_int (-1)
let zeros t l = mk_int 0
[@(strict_on_arguments [0])]
let add_mod #t #l a b =
match t with
| U1 -> UInt8.rem (UInt8.add_mod a b) 2uy
| U8 -> UInt8.add_mod a b
| U16 -> UInt16.add_mod a b
| U32 -> UInt32.add_mod a b
| U64 -> UInt64.add_mod a b
| U128 -> UInt128.add_mod a b
let add_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let add #t #l a b =
match t with
| U1 -> UInt8.add a b
| U8 -> UInt8.add a b
| U16 -> UInt16.add a b
| U32 -> UInt32.add a b
| U64 -> UInt64.add a b
| U128 -> UInt128.add a b
| S8 -> Int8.add a b
| S16 -> Int16.add a b
| S32 -> Int32.add a b
| S64 -> Int64.add a b
| S128 -> Int128.add a b
let add_lemma #t #l a b = ()
#push-options "--max_fuel 1"
[@(strict_on_arguments [0])]
let incr #t #l a =
match t with
| U1 -> UInt8.add a 1uy
| U8 -> UInt8.add a 1uy
| U16 -> UInt16.add a 1us
| U32 -> UInt32.add a 1ul
| U64 -> UInt64.add a 1uL
| U128 -> UInt128.add a (UInt128.uint_to_t 1)
| S8 -> Int8.add a 1y
| S16 -> Int16.add a 1s
| S32 -> Int32.add a 1l
| S64 -> Int64.add a 1L
| S128 -> Int128.add a (Int128.int_to_t 1)
let incr_lemma #t #l a = ()
#pop-options
[@(strict_on_arguments [0])]
let mul_mod #t #l a b =
match t with
| U1 -> UInt8.mul_mod a b
| U8 -> UInt8.mul_mod a b
| U16 -> UInt16.mul_mod a b
| U32 -> UInt32.mul_mod a b
| U64 -> UInt64.mul_mod a b
let mul_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let mul #t #l a b =
match t with
| U1 -> UInt8.mul a b
| U8 -> UInt8.mul a b
| U16 -> UInt16.mul a b
| U32 -> UInt32.mul a b
| U64 -> UInt64.mul a b
| S8 -> Int8.mul a b
| S16 -> Int16.mul a b
| S32 -> Int32.mul a b
| S64 -> Int64.mul a b
let mul_lemma #t #l a b = ()
let mul64_wide a b = UInt128.mul_wide a b
let mul64_wide_lemma a b = ()
let mul_s64_wide a b = Int128.mul_wide a b
let mul_s64_wide_lemma a b = ()
[@(strict_on_arguments [0])]
let sub_mod #t #l a b =
match t with
| U1 -> UInt8.rem (UInt8.sub_mod a b) 2uy
| U8 -> UInt8.sub_mod a b
| U16 -> UInt16.sub_mod a b
| U32 -> UInt32.sub_mod a b
| U64 -> UInt64.sub_mod a b
| U128 -> UInt128.sub_mod a b
let sub_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let sub #t #l a b =
match t with
| U1 -> UInt8.sub a b
| U8 -> UInt8.sub a b
| U16 -> UInt16.sub a b
| U32 -> UInt32.sub a b
| U64 -> UInt64.sub a b
| U128 -> UInt128.sub a b
| S8 -> Int8.sub a b
| S16 -> Int16.sub a b
| S32 -> Int32.sub a b
| S64 -> Int64.sub a b
| S128 -> Int128.sub a b
let sub_lemma #t #l a b = ()
#push-options "--max_fuel 1"
[@(strict_on_arguments [0])]
let decr #t #l a =
match t with
| U1 -> UInt8.sub a 1uy
| U8 -> UInt8.sub a 1uy
| U16 -> UInt16.sub a 1us
| U32 -> UInt32.sub a 1ul
| U64 -> UInt64.sub a 1uL
| U128 -> UInt128.sub a (UInt128.uint_to_t 1)
| S8 -> Int8.sub a 1y
| S16 -> Int16.sub a 1s
| S32 -> Int32.sub a 1l
| S64 -> Int64.sub a 1L
| S128 -> Int128.sub a (Int128.int_to_t 1)
let decr_lemma #t #l a = ()
#pop-options
[@(strict_on_arguments [0])]
let logxor #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logxor 0uy 0uy == 0uy);
assert_norm (UInt8.logxor 0uy 1uy == 1uy);
assert_norm (UInt8.logxor 1uy 0uy == 1uy);
assert_norm (UInt8.logxor 1uy 1uy == 0uy);
UInt8.logxor a b
| U8 -> UInt8.logxor a b
| U16 -> UInt16.logxor a b
| U32 -> UInt32.logxor a b
| U64 -> UInt64.logxor a b
| U128 -> UInt128.logxor a b
| S8 -> Int8.logxor a b
| S16 -> Int16.logxor a b
| S32 -> Int32.logxor a b
| S64 -> Int64.logxor a b
| S128 -> Int128.logxor a b
#push-options "--max_fuel 1"
val logxor_lemma_: #t:inttype -> #l:secrecy_level -> a:int_t t l -> b:int_t t l -> Lemma
(v (a `logxor` (a `logxor` b)) == v b)
let logxor_lemma_ #t #l a b =
match t with
| U1 | U8 | U16 | U32 | U64 | U128 ->
UInt.logxor_associative #(bits t) (v a) (v a) (v b);
UInt.logxor_self #(bits t) (v a);
UInt.logxor_commutative #(bits t) 0 (v b);
UInt.logxor_lemma_1 #(bits t) (v b)
| S8 | S16 | S32 | S64 | S128 ->
Int.logxor_associative #(bits t) (v a) (v a) (v b);
Int.logxor_self #(bits t) (v a);
Int.logxor_commutative #(bits t) 0 (v b);
Int.logxor_lemma_1 #(bits t) (v b)
let logxor_lemma #t #l a b =
logxor_lemma_ #t a b;
v_extensionality (logxor a (logxor a b)) b;
begin
match t with
| U1 | U8 | U16 | U32 | U64 | U128 -> UInt.logxor_commutative #(bits t) (v a) (v b)
| S8 | S16 | S32 | S64 | S128 -> Int.logxor_commutative #(bits t) (v a) (v b)
end;
v_extensionality (logxor a (logxor b a)) b;
begin
match t with
| U1 | U8 | U16 | U32 | U64 | U128 -> UInt.logxor_lemma_1 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.logxor_lemma_1 #(bits t) (v a)
end;
v_extensionality (logxor a (mk_int #t #l 0)) a
let logxor_lemma1 #t #l a b =
match v a, v b with
| _, 0 ->
UInt.logxor_lemma_1 #(bits t) (v a)
| 0, _ ->
UInt.logxor_commutative #(bits t) (v a) (v b);
UInt.logxor_lemma_1 #(bits t) (v b)
| 1, 1 ->
v_extensionality a b;
UInt.logxor_self #(bits t) (v a)
let logxor_spec #t #l a b =
match t with
| U1 ->
assert_norm (u1 0 `logxor` u1 0 == u1 0 /\ u1 0 `logxor` u1 1 == u1 1);
assert_norm (u1 1 `logxor` u1 0 == u1 1 /\ u1 1 `logxor` u1 1 == u1 0);
assert_norm (0 `logxor_v #U1` 0 == 0 /\ 0 `logxor_v #U1` 1 == 1);
assert_norm (1 `logxor_v #U1` 0 == 1 /\ 1 `logxor_v #U1` 1 == 0)
| _ -> ()
#pop-options
[@(strict_on_arguments [0])]
let logand #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logand 0uy 0uy == 0uy);
assert_norm (UInt8.logand 0uy 1uy == 0uy);
assert_norm (UInt8.logand 1uy 0uy == 0uy);
assert_norm (UInt8.logand 1uy 1uy == 1uy);
UInt8.logand a b
| U8 -> UInt8.logand a b
| U16 -> UInt16.logand a b
| U32 -> UInt32.logand a b
| U64 -> UInt64.logand a b
| U128 -> UInt128.logand a b
| S8 -> Int8.logand a b
| S16 -> Int16.logand a b
| S32 -> Int32.logand a b
| S64 -> Int64.logand a b
| S128 -> Int128.logand a b
let logand_zeros #t #l a =
match t with
| U1 -> assert_norm (u1 0 `logand` zeros U1 l == u1 0 /\ u1 1 `logand` zeros U1 l == u1 0)
| U8 | U16 | U32 | U64 | U128 -> UInt.logand_lemma_1 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.logand_lemma_1 #(bits t) (v a)
let logand_ones #t #l a =
match t with
| U1 -> assert_norm (u1 0 `logand` ones U1 l == u1 0 /\ u1 1 `logand` ones U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logand_lemma_2 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.logand_lemma_2 #(bits t) (v a)
let logand_lemma #t #l a b =
logand_zeros #t #l b;
logand_ones #t #l b;
match t with
| U1 ->
assert_norm (u1 0 `logand` zeros U1 l == u1 0 /\ u1 1 `logand` zeros U1 l == u1 0);
assert_norm (u1 0 `logand` ones U1 l == u1 0 /\ u1 1 `logand` ones U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logand_commutative #(bits t) (v a) (v b)
| S8 | S16 | S32 | S64 | S128 -> Int.logand_commutative #(bits t) (v a) (v b)
let logand_spec #t #l a b =
match t with
| U1 ->
assert_norm (u1 0 `logand` u1 0 == u1 0 /\ u1 0 `logand` u1 1 == u1 0);
assert_norm (u1 1 `logand` u1 0 == u1 0 /\ u1 1 `logand` u1 1 == u1 1);
assert_norm (0 `logand_v #U1` 0 == 0 /\ 0 `logand_v #U1` 1 == 0);
assert_norm (1 `logand_v #U1` 0 == 0 /\ 1 `logand_v #U1` 1 == 1)
| _ -> ()
let logand_le #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logand 0uy 0uy == 0uy);
assert_norm (UInt8.logand 0uy 1uy == 0uy);
assert_norm (UInt8.logand 1uy 0uy == 0uy);
assert_norm (UInt8.logand 1uy 1uy == 1uy)
| U8 -> UInt.logand_le (UInt.to_uint_t 8 (v a)) (UInt.to_uint_t 8 (v b))
| U16 -> UInt.logand_le (UInt.to_uint_t 16 (v a)) (UInt.to_uint_t 16 (v b))
| U32 -> UInt.logand_le (UInt.to_uint_t 32 (v a)) (UInt.to_uint_t 32 (v b))
| U64 -> UInt.logand_le (UInt.to_uint_t 64 (v a)) (UInt.to_uint_t 64 (v b))
| U128 -> UInt.logand_le (UInt.to_uint_t 128 (v a)) (UInt.to_uint_t 128 (v b))
let logand_mask #t #l a b m =
match t with
| U1 ->
assert_norm (UInt8.logand 0uy 0uy == 0uy);
assert_norm (UInt8.logand 0uy 1uy == 0uy);
assert_norm (UInt8.logand 1uy 0uy == 0uy);
assert_norm (UInt8.logand 1uy 1uy == 1uy)
| U8 -> UInt.logand_mask (UInt.to_uint_t 8 (v a)) m
| U16 -> UInt.logand_mask (UInt.to_uint_t 16 (v a)) m
| U32 -> UInt.logand_mask (UInt.to_uint_t 32 (v a)) m
| U64 -> UInt.logand_mask (UInt.to_uint_t 64 (v a)) m
| U128 -> UInt.logand_mask (UInt.to_uint_t 128 (v a)) m
[@(strict_on_arguments [0])]
let logor #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logor 0uy 0uy == 0uy);
assert_norm (UInt8.logor 0uy 1uy == 1uy);
assert_norm (UInt8.logor 1uy 0uy == 1uy);
assert_norm (UInt8.logor 1uy 1uy == 1uy);
UInt8.logor a b
| U8 -> UInt8.logor a b
| U16 -> UInt16.logor a b
| U32 -> UInt32.logor a b
| U64 -> UInt64.logor a b
| U128 -> UInt128.logor a b
| S8 -> Int8.logor a b
| S16 -> Int16.logor a b
| S32 -> Int32.logor a b
| S64 -> Int64.logor a b
| S128 -> Int128.logor a b
#push-options "--max_fuel 1"
let logor_disjoint #t #l a b m =
if m > 0 then
begin
UInt.logor_disjoint #(bits t) (v b) (v a) m;
UInt.logor_commutative #(bits t) (v b) (v a)
end
else
begin
UInt.logor_commutative #(bits t) (v a) (v b);
UInt.logor_lemma_1 #(bits t) (v b)
end
#pop-options
let logor_zeros #t #l a =
match t with
|U1 -> assert_norm(u1 0 `logor` zeros U1 l == u1 0 /\ u1 1 `logor` zeros U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logor_lemma_1 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.nth_lemma #(bits t) (Int.logor #(bits t) (v a) (Int.zero (bits t))) (v a)
let logor_ones #t #l a =
match t with
|U1 -> assert_norm(u1 0 `logor` ones U1 l == u1 1 /\ u1 1 `logor` ones U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logor_lemma_2 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.nth_lemma (Int.logor #(bits t) (v a) (Int.ones (bits t))) (Int.ones (bits t))
let logor_lemma #t #l a b =
logor_zeros #t #l b;
logor_ones #t #l b;
match t with
| U1 ->
assert_norm(u1 0 `logor` ones U1 l == u1 1 /\ u1 1 `logor` ones U1 l == u1 1);
assert_norm(u1 0 `logor` zeros U1 l == u1 0 /\ u1 1 `logor` zeros U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logor_commutative #(bits t) (v a) (v b)
| S8 | S16 | S32 | S64 | S128 -> Int.nth_lemma #(bits t) (Int.logor #(bits t) (v a) (v b)) (Int.logor #(bits t) (v b) (v a))
let logor_spec #t #l a b =
match t with
| U1 ->
assert_norm(u1 0 `logor` ones U1 l == u1 1 /\ u1 1 `logor` ones U1 l == u1 1);
assert_norm(u1 0 `logor` zeros U1 l == u1 0 /\ u1 1 `logor` zeros U1 l == u1 1);
assert_norm (0 `logor_v #U1` 0 == 0 /\ 0 `logor_v #U1` 1 == 1);
assert_norm (1 `logor_v #U1` 0 == 1 /\ 1 `logor_v #U1` 1 == 1)
| _ -> ()
[@(strict_on_arguments [0])]
let lognot #t #l a =
match t with
| U1 -> UInt8.rem (UInt8.lognot a) 2uy
| U8 -> UInt8.lognot a
| U16 -> UInt16.lognot a
| U32 -> UInt32.lognot a
| U64 -> UInt64.lognot a
| U128 -> UInt128.lognot a
| S8 -> Int8.lognot a
| S16 -> Int16.lognot a
| S32 -> Int32.lognot a
| S64 -> Int64.lognot a
| S128 -> Int128.lognot a
let lognot_lemma #t #l a =
match t with
|U1 -> assert_norm(lognot (u1 0) == u1 1 /\ lognot (u1 1) == u1 0)
| U8 | U16 | U32 | U64 | U128 ->
FStar.UInt.lognot_lemma_1 #(bits t);
UInt.nth_lemma (FStar.UInt.lognot #(bits t) (UInt.ones (bits t))) (UInt.zero (bits t))
| S8 | S16 | S32 | S64 | S128 ->
Int.nth_lemma (FStar.Int.lognot #(bits t) (Int.zero (bits t))) (Int.ones (bits t));
Int.nth_lemma (FStar.Int.lognot #(bits t) (Int.ones (bits t))) (Int.zero (bits t))
let lognot_spec #t #l a =
match t with
| U1 ->
assert_norm(lognot (u1 0) == u1 1 /\ lognot (u1 1) == u1 0);
assert_norm(lognot_v #U1 0 == 1 /\ lognot_v #U1 1 == 0)
| _ -> ()
[@(strict_on_arguments [0])]
let shift_right #t #l a b =
match t with
| U1 -> UInt8.shift_right a b
| U8 -> UInt8.shift_right a b
| U16 -> UInt16.shift_right a b
| U32 -> UInt32.shift_right a b
| U64 -> UInt64.shift_right a b
| U128 -> UInt128.shift_right a b
| S8 -> Int8.shift_arithmetic_right a b
| S16 -> Int16.shift_arithmetic_right a b
| S32 -> Int32.shift_arithmetic_right a b
| S64 -> Int64.shift_arithmetic_right a b
| S128 -> Int128.shift_arithmetic_right a b
val shift_right_value_aux_1: #n:pos{1 < n} -> a:Int.int_t n -> s:nat{n <= s} ->
Lemma (Int.shift_arithmetic_right #n a s = a / pow2 s)
let shift_right_value_aux_1 #n a s =
pow2_le_compat s n;
if a >= 0 then Int.sign_bit_positive a else Int.sign_bit_negative a
#push-options "--z3rlimit 200"
val shift_right_value_aux_2: #n:pos{1 < n} -> a:Int.int_t n ->
Lemma (Int.shift_arithmetic_right #n a 1 = a / 2)
let shift_right_value_aux_2 #n a =
if a >= 0 then
begin
Int.sign_bit_positive a;
UInt.shift_right_value_aux_3 #n a 1
end
else
begin
Int.sign_bit_negative a;
let a1 = Int.to_vec a in
let au = Int.to_uint a in
let sar = Int.shift_arithmetic_right #n a 1 in
let sar1 = Int.to_vec sar in
let sr = UInt.shift_right #n au 1 in
let sr1 = UInt.to_vec sr in
assert (Seq.equal (Seq.slice sar1 1 n) (Seq.slice sr1 1 n));
assert (Seq.equal sar1 (Seq.append (BitVector.ones_vec #1) (Seq.slice sr1 1 n)));
UInt.append_lemma #1 #(n-1) (BitVector.ones_vec #1) (Seq.slice sr1 1 n);
assert (Seq.equal (Seq.slice a1 0 (n-1)) (Seq.slice sar1 1 n));
UInt.slice_left_lemma a1 (n-1);
assert (sar + pow2 n = pow2 (n-1) + (au / 2));
pow2_double_sum (n-1);
assert (sar + pow2 (n-1) = (a + pow2 n) / 2);
pow2_double_mult (n-1);
lemma_div_plus a (pow2 (n-1)) 2;
assert (sar = a / 2)
end
val shift_right_value_aux_3: #n:pos -> a:Int.int_t n -> s:pos{s < n} ->
Lemma (ensures Int.shift_arithmetic_right #n a s = a / pow2 s)
(decreases s)
let rec shift_right_value_aux_3 #n a s =
if s = 1 then
shift_right_value_aux_2 #n a
else
begin
let a1 = Int.to_vec a in
assert (Seq.equal (BitVector.shift_arithmetic_right_vec #n a1 s)
(BitVector.shift_arithmetic_right_vec #n
(BitVector.shift_arithmetic_right_vec #n a1 (s-1)) 1));
assert (Int.shift_arithmetic_right #n a s =
Int.shift_arithmetic_right #n (Int.shift_arithmetic_right #n a (s-1)) 1);
shift_right_value_aux_3 #n a (s-1);
shift_right_value_aux_2 #n (Int.shift_arithmetic_right #n a (s-1));
assert (Int.shift_arithmetic_right #n a s = (a / pow2 (s-1)) / 2);
pow2_double_mult (s-1);
division_multiplication_lemma a (pow2 (s-1)) 2
end
let shift_right_lemma #t #l a b =
match t with
| U1 | U8 | U16 | U32 | U64 | U128 -> ()
| S8 | S16 | S32 | S64 | S128 ->
if v b = 0 then ()
else if v b >= bits t then
shift_right_value_aux_1 #(bits t) (v a) (v b)
else
shift_right_value_aux_3 #(bits t) (v a) (v b)
[@(strict_on_arguments [0])]
let shift_left #t #l a b =
match t with
| U1 -> UInt8.shift_left a b
| U8 -> UInt8.shift_left a b
| U16 -> UInt16.shift_left a b
| U32 -> UInt32.shift_left a b
| U64 -> UInt64.shift_left a b
| U128 -> UInt128.shift_left a b
| S8 -> Int8.shift_left a b
| S16 -> Int16.shift_left a b
| S32 -> Int32.shift_left a b
| S64 -> Int64.shift_left a b
| S128 -> Int128.shift_left a b
#push-options "--max_fuel 1"
let shift_left_lemma #t #l a b = ()
let rotate_right #t #l a b =
logor (shift_right a b) (shift_left a (sub #U32 (size (bits t)) b))
let rotate_left #t #l a b =
logor (shift_left a b) (shift_right a (sub #U32 (size (bits t)) b))
[@(strict_on_arguments [0])]
let ct_abs #t #l a =
match t with
| S8 -> Int8.ct_abs a
| S16 -> Int16.ct_abs a
| S32 -> Int32.ct_abs a
| S64 -> Int64.ct_abs a
#pop-options
[@(strict_on_arguments [0])]
let eq_mask #t a b =
match t with
| U1 -> lognot (logxor a b)
| U8 -> UInt8.eq_mask a b
| U16 -> UInt16.eq_mask a b
| U32 -> UInt32.eq_mask a b
| U64 -> UInt64.eq_mask a b
| U128 -> UInt128.eq_mask a b
| S8 -> Int.Cast.uint8_to_int8 (UInt8.eq_mask (to_u8 a) (to_u8 b))
| S16 -> Int.Cast.uint16_to_int16 (UInt16.eq_mask (to_u16 a) (to_u16 b))
| S32 -> Int.Cast.uint32_to_int32 (UInt32.eq_mask (to_u32 a) (to_u32 b))
| S64 -> Int.Cast.uint64_to_int64 (UInt64.eq_mask (to_u64 a) (to_u64 b))
val eq_mask_lemma_unsigned: #t:inttype{unsigned t} -> a:int_t t SEC -> b:int_t t SEC -> Lemma
(if v a = v b then v (eq_mask a b) == ones_v t
else v (eq_mask a b) == 0)
let eq_mask_lemma_unsigned #t a b =
match t with
| U1 ->
assert_norm (
logxor (u1 0) (u1 0) == u1 0 /\ logxor (u1 0) (u1 1) == u1 1 /\
logxor (u1 1) (u1 0) == u1 1 /\ logxor (u1 1) (u1 1) == u1 0 /\
lognot (u1 1) == u1 0 /\ lognot (u1 0) == u1 1)
| U8 | U16 | U32 | U64 | U128 -> ()
#push-options "--z3rlimit 200"
val eq_mask_lemma_signed: #t:inttype{signed t /\ ~(S128? t)} -> a:int_t t SEC -> b:int_t t SEC -> Lemma
(if v a = v b then v (eq_mask a b) == ones_v t
else v (eq_mask a b) == 0)
let eq_mask_lemma_signed #t a b =
match t with
| S8 ->
begin
assert_norm (pow2 8 = 2 * pow2 7);
if 0 <= v a then modulo_lemma (v a) (pow2 8)
else
begin
modulo_addition_lemma (v a) 1 (pow2 8);
modulo_lemma (v a + pow2 8) (pow2 8)
end
end
| S16 ->
begin
assert_norm (pow2 16 = 2 * pow2 15);
if 0 <= v a then modulo_lemma (v a) (pow2 16)
else
begin
modulo_addition_lemma (v a) 1 (pow2 16);
modulo_lemma (v a + pow2 16) (pow2 16)
end
end
| S32 ->
begin
if 0 <= v a then modulo_lemma (v a) (pow2 32)
else
begin
modulo_addition_lemma (v a) 1 (pow2 32);
modulo_lemma (v a + pow2 32) (pow2 32)
end
end
| S64 ->
begin
if 0 <= v a then modulo_lemma (v a) (pow2 64)
else
begin
modulo_addition_lemma (v a) 1 (pow2 64);
modulo_lemma (v a + pow2 64) (pow2 64)
end
end
#pop-options
let eq_mask_lemma #t a b =
if signed t then eq_mask_lemma_signed a b
else eq_mask_lemma_unsigned a b
let eq_mask_logand_lemma #t a b c =
eq_mask_lemma a b;
logand_zeros c;
logand_ones c;
match t with
| U1 | U8 | U16 | U32 | U64 | U128 -> UInt.logand_commutative #(bits t) (v (eq_mask a b)) (v c)
| S8 | S16 | S32 | S64 -> Int.logand_commutative #(bits t) (v (eq_mask a b)) (v c)
[@(strict_on_arguments [0])]
let neq_mask #t a b = lognot (eq_mask #t a b)
let neq_mask_lemma #t a b =
match t with
| U1 -> assert_norm (lognot (u1 1) == u1 0 /\ lognot (u1 0) == u1 1)
| _ ->
UInt.lognot_lemma_1 #(bits t);
UInt.lognot_self #(bits t) 0
[@(strict_on_arguments [0])]
let gte_mask #t a b =
match t with
| U1 -> logor a (lognot b)
| U8 -> UInt8.gte_mask a b
| U16 -> UInt16.gte_mask a b
| U32 -> UInt32.gte_mask a b
| U64 -> UInt64.gte_mask a b
| U128 -> UInt128.gte_mask a b
let gte_mask_lemma #t a b =
match t with
| U1 ->
begin
assert_norm (
logor (u1 0) (u1 0) == u1 0 /\ logor (u1 1) (u1 1) == u1 1 /\
logor (u1 0) (u1 1) == u1 1 /\ logor (u1 1) (u1 0) == u1 1 /\
lognot (u1 1) == u1 0 /\ lognot (u1 0) == u1 1)
end
| _ -> ()
let gte_mask_logand_lemma #t a b c =
logand_zeros c;
logand_ones c;
match t with
| U1 ->
assert_norm (
logor (u1 0) (u1 0) == u1 0 /\ logor (u1 1) (u1 1) == u1 1 /\
logor (u1 0) (u1 1) == u1 1 /\ logor (u1 1) (u1 0) == u1 1 /\
lognot (u1 1) == u1 0 /\ lognot (u1 0) == u1 1)
| _ -> UInt.logand_commutative #(bits t) (v (gte_mask a b)) (v c)
let lt_mask #t a b = lognot (gte_mask a b)
let lt_mask_lemma #t a b =
assert_norm (lognot (u1 1) == u1 0 /\ lognot (u1 0) == u1 1);
UInt.lognot_lemma_1 #(bits t);
UInt.lognot_self #(bits t) 0
let gt_mask #t a b = logand (gte_mask a b) (neq_mask a b)
let gt_mask_lemma #t a b =
logand_zeros (gte_mask a b);
logand_ones (gte_mask a b)
let lte_mask #t a b = logor (lt_mask a b) (eq_mask a b)
let lte_mask_lemma #t a b =
match t with
| U1 ->
assert_norm (
logor (u1 0) (u1 0) == u1 0 /\ logor (u1 1) (u1 1) == u1 1 /\
logor (u1 0) (u1 1) == u1 1 /\ logor (u1 1) (u1 0) == u1 1)
| U8 | U16 | U32 | U64 | U128 ->
if v a > v b then
UInt.logor_lemma_1 #(bits t) (v (lt_mask a b))
else if v a = v b then
UInt.logor_lemma_2 #(bits t) (v (lt_mask a b))
else
UInt.logor_lemma_1 #(bits t) (v (lt_mask a b))
#push-options "--max_fuel 1"
val mod_mask_value: #t:inttype -> #l:secrecy_level -> m:shiftval t{pow2 (uint_v m) <= maxint t} ->
Lemma (v (mod_mask #t #l m) == pow2 (v m) - 1)
let mod_mask_value #t #l m =
shift_left_lemma (mk_int #t #l 1) m;
pow2_double_mult (bits t - 1);
pow2_lt_compat (bits t) (v m);
small_modulo_lemma_1 (pow2 (v m)) (pow2 (bits t));
small_modulo_lemma_1 (pow2 (v m) - 1) (pow2 (bits t))
let mod_mask_lemma #t #l a m =
mod_mask_value #t #l m;
if unsigned t || 0 <= v a then
if v m = 0 then
UInt.logand_lemma_1 #(bits t) (v a)
else
UInt.logand_mask #(bits t) (v a) (v m)
else
begin
let a1 = v a in
let a2 = v a + pow2 (bits t) in
pow2_plus (bits t - v m) (v m);
pow2_le_compat (bits t - 1) (v m);
lemma_mod_plus a1 (pow2 (bits t - v m)) (pow2 (v m));
if v m = 0 then
UInt.logand_lemma_1 #(bits t) a2
else
UInt.logand_mask #(bits t) a2 (v m)
end
#pop-options
#push-options "--max_fuel 0 --max_ifuel 0 --z3rlimit 1000"
(**
Conditionally subtracts 2^(bits t') from a in constant-time,
so that the result fits in t'; i.e.
b = if a >= 2^(bits t' - 1) then a - 2^(bits t') else a
*)
inline_for_extraction
val conditional_subtract:
#t:inttype{signed t}
-> #l:secrecy_level
-> t':inttype{signed t' /\ bits t' < bits t}
-> a:int_t t l{0 <= v a /\ v a <= pow2 (bits t') - 1}
-> b:int_t t l{v b = v a @%. t'}
let conditional_subtract #t #l t' a =
assert_norm (pow2 7 = 128);
assert_norm (pow2 15 = 32768);
let pow2_bits = shift_left #t #l (mk_int 1) (size (bits t')) in
shift_left_lemma #t #l (mk_int 1) (size (bits t'));
let pow2_bits_minus_one = shift_left #t #l (mk_int 1) (size (bits t' - 1)) in
shift_left_lemma #t #l (mk_int 1) (size (bits t' - 1));
// assert (v pow2_bits == pow2 (bits t'));
// assert (v pow2_bits_minus_one == pow2 (bits t' - 1));
let a2 = a `sub` pow2_bits_minus_one in
let mask = shift_right a2 (size (bits t - 1)) in
shift_right_lemma a2 (size (bits t - 1));
// assert (if v a2 < 0 then v mask = -1 else v mask = 0);
let a3 = a `sub` pow2_bits in
logand_lemma mask pow2_bits;
a3 `add` (mask `logand` pow2_bits)
let cast_mod #t #l t' l' a =
assert_norm (pow2 7 = 128);
assert_norm (pow2 15 = 32768);
if bits t' >= bits t then
cast t' l' a
else
begin
let m = size (bits t') in
mod_mask_lemma a m;
let b = conditional_subtract t' (a `logand` mod_mask m) in
cast t' l' b
end
#pop-options
[@(strict_on_arguments [0])]
let div #t x y =
match t with
| U1 -> UInt8.div x y
| U8 -> UInt8.div x y
| U16 -> UInt16.div x y
| U32 -> UInt32.div x y
| U64 -> UInt64.div x y
| S8 -> Int.pow2_values 8; Int8.div x y
| S16 -> Int.pow2_values 16; Int16.div x y
| S32 -> Int.pow2_values 32; Int32.div x y
| S64 -> Int.pow2_values 64; Int64.div x y
let div_lemma #t a b =
match t with
| U1 | U8 | U16 | U32 | U64 -> ()
| S8 -> Int.pow2_values 8
| S16 -> Int.pow2_values 16
| S32 -> Int.pow2_values 32
| S64 -> Int.pow2_values 64
let mod #t x y =
match t with
| U1 -> UInt8.rem x y
| U8 -> UInt8.rem x y
| U16 -> UInt16.rem x y
| U32 -> UInt32.rem x y
| U64 -> UInt64.rem x y
| S8 -> Int.pow2_values 8; Int8.rem x y
| S16 -> Int.pow2_values 16; Int16.rem x y
| S32 -> Int.pow2_values 32; Int32.rem x y
| S64 -> Int.pow2_values 64; Int64.rem x y
let mod_lemma #t a b =
match t with
| U1 | U8 | U16 | U32 | U64 -> ()
| S8 -> Int.pow2_values 8
| S16 -> Int.pow2_values 16
| S32 -> Int.pow2_values 32
| S64 -> Int.pow2_values 64
let eq #t x y =
x = y
let eq_lemma #t x y = ()
let ne #t x y = not (eq x y)
let ne_lemma #t x y = ()
let lt #t x y =
match t with
| U1 -> UInt8.lt x y
| U8 -> UInt8.lt x y
| U16 -> UInt16.lt x y
| U32 -> UInt32.lt x y
| U64 -> UInt64.lt x y
| U128 -> UInt128.lt x y
| S8 -> Int8.lt x y
| S16 -> Int16.lt x y
| S32 -> Int32.lt x y
| S64 -> Int64.lt x y
| S128 -> Int128.lt x y
let lt_lemma #t x y = ()
let lte #t x y =
match t with
| U1 -> UInt8.lte x y
| U8 -> UInt8.lte x y
| U16 -> UInt16.lte x y
| U32 -> UInt32.lte x y
| U64 -> UInt64.lte x y
| U128 -> UInt128.lte x y
| S8 -> Int8.lte x y
| S16 -> Int16.lte x y
| S32 -> Int32.lte x y
| S64 -> Int64.lte x y
| S128 -> Int128.lte x y
let lte_lemma #t x y = () | false | false | Lib.IntTypes.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 200,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val gt: #t:inttype -> int_t t PUB -> int_t t PUB -> bool | [] | Lib.IntTypes.gt | {
"file_name": "lib/Lib.IntTypes.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | x: Lib.IntTypes.int_t t Lib.IntTypes.PUB -> y: Lib.IntTypes.int_t t Lib.IntTypes.PUB -> Prims.bool | {
"end_col": 25,
"end_line": 1120,
"start_col": 2,
"start_line": 1109
} |
Prims.Tot | val lognot: #t:inttype -> #l:secrecy_level -> int_t t l -> int_t t l | [
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let lognot #t #l a =
match t with
| U1 -> UInt8.rem (UInt8.lognot a) 2uy
| U8 -> UInt8.lognot a
| U16 -> UInt16.lognot a
| U32 -> UInt32.lognot a
| U64 -> UInt64.lognot a
| U128 -> UInt128.lognot a
| S8 -> Int8.lognot a
| S16 -> Int16.lognot a
| S32 -> Int32.lognot a
| S64 -> Int64.lognot a
| S128 -> Int128.lognot a | val lognot: #t:inttype -> #l:secrecy_level -> int_t t l -> int_t t l
let lognot #t #l a = | false | null | false | match t with
| U1 -> UInt8.rem (UInt8.lognot a) 2uy
| U8 -> UInt8.lognot a
| U16 -> UInt16.lognot a
| U32 -> UInt32.lognot a
| U64 -> UInt64.lognot a
| U128 -> UInt128.lognot a
| S8 -> Int8.lognot a
| S16 -> Int16.lognot a
| S32 -> Int32.lognot a
| S64 -> Int64.lognot a
| S128 -> Int128.lognot a | {
"checked_file": "Lib.IntTypes.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt8.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt16.fsti.checked",
"FStar.UInt128.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int8.fsti.checked",
"FStar.Int64.fsti.checked",
"FStar.Int32.fsti.checked",
"FStar.Int16.fsti.checked",
"FStar.Int128.fsti.checked",
"FStar.Int.Cast.Full.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Int.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "Lib.IntTypes.fst"
} | [
"total"
] | [
"Lib.IntTypes.inttype",
"Lib.IntTypes.secrecy_level",
"Lib.IntTypes.int_t",
"FStar.UInt8.rem",
"FStar.UInt8.lognot",
"FStar.UInt8.__uint_to_t",
"FStar.UInt16.lognot",
"FStar.UInt32.lognot",
"FStar.UInt64.lognot",
"FStar.UInt128.lognot",
"FStar.Int8.lognot",
"FStar.Int16.lognot",
"FStar.Int32.lognot",
"FStar.Int64.lognot",
"FStar.Int128.lognot"
] | [] | module Lib.IntTypes
open FStar.Math.Lemmas
#push-options "--max_fuel 0 --max_ifuel 1 --z3rlimit 200"
let pow2_2 _ = assert_norm (pow2 2 = 4)
let pow2_3 _ = assert_norm (pow2 3 = 8)
let pow2_4 _ = assert_norm (pow2 4 = 16)
let pow2_127 _ = assert_norm (pow2 127 = 0x80000000000000000000000000000000)
let bits_numbytes t = ()
let sec_int_t t = pub_int_t t
let sec_int_v #t u = pub_int_v u
let secret #t x = x
[@(strict_on_arguments [0])]
let mk_int #t #l x =
match t with
| U1 -> UInt8.uint_to_t x
| U8 -> UInt8.uint_to_t x
| U16 -> UInt16.uint_to_t x
| U32 -> UInt32.uint_to_t x
| U64 -> UInt64.uint_to_t x
| U128 -> UInt128.uint_to_t x
| S8 -> Int8.int_to_t x
| S16 -> Int16.int_to_t x
| S32 -> Int32.int_to_t x
| S64 -> Int64.int_to_t x
| S128 -> Int128.int_to_t x
val v_extensionality:
#t:inttype
-> #l:secrecy_level
-> a:int_t t l
-> b:int_t t l
-> Lemma
(requires v a == v b)
(ensures a == b)
let v_extensionality #t #l a b =
match t with
| U1 -> ()
| U8 -> UInt8.v_inj a b
| U16 -> UInt16.v_inj a b
| U32 -> UInt32.v_inj a b
| U64 -> UInt64.v_inj a b
| U128 -> UInt128.v_inj a b
| S8 -> Int8.v_inj a b
| S16 -> Int16.v_inj a b
| S32 -> Int32.v_inj a b
| S64 -> Int64.v_inj a b
| S128 -> Int128.v_inj a b
let v_injective #t #l a =
v_extensionality a (mk_int (v a))
let v_mk_int #t #l n = ()
let u128 n = FStar.UInt128.uint64_to_uint128 (u64 n)
// KaRaMeL will extract this to FStar_Int128_int_to_t, which isn't provided
// We'll need to have FStar.Int128.int64_to_int128 to support int128_t literals
let i128 n =
assert_norm (pow2 (bits S64 - 1) <= pow2 (bits S128 - 1));
sint #S128 #SEC n
let size_to_uint32 x = x
let size_to_uint64 x = Int.Cast.uint32_to_uint64 x
let byte_to_uint8 x = x
let byte_to_int8 x = Int.Cast.uint8_to_int8 x
let op_At_Percent = Int.op_At_Percent
// FStar.UInt128 gets special treatment in KaRaMeL. There is no
// equivalent for FStar.Int128 at the moment, so we use the three
// assumed cast operators below.
//
// Using them will fail at runtime with an informative message.
// The commented-out implementations show that they are realizable.
//
// When support for `FStar.Int128` is added KaRaMeL, these casts must
// be added as special cases. When using builtin compiler support for
// `int128_t`, they can be implemented directly as C casts without
// undefined or implementation-defined behaviour.
assume
val uint128_to_int128: a:UInt128.t{v a <= maxint S128} -> b:Int128.t{Int128.v b == UInt128.v a}
//let uint128_to_int128 a = Int128.int_to_t (v a)
assume
val int128_to_uint128: a:Int128.t -> b:UInt128.t{UInt128.v b == Int128.v a % pow2 128}
//let int128_to_uint128 a = mk_int (v a % pow2 128)
assume
val int64_to_int128: a:Int64.t -> b:Int128.t{Int128.v b == Int64.v a}
//let int64_to_int128 a = Int128.int_to_t (v a)
val uint64_to_int128: a:UInt64.t -> b:Int128.t{Int128.v b == UInt64.v a}
let uint64_to_int128 a = uint128_to_int128 (Int.Cast.Full.uint64_to_uint128 a)
val int64_to_uint128: a:Int64.t -> b:UInt128.t{UInt128.v b == Int64.v a % pow2 128}
let int64_to_uint128 a = int128_to_uint128 (int64_to_int128 a)
val int128_to_uint64: a:Int128.t -> b:UInt64.t{UInt64.v b == Int128.v a % pow2 64}
let int128_to_uint64 a = Int.Cast.Full.uint128_to_uint64 (int128_to_uint128 a)
#push-options "--z3rlimit 1000"
[@(strict_on_arguments [0;2])]
let cast #t #l t' l' u =
assert_norm (pow2 8 = 2 * pow2 7);
assert_norm (pow2 16 = 2 * pow2 15);
assert_norm (pow2 64 * pow2 64 = pow2 128);
assert_norm (pow2 16 * pow2 48 = pow2 64);
assert_norm (pow2 8 * pow2 56 = pow2 64);
assert_norm (pow2 32 * pow2 32 = pow2 64);
modulo_modulo_lemma (v u) (pow2 32) (pow2 32);
modulo_modulo_lemma (v u) (pow2 64) (pow2 64);
modulo_modulo_lemma (v u) (pow2 128) (pow2 64);
modulo_modulo_lemma (v u) (pow2 16) (pow2 48);
modulo_modulo_lemma (v u) (pow2 8) (pow2 56);
let open FStar.Int.Cast in
let open FStar.Int.Cast.Full in
match t, t' with
| U1, U1 -> u
| U1, U8 -> u
| U1, U16 -> uint8_to_uint16 u
| U1, U32 -> uint8_to_uint32 u
| U1, U64 -> uint8_to_uint64 u
| U1, U128 -> UInt128.uint64_to_uint128 (uint8_to_uint64 u)
| U1, S8 -> uint8_to_int8 u
| U1, S16 -> uint8_to_int16 u
| U1, S32 -> uint8_to_int32 u
| U1, S64 -> uint8_to_int64 u
| U1, S128 -> uint64_to_int128 (uint8_to_uint64 u)
| U8, U1 -> UInt8.rem u 2uy
| U8, U8 -> u
| U8, U16 -> uint8_to_uint16 u
| U8, U32 -> uint8_to_uint32 u
| U8, U64 -> uint8_to_uint64 u
| U8, U128 -> UInt128.uint64_to_uint128 (uint8_to_uint64 u)
| U8, S8 -> uint8_to_int8 u
| U8, S16 -> uint8_to_int16 u
| U8, S32 -> uint8_to_int32 u
| U8, S64 -> uint8_to_int64 u
| U8, S128 -> uint64_to_int128 (uint8_to_uint64 u)
| U16, U1 -> UInt8.rem (uint16_to_uint8 u) 2uy
| U16, U8 -> uint16_to_uint8 u
| U16, U16 -> u
| U16, U32 -> uint16_to_uint32 u
| U16, U64 -> uint16_to_uint64 u
| U16, U128 -> UInt128.uint64_to_uint128 (uint16_to_uint64 u)
| U16, S8 -> uint16_to_int8 u
| U16, S16 -> uint16_to_int16 u
| U16, S32 -> uint16_to_int32 u
| U16, S64 -> uint16_to_int64 u
| U16, S128 -> uint64_to_int128 (uint16_to_uint64 u)
| U32, U1 -> UInt8.rem (uint32_to_uint8 u) 2uy
| U32, U8 -> uint32_to_uint8 u
| U32, U16 -> uint32_to_uint16 u
| U32, U32 -> u
| U32, U64 -> uint32_to_uint64 u
| U32, U128 -> UInt128.uint64_to_uint128 (uint32_to_uint64 u)
| U32, S8 -> uint32_to_int8 u
| U32, S16 -> uint32_to_int16 u
| U32, S32 -> uint32_to_int32 u
| U32, S64 -> uint32_to_int64 u
| U32, S128 -> uint64_to_int128 (uint32_to_uint64 u)
| U64, U1 -> UInt8.rem (uint64_to_uint8 u) 2uy
| U64, U8 -> uint64_to_uint8 u
| U64, U16 -> uint64_to_uint16 u
| U64, U32 -> uint64_to_uint32 u
| U64, U64 -> u
| U64, U128 -> UInt128.uint64_to_uint128 u
| U64, S8 -> uint64_to_int8 u
| U64, S16 -> uint64_to_int16 u
| U64, S32 -> uint64_to_int32 u
| U64, S64 -> uint64_to_int64 u
| U64, S128 -> uint64_to_int128 u
| U128, U1 -> UInt8.rem (uint64_to_uint8 (uint128_to_uint64 u)) 2uy
| U128, U8 -> uint64_to_uint8 (UInt128.uint128_to_uint64 u)
| U128, U16 -> uint64_to_uint16 (UInt128.uint128_to_uint64 u)
| U128, U32 -> uint64_to_uint32 (UInt128.uint128_to_uint64 u)
| U128, U64 -> UInt128.uint128_to_uint64 u
| U128, U128 -> u
| U128, S8 -> uint64_to_int8 (UInt128.uint128_to_uint64 u)
| U128, S16 -> uint64_to_int16 (UInt128.uint128_to_uint64 u)
| U128, S32 -> uint64_to_int32 (UInt128.uint128_to_uint64 u)
| U128, S64 -> uint64_to_int64 (UInt128.uint128_to_uint64 u)
| U128, S128 -> uint128_to_int128 u
| S8, U1 -> UInt8.rem (int8_to_uint8 u) 2uy
| S8, U8 -> int8_to_uint8 u
| S8, U16 -> int8_to_uint16 u
| S8, U32 -> int8_to_uint32 u
| S8, U64 -> int8_to_uint64 u
| S8, U128 -> int64_to_uint128 (int8_to_int64 u)
| S8, S8 -> u
| S8, S16 -> int8_to_int16 u
| S8, S32 -> int8_to_int32 u
| S8, S64 -> int8_to_int64 u
| S8, S128 -> int64_to_int128 (int8_to_int64 u)
| S16, U1 -> UInt8.rem (int16_to_uint8 u) 2uy
| S16, U8 -> int16_to_uint8 u
| S16, U16 -> int16_to_uint16 u
| S16, U32 -> int16_to_uint32 u
| S16, U64 -> int16_to_uint64 u
| S16, U128 -> int64_to_uint128 (int16_to_int64 u)
| S16, S8 -> int16_to_int8 u
| S16, S16 -> u
| S16, S32 -> int16_to_int32 u
| S16, S64 -> int16_to_int64 u
| S16, S128 -> int64_to_int128 (int16_to_int64 u)
| S32, U1 -> UInt8.rem (int32_to_uint8 u) 2uy
| S32, U8 -> int32_to_uint8 u
| S32, U16 -> int32_to_uint16 u
| S32, U32 -> int32_to_uint32 u
| S32, U64 -> int32_to_uint64 u
| S32, U128 -> int64_to_uint128 (int32_to_int64 u)
| S32, S8 -> int32_to_int8 u
| S32, S16 -> int32_to_int16 u
| S32, S32 -> u
| S32, S64 -> int32_to_int64 u
| S32, S128 -> int64_to_int128 (int32_to_int64 u)
| S64, U1 -> UInt8.rem (int64_to_uint8 u) 2uy
| S64, U8 -> int64_to_uint8 u
| S64, U16 -> int64_to_uint16 u
| S64, U32 -> int64_to_uint32 u
| S64, U64 -> int64_to_uint64 u
| S64, U128 -> int64_to_uint128 u
| S64, S8 -> int64_to_int8 u
| S64, S16 -> int64_to_int16 u
| S64, S32 -> int64_to_int32 u
| S64, S64 -> u
| S64, S128 -> int64_to_int128 u
| S128, U1 -> UInt8.rem (uint64_to_uint8 (int128_to_uint64 u)) 2uy
| S128, U8 -> uint64_to_uint8 (int128_to_uint64 u)
| S128, U16 -> uint64_to_uint16 (int128_to_uint64 u)
| S128, U32 -> uint64_to_uint32 (int128_to_uint64 u)
| S128, U64 -> int128_to_uint64 u
| S128, U128 -> int128_to_uint128 u
| S128, S8 -> uint64_to_int8 (int128_to_uint64 u)
| S128, S16 -> uint64_to_int16 (int128_to_uint64 u)
| S128, S32 -> uint64_to_int32 (int128_to_uint64 u)
| S128, S64 -> uint64_to_int64 (int128_to_uint64 u)
| S128, S128 -> u
#pop-options
[@(strict_on_arguments [0])]
let ones t l =
match t with
| U1 -> 0x1uy
| U8 -> 0xFFuy
| U16 -> 0xFFFFus
| U32 -> 0xFFFFFFFFul
| U64 -> 0xFFFFFFFFFFFFFFFFuL
| U128 ->
let x = UInt128.uint64_to_uint128 0xFFFFFFFFFFFFFFFFuL in
let y = (UInt128.shift_left x 64ul) `UInt128.add` x in
assert_norm (UInt128.v y == pow2 128 - 1);
y
| _ -> mk_int (-1)
let zeros t l = mk_int 0
[@(strict_on_arguments [0])]
let add_mod #t #l a b =
match t with
| U1 -> UInt8.rem (UInt8.add_mod a b) 2uy
| U8 -> UInt8.add_mod a b
| U16 -> UInt16.add_mod a b
| U32 -> UInt32.add_mod a b
| U64 -> UInt64.add_mod a b
| U128 -> UInt128.add_mod a b
let add_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let add #t #l a b =
match t with
| U1 -> UInt8.add a b
| U8 -> UInt8.add a b
| U16 -> UInt16.add a b
| U32 -> UInt32.add a b
| U64 -> UInt64.add a b
| U128 -> UInt128.add a b
| S8 -> Int8.add a b
| S16 -> Int16.add a b
| S32 -> Int32.add a b
| S64 -> Int64.add a b
| S128 -> Int128.add a b
let add_lemma #t #l a b = ()
#push-options "--max_fuel 1"
[@(strict_on_arguments [0])]
let incr #t #l a =
match t with
| U1 -> UInt8.add a 1uy
| U8 -> UInt8.add a 1uy
| U16 -> UInt16.add a 1us
| U32 -> UInt32.add a 1ul
| U64 -> UInt64.add a 1uL
| U128 -> UInt128.add a (UInt128.uint_to_t 1)
| S8 -> Int8.add a 1y
| S16 -> Int16.add a 1s
| S32 -> Int32.add a 1l
| S64 -> Int64.add a 1L
| S128 -> Int128.add a (Int128.int_to_t 1)
let incr_lemma #t #l a = ()
#pop-options
[@(strict_on_arguments [0])]
let mul_mod #t #l a b =
match t with
| U1 -> UInt8.mul_mod a b
| U8 -> UInt8.mul_mod a b
| U16 -> UInt16.mul_mod a b
| U32 -> UInt32.mul_mod a b
| U64 -> UInt64.mul_mod a b
let mul_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let mul #t #l a b =
match t with
| U1 -> UInt8.mul a b
| U8 -> UInt8.mul a b
| U16 -> UInt16.mul a b
| U32 -> UInt32.mul a b
| U64 -> UInt64.mul a b
| S8 -> Int8.mul a b
| S16 -> Int16.mul a b
| S32 -> Int32.mul a b
| S64 -> Int64.mul a b
let mul_lemma #t #l a b = ()
let mul64_wide a b = UInt128.mul_wide a b
let mul64_wide_lemma a b = ()
let mul_s64_wide a b = Int128.mul_wide a b
let mul_s64_wide_lemma a b = ()
[@(strict_on_arguments [0])]
let sub_mod #t #l a b =
match t with
| U1 -> UInt8.rem (UInt8.sub_mod a b) 2uy
| U8 -> UInt8.sub_mod a b
| U16 -> UInt16.sub_mod a b
| U32 -> UInt32.sub_mod a b
| U64 -> UInt64.sub_mod a b
| U128 -> UInt128.sub_mod a b
let sub_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let sub #t #l a b =
match t with
| U1 -> UInt8.sub a b
| U8 -> UInt8.sub a b
| U16 -> UInt16.sub a b
| U32 -> UInt32.sub a b
| U64 -> UInt64.sub a b
| U128 -> UInt128.sub a b
| S8 -> Int8.sub a b
| S16 -> Int16.sub a b
| S32 -> Int32.sub a b
| S64 -> Int64.sub a b
| S128 -> Int128.sub a b
let sub_lemma #t #l a b = ()
#push-options "--max_fuel 1"
[@(strict_on_arguments [0])]
let decr #t #l a =
match t with
| U1 -> UInt8.sub a 1uy
| U8 -> UInt8.sub a 1uy
| U16 -> UInt16.sub a 1us
| U32 -> UInt32.sub a 1ul
| U64 -> UInt64.sub a 1uL
| U128 -> UInt128.sub a (UInt128.uint_to_t 1)
| S8 -> Int8.sub a 1y
| S16 -> Int16.sub a 1s
| S32 -> Int32.sub a 1l
| S64 -> Int64.sub a 1L
| S128 -> Int128.sub a (Int128.int_to_t 1)
let decr_lemma #t #l a = ()
#pop-options
[@(strict_on_arguments [0])]
let logxor #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logxor 0uy 0uy == 0uy);
assert_norm (UInt8.logxor 0uy 1uy == 1uy);
assert_norm (UInt8.logxor 1uy 0uy == 1uy);
assert_norm (UInt8.logxor 1uy 1uy == 0uy);
UInt8.logxor a b
| U8 -> UInt8.logxor a b
| U16 -> UInt16.logxor a b
| U32 -> UInt32.logxor a b
| U64 -> UInt64.logxor a b
| U128 -> UInt128.logxor a b
| S8 -> Int8.logxor a b
| S16 -> Int16.logxor a b
| S32 -> Int32.logxor a b
| S64 -> Int64.logxor a b
| S128 -> Int128.logxor a b
#push-options "--max_fuel 1"
val logxor_lemma_: #t:inttype -> #l:secrecy_level -> a:int_t t l -> b:int_t t l -> Lemma
(v (a `logxor` (a `logxor` b)) == v b)
let logxor_lemma_ #t #l a b =
match t with
| U1 | U8 | U16 | U32 | U64 | U128 ->
UInt.logxor_associative #(bits t) (v a) (v a) (v b);
UInt.logxor_self #(bits t) (v a);
UInt.logxor_commutative #(bits t) 0 (v b);
UInt.logxor_lemma_1 #(bits t) (v b)
| S8 | S16 | S32 | S64 | S128 ->
Int.logxor_associative #(bits t) (v a) (v a) (v b);
Int.logxor_self #(bits t) (v a);
Int.logxor_commutative #(bits t) 0 (v b);
Int.logxor_lemma_1 #(bits t) (v b)
let logxor_lemma #t #l a b =
logxor_lemma_ #t a b;
v_extensionality (logxor a (logxor a b)) b;
begin
match t with
| U1 | U8 | U16 | U32 | U64 | U128 -> UInt.logxor_commutative #(bits t) (v a) (v b)
| S8 | S16 | S32 | S64 | S128 -> Int.logxor_commutative #(bits t) (v a) (v b)
end;
v_extensionality (logxor a (logxor b a)) b;
begin
match t with
| U1 | U8 | U16 | U32 | U64 | U128 -> UInt.logxor_lemma_1 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.logxor_lemma_1 #(bits t) (v a)
end;
v_extensionality (logxor a (mk_int #t #l 0)) a
let logxor_lemma1 #t #l a b =
match v a, v b with
| _, 0 ->
UInt.logxor_lemma_1 #(bits t) (v a)
| 0, _ ->
UInt.logxor_commutative #(bits t) (v a) (v b);
UInt.logxor_lemma_1 #(bits t) (v b)
| 1, 1 ->
v_extensionality a b;
UInt.logxor_self #(bits t) (v a)
let logxor_spec #t #l a b =
match t with
| U1 ->
assert_norm (u1 0 `logxor` u1 0 == u1 0 /\ u1 0 `logxor` u1 1 == u1 1);
assert_norm (u1 1 `logxor` u1 0 == u1 1 /\ u1 1 `logxor` u1 1 == u1 0);
assert_norm (0 `logxor_v #U1` 0 == 0 /\ 0 `logxor_v #U1` 1 == 1);
assert_norm (1 `logxor_v #U1` 0 == 1 /\ 1 `logxor_v #U1` 1 == 0)
| _ -> ()
#pop-options
[@(strict_on_arguments [0])]
let logand #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logand 0uy 0uy == 0uy);
assert_norm (UInt8.logand 0uy 1uy == 0uy);
assert_norm (UInt8.logand 1uy 0uy == 0uy);
assert_norm (UInt8.logand 1uy 1uy == 1uy);
UInt8.logand a b
| U8 -> UInt8.logand a b
| U16 -> UInt16.logand a b
| U32 -> UInt32.logand a b
| U64 -> UInt64.logand a b
| U128 -> UInt128.logand a b
| S8 -> Int8.logand a b
| S16 -> Int16.logand a b
| S32 -> Int32.logand a b
| S64 -> Int64.logand a b
| S128 -> Int128.logand a b
let logand_zeros #t #l a =
match t with
| U1 -> assert_norm (u1 0 `logand` zeros U1 l == u1 0 /\ u1 1 `logand` zeros U1 l == u1 0)
| U8 | U16 | U32 | U64 | U128 -> UInt.logand_lemma_1 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.logand_lemma_1 #(bits t) (v a)
let logand_ones #t #l a =
match t with
| U1 -> assert_norm (u1 0 `logand` ones U1 l == u1 0 /\ u1 1 `logand` ones U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logand_lemma_2 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.logand_lemma_2 #(bits t) (v a)
let logand_lemma #t #l a b =
logand_zeros #t #l b;
logand_ones #t #l b;
match t with
| U1 ->
assert_norm (u1 0 `logand` zeros U1 l == u1 0 /\ u1 1 `logand` zeros U1 l == u1 0);
assert_norm (u1 0 `logand` ones U1 l == u1 0 /\ u1 1 `logand` ones U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logand_commutative #(bits t) (v a) (v b)
| S8 | S16 | S32 | S64 | S128 -> Int.logand_commutative #(bits t) (v a) (v b)
let logand_spec #t #l a b =
match t with
| U1 ->
assert_norm (u1 0 `logand` u1 0 == u1 0 /\ u1 0 `logand` u1 1 == u1 0);
assert_norm (u1 1 `logand` u1 0 == u1 0 /\ u1 1 `logand` u1 1 == u1 1);
assert_norm (0 `logand_v #U1` 0 == 0 /\ 0 `logand_v #U1` 1 == 0);
assert_norm (1 `logand_v #U1` 0 == 0 /\ 1 `logand_v #U1` 1 == 1)
| _ -> ()
let logand_le #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logand 0uy 0uy == 0uy);
assert_norm (UInt8.logand 0uy 1uy == 0uy);
assert_norm (UInt8.logand 1uy 0uy == 0uy);
assert_norm (UInt8.logand 1uy 1uy == 1uy)
| U8 -> UInt.logand_le (UInt.to_uint_t 8 (v a)) (UInt.to_uint_t 8 (v b))
| U16 -> UInt.logand_le (UInt.to_uint_t 16 (v a)) (UInt.to_uint_t 16 (v b))
| U32 -> UInt.logand_le (UInt.to_uint_t 32 (v a)) (UInt.to_uint_t 32 (v b))
| U64 -> UInt.logand_le (UInt.to_uint_t 64 (v a)) (UInt.to_uint_t 64 (v b))
| U128 -> UInt.logand_le (UInt.to_uint_t 128 (v a)) (UInt.to_uint_t 128 (v b))
let logand_mask #t #l a b m =
match t with
| U1 ->
assert_norm (UInt8.logand 0uy 0uy == 0uy);
assert_norm (UInt8.logand 0uy 1uy == 0uy);
assert_norm (UInt8.logand 1uy 0uy == 0uy);
assert_norm (UInt8.logand 1uy 1uy == 1uy)
| U8 -> UInt.logand_mask (UInt.to_uint_t 8 (v a)) m
| U16 -> UInt.logand_mask (UInt.to_uint_t 16 (v a)) m
| U32 -> UInt.logand_mask (UInt.to_uint_t 32 (v a)) m
| U64 -> UInt.logand_mask (UInt.to_uint_t 64 (v a)) m
| U128 -> UInt.logand_mask (UInt.to_uint_t 128 (v a)) m
[@(strict_on_arguments [0])]
let logor #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logor 0uy 0uy == 0uy);
assert_norm (UInt8.logor 0uy 1uy == 1uy);
assert_norm (UInt8.logor 1uy 0uy == 1uy);
assert_norm (UInt8.logor 1uy 1uy == 1uy);
UInt8.logor a b
| U8 -> UInt8.logor a b
| U16 -> UInt16.logor a b
| U32 -> UInt32.logor a b
| U64 -> UInt64.logor a b
| U128 -> UInt128.logor a b
| S8 -> Int8.logor a b
| S16 -> Int16.logor a b
| S32 -> Int32.logor a b
| S64 -> Int64.logor a b
| S128 -> Int128.logor a b
#push-options "--max_fuel 1"
let logor_disjoint #t #l a b m =
if m > 0 then
begin
UInt.logor_disjoint #(bits t) (v b) (v a) m;
UInt.logor_commutative #(bits t) (v b) (v a)
end
else
begin
UInt.logor_commutative #(bits t) (v a) (v b);
UInt.logor_lemma_1 #(bits t) (v b)
end
#pop-options
let logor_zeros #t #l a =
match t with
|U1 -> assert_norm(u1 0 `logor` zeros U1 l == u1 0 /\ u1 1 `logor` zeros U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logor_lemma_1 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.nth_lemma #(bits t) (Int.logor #(bits t) (v a) (Int.zero (bits t))) (v a)
let logor_ones #t #l a =
match t with
|U1 -> assert_norm(u1 0 `logor` ones U1 l == u1 1 /\ u1 1 `logor` ones U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logor_lemma_2 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.nth_lemma (Int.logor #(bits t) (v a) (Int.ones (bits t))) (Int.ones (bits t))
let logor_lemma #t #l a b =
logor_zeros #t #l b;
logor_ones #t #l b;
match t with
| U1 ->
assert_norm(u1 0 `logor` ones U1 l == u1 1 /\ u1 1 `logor` ones U1 l == u1 1);
assert_norm(u1 0 `logor` zeros U1 l == u1 0 /\ u1 1 `logor` zeros U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logor_commutative #(bits t) (v a) (v b)
| S8 | S16 | S32 | S64 | S128 -> Int.nth_lemma #(bits t) (Int.logor #(bits t) (v a) (v b)) (Int.logor #(bits t) (v b) (v a))
let logor_spec #t #l a b =
match t with
| U1 ->
assert_norm(u1 0 `logor` ones U1 l == u1 1 /\ u1 1 `logor` ones U1 l == u1 1);
assert_norm(u1 0 `logor` zeros U1 l == u1 0 /\ u1 1 `logor` zeros U1 l == u1 1);
assert_norm (0 `logor_v #U1` 0 == 0 /\ 0 `logor_v #U1` 1 == 1);
assert_norm (1 `logor_v #U1` 0 == 1 /\ 1 `logor_v #U1` 1 == 1)
| _ -> ()
[@(strict_on_arguments [0])] | false | false | Lib.IntTypes.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 200,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val lognot: #t:inttype -> #l:secrecy_level -> int_t t l -> int_t t l | [] | Lib.IntTypes.lognot | {
"file_name": "lib/Lib.IntTypes.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | a: Lib.IntTypes.int_t t l -> Lib.IntTypes.int_t t l | {
"end_col": 27,
"end_line": 651,
"start_col": 2,
"start_line": 640
} |
Prims.Tot | val incr: #t:inttype -> #l:secrecy_level
-> a:int_t t l{v a < maxint t}
-> int_t t l | [
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let incr #t #l a =
match t with
| U1 -> UInt8.add a 1uy
| U8 -> UInt8.add a 1uy
| U16 -> UInt16.add a 1us
| U32 -> UInt32.add a 1ul
| U64 -> UInt64.add a 1uL
| U128 -> UInt128.add a (UInt128.uint_to_t 1)
| S8 -> Int8.add a 1y
| S16 -> Int16.add a 1s
| S32 -> Int32.add a 1l
| S64 -> Int64.add a 1L
| S128 -> Int128.add a (Int128.int_to_t 1) | val incr: #t:inttype -> #l:secrecy_level
-> a:int_t t l{v a < maxint t}
-> int_t t l
let incr #t #l a = | false | null | false | match t with
| U1 -> UInt8.add a 1uy
| U8 -> UInt8.add a 1uy
| U16 -> UInt16.add a 1us
| U32 -> UInt32.add a 1ul
| U64 -> UInt64.add a 1uL
| U128 -> UInt128.add a (UInt128.uint_to_t 1)
| S8 -> Int8.add a 1y
| S16 -> Int16.add a 1s
| S32 -> Int32.add a 1l
| S64 -> Int64.add a 1L
| S128 -> Int128.add a (Int128.int_to_t 1) | {
"checked_file": "Lib.IntTypes.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt8.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt16.fsti.checked",
"FStar.UInt128.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int8.fsti.checked",
"FStar.Int64.fsti.checked",
"FStar.Int32.fsti.checked",
"FStar.Int16.fsti.checked",
"FStar.Int128.fsti.checked",
"FStar.Int.Cast.Full.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Int.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "Lib.IntTypes.fst"
} | [
"total"
] | [
"Lib.IntTypes.inttype",
"Lib.IntTypes.secrecy_level",
"Lib.IntTypes.int_t",
"Prims.b2t",
"Prims.op_LessThan",
"Lib.IntTypes.v",
"Lib.IntTypes.maxint",
"FStar.UInt8.add",
"FStar.UInt8.__uint_to_t",
"FStar.UInt16.add",
"FStar.UInt16.__uint_to_t",
"FStar.UInt32.add",
"FStar.UInt32.__uint_to_t",
"FStar.UInt64.add",
"FStar.UInt64.__uint_to_t",
"FStar.UInt128.add",
"FStar.UInt128.uint_to_t",
"FStar.Int8.add",
"FStar.Int8.__int_to_t",
"FStar.Int16.add",
"FStar.Int16.__int_to_t",
"FStar.Int32.add",
"FStar.Int32.__int_to_t",
"FStar.Int64.add",
"FStar.Int64.__int_to_t",
"FStar.Int128.add",
"FStar.Int128.int_to_t"
] | [] | module Lib.IntTypes
open FStar.Math.Lemmas
#push-options "--max_fuel 0 --max_ifuel 1 --z3rlimit 200"
let pow2_2 _ = assert_norm (pow2 2 = 4)
let pow2_3 _ = assert_norm (pow2 3 = 8)
let pow2_4 _ = assert_norm (pow2 4 = 16)
let pow2_127 _ = assert_norm (pow2 127 = 0x80000000000000000000000000000000)
let bits_numbytes t = ()
let sec_int_t t = pub_int_t t
let sec_int_v #t u = pub_int_v u
let secret #t x = x
[@(strict_on_arguments [0])]
let mk_int #t #l x =
match t with
| U1 -> UInt8.uint_to_t x
| U8 -> UInt8.uint_to_t x
| U16 -> UInt16.uint_to_t x
| U32 -> UInt32.uint_to_t x
| U64 -> UInt64.uint_to_t x
| U128 -> UInt128.uint_to_t x
| S8 -> Int8.int_to_t x
| S16 -> Int16.int_to_t x
| S32 -> Int32.int_to_t x
| S64 -> Int64.int_to_t x
| S128 -> Int128.int_to_t x
val v_extensionality:
#t:inttype
-> #l:secrecy_level
-> a:int_t t l
-> b:int_t t l
-> Lemma
(requires v a == v b)
(ensures a == b)
let v_extensionality #t #l a b =
match t with
| U1 -> ()
| U8 -> UInt8.v_inj a b
| U16 -> UInt16.v_inj a b
| U32 -> UInt32.v_inj a b
| U64 -> UInt64.v_inj a b
| U128 -> UInt128.v_inj a b
| S8 -> Int8.v_inj a b
| S16 -> Int16.v_inj a b
| S32 -> Int32.v_inj a b
| S64 -> Int64.v_inj a b
| S128 -> Int128.v_inj a b
let v_injective #t #l a =
v_extensionality a (mk_int (v a))
let v_mk_int #t #l n = ()
let u128 n = FStar.UInt128.uint64_to_uint128 (u64 n)
// KaRaMeL will extract this to FStar_Int128_int_to_t, which isn't provided
// We'll need to have FStar.Int128.int64_to_int128 to support int128_t literals
let i128 n =
assert_norm (pow2 (bits S64 - 1) <= pow2 (bits S128 - 1));
sint #S128 #SEC n
let size_to_uint32 x = x
let size_to_uint64 x = Int.Cast.uint32_to_uint64 x
let byte_to_uint8 x = x
let byte_to_int8 x = Int.Cast.uint8_to_int8 x
let op_At_Percent = Int.op_At_Percent
// FStar.UInt128 gets special treatment in KaRaMeL. There is no
// equivalent for FStar.Int128 at the moment, so we use the three
// assumed cast operators below.
//
// Using them will fail at runtime with an informative message.
// The commented-out implementations show that they are realizable.
//
// When support for `FStar.Int128` is added KaRaMeL, these casts must
// be added as special cases. When using builtin compiler support for
// `int128_t`, they can be implemented directly as C casts without
// undefined or implementation-defined behaviour.
assume
val uint128_to_int128: a:UInt128.t{v a <= maxint S128} -> b:Int128.t{Int128.v b == UInt128.v a}
//let uint128_to_int128 a = Int128.int_to_t (v a)
assume
val int128_to_uint128: a:Int128.t -> b:UInt128.t{UInt128.v b == Int128.v a % pow2 128}
//let int128_to_uint128 a = mk_int (v a % pow2 128)
assume
val int64_to_int128: a:Int64.t -> b:Int128.t{Int128.v b == Int64.v a}
//let int64_to_int128 a = Int128.int_to_t (v a)
val uint64_to_int128: a:UInt64.t -> b:Int128.t{Int128.v b == UInt64.v a}
let uint64_to_int128 a = uint128_to_int128 (Int.Cast.Full.uint64_to_uint128 a)
val int64_to_uint128: a:Int64.t -> b:UInt128.t{UInt128.v b == Int64.v a % pow2 128}
let int64_to_uint128 a = int128_to_uint128 (int64_to_int128 a)
val int128_to_uint64: a:Int128.t -> b:UInt64.t{UInt64.v b == Int128.v a % pow2 64}
let int128_to_uint64 a = Int.Cast.Full.uint128_to_uint64 (int128_to_uint128 a)
#push-options "--z3rlimit 1000"
// Converts a machine integer of source type `t` to destination type `t'` by
// composing the primitive conversions from FStar.Int.Cast[.Full] and the
// assumed/derived 128-bit casts above. The secrecy levels `l`/`l'` do not
// affect the representation (secret ints are represented as public ints in
// this module). `strict_on_arguments [0;2]` keeps F* from unfolding the
// match until both `t` and `t'` are concrete, so normalization stays cheap.
[@(strict_on_arguments [0;2])]
let cast #t #l t' l' u =
// Pre-normalized power-of-two identities used by the refinement proofs of
// the individual cases below.
assert_norm (pow2 8 = 2 * pow2 7);
assert_norm (pow2 16 = 2 * pow2 15);
assert_norm (pow2 64 * pow2 64 = pow2 128);
assert_norm (pow2 16 * pow2 48 = pow2 64);
assert_norm (pow2 8 * pow2 56 = pow2 64);
assert_norm (pow2 32 * pow2 32 = pow2 64);
// (x % 2^a % 2^b) facts: justify the double reductions performed when a
// wide-to-narrow cast goes through an intermediate 64-bit truncation.
modulo_modulo_lemma (v u) (pow2 32) (pow2 32);
modulo_modulo_lemma (v u) (pow2 64) (pow2 64);
modulo_modulo_lemma (v u) (pow2 128) (pow2 64);
modulo_modulo_lemma (v u) (pow2 16) (pow2 48);
modulo_modulo_lemma (v u) (pow2 8) (pow2 56);
let open FStar.Int.Cast in
let open FStar.Int.Cast.Full in
match t, t' with
// Source U1: carried in a UInt8 holding 0 or 1, so the uint8_* casts apply
// directly and are always exact.
| U1, U1 -> u
| U1, U8 -> u
| U1, U16 -> uint8_to_uint16 u
| U1, U32 -> uint8_to_uint32 u
| U1, U64 -> uint8_to_uint64 u
| U1, U128 -> UInt128.uint64_to_uint128 (uint8_to_uint64 u)
| U1, S8 -> uint8_to_int8 u
| U1, S16 -> uint8_to_int16 u
| U1, S32 -> uint8_to_int32 u
| U1, S64 -> uint8_to_int64 u
| U1, S128 -> uint64_to_int128 (uint8_to_uint64 u)
// Source U8: narrowing to U1 keeps the value mod 2 via UInt8.rem.
| U8, U1 -> UInt8.rem u 2uy
| U8, U8 -> u
| U8, U16 -> uint8_to_uint16 u
| U8, U32 -> uint8_to_uint32 u
| U8, U64 -> uint8_to_uint64 u
| U8, U128 -> UInt128.uint64_to_uint128 (uint8_to_uint64 u)
| U8, S8 -> uint8_to_int8 u
| U8, S16 -> uint8_to_int16 u
| U8, S32 -> uint8_to_int32 u
| U8, S64 -> uint8_to_int64 u
| U8, S128 -> uint64_to_int128 (uint8_to_uint64 u)
// Source U16.
| U16, U1 -> UInt8.rem (uint16_to_uint8 u) 2uy
| U16, U8 -> uint16_to_uint8 u
| U16, U16 -> u
| U16, U32 -> uint16_to_uint32 u
| U16, U64 -> uint16_to_uint64 u
| U16, U128 -> UInt128.uint64_to_uint128 (uint16_to_uint64 u)
| U16, S8 -> uint16_to_int8 u
| U16, S16 -> uint16_to_int16 u
| U16, S32 -> uint16_to_int32 u
| U16, S64 -> uint16_to_int64 u
| U16, S128 -> uint64_to_int128 (uint16_to_uint64 u)
// Source U32.
| U32, U1 -> UInt8.rem (uint32_to_uint8 u) 2uy
| U32, U8 -> uint32_to_uint8 u
| U32, U16 -> uint32_to_uint16 u
| U32, U32 -> u
| U32, U64 -> uint32_to_uint64 u
| U32, U128 -> UInt128.uint64_to_uint128 (uint32_to_uint64 u)
| U32, S8 -> uint32_to_int8 u
| U32, S16 -> uint32_to_int16 u
| U32, S32 -> uint32_to_int32 u
| U32, S64 -> uint32_to_int64 u
| U32, S128 -> uint64_to_int128 (uint32_to_uint64 u)
// Source U64.
| U64, U1 -> UInt8.rem (uint64_to_uint8 u) 2uy
| U64, U8 -> uint64_to_uint8 u
| U64, U16 -> uint64_to_uint16 u
| U64, U32 -> uint64_to_uint32 u
| U64, U64 -> u
| U64, U128 -> UInt128.uint64_to_uint128 u
| U64, S8 -> uint64_to_int8 u
| U64, S16 -> uint64_to_int16 u
| U64, S32 -> uint64_to_int32 u
| U64, S64 -> uint64_to_int64 u
| U64, S128 -> uint64_to_int128 u
// Source U128: narrowing cases first truncate to 64 bits; the
// modulo_modulo_lemma calls above justify the subsequent reductions.
| U128, U1 -> UInt8.rem (uint64_to_uint8 (uint128_to_uint64 u)) 2uy
| U128, U8 -> uint64_to_uint8 (UInt128.uint128_to_uint64 u)
| U128, U16 -> uint64_to_uint16 (UInt128.uint128_to_uint64 u)
| U128, U32 -> uint64_to_uint32 (UInt128.uint128_to_uint64 u)
| U128, U64 -> UInt128.uint128_to_uint64 u
| U128, U128 -> u
| U128, S8 -> uint64_to_int8 (UInt128.uint128_to_uint64 u)
| U128, S16 -> uint64_to_int16 (UInt128.uint128_to_uint64 u)
| U128, S32 -> uint64_to_int32 (UInt128.uint128_to_uint64 u)
| U128, S64 -> uint64_to_int64 (UInt128.uint128_to_uint64 u)
| U128, S128 -> uint128_to_int128 u
// Source S8: widenings to 128 bits go through a sign-preserving widening
// to Int64 first, since only 64<->128 primitives are available.
| S8, U1 -> UInt8.rem (int8_to_uint8 u) 2uy
| S8, U8 -> int8_to_uint8 u
| S8, U16 -> int8_to_uint16 u
| S8, U32 -> int8_to_uint32 u
| S8, U64 -> int8_to_uint64 u
| S8, U128 -> int64_to_uint128 (int8_to_int64 u)
| S8, S8 -> u
| S8, S16 -> int8_to_int16 u
| S8, S32 -> int8_to_int32 u
| S8, S64 -> int8_to_int64 u
| S8, S128 -> int64_to_int128 (int8_to_int64 u)
// Source S16.
| S16, U1 -> UInt8.rem (int16_to_uint8 u) 2uy
| S16, U8 -> int16_to_uint8 u
| S16, U16 -> int16_to_uint16 u
| S16, U32 -> int16_to_uint32 u
| S16, U64 -> int16_to_uint64 u
| S16, U128 -> int64_to_uint128 (int16_to_int64 u)
| S16, S8 -> int16_to_int8 u
| S16, S16 -> u
| S16, S32 -> int16_to_int32 u
| S16, S64 -> int16_to_int64 u
| S16, S128 -> int64_to_int128 (int16_to_int64 u)
// Source S32.
| S32, U1 -> UInt8.rem (int32_to_uint8 u) 2uy
| S32, U8 -> int32_to_uint8 u
| S32, U16 -> int32_to_uint16 u
| S32, U32 -> int32_to_uint32 u
| S32, U64 -> int32_to_uint64 u
| S32, U128 -> int64_to_uint128 (int32_to_int64 u)
| S32, S8 -> int32_to_int8 u
| S32, S16 -> int32_to_int16 u
| S32, S32 -> u
| S32, S64 -> int32_to_int64 u
| S32, S128 -> int64_to_int128 (int32_to_int64 u)
// Source S64.
| S64, U1 -> UInt8.rem (int64_to_uint8 u) 2uy
| S64, U8 -> int64_to_uint8 u
| S64, U16 -> int64_to_uint16 u
| S64, U32 -> int64_to_uint32 u
| S64, U64 -> int64_to_uint64 u
| S64, U128 -> int64_to_uint128 u
| S64, S8 -> int64_to_int8 u
| S64, S16 -> int64_to_int16 u
| S64, S32 -> int64_to_int32 u
| S64, S64 -> u
| S64, S128 -> int64_to_int128 u
// Source S128: narrowing cases first reduce to UInt64 via int128_to_uint64
// (value mod 2^64), then narrow further.
| S128, U1 -> UInt8.rem (uint64_to_uint8 (int128_to_uint64 u)) 2uy
| S128, U8 -> uint64_to_uint8 (int128_to_uint64 u)
| S128, U16 -> uint64_to_uint16 (int128_to_uint64 u)
| S128, U32 -> uint64_to_uint32 (int128_to_uint64 u)
| S128, U64 -> int128_to_uint64 u
| S128, U128 -> int128_to_uint128 u
| S128, S8 -> uint64_to_int8 (int128_to_uint64 u)
| S128, S16 -> uint64_to_int16 (int128_to_uint64 u)
| S128, S32 -> uint64_to_int32 (int128_to_uint64 u)
| S128, S64 -> uint64_to_int64 (int128_to_uint64 u)
| S128, S128 -> u
#pop-options
// All-ones value of type `t`: 1 for U1, 2^(bits t) - 1 for the other
// unsigned types, and -1 (every bit set in two's complement) for the signed
// types via the catch-all `mk_int (-1)`.
[@(strict_on_arguments [0])]
let ones t l =
match t with
| U1 -> 0x1uy
| U8 -> 0xFFuy
| U16 -> 0xFFFFus
| U32 -> 0xFFFFFFFFul
| U64 -> 0xFFFFFFFFFFFFFFFFuL
| U128 ->
// There is no 128-bit literal syntax: assemble 2^128 - 1 from two 64-bit
// halves and confirm the value by normalization.
let x = UInt128.uint64_to_uint128 0xFFFFFFFFFFFFFFFFuL in
let y = (UInt128.shift_left x 64ul) `UInt128.add` x in
assert_norm (UInt128.v y == pow2 128 - 1);
y
| _ -> mk_int (-1)
// Zero of any integer type; `mk_int 0` is well-typed at every inttype.
let zeros t l = mk_int 0
// Wrapping (modular) addition, defined for the unsigned types only. U1 is
// carried in a UInt8, so its 8-bit sum is reduced mod 2 to stay in {0, 1}.
[@(strict_on_arguments [0])]
let add_mod #t #l a b =
match t with
| U1 -> UInt8.rem (UInt8.add_mod a b) 2uy
| U8 -> UInt8.add_mod a b
| U16 -> UInt16.add_mod a b
| U32 -> UInt32.add_mod a b
| U64 -> UInt64.add_mod a b
| U128 -> UInt128.add_mod a b
// Agreement with the mathematical specification is discharged automatically
// by the SMT solver (trivial unit proof).
let add_mod_lemma #t #l a b = ()
// Exact (non-wrapping) addition for all integer types. NOTE(review): the
// underlying FStar machine-integer `add` operations require the sum to be
// representable in the type; the precondition lives in this function's
// interface declaration (not shown here).
[@(strict_on_arguments [0])]
let add #t #l a b =
match t with
| U1 -> UInt8.add a b
| U8 -> UInt8.add a b
| U16 -> UInt16.add a b
| U32 -> UInt32.add a b
| U64 -> UInt64.add a b
| U128 -> UInt128.add a b
| S8 -> Int8.add a b
| S16 -> Int16.add a b
| S32 -> Int32.add a b
| S64 -> Int64.add a b
| S128 -> Int128.add a b
// Trivial unit proof: the result matches the specification definitionally.
let add_lemma #t #l a b = ()
#push-options "--max_fuel 1"
[@(strict_on_arguments [0])] | false | false | Lib.IntTypes.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 200,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val incr: #t:inttype -> #l:secrecy_level
-> a:int_t t l{v a < maxint t}
-> int_t t l | [] | Lib.IntTypes.incr | {
"file_name": "lib/Lib.IntTypes.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | a: Lib.IntTypes.int_t t l {Lib.IntTypes.v a < Lib.IntTypes.maxint t} -> Lib.IntTypes.int_t t l | {
"end_col": 44,
"end_line": 326,
"start_col": 2,
"start_line": 315
} |
Prims.Tot | val gte: #t:inttype -> int_t t PUB -> int_t t PUB -> bool | [
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let gte #t x y =
match t with
| U1 -> UInt8.gte x y
| U8 -> UInt8.gte x y
| U16 -> UInt16.gte x y
| U32 -> UInt32.gte x y
| U64 -> UInt64.gte x y
| U128 -> UInt128.gte x y
| S8 -> Int8.gte x y
| S16 -> Int16.gte x y
| S32 -> Int32.gte x y
| S64 -> Int64.gte x y
| S128 -> Int128.gte x y | val gte: #t:inttype -> int_t t PUB -> int_t t PUB -> bool
let gte #t x y = | false | null | false | match t with
| U1 -> UInt8.gte x y
| U8 -> UInt8.gte x y
| U16 -> UInt16.gte x y
| U32 -> UInt32.gte x y
| U64 -> UInt64.gte x y
| U128 -> UInt128.gte x y
| S8 -> Int8.gte x y
| S16 -> Int16.gte x y
| S32 -> Int32.gte x y
| S64 -> Int64.gte x y
| S128 -> Int128.gte x y | {
"checked_file": "Lib.IntTypes.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt8.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt16.fsti.checked",
"FStar.UInt128.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int8.fsti.checked",
"FStar.Int64.fsti.checked",
"FStar.Int32.fsti.checked",
"FStar.Int16.fsti.checked",
"FStar.Int128.fsti.checked",
"FStar.Int.Cast.Full.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Int.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "Lib.IntTypes.fst"
} | [
"total"
] | [
"Lib.IntTypes.inttype",
"Lib.IntTypes.int_t",
"Lib.IntTypes.PUB",
"FStar.UInt8.gte",
"FStar.UInt16.gte",
"FStar.UInt32.gte",
"FStar.UInt64.gte",
"FStar.UInt128.gte",
"FStar.Int8.gte",
"FStar.Int16.gte",
"FStar.Int32.gte",
"FStar.Int64.gte",
"FStar.Int128.gte",
"Prims.bool"
] | [] | module Lib.IntTypes
open FStar.Math.Lemmas
#push-options "--max_fuel 0 --max_ifuel 1 --z3rlimit 200"
let pow2_2 _ = assert_norm (pow2 2 = 4)
let pow2_3 _ = assert_norm (pow2 3 = 8)
let pow2_4 _ = assert_norm (pow2 4 = 16)
let pow2_127 _ = assert_norm (pow2 127 = 0x80000000000000000000000000000000)
let bits_numbytes t = ()
let sec_int_t t = pub_int_t t
let sec_int_v #t u = pub_int_v u
let secret #t x = x
[@(strict_on_arguments [0])]
let mk_int #t #l x =
match t with
| U1 -> UInt8.uint_to_t x
| U8 -> UInt8.uint_to_t x
| U16 -> UInt16.uint_to_t x
| U32 -> UInt32.uint_to_t x
| U64 -> UInt64.uint_to_t x
| U128 -> UInt128.uint_to_t x
| S8 -> Int8.int_to_t x
| S16 -> Int16.int_to_t x
| S32 -> Int32.int_to_t x
| S64 -> Int64.int_to_t x
| S128 -> Int128.int_to_t x
val v_extensionality:
#t:inttype
-> #l:secrecy_level
-> a:int_t t l
-> b:int_t t l
-> Lemma
(requires v a == v b)
(ensures a == b)
let v_extensionality #t #l a b =
match t with
| U1 -> ()
| U8 -> UInt8.v_inj a b
| U16 -> UInt16.v_inj a b
| U32 -> UInt32.v_inj a b
| U64 -> UInt64.v_inj a b
| U128 -> UInt128.v_inj a b
| S8 -> Int8.v_inj a b
| S16 -> Int16.v_inj a b
| S32 -> Int32.v_inj a b
| S64 -> Int64.v_inj a b
| S128 -> Int128.v_inj a b
let v_injective #t #l a =
v_extensionality a (mk_int (v a))
let v_mk_int #t #l n = ()
let u128 n = FStar.UInt128.uint64_to_uint128 (u64 n)
// KaRaMeL will extract this to FStar_Int128_int_to_t, which isn't provided
// We'll need to have FStar.Int128.int64_to_int128 to support int128_t literals
let i128 n =
assert_norm (pow2 (bits S64 - 1) <= pow2 (bits S128 - 1));
sint #S128 #SEC n
let size_to_uint32 x = x
let size_to_uint64 x = Int.Cast.uint32_to_uint64 x
let byte_to_uint8 x = x
let byte_to_int8 x = Int.Cast.uint8_to_int8 x
let op_At_Percent = Int.op_At_Percent
// FStar.UInt128 gets special treatment in KaRaMeL. There is no
// equivalent for FStar.Int128 at the moment, so we use the three
// assumed cast operators below.
//
// Using them will fail at runtime with an informative message.
// The commented-out implementations show that they are realizable.
//
// When support for `FStar.Int128` is added KaRaMeL, these casts must
// be added as special cases. When using builtin compiler support for
// `int128_t`, they can be implemented directly as C casts without
// undefined or implementation-defined behaviour.
assume
val uint128_to_int128: a:UInt128.t{v a <= maxint S128} -> b:Int128.t{Int128.v b == UInt128.v a}
//let uint128_to_int128 a = Int128.int_to_t (v a)
assume
val int128_to_uint128: a:Int128.t -> b:UInt128.t{UInt128.v b == Int128.v a % pow2 128}
//let int128_to_uint128 a = mk_int (v a % pow2 128)
assume
val int64_to_int128: a:Int64.t -> b:Int128.t{Int128.v b == Int64.v a}
//let int64_to_int128 a = Int128.int_to_t (v a)
val uint64_to_int128: a:UInt64.t -> b:Int128.t{Int128.v b == UInt64.v a}
let uint64_to_int128 a = uint128_to_int128 (Int.Cast.Full.uint64_to_uint128 a)
val int64_to_uint128: a:Int64.t -> b:UInt128.t{UInt128.v b == Int64.v a % pow2 128}
let int64_to_uint128 a = int128_to_uint128 (int64_to_int128 a)
val int128_to_uint64: a:Int128.t -> b:UInt64.t{UInt64.v b == Int128.v a % pow2 64}
let int128_to_uint64 a = Int.Cast.Full.uint128_to_uint64 (int128_to_uint128 a)
#push-options "--z3rlimit 1000"
[@(strict_on_arguments [0;2])]
let cast #t #l t' l' u =
assert_norm (pow2 8 = 2 * pow2 7);
assert_norm (pow2 16 = 2 * pow2 15);
assert_norm (pow2 64 * pow2 64 = pow2 128);
assert_norm (pow2 16 * pow2 48 = pow2 64);
assert_norm (pow2 8 * pow2 56 = pow2 64);
assert_norm (pow2 32 * pow2 32 = pow2 64);
modulo_modulo_lemma (v u) (pow2 32) (pow2 32);
modulo_modulo_lemma (v u) (pow2 64) (pow2 64);
modulo_modulo_lemma (v u) (pow2 128) (pow2 64);
modulo_modulo_lemma (v u) (pow2 16) (pow2 48);
modulo_modulo_lemma (v u) (pow2 8) (pow2 56);
let open FStar.Int.Cast in
let open FStar.Int.Cast.Full in
match t, t' with
| U1, U1 -> u
| U1, U8 -> u
| U1, U16 -> uint8_to_uint16 u
| U1, U32 -> uint8_to_uint32 u
| U1, U64 -> uint8_to_uint64 u
| U1, U128 -> UInt128.uint64_to_uint128 (uint8_to_uint64 u)
| U1, S8 -> uint8_to_int8 u
| U1, S16 -> uint8_to_int16 u
| U1, S32 -> uint8_to_int32 u
| U1, S64 -> uint8_to_int64 u
| U1, S128 -> uint64_to_int128 (uint8_to_uint64 u)
| U8, U1 -> UInt8.rem u 2uy
| U8, U8 -> u
| U8, U16 -> uint8_to_uint16 u
| U8, U32 -> uint8_to_uint32 u
| U8, U64 -> uint8_to_uint64 u
| U8, U128 -> UInt128.uint64_to_uint128 (uint8_to_uint64 u)
| U8, S8 -> uint8_to_int8 u
| U8, S16 -> uint8_to_int16 u
| U8, S32 -> uint8_to_int32 u
| U8, S64 -> uint8_to_int64 u
| U8, S128 -> uint64_to_int128 (uint8_to_uint64 u)
| U16, U1 -> UInt8.rem (uint16_to_uint8 u) 2uy
| U16, U8 -> uint16_to_uint8 u
| U16, U16 -> u
| U16, U32 -> uint16_to_uint32 u
| U16, U64 -> uint16_to_uint64 u
| U16, U128 -> UInt128.uint64_to_uint128 (uint16_to_uint64 u)
| U16, S8 -> uint16_to_int8 u
| U16, S16 -> uint16_to_int16 u
| U16, S32 -> uint16_to_int32 u
| U16, S64 -> uint16_to_int64 u
| U16, S128 -> uint64_to_int128 (uint16_to_uint64 u)
| U32, U1 -> UInt8.rem (uint32_to_uint8 u) 2uy
| U32, U8 -> uint32_to_uint8 u
| U32, U16 -> uint32_to_uint16 u
| U32, U32 -> u
| U32, U64 -> uint32_to_uint64 u
| U32, U128 -> UInt128.uint64_to_uint128 (uint32_to_uint64 u)
| U32, S8 -> uint32_to_int8 u
| U32, S16 -> uint32_to_int16 u
| U32, S32 -> uint32_to_int32 u
| U32, S64 -> uint32_to_int64 u
| U32, S128 -> uint64_to_int128 (uint32_to_uint64 u)
| U64, U1 -> UInt8.rem (uint64_to_uint8 u) 2uy
| U64, U8 -> uint64_to_uint8 u
| U64, U16 -> uint64_to_uint16 u
| U64, U32 -> uint64_to_uint32 u
| U64, U64 -> u
| U64, U128 -> UInt128.uint64_to_uint128 u
| U64, S8 -> uint64_to_int8 u
| U64, S16 -> uint64_to_int16 u
| U64, S32 -> uint64_to_int32 u
| U64, S64 -> uint64_to_int64 u
| U64, S128 -> uint64_to_int128 u
| U128, U1 -> UInt8.rem (uint64_to_uint8 (uint128_to_uint64 u)) 2uy
| U128, U8 -> uint64_to_uint8 (UInt128.uint128_to_uint64 u)
| U128, U16 -> uint64_to_uint16 (UInt128.uint128_to_uint64 u)
| U128, U32 -> uint64_to_uint32 (UInt128.uint128_to_uint64 u)
| U128, U64 -> UInt128.uint128_to_uint64 u
| U128, U128 -> u
| U128, S8 -> uint64_to_int8 (UInt128.uint128_to_uint64 u)
| U128, S16 -> uint64_to_int16 (UInt128.uint128_to_uint64 u)
| U128, S32 -> uint64_to_int32 (UInt128.uint128_to_uint64 u)
| U128, S64 -> uint64_to_int64 (UInt128.uint128_to_uint64 u)
| U128, S128 -> uint128_to_int128 u
| S8, U1 -> UInt8.rem (int8_to_uint8 u) 2uy
| S8, U8 -> int8_to_uint8 u
| S8, U16 -> int8_to_uint16 u
| S8, U32 -> int8_to_uint32 u
| S8, U64 -> int8_to_uint64 u
| S8, U128 -> int64_to_uint128 (int8_to_int64 u)
| S8, S8 -> u
| S8, S16 -> int8_to_int16 u
| S8, S32 -> int8_to_int32 u
| S8, S64 -> int8_to_int64 u
| S8, S128 -> int64_to_int128 (int8_to_int64 u)
| S16, U1 -> UInt8.rem (int16_to_uint8 u) 2uy
| S16, U8 -> int16_to_uint8 u
| S16, U16 -> int16_to_uint16 u
| S16, U32 -> int16_to_uint32 u
| S16, U64 -> int16_to_uint64 u
| S16, U128 -> int64_to_uint128 (int16_to_int64 u)
| S16, S8 -> int16_to_int8 u
| S16, S16 -> u
| S16, S32 -> int16_to_int32 u
| S16, S64 -> int16_to_int64 u
| S16, S128 -> int64_to_int128 (int16_to_int64 u)
| S32, U1 -> UInt8.rem (int32_to_uint8 u) 2uy
| S32, U8 -> int32_to_uint8 u
| S32, U16 -> int32_to_uint16 u
| S32, U32 -> int32_to_uint32 u
| S32, U64 -> int32_to_uint64 u
| S32, U128 -> int64_to_uint128 (int32_to_int64 u)
| S32, S8 -> int32_to_int8 u
| S32, S16 -> int32_to_int16 u
| S32, S32 -> u
| S32, S64 -> int32_to_int64 u
| S32, S128 -> int64_to_int128 (int32_to_int64 u)
| S64, U1 -> UInt8.rem (int64_to_uint8 u) 2uy
| S64, U8 -> int64_to_uint8 u
| S64, U16 -> int64_to_uint16 u
| S64, U32 -> int64_to_uint32 u
| S64, U64 -> int64_to_uint64 u
| S64, U128 -> int64_to_uint128 u
| S64, S8 -> int64_to_int8 u
| S64, S16 -> int64_to_int16 u
| S64, S32 -> int64_to_int32 u
| S64, S64 -> u
| S64, S128 -> int64_to_int128 u
| S128, U1 -> UInt8.rem (uint64_to_uint8 (int128_to_uint64 u)) 2uy
| S128, U8 -> uint64_to_uint8 (int128_to_uint64 u)
| S128, U16 -> uint64_to_uint16 (int128_to_uint64 u)
| S128, U32 -> uint64_to_uint32 (int128_to_uint64 u)
| S128, U64 -> int128_to_uint64 u
| S128, U128 -> int128_to_uint128 u
| S128, S8 -> uint64_to_int8 (int128_to_uint64 u)
| S128, S16 -> uint64_to_int16 (int128_to_uint64 u)
| S128, S32 -> uint64_to_int32 (int128_to_uint64 u)
| S128, S64 -> uint64_to_int64 (int128_to_uint64 u)
| S128, S128 -> u
#pop-options
[@(strict_on_arguments [0])]
let ones t l =
match t with
| U1 -> 0x1uy
| U8 -> 0xFFuy
| U16 -> 0xFFFFus
| U32 -> 0xFFFFFFFFul
| U64 -> 0xFFFFFFFFFFFFFFFFuL
| U128 ->
let x = UInt128.uint64_to_uint128 0xFFFFFFFFFFFFFFFFuL in
let y = (UInt128.shift_left x 64ul) `UInt128.add` x in
assert_norm (UInt128.v y == pow2 128 - 1);
y
| _ -> mk_int (-1)
let zeros t l = mk_int 0
[@(strict_on_arguments [0])]
let add_mod #t #l a b =
match t with
| U1 -> UInt8.rem (UInt8.add_mod a b) 2uy
| U8 -> UInt8.add_mod a b
| U16 -> UInt16.add_mod a b
| U32 -> UInt32.add_mod a b
| U64 -> UInt64.add_mod a b
| U128 -> UInt128.add_mod a b
let add_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let add #t #l a b =
match t with
| U1 -> UInt8.add a b
| U8 -> UInt8.add a b
| U16 -> UInt16.add a b
| U32 -> UInt32.add a b
| U64 -> UInt64.add a b
| U128 -> UInt128.add a b
| S8 -> Int8.add a b
| S16 -> Int16.add a b
| S32 -> Int32.add a b
| S64 -> Int64.add a b
| S128 -> Int128.add a b
let add_lemma #t #l a b = ()
#push-options "--max_fuel 1"
[@(strict_on_arguments [0])]
let incr #t #l a =
match t with
| U1 -> UInt8.add a 1uy
| U8 -> UInt8.add a 1uy
| U16 -> UInt16.add a 1us
| U32 -> UInt32.add a 1ul
| U64 -> UInt64.add a 1uL
| U128 -> UInt128.add a (UInt128.uint_to_t 1)
| S8 -> Int8.add a 1y
| S16 -> Int16.add a 1s
| S32 -> Int32.add a 1l
| S64 -> Int64.add a 1L
| S128 -> Int128.add a (Int128.int_to_t 1)
let incr_lemma #t #l a = ()
#pop-options
[@(strict_on_arguments [0])]
let mul_mod #t #l a b =
match t with
| U1 -> UInt8.mul_mod a b
| U8 -> UInt8.mul_mod a b
| U16 -> UInt16.mul_mod a b
| U32 -> UInt32.mul_mod a b
| U64 -> UInt64.mul_mod a b
let mul_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let mul #t #l a b =
match t with
| U1 -> UInt8.mul a b
| U8 -> UInt8.mul a b
| U16 -> UInt16.mul a b
| U32 -> UInt32.mul a b
| U64 -> UInt64.mul a b
| S8 -> Int8.mul a b
| S16 -> Int16.mul a b
| S32 -> Int32.mul a b
| S64 -> Int64.mul a b
let mul_lemma #t #l a b = ()
let mul64_wide a b = UInt128.mul_wide a b
let mul64_wide_lemma a b = ()
let mul_s64_wide a b = Int128.mul_wide a b
let mul_s64_wide_lemma a b = ()
[@(strict_on_arguments [0])]
let sub_mod #t #l a b =
match t with
| U1 -> UInt8.rem (UInt8.sub_mod a b) 2uy
| U8 -> UInt8.sub_mod a b
| U16 -> UInt16.sub_mod a b
| U32 -> UInt32.sub_mod a b
| U64 -> UInt64.sub_mod a b
| U128 -> UInt128.sub_mod a b
let sub_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let sub #t #l a b =
match t with
| U1 -> UInt8.sub a b
| U8 -> UInt8.sub a b
| U16 -> UInt16.sub a b
| U32 -> UInt32.sub a b
| U64 -> UInt64.sub a b
| U128 -> UInt128.sub a b
| S8 -> Int8.sub a b
| S16 -> Int16.sub a b
| S32 -> Int32.sub a b
| S64 -> Int64.sub a b
| S128 -> Int128.sub a b
let sub_lemma #t #l a b = ()
#push-options "--max_fuel 1"
[@(strict_on_arguments [0])]
let decr #t #l a =
match t with
| U1 -> UInt8.sub a 1uy
| U8 -> UInt8.sub a 1uy
| U16 -> UInt16.sub a 1us
| U32 -> UInt32.sub a 1ul
| U64 -> UInt64.sub a 1uL
| U128 -> UInt128.sub a (UInt128.uint_to_t 1)
| S8 -> Int8.sub a 1y
| S16 -> Int16.sub a 1s
| S32 -> Int32.sub a 1l
| S64 -> Int64.sub a 1L
| S128 -> Int128.sub a (Int128.int_to_t 1)
let decr_lemma #t #l a = ()
#pop-options
[@(strict_on_arguments [0])]
let logxor #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logxor 0uy 0uy == 0uy);
assert_norm (UInt8.logxor 0uy 1uy == 1uy);
assert_norm (UInt8.logxor 1uy 0uy == 1uy);
assert_norm (UInt8.logxor 1uy 1uy == 0uy);
UInt8.logxor a b
| U8 -> UInt8.logxor a b
| U16 -> UInt16.logxor a b
| U32 -> UInt32.logxor a b
| U64 -> UInt64.logxor a b
| U128 -> UInt128.logxor a b
| S8 -> Int8.logxor a b
| S16 -> Int16.logxor a b
| S32 -> Int32.logxor a b
| S64 -> Int64.logxor a b
| S128 -> Int128.logxor a b
#push-options "--max_fuel 1"
val logxor_lemma_: #t:inttype -> #l:secrecy_level -> a:int_t t l -> b:int_t t l -> Lemma
(v (a `logxor` (a `logxor` b)) == v b)
let logxor_lemma_ #t #l a b =
match t with
| U1 | U8 | U16 | U32 | U64 | U128 ->
UInt.logxor_associative #(bits t) (v a) (v a) (v b);
UInt.logxor_self #(bits t) (v a);
UInt.logxor_commutative #(bits t) 0 (v b);
UInt.logxor_lemma_1 #(bits t) (v b)
| S8 | S16 | S32 | S64 | S128 ->
Int.logxor_associative #(bits t) (v a) (v a) (v b);
Int.logxor_self #(bits t) (v a);
Int.logxor_commutative #(bits t) 0 (v b);
Int.logxor_lemma_1 #(bits t) (v b)
let logxor_lemma #t #l a b =
logxor_lemma_ #t a b;
v_extensionality (logxor a (logxor a b)) b;
begin
match t with
| U1 | U8 | U16 | U32 | U64 | U128 -> UInt.logxor_commutative #(bits t) (v a) (v b)
| S8 | S16 | S32 | S64 | S128 -> Int.logxor_commutative #(bits t) (v a) (v b)
end;
v_extensionality (logxor a (logxor b a)) b;
begin
match t with
| U1 | U8 | U16 | U32 | U64 | U128 -> UInt.logxor_lemma_1 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.logxor_lemma_1 #(bits t) (v a)
end;
v_extensionality (logxor a (mk_int #t #l 0)) a
let logxor_lemma1 #t #l a b =
match v a, v b with
| _, 0 ->
UInt.logxor_lemma_1 #(bits t) (v a)
| 0, _ ->
UInt.logxor_commutative #(bits t) (v a) (v b);
UInt.logxor_lemma_1 #(bits t) (v b)
| 1, 1 ->
v_extensionality a b;
UInt.logxor_self #(bits t) (v a)
let logxor_spec #t #l a b =
match t with
| U1 ->
assert_norm (u1 0 `logxor` u1 0 == u1 0 /\ u1 0 `logxor` u1 1 == u1 1);
assert_norm (u1 1 `logxor` u1 0 == u1 1 /\ u1 1 `logxor` u1 1 == u1 0);
assert_norm (0 `logxor_v #U1` 0 == 0 /\ 0 `logxor_v #U1` 1 == 1);
assert_norm (1 `logxor_v #U1` 0 == 1 /\ 1 `logxor_v #U1` 1 == 0)
| _ -> ()
#pop-options
[@(strict_on_arguments [0])]
let logand #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logand 0uy 0uy == 0uy);
assert_norm (UInt8.logand 0uy 1uy == 0uy);
assert_norm (UInt8.logand 1uy 0uy == 0uy);
assert_norm (UInt8.logand 1uy 1uy == 1uy);
UInt8.logand a b
| U8 -> UInt8.logand a b
| U16 -> UInt16.logand a b
| U32 -> UInt32.logand a b
| U64 -> UInt64.logand a b
| U128 -> UInt128.logand a b
| S8 -> Int8.logand a b
| S16 -> Int16.logand a b
| S32 -> Int32.logand a b
| S64 -> Int64.logand a b
| S128 -> Int128.logand a b
let logand_zeros #t #l a =
match t with
| U1 -> assert_norm (u1 0 `logand` zeros U1 l == u1 0 /\ u1 1 `logand` zeros U1 l == u1 0)
| U8 | U16 | U32 | U64 | U128 -> UInt.logand_lemma_1 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.logand_lemma_1 #(bits t) (v a)
let logand_ones #t #l a =
match t with
| U1 -> assert_norm (u1 0 `logand` ones U1 l == u1 0 /\ u1 1 `logand` ones U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logand_lemma_2 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.logand_lemma_2 #(bits t) (v a)
let logand_lemma #t #l a b =
logand_zeros #t #l b;
logand_ones #t #l b;
match t with
| U1 ->
assert_norm (u1 0 `logand` zeros U1 l == u1 0 /\ u1 1 `logand` zeros U1 l == u1 0);
assert_norm (u1 0 `logand` ones U1 l == u1 0 /\ u1 1 `logand` ones U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logand_commutative #(bits t) (v a) (v b)
| S8 | S16 | S32 | S64 | S128 -> Int.logand_commutative #(bits t) (v a) (v b)
let logand_spec #t #l a b =
match t with
| U1 ->
assert_norm (u1 0 `logand` u1 0 == u1 0 /\ u1 0 `logand` u1 1 == u1 0);
assert_norm (u1 1 `logand` u1 0 == u1 0 /\ u1 1 `logand` u1 1 == u1 1);
assert_norm (0 `logand_v #U1` 0 == 0 /\ 0 `logand_v #U1` 1 == 0);
assert_norm (1 `logand_v #U1` 0 == 0 /\ 1 `logand_v #U1` 1 == 1)
| _ -> ()
let logand_le #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logand 0uy 0uy == 0uy);
assert_norm (UInt8.logand 0uy 1uy == 0uy);
assert_norm (UInt8.logand 1uy 0uy == 0uy);
assert_norm (UInt8.logand 1uy 1uy == 1uy)
| U8 -> UInt.logand_le (UInt.to_uint_t 8 (v a)) (UInt.to_uint_t 8 (v b))
| U16 -> UInt.logand_le (UInt.to_uint_t 16 (v a)) (UInt.to_uint_t 16 (v b))
| U32 -> UInt.logand_le (UInt.to_uint_t 32 (v a)) (UInt.to_uint_t 32 (v b))
| U64 -> UInt.logand_le (UInt.to_uint_t 64 (v a)) (UInt.to_uint_t 64 (v b))
| U128 -> UInt.logand_le (UInt.to_uint_t 128 (v a)) (UInt.to_uint_t 128 (v b))
let logand_mask #t #l a b m =
match t with
| U1 ->
assert_norm (UInt8.logand 0uy 0uy == 0uy);
assert_norm (UInt8.logand 0uy 1uy == 0uy);
assert_norm (UInt8.logand 1uy 0uy == 0uy);
assert_norm (UInt8.logand 1uy 1uy == 1uy)
| U8 -> UInt.logand_mask (UInt.to_uint_t 8 (v a)) m
| U16 -> UInt.logand_mask (UInt.to_uint_t 16 (v a)) m
| U32 -> UInt.logand_mask (UInt.to_uint_t 32 (v a)) m
| U64 -> UInt.logand_mask (UInt.to_uint_t 64 (v a)) m
| U128 -> UInt.logand_mask (UInt.to_uint_t 128 (v a)) m
[@(strict_on_arguments [0])]
let logor #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logor 0uy 0uy == 0uy);
assert_norm (UInt8.logor 0uy 1uy == 1uy);
assert_norm (UInt8.logor 1uy 0uy == 1uy);
assert_norm (UInt8.logor 1uy 1uy == 1uy);
UInt8.logor a b
| U8 -> UInt8.logor a b
| U16 -> UInt16.logor a b
| U32 -> UInt32.logor a b
| U64 -> UInt64.logor a b
| U128 -> UInt128.logor a b
| S8 -> Int8.logor a b
| S16 -> Int16.logor a b
| S32 -> Int32.logor a b
| S64 -> Int64.logor a b
| S128 -> Int128.logor a b
#push-options "--max_fuel 1"
let logor_disjoint #t #l a b m =
if m > 0 then
begin
UInt.logor_disjoint #(bits t) (v b) (v a) m;
UInt.logor_commutative #(bits t) (v b) (v a)
end
else
begin
UInt.logor_commutative #(bits t) (v a) (v b);
UInt.logor_lemma_1 #(bits t) (v b)
end
#pop-options
let logor_zeros #t #l a =
match t with
|U1 -> assert_norm(u1 0 `logor` zeros U1 l == u1 0 /\ u1 1 `logor` zeros U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logor_lemma_1 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.nth_lemma #(bits t) (Int.logor #(bits t) (v a) (Int.zero (bits t))) (v a)
let logor_ones #t #l a =
match t with
|U1 -> assert_norm(u1 0 `logor` ones U1 l == u1 1 /\ u1 1 `logor` ones U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logor_lemma_2 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.nth_lemma (Int.logor #(bits t) (v a) (Int.ones (bits t))) (Int.ones (bits t))
let logor_lemma #t #l a b =
logor_zeros #t #l b;
logor_ones #t #l b;
match t with
| U1 ->
assert_norm(u1 0 `logor` ones U1 l == u1 1 /\ u1 1 `logor` ones U1 l == u1 1);
assert_norm(u1 0 `logor` zeros U1 l == u1 0 /\ u1 1 `logor` zeros U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logor_commutative #(bits t) (v a) (v b)
| S8 | S16 | S32 | S64 | S128 -> Int.nth_lemma #(bits t) (Int.logor #(bits t) (v a) (v b)) (Int.logor #(bits t) (v b) (v a))
let logor_spec #t #l a b =
match t with
| U1 ->
assert_norm(u1 0 `logor` ones U1 l == u1 1 /\ u1 1 `logor` ones U1 l == u1 1);
assert_norm(u1 0 `logor` zeros U1 l == u1 0 /\ u1 1 `logor` zeros U1 l == u1 1);
assert_norm (0 `logor_v #U1` 0 == 0 /\ 0 `logor_v #U1` 1 == 1);
assert_norm (1 `logor_v #U1` 0 == 1 /\ 1 `logor_v #U1` 1 == 1)
| _ -> ()
[@(strict_on_arguments [0])]
let lognot #t #l a =
match t with
| U1 -> UInt8.rem (UInt8.lognot a) 2uy
| U8 -> UInt8.lognot a
| U16 -> UInt16.lognot a
| U32 -> UInt32.lognot a
| U64 -> UInt64.lognot a
| U128 -> UInt128.lognot a
| S8 -> Int8.lognot a
| S16 -> Int16.lognot a
| S32 -> Int32.lognot a
| S64 -> Int64.lognot a
| S128 -> Int128.lognot a
let lognot_lemma #t #l a =
match t with
|U1 -> assert_norm(lognot (u1 0) == u1 1 /\ lognot (u1 1) == u1 0)
| U8 | U16 | U32 | U64 | U128 ->
FStar.UInt.lognot_lemma_1 #(bits t);
UInt.nth_lemma (FStar.UInt.lognot #(bits t) (UInt.ones (bits t))) (UInt.zero (bits t))
| S8 | S16 | S32 | S64 | S128 ->
Int.nth_lemma (FStar.Int.lognot #(bits t) (Int.zero (bits t))) (Int.ones (bits t));
Int.nth_lemma (FStar.Int.lognot #(bits t) (Int.ones (bits t))) (Int.zero (bits t))
let lognot_spec #t #l a =
match t with
| U1 ->
assert_norm(lognot (u1 0) == u1 1 /\ lognot (u1 1) == u1 0);
assert_norm(lognot_v #U1 0 == 1 /\ lognot_v #U1 1 == 0)
| _ -> ()
[@(strict_on_arguments [0])]
let shift_right #t #l a b =
match t with
| U1 -> UInt8.shift_right a b
| U8 -> UInt8.shift_right a b
| U16 -> UInt16.shift_right a b
| U32 -> UInt32.shift_right a b
| U64 -> UInt64.shift_right a b
| U128 -> UInt128.shift_right a b
| S8 -> Int8.shift_arithmetic_right a b
| S16 -> Int16.shift_arithmetic_right a b
| S32 -> Int32.shift_arithmetic_right a b
| S64 -> Int64.shift_arithmetic_right a b
| S128 -> Int128.shift_arithmetic_right a b
val shift_right_value_aux_1: #n:pos{1 < n} -> a:Int.int_t n -> s:nat{n <= s} ->
Lemma (Int.shift_arithmetic_right #n a s = a / pow2 s)
let shift_right_value_aux_1 #n a s =
pow2_le_compat s n;
if a >= 0 then Int.sign_bit_positive a else Int.sign_bit_negative a
#push-options "--z3rlimit 200"
val shift_right_value_aux_2: #n:pos{1 < n} -> a:Int.int_t n ->
Lemma (Int.shift_arithmetic_right #n a 1 = a / 2)
let shift_right_value_aux_2 #n a =
if a >= 0 then
begin
Int.sign_bit_positive a;
UInt.shift_right_value_aux_3 #n a 1
end
else
begin
Int.sign_bit_negative a;
let a1 = Int.to_vec a in
let au = Int.to_uint a in
let sar = Int.shift_arithmetic_right #n a 1 in
let sar1 = Int.to_vec sar in
let sr = UInt.shift_right #n au 1 in
let sr1 = UInt.to_vec sr in
assert (Seq.equal (Seq.slice sar1 1 n) (Seq.slice sr1 1 n));
assert (Seq.equal sar1 (Seq.append (BitVector.ones_vec #1) (Seq.slice sr1 1 n)));
UInt.append_lemma #1 #(n-1) (BitVector.ones_vec #1) (Seq.slice sr1 1 n);
assert (Seq.equal (Seq.slice a1 0 (n-1)) (Seq.slice sar1 1 n));
UInt.slice_left_lemma a1 (n-1);
assert (sar + pow2 n = pow2 (n-1) + (au / 2));
pow2_double_sum (n-1);
assert (sar + pow2 (n-1) = (a + pow2 n) / 2);
pow2_double_mult (n-1);
lemma_div_plus a (pow2 (n-1)) 2;
assert (sar = a / 2)
end
val shift_right_value_aux_3: #n:pos -> a:Int.int_t n -> s:pos{s < n} ->
Lemma (ensures Int.shift_arithmetic_right #n a s = a / pow2 s)
(decreases s)
let rec shift_right_value_aux_3 #n a s =
if s = 1 then
shift_right_value_aux_2 #n a
else
begin
let a1 = Int.to_vec a in
assert (Seq.equal (BitVector.shift_arithmetic_right_vec #n a1 s)
(BitVector.shift_arithmetic_right_vec #n
(BitVector.shift_arithmetic_right_vec #n a1 (s-1)) 1));
assert (Int.shift_arithmetic_right #n a s =
Int.shift_arithmetic_right #n (Int.shift_arithmetic_right #n a (s-1)) 1);
shift_right_value_aux_3 #n a (s-1);
shift_right_value_aux_2 #n (Int.shift_arithmetic_right #n a (s-1));
assert (Int.shift_arithmetic_right #n a s = (a / pow2 (s-1)) / 2);
pow2_double_mult (s-1);
division_multiplication_lemma a (pow2 (s-1)) 2
end
let shift_right_lemma #t #l a b =
match t with
| U1 | U8 | U16 | U32 | U64 | U128 -> ()
| S8 | S16 | S32 | S64 | S128 ->
if v b = 0 then ()
else if v b >= bits t then
shift_right_value_aux_1 #(bits t) (v a) (v b)
else
shift_right_value_aux_3 #(bits t) (v a) (v b)
[@(strict_on_arguments [0])]
let shift_left #t #l a b =
match t with
| U1 -> UInt8.shift_left a b
| U8 -> UInt8.shift_left a b
| U16 -> UInt16.shift_left a b
| U32 -> UInt32.shift_left a b
| U64 -> UInt64.shift_left a b
| U128 -> UInt128.shift_left a b
| S8 -> Int8.shift_left a b
| S16 -> Int16.shift_left a b
| S32 -> Int32.shift_left a b
| S64 -> Int64.shift_left a b
| S128 -> Int128.shift_left a b
#push-options "--max_fuel 1"
let shift_left_lemma #t #l a b = ()
let rotate_right #t #l a b =
logor (shift_right a b) (shift_left a (sub #U32 (size (bits t)) b))
let rotate_left #t #l a b =
logor (shift_left a b) (shift_right a (sub #U32 (size (bits t)) b))
[@(strict_on_arguments [0])]
let ct_abs #t #l a =
match t with
| S8 -> Int8.ct_abs a
| S16 -> Int16.ct_abs a
| S32 -> Int32.ct_abs a
| S64 -> Int64.ct_abs a
#pop-options
[@(strict_on_arguments [0])]
let eq_mask #t a b =
match t with
| U1 -> lognot (logxor a b)
| U8 -> UInt8.eq_mask a b
| U16 -> UInt16.eq_mask a b
| U32 -> UInt32.eq_mask a b
| U64 -> UInt64.eq_mask a b
| U128 -> UInt128.eq_mask a b
| S8 -> Int.Cast.uint8_to_int8 (UInt8.eq_mask (to_u8 a) (to_u8 b))
| S16 -> Int.Cast.uint16_to_int16 (UInt16.eq_mask (to_u16 a) (to_u16 b))
| S32 -> Int.Cast.uint32_to_int32 (UInt32.eq_mask (to_u32 a) (to_u32 b))
| S64 -> Int.Cast.uint64_to_int64 (UInt64.eq_mask (to_u64 a) (to_u64 b))
val eq_mask_lemma_unsigned: #t:inttype{unsigned t} -> a:int_t t SEC -> b:int_t t SEC -> Lemma
(if v a = v b then v (eq_mask a b) == ones_v t
else v (eq_mask a b) == 0)
let eq_mask_lemma_unsigned #t a b =
match t with
| U1 ->
assert_norm (
logxor (u1 0) (u1 0) == u1 0 /\ logxor (u1 0) (u1 1) == u1 1 /\
logxor (u1 1) (u1 0) == u1 1 /\ logxor (u1 1) (u1 1) == u1 0 /\
lognot (u1 1) == u1 0 /\ lognot (u1 0) == u1 1)
| U8 | U16 | U32 | U64 | U128 -> ()
#push-options "--z3rlimit 200"
val eq_mask_lemma_signed: #t:inttype{signed t /\ ~(S128? t)} -> a:int_t t SEC -> b:int_t t SEC -> Lemma
(if v a = v b then v (eq_mask a b) == ones_v t
else v (eq_mask a b) == 0)
let eq_mask_lemma_signed #t a b =
match t with
| S8 ->
begin
assert_norm (pow2 8 = 2 * pow2 7);
if 0 <= v a then modulo_lemma (v a) (pow2 8)
else
begin
modulo_addition_lemma (v a) 1 (pow2 8);
modulo_lemma (v a + pow2 8) (pow2 8)
end
end
| S16 ->
begin
assert_norm (pow2 16 = 2 * pow2 15);
if 0 <= v a then modulo_lemma (v a) (pow2 16)
else
begin
modulo_addition_lemma (v a) 1 (pow2 16);
modulo_lemma (v a + pow2 16) (pow2 16)
end
end
| S32 ->
begin
if 0 <= v a then modulo_lemma (v a) (pow2 32)
else
begin
modulo_addition_lemma (v a) 1 (pow2 32);
modulo_lemma (v a + pow2 32) (pow2 32)
end
end
| S64 ->
begin
if 0 <= v a then modulo_lemma (v a) (pow2 64)
else
begin
modulo_addition_lemma (v a) 1 (pow2 64);
modulo_lemma (v a + pow2 64) (pow2 64)
end
end
#pop-options
let eq_mask_lemma #t a b =
if signed t then eq_mask_lemma_signed a b
else eq_mask_lemma_unsigned a b
let eq_mask_logand_lemma #t a b c =
eq_mask_lemma a b;
logand_zeros c;
logand_ones c;
match t with
| U1 | U8 | U16 | U32 | U64 | U128 -> UInt.logand_commutative #(bits t) (v (eq_mask a b)) (v c)
| S8 | S16 | S32 | S64 -> Int.logand_commutative #(bits t) (v (eq_mask a b)) (v c)
[@(strict_on_arguments [0])]
let neq_mask #t a b = lognot (eq_mask #t a b)
let neq_mask_lemma #t a b =
match t with
| U1 -> assert_norm (lognot (u1 1) == u1 0 /\ lognot (u1 0) == u1 1)
| _ ->
UInt.lognot_lemma_1 #(bits t);
UInt.lognot_self #(bits t) 0
[@(strict_on_arguments [0])]
let gte_mask #t a b =
match t with
| U1 -> logor a (lognot b)
| U8 -> UInt8.gte_mask a b
| U16 -> UInt16.gte_mask a b
| U32 -> UInt32.gte_mask a b
| U64 -> UInt64.gte_mask a b
| U128 -> UInt128.gte_mask a b
let gte_mask_lemma #t a b =
match t with
| U1 ->
begin
assert_norm (
logor (u1 0) (u1 0) == u1 0 /\ logor (u1 1) (u1 1) == u1 1 /\
logor (u1 0) (u1 1) == u1 1 /\ logor (u1 1) (u1 0) == u1 1 /\
lognot (u1 1) == u1 0 /\ lognot (u1 0) == u1 1)
end
| _ -> ()
let gte_mask_logand_lemma #t a b c =
logand_zeros c;
logand_ones c;
match t with
| U1 ->
assert_norm (
logor (u1 0) (u1 0) == u1 0 /\ logor (u1 1) (u1 1) == u1 1 /\
logor (u1 0) (u1 1) == u1 1 /\ logor (u1 1) (u1 0) == u1 1 /\
lognot (u1 1) == u1 0 /\ lognot (u1 0) == u1 1)
| _ -> UInt.logand_commutative #(bits t) (v (gte_mask a b)) (v c)
let lt_mask #t a b = lognot (gte_mask a b)
let lt_mask_lemma #t a b =
assert_norm (lognot (u1 1) == u1 0 /\ lognot (u1 0) == u1 1);
UInt.lognot_lemma_1 #(bits t);
UInt.lognot_self #(bits t) 0
let gt_mask #t a b = logand (gte_mask a b) (neq_mask a b)
let gt_mask_lemma #t a b =
logand_zeros (gte_mask a b);
logand_ones (gte_mask a b)
let lte_mask #t a b = logor (lt_mask a b) (eq_mask a b)
let lte_mask_lemma #t a b =
match t with
| U1 ->
assert_norm (
logor (u1 0) (u1 0) == u1 0 /\ logor (u1 1) (u1 1) == u1 1 /\
logor (u1 0) (u1 1) == u1 1 /\ logor (u1 1) (u1 0) == u1 1)
| U8 | U16 | U32 | U64 | U128 ->
if v a > v b then
UInt.logor_lemma_1 #(bits t) (v (lt_mask a b))
else if v a = v b then
UInt.logor_lemma_2 #(bits t) (v (lt_mask a b))
else
UInt.logor_lemma_1 #(bits t) (v (lt_mask a b))
#push-options "--max_fuel 1"
val mod_mask_value: #t:inttype -> #l:secrecy_level -> m:shiftval t{pow2 (uint_v m) <= maxint t} ->
Lemma (v (mod_mask #t #l m) == pow2 (v m) - 1)
let mod_mask_value #t #l m =
shift_left_lemma (mk_int #t #l 1) m;
pow2_double_mult (bits t - 1);
pow2_lt_compat (bits t) (v m);
small_modulo_lemma_1 (pow2 (v m)) (pow2 (bits t));
small_modulo_lemma_1 (pow2 (v m) - 1) (pow2 (bits t))
let mod_mask_lemma #t #l a m =
mod_mask_value #t #l m;
if unsigned t || 0 <= v a then
if v m = 0 then
UInt.logand_lemma_1 #(bits t) (v a)
else
UInt.logand_mask #(bits t) (v a) (v m)
else
begin
let a1 = v a in
let a2 = v a + pow2 (bits t) in
pow2_plus (bits t - v m) (v m);
pow2_le_compat (bits t - 1) (v m);
lemma_mod_plus a1 (pow2 (bits t - v m)) (pow2 (v m));
if v m = 0 then
UInt.logand_lemma_1 #(bits t) a2
else
UInt.logand_mask #(bits t) a2 (v m)
end
#pop-options
#push-options "--max_fuel 0 --max_ifuel 0 --z3rlimit 1000"
(**
Conditionally subtracts 2^(bits t') from a in constant-time,
so that the result fits in t'; i.e.
b = if a >= 2^(bits t' - 1) then a - 2^(bits t') else a
*)
inline_for_extraction
val conditional_subtract:
#t:inttype{signed t}
-> #l:secrecy_level
-> t':inttype{signed t' /\ bits t' < bits t}
-> a:int_t t l{0 <= v a /\ v a <= pow2 (bits t') - 1}
-> b:int_t t l{v b = v a @%. t'}
let conditional_subtract #t #l t' a =
assert_norm (pow2 7 = 128);
assert_norm (pow2 15 = 32768);
let pow2_bits = shift_left #t #l (mk_int 1) (size (bits t')) in
shift_left_lemma #t #l (mk_int 1) (size (bits t'));
let pow2_bits_minus_one = shift_left #t #l (mk_int 1) (size (bits t' - 1)) in
shift_left_lemma #t #l (mk_int 1) (size (bits t' - 1));
// assert (v pow2_bits == pow2 (bits t'));
// assert (v pow2_bits_minus_one == pow2 (bits t' - 1));
let a2 = a `sub` pow2_bits_minus_one in
let mask = shift_right a2 (size (bits t - 1)) in
shift_right_lemma a2 (size (bits t - 1));
// assert (if v a2 < 0 then v mask = -1 else v mask = 0);
let a3 = a `sub` pow2_bits in
logand_lemma mask pow2_bits;
a3 `add` (mask `logand` pow2_bits)
let cast_mod #t #l t' l' a =
assert_norm (pow2 7 = 128);
assert_norm (pow2 15 = 32768);
if bits t' >= bits t then
cast t' l' a
else
begin
let m = size (bits t') in
mod_mask_lemma a m;
let b = conditional_subtract t' (a `logand` mod_mask m) in
cast t' l' b
end
#pop-options
[@(strict_on_arguments [0])]
let div #t x y =
match t with
| U1 -> UInt8.div x y
| U8 -> UInt8.div x y
| U16 -> UInt16.div x y
| U32 -> UInt32.div x y
| U64 -> UInt64.div x y
| S8 -> Int.pow2_values 8; Int8.div x y
| S16 -> Int.pow2_values 16; Int16.div x y
| S32 -> Int.pow2_values 32; Int32.div x y
| S64 -> Int.pow2_values 64; Int64.div x y
let div_lemma #t a b =
match t with
| U1 | U8 | U16 | U32 | U64 -> ()
| S8 -> Int.pow2_values 8
| S16 -> Int.pow2_values 16
| S32 -> Int.pow2_values 32
| S64 -> Int.pow2_values 64
let mod #t x y =
match t with
| U1 -> UInt8.rem x y
| U8 -> UInt8.rem x y
| U16 -> UInt16.rem x y
| U32 -> UInt32.rem x y
| U64 -> UInt64.rem x y
| S8 -> Int.pow2_values 8; Int8.rem x y
| S16 -> Int.pow2_values 16; Int16.rem x y
| S32 -> Int.pow2_values 32; Int32.rem x y
| S64 -> Int.pow2_values 64; Int64.rem x y
let mod_lemma #t a b =
match t with
| U1 | U8 | U16 | U32 | U64 -> ()
| S8 -> Int.pow2_values 8
| S16 -> Int.pow2_values 16
| S32 -> Int.pow2_values 32
| S64 -> Int.pow2_values 64
let eq #t x y =
x = y
let eq_lemma #t x y = ()
let ne #t x y = not (eq x y)
let ne_lemma #t x y = ()
let lt #t x y =
match t with
| U1 -> UInt8.lt x y
| U8 -> UInt8.lt x y
| U16 -> UInt16.lt x y
| U32 -> UInt32.lt x y
| U64 -> UInt64.lt x y
| U128 -> UInt128.lt x y
| S8 -> Int8.lt x y
| S16 -> Int16.lt x y
| S32 -> Int32.lt x y
| S64 -> Int64.lt x y
| S128 -> Int128.lt x y
let lt_lemma #t x y = ()
let lte #t x y =
match t with
| U1 -> UInt8.lte x y
| U8 -> UInt8.lte x y
| U16 -> UInt16.lte x y
| U32 -> UInt32.lte x y
| U64 -> UInt64.lte x y
| U128 -> UInt128.lte x y
| S8 -> Int8.lte x y
| S16 -> Int16.lte x y
| S32 -> Int32.lte x y
| S64 -> Int64.lte x y
| S128 -> Int128.lte x y
let lte_lemma #t x y = ()
let gt #t x y =
match t with
| U1 -> UInt8.gt x y
| U8 -> UInt8.gt x y
| U16 -> UInt16.gt x y
| U32 -> UInt32.gt x y
| U64 -> UInt64.gt x y
| U128 -> UInt128.gt x y
| S8 -> Int8.gt x y
| S16 -> Int16.gt x y
| S32 -> Int32.gt x y
| S64 -> Int64.gt x y
| S128 -> Int128.gt x y
let gt_lemma #t x y = () | false | false | Lib.IntTypes.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 200,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val gte: #t:inttype -> int_t t PUB -> int_t t PUB -> bool | [] | Lib.IntTypes.gte | {
"file_name": "lib/Lib.IntTypes.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | x: Lib.IntTypes.int_t t Lib.IntTypes.PUB -> y: Lib.IntTypes.int_t t Lib.IntTypes.PUB -> Prims.bool | {
"end_col": 26,
"end_line": 1136,
"start_col": 2,
"start_line": 1125
} |
Prims.Tot | val logand: #t:inttype -> #l:secrecy_level
-> int_t t l
-> int_t t l
-> int_t t l | [
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let logand #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logand 0uy 0uy == 0uy);
assert_norm (UInt8.logand 0uy 1uy == 0uy);
assert_norm (UInt8.logand 1uy 0uy == 0uy);
assert_norm (UInt8.logand 1uy 1uy == 1uy);
UInt8.logand a b
| U8 -> UInt8.logand a b
| U16 -> UInt16.logand a b
| U32 -> UInt32.logand a b
| U64 -> UInt64.logand a b
| U128 -> UInt128.logand a b
| S8 -> Int8.logand a b
| S16 -> Int16.logand a b
| S32 -> Int32.logand a b
| S64 -> Int64.logand a b
| S128 -> Int128.logand a b | val logand: #t:inttype -> #l:secrecy_level
-> int_t t l
-> int_t t l
-> int_t t l
let logand #t #l a b = | false | null | false | match t with
| U1 ->
assert_norm (UInt8.logand 0uy 0uy == 0uy);
assert_norm (UInt8.logand 0uy 1uy == 0uy);
assert_norm (UInt8.logand 1uy 0uy == 0uy);
assert_norm (UInt8.logand 1uy 1uy == 1uy);
UInt8.logand a b
| U8 -> UInt8.logand a b
| U16 -> UInt16.logand a b
| U32 -> UInt32.logand a b
| U64 -> UInt64.logand a b
| U128 -> UInt128.logand a b
| S8 -> Int8.logand a b
| S16 -> Int16.logand a b
| S32 -> Int32.logand a b
| S64 -> Int64.logand a b
| S128 -> Int128.logand a b | {
"checked_file": "Lib.IntTypes.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt8.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt16.fsti.checked",
"FStar.UInt128.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int8.fsti.checked",
"FStar.Int64.fsti.checked",
"FStar.Int32.fsti.checked",
"FStar.Int16.fsti.checked",
"FStar.Int128.fsti.checked",
"FStar.Int.Cast.Full.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Int.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "Lib.IntTypes.fst"
} | [
"total"
] | [
"Lib.IntTypes.inttype",
"Lib.IntTypes.secrecy_level",
"Lib.IntTypes.int_t",
"FStar.UInt8.logand",
"Prims.unit",
"FStar.Pervasives.assert_norm",
"Prims.eq2",
"FStar.UInt8.t",
"FStar.UInt8.__uint_to_t",
"FStar.UInt16.logand",
"FStar.UInt32.logand",
"FStar.UInt64.logand",
"FStar.UInt128.logand",
"FStar.Int8.logand",
"FStar.Int16.logand",
"FStar.Int32.logand",
"FStar.Int64.logand",
"FStar.Int128.logand"
] | [] | module Lib.IntTypes
open FStar.Math.Lemmas
#push-options "--max_fuel 0 --max_ifuel 1 --z3rlimit 200"
let pow2_2 _ = assert_norm (pow2 2 = 4)
let pow2_3 _ = assert_norm (pow2 3 = 8)
let pow2_4 _ = assert_norm (pow2 4 = 16)
let pow2_127 _ = assert_norm (pow2 127 = 0x80000000000000000000000000000000)
let bits_numbytes t = ()
let sec_int_t t = pub_int_t t
let sec_int_v #t u = pub_int_v u
let secret #t x = x
[@(strict_on_arguments [0])]
let mk_int #t #l x =
match t with
| U1 -> UInt8.uint_to_t x
| U8 -> UInt8.uint_to_t x
| U16 -> UInt16.uint_to_t x
| U32 -> UInt32.uint_to_t x
| U64 -> UInt64.uint_to_t x
| U128 -> UInt128.uint_to_t x
| S8 -> Int8.int_to_t x
| S16 -> Int16.int_to_t x
| S32 -> Int32.int_to_t x
| S64 -> Int64.int_to_t x
| S128 -> Int128.int_to_t x
val v_extensionality:
#t:inttype
-> #l:secrecy_level
-> a:int_t t l
-> b:int_t t l
-> Lemma
(requires v a == v b)
(ensures a == b)
let v_extensionality #t #l a b =
match t with
| U1 -> ()
| U8 -> UInt8.v_inj a b
| U16 -> UInt16.v_inj a b
| U32 -> UInt32.v_inj a b
| U64 -> UInt64.v_inj a b
| U128 -> UInt128.v_inj a b
| S8 -> Int8.v_inj a b
| S16 -> Int16.v_inj a b
| S32 -> Int32.v_inj a b
| S64 -> Int64.v_inj a b
| S128 -> Int128.v_inj a b
let v_injective #t #l a =
v_extensionality a (mk_int (v a))
let v_mk_int #t #l n = ()
let u128 n = FStar.UInt128.uint64_to_uint128 (u64 n)
// KaRaMeL will extract this to FStar_Int128_int_to_t, which isn't provided
// We'll need to have FStar.Int128.int64_to_int128 to support int128_t literals
let i128 n =
assert_norm (pow2 (bits S64 - 1) <= pow2 (bits S128 - 1));
sint #S128 #SEC n
let size_to_uint32 x = x
let size_to_uint64 x = Int.Cast.uint32_to_uint64 x
let byte_to_uint8 x = x
let byte_to_int8 x = Int.Cast.uint8_to_int8 x
let op_At_Percent = Int.op_At_Percent
// FStar.UInt128 gets special treatment in KaRaMeL. There is no
// equivalent for FStar.Int128 at the moment, so we use the three
// assumed cast operators below.
//
// Using them will fail at runtime with an informative message.
// The commented-out implementations show that they are realizable.
//
// When support for `FStar.Int128` is added KaRaMeL, these casts must
// be added as special cases. When using builtin compiler support for
// `int128_t`, they can be implemented directly as C casts without
// undefined or implementation-defined behaviour.
assume
val uint128_to_int128: a:UInt128.t{v a <= maxint S128} -> b:Int128.t{Int128.v b == UInt128.v a}
//let uint128_to_int128 a = Int128.int_to_t (v a)
assume
val int128_to_uint128: a:Int128.t -> b:UInt128.t{UInt128.v b == Int128.v a % pow2 128}
//let int128_to_uint128 a = mk_int (v a % pow2 128)
assume
val int64_to_int128: a:Int64.t -> b:Int128.t{Int128.v b == Int64.v a}
//let int64_to_int128 a = Int128.int_to_t (v a)
val uint64_to_int128: a:UInt64.t -> b:Int128.t{Int128.v b == UInt64.v a}
let uint64_to_int128 a = uint128_to_int128 (Int.Cast.Full.uint64_to_uint128 a)
val int64_to_uint128: a:Int64.t -> b:UInt128.t{UInt128.v b == Int64.v a % pow2 128}
let int64_to_uint128 a = int128_to_uint128 (int64_to_int128 a)
val int128_to_uint64: a:Int128.t -> b:UInt64.t{UInt64.v b == Int128.v a % pow2 64}
let int128_to_uint64 a = Int.Cast.Full.uint128_to_uint64 (int128_to_uint128 a)
#push-options "--z3rlimit 1000"
[@(strict_on_arguments [0;2])]
let cast #t #l t' l' u =
assert_norm (pow2 8 = 2 * pow2 7);
assert_norm (pow2 16 = 2 * pow2 15);
assert_norm (pow2 64 * pow2 64 = pow2 128);
assert_norm (pow2 16 * pow2 48 = pow2 64);
assert_norm (pow2 8 * pow2 56 = pow2 64);
assert_norm (pow2 32 * pow2 32 = pow2 64);
modulo_modulo_lemma (v u) (pow2 32) (pow2 32);
modulo_modulo_lemma (v u) (pow2 64) (pow2 64);
modulo_modulo_lemma (v u) (pow2 128) (pow2 64);
modulo_modulo_lemma (v u) (pow2 16) (pow2 48);
modulo_modulo_lemma (v u) (pow2 8) (pow2 56);
let open FStar.Int.Cast in
let open FStar.Int.Cast.Full in
match t, t' with
| U1, U1 -> u
| U1, U8 -> u
| U1, U16 -> uint8_to_uint16 u
| U1, U32 -> uint8_to_uint32 u
| U1, U64 -> uint8_to_uint64 u
| U1, U128 -> UInt128.uint64_to_uint128 (uint8_to_uint64 u)
| U1, S8 -> uint8_to_int8 u
| U1, S16 -> uint8_to_int16 u
| U1, S32 -> uint8_to_int32 u
| U1, S64 -> uint8_to_int64 u
| U1, S128 -> uint64_to_int128 (uint8_to_uint64 u)
| U8, U1 -> UInt8.rem u 2uy
| U8, U8 -> u
| U8, U16 -> uint8_to_uint16 u
| U8, U32 -> uint8_to_uint32 u
| U8, U64 -> uint8_to_uint64 u
| U8, U128 -> UInt128.uint64_to_uint128 (uint8_to_uint64 u)
| U8, S8 -> uint8_to_int8 u
| U8, S16 -> uint8_to_int16 u
| U8, S32 -> uint8_to_int32 u
| U8, S64 -> uint8_to_int64 u
| U8, S128 -> uint64_to_int128 (uint8_to_uint64 u)
| U16, U1 -> UInt8.rem (uint16_to_uint8 u) 2uy
| U16, U8 -> uint16_to_uint8 u
| U16, U16 -> u
| U16, U32 -> uint16_to_uint32 u
| U16, U64 -> uint16_to_uint64 u
| U16, U128 -> UInt128.uint64_to_uint128 (uint16_to_uint64 u)
| U16, S8 -> uint16_to_int8 u
| U16, S16 -> uint16_to_int16 u
| U16, S32 -> uint16_to_int32 u
| U16, S64 -> uint16_to_int64 u
| U16, S128 -> uint64_to_int128 (uint16_to_uint64 u)
| U32, U1 -> UInt8.rem (uint32_to_uint8 u) 2uy
| U32, U8 -> uint32_to_uint8 u
| U32, U16 -> uint32_to_uint16 u
| U32, U32 -> u
| U32, U64 -> uint32_to_uint64 u
| U32, U128 -> UInt128.uint64_to_uint128 (uint32_to_uint64 u)
| U32, S8 -> uint32_to_int8 u
| U32, S16 -> uint32_to_int16 u
| U32, S32 -> uint32_to_int32 u
| U32, S64 -> uint32_to_int64 u
| U32, S128 -> uint64_to_int128 (uint32_to_uint64 u)
| U64, U1 -> UInt8.rem (uint64_to_uint8 u) 2uy
| U64, U8 -> uint64_to_uint8 u
| U64, U16 -> uint64_to_uint16 u
| U64, U32 -> uint64_to_uint32 u
| U64, U64 -> u
| U64, U128 -> UInt128.uint64_to_uint128 u
| U64, S8 -> uint64_to_int8 u
| U64, S16 -> uint64_to_int16 u
| U64, S32 -> uint64_to_int32 u
| U64, S64 -> uint64_to_int64 u
| U64, S128 -> uint64_to_int128 u
| U128, U1 -> UInt8.rem (uint64_to_uint8 (uint128_to_uint64 u)) 2uy
| U128, U8 -> uint64_to_uint8 (UInt128.uint128_to_uint64 u)
| U128, U16 -> uint64_to_uint16 (UInt128.uint128_to_uint64 u)
| U128, U32 -> uint64_to_uint32 (UInt128.uint128_to_uint64 u)
| U128, U64 -> UInt128.uint128_to_uint64 u
| U128, U128 -> u
| U128, S8 -> uint64_to_int8 (UInt128.uint128_to_uint64 u)
| U128, S16 -> uint64_to_int16 (UInt128.uint128_to_uint64 u)
| U128, S32 -> uint64_to_int32 (UInt128.uint128_to_uint64 u)
| U128, S64 -> uint64_to_int64 (UInt128.uint128_to_uint64 u)
| U128, S128 -> uint128_to_int128 u
| S8, U1 -> UInt8.rem (int8_to_uint8 u) 2uy
| S8, U8 -> int8_to_uint8 u
| S8, U16 -> int8_to_uint16 u
| S8, U32 -> int8_to_uint32 u
| S8, U64 -> int8_to_uint64 u
| S8, U128 -> int64_to_uint128 (int8_to_int64 u)
| S8, S8 -> u
| S8, S16 -> int8_to_int16 u
| S8, S32 -> int8_to_int32 u
| S8, S64 -> int8_to_int64 u
| S8, S128 -> int64_to_int128 (int8_to_int64 u)
| S16, U1 -> UInt8.rem (int16_to_uint8 u) 2uy
| S16, U8 -> int16_to_uint8 u
| S16, U16 -> int16_to_uint16 u
| S16, U32 -> int16_to_uint32 u
| S16, U64 -> int16_to_uint64 u
| S16, U128 -> int64_to_uint128 (int16_to_int64 u)
| S16, S8 -> int16_to_int8 u
| S16, S16 -> u
| S16, S32 -> int16_to_int32 u
| S16, S64 -> int16_to_int64 u
| S16, S128 -> int64_to_int128 (int16_to_int64 u)
| S32, U1 -> UInt8.rem (int32_to_uint8 u) 2uy
| S32, U8 -> int32_to_uint8 u
| S32, U16 -> int32_to_uint16 u
| S32, U32 -> int32_to_uint32 u
| S32, U64 -> int32_to_uint64 u
| S32, U128 -> int64_to_uint128 (int32_to_int64 u)
| S32, S8 -> int32_to_int8 u
| S32, S16 -> int32_to_int16 u
| S32, S32 -> u
| S32, S64 -> int32_to_int64 u
| S32, S128 -> int64_to_int128 (int32_to_int64 u)
| S64, U1 -> UInt8.rem (int64_to_uint8 u) 2uy
| S64, U8 -> int64_to_uint8 u
| S64, U16 -> int64_to_uint16 u
| S64, U32 -> int64_to_uint32 u
| S64, U64 -> int64_to_uint64 u
| S64, U128 -> int64_to_uint128 u
| S64, S8 -> int64_to_int8 u
| S64, S16 -> int64_to_int16 u
| S64, S32 -> int64_to_int32 u
| S64, S64 -> u
| S64, S128 -> int64_to_int128 u
| S128, U1 -> UInt8.rem (uint64_to_uint8 (int128_to_uint64 u)) 2uy
| S128, U8 -> uint64_to_uint8 (int128_to_uint64 u)
| S128, U16 -> uint64_to_uint16 (int128_to_uint64 u)
| S128, U32 -> uint64_to_uint32 (int128_to_uint64 u)
| S128, U64 -> int128_to_uint64 u
| S128, U128 -> int128_to_uint128 u
| S128, S8 -> uint64_to_int8 (int128_to_uint64 u)
| S128, S16 -> uint64_to_int16 (int128_to_uint64 u)
| S128, S32 -> uint64_to_int32 (int128_to_uint64 u)
| S128, S64 -> uint64_to_int64 (int128_to_uint64 u)
| S128, S128 -> u
#pop-options
[@(strict_on_arguments [0])]
let ones t l =
match t with
| U1 -> 0x1uy
| U8 -> 0xFFuy
| U16 -> 0xFFFFus
| U32 -> 0xFFFFFFFFul
| U64 -> 0xFFFFFFFFFFFFFFFFuL
| U128 ->
let x = UInt128.uint64_to_uint128 0xFFFFFFFFFFFFFFFFuL in
let y = (UInt128.shift_left x 64ul) `UInt128.add` x in
assert_norm (UInt128.v y == pow2 128 - 1);
y
| _ -> mk_int (-1)
let zeros t l = mk_int 0
[@(strict_on_arguments [0])]
let add_mod #t #l a b =
match t with
| U1 -> UInt8.rem (UInt8.add_mod a b) 2uy
| U8 -> UInt8.add_mod a b
| U16 -> UInt16.add_mod a b
| U32 -> UInt32.add_mod a b
| U64 -> UInt64.add_mod a b
| U128 -> UInt128.add_mod a b
let add_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let add #t #l a b =
match t with
| U1 -> UInt8.add a b
| U8 -> UInt8.add a b
| U16 -> UInt16.add a b
| U32 -> UInt32.add a b
| U64 -> UInt64.add a b
| U128 -> UInt128.add a b
| S8 -> Int8.add a b
| S16 -> Int16.add a b
| S32 -> Int32.add a b
| S64 -> Int64.add a b
| S128 -> Int128.add a b
let add_lemma #t #l a b = ()
#push-options "--max_fuel 1"
[@(strict_on_arguments [0])]
let incr #t #l a =
match t with
| U1 -> UInt8.add a 1uy
| U8 -> UInt8.add a 1uy
| U16 -> UInt16.add a 1us
| U32 -> UInt32.add a 1ul
| U64 -> UInt64.add a 1uL
| U128 -> UInt128.add a (UInt128.uint_to_t 1)
| S8 -> Int8.add a 1y
| S16 -> Int16.add a 1s
| S32 -> Int32.add a 1l
| S64 -> Int64.add a 1L
| S128 -> Int128.add a (Int128.int_to_t 1)
let incr_lemma #t #l a = ()
#pop-options
[@(strict_on_arguments [0])]
let mul_mod #t #l a b =
match t with
| U1 -> UInt8.mul_mod a b
| U8 -> UInt8.mul_mod a b
| U16 -> UInt16.mul_mod a b
| U32 -> UInt32.mul_mod a b
| U64 -> UInt64.mul_mod a b
let mul_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let mul #t #l a b =
match t with
| U1 -> UInt8.mul a b
| U8 -> UInt8.mul a b
| U16 -> UInt16.mul a b
| U32 -> UInt32.mul a b
| U64 -> UInt64.mul a b
| S8 -> Int8.mul a b
| S16 -> Int16.mul a b
| S32 -> Int32.mul a b
| S64 -> Int64.mul a b
let mul_lemma #t #l a b = ()
let mul64_wide a b = UInt128.mul_wide a b
let mul64_wide_lemma a b = ()
let mul_s64_wide a b = Int128.mul_wide a b
let mul_s64_wide_lemma a b = ()
[@(strict_on_arguments [0])]
let sub_mod #t #l a b =
match t with
| U1 -> UInt8.rem (UInt8.sub_mod a b) 2uy
| U8 -> UInt8.sub_mod a b
| U16 -> UInt16.sub_mod a b
| U32 -> UInt32.sub_mod a b
| U64 -> UInt64.sub_mod a b
| U128 -> UInt128.sub_mod a b
let sub_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let sub #t #l a b =
match t with
| U1 -> UInt8.sub a b
| U8 -> UInt8.sub a b
| U16 -> UInt16.sub a b
| U32 -> UInt32.sub a b
| U64 -> UInt64.sub a b
| U128 -> UInt128.sub a b
| S8 -> Int8.sub a b
| S16 -> Int16.sub a b
| S32 -> Int32.sub a b
| S64 -> Int64.sub a b
| S128 -> Int128.sub a b
let sub_lemma #t #l a b = ()
#push-options "--max_fuel 1"
[@(strict_on_arguments [0])]
let decr #t #l a =
match t with
| U1 -> UInt8.sub a 1uy
| U8 -> UInt8.sub a 1uy
| U16 -> UInt16.sub a 1us
| U32 -> UInt32.sub a 1ul
| U64 -> UInt64.sub a 1uL
| U128 -> UInt128.sub a (UInt128.uint_to_t 1)
| S8 -> Int8.sub a 1y
| S16 -> Int16.sub a 1s
| S32 -> Int32.sub a 1l
| S64 -> Int64.sub a 1L
| S128 -> Int128.sub a (Int128.int_to_t 1)
let decr_lemma #t #l a = ()
#pop-options
[@(strict_on_arguments [0])]
let logxor #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logxor 0uy 0uy == 0uy);
assert_norm (UInt8.logxor 0uy 1uy == 1uy);
assert_norm (UInt8.logxor 1uy 0uy == 1uy);
assert_norm (UInt8.logxor 1uy 1uy == 0uy);
UInt8.logxor a b
| U8 -> UInt8.logxor a b
| U16 -> UInt16.logxor a b
| U32 -> UInt32.logxor a b
| U64 -> UInt64.logxor a b
| U128 -> UInt128.logxor a b
| S8 -> Int8.logxor a b
| S16 -> Int16.logxor a b
| S32 -> Int32.logxor a b
| S64 -> Int64.logxor a b
| S128 -> Int128.logxor a b
#push-options "--max_fuel 1"
val logxor_lemma_: #t:inttype -> #l:secrecy_level -> a:int_t t l -> b:int_t t l -> Lemma
(v (a `logxor` (a `logxor` b)) == v b)
let logxor_lemma_ #t #l a b =
match t with
| U1 | U8 | U16 | U32 | U64 | U128 ->
UInt.logxor_associative #(bits t) (v a) (v a) (v b);
UInt.logxor_self #(bits t) (v a);
UInt.logxor_commutative #(bits t) 0 (v b);
UInt.logxor_lemma_1 #(bits t) (v b)
| S8 | S16 | S32 | S64 | S128 ->
Int.logxor_associative #(bits t) (v a) (v a) (v b);
Int.logxor_self #(bits t) (v a);
Int.logxor_commutative #(bits t) 0 (v b);
Int.logxor_lemma_1 #(bits t) (v b)
let logxor_lemma #t #l a b =
logxor_lemma_ #t a b;
v_extensionality (logxor a (logxor a b)) b;
begin
match t with
| U1 | U8 | U16 | U32 | U64 | U128 -> UInt.logxor_commutative #(bits t) (v a) (v b)
| S8 | S16 | S32 | S64 | S128 -> Int.logxor_commutative #(bits t) (v a) (v b)
end;
v_extensionality (logxor a (logxor b a)) b;
begin
match t with
| U1 | U8 | U16 | U32 | U64 | U128 -> UInt.logxor_lemma_1 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.logxor_lemma_1 #(bits t) (v a)
end;
v_extensionality (logxor a (mk_int #t #l 0)) a
let logxor_lemma1 #t #l a b =
match v a, v b with
| _, 0 ->
UInt.logxor_lemma_1 #(bits t) (v a)
| 0, _ ->
UInt.logxor_commutative #(bits t) (v a) (v b);
UInt.logxor_lemma_1 #(bits t) (v b)
| 1, 1 ->
v_extensionality a b;
UInt.logxor_self #(bits t) (v a)
let logxor_spec #t #l a b =
match t with
| U1 ->
assert_norm (u1 0 `logxor` u1 0 == u1 0 /\ u1 0 `logxor` u1 1 == u1 1);
assert_norm (u1 1 `logxor` u1 0 == u1 1 /\ u1 1 `logxor` u1 1 == u1 0);
assert_norm (0 `logxor_v #U1` 0 == 0 /\ 0 `logxor_v #U1` 1 == 1);
assert_norm (1 `logxor_v #U1` 0 == 1 /\ 1 `logxor_v #U1` 1 == 0)
| _ -> ()
#pop-options
[@(strict_on_arguments [0])] | false | false | Lib.IntTypes.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 200,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val logand: #t:inttype -> #l:secrecy_level
-> int_t t l
-> int_t t l
-> int_t t l | [] | Lib.IntTypes.logand | {
"file_name": "lib/Lib.IntTypes.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | a: Lib.IntTypes.int_t t l -> b: Lib.IntTypes.int_t t l -> Lib.IntTypes.int_t t l | {
"end_col": 29,
"end_line": 509,
"start_col": 2,
"start_line": 493
} |
FStar.Pervasives.Lemma | val lognot_spec: #t:inttype -> #l:secrecy_level
-> a:int_t t l
-> Lemma (v (lognot a) == lognot_v (v a)) | [
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let lognot_spec #t #l a =
match t with
| U1 ->
assert_norm(lognot (u1 0) == u1 1 /\ lognot (u1 1) == u1 0);
assert_norm(lognot_v #U1 0 == 1 /\ lognot_v #U1 1 == 0)
| _ -> () | val lognot_spec: #t:inttype -> #l:secrecy_level
-> a:int_t t l
-> Lemma (v (lognot a) == lognot_v (v a))
let lognot_spec #t #l a = | false | null | true | match t with
| U1 ->
assert_norm (lognot (u1 0) == u1 1 /\ lognot (u1 1) == u1 0);
assert_norm (lognot_v #U1 0 == 1 /\ lognot_v #U1 1 == 0)
| _ -> () | {
"checked_file": "Lib.IntTypes.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt8.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt16.fsti.checked",
"FStar.UInt128.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int8.fsti.checked",
"FStar.Int64.fsti.checked",
"FStar.Int32.fsti.checked",
"FStar.Int16.fsti.checked",
"FStar.Int128.fsti.checked",
"FStar.Int.Cast.Full.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Int.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "Lib.IntTypes.fst"
} | [
"lemma"
] | [
"Lib.IntTypes.inttype",
"Lib.IntTypes.secrecy_level",
"Lib.IntTypes.int_t",
"FStar.Pervasives.assert_norm",
"Prims.l_and",
"Prims.eq2",
"Prims.int",
"Lib.IntTypes.lognot_v",
"Lib.IntTypes.U1",
"Prims.unit",
"Lib.IntTypes.SEC",
"Lib.IntTypes.lognot",
"Lib.IntTypes.u1"
] | [] | module Lib.IntTypes
open FStar.Math.Lemmas
#push-options "--max_fuel 0 --max_ifuel 1 --z3rlimit 200"
let pow2_2 _ = assert_norm (pow2 2 = 4)
let pow2_3 _ = assert_norm (pow2 3 = 8)
let pow2_4 _ = assert_norm (pow2 4 = 16)
let pow2_127 _ = assert_norm (pow2 127 = 0x80000000000000000000000000000000)
let bits_numbytes t = ()
let sec_int_t t = pub_int_t t
let sec_int_v #t u = pub_int_v u
let secret #t x = x
[@(strict_on_arguments [0])]
let mk_int #t #l x =
match t with
| U1 -> UInt8.uint_to_t x
| U8 -> UInt8.uint_to_t x
| U16 -> UInt16.uint_to_t x
| U32 -> UInt32.uint_to_t x
| U64 -> UInt64.uint_to_t x
| U128 -> UInt128.uint_to_t x
| S8 -> Int8.int_to_t x
| S16 -> Int16.int_to_t x
| S32 -> Int32.int_to_t x
| S64 -> Int64.int_to_t x
| S128 -> Int128.int_to_t x
val v_extensionality:
#t:inttype
-> #l:secrecy_level
-> a:int_t t l
-> b:int_t t l
-> Lemma
(requires v a == v b)
(ensures a == b)
let v_extensionality #t #l a b =
match t with
| U1 -> ()
| U8 -> UInt8.v_inj a b
| U16 -> UInt16.v_inj a b
| U32 -> UInt32.v_inj a b
| U64 -> UInt64.v_inj a b
| U128 -> UInt128.v_inj a b
| S8 -> Int8.v_inj a b
| S16 -> Int16.v_inj a b
| S32 -> Int32.v_inj a b
| S64 -> Int64.v_inj a b
| S128 -> Int128.v_inj a b
let v_injective #t #l a =
v_extensionality a (mk_int (v a))
let v_mk_int #t #l n = ()
let u128 n = FStar.UInt128.uint64_to_uint128 (u64 n)
// KaRaMeL will extract this to FStar_Int128_int_to_t, which isn't provided
// We'll need to have FStar.Int128.int64_to_int128 to support int128_t literals
let i128 n =
assert_norm (pow2 (bits S64 - 1) <= pow2 (bits S128 - 1));
sint #S128 #SEC n
let size_to_uint32 x = x
let size_to_uint64 x = Int.Cast.uint32_to_uint64 x
let byte_to_uint8 x = x
let byte_to_int8 x = Int.Cast.uint8_to_int8 x
let op_At_Percent = Int.op_At_Percent
// FStar.UInt128 gets special treatment in KaRaMeL. There is no
// equivalent for FStar.Int128 at the moment, so we use the three
// assumed cast operators below.
//
// Using them will fail at runtime with an informative message.
// The commented-out implementations show that they are realizable.
//
// When support for `FStar.Int128` is added KaRaMeL, these casts must
// be added as special cases. When using builtin compiler support for
// `int128_t`, they can be implemented directly as C casts without
// undefined or implementation-defined behaviour.
assume
val uint128_to_int128: a:UInt128.t{v a <= maxint S128} -> b:Int128.t{Int128.v b == UInt128.v a}
//let uint128_to_int128 a = Int128.int_to_t (v a)
assume
val int128_to_uint128: a:Int128.t -> b:UInt128.t{UInt128.v b == Int128.v a % pow2 128}
//let int128_to_uint128 a = mk_int (v a % pow2 128)
assume
val int64_to_int128: a:Int64.t -> b:Int128.t{Int128.v b == Int64.v a}
//let int64_to_int128 a = Int128.int_to_t (v a)
val uint64_to_int128: a:UInt64.t -> b:Int128.t{Int128.v b == UInt64.v a}
let uint64_to_int128 a = uint128_to_int128 (Int.Cast.Full.uint64_to_uint128 a)
val int64_to_uint128: a:Int64.t -> b:UInt128.t{UInt128.v b == Int64.v a % pow2 128}
let int64_to_uint128 a = int128_to_uint128 (int64_to_int128 a)
val int128_to_uint64: a:Int128.t -> b:UInt64.t{UInt64.v b == Int128.v a % pow2 64}
let int128_to_uint64 a = Int.Cast.Full.uint128_to_uint64 (int128_to_uint128 a)
#push-options "--z3rlimit 1000"
[@(strict_on_arguments [0;2])]
let cast #t #l t' l' u =
assert_norm (pow2 8 = 2 * pow2 7);
assert_norm (pow2 16 = 2 * pow2 15);
assert_norm (pow2 64 * pow2 64 = pow2 128);
assert_norm (pow2 16 * pow2 48 = pow2 64);
assert_norm (pow2 8 * pow2 56 = pow2 64);
assert_norm (pow2 32 * pow2 32 = pow2 64);
modulo_modulo_lemma (v u) (pow2 32) (pow2 32);
modulo_modulo_lemma (v u) (pow2 64) (pow2 64);
modulo_modulo_lemma (v u) (pow2 128) (pow2 64);
modulo_modulo_lemma (v u) (pow2 16) (pow2 48);
modulo_modulo_lemma (v u) (pow2 8) (pow2 56);
let open FStar.Int.Cast in
let open FStar.Int.Cast.Full in
match t, t' with
| U1, U1 -> u
| U1, U8 -> u
| U1, U16 -> uint8_to_uint16 u
| U1, U32 -> uint8_to_uint32 u
| U1, U64 -> uint8_to_uint64 u
| U1, U128 -> UInt128.uint64_to_uint128 (uint8_to_uint64 u)
| U1, S8 -> uint8_to_int8 u
| U1, S16 -> uint8_to_int16 u
| U1, S32 -> uint8_to_int32 u
| U1, S64 -> uint8_to_int64 u
| U1, S128 -> uint64_to_int128 (uint8_to_uint64 u)
| U8, U1 -> UInt8.rem u 2uy
| U8, U8 -> u
| U8, U16 -> uint8_to_uint16 u
| U8, U32 -> uint8_to_uint32 u
| U8, U64 -> uint8_to_uint64 u
| U8, U128 -> UInt128.uint64_to_uint128 (uint8_to_uint64 u)
| U8, S8 -> uint8_to_int8 u
| U8, S16 -> uint8_to_int16 u
| U8, S32 -> uint8_to_int32 u
| U8, S64 -> uint8_to_int64 u
| U8, S128 -> uint64_to_int128 (uint8_to_uint64 u)
| U16, U1 -> UInt8.rem (uint16_to_uint8 u) 2uy
| U16, U8 -> uint16_to_uint8 u
| U16, U16 -> u
| U16, U32 -> uint16_to_uint32 u
| U16, U64 -> uint16_to_uint64 u
| U16, U128 -> UInt128.uint64_to_uint128 (uint16_to_uint64 u)
| U16, S8 -> uint16_to_int8 u
| U16, S16 -> uint16_to_int16 u
| U16, S32 -> uint16_to_int32 u
| U16, S64 -> uint16_to_int64 u
| U16, S128 -> uint64_to_int128 (uint16_to_uint64 u)
| U32, U1 -> UInt8.rem (uint32_to_uint8 u) 2uy
| U32, U8 -> uint32_to_uint8 u
| U32, U16 -> uint32_to_uint16 u
| U32, U32 -> u
| U32, U64 -> uint32_to_uint64 u
| U32, U128 -> UInt128.uint64_to_uint128 (uint32_to_uint64 u)
| U32, S8 -> uint32_to_int8 u
| U32, S16 -> uint32_to_int16 u
| U32, S32 -> uint32_to_int32 u
| U32, S64 -> uint32_to_int64 u
| U32, S128 -> uint64_to_int128 (uint32_to_uint64 u)
| U64, U1 -> UInt8.rem (uint64_to_uint8 u) 2uy
| U64, U8 -> uint64_to_uint8 u
| U64, U16 -> uint64_to_uint16 u
| U64, U32 -> uint64_to_uint32 u
| U64, U64 -> u
| U64, U128 -> UInt128.uint64_to_uint128 u
| U64, S8 -> uint64_to_int8 u
| U64, S16 -> uint64_to_int16 u
| U64, S32 -> uint64_to_int32 u
| U64, S64 -> uint64_to_int64 u
| U64, S128 -> uint64_to_int128 u
| U128, U1 -> UInt8.rem (uint64_to_uint8 (uint128_to_uint64 u)) 2uy
| U128, U8 -> uint64_to_uint8 (UInt128.uint128_to_uint64 u)
| U128, U16 -> uint64_to_uint16 (UInt128.uint128_to_uint64 u)
| U128, U32 -> uint64_to_uint32 (UInt128.uint128_to_uint64 u)
| U128, U64 -> UInt128.uint128_to_uint64 u
| U128, U128 -> u
| U128, S8 -> uint64_to_int8 (UInt128.uint128_to_uint64 u)
| U128, S16 -> uint64_to_int16 (UInt128.uint128_to_uint64 u)
| U128, S32 -> uint64_to_int32 (UInt128.uint128_to_uint64 u)
| U128, S64 -> uint64_to_int64 (UInt128.uint128_to_uint64 u)
| U128, S128 -> uint128_to_int128 u
| S8, U1 -> UInt8.rem (int8_to_uint8 u) 2uy
| S8, U8 -> int8_to_uint8 u
| S8, U16 -> int8_to_uint16 u
| S8, U32 -> int8_to_uint32 u
| S8, U64 -> int8_to_uint64 u
| S8, U128 -> int64_to_uint128 (int8_to_int64 u)
| S8, S8 -> u
| S8, S16 -> int8_to_int16 u
| S8, S32 -> int8_to_int32 u
| S8, S64 -> int8_to_int64 u
| S8, S128 -> int64_to_int128 (int8_to_int64 u)
| S16, U1 -> UInt8.rem (int16_to_uint8 u) 2uy
| S16, U8 -> int16_to_uint8 u
| S16, U16 -> int16_to_uint16 u
| S16, U32 -> int16_to_uint32 u
| S16, U64 -> int16_to_uint64 u
| S16, U128 -> int64_to_uint128 (int16_to_int64 u)
| S16, S8 -> int16_to_int8 u
| S16, S16 -> u
| S16, S32 -> int16_to_int32 u
| S16, S64 -> int16_to_int64 u
| S16, S128 -> int64_to_int128 (int16_to_int64 u)
| S32, U1 -> UInt8.rem (int32_to_uint8 u) 2uy
| S32, U8 -> int32_to_uint8 u
| S32, U16 -> int32_to_uint16 u
| S32, U32 -> int32_to_uint32 u
| S32, U64 -> int32_to_uint64 u
| S32, U128 -> int64_to_uint128 (int32_to_int64 u)
| S32, S8 -> int32_to_int8 u
| S32, S16 -> int32_to_int16 u
| S32, S32 -> u
| S32, S64 -> int32_to_int64 u
| S32, S128 -> int64_to_int128 (int32_to_int64 u)
| S64, U1 -> UInt8.rem (int64_to_uint8 u) 2uy
| S64, U8 -> int64_to_uint8 u
| S64, U16 -> int64_to_uint16 u
| S64, U32 -> int64_to_uint32 u
| S64, U64 -> int64_to_uint64 u
| S64, U128 -> int64_to_uint128 u
| S64, S8 -> int64_to_int8 u
| S64, S16 -> int64_to_int16 u
| S64, S32 -> int64_to_int32 u
| S64, S64 -> u
| S64, S128 -> int64_to_int128 u
| S128, U1 -> UInt8.rem (uint64_to_uint8 (int128_to_uint64 u)) 2uy
| S128, U8 -> uint64_to_uint8 (int128_to_uint64 u)
| S128, U16 -> uint64_to_uint16 (int128_to_uint64 u)
| S128, U32 -> uint64_to_uint32 (int128_to_uint64 u)
| S128, U64 -> int128_to_uint64 u
| S128, U128 -> int128_to_uint128 u
| S128, S8 -> uint64_to_int8 (int128_to_uint64 u)
| S128, S16 -> uint64_to_int16 (int128_to_uint64 u)
| S128, S32 -> uint64_to_int32 (int128_to_uint64 u)
| S128, S64 -> uint64_to_int64 (int128_to_uint64 u)
| S128, S128 -> u
#pop-options
[@(strict_on_arguments [0])]
let ones t l =
match t with
| U1 -> 0x1uy
| U8 -> 0xFFuy
| U16 -> 0xFFFFus
| U32 -> 0xFFFFFFFFul
| U64 -> 0xFFFFFFFFFFFFFFFFuL
| U128 ->
let x = UInt128.uint64_to_uint128 0xFFFFFFFFFFFFFFFFuL in
let y = (UInt128.shift_left x 64ul) `UInt128.add` x in
assert_norm (UInt128.v y == pow2 128 - 1);
y
| _ -> mk_int (-1)
let zeros t l = mk_int 0
[@(strict_on_arguments [0])]
let add_mod #t #l a b =
match t with
| U1 -> UInt8.rem (UInt8.add_mod a b) 2uy
| U8 -> UInt8.add_mod a b
| U16 -> UInt16.add_mod a b
| U32 -> UInt32.add_mod a b
| U64 -> UInt64.add_mod a b
| U128 -> UInt128.add_mod a b
let add_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let add #t #l a b =
match t with
| U1 -> UInt8.add a b
| U8 -> UInt8.add a b
| U16 -> UInt16.add a b
| U32 -> UInt32.add a b
| U64 -> UInt64.add a b
| U128 -> UInt128.add a b
| S8 -> Int8.add a b
| S16 -> Int16.add a b
| S32 -> Int32.add a b
| S64 -> Int64.add a b
| S128 -> Int128.add a b
let add_lemma #t #l a b = ()
#push-options "--max_fuel 1"
[@(strict_on_arguments [0])]
let incr #t #l a =
match t with
| U1 -> UInt8.add a 1uy
| U8 -> UInt8.add a 1uy
| U16 -> UInt16.add a 1us
| U32 -> UInt32.add a 1ul
| U64 -> UInt64.add a 1uL
| U128 -> UInt128.add a (UInt128.uint_to_t 1)
| S8 -> Int8.add a 1y
| S16 -> Int16.add a 1s
| S32 -> Int32.add a 1l
| S64 -> Int64.add a 1L
| S128 -> Int128.add a (Int128.int_to_t 1)
let incr_lemma #t #l a = ()
#pop-options
[@(strict_on_arguments [0])]
let mul_mod #t #l a b =
match t with
| U1 -> UInt8.mul_mod a b
| U8 -> UInt8.mul_mod a b
| U16 -> UInt16.mul_mod a b
| U32 -> UInt32.mul_mod a b
| U64 -> UInt64.mul_mod a b
let mul_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let mul #t #l a b =
match t with
| U1 -> UInt8.mul a b
| U8 -> UInt8.mul a b
| U16 -> UInt16.mul a b
| U32 -> UInt32.mul a b
| U64 -> UInt64.mul a b
| S8 -> Int8.mul a b
| S16 -> Int16.mul a b
| S32 -> Int32.mul a b
| S64 -> Int64.mul a b
let mul_lemma #t #l a b = ()
let mul64_wide a b = UInt128.mul_wide a b
let mul64_wide_lemma a b = ()
let mul_s64_wide a b = Int128.mul_wide a b
let mul_s64_wide_lemma a b = ()
[@(strict_on_arguments [0])]
let sub_mod #t #l a b =
match t with
| U1 -> UInt8.rem (UInt8.sub_mod a b) 2uy
| U8 -> UInt8.sub_mod a b
| U16 -> UInt16.sub_mod a b
| U32 -> UInt32.sub_mod a b
| U64 -> UInt64.sub_mod a b
| U128 -> UInt128.sub_mod a b
let sub_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let sub #t #l a b =
match t with
| U1 -> UInt8.sub a b
| U8 -> UInt8.sub a b
| U16 -> UInt16.sub a b
| U32 -> UInt32.sub a b
| U64 -> UInt64.sub a b
| U128 -> UInt128.sub a b
| S8 -> Int8.sub a b
| S16 -> Int16.sub a b
| S32 -> Int32.sub a b
| S64 -> Int64.sub a b
| S128 -> Int128.sub a b
let sub_lemma #t #l a b = ()
#push-options "--max_fuel 1"
[@(strict_on_arguments [0])]
let decr #t #l a =
match t with
| U1 -> UInt8.sub a 1uy
| U8 -> UInt8.sub a 1uy
| U16 -> UInt16.sub a 1us
| U32 -> UInt32.sub a 1ul
| U64 -> UInt64.sub a 1uL
| U128 -> UInt128.sub a (UInt128.uint_to_t 1)
| S8 -> Int8.sub a 1y
| S16 -> Int16.sub a 1s
| S32 -> Int32.sub a 1l
| S64 -> Int64.sub a 1L
| S128 -> Int128.sub a (Int128.int_to_t 1)
let decr_lemma #t #l a = ()
#pop-options
[@(strict_on_arguments [0])]
let logxor #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logxor 0uy 0uy == 0uy);
assert_norm (UInt8.logxor 0uy 1uy == 1uy);
assert_norm (UInt8.logxor 1uy 0uy == 1uy);
assert_norm (UInt8.logxor 1uy 1uy == 0uy);
UInt8.logxor a b
| U8 -> UInt8.logxor a b
| U16 -> UInt16.logxor a b
| U32 -> UInt32.logxor a b
| U64 -> UInt64.logxor a b
| U128 -> UInt128.logxor a b
| S8 -> Int8.logxor a b
| S16 -> Int16.logxor a b
| S32 -> Int32.logxor a b
| S64 -> Int64.logxor a b
| S128 -> Int128.logxor a b
#push-options "--max_fuel 1"
val logxor_lemma_: #t:inttype -> #l:secrecy_level -> a:int_t t l -> b:int_t t l -> Lemma
(v (a `logxor` (a `logxor` b)) == v b)
let logxor_lemma_ #t #l a b =
match t with
| U1 | U8 | U16 | U32 | U64 | U128 ->
UInt.logxor_associative #(bits t) (v a) (v a) (v b);
UInt.logxor_self #(bits t) (v a);
UInt.logxor_commutative #(bits t) 0 (v b);
UInt.logxor_lemma_1 #(bits t) (v b)
| S8 | S16 | S32 | S64 | S128 ->
Int.logxor_associative #(bits t) (v a) (v a) (v b);
Int.logxor_self #(bits t) (v a);
Int.logxor_commutative #(bits t) 0 (v b);
Int.logxor_lemma_1 #(bits t) (v b)
let logxor_lemma #t #l a b =
logxor_lemma_ #t a b;
v_extensionality (logxor a (logxor a b)) b;
begin
match t with
| U1 | U8 | U16 | U32 | U64 | U128 -> UInt.logxor_commutative #(bits t) (v a) (v b)
| S8 | S16 | S32 | S64 | S128 -> Int.logxor_commutative #(bits t) (v a) (v b)
end;
v_extensionality (logxor a (logxor b a)) b;
begin
match t with
| U1 | U8 | U16 | U32 | U64 | U128 -> UInt.logxor_lemma_1 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.logxor_lemma_1 #(bits t) (v a)
end;
v_extensionality (logxor a (mk_int #t #l 0)) a
let logxor_lemma1 #t #l a b =
match v a, v b with
| _, 0 ->
UInt.logxor_lemma_1 #(bits t) (v a)
| 0, _ ->
UInt.logxor_commutative #(bits t) (v a) (v b);
UInt.logxor_lemma_1 #(bits t) (v b)
| 1, 1 ->
v_extensionality a b;
UInt.logxor_self #(bits t) (v a)
let logxor_spec #t #l a b =
match t with
| U1 ->
assert_norm (u1 0 `logxor` u1 0 == u1 0 /\ u1 0 `logxor` u1 1 == u1 1);
assert_norm (u1 1 `logxor` u1 0 == u1 1 /\ u1 1 `logxor` u1 1 == u1 0);
assert_norm (0 `logxor_v #U1` 0 == 0 /\ 0 `logxor_v #U1` 1 == 1);
assert_norm (1 `logxor_v #U1` 0 == 1 /\ 1 `logxor_v #U1` 1 == 0)
| _ -> ()
#pop-options
[@(strict_on_arguments [0])]
let logand #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logand 0uy 0uy == 0uy);
assert_norm (UInt8.logand 0uy 1uy == 0uy);
assert_norm (UInt8.logand 1uy 0uy == 0uy);
assert_norm (UInt8.logand 1uy 1uy == 1uy);
UInt8.logand a b
| U8 -> UInt8.logand a b
| U16 -> UInt16.logand a b
| U32 -> UInt32.logand a b
| U64 -> UInt64.logand a b
| U128 -> UInt128.logand a b
| S8 -> Int8.logand a b
| S16 -> Int16.logand a b
| S32 -> Int32.logand a b
| S64 -> Int64.logand a b
| S128 -> Int128.logand a b
let logand_zeros #t #l a =
match t with
| U1 -> assert_norm (u1 0 `logand` zeros U1 l == u1 0 /\ u1 1 `logand` zeros U1 l == u1 0)
| U8 | U16 | U32 | U64 | U128 -> UInt.logand_lemma_1 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.logand_lemma_1 #(bits t) (v a)
let logand_ones #t #l a =
match t with
| U1 -> assert_norm (u1 0 `logand` ones U1 l == u1 0 /\ u1 1 `logand` ones U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logand_lemma_2 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.logand_lemma_2 #(bits t) (v a)
let logand_lemma #t #l a b =
logand_zeros #t #l b;
logand_ones #t #l b;
match t with
| U1 ->
assert_norm (u1 0 `logand` zeros U1 l == u1 0 /\ u1 1 `logand` zeros U1 l == u1 0);
assert_norm (u1 0 `logand` ones U1 l == u1 0 /\ u1 1 `logand` ones U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logand_commutative #(bits t) (v a) (v b)
| S8 | S16 | S32 | S64 | S128 -> Int.logand_commutative #(bits t) (v a) (v b)
let logand_spec #t #l a b =
match t with
| U1 ->
assert_norm (u1 0 `logand` u1 0 == u1 0 /\ u1 0 `logand` u1 1 == u1 0);
assert_norm (u1 1 `logand` u1 0 == u1 0 /\ u1 1 `logand` u1 1 == u1 1);
assert_norm (0 `logand_v #U1` 0 == 0 /\ 0 `logand_v #U1` 1 == 0);
assert_norm (1 `logand_v #U1` 0 == 0 /\ 1 `logand_v #U1` 1 == 1)
| _ -> ()
let logand_le #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logand 0uy 0uy == 0uy);
assert_norm (UInt8.logand 0uy 1uy == 0uy);
assert_norm (UInt8.logand 1uy 0uy == 0uy);
assert_norm (UInt8.logand 1uy 1uy == 1uy)
| U8 -> UInt.logand_le (UInt.to_uint_t 8 (v a)) (UInt.to_uint_t 8 (v b))
| U16 -> UInt.logand_le (UInt.to_uint_t 16 (v a)) (UInt.to_uint_t 16 (v b))
| U32 -> UInt.logand_le (UInt.to_uint_t 32 (v a)) (UInt.to_uint_t 32 (v b))
| U64 -> UInt.logand_le (UInt.to_uint_t 64 (v a)) (UInt.to_uint_t 64 (v b))
| U128 -> UInt.logand_le (UInt.to_uint_t 128 (v a)) (UInt.to_uint_t 128 (v b))
let logand_mask #t #l a b m =
match t with
| U1 ->
assert_norm (UInt8.logand 0uy 0uy == 0uy);
assert_norm (UInt8.logand 0uy 1uy == 0uy);
assert_norm (UInt8.logand 1uy 0uy == 0uy);
assert_norm (UInt8.logand 1uy 1uy == 1uy)
| U8 -> UInt.logand_mask (UInt.to_uint_t 8 (v a)) m
| U16 -> UInt.logand_mask (UInt.to_uint_t 16 (v a)) m
| U32 -> UInt.logand_mask (UInt.to_uint_t 32 (v a)) m
| U64 -> UInt.logand_mask (UInt.to_uint_t 64 (v a)) m
| U128 -> UInt.logand_mask (UInt.to_uint_t 128 (v a)) m
[@(strict_on_arguments [0])]
let logor #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logor 0uy 0uy == 0uy);
assert_norm (UInt8.logor 0uy 1uy == 1uy);
assert_norm (UInt8.logor 1uy 0uy == 1uy);
assert_norm (UInt8.logor 1uy 1uy == 1uy);
UInt8.logor a b
| U8 -> UInt8.logor a b
| U16 -> UInt16.logor a b
| U32 -> UInt32.logor a b
| U64 -> UInt64.logor a b
| U128 -> UInt128.logor a b
| S8 -> Int8.logor a b
| S16 -> Int16.logor a b
| S32 -> Int32.logor a b
| S64 -> Int64.logor a b
| S128 -> Int128.logor a b
#push-options "--max_fuel 1"
let logor_disjoint #t #l a b m =
if m > 0 then
begin
UInt.logor_disjoint #(bits t) (v b) (v a) m;
UInt.logor_commutative #(bits t) (v b) (v a)
end
else
begin
UInt.logor_commutative #(bits t) (v a) (v b);
UInt.logor_lemma_1 #(bits t) (v b)
end
#pop-options
let logor_zeros #t #l a =
match t with
|U1 -> assert_norm(u1 0 `logor` zeros U1 l == u1 0 /\ u1 1 `logor` zeros U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logor_lemma_1 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.nth_lemma #(bits t) (Int.logor #(bits t) (v a) (Int.zero (bits t))) (v a)
let logor_ones #t #l a =
match t with
|U1 -> assert_norm(u1 0 `logor` ones U1 l == u1 1 /\ u1 1 `logor` ones U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logor_lemma_2 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.nth_lemma (Int.logor #(bits t) (v a) (Int.ones (bits t))) (Int.ones (bits t))
let logor_lemma #t #l a b =
logor_zeros #t #l b;
logor_ones #t #l b;
match t with
| U1 ->
assert_norm(u1 0 `logor` ones U1 l == u1 1 /\ u1 1 `logor` ones U1 l == u1 1);
assert_norm(u1 0 `logor` zeros U1 l == u1 0 /\ u1 1 `logor` zeros U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logor_commutative #(bits t) (v a) (v b)
| S8 | S16 | S32 | S64 | S128 -> Int.nth_lemma #(bits t) (Int.logor #(bits t) (v a) (v b)) (Int.logor #(bits t) (v b) (v a))
let logor_spec #t #l a b =
match t with
| U1 ->
assert_norm(u1 0 `logor` ones U1 l == u1 1 /\ u1 1 `logor` ones U1 l == u1 1);
assert_norm(u1 0 `logor` zeros U1 l == u1 0 /\ u1 1 `logor` zeros U1 l == u1 1);
assert_norm (0 `logor_v #U1` 0 == 0 /\ 0 `logor_v #U1` 1 == 1);
assert_norm (1 `logor_v #U1` 0 == 1 /\ 1 `logor_v #U1` 1 == 1)
| _ -> ()
[@(strict_on_arguments [0])]
let lognot #t #l a =
match t with
| U1 -> UInt8.rem (UInt8.lognot a) 2uy
| U8 -> UInt8.lognot a
| U16 -> UInt16.lognot a
| U32 -> UInt32.lognot a
| U64 -> UInt64.lognot a
| U128 -> UInt128.lognot a
| S8 -> Int8.lognot a
| S16 -> Int16.lognot a
| S32 -> Int32.lognot a
| S64 -> Int64.lognot a
| S128 -> Int128.lognot a
let lognot_lemma #t #l a =
match t with
|U1 -> assert_norm(lognot (u1 0) == u1 1 /\ lognot (u1 1) == u1 0)
| U8 | U16 | U32 | U64 | U128 ->
FStar.UInt.lognot_lemma_1 #(bits t);
UInt.nth_lemma (FStar.UInt.lognot #(bits t) (UInt.ones (bits t))) (UInt.zero (bits t))
| S8 | S16 | S32 | S64 | S128 ->
Int.nth_lemma (FStar.Int.lognot #(bits t) (Int.zero (bits t))) (Int.ones (bits t));
Int.nth_lemma (FStar.Int.lognot #(bits t) (Int.ones (bits t))) (Int.zero (bits t)) | false | false | Lib.IntTypes.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 200,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val lognot_spec: #t:inttype -> #l:secrecy_level
-> a:int_t t l
-> Lemma (v (lognot a) == lognot_v (v a)) | [] | Lib.IntTypes.lognot_spec | {
"file_name": "lib/Lib.IntTypes.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | a: Lib.IntTypes.int_t t l
-> FStar.Pervasives.Lemma
(ensures Lib.IntTypes.v (Lib.IntTypes.lognot a) == Lib.IntTypes.lognot_v (Lib.IntTypes.v a)) | {
"end_col": 11,
"end_line": 668,
"start_col": 2,
"start_line": 664
} |
Prims.Tot | val mul: #t:inttype{~(U128? t) /\ ~(S128? t)} -> #l:secrecy_level
-> a:int_t t l
-> b:int_t t l{range (v a * v b) t}
-> int_t t l | [
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let mul #t #l a b =
match t with
| U1 -> UInt8.mul a b
| U8 -> UInt8.mul a b
| U16 -> UInt16.mul a b
| U32 -> UInt32.mul a b
| U64 -> UInt64.mul a b
| S8 -> Int8.mul a b
| S16 -> Int16.mul a b
| S32 -> Int32.mul a b
| S64 -> Int64.mul a b | val mul: #t:inttype{~(U128? t) /\ ~(S128? t)} -> #l:secrecy_level
-> a:int_t t l
-> b:int_t t l{range (v a * v b) t}
-> int_t t l
let mul #t #l a b = | false | null | false | match t with
| U1 -> UInt8.mul a b
| U8 -> UInt8.mul a b
| U16 -> UInt16.mul a b
| U32 -> UInt32.mul a b
| U64 -> UInt64.mul a b
| S8 -> Int8.mul a b
| S16 -> Int16.mul a b
| S32 -> Int32.mul a b
| S64 -> Int64.mul a b | {
"checked_file": "Lib.IntTypes.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt8.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt16.fsti.checked",
"FStar.UInt128.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int8.fsti.checked",
"FStar.Int64.fsti.checked",
"FStar.Int32.fsti.checked",
"FStar.Int16.fsti.checked",
"FStar.Int128.fsti.checked",
"FStar.Int.Cast.Full.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Int.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "Lib.IntTypes.fst"
} | [
"total"
] | [
"Lib.IntTypes.inttype",
"Prims.l_and",
"Prims.l_not",
"Prims.b2t",
"Lib.IntTypes.uu___is_U128",
"Lib.IntTypes.uu___is_S128",
"Lib.IntTypes.secrecy_level",
"Lib.IntTypes.int_t",
"Lib.IntTypes.range",
"FStar.Mul.op_Star",
"Lib.IntTypes.v",
"FStar.UInt8.mul",
"FStar.UInt16.mul",
"FStar.UInt32.mul",
"FStar.UInt64.mul",
"FStar.Int8.mul",
"FStar.Int16.mul",
"FStar.Int32.mul",
"FStar.Int64.mul"
] | [] | module Lib.IntTypes
open FStar.Math.Lemmas
#push-options "--max_fuel 0 --max_ifuel 1 --z3rlimit 200"
let pow2_2 _ = assert_norm (pow2 2 = 4)
let pow2_3 _ = assert_norm (pow2 3 = 8)
let pow2_4 _ = assert_norm (pow2 4 = 16)
let pow2_127 _ = assert_norm (pow2 127 = 0x80000000000000000000000000000000)
let bits_numbytes t = ()
let sec_int_t t = pub_int_t t
let sec_int_v #t u = pub_int_v u
let secret #t x = x
[@(strict_on_arguments [0])]
let mk_int #t #l x =
match t with
| U1 -> UInt8.uint_to_t x
| U8 -> UInt8.uint_to_t x
| U16 -> UInt16.uint_to_t x
| U32 -> UInt32.uint_to_t x
| U64 -> UInt64.uint_to_t x
| U128 -> UInt128.uint_to_t x
| S8 -> Int8.int_to_t x
| S16 -> Int16.int_to_t x
| S32 -> Int32.int_to_t x
| S64 -> Int64.int_to_t x
| S128 -> Int128.int_to_t x
val v_extensionality:
#t:inttype
-> #l:secrecy_level
-> a:int_t t l
-> b:int_t t l
-> Lemma
(requires v a == v b)
(ensures a == b)
let v_extensionality #t #l a b =
match t with
| U1 -> ()
| U8 -> UInt8.v_inj a b
| U16 -> UInt16.v_inj a b
| U32 -> UInt32.v_inj a b
| U64 -> UInt64.v_inj a b
| U128 -> UInt128.v_inj a b
| S8 -> Int8.v_inj a b
| S16 -> Int16.v_inj a b
| S32 -> Int32.v_inj a b
| S64 -> Int64.v_inj a b
| S128 -> Int128.v_inj a b
let v_injective #t #l a =
v_extensionality a (mk_int (v a))
let v_mk_int #t #l n = ()
let u128 n = FStar.UInt128.uint64_to_uint128 (u64 n)
// KaRaMeL will extract this to FStar_Int128_int_to_t, which isn't provided
// We'll need to have FStar.Int128.int64_to_int128 to support int128_t literals
let i128 n =
assert_norm (pow2 (bits S64 - 1) <= pow2 (bits S128 - 1));
sint #S128 #SEC n
let size_to_uint32 x = x
let size_to_uint64 x = Int.Cast.uint32_to_uint64 x
let byte_to_uint8 x = x
let byte_to_int8 x = Int.Cast.uint8_to_int8 x
let op_At_Percent = Int.op_At_Percent
// FStar.UInt128 gets special treatment in KaRaMeL. There is no
// equivalent for FStar.Int128 at the moment, so we use the three
// assumed cast operators below.
//
// Using them will fail at runtime with an informative message.
// The commented-out implementations show that they are realizable.
//
// When support for `FStar.Int128` is added KaRaMeL, these casts must
// be added as special cases. When using builtin compiler support for
// `int128_t`, they can be implemented directly as C casts without
// undefined or implementation-defined behaviour.
assume
val uint128_to_int128: a:UInt128.t{v a <= maxint S128} -> b:Int128.t{Int128.v b == UInt128.v a}
//let uint128_to_int128 a = Int128.int_to_t (v a)
assume
val int128_to_uint128: a:Int128.t -> b:UInt128.t{UInt128.v b == Int128.v a % pow2 128}
//let int128_to_uint128 a = mk_int (v a % pow2 128)
assume
val int64_to_int128: a:Int64.t -> b:Int128.t{Int128.v b == Int64.v a}
//let int64_to_int128 a = Int128.int_to_t (v a)
val uint64_to_int128: a:UInt64.t -> b:Int128.t{Int128.v b == UInt64.v a}
let uint64_to_int128 a = uint128_to_int128 (Int.Cast.Full.uint64_to_uint128 a)
val int64_to_uint128: a:Int64.t -> b:UInt128.t{UInt128.v b == Int64.v a % pow2 128}
let int64_to_uint128 a = int128_to_uint128 (int64_to_int128 a)
val int128_to_uint64: a:Int128.t -> b:UInt64.t{UInt64.v b == Int128.v a % pow2 64}
let int128_to_uint64 a = Int.Cast.Full.uint128_to_uint64 (int128_to_uint128 a)
#push-options "--z3rlimit 1000"
[@(strict_on_arguments [0;2])]
let cast #t #l t' l' u =
assert_norm (pow2 8 = 2 * pow2 7);
assert_norm (pow2 16 = 2 * pow2 15);
assert_norm (pow2 64 * pow2 64 = pow2 128);
assert_norm (pow2 16 * pow2 48 = pow2 64);
assert_norm (pow2 8 * pow2 56 = pow2 64);
assert_norm (pow2 32 * pow2 32 = pow2 64);
modulo_modulo_lemma (v u) (pow2 32) (pow2 32);
modulo_modulo_lemma (v u) (pow2 64) (pow2 64);
modulo_modulo_lemma (v u) (pow2 128) (pow2 64);
modulo_modulo_lemma (v u) (pow2 16) (pow2 48);
modulo_modulo_lemma (v u) (pow2 8) (pow2 56);
let open FStar.Int.Cast in
let open FStar.Int.Cast.Full in
match t, t' with
| U1, U1 -> u
| U1, U8 -> u
| U1, U16 -> uint8_to_uint16 u
| U1, U32 -> uint8_to_uint32 u
| U1, U64 -> uint8_to_uint64 u
| U1, U128 -> UInt128.uint64_to_uint128 (uint8_to_uint64 u)
| U1, S8 -> uint8_to_int8 u
| U1, S16 -> uint8_to_int16 u
| U1, S32 -> uint8_to_int32 u
| U1, S64 -> uint8_to_int64 u
| U1, S128 -> uint64_to_int128 (uint8_to_uint64 u)
| U8, U1 -> UInt8.rem u 2uy
| U8, U8 -> u
| U8, U16 -> uint8_to_uint16 u
| U8, U32 -> uint8_to_uint32 u
| U8, U64 -> uint8_to_uint64 u
| U8, U128 -> UInt128.uint64_to_uint128 (uint8_to_uint64 u)
| U8, S8 -> uint8_to_int8 u
| U8, S16 -> uint8_to_int16 u
| U8, S32 -> uint8_to_int32 u
| U8, S64 -> uint8_to_int64 u
| U8, S128 -> uint64_to_int128 (uint8_to_uint64 u)
| U16, U1 -> UInt8.rem (uint16_to_uint8 u) 2uy
| U16, U8 -> uint16_to_uint8 u
| U16, U16 -> u
| U16, U32 -> uint16_to_uint32 u
| U16, U64 -> uint16_to_uint64 u
| U16, U128 -> UInt128.uint64_to_uint128 (uint16_to_uint64 u)
| U16, S8 -> uint16_to_int8 u
| U16, S16 -> uint16_to_int16 u
| U16, S32 -> uint16_to_int32 u
| U16, S64 -> uint16_to_int64 u
| U16, S128 -> uint64_to_int128 (uint16_to_uint64 u)
| U32, U1 -> UInt8.rem (uint32_to_uint8 u) 2uy
| U32, U8 -> uint32_to_uint8 u
| U32, U16 -> uint32_to_uint16 u
| U32, U32 -> u
| U32, U64 -> uint32_to_uint64 u
| U32, U128 -> UInt128.uint64_to_uint128 (uint32_to_uint64 u)
| U32, S8 -> uint32_to_int8 u
| U32, S16 -> uint32_to_int16 u
| U32, S32 -> uint32_to_int32 u
| U32, S64 -> uint32_to_int64 u
| U32, S128 -> uint64_to_int128 (uint32_to_uint64 u)
| U64, U1 -> UInt8.rem (uint64_to_uint8 u) 2uy
| U64, U8 -> uint64_to_uint8 u
| U64, U16 -> uint64_to_uint16 u
| U64, U32 -> uint64_to_uint32 u
| U64, U64 -> u
| U64, U128 -> UInt128.uint64_to_uint128 u
| U64, S8 -> uint64_to_int8 u
| U64, S16 -> uint64_to_int16 u
| U64, S32 -> uint64_to_int32 u
| U64, S64 -> uint64_to_int64 u
| U64, S128 -> uint64_to_int128 u
| U128, U1 -> UInt8.rem (uint64_to_uint8 (uint128_to_uint64 u)) 2uy
| U128, U8 -> uint64_to_uint8 (UInt128.uint128_to_uint64 u)
| U128, U16 -> uint64_to_uint16 (UInt128.uint128_to_uint64 u)
| U128, U32 -> uint64_to_uint32 (UInt128.uint128_to_uint64 u)
| U128, U64 -> UInt128.uint128_to_uint64 u
| U128, U128 -> u
| U128, S8 -> uint64_to_int8 (UInt128.uint128_to_uint64 u)
| U128, S16 -> uint64_to_int16 (UInt128.uint128_to_uint64 u)
| U128, S32 -> uint64_to_int32 (UInt128.uint128_to_uint64 u)
| U128, S64 -> uint64_to_int64 (UInt128.uint128_to_uint64 u)
| U128, S128 -> uint128_to_int128 u
| S8, U1 -> UInt8.rem (int8_to_uint8 u) 2uy
| S8, U8 -> int8_to_uint8 u
| S8, U16 -> int8_to_uint16 u
| S8, U32 -> int8_to_uint32 u
| S8, U64 -> int8_to_uint64 u
| S8, U128 -> int64_to_uint128 (int8_to_int64 u)
| S8, S8 -> u
| S8, S16 -> int8_to_int16 u
| S8, S32 -> int8_to_int32 u
| S8, S64 -> int8_to_int64 u
| S8, S128 -> int64_to_int128 (int8_to_int64 u)
| S16, U1 -> UInt8.rem (int16_to_uint8 u) 2uy
| S16, U8 -> int16_to_uint8 u
| S16, U16 -> int16_to_uint16 u
| S16, U32 -> int16_to_uint32 u
| S16, U64 -> int16_to_uint64 u
| S16, U128 -> int64_to_uint128 (int16_to_int64 u)
| S16, S8 -> int16_to_int8 u
| S16, S16 -> u
| S16, S32 -> int16_to_int32 u
| S16, S64 -> int16_to_int64 u
| S16, S128 -> int64_to_int128 (int16_to_int64 u)
| S32, U1 -> UInt8.rem (int32_to_uint8 u) 2uy
| S32, U8 -> int32_to_uint8 u
| S32, U16 -> int32_to_uint16 u
| S32, U32 -> int32_to_uint32 u
| S32, U64 -> int32_to_uint64 u
| S32, U128 -> int64_to_uint128 (int32_to_int64 u)
| S32, S8 -> int32_to_int8 u
| S32, S16 -> int32_to_int16 u
| S32, S32 -> u
| S32, S64 -> int32_to_int64 u
| S32, S128 -> int64_to_int128 (int32_to_int64 u)
| S64, U1 -> UInt8.rem (int64_to_uint8 u) 2uy
| S64, U8 -> int64_to_uint8 u
| S64, U16 -> int64_to_uint16 u
| S64, U32 -> int64_to_uint32 u
| S64, U64 -> int64_to_uint64 u
| S64, U128 -> int64_to_uint128 u
| S64, S8 -> int64_to_int8 u
| S64, S16 -> int64_to_int16 u
| S64, S32 -> int64_to_int32 u
| S64, S64 -> u
| S64, S128 -> int64_to_int128 u
| S128, U1 -> UInt8.rem (uint64_to_uint8 (int128_to_uint64 u)) 2uy
| S128, U8 -> uint64_to_uint8 (int128_to_uint64 u)
| S128, U16 -> uint64_to_uint16 (int128_to_uint64 u)
| S128, U32 -> uint64_to_uint32 (int128_to_uint64 u)
| S128, U64 -> int128_to_uint64 u
| S128, U128 -> int128_to_uint128 u
| S128, S8 -> uint64_to_int8 (int128_to_uint64 u)
| S128, S16 -> uint64_to_int16 (int128_to_uint64 u)
| S128, S32 -> uint64_to_int32 (int128_to_uint64 u)
| S128, S64 -> uint64_to_int64 (int128_to_uint64 u)
| S128, S128 -> u
#pop-options
[@(strict_on_arguments [0])]
let ones t l =
match t with
| U1 -> 0x1uy
| U8 -> 0xFFuy
| U16 -> 0xFFFFus
| U32 -> 0xFFFFFFFFul
| U64 -> 0xFFFFFFFFFFFFFFFFuL
| U128 ->
let x = UInt128.uint64_to_uint128 0xFFFFFFFFFFFFFFFFuL in
let y = (UInt128.shift_left x 64ul) `UInt128.add` x in
assert_norm (UInt128.v y == pow2 128 - 1);
y
| _ -> mk_int (-1)
let zeros t l = mk_int 0
[@(strict_on_arguments [0])]
let add_mod #t #l a b =
match t with
| U1 -> UInt8.rem (UInt8.add_mod a b) 2uy
| U8 -> UInt8.add_mod a b
| U16 -> UInt16.add_mod a b
| U32 -> UInt32.add_mod a b
| U64 -> UInt64.add_mod a b
| U128 -> UInt128.add_mod a b
let add_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let add #t #l a b =
match t with
| U1 -> UInt8.add a b
| U8 -> UInt8.add a b
| U16 -> UInt16.add a b
| U32 -> UInt32.add a b
| U64 -> UInt64.add a b
| U128 -> UInt128.add a b
| S8 -> Int8.add a b
| S16 -> Int16.add a b
| S32 -> Int32.add a b
| S64 -> Int64.add a b
| S128 -> Int128.add a b
let add_lemma #t #l a b = ()
#push-options "--max_fuel 1"
[@(strict_on_arguments [0])]
let incr #t #l a =
match t with
| U1 -> UInt8.add a 1uy
| U8 -> UInt8.add a 1uy
| U16 -> UInt16.add a 1us
| U32 -> UInt32.add a 1ul
| U64 -> UInt64.add a 1uL
| U128 -> UInt128.add a (UInt128.uint_to_t 1)
| S8 -> Int8.add a 1y
| S16 -> Int16.add a 1s
| S32 -> Int32.add a 1l
| S64 -> Int64.add a 1L
| S128 -> Int128.add a (Int128.int_to_t 1)
let incr_lemma #t #l a = ()
#pop-options
[@(strict_on_arguments [0])]
let mul_mod #t #l a b =
match t with
| U1 -> UInt8.mul_mod a b
| U8 -> UInt8.mul_mod a b
| U16 -> UInt16.mul_mod a b
| U32 -> UInt32.mul_mod a b
| U64 -> UInt64.mul_mod a b
let mul_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])] | false | false | Lib.IntTypes.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 200,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val mul: #t:inttype{~(U128? t) /\ ~(S128? t)} -> #l:secrecy_level
-> a:int_t t l
-> b:int_t t l{range (v a * v b) t}
-> int_t t l | [] | Lib.IntTypes.mul | {
"file_name": "lib/Lib.IntTypes.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} |
a: Lib.IntTypes.int_t t l ->
b: Lib.IntTypes.int_t t l {Lib.IntTypes.range (Lib.IntTypes.v a * Lib.IntTypes.v b) t}
-> Lib.IntTypes.int_t t l | {
"end_col": 24,
"end_line": 354,
"start_col": 2,
"start_line": 345
} |
FStar.Pervasives.Lemma | val logand_spec: #t:inttype -> #l:secrecy_level
-> a:int_t t l
-> b:int_t t l
-> Lemma (v (a `logand` b) == v a `logand_v` v b) | [
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let logand_spec #t #l a b =
match t with
| U1 ->
assert_norm (u1 0 `logand` u1 0 == u1 0 /\ u1 0 `logand` u1 1 == u1 0);
assert_norm (u1 1 `logand` u1 0 == u1 0 /\ u1 1 `logand` u1 1 == u1 1);
assert_norm (0 `logand_v #U1` 0 == 0 /\ 0 `logand_v #U1` 1 == 0);
assert_norm (1 `logand_v #U1` 0 == 0 /\ 1 `logand_v #U1` 1 == 1)
| _ -> () | val logand_spec: #t:inttype -> #l:secrecy_level
-> a:int_t t l
-> b:int_t t l
-> Lemma (v (a `logand` b) == v a `logand_v` v b)
let logand_spec #t #l a b = | false | null | true | match t with
| U1 ->
assert_norm ((u1 0) `logand` (u1 0) == u1 0 /\ (u1 0) `logand` (u1 1) == u1 0);
assert_norm ((u1 1) `logand` (u1 0) == u1 0 /\ (u1 1) `logand` (u1 1) == u1 1);
assert_norm (logand_v #U1 0 0 == 0 /\ logand_v #U1 0 1 == 0);
assert_norm (logand_v #U1 1 0 == 0 /\ logand_v #U1 1 1 == 1)
| _ -> () | {
"checked_file": "Lib.IntTypes.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt8.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt16.fsti.checked",
"FStar.UInt128.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int8.fsti.checked",
"FStar.Int64.fsti.checked",
"FStar.Int32.fsti.checked",
"FStar.Int16.fsti.checked",
"FStar.Int128.fsti.checked",
"FStar.Int.Cast.Full.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Int.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "Lib.IntTypes.fst"
} | [
"lemma"
] | [
"Lib.IntTypes.inttype",
"Lib.IntTypes.secrecy_level",
"Lib.IntTypes.int_t",
"FStar.Pervasives.assert_norm",
"Prims.l_and",
"Prims.eq2",
"Prims.int",
"Lib.IntTypes.logand_v",
"Lib.IntTypes.U1",
"Prims.unit",
"Lib.IntTypes.SEC",
"Lib.IntTypes.logand",
"Lib.IntTypes.u1"
] | [] | module Lib.IntTypes
open FStar.Math.Lemmas
#push-options "--max_fuel 0 --max_ifuel 1 --z3rlimit 200"
let pow2_2 _ = assert_norm (pow2 2 = 4)
let pow2_3 _ = assert_norm (pow2 3 = 8)
let pow2_4 _ = assert_norm (pow2 4 = 16)
let pow2_127 _ = assert_norm (pow2 127 = 0x80000000000000000000000000000000)
let bits_numbytes t = ()
let sec_int_t t = pub_int_t t
let sec_int_v #t u = pub_int_v u
let secret #t x = x
[@(strict_on_arguments [0])]
let mk_int #t #l x =
match t with
| U1 -> UInt8.uint_to_t x
| U8 -> UInt8.uint_to_t x
| U16 -> UInt16.uint_to_t x
| U32 -> UInt32.uint_to_t x
| U64 -> UInt64.uint_to_t x
| U128 -> UInt128.uint_to_t x
| S8 -> Int8.int_to_t x
| S16 -> Int16.int_to_t x
| S32 -> Int32.int_to_t x
| S64 -> Int64.int_to_t x
| S128 -> Int128.int_to_t x
val v_extensionality:
#t:inttype
-> #l:secrecy_level
-> a:int_t t l
-> b:int_t t l
-> Lemma
(requires v a == v b)
(ensures a == b)
let v_extensionality #t #l a b =
match t with
| U1 -> ()
| U8 -> UInt8.v_inj a b
| U16 -> UInt16.v_inj a b
| U32 -> UInt32.v_inj a b
| U64 -> UInt64.v_inj a b
| U128 -> UInt128.v_inj a b
| S8 -> Int8.v_inj a b
| S16 -> Int16.v_inj a b
| S32 -> Int32.v_inj a b
| S64 -> Int64.v_inj a b
| S128 -> Int128.v_inj a b
let v_injective #t #l a =
v_extensionality a (mk_int (v a))
let v_mk_int #t #l n = ()
let u128 n = FStar.UInt128.uint64_to_uint128 (u64 n)
// KaRaMeL will extract this to FStar_Int128_int_to_t, which isn't provided
// We'll need to have FStar.Int128.int64_to_int128 to support int128_t literals
let i128 n =
assert_norm (pow2 (bits S64 - 1) <= pow2 (bits S128 - 1));
sint #S128 #SEC n
let size_to_uint32 x = x
let size_to_uint64 x = Int.Cast.uint32_to_uint64 x
let byte_to_uint8 x = x
let byte_to_int8 x = Int.Cast.uint8_to_int8 x
let op_At_Percent = Int.op_At_Percent
// FStar.UInt128 gets special treatment in KaRaMeL. There is no
// equivalent for FStar.Int128 at the moment, so we use the three
// assumed cast operators below.
//
// Using them will fail at runtime with an informative message.
// The commented-out implementations show that they are realizable.
//
// When support for `FStar.Int128` is added KaRaMeL, these casts must
// be added as special cases. When using builtin compiler support for
// `int128_t`, they can be implemented directly as C casts without
// undefined or implementation-defined behaviour.
assume
val uint128_to_int128: a:UInt128.t{v a <= maxint S128} -> b:Int128.t{Int128.v b == UInt128.v a}
//let uint128_to_int128 a = Int128.int_to_t (v a)
assume
val int128_to_uint128: a:Int128.t -> b:UInt128.t{UInt128.v b == Int128.v a % pow2 128}
//let int128_to_uint128 a = mk_int (v a % pow2 128)
assume
val int64_to_int128: a:Int64.t -> b:Int128.t{Int128.v b == Int64.v a}
//let int64_to_int128 a = Int128.int_to_t (v a)
val uint64_to_int128: a:UInt64.t -> b:Int128.t{Int128.v b == UInt64.v a}
let uint64_to_int128 a = uint128_to_int128 (Int.Cast.Full.uint64_to_uint128 a)
val int64_to_uint128: a:Int64.t -> b:UInt128.t{UInt128.v b == Int64.v a % pow2 128}
let int64_to_uint128 a = int128_to_uint128 (int64_to_int128 a)
val int128_to_uint64: a:Int128.t -> b:UInt64.t{UInt64.v b == Int128.v a % pow2 64}
let int128_to_uint64 a = Int.Cast.Full.uint128_to_uint64 (int128_to_uint128 a)
#push-options "--z3rlimit 1000"
[@(strict_on_arguments [0;2])]
let cast #t #l t' l' u =
assert_norm (pow2 8 = 2 * pow2 7);
assert_norm (pow2 16 = 2 * pow2 15);
assert_norm (pow2 64 * pow2 64 = pow2 128);
assert_norm (pow2 16 * pow2 48 = pow2 64);
assert_norm (pow2 8 * pow2 56 = pow2 64);
assert_norm (pow2 32 * pow2 32 = pow2 64);
modulo_modulo_lemma (v u) (pow2 32) (pow2 32);
modulo_modulo_lemma (v u) (pow2 64) (pow2 64);
modulo_modulo_lemma (v u) (pow2 128) (pow2 64);
modulo_modulo_lemma (v u) (pow2 16) (pow2 48);
modulo_modulo_lemma (v u) (pow2 8) (pow2 56);
let open FStar.Int.Cast in
let open FStar.Int.Cast.Full in
match t, t' with
| U1, U1 -> u
| U1, U8 -> u
| U1, U16 -> uint8_to_uint16 u
| U1, U32 -> uint8_to_uint32 u
| U1, U64 -> uint8_to_uint64 u
| U1, U128 -> UInt128.uint64_to_uint128 (uint8_to_uint64 u)
| U1, S8 -> uint8_to_int8 u
| U1, S16 -> uint8_to_int16 u
| U1, S32 -> uint8_to_int32 u
| U1, S64 -> uint8_to_int64 u
| U1, S128 -> uint64_to_int128 (uint8_to_uint64 u)
| U8, U1 -> UInt8.rem u 2uy
| U8, U8 -> u
| U8, U16 -> uint8_to_uint16 u
| U8, U32 -> uint8_to_uint32 u
| U8, U64 -> uint8_to_uint64 u
| U8, U128 -> UInt128.uint64_to_uint128 (uint8_to_uint64 u)
| U8, S8 -> uint8_to_int8 u
| U8, S16 -> uint8_to_int16 u
| U8, S32 -> uint8_to_int32 u
| U8, S64 -> uint8_to_int64 u
| U8, S128 -> uint64_to_int128 (uint8_to_uint64 u)
| U16, U1 -> UInt8.rem (uint16_to_uint8 u) 2uy
| U16, U8 -> uint16_to_uint8 u
| U16, U16 -> u
| U16, U32 -> uint16_to_uint32 u
| U16, U64 -> uint16_to_uint64 u
| U16, U128 -> UInt128.uint64_to_uint128 (uint16_to_uint64 u)
| U16, S8 -> uint16_to_int8 u
| U16, S16 -> uint16_to_int16 u
| U16, S32 -> uint16_to_int32 u
| U16, S64 -> uint16_to_int64 u
| U16, S128 -> uint64_to_int128 (uint16_to_uint64 u)
| U32, U1 -> UInt8.rem (uint32_to_uint8 u) 2uy
| U32, U8 -> uint32_to_uint8 u
| U32, U16 -> uint32_to_uint16 u
| U32, U32 -> u
| U32, U64 -> uint32_to_uint64 u
| U32, U128 -> UInt128.uint64_to_uint128 (uint32_to_uint64 u)
| U32, S8 -> uint32_to_int8 u
| U32, S16 -> uint32_to_int16 u
| U32, S32 -> uint32_to_int32 u
| U32, S64 -> uint32_to_int64 u
| U32, S128 -> uint64_to_int128 (uint32_to_uint64 u)
| U64, U1 -> UInt8.rem (uint64_to_uint8 u) 2uy
| U64, U8 -> uint64_to_uint8 u
| U64, U16 -> uint64_to_uint16 u
| U64, U32 -> uint64_to_uint32 u
| U64, U64 -> u
| U64, U128 -> UInt128.uint64_to_uint128 u
| U64, S8 -> uint64_to_int8 u
| U64, S16 -> uint64_to_int16 u
| U64, S32 -> uint64_to_int32 u
| U64, S64 -> uint64_to_int64 u
| U64, S128 -> uint64_to_int128 u
| U128, U1 -> UInt8.rem (uint64_to_uint8 (uint128_to_uint64 u)) 2uy
| U128, U8 -> uint64_to_uint8 (UInt128.uint128_to_uint64 u)
| U128, U16 -> uint64_to_uint16 (UInt128.uint128_to_uint64 u)
| U128, U32 -> uint64_to_uint32 (UInt128.uint128_to_uint64 u)
| U128, U64 -> UInt128.uint128_to_uint64 u
| U128, U128 -> u
| U128, S8 -> uint64_to_int8 (UInt128.uint128_to_uint64 u)
| U128, S16 -> uint64_to_int16 (UInt128.uint128_to_uint64 u)
| U128, S32 -> uint64_to_int32 (UInt128.uint128_to_uint64 u)
| U128, S64 -> uint64_to_int64 (UInt128.uint128_to_uint64 u)
| U128, S128 -> uint128_to_int128 u
| S8, U1 -> UInt8.rem (int8_to_uint8 u) 2uy
| S8, U8 -> int8_to_uint8 u
| S8, U16 -> int8_to_uint16 u
| S8, U32 -> int8_to_uint32 u
| S8, U64 -> int8_to_uint64 u
| S8, U128 -> int64_to_uint128 (int8_to_int64 u)
| S8, S8 -> u
| S8, S16 -> int8_to_int16 u
| S8, S32 -> int8_to_int32 u
| S8, S64 -> int8_to_int64 u
| S8, S128 -> int64_to_int128 (int8_to_int64 u)
| S16, U1 -> UInt8.rem (int16_to_uint8 u) 2uy
| S16, U8 -> int16_to_uint8 u
| S16, U16 -> int16_to_uint16 u
| S16, U32 -> int16_to_uint32 u
| S16, U64 -> int16_to_uint64 u
| S16, U128 -> int64_to_uint128 (int16_to_int64 u)
| S16, S8 -> int16_to_int8 u
| S16, S16 -> u
| S16, S32 -> int16_to_int32 u
| S16, S64 -> int16_to_int64 u
| S16, S128 -> int64_to_int128 (int16_to_int64 u)
| S32, U1 -> UInt8.rem (int32_to_uint8 u) 2uy
| S32, U8 -> int32_to_uint8 u
| S32, U16 -> int32_to_uint16 u
| S32, U32 -> int32_to_uint32 u
| S32, U64 -> int32_to_uint64 u
| S32, U128 -> int64_to_uint128 (int32_to_int64 u)
| S32, S8 -> int32_to_int8 u
| S32, S16 -> int32_to_int16 u
| S32, S32 -> u
| S32, S64 -> int32_to_int64 u
| S32, S128 -> int64_to_int128 (int32_to_int64 u)
| S64, U1 -> UInt8.rem (int64_to_uint8 u) 2uy
| S64, U8 -> int64_to_uint8 u
| S64, U16 -> int64_to_uint16 u
| S64, U32 -> int64_to_uint32 u
| S64, U64 -> int64_to_uint64 u
| S64, U128 -> int64_to_uint128 u
| S64, S8 -> int64_to_int8 u
| S64, S16 -> int64_to_int16 u
| S64, S32 -> int64_to_int32 u
| S64, S64 -> u
| S64, S128 -> int64_to_int128 u
| S128, U1 -> UInt8.rem (uint64_to_uint8 (int128_to_uint64 u)) 2uy
| S128, U8 -> uint64_to_uint8 (int128_to_uint64 u)
| S128, U16 -> uint64_to_uint16 (int128_to_uint64 u)
| S128, U32 -> uint64_to_uint32 (int128_to_uint64 u)
| S128, U64 -> int128_to_uint64 u
| S128, U128 -> int128_to_uint128 u
| S128, S8 -> uint64_to_int8 (int128_to_uint64 u)
| S128, S16 -> uint64_to_int16 (int128_to_uint64 u)
| S128, S32 -> uint64_to_int32 (int128_to_uint64 u)
| S128, S64 -> uint64_to_int64 (int128_to_uint64 u)
| S128, S128 -> u
#pop-options
[@(strict_on_arguments [0])]
let ones t l =
match t with
| U1 -> 0x1uy
| U8 -> 0xFFuy
| U16 -> 0xFFFFus
| U32 -> 0xFFFFFFFFul
| U64 -> 0xFFFFFFFFFFFFFFFFuL
| U128 ->
let x = UInt128.uint64_to_uint128 0xFFFFFFFFFFFFFFFFuL in
let y = (UInt128.shift_left x 64ul) `UInt128.add` x in
assert_norm (UInt128.v y == pow2 128 - 1);
y
| _ -> mk_int (-1)
let zeros t l = mk_int 0
[@(strict_on_arguments [0])]
let add_mod #t #l a b =
match t with
| U1 -> UInt8.rem (UInt8.add_mod a b) 2uy
| U8 -> UInt8.add_mod a b
| U16 -> UInt16.add_mod a b
| U32 -> UInt32.add_mod a b
| U64 -> UInt64.add_mod a b
| U128 -> UInt128.add_mod a b
let add_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let add #t #l a b =
match t with
| U1 -> UInt8.add a b
| U8 -> UInt8.add a b
| U16 -> UInt16.add a b
| U32 -> UInt32.add a b
| U64 -> UInt64.add a b
| U128 -> UInt128.add a b
| S8 -> Int8.add a b
| S16 -> Int16.add a b
| S32 -> Int32.add a b
| S64 -> Int64.add a b
| S128 -> Int128.add a b
let add_lemma #t #l a b = ()
#push-options "--max_fuel 1"
[@(strict_on_arguments [0])]
let incr #t #l a =
match t with
| U1 -> UInt8.add a 1uy
| U8 -> UInt8.add a 1uy
| U16 -> UInt16.add a 1us
| U32 -> UInt32.add a 1ul
| U64 -> UInt64.add a 1uL
| U128 -> UInt128.add a (UInt128.uint_to_t 1)
| S8 -> Int8.add a 1y
| S16 -> Int16.add a 1s
| S32 -> Int32.add a 1l
| S64 -> Int64.add a 1L
| S128 -> Int128.add a (Int128.int_to_t 1)
let incr_lemma #t #l a = ()
#pop-options
[@(strict_on_arguments [0])]
let mul_mod #t #l a b =
match t with
| U1 -> UInt8.mul_mod a b
| U8 -> UInt8.mul_mod a b
| U16 -> UInt16.mul_mod a b
| U32 -> UInt32.mul_mod a b
| U64 -> UInt64.mul_mod a b
let mul_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let mul #t #l a b =
match t with
| U1 -> UInt8.mul a b
| U8 -> UInt8.mul a b
| U16 -> UInt16.mul a b
| U32 -> UInt32.mul a b
| U64 -> UInt64.mul a b
| S8 -> Int8.mul a b
| S16 -> Int16.mul a b
| S32 -> Int32.mul a b
| S64 -> Int64.mul a b
let mul_lemma #t #l a b = ()
let mul64_wide a b = UInt128.mul_wide a b
let mul64_wide_lemma a b = ()
let mul_s64_wide a b = Int128.mul_wide a b
let mul_s64_wide_lemma a b = ()
[@(strict_on_arguments [0])]
let sub_mod #t #l a b =
match t with
| U1 -> UInt8.rem (UInt8.sub_mod a b) 2uy
| U8 -> UInt8.sub_mod a b
| U16 -> UInt16.sub_mod a b
| U32 -> UInt32.sub_mod a b
| U64 -> UInt64.sub_mod a b
| U128 -> UInt128.sub_mod a b
let sub_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let sub #t #l a b =
match t with
| U1 -> UInt8.sub a b
| U8 -> UInt8.sub a b
| U16 -> UInt16.sub a b
| U32 -> UInt32.sub a b
| U64 -> UInt64.sub a b
| U128 -> UInt128.sub a b
| S8 -> Int8.sub a b
| S16 -> Int16.sub a b
| S32 -> Int32.sub a b
| S64 -> Int64.sub a b
| S128 -> Int128.sub a b
let sub_lemma #t #l a b = ()
#push-options "--max_fuel 1"
[@(strict_on_arguments [0])]
let decr #t #l a =
match t with
| U1 -> UInt8.sub a 1uy
| U8 -> UInt8.sub a 1uy
| U16 -> UInt16.sub a 1us
| U32 -> UInt32.sub a 1ul
| U64 -> UInt64.sub a 1uL
| U128 -> UInt128.sub a (UInt128.uint_to_t 1)
| S8 -> Int8.sub a 1y
| S16 -> Int16.sub a 1s
| S32 -> Int32.sub a 1l
| S64 -> Int64.sub a 1L
| S128 -> Int128.sub a (Int128.int_to_t 1)
let decr_lemma #t #l a = ()
#pop-options
[@(strict_on_arguments [0])]
let logxor #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logxor 0uy 0uy == 0uy);
assert_norm (UInt8.logxor 0uy 1uy == 1uy);
assert_norm (UInt8.logxor 1uy 0uy == 1uy);
assert_norm (UInt8.logxor 1uy 1uy == 0uy);
UInt8.logxor a b
| U8 -> UInt8.logxor a b
| U16 -> UInt16.logxor a b
| U32 -> UInt32.logxor a b
| U64 -> UInt64.logxor a b
| U128 -> UInt128.logxor a b
| S8 -> Int8.logxor a b
| S16 -> Int16.logxor a b
| S32 -> Int32.logxor a b
| S64 -> Int64.logxor a b
| S128 -> Int128.logxor a b
#push-options "--max_fuel 1"
val logxor_lemma_: #t:inttype -> #l:secrecy_level -> a:int_t t l -> b:int_t t l -> Lemma
(v (a `logxor` (a `logxor` b)) == v b)
let logxor_lemma_ #t #l a b =
match t with
| U1 | U8 | U16 | U32 | U64 | U128 ->
UInt.logxor_associative #(bits t) (v a) (v a) (v b);
UInt.logxor_self #(bits t) (v a);
UInt.logxor_commutative #(bits t) 0 (v b);
UInt.logxor_lemma_1 #(bits t) (v b)
| S8 | S16 | S32 | S64 | S128 ->
Int.logxor_associative #(bits t) (v a) (v a) (v b);
Int.logxor_self #(bits t) (v a);
Int.logxor_commutative #(bits t) 0 (v b);
Int.logxor_lemma_1 #(bits t) (v b)
let logxor_lemma #t #l a b =
logxor_lemma_ #t a b;
v_extensionality (logxor a (logxor a b)) b;
begin
match t with
| U1 | U8 | U16 | U32 | U64 | U128 -> UInt.logxor_commutative #(bits t) (v a) (v b)
| S8 | S16 | S32 | S64 | S128 -> Int.logxor_commutative #(bits t) (v a) (v b)
end;
v_extensionality (logxor a (logxor b a)) b;
begin
match t with
| U1 | U8 | U16 | U32 | U64 | U128 -> UInt.logxor_lemma_1 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.logxor_lemma_1 #(bits t) (v a)
end;
v_extensionality (logxor a (mk_int #t #l 0)) a
let logxor_lemma1 #t #l a b =
match v a, v b with
| _, 0 ->
UInt.logxor_lemma_1 #(bits t) (v a)
| 0, _ ->
UInt.logxor_commutative #(bits t) (v a) (v b);
UInt.logxor_lemma_1 #(bits t) (v b)
| 1, 1 ->
v_extensionality a b;
UInt.logxor_self #(bits t) (v a)
let logxor_spec #t #l a b =
match t with
| U1 ->
assert_norm (u1 0 `logxor` u1 0 == u1 0 /\ u1 0 `logxor` u1 1 == u1 1);
assert_norm (u1 1 `logxor` u1 0 == u1 1 /\ u1 1 `logxor` u1 1 == u1 0);
assert_norm (0 `logxor_v #U1` 0 == 0 /\ 0 `logxor_v #U1` 1 == 1);
assert_norm (1 `logxor_v #U1` 0 == 1 /\ 1 `logxor_v #U1` 1 == 0)
| _ -> ()
#pop-options
[@(strict_on_arguments [0])]
let logand #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logand 0uy 0uy == 0uy);
assert_norm (UInt8.logand 0uy 1uy == 0uy);
assert_norm (UInt8.logand 1uy 0uy == 0uy);
assert_norm (UInt8.logand 1uy 1uy == 1uy);
UInt8.logand a b
| U8 -> UInt8.logand a b
| U16 -> UInt16.logand a b
| U32 -> UInt32.logand a b
| U64 -> UInt64.logand a b
| U128 -> UInt128.logand a b
| S8 -> Int8.logand a b
| S16 -> Int16.logand a b
| S32 -> Int32.logand a b
| S64 -> Int64.logand a b
| S128 -> Int128.logand a b
let logand_zeros #t #l a =
match t with
| U1 -> assert_norm (u1 0 `logand` zeros U1 l == u1 0 /\ u1 1 `logand` zeros U1 l == u1 0)
| U8 | U16 | U32 | U64 | U128 -> UInt.logand_lemma_1 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.logand_lemma_1 #(bits t) (v a)
let logand_ones #t #l a =
match t with
| U1 -> assert_norm (u1 0 `logand` ones U1 l == u1 0 /\ u1 1 `logand` ones U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logand_lemma_2 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.logand_lemma_2 #(bits t) (v a)
let logand_lemma #t #l a b =
logand_zeros #t #l b;
logand_ones #t #l b;
match t with
| U1 ->
assert_norm (u1 0 `logand` zeros U1 l == u1 0 /\ u1 1 `logand` zeros U1 l == u1 0);
assert_norm (u1 0 `logand` ones U1 l == u1 0 /\ u1 1 `logand` ones U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logand_commutative #(bits t) (v a) (v b)
| S8 | S16 | S32 | S64 | S128 -> Int.logand_commutative #(bits t) (v a) (v b) | false | false | Lib.IntTypes.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 200,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val logand_spec: #t:inttype -> #l:secrecy_level
-> a:int_t t l
-> b:int_t t l
-> Lemma (v (a `logand` b) == v a `logand_v` v b) | [] | Lib.IntTypes.logand_spec | {
"file_name": "lib/Lib.IntTypes.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | a: Lib.IntTypes.int_t t l -> b: Lib.IntTypes.int_t t l
-> FStar.Pervasives.Lemma
(ensures
Lib.IntTypes.v (Lib.IntTypes.logand a b) ==
Lib.IntTypes.logand_v (Lib.IntTypes.v a) (Lib.IntTypes.v b)) | {
"end_col": 11,
"end_line": 540,
"start_col": 2,
"start_line": 534
} |
FStar.Pervasives.Lemma | val logor_spec: #t:inttype -> #l:secrecy_level
-> a:int_t t l
-> b:int_t t l
-> Lemma (v (a `logor` b) == v a `logor_v` v b) | [
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let logor_spec #t #l a b =
match t with
| U1 ->
assert_norm(u1 0 `logor` ones U1 l == u1 1 /\ u1 1 `logor` ones U1 l == u1 1);
assert_norm(u1 0 `logor` zeros U1 l == u1 0 /\ u1 1 `logor` zeros U1 l == u1 1);
assert_norm (0 `logor_v #U1` 0 == 0 /\ 0 `logor_v #U1` 1 == 1);
assert_norm (1 `logor_v #U1` 0 == 1 /\ 1 `logor_v #U1` 1 == 1)
| _ -> () | val logor_spec: #t:inttype -> #l:secrecy_level
-> a:int_t t l
-> b:int_t t l
-> Lemma (v (a `logor` b) == v a `logor_v` v b)
let logor_spec #t #l a b = | false | null | true | match t with
| U1 ->
assert_norm ((u1 0) `logor` (ones U1 l) == u1 1 /\ (u1 1) `logor` (ones U1 l) == u1 1);
assert_norm ((u1 0) `logor` (zeros U1 l) == u1 0 /\ (u1 1) `logor` (zeros U1 l) == u1 1);
assert_norm (logor_v #U1 0 0 == 0 /\ logor_v #U1 0 1 == 1);
assert_norm (logor_v #U1 1 0 == 1 /\ logor_v #U1 1 1 == 1)
| _ -> () | {
"checked_file": "Lib.IntTypes.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt8.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt16.fsti.checked",
"FStar.UInt128.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int8.fsti.checked",
"FStar.Int64.fsti.checked",
"FStar.Int32.fsti.checked",
"FStar.Int16.fsti.checked",
"FStar.Int128.fsti.checked",
"FStar.Int.Cast.Full.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Int.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "Lib.IntTypes.fst"
} | [
"lemma"
] | [
"Lib.IntTypes.inttype",
"Lib.IntTypes.secrecy_level",
"Lib.IntTypes.int_t",
"FStar.Pervasives.assert_norm",
"Prims.l_and",
"Prims.eq2",
"Prims.int",
"Lib.IntTypes.logor_v",
"Lib.IntTypes.U1",
"Prims.unit",
"Lib.IntTypes.SEC",
"Lib.IntTypes.logor",
"Lib.IntTypes.u1",
"Lib.IntTypes.zeros",
"Lib.IntTypes.ones"
] | [] | module Lib.IntTypes
open FStar.Math.Lemmas
#push-options "--max_fuel 0 --max_ifuel 1 --z3rlimit 200"
let pow2_2 _ = assert_norm (pow2 2 = 4)
let pow2_3 _ = assert_norm (pow2 3 = 8)
let pow2_4 _ = assert_norm (pow2 4 = 16)
let pow2_127 _ = assert_norm (pow2 127 = 0x80000000000000000000000000000000)
let bits_numbytes t = ()
let sec_int_t t = pub_int_t t
let sec_int_v #t u = pub_int_v u
let secret #t x = x
[@(strict_on_arguments [0])]
let mk_int #t #l x =
match t with
| U1 -> UInt8.uint_to_t x
| U8 -> UInt8.uint_to_t x
| U16 -> UInt16.uint_to_t x
| U32 -> UInt32.uint_to_t x
| U64 -> UInt64.uint_to_t x
| U128 -> UInt128.uint_to_t x
| S8 -> Int8.int_to_t x
| S16 -> Int16.int_to_t x
| S32 -> Int32.int_to_t x
| S64 -> Int64.int_to_t x
| S128 -> Int128.int_to_t x
val v_extensionality:
#t:inttype
-> #l:secrecy_level
-> a:int_t t l
-> b:int_t t l
-> Lemma
(requires v a == v b)
(ensures a == b)
let v_extensionality #t #l a b =
match t with
| U1 -> ()
| U8 -> UInt8.v_inj a b
| U16 -> UInt16.v_inj a b
| U32 -> UInt32.v_inj a b
| U64 -> UInt64.v_inj a b
| U128 -> UInt128.v_inj a b
| S8 -> Int8.v_inj a b
| S16 -> Int16.v_inj a b
| S32 -> Int32.v_inj a b
| S64 -> Int64.v_inj a b
| S128 -> Int128.v_inj a b
let v_injective #t #l a =
v_extensionality a (mk_int (v a))
let v_mk_int #t #l n = ()
let u128 n = FStar.UInt128.uint64_to_uint128 (u64 n)
// KaRaMeL will extract this to FStar_Int128_int_to_t, which isn't provided
// We'll need to have FStar.Int128.int64_to_int128 to support int128_t literals
let i128 n =
assert_norm (pow2 (bits S64 - 1) <= pow2 (bits S128 - 1));
sint #S128 #SEC n
let size_to_uint32 x = x
let size_to_uint64 x = Int.Cast.uint32_to_uint64 x
let byte_to_uint8 x = x
let byte_to_int8 x = Int.Cast.uint8_to_int8 x
let op_At_Percent = Int.op_At_Percent
// FStar.UInt128 gets special treatment in KaRaMeL. There is no
// equivalent for FStar.Int128 at the moment, so we use the three
// assumed cast operators below.
//
// Using them will fail at runtime with an informative message.
// The commented-out implementations show that they are realizable.
//
// When support for `FStar.Int128` is added KaRaMeL, these casts must
// be added as special cases. When using builtin compiler support for
// `int128_t`, they can be implemented directly as C casts without
// undefined or implementation-defined behaviour.
assume
val uint128_to_int128: a:UInt128.t{v a <= maxint S128} -> b:Int128.t{Int128.v b == UInt128.v a}
//let uint128_to_int128 a = Int128.int_to_t (v a)
assume
val int128_to_uint128: a:Int128.t -> b:UInt128.t{UInt128.v b == Int128.v a % pow2 128}
//let int128_to_uint128 a = mk_int (v a % pow2 128)
assume
val int64_to_int128: a:Int64.t -> b:Int128.t{Int128.v b == Int64.v a}
//let int64_to_int128 a = Int128.int_to_t (v a)
val uint64_to_int128: a:UInt64.t -> b:Int128.t{Int128.v b == UInt64.v a}
let uint64_to_int128 a = uint128_to_int128 (Int.Cast.Full.uint64_to_uint128 a)
val int64_to_uint128: a:Int64.t -> b:UInt128.t{UInt128.v b == Int64.v a % pow2 128}
let int64_to_uint128 a = int128_to_uint128 (int64_to_int128 a)
val int128_to_uint64: a:Int128.t -> b:UInt64.t{UInt64.v b == Int128.v a % pow2 64}
let int128_to_uint64 a = Int.Cast.Full.uint128_to_uint64 (int128_to_uint128 a)
#push-options "--z3rlimit 1000"
[@(strict_on_arguments [0;2])]
let cast #t #l t' l' u =
assert_norm (pow2 8 = 2 * pow2 7);
assert_norm (pow2 16 = 2 * pow2 15);
assert_norm (pow2 64 * pow2 64 = pow2 128);
assert_norm (pow2 16 * pow2 48 = pow2 64);
assert_norm (pow2 8 * pow2 56 = pow2 64);
assert_norm (pow2 32 * pow2 32 = pow2 64);
modulo_modulo_lemma (v u) (pow2 32) (pow2 32);
modulo_modulo_lemma (v u) (pow2 64) (pow2 64);
modulo_modulo_lemma (v u) (pow2 128) (pow2 64);
modulo_modulo_lemma (v u) (pow2 16) (pow2 48);
modulo_modulo_lemma (v u) (pow2 8) (pow2 56);
let open FStar.Int.Cast in
let open FStar.Int.Cast.Full in
match t, t' with
| U1, U1 -> u
| U1, U8 -> u
| U1, U16 -> uint8_to_uint16 u
| U1, U32 -> uint8_to_uint32 u
| U1, U64 -> uint8_to_uint64 u
| U1, U128 -> UInt128.uint64_to_uint128 (uint8_to_uint64 u)
| U1, S8 -> uint8_to_int8 u
| U1, S16 -> uint8_to_int16 u
| U1, S32 -> uint8_to_int32 u
| U1, S64 -> uint8_to_int64 u
| U1, S128 -> uint64_to_int128 (uint8_to_uint64 u)
| U8, U1 -> UInt8.rem u 2uy
| U8, U8 -> u
| U8, U16 -> uint8_to_uint16 u
| U8, U32 -> uint8_to_uint32 u
| U8, U64 -> uint8_to_uint64 u
| U8, U128 -> UInt128.uint64_to_uint128 (uint8_to_uint64 u)
| U8, S8 -> uint8_to_int8 u
| U8, S16 -> uint8_to_int16 u
| U8, S32 -> uint8_to_int32 u
| U8, S64 -> uint8_to_int64 u
| U8, S128 -> uint64_to_int128 (uint8_to_uint64 u)
| U16, U1 -> UInt8.rem (uint16_to_uint8 u) 2uy
| U16, U8 -> uint16_to_uint8 u
| U16, U16 -> u
| U16, U32 -> uint16_to_uint32 u
| U16, U64 -> uint16_to_uint64 u
| U16, U128 -> UInt128.uint64_to_uint128 (uint16_to_uint64 u)
| U16, S8 -> uint16_to_int8 u
| U16, S16 -> uint16_to_int16 u
| U16, S32 -> uint16_to_int32 u
| U16, S64 -> uint16_to_int64 u
| U16, S128 -> uint64_to_int128 (uint16_to_uint64 u)
| U32, U1 -> UInt8.rem (uint32_to_uint8 u) 2uy
| U32, U8 -> uint32_to_uint8 u
| U32, U16 -> uint32_to_uint16 u
| U32, U32 -> u
| U32, U64 -> uint32_to_uint64 u
| U32, U128 -> UInt128.uint64_to_uint128 (uint32_to_uint64 u)
| U32, S8 -> uint32_to_int8 u
| U32, S16 -> uint32_to_int16 u
| U32, S32 -> uint32_to_int32 u
| U32, S64 -> uint32_to_int64 u
| U32, S128 -> uint64_to_int128 (uint32_to_uint64 u)
| U64, U1 -> UInt8.rem (uint64_to_uint8 u) 2uy
| U64, U8 -> uint64_to_uint8 u
| U64, U16 -> uint64_to_uint16 u
| U64, U32 -> uint64_to_uint32 u
| U64, U64 -> u
| U64, U128 -> UInt128.uint64_to_uint128 u
| U64, S8 -> uint64_to_int8 u
| U64, S16 -> uint64_to_int16 u
| U64, S32 -> uint64_to_int32 u
| U64, S64 -> uint64_to_int64 u
| U64, S128 -> uint64_to_int128 u
| U128, U1 -> UInt8.rem (uint64_to_uint8 (uint128_to_uint64 u)) 2uy
| U128, U8 -> uint64_to_uint8 (UInt128.uint128_to_uint64 u)
| U128, U16 -> uint64_to_uint16 (UInt128.uint128_to_uint64 u)
| U128, U32 -> uint64_to_uint32 (UInt128.uint128_to_uint64 u)
| U128, U64 -> UInt128.uint128_to_uint64 u
| U128, U128 -> u
| U128, S8 -> uint64_to_int8 (UInt128.uint128_to_uint64 u)
| U128, S16 -> uint64_to_int16 (UInt128.uint128_to_uint64 u)
| U128, S32 -> uint64_to_int32 (UInt128.uint128_to_uint64 u)
| U128, S64 -> uint64_to_int64 (UInt128.uint128_to_uint64 u)
| U128, S128 -> uint128_to_int128 u
| S8, U1 -> UInt8.rem (int8_to_uint8 u) 2uy
| S8, U8 -> int8_to_uint8 u
| S8, U16 -> int8_to_uint16 u
| S8, U32 -> int8_to_uint32 u
| S8, U64 -> int8_to_uint64 u
| S8, U128 -> int64_to_uint128 (int8_to_int64 u)
| S8, S8 -> u
| S8, S16 -> int8_to_int16 u
| S8, S32 -> int8_to_int32 u
| S8, S64 -> int8_to_int64 u
| S8, S128 -> int64_to_int128 (int8_to_int64 u)
| S16, U1 -> UInt8.rem (int16_to_uint8 u) 2uy
| S16, U8 -> int16_to_uint8 u
| S16, U16 -> int16_to_uint16 u
| S16, U32 -> int16_to_uint32 u
| S16, U64 -> int16_to_uint64 u
| S16, U128 -> int64_to_uint128 (int16_to_int64 u)
| S16, S8 -> int16_to_int8 u
| S16, S16 -> u
| S16, S32 -> int16_to_int32 u
| S16, S64 -> int16_to_int64 u
| S16, S128 -> int64_to_int128 (int16_to_int64 u)
| S32, U1 -> UInt8.rem (int32_to_uint8 u) 2uy
| S32, U8 -> int32_to_uint8 u
| S32, U16 -> int32_to_uint16 u
| S32, U32 -> int32_to_uint32 u
| S32, U64 -> int32_to_uint64 u
| S32, U128 -> int64_to_uint128 (int32_to_int64 u)
| S32, S8 -> int32_to_int8 u
| S32, S16 -> int32_to_int16 u
| S32, S32 -> u
| S32, S64 -> int32_to_int64 u
| S32, S128 -> int64_to_int128 (int32_to_int64 u)
| S64, U1 -> UInt8.rem (int64_to_uint8 u) 2uy
| S64, U8 -> int64_to_uint8 u
| S64, U16 -> int64_to_uint16 u
| S64, U32 -> int64_to_uint32 u
| S64, U64 -> int64_to_uint64 u
| S64, U128 -> int64_to_uint128 u
| S64, S8 -> int64_to_int8 u
| S64, S16 -> int64_to_int16 u
| S64, S32 -> int64_to_int32 u
| S64, S64 -> u
| S64, S128 -> int64_to_int128 u
| S128, U1 -> UInt8.rem (uint64_to_uint8 (int128_to_uint64 u)) 2uy
| S128, U8 -> uint64_to_uint8 (int128_to_uint64 u)
| S128, U16 -> uint64_to_uint16 (int128_to_uint64 u)
| S128, U32 -> uint64_to_uint32 (int128_to_uint64 u)
| S128, U64 -> int128_to_uint64 u
| S128, U128 -> int128_to_uint128 u
| S128, S8 -> uint64_to_int8 (int128_to_uint64 u)
| S128, S16 -> uint64_to_int16 (int128_to_uint64 u)
| S128, S32 -> uint64_to_int32 (int128_to_uint64 u)
| S128, S64 -> uint64_to_int64 (int128_to_uint64 u)
| S128, S128 -> u
#pop-options
[@(strict_on_arguments [0])]
let ones t l =
match t with
| U1 -> 0x1uy
| U8 -> 0xFFuy
| U16 -> 0xFFFFus
| U32 -> 0xFFFFFFFFul
| U64 -> 0xFFFFFFFFFFFFFFFFuL
| U128 ->
let x = UInt128.uint64_to_uint128 0xFFFFFFFFFFFFFFFFuL in
let y = (UInt128.shift_left x 64ul) `UInt128.add` x in
assert_norm (UInt128.v y == pow2 128 - 1);
y
| _ -> mk_int (-1)
let zeros t l = mk_int 0
[@(strict_on_arguments [0])]
let add_mod #t #l a b =
match t with
| U1 -> UInt8.rem (UInt8.add_mod a b) 2uy
| U8 -> UInt8.add_mod a b
| U16 -> UInt16.add_mod a b
| U32 -> UInt32.add_mod a b
| U64 -> UInt64.add_mod a b
| U128 -> UInt128.add_mod a b
let add_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let add #t #l a b =
match t with
| U1 -> UInt8.add a b
| U8 -> UInt8.add a b
| U16 -> UInt16.add a b
| U32 -> UInt32.add a b
| U64 -> UInt64.add a b
| U128 -> UInt128.add a b
| S8 -> Int8.add a b
| S16 -> Int16.add a b
| S32 -> Int32.add a b
| S64 -> Int64.add a b
| S128 -> Int128.add a b
let add_lemma #t #l a b = ()
#push-options "--max_fuel 1"
[@(strict_on_arguments [0])]
let incr #t #l a =
match t with
| U1 -> UInt8.add a 1uy
| U8 -> UInt8.add a 1uy
| U16 -> UInt16.add a 1us
| U32 -> UInt32.add a 1ul
| U64 -> UInt64.add a 1uL
| U128 -> UInt128.add a (UInt128.uint_to_t 1)
| S8 -> Int8.add a 1y
| S16 -> Int16.add a 1s
| S32 -> Int32.add a 1l
| S64 -> Int64.add a 1L
| S128 -> Int128.add a (Int128.int_to_t 1)
let incr_lemma #t #l a = ()
#pop-options
[@(strict_on_arguments [0])]
let mul_mod #t #l a b =
match t with
| U1 -> UInt8.mul_mod a b
| U8 -> UInt8.mul_mod a b
| U16 -> UInt16.mul_mod a b
| U32 -> UInt32.mul_mod a b
| U64 -> UInt64.mul_mod a b
let mul_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let mul #t #l a b =
match t with
| U1 -> UInt8.mul a b
| U8 -> UInt8.mul a b
| U16 -> UInt16.mul a b
| U32 -> UInt32.mul a b
| U64 -> UInt64.mul a b
| S8 -> Int8.mul a b
| S16 -> Int16.mul a b
| S32 -> Int32.mul a b
| S64 -> Int64.mul a b
let mul_lemma #t #l a b = ()
let mul64_wide a b = UInt128.mul_wide a b
let mul64_wide_lemma a b = ()
let mul_s64_wide a b = Int128.mul_wide a b
let mul_s64_wide_lemma a b = ()
[@(strict_on_arguments [0])]
let sub_mod #t #l a b =
match t with
| U1 -> UInt8.rem (UInt8.sub_mod a b) 2uy
| U8 -> UInt8.sub_mod a b
| U16 -> UInt16.sub_mod a b
| U32 -> UInt32.sub_mod a b
| U64 -> UInt64.sub_mod a b
| U128 -> UInt128.sub_mod a b
let sub_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let sub #t #l a b =
match t with
| U1 -> UInt8.sub a b
| U8 -> UInt8.sub a b
| U16 -> UInt16.sub a b
| U32 -> UInt32.sub a b
| U64 -> UInt64.sub a b
| U128 -> UInt128.sub a b
| S8 -> Int8.sub a b
| S16 -> Int16.sub a b
| S32 -> Int32.sub a b
| S64 -> Int64.sub a b
| S128 -> Int128.sub a b
let sub_lemma #t #l a b = ()
#push-options "--max_fuel 1"
[@(strict_on_arguments [0])]
let decr #t #l a =
match t with
| U1 -> UInt8.sub a 1uy
| U8 -> UInt8.sub a 1uy
| U16 -> UInt16.sub a 1us
| U32 -> UInt32.sub a 1ul
| U64 -> UInt64.sub a 1uL
| U128 -> UInt128.sub a (UInt128.uint_to_t 1)
| S8 -> Int8.sub a 1y
| S16 -> Int16.sub a 1s
| S32 -> Int32.sub a 1l
| S64 -> Int64.sub a 1L
| S128 -> Int128.sub a (Int128.int_to_t 1)
let decr_lemma #t #l a = ()
#pop-options
[@(strict_on_arguments [0])]
let logxor #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logxor 0uy 0uy == 0uy);
assert_norm (UInt8.logxor 0uy 1uy == 1uy);
assert_norm (UInt8.logxor 1uy 0uy == 1uy);
assert_norm (UInt8.logxor 1uy 1uy == 0uy);
UInt8.logxor a b
| U8 -> UInt8.logxor a b
| U16 -> UInt16.logxor a b
| U32 -> UInt32.logxor a b
| U64 -> UInt64.logxor a b
| U128 -> UInt128.logxor a b
| S8 -> Int8.logxor a b
| S16 -> Int16.logxor a b
| S32 -> Int32.logxor a b
| S64 -> Int64.logxor a b
| S128 -> Int128.logxor a b
#push-options "--max_fuel 1"
val logxor_lemma_: #t:inttype -> #l:secrecy_level -> a:int_t t l -> b:int_t t l -> Lemma
(v (a `logxor` (a `logxor` b)) == v b)
let logxor_lemma_ #t #l a b =
match t with
| U1 | U8 | U16 | U32 | U64 | U128 ->
UInt.logxor_associative #(bits t) (v a) (v a) (v b);
UInt.logxor_self #(bits t) (v a);
UInt.logxor_commutative #(bits t) 0 (v b);
UInt.logxor_lemma_1 #(bits t) (v b)
| S8 | S16 | S32 | S64 | S128 ->
Int.logxor_associative #(bits t) (v a) (v a) (v b);
Int.logxor_self #(bits t) (v a);
Int.logxor_commutative #(bits t) 0 (v b);
Int.logxor_lemma_1 #(bits t) (v b)
let logxor_lemma #t #l a b =
logxor_lemma_ #t a b;
v_extensionality (logxor a (logxor a b)) b;
begin
match t with
| U1 | U8 | U16 | U32 | U64 | U128 -> UInt.logxor_commutative #(bits t) (v a) (v b)
| S8 | S16 | S32 | S64 | S128 -> Int.logxor_commutative #(bits t) (v a) (v b)
end;
v_extensionality (logxor a (logxor b a)) b;
begin
match t with
| U1 | U8 | U16 | U32 | U64 | U128 -> UInt.logxor_lemma_1 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.logxor_lemma_1 #(bits t) (v a)
end;
v_extensionality (logxor a (mk_int #t #l 0)) a
let logxor_lemma1 #t #l a b =
match v a, v b with
| _, 0 ->
UInt.logxor_lemma_1 #(bits t) (v a)
| 0, _ ->
UInt.logxor_commutative #(bits t) (v a) (v b);
UInt.logxor_lemma_1 #(bits t) (v b)
| 1, 1 ->
v_extensionality a b;
UInt.logxor_self #(bits t) (v a)
let logxor_spec #t #l a b =
match t with
| U1 ->
assert_norm (u1 0 `logxor` u1 0 == u1 0 /\ u1 0 `logxor` u1 1 == u1 1);
assert_norm (u1 1 `logxor` u1 0 == u1 1 /\ u1 1 `logxor` u1 1 == u1 0);
assert_norm (0 `logxor_v #U1` 0 == 0 /\ 0 `logxor_v #U1` 1 == 1);
assert_norm (1 `logxor_v #U1` 0 == 1 /\ 1 `logxor_v #U1` 1 == 0)
| _ -> ()
#pop-options
[@(strict_on_arguments [0])]
let logand #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logand 0uy 0uy == 0uy);
assert_norm (UInt8.logand 0uy 1uy == 0uy);
assert_norm (UInt8.logand 1uy 0uy == 0uy);
assert_norm (UInt8.logand 1uy 1uy == 1uy);
UInt8.logand a b
| U8 -> UInt8.logand a b
| U16 -> UInt16.logand a b
| U32 -> UInt32.logand a b
| U64 -> UInt64.logand a b
| U128 -> UInt128.logand a b
| S8 -> Int8.logand a b
| S16 -> Int16.logand a b
| S32 -> Int32.logand a b
| S64 -> Int64.logand a b
| S128 -> Int128.logand a b
let logand_zeros #t #l a =
match t with
| U1 -> assert_norm (u1 0 `logand` zeros U1 l == u1 0 /\ u1 1 `logand` zeros U1 l == u1 0)
| U8 | U16 | U32 | U64 | U128 -> UInt.logand_lemma_1 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.logand_lemma_1 #(bits t) (v a)
let logand_ones #t #l a =
match t with
| U1 -> assert_norm (u1 0 `logand` ones U1 l == u1 0 /\ u1 1 `logand` ones U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logand_lemma_2 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.logand_lemma_2 #(bits t) (v a)
let logand_lemma #t #l a b =
logand_zeros #t #l b;
logand_ones #t #l b;
match t with
| U1 ->
assert_norm (u1 0 `logand` zeros U1 l == u1 0 /\ u1 1 `logand` zeros U1 l == u1 0);
assert_norm (u1 0 `logand` ones U1 l == u1 0 /\ u1 1 `logand` ones U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logand_commutative #(bits t) (v a) (v b)
| S8 | S16 | S32 | S64 | S128 -> Int.logand_commutative #(bits t) (v a) (v b)
let logand_spec #t #l a b =
match t with
| U1 ->
assert_norm (u1 0 `logand` u1 0 == u1 0 /\ u1 0 `logand` u1 1 == u1 0);
assert_norm (u1 1 `logand` u1 0 == u1 0 /\ u1 1 `logand` u1 1 == u1 1);
assert_norm (0 `logand_v #U1` 0 == 0 /\ 0 `logand_v #U1` 1 == 0);
assert_norm (1 `logand_v #U1` 0 == 0 /\ 1 `logand_v #U1` 1 == 1)
| _ -> ()
let logand_le #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logand 0uy 0uy == 0uy);
assert_norm (UInt8.logand 0uy 1uy == 0uy);
assert_norm (UInt8.logand 1uy 0uy == 0uy);
assert_norm (UInt8.logand 1uy 1uy == 1uy)
| U8 -> UInt.logand_le (UInt.to_uint_t 8 (v a)) (UInt.to_uint_t 8 (v b))
| U16 -> UInt.logand_le (UInt.to_uint_t 16 (v a)) (UInt.to_uint_t 16 (v b))
| U32 -> UInt.logand_le (UInt.to_uint_t 32 (v a)) (UInt.to_uint_t 32 (v b))
| U64 -> UInt.logand_le (UInt.to_uint_t 64 (v a)) (UInt.to_uint_t 64 (v b))
| U128 -> UInt.logand_le (UInt.to_uint_t 128 (v a)) (UInt.to_uint_t 128 (v b))
let logand_mask #t #l a b m =
match t with
| U1 ->
assert_norm (UInt8.logand 0uy 0uy == 0uy);
assert_norm (UInt8.logand 0uy 1uy == 0uy);
assert_norm (UInt8.logand 1uy 0uy == 0uy);
assert_norm (UInt8.logand 1uy 1uy == 1uy)
| U8 -> UInt.logand_mask (UInt.to_uint_t 8 (v a)) m
| U16 -> UInt.logand_mask (UInt.to_uint_t 16 (v a)) m
| U32 -> UInt.logand_mask (UInt.to_uint_t 32 (v a)) m
| U64 -> UInt.logand_mask (UInt.to_uint_t 64 (v a)) m
| U128 -> UInt.logand_mask (UInt.to_uint_t 128 (v a)) m
[@(strict_on_arguments [0])]
let logor #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logor 0uy 0uy == 0uy);
assert_norm (UInt8.logor 0uy 1uy == 1uy);
assert_norm (UInt8.logor 1uy 0uy == 1uy);
assert_norm (UInt8.logor 1uy 1uy == 1uy);
UInt8.logor a b
| U8 -> UInt8.logor a b
| U16 -> UInt16.logor a b
| U32 -> UInt32.logor a b
| U64 -> UInt64.logor a b
| U128 -> UInt128.logor a b
| S8 -> Int8.logor a b
| S16 -> Int16.logor a b
| S32 -> Int32.logor a b
| S64 -> Int64.logor a b
| S128 -> Int128.logor a b
#push-options "--max_fuel 1"
let logor_disjoint #t #l a b m =
if m > 0 then
begin
UInt.logor_disjoint #(bits t) (v b) (v a) m;
UInt.logor_commutative #(bits t) (v b) (v a)
end
else
begin
UInt.logor_commutative #(bits t) (v a) (v b);
UInt.logor_lemma_1 #(bits t) (v b)
end
#pop-options
let logor_zeros #t #l a =
match t with
|U1 -> assert_norm(u1 0 `logor` zeros U1 l == u1 0 /\ u1 1 `logor` zeros U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logor_lemma_1 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.nth_lemma #(bits t) (Int.logor #(bits t) (v a) (Int.zero (bits t))) (v a)
let logor_ones #t #l a =
match t with
|U1 -> assert_norm(u1 0 `logor` ones U1 l == u1 1 /\ u1 1 `logor` ones U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logor_lemma_2 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.nth_lemma (Int.logor #(bits t) (v a) (Int.ones (bits t))) (Int.ones (bits t))
let logor_lemma #t #l a b =
logor_zeros #t #l b;
logor_ones #t #l b;
match t with
| U1 ->
assert_norm(u1 0 `logor` ones U1 l == u1 1 /\ u1 1 `logor` ones U1 l == u1 1);
assert_norm(u1 0 `logor` zeros U1 l == u1 0 /\ u1 1 `logor` zeros U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logor_commutative #(bits t) (v a) (v b)
| S8 | S16 | S32 | S64 | S128 -> Int.nth_lemma #(bits t) (Int.logor #(bits t) (v a) (v b)) (Int.logor #(bits t) (v b) (v a)) | false | false | Lib.IntTypes.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 200,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val logor_spec: #t:inttype -> #l:secrecy_level
-> a:int_t t l
-> b:int_t t l
-> Lemma (v (a `logor` b) == v a `logor_v` v b) | [] | Lib.IntTypes.logor_spec | {
"file_name": "lib/Lib.IntTypes.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | a: Lib.IntTypes.int_t t l -> b: Lib.IntTypes.int_t t l
-> FStar.Pervasives.Lemma
(ensures
Lib.IntTypes.v (Lib.IntTypes.logor a b) ==
Lib.IntTypes.logor_v (Lib.IntTypes.v a) (Lib.IntTypes.v b)) | {
"end_col": 11,
"end_line": 635,
"start_col": 2,
"start_line": 629
} |
FStar.Pervasives.Lemma | val lt_mask_lemma: #t:inttype{unsigned t} -> a:int_t t SEC -> b:int_t t SEC -> Lemma
(if v a < v b then v (lt_mask a b) == ones_v t
else v (lt_mask a b) == 0)
[SMTPat (lt_mask #t a b)] | [
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let lt_mask_lemma #t a b =
assert_norm (lognot (u1 1) == u1 0 /\ lognot (u1 0) == u1 1);
UInt.lognot_lemma_1 #(bits t);
UInt.lognot_self #(bits t) 0 | val lt_mask_lemma: #t:inttype{unsigned t} -> a:int_t t SEC -> b:int_t t SEC -> Lemma
(if v a < v b then v (lt_mask a b) == ones_v t
else v (lt_mask a b) == 0)
[SMTPat (lt_mask #t a b)]
let lt_mask_lemma #t a b = | false | null | true | assert_norm (lognot (u1 1) == u1 0 /\ lognot (u1 0) == u1 1);
UInt.lognot_lemma_1 #(bits t);
UInt.lognot_self #(bits t) 0 | {
"checked_file": "Lib.IntTypes.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt8.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt16.fsti.checked",
"FStar.UInt128.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int8.fsti.checked",
"FStar.Int64.fsti.checked",
"FStar.Int32.fsti.checked",
"FStar.Int16.fsti.checked",
"FStar.Int128.fsti.checked",
"FStar.Int.Cast.Full.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Int.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "Lib.IntTypes.fst"
} | [
"lemma"
] | [
"Lib.IntTypes.inttype",
"Prims.b2t",
"Lib.IntTypes.unsigned",
"Lib.IntTypes.int_t",
"Lib.IntTypes.SEC",
"FStar.UInt.lognot_self",
"Lib.IntTypes.bits",
"Prims.unit",
"FStar.UInt.lognot_lemma_1",
"FStar.Pervasives.assert_norm",
"Prims.l_and",
"Prims.eq2",
"Lib.IntTypes.U1",
"Lib.IntTypes.lognot",
"Lib.IntTypes.u1"
] | [] | module Lib.IntTypes
open FStar.Math.Lemmas
#push-options "--max_fuel 0 --max_ifuel 1 --z3rlimit 200"
let pow2_2 _ = assert_norm (pow2 2 = 4)
let pow2_3 _ = assert_norm (pow2 3 = 8)
let pow2_4 _ = assert_norm (pow2 4 = 16)
let pow2_127 _ = assert_norm (pow2 127 = 0x80000000000000000000000000000000)
let bits_numbytes t = ()
let sec_int_t t = pub_int_t t
let sec_int_v #t u = pub_int_v u
let secret #t x = x
[@(strict_on_arguments [0])]
let mk_int #t #l x =
match t with
| U1 -> UInt8.uint_to_t x
| U8 -> UInt8.uint_to_t x
| U16 -> UInt16.uint_to_t x
| U32 -> UInt32.uint_to_t x
| U64 -> UInt64.uint_to_t x
| U128 -> UInt128.uint_to_t x
| S8 -> Int8.int_to_t x
| S16 -> Int16.int_to_t x
| S32 -> Int32.int_to_t x
| S64 -> Int64.int_to_t x
| S128 -> Int128.int_to_t x
val v_extensionality:
#t:inttype
-> #l:secrecy_level
-> a:int_t t l
-> b:int_t t l
-> Lemma
(requires v a == v b)
(ensures a == b)
let v_extensionality #t #l a b =
match t with
| U1 -> ()
| U8 -> UInt8.v_inj a b
| U16 -> UInt16.v_inj a b
| U32 -> UInt32.v_inj a b
| U64 -> UInt64.v_inj a b
| U128 -> UInt128.v_inj a b
| S8 -> Int8.v_inj a b
| S16 -> Int16.v_inj a b
| S32 -> Int32.v_inj a b
| S64 -> Int64.v_inj a b
| S128 -> Int128.v_inj a b
let v_injective #t #l a =
v_extensionality a (mk_int (v a))
let v_mk_int #t #l n = ()
let u128 n = FStar.UInt128.uint64_to_uint128 (u64 n)
// KaRaMeL will extract this to FStar_Int128_int_to_t, which isn't provided
// We'll need to have FStar.Int128.int64_to_int128 to support int128_t literals
let i128 n =
assert_norm (pow2 (bits S64 - 1) <= pow2 (bits S128 - 1));
sint #S128 #SEC n
let size_to_uint32 x = x
let size_to_uint64 x = Int.Cast.uint32_to_uint64 x
let byte_to_uint8 x = x
let byte_to_int8 x = Int.Cast.uint8_to_int8 x
let op_At_Percent = Int.op_At_Percent
// FStar.UInt128 gets special treatment in KaRaMeL. There is no
// equivalent for FStar.Int128 at the moment, so we use the three
// assumed cast operators below.
//
// Using them will fail at runtime with an informative message.
// The commented-out implementations show that they are realizable.
//
// When support for `FStar.Int128` is added KaRaMeL, these casts must
// be added as special cases. When using builtin compiler support for
// `int128_t`, they can be implemented directly as C casts without
// undefined or implementation-defined behaviour.
assume
val uint128_to_int128: a:UInt128.t{v a <= maxint S128} -> b:Int128.t{Int128.v b == UInt128.v a}
//let uint128_to_int128 a = Int128.int_to_t (v a)
assume
val int128_to_uint128: a:Int128.t -> b:UInt128.t{UInt128.v b == Int128.v a % pow2 128}
//let int128_to_uint128 a = mk_int (v a % pow2 128)
assume
val int64_to_int128: a:Int64.t -> b:Int128.t{Int128.v b == Int64.v a}
//let int64_to_int128 a = Int128.int_to_t (v a)
val uint64_to_int128: a:UInt64.t -> b:Int128.t{Int128.v b == UInt64.v a}
let uint64_to_int128 a = uint128_to_int128 (Int.Cast.Full.uint64_to_uint128 a)
val int64_to_uint128: a:Int64.t -> b:UInt128.t{UInt128.v b == Int64.v a % pow2 128}
let int64_to_uint128 a = int128_to_uint128 (int64_to_int128 a)
val int128_to_uint64: a:Int128.t -> b:UInt64.t{UInt64.v b == Int128.v a % pow2 64}
let int128_to_uint64 a = Int.Cast.Full.uint128_to_uint64 (int128_to_uint128 a)
#push-options "--z3rlimit 1000"
[@(strict_on_arguments [0;2])]
let cast #t #l t' l' u =
assert_norm (pow2 8 = 2 * pow2 7);
assert_norm (pow2 16 = 2 * pow2 15);
assert_norm (pow2 64 * pow2 64 = pow2 128);
assert_norm (pow2 16 * pow2 48 = pow2 64);
assert_norm (pow2 8 * pow2 56 = pow2 64);
assert_norm (pow2 32 * pow2 32 = pow2 64);
modulo_modulo_lemma (v u) (pow2 32) (pow2 32);
modulo_modulo_lemma (v u) (pow2 64) (pow2 64);
modulo_modulo_lemma (v u) (pow2 128) (pow2 64);
modulo_modulo_lemma (v u) (pow2 16) (pow2 48);
modulo_modulo_lemma (v u) (pow2 8) (pow2 56);
let open FStar.Int.Cast in
let open FStar.Int.Cast.Full in
match t, t' with
| U1, U1 -> u
| U1, U8 -> u
| U1, U16 -> uint8_to_uint16 u
| U1, U32 -> uint8_to_uint32 u
| U1, U64 -> uint8_to_uint64 u
| U1, U128 -> UInt128.uint64_to_uint128 (uint8_to_uint64 u)
| U1, S8 -> uint8_to_int8 u
| U1, S16 -> uint8_to_int16 u
| U1, S32 -> uint8_to_int32 u
| U1, S64 -> uint8_to_int64 u
| U1, S128 -> uint64_to_int128 (uint8_to_uint64 u)
| U8, U1 -> UInt8.rem u 2uy
| U8, U8 -> u
| U8, U16 -> uint8_to_uint16 u
| U8, U32 -> uint8_to_uint32 u
| U8, U64 -> uint8_to_uint64 u
| U8, U128 -> UInt128.uint64_to_uint128 (uint8_to_uint64 u)
| U8, S8 -> uint8_to_int8 u
| U8, S16 -> uint8_to_int16 u
| U8, S32 -> uint8_to_int32 u
| U8, S64 -> uint8_to_int64 u
| U8, S128 -> uint64_to_int128 (uint8_to_uint64 u)
| U16, U1 -> UInt8.rem (uint16_to_uint8 u) 2uy
| U16, U8 -> uint16_to_uint8 u
| U16, U16 -> u
| U16, U32 -> uint16_to_uint32 u
| U16, U64 -> uint16_to_uint64 u
| U16, U128 -> UInt128.uint64_to_uint128 (uint16_to_uint64 u)
| U16, S8 -> uint16_to_int8 u
| U16, S16 -> uint16_to_int16 u
| U16, S32 -> uint16_to_int32 u
| U16, S64 -> uint16_to_int64 u
| U16, S128 -> uint64_to_int128 (uint16_to_uint64 u)
| U32, U1 -> UInt8.rem (uint32_to_uint8 u) 2uy
| U32, U8 -> uint32_to_uint8 u
| U32, U16 -> uint32_to_uint16 u
| U32, U32 -> u
| U32, U64 -> uint32_to_uint64 u
| U32, U128 -> UInt128.uint64_to_uint128 (uint32_to_uint64 u)
| U32, S8 -> uint32_to_int8 u
| U32, S16 -> uint32_to_int16 u
| U32, S32 -> uint32_to_int32 u
| U32, S64 -> uint32_to_int64 u
| U32, S128 -> uint64_to_int128 (uint32_to_uint64 u)
| U64, U1 -> UInt8.rem (uint64_to_uint8 u) 2uy
| U64, U8 -> uint64_to_uint8 u
| U64, U16 -> uint64_to_uint16 u
| U64, U32 -> uint64_to_uint32 u
| U64, U64 -> u
| U64, U128 -> UInt128.uint64_to_uint128 u
| U64, S8 -> uint64_to_int8 u
| U64, S16 -> uint64_to_int16 u
| U64, S32 -> uint64_to_int32 u
| U64, S64 -> uint64_to_int64 u
| U64, S128 -> uint64_to_int128 u
| U128, U1 -> UInt8.rem (uint64_to_uint8 (uint128_to_uint64 u)) 2uy
| U128, U8 -> uint64_to_uint8 (UInt128.uint128_to_uint64 u)
| U128, U16 -> uint64_to_uint16 (UInt128.uint128_to_uint64 u)
| U128, U32 -> uint64_to_uint32 (UInt128.uint128_to_uint64 u)
| U128, U64 -> UInt128.uint128_to_uint64 u
| U128, U128 -> u
| U128, S8 -> uint64_to_int8 (UInt128.uint128_to_uint64 u)
| U128, S16 -> uint64_to_int16 (UInt128.uint128_to_uint64 u)
| U128, S32 -> uint64_to_int32 (UInt128.uint128_to_uint64 u)
| U128, S64 -> uint64_to_int64 (UInt128.uint128_to_uint64 u)
| U128, S128 -> uint128_to_int128 u
| S8, U1 -> UInt8.rem (int8_to_uint8 u) 2uy
| S8, U8 -> int8_to_uint8 u
| S8, U16 -> int8_to_uint16 u
| S8, U32 -> int8_to_uint32 u
| S8, U64 -> int8_to_uint64 u
| S8, U128 -> int64_to_uint128 (int8_to_int64 u)
| S8, S8 -> u
| S8, S16 -> int8_to_int16 u
| S8, S32 -> int8_to_int32 u
| S8, S64 -> int8_to_int64 u
| S8, S128 -> int64_to_int128 (int8_to_int64 u)
| S16, U1 -> UInt8.rem (int16_to_uint8 u) 2uy
| S16, U8 -> int16_to_uint8 u
| S16, U16 -> int16_to_uint16 u
| S16, U32 -> int16_to_uint32 u
| S16, U64 -> int16_to_uint64 u
| S16, U128 -> int64_to_uint128 (int16_to_int64 u)
| S16, S8 -> int16_to_int8 u
| S16, S16 -> u
| S16, S32 -> int16_to_int32 u
| S16, S64 -> int16_to_int64 u
| S16, S128 -> int64_to_int128 (int16_to_int64 u)
| S32, U1 -> UInt8.rem (int32_to_uint8 u) 2uy
| S32, U8 -> int32_to_uint8 u
| S32, U16 -> int32_to_uint16 u
| S32, U32 -> int32_to_uint32 u
| S32, U64 -> int32_to_uint64 u
| S32, U128 -> int64_to_uint128 (int32_to_int64 u)
| S32, S8 -> int32_to_int8 u
| S32, S16 -> int32_to_int16 u
| S32, S32 -> u
| S32, S64 -> int32_to_int64 u
| S32, S128 -> int64_to_int128 (int32_to_int64 u)
| S64, U1 -> UInt8.rem (int64_to_uint8 u) 2uy
| S64, U8 -> int64_to_uint8 u
| S64, U16 -> int64_to_uint16 u
| S64, U32 -> int64_to_uint32 u
| S64, U64 -> int64_to_uint64 u
| S64, U128 -> int64_to_uint128 u
| S64, S8 -> int64_to_int8 u
| S64, S16 -> int64_to_int16 u
| S64, S32 -> int64_to_int32 u
| S64, S64 -> u
| S64, S128 -> int64_to_int128 u
| S128, U1 -> UInt8.rem (uint64_to_uint8 (int128_to_uint64 u)) 2uy
| S128, U8 -> uint64_to_uint8 (int128_to_uint64 u)
| S128, U16 -> uint64_to_uint16 (int128_to_uint64 u)
| S128, U32 -> uint64_to_uint32 (int128_to_uint64 u)
| S128, U64 -> int128_to_uint64 u
| S128, U128 -> int128_to_uint128 u
| S128, S8 -> uint64_to_int8 (int128_to_uint64 u)
| S128, S16 -> uint64_to_int16 (int128_to_uint64 u)
| S128, S32 -> uint64_to_int32 (int128_to_uint64 u)
| S128, S64 -> uint64_to_int64 (int128_to_uint64 u)
| S128, S128 -> u
#pop-options
[@(strict_on_arguments [0])]
let ones t l =
match t with
| U1 -> 0x1uy
| U8 -> 0xFFuy
| U16 -> 0xFFFFus
| U32 -> 0xFFFFFFFFul
| U64 -> 0xFFFFFFFFFFFFFFFFuL
| U128 ->
let x = UInt128.uint64_to_uint128 0xFFFFFFFFFFFFFFFFuL in
let y = (UInt128.shift_left x 64ul) `UInt128.add` x in
assert_norm (UInt128.v y == pow2 128 - 1);
y
| _ -> mk_int (-1)
let zeros t l = mk_int 0
[@(strict_on_arguments [0])]
let add_mod #t #l a b =
match t with
| U1 -> UInt8.rem (UInt8.add_mod a b) 2uy
| U8 -> UInt8.add_mod a b
| U16 -> UInt16.add_mod a b
| U32 -> UInt32.add_mod a b
| U64 -> UInt64.add_mod a b
| U128 -> UInt128.add_mod a b
let add_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let add #t #l a b =
match t with
| U1 -> UInt8.add a b
| U8 -> UInt8.add a b
| U16 -> UInt16.add a b
| U32 -> UInt32.add a b
| U64 -> UInt64.add a b
| U128 -> UInt128.add a b
| S8 -> Int8.add a b
| S16 -> Int16.add a b
| S32 -> Int32.add a b
| S64 -> Int64.add a b
| S128 -> Int128.add a b
let add_lemma #t #l a b = ()
#push-options "--max_fuel 1"
[@(strict_on_arguments [0])]
let incr #t #l a =
match t with
| U1 -> UInt8.add a 1uy
| U8 -> UInt8.add a 1uy
| U16 -> UInt16.add a 1us
| U32 -> UInt32.add a 1ul
| U64 -> UInt64.add a 1uL
| U128 -> UInt128.add a (UInt128.uint_to_t 1)
| S8 -> Int8.add a 1y
| S16 -> Int16.add a 1s
| S32 -> Int32.add a 1l
| S64 -> Int64.add a 1L
| S128 -> Int128.add a (Int128.int_to_t 1)
let incr_lemma #t #l a = ()
#pop-options
[@(strict_on_arguments [0])]
let mul_mod #t #l a b =
match t with
| U1 -> UInt8.mul_mod a b
| U8 -> UInt8.mul_mod a b
| U16 -> UInt16.mul_mod a b
| U32 -> UInt32.mul_mod a b
| U64 -> UInt64.mul_mod a b
let mul_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let mul #t #l a b =
match t with
| U1 -> UInt8.mul a b
| U8 -> UInt8.mul a b
| U16 -> UInt16.mul a b
| U32 -> UInt32.mul a b
| U64 -> UInt64.mul a b
| S8 -> Int8.mul a b
| S16 -> Int16.mul a b
| S32 -> Int32.mul a b
| S64 -> Int64.mul a b
let mul_lemma #t #l a b = ()
let mul64_wide a b = UInt128.mul_wide a b
let mul64_wide_lemma a b = ()
let mul_s64_wide a b = Int128.mul_wide a b
let mul_s64_wide_lemma a b = ()
[@(strict_on_arguments [0])]
let sub_mod #t #l a b =
match t with
| U1 -> UInt8.rem (UInt8.sub_mod a b) 2uy
| U8 -> UInt8.sub_mod a b
| U16 -> UInt16.sub_mod a b
| U32 -> UInt32.sub_mod a b
| U64 -> UInt64.sub_mod a b
| U128 -> UInt128.sub_mod a b
let sub_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let sub #t #l a b =
match t with
| U1 -> UInt8.sub a b
| U8 -> UInt8.sub a b
| U16 -> UInt16.sub a b
| U32 -> UInt32.sub a b
| U64 -> UInt64.sub a b
| U128 -> UInt128.sub a b
| S8 -> Int8.sub a b
| S16 -> Int16.sub a b
| S32 -> Int32.sub a b
| S64 -> Int64.sub a b
| S128 -> Int128.sub a b
let sub_lemma #t #l a b = ()
#push-options "--max_fuel 1"
[@(strict_on_arguments [0])]
let decr #t #l a =
match t with
| U1 -> UInt8.sub a 1uy
| U8 -> UInt8.sub a 1uy
| U16 -> UInt16.sub a 1us
| U32 -> UInt32.sub a 1ul
| U64 -> UInt64.sub a 1uL
| U128 -> UInt128.sub a (UInt128.uint_to_t 1)
| S8 -> Int8.sub a 1y
| S16 -> Int16.sub a 1s
| S32 -> Int32.sub a 1l
| S64 -> Int64.sub a 1L
| S128 -> Int128.sub a (Int128.int_to_t 1)
let decr_lemma #t #l a = ()
#pop-options
[@(strict_on_arguments [0])]
let logxor #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logxor 0uy 0uy == 0uy);
assert_norm (UInt8.logxor 0uy 1uy == 1uy);
assert_norm (UInt8.logxor 1uy 0uy == 1uy);
assert_norm (UInt8.logxor 1uy 1uy == 0uy);
UInt8.logxor a b
| U8 -> UInt8.logxor a b
| U16 -> UInt16.logxor a b
| U32 -> UInt32.logxor a b
| U64 -> UInt64.logxor a b
| U128 -> UInt128.logxor a b
| S8 -> Int8.logxor a b
| S16 -> Int16.logxor a b
| S32 -> Int32.logxor a b
| S64 -> Int64.logxor a b
| S128 -> Int128.logxor a b
#push-options "--max_fuel 1"
val logxor_lemma_: #t:inttype -> #l:secrecy_level -> a:int_t t l -> b:int_t t l -> Lemma
(v (a `logxor` (a `logxor` b)) == v b)
let logxor_lemma_ #t #l a b =
match t with
| U1 | U8 | U16 | U32 | U64 | U128 ->
UInt.logxor_associative #(bits t) (v a) (v a) (v b);
UInt.logxor_self #(bits t) (v a);
UInt.logxor_commutative #(bits t) 0 (v b);
UInt.logxor_lemma_1 #(bits t) (v b)
| S8 | S16 | S32 | S64 | S128 ->
Int.logxor_associative #(bits t) (v a) (v a) (v b);
Int.logxor_self #(bits t) (v a);
Int.logxor_commutative #(bits t) 0 (v b);
Int.logxor_lemma_1 #(bits t) (v b)
let logxor_lemma #t #l a b =
logxor_lemma_ #t a b;
v_extensionality (logxor a (logxor a b)) b;
begin
match t with
| U1 | U8 | U16 | U32 | U64 | U128 -> UInt.logxor_commutative #(bits t) (v a) (v b)
| S8 | S16 | S32 | S64 | S128 -> Int.logxor_commutative #(bits t) (v a) (v b)
end;
v_extensionality (logxor a (logxor b a)) b;
begin
match t with
| U1 | U8 | U16 | U32 | U64 | U128 -> UInt.logxor_lemma_1 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.logxor_lemma_1 #(bits t) (v a)
end;
v_extensionality (logxor a (mk_int #t #l 0)) a
let logxor_lemma1 #t #l a b =
match v a, v b with
| _, 0 ->
UInt.logxor_lemma_1 #(bits t) (v a)
| 0, _ ->
UInt.logxor_commutative #(bits t) (v a) (v b);
UInt.logxor_lemma_1 #(bits t) (v b)
| 1, 1 ->
v_extensionality a b;
UInt.logxor_self #(bits t) (v a)
let logxor_spec #t #l a b =
match t with
| U1 ->
assert_norm (u1 0 `logxor` u1 0 == u1 0 /\ u1 0 `logxor` u1 1 == u1 1);
assert_norm (u1 1 `logxor` u1 0 == u1 1 /\ u1 1 `logxor` u1 1 == u1 0);
assert_norm (0 `logxor_v #U1` 0 == 0 /\ 0 `logxor_v #U1` 1 == 1);
assert_norm (1 `logxor_v #U1` 0 == 1 /\ 1 `logxor_v #U1` 1 == 0)
| _ -> ()
#pop-options
[@(strict_on_arguments [0])]
let logand #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logand 0uy 0uy == 0uy);
assert_norm (UInt8.logand 0uy 1uy == 0uy);
assert_norm (UInt8.logand 1uy 0uy == 0uy);
assert_norm (UInt8.logand 1uy 1uy == 1uy);
UInt8.logand a b
| U8 -> UInt8.logand a b
| U16 -> UInt16.logand a b
| U32 -> UInt32.logand a b
| U64 -> UInt64.logand a b
| U128 -> UInt128.logand a b
| S8 -> Int8.logand a b
| S16 -> Int16.logand a b
| S32 -> Int32.logand a b
| S64 -> Int64.logand a b
| S128 -> Int128.logand a b
let logand_zeros #t #l a =
match t with
| U1 -> assert_norm (u1 0 `logand` zeros U1 l == u1 0 /\ u1 1 `logand` zeros U1 l == u1 0)
| U8 | U16 | U32 | U64 | U128 -> UInt.logand_lemma_1 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.logand_lemma_1 #(bits t) (v a)
let logand_ones #t #l a =
match t with
| U1 -> assert_norm (u1 0 `logand` ones U1 l == u1 0 /\ u1 1 `logand` ones U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logand_lemma_2 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.logand_lemma_2 #(bits t) (v a)
let logand_lemma #t #l a b =
logand_zeros #t #l b;
logand_ones #t #l b;
match t with
| U1 ->
assert_norm (u1 0 `logand` zeros U1 l == u1 0 /\ u1 1 `logand` zeros U1 l == u1 0);
assert_norm (u1 0 `logand` ones U1 l == u1 0 /\ u1 1 `logand` ones U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logand_commutative #(bits t) (v a) (v b)
| S8 | S16 | S32 | S64 | S128 -> Int.logand_commutative #(bits t) (v a) (v b)
let logand_spec #t #l a b =
match t with
| U1 ->
assert_norm (u1 0 `logand` u1 0 == u1 0 /\ u1 0 `logand` u1 1 == u1 0);
assert_norm (u1 1 `logand` u1 0 == u1 0 /\ u1 1 `logand` u1 1 == u1 1);
assert_norm (0 `logand_v #U1` 0 == 0 /\ 0 `logand_v #U1` 1 == 0);
assert_norm (1 `logand_v #U1` 0 == 0 /\ 1 `logand_v #U1` 1 == 1)
| _ -> ()
let logand_le #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logand 0uy 0uy == 0uy);
assert_norm (UInt8.logand 0uy 1uy == 0uy);
assert_norm (UInt8.logand 1uy 0uy == 0uy);
assert_norm (UInt8.logand 1uy 1uy == 1uy)
| U8 -> UInt.logand_le (UInt.to_uint_t 8 (v a)) (UInt.to_uint_t 8 (v b))
| U16 -> UInt.logand_le (UInt.to_uint_t 16 (v a)) (UInt.to_uint_t 16 (v b))
| U32 -> UInt.logand_le (UInt.to_uint_t 32 (v a)) (UInt.to_uint_t 32 (v b))
| U64 -> UInt.logand_le (UInt.to_uint_t 64 (v a)) (UInt.to_uint_t 64 (v b))
| U128 -> UInt.logand_le (UInt.to_uint_t 128 (v a)) (UInt.to_uint_t 128 (v b))
let logand_mask #t #l a b m =
match t with
| U1 ->
assert_norm (UInt8.logand 0uy 0uy == 0uy);
assert_norm (UInt8.logand 0uy 1uy == 0uy);
assert_norm (UInt8.logand 1uy 0uy == 0uy);
assert_norm (UInt8.logand 1uy 1uy == 1uy)
| U8 -> UInt.logand_mask (UInt.to_uint_t 8 (v a)) m
| U16 -> UInt.logand_mask (UInt.to_uint_t 16 (v a)) m
| U32 -> UInt.logand_mask (UInt.to_uint_t 32 (v a)) m
| U64 -> UInt.logand_mask (UInt.to_uint_t 64 (v a)) m
| U128 -> UInt.logand_mask (UInt.to_uint_t 128 (v a)) m
[@(strict_on_arguments [0])]
let logor #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logor 0uy 0uy == 0uy);
assert_norm (UInt8.logor 0uy 1uy == 1uy);
assert_norm (UInt8.logor 1uy 0uy == 1uy);
assert_norm (UInt8.logor 1uy 1uy == 1uy);
UInt8.logor a b
| U8 -> UInt8.logor a b
| U16 -> UInt16.logor a b
| U32 -> UInt32.logor a b
| U64 -> UInt64.logor a b
| U128 -> UInt128.logor a b
| S8 -> Int8.logor a b
| S16 -> Int16.logor a b
| S32 -> Int32.logor a b
| S64 -> Int64.logor a b
| S128 -> Int128.logor a b
#push-options "--max_fuel 1"
let logor_disjoint #t #l a b m =
if m > 0 then
begin
UInt.logor_disjoint #(bits t) (v b) (v a) m;
UInt.logor_commutative #(bits t) (v b) (v a)
end
else
begin
UInt.logor_commutative #(bits t) (v a) (v b);
UInt.logor_lemma_1 #(bits t) (v b)
end
#pop-options
let logor_zeros #t #l a =
match t with
|U1 -> assert_norm(u1 0 `logor` zeros U1 l == u1 0 /\ u1 1 `logor` zeros U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logor_lemma_1 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.nth_lemma #(bits t) (Int.logor #(bits t) (v a) (Int.zero (bits t))) (v a)
let logor_ones #t #l a =
match t with
|U1 -> assert_norm(u1 0 `logor` ones U1 l == u1 1 /\ u1 1 `logor` ones U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logor_lemma_2 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.nth_lemma (Int.logor #(bits t) (v a) (Int.ones (bits t))) (Int.ones (bits t))
let logor_lemma #t #l a b =
logor_zeros #t #l b;
logor_ones #t #l b;
match t with
| U1 ->
assert_norm(u1 0 `logor` ones U1 l == u1 1 /\ u1 1 `logor` ones U1 l == u1 1);
assert_norm(u1 0 `logor` zeros U1 l == u1 0 /\ u1 1 `logor` zeros U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logor_commutative #(bits t) (v a) (v b)
| S8 | S16 | S32 | S64 | S128 -> Int.nth_lemma #(bits t) (Int.logor #(bits t) (v a) (v b)) (Int.logor #(bits t) (v b) (v a))
let logor_spec #t #l a b =
match t with
| U1 ->
assert_norm(u1 0 `logor` ones U1 l == u1 1 /\ u1 1 `logor` ones U1 l == u1 1);
assert_norm(u1 0 `logor` zeros U1 l == u1 0 /\ u1 1 `logor` zeros U1 l == u1 1);
assert_norm (0 `logor_v #U1` 0 == 0 /\ 0 `logor_v #U1` 1 == 1);
assert_norm (1 `logor_v #U1` 0 == 1 /\ 1 `logor_v #U1` 1 == 1)
| _ -> ()
[@(strict_on_arguments [0])]
let lognot #t #l a =
match t with
| U1 -> UInt8.rem (UInt8.lognot a) 2uy
| U8 -> UInt8.lognot a
| U16 -> UInt16.lognot a
| U32 -> UInt32.lognot a
| U64 -> UInt64.lognot a
| U128 -> UInt128.lognot a
| S8 -> Int8.lognot a
| S16 -> Int16.lognot a
| S32 -> Int32.lognot a
| S64 -> Int64.lognot a
| S128 -> Int128.lognot a
let lognot_lemma #t #l a =
match t with
|U1 -> assert_norm(lognot (u1 0) == u1 1 /\ lognot (u1 1) == u1 0)
| U8 | U16 | U32 | U64 | U128 ->
FStar.UInt.lognot_lemma_1 #(bits t);
UInt.nth_lemma (FStar.UInt.lognot #(bits t) (UInt.ones (bits t))) (UInt.zero (bits t))
| S8 | S16 | S32 | S64 | S128 ->
Int.nth_lemma (FStar.Int.lognot #(bits t) (Int.zero (bits t))) (Int.ones (bits t));
Int.nth_lemma (FStar.Int.lognot #(bits t) (Int.ones (bits t))) (Int.zero (bits t))
let lognot_spec #t #l a =
match t with
| U1 ->
assert_norm(lognot (u1 0) == u1 1 /\ lognot (u1 1) == u1 0);
assert_norm(lognot_v #U1 0 == 1 /\ lognot_v #U1 1 == 0)
| _ -> ()
[@(strict_on_arguments [0])]
let shift_right #t #l a b =
match t with
| U1 -> UInt8.shift_right a b
| U8 -> UInt8.shift_right a b
| U16 -> UInt16.shift_right a b
| U32 -> UInt32.shift_right a b
| U64 -> UInt64.shift_right a b
| U128 -> UInt128.shift_right a b
| S8 -> Int8.shift_arithmetic_right a b
| S16 -> Int16.shift_arithmetic_right a b
| S32 -> Int32.shift_arithmetic_right a b
| S64 -> Int64.shift_arithmetic_right a b
| S128 -> Int128.shift_arithmetic_right a b
val shift_right_value_aux_1: #n:pos{1 < n} -> a:Int.int_t n -> s:nat{n <= s} ->
Lemma (Int.shift_arithmetic_right #n a s = a / pow2 s)
let shift_right_value_aux_1 #n a s =
pow2_le_compat s n;
if a >= 0 then Int.sign_bit_positive a else Int.sign_bit_negative a
#push-options "--z3rlimit 200"
val shift_right_value_aux_2: #n:pos{1 < n} -> a:Int.int_t n ->
Lemma (Int.shift_arithmetic_right #n a 1 = a / 2)
let shift_right_value_aux_2 #n a =
if a >= 0 then
begin
Int.sign_bit_positive a;
UInt.shift_right_value_aux_3 #n a 1
end
else
begin
Int.sign_bit_negative a;
let a1 = Int.to_vec a in
let au = Int.to_uint a in
let sar = Int.shift_arithmetic_right #n a 1 in
let sar1 = Int.to_vec sar in
let sr = UInt.shift_right #n au 1 in
let sr1 = UInt.to_vec sr in
assert (Seq.equal (Seq.slice sar1 1 n) (Seq.slice sr1 1 n));
assert (Seq.equal sar1 (Seq.append (BitVector.ones_vec #1) (Seq.slice sr1 1 n)));
UInt.append_lemma #1 #(n-1) (BitVector.ones_vec #1) (Seq.slice sr1 1 n);
assert (Seq.equal (Seq.slice a1 0 (n-1)) (Seq.slice sar1 1 n));
UInt.slice_left_lemma a1 (n-1);
assert (sar + pow2 n = pow2 (n-1) + (au / 2));
pow2_double_sum (n-1);
assert (sar + pow2 (n-1) = (a + pow2 n) / 2);
pow2_double_mult (n-1);
lemma_div_plus a (pow2 (n-1)) 2;
assert (sar = a / 2)
end
val shift_right_value_aux_3: #n:pos -> a:Int.int_t n -> s:pos{s < n} ->
Lemma (ensures Int.shift_arithmetic_right #n a s = a / pow2 s)
(decreases s)
let rec shift_right_value_aux_3 #n a s =
if s = 1 then
shift_right_value_aux_2 #n a
else
begin
let a1 = Int.to_vec a in
assert (Seq.equal (BitVector.shift_arithmetic_right_vec #n a1 s)
(BitVector.shift_arithmetic_right_vec #n
(BitVector.shift_arithmetic_right_vec #n a1 (s-1)) 1));
assert (Int.shift_arithmetic_right #n a s =
Int.shift_arithmetic_right #n (Int.shift_arithmetic_right #n a (s-1)) 1);
shift_right_value_aux_3 #n a (s-1);
shift_right_value_aux_2 #n (Int.shift_arithmetic_right #n a (s-1));
assert (Int.shift_arithmetic_right #n a s = (a / pow2 (s-1)) / 2);
pow2_double_mult (s-1);
division_multiplication_lemma a (pow2 (s-1)) 2
end
let shift_right_lemma #t #l a b =
match t with
| U1 | U8 | U16 | U32 | U64 | U128 -> ()
| S8 | S16 | S32 | S64 | S128 ->
if v b = 0 then ()
else if v b >= bits t then
shift_right_value_aux_1 #(bits t) (v a) (v b)
else
shift_right_value_aux_3 #(bits t) (v a) (v b)
[@(strict_on_arguments [0])]
let shift_left #t #l a b =
match t with
| U1 -> UInt8.shift_left a b
| U8 -> UInt8.shift_left a b
| U16 -> UInt16.shift_left a b
| U32 -> UInt32.shift_left a b
| U64 -> UInt64.shift_left a b
| U128 -> UInt128.shift_left a b
| S8 -> Int8.shift_left a b
| S16 -> Int16.shift_left a b
| S32 -> Int32.shift_left a b
| S64 -> Int64.shift_left a b
| S128 -> Int128.shift_left a b
#push-options "--max_fuel 1"
let shift_left_lemma #t #l a b = ()
let rotate_right #t #l a b =
logor (shift_right a b) (shift_left a (sub #U32 (size (bits t)) b))
let rotate_left #t #l a b =
logor (shift_left a b) (shift_right a (sub #U32 (size (bits t)) b))
[@(strict_on_arguments [0])]
let ct_abs #t #l a =
match t with
| S8 -> Int8.ct_abs a
| S16 -> Int16.ct_abs a
| S32 -> Int32.ct_abs a
| S64 -> Int64.ct_abs a
#pop-options
[@(strict_on_arguments [0])]
let eq_mask #t a b =
match t with
| U1 -> lognot (logxor a b)
| U8 -> UInt8.eq_mask a b
| U16 -> UInt16.eq_mask a b
| U32 -> UInt32.eq_mask a b
| U64 -> UInt64.eq_mask a b
| U128 -> UInt128.eq_mask a b
| S8 -> Int.Cast.uint8_to_int8 (UInt8.eq_mask (to_u8 a) (to_u8 b))
| S16 -> Int.Cast.uint16_to_int16 (UInt16.eq_mask (to_u16 a) (to_u16 b))
| S32 -> Int.Cast.uint32_to_int32 (UInt32.eq_mask (to_u32 a) (to_u32 b))
| S64 -> Int.Cast.uint64_to_int64 (UInt64.eq_mask (to_u64 a) (to_u64 b))
val eq_mask_lemma_unsigned: #t:inttype{unsigned t} -> a:int_t t SEC -> b:int_t t SEC -> Lemma
(if v a = v b then v (eq_mask a b) == ones_v t
else v (eq_mask a b) == 0)
let eq_mask_lemma_unsigned #t a b =
match t with
| U1 ->
assert_norm (
logxor (u1 0) (u1 0) == u1 0 /\ logxor (u1 0) (u1 1) == u1 1 /\
logxor (u1 1) (u1 0) == u1 1 /\ logxor (u1 1) (u1 1) == u1 0 /\
lognot (u1 1) == u1 0 /\ lognot (u1 0) == u1 1)
| U8 | U16 | U32 | U64 | U128 -> ()
#push-options "--z3rlimit 200"
val eq_mask_lemma_signed: #t:inttype{signed t /\ ~(S128? t)} -> a:int_t t SEC -> b:int_t t SEC -> Lemma
(if v a = v b then v (eq_mask a b) == ones_v t
else v (eq_mask a b) == 0)
let eq_mask_lemma_signed #t a b =
match t with
| S8 ->
begin
assert_norm (pow2 8 = 2 * pow2 7);
if 0 <= v a then modulo_lemma (v a) (pow2 8)
else
begin
modulo_addition_lemma (v a) 1 (pow2 8);
modulo_lemma (v a + pow2 8) (pow2 8)
end
end
| S16 ->
begin
assert_norm (pow2 16 = 2 * pow2 15);
if 0 <= v a then modulo_lemma (v a) (pow2 16)
else
begin
modulo_addition_lemma (v a) 1 (pow2 16);
modulo_lemma (v a + pow2 16) (pow2 16)
end
end
| S32 ->
begin
if 0 <= v a then modulo_lemma (v a) (pow2 32)
else
begin
modulo_addition_lemma (v a) 1 (pow2 32);
modulo_lemma (v a + pow2 32) (pow2 32)
end
end
| S64 ->
begin
if 0 <= v a then modulo_lemma (v a) (pow2 64)
else
begin
modulo_addition_lemma (v a) 1 (pow2 64);
modulo_lemma (v a + pow2 64) (pow2 64)
end
end
#pop-options
let eq_mask_lemma #t a b =
if signed t then eq_mask_lemma_signed a b
else eq_mask_lemma_unsigned a b
let eq_mask_logand_lemma #t a b c =
eq_mask_lemma a b;
logand_zeros c;
logand_ones c;
match t with
| U1 | U8 | U16 | U32 | U64 | U128 -> UInt.logand_commutative #(bits t) (v (eq_mask a b)) (v c)
| S8 | S16 | S32 | S64 -> Int.logand_commutative #(bits t) (v (eq_mask a b)) (v c)
[@(strict_on_arguments [0])]
let neq_mask #t a b = lognot (eq_mask #t a b)
let neq_mask_lemma #t a b =
match t with
| U1 -> assert_norm (lognot (u1 1) == u1 0 /\ lognot (u1 0) == u1 1)
| _ ->
UInt.lognot_lemma_1 #(bits t);
UInt.lognot_self #(bits t) 0
[@(strict_on_arguments [0])]
let gte_mask #t a b =
match t with
| U1 -> logor a (lognot b)
| U8 -> UInt8.gte_mask a b
| U16 -> UInt16.gte_mask a b
| U32 -> UInt32.gte_mask a b
| U64 -> UInt64.gte_mask a b
| U128 -> UInt128.gte_mask a b
let gte_mask_lemma #t a b =
match t with
| U1 ->
begin
assert_norm (
logor (u1 0) (u1 0) == u1 0 /\ logor (u1 1) (u1 1) == u1 1 /\
logor (u1 0) (u1 1) == u1 1 /\ logor (u1 1) (u1 0) == u1 1 /\
lognot (u1 1) == u1 0 /\ lognot (u1 0) == u1 1)
end
| _ -> ()
let gte_mask_logand_lemma #t a b c =
logand_zeros c;
logand_ones c;
match t with
| U1 ->
assert_norm (
logor (u1 0) (u1 0) == u1 0 /\ logor (u1 1) (u1 1) == u1 1 /\
logor (u1 0) (u1 1) == u1 1 /\ logor (u1 1) (u1 0) == u1 1 /\
lognot (u1 1) == u1 0 /\ lognot (u1 0) == u1 1)
| _ -> UInt.logand_commutative #(bits t) (v (gte_mask a b)) (v c)
let lt_mask #t a b = lognot (gte_mask a b) | false | false | Lib.IntTypes.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 200,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val lt_mask_lemma: #t:inttype{unsigned t} -> a:int_t t SEC -> b:int_t t SEC -> Lemma
(if v a < v b then v (lt_mask a b) == ones_v t
else v (lt_mask a b) == 0)
[SMTPat (lt_mask #t a b)] | [] | Lib.IntTypes.lt_mask_lemma | {
"file_name": "lib/Lib.IntTypes.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | a: Lib.IntTypes.int_t t Lib.IntTypes.SEC -> b: Lib.IntTypes.int_t t Lib.IntTypes.SEC
-> FStar.Pervasives.Lemma
(ensures
((match Lib.IntTypes.v a < Lib.IntTypes.v b with
| true -> Lib.IntTypes.v (Lib.IntTypes.lt_mask a b) == Lib.IntTypes.ones_v t
| _ -> Lib.IntTypes.v (Lib.IntTypes.lt_mask a b) == 0)
<:
Type0)) [SMTPat (Lib.IntTypes.lt_mask a b)] | {
"end_col": 30,
"end_line": 923,
"start_col": 2,
"start_line": 921
} |
FStar.Pervasives.Lemma | val gte_mask_logand_lemma: #t:inttype{unsigned t}
-> a:int_t t SEC
-> b:int_t t SEC
-> c:int_t t SEC
-> Lemma
(if v a >= v b then v (c `logand` gte_mask a b) == v c
else v (c `logand` gte_mask a b) == 0)
[SMTPat (c `logand` gte_mask a b)] | [
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let gte_mask_logand_lemma #t a b c =
logand_zeros c;
logand_ones c;
match t with
| U1 ->
assert_norm (
logor (u1 0) (u1 0) == u1 0 /\ logor (u1 1) (u1 1) == u1 1 /\
logor (u1 0) (u1 1) == u1 1 /\ logor (u1 1) (u1 0) == u1 1 /\
lognot (u1 1) == u1 0 /\ lognot (u1 0) == u1 1)
| _ -> UInt.logand_commutative #(bits t) (v (gte_mask a b)) (v c) | val gte_mask_logand_lemma: #t:inttype{unsigned t}
-> a:int_t t SEC
-> b:int_t t SEC
-> c:int_t t SEC
-> Lemma
(if v a >= v b then v (c `logand` gte_mask a b) == v c
else v (c `logand` gte_mask a b) == 0)
[SMTPat (c `logand` gte_mask a b)]
let gte_mask_logand_lemma #t a b c = | false | null | true | logand_zeros c;
logand_ones c;
match t with
| U1 ->
assert_norm (logor (u1 0) (u1 0) == u1 0 /\ logor (u1 1) (u1 1) == u1 1 /\
logor (u1 0) (u1 1) == u1 1 /\ logor (u1 1) (u1 0) == u1 1 /\ lognot (u1 1) == u1 0 /\
lognot (u1 0) == u1 1)
| _ -> UInt.logand_commutative #(bits t) (v (gte_mask a b)) (v c) | {
"checked_file": "Lib.IntTypes.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt8.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt16.fsti.checked",
"FStar.UInt128.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int8.fsti.checked",
"FStar.Int64.fsti.checked",
"FStar.Int32.fsti.checked",
"FStar.Int16.fsti.checked",
"FStar.Int128.fsti.checked",
"FStar.Int.Cast.Full.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Int.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "Lib.IntTypes.fst"
} | [
"lemma"
] | [
"Lib.IntTypes.inttype",
"Prims.b2t",
"Lib.IntTypes.unsigned",
"Lib.IntTypes.int_t",
"Lib.IntTypes.SEC",
"FStar.Pervasives.assert_norm",
"Prims.l_and",
"Prims.eq2",
"Lib.IntTypes.U1",
"Lib.IntTypes.logor",
"Lib.IntTypes.u1",
"Lib.IntTypes.lognot",
"FStar.UInt.logand_commutative",
"Lib.IntTypes.bits",
"Lib.IntTypes.v",
"Lib.IntTypes.gte_mask",
"Prims.unit",
"Lib.IntTypes.logand_ones",
"Lib.IntTypes.logand_zeros"
] | [] | module Lib.IntTypes
open FStar.Math.Lemmas
#push-options "--max_fuel 0 --max_ifuel 1 --z3rlimit 200"
let pow2_2 _ = assert_norm (pow2 2 = 4)
let pow2_3 _ = assert_norm (pow2 3 = 8)
let pow2_4 _ = assert_norm (pow2 4 = 16)
let pow2_127 _ = assert_norm (pow2 127 = 0x80000000000000000000000000000000)
let bits_numbytes t = ()
let sec_int_t t = pub_int_t t
let sec_int_v #t u = pub_int_v u
let secret #t x = x
[@(strict_on_arguments [0])]
let mk_int #t #l x =
match t with
| U1 -> UInt8.uint_to_t x
| U8 -> UInt8.uint_to_t x
| U16 -> UInt16.uint_to_t x
| U32 -> UInt32.uint_to_t x
| U64 -> UInt64.uint_to_t x
| U128 -> UInt128.uint_to_t x
| S8 -> Int8.int_to_t x
| S16 -> Int16.int_to_t x
| S32 -> Int32.int_to_t x
| S64 -> Int64.int_to_t x
| S128 -> Int128.int_to_t x
val v_extensionality:
#t:inttype
-> #l:secrecy_level
-> a:int_t t l
-> b:int_t t l
-> Lemma
(requires v a == v b)
(ensures a == b)
let v_extensionality #t #l a b =
match t with
| U1 -> ()
| U8 -> UInt8.v_inj a b
| U16 -> UInt16.v_inj a b
| U32 -> UInt32.v_inj a b
| U64 -> UInt64.v_inj a b
| U128 -> UInt128.v_inj a b
| S8 -> Int8.v_inj a b
| S16 -> Int16.v_inj a b
| S32 -> Int32.v_inj a b
| S64 -> Int64.v_inj a b
| S128 -> Int128.v_inj a b
let v_injective #t #l a =
v_extensionality a (mk_int (v a))
let v_mk_int #t #l n = ()
let u128 n = FStar.UInt128.uint64_to_uint128 (u64 n)
// KaRaMeL will extract this to FStar_Int128_int_to_t, which isn't provided
// We'll need to have FStar.Int128.int64_to_int128 to support int128_t literals
let i128 n =
assert_norm (pow2 (bits S64 - 1) <= pow2 (bits S128 - 1));
sint #S128 #SEC n
let size_to_uint32 x = x
let size_to_uint64 x = Int.Cast.uint32_to_uint64 x
let byte_to_uint8 x = x
let byte_to_int8 x = Int.Cast.uint8_to_int8 x
let op_At_Percent = Int.op_At_Percent
// FStar.UInt128 gets special treatment in KaRaMeL. There is no
// equivalent for FStar.Int128 at the moment, so we use the three
// assumed cast operators below.
//
// Using them will fail at runtime with an informative message.
// The commented-out implementations show that they are realizable.
//
// When support for `FStar.Int128` is added KaRaMeL, these casts must
// be added as special cases. When using builtin compiler support for
// `int128_t`, they can be implemented directly as C casts without
// undefined or implementation-defined behaviour.
assume
val uint128_to_int128: a:UInt128.t{v a <= maxint S128} -> b:Int128.t{Int128.v b == UInt128.v a}
//let uint128_to_int128 a = Int128.int_to_t (v a)
assume
val int128_to_uint128: a:Int128.t -> b:UInt128.t{UInt128.v b == Int128.v a % pow2 128}
//let int128_to_uint128 a = mk_int (v a % pow2 128)
assume
val int64_to_int128: a:Int64.t -> b:Int128.t{Int128.v b == Int64.v a}
//let int64_to_int128 a = Int128.int_to_t (v a)
val uint64_to_int128: a:UInt64.t -> b:Int128.t{Int128.v b == UInt64.v a}
let uint64_to_int128 a = uint128_to_int128 (Int.Cast.Full.uint64_to_uint128 a)
val int64_to_uint128: a:Int64.t -> b:UInt128.t{UInt128.v b == Int64.v a % pow2 128}
let int64_to_uint128 a = int128_to_uint128 (int64_to_int128 a)
val int128_to_uint64: a:Int128.t -> b:UInt64.t{UInt64.v b == Int128.v a % pow2 64}
let int128_to_uint64 a = Int.Cast.Full.uint128_to_uint64 (int128_to_uint128 a)
#push-options "--z3rlimit 1000"
[@(strict_on_arguments [0;2])]
let cast #t #l t' l' u =
assert_norm (pow2 8 = 2 * pow2 7);
assert_norm (pow2 16 = 2 * pow2 15);
assert_norm (pow2 64 * pow2 64 = pow2 128);
assert_norm (pow2 16 * pow2 48 = pow2 64);
assert_norm (pow2 8 * pow2 56 = pow2 64);
assert_norm (pow2 32 * pow2 32 = pow2 64);
modulo_modulo_lemma (v u) (pow2 32) (pow2 32);
modulo_modulo_lemma (v u) (pow2 64) (pow2 64);
modulo_modulo_lemma (v u) (pow2 128) (pow2 64);
modulo_modulo_lemma (v u) (pow2 16) (pow2 48);
modulo_modulo_lemma (v u) (pow2 8) (pow2 56);
let open FStar.Int.Cast in
let open FStar.Int.Cast.Full in
match t, t' with
| U1, U1 -> u
| U1, U8 -> u
| U1, U16 -> uint8_to_uint16 u
| U1, U32 -> uint8_to_uint32 u
| U1, U64 -> uint8_to_uint64 u
| U1, U128 -> UInt128.uint64_to_uint128 (uint8_to_uint64 u)
| U1, S8 -> uint8_to_int8 u
| U1, S16 -> uint8_to_int16 u
| U1, S32 -> uint8_to_int32 u
| U1, S64 -> uint8_to_int64 u
| U1, S128 -> uint64_to_int128 (uint8_to_uint64 u)
| U8, U1 -> UInt8.rem u 2uy
| U8, U8 -> u
| U8, U16 -> uint8_to_uint16 u
| U8, U32 -> uint8_to_uint32 u
| U8, U64 -> uint8_to_uint64 u
| U8, U128 -> UInt128.uint64_to_uint128 (uint8_to_uint64 u)
| U8, S8 -> uint8_to_int8 u
| U8, S16 -> uint8_to_int16 u
| U8, S32 -> uint8_to_int32 u
| U8, S64 -> uint8_to_int64 u
| U8, S128 -> uint64_to_int128 (uint8_to_uint64 u)
| U16, U1 -> UInt8.rem (uint16_to_uint8 u) 2uy
| U16, U8 -> uint16_to_uint8 u
| U16, U16 -> u
| U16, U32 -> uint16_to_uint32 u
| U16, U64 -> uint16_to_uint64 u
| U16, U128 -> UInt128.uint64_to_uint128 (uint16_to_uint64 u)
| U16, S8 -> uint16_to_int8 u
| U16, S16 -> uint16_to_int16 u
| U16, S32 -> uint16_to_int32 u
| U16, S64 -> uint16_to_int64 u
| U16, S128 -> uint64_to_int128 (uint16_to_uint64 u)
| U32, U1 -> UInt8.rem (uint32_to_uint8 u) 2uy
| U32, U8 -> uint32_to_uint8 u
| U32, U16 -> uint32_to_uint16 u
| U32, U32 -> u
| U32, U64 -> uint32_to_uint64 u
| U32, U128 -> UInt128.uint64_to_uint128 (uint32_to_uint64 u)
| U32, S8 -> uint32_to_int8 u
| U32, S16 -> uint32_to_int16 u
| U32, S32 -> uint32_to_int32 u
| U32, S64 -> uint32_to_int64 u
| U32, S128 -> uint64_to_int128 (uint32_to_uint64 u)
| U64, U1 -> UInt8.rem (uint64_to_uint8 u) 2uy
| U64, U8 -> uint64_to_uint8 u
| U64, U16 -> uint64_to_uint16 u
| U64, U32 -> uint64_to_uint32 u
| U64, U64 -> u
| U64, U128 -> UInt128.uint64_to_uint128 u
| U64, S8 -> uint64_to_int8 u
| U64, S16 -> uint64_to_int16 u
| U64, S32 -> uint64_to_int32 u
| U64, S64 -> uint64_to_int64 u
| U64, S128 -> uint64_to_int128 u
| U128, U1 -> UInt8.rem (uint64_to_uint8 (uint128_to_uint64 u)) 2uy
| U128, U8 -> uint64_to_uint8 (UInt128.uint128_to_uint64 u)
| U128, U16 -> uint64_to_uint16 (UInt128.uint128_to_uint64 u)
| U128, U32 -> uint64_to_uint32 (UInt128.uint128_to_uint64 u)
| U128, U64 -> UInt128.uint128_to_uint64 u
| U128, U128 -> u
| U128, S8 -> uint64_to_int8 (UInt128.uint128_to_uint64 u)
| U128, S16 -> uint64_to_int16 (UInt128.uint128_to_uint64 u)
| U128, S32 -> uint64_to_int32 (UInt128.uint128_to_uint64 u)
| U128, S64 -> uint64_to_int64 (UInt128.uint128_to_uint64 u)
| U128, S128 -> uint128_to_int128 u
| S8, U1 -> UInt8.rem (int8_to_uint8 u) 2uy
| S8, U8 -> int8_to_uint8 u
| S8, U16 -> int8_to_uint16 u
| S8, U32 -> int8_to_uint32 u
| S8, U64 -> int8_to_uint64 u
| S8, U128 -> int64_to_uint128 (int8_to_int64 u)
| S8, S8 -> u
| S8, S16 -> int8_to_int16 u
| S8, S32 -> int8_to_int32 u
| S8, S64 -> int8_to_int64 u
| S8, S128 -> int64_to_int128 (int8_to_int64 u)
| S16, U1 -> UInt8.rem (int16_to_uint8 u) 2uy
| S16, U8 -> int16_to_uint8 u
| S16, U16 -> int16_to_uint16 u
| S16, U32 -> int16_to_uint32 u
| S16, U64 -> int16_to_uint64 u
| S16, U128 -> int64_to_uint128 (int16_to_int64 u)
| S16, S8 -> int16_to_int8 u
| S16, S16 -> u
| S16, S32 -> int16_to_int32 u
| S16, S64 -> int16_to_int64 u
| S16, S128 -> int64_to_int128 (int16_to_int64 u)
| S32, U1 -> UInt8.rem (int32_to_uint8 u) 2uy
| S32, U8 -> int32_to_uint8 u
| S32, U16 -> int32_to_uint16 u
| S32, U32 -> int32_to_uint32 u
| S32, U64 -> int32_to_uint64 u
| S32, U128 -> int64_to_uint128 (int32_to_int64 u)
| S32, S8 -> int32_to_int8 u
| S32, S16 -> int32_to_int16 u
| S32, S32 -> u
| S32, S64 -> int32_to_int64 u
| S32, S128 -> int64_to_int128 (int32_to_int64 u)
| S64, U1 -> UInt8.rem (int64_to_uint8 u) 2uy
| S64, U8 -> int64_to_uint8 u
| S64, U16 -> int64_to_uint16 u
| S64, U32 -> int64_to_uint32 u
| S64, U64 -> int64_to_uint64 u
| S64, U128 -> int64_to_uint128 u
| S64, S8 -> int64_to_int8 u
| S64, S16 -> int64_to_int16 u
| S64, S32 -> int64_to_int32 u
| S64, S64 -> u
| S64, S128 -> int64_to_int128 u
| S128, U1 -> UInt8.rem (uint64_to_uint8 (int128_to_uint64 u)) 2uy
| S128, U8 -> uint64_to_uint8 (int128_to_uint64 u)
| S128, U16 -> uint64_to_uint16 (int128_to_uint64 u)
| S128, U32 -> uint64_to_uint32 (int128_to_uint64 u)
| S128, U64 -> int128_to_uint64 u
| S128, U128 -> int128_to_uint128 u
| S128, S8 -> uint64_to_int8 (int128_to_uint64 u)
| S128, S16 -> uint64_to_int16 (int128_to_uint64 u)
| S128, S32 -> uint64_to_int32 (int128_to_uint64 u)
| S128, S64 -> uint64_to_int64 (int128_to_uint64 u)
| S128, S128 -> u
#pop-options
[@(strict_on_arguments [0])]
let ones t l =
match t with
| U1 -> 0x1uy
| U8 -> 0xFFuy
| U16 -> 0xFFFFus
| U32 -> 0xFFFFFFFFul
| U64 -> 0xFFFFFFFFFFFFFFFFuL
| U128 ->
let x = UInt128.uint64_to_uint128 0xFFFFFFFFFFFFFFFFuL in
let y = (UInt128.shift_left x 64ul) `UInt128.add` x in
assert_norm (UInt128.v y == pow2 128 - 1);
y
| _ -> mk_int (-1)
let zeros t l = mk_int 0
[@(strict_on_arguments [0])]
let add_mod #t #l a b =
match t with
| U1 -> UInt8.rem (UInt8.add_mod a b) 2uy
| U8 -> UInt8.add_mod a b
| U16 -> UInt16.add_mod a b
| U32 -> UInt32.add_mod a b
| U64 -> UInt64.add_mod a b
| U128 -> UInt128.add_mod a b
let add_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let add #t #l a b =
match t with
| U1 -> UInt8.add a b
| U8 -> UInt8.add a b
| U16 -> UInt16.add a b
| U32 -> UInt32.add a b
| U64 -> UInt64.add a b
| U128 -> UInt128.add a b
| S8 -> Int8.add a b
| S16 -> Int16.add a b
| S32 -> Int32.add a b
| S64 -> Int64.add a b
| S128 -> Int128.add a b
let add_lemma #t #l a b = ()
#push-options "--max_fuel 1"
[@(strict_on_arguments [0])]
let incr #t #l a =
match t with
| U1 -> UInt8.add a 1uy
| U8 -> UInt8.add a 1uy
| U16 -> UInt16.add a 1us
| U32 -> UInt32.add a 1ul
| U64 -> UInt64.add a 1uL
| U128 -> UInt128.add a (UInt128.uint_to_t 1)
| S8 -> Int8.add a 1y
| S16 -> Int16.add a 1s
| S32 -> Int32.add a 1l
| S64 -> Int64.add a 1L
| S128 -> Int128.add a (Int128.int_to_t 1)
let incr_lemma #t #l a = ()
#pop-options
[@(strict_on_arguments [0])]
let mul_mod #t #l a b =
match t with
| U1 -> UInt8.mul_mod a b
| U8 -> UInt8.mul_mod a b
| U16 -> UInt16.mul_mod a b
| U32 -> UInt32.mul_mod a b
| U64 -> UInt64.mul_mod a b
let mul_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let mul #t #l a b =
match t with
| U1 -> UInt8.mul a b
| U8 -> UInt8.mul a b
| U16 -> UInt16.mul a b
| U32 -> UInt32.mul a b
| U64 -> UInt64.mul a b
| S8 -> Int8.mul a b
| S16 -> Int16.mul a b
| S32 -> Int32.mul a b
| S64 -> Int64.mul a b
let mul_lemma #t #l a b = ()
let mul64_wide a b = UInt128.mul_wide a b
let mul64_wide_lemma a b = ()
let mul_s64_wide a b = Int128.mul_wide a b
let mul_s64_wide_lemma a b = ()
[@(strict_on_arguments [0])]
let sub_mod #t #l a b =
match t with
| U1 -> UInt8.rem (UInt8.sub_mod a b) 2uy
| U8 -> UInt8.sub_mod a b
| U16 -> UInt16.sub_mod a b
| U32 -> UInt32.sub_mod a b
| U64 -> UInt64.sub_mod a b
| U128 -> UInt128.sub_mod a b
let sub_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let sub #t #l a b =
match t with
| U1 -> UInt8.sub a b
| U8 -> UInt8.sub a b
| U16 -> UInt16.sub a b
| U32 -> UInt32.sub a b
| U64 -> UInt64.sub a b
| U128 -> UInt128.sub a b
| S8 -> Int8.sub a b
| S16 -> Int16.sub a b
| S32 -> Int32.sub a b
| S64 -> Int64.sub a b
| S128 -> Int128.sub a b
let sub_lemma #t #l a b = ()
#push-options "--max_fuel 1"
[@(strict_on_arguments [0])]
let decr #t #l a =
match t with
| U1 -> UInt8.sub a 1uy
| U8 -> UInt8.sub a 1uy
| U16 -> UInt16.sub a 1us
| U32 -> UInt32.sub a 1ul
| U64 -> UInt64.sub a 1uL
| U128 -> UInt128.sub a (UInt128.uint_to_t 1)
| S8 -> Int8.sub a 1y
| S16 -> Int16.sub a 1s
| S32 -> Int32.sub a 1l
| S64 -> Int64.sub a 1L
| S128 -> Int128.sub a (Int128.int_to_t 1)
let decr_lemma #t #l a = ()
#pop-options
[@(strict_on_arguments [0])]
let logxor #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logxor 0uy 0uy == 0uy);
assert_norm (UInt8.logxor 0uy 1uy == 1uy);
assert_norm (UInt8.logxor 1uy 0uy == 1uy);
assert_norm (UInt8.logxor 1uy 1uy == 0uy);
UInt8.logxor a b
| U8 -> UInt8.logxor a b
| U16 -> UInt16.logxor a b
| U32 -> UInt32.logxor a b
| U64 -> UInt64.logxor a b
| U128 -> UInt128.logxor a b
| S8 -> Int8.logxor a b
| S16 -> Int16.logxor a b
| S32 -> Int32.logxor a b
| S64 -> Int64.logxor a b
| S128 -> Int128.logxor a b
#push-options "--max_fuel 1"
val logxor_lemma_: #t:inttype -> #l:secrecy_level -> a:int_t t l -> b:int_t t l -> Lemma
(v (a `logxor` (a `logxor` b)) == v b)
let logxor_lemma_ #t #l a b =
match t with
| U1 | U8 | U16 | U32 | U64 | U128 ->
UInt.logxor_associative #(bits t) (v a) (v a) (v b);
UInt.logxor_self #(bits t) (v a);
UInt.logxor_commutative #(bits t) 0 (v b);
UInt.logxor_lemma_1 #(bits t) (v b)
| S8 | S16 | S32 | S64 | S128 ->
Int.logxor_associative #(bits t) (v a) (v a) (v b);
Int.logxor_self #(bits t) (v a);
Int.logxor_commutative #(bits t) 0 (v b);
Int.logxor_lemma_1 #(bits t) (v b)
let logxor_lemma #t #l a b =
logxor_lemma_ #t a b;
v_extensionality (logxor a (logxor a b)) b;
begin
match t with
| U1 | U8 | U16 | U32 | U64 | U128 -> UInt.logxor_commutative #(bits t) (v a) (v b)
| S8 | S16 | S32 | S64 | S128 -> Int.logxor_commutative #(bits t) (v a) (v b)
end;
v_extensionality (logxor a (logxor b a)) b;
begin
match t with
| U1 | U8 | U16 | U32 | U64 | U128 -> UInt.logxor_lemma_1 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.logxor_lemma_1 #(bits t) (v a)
end;
v_extensionality (logxor a (mk_int #t #l 0)) a
let logxor_lemma1 #t #l a b =
match v a, v b with
| _, 0 ->
UInt.logxor_lemma_1 #(bits t) (v a)
| 0, _ ->
UInt.logxor_commutative #(bits t) (v a) (v b);
UInt.logxor_lemma_1 #(bits t) (v b)
| 1, 1 ->
v_extensionality a b;
UInt.logxor_self #(bits t) (v a)
let logxor_spec #t #l a b =
match t with
| U1 ->
assert_norm (u1 0 `logxor` u1 0 == u1 0 /\ u1 0 `logxor` u1 1 == u1 1);
assert_norm (u1 1 `logxor` u1 0 == u1 1 /\ u1 1 `logxor` u1 1 == u1 0);
assert_norm (0 `logxor_v #U1` 0 == 0 /\ 0 `logxor_v #U1` 1 == 1);
assert_norm (1 `logxor_v #U1` 0 == 1 /\ 1 `logxor_v #U1` 1 == 0)
| _ -> ()
#pop-options
[@(strict_on_arguments [0])]
let logand #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logand 0uy 0uy == 0uy);
assert_norm (UInt8.logand 0uy 1uy == 0uy);
assert_norm (UInt8.logand 1uy 0uy == 0uy);
assert_norm (UInt8.logand 1uy 1uy == 1uy);
UInt8.logand a b
| U8 -> UInt8.logand a b
| U16 -> UInt16.logand a b
| U32 -> UInt32.logand a b
| U64 -> UInt64.logand a b
| U128 -> UInt128.logand a b
| S8 -> Int8.logand a b
| S16 -> Int16.logand a b
| S32 -> Int32.logand a b
| S64 -> Int64.logand a b
| S128 -> Int128.logand a b
let logand_zeros #t #l a =
match t with
| U1 -> assert_norm (u1 0 `logand` zeros U1 l == u1 0 /\ u1 1 `logand` zeros U1 l == u1 0)
| U8 | U16 | U32 | U64 | U128 -> UInt.logand_lemma_1 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.logand_lemma_1 #(bits t) (v a)
let logand_ones #t #l a =
match t with
| U1 -> assert_norm (u1 0 `logand` ones U1 l == u1 0 /\ u1 1 `logand` ones U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logand_lemma_2 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.logand_lemma_2 #(bits t) (v a)
let logand_lemma #t #l a b =
logand_zeros #t #l b;
logand_ones #t #l b;
match t with
| U1 ->
assert_norm (u1 0 `logand` zeros U1 l == u1 0 /\ u1 1 `logand` zeros U1 l == u1 0);
assert_norm (u1 0 `logand` ones U1 l == u1 0 /\ u1 1 `logand` ones U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logand_commutative #(bits t) (v a) (v b)
| S8 | S16 | S32 | S64 | S128 -> Int.logand_commutative #(bits t) (v a) (v b)
let logand_spec #t #l a b =
match t with
| U1 ->
assert_norm (u1 0 `logand` u1 0 == u1 0 /\ u1 0 `logand` u1 1 == u1 0);
assert_norm (u1 1 `logand` u1 0 == u1 0 /\ u1 1 `logand` u1 1 == u1 1);
assert_norm (0 `logand_v #U1` 0 == 0 /\ 0 `logand_v #U1` 1 == 0);
assert_norm (1 `logand_v #U1` 0 == 0 /\ 1 `logand_v #U1` 1 == 1)
| _ -> ()
let logand_le #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logand 0uy 0uy == 0uy);
assert_norm (UInt8.logand 0uy 1uy == 0uy);
assert_norm (UInt8.logand 1uy 0uy == 0uy);
assert_norm (UInt8.logand 1uy 1uy == 1uy)
| U8 -> UInt.logand_le (UInt.to_uint_t 8 (v a)) (UInt.to_uint_t 8 (v b))
| U16 -> UInt.logand_le (UInt.to_uint_t 16 (v a)) (UInt.to_uint_t 16 (v b))
| U32 -> UInt.logand_le (UInt.to_uint_t 32 (v a)) (UInt.to_uint_t 32 (v b))
| U64 -> UInt.logand_le (UInt.to_uint_t 64 (v a)) (UInt.to_uint_t 64 (v b))
| U128 -> UInt.logand_le (UInt.to_uint_t 128 (v a)) (UInt.to_uint_t 128 (v b))
let logand_mask #t #l a b m =
match t with
| U1 ->
assert_norm (UInt8.logand 0uy 0uy == 0uy);
assert_norm (UInt8.logand 0uy 1uy == 0uy);
assert_norm (UInt8.logand 1uy 0uy == 0uy);
assert_norm (UInt8.logand 1uy 1uy == 1uy)
| U8 -> UInt.logand_mask (UInt.to_uint_t 8 (v a)) m
| U16 -> UInt.logand_mask (UInt.to_uint_t 16 (v a)) m
| U32 -> UInt.logand_mask (UInt.to_uint_t 32 (v a)) m
| U64 -> UInt.logand_mask (UInt.to_uint_t 64 (v a)) m
| U128 -> UInt.logand_mask (UInt.to_uint_t 128 (v a)) m
[@(strict_on_arguments [0])]
let logor #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logor 0uy 0uy == 0uy);
assert_norm (UInt8.logor 0uy 1uy == 1uy);
assert_norm (UInt8.logor 1uy 0uy == 1uy);
assert_norm (UInt8.logor 1uy 1uy == 1uy);
UInt8.logor a b
| U8 -> UInt8.logor a b
| U16 -> UInt16.logor a b
| U32 -> UInt32.logor a b
| U64 -> UInt64.logor a b
| U128 -> UInt128.logor a b
| S8 -> Int8.logor a b
| S16 -> Int16.logor a b
| S32 -> Int32.logor a b
| S64 -> Int64.logor a b
| S128 -> Int128.logor a b
#push-options "--max_fuel 1"
let logor_disjoint #t #l a b m =
if m > 0 then
begin
UInt.logor_disjoint #(bits t) (v b) (v a) m;
UInt.logor_commutative #(bits t) (v b) (v a)
end
else
begin
UInt.logor_commutative #(bits t) (v a) (v b);
UInt.logor_lemma_1 #(bits t) (v b)
end
#pop-options
let logor_zeros #t #l a =
match t with
|U1 -> assert_norm(u1 0 `logor` zeros U1 l == u1 0 /\ u1 1 `logor` zeros U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logor_lemma_1 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.nth_lemma #(bits t) (Int.logor #(bits t) (v a) (Int.zero (bits t))) (v a)
let logor_ones #t #l a =
match t with
|U1 -> assert_norm(u1 0 `logor` ones U1 l == u1 1 /\ u1 1 `logor` ones U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logor_lemma_2 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.nth_lemma (Int.logor #(bits t) (v a) (Int.ones (bits t))) (Int.ones (bits t))
let logor_lemma #t #l a b =
logor_zeros #t #l b;
logor_ones #t #l b;
match t with
| U1 ->
assert_norm(u1 0 `logor` ones U1 l == u1 1 /\ u1 1 `logor` ones U1 l == u1 1);
assert_norm(u1 0 `logor` zeros U1 l == u1 0 /\ u1 1 `logor` zeros U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logor_commutative #(bits t) (v a) (v b)
| S8 | S16 | S32 | S64 | S128 -> Int.nth_lemma #(bits t) (Int.logor #(bits t) (v a) (v b)) (Int.logor #(bits t) (v b) (v a))
let logor_spec #t #l a b =
match t with
| U1 ->
assert_norm(u1 0 `logor` ones U1 l == u1 1 /\ u1 1 `logor` ones U1 l == u1 1);
assert_norm(u1 0 `logor` zeros U1 l == u1 0 /\ u1 1 `logor` zeros U1 l == u1 1);
assert_norm (0 `logor_v #U1` 0 == 0 /\ 0 `logor_v #U1` 1 == 1);
assert_norm (1 `logor_v #U1` 0 == 1 /\ 1 `logor_v #U1` 1 == 1)
| _ -> ()
[@(strict_on_arguments [0])]
let lognot #t #l a =
match t with
| U1 -> UInt8.rem (UInt8.lognot a) 2uy
| U8 -> UInt8.lognot a
| U16 -> UInt16.lognot a
| U32 -> UInt32.lognot a
| U64 -> UInt64.lognot a
| U128 -> UInt128.lognot a
| S8 -> Int8.lognot a
| S16 -> Int16.lognot a
| S32 -> Int32.lognot a
| S64 -> Int64.lognot a
| S128 -> Int128.lognot a
let lognot_lemma #t #l a =
match t with
|U1 -> assert_norm(lognot (u1 0) == u1 1 /\ lognot (u1 1) == u1 0)
| U8 | U16 | U32 | U64 | U128 ->
FStar.UInt.lognot_lemma_1 #(bits t);
UInt.nth_lemma (FStar.UInt.lognot #(bits t) (UInt.ones (bits t))) (UInt.zero (bits t))
| S8 | S16 | S32 | S64 | S128 ->
Int.nth_lemma (FStar.Int.lognot #(bits t) (Int.zero (bits t))) (Int.ones (bits t));
Int.nth_lemma (FStar.Int.lognot #(bits t) (Int.ones (bits t))) (Int.zero (bits t))
let lognot_spec #t #l a =
match t with
| U1 ->
assert_norm(lognot (u1 0) == u1 1 /\ lognot (u1 1) == u1 0);
assert_norm(lognot_v #U1 0 == 1 /\ lognot_v #U1 1 == 0)
| _ -> ()
[@(strict_on_arguments [0])]
let shift_right #t #l a b =
match t with
| U1 -> UInt8.shift_right a b
| U8 -> UInt8.shift_right a b
| U16 -> UInt16.shift_right a b
| U32 -> UInt32.shift_right a b
| U64 -> UInt64.shift_right a b
| U128 -> UInt128.shift_right a b
| S8 -> Int8.shift_arithmetic_right a b
| S16 -> Int16.shift_arithmetic_right a b
| S32 -> Int32.shift_arithmetic_right a b
| S64 -> Int64.shift_arithmetic_right a b
| S128 -> Int128.shift_arithmetic_right a b
val shift_right_value_aux_1: #n:pos{1 < n} -> a:Int.int_t n -> s:nat{n <= s} ->
Lemma (Int.shift_arithmetic_right #n a s = a / pow2 s)
let shift_right_value_aux_1 #n a s =
pow2_le_compat s n;
if a >= 0 then Int.sign_bit_positive a else Int.sign_bit_negative a
#push-options "--z3rlimit 200"
val shift_right_value_aux_2: #n:pos{1 < n} -> a:Int.int_t n ->
Lemma (Int.shift_arithmetic_right #n a 1 = a / 2)
let shift_right_value_aux_2 #n a =
if a >= 0 then
begin
Int.sign_bit_positive a;
UInt.shift_right_value_aux_3 #n a 1
end
else
begin
Int.sign_bit_negative a;
let a1 = Int.to_vec a in
let au = Int.to_uint a in
let sar = Int.shift_arithmetic_right #n a 1 in
let sar1 = Int.to_vec sar in
let sr = UInt.shift_right #n au 1 in
let sr1 = UInt.to_vec sr in
assert (Seq.equal (Seq.slice sar1 1 n) (Seq.slice sr1 1 n));
assert (Seq.equal sar1 (Seq.append (BitVector.ones_vec #1) (Seq.slice sr1 1 n)));
UInt.append_lemma #1 #(n-1) (BitVector.ones_vec #1) (Seq.slice sr1 1 n);
assert (Seq.equal (Seq.slice a1 0 (n-1)) (Seq.slice sar1 1 n));
UInt.slice_left_lemma a1 (n-1);
assert (sar + pow2 n = pow2 (n-1) + (au / 2));
pow2_double_sum (n-1);
assert (sar + pow2 (n-1) = (a + pow2 n) / 2);
pow2_double_mult (n-1);
lemma_div_plus a (pow2 (n-1)) 2;
assert (sar = a / 2)
end
val shift_right_value_aux_3: #n:pos -> a:Int.int_t n -> s:pos{s < n} ->
Lemma (ensures Int.shift_arithmetic_right #n a s = a / pow2 s)
(decreases s)
let rec shift_right_value_aux_3 #n a s =
if s = 1 then
shift_right_value_aux_2 #n a
else
begin
let a1 = Int.to_vec a in
assert (Seq.equal (BitVector.shift_arithmetic_right_vec #n a1 s)
(BitVector.shift_arithmetic_right_vec #n
(BitVector.shift_arithmetic_right_vec #n a1 (s-1)) 1));
assert (Int.shift_arithmetic_right #n a s =
Int.shift_arithmetic_right #n (Int.shift_arithmetic_right #n a (s-1)) 1);
shift_right_value_aux_3 #n a (s-1);
shift_right_value_aux_2 #n (Int.shift_arithmetic_right #n a (s-1));
assert (Int.shift_arithmetic_right #n a s = (a / pow2 (s-1)) / 2);
pow2_double_mult (s-1);
division_multiplication_lemma a (pow2 (s-1)) 2
end
let shift_right_lemma #t #l a b =
match t with
| U1 | U8 | U16 | U32 | U64 | U128 -> ()
| S8 | S16 | S32 | S64 | S128 ->
if v b = 0 then ()
else if v b >= bits t then
shift_right_value_aux_1 #(bits t) (v a) (v b)
else
shift_right_value_aux_3 #(bits t) (v a) (v b)
[@(strict_on_arguments [0])]
let shift_left #t #l a b =
match t with
| U1 -> UInt8.shift_left a b
| U8 -> UInt8.shift_left a b
| U16 -> UInt16.shift_left a b
| U32 -> UInt32.shift_left a b
| U64 -> UInt64.shift_left a b
| U128 -> UInt128.shift_left a b
| S8 -> Int8.shift_left a b
| S16 -> Int16.shift_left a b
| S32 -> Int32.shift_left a b
| S64 -> Int64.shift_left a b
| S128 -> Int128.shift_left a b
#push-options "--max_fuel 1"
let shift_left_lemma #t #l a b = ()
let rotate_right #t #l a b =
logor (shift_right a b) (shift_left a (sub #U32 (size (bits t)) b))
let rotate_left #t #l a b =
logor (shift_left a b) (shift_right a (sub #U32 (size (bits t)) b))
[@(strict_on_arguments [0])]
let ct_abs #t #l a =
match t with
| S8 -> Int8.ct_abs a
| S16 -> Int16.ct_abs a
| S32 -> Int32.ct_abs a
| S64 -> Int64.ct_abs a
#pop-options
[@(strict_on_arguments [0])]
let eq_mask #t a b =
match t with
| U1 -> lognot (logxor a b)
| U8 -> UInt8.eq_mask a b
| U16 -> UInt16.eq_mask a b
| U32 -> UInt32.eq_mask a b
| U64 -> UInt64.eq_mask a b
| U128 -> UInt128.eq_mask a b
| S8 -> Int.Cast.uint8_to_int8 (UInt8.eq_mask (to_u8 a) (to_u8 b))
| S16 -> Int.Cast.uint16_to_int16 (UInt16.eq_mask (to_u16 a) (to_u16 b))
| S32 -> Int.Cast.uint32_to_int32 (UInt32.eq_mask (to_u32 a) (to_u32 b))
| S64 -> Int.Cast.uint64_to_int64 (UInt64.eq_mask (to_u64 a) (to_u64 b))
val eq_mask_lemma_unsigned: #t:inttype{unsigned t} -> a:int_t t SEC -> b:int_t t SEC -> Lemma
(if v a = v b then v (eq_mask a b) == ones_v t
else v (eq_mask a b) == 0)
let eq_mask_lemma_unsigned #t a b =
match t with
| U1 ->
assert_norm (
logxor (u1 0) (u1 0) == u1 0 /\ logxor (u1 0) (u1 1) == u1 1 /\
logxor (u1 1) (u1 0) == u1 1 /\ logxor (u1 1) (u1 1) == u1 0 /\
lognot (u1 1) == u1 0 /\ lognot (u1 0) == u1 1)
| U8 | U16 | U32 | U64 | U128 -> ()
#push-options "--z3rlimit 200"
val eq_mask_lemma_signed: #t:inttype{signed t /\ ~(S128? t)} -> a:int_t t SEC -> b:int_t t SEC -> Lemma
(if v a = v b then v (eq_mask a b) == ones_v t
else v (eq_mask a b) == 0)
let eq_mask_lemma_signed #t a b =
match t with
| S8 ->
begin
assert_norm (pow2 8 = 2 * pow2 7);
if 0 <= v a then modulo_lemma (v a) (pow2 8)
else
begin
modulo_addition_lemma (v a) 1 (pow2 8);
modulo_lemma (v a + pow2 8) (pow2 8)
end
end
| S16 ->
begin
assert_norm (pow2 16 = 2 * pow2 15);
if 0 <= v a then modulo_lemma (v a) (pow2 16)
else
begin
modulo_addition_lemma (v a) 1 (pow2 16);
modulo_lemma (v a + pow2 16) (pow2 16)
end
end
| S32 ->
begin
if 0 <= v a then modulo_lemma (v a) (pow2 32)
else
begin
modulo_addition_lemma (v a) 1 (pow2 32);
modulo_lemma (v a + pow2 32) (pow2 32)
end
end
| S64 ->
begin
if 0 <= v a then modulo_lemma (v a) (pow2 64)
else
begin
modulo_addition_lemma (v a) 1 (pow2 64);
modulo_lemma (v a + pow2 64) (pow2 64)
end
end
#pop-options
let eq_mask_lemma #t a b =
if signed t then eq_mask_lemma_signed a b
else eq_mask_lemma_unsigned a b
let eq_mask_logand_lemma #t a b c =
eq_mask_lemma a b;
logand_zeros c;
logand_ones c;
match t with
| U1 | U8 | U16 | U32 | U64 | U128 -> UInt.logand_commutative #(bits t) (v (eq_mask a b)) (v c)
| S8 | S16 | S32 | S64 -> Int.logand_commutative #(bits t) (v (eq_mask a b)) (v c)
[@(strict_on_arguments [0])]
let neq_mask #t a b = lognot (eq_mask #t a b)
let neq_mask_lemma #t a b =
match t with
| U1 -> assert_norm (lognot (u1 1) == u1 0 /\ lognot (u1 0) == u1 1)
| _ ->
UInt.lognot_lemma_1 #(bits t);
UInt.lognot_self #(bits t) 0
[@(strict_on_arguments [0])]
let gte_mask #t a b =
match t with
| U1 -> logor a (lognot b)
| U8 -> UInt8.gte_mask a b
| U16 -> UInt16.gte_mask a b
| U32 -> UInt32.gte_mask a b
| U64 -> UInt64.gte_mask a b
| U128 -> UInt128.gte_mask a b
let gte_mask_lemma #t a b =
match t with
| U1 ->
begin
assert_norm (
logor (u1 0) (u1 0) == u1 0 /\ logor (u1 1) (u1 1) == u1 1 /\
logor (u1 0) (u1 1) == u1 1 /\ logor (u1 1) (u1 0) == u1 1 /\
lognot (u1 1) == u1 0 /\ lognot (u1 0) == u1 1)
end
| _ -> () | false | false | Lib.IntTypes.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 200,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val gte_mask_logand_lemma: #t:inttype{unsigned t}
-> a:int_t t SEC
-> b:int_t t SEC
-> c:int_t t SEC
-> Lemma
(if v a >= v b then v (c `logand` gte_mask a b) == v c
else v (c `logand` gte_mask a b) == 0)
[SMTPat (c `logand` gte_mask a b)] | [] | Lib.IntTypes.gte_mask_logand_lemma | {
"file_name": "lib/Lib.IntTypes.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} |
a: Lib.IntTypes.int_t t Lib.IntTypes.SEC ->
b: Lib.IntTypes.int_t t Lib.IntTypes.SEC ->
c: Lib.IntTypes.int_t t Lib.IntTypes.SEC
-> FStar.Pervasives.Lemma
(ensures
((match Lib.IntTypes.v a >= Lib.IntTypes.v b with
| true ->
Lib.IntTypes.v (Lib.IntTypes.logand c (Lib.IntTypes.gte_mask a b)) == Lib.IntTypes.v c
| _ -> Lib.IntTypes.v (Lib.IntTypes.logand c (Lib.IntTypes.gte_mask a b)) == 0)
<:
Type0)) [SMTPat (Lib.IntTypes.logand c (Lib.IntTypes.gte_mask a b))] | {
"end_col": 67,
"end_line": 916,
"start_col": 2,
"start_line": 908
} |
FStar.Pervasives.Lemma | val shift_right_value_aux_2: #n:pos{1 < n} -> a:Int.int_t n ->
Lemma (Int.shift_arithmetic_right #n a 1 = a / 2) | [
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let shift_right_value_aux_2 #n a =
if a >= 0 then
begin
Int.sign_bit_positive a;
UInt.shift_right_value_aux_3 #n a 1
end
else
begin
Int.sign_bit_negative a;
let a1 = Int.to_vec a in
let au = Int.to_uint a in
let sar = Int.shift_arithmetic_right #n a 1 in
let sar1 = Int.to_vec sar in
let sr = UInt.shift_right #n au 1 in
let sr1 = UInt.to_vec sr in
assert (Seq.equal (Seq.slice sar1 1 n) (Seq.slice sr1 1 n));
assert (Seq.equal sar1 (Seq.append (BitVector.ones_vec #1) (Seq.slice sr1 1 n)));
UInt.append_lemma #1 #(n-1) (BitVector.ones_vec #1) (Seq.slice sr1 1 n);
assert (Seq.equal (Seq.slice a1 0 (n-1)) (Seq.slice sar1 1 n));
UInt.slice_left_lemma a1 (n-1);
assert (sar + pow2 n = pow2 (n-1) + (au / 2));
pow2_double_sum (n-1);
assert (sar + pow2 (n-1) = (a + pow2 n) / 2);
pow2_double_mult (n-1);
lemma_div_plus a (pow2 (n-1)) 2;
assert (sar = a / 2)
end | val shift_right_value_aux_2: #n:pos{1 < n} -> a:Int.int_t n ->
Lemma (Int.shift_arithmetic_right #n a 1 = a / 2)
let shift_right_value_aux_2 #n a = | false | null | true | if a >= 0
then
(Int.sign_bit_positive a;
UInt.shift_right_value_aux_3 #n a 1)
else
(Int.sign_bit_negative a;
let a1 = Int.to_vec a in
let au = Int.to_uint a in
let sar = Int.shift_arithmetic_right #n a 1 in
let sar1 = Int.to_vec sar in
let sr = UInt.shift_right #n au 1 in
let sr1 = UInt.to_vec sr in
assert (Seq.equal (Seq.slice sar1 1 n) (Seq.slice sr1 1 n));
assert (Seq.equal sar1 (Seq.append (BitVector.ones_vec #1) (Seq.slice sr1 1 n)));
UInt.append_lemma #1 #(n - 1) (BitVector.ones_vec #1) (Seq.slice sr1 1 n);
assert (Seq.equal (Seq.slice a1 0 (n - 1)) (Seq.slice sar1 1 n));
UInt.slice_left_lemma a1 (n - 1);
assert (sar + pow2 n = pow2 (n - 1) + (au / 2));
pow2_double_sum (n - 1);
assert (sar + pow2 (n - 1) = (a + pow2 n) / 2);
pow2_double_mult (n - 1);
lemma_div_plus a (pow2 (n - 1)) 2;
assert (sar = a / 2)) | {
"checked_file": "Lib.IntTypes.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt8.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt16.fsti.checked",
"FStar.UInt128.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int8.fsti.checked",
"FStar.Int64.fsti.checked",
"FStar.Int32.fsti.checked",
"FStar.Int16.fsti.checked",
"FStar.Int128.fsti.checked",
"FStar.Int.Cast.Full.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Int.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "Lib.IntTypes.fst"
} | [
"lemma"
] | [
"Prims.pos",
"Prims.b2t",
"Prims.op_LessThan",
"FStar.Int.int_t",
"Prims.op_GreaterThanOrEqual",
"FStar.UInt.shift_right_value_aux_3",
"Prims.unit",
"FStar.Int.sign_bit_positive",
"Prims.bool",
"Prims._assert",
"Prims.op_Equality",
"Prims.int",
"Prims.op_Division",
"FStar.Math.Lemmas.lemma_div_plus",
"Prims.pow2",
"Prims.op_Subtraction",
"FStar.Math.Lemmas.pow2_double_mult",
"Prims.op_Addition",
"FStar.Math.Lemmas.pow2_double_sum",
"FStar.UInt.slice_left_lemma",
"FStar.Seq.Base.equal",
"FStar.Seq.Base.slice",
"FStar.UInt.append_lemma",
"FStar.BitVector.ones_vec",
"FStar.Seq.Base.append",
"FStar.BitVector.bv_t",
"FStar.UInt.to_vec",
"FStar.UInt.uint_t",
"FStar.UInt.shift_right",
"FStar.Int.to_vec",
"FStar.Int.shift_arithmetic_right",
"FStar.Int.to_uint",
"FStar.Int.sign_bit_negative"
] | [] | module Lib.IntTypes
open FStar.Math.Lemmas
#push-options "--max_fuel 0 --max_ifuel 1 --z3rlimit 200"
let pow2_2 _ = assert_norm (pow2 2 = 4)
let pow2_3 _ = assert_norm (pow2 3 = 8)
let pow2_4 _ = assert_norm (pow2 4 = 16)
let pow2_127 _ = assert_norm (pow2 127 = 0x80000000000000000000000000000000)
let bits_numbytes t = ()
let sec_int_t t = pub_int_t t
let sec_int_v #t u = pub_int_v u
let secret #t x = x
[@(strict_on_arguments [0])]
let mk_int #t #l x =
match t with
| U1 -> UInt8.uint_to_t x
| U8 -> UInt8.uint_to_t x
| U16 -> UInt16.uint_to_t x
| U32 -> UInt32.uint_to_t x
| U64 -> UInt64.uint_to_t x
| U128 -> UInt128.uint_to_t x
| S8 -> Int8.int_to_t x
| S16 -> Int16.int_to_t x
| S32 -> Int32.int_to_t x
| S64 -> Int64.int_to_t x
| S128 -> Int128.int_to_t x
val v_extensionality:
#t:inttype
-> #l:secrecy_level
-> a:int_t t l
-> b:int_t t l
-> Lemma
(requires v a == v b)
(ensures a == b)
let v_extensionality #t #l a b =
match t with
| U1 -> ()
| U8 -> UInt8.v_inj a b
| U16 -> UInt16.v_inj a b
| U32 -> UInt32.v_inj a b
| U64 -> UInt64.v_inj a b
| U128 -> UInt128.v_inj a b
| S8 -> Int8.v_inj a b
| S16 -> Int16.v_inj a b
| S32 -> Int32.v_inj a b
| S64 -> Int64.v_inj a b
| S128 -> Int128.v_inj a b
let v_injective #t #l a =
v_extensionality a (mk_int (v a))
let v_mk_int #t #l n = ()
let u128 n = FStar.UInt128.uint64_to_uint128 (u64 n)
// KaRaMeL will extract this to FStar_Int128_int_to_t, which isn't provided
// We'll need to have FStar.Int128.int64_to_int128 to support int128_t literals
let i128 n =
assert_norm (pow2 (bits S64 - 1) <= pow2 (bits S128 - 1));
sint #S128 #SEC n
let size_to_uint32 x = x
let size_to_uint64 x = Int.Cast.uint32_to_uint64 x
let byte_to_uint8 x = x
let byte_to_int8 x = Int.Cast.uint8_to_int8 x
let op_At_Percent = Int.op_At_Percent
// FStar.UInt128 gets special treatment in KaRaMeL. There is no
// equivalent for FStar.Int128 at the moment, so we use the three
// assumed cast operators below.
//
// Using them will fail at runtime with an informative message.
// The commented-out implementations show that they are realizable.
//
// When support for `FStar.Int128` is added KaRaMeL, these casts must
// be added as special cases. When using builtin compiler support for
// `int128_t`, they can be implemented directly as C casts without
// undefined or implementation-defined behaviour.
assume
val uint128_to_int128: a:UInt128.t{v a <= maxint S128} -> b:Int128.t{Int128.v b == UInt128.v a}
//let uint128_to_int128 a = Int128.int_to_t (v a)
assume
val int128_to_uint128: a:Int128.t -> b:UInt128.t{UInt128.v b == Int128.v a % pow2 128}
//let int128_to_uint128 a = mk_int (v a % pow2 128)
assume
val int64_to_int128: a:Int64.t -> b:Int128.t{Int128.v b == Int64.v a}
//let int64_to_int128 a = Int128.int_to_t (v a)
val uint64_to_int128: a:UInt64.t -> b:Int128.t{Int128.v b == UInt64.v a}
let uint64_to_int128 a = uint128_to_int128 (Int.Cast.Full.uint64_to_uint128 a)
val int64_to_uint128: a:Int64.t -> b:UInt128.t{UInt128.v b == Int64.v a % pow2 128}
let int64_to_uint128 a = int128_to_uint128 (int64_to_int128 a)
val int128_to_uint64: a:Int128.t -> b:UInt64.t{UInt64.v b == Int128.v a % pow2 64}
let int128_to_uint64 a = Int.Cast.Full.uint128_to_uint64 (int128_to_uint128 a)
#push-options "--z3rlimit 1000"
[@(strict_on_arguments [0;2])]
let cast #t #l t' l' u =
assert_norm (pow2 8 = 2 * pow2 7);
assert_norm (pow2 16 = 2 * pow2 15);
assert_norm (pow2 64 * pow2 64 = pow2 128);
assert_norm (pow2 16 * pow2 48 = pow2 64);
assert_norm (pow2 8 * pow2 56 = pow2 64);
assert_norm (pow2 32 * pow2 32 = pow2 64);
modulo_modulo_lemma (v u) (pow2 32) (pow2 32);
modulo_modulo_lemma (v u) (pow2 64) (pow2 64);
modulo_modulo_lemma (v u) (pow2 128) (pow2 64);
modulo_modulo_lemma (v u) (pow2 16) (pow2 48);
modulo_modulo_lemma (v u) (pow2 8) (pow2 56);
let open FStar.Int.Cast in
let open FStar.Int.Cast.Full in
match t, t' with
| U1, U1 -> u
| U1, U8 -> u
| U1, U16 -> uint8_to_uint16 u
| U1, U32 -> uint8_to_uint32 u
| U1, U64 -> uint8_to_uint64 u
| U1, U128 -> UInt128.uint64_to_uint128 (uint8_to_uint64 u)
| U1, S8 -> uint8_to_int8 u
| U1, S16 -> uint8_to_int16 u
| U1, S32 -> uint8_to_int32 u
| U1, S64 -> uint8_to_int64 u
| U1, S128 -> uint64_to_int128 (uint8_to_uint64 u)
| U8, U1 -> UInt8.rem u 2uy
| U8, U8 -> u
| U8, U16 -> uint8_to_uint16 u
| U8, U32 -> uint8_to_uint32 u
| U8, U64 -> uint8_to_uint64 u
| U8, U128 -> UInt128.uint64_to_uint128 (uint8_to_uint64 u)
| U8, S8 -> uint8_to_int8 u
| U8, S16 -> uint8_to_int16 u
| U8, S32 -> uint8_to_int32 u
| U8, S64 -> uint8_to_int64 u
| U8, S128 -> uint64_to_int128 (uint8_to_uint64 u)
| U16, U1 -> UInt8.rem (uint16_to_uint8 u) 2uy
| U16, U8 -> uint16_to_uint8 u
| U16, U16 -> u
| U16, U32 -> uint16_to_uint32 u
| U16, U64 -> uint16_to_uint64 u
| U16, U128 -> UInt128.uint64_to_uint128 (uint16_to_uint64 u)
| U16, S8 -> uint16_to_int8 u
| U16, S16 -> uint16_to_int16 u
| U16, S32 -> uint16_to_int32 u
| U16, S64 -> uint16_to_int64 u
| U16, S128 -> uint64_to_int128 (uint16_to_uint64 u)
| U32, U1 -> UInt8.rem (uint32_to_uint8 u) 2uy
| U32, U8 -> uint32_to_uint8 u
| U32, U16 -> uint32_to_uint16 u
| U32, U32 -> u
| U32, U64 -> uint32_to_uint64 u
| U32, U128 -> UInt128.uint64_to_uint128 (uint32_to_uint64 u)
| U32, S8 -> uint32_to_int8 u
| U32, S16 -> uint32_to_int16 u
| U32, S32 -> uint32_to_int32 u
| U32, S64 -> uint32_to_int64 u
| U32, S128 -> uint64_to_int128 (uint32_to_uint64 u)
| U64, U1 -> UInt8.rem (uint64_to_uint8 u) 2uy
| U64, U8 -> uint64_to_uint8 u
| U64, U16 -> uint64_to_uint16 u
| U64, U32 -> uint64_to_uint32 u
| U64, U64 -> u
| U64, U128 -> UInt128.uint64_to_uint128 u
| U64, S8 -> uint64_to_int8 u
| U64, S16 -> uint64_to_int16 u
| U64, S32 -> uint64_to_int32 u
| U64, S64 -> uint64_to_int64 u
| U64, S128 -> uint64_to_int128 u
| U128, U1 -> UInt8.rem (uint64_to_uint8 (uint128_to_uint64 u)) 2uy
| U128, U8 -> uint64_to_uint8 (UInt128.uint128_to_uint64 u)
| U128, U16 -> uint64_to_uint16 (UInt128.uint128_to_uint64 u)
| U128, U32 -> uint64_to_uint32 (UInt128.uint128_to_uint64 u)
| U128, U64 -> UInt128.uint128_to_uint64 u
| U128, U128 -> u
| U128, S8 -> uint64_to_int8 (UInt128.uint128_to_uint64 u)
| U128, S16 -> uint64_to_int16 (UInt128.uint128_to_uint64 u)
| U128, S32 -> uint64_to_int32 (UInt128.uint128_to_uint64 u)
| U128, S64 -> uint64_to_int64 (UInt128.uint128_to_uint64 u)
| U128, S128 -> uint128_to_int128 u
| S8, U1 -> UInt8.rem (int8_to_uint8 u) 2uy
| S8, U8 -> int8_to_uint8 u
| S8, U16 -> int8_to_uint16 u
| S8, U32 -> int8_to_uint32 u
| S8, U64 -> int8_to_uint64 u
| S8, U128 -> int64_to_uint128 (int8_to_int64 u)
| S8, S8 -> u
| S8, S16 -> int8_to_int16 u
| S8, S32 -> int8_to_int32 u
| S8, S64 -> int8_to_int64 u
| S8, S128 -> int64_to_int128 (int8_to_int64 u)
| S16, U1 -> UInt8.rem (int16_to_uint8 u) 2uy
| S16, U8 -> int16_to_uint8 u
| S16, U16 -> int16_to_uint16 u
| S16, U32 -> int16_to_uint32 u
| S16, U64 -> int16_to_uint64 u
| S16, U128 -> int64_to_uint128 (int16_to_int64 u)
| S16, S8 -> int16_to_int8 u
| S16, S16 -> u
| S16, S32 -> int16_to_int32 u
| S16, S64 -> int16_to_int64 u
| S16, S128 -> int64_to_int128 (int16_to_int64 u)
| S32, U1 -> UInt8.rem (int32_to_uint8 u) 2uy
| S32, U8 -> int32_to_uint8 u
| S32, U16 -> int32_to_uint16 u
| S32, U32 -> int32_to_uint32 u
| S32, U64 -> int32_to_uint64 u
| S32, U128 -> int64_to_uint128 (int32_to_int64 u)
| S32, S8 -> int32_to_int8 u
| S32, S16 -> int32_to_int16 u
| S32, S32 -> u
| S32, S64 -> int32_to_int64 u
| S32, S128 -> int64_to_int128 (int32_to_int64 u)
| S64, U1 -> UInt8.rem (int64_to_uint8 u) 2uy
| S64, U8 -> int64_to_uint8 u
| S64, U16 -> int64_to_uint16 u
| S64, U32 -> int64_to_uint32 u
| S64, U64 -> int64_to_uint64 u
| S64, U128 -> int64_to_uint128 u
| S64, S8 -> int64_to_int8 u
| S64, S16 -> int64_to_int16 u
| S64, S32 -> int64_to_int32 u
| S64, S64 -> u
| S64, S128 -> int64_to_int128 u
| S128, U1 -> UInt8.rem (uint64_to_uint8 (int128_to_uint64 u)) 2uy
| S128, U8 -> uint64_to_uint8 (int128_to_uint64 u)
| S128, U16 -> uint64_to_uint16 (int128_to_uint64 u)
| S128, U32 -> uint64_to_uint32 (int128_to_uint64 u)
| S128, U64 -> int128_to_uint64 u
| S128, U128 -> int128_to_uint128 u
| S128, S8 -> uint64_to_int8 (int128_to_uint64 u)
| S128, S16 -> uint64_to_int16 (int128_to_uint64 u)
| S128, S32 -> uint64_to_int32 (int128_to_uint64 u)
| S128, S64 -> uint64_to_int64 (int128_to_uint64 u)
| S128, S128 -> u
#pop-options
[@(strict_on_arguments [0])]
let ones t l =
match t with
| U1 -> 0x1uy
| U8 -> 0xFFuy
| U16 -> 0xFFFFus
| U32 -> 0xFFFFFFFFul
| U64 -> 0xFFFFFFFFFFFFFFFFuL
| U128 ->
let x = UInt128.uint64_to_uint128 0xFFFFFFFFFFFFFFFFuL in
let y = (UInt128.shift_left x 64ul) `UInt128.add` x in
assert_norm (UInt128.v y == pow2 128 - 1);
y
| _ -> mk_int (-1)
let zeros t l = mk_int 0
[@(strict_on_arguments [0])]
let add_mod #t #l a b =
match t with
| U1 -> UInt8.rem (UInt8.add_mod a b) 2uy
| U8 -> UInt8.add_mod a b
| U16 -> UInt16.add_mod a b
| U32 -> UInt32.add_mod a b
| U64 -> UInt64.add_mod a b
| U128 -> UInt128.add_mod a b
let add_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let add #t #l a b =
match t with
| U1 -> UInt8.add a b
| U8 -> UInt8.add a b
| U16 -> UInt16.add a b
| U32 -> UInt32.add a b
| U64 -> UInt64.add a b
| U128 -> UInt128.add a b
| S8 -> Int8.add a b
| S16 -> Int16.add a b
| S32 -> Int32.add a b
| S64 -> Int64.add a b
| S128 -> Int128.add a b
let add_lemma #t #l a b = ()
#push-options "--max_fuel 1"
[@(strict_on_arguments [0])]
let incr #t #l a =
match t with
| U1 -> UInt8.add a 1uy
| U8 -> UInt8.add a 1uy
| U16 -> UInt16.add a 1us
| U32 -> UInt32.add a 1ul
| U64 -> UInt64.add a 1uL
| U128 -> UInt128.add a (UInt128.uint_to_t 1)
| S8 -> Int8.add a 1y
| S16 -> Int16.add a 1s
| S32 -> Int32.add a 1l
| S64 -> Int64.add a 1L
| S128 -> Int128.add a (Int128.int_to_t 1)
let incr_lemma #t #l a = ()
#pop-options
[@(strict_on_arguments [0])]
let mul_mod #t #l a b =
match t with
| U1 -> UInt8.mul_mod a b
| U8 -> UInt8.mul_mod a b
| U16 -> UInt16.mul_mod a b
| U32 -> UInt32.mul_mod a b
| U64 -> UInt64.mul_mod a b
let mul_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let mul #t #l a b =
match t with
| U1 -> UInt8.mul a b
| U8 -> UInt8.mul a b
| U16 -> UInt16.mul a b
| U32 -> UInt32.mul a b
| U64 -> UInt64.mul a b
| S8 -> Int8.mul a b
| S16 -> Int16.mul a b
| S32 -> Int32.mul a b
| S64 -> Int64.mul a b
let mul_lemma #t #l a b = ()
let mul64_wide a b = UInt128.mul_wide a b
let mul64_wide_lemma a b = ()
let mul_s64_wide a b = Int128.mul_wide a b
let mul_s64_wide_lemma a b = ()
[@(strict_on_arguments [0])]
let sub_mod #t #l a b =
match t with
| U1 -> UInt8.rem (UInt8.sub_mod a b) 2uy
| U8 -> UInt8.sub_mod a b
| U16 -> UInt16.sub_mod a b
| U32 -> UInt32.sub_mod a b
| U64 -> UInt64.sub_mod a b
| U128 -> UInt128.sub_mod a b
let sub_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let sub #t #l a b =
match t with
| U1 -> UInt8.sub a b
| U8 -> UInt8.sub a b
| U16 -> UInt16.sub a b
| U32 -> UInt32.sub a b
| U64 -> UInt64.sub a b
| U128 -> UInt128.sub a b
| S8 -> Int8.sub a b
| S16 -> Int16.sub a b
| S32 -> Int32.sub a b
| S64 -> Int64.sub a b
| S128 -> Int128.sub a b
let sub_lemma #t #l a b = ()
#push-options "--max_fuel 1"
[@(strict_on_arguments [0])]
let decr #t #l a =
match t with
| U1 -> UInt8.sub a 1uy
| U8 -> UInt8.sub a 1uy
| U16 -> UInt16.sub a 1us
| U32 -> UInt32.sub a 1ul
| U64 -> UInt64.sub a 1uL
| U128 -> UInt128.sub a (UInt128.uint_to_t 1)
| S8 -> Int8.sub a 1y
| S16 -> Int16.sub a 1s
| S32 -> Int32.sub a 1l
| S64 -> Int64.sub a 1L
| S128 -> Int128.sub a (Int128.int_to_t 1)
let decr_lemma #t #l a = ()
#pop-options
[@(strict_on_arguments [0])]
let logxor #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logxor 0uy 0uy == 0uy);
assert_norm (UInt8.logxor 0uy 1uy == 1uy);
assert_norm (UInt8.logxor 1uy 0uy == 1uy);
assert_norm (UInt8.logxor 1uy 1uy == 0uy);
UInt8.logxor a b
| U8 -> UInt8.logxor a b
| U16 -> UInt16.logxor a b
| U32 -> UInt32.logxor a b
| U64 -> UInt64.logxor a b
| U128 -> UInt128.logxor a b
| S8 -> Int8.logxor a b
| S16 -> Int16.logxor a b
| S32 -> Int32.logxor a b
| S64 -> Int64.logxor a b
| S128 -> Int128.logxor a b
#push-options "--max_fuel 1"
val logxor_lemma_: #t:inttype -> #l:secrecy_level -> a:int_t t l -> b:int_t t l -> Lemma
(v (a `logxor` (a `logxor` b)) == v b)
let logxor_lemma_ #t #l a b =
match t with
| U1 | U8 | U16 | U32 | U64 | U128 ->
UInt.logxor_associative #(bits t) (v a) (v a) (v b);
UInt.logxor_self #(bits t) (v a);
UInt.logxor_commutative #(bits t) 0 (v b);
UInt.logxor_lemma_1 #(bits t) (v b)
| S8 | S16 | S32 | S64 | S128 ->
Int.logxor_associative #(bits t) (v a) (v a) (v b);
Int.logxor_self #(bits t) (v a);
Int.logxor_commutative #(bits t) 0 (v b);
Int.logxor_lemma_1 #(bits t) (v b)
let logxor_lemma #t #l a b =
logxor_lemma_ #t a b;
v_extensionality (logxor a (logxor a b)) b;
begin
match t with
| U1 | U8 | U16 | U32 | U64 | U128 -> UInt.logxor_commutative #(bits t) (v a) (v b)
| S8 | S16 | S32 | S64 | S128 -> Int.logxor_commutative #(bits t) (v a) (v b)
end;
v_extensionality (logxor a (logxor b a)) b;
begin
match t with
| U1 | U8 | U16 | U32 | U64 | U128 -> UInt.logxor_lemma_1 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.logxor_lemma_1 #(bits t) (v a)
end;
v_extensionality (logxor a (mk_int #t #l 0)) a
let logxor_lemma1 #t #l a b =
match v a, v b with
| _, 0 ->
UInt.logxor_lemma_1 #(bits t) (v a)
| 0, _ ->
UInt.logxor_commutative #(bits t) (v a) (v b);
UInt.logxor_lemma_1 #(bits t) (v b)
| 1, 1 ->
v_extensionality a b;
UInt.logxor_self #(bits t) (v a)
let logxor_spec #t #l a b =
match t with
| U1 ->
assert_norm (u1 0 `logxor` u1 0 == u1 0 /\ u1 0 `logxor` u1 1 == u1 1);
assert_norm (u1 1 `logxor` u1 0 == u1 1 /\ u1 1 `logxor` u1 1 == u1 0);
assert_norm (0 `logxor_v #U1` 0 == 0 /\ 0 `logxor_v #U1` 1 == 1);
assert_norm (1 `logxor_v #U1` 0 == 1 /\ 1 `logxor_v #U1` 1 == 0)
| _ -> ()
#pop-options
[@(strict_on_arguments [0])]
let logand #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logand 0uy 0uy == 0uy);
assert_norm (UInt8.logand 0uy 1uy == 0uy);
assert_norm (UInt8.logand 1uy 0uy == 0uy);
assert_norm (UInt8.logand 1uy 1uy == 1uy);
UInt8.logand a b
| U8 -> UInt8.logand a b
| U16 -> UInt16.logand a b
| U32 -> UInt32.logand a b
| U64 -> UInt64.logand a b
| U128 -> UInt128.logand a b
| S8 -> Int8.logand a b
| S16 -> Int16.logand a b
| S32 -> Int32.logand a b
| S64 -> Int64.logand a b
| S128 -> Int128.logand a b
let logand_zeros #t #l a =
match t with
| U1 -> assert_norm (u1 0 `logand` zeros U1 l == u1 0 /\ u1 1 `logand` zeros U1 l == u1 0)
| U8 | U16 | U32 | U64 | U128 -> UInt.logand_lemma_1 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.logand_lemma_1 #(bits t) (v a)
let logand_ones #t #l a =
match t with
| U1 -> assert_norm (u1 0 `logand` ones U1 l == u1 0 /\ u1 1 `logand` ones U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logand_lemma_2 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.logand_lemma_2 #(bits t) (v a)
let logand_lemma #t #l a b =
logand_zeros #t #l b;
logand_ones #t #l b;
match t with
| U1 ->
assert_norm (u1 0 `logand` zeros U1 l == u1 0 /\ u1 1 `logand` zeros U1 l == u1 0);
assert_norm (u1 0 `logand` ones U1 l == u1 0 /\ u1 1 `logand` ones U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logand_commutative #(bits t) (v a) (v b)
| S8 | S16 | S32 | S64 | S128 -> Int.logand_commutative #(bits t) (v a) (v b)
let logand_spec #t #l a b =
match t with
| U1 ->
assert_norm (u1 0 `logand` u1 0 == u1 0 /\ u1 0 `logand` u1 1 == u1 0);
assert_norm (u1 1 `logand` u1 0 == u1 0 /\ u1 1 `logand` u1 1 == u1 1);
assert_norm (0 `logand_v #U1` 0 == 0 /\ 0 `logand_v #U1` 1 == 0);
assert_norm (1 `logand_v #U1` 0 == 0 /\ 1 `logand_v #U1` 1 == 1)
| _ -> ()
let logand_le #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logand 0uy 0uy == 0uy);
assert_norm (UInt8.logand 0uy 1uy == 0uy);
assert_norm (UInt8.logand 1uy 0uy == 0uy);
assert_norm (UInt8.logand 1uy 1uy == 1uy)
| U8 -> UInt.logand_le (UInt.to_uint_t 8 (v a)) (UInt.to_uint_t 8 (v b))
| U16 -> UInt.logand_le (UInt.to_uint_t 16 (v a)) (UInt.to_uint_t 16 (v b))
| U32 -> UInt.logand_le (UInt.to_uint_t 32 (v a)) (UInt.to_uint_t 32 (v b))
| U64 -> UInt.logand_le (UInt.to_uint_t 64 (v a)) (UInt.to_uint_t 64 (v b))
| U128 -> UInt.logand_le (UInt.to_uint_t 128 (v a)) (UInt.to_uint_t 128 (v b))
let logand_mask #t #l a b m =
match t with
| U1 ->
assert_norm (UInt8.logand 0uy 0uy == 0uy);
assert_norm (UInt8.logand 0uy 1uy == 0uy);
assert_norm (UInt8.logand 1uy 0uy == 0uy);
assert_norm (UInt8.logand 1uy 1uy == 1uy)
| U8 -> UInt.logand_mask (UInt.to_uint_t 8 (v a)) m
| U16 -> UInt.logand_mask (UInt.to_uint_t 16 (v a)) m
| U32 -> UInt.logand_mask (UInt.to_uint_t 32 (v a)) m
| U64 -> UInt.logand_mask (UInt.to_uint_t 64 (v a)) m
| U128 -> UInt.logand_mask (UInt.to_uint_t 128 (v a)) m
[@(strict_on_arguments [0])]
let logor #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logor 0uy 0uy == 0uy);
assert_norm (UInt8.logor 0uy 1uy == 1uy);
assert_norm (UInt8.logor 1uy 0uy == 1uy);
assert_norm (UInt8.logor 1uy 1uy == 1uy);
UInt8.logor a b
| U8 -> UInt8.logor a b
| U16 -> UInt16.logor a b
| U32 -> UInt32.logor a b
| U64 -> UInt64.logor a b
| U128 -> UInt128.logor a b
| S8 -> Int8.logor a b
| S16 -> Int16.logor a b
| S32 -> Int32.logor a b
| S64 -> Int64.logor a b
| S128 -> Int128.logor a b
#push-options "--max_fuel 1"
let logor_disjoint #t #l a b m =
if m > 0 then
begin
UInt.logor_disjoint #(bits t) (v b) (v a) m;
UInt.logor_commutative #(bits t) (v b) (v a)
end
else
begin
UInt.logor_commutative #(bits t) (v a) (v b);
UInt.logor_lemma_1 #(bits t) (v b)
end
#pop-options
let logor_zeros #t #l a =
match t with
|U1 -> assert_norm(u1 0 `logor` zeros U1 l == u1 0 /\ u1 1 `logor` zeros U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logor_lemma_1 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.nth_lemma #(bits t) (Int.logor #(bits t) (v a) (Int.zero (bits t))) (v a)
let logor_ones #t #l a =
match t with
|U1 -> assert_norm(u1 0 `logor` ones U1 l == u1 1 /\ u1 1 `logor` ones U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logor_lemma_2 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.nth_lemma (Int.logor #(bits t) (v a) (Int.ones (bits t))) (Int.ones (bits t))
let logor_lemma #t #l a b =
logor_zeros #t #l b;
logor_ones #t #l b;
match t with
| U1 ->
assert_norm(u1 0 `logor` ones U1 l == u1 1 /\ u1 1 `logor` ones U1 l == u1 1);
assert_norm(u1 0 `logor` zeros U1 l == u1 0 /\ u1 1 `logor` zeros U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logor_commutative #(bits t) (v a) (v b)
| S8 | S16 | S32 | S64 | S128 -> Int.nth_lemma #(bits t) (Int.logor #(bits t) (v a) (v b)) (Int.logor #(bits t) (v b) (v a))
let logor_spec #t #l a b =
match t with
| U1 ->
assert_norm(u1 0 `logor` ones U1 l == u1 1 /\ u1 1 `logor` ones U1 l == u1 1);
assert_norm(u1 0 `logor` zeros U1 l == u1 0 /\ u1 1 `logor` zeros U1 l == u1 1);
assert_norm (0 `logor_v #U1` 0 == 0 /\ 0 `logor_v #U1` 1 == 1);
assert_norm (1 `logor_v #U1` 0 == 1 /\ 1 `logor_v #U1` 1 == 1)
| _ -> ()
[@(strict_on_arguments [0])]
let lognot #t #l a =
match t with
| U1 -> UInt8.rem (UInt8.lognot a) 2uy
| U8 -> UInt8.lognot a
| U16 -> UInt16.lognot a
| U32 -> UInt32.lognot a
| U64 -> UInt64.lognot a
| U128 -> UInt128.lognot a
| S8 -> Int8.lognot a
| S16 -> Int16.lognot a
| S32 -> Int32.lognot a
| S64 -> Int64.lognot a
| S128 -> Int128.lognot a
let lognot_lemma #t #l a =
match t with
|U1 -> assert_norm(lognot (u1 0) == u1 1 /\ lognot (u1 1) == u1 0)
| U8 | U16 | U32 | U64 | U128 ->
FStar.UInt.lognot_lemma_1 #(bits t);
UInt.nth_lemma (FStar.UInt.lognot #(bits t) (UInt.ones (bits t))) (UInt.zero (bits t))
| S8 | S16 | S32 | S64 | S128 ->
Int.nth_lemma (FStar.Int.lognot #(bits t) (Int.zero (bits t))) (Int.ones (bits t));
Int.nth_lemma (FStar.Int.lognot #(bits t) (Int.ones (bits t))) (Int.zero (bits t))
let lognot_spec #t #l a =
match t with
| U1 ->
assert_norm(lognot (u1 0) == u1 1 /\ lognot (u1 1) == u1 0);
assert_norm(lognot_v #U1 0 == 1 /\ lognot_v #U1 1 == 0)
| _ -> ()
[@(strict_on_arguments [0])]
let shift_right #t #l a b =
match t with
| U1 -> UInt8.shift_right a b
| U8 -> UInt8.shift_right a b
| U16 -> UInt16.shift_right a b
| U32 -> UInt32.shift_right a b
| U64 -> UInt64.shift_right a b
| U128 -> UInt128.shift_right a b
| S8 -> Int8.shift_arithmetic_right a b
| S16 -> Int16.shift_arithmetic_right a b
| S32 -> Int32.shift_arithmetic_right a b
| S64 -> Int64.shift_arithmetic_right a b
| S128 -> Int128.shift_arithmetic_right a b
val shift_right_value_aux_1: #n:pos{1 < n} -> a:Int.int_t n -> s:nat{n <= s} ->
Lemma (Int.shift_arithmetic_right #n a s = a / pow2 s)
let shift_right_value_aux_1 #n a s =
pow2_le_compat s n;
if a >= 0 then Int.sign_bit_positive a else Int.sign_bit_negative a
#push-options "--z3rlimit 200"
val shift_right_value_aux_2: #n:pos{1 < n} -> a:Int.int_t n ->
Lemma (Int.shift_arithmetic_right #n a 1 = a / 2) | false | false | Lib.IntTypes.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 200,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val shift_right_value_aux_2: #n:pos{1 < n} -> a:Int.int_t n ->
Lemma (Int.shift_arithmetic_right #n a 1 = a / 2) | [] | Lib.IntTypes.shift_right_value_aux_2 | {
"file_name": "lib/Lib.IntTypes.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | a: FStar.Int.int_t n
-> FStar.Pervasives.Lemma (ensures FStar.Int.shift_arithmetic_right a 1 = a / 2) | {
"end_col": 5,
"end_line": 722,
"start_col": 2,
"start_line": 697
} |
Prims.Tot | val secret: #t:inttype -> x:int_t t PUB -> y:int_t t SEC{v x == v y} | [
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let secret #t x = x | val secret: #t:inttype -> x:int_t t PUB -> y:int_t t SEC{v x == v y}
let secret #t x = | false | null | false | x | {
"checked_file": "Lib.IntTypes.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt8.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt16.fsti.checked",
"FStar.UInt128.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int8.fsti.checked",
"FStar.Int64.fsti.checked",
"FStar.Int32.fsti.checked",
"FStar.Int16.fsti.checked",
"FStar.Int128.fsti.checked",
"FStar.Int.Cast.Full.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Int.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "Lib.IntTypes.fst"
} | [
"total"
] | [
"Lib.IntTypes.inttype",
"Lib.IntTypes.int_t",
"Lib.IntTypes.PUB",
"Lib.IntTypes.SEC",
"Prims.eq2",
"Lib.IntTypes.range_t",
"Lib.IntTypes.v"
] | [] | module Lib.IntTypes
open FStar.Math.Lemmas
#push-options "--max_fuel 0 --max_ifuel 1 --z3rlimit 200"
let pow2_2 _ = assert_norm (pow2 2 = 4)
let pow2_3 _ = assert_norm (pow2 3 = 8)
let pow2_4 _ = assert_norm (pow2 4 = 16)
let pow2_127 _ = assert_norm (pow2 127 = 0x80000000000000000000000000000000)
let bits_numbytes t = ()
let sec_int_t t = pub_int_t t
let sec_int_v #t u = pub_int_v u | false | false | Lib.IntTypes.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 200,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val secret: #t:inttype -> x:int_t t PUB -> y:int_t t SEC{v x == v y} | [] | Lib.IntTypes.secret | {
"file_name": "lib/Lib.IntTypes.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | x: Lib.IntTypes.int_t t Lib.IntTypes.PUB
-> y: Lib.IntTypes.int_t t Lib.IntTypes.SEC {Lib.IntTypes.v x == Lib.IntTypes.v y} | {
"end_col": 19,
"end_line": 18,
"start_col": 18,
"start_line": 18
} |
Prims.Tot | [
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let byte_to_int8 x = Int.Cast.uint8_to_int8 x | let byte_to_int8 x = | false | null | false | Int.Cast.uint8_to_int8 x | {
"checked_file": "Lib.IntTypes.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt8.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt16.fsti.checked",
"FStar.UInt128.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int8.fsti.checked",
"FStar.Int64.fsti.checked",
"FStar.Int32.fsti.checked",
"FStar.Int16.fsti.checked",
"FStar.Int128.fsti.checked",
"FStar.Int.Cast.Full.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Int.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "Lib.IntTypes.fst"
} | [
"total"
] | [
"FStar.UInt8.t",
"FStar.Int.Cast.uint8_to_int8",
"FStar.Int8.t",
"Prims.b2t",
"Prims.op_Equality",
"Prims.int",
"FStar.Int8.v",
"FStar.Int.Cast.op_At_Percent",
"FStar.UInt8.v",
"Prims.pow2"
] | [] | module Lib.IntTypes
open FStar.Math.Lemmas
#push-options "--max_fuel 0 --max_ifuel 1 --z3rlimit 200"
let pow2_2 _ = assert_norm (pow2 2 = 4)
let pow2_3 _ = assert_norm (pow2 3 = 8)
let pow2_4 _ = assert_norm (pow2 4 = 16)
let pow2_127 _ = assert_norm (pow2 127 = 0x80000000000000000000000000000000)
let bits_numbytes t = ()
let sec_int_t t = pub_int_t t
let sec_int_v #t u = pub_int_v u
let secret #t x = x
[@(strict_on_arguments [0])]
let mk_int #t #l x =
match t with
| U1 -> UInt8.uint_to_t x
| U8 -> UInt8.uint_to_t x
| U16 -> UInt16.uint_to_t x
| U32 -> UInt32.uint_to_t x
| U64 -> UInt64.uint_to_t x
| U128 -> UInt128.uint_to_t x
| S8 -> Int8.int_to_t x
| S16 -> Int16.int_to_t x
| S32 -> Int32.int_to_t x
| S64 -> Int64.int_to_t x
| S128 -> Int128.int_to_t x
val v_extensionality:
#t:inttype
-> #l:secrecy_level
-> a:int_t t l
-> b:int_t t l
-> Lemma
(requires v a == v b)
(ensures a == b)
let v_extensionality #t #l a b =
match t with
| U1 -> ()
| U8 -> UInt8.v_inj a b
| U16 -> UInt16.v_inj a b
| U32 -> UInt32.v_inj a b
| U64 -> UInt64.v_inj a b
| U128 -> UInt128.v_inj a b
| S8 -> Int8.v_inj a b
| S16 -> Int16.v_inj a b
| S32 -> Int32.v_inj a b
| S64 -> Int64.v_inj a b
| S128 -> Int128.v_inj a b
let v_injective #t #l a =
v_extensionality a (mk_int (v a))
let v_mk_int #t #l n = ()
let u128 n = FStar.UInt128.uint64_to_uint128 (u64 n)
// KaRaMeL will extract this to FStar_Int128_int_to_t, which isn't provided
// We'll need to have FStar.Int128.int64_to_int128 to support int128_t literals
let i128 n =
assert_norm (pow2 (bits S64 - 1) <= pow2 (bits S128 - 1));
sint #S128 #SEC n
let size_to_uint32 x = x
let size_to_uint64 x = Int.Cast.uint32_to_uint64 x
let byte_to_uint8 x = x | false | false | Lib.IntTypes.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 200,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val byte_to_int8 : x: FStar.UInt8.t -> b: FStar.Int8.t{FStar.Int8.v b = FStar.UInt8.v x @% Prims.pow2 8} | [] | Lib.IntTypes.byte_to_int8 | {
"file_name": "lib/Lib.IntTypes.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | x: FStar.UInt8.t -> b: FStar.Int8.t{FStar.Int8.v b = FStar.UInt8.v x @% Prims.pow2 8} | {
"end_col": 45,
"end_line": 76,
"start_col": 21,
"start_line": 76
} |
|
FStar.Pervasives.Lemma | val logor_ones: #t: inttype -> #l: secrecy_level -> a: int_t t l ->
Lemma (v (a `logor` ones t l) == ones_v t) | [
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let logor_ones #t #l a =
match t with
|U1 -> assert_norm(u1 0 `logor` ones U1 l == u1 1 /\ u1 1 `logor` ones U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logor_lemma_2 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.nth_lemma (Int.logor #(bits t) (v a) (Int.ones (bits t))) (Int.ones (bits t)) | val logor_ones: #t: inttype -> #l: secrecy_level -> a: int_t t l ->
Lemma (v (a `logor` ones t l) == ones_v t)
let logor_ones #t #l a = | false | null | true | match t with
| U1 -> assert_norm ((u1 0) `logor` (ones U1 l) == u1 1 /\ (u1 1) `logor` (ones U1 l) == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logor_lemma_2 #(bits t) (v a)
| S8
| S16
| S32
| S64
| S128 -> Int.nth_lemma (Int.logor #(bits t) (v a) (Int.ones (bits t))) (Int.ones (bits t)) | {
"checked_file": "Lib.IntTypes.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt8.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt16.fsti.checked",
"FStar.UInt128.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int8.fsti.checked",
"FStar.Int64.fsti.checked",
"FStar.Int32.fsti.checked",
"FStar.Int16.fsti.checked",
"FStar.Int128.fsti.checked",
"FStar.Int.Cast.Full.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Int.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "Lib.IntTypes.fst"
} | [
"lemma"
] | [
"Lib.IntTypes.inttype",
"Lib.IntTypes.secrecy_level",
"Lib.IntTypes.int_t",
"FStar.Pervasives.assert_norm",
"Prims.l_and",
"Prims.eq2",
"Lib.IntTypes.U1",
"Lib.IntTypes.SEC",
"Lib.IntTypes.logor",
"Lib.IntTypes.u1",
"Lib.IntTypes.ones",
"FStar.UInt.logor_lemma_2",
"Lib.IntTypes.bits",
"Lib.IntTypes.v",
"FStar.Int.nth_lemma",
"FStar.Int.logor",
"FStar.Int.ones",
"Prims.unit"
] | [] | module Lib.IntTypes
open FStar.Math.Lemmas
#push-options "--max_fuel 0 --max_ifuel 1 --z3rlimit 200"
let pow2_2 _ = assert_norm (pow2 2 = 4)
let pow2_3 _ = assert_norm (pow2 3 = 8)
let pow2_4 _ = assert_norm (pow2 4 = 16)
let pow2_127 _ = assert_norm (pow2 127 = 0x80000000000000000000000000000000)
let bits_numbytes t = ()
let sec_int_t t = pub_int_t t
let sec_int_v #t u = pub_int_v u
let secret #t x = x
[@(strict_on_arguments [0])]
let mk_int #t #l x =
match t with
| U1 -> UInt8.uint_to_t x
| U8 -> UInt8.uint_to_t x
| U16 -> UInt16.uint_to_t x
| U32 -> UInt32.uint_to_t x
| U64 -> UInt64.uint_to_t x
| U128 -> UInt128.uint_to_t x
| S8 -> Int8.int_to_t x
| S16 -> Int16.int_to_t x
| S32 -> Int32.int_to_t x
| S64 -> Int64.int_to_t x
| S128 -> Int128.int_to_t x
val v_extensionality:
#t:inttype
-> #l:secrecy_level
-> a:int_t t l
-> b:int_t t l
-> Lemma
(requires v a == v b)
(ensures a == b)
let v_extensionality #t #l a b =
match t with
| U1 -> ()
| U8 -> UInt8.v_inj a b
| U16 -> UInt16.v_inj a b
| U32 -> UInt32.v_inj a b
| U64 -> UInt64.v_inj a b
| U128 -> UInt128.v_inj a b
| S8 -> Int8.v_inj a b
| S16 -> Int16.v_inj a b
| S32 -> Int32.v_inj a b
| S64 -> Int64.v_inj a b
| S128 -> Int128.v_inj a b
let v_injective #t #l a =
v_extensionality a (mk_int (v a))
let v_mk_int #t #l n = ()
let u128 n = FStar.UInt128.uint64_to_uint128 (u64 n)
// KaRaMeL will extract this to FStar_Int128_int_to_t, which isn't provided
// We'll need to have FStar.Int128.int64_to_int128 to support int128_t literals
let i128 n =
assert_norm (pow2 (bits S64 - 1) <= pow2 (bits S128 - 1));
sint #S128 #SEC n
let size_to_uint32 x = x
let size_to_uint64 x = Int.Cast.uint32_to_uint64 x
let byte_to_uint8 x = x
let byte_to_int8 x = Int.Cast.uint8_to_int8 x
let op_At_Percent = Int.op_At_Percent
// FStar.UInt128 gets special treatment in KaRaMeL. There is no
// equivalent for FStar.Int128 at the moment, so we use the three
// assumed cast operators below.
//
// Using them will fail at runtime with an informative message.
// The commented-out implementations show that they are realizable.
//
// When support for `FStar.Int128` is added KaRaMeL, these casts must
// be added as special cases. When using builtin compiler support for
// `int128_t`, they can be implemented directly as C casts without
// undefined or implementation-defined behaviour.
assume
val uint128_to_int128: a:UInt128.t{v a <= maxint S128} -> b:Int128.t{Int128.v b == UInt128.v a}
//let uint128_to_int128 a = Int128.int_to_t (v a)
assume
val int128_to_uint128: a:Int128.t -> b:UInt128.t{UInt128.v b == Int128.v a % pow2 128}
//let int128_to_uint128 a = mk_int (v a % pow2 128)
assume
val int64_to_int128: a:Int64.t -> b:Int128.t{Int128.v b == Int64.v a}
//let int64_to_int128 a = Int128.int_to_t (v a)
val uint64_to_int128: a:UInt64.t -> b:Int128.t{Int128.v b == UInt64.v a}
let uint64_to_int128 a = uint128_to_int128 (Int.Cast.Full.uint64_to_uint128 a)
val int64_to_uint128: a:Int64.t -> b:UInt128.t{UInt128.v b == Int64.v a % pow2 128}
let int64_to_uint128 a = int128_to_uint128 (int64_to_int128 a)
val int128_to_uint64: a:Int128.t -> b:UInt64.t{UInt64.v b == Int128.v a % pow2 64}
let int128_to_uint64 a = Int.Cast.Full.uint128_to_uint64 (int128_to_uint128 a)
#push-options "--z3rlimit 1000"
[@(strict_on_arguments [0;2])]
let cast #t #l t' l' u =
assert_norm (pow2 8 = 2 * pow2 7);
assert_norm (pow2 16 = 2 * pow2 15);
assert_norm (pow2 64 * pow2 64 = pow2 128);
assert_norm (pow2 16 * pow2 48 = pow2 64);
assert_norm (pow2 8 * pow2 56 = pow2 64);
assert_norm (pow2 32 * pow2 32 = pow2 64);
modulo_modulo_lemma (v u) (pow2 32) (pow2 32);
modulo_modulo_lemma (v u) (pow2 64) (pow2 64);
modulo_modulo_lemma (v u) (pow2 128) (pow2 64);
modulo_modulo_lemma (v u) (pow2 16) (pow2 48);
modulo_modulo_lemma (v u) (pow2 8) (pow2 56);
let open FStar.Int.Cast in
let open FStar.Int.Cast.Full in
match t, t' with
| U1, U1 -> u
| U1, U8 -> u
| U1, U16 -> uint8_to_uint16 u
| U1, U32 -> uint8_to_uint32 u
| U1, U64 -> uint8_to_uint64 u
| U1, U128 -> UInt128.uint64_to_uint128 (uint8_to_uint64 u)
| U1, S8 -> uint8_to_int8 u
| U1, S16 -> uint8_to_int16 u
| U1, S32 -> uint8_to_int32 u
| U1, S64 -> uint8_to_int64 u
| U1, S128 -> uint64_to_int128 (uint8_to_uint64 u)
| U8, U1 -> UInt8.rem u 2uy
| U8, U8 -> u
| U8, U16 -> uint8_to_uint16 u
| U8, U32 -> uint8_to_uint32 u
| U8, U64 -> uint8_to_uint64 u
| U8, U128 -> UInt128.uint64_to_uint128 (uint8_to_uint64 u)
| U8, S8 -> uint8_to_int8 u
| U8, S16 -> uint8_to_int16 u
| U8, S32 -> uint8_to_int32 u
| U8, S64 -> uint8_to_int64 u
| U8, S128 -> uint64_to_int128 (uint8_to_uint64 u)
| U16, U1 -> UInt8.rem (uint16_to_uint8 u) 2uy
| U16, U8 -> uint16_to_uint8 u
| U16, U16 -> u
| U16, U32 -> uint16_to_uint32 u
| U16, U64 -> uint16_to_uint64 u
| U16, U128 -> UInt128.uint64_to_uint128 (uint16_to_uint64 u)
| U16, S8 -> uint16_to_int8 u
| U16, S16 -> uint16_to_int16 u
| U16, S32 -> uint16_to_int32 u
| U16, S64 -> uint16_to_int64 u
| U16, S128 -> uint64_to_int128 (uint16_to_uint64 u)
| U32, U1 -> UInt8.rem (uint32_to_uint8 u) 2uy
| U32, U8 -> uint32_to_uint8 u
| U32, U16 -> uint32_to_uint16 u
| U32, U32 -> u
| U32, U64 -> uint32_to_uint64 u
| U32, U128 -> UInt128.uint64_to_uint128 (uint32_to_uint64 u)
| U32, S8 -> uint32_to_int8 u
| U32, S16 -> uint32_to_int16 u
| U32, S32 -> uint32_to_int32 u
| U32, S64 -> uint32_to_int64 u
| U32, S128 -> uint64_to_int128 (uint32_to_uint64 u)
| U64, U1 -> UInt8.rem (uint64_to_uint8 u) 2uy
| U64, U8 -> uint64_to_uint8 u
| U64, U16 -> uint64_to_uint16 u
| U64, U32 -> uint64_to_uint32 u
| U64, U64 -> u
| U64, U128 -> UInt128.uint64_to_uint128 u
| U64, S8 -> uint64_to_int8 u
| U64, S16 -> uint64_to_int16 u
| U64, S32 -> uint64_to_int32 u
| U64, S64 -> uint64_to_int64 u
| U64, S128 -> uint64_to_int128 u
| U128, U1 -> UInt8.rem (uint64_to_uint8 (uint128_to_uint64 u)) 2uy
| U128, U8 -> uint64_to_uint8 (UInt128.uint128_to_uint64 u)
| U128, U16 -> uint64_to_uint16 (UInt128.uint128_to_uint64 u)
| U128, U32 -> uint64_to_uint32 (UInt128.uint128_to_uint64 u)
| U128, U64 -> UInt128.uint128_to_uint64 u
| U128, U128 -> u
| U128, S8 -> uint64_to_int8 (UInt128.uint128_to_uint64 u)
| U128, S16 -> uint64_to_int16 (UInt128.uint128_to_uint64 u)
| U128, S32 -> uint64_to_int32 (UInt128.uint128_to_uint64 u)
| U128, S64 -> uint64_to_int64 (UInt128.uint128_to_uint64 u)
| U128, S128 -> uint128_to_int128 u
| S8, U1 -> UInt8.rem (int8_to_uint8 u) 2uy
| S8, U8 -> int8_to_uint8 u
| S8, U16 -> int8_to_uint16 u
| S8, U32 -> int8_to_uint32 u
| S8, U64 -> int8_to_uint64 u
| S8, U128 -> int64_to_uint128 (int8_to_int64 u)
| S8, S8 -> u
| S8, S16 -> int8_to_int16 u
| S8, S32 -> int8_to_int32 u
| S8, S64 -> int8_to_int64 u
| S8, S128 -> int64_to_int128 (int8_to_int64 u)
| S16, U1 -> UInt8.rem (int16_to_uint8 u) 2uy
| S16, U8 -> int16_to_uint8 u
| S16, U16 -> int16_to_uint16 u
| S16, U32 -> int16_to_uint32 u
| S16, U64 -> int16_to_uint64 u
| S16, U128 -> int64_to_uint128 (int16_to_int64 u)
| S16, S8 -> int16_to_int8 u
| S16, S16 -> u
| S16, S32 -> int16_to_int32 u
| S16, S64 -> int16_to_int64 u
| S16, S128 -> int64_to_int128 (int16_to_int64 u)
| S32, U1 -> UInt8.rem (int32_to_uint8 u) 2uy
| S32, U8 -> int32_to_uint8 u
| S32, U16 -> int32_to_uint16 u
| S32, U32 -> int32_to_uint32 u
| S32, U64 -> int32_to_uint64 u
| S32, U128 -> int64_to_uint128 (int32_to_int64 u)
| S32, S8 -> int32_to_int8 u
| S32, S16 -> int32_to_int16 u
| S32, S32 -> u
| S32, S64 -> int32_to_int64 u
| S32, S128 -> int64_to_int128 (int32_to_int64 u)
| S64, U1 -> UInt8.rem (int64_to_uint8 u) 2uy
| S64, U8 -> int64_to_uint8 u
| S64, U16 -> int64_to_uint16 u
| S64, U32 -> int64_to_uint32 u
| S64, U64 -> int64_to_uint64 u
| S64, U128 -> int64_to_uint128 u
| S64, S8 -> int64_to_int8 u
| S64, S16 -> int64_to_int16 u
| S64, S32 -> int64_to_int32 u
| S64, S64 -> u
| S64, S128 -> int64_to_int128 u
| S128, U1 -> UInt8.rem (uint64_to_uint8 (int128_to_uint64 u)) 2uy
| S128, U8 -> uint64_to_uint8 (int128_to_uint64 u)
| S128, U16 -> uint64_to_uint16 (int128_to_uint64 u)
| S128, U32 -> uint64_to_uint32 (int128_to_uint64 u)
| S128, U64 -> int128_to_uint64 u
| S128, U128 -> int128_to_uint128 u
| S128, S8 -> uint64_to_int8 (int128_to_uint64 u)
| S128, S16 -> uint64_to_int16 (int128_to_uint64 u)
| S128, S32 -> uint64_to_int32 (int128_to_uint64 u)
| S128, S64 -> uint64_to_int64 (int128_to_uint64 u)
| S128, S128 -> u
#pop-options
[@(strict_on_arguments [0])]
let ones t l =
match t with
| U1 -> 0x1uy
| U8 -> 0xFFuy
| U16 -> 0xFFFFus
| U32 -> 0xFFFFFFFFul
| U64 -> 0xFFFFFFFFFFFFFFFFuL
| U128 ->
let x = UInt128.uint64_to_uint128 0xFFFFFFFFFFFFFFFFuL in
let y = (UInt128.shift_left x 64ul) `UInt128.add` x in
assert_norm (UInt128.v y == pow2 128 - 1);
y
| _ -> mk_int (-1)
let zeros t l = mk_int 0
[@(strict_on_arguments [0])]
let add_mod #t #l a b =
match t with
| U1 -> UInt8.rem (UInt8.add_mod a b) 2uy
| U8 -> UInt8.add_mod a b
| U16 -> UInt16.add_mod a b
| U32 -> UInt32.add_mod a b
| U64 -> UInt64.add_mod a b
| U128 -> UInt128.add_mod a b
let add_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let add #t #l a b =
match t with
| U1 -> UInt8.add a b
| U8 -> UInt8.add a b
| U16 -> UInt16.add a b
| U32 -> UInt32.add a b
| U64 -> UInt64.add a b
| U128 -> UInt128.add a b
| S8 -> Int8.add a b
| S16 -> Int16.add a b
| S32 -> Int32.add a b
| S64 -> Int64.add a b
| S128 -> Int128.add a b
let add_lemma #t #l a b = ()
#push-options "--max_fuel 1"
[@(strict_on_arguments [0])]
let incr #t #l a =
match t with
| U1 -> UInt8.add a 1uy
| U8 -> UInt8.add a 1uy
| U16 -> UInt16.add a 1us
| U32 -> UInt32.add a 1ul
| U64 -> UInt64.add a 1uL
| U128 -> UInt128.add a (UInt128.uint_to_t 1)
| S8 -> Int8.add a 1y
| S16 -> Int16.add a 1s
| S32 -> Int32.add a 1l
| S64 -> Int64.add a 1L
| S128 -> Int128.add a (Int128.int_to_t 1)
let incr_lemma #t #l a = ()
#pop-options
[@(strict_on_arguments [0])]
let mul_mod #t #l a b =
match t with
| U1 -> UInt8.mul_mod a b
| U8 -> UInt8.mul_mod a b
| U16 -> UInt16.mul_mod a b
| U32 -> UInt32.mul_mod a b
| U64 -> UInt64.mul_mod a b
let mul_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let mul #t #l a b =
match t with
| U1 -> UInt8.mul a b
| U8 -> UInt8.mul a b
| U16 -> UInt16.mul a b
| U32 -> UInt32.mul a b
| U64 -> UInt64.mul a b
| S8 -> Int8.mul a b
| S16 -> Int16.mul a b
| S32 -> Int32.mul a b
| S64 -> Int64.mul a b
let mul_lemma #t #l a b = ()
let mul64_wide a b = UInt128.mul_wide a b
let mul64_wide_lemma a b = ()
let mul_s64_wide a b = Int128.mul_wide a b
let mul_s64_wide_lemma a b = ()
[@(strict_on_arguments [0])]
let sub_mod #t #l a b =
match t with
| U1 -> UInt8.rem (UInt8.sub_mod a b) 2uy
| U8 -> UInt8.sub_mod a b
| U16 -> UInt16.sub_mod a b
| U32 -> UInt32.sub_mod a b
| U64 -> UInt64.sub_mod a b
| U128 -> UInt128.sub_mod a b
let sub_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let sub #t #l a b =
match t with
| U1 -> UInt8.sub a b
| U8 -> UInt8.sub a b
| U16 -> UInt16.sub a b
| U32 -> UInt32.sub a b
| U64 -> UInt64.sub a b
| U128 -> UInt128.sub a b
| S8 -> Int8.sub a b
| S16 -> Int16.sub a b
| S32 -> Int32.sub a b
| S64 -> Int64.sub a b
| S128 -> Int128.sub a b
let sub_lemma #t #l a b = ()
#push-options "--max_fuel 1"
[@(strict_on_arguments [0])]
let decr #t #l a =
match t with
| U1 -> UInt8.sub a 1uy
| U8 -> UInt8.sub a 1uy
| U16 -> UInt16.sub a 1us
| U32 -> UInt32.sub a 1ul
| U64 -> UInt64.sub a 1uL
| U128 -> UInt128.sub a (UInt128.uint_to_t 1)
| S8 -> Int8.sub a 1y
| S16 -> Int16.sub a 1s
| S32 -> Int32.sub a 1l
| S64 -> Int64.sub a 1L
| S128 -> Int128.sub a (Int128.int_to_t 1)
let decr_lemma #t #l a = ()
#pop-options
[@(strict_on_arguments [0])]
let logxor #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logxor 0uy 0uy == 0uy);
assert_norm (UInt8.logxor 0uy 1uy == 1uy);
assert_norm (UInt8.logxor 1uy 0uy == 1uy);
assert_norm (UInt8.logxor 1uy 1uy == 0uy);
UInt8.logxor a b
| U8 -> UInt8.logxor a b
| U16 -> UInt16.logxor a b
| U32 -> UInt32.logxor a b
| U64 -> UInt64.logxor a b
| U128 -> UInt128.logxor a b
| S8 -> Int8.logxor a b
| S16 -> Int16.logxor a b
| S32 -> Int32.logxor a b
| S64 -> Int64.logxor a b
| S128 -> Int128.logxor a b
#push-options "--max_fuel 1"
val logxor_lemma_: #t:inttype -> #l:secrecy_level -> a:int_t t l -> b:int_t t l -> Lemma
(v (a `logxor` (a `logxor` b)) == v b)
let logxor_lemma_ #t #l a b =
match t with
| U1 | U8 | U16 | U32 | U64 | U128 ->
UInt.logxor_associative #(bits t) (v a) (v a) (v b);
UInt.logxor_self #(bits t) (v a);
UInt.logxor_commutative #(bits t) 0 (v b);
UInt.logxor_lemma_1 #(bits t) (v b)
| S8 | S16 | S32 | S64 | S128 ->
Int.logxor_associative #(bits t) (v a) (v a) (v b);
Int.logxor_self #(bits t) (v a);
Int.logxor_commutative #(bits t) 0 (v b);
Int.logxor_lemma_1 #(bits t) (v b)
let logxor_lemma #t #l a b =
logxor_lemma_ #t a b;
v_extensionality (logxor a (logxor a b)) b;
begin
match t with
| U1 | U8 | U16 | U32 | U64 | U128 -> UInt.logxor_commutative #(bits t) (v a) (v b)
| S8 | S16 | S32 | S64 | S128 -> Int.logxor_commutative #(bits t) (v a) (v b)
end;
v_extensionality (logxor a (logxor b a)) b;
begin
match t with
| U1 | U8 | U16 | U32 | U64 | U128 -> UInt.logxor_lemma_1 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.logxor_lemma_1 #(bits t) (v a)
end;
v_extensionality (logxor a (mk_int #t #l 0)) a
let logxor_lemma1 #t #l a b =
match v a, v b with
| _, 0 ->
UInt.logxor_lemma_1 #(bits t) (v a)
| 0, _ ->
UInt.logxor_commutative #(bits t) (v a) (v b);
UInt.logxor_lemma_1 #(bits t) (v b)
| 1, 1 ->
v_extensionality a b;
UInt.logxor_self #(bits t) (v a)
let logxor_spec #t #l a b =
match t with
| U1 ->
assert_norm (u1 0 `logxor` u1 0 == u1 0 /\ u1 0 `logxor` u1 1 == u1 1);
assert_norm (u1 1 `logxor` u1 0 == u1 1 /\ u1 1 `logxor` u1 1 == u1 0);
assert_norm (0 `logxor_v #U1` 0 == 0 /\ 0 `logxor_v #U1` 1 == 1);
assert_norm (1 `logxor_v #U1` 0 == 1 /\ 1 `logxor_v #U1` 1 == 0)
| _ -> ()
#pop-options
[@(strict_on_arguments [0])]
let logand #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logand 0uy 0uy == 0uy);
assert_norm (UInt8.logand 0uy 1uy == 0uy);
assert_norm (UInt8.logand 1uy 0uy == 0uy);
assert_norm (UInt8.logand 1uy 1uy == 1uy);
UInt8.logand a b
| U8 -> UInt8.logand a b
| U16 -> UInt16.logand a b
| U32 -> UInt32.logand a b
| U64 -> UInt64.logand a b
| U128 -> UInt128.logand a b
| S8 -> Int8.logand a b
| S16 -> Int16.logand a b
| S32 -> Int32.logand a b
| S64 -> Int64.logand a b
| S128 -> Int128.logand a b
let logand_zeros #t #l a =
match t with
| U1 -> assert_norm (u1 0 `logand` zeros U1 l == u1 0 /\ u1 1 `logand` zeros U1 l == u1 0)
| U8 | U16 | U32 | U64 | U128 -> UInt.logand_lemma_1 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.logand_lemma_1 #(bits t) (v a)
let logand_ones #t #l a =
match t with
| U1 -> assert_norm (u1 0 `logand` ones U1 l == u1 0 /\ u1 1 `logand` ones U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logand_lemma_2 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.logand_lemma_2 #(bits t) (v a)
let logand_lemma #t #l a b =
logand_zeros #t #l b;
logand_ones #t #l b;
match t with
| U1 ->
assert_norm (u1 0 `logand` zeros U1 l == u1 0 /\ u1 1 `logand` zeros U1 l == u1 0);
assert_norm (u1 0 `logand` ones U1 l == u1 0 /\ u1 1 `logand` ones U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logand_commutative #(bits t) (v a) (v b)
| S8 | S16 | S32 | S64 | S128 -> Int.logand_commutative #(bits t) (v a) (v b)
let logand_spec #t #l a b =
match t with
| U1 ->
assert_norm (u1 0 `logand` u1 0 == u1 0 /\ u1 0 `logand` u1 1 == u1 0);
assert_norm (u1 1 `logand` u1 0 == u1 0 /\ u1 1 `logand` u1 1 == u1 1);
assert_norm (0 `logand_v #U1` 0 == 0 /\ 0 `logand_v #U1` 1 == 0);
assert_norm (1 `logand_v #U1` 0 == 0 /\ 1 `logand_v #U1` 1 == 1)
| _ -> ()
let logand_le #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logand 0uy 0uy == 0uy);
assert_norm (UInt8.logand 0uy 1uy == 0uy);
assert_norm (UInt8.logand 1uy 0uy == 0uy);
assert_norm (UInt8.logand 1uy 1uy == 1uy)
| U8 -> UInt.logand_le (UInt.to_uint_t 8 (v a)) (UInt.to_uint_t 8 (v b))
| U16 -> UInt.logand_le (UInt.to_uint_t 16 (v a)) (UInt.to_uint_t 16 (v b))
| U32 -> UInt.logand_le (UInt.to_uint_t 32 (v a)) (UInt.to_uint_t 32 (v b))
| U64 -> UInt.logand_le (UInt.to_uint_t 64 (v a)) (UInt.to_uint_t 64 (v b))
| U128 -> UInt.logand_le (UInt.to_uint_t 128 (v a)) (UInt.to_uint_t 128 (v b))
let logand_mask #t #l a b m =
match t with
| U1 ->
assert_norm (UInt8.logand 0uy 0uy == 0uy);
assert_norm (UInt8.logand 0uy 1uy == 0uy);
assert_norm (UInt8.logand 1uy 0uy == 0uy);
assert_norm (UInt8.logand 1uy 1uy == 1uy)
| U8 -> UInt.logand_mask (UInt.to_uint_t 8 (v a)) m
| U16 -> UInt.logand_mask (UInt.to_uint_t 16 (v a)) m
| U32 -> UInt.logand_mask (UInt.to_uint_t 32 (v a)) m
| U64 -> UInt.logand_mask (UInt.to_uint_t 64 (v a)) m
| U128 -> UInt.logand_mask (UInt.to_uint_t 128 (v a)) m
[@(strict_on_arguments [0])]
let logor #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logor 0uy 0uy == 0uy);
assert_norm (UInt8.logor 0uy 1uy == 1uy);
assert_norm (UInt8.logor 1uy 0uy == 1uy);
assert_norm (UInt8.logor 1uy 1uy == 1uy);
UInt8.logor a b
| U8 -> UInt8.logor a b
| U16 -> UInt16.logor a b
| U32 -> UInt32.logor a b
| U64 -> UInt64.logor a b
| U128 -> UInt128.logor a b
| S8 -> Int8.logor a b
| S16 -> Int16.logor a b
| S32 -> Int32.logor a b
| S64 -> Int64.logor a b
| S128 -> Int128.logor a b
#push-options "--max_fuel 1"
let logor_disjoint #t #l a b m =
if m > 0 then
begin
UInt.logor_disjoint #(bits t) (v b) (v a) m;
UInt.logor_commutative #(bits t) (v b) (v a)
end
else
begin
UInt.logor_commutative #(bits t) (v a) (v b);
UInt.logor_lemma_1 #(bits t) (v b)
end
#pop-options
let logor_zeros #t #l a =
match t with
|U1 -> assert_norm(u1 0 `logor` zeros U1 l == u1 0 /\ u1 1 `logor` zeros U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logor_lemma_1 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.nth_lemma #(bits t) (Int.logor #(bits t) (v a) (Int.zero (bits t))) (v a) | false | false | Lib.IntTypes.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 200,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val logor_ones: #t: inttype -> #l: secrecy_level -> a: int_t t l ->
Lemma (v (a `logor` ones t l) == ones_v t) | [] | Lib.IntTypes.logor_ones | {
"file_name": "lib/Lib.IntTypes.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | a: Lib.IntTypes.int_t t l
-> FStar.Pervasives.Lemma
(ensures Lib.IntTypes.v (Lib.IntTypes.logor a (Lib.IntTypes.ones t l)) == Lib.IntTypes.ones_v t) | {
"end_col": 116,
"end_line": 616,
"start_col": 2,
"start_line": 613
} |
FStar.Pervasives.Lemma | val eq_mask_lemma_unsigned: #t:inttype{unsigned t} -> a:int_t t SEC -> b:int_t t SEC -> Lemma
(if v a = v b then v (eq_mask a b) == ones_v t
else v (eq_mask a b) == 0) | [
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let eq_mask_lemma_unsigned #t a b =
match t with
| U1 ->
assert_norm (
logxor (u1 0) (u1 0) == u1 0 /\ logxor (u1 0) (u1 1) == u1 1 /\
logxor (u1 1) (u1 0) == u1 1 /\ logxor (u1 1) (u1 1) == u1 0 /\
lognot (u1 1) == u1 0 /\ lognot (u1 0) == u1 1)
| U8 | U16 | U32 | U64 | U128 -> () | val eq_mask_lemma_unsigned: #t:inttype{unsigned t} -> a:int_t t SEC -> b:int_t t SEC -> Lemma
(if v a = v b then v (eq_mask a b) == ones_v t
else v (eq_mask a b) == 0)
let eq_mask_lemma_unsigned #t a b = | false | null | true | match t with
| U1 ->
assert_norm (logxor (u1 0) (u1 0) == u1 0 /\ logxor (u1 0) (u1 1) == u1 1 /\
logxor (u1 1) (u1 0) == u1 1 /\ logxor (u1 1) (u1 1) == u1 0 /\ lognot (u1 1) == u1 0 /\
lognot (u1 0) == u1 1)
| U8 | U16 | U32 | U64 | U128 -> () | {
"checked_file": "Lib.IntTypes.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt8.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt16.fsti.checked",
"FStar.UInt128.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int8.fsti.checked",
"FStar.Int64.fsti.checked",
"FStar.Int32.fsti.checked",
"FStar.Int16.fsti.checked",
"FStar.Int128.fsti.checked",
"FStar.Int.Cast.Full.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Int.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "Lib.IntTypes.fst"
} | [
"lemma"
] | [
"Lib.IntTypes.inttype",
"Prims.b2t",
"Lib.IntTypes.unsigned",
"Lib.IntTypes.int_t",
"Lib.IntTypes.SEC",
"FStar.Pervasives.assert_norm",
"Prims.l_and",
"Prims.eq2",
"Lib.IntTypes.U1",
"Lib.IntTypes.logxor",
"Lib.IntTypes.u1",
"Lib.IntTypes.lognot",
"Prims.unit"
] | [] | module Lib.IntTypes
open FStar.Math.Lemmas
#push-options "--max_fuel 0 --max_ifuel 1 --z3rlimit 200"
let pow2_2 _ = assert_norm (pow2 2 = 4)
let pow2_3 _ = assert_norm (pow2 3 = 8)
let pow2_4 _ = assert_norm (pow2 4 = 16)
let pow2_127 _ = assert_norm (pow2 127 = 0x80000000000000000000000000000000)
let bits_numbytes t = ()
let sec_int_t t = pub_int_t t
let sec_int_v #t u = pub_int_v u
let secret #t x = x
[@(strict_on_arguments [0])]
let mk_int #t #l x =
match t with
| U1 -> UInt8.uint_to_t x
| U8 -> UInt8.uint_to_t x
| U16 -> UInt16.uint_to_t x
| U32 -> UInt32.uint_to_t x
| U64 -> UInt64.uint_to_t x
| U128 -> UInt128.uint_to_t x
| S8 -> Int8.int_to_t x
| S16 -> Int16.int_to_t x
| S32 -> Int32.int_to_t x
| S64 -> Int64.int_to_t x
| S128 -> Int128.int_to_t x
val v_extensionality:
#t:inttype
-> #l:secrecy_level
-> a:int_t t l
-> b:int_t t l
-> Lemma
(requires v a == v b)
(ensures a == b)
let v_extensionality #t #l a b =
match t with
| U1 -> ()
| U8 -> UInt8.v_inj a b
| U16 -> UInt16.v_inj a b
| U32 -> UInt32.v_inj a b
| U64 -> UInt64.v_inj a b
| U128 -> UInt128.v_inj a b
| S8 -> Int8.v_inj a b
| S16 -> Int16.v_inj a b
| S32 -> Int32.v_inj a b
| S64 -> Int64.v_inj a b
| S128 -> Int128.v_inj a b
let v_injective #t #l a =
v_extensionality a (mk_int (v a))
let v_mk_int #t #l n = ()
let u128 n = FStar.UInt128.uint64_to_uint128 (u64 n)
// KaRaMeL will extract this to FStar_Int128_int_to_t, which isn't provided
// We'll need to have FStar.Int128.int64_to_int128 to support int128_t literals
let i128 n =
assert_norm (pow2 (bits S64 - 1) <= pow2 (bits S128 - 1));
sint #S128 #SEC n
let size_to_uint32 x = x
let size_to_uint64 x = Int.Cast.uint32_to_uint64 x
let byte_to_uint8 x = x
let byte_to_int8 x = Int.Cast.uint8_to_int8 x
let op_At_Percent = Int.op_At_Percent
// FStar.UInt128 gets special treatment in KaRaMeL. There is no
// equivalent for FStar.Int128 at the moment, so we use the three
// assumed cast operators below.
//
// Using them will fail at runtime with an informative message.
// The commented-out implementations show that they are realizable.
//
// When support for `FStar.Int128` is added KaRaMeL, these casts must
// be added as special cases. When using builtin compiler support for
// `int128_t`, they can be implemented directly as C casts without
// undefined or implementation-defined behaviour.
assume
val uint128_to_int128: a:UInt128.t{v a <= maxint S128} -> b:Int128.t{Int128.v b == UInt128.v a}
//let uint128_to_int128 a = Int128.int_to_t (v a)
assume
val int128_to_uint128: a:Int128.t -> b:UInt128.t{UInt128.v b == Int128.v a % pow2 128}
//let int128_to_uint128 a = mk_int (v a % pow2 128)
assume
val int64_to_int128: a:Int64.t -> b:Int128.t{Int128.v b == Int64.v a}
//let int64_to_int128 a = Int128.int_to_t (v a)
val uint64_to_int128: a:UInt64.t -> b:Int128.t{Int128.v b == UInt64.v a}
let uint64_to_int128 a = uint128_to_int128 (Int.Cast.Full.uint64_to_uint128 a)
val int64_to_uint128: a:Int64.t -> b:UInt128.t{UInt128.v b == Int64.v a % pow2 128}
let int64_to_uint128 a = int128_to_uint128 (int64_to_int128 a)
val int128_to_uint64: a:Int128.t -> b:UInt64.t{UInt64.v b == Int128.v a % pow2 64}
let int128_to_uint64 a = Int.Cast.Full.uint128_to_uint64 (int128_to_uint128 a)
#push-options "--z3rlimit 1000"
[@(strict_on_arguments [0;2])]
let cast #t #l t' l' u =
assert_norm (pow2 8 = 2 * pow2 7);
assert_norm (pow2 16 = 2 * pow2 15);
assert_norm (pow2 64 * pow2 64 = pow2 128);
assert_norm (pow2 16 * pow2 48 = pow2 64);
assert_norm (pow2 8 * pow2 56 = pow2 64);
assert_norm (pow2 32 * pow2 32 = pow2 64);
modulo_modulo_lemma (v u) (pow2 32) (pow2 32);
modulo_modulo_lemma (v u) (pow2 64) (pow2 64);
modulo_modulo_lemma (v u) (pow2 128) (pow2 64);
modulo_modulo_lemma (v u) (pow2 16) (pow2 48);
modulo_modulo_lemma (v u) (pow2 8) (pow2 56);
let open FStar.Int.Cast in
let open FStar.Int.Cast.Full in
match t, t' with
| U1, U1 -> u
| U1, U8 -> u
| U1, U16 -> uint8_to_uint16 u
| U1, U32 -> uint8_to_uint32 u
| U1, U64 -> uint8_to_uint64 u
| U1, U128 -> UInt128.uint64_to_uint128 (uint8_to_uint64 u)
| U1, S8 -> uint8_to_int8 u
| U1, S16 -> uint8_to_int16 u
| U1, S32 -> uint8_to_int32 u
| U1, S64 -> uint8_to_int64 u
| U1, S128 -> uint64_to_int128 (uint8_to_uint64 u)
| U8, U1 -> UInt8.rem u 2uy
| U8, U8 -> u
| U8, U16 -> uint8_to_uint16 u
| U8, U32 -> uint8_to_uint32 u
| U8, U64 -> uint8_to_uint64 u
| U8, U128 -> UInt128.uint64_to_uint128 (uint8_to_uint64 u)
| U8, S8 -> uint8_to_int8 u
| U8, S16 -> uint8_to_int16 u
| U8, S32 -> uint8_to_int32 u
| U8, S64 -> uint8_to_int64 u
| U8, S128 -> uint64_to_int128 (uint8_to_uint64 u)
| U16, U1 -> UInt8.rem (uint16_to_uint8 u) 2uy
| U16, U8 -> uint16_to_uint8 u
| U16, U16 -> u
| U16, U32 -> uint16_to_uint32 u
| U16, U64 -> uint16_to_uint64 u
| U16, U128 -> UInt128.uint64_to_uint128 (uint16_to_uint64 u)
| U16, S8 -> uint16_to_int8 u
| U16, S16 -> uint16_to_int16 u
| U16, S32 -> uint16_to_int32 u
| U16, S64 -> uint16_to_int64 u
| U16, S128 -> uint64_to_int128 (uint16_to_uint64 u)
| U32, U1 -> UInt8.rem (uint32_to_uint8 u) 2uy
| U32, U8 -> uint32_to_uint8 u
| U32, U16 -> uint32_to_uint16 u
| U32, U32 -> u
| U32, U64 -> uint32_to_uint64 u
| U32, U128 -> UInt128.uint64_to_uint128 (uint32_to_uint64 u)
| U32, S8 -> uint32_to_int8 u
| U32, S16 -> uint32_to_int16 u
| U32, S32 -> uint32_to_int32 u
| U32, S64 -> uint32_to_int64 u
| U32, S128 -> uint64_to_int128 (uint32_to_uint64 u)
| U64, U1 -> UInt8.rem (uint64_to_uint8 u) 2uy
| U64, U8 -> uint64_to_uint8 u
| U64, U16 -> uint64_to_uint16 u
| U64, U32 -> uint64_to_uint32 u
| U64, U64 -> u
| U64, U128 -> UInt128.uint64_to_uint128 u
| U64, S8 -> uint64_to_int8 u
| U64, S16 -> uint64_to_int16 u
| U64, S32 -> uint64_to_int32 u
| U64, S64 -> uint64_to_int64 u
| U64, S128 -> uint64_to_int128 u
| U128, U1 -> UInt8.rem (uint64_to_uint8 (uint128_to_uint64 u)) 2uy
| U128, U8 -> uint64_to_uint8 (UInt128.uint128_to_uint64 u)
| U128, U16 -> uint64_to_uint16 (UInt128.uint128_to_uint64 u)
| U128, U32 -> uint64_to_uint32 (UInt128.uint128_to_uint64 u)
| U128, U64 -> UInt128.uint128_to_uint64 u
| U128, U128 -> u
| U128, S8 -> uint64_to_int8 (UInt128.uint128_to_uint64 u)
| U128, S16 -> uint64_to_int16 (UInt128.uint128_to_uint64 u)
| U128, S32 -> uint64_to_int32 (UInt128.uint128_to_uint64 u)
| U128, S64 -> uint64_to_int64 (UInt128.uint128_to_uint64 u)
| U128, S128 -> uint128_to_int128 u
| S8, U1 -> UInt8.rem (int8_to_uint8 u) 2uy
| S8, U8 -> int8_to_uint8 u
| S8, U16 -> int8_to_uint16 u
| S8, U32 -> int8_to_uint32 u
| S8, U64 -> int8_to_uint64 u
| S8, U128 -> int64_to_uint128 (int8_to_int64 u)
| S8, S8 -> u
| S8, S16 -> int8_to_int16 u
| S8, S32 -> int8_to_int32 u
| S8, S64 -> int8_to_int64 u
| S8, S128 -> int64_to_int128 (int8_to_int64 u)
| S16, U1 -> UInt8.rem (int16_to_uint8 u) 2uy
| S16, U8 -> int16_to_uint8 u
| S16, U16 -> int16_to_uint16 u
| S16, U32 -> int16_to_uint32 u
| S16, U64 -> int16_to_uint64 u
| S16, U128 -> int64_to_uint128 (int16_to_int64 u)
| S16, S8 -> int16_to_int8 u
| S16, S16 -> u
| S16, S32 -> int16_to_int32 u
| S16, S64 -> int16_to_int64 u
| S16, S128 -> int64_to_int128 (int16_to_int64 u)
| S32, U1 -> UInt8.rem (int32_to_uint8 u) 2uy
| S32, U8 -> int32_to_uint8 u
| S32, U16 -> int32_to_uint16 u
| S32, U32 -> int32_to_uint32 u
| S32, U64 -> int32_to_uint64 u
| S32, U128 -> int64_to_uint128 (int32_to_int64 u)
| S32, S8 -> int32_to_int8 u
| S32, S16 -> int32_to_int16 u
| S32, S32 -> u
| S32, S64 -> int32_to_int64 u
| S32, S128 -> int64_to_int128 (int32_to_int64 u)
| S64, U1 -> UInt8.rem (int64_to_uint8 u) 2uy
| S64, U8 -> int64_to_uint8 u
| S64, U16 -> int64_to_uint16 u
| S64, U32 -> int64_to_uint32 u
| S64, U64 -> int64_to_uint64 u
| S64, U128 -> int64_to_uint128 u
| S64, S8 -> int64_to_int8 u
| S64, S16 -> int64_to_int16 u
| S64, S32 -> int64_to_int32 u
| S64, S64 -> u
| S64, S128 -> int64_to_int128 u
| S128, U1 -> UInt8.rem (uint64_to_uint8 (int128_to_uint64 u)) 2uy
| S128, U8 -> uint64_to_uint8 (int128_to_uint64 u)
| S128, U16 -> uint64_to_uint16 (int128_to_uint64 u)
| S128, U32 -> uint64_to_uint32 (int128_to_uint64 u)
| S128, U64 -> int128_to_uint64 u
| S128, U128 -> int128_to_uint128 u
| S128, S8 -> uint64_to_int8 (int128_to_uint64 u)
| S128, S16 -> uint64_to_int16 (int128_to_uint64 u)
| S128, S32 -> uint64_to_int32 (int128_to_uint64 u)
| S128, S64 -> uint64_to_int64 (int128_to_uint64 u)
| S128, S128 -> u
#pop-options
[@(strict_on_arguments [0])]
let ones t l =
match t with
| U1 -> 0x1uy
| U8 -> 0xFFuy
| U16 -> 0xFFFFus
| U32 -> 0xFFFFFFFFul
| U64 -> 0xFFFFFFFFFFFFFFFFuL
| U128 ->
let x = UInt128.uint64_to_uint128 0xFFFFFFFFFFFFFFFFuL in
let y = (UInt128.shift_left x 64ul) `UInt128.add` x in
assert_norm (UInt128.v y == pow2 128 - 1);
y
| _ -> mk_int (-1)
let zeros t l = mk_int 0
[@(strict_on_arguments [0])]
let add_mod #t #l a b =
match t with
| U1 -> UInt8.rem (UInt8.add_mod a b) 2uy
| U8 -> UInt8.add_mod a b
| U16 -> UInt16.add_mod a b
| U32 -> UInt32.add_mod a b
| U64 -> UInt64.add_mod a b
| U128 -> UInt128.add_mod a b
let add_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let add #t #l a b =
match t with
| U1 -> UInt8.add a b
| U8 -> UInt8.add a b
| U16 -> UInt16.add a b
| U32 -> UInt32.add a b
| U64 -> UInt64.add a b
| U128 -> UInt128.add a b
| S8 -> Int8.add a b
| S16 -> Int16.add a b
| S32 -> Int32.add a b
| S64 -> Int64.add a b
| S128 -> Int128.add a b
let add_lemma #t #l a b = ()
#push-options "--max_fuel 1"
[@(strict_on_arguments [0])]
let incr #t #l a =
match t with
| U1 -> UInt8.add a 1uy
| U8 -> UInt8.add a 1uy
| U16 -> UInt16.add a 1us
| U32 -> UInt32.add a 1ul
| U64 -> UInt64.add a 1uL
| U128 -> UInt128.add a (UInt128.uint_to_t 1)
| S8 -> Int8.add a 1y
| S16 -> Int16.add a 1s
| S32 -> Int32.add a 1l
| S64 -> Int64.add a 1L
| S128 -> Int128.add a (Int128.int_to_t 1)
let incr_lemma #t #l a = ()
#pop-options
[@(strict_on_arguments [0])]
let mul_mod #t #l a b =
match t with
| U1 -> UInt8.mul_mod a b
| U8 -> UInt8.mul_mod a b
| U16 -> UInt16.mul_mod a b
| U32 -> UInt32.mul_mod a b
| U64 -> UInt64.mul_mod a b
let mul_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let mul #t #l a b =
match t with
| U1 -> UInt8.mul a b
| U8 -> UInt8.mul a b
| U16 -> UInt16.mul a b
| U32 -> UInt32.mul a b
| U64 -> UInt64.mul a b
| S8 -> Int8.mul a b
| S16 -> Int16.mul a b
| S32 -> Int32.mul a b
| S64 -> Int64.mul a b
let mul_lemma #t #l a b = ()
let mul64_wide a b = UInt128.mul_wide a b
let mul64_wide_lemma a b = ()
let mul_s64_wide a b = Int128.mul_wide a b
let mul_s64_wide_lemma a b = ()
[@(strict_on_arguments [0])]
let sub_mod #t #l a b =
match t with
| U1 -> UInt8.rem (UInt8.sub_mod a b) 2uy
| U8 -> UInt8.sub_mod a b
| U16 -> UInt16.sub_mod a b
| U32 -> UInt32.sub_mod a b
| U64 -> UInt64.sub_mod a b
| U128 -> UInt128.sub_mod a b
let sub_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let sub #t #l a b =
match t with
| U1 -> UInt8.sub a b
| U8 -> UInt8.sub a b
| U16 -> UInt16.sub a b
| U32 -> UInt32.sub a b
| U64 -> UInt64.sub a b
| U128 -> UInt128.sub a b
| S8 -> Int8.sub a b
| S16 -> Int16.sub a b
| S32 -> Int32.sub a b
| S64 -> Int64.sub a b
| S128 -> Int128.sub a b
let sub_lemma #t #l a b = ()
#push-options "--max_fuel 1"
[@(strict_on_arguments [0])]
let decr #t #l a =
match t with
| U1 -> UInt8.sub a 1uy
| U8 -> UInt8.sub a 1uy
| U16 -> UInt16.sub a 1us
| U32 -> UInt32.sub a 1ul
| U64 -> UInt64.sub a 1uL
| U128 -> UInt128.sub a (UInt128.uint_to_t 1)
| S8 -> Int8.sub a 1y
| S16 -> Int16.sub a 1s
| S32 -> Int32.sub a 1l
| S64 -> Int64.sub a 1L
| S128 -> Int128.sub a (Int128.int_to_t 1)
let decr_lemma #t #l a = ()
#pop-options
[@(strict_on_arguments [0])]
let logxor #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logxor 0uy 0uy == 0uy);
assert_norm (UInt8.logxor 0uy 1uy == 1uy);
assert_norm (UInt8.logxor 1uy 0uy == 1uy);
assert_norm (UInt8.logxor 1uy 1uy == 0uy);
UInt8.logxor a b
| U8 -> UInt8.logxor a b
| U16 -> UInt16.logxor a b
| U32 -> UInt32.logxor a b
| U64 -> UInt64.logxor a b
| U128 -> UInt128.logxor a b
| S8 -> Int8.logxor a b
| S16 -> Int16.logxor a b
| S32 -> Int32.logxor a b
| S64 -> Int64.logxor a b
| S128 -> Int128.logxor a b
#push-options "--max_fuel 1"
val logxor_lemma_: #t:inttype -> #l:secrecy_level -> a:int_t t l -> b:int_t t l -> Lemma
(v (a `logxor` (a `logxor` b)) == v b)
let logxor_lemma_ #t #l a b =
match t with
| U1 | U8 | U16 | U32 | U64 | U128 ->
UInt.logxor_associative #(bits t) (v a) (v a) (v b);
UInt.logxor_self #(bits t) (v a);
UInt.logxor_commutative #(bits t) 0 (v b);
UInt.logxor_lemma_1 #(bits t) (v b)
| S8 | S16 | S32 | S64 | S128 ->
Int.logxor_associative #(bits t) (v a) (v a) (v b);
Int.logxor_self #(bits t) (v a);
Int.logxor_commutative #(bits t) 0 (v b);
Int.logxor_lemma_1 #(bits t) (v b)
let logxor_lemma #t #l a b =
logxor_lemma_ #t a b;
v_extensionality (logxor a (logxor a b)) b;
begin
match t with
| U1 | U8 | U16 | U32 | U64 | U128 -> UInt.logxor_commutative #(bits t) (v a) (v b)
| S8 | S16 | S32 | S64 | S128 -> Int.logxor_commutative #(bits t) (v a) (v b)
end;
v_extensionality (logxor a (logxor b a)) b;
begin
match t with
| U1 | U8 | U16 | U32 | U64 | U128 -> UInt.logxor_lemma_1 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.logxor_lemma_1 #(bits t) (v a)
end;
v_extensionality (logxor a (mk_int #t #l 0)) a
let logxor_lemma1 #t #l a b =
match v a, v b with
| _, 0 ->
UInt.logxor_lemma_1 #(bits t) (v a)
| 0, _ ->
UInt.logxor_commutative #(bits t) (v a) (v b);
UInt.logxor_lemma_1 #(bits t) (v b)
| 1, 1 ->
v_extensionality a b;
UInt.logxor_self #(bits t) (v a)
let logxor_spec #t #l a b =
match t with
| U1 ->
assert_norm (u1 0 `logxor` u1 0 == u1 0 /\ u1 0 `logxor` u1 1 == u1 1);
assert_norm (u1 1 `logxor` u1 0 == u1 1 /\ u1 1 `logxor` u1 1 == u1 0);
assert_norm (0 `logxor_v #U1` 0 == 0 /\ 0 `logxor_v #U1` 1 == 1);
assert_norm (1 `logxor_v #U1` 0 == 1 /\ 1 `logxor_v #U1` 1 == 0)
| _ -> ()
#pop-options
[@(strict_on_arguments [0])]
let logand #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logand 0uy 0uy == 0uy);
assert_norm (UInt8.logand 0uy 1uy == 0uy);
assert_norm (UInt8.logand 1uy 0uy == 0uy);
assert_norm (UInt8.logand 1uy 1uy == 1uy);
UInt8.logand a b
| U8 -> UInt8.logand a b
| U16 -> UInt16.logand a b
| U32 -> UInt32.logand a b
| U64 -> UInt64.logand a b
| U128 -> UInt128.logand a b
| S8 -> Int8.logand a b
| S16 -> Int16.logand a b
| S32 -> Int32.logand a b
| S64 -> Int64.logand a b
| S128 -> Int128.logand a b
let logand_zeros #t #l a =
match t with
| U1 -> assert_norm (u1 0 `logand` zeros U1 l == u1 0 /\ u1 1 `logand` zeros U1 l == u1 0)
| U8 | U16 | U32 | U64 | U128 -> UInt.logand_lemma_1 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.logand_lemma_1 #(bits t) (v a)
let logand_ones #t #l a =
match t with
| U1 -> assert_norm (u1 0 `logand` ones U1 l == u1 0 /\ u1 1 `logand` ones U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logand_lemma_2 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.logand_lemma_2 #(bits t) (v a)
let logand_lemma #t #l a b =
logand_zeros #t #l b;
logand_ones #t #l b;
match t with
| U1 ->
assert_norm (u1 0 `logand` zeros U1 l == u1 0 /\ u1 1 `logand` zeros U1 l == u1 0);
assert_norm (u1 0 `logand` ones U1 l == u1 0 /\ u1 1 `logand` ones U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logand_commutative #(bits t) (v a) (v b)
| S8 | S16 | S32 | S64 | S128 -> Int.logand_commutative #(bits t) (v a) (v b)
let logand_spec #t #l a b =
match t with
| U1 ->
assert_norm (u1 0 `logand` u1 0 == u1 0 /\ u1 0 `logand` u1 1 == u1 0);
assert_norm (u1 1 `logand` u1 0 == u1 0 /\ u1 1 `logand` u1 1 == u1 1);
assert_norm (0 `logand_v #U1` 0 == 0 /\ 0 `logand_v #U1` 1 == 0);
assert_norm (1 `logand_v #U1` 0 == 0 /\ 1 `logand_v #U1` 1 == 1)
| _ -> ()
let logand_le #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logand 0uy 0uy == 0uy);
assert_norm (UInt8.logand 0uy 1uy == 0uy);
assert_norm (UInt8.logand 1uy 0uy == 0uy);
assert_norm (UInt8.logand 1uy 1uy == 1uy)
| U8 -> UInt.logand_le (UInt.to_uint_t 8 (v a)) (UInt.to_uint_t 8 (v b))
| U16 -> UInt.logand_le (UInt.to_uint_t 16 (v a)) (UInt.to_uint_t 16 (v b))
| U32 -> UInt.logand_le (UInt.to_uint_t 32 (v a)) (UInt.to_uint_t 32 (v b))
| U64 -> UInt.logand_le (UInt.to_uint_t 64 (v a)) (UInt.to_uint_t 64 (v b))
| U128 -> UInt.logand_le (UInt.to_uint_t 128 (v a)) (UInt.to_uint_t 128 (v b))
let logand_mask #t #l a b m =
match t with
| U1 ->
assert_norm (UInt8.logand 0uy 0uy == 0uy);
assert_norm (UInt8.logand 0uy 1uy == 0uy);
assert_norm (UInt8.logand 1uy 0uy == 0uy);
assert_norm (UInt8.logand 1uy 1uy == 1uy)
| U8 -> UInt.logand_mask (UInt.to_uint_t 8 (v a)) m
| U16 -> UInt.logand_mask (UInt.to_uint_t 16 (v a)) m
| U32 -> UInt.logand_mask (UInt.to_uint_t 32 (v a)) m
| U64 -> UInt.logand_mask (UInt.to_uint_t 64 (v a)) m
| U128 -> UInt.logand_mask (UInt.to_uint_t 128 (v a)) m
[@(strict_on_arguments [0])]
let logor #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logor 0uy 0uy == 0uy);
assert_norm (UInt8.logor 0uy 1uy == 1uy);
assert_norm (UInt8.logor 1uy 0uy == 1uy);
assert_norm (UInt8.logor 1uy 1uy == 1uy);
UInt8.logor a b
| U8 -> UInt8.logor a b
| U16 -> UInt16.logor a b
| U32 -> UInt32.logor a b
| U64 -> UInt64.logor a b
| U128 -> UInt128.logor a b
| S8 -> Int8.logor a b
| S16 -> Int16.logor a b
| S32 -> Int32.logor a b
| S64 -> Int64.logor a b
| S128 -> Int128.logor a b
#push-options "--max_fuel 1"
let logor_disjoint #t #l a b m =
if m > 0 then
begin
UInt.logor_disjoint #(bits t) (v b) (v a) m;
UInt.logor_commutative #(bits t) (v b) (v a)
end
else
begin
UInt.logor_commutative #(bits t) (v a) (v b);
UInt.logor_lemma_1 #(bits t) (v b)
end
#pop-options
let logor_zeros #t #l a =
match t with
|U1 -> assert_norm(u1 0 `logor` zeros U1 l == u1 0 /\ u1 1 `logor` zeros U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logor_lemma_1 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.nth_lemma #(bits t) (Int.logor #(bits t) (v a) (Int.zero (bits t))) (v a)
let logor_ones #t #l a =
match t with
|U1 -> assert_norm(u1 0 `logor` ones U1 l == u1 1 /\ u1 1 `logor` ones U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logor_lemma_2 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.nth_lemma (Int.logor #(bits t) (v a) (Int.ones (bits t))) (Int.ones (bits t))
let logor_lemma #t #l a b =
logor_zeros #t #l b;
logor_ones #t #l b;
match t with
| U1 ->
assert_norm(u1 0 `logor` ones U1 l == u1 1 /\ u1 1 `logor` ones U1 l == u1 1);
assert_norm(u1 0 `logor` zeros U1 l == u1 0 /\ u1 1 `logor` zeros U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logor_commutative #(bits t) (v a) (v b)
| S8 | S16 | S32 | S64 | S128 -> Int.nth_lemma #(bits t) (Int.logor #(bits t) (v a) (v b)) (Int.logor #(bits t) (v b) (v a))
let logor_spec #t #l a b =
match t with
| U1 ->
assert_norm(u1 0 `logor` ones U1 l == u1 1 /\ u1 1 `logor` ones U1 l == u1 1);
assert_norm(u1 0 `logor` zeros U1 l == u1 0 /\ u1 1 `logor` zeros U1 l == u1 1);
assert_norm (0 `logor_v #U1` 0 == 0 /\ 0 `logor_v #U1` 1 == 1);
assert_norm (1 `logor_v #U1` 0 == 1 /\ 1 `logor_v #U1` 1 == 1)
| _ -> ()
[@(strict_on_arguments [0])]
let lognot #t #l a =
match t with
| U1 -> UInt8.rem (UInt8.lognot a) 2uy
| U8 -> UInt8.lognot a
| U16 -> UInt16.lognot a
| U32 -> UInt32.lognot a
| U64 -> UInt64.lognot a
| U128 -> UInt128.lognot a
| S8 -> Int8.lognot a
| S16 -> Int16.lognot a
| S32 -> Int32.lognot a
| S64 -> Int64.lognot a
| S128 -> Int128.lognot a
let lognot_lemma #t #l a =
match t with
|U1 -> assert_norm(lognot (u1 0) == u1 1 /\ lognot (u1 1) == u1 0)
| U8 | U16 | U32 | U64 | U128 ->
FStar.UInt.lognot_lemma_1 #(bits t);
UInt.nth_lemma (FStar.UInt.lognot #(bits t) (UInt.ones (bits t))) (UInt.zero (bits t))
| S8 | S16 | S32 | S64 | S128 ->
Int.nth_lemma (FStar.Int.lognot #(bits t) (Int.zero (bits t))) (Int.ones (bits t));
Int.nth_lemma (FStar.Int.lognot #(bits t) (Int.ones (bits t))) (Int.zero (bits t))
let lognot_spec #t #l a =
match t with
| U1 ->
assert_norm(lognot (u1 0) == u1 1 /\ lognot (u1 1) == u1 0);
assert_norm(lognot_v #U1 0 == 1 /\ lognot_v #U1 1 == 0)
| _ -> ()
[@(strict_on_arguments [0])]
let shift_right #t #l a b =
match t with
| U1 -> UInt8.shift_right a b
| U8 -> UInt8.shift_right a b
| U16 -> UInt16.shift_right a b
| U32 -> UInt32.shift_right a b
| U64 -> UInt64.shift_right a b
| U128 -> UInt128.shift_right a b
| S8 -> Int8.shift_arithmetic_right a b
| S16 -> Int16.shift_arithmetic_right a b
| S32 -> Int32.shift_arithmetic_right a b
| S64 -> Int64.shift_arithmetic_right a b
| S128 -> Int128.shift_arithmetic_right a b
val shift_right_value_aux_1: #n:pos{1 < n} -> a:Int.int_t n -> s:nat{n <= s} ->
Lemma (Int.shift_arithmetic_right #n a s = a / pow2 s)
let shift_right_value_aux_1 #n a s =
pow2_le_compat s n;
if a >= 0 then Int.sign_bit_positive a else Int.sign_bit_negative a
#push-options "--z3rlimit 200"
val shift_right_value_aux_2: #n:pos{1 < n} -> a:Int.int_t n ->
Lemma (Int.shift_arithmetic_right #n a 1 = a / 2)
let shift_right_value_aux_2 #n a =
if a >= 0 then
begin
Int.sign_bit_positive a;
UInt.shift_right_value_aux_3 #n a 1
end
else
begin
Int.sign_bit_negative a;
let a1 = Int.to_vec a in
let au = Int.to_uint a in
let sar = Int.shift_arithmetic_right #n a 1 in
let sar1 = Int.to_vec sar in
let sr = UInt.shift_right #n au 1 in
let sr1 = UInt.to_vec sr in
assert (Seq.equal (Seq.slice sar1 1 n) (Seq.slice sr1 1 n));
assert (Seq.equal sar1 (Seq.append (BitVector.ones_vec #1) (Seq.slice sr1 1 n)));
UInt.append_lemma #1 #(n-1) (BitVector.ones_vec #1) (Seq.slice sr1 1 n);
assert (Seq.equal (Seq.slice a1 0 (n-1)) (Seq.slice sar1 1 n));
UInt.slice_left_lemma a1 (n-1);
assert (sar + pow2 n = pow2 (n-1) + (au / 2));
pow2_double_sum (n-1);
assert (sar + pow2 (n-1) = (a + pow2 n) / 2);
pow2_double_mult (n-1);
lemma_div_plus a (pow2 (n-1)) 2;
assert (sar = a / 2)
end
val shift_right_value_aux_3: #n:pos -> a:Int.int_t n -> s:pos{s < n} ->
Lemma (ensures Int.shift_arithmetic_right #n a s = a / pow2 s)
(decreases s)
let rec shift_right_value_aux_3 #n a s =
if s = 1 then
shift_right_value_aux_2 #n a
else
begin
let a1 = Int.to_vec a in
assert (Seq.equal (BitVector.shift_arithmetic_right_vec #n a1 s)
(BitVector.shift_arithmetic_right_vec #n
(BitVector.shift_arithmetic_right_vec #n a1 (s-1)) 1));
assert (Int.shift_arithmetic_right #n a s =
Int.shift_arithmetic_right #n (Int.shift_arithmetic_right #n a (s-1)) 1);
shift_right_value_aux_3 #n a (s-1);
shift_right_value_aux_2 #n (Int.shift_arithmetic_right #n a (s-1));
assert (Int.shift_arithmetic_right #n a s = (a / pow2 (s-1)) / 2);
pow2_double_mult (s-1);
division_multiplication_lemma a (pow2 (s-1)) 2
end
let shift_right_lemma #t #l a b =
match t with
| U1 | U8 | U16 | U32 | U64 | U128 -> ()
| S8 | S16 | S32 | S64 | S128 ->
if v b = 0 then ()
else if v b >= bits t then
shift_right_value_aux_1 #(bits t) (v a) (v b)
else
shift_right_value_aux_3 #(bits t) (v a) (v b)
[@(strict_on_arguments [0])]
let shift_left #t #l a b =
match t with
| U1 -> UInt8.shift_left a b
| U8 -> UInt8.shift_left a b
| U16 -> UInt16.shift_left a b
| U32 -> UInt32.shift_left a b
| U64 -> UInt64.shift_left a b
| U128 -> UInt128.shift_left a b
| S8 -> Int8.shift_left a b
| S16 -> Int16.shift_left a b
| S32 -> Int32.shift_left a b
| S64 -> Int64.shift_left a b
| S128 -> Int128.shift_left a b
#push-options "--max_fuel 1"
let shift_left_lemma #t #l a b = ()
let rotate_right #t #l a b =
logor (shift_right a b) (shift_left a (sub #U32 (size (bits t)) b))
let rotate_left #t #l a b =
logor (shift_left a b) (shift_right a (sub #U32 (size (bits t)) b))
[@(strict_on_arguments [0])]
let ct_abs #t #l a =
match t with
| S8 -> Int8.ct_abs a
| S16 -> Int16.ct_abs a
| S32 -> Int32.ct_abs a
| S64 -> Int64.ct_abs a
#pop-options
[@(strict_on_arguments [0])]
let eq_mask #t a b =
match t with
| U1 -> lognot (logxor a b)
| U8 -> UInt8.eq_mask a b
| U16 -> UInt16.eq_mask a b
| U32 -> UInt32.eq_mask a b
| U64 -> UInt64.eq_mask a b
| U128 -> UInt128.eq_mask a b
| S8 -> Int.Cast.uint8_to_int8 (UInt8.eq_mask (to_u8 a) (to_u8 b))
| S16 -> Int.Cast.uint16_to_int16 (UInt16.eq_mask (to_u16 a) (to_u16 b))
| S32 -> Int.Cast.uint32_to_int32 (UInt32.eq_mask (to_u32 a) (to_u32 b))
| S64 -> Int.Cast.uint64_to_int64 (UInt64.eq_mask (to_u64 a) (to_u64 b))
val eq_mask_lemma_unsigned: #t:inttype{unsigned t} -> a:int_t t SEC -> b:int_t t SEC -> Lemma
(if v a = v b then v (eq_mask a b) == ones_v t
else v (eq_mask a b) == 0) | false | false | Lib.IntTypes.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 200,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val eq_mask_lemma_unsigned: #t:inttype{unsigned t} -> a:int_t t SEC -> b:int_t t SEC -> Lemma
(if v a = v b then v (eq_mask a b) == ones_v t
else v (eq_mask a b) == 0) | [] | Lib.IntTypes.eq_mask_lemma_unsigned | {
"file_name": "lib/Lib.IntTypes.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | a: Lib.IntTypes.int_t t Lib.IntTypes.SEC -> b: Lib.IntTypes.int_t t Lib.IntTypes.SEC
-> FStar.Pervasives.Lemma
(ensures
((match Lib.IntTypes.v a = Lib.IntTypes.v b with
| true -> Lib.IntTypes.v (Lib.IntTypes.eq_mask a b) == Lib.IntTypes.ones_v t
| _ -> Lib.IntTypes.v (Lib.IntTypes.eq_mask a b) == 0)
<:
Type0)) | {
"end_col": 37,
"end_line": 814,
"start_col": 2,
"start_line": 808
} |
FStar.Pervasives.Lemma | val shift_right_value_aux_3: #n:pos -> a:Int.int_t n -> s:pos{s < n} ->
Lemma (ensures Int.shift_arithmetic_right #n a s = a / pow2 s)
(decreases s) | [
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let rec shift_right_value_aux_3 #n a s =
if s = 1 then
shift_right_value_aux_2 #n a
else
begin
let a1 = Int.to_vec a in
assert (Seq.equal (BitVector.shift_arithmetic_right_vec #n a1 s)
(BitVector.shift_arithmetic_right_vec #n
(BitVector.shift_arithmetic_right_vec #n a1 (s-1)) 1));
assert (Int.shift_arithmetic_right #n a s =
Int.shift_arithmetic_right #n (Int.shift_arithmetic_right #n a (s-1)) 1);
shift_right_value_aux_3 #n a (s-1);
shift_right_value_aux_2 #n (Int.shift_arithmetic_right #n a (s-1));
assert (Int.shift_arithmetic_right #n a s = (a / pow2 (s-1)) / 2);
pow2_double_mult (s-1);
division_multiplication_lemma a (pow2 (s-1)) 2
end | val shift_right_value_aux_3: #n:pos -> a:Int.int_t n -> s:pos{s < n} ->
Lemma (ensures Int.shift_arithmetic_right #n a s = a / pow2 s)
(decreases s)
let rec shift_right_value_aux_3 #n a s = | false | null | true | if s = 1
then shift_right_value_aux_2 #n a
else
let a1 = Int.to_vec a in
assert (Seq.equal (BitVector.shift_arithmetic_right_vec #n a1 s)
(BitVector.shift_arithmetic_right_vec #n
(BitVector.shift_arithmetic_right_vec #n a1 (s - 1))
1));
assert (Int.shift_arithmetic_right #n a s =
Int.shift_arithmetic_right #n (Int.shift_arithmetic_right #n a (s - 1)) 1);
shift_right_value_aux_3 #n a (s - 1);
shift_right_value_aux_2 #n (Int.shift_arithmetic_right #n a (s - 1));
assert (Int.shift_arithmetic_right #n a s = (a / pow2 (s - 1)) / 2);
pow2_double_mult (s - 1);
division_multiplication_lemma a (pow2 (s - 1)) 2 | {
"checked_file": "Lib.IntTypes.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt8.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt16.fsti.checked",
"FStar.UInt128.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int8.fsti.checked",
"FStar.Int64.fsti.checked",
"FStar.Int32.fsti.checked",
"FStar.Int16.fsti.checked",
"FStar.Int128.fsti.checked",
"FStar.Int.Cast.Full.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Int.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "Lib.IntTypes.fst"
} | [
"lemma",
""
] | [
"Prims.pos",
"FStar.Int.int_t",
"Prims.b2t",
"Prims.op_LessThan",
"Prims.op_Equality",
"Prims.int",
"Lib.IntTypes.shift_right_value_aux_2",
"Prims.bool",
"FStar.Math.Lemmas.division_multiplication_lemma",
"Prims.pow2",
"Prims.op_Subtraction",
"Prims.unit",
"FStar.Math.Lemmas.pow2_double_mult",
"Prims._assert",
"FStar.Int.shift_arithmetic_right",
"Prims.op_Division",
"Lib.IntTypes.shift_right_value_aux_3",
"FStar.Seq.Base.equal",
"FStar.BitVector.shift_arithmetic_right_vec",
"FStar.BitVector.bv_t",
"FStar.Int.to_vec"
] | [] | module Lib.IntTypes
open FStar.Math.Lemmas
#push-options "--max_fuel 0 --max_ifuel 1 --z3rlimit 200"
let pow2_2 _ = assert_norm (pow2 2 = 4)
let pow2_3 _ = assert_norm (pow2 3 = 8)
let pow2_4 _ = assert_norm (pow2 4 = 16)
let pow2_127 _ = assert_norm (pow2 127 = 0x80000000000000000000000000000000)
let bits_numbytes t = ()
let sec_int_t t = pub_int_t t
let sec_int_v #t u = pub_int_v u
let secret #t x = x
[@(strict_on_arguments [0])]
let mk_int #t #l x =
match t with
| U1 -> UInt8.uint_to_t x
| U8 -> UInt8.uint_to_t x
| U16 -> UInt16.uint_to_t x
| U32 -> UInt32.uint_to_t x
| U64 -> UInt64.uint_to_t x
| U128 -> UInt128.uint_to_t x
| S8 -> Int8.int_to_t x
| S16 -> Int16.int_to_t x
| S32 -> Int32.int_to_t x
| S64 -> Int64.int_to_t x
| S128 -> Int128.int_to_t x
val v_extensionality:
#t:inttype
-> #l:secrecy_level
-> a:int_t t l
-> b:int_t t l
-> Lemma
(requires v a == v b)
(ensures a == b)
let v_extensionality #t #l a b =
match t with
| U1 -> ()
| U8 -> UInt8.v_inj a b
| U16 -> UInt16.v_inj a b
| U32 -> UInt32.v_inj a b
| U64 -> UInt64.v_inj a b
| U128 -> UInt128.v_inj a b
| S8 -> Int8.v_inj a b
| S16 -> Int16.v_inj a b
| S32 -> Int32.v_inj a b
| S64 -> Int64.v_inj a b
| S128 -> Int128.v_inj a b
let v_injective #t #l a =
v_extensionality a (mk_int (v a))
let v_mk_int #t #l n = ()
let u128 n = FStar.UInt128.uint64_to_uint128 (u64 n)
// KaRaMeL will extract this to FStar_Int128_int_to_t, which isn't provided
// We'll need to have FStar.Int128.int64_to_int128 to support int128_t literals
let i128 n =
assert_norm (pow2 (bits S64 - 1) <= pow2 (bits S128 - 1));
sint #S128 #SEC n
let size_to_uint32 x = x
let size_to_uint64 x = Int.Cast.uint32_to_uint64 x
let byte_to_uint8 x = x
let byte_to_int8 x = Int.Cast.uint8_to_int8 x
let op_At_Percent = Int.op_At_Percent
// FStar.UInt128 gets special treatment in KaRaMeL. There is no
// equivalent for FStar.Int128 at the moment, so we use the three
// assumed cast operators below.
//
// Using them will fail at runtime with an informative message.
// The commented-out implementations show that they are realizable.
//
// When support for `FStar.Int128` is added KaRaMeL, these casts must
// be added as special cases. When using builtin compiler support for
// `int128_t`, they can be implemented directly as C casts without
// undefined or implementation-defined behaviour.
assume
val uint128_to_int128: a:UInt128.t{v a <= maxint S128} -> b:Int128.t{Int128.v b == UInt128.v a}
//let uint128_to_int128 a = Int128.int_to_t (v a)
assume
val int128_to_uint128: a:Int128.t -> b:UInt128.t{UInt128.v b == Int128.v a % pow2 128}
//let int128_to_uint128 a = mk_int (v a % pow2 128)
assume
val int64_to_int128: a:Int64.t -> b:Int128.t{Int128.v b == Int64.v a}
//let int64_to_int128 a = Int128.int_to_t (v a)
val uint64_to_int128: a:UInt64.t -> b:Int128.t{Int128.v b == UInt64.v a}
let uint64_to_int128 a = uint128_to_int128 (Int.Cast.Full.uint64_to_uint128 a)
val int64_to_uint128: a:Int64.t -> b:UInt128.t{UInt128.v b == Int64.v a % pow2 128}
let int64_to_uint128 a = int128_to_uint128 (int64_to_int128 a)
val int128_to_uint64: a:Int128.t -> b:UInt64.t{UInt64.v b == Int128.v a % pow2 64}
let int128_to_uint64 a = Int.Cast.Full.uint128_to_uint64 (int128_to_uint128 a)
#push-options "--z3rlimit 1000"
[@(strict_on_arguments [0;2])]
let cast #t #l t' l' u =
assert_norm (pow2 8 = 2 * pow2 7);
assert_norm (pow2 16 = 2 * pow2 15);
assert_norm (pow2 64 * pow2 64 = pow2 128);
assert_norm (pow2 16 * pow2 48 = pow2 64);
assert_norm (pow2 8 * pow2 56 = pow2 64);
assert_norm (pow2 32 * pow2 32 = pow2 64);
modulo_modulo_lemma (v u) (pow2 32) (pow2 32);
modulo_modulo_lemma (v u) (pow2 64) (pow2 64);
modulo_modulo_lemma (v u) (pow2 128) (pow2 64);
modulo_modulo_lemma (v u) (pow2 16) (pow2 48);
modulo_modulo_lemma (v u) (pow2 8) (pow2 56);
let open FStar.Int.Cast in
let open FStar.Int.Cast.Full in
match t, t' with
| U1, U1 -> u
| U1, U8 -> u
| U1, U16 -> uint8_to_uint16 u
| U1, U32 -> uint8_to_uint32 u
| U1, U64 -> uint8_to_uint64 u
| U1, U128 -> UInt128.uint64_to_uint128 (uint8_to_uint64 u)
| U1, S8 -> uint8_to_int8 u
| U1, S16 -> uint8_to_int16 u
| U1, S32 -> uint8_to_int32 u
| U1, S64 -> uint8_to_int64 u
| U1, S128 -> uint64_to_int128 (uint8_to_uint64 u)
| U8, U1 -> UInt8.rem u 2uy
| U8, U8 -> u
| U8, U16 -> uint8_to_uint16 u
| U8, U32 -> uint8_to_uint32 u
| U8, U64 -> uint8_to_uint64 u
| U8, U128 -> UInt128.uint64_to_uint128 (uint8_to_uint64 u)
| U8, S8 -> uint8_to_int8 u
| U8, S16 -> uint8_to_int16 u
| U8, S32 -> uint8_to_int32 u
| U8, S64 -> uint8_to_int64 u
| U8, S128 -> uint64_to_int128 (uint8_to_uint64 u)
| U16, U1 -> UInt8.rem (uint16_to_uint8 u) 2uy
| U16, U8 -> uint16_to_uint8 u
| U16, U16 -> u
| U16, U32 -> uint16_to_uint32 u
| U16, U64 -> uint16_to_uint64 u
| U16, U128 -> UInt128.uint64_to_uint128 (uint16_to_uint64 u)
| U16, S8 -> uint16_to_int8 u
| U16, S16 -> uint16_to_int16 u
| U16, S32 -> uint16_to_int32 u
| U16, S64 -> uint16_to_int64 u
| U16, S128 -> uint64_to_int128 (uint16_to_uint64 u)
| U32, U1 -> UInt8.rem (uint32_to_uint8 u) 2uy
| U32, U8 -> uint32_to_uint8 u
| U32, U16 -> uint32_to_uint16 u
| U32, U32 -> u
| U32, U64 -> uint32_to_uint64 u
| U32, U128 -> UInt128.uint64_to_uint128 (uint32_to_uint64 u)
| U32, S8 -> uint32_to_int8 u
| U32, S16 -> uint32_to_int16 u
| U32, S32 -> uint32_to_int32 u
| U32, S64 -> uint32_to_int64 u
| U32, S128 -> uint64_to_int128 (uint32_to_uint64 u)
| U64, U1 -> UInt8.rem (uint64_to_uint8 u) 2uy
| U64, U8 -> uint64_to_uint8 u
| U64, U16 -> uint64_to_uint16 u
| U64, U32 -> uint64_to_uint32 u
| U64, U64 -> u
| U64, U128 -> UInt128.uint64_to_uint128 u
| U64, S8 -> uint64_to_int8 u
| U64, S16 -> uint64_to_int16 u
| U64, S32 -> uint64_to_int32 u
| U64, S64 -> uint64_to_int64 u
| U64, S128 -> uint64_to_int128 u
| U128, U1 -> UInt8.rem (uint64_to_uint8 (uint128_to_uint64 u)) 2uy
| U128, U8 -> uint64_to_uint8 (UInt128.uint128_to_uint64 u)
| U128, U16 -> uint64_to_uint16 (UInt128.uint128_to_uint64 u)
| U128, U32 -> uint64_to_uint32 (UInt128.uint128_to_uint64 u)
| U128, U64 -> UInt128.uint128_to_uint64 u
| U128, U128 -> u
| U128, S8 -> uint64_to_int8 (UInt128.uint128_to_uint64 u)
| U128, S16 -> uint64_to_int16 (UInt128.uint128_to_uint64 u)
| U128, S32 -> uint64_to_int32 (UInt128.uint128_to_uint64 u)
| U128, S64 -> uint64_to_int64 (UInt128.uint128_to_uint64 u)
| U128, S128 -> uint128_to_int128 u
| S8, U1 -> UInt8.rem (int8_to_uint8 u) 2uy
| S8, U8 -> int8_to_uint8 u
| S8, U16 -> int8_to_uint16 u
| S8, U32 -> int8_to_uint32 u
| S8, U64 -> int8_to_uint64 u
| S8, U128 -> int64_to_uint128 (int8_to_int64 u)
| S8, S8 -> u
| S8, S16 -> int8_to_int16 u
| S8, S32 -> int8_to_int32 u
| S8, S64 -> int8_to_int64 u
| S8, S128 -> int64_to_int128 (int8_to_int64 u)
| S16, U1 -> UInt8.rem (int16_to_uint8 u) 2uy
| S16, U8 -> int16_to_uint8 u
| S16, U16 -> int16_to_uint16 u
| S16, U32 -> int16_to_uint32 u
| S16, U64 -> int16_to_uint64 u
| S16, U128 -> int64_to_uint128 (int16_to_int64 u)
| S16, S8 -> int16_to_int8 u
| S16, S16 -> u
| S16, S32 -> int16_to_int32 u
| S16, S64 -> int16_to_int64 u
| S16, S128 -> int64_to_int128 (int16_to_int64 u)
| S32, U1 -> UInt8.rem (int32_to_uint8 u) 2uy
| S32, U8 -> int32_to_uint8 u
| S32, U16 -> int32_to_uint16 u
| S32, U32 -> int32_to_uint32 u
| S32, U64 -> int32_to_uint64 u
| S32, U128 -> int64_to_uint128 (int32_to_int64 u)
| S32, S8 -> int32_to_int8 u
| S32, S16 -> int32_to_int16 u
| S32, S32 -> u
| S32, S64 -> int32_to_int64 u
| S32, S128 -> int64_to_int128 (int32_to_int64 u)
| S64, U1 -> UInt8.rem (int64_to_uint8 u) 2uy
| S64, U8 -> int64_to_uint8 u
| S64, U16 -> int64_to_uint16 u
| S64, U32 -> int64_to_uint32 u
| S64, U64 -> int64_to_uint64 u
| S64, U128 -> int64_to_uint128 u
| S64, S8 -> int64_to_int8 u
| S64, S16 -> int64_to_int16 u
| S64, S32 -> int64_to_int32 u
| S64, S64 -> u
| S64, S128 -> int64_to_int128 u
| S128, U1 -> UInt8.rem (uint64_to_uint8 (int128_to_uint64 u)) 2uy
| S128, U8 -> uint64_to_uint8 (int128_to_uint64 u)
| S128, U16 -> uint64_to_uint16 (int128_to_uint64 u)
| S128, U32 -> uint64_to_uint32 (int128_to_uint64 u)
| S128, U64 -> int128_to_uint64 u
| S128, U128 -> int128_to_uint128 u
| S128, S8 -> uint64_to_int8 (int128_to_uint64 u)
| S128, S16 -> uint64_to_int16 (int128_to_uint64 u)
| S128, S32 -> uint64_to_int32 (int128_to_uint64 u)
| S128, S64 -> uint64_to_int64 (int128_to_uint64 u)
| S128, S128 -> u
#pop-options
[@(strict_on_arguments [0])]
let ones t l =
match t with
| U1 -> 0x1uy
| U8 -> 0xFFuy
| U16 -> 0xFFFFus
| U32 -> 0xFFFFFFFFul
| U64 -> 0xFFFFFFFFFFFFFFFFuL
| U128 ->
let x = UInt128.uint64_to_uint128 0xFFFFFFFFFFFFFFFFuL in
let y = (UInt128.shift_left x 64ul) `UInt128.add` x in
assert_norm (UInt128.v y == pow2 128 - 1);
y
| _ -> mk_int (-1)
let zeros t l = mk_int 0
[@(strict_on_arguments [0])]
let add_mod #t #l a b =
match t with
| U1 -> UInt8.rem (UInt8.add_mod a b) 2uy
| U8 -> UInt8.add_mod a b
| U16 -> UInt16.add_mod a b
| U32 -> UInt32.add_mod a b
| U64 -> UInt64.add_mod a b
| U128 -> UInt128.add_mod a b
let add_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let add #t #l a b =
match t with
| U1 -> UInt8.add a b
| U8 -> UInt8.add a b
| U16 -> UInt16.add a b
| U32 -> UInt32.add a b
| U64 -> UInt64.add a b
| U128 -> UInt128.add a b
| S8 -> Int8.add a b
| S16 -> Int16.add a b
| S32 -> Int32.add a b
| S64 -> Int64.add a b
| S128 -> Int128.add a b
let add_lemma #t #l a b = ()
#push-options "--max_fuel 1"
[@(strict_on_arguments [0])]
let incr #t #l a =
match t with
| U1 -> UInt8.add a 1uy
| U8 -> UInt8.add a 1uy
| U16 -> UInt16.add a 1us
| U32 -> UInt32.add a 1ul
| U64 -> UInt64.add a 1uL
| U128 -> UInt128.add a (UInt128.uint_to_t 1)
| S8 -> Int8.add a 1y
| S16 -> Int16.add a 1s
| S32 -> Int32.add a 1l
| S64 -> Int64.add a 1L
| S128 -> Int128.add a (Int128.int_to_t 1)
let incr_lemma #t #l a = ()
#pop-options
[@(strict_on_arguments [0])]
let mul_mod #t #l a b =
match t with
| U1 -> UInt8.mul_mod a b
| U8 -> UInt8.mul_mod a b
| U16 -> UInt16.mul_mod a b
| U32 -> UInt32.mul_mod a b
| U64 -> UInt64.mul_mod a b
let mul_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let mul #t #l a b =
match t with
| U1 -> UInt8.mul a b
| U8 -> UInt8.mul a b
| U16 -> UInt16.mul a b
| U32 -> UInt32.mul a b
| U64 -> UInt64.mul a b
| S8 -> Int8.mul a b
| S16 -> Int16.mul a b
| S32 -> Int32.mul a b
| S64 -> Int64.mul a b
let mul_lemma #t #l a b = ()
let mul64_wide a b = UInt128.mul_wide a b
let mul64_wide_lemma a b = ()
let mul_s64_wide a b = Int128.mul_wide a b
let mul_s64_wide_lemma a b = ()
[@(strict_on_arguments [0])]
let sub_mod #t #l a b =
match t with
| U1 -> UInt8.rem (UInt8.sub_mod a b) 2uy
| U8 -> UInt8.sub_mod a b
| U16 -> UInt16.sub_mod a b
| U32 -> UInt32.sub_mod a b
| U64 -> UInt64.sub_mod a b
| U128 -> UInt128.sub_mod a b
let sub_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let sub #t #l a b =
match t with
| U1 -> UInt8.sub a b
| U8 -> UInt8.sub a b
| U16 -> UInt16.sub a b
| U32 -> UInt32.sub a b
| U64 -> UInt64.sub a b
| U128 -> UInt128.sub a b
| S8 -> Int8.sub a b
| S16 -> Int16.sub a b
| S32 -> Int32.sub a b
| S64 -> Int64.sub a b
| S128 -> Int128.sub a b
let sub_lemma #t #l a b = ()
#push-options "--max_fuel 1"
[@(strict_on_arguments [0])]
let decr #t #l a =
match t with
| U1 -> UInt8.sub a 1uy
| U8 -> UInt8.sub a 1uy
| U16 -> UInt16.sub a 1us
| U32 -> UInt32.sub a 1ul
| U64 -> UInt64.sub a 1uL
| U128 -> UInt128.sub a (UInt128.uint_to_t 1)
| S8 -> Int8.sub a 1y
| S16 -> Int16.sub a 1s
| S32 -> Int32.sub a 1l
| S64 -> Int64.sub a 1L
| S128 -> Int128.sub a (Int128.int_to_t 1)
let decr_lemma #t #l a = ()
#pop-options
[@(strict_on_arguments [0])]
let logxor #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logxor 0uy 0uy == 0uy);
assert_norm (UInt8.logxor 0uy 1uy == 1uy);
assert_norm (UInt8.logxor 1uy 0uy == 1uy);
assert_norm (UInt8.logxor 1uy 1uy == 0uy);
UInt8.logxor a b
| U8 -> UInt8.logxor a b
| U16 -> UInt16.logxor a b
| U32 -> UInt32.logxor a b
| U64 -> UInt64.logxor a b
| U128 -> UInt128.logxor a b
| S8 -> Int8.logxor a b
| S16 -> Int16.logxor a b
| S32 -> Int32.logxor a b
| S64 -> Int64.logxor a b
| S128 -> Int128.logxor a b
#push-options "--max_fuel 1"
val logxor_lemma_: #t:inttype -> #l:secrecy_level -> a:int_t t l -> b:int_t t l -> Lemma
(v (a `logxor` (a `logxor` b)) == v b)
let logxor_lemma_ #t #l a b =
match t with
| U1 | U8 | U16 | U32 | U64 | U128 ->
UInt.logxor_associative #(bits t) (v a) (v a) (v b);
UInt.logxor_self #(bits t) (v a);
UInt.logxor_commutative #(bits t) 0 (v b);
UInt.logxor_lemma_1 #(bits t) (v b)
| S8 | S16 | S32 | S64 | S128 ->
Int.logxor_associative #(bits t) (v a) (v a) (v b);
Int.logxor_self #(bits t) (v a);
Int.logxor_commutative #(bits t) 0 (v b);
Int.logxor_lemma_1 #(bits t) (v b)
let logxor_lemma #t #l a b =
logxor_lemma_ #t a b;
v_extensionality (logxor a (logxor a b)) b;
begin
match t with
| U1 | U8 | U16 | U32 | U64 | U128 -> UInt.logxor_commutative #(bits t) (v a) (v b)
| S8 | S16 | S32 | S64 | S128 -> Int.logxor_commutative #(bits t) (v a) (v b)
end;
v_extensionality (logxor a (logxor b a)) b;
begin
match t with
| U1 | U8 | U16 | U32 | U64 | U128 -> UInt.logxor_lemma_1 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.logxor_lemma_1 #(bits t) (v a)
end;
v_extensionality (logxor a (mk_int #t #l 0)) a
let logxor_lemma1 #t #l a b =
match v a, v b with
| _, 0 ->
UInt.logxor_lemma_1 #(bits t) (v a)
| 0, _ ->
UInt.logxor_commutative #(bits t) (v a) (v b);
UInt.logxor_lemma_1 #(bits t) (v b)
| 1, 1 ->
v_extensionality a b;
UInt.logxor_self #(bits t) (v a)
let logxor_spec #t #l a b =
match t with
| U1 ->
assert_norm (u1 0 `logxor` u1 0 == u1 0 /\ u1 0 `logxor` u1 1 == u1 1);
assert_norm (u1 1 `logxor` u1 0 == u1 1 /\ u1 1 `logxor` u1 1 == u1 0);
assert_norm (0 `logxor_v #U1` 0 == 0 /\ 0 `logxor_v #U1` 1 == 1);
assert_norm (1 `logxor_v #U1` 0 == 1 /\ 1 `logxor_v #U1` 1 == 0)
| _ -> ()
#pop-options
[@(strict_on_arguments [0])]
let logand #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logand 0uy 0uy == 0uy);
assert_norm (UInt8.logand 0uy 1uy == 0uy);
assert_norm (UInt8.logand 1uy 0uy == 0uy);
assert_norm (UInt8.logand 1uy 1uy == 1uy);
UInt8.logand a b
| U8 -> UInt8.logand a b
| U16 -> UInt16.logand a b
| U32 -> UInt32.logand a b
| U64 -> UInt64.logand a b
| U128 -> UInt128.logand a b
| S8 -> Int8.logand a b
| S16 -> Int16.logand a b
| S32 -> Int32.logand a b
| S64 -> Int64.logand a b
| S128 -> Int128.logand a b
let logand_zeros #t #l a =
match t with
| U1 -> assert_norm (u1 0 `logand` zeros U1 l == u1 0 /\ u1 1 `logand` zeros U1 l == u1 0)
| U8 | U16 | U32 | U64 | U128 -> UInt.logand_lemma_1 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.logand_lemma_1 #(bits t) (v a)
let logand_ones #t #l a =
match t with
| U1 -> assert_norm (u1 0 `logand` ones U1 l == u1 0 /\ u1 1 `logand` ones U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logand_lemma_2 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.logand_lemma_2 #(bits t) (v a)
let logand_lemma #t #l a b =
logand_zeros #t #l b;
logand_ones #t #l b;
match t with
| U1 ->
assert_norm (u1 0 `logand` zeros U1 l == u1 0 /\ u1 1 `logand` zeros U1 l == u1 0);
assert_norm (u1 0 `logand` ones U1 l == u1 0 /\ u1 1 `logand` ones U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logand_commutative #(bits t) (v a) (v b)
| S8 | S16 | S32 | S64 | S128 -> Int.logand_commutative #(bits t) (v a) (v b)
let logand_spec #t #l a b =
match t with
| U1 ->
assert_norm (u1 0 `logand` u1 0 == u1 0 /\ u1 0 `logand` u1 1 == u1 0);
assert_norm (u1 1 `logand` u1 0 == u1 0 /\ u1 1 `logand` u1 1 == u1 1);
assert_norm (0 `logand_v #U1` 0 == 0 /\ 0 `logand_v #U1` 1 == 0);
assert_norm (1 `logand_v #U1` 0 == 0 /\ 1 `logand_v #U1` 1 == 1)
| _ -> ()
let logand_le #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logand 0uy 0uy == 0uy);
assert_norm (UInt8.logand 0uy 1uy == 0uy);
assert_norm (UInt8.logand 1uy 0uy == 0uy);
assert_norm (UInt8.logand 1uy 1uy == 1uy)
| U8 -> UInt.logand_le (UInt.to_uint_t 8 (v a)) (UInt.to_uint_t 8 (v b))
| U16 -> UInt.logand_le (UInt.to_uint_t 16 (v a)) (UInt.to_uint_t 16 (v b))
| U32 -> UInt.logand_le (UInt.to_uint_t 32 (v a)) (UInt.to_uint_t 32 (v b))
| U64 -> UInt.logand_le (UInt.to_uint_t 64 (v a)) (UInt.to_uint_t 64 (v b))
| U128 -> UInt.logand_le (UInt.to_uint_t 128 (v a)) (UInt.to_uint_t 128 (v b))
let logand_mask #t #l a b m =
match t with
| U1 ->
assert_norm (UInt8.logand 0uy 0uy == 0uy);
assert_norm (UInt8.logand 0uy 1uy == 0uy);
assert_norm (UInt8.logand 1uy 0uy == 0uy);
assert_norm (UInt8.logand 1uy 1uy == 1uy)
| U8 -> UInt.logand_mask (UInt.to_uint_t 8 (v a)) m
| U16 -> UInt.logand_mask (UInt.to_uint_t 16 (v a)) m
| U32 -> UInt.logand_mask (UInt.to_uint_t 32 (v a)) m
| U64 -> UInt.logand_mask (UInt.to_uint_t 64 (v a)) m
| U128 -> UInt.logand_mask (UInt.to_uint_t 128 (v a)) m
[@(strict_on_arguments [0])]
let logor #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logor 0uy 0uy == 0uy);
assert_norm (UInt8.logor 0uy 1uy == 1uy);
assert_norm (UInt8.logor 1uy 0uy == 1uy);
assert_norm (UInt8.logor 1uy 1uy == 1uy);
UInt8.logor a b
| U8 -> UInt8.logor a b
| U16 -> UInt16.logor a b
| U32 -> UInt32.logor a b
| U64 -> UInt64.logor a b
| U128 -> UInt128.logor a b
| S8 -> Int8.logor a b
| S16 -> Int16.logor a b
| S32 -> Int32.logor a b
| S64 -> Int64.logor a b
| S128 -> Int128.logor a b
#push-options "--max_fuel 1"
let logor_disjoint #t #l a b m =
if m > 0 then
begin
UInt.logor_disjoint #(bits t) (v b) (v a) m;
UInt.logor_commutative #(bits t) (v b) (v a)
end
else
begin
UInt.logor_commutative #(bits t) (v a) (v b);
UInt.logor_lemma_1 #(bits t) (v b)
end
#pop-options
let logor_zeros #t #l a =
match t with
|U1 -> assert_norm(u1 0 `logor` zeros U1 l == u1 0 /\ u1 1 `logor` zeros U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logor_lemma_1 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.nth_lemma #(bits t) (Int.logor #(bits t) (v a) (Int.zero (bits t))) (v a)
let logor_ones #t #l a =
match t with
|U1 -> assert_norm(u1 0 `logor` ones U1 l == u1 1 /\ u1 1 `logor` ones U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logor_lemma_2 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.nth_lemma (Int.logor #(bits t) (v a) (Int.ones (bits t))) (Int.ones (bits t))
let logor_lemma #t #l a b =
logor_zeros #t #l b;
logor_ones #t #l b;
match t with
| U1 ->
assert_norm(u1 0 `logor` ones U1 l == u1 1 /\ u1 1 `logor` ones U1 l == u1 1);
assert_norm(u1 0 `logor` zeros U1 l == u1 0 /\ u1 1 `logor` zeros U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logor_commutative #(bits t) (v a) (v b)
| S8 | S16 | S32 | S64 | S128 -> Int.nth_lemma #(bits t) (Int.logor #(bits t) (v a) (v b)) (Int.logor #(bits t) (v b) (v a))
let logor_spec #t #l a b =
match t with
| U1 ->
assert_norm(u1 0 `logor` ones U1 l == u1 1 /\ u1 1 `logor` ones U1 l == u1 1);
assert_norm(u1 0 `logor` zeros U1 l == u1 0 /\ u1 1 `logor` zeros U1 l == u1 1);
assert_norm (0 `logor_v #U1` 0 == 0 /\ 0 `logor_v #U1` 1 == 1);
assert_norm (1 `logor_v #U1` 0 == 1 /\ 1 `logor_v #U1` 1 == 1)
| _ -> ()
[@(strict_on_arguments [0])]
let lognot #t #l a =
match t with
| U1 -> UInt8.rem (UInt8.lognot a) 2uy
| U8 -> UInt8.lognot a
| U16 -> UInt16.lognot a
| U32 -> UInt32.lognot a
| U64 -> UInt64.lognot a
| U128 -> UInt128.lognot a
| S8 -> Int8.lognot a
| S16 -> Int16.lognot a
| S32 -> Int32.lognot a
| S64 -> Int64.lognot a
| S128 -> Int128.lognot a
let lognot_lemma #t #l a =
match t with
|U1 -> assert_norm(lognot (u1 0) == u1 1 /\ lognot (u1 1) == u1 0)
| U8 | U16 | U32 | U64 | U128 ->
FStar.UInt.lognot_lemma_1 #(bits t);
UInt.nth_lemma (FStar.UInt.lognot #(bits t) (UInt.ones (bits t))) (UInt.zero (bits t))
| S8 | S16 | S32 | S64 | S128 ->
Int.nth_lemma (FStar.Int.lognot #(bits t) (Int.zero (bits t))) (Int.ones (bits t));
Int.nth_lemma (FStar.Int.lognot #(bits t) (Int.ones (bits t))) (Int.zero (bits t))
let lognot_spec #t #l a =
match t with
| U1 ->
assert_norm(lognot (u1 0) == u1 1 /\ lognot (u1 1) == u1 0);
assert_norm(lognot_v #U1 0 == 1 /\ lognot_v #U1 1 == 0)
| _ -> ()
[@(strict_on_arguments [0])]
let shift_right #t #l a b =
match t with
| U1 -> UInt8.shift_right a b
| U8 -> UInt8.shift_right a b
| U16 -> UInt16.shift_right a b
| U32 -> UInt32.shift_right a b
| U64 -> UInt64.shift_right a b
| U128 -> UInt128.shift_right a b
| S8 -> Int8.shift_arithmetic_right a b
| S16 -> Int16.shift_arithmetic_right a b
| S32 -> Int32.shift_arithmetic_right a b
| S64 -> Int64.shift_arithmetic_right a b
| S128 -> Int128.shift_arithmetic_right a b
val shift_right_value_aux_1: #n:pos{1 < n} -> a:Int.int_t n -> s:nat{n <= s} ->
Lemma (Int.shift_arithmetic_right #n a s = a / pow2 s)
let shift_right_value_aux_1 #n a s =
pow2_le_compat s n;
if a >= 0 then Int.sign_bit_positive a else Int.sign_bit_negative a
#push-options "--z3rlimit 200"
val shift_right_value_aux_2: #n:pos{1 < n} -> a:Int.int_t n ->
Lemma (Int.shift_arithmetic_right #n a 1 = a / 2)
let shift_right_value_aux_2 #n a =
if a >= 0 then
begin
Int.sign_bit_positive a;
UInt.shift_right_value_aux_3 #n a 1
end
else
begin
Int.sign_bit_negative a;
let a1 = Int.to_vec a in
let au = Int.to_uint a in
let sar = Int.shift_arithmetic_right #n a 1 in
let sar1 = Int.to_vec sar in
let sr = UInt.shift_right #n au 1 in
let sr1 = UInt.to_vec sr in
assert (Seq.equal (Seq.slice sar1 1 n) (Seq.slice sr1 1 n));
assert (Seq.equal sar1 (Seq.append (BitVector.ones_vec #1) (Seq.slice sr1 1 n)));
UInt.append_lemma #1 #(n-1) (BitVector.ones_vec #1) (Seq.slice sr1 1 n);
assert (Seq.equal (Seq.slice a1 0 (n-1)) (Seq.slice sar1 1 n));
UInt.slice_left_lemma a1 (n-1);
assert (sar + pow2 n = pow2 (n-1) + (au / 2));
pow2_double_sum (n-1);
assert (sar + pow2 (n-1) = (a + pow2 n) / 2);
pow2_double_mult (n-1);
lemma_div_plus a (pow2 (n-1)) 2;
assert (sar = a / 2)
end
val shift_right_value_aux_3: #n:pos -> a:Int.int_t n -> s:pos{s < n} ->
Lemma (ensures Int.shift_arithmetic_right #n a s = a / pow2 s)
(decreases s) | false | false | Lib.IntTypes.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 200,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val shift_right_value_aux_3: #n:pos -> a:Int.int_t n -> s:pos{s < n} ->
Lemma (ensures Int.shift_arithmetic_right #n a s = a / pow2 s)
(decreases s) | [
"recursion"
] | Lib.IntTypes.shift_right_value_aux_3 | {
"file_name": "lib/Lib.IntTypes.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | a: FStar.Int.int_t n -> s: Prims.pos{s < n}
-> FStar.Pervasives.Lemma (ensures FStar.Int.shift_arithmetic_right a s = a / Prims.pow2 s)
(decreases s) | {
"end_col": 7,
"end_line": 743,
"start_col": 2,
"start_line": 728
} |
Prims.Tot | [
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let op_At_Percent = Int.op_At_Percent | let op_At_Percent = | false | null | false | Int.op_At_Percent | {
"checked_file": "Lib.IntTypes.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt8.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt16.fsti.checked",
"FStar.UInt128.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int8.fsti.checked",
"FStar.Int64.fsti.checked",
"FStar.Int32.fsti.checked",
"FStar.Int16.fsti.checked",
"FStar.Int128.fsti.checked",
"FStar.Int.Cast.Full.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Int.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "Lib.IntTypes.fst"
} | [
"total"
] | [
"FStar.Int.op_At_Percent"
] | [] | module Lib.IntTypes
open FStar.Math.Lemmas
#push-options "--max_fuel 0 --max_ifuel 1 --z3rlimit 200"
let pow2_2 _ = assert_norm (pow2 2 = 4)
let pow2_3 _ = assert_norm (pow2 3 = 8)
let pow2_4 _ = assert_norm (pow2 4 = 16)
let pow2_127 _ = assert_norm (pow2 127 = 0x80000000000000000000000000000000)
let bits_numbytes t = ()
let sec_int_t t = pub_int_t t
let sec_int_v #t u = pub_int_v u
let secret #t x = x
[@(strict_on_arguments [0])]
let mk_int #t #l x =
match t with
| U1 -> UInt8.uint_to_t x
| U8 -> UInt8.uint_to_t x
| U16 -> UInt16.uint_to_t x
| U32 -> UInt32.uint_to_t x
| U64 -> UInt64.uint_to_t x
| U128 -> UInt128.uint_to_t x
| S8 -> Int8.int_to_t x
| S16 -> Int16.int_to_t x
| S32 -> Int32.int_to_t x
| S64 -> Int64.int_to_t x
| S128 -> Int128.int_to_t x
val v_extensionality:
#t:inttype
-> #l:secrecy_level
-> a:int_t t l
-> b:int_t t l
-> Lemma
(requires v a == v b)
(ensures a == b)
let v_extensionality #t #l a b =
match t with
| U1 -> ()
| U8 -> UInt8.v_inj a b
| U16 -> UInt16.v_inj a b
| U32 -> UInt32.v_inj a b
| U64 -> UInt64.v_inj a b
| U128 -> UInt128.v_inj a b
| S8 -> Int8.v_inj a b
| S16 -> Int16.v_inj a b
| S32 -> Int32.v_inj a b
| S64 -> Int64.v_inj a b
| S128 -> Int128.v_inj a b
let v_injective #t #l a =
v_extensionality a (mk_int (v a))
let v_mk_int #t #l n = ()
let u128 n = FStar.UInt128.uint64_to_uint128 (u64 n)
// KaRaMeL will extract this to FStar_Int128_int_to_t, which isn't provided
// We'll need to have FStar.Int128.int64_to_int128 to support int128_t literals
let i128 n =
assert_norm (pow2 (bits S64 - 1) <= pow2 (bits S128 - 1));
sint #S128 #SEC n
let size_to_uint32 x = x
let size_to_uint64 x = Int.Cast.uint32_to_uint64 x
let byte_to_uint8 x = x
let byte_to_int8 x = Int.Cast.uint8_to_int8 x | false | false | Lib.IntTypes.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 200,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val op_At_Percent : v: Prims.int -> p: Prims.int{p > 0 /\ p % 2 = 0} -> Prims.int | [] | Lib.IntTypes.op_At_Percent | {
"file_name": "lib/Lib.IntTypes.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | v: Prims.int -> p: Prims.int{p > 0 /\ p % 2 = 0} -> Prims.int | {
"end_col": 37,
"end_line": 78,
"start_col": 20,
"start_line": 78
} |
|
FStar.Pervasives.Lemma | val logand_lemma: #t:inttype -> #l:secrecy_level
-> a:int_t t l
-> b:int_t t l
-> Lemma
(requires v a = 0 \/ v a = ones_v t)
(ensures (if v a = 0 then v (a `logand` b) == 0 else v (a `logand` b) == v b)) | [
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let logand_lemma #t #l a b =
logand_zeros #t #l b;
logand_ones #t #l b;
match t with
| U1 ->
assert_norm (u1 0 `logand` zeros U1 l == u1 0 /\ u1 1 `logand` zeros U1 l == u1 0);
assert_norm (u1 0 `logand` ones U1 l == u1 0 /\ u1 1 `logand` ones U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logand_commutative #(bits t) (v a) (v b)
| S8 | S16 | S32 | S64 | S128 -> Int.logand_commutative #(bits t) (v a) (v b) | val logand_lemma: #t:inttype -> #l:secrecy_level
-> a:int_t t l
-> b:int_t t l
-> Lemma
(requires v a = 0 \/ v a = ones_v t)
(ensures (if v a = 0 then v (a `logand` b) == 0 else v (a `logand` b) == v b))
let logand_lemma #t #l a b = | false | null | true | logand_zeros #t #l b;
logand_ones #t #l b;
match t with
| U1 ->
assert_norm ((u1 0) `logand` (zeros U1 l) == u1 0 /\ (u1 1) `logand` (zeros U1 l) == u1 0);
assert_norm ((u1 0) `logand` (ones U1 l) == u1 0 /\ (u1 1) `logand` (ones U1 l) == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logand_commutative #(bits t) (v a) (v b)
| S8 | S16 | S32 | S64 | S128 -> Int.logand_commutative #(bits t) (v a) (v b) | {
"checked_file": "Lib.IntTypes.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt8.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt16.fsti.checked",
"FStar.UInt128.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int8.fsti.checked",
"FStar.Int64.fsti.checked",
"FStar.Int32.fsti.checked",
"FStar.Int16.fsti.checked",
"FStar.Int128.fsti.checked",
"FStar.Int.Cast.Full.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Int.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "Lib.IntTypes.fst"
} | [
"lemma"
] | [
"Lib.IntTypes.inttype",
"Lib.IntTypes.secrecy_level",
"Lib.IntTypes.int_t",
"FStar.Pervasives.assert_norm",
"Prims.l_and",
"Prims.eq2",
"Lib.IntTypes.U1",
"Lib.IntTypes.SEC",
"Lib.IntTypes.logand",
"Lib.IntTypes.u1",
"Lib.IntTypes.ones",
"Prims.unit",
"Lib.IntTypes.zeros",
"FStar.UInt.logand_commutative",
"Lib.IntTypes.bits",
"Lib.IntTypes.v",
"FStar.Int.logand_commutative",
"Lib.IntTypes.logand_ones",
"Lib.IntTypes.logand_zeros"
] | [] | module Lib.IntTypes
open FStar.Math.Lemmas
#push-options "--max_fuel 0 --max_ifuel 1 --z3rlimit 200"
let pow2_2 _ = assert_norm (pow2 2 = 4)
let pow2_3 _ = assert_norm (pow2 3 = 8)
let pow2_4 _ = assert_norm (pow2 4 = 16)
let pow2_127 _ = assert_norm (pow2 127 = 0x80000000000000000000000000000000)
let bits_numbytes t = ()
let sec_int_t t = pub_int_t t
let sec_int_v #t u = pub_int_v u
let secret #t x = x
[@(strict_on_arguments [0])]
let mk_int #t #l x =
match t with
| U1 -> UInt8.uint_to_t x
| U8 -> UInt8.uint_to_t x
| U16 -> UInt16.uint_to_t x
| U32 -> UInt32.uint_to_t x
| U64 -> UInt64.uint_to_t x
| U128 -> UInt128.uint_to_t x
| S8 -> Int8.int_to_t x
| S16 -> Int16.int_to_t x
| S32 -> Int32.int_to_t x
| S64 -> Int64.int_to_t x
| S128 -> Int128.int_to_t x
val v_extensionality:
#t:inttype
-> #l:secrecy_level
-> a:int_t t l
-> b:int_t t l
-> Lemma
(requires v a == v b)
(ensures a == b)
let v_extensionality #t #l a b =
match t with
| U1 -> ()
| U8 -> UInt8.v_inj a b
| U16 -> UInt16.v_inj a b
| U32 -> UInt32.v_inj a b
| U64 -> UInt64.v_inj a b
| U128 -> UInt128.v_inj a b
| S8 -> Int8.v_inj a b
| S16 -> Int16.v_inj a b
| S32 -> Int32.v_inj a b
| S64 -> Int64.v_inj a b
| S128 -> Int128.v_inj a b
let v_injective #t #l a =
v_extensionality a (mk_int (v a))
let v_mk_int #t #l n = ()
let u128 n = FStar.UInt128.uint64_to_uint128 (u64 n)
// KaRaMeL will extract this to FStar_Int128_int_to_t, which isn't provided
// We'll need to have FStar.Int128.int64_to_int128 to support int128_t literals
let i128 n =
assert_norm (pow2 (bits S64 - 1) <= pow2 (bits S128 - 1));
sint #S128 #SEC n
let size_to_uint32 x = x
let size_to_uint64 x = Int.Cast.uint32_to_uint64 x
let byte_to_uint8 x = x
let byte_to_int8 x = Int.Cast.uint8_to_int8 x
let op_At_Percent = Int.op_At_Percent
// FStar.UInt128 gets special treatment in KaRaMeL. There is no
// equivalent for FStar.Int128 at the moment, so we use the three
// assumed cast operators below.
//
// Using them will fail at runtime with an informative message.
// The commented-out implementations show that they are realizable.
//
// When support for `FStar.Int128` is added KaRaMeL, these casts must
// be added as special cases. When using builtin compiler support for
// `int128_t`, they can be implemented directly as C casts without
// undefined or implementation-defined behaviour.
assume
val uint128_to_int128: a:UInt128.t{v a <= maxint S128} -> b:Int128.t{Int128.v b == UInt128.v a}
//let uint128_to_int128 a = Int128.int_to_t (v a)
assume
val int128_to_uint128: a:Int128.t -> b:UInt128.t{UInt128.v b == Int128.v a % pow2 128}
//let int128_to_uint128 a = mk_int (v a % pow2 128)
assume
val int64_to_int128: a:Int64.t -> b:Int128.t{Int128.v b == Int64.v a}
//let int64_to_int128 a = Int128.int_to_t (v a)
val uint64_to_int128: a:UInt64.t -> b:Int128.t{Int128.v b == UInt64.v a}
let uint64_to_int128 a = uint128_to_int128 (Int.Cast.Full.uint64_to_uint128 a)
val int64_to_uint128: a:Int64.t -> b:UInt128.t{UInt128.v b == Int64.v a % pow2 128}
let int64_to_uint128 a = int128_to_uint128 (int64_to_int128 a)
val int128_to_uint64: a:Int128.t -> b:UInt64.t{UInt64.v b == Int128.v a % pow2 64}
let int128_to_uint64 a = Int.Cast.Full.uint128_to_uint64 (int128_to_uint128 a)
#push-options "--z3rlimit 1000"
[@(strict_on_arguments [0;2])]
let cast #t #l t' l' u =
assert_norm (pow2 8 = 2 * pow2 7);
assert_norm (pow2 16 = 2 * pow2 15);
assert_norm (pow2 64 * pow2 64 = pow2 128);
assert_norm (pow2 16 * pow2 48 = pow2 64);
assert_norm (pow2 8 * pow2 56 = pow2 64);
assert_norm (pow2 32 * pow2 32 = pow2 64);
modulo_modulo_lemma (v u) (pow2 32) (pow2 32);
modulo_modulo_lemma (v u) (pow2 64) (pow2 64);
modulo_modulo_lemma (v u) (pow2 128) (pow2 64);
modulo_modulo_lemma (v u) (pow2 16) (pow2 48);
modulo_modulo_lemma (v u) (pow2 8) (pow2 56);
let open FStar.Int.Cast in
let open FStar.Int.Cast.Full in
match t, t' with
| U1, U1 -> u
| U1, U8 -> u
| U1, U16 -> uint8_to_uint16 u
| U1, U32 -> uint8_to_uint32 u
| U1, U64 -> uint8_to_uint64 u
| U1, U128 -> UInt128.uint64_to_uint128 (uint8_to_uint64 u)
| U1, S8 -> uint8_to_int8 u
| U1, S16 -> uint8_to_int16 u
| U1, S32 -> uint8_to_int32 u
| U1, S64 -> uint8_to_int64 u
| U1, S128 -> uint64_to_int128 (uint8_to_uint64 u)
| U8, U1 -> UInt8.rem u 2uy
| U8, U8 -> u
| U8, U16 -> uint8_to_uint16 u
| U8, U32 -> uint8_to_uint32 u
| U8, U64 -> uint8_to_uint64 u
| U8, U128 -> UInt128.uint64_to_uint128 (uint8_to_uint64 u)
| U8, S8 -> uint8_to_int8 u
| U8, S16 -> uint8_to_int16 u
| U8, S32 -> uint8_to_int32 u
| U8, S64 -> uint8_to_int64 u
| U8, S128 -> uint64_to_int128 (uint8_to_uint64 u)
| U16, U1 -> UInt8.rem (uint16_to_uint8 u) 2uy
| U16, U8 -> uint16_to_uint8 u
| U16, U16 -> u
| U16, U32 -> uint16_to_uint32 u
| U16, U64 -> uint16_to_uint64 u
| U16, U128 -> UInt128.uint64_to_uint128 (uint16_to_uint64 u)
| U16, S8 -> uint16_to_int8 u
| U16, S16 -> uint16_to_int16 u
| U16, S32 -> uint16_to_int32 u
| U16, S64 -> uint16_to_int64 u
| U16, S128 -> uint64_to_int128 (uint16_to_uint64 u)
| U32, U1 -> UInt8.rem (uint32_to_uint8 u) 2uy
| U32, U8 -> uint32_to_uint8 u
| U32, U16 -> uint32_to_uint16 u
| U32, U32 -> u
| U32, U64 -> uint32_to_uint64 u
| U32, U128 -> UInt128.uint64_to_uint128 (uint32_to_uint64 u)
| U32, S8 -> uint32_to_int8 u
| U32, S16 -> uint32_to_int16 u
| U32, S32 -> uint32_to_int32 u
| U32, S64 -> uint32_to_int64 u
| U32, S128 -> uint64_to_int128 (uint32_to_uint64 u)
| U64, U1 -> UInt8.rem (uint64_to_uint8 u) 2uy
| U64, U8 -> uint64_to_uint8 u
| U64, U16 -> uint64_to_uint16 u
| U64, U32 -> uint64_to_uint32 u
| U64, U64 -> u
| U64, U128 -> UInt128.uint64_to_uint128 u
| U64, S8 -> uint64_to_int8 u
| U64, S16 -> uint64_to_int16 u
| U64, S32 -> uint64_to_int32 u
| U64, S64 -> uint64_to_int64 u
| U64, S128 -> uint64_to_int128 u
| U128, U1 -> UInt8.rem (uint64_to_uint8 (uint128_to_uint64 u)) 2uy
| U128, U8 -> uint64_to_uint8 (UInt128.uint128_to_uint64 u)
| U128, U16 -> uint64_to_uint16 (UInt128.uint128_to_uint64 u)
| U128, U32 -> uint64_to_uint32 (UInt128.uint128_to_uint64 u)
| U128, U64 -> UInt128.uint128_to_uint64 u
| U128, U128 -> u
| U128, S8 -> uint64_to_int8 (UInt128.uint128_to_uint64 u)
| U128, S16 -> uint64_to_int16 (UInt128.uint128_to_uint64 u)
| U128, S32 -> uint64_to_int32 (UInt128.uint128_to_uint64 u)
| U128, S64 -> uint64_to_int64 (UInt128.uint128_to_uint64 u)
| U128, S128 -> uint128_to_int128 u
| S8, U1 -> UInt8.rem (int8_to_uint8 u) 2uy
| S8, U8 -> int8_to_uint8 u
| S8, U16 -> int8_to_uint16 u
| S8, U32 -> int8_to_uint32 u
| S8, U64 -> int8_to_uint64 u
| S8, U128 -> int64_to_uint128 (int8_to_int64 u)
| S8, S8 -> u
| S8, S16 -> int8_to_int16 u
| S8, S32 -> int8_to_int32 u
| S8, S64 -> int8_to_int64 u
| S8, S128 -> int64_to_int128 (int8_to_int64 u)
| S16, U1 -> UInt8.rem (int16_to_uint8 u) 2uy
| S16, U8 -> int16_to_uint8 u
| S16, U16 -> int16_to_uint16 u
| S16, U32 -> int16_to_uint32 u
| S16, U64 -> int16_to_uint64 u
| S16, U128 -> int64_to_uint128 (int16_to_int64 u)
| S16, S8 -> int16_to_int8 u
| S16, S16 -> u
| S16, S32 -> int16_to_int32 u
| S16, S64 -> int16_to_int64 u
| S16, S128 -> int64_to_int128 (int16_to_int64 u)
| S32, U1 -> UInt8.rem (int32_to_uint8 u) 2uy
| S32, U8 -> int32_to_uint8 u
| S32, U16 -> int32_to_uint16 u
| S32, U32 -> int32_to_uint32 u
| S32, U64 -> int32_to_uint64 u
| S32, U128 -> int64_to_uint128 (int32_to_int64 u)
| S32, S8 -> int32_to_int8 u
| S32, S16 -> int32_to_int16 u
| S32, S32 -> u
| S32, S64 -> int32_to_int64 u
| S32, S128 -> int64_to_int128 (int32_to_int64 u)
| S64, U1 -> UInt8.rem (int64_to_uint8 u) 2uy
| S64, U8 -> int64_to_uint8 u
| S64, U16 -> int64_to_uint16 u
| S64, U32 -> int64_to_uint32 u
| S64, U64 -> int64_to_uint64 u
| S64, U128 -> int64_to_uint128 u
| S64, S8 -> int64_to_int8 u
| S64, S16 -> int64_to_int16 u
| S64, S32 -> int64_to_int32 u
| S64, S64 -> u
| S64, S128 -> int64_to_int128 u
| S128, U1 -> UInt8.rem (uint64_to_uint8 (int128_to_uint64 u)) 2uy
| S128, U8 -> uint64_to_uint8 (int128_to_uint64 u)
| S128, U16 -> uint64_to_uint16 (int128_to_uint64 u)
| S128, U32 -> uint64_to_uint32 (int128_to_uint64 u)
| S128, U64 -> int128_to_uint64 u
| S128, U128 -> int128_to_uint128 u
| S128, S8 -> uint64_to_int8 (int128_to_uint64 u)
| S128, S16 -> uint64_to_int16 (int128_to_uint64 u)
| S128, S32 -> uint64_to_int32 (int128_to_uint64 u)
| S128, S64 -> uint64_to_int64 (int128_to_uint64 u)
| S128, S128 -> u
#pop-options
[@(strict_on_arguments [0])]
let ones t l =
match t with
| U1 -> 0x1uy
| U8 -> 0xFFuy
| U16 -> 0xFFFFus
| U32 -> 0xFFFFFFFFul
| U64 -> 0xFFFFFFFFFFFFFFFFuL
| U128 ->
let x = UInt128.uint64_to_uint128 0xFFFFFFFFFFFFFFFFuL in
let y = (UInt128.shift_left x 64ul) `UInt128.add` x in
assert_norm (UInt128.v y == pow2 128 - 1);
y
| _ -> mk_int (-1)
let zeros t l = mk_int 0
[@(strict_on_arguments [0])]
let add_mod #t #l a b =
match t with
| U1 -> UInt8.rem (UInt8.add_mod a b) 2uy
| U8 -> UInt8.add_mod a b
| U16 -> UInt16.add_mod a b
| U32 -> UInt32.add_mod a b
| U64 -> UInt64.add_mod a b
| U128 -> UInt128.add_mod a b
let add_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let add #t #l a b =
match t with
| U1 -> UInt8.add a b
| U8 -> UInt8.add a b
| U16 -> UInt16.add a b
| U32 -> UInt32.add a b
| U64 -> UInt64.add a b
| U128 -> UInt128.add a b
| S8 -> Int8.add a b
| S16 -> Int16.add a b
| S32 -> Int32.add a b
| S64 -> Int64.add a b
| S128 -> Int128.add a b
let add_lemma #t #l a b = ()
#push-options "--max_fuel 1"
[@(strict_on_arguments [0])]
let incr #t #l a =
match t with
| U1 -> UInt8.add a 1uy
| U8 -> UInt8.add a 1uy
| U16 -> UInt16.add a 1us
| U32 -> UInt32.add a 1ul
| U64 -> UInt64.add a 1uL
| U128 -> UInt128.add a (UInt128.uint_to_t 1)
| S8 -> Int8.add a 1y
| S16 -> Int16.add a 1s
| S32 -> Int32.add a 1l
| S64 -> Int64.add a 1L
| S128 -> Int128.add a (Int128.int_to_t 1)
let incr_lemma #t #l a = ()
#pop-options
[@(strict_on_arguments [0])]
let mul_mod #t #l a b =
match t with
| U1 -> UInt8.mul_mod a b
| U8 -> UInt8.mul_mod a b
| U16 -> UInt16.mul_mod a b
| U32 -> UInt32.mul_mod a b
| U64 -> UInt64.mul_mod a b
let mul_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let mul #t #l a b =
match t with
| U1 -> UInt8.mul a b
| U8 -> UInt8.mul a b
| U16 -> UInt16.mul a b
| U32 -> UInt32.mul a b
| U64 -> UInt64.mul a b
| S8 -> Int8.mul a b
| S16 -> Int16.mul a b
| S32 -> Int32.mul a b
| S64 -> Int64.mul a b
let mul_lemma #t #l a b = ()
let mul64_wide a b = UInt128.mul_wide a b
let mul64_wide_lemma a b = ()
let mul_s64_wide a b = Int128.mul_wide a b
let mul_s64_wide_lemma a b = ()
[@(strict_on_arguments [0])]
let sub_mod #t #l a b =
match t with
| U1 -> UInt8.rem (UInt8.sub_mod a b) 2uy
| U8 -> UInt8.sub_mod a b
| U16 -> UInt16.sub_mod a b
| U32 -> UInt32.sub_mod a b
| U64 -> UInt64.sub_mod a b
| U128 -> UInt128.sub_mod a b
let sub_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let sub #t #l a b =
match t with
| U1 -> UInt8.sub a b
| U8 -> UInt8.sub a b
| U16 -> UInt16.sub a b
| U32 -> UInt32.sub a b
| U64 -> UInt64.sub a b
| U128 -> UInt128.sub a b
| S8 -> Int8.sub a b
| S16 -> Int16.sub a b
| S32 -> Int32.sub a b
| S64 -> Int64.sub a b
| S128 -> Int128.sub a b
let sub_lemma #t #l a b = ()
#push-options "--max_fuel 1"
[@(strict_on_arguments [0])]
let decr #t #l a =
match t with
| U1 -> UInt8.sub a 1uy
| U8 -> UInt8.sub a 1uy
| U16 -> UInt16.sub a 1us
| U32 -> UInt32.sub a 1ul
| U64 -> UInt64.sub a 1uL
| U128 -> UInt128.sub a (UInt128.uint_to_t 1)
| S8 -> Int8.sub a 1y
| S16 -> Int16.sub a 1s
| S32 -> Int32.sub a 1l
| S64 -> Int64.sub a 1L
| S128 -> Int128.sub a (Int128.int_to_t 1)
let decr_lemma #t #l a = ()
#pop-options
[@(strict_on_arguments [0])]
let logxor #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logxor 0uy 0uy == 0uy);
assert_norm (UInt8.logxor 0uy 1uy == 1uy);
assert_norm (UInt8.logxor 1uy 0uy == 1uy);
assert_norm (UInt8.logxor 1uy 1uy == 0uy);
UInt8.logxor a b
| U8 -> UInt8.logxor a b
| U16 -> UInt16.logxor a b
| U32 -> UInt32.logxor a b
| U64 -> UInt64.logxor a b
| U128 -> UInt128.logxor a b
| S8 -> Int8.logxor a b
| S16 -> Int16.logxor a b
| S32 -> Int32.logxor a b
| S64 -> Int64.logxor a b
| S128 -> Int128.logxor a b
#push-options "--max_fuel 1"
val logxor_lemma_: #t:inttype -> #l:secrecy_level -> a:int_t t l -> b:int_t t l -> Lemma
(v (a `logxor` (a `logxor` b)) == v b)
let logxor_lemma_ #t #l a b =
match t with
| U1 | U8 | U16 | U32 | U64 | U128 ->
UInt.logxor_associative #(bits t) (v a) (v a) (v b);
UInt.logxor_self #(bits t) (v a);
UInt.logxor_commutative #(bits t) 0 (v b);
UInt.logxor_lemma_1 #(bits t) (v b)
| S8 | S16 | S32 | S64 | S128 ->
Int.logxor_associative #(bits t) (v a) (v a) (v b);
Int.logxor_self #(bits t) (v a);
Int.logxor_commutative #(bits t) 0 (v b);
Int.logxor_lemma_1 #(bits t) (v b)
let logxor_lemma #t #l a b =
logxor_lemma_ #t a b;
v_extensionality (logxor a (logxor a b)) b;
begin
match t with
| U1 | U8 | U16 | U32 | U64 | U128 -> UInt.logxor_commutative #(bits t) (v a) (v b)
| S8 | S16 | S32 | S64 | S128 -> Int.logxor_commutative #(bits t) (v a) (v b)
end;
v_extensionality (logxor a (logxor b a)) b;
begin
match t with
| U1 | U8 | U16 | U32 | U64 | U128 -> UInt.logxor_lemma_1 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.logxor_lemma_1 #(bits t) (v a)
end;
v_extensionality (logxor a (mk_int #t #l 0)) a
let logxor_lemma1 #t #l a b =
match v a, v b with
| _, 0 ->
UInt.logxor_lemma_1 #(bits t) (v a)
| 0, _ ->
UInt.logxor_commutative #(bits t) (v a) (v b);
UInt.logxor_lemma_1 #(bits t) (v b)
| 1, 1 ->
v_extensionality a b;
UInt.logxor_self #(bits t) (v a)
let logxor_spec #t #l a b =
match t with
| U1 ->
assert_norm (u1 0 `logxor` u1 0 == u1 0 /\ u1 0 `logxor` u1 1 == u1 1);
assert_norm (u1 1 `logxor` u1 0 == u1 1 /\ u1 1 `logxor` u1 1 == u1 0);
assert_norm (0 `logxor_v #U1` 0 == 0 /\ 0 `logxor_v #U1` 1 == 1);
assert_norm (1 `logxor_v #U1` 0 == 1 /\ 1 `logxor_v #U1` 1 == 0)
| _ -> ()
#pop-options
[@(strict_on_arguments [0])]
let logand #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logand 0uy 0uy == 0uy);
assert_norm (UInt8.logand 0uy 1uy == 0uy);
assert_norm (UInt8.logand 1uy 0uy == 0uy);
assert_norm (UInt8.logand 1uy 1uy == 1uy);
UInt8.logand a b
| U8 -> UInt8.logand a b
| U16 -> UInt16.logand a b
| U32 -> UInt32.logand a b
| U64 -> UInt64.logand a b
| U128 -> UInt128.logand a b
| S8 -> Int8.logand a b
| S16 -> Int16.logand a b
| S32 -> Int32.logand a b
| S64 -> Int64.logand a b
| S128 -> Int128.logand a b
let logand_zeros #t #l a =
match t with
| U1 -> assert_norm (u1 0 `logand` zeros U1 l == u1 0 /\ u1 1 `logand` zeros U1 l == u1 0)
| U8 | U16 | U32 | U64 | U128 -> UInt.logand_lemma_1 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.logand_lemma_1 #(bits t) (v a)
let logand_ones #t #l a =
match t with
| U1 -> assert_norm (u1 0 `logand` ones U1 l == u1 0 /\ u1 1 `logand` ones U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logand_lemma_2 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.logand_lemma_2 #(bits t) (v a) | false | false | Lib.IntTypes.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 200,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val logand_lemma: #t:inttype -> #l:secrecy_level
-> a:int_t t l
-> b:int_t t l
-> Lemma
(requires v a = 0 \/ v a = ones_v t)
(ensures (if v a = 0 then v (a `logand` b) == 0 else v (a `logand` b) == v b)) | [] | Lib.IntTypes.logand_lemma | {
"file_name": "lib/Lib.IntTypes.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | a: Lib.IntTypes.int_t t l -> b: Lib.IntTypes.int_t t l
-> FStar.Pervasives.Lemma
(requires Lib.IntTypes.v a = 0 \/ Lib.IntTypes.v a = Lib.IntTypes.ones_v t)
(ensures
((match Lib.IntTypes.v a = 0 with
| true -> Lib.IntTypes.v (Lib.IntTypes.logand a b) == 0
| _ -> Lib.IntTypes.v (Lib.IntTypes.logand a b) == Lib.IntTypes.v b)
<:
Type0)) | {
"end_col": 79,
"end_line": 531,
"start_col": 2,
"start_line": 524
} |
FStar.Pervasives.Lemma | val lognot_lemma: #t: inttype -> #l: secrecy_level ->
a: int_t t l ->
Lemma
(requires v a = 0 \/ v a = ones_v t)
(ensures (if v a = ones_v t then v (lognot a) == 0 else v (lognot a) == ones_v t)) | [
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let lognot_lemma #t #l a =
match t with
|U1 -> assert_norm(lognot (u1 0) == u1 1 /\ lognot (u1 1) == u1 0)
| U8 | U16 | U32 | U64 | U128 ->
FStar.UInt.lognot_lemma_1 #(bits t);
UInt.nth_lemma (FStar.UInt.lognot #(bits t) (UInt.ones (bits t))) (UInt.zero (bits t))
| S8 | S16 | S32 | S64 | S128 ->
Int.nth_lemma (FStar.Int.lognot #(bits t) (Int.zero (bits t))) (Int.ones (bits t));
Int.nth_lemma (FStar.Int.lognot #(bits t) (Int.ones (bits t))) (Int.zero (bits t)) | val lognot_lemma: #t: inttype -> #l: secrecy_level ->
a: int_t t l ->
Lemma
(requires v a = 0 \/ v a = ones_v t)
(ensures (if v a = ones_v t then v (lognot a) == 0 else v (lognot a) == ones_v t))
let lognot_lemma #t #l a = | false | null | true | match t with
| U1 -> assert_norm (lognot (u1 0) == u1 1 /\ lognot (u1 1) == u1 0)
| U8
| U16
| U32
| U64
| U128 ->
FStar.UInt.lognot_lemma_1 #(bits t);
UInt.nth_lemma (FStar.UInt.lognot #(bits t) (UInt.ones (bits t))) (UInt.zero (bits t))
| S8
| S16
| S32
| S64
| S128 ->
Int.nth_lemma (FStar.Int.lognot #(bits t) (Int.zero (bits t))) (Int.ones (bits t));
Int.nth_lemma (FStar.Int.lognot #(bits t) (Int.ones (bits t))) (Int.zero (bits t)) | {
"checked_file": "Lib.IntTypes.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt8.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt16.fsti.checked",
"FStar.UInt128.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int8.fsti.checked",
"FStar.Int64.fsti.checked",
"FStar.Int32.fsti.checked",
"FStar.Int16.fsti.checked",
"FStar.Int128.fsti.checked",
"FStar.Int.Cast.Full.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Int.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "Lib.IntTypes.fst"
} | [
"lemma"
] | [
"Lib.IntTypes.inttype",
"Lib.IntTypes.secrecy_level",
"Lib.IntTypes.int_t",
"FStar.Pervasives.assert_norm",
"Prims.l_and",
"Prims.eq2",
"Lib.IntTypes.U1",
"Lib.IntTypes.SEC",
"Lib.IntTypes.lognot",
"Lib.IntTypes.u1",
"FStar.UInt.nth_lemma",
"Lib.IntTypes.bits",
"FStar.UInt.lognot",
"FStar.UInt.ones",
"FStar.UInt.zero",
"Prims.unit",
"FStar.UInt.lognot_lemma_1",
"FStar.Int.nth_lemma",
"FStar.Int.lognot",
"FStar.Int.ones",
"FStar.Int.zero"
] | [] | module Lib.IntTypes
open FStar.Math.Lemmas
#push-options "--max_fuel 0 --max_ifuel 1 --z3rlimit 200"
let pow2_2 _ = assert_norm (pow2 2 = 4)
let pow2_3 _ = assert_norm (pow2 3 = 8)
let pow2_4 _ = assert_norm (pow2 4 = 16)
let pow2_127 _ = assert_norm (pow2 127 = 0x80000000000000000000000000000000)
let bits_numbytes t = ()
let sec_int_t t = pub_int_t t
let sec_int_v #t u = pub_int_v u
let secret #t x = x
[@(strict_on_arguments [0])]
let mk_int #t #l x =
match t with
| U1 -> UInt8.uint_to_t x
| U8 -> UInt8.uint_to_t x
| U16 -> UInt16.uint_to_t x
| U32 -> UInt32.uint_to_t x
| U64 -> UInt64.uint_to_t x
| U128 -> UInt128.uint_to_t x
| S8 -> Int8.int_to_t x
| S16 -> Int16.int_to_t x
| S32 -> Int32.int_to_t x
| S64 -> Int64.int_to_t x
| S128 -> Int128.int_to_t x
val v_extensionality:
#t:inttype
-> #l:secrecy_level
-> a:int_t t l
-> b:int_t t l
-> Lemma
(requires v a == v b)
(ensures a == b)
let v_extensionality #t #l a b =
match t with
| U1 -> ()
| U8 -> UInt8.v_inj a b
| U16 -> UInt16.v_inj a b
| U32 -> UInt32.v_inj a b
| U64 -> UInt64.v_inj a b
| U128 -> UInt128.v_inj a b
| S8 -> Int8.v_inj a b
| S16 -> Int16.v_inj a b
| S32 -> Int32.v_inj a b
| S64 -> Int64.v_inj a b
| S128 -> Int128.v_inj a b
let v_injective #t #l a =
v_extensionality a (mk_int (v a))
let v_mk_int #t #l n = ()
let u128 n = FStar.UInt128.uint64_to_uint128 (u64 n)
// KaRaMeL will extract this to FStar_Int128_int_to_t, which isn't provided
// We'll need to have FStar.Int128.int64_to_int128 to support int128_t literals
let i128 n =
assert_norm (pow2 (bits S64 - 1) <= pow2 (bits S128 - 1));
sint #S128 #SEC n
let size_to_uint32 x = x
let size_to_uint64 x = Int.Cast.uint32_to_uint64 x
let byte_to_uint8 x = x
let byte_to_int8 x = Int.Cast.uint8_to_int8 x
let op_At_Percent = Int.op_At_Percent
// FStar.UInt128 gets special treatment in KaRaMeL. There is no
// equivalent for FStar.Int128 at the moment, so we use the three
// assumed cast operators below.
//
// Using them will fail at runtime with an informative message.
// The commented-out implementations show that they are realizable.
//
// When support for `FStar.Int128` is added KaRaMeL, these casts must
// be added as special cases. When using builtin compiler support for
// `int128_t`, they can be implemented directly as C casts without
// undefined or implementation-defined behaviour.
assume
val uint128_to_int128: a:UInt128.t{v a <= maxint S128} -> b:Int128.t{Int128.v b == UInt128.v a}
//let uint128_to_int128 a = Int128.int_to_t (v a)
assume
val int128_to_uint128: a:Int128.t -> b:UInt128.t{UInt128.v b == Int128.v a % pow2 128}
//let int128_to_uint128 a = mk_int (v a % pow2 128)
assume
val int64_to_int128: a:Int64.t -> b:Int128.t{Int128.v b == Int64.v a}
//let int64_to_int128 a = Int128.int_to_t (v a)
val uint64_to_int128: a:UInt64.t -> b:Int128.t{Int128.v b == UInt64.v a}
let uint64_to_int128 a = uint128_to_int128 (Int.Cast.Full.uint64_to_uint128 a)
val int64_to_uint128: a:Int64.t -> b:UInt128.t{UInt128.v b == Int64.v a % pow2 128}
let int64_to_uint128 a = int128_to_uint128 (int64_to_int128 a)
val int128_to_uint64: a:Int128.t -> b:UInt64.t{UInt64.v b == Int128.v a % pow2 64}
let int128_to_uint64 a = Int.Cast.Full.uint128_to_uint64 (int128_to_uint128 a)
#push-options "--z3rlimit 1000"
[@(strict_on_arguments [0;2])]
let cast #t #l t' l' u =
assert_norm (pow2 8 = 2 * pow2 7);
assert_norm (pow2 16 = 2 * pow2 15);
assert_norm (pow2 64 * pow2 64 = pow2 128);
assert_norm (pow2 16 * pow2 48 = pow2 64);
assert_norm (pow2 8 * pow2 56 = pow2 64);
assert_norm (pow2 32 * pow2 32 = pow2 64);
modulo_modulo_lemma (v u) (pow2 32) (pow2 32);
modulo_modulo_lemma (v u) (pow2 64) (pow2 64);
modulo_modulo_lemma (v u) (pow2 128) (pow2 64);
modulo_modulo_lemma (v u) (pow2 16) (pow2 48);
modulo_modulo_lemma (v u) (pow2 8) (pow2 56);
let open FStar.Int.Cast in
let open FStar.Int.Cast.Full in
match t, t' with
| U1, U1 -> u
| U1, U8 -> u
| U1, U16 -> uint8_to_uint16 u
| U1, U32 -> uint8_to_uint32 u
| U1, U64 -> uint8_to_uint64 u
| U1, U128 -> UInt128.uint64_to_uint128 (uint8_to_uint64 u)
| U1, S8 -> uint8_to_int8 u
| U1, S16 -> uint8_to_int16 u
| U1, S32 -> uint8_to_int32 u
| U1, S64 -> uint8_to_int64 u
| U1, S128 -> uint64_to_int128 (uint8_to_uint64 u)
| U8, U1 -> UInt8.rem u 2uy
| U8, U8 -> u
| U8, U16 -> uint8_to_uint16 u
| U8, U32 -> uint8_to_uint32 u
| U8, U64 -> uint8_to_uint64 u
| U8, U128 -> UInt128.uint64_to_uint128 (uint8_to_uint64 u)
| U8, S8 -> uint8_to_int8 u
| U8, S16 -> uint8_to_int16 u
| U8, S32 -> uint8_to_int32 u
| U8, S64 -> uint8_to_int64 u
| U8, S128 -> uint64_to_int128 (uint8_to_uint64 u)
| U16, U1 -> UInt8.rem (uint16_to_uint8 u) 2uy
| U16, U8 -> uint16_to_uint8 u
| U16, U16 -> u
| U16, U32 -> uint16_to_uint32 u
| U16, U64 -> uint16_to_uint64 u
| U16, U128 -> UInt128.uint64_to_uint128 (uint16_to_uint64 u)
| U16, S8 -> uint16_to_int8 u
| U16, S16 -> uint16_to_int16 u
| U16, S32 -> uint16_to_int32 u
| U16, S64 -> uint16_to_int64 u
| U16, S128 -> uint64_to_int128 (uint16_to_uint64 u)
| U32, U1 -> UInt8.rem (uint32_to_uint8 u) 2uy
| U32, U8 -> uint32_to_uint8 u
| U32, U16 -> uint32_to_uint16 u
| U32, U32 -> u
| U32, U64 -> uint32_to_uint64 u
| U32, U128 -> UInt128.uint64_to_uint128 (uint32_to_uint64 u)
| U32, S8 -> uint32_to_int8 u
| U32, S16 -> uint32_to_int16 u
| U32, S32 -> uint32_to_int32 u
| U32, S64 -> uint32_to_int64 u
| U32, S128 -> uint64_to_int128 (uint32_to_uint64 u)
| U64, U1 -> UInt8.rem (uint64_to_uint8 u) 2uy
| U64, U8 -> uint64_to_uint8 u
| U64, U16 -> uint64_to_uint16 u
| U64, U32 -> uint64_to_uint32 u
| U64, U64 -> u
| U64, U128 -> UInt128.uint64_to_uint128 u
| U64, S8 -> uint64_to_int8 u
| U64, S16 -> uint64_to_int16 u
| U64, S32 -> uint64_to_int32 u
| U64, S64 -> uint64_to_int64 u
| U64, S128 -> uint64_to_int128 u
| U128, U1 -> UInt8.rem (uint64_to_uint8 (uint128_to_uint64 u)) 2uy
| U128, U8 -> uint64_to_uint8 (UInt128.uint128_to_uint64 u)
| U128, U16 -> uint64_to_uint16 (UInt128.uint128_to_uint64 u)
| U128, U32 -> uint64_to_uint32 (UInt128.uint128_to_uint64 u)
| U128, U64 -> UInt128.uint128_to_uint64 u
| U128, U128 -> u
| U128, S8 -> uint64_to_int8 (UInt128.uint128_to_uint64 u)
| U128, S16 -> uint64_to_int16 (UInt128.uint128_to_uint64 u)
| U128, S32 -> uint64_to_int32 (UInt128.uint128_to_uint64 u)
| U128, S64 -> uint64_to_int64 (UInt128.uint128_to_uint64 u)
| U128, S128 -> uint128_to_int128 u
| S8, U1 -> UInt8.rem (int8_to_uint8 u) 2uy
| S8, U8 -> int8_to_uint8 u
| S8, U16 -> int8_to_uint16 u
| S8, U32 -> int8_to_uint32 u
| S8, U64 -> int8_to_uint64 u
| S8, U128 -> int64_to_uint128 (int8_to_int64 u)
| S8, S8 -> u
| S8, S16 -> int8_to_int16 u
| S8, S32 -> int8_to_int32 u
| S8, S64 -> int8_to_int64 u
| S8, S128 -> int64_to_int128 (int8_to_int64 u)
| S16, U1 -> UInt8.rem (int16_to_uint8 u) 2uy
| S16, U8 -> int16_to_uint8 u
| S16, U16 -> int16_to_uint16 u
| S16, U32 -> int16_to_uint32 u
| S16, U64 -> int16_to_uint64 u
| S16, U128 -> int64_to_uint128 (int16_to_int64 u)
| S16, S8 -> int16_to_int8 u
| S16, S16 -> u
| S16, S32 -> int16_to_int32 u
| S16, S64 -> int16_to_int64 u
| S16, S128 -> int64_to_int128 (int16_to_int64 u)
| S32, U1 -> UInt8.rem (int32_to_uint8 u) 2uy
| S32, U8 -> int32_to_uint8 u
| S32, U16 -> int32_to_uint16 u
| S32, U32 -> int32_to_uint32 u
| S32, U64 -> int32_to_uint64 u
| S32, U128 -> int64_to_uint128 (int32_to_int64 u)
| S32, S8 -> int32_to_int8 u
| S32, S16 -> int32_to_int16 u
| S32, S32 -> u
| S32, S64 -> int32_to_int64 u
| S32, S128 -> int64_to_int128 (int32_to_int64 u)
| S64, U1 -> UInt8.rem (int64_to_uint8 u) 2uy
| S64, U8 -> int64_to_uint8 u
| S64, U16 -> int64_to_uint16 u
| S64, U32 -> int64_to_uint32 u
| S64, U64 -> int64_to_uint64 u
| S64, U128 -> int64_to_uint128 u
| S64, S8 -> int64_to_int8 u
| S64, S16 -> int64_to_int16 u
| S64, S32 -> int64_to_int32 u
| S64, S64 -> u
| S64, S128 -> int64_to_int128 u
| S128, U1 -> UInt8.rem (uint64_to_uint8 (int128_to_uint64 u)) 2uy
| S128, U8 -> uint64_to_uint8 (int128_to_uint64 u)
| S128, U16 -> uint64_to_uint16 (int128_to_uint64 u)
| S128, U32 -> uint64_to_uint32 (int128_to_uint64 u)
| S128, U64 -> int128_to_uint64 u
| S128, U128 -> int128_to_uint128 u
| S128, S8 -> uint64_to_int8 (int128_to_uint64 u)
| S128, S16 -> uint64_to_int16 (int128_to_uint64 u)
| S128, S32 -> uint64_to_int32 (int128_to_uint64 u)
| S128, S64 -> uint64_to_int64 (int128_to_uint64 u)
| S128, S128 -> u
#pop-options
[@(strict_on_arguments [0])]
let ones t l =
match t with
| U1 -> 0x1uy
| U8 -> 0xFFuy
| U16 -> 0xFFFFus
| U32 -> 0xFFFFFFFFul
| U64 -> 0xFFFFFFFFFFFFFFFFuL
| U128 ->
let x = UInt128.uint64_to_uint128 0xFFFFFFFFFFFFFFFFuL in
let y = (UInt128.shift_left x 64ul) `UInt128.add` x in
assert_norm (UInt128.v y == pow2 128 - 1);
y
| _ -> mk_int (-1)
let zeros t l = mk_int 0
[@(strict_on_arguments [0])]
let add_mod #t #l a b =
match t with
| U1 -> UInt8.rem (UInt8.add_mod a b) 2uy
| U8 -> UInt8.add_mod a b
| U16 -> UInt16.add_mod a b
| U32 -> UInt32.add_mod a b
| U64 -> UInt64.add_mod a b
| U128 -> UInt128.add_mod a b
let add_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let add #t #l a b =
match t with
| U1 -> UInt8.add a b
| U8 -> UInt8.add a b
| U16 -> UInt16.add a b
| U32 -> UInt32.add a b
| U64 -> UInt64.add a b
| U128 -> UInt128.add a b
| S8 -> Int8.add a b
| S16 -> Int16.add a b
| S32 -> Int32.add a b
| S64 -> Int64.add a b
| S128 -> Int128.add a b
let add_lemma #t #l a b = ()
#push-options "--max_fuel 1"
[@(strict_on_arguments [0])]
let incr #t #l a =
match t with
| U1 -> UInt8.add a 1uy
| U8 -> UInt8.add a 1uy
| U16 -> UInt16.add a 1us
| U32 -> UInt32.add a 1ul
| U64 -> UInt64.add a 1uL
| U128 -> UInt128.add a (UInt128.uint_to_t 1)
| S8 -> Int8.add a 1y
| S16 -> Int16.add a 1s
| S32 -> Int32.add a 1l
| S64 -> Int64.add a 1L
| S128 -> Int128.add a (Int128.int_to_t 1)
let incr_lemma #t #l a = ()
#pop-options
[@(strict_on_arguments [0])]
let mul_mod #t #l a b =
match t with
| U1 -> UInt8.mul_mod a b
| U8 -> UInt8.mul_mod a b
| U16 -> UInt16.mul_mod a b
| U32 -> UInt32.mul_mod a b
| U64 -> UInt64.mul_mod a b
let mul_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let mul #t #l a b =
match t with
| U1 -> UInt8.mul a b
| U8 -> UInt8.mul a b
| U16 -> UInt16.mul a b
| U32 -> UInt32.mul a b
| U64 -> UInt64.mul a b
| S8 -> Int8.mul a b
| S16 -> Int16.mul a b
| S32 -> Int32.mul a b
| S64 -> Int64.mul a b
let mul_lemma #t #l a b = ()
let mul64_wide a b = UInt128.mul_wide a b
let mul64_wide_lemma a b = ()
let mul_s64_wide a b = Int128.mul_wide a b
let mul_s64_wide_lemma a b = ()
[@(strict_on_arguments [0])]
let sub_mod #t #l a b =
match t with
| U1 -> UInt8.rem (UInt8.sub_mod a b) 2uy
| U8 -> UInt8.sub_mod a b
| U16 -> UInt16.sub_mod a b
| U32 -> UInt32.sub_mod a b
| U64 -> UInt64.sub_mod a b
| U128 -> UInt128.sub_mod a b
let sub_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let sub #t #l a b =
match t with
| U1 -> UInt8.sub a b
| U8 -> UInt8.sub a b
| U16 -> UInt16.sub a b
| U32 -> UInt32.sub a b
| U64 -> UInt64.sub a b
| U128 -> UInt128.sub a b
| S8 -> Int8.sub a b
| S16 -> Int16.sub a b
| S32 -> Int32.sub a b
| S64 -> Int64.sub a b
| S128 -> Int128.sub a b
let sub_lemma #t #l a b = ()
#push-options "--max_fuel 1"
[@(strict_on_arguments [0])]
let decr #t #l a =
match t with
| U1 -> UInt8.sub a 1uy
| U8 -> UInt8.sub a 1uy
| U16 -> UInt16.sub a 1us
| U32 -> UInt32.sub a 1ul
| U64 -> UInt64.sub a 1uL
| U128 -> UInt128.sub a (UInt128.uint_to_t 1)
| S8 -> Int8.sub a 1y
| S16 -> Int16.sub a 1s
| S32 -> Int32.sub a 1l
| S64 -> Int64.sub a 1L
| S128 -> Int128.sub a (Int128.int_to_t 1)
let decr_lemma #t #l a = ()
#pop-options
[@(strict_on_arguments [0])]
let logxor #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logxor 0uy 0uy == 0uy);
assert_norm (UInt8.logxor 0uy 1uy == 1uy);
assert_norm (UInt8.logxor 1uy 0uy == 1uy);
assert_norm (UInt8.logxor 1uy 1uy == 0uy);
UInt8.logxor a b
| U8 -> UInt8.logxor a b
| U16 -> UInt16.logxor a b
| U32 -> UInt32.logxor a b
| U64 -> UInt64.logxor a b
| U128 -> UInt128.logxor a b
| S8 -> Int8.logxor a b
| S16 -> Int16.logxor a b
| S32 -> Int32.logxor a b
| S64 -> Int64.logxor a b
| S128 -> Int128.logxor a b
#push-options "--max_fuel 1"
val logxor_lemma_: #t:inttype -> #l:secrecy_level -> a:int_t t l -> b:int_t t l -> Lemma
(v (a `logxor` (a `logxor` b)) == v b)
let logxor_lemma_ #t #l a b =
match t with
| U1 | U8 | U16 | U32 | U64 | U128 ->
UInt.logxor_associative #(bits t) (v a) (v a) (v b);
UInt.logxor_self #(bits t) (v a);
UInt.logxor_commutative #(bits t) 0 (v b);
UInt.logxor_lemma_1 #(bits t) (v b)
| S8 | S16 | S32 | S64 | S128 ->
Int.logxor_associative #(bits t) (v a) (v a) (v b);
Int.logxor_self #(bits t) (v a);
Int.logxor_commutative #(bits t) 0 (v b);
Int.logxor_lemma_1 #(bits t) (v b)
let logxor_lemma #t #l a b =
logxor_lemma_ #t a b;
v_extensionality (logxor a (logxor a b)) b;
begin
match t with
| U1 | U8 | U16 | U32 | U64 | U128 -> UInt.logxor_commutative #(bits t) (v a) (v b)
| S8 | S16 | S32 | S64 | S128 -> Int.logxor_commutative #(bits t) (v a) (v b)
end;
v_extensionality (logxor a (logxor b a)) b;
begin
match t with
| U1 | U8 | U16 | U32 | U64 | U128 -> UInt.logxor_lemma_1 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.logxor_lemma_1 #(bits t) (v a)
end;
v_extensionality (logxor a (mk_int #t #l 0)) a
let logxor_lemma1 #t #l a b =
match v a, v b with
| _, 0 ->
UInt.logxor_lemma_1 #(bits t) (v a)
| 0, _ ->
UInt.logxor_commutative #(bits t) (v a) (v b);
UInt.logxor_lemma_1 #(bits t) (v b)
| 1, 1 ->
v_extensionality a b;
UInt.logxor_self #(bits t) (v a)
let logxor_spec #t #l a b =
match t with
| U1 ->
assert_norm (u1 0 `logxor` u1 0 == u1 0 /\ u1 0 `logxor` u1 1 == u1 1);
assert_norm (u1 1 `logxor` u1 0 == u1 1 /\ u1 1 `logxor` u1 1 == u1 0);
assert_norm (0 `logxor_v #U1` 0 == 0 /\ 0 `logxor_v #U1` 1 == 1);
assert_norm (1 `logxor_v #U1` 0 == 1 /\ 1 `logxor_v #U1` 1 == 0)
| _ -> ()
#pop-options
[@(strict_on_arguments [0])]
let logand #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logand 0uy 0uy == 0uy);
assert_norm (UInt8.logand 0uy 1uy == 0uy);
assert_norm (UInt8.logand 1uy 0uy == 0uy);
assert_norm (UInt8.logand 1uy 1uy == 1uy);
UInt8.logand a b
| U8 -> UInt8.logand a b
| U16 -> UInt16.logand a b
| U32 -> UInt32.logand a b
| U64 -> UInt64.logand a b
| U128 -> UInt128.logand a b
| S8 -> Int8.logand a b
| S16 -> Int16.logand a b
| S32 -> Int32.logand a b
| S64 -> Int64.logand a b
| S128 -> Int128.logand a b
let logand_zeros #t #l a =
match t with
| U1 -> assert_norm (u1 0 `logand` zeros U1 l == u1 0 /\ u1 1 `logand` zeros U1 l == u1 0)
| U8 | U16 | U32 | U64 | U128 -> UInt.logand_lemma_1 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.logand_lemma_1 #(bits t) (v a)
let logand_ones #t #l a =
match t with
| U1 -> assert_norm (u1 0 `logand` ones U1 l == u1 0 /\ u1 1 `logand` ones U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logand_lemma_2 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.logand_lemma_2 #(bits t) (v a)
let logand_lemma #t #l a b =
logand_zeros #t #l b;
logand_ones #t #l b;
match t with
| U1 ->
assert_norm (u1 0 `logand` zeros U1 l == u1 0 /\ u1 1 `logand` zeros U1 l == u1 0);
assert_norm (u1 0 `logand` ones U1 l == u1 0 /\ u1 1 `logand` ones U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logand_commutative #(bits t) (v a) (v b)
| S8 | S16 | S32 | S64 | S128 -> Int.logand_commutative #(bits t) (v a) (v b)
let logand_spec #t #l a b =
match t with
| U1 ->
assert_norm (u1 0 `logand` u1 0 == u1 0 /\ u1 0 `logand` u1 1 == u1 0);
assert_norm (u1 1 `logand` u1 0 == u1 0 /\ u1 1 `logand` u1 1 == u1 1);
assert_norm (0 `logand_v #U1` 0 == 0 /\ 0 `logand_v #U1` 1 == 0);
assert_norm (1 `logand_v #U1` 0 == 0 /\ 1 `logand_v #U1` 1 == 1)
| _ -> ()
let logand_le #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logand 0uy 0uy == 0uy);
assert_norm (UInt8.logand 0uy 1uy == 0uy);
assert_norm (UInt8.logand 1uy 0uy == 0uy);
assert_norm (UInt8.logand 1uy 1uy == 1uy)
| U8 -> UInt.logand_le (UInt.to_uint_t 8 (v a)) (UInt.to_uint_t 8 (v b))
| U16 -> UInt.logand_le (UInt.to_uint_t 16 (v a)) (UInt.to_uint_t 16 (v b))
| U32 -> UInt.logand_le (UInt.to_uint_t 32 (v a)) (UInt.to_uint_t 32 (v b))
| U64 -> UInt.logand_le (UInt.to_uint_t 64 (v a)) (UInt.to_uint_t 64 (v b))
| U128 -> UInt.logand_le (UInt.to_uint_t 128 (v a)) (UInt.to_uint_t 128 (v b))
let logand_mask #t #l a b m =
match t with
| U1 ->
assert_norm (UInt8.logand 0uy 0uy == 0uy);
assert_norm (UInt8.logand 0uy 1uy == 0uy);
assert_norm (UInt8.logand 1uy 0uy == 0uy);
assert_norm (UInt8.logand 1uy 1uy == 1uy)
| U8 -> UInt.logand_mask (UInt.to_uint_t 8 (v a)) m
| U16 -> UInt.logand_mask (UInt.to_uint_t 16 (v a)) m
| U32 -> UInt.logand_mask (UInt.to_uint_t 32 (v a)) m
| U64 -> UInt.logand_mask (UInt.to_uint_t 64 (v a)) m
| U128 -> UInt.logand_mask (UInt.to_uint_t 128 (v a)) m
[@(strict_on_arguments [0])]
let logor #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logor 0uy 0uy == 0uy);
assert_norm (UInt8.logor 0uy 1uy == 1uy);
assert_norm (UInt8.logor 1uy 0uy == 1uy);
assert_norm (UInt8.logor 1uy 1uy == 1uy);
UInt8.logor a b
| U8 -> UInt8.logor a b
| U16 -> UInt16.logor a b
| U32 -> UInt32.logor a b
| U64 -> UInt64.logor a b
| U128 -> UInt128.logor a b
| S8 -> Int8.logor a b
| S16 -> Int16.logor a b
| S32 -> Int32.logor a b
| S64 -> Int64.logor a b
| S128 -> Int128.logor a b
#push-options "--max_fuel 1"
let logor_disjoint #t #l a b m =
if m > 0 then
begin
UInt.logor_disjoint #(bits t) (v b) (v a) m;
UInt.logor_commutative #(bits t) (v b) (v a)
end
else
begin
UInt.logor_commutative #(bits t) (v a) (v b);
UInt.logor_lemma_1 #(bits t) (v b)
end
#pop-options
let logor_zeros #t #l a =
match t with
|U1 -> assert_norm(u1 0 `logor` zeros U1 l == u1 0 /\ u1 1 `logor` zeros U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logor_lemma_1 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.nth_lemma #(bits t) (Int.logor #(bits t) (v a) (Int.zero (bits t))) (v a)
let logor_ones #t #l a =
match t with
|U1 -> assert_norm(u1 0 `logor` ones U1 l == u1 1 /\ u1 1 `logor` ones U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logor_lemma_2 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.nth_lemma (Int.logor #(bits t) (v a) (Int.ones (bits t))) (Int.ones (bits t))
let logor_lemma #t #l a b =
logor_zeros #t #l b;
logor_ones #t #l b;
match t with
| U1 ->
assert_norm(u1 0 `logor` ones U1 l == u1 1 /\ u1 1 `logor` ones U1 l == u1 1);
assert_norm(u1 0 `logor` zeros U1 l == u1 0 /\ u1 1 `logor` zeros U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logor_commutative #(bits t) (v a) (v b)
| S8 | S16 | S32 | S64 | S128 -> Int.nth_lemma #(bits t) (Int.logor #(bits t) (v a) (v b)) (Int.logor #(bits t) (v b) (v a))
let logor_spec #t #l a b =
match t with
| U1 ->
assert_norm(u1 0 `logor` ones U1 l == u1 1 /\ u1 1 `logor` ones U1 l == u1 1);
assert_norm(u1 0 `logor` zeros U1 l == u1 0 /\ u1 1 `logor` zeros U1 l == u1 1);
assert_norm (0 `logor_v #U1` 0 == 0 /\ 0 `logor_v #U1` 1 == 1);
assert_norm (1 `logor_v #U1` 0 == 1 /\ 1 `logor_v #U1` 1 == 1)
| _ -> ()
[@(strict_on_arguments [0])]
let lognot #t #l a =
match t with
| U1 -> UInt8.rem (UInt8.lognot a) 2uy
| U8 -> UInt8.lognot a
| U16 -> UInt16.lognot a
| U32 -> UInt32.lognot a
| U64 -> UInt64.lognot a
| U128 -> UInt128.lognot a
| S8 -> Int8.lognot a
| S16 -> Int16.lognot a
| S32 -> Int32.lognot a
| S64 -> Int64.lognot a
| S128 -> Int128.lognot a | false | false | Lib.IntTypes.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 200,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val lognot_lemma: #t: inttype -> #l: secrecy_level ->
a: int_t t l ->
Lemma
(requires v a = 0 \/ v a = ones_v t)
(ensures (if v a = ones_v t then v (lognot a) == 0 else v (lognot a) == ones_v t)) | [] | Lib.IntTypes.lognot_lemma | {
"file_name": "lib/Lib.IntTypes.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | a: Lib.IntTypes.int_t t l
-> FStar.Pervasives.Lemma
(requires Lib.IntTypes.v a = 0 \/ Lib.IntTypes.v a = Lib.IntTypes.ones_v t)
(ensures
((match Lib.IntTypes.v a = Lib.IntTypes.ones_v t with
| true -> Lib.IntTypes.v (Lib.IntTypes.lognot a) == 0
| _ -> Lib.IntTypes.v (Lib.IntTypes.lognot a) == Lib.IntTypes.ones_v t)
<:
Type0)) | {
"end_col": 86,
"end_line": 661,
"start_col": 2,
"start_line": 654
} |
Prims.Tot | val mul64_wide: uint64 -> uint64 -> uint128 | [
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let mul64_wide a b = UInt128.mul_wide a b | val mul64_wide: uint64 -> uint64 -> uint128
let mul64_wide a b = | false | null | false | UInt128.mul_wide a b | {
"checked_file": "Lib.IntTypes.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt8.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt16.fsti.checked",
"FStar.UInt128.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int8.fsti.checked",
"FStar.Int64.fsti.checked",
"FStar.Int32.fsti.checked",
"FStar.Int16.fsti.checked",
"FStar.Int128.fsti.checked",
"FStar.Int.Cast.Full.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Int.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "Lib.IntTypes.fst"
} | [
"total"
] | [
"Lib.IntTypes.uint64",
"FStar.UInt128.mul_wide",
"Lib.IntTypes.uint128"
] | [] | module Lib.IntTypes
open FStar.Math.Lemmas
#push-options "--max_fuel 0 --max_ifuel 1 --z3rlimit 200"
let pow2_2 _ = assert_norm (pow2 2 = 4)
let pow2_3 _ = assert_norm (pow2 3 = 8)
let pow2_4 _ = assert_norm (pow2 4 = 16)
let pow2_127 _ = assert_norm (pow2 127 = 0x80000000000000000000000000000000)
let bits_numbytes t = ()
let sec_int_t t = pub_int_t t
let sec_int_v #t u = pub_int_v u
let secret #t x = x
[@(strict_on_arguments [0])]
let mk_int #t #l x =
match t with
| U1 -> UInt8.uint_to_t x
| U8 -> UInt8.uint_to_t x
| U16 -> UInt16.uint_to_t x
| U32 -> UInt32.uint_to_t x
| U64 -> UInt64.uint_to_t x
| U128 -> UInt128.uint_to_t x
| S8 -> Int8.int_to_t x
| S16 -> Int16.int_to_t x
| S32 -> Int32.int_to_t x
| S64 -> Int64.int_to_t x
| S128 -> Int128.int_to_t x
val v_extensionality:
#t:inttype
-> #l:secrecy_level
-> a:int_t t l
-> b:int_t t l
-> Lemma
(requires v a == v b)
(ensures a == b)
let v_extensionality #t #l a b =
match t with
| U1 -> ()
| U8 -> UInt8.v_inj a b
| U16 -> UInt16.v_inj a b
| U32 -> UInt32.v_inj a b
| U64 -> UInt64.v_inj a b
| U128 -> UInt128.v_inj a b
| S8 -> Int8.v_inj a b
| S16 -> Int16.v_inj a b
| S32 -> Int32.v_inj a b
| S64 -> Int64.v_inj a b
| S128 -> Int128.v_inj a b
let v_injective #t #l a =
v_extensionality a (mk_int (v a))
let v_mk_int #t #l n = ()
let u128 n = FStar.UInt128.uint64_to_uint128 (u64 n)
// KaRaMeL will extract this to FStar_Int128_int_to_t, which isn't provided
// We'll need to have FStar.Int128.int64_to_int128 to support int128_t literals
let i128 n =
assert_norm (pow2 (bits S64 - 1) <= pow2 (bits S128 - 1));
sint #S128 #SEC n
let size_to_uint32 x = x
let size_to_uint64 x = Int.Cast.uint32_to_uint64 x
let byte_to_uint8 x = x
let byte_to_int8 x = Int.Cast.uint8_to_int8 x
let op_At_Percent = Int.op_At_Percent
// FStar.UInt128 gets special treatment in KaRaMeL. There is no
// equivalent for FStar.Int128 at the moment, so we use the three
// assumed cast operators below.
//
// Using them will fail at runtime with an informative message.
// The commented-out implementations show that they are realizable.
//
// When support for `FStar.Int128` is added KaRaMeL, these casts must
// be added as special cases. When using builtin compiler support for
// `int128_t`, they can be implemented directly as C casts without
// undefined or implementation-defined behaviour.
assume
val uint128_to_int128: a:UInt128.t{v a <= maxint S128} -> b:Int128.t{Int128.v b == UInt128.v a}
//let uint128_to_int128 a = Int128.int_to_t (v a)
assume
val int128_to_uint128: a:Int128.t -> b:UInt128.t{UInt128.v b == Int128.v a % pow2 128}
//let int128_to_uint128 a = mk_int (v a % pow2 128)
assume
val int64_to_int128: a:Int64.t -> b:Int128.t{Int128.v b == Int64.v a}
//let int64_to_int128 a = Int128.int_to_t (v a)
val uint64_to_int128: a:UInt64.t -> b:Int128.t{Int128.v b == UInt64.v a}
let uint64_to_int128 a = uint128_to_int128 (Int.Cast.Full.uint64_to_uint128 a)
val int64_to_uint128: a:Int64.t -> b:UInt128.t{UInt128.v b == Int64.v a % pow2 128}
let int64_to_uint128 a = int128_to_uint128 (int64_to_int128 a)
val int128_to_uint64: a:Int128.t -> b:UInt64.t{UInt64.v b == Int128.v a % pow2 64}
let int128_to_uint64 a = Int.Cast.Full.uint128_to_uint64 (int128_to_uint128 a)
#push-options "--z3rlimit 1000"
[@(strict_on_arguments [0;2])]
let cast #t #l t' l' u =
assert_norm (pow2 8 = 2 * pow2 7);
assert_norm (pow2 16 = 2 * pow2 15);
assert_norm (pow2 64 * pow2 64 = pow2 128);
assert_norm (pow2 16 * pow2 48 = pow2 64);
assert_norm (pow2 8 * pow2 56 = pow2 64);
assert_norm (pow2 32 * pow2 32 = pow2 64);
modulo_modulo_lemma (v u) (pow2 32) (pow2 32);
modulo_modulo_lemma (v u) (pow2 64) (pow2 64);
modulo_modulo_lemma (v u) (pow2 128) (pow2 64);
modulo_modulo_lemma (v u) (pow2 16) (pow2 48);
modulo_modulo_lemma (v u) (pow2 8) (pow2 56);
let open FStar.Int.Cast in
let open FStar.Int.Cast.Full in
match t, t' with
| U1, U1 -> u
| U1, U8 -> u
| U1, U16 -> uint8_to_uint16 u
| U1, U32 -> uint8_to_uint32 u
| U1, U64 -> uint8_to_uint64 u
| U1, U128 -> UInt128.uint64_to_uint128 (uint8_to_uint64 u)
| U1, S8 -> uint8_to_int8 u
| U1, S16 -> uint8_to_int16 u
| U1, S32 -> uint8_to_int32 u
| U1, S64 -> uint8_to_int64 u
| U1, S128 -> uint64_to_int128 (uint8_to_uint64 u)
| U8, U1 -> UInt8.rem u 2uy
| U8, U8 -> u
| U8, U16 -> uint8_to_uint16 u
| U8, U32 -> uint8_to_uint32 u
| U8, U64 -> uint8_to_uint64 u
| U8, U128 -> UInt128.uint64_to_uint128 (uint8_to_uint64 u)
| U8, S8 -> uint8_to_int8 u
| U8, S16 -> uint8_to_int16 u
| U8, S32 -> uint8_to_int32 u
| U8, S64 -> uint8_to_int64 u
| U8, S128 -> uint64_to_int128 (uint8_to_uint64 u)
| U16, U1 -> UInt8.rem (uint16_to_uint8 u) 2uy
| U16, U8 -> uint16_to_uint8 u
| U16, U16 -> u
| U16, U32 -> uint16_to_uint32 u
| U16, U64 -> uint16_to_uint64 u
| U16, U128 -> UInt128.uint64_to_uint128 (uint16_to_uint64 u)
| U16, S8 -> uint16_to_int8 u
| U16, S16 -> uint16_to_int16 u
| U16, S32 -> uint16_to_int32 u
| U16, S64 -> uint16_to_int64 u
| U16, S128 -> uint64_to_int128 (uint16_to_uint64 u)
| U32, U1 -> UInt8.rem (uint32_to_uint8 u) 2uy
| U32, U8 -> uint32_to_uint8 u
| U32, U16 -> uint32_to_uint16 u
| U32, U32 -> u
| U32, U64 -> uint32_to_uint64 u
| U32, U128 -> UInt128.uint64_to_uint128 (uint32_to_uint64 u)
| U32, S8 -> uint32_to_int8 u
| U32, S16 -> uint32_to_int16 u
| U32, S32 -> uint32_to_int32 u
| U32, S64 -> uint32_to_int64 u
| U32, S128 -> uint64_to_int128 (uint32_to_uint64 u)
| U64, U1 -> UInt8.rem (uint64_to_uint8 u) 2uy
| U64, U8 -> uint64_to_uint8 u
| U64, U16 -> uint64_to_uint16 u
| U64, U32 -> uint64_to_uint32 u
| U64, U64 -> u
| U64, U128 -> UInt128.uint64_to_uint128 u
| U64, S8 -> uint64_to_int8 u
| U64, S16 -> uint64_to_int16 u
| U64, S32 -> uint64_to_int32 u
| U64, S64 -> uint64_to_int64 u
| U64, S128 -> uint64_to_int128 u
| U128, U1 -> UInt8.rem (uint64_to_uint8 (uint128_to_uint64 u)) 2uy
| U128, U8 -> uint64_to_uint8 (UInt128.uint128_to_uint64 u)
| U128, U16 -> uint64_to_uint16 (UInt128.uint128_to_uint64 u)
| U128, U32 -> uint64_to_uint32 (UInt128.uint128_to_uint64 u)
| U128, U64 -> UInt128.uint128_to_uint64 u
| U128, U128 -> u
| U128, S8 -> uint64_to_int8 (UInt128.uint128_to_uint64 u)
| U128, S16 -> uint64_to_int16 (UInt128.uint128_to_uint64 u)
| U128, S32 -> uint64_to_int32 (UInt128.uint128_to_uint64 u)
| U128, S64 -> uint64_to_int64 (UInt128.uint128_to_uint64 u)
| U128, S128 -> uint128_to_int128 u
| S8, U1 -> UInt8.rem (int8_to_uint8 u) 2uy
| S8, U8 -> int8_to_uint8 u
| S8, U16 -> int8_to_uint16 u
| S8, U32 -> int8_to_uint32 u
| S8, U64 -> int8_to_uint64 u
| S8, U128 -> int64_to_uint128 (int8_to_int64 u)
| S8, S8 -> u
| S8, S16 -> int8_to_int16 u
| S8, S32 -> int8_to_int32 u
| S8, S64 -> int8_to_int64 u
| S8, S128 -> int64_to_int128 (int8_to_int64 u)
| S16, U1 -> UInt8.rem (int16_to_uint8 u) 2uy
| S16, U8 -> int16_to_uint8 u
| S16, U16 -> int16_to_uint16 u
| S16, U32 -> int16_to_uint32 u
| S16, U64 -> int16_to_uint64 u
| S16, U128 -> int64_to_uint128 (int16_to_int64 u)
| S16, S8 -> int16_to_int8 u
| S16, S16 -> u
| S16, S32 -> int16_to_int32 u
| S16, S64 -> int16_to_int64 u
| S16, S128 -> int64_to_int128 (int16_to_int64 u)
| S32, U1 -> UInt8.rem (int32_to_uint8 u) 2uy
| S32, U8 -> int32_to_uint8 u
| S32, U16 -> int32_to_uint16 u
| S32, U32 -> int32_to_uint32 u
| S32, U64 -> int32_to_uint64 u
| S32, U128 -> int64_to_uint128 (int32_to_int64 u)
| S32, S8 -> int32_to_int8 u
| S32, S16 -> int32_to_int16 u
| S32, S32 -> u
| S32, S64 -> int32_to_int64 u
| S32, S128 -> int64_to_int128 (int32_to_int64 u)
| S64, U1 -> UInt8.rem (int64_to_uint8 u) 2uy
| S64, U8 -> int64_to_uint8 u
| S64, U16 -> int64_to_uint16 u
| S64, U32 -> int64_to_uint32 u
| S64, U64 -> int64_to_uint64 u
| S64, U128 -> int64_to_uint128 u
| S64, S8 -> int64_to_int8 u
| S64, S16 -> int64_to_int16 u
| S64, S32 -> int64_to_int32 u
| S64, S64 -> u
| S64, S128 -> int64_to_int128 u
| S128, U1 -> UInt8.rem (uint64_to_uint8 (int128_to_uint64 u)) 2uy
| S128, U8 -> uint64_to_uint8 (int128_to_uint64 u)
| S128, U16 -> uint64_to_uint16 (int128_to_uint64 u)
| S128, U32 -> uint64_to_uint32 (int128_to_uint64 u)
| S128, U64 -> int128_to_uint64 u
| S128, U128 -> int128_to_uint128 u
| S128, S8 -> uint64_to_int8 (int128_to_uint64 u)
| S128, S16 -> uint64_to_int16 (int128_to_uint64 u)
| S128, S32 -> uint64_to_int32 (int128_to_uint64 u)
| S128, S64 -> uint64_to_int64 (int128_to_uint64 u)
| S128, S128 -> u
#pop-options
[@(strict_on_arguments [0])]
let ones t l =
match t with
| U1 -> 0x1uy
| U8 -> 0xFFuy
| U16 -> 0xFFFFus
| U32 -> 0xFFFFFFFFul
| U64 -> 0xFFFFFFFFFFFFFFFFuL
| U128 ->
let x = UInt128.uint64_to_uint128 0xFFFFFFFFFFFFFFFFuL in
let y = (UInt128.shift_left x 64ul) `UInt128.add` x in
assert_norm (UInt128.v y == pow2 128 - 1);
y
| _ -> mk_int (-1)
let zeros t l = mk_int 0
[@(strict_on_arguments [0])]
let add_mod #t #l a b =
match t with
| U1 -> UInt8.rem (UInt8.add_mod a b) 2uy
| U8 -> UInt8.add_mod a b
| U16 -> UInt16.add_mod a b
| U32 -> UInt32.add_mod a b
| U64 -> UInt64.add_mod a b
| U128 -> UInt128.add_mod a b
let add_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let add #t #l a b =
match t with
| U1 -> UInt8.add a b
| U8 -> UInt8.add a b
| U16 -> UInt16.add a b
| U32 -> UInt32.add a b
| U64 -> UInt64.add a b
| U128 -> UInt128.add a b
| S8 -> Int8.add a b
| S16 -> Int16.add a b
| S32 -> Int32.add a b
| S64 -> Int64.add a b
| S128 -> Int128.add a b
let add_lemma #t #l a b = ()
#push-options "--max_fuel 1"
[@(strict_on_arguments [0])]
let incr #t #l a =
match t with
| U1 -> UInt8.add a 1uy
| U8 -> UInt8.add a 1uy
| U16 -> UInt16.add a 1us
| U32 -> UInt32.add a 1ul
| U64 -> UInt64.add a 1uL
| U128 -> UInt128.add a (UInt128.uint_to_t 1)
| S8 -> Int8.add a 1y
| S16 -> Int16.add a 1s
| S32 -> Int32.add a 1l
| S64 -> Int64.add a 1L
| S128 -> Int128.add a (Int128.int_to_t 1)
let incr_lemma #t #l a = ()
#pop-options
[@(strict_on_arguments [0])]
let mul_mod #t #l a b =
match t with
| U1 -> UInt8.mul_mod a b
| U8 -> UInt8.mul_mod a b
| U16 -> UInt16.mul_mod a b
| U32 -> UInt32.mul_mod a b
| U64 -> UInt64.mul_mod a b
let mul_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let mul #t #l a b =
match t with
| U1 -> UInt8.mul a b
| U8 -> UInt8.mul a b
| U16 -> UInt16.mul a b
| U32 -> UInt32.mul a b
| U64 -> UInt64.mul a b
| S8 -> Int8.mul a b
| S16 -> Int16.mul a b
| S32 -> Int32.mul a b
| S64 -> Int64.mul a b
let mul_lemma #t #l a b = () | false | true | Lib.IntTypes.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 200,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val mul64_wide: uint64 -> uint64 -> uint128 | [] | Lib.IntTypes.mul64_wide | {
"file_name": "lib/Lib.IntTypes.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | a: Lib.IntTypes.uint64 -> b: Lib.IntTypes.uint64 -> Lib.IntTypes.uint128 | {
"end_col": 41,
"end_line": 358,
"start_col": 21,
"start_line": 358
} |
FStar.Pervasives.Lemma | val logor_disjoint: #t:inttype{unsigned t} -> #l:secrecy_level
-> a:int_t t l
-> b:int_t t l
-> m:nat{m < bits t}
-> Lemma
(requires 0 <= v a /\ v a < pow2 m /\ v b % pow2 m == 0)
(ensures v (a `logor` b) == v a + v b) | [
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let logor_disjoint #t #l a b m =
if m > 0 then
begin
UInt.logor_disjoint #(bits t) (v b) (v a) m;
UInt.logor_commutative #(bits t) (v b) (v a)
end
else
begin
UInt.logor_commutative #(bits t) (v a) (v b);
UInt.logor_lemma_1 #(bits t) (v b)
end | val logor_disjoint: #t:inttype{unsigned t} -> #l:secrecy_level
-> a:int_t t l
-> b:int_t t l
-> m:nat{m < bits t}
-> Lemma
(requires 0 <= v a /\ v a < pow2 m /\ v b % pow2 m == 0)
(ensures v (a `logor` b) == v a + v b)
let logor_disjoint #t #l a b m = | false | null | true | if m > 0
then
(UInt.logor_disjoint #(bits t) (v b) (v a) m;
UInt.logor_commutative #(bits t) (v b) (v a))
else
(UInt.logor_commutative #(bits t) (v a) (v b);
UInt.logor_lemma_1 #(bits t) (v b)) | {
"checked_file": "Lib.IntTypes.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt8.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt16.fsti.checked",
"FStar.UInt128.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int8.fsti.checked",
"FStar.Int64.fsti.checked",
"FStar.Int32.fsti.checked",
"FStar.Int16.fsti.checked",
"FStar.Int128.fsti.checked",
"FStar.Int.Cast.Full.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Int.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "Lib.IntTypes.fst"
} | [
"lemma"
] | [
"Lib.IntTypes.inttype",
"Prims.b2t",
"Lib.IntTypes.unsigned",
"Lib.IntTypes.secrecy_level",
"Lib.IntTypes.int_t",
"Prims.nat",
"Prims.op_LessThan",
"Lib.IntTypes.bits",
"Prims.op_GreaterThan",
"FStar.UInt.logor_commutative",
"Lib.IntTypes.v",
"Prims.unit",
"FStar.UInt.logor_disjoint",
"Prims.bool",
"FStar.UInt.logor_lemma_1"
] | [] | module Lib.IntTypes
open FStar.Math.Lemmas
#push-options "--max_fuel 0 --max_ifuel 1 --z3rlimit 200"
let pow2_2 _ = assert_norm (pow2 2 = 4)
let pow2_3 _ = assert_norm (pow2 3 = 8)
let pow2_4 _ = assert_norm (pow2 4 = 16)
let pow2_127 _ = assert_norm (pow2 127 = 0x80000000000000000000000000000000)
let bits_numbytes t = ()
let sec_int_t t = pub_int_t t
let sec_int_v #t u = pub_int_v u
let secret #t x = x
[@(strict_on_arguments [0])]
let mk_int #t #l x =
match t with
| U1 -> UInt8.uint_to_t x
| U8 -> UInt8.uint_to_t x
| U16 -> UInt16.uint_to_t x
| U32 -> UInt32.uint_to_t x
| U64 -> UInt64.uint_to_t x
| U128 -> UInt128.uint_to_t x
| S8 -> Int8.int_to_t x
| S16 -> Int16.int_to_t x
| S32 -> Int32.int_to_t x
| S64 -> Int64.int_to_t x
| S128 -> Int128.int_to_t x
val v_extensionality:
#t:inttype
-> #l:secrecy_level
-> a:int_t t l
-> b:int_t t l
-> Lemma
(requires v a == v b)
(ensures a == b)
let v_extensionality #t #l a b =
match t with
| U1 -> ()
| U8 -> UInt8.v_inj a b
| U16 -> UInt16.v_inj a b
| U32 -> UInt32.v_inj a b
| U64 -> UInt64.v_inj a b
| U128 -> UInt128.v_inj a b
| S8 -> Int8.v_inj a b
| S16 -> Int16.v_inj a b
| S32 -> Int32.v_inj a b
| S64 -> Int64.v_inj a b
| S128 -> Int128.v_inj a b
let v_injective #t #l a =
v_extensionality a (mk_int (v a))
let v_mk_int #t #l n = ()
let u128 n = FStar.UInt128.uint64_to_uint128 (u64 n)
// KaRaMeL will extract this to FStar_Int128_int_to_t, which isn't provided
// We'll need to have FStar.Int128.int64_to_int128 to support int128_t literals
let i128 n =
assert_norm (pow2 (bits S64 - 1) <= pow2 (bits S128 - 1));
sint #S128 #SEC n
let size_to_uint32 x = x
let size_to_uint64 x = Int.Cast.uint32_to_uint64 x
let byte_to_uint8 x = x
let byte_to_int8 x = Int.Cast.uint8_to_int8 x
let op_At_Percent = Int.op_At_Percent
// FStar.UInt128 gets special treatment in KaRaMeL. There is no
// equivalent for FStar.Int128 at the moment, so we use the three
// assumed cast operators below.
//
// Using them will fail at runtime with an informative message.
// The commented-out implementations show that they are realizable.
//
// When support for `FStar.Int128` is added KaRaMeL, these casts must
// be added as special cases. When using builtin compiler support for
// `int128_t`, they can be implemented directly as C casts without
// undefined or implementation-defined behaviour.
assume
val uint128_to_int128: a:UInt128.t{v a <= maxint S128} -> b:Int128.t{Int128.v b == UInt128.v a}
//let uint128_to_int128 a = Int128.int_to_t (v a)
assume
val int128_to_uint128: a:Int128.t -> b:UInt128.t{UInt128.v b == Int128.v a % pow2 128}
//let int128_to_uint128 a = mk_int (v a % pow2 128)
assume
val int64_to_int128: a:Int64.t -> b:Int128.t{Int128.v b == Int64.v a}
//let int64_to_int128 a = Int128.int_to_t (v a)
val uint64_to_int128: a:UInt64.t -> b:Int128.t{Int128.v b == UInt64.v a}
let uint64_to_int128 a = uint128_to_int128 (Int.Cast.Full.uint64_to_uint128 a)
val int64_to_uint128: a:Int64.t -> b:UInt128.t{UInt128.v b == Int64.v a % pow2 128}
let int64_to_uint128 a = int128_to_uint128 (int64_to_int128 a)
val int128_to_uint64: a:Int128.t -> b:UInt64.t{UInt64.v b == Int128.v a % pow2 64}
let int128_to_uint64 a = Int.Cast.Full.uint128_to_uint64 (int128_to_uint128 a)
#push-options "--z3rlimit 1000"
[@(strict_on_arguments [0;2])]
let cast #t #l t' l' u =
assert_norm (pow2 8 = 2 * pow2 7);
assert_norm (pow2 16 = 2 * pow2 15);
assert_norm (pow2 64 * pow2 64 = pow2 128);
assert_norm (pow2 16 * pow2 48 = pow2 64);
assert_norm (pow2 8 * pow2 56 = pow2 64);
assert_norm (pow2 32 * pow2 32 = pow2 64);
modulo_modulo_lemma (v u) (pow2 32) (pow2 32);
modulo_modulo_lemma (v u) (pow2 64) (pow2 64);
modulo_modulo_lemma (v u) (pow2 128) (pow2 64);
modulo_modulo_lemma (v u) (pow2 16) (pow2 48);
modulo_modulo_lemma (v u) (pow2 8) (pow2 56);
let open FStar.Int.Cast in
let open FStar.Int.Cast.Full in
match t, t' with
| U1, U1 -> u
| U1, U8 -> u
| U1, U16 -> uint8_to_uint16 u
| U1, U32 -> uint8_to_uint32 u
| U1, U64 -> uint8_to_uint64 u
| U1, U128 -> UInt128.uint64_to_uint128 (uint8_to_uint64 u)
| U1, S8 -> uint8_to_int8 u
| U1, S16 -> uint8_to_int16 u
| U1, S32 -> uint8_to_int32 u
| U1, S64 -> uint8_to_int64 u
| U1, S128 -> uint64_to_int128 (uint8_to_uint64 u)
| U8, U1 -> UInt8.rem u 2uy
| U8, U8 -> u
| U8, U16 -> uint8_to_uint16 u
| U8, U32 -> uint8_to_uint32 u
| U8, U64 -> uint8_to_uint64 u
| U8, U128 -> UInt128.uint64_to_uint128 (uint8_to_uint64 u)
| U8, S8 -> uint8_to_int8 u
| U8, S16 -> uint8_to_int16 u
| U8, S32 -> uint8_to_int32 u
| U8, S64 -> uint8_to_int64 u
| U8, S128 -> uint64_to_int128 (uint8_to_uint64 u)
| U16, U1 -> UInt8.rem (uint16_to_uint8 u) 2uy
| U16, U8 -> uint16_to_uint8 u
| U16, U16 -> u
| U16, U32 -> uint16_to_uint32 u
| U16, U64 -> uint16_to_uint64 u
| U16, U128 -> UInt128.uint64_to_uint128 (uint16_to_uint64 u)
| U16, S8 -> uint16_to_int8 u
| U16, S16 -> uint16_to_int16 u
| U16, S32 -> uint16_to_int32 u
| U16, S64 -> uint16_to_int64 u
| U16, S128 -> uint64_to_int128 (uint16_to_uint64 u)
| U32, U1 -> UInt8.rem (uint32_to_uint8 u) 2uy
| U32, U8 -> uint32_to_uint8 u
| U32, U16 -> uint32_to_uint16 u
| U32, U32 -> u
| U32, U64 -> uint32_to_uint64 u
| U32, U128 -> UInt128.uint64_to_uint128 (uint32_to_uint64 u)
| U32, S8 -> uint32_to_int8 u
| U32, S16 -> uint32_to_int16 u
| U32, S32 -> uint32_to_int32 u
| U32, S64 -> uint32_to_int64 u
| U32, S128 -> uint64_to_int128 (uint32_to_uint64 u)
| U64, U1 -> UInt8.rem (uint64_to_uint8 u) 2uy
| U64, U8 -> uint64_to_uint8 u
| U64, U16 -> uint64_to_uint16 u
| U64, U32 -> uint64_to_uint32 u
| U64, U64 -> u
| U64, U128 -> UInt128.uint64_to_uint128 u
| U64, S8 -> uint64_to_int8 u
| U64, S16 -> uint64_to_int16 u
| U64, S32 -> uint64_to_int32 u
| U64, S64 -> uint64_to_int64 u
| U64, S128 -> uint64_to_int128 u
| U128, U1 -> UInt8.rem (uint64_to_uint8 (uint128_to_uint64 u)) 2uy
| U128, U8 -> uint64_to_uint8 (UInt128.uint128_to_uint64 u)
| U128, U16 -> uint64_to_uint16 (UInt128.uint128_to_uint64 u)
| U128, U32 -> uint64_to_uint32 (UInt128.uint128_to_uint64 u)
| U128, U64 -> UInt128.uint128_to_uint64 u
| U128, U128 -> u
| U128, S8 -> uint64_to_int8 (UInt128.uint128_to_uint64 u)
| U128, S16 -> uint64_to_int16 (UInt128.uint128_to_uint64 u)
| U128, S32 -> uint64_to_int32 (UInt128.uint128_to_uint64 u)
| U128, S64 -> uint64_to_int64 (UInt128.uint128_to_uint64 u)
| U128, S128 -> uint128_to_int128 u
| S8, U1 -> UInt8.rem (int8_to_uint8 u) 2uy
| S8, U8 -> int8_to_uint8 u
| S8, U16 -> int8_to_uint16 u
| S8, U32 -> int8_to_uint32 u
| S8, U64 -> int8_to_uint64 u
| S8, U128 -> int64_to_uint128 (int8_to_int64 u)
| S8, S8 -> u
| S8, S16 -> int8_to_int16 u
| S8, S32 -> int8_to_int32 u
| S8, S64 -> int8_to_int64 u
| S8, S128 -> int64_to_int128 (int8_to_int64 u)
| S16, U1 -> UInt8.rem (int16_to_uint8 u) 2uy
| S16, U8 -> int16_to_uint8 u
| S16, U16 -> int16_to_uint16 u
| S16, U32 -> int16_to_uint32 u
| S16, U64 -> int16_to_uint64 u
| S16, U128 -> int64_to_uint128 (int16_to_int64 u)
| S16, S8 -> int16_to_int8 u
| S16, S16 -> u
| S16, S32 -> int16_to_int32 u
| S16, S64 -> int16_to_int64 u
| S16, S128 -> int64_to_int128 (int16_to_int64 u)
| S32, U1 -> UInt8.rem (int32_to_uint8 u) 2uy
| S32, U8 -> int32_to_uint8 u
| S32, U16 -> int32_to_uint16 u
| S32, U32 -> int32_to_uint32 u
| S32, U64 -> int32_to_uint64 u
| S32, U128 -> int64_to_uint128 (int32_to_int64 u)
| S32, S8 -> int32_to_int8 u
| S32, S16 -> int32_to_int16 u
| S32, S32 -> u
| S32, S64 -> int32_to_int64 u
| S32, S128 -> int64_to_int128 (int32_to_int64 u)
| S64, U1 -> UInt8.rem (int64_to_uint8 u) 2uy
| S64, U8 -> int64_to_uint8 u
| S64, U16 -> int64_to_uint16 u
| S64, U32 -> int64_to_uint32 u
| S64, U64 -> int64_to_uint64 u
| S64, U128 -> int64_to_uint128 u
| S64, S8 -> int64_to_int8 u
| S64, S16 -> int64_to_int16 u
| S64, S32 -> int64_to_int32 u
| S64, S64 -> u
| S64, S128 -> int64_to_int128 u
| S128, U1 -> UInt8.rem (uint64_to_uint8 (int128_to_uint64 u)) 2uy
| S128, U8 -> uint64_to_uint8 (int128_to_uint64 u)
| S128, U16 -> uint64_to_uint16 (int128_to_uint64 u)
| S128, U32 -> uint64_to_uint32 (int128_to_uint64 u)
| S128, U64 -> int128_to_uint64 u
| S128, U128 -> int128_to_uint128 u
| S128, S8 -> uint64_to_int8 (int128_to_uint64 u)
| S128, S16 -> uint64_to_int16 (int128_to_uint64 u)
| S128, S32 -> uint64_to_int32 (int128_to_uint64 u)
| S128, S64 -> uint64_to_int64 (int128_to_uint64 u)
| S128, S128 -> u
#pop-options
[@(strict_on_arguments [0])]
let ones t l =
match t with
| U1 -> 0x1uy
| U8 -> 0xFFuy
| U16 -> 0xFFFFus
| U32 -> 0xFFFFFFFFul
| U64 -> 0xFFFFFFFFFFFFFFFFuL
| U128 ->
let x = UInt128.uint64_to_uint128 0xFFFFFFFFFFFFFFFFuL in
let y = (UInt128.shift_left x 64ul) `UInt128.add` x in
assert_norm (UInt128.v y == pow2 128 - 1);
y
| _ -> mk_int (-1)
let zeros t l = mk_int 0
[@(strict_on_arguments [0])]
let add_mod #t #l a b =
match t with
| U1 -> UInt8.rem (UInt8.add_mod a b) 2uy
| U8 -> UInt8.add_mod a b
| U16 -> UInt16.add_mod a b
| U32 -> UInt32.add_mod a b
| U64 -> UInt64.add_mod a b
| U128 -> UInt128.add_mod a b
let add_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let add #t #l a b =
match t with
| U1 -> UInt8.add a b
| U8 -> UInt8.add a b
| U16 -> UInt16.add a b
| U32 -> UInt32.add a b
| U64 -> UInt64.add a b
| U128 -> UInt128.add a b
| S8 -> Int8.add a b
| S16 -> Int16.add a b
| S32 -> Int32.add a b
| S64 -> Int64.add a b
| S128 -> Int128.add a b
let add_lemma #t #l a b = ()
#push-options "--max_fuel 1"
[@(strict_on_arguments [0])]
let incr #t #l a =
match t with
| U1 -> UInt8.add a 1uy
| U8 -> UInt8.add a 1uy
| U16 -> UInt16.add a 1us
| U32 -> UInt32.add a 1ul
| U64 -> UInt64.add a 1uL
| U128 -> UInt128.add a (UInt128.uint_to_t 1)
| S8 -> Int8.add a 1y
| S16 -> Int16.add a 1s
| S32 -> Int32.add a 1l
| S64 -> Int64.add a 1L
| S128 -> Int128.add a (Int128.int_to_t 1)
let incr_lemma #t #l a = ()
#pop-options
[@(strict_on_arguments [0])]
let mul_mod #t #l a b =
match t with
| U1 -> UInt8.mul_mod a b
| U8 -> UInt8.mul_mod a b
| U16 -> UInt16.mul_mod a b
| U32 -> UInt32.mul_mod a b
| U64 -> UInt64.mul_mod a b
let mul_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let mul #t #l a b =
match t with
| U1 -> UInt8.mul a b
| U8 -> UInt8.mul a b
| U16 -> UInt16.mul a b
| U32 -> UInt32.mul a b
| U64 -> UInt64.mul a b
| S8 -> Int8.mul a b
| S16 -> Int16.mul a b
| S32 -> Int32.mul a b
| S64 -> Int64.mul a b
let mul_lemma #t #l a b = ()
let mul64_wide a b = UInt128.mul_wide a b
let mul64_wide_lemma a b = ()
let mul_s64_wide a b = Int128.mul_wide a b
let mul_s64_wide_lemma a b = ()
[@(strict_on_arguments [0])]
let sub_mod #t #l a b =
match t with
| U1 -> UInt8.rem (UInt8.sub_mod a b) 2uy
| U8 -> UInt8.sub_mod a b
| U16 -> UInt16.sub_mod a b
| U32 -> UInt32.sub_mod a b
| U64 -> UInt64.sub_mod a b
| U128 -> UInt128.sub_mod a b
let sub_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let sub #t #l a b =
match t with
| U1 -> UInt8.sub a b
| U8 -> UInt8.sub a b
| U16 -> UInt16.sub a b
| U32 -> UInt32.sub a b
| U64 -> UInt64.sub a b
| U128 -> UInt128.sub a b
| S8 -> Int8.sub a b
| S16 -> Int16.sub a b
| S32 -> Int32.sub a b
| S64 -> Int64.sub a b
| S128 -> Int128.sub a b
let sub_lemma #t #l a b = ()
#push-options "--max_fuel 1"
[@(strict_on_arguments [0])]
let decr #t #l a =
match t with
| U1 -> UInt8.sub a 1uy
| U8 -> UInt8.sub a 1uy
| U16 -> UInt16.sub a 1us
| U32 -> UInt32.sub a 1ul
| U64 -> UInt64.sub a 1uL
| U128 -> UInt128.sub a (UInt128.uint_to_t 1)
| S8 -> Int8.sub a 1y
| S16 -> Int16.sub a 1s
| S32 -> Int32.sub a 1l
| S64 -> Int64.sub a 1L
| S128 -> Int128.sub a (Int128.int_to_t 1)
let decr_lemma #t #l a = ()
#pop-options
[@(strict_on_arguments [0])]
let logxor #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logxor 0uy 0uy == 0uy);
assert_norm (UInt8.logxor 0uy 1uy == 1uy);
assert_norm (UInt8.logxor 1uy 0uy == 1uy);
assert_norm (UInt8.logxor 1uy 1uy == 0uy);
UInt8.logxor a b
| U8 -> UInt8.logxor a b
| U16 -> UInt16.logxor a b
| U32 -> UInt32.logxor a b
| U64 -> UInt64.logxor a b
| U128 -> UInt128.logxor a b
| S8 -> Int8.logxor a b
| S16 -> Int16.logxor a b
| S32 -> Int32.logxor a b
| S64 -> Int64.logxor a b
| S128 -> Int128.logxor a b
#push-options "--max_fuel 1"
val logxor_lemma_: #t:inttype -> #l:secrecy_level -> a:int_t t l -> b:int_t t l -> Lemma
(v (a `logxor` (a `logxor` b)) == v b)
let logxor_lemma_ #t #l a b =
match t with
| U1 | U8 | U16 | U32 | U64 | U128 ->
UInt.logxor_associative #(bits t) (v a) (v a) (v b);
UInt.logxor_self #(bits t) (v a);
UInt.logxor_commutative #(bits t) 0 (v b);
UInt.logxor_lemma_1 #(bits t) (v b)
| S8 | S16 | S32 | S64 | S128 ->
Int.logxor_associative #(bits t) (v a) (v a) (v b);
Int.logxor_self #(bits t) (v a);
Int.logxor_commutative #(bits t) 0 (v b);
Int.logxor_lemma_1 #(bits t) (v b)
let logxor_lemma #t #l a b =
logxor_lemma_ #t a b;
v_extensionality (logxor a (logxor a b)) b;
begin
match t with
| U1 | U8 | U16 | U32 | U64 | U128 -> UInt.logxor_commutative #(bits t) (v a) (v b)
| S8 | S16 | S32 | S64 | S128 -> Int.logxor_commutative #(bits t) (v a) (v b)
end;
v_extensionality (logxor a (logxor b a)) b;
begin
match t with
| U1 | U8 | U16 | U32 | U64 | U128 -> UInt.logxor_lemma_1 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.logxor_lemma_1 #(bits t) (v a)
end;
v_extensionality (logxor a (mk_int #t #l 0)) a
let logxor_lemma1 #t #l a b =
match v a, v b with
| _, 0 ->
UInt.logxor_lemma_1 #(bits t) (v a)
| 0, _ ->
UInt.logxor_commutative #(bits t) (v a) (v b);
UInt.logxor_lemma_1 #(bits t) (v b)
| 1, 1 ->
v_extensionality a b;
UInt.logxor_self #(bits t) (v a)
let logxor_spec #t #l a b =
match t with
| U1 ->
assert_norm (u1 0 `logxor` u1 0 == u1 0 /\ u1 0 `logxor` u1 1 == u1 1);
assert_norm (u1 1 `logxor` u1 0 == u1 1 /\ u1 1 `logxor` u1 1 == u1 0);
assert_norm (0 `logxor_v #U1` 0 == 0 /\ 0 `logxor_v #U1` 1 == 1);
assert_norm (1 `logxor_v #U1` 0 == 1 /\ 1 `logxor_v #U1` 1 == 0)
| _ -> ()
#pop-options
[@(strict_on_arguments [0])]
let logand #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logand 0uy 0uy == 0uy);
assert_norm (UInt8.logand 0uy 1uy == 0uy);
assert_norm (UInt8.logand 1uy 0uy == 0uy);
assert_norm (UInt8.logand 1uy 1uy == 1uy);
UInt8.logand a b
| U8 -> UInt8.logand a b
| U16 -> UInt16.logand a b
| U32 -> UInt32.logand a b
| U64 -> UInt64.logand a b
| U128 -> UInt128.logand a b
| S8 -> Int8.logand a b
| S16 -> Int16.logand a b
| S32 -> Int32.logand a b
| S64 -> Int64.logand a b
| S128 -> Int128.logand a b
let logand_zeros #t #l a =
match t with
| U1 -> assert_norm (u1 0 `logand` zeros U1 l == u1 0 /\ u1 1 `logand` zeros U1 l == u1 0)
| U8 | U16 | U32 | U64 | U128 -> UInt.logand_lemma_1 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.logand_lemma_1 #(bits t) (v a)
let logand_ones #t #l a =
match t with
| U1 -> assert_norm (u1 0 `logand` ones U1 l == u1 0 /\ u1 1 `logand` ones U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logand_lemma_2 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.logand_lemma_2 #(bits t) (v a)
let logand_lemma #t #l a b =
logand_zeros #t #l b;
logand_ones #t #l b;
match t with
| U1 ->
assert_norm (u1 0 `logand` zeros U1 l == u1 0 /\ u1 1 `logand` zeros U1 l == u1 0);
assert_norm (u1 0 `logand` ones U1 l == u1 0 /\ u1 1 `logand` ones U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logand_commutative #(bits t) (v a) (v b)
| S8 | S16 | S32 | S64 | S128 -> Int.logand_commutative #(bits t) (v a) (v b)
let logand_spec #t #l a b =
match t with
| U1 ->
assert_norm (u1 0 `logand` u1 0 == u1 0 /\ u1 0 `logand` u1 1 == u1 0);
assert_norm (u1 1 `logand` u1 0 == u1 0 /\ u1 1 `logand` u1 1 == u1 1);
assert_norm (0 `logand_v #U1` 0 == 0 /\ 0 `logand_v #U1` 1 == 0);
assert_norm (1 `logand_v #U1` 0 == 0 /\ 1 `logand_v #U1` 1 == 1)
| _ -> ()
let logand_le #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logand 0uy 0uy == 0uy);
assert_norm (UInt8.logand 0uy 1uy == 0uy);
assert_norm (UInt8.logand 1uy 0uy == 0uy);
assert_norm (UInt8.logand 1uy 1uy == 1uy)
| U8 -> UInt.logand_le (UInt.to_uint_t 8 (v a)) (UInt.to_uint_t 8 (v b))
| U16 -> UInt.logand_le (UInt.to_uint_t 16 (v a)) (UInt.to_uint_t 16 (v b))
| U32 -> UInt.logand_le (UInt.to_uint_t 32 (v a)) (UInt.to_uint_t 32 (v b))
| U64 -> UInt.logand_le (UInt.to_uint_t 64 (v a)) (UInt.to_uint_t 64 (v b))
| U128 -> UInt.logand_le (UInt.to_uint_t 128 (v a)) (UInt.to_uint_t 128 (v b))
let logand_mask #t #l a b m =
match t with
| U1 ->
assert_norm (UInt8.logand 0uy 0uy == 0uy);
assert_norm (UInt8.logand 0uy 1uy == 0uy);
assert_norm (UInt8.logand 1uy 0uy == 0uy);
assert_norm (UInt8.logand 1uy 1uy == 1uy)
| U8 -> UInt.logand_mask (UInt.to_uint_t 8 (v a)) m
| U16 -> UInt.logand_mask (UInt.to_uint_t 16 (v a)) m
| U32 -> UInt.logand_mask (UInt.to_uint_t 32 (v a)) m
| U64 -> UInt.logand_mask (UInt.to_uint_t 64 (v a)) m
| U128 -> UInt.logand_mask (UInt.to_uint_t 128 (v a)) m
[@(strict_on_arguments [0])]
let logor #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logor 0uy 0uy == 0uy);
assert_norm (UInt8.logor 0uy 1uy == 1uy);
assert_norm (UInt8.logor 1uy 0uy == 1uy);
assert_norm (UInt8.logor 1uy 1uy == 1uy);
UInt8.logor a b
| U8 -> UInt8.logor a b
| U16 -> UInt16.logor a b
| U32 -> UInt32.logor a b
| U64 -> UInt64.logor a b
| U128 -> UInt128.logor a b
| S8 -> Int8.logor a b
| S16 -> Int16.logor a b
| S32 -> Int32.logor a b
| S64 -> Int64.logor a b
| S128 -> Int128.logor a b
#push-options "--max_fuel 1" | false | false | Lib.IntTypes.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 200,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val logor_disjoint: #t:inttype{unsigned t} -> #l:secrecy_level
-> a:int_t t l
-> b:int_t t l
-> m:nat{m < bits t}
-> Lemma
(requires 0 <= v a /\ v a < pow2 m /\ v b % pow2 m == 0)
(ensures v (a `logor` b) == v a + v b) | [] | Lib.IntTypes.logor_disjoint | {
"file_name": "lib/Lib.IntTypes.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | a: Lib.IntTypes.int_t t l -> b: Lib.IntTypes.int_t t l -> m: Prims.nat{m < Lib.IntTypes.bits t}
-> FStar.Pervasives.Lemma
(requires
0 <= Lib.IntTypes.v a /\ Lib.IntTypes.v a < Prims.pow2 m /\
Lib.IntTypes.v b % Prims.pow2 m == 0)
(ensures Lib.IntTypes.v (Lib.IntTypes.logor a b) == Lib.IntTypes.v a + Lib.IntTypes.v b) | {
"end_col": 5,
"end_line": 601,
"start_col": 2,
"start_line": 592
} |
Prims.Tot | val mk_int: #t:inttype -> #l:secrecy_level -> n:range_t t -> u:int_t t l{v u == n} | [
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let mk_int #t #l x =
match t with
| U1 -> UInt8.uint_to_t x
| U8 -> UInt8.uint_to_t x
| U16 -> UInt16.uint_to_t x
| U32 -> UInt32.uint_to_t x
| U64 -> UInt64.uint_to_t x
| U128 -> UInt128.uint_to_t x
| S8 -> Int8.int_to_t x
| S16 -> Int16.int_to_t x
| S32 -> Int32.int_to_t x
| S64 -> Int64.int_to_t x
| S128 -> Int128.int_to_t x | val mk_int: #t:inttype -> #l:secrecy_level -> n:range_t t -> u:int_t t l{v u == n}
let mk_int #t #l x = | false | null | false | match t with
| U1 -> UInt8.uint_to_t x
| U8 -> UInt8.uint_to_t x
| U16 -> UInt16.uint_to_t x
| U32 -> UInt32.uint_to_t x
| U64 -> UInt64.uint_to_t x
| U128 -> UInt128.uint_to_t x
| S8 -> Int8.int_to_t x
| S16 -> Int16.int_to_t x
| S32 -> Int32.int_to_t x
| S64 -> Int64.int_to_t x
| S128 -> Int128.int_to_t x | {
"checked_file": "Lib.IntTypes.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt8.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt16.fsti.checked",
"FStar.UInt128.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int8.fsti.checked",
"FStar.Int64.fsti.checked",
"FStar.Int32.fsti.checked",
"FStar.Int16.fsti.checked",
"FStar.Int128.fsti.checked",
"FStar.Int.Cast.Full.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Int.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "Lib.IntTypes.fst"
} | [
"total"
] | [
"Lib.IntTypes.inttype",
"Lib.IntTypes.secrecy_level",
"Lib.IntTypes.range_t",
"FStar.UInt8.uint_to_t",
"FStar.UInt16.uint_to_t",
"FStar.UInt32.uint_to_t",
"FStar.UInt64.uint_to_t",
"FStar.UInt128.uint_to_t",
"FStar.Int8.int_to_t",
"FStar.Int16.int_to_t",
"FStar.Int32.int_to_t",
"FStar.Int64.int_to_t",
"FStar.Int128.int_to_t",
"Lib.IntTypes.int_t",
"Prims.eq2",
"Lib.IntTypes.v"
] | [] | module Lib.IntTypes
open FStar.Math.Lemmas
#push-options "--max_fuel 0 --max_ifuel 1 --z3rlimit 200"
let pow2_2 _ = assert_norm (pow2 2 = 4)
let pow2_3 _ = assert_norm (pow2 3 = 8)
let pow2_4 _ = assert_norm (pow2 4 = 16)
let pow2_127 _ = assert_norm (pow2 127 = 0x80000000000000000000000000000000)
let bits_numbytes t = ()
let sec_int_t t = pub_int_t t
let sec_int_v #t u = pub_int_v u
let secret #t x = x
[@(strict_on_arguments [0])] | false | false | Lib.IntTypes.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 200,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val mk_int: #t:inttype -> #l:secrecy_level -> n:range_t t -> u:int_t t l{v u == n} | [] | Lib.IntTypes.mk_int | {
"file_name": "lib/Lib.IntTypes.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | n: Lib.IntTypes.range_t t -> u62: Lib.IntTypes.int_t t l {Lib.IntTypes.v u62 == n} | {
"end_col": 29,
"end_line": 33,
"start_col": 2,
"start_line": 22
} |
FStar.Pervasives.Lemma | val v_extensionality:
#t:inttype
-> #l:secrecy_level
-> a:int_t t l
-> b:int_t t l
-> Lemma
(requires v a == v b)
(ensures a == b) | [
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let v_extensionality #t #l a b =
match t with
| U1 -> ()
| U8 -> UInt8.v_inj a b
| U16 -> UInt16.v_inj a b
| U32 -> UInt32.v_inj a b
| U64 -> UInt64.v_inj a b
| U128 -> UInt128.v_inj a b
| S8 -> Int8.v_inj a b
| S16 -> Int16.v_inj a b
| S32 -> Int32.v_inj a b
| S64 -> Int64.v_inj a b
| S128 -> Int128.v_inj a b | val v_extensionality:
#t:inttype
-> #l:secrecy_level
-> a:int_t t l
-> b:int_t t l
-> Lemma
(requires v a == v b)
(ensures a == b)
let v_extensionality #t #l a b = | false | null | true | match t with
| U1 -> ()
| U8 -> UInt8.v_inj a b
| U16 -> UInt16.v_inj a b
| U32 -> UInt32.v_inj a b
| U64 -> UInt64.v_inj a b
| U128 -> UInt128.v_inj a b
| S8 -> Int8.v_inj a b
| S16 -> Int16.v_inj a b
| S32 -> Int32.v_inj a b
| S64 -> Int64.v_inj a b
| S128 -> Int128.v_inj a b | {
"checked_file": "Lib.IntTypes.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt8.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt16.fsti.checked",
"FStar.UInt128.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int8.fsti.checked",
"FStar.Int64.fsti.checked",
"FStar.Int32.fsti.checked",
"FStar.Int16.fsti.checked",
"FStar.Int128.fsti.checked",
"FStar.Int.Cast.Full.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Int.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "Lib.IntTypes.fst"
} | [
"lemma"
] | [
"Lib.IntTypes.inttype",
"Lib.IntTypes.secrecy_level",
"Lib.IntTypes.int_t",
"FStar.UInt8.v_inj",
"FStar.UInt16.v_inj",
"FStar.UInt32.v_inj",
"FStar.UInt64.v_inj",
"FStar.UInt128.v_inj",
"FStar.Int8.v_inj",
"FStar.Int16.v_inj",
"FStar.Int32.v_inj",
"FStar.Int64.v_inj",
"FStar.Int128.v_inj",
"Prims.unit"
] | [] | module Lib.IntTypes
open FStar.Math.Lemmas
#push-options "--max_fuel 0 --max_ifuel 1 --z3rlimit 200"
let pow2_2 _ = assert_norm (pow2 2 = 4)
let pow2_3 _ = assert_norm (pow2 3 = 8)
let pow2_4 _ = assert_norm (pow2 4 = 16)
let pow2_127 _ = assert_norm (pow2 127 = 0x80000000000000000000000000000000)
let bits_numbytes t = ()
let sec_int_t t = pub_int_t t
let sec_int_v #t u = pub_int_v u
let secret #t x = x
[@(strict_on_arguments [0])]
let mk_int #t #l x =
match t with
| U1 -> UInt8.uint_to_t x
| U8 -> UInt8.uint_to_t x
| U16 -> UInt16.uint_to_t x
| U32 -> UInt32.uint_to_t x
| U64 -> UInt64.uint_to_t x
| U128 -> UInt128.uint_to_t x
| S8 -> Int8.int_to_t x
| S16 -> Int16.int_to_t x
| S32 -> Int32.int_to_t x
| S64 -> Int64.int_to_t x
| S128 -> Int128.int_to_t x
val v_extensionality:
#t:inttype
-> #l:secrecy_level
-> a:int_t t l
-> b:int_t t l
-> Lemma
(requires v a == v b)
(ensures a == b) | false | false | Lib.IntTypes.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 200,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val v_extensionality:
#t:inttype
-> #l:secrecy_level
-> a:int_t t l
-> b:int_t t l
-> Lemma
(requires v a == v b)
(ensures a == b) | [] | Lib.IntTypes.v_extensionality | {
"file_name": "lib/Lib.IntTypes.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | a: Lib.IntTypes.int_t t l -> b: Lib.IntTypes.int_t t l
-> FStar.Pervasives.Lemma (requires Lib.IntTypes.v a == Lib.IntTypes.v b) (ensures a == b) | {
"end_col": 28,
"end_line": 55,
"start_col": 2,
"start_line": 44
} |
FStar.Pervasives.Lemma | val logand_mask: #t:inttype{unsigned t} -> #l:secrecy_level -> a:uint_t t l -> b:uint_t t l -> m:pos{m < bits t} ->
Lemma
(requires v b == pow2 m - 1)
(ensures v (logand #t #l a b) == v a % pow2 m) | [
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let logand_mask #t #l a b m =
match t with
| U1 ->
assert_norm (UInt8.logand 0uy 0uy == 0uy);
assert_norm (UInt8.logand 0uy 1uy == 0uy);
assert_norm (UInt8.logand 1uy 0uy == 0uy);
assert_norm (UInt8.logand 1uy 1uy == 1uy)
| U8 -> UInt.logand_mask (UInt.to_uint_t 8 (v a)) m
| U16 -> UInt.logand_mask (UInt.to_uint_t 16 (v a)) m
| U32 -> UInt.logand_mask (UInt.to_uint_t 32 (v a)) m
| U64 -> UInt.logand_mask (UInt.to_uint_t 64 (v a)) m
| U128 -> UInt.logand_mask (UInt.to_uint_t 128 (v a)) m | val logand_mask: #t:inttype{unsigned t} -> #l:secrecy_level -> a:uint_t t l -> b:uint_t t l -> m:pos{m < bits t} ->
Lemma
(requires v b == pow2 m - 1)
(ensures v (logand #t #l a b) == v a % pow2 m)
let logand_mask #t #l a b m = | false | null | true | match t with
| U1 ->
assert_norm (UInt8.logand 0uy 0uy == 0uy);
assert_norm (UInt8.logand 0uy 1uy == 0uy);
assert_norm (UInt8.logand 1uy 0uy == 0uy);
assert_norm (UInt8.logand 1uy 1uy == 1uy)
| U8 -> UInt.logand_mask (UInt.to_uint_t 8 (v a)) m
| U16 -> UInt.logand_mask (UInt.to_uint_t 16 (v a)) m
| U32 -> UInt.logand_mask (UInt.to_uint_t 32 (v a)) m
| U64 -> UInt.logand_mask (UInt.to_uint_t 64 (v a)) m
| U128 -> UInt.logand_mask (UInt.to_uint_t 128 (v a)) m | {
"checked_file": "Lib.IntTypes.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt8.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt16.fsti.checked",
"FStar.UInt128.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int8.fsti.checked",
"FStar.Int64.fsti.checked",
"FStar.Int32.fsti.checked",
"FStar.Int16.fsti.checked",
"FStar.Int128.fsti.checked",
"FStar.Int.Cast.Full.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Int.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "Lib.IntTypes.fst"
} | [
"lemma"
] | [
"Lib.IntTypes.inttype",
"Prims.b2t",
"Lib.IntTypes.unsigned",
"Lib.IntTypes.secrecy_level",
"Lib.IntTypes.uint_t",
"Prims.pos",
"Prims.op_LessThan",
"Lib.IntTypes.bits",
"FStar.Pervasives.assert_norm",
"Prims.eq2",
"FStar.UInt8.t",
"FStar.UInt8.logand",
"FStar.UInt8.__uint_to_t",
"Prims.unit",
"FStar.UInt.logand_mask",
"FStar.UInt.to_uint_t",
"Lib.IntTypes.v"
] | [] | module Lib.IntTypes
open FStar.Math.Lemmas
#push-options "--max_fuel 0 --max_ifuel 1 --z3rlimit 200"
let pow2_2 _ = assert_norm (pow2 2 = 4)
let pow2_3 _ = assert_norm (pow2 3 = 8)
let pow2_4 _ = assert_norm (pow2 4 = 16)
let pow2_127 _ = assert_norm (pow2 127 = 0x80000000000000000000000000000000)
let bits_numbytes t = ()
let sec_int_t t = pub_int_t t
let sec_int_v #t u = pub_int_v u
let secret #t x = x
[@(strict_on_arguments [0])]
let mk_int #t #l x =
match t with
| U1 -> UInt8.uint_to_t x
| U8 -> UInt8.uint_to_t x
| U16 -> UInt16.uint_to_t x
| U32 -> UInt32.uint_to_t x
| U64 -> UInt64.uint_to_t x
| U128 -> UInt128.uint_to_t x
| S8 -> Int8.int_to_t x
| S16 -> Int16.int_to_t x
| S32 -> Int32.int_to_t x
| S64 -> Int64.int_to_t x
| S128 -> Int128.int_to_t x
val v_extensionality:
#t:inttype
-> #l:secrecy_level
-> a:int_t t l
-> b:int_t t l
-> Lemma
(requires v a == v b)
(ensures a == b)
let v_extensionality #t #l a b =
match t with
| U1 -> ()
| U8 -> UInt8.v_inj a b
| U16 -> UInt16.v_inj a b
| U32 -> UInt32.v_inj a b
| U64 -> UInt64.v_inj a b
| U128 -> UInt128.v_inj a b
| S8 -> Int8.v_inj a b
| S16 -> Int16.v_inj a b
| S32 -> Int32.v_inj a b
| S64 -> Int64.v_inj a b
| S128 -> Int128.v_inj a b
let v_injective #t #l a =
v_extensionality a (mk_int (v a))
let v_mk_int #t #l n = ()
let u128 n = FStar.UInt128.uint64_to_uint128 (u64 n)
// KaRaMeL will extract this to FStar_Int128_int_to_t, which isn't provided
// We'll need to have FStar.Int128.int64_to_int128 to support int128_t literals
let i128 n =
assert_norm (pow2 (bits S64 - 1) <= pow2 (bits S128 - 1));
sint #S128 #SEC n
let size_to_uint32 x = x
let size_to_uint64 x = Int.Cast.uint32_to_uint64 x
let byte_to_uint8 x = x
let byte_to_int8 x = Int.Cast.uint8_to_int8 x
let op_At_Percent = Int.op_At_Percent
// FStar.UInt128 gets special treatment in KaRaMeL. There is no
// equivalent for FStar.Int128 at the moment, so we use the three
// assumed cast operators below.
//
// Using them will fail at runtime with an informative message.
// The commented-out implementations show that they are realizable.
//
// When support for `FStar.Int128` is added KaRaMeL, these casts must
// be added as special cases. When using builtin compiler support for
// `int128_t`, they can be implemented directly as C casts without
// undefined or implementation-defined behaviour.
assume
val uint128_to_int128: a:UInt128.t{v a <= maxint S128} -> b:Int128.t{Int128.v b == UInt128.v a}
//let uint128_to_int128 a = Int128.int_to_t (v a)
assume
val int128_to_uint128: a:Int128.t -> b:UInt128.t{UInt128.v b == Int128.v a % pow2 128}
//let int128_to_uint128 a = mk_int (v a % pow2 128)
assume
val int64_to_int128: a:Int64.t -> b:Int128.t{Int128.v b == Int64.v a}
//let int64_to_int128 a = Int128.int_to_t (v a)
val uint64_to_int128: a:UInt64.t -> b:Int128.t{Int128.v b == UInt64.v a}
let uint64_to_int128 a = uint128_to_int128 (Int.Cast.Full.uint64_to_uint128 a)
val int64_to_uint128: a:Int64.t -> b:UInt128.t{UInt128.v b == Int64.v a % pow2 128}
let int64_to_uint128 a = int128_to_uint128 (int64_to_int128 a)
val int128_to_uint64: a:Int128.t -> b:UInt64.t{UInt64.v b == Int128.v a % pow2 64}
let int128_to_uint64 a = Int.Cast.Full.uint128_to_uint64 (int128_to_uint128 a)
#push-options "--z3rlimit 1000"
[@(strict_on_arguments [0;2])]
let cast #t #l t' l' u =
assert_norm (pow2 8 = 2 * pow2 7);
assert_norm (pow2 16 = 2 * pow2 15);
assert_norm (pow2 64 * pow2 64 = pow2 128);
assert_norm (pow2 16 * pow2 48 = pow2 64);
assert_norm (pow2 8 * pow2 56 = pow2 64);
assert_norm (pow2 32 * pow2 32 = pow2 64);
modulo_modulo_lemma (v u) (pow2 32) (pow2 32);
modulo_modulo_lemma (v u) (pow2 64) (pow2 64);
modulo_modulo_lemma (v u) (pow2 128) (pow2 64);
modulo_modulo_lemma (v u) (pow2 16) (pow2 48);
modulo_modulo_lemma (v u) (pow2 8) (pow2 56);
let open FStar.Int.Cast in
let open FStar.Int.Cast.Full in
match t, t' with
| U1, U1 -> u
| U1, U8 -> u
| U1, U16 -> uint8_to_uint16 u
| U1, U32 -> uint8_to_uint32 u
| U1, U64 -> uint8_to_uint64 u
| U1, U128 -> UInt128.uint64_to_uint128 (uint8_to_uint64 u)
| U1, S8 -> uint8_to_int8 u
| U1, S16 -> uint8_to_int16 u
| U1, S32 -> uint8_to_int32 u
| U1, S64 -> uint8_to_int64 u
| U1, S128 -> uint64_to_int128 (uint8_to_uint64 u)
| U8, U1 -> UInt8.rem u 2uy
| U8, U8 -> u
| U8, U16 -> uint8_to_uint16 u
| U8, U32 -> uint8_to_uint32 u
| U8, U64 -> uint8_to_uint64 u
| U8, U128 -> UInt128.uint64_to_uint128 (uint8_to_uint64 u)
| U8, S8 -> uint8_to_int8 u
| U8, S16 -> uint8_to_int16 u
| U8, S32 -> uint8_to_int32 u
| U8, S64 -> uint8_to_int64 u
| U8, S128 -> uint64_to_int128 (uint8_to_uint64 u)
| U16, U1 -> UInt8.rem (uint16_to_uint8 u) 2uy
| U16, U8 -> uint16_to_uint8 u
| U16, U16 -> u
| U16, U32 -> uint16_to_uint32 u
| U16, U64 -> uint16_to_uint64 u
| U16, U128 -> UInt128.uint64_to_uint128 (uint16_to_uint64 u)
| U16, S8 -> uint16_to_int8 u
| U16, S16 -> uint16_to_int16 u
| U16, S32 -> uint16_to_int32 u
| U16, S64 -> uint16_to_int64 u
| U16, S128 -> uint64_to_int128 (uint16_to_uint64 u)
| U32, U1 -> UInt8.rem (uint32_to_uint8 u) 2uy
| U32, U8 -> uint32_to_uint8 u
| U32, U16 -> uint32_to_uint16 u
| U32, U32 -> u
| U32, U64 -> uint32_to_uint64 u
| U32, U128 -> UInt128.uint64_to_uint128 (uint32_to_uint64 u)
| U32, S8 -> uint32_to_int8 u
| U32, S16 -> uint32_to_int16 u
| U32, S32 -> uint32_to_int32 u
| U32, S64 -> uint32_to_int64 u
| U32, S128 -> uint64_to_int128 (uint32_to_uint64 u)
| U64, U1 -> UInt8.rem (uint64_to_uint8 u) 2uy
| U64, U8 -> uint64_to_uint8 u
| U64, U16 -> uint64_to_uint16 u
| U64, U32 -> uint64_to_uint32 u
| U64, U64 -> u
| U64, U128 -> UInt128.uint64_to_uint128 u
| U64, S8 -> uint64_to_int8 u
| U64, S16 -> uint64_to_int16 u
| U64, S32 -> uint64_to_int32 u
| U64, S64 -> uint64_to_int64 u
| U64, S128 -> uint64_to_int128 u
| U128, U1 -> UInt8.rem (uint64_to_uint8 (uint128_to_uint64 u)) 2uy
| U128, U8 -> uint64_to_uint8 (UInt128.uint128_to_uint64 u)
| U128, U16 -> uint64_to_uint16 (UInt128.uint128_to_uint64 u)
| U128, U32 -> uint64_to_uint32 (UInt128.uint128_to_uint64 u)
| U128, U64 -> UInt128.uint128_to_uint64 u
| U128, U128 -> u
| U128, S8 -> uint64_to_int8 (UInt128.uint128_to_uint64 u)
| U128, S16 -> uint64_to_int16 (UInt128.uint128_to_uint64 u)
| U128, S32 -> uint64_to_int32 (UInt128.uint128_to_uint64 u)
| U128, S64 -> uint64_to_int64 (UInt128.uint128_to_uint64 u)
| U128, S128 -> uint128_to_int128 u
| S8, U1 -> UInt8.rem (int8_to_uint8 u) 2uy
| S8, U8 -> int8_to_uint8 u
| S8, U16 -> int8_to_uint16 u
| S8, U32 -> int8_to_uint32 u
| S8, U64 -> int8_to_uint64 u
| S8, U128 -> int64_to_uint128 (int8_to_int64 u)
| S8, S8 -> u
| S8, S16 -> int8_to_int16 u
| S8, S32 -> int8_to_int32 u
| S8, S64 -> int8_to_int64 u
| S8, S128 -> int64_to_int128 (int8_to_int64 u)
| S16, U1 -> UInt8.rem (int16_to_uint8 u) 2uy
| S16, U8 -> int16_to_uint8 u
| S16, U16 -> int16_to_uint16 u
| S16, U32 -> int16_to_uint32 u
| S16, U64 -> int16_to_uint64 u
| S16, U128 -> int64_to_uint128 (int16_to_int64 u)
| S16, S8 -> int16_to_int8 u
| S16, S16 -> u
| S16, S32 -> int16_to_int32 u
| S16, S64 -> int16_to_int64 u
| S16, S128 -> int64_to_int128 (int16_to_int64 u)
| S32, U1 -> UInt8.rem (int32_to_uint8 u) 2uy
| S32, U8 -> int32_to_uint8 u
| S32, U16 -> int32_to_uint16 u
| S32, U32 -> int32_to_uint32 u
| S32, U64 -> int32_to_uint64 u
| S32, U128 -> int64_to_uint128 (int32_to_int64 u)
| S32, S8 -> int32_to_int8 u
| S32, S16 -> int32_to_int16 u
| S32, S32 -> u
| S32, S64 -> int32_to_int64 u
| S32, S128 -> int64_to_int128 (int32_to_int64 u)
| S64, U1 -> UInt8.rem (int64_to_uint8 u) 2uy
| S64, U8 -> int64_to_uint8 u
| S64, U16 -> int64_to_uint16 u
| S64, U32 -> int64_to_uint32 u
| S64, U64 -> int64_to_uint64 u
| S64, U128 -> int64_to_uint128 u
| S64, S8 -> int64_to_int8 u
| S64, S16 -> int64_to_int16 u
| S64, S32 -> int64_to_int32 u
| S64, S64 -> u
| S64, S128 -> int64_to_int128 u
| S128, U1 -> UInt8.rem (uint64_to_uint8 (int128_to_uint64 u)) 2uy
| S128, U8 -> uint64_to_uint8 (int128_to_uint64 u)
| S128, U16 -> uint64_to_uint16 (int128_to_uint64 u)
| S128, U32 -> uint64_to_uint32 (int128_to_uint64 u)
| S128, U64 -> int128_to_uint64 u
| S128, U128 -> int128_to_uint128 u
| S128, S8 -> uint64_to_int8 (int128_to_uint64 u)
| S128, S16 -> uint64_to_int16 (int128_to_uint64 u)
| S128, S32 -> uint64_to_int32 (int128_to_uint64 u)
| S128, S64 -> uint64_to_int64 (int128_to_uint64 u)
| S128, S128 -> u
#pop-options
[@(strict_on_arguments [0])]
let ones t l =
match t with
| U1 -> 0x1uy
| U8 -> 0xFFuy
| U16 -> 0xFFFFus
| U32 -> 0xFFFFFFFFul
| U64 -> 0xFFFFFFFFFFFFFFFFuL
| U128 ->
let x = UInt128.uint64_to_uint128 0xFFFFFFFFFFFFFFFFuL in
let y = (UInt128.shift_left x 64ul) `UInt128.add` x in
assert_norm (UInt128.v y == pow2 128 - 1);
y
| _ -> mk_int (-1)
let zeros t l = mk_int 0
[@(strict_on_arguments [0])]
let add_mod #t #l a b =
match t with
| U1 -> UInt8.rem (UInt8.add_mod a b) 2uy
| U8 -> UInt8.add_mod a b
| U16 -> UInt16.add_mod a b
| U32 -> UInt32.add_mod a b
| U64 -> UInt64.add_mod a b
| U128 -> UInt128.add_mod a b
let add_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let add #t #l a b =
match t with
| U1 -> UInt8.add a b
| U8 -> UInt8.add a b
| U16 -> UInt16.add a b
| U32 -> UInt32.add a b
| U64 -> UInt64.add a b
| U128 -> UInt128.add a b
| S8 -> Int8.add a b
| S16 -> Int16.add a b
| S32 -> Int32.add a b
| S64 -> Int64.add a b
| S128 -> Int128.add a b
let add_lemma #t #l a b = ()
#push-options "--max_fuel 1"
[@(strict_on_arguments [0])]
let incr #t #l a =
match t with
| U1 -> UInt8.add a 1uy
| U8 -> UInt8.add a 1uy
| U16 -> UInt16.add a 1us
| U32 -> UInt32.add a 1ul
| U64 -> UInt64.add a 1uL
| U128 -> UInt128.add a (UInt128.uint_to_t 1)
| S8 -> Int8.add a 1y
| S16 -> Int16.add a 1s
| S32 -> Int32.add a 1l
| S64 -> Int64.add a 1L
| S128 -> Int128.add a (Int128.int_to_t 1)
let incr_lemma #t #l a = ()
#pop-options
[@(strict_on_arguments [0])]
let mul_mod #t #l a b =
match t with
| U1 -> UInt8.mul_mod a b
| U8 -> UInt8.mul_mod a b
| U16 -> UInt16.mul_mod a b
| U32 -> UInt32.mul_mod a b
| U64 -> UInt64.mul_mod a b
let mul_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let mul #t #l a b =
match t with
| U1 -> UInt8.mul a b
| U8 -> UInt8.mul a b
| U16 -> UInt16.mul a b
| U32 -> UInt32.mul a b
| U64 -> UInt64.mul a b
| S8 -> Int8.mul a b
| S16 -> Int16.mul a b
| S32 -> Int32.mul a b
| S64 -> Int64.mul a b
let mul_lemma #t #l a b = ()
let mul64_wide a b = UInt128.mul_wide a b
let mul64_wide_lemma a b = ()
let mul_s64_wide a b = Int128.mul_wide a b
let mul_s64_wide_lemma a b = ()
[@(strict_on_arguments [0])]
let sub_mod #t #l a b =
match t with
| U1 -> UInt8.rem (UInt8.sub_mod a b) 2uy
| U8 -> UInt8.sub_mod a b
| U16 -> UInt16.sub_mod a b
| U32 -> UInt32.sub_mod a b
| U64 -> UInt64.sub_mod a b
| U128 -> UInt128.sub_mod a b
let sub_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let sub #t #l a b =
match t with
| U1 -> UInt8.sub a b
| U8 -> UInt8.sub a b
| U16 -> UInt16.sub a b
| U32 -> UInt32.sub a b
| U64 -> UInt64.sub a b
| U128 -> UInt128.sub a b
| S8 -> Int8.sub a b
| S16 -> Int16.sub a b
| S32 -> Int32.sub a b
| S64 -> Int64.sub a b
| S128 -> Int128.sub a b
let sub_lemma #t #l a b = ()
#push-options "--max_fuel 1"
[@(strict_on_arguments [0])]
let decr #t #l a =
match t with
| U1 -> UInt8.sub a 1uy
| U8 -> UInt8.sub a 1uy
| U16 -> UInt16.sub a 1us
| U32 -> UInt32.sub a 1ul
| U64 -> UInt64.sub a 1uL
| U128 -> UInt128.sub a (UInt128.uint_to_t 1)
| S8 -> Int8.sub a 1y
| S16 -> Int16.sub a 1s
| S32 -> Int32.sub a 1l
| S64 -> Int64.sub a 1L
| S128 -> Int128.sub a (Int128.int_to_t 1)
let decr_lemma #t #l a = ()
#pop-options
[@(strict_on_arguments [0])]
let logxor #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logxor 0uy 0uy == 0uy);
assert_norm (UInt8.logxor 0uy 1uy == 1uy);
assert_norm (UInt8.logxor 1uy 0uy == 1uy);
assert_norm (UInt8.logxor 1uy 1uy == 0uy);
UInt8.logxor a b
| U8 -> UInt8.logxor a b
| U16 -> UInt16.logxor a b
| U32 -> UInt32.logxor a b
| U64 -> UInt64.logxor a b
| U128 -> UInt128.logxor a b
| S8 -> Int8.logxor a b
| S16 -> Int16.logxor a b
| S32 -> Int32.logxor a b
| S64 -> Int64.logxor a b
| S128 -> Int128.logxor a b
#push-options "--max_fuel 1"
val logxor_lemma_: #t:inttype -> #l:secrecy_level -> a:int_t t l -> b:int_t t l -> Lemma
(v (a `logxor` (a `logxor` b)) == v b)
let logxor_lemma_ #t #l a b =
match t with
| U1 | U8 | U16 | U32 | U64 | U128 ->
UInt.logxor_associative #(bits t) (v a) (v a) (v b);
UInt.logxor_self #(bits t) (v a);
UInt.logxor_commutative #(bits t) 0 (v b);
UInt.logxor_lemma_1 #(bits t) (v b)
| S8 | S16 | S32 | S64 | S128 ->
Int.logxor_associative #(bits t) (v a) (v a) (v b);
Int.logxor_self #(bits t) (v a);
Int.logxor_commutative #(bits t) 0 (v b);
Int.logxor_lemma_1 #(bits t) (v b)
let logxor_lemma #t #l a b =
logxor_lemma_ #t a b;
v_extensionality (logxor a (logxor a b)) b;
begin
match t with
| U1 | U8 | U16 | U32 | U64 | U128 -> UInt.logxor_commutative #(bits t) (v a) (v b)
| S8 | S16 | S32 | S64 | S128 -> Int.logxor_commutative #(bits t) (v a) (v b)
end;
v_extensionality (logxor a (logxor b a)) b;
begin
match t with
| U1 | U8 | U16 | U32 | U64 | U128 -> UInt.logxor_lemma_1 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.logxor_lemma_1 #(bits t) (v a)
end;
v_extensionality (logxor a (mk_int #t #l 0)) a
let logxor_lemma1 #t #l a b =
match v a, v b with
| _, 0 ->
UInt.logxor_lemma_1 #(bits t) (v a)
| 0, _ ->
UInt.logxor_commutative #(bits t) (v a) (v b);
UInt.logxor_lemma_1 #(bits t) (v b)
| 1, 1 ->
v_extensionality a b;
UInt.logxor_self #(bits t) (v a)
let logxor_spec #t #l a b =
match t with
| U1 ->
assert_norm (u1 0 `logxor` u1 0 == u1 0 /\ u1 0 `logxor` u1 1 == u1 1);
assert_norm (u1 1 `logxor` u1 0 == u1 1 /\ u1 1 `logxor` u1 1 == u1 0);
assert_norm (0 `logxor_v #U1` 0 == 0 /\ 0 `logxor_v #U1` 1 == 1);
assert_norm (1 `logxor_v #U1` 0 == 1 /\ 1 `logxor_v #U1` 1 == 0)
| _ -> ()
#pop-options
[@(strict_on_arguments [0])]
let logand #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logand 0uy 0uy == 0uy);
assert_norm (UInt8.logand 0uy 1uy == 0uy);
assert_norm (UInt8.logand 1uy 0uy == 0uy);
assert_norm (UInt8.logand 1uy 1uy == 1uy);
UInt8.logand a b
| U8 -> UInt8.logand a b
| U16 -> UInt16.logand a b
| U32 -> UInt32.logand a b
| U64 -> UInt64.logand a b
| U128 -> UInt128.logand a b
| S8 -> Int8.logand a b
| S16 -> Int16.logand a b
| S32 -> Int32.logand a b
| S64 -> Int64.logand a b
| S128 -> Int128.logand a b
let logand_zeros #t #l a =
match t with
| U1 -> assert_norm (u1 0 `logand` zeros U1 l == u1 0 /\ u1 1 `logand` zeros U1 l == u1 0)
| U8 | U16 | U32 | U64 | U128 -> UInt.logand_lemma_1 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.logand_lemma_1 #(bits t) (v a)
let logand_ones #t #l a =
match t with
| U1 -> assert_norm (u1 0 `logand` ones U1 l == u1 0 /\ u1 1 `logand` ones U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logand_lemma_2 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.logand_lemma_2 #(bits t) (v a)
let logand_lemma #t #l a b =
logand_zeros #t #l b;
logand_ones #t #l b;
match t with
| U1 ->
assert_norm (u1 0 `logand` zeros U1 l == u1 0 /\ u1 1 `logand` zeros U1 l == u1 0);
assert_norm (u1 0 `logand` ones U1 l == u1 0 /\ u1 1 `logand` ones U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logand_commutative #(bits t) (v a) (v b)
| S8 | S16 | S32 | S64 | S128 -> Int.logand_commutative #(bits t) (v a) (v b)
let logand_spec #t #l a b =
match t with
| U1 ->
assert_norm (u1 0 `logand` u1 0 == u1 0 /\ u1 0 `logand` u1 1 == u1 0);
assert_norm (u1 1 `logand` u1 0 == u1 0 /\ u1 1 `logand` u1 1 == u1 1);
assert_norm (0 `logand_v #U1` 0 == 0 /\ 0 `logand_v #U1` 1 == 0);
assert_norm (1 `logand_v #U1` 0 == 0 /\ 1 `logand_v #U1` 1 == 1)
| _ -> ()
let logand_le #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logand 0uy 0uy == 0uy);
assert_norm (UInt8.logand 0uy 1uy == 0uy);
assert_norm (UInt8.logand 1uy 0uy == 0uy);
assert_norm (UInt8.logand 1uy 1uy == 1uy)
| U8 -> UInt.logand_le (UInt.to_uint_t 8 (v a)) (UInt.to_uint_t 8 (v b))
| U16 -> UInt.logand_le (UInt.to_uint_t 16 (v a)) (UInt.to_uint_t 16 (v b))
| U32 -> UInt.logand_le (UInt.to_uint_t 32 (v a)) (UInt.to_uint_t 32 (v b))
| U64 -> UInt.logand_le (UInt.to_uint_t 64 (v a)) (UInt.to_uint_t 64 (v b))
| U128 -> UInt.logand_le (UInt.to_uint_t 128 (v a)) (UInt.to_uint_t 128 (v b)) | false | false | Lib.IntTypes.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 200,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val logand_mask: #t:inttype{unsigned t} -> #l:secrecy_level -> a:uint_t t l -> b:uint_t t l -> m:pos{m < bits t} ->
Lemma
(requires v b == pow2 m - 1)
(ensures v (logand #t #l a b) == v a % pow2 m) | [] | Lib.IntTypes.logand_mask | {
"file_name": "lib/Lib.IntTypes.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | a: Lib.IntTypes.uint_t t l -> b: Lib.IntTypes.uint_t t l -> m: Prims.pos{m < Lib.IntTypes.bits t}
-> FStar.Pervasives.Lemma (requires Lib.IntTypes.v b == Prims.pow2 m - 1)
(ensures Lib.IntTypes.v (Lib.IntTypes.logand a b) == Lib.IntTypes.v a % Prims.pow2 m) | {
"end_col": 57,
"end_line": 566,
"start_col": 2,
"start_line": 556
} |
Prims.Tot | val u128: n:range_t U64 -> u:uint128{v #U128 u == n} | [
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let u128 n = FStar.UInt128.uint64_to_uint128 (u64 n) | val u128: n:range_t U64 -> u:uint128{v #U128 u == n}
let u128 n = | false | null | false | FStar.UInt128.uint64_to_uint128 (u64 n) | {
"checked_file": "Lib.IntTypes.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt8.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt16.fsti.checked",
"FStar.UInt128.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int8.fsti.checked",
"FStar.Int64.fsti.checked",
"FStar.Int32.fsti.checked",
"FStar.Int16.fsti.checked",
"FStar.Int128.fsti.checked",
"FStar.Int.Cast.Full.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Int.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "Lib.IntTypes.fst"
} | [
"total"
] | [
"Lib.IntTypes.range_t",
"Lib.IntTypes.U64",
"FStar.UInt128.uint64_to_uint128",
"Lib.IntTypes.u64",
"Lib.IntTypes.uint128",
"Prims.eq2",
"Prims.int",
"Prims.l_or",
"Lib.IntTypes.range",
"Lib.IntTypes.U128",
"Lib.IntTypes.v",
"Lib.IntTypes.SEC"
] | [] | module Lib.IntTypes
open FStar.Math.Lemmas
#push-options "--max_fuel 0 --max_ifuel 1 --z3rlimit 200"
let pow2_2 _ = assert_norm (pow2 2 = 4)
let pow2_3 _ = assert_norm (pow2 3 = 8)
let pow2_4 _ = assert_norm (pow2 4 = 16)
let pow2_127 _ = assert_norm (pow2 127 = 0x80000000000000000000000000000000)
let bits_numbytes t = ()
let sec_int_t t = pub_int_t t
let sec_int_v #t u = pub_int_v u
let secret #t x = x
[@(strict_on_arguments [0])]
let mk_int #t #l x =
match t with
| U1 -> UInt8.uint_to_t x
| U8 -> UInt8.uint_to_t x
| U16 -> UInt16.uint_to_t x
| U32 -> UInt32.uint_to_t x
| U64 -> UInt64.uint_to_t x
| U128 -> UInt128.uint_to_t x
| S8 -> Int8.int_to_t x
| S16 -> Int16.int_to_t x
| S32 -> Int32.int_to_t x
| S64 -> Int64.int_to_t x
| S128 -> Int128.int_to_t x
val v_extensionality:
#t:inttype
-> #l:secrecy_level
-> a:int_t t l
-> b:int_t t l
-> Lemma
(requires v a == v b)
(ensures a == b)
let v_extensionality #t #l a b =
match t with
| U1 -> ()
| U8 -> UInt8.v_inj a b
| U16 -> UInt16.v_inj a b
| U32 -> UInt32.v_inj a b
| U64 -> UInt64.v_inj a b
| U128 -> UInt128.v_inj a b
| S8 -> Int8.v_inj a b
| S16 -> Int16.v_inj a b
| S32 -> Int32.v_inj a b
| S64 -> Int64.v_inj a b
| S128 -> Int128.v_inj a b
let v_injective #t #l a =
v_extensionality a (mk_int (v a))
let v_mk_int #t #l n = () | false | false | Lib.IntTypes.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 200,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val u128: n:range_t U64 -> u:uint128{v #U128 u == n} | [] | Lib.IntTypes.u128 | {
"file_name": "lib/Lib.IntTypes.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | n: Lib.IntTypes.range_t Lib.IntTypes.U64 -> u86: Lib.IntTypes.uint128{Lib.IntTypes.v u86 == n} | {
"end_col": 52,
"end_line": 62,
"start_col": 13,
"start_line": 62
} |
Prims.Tot | val neq_mask: #t:inttype{~(S128? t)} -> a:int_t t SEC -> b:int_t t SEC -> int_t t SEC | [
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let neq_mask #t a b = lognot (eq_mask #t a b) | val neq_mask: #t:inttype{~(S128? t)} -> a:int_t t SEC -> b:int_t t SEC -> int_t t SEC
let neq_mask #t a b = | false | null | false | lognot (eq_mask #t a b) | {
"checked_file": "Lib.IntTypes.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt8.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt16.fsti.checked",
"FStar.UInt128.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int8.fsti.checked",
"FStar.Int64.fsti.checked",
"FStar.Int32.fsti.checked",
"FStar.Int16.fsti.checked",
"FStar.Int128.fsti.checked",
"FStar.Int.Cast.Full.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Int.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "Lib.IntTypes.fst"
} | [
"total"
] | [
"Lib.IntTypes.inttype",
"Prims.l_not",
"Prims.b2t",
"Lib.IntTypes.uu___is_S128",
"Lib.IntTypes.int_t",
"Lib.IntTypes.SEC",
"Lib.IntTypes.lognot",
"Lib.IntTypes.eq_mask"
] | [] | module Lib.IntTypes
open FStar.Math.Lemmas
#push-options "--max_fuel 0 --max_ifuel 1 --z3rlimit 200"
let pow2_2 _ = assert_norm (pow2 2 = 4)
let pow2_3 _ = assert_norm (pow2 3 = 8)
let pow2_4 _ = assert_norm (pow2 4 = 16)
let pow2_127 _ = assert_norm (pow2 127 = 0x80000000000000000000000000000000)
let bits_numbytes t = ()
let sec_int_t t = pub_int_t t
let sec_int_v #t u = pub_int_v u
let secret #t x = x
[@(strict_on_arguments [0])]
let mk_int #t #l x =
match t with
| U1 -> UInt8.uint_to_t x
| U8 -> UInt8.uint_to_t x
| U16 -> UInt16.uint_to_t x
| U32 -> UInt32.uint_to_t x
| U64 -> UInt64.uint_to_t x
| U128 -> UInt128.uint_to_t x
| S8 -> Int8.int_to_t x
| S16 -> Int16.int_to_t x
| S32 -> Int32.int_to_t x
| S64 -> Int64.int_to_t x
| S128 -> Int128.int_to_t x
val v_extensionality:
#t:inttype
-> #l:secrecy_level
-> a:int_t t l
-> b:int_t t l
-> Lemma
(requires v a == v b)
(ensures a == b)
let v_extensionality #t #l a b =
match t with
| U1 -> ()
| U8 -> UInt8.v_inj a b
| U16 -> UInt16.v_inj a b
| U32 -> UInt32.v_inj a b
| U64 -> UInt64.v_inj a b
| U128 -> UInt128.v_inj a b
| S8 -> Int8.v_inj a b
| S16 -> Int16.v_inj a b
| S32 -> Int32.v_inj a b
| S64 -> Int64.v_inj a b
| S128 -> Int128.v_inj a b
let v_injective #t #l a =
v_extensionality a (mk_int (v a))
let v_mk_int #t #l n = ()
let u128 n = FStar.UInt128.uint64_to_uint128 (u64 n)
// KaRaMeL will extract this to FStar_Int128_int_to_t, which isn't provided
// We'll need to have FStar.Int128.int64_to_int128 to support int128_t literals
let i128 n =
assert_norm (pow2 (bits S64 - 1) <= pow2 (bits S128 - 1));
sint #S128 #SEC n
let size_to_uint32 x = x
let size_to_uint64 x = Int.Cast.uint32_to_uint64 x
let byte_to_uint8 x = x
let byte_to_int8 x = Int.Cast.uint8_to_int8 x
let op_At_Percent = Int.op_At_Percent
// FStar.UInt128 gets special treatment in KaRaMeL. There is no
// equivalent for FStar.Int128 at the moment, so we use the three
// assumed cast operators below.
//
// Using them will fail at runtime with an informative message.
// The commented-out implementations show that they are realizable.
//
// When support for `FStar.Int128` is added KaRaMeL, these casts must
// be added as special cases. When using builtin compiler support for
// `int128_t`, they can be implemented directly as C casts without
// undefined or implementation-defined behaviour.
assume
val uint128_to_int128: a:UInt128.t{v a <= maxint S128} -> b:Int128.t{Int128.v b == UInt128.v a}
//let uint128_to_int128 a = Int128.int_to_t (v a)
assume
val int128_to_uint128: a:Int128.t -> b:UInt128.t{UInt128.v b == Int128.v a % pow2 128}
//let int128_to_uint128 a = mk_int (v a % pow2 128)
assume
val int64_to_int128: a:Int64.t -> b:Int128.t{Int128.v b == Int64.v a}
//let int64_to_int128 a = Int128.int_to_t (v a)
val uint64_to_int128: a:UInt64.t -> b:Int128.t{Int128.v b == UInt64.v a}
let uint64_to_int128 a = uint128_to_int128 (Int.Cast.Full.uint64_to_uint128 a)
val int64_to_uint128: a:Int64.t -> b:UInt128.t{UInt128.v b == Int64.v a % pow2 128}
let int64_to_uint128 a = int128_to_uint128 (int64_to_int128 a)
val int128_to_uint64: a:Int128.t -> b:UInt64.t{UInt64.v b == Int128.v a % pow2 64}
let int128_to_uint64 a = Int.Cast.Full.uint128_to_uint64 (int128_to_uint128 a)
#push-options "--z3rlimit 1000"
[@(strict_on_arguments [0;2])]
let cast #t #l t' l' u =
assert_norm (pow2 8 = 2 * pow2 7);
assert_norm (pow2 16 = 2 * pow2 15);
assert_norm (pow2 64 * pow2 64 = pow2 128);
assert_norm (pow2 16 * pow2 48 = pow2 64);
assert_norm (pow2 8 * pow2 56 = pow2 64);
assert_norm (pow2 32 * pow2 32 = pow2 64);
modulo_modulo_lemma (v u) (pow2 32) (pow2 32);
modulo_modulo_lemma (v u) (pow2 64) (pow2 64);
modulo_modulo_lemma (v u) (pow2 128) (pow2 64);
modulo_modulo_lemma (v u) (pow2 16) (pow2 48);
modulo_modulo_lemma (v u) (pow2 8) (pow2 56);
let open FStar.Int.Cast in
let open FStar.Int.Cast.Full in
match t, t' with
| U1, U1 -> u
| U1, U8 -> u
| U1, U16 -> uint8_to_uint16 u
| U1, U32 -> uint8_to_uint32 u
| U1, U64 -> uint8_to_uint64 u
| U1, U128 -> UInt128.uint64_to_uint128 (uint8_to_uint64 u)
| U1, S8 -> uint8_to_int8 u
| U1, S16 -> uint8_to_int16 u
| U1, S32 -> uint8_to_int32 u
| U1, S64 -> uint8_to_int64 u
| U1, S128 -> uint64_to_int128 (uint8_to_uint64 u)
| U8, U1 -> UInt8.rem u 2uy
| U8, U8 -> u
| U8, U16 -> uint8_to_uint16 u
| U8, U32 -> uint8_to_uint32 u
| U8, U64 -> uint8_to_uint64 u
| U8, U128 -> UInt128.uint64_to_uint128 (uint8_to_uint64 u)
| U8, S8 -> uint8_to_int8 u
| U8, S16 -> uint8_to_int16 u
| U8, S32 -> uint8_to_int32 u
| U8, S64 -> uint8_to_int64 u
| U8, S128 -> uint64_to_int128 (uint8_to_uint64 u)
| U16, U1 -> UInt8.rem (uint16_to_uint8 u) 2uy
| U16, U8 -> uint16_to_uint8 u
| U16, U16 -> u
| U16, U32 -> uint16_to_uint32 u
| U16, U64 -> uint16_to_uint64 u
| U16, U128 -> UInt128.uint64_to_uint128 (uint16_to_uint64 u)
| U16, S8 -> uint16_to_int8 u
| U16, S16 -> uint16_to_int16 u
| U16, S32 -> uint16_to_int32 u
| U16, S64 -> uint16_to_int64 u
| U16, S128 -> uint64_to_int128 (uint16_to_uint64 u)
| U32, U1 -> UInt8.rem (uint32_to_uint8 u) 2uy
| U32, U8 -> uint32_to_uint8 u
| U32, U16 -> uint32_to_uint16 u
| U32, U32 -> u
| U32, U64 -> uint32_to_uint64 u
| U32, U128 -> UInt128.uint64_to_uint128 (uint32_to_uint64 u)
| U32, S8 -> uint32_to_int8 u
| U32, S16 -> uint32_to_int16 u
| U32, S32 -> uint32_to_int32 u
| U32, S64 -> uint32_to_int64 u
| U32, S128 -> uint64_to_int128 (uint32_to_uint64 u)
| U64, U1 -> UInt8.rem (uint64_to_uint8 u) 2uy
| U64, U8 -> uint64_to_uint8 u
| U64, U16 -> uint64_to_uint16 u
| U64, U32 -> uint64_to_uint32 u
| U64, U64 -> u
| U64, U128 -> UInt128.uint64_to_uint128 u
| U64, S8 -> uint64_to_int8 u
| U64, S16 -> uint64_to_int16 u
| U64, S32 -> uint64_to_int32 u
| U64, S64 -> uint64_to_int64 u
| U64, S128 -> uint64_to_int128 u
| U128, U1 -> UInt8.rem (uint64_to_uint8 (uint128_to_uint64 u)) 2uy
| U128, U8 -> uint64_to_uint8 (UInt128.uint128_to_uint64 u)
| U128, U16 -> uint64_to_uint16 (UInt128.uint128_to_uint64 u)
| U128, U32 -> uint64_to_uint32 (UInt128.uint128_to_uint64 u)
| U128, U64 -> UInt128.uint128_to_uint64 u
| U128, U128 -> u
| U128, S8 -> uint64_to_int8 (UInt128.uint128_to_uint64 u)
| U128, S16 -> uint64_to_int16 (UInt128.uint128_to_uint64 u)
| U128, S32 -> uint64_to_int32 (UInt128.uint128_to_uint64 u)
| U128, S64 -> uint64_to_int64 (UInt128.uint128_to_uint64 u)
| U128, S128 -> uint128_to_int128 u
| S8, U1 -> UInt8.rem (int8_to_uint8 u) 2uy
| S8, U8 -> int8_to_uint8 u
| S8, U16 -> int8_to_uint16 u
| S8, U32 -> int8_to_uint32 u
| S8, U64 -> int8_to_uint64 u
| S8, U128 -> int64_to_uint128 (int8_to_int64 u)
| S8, S8 -> u
| S8, S16 -> int8_to_int16 u
| S8, S32 -> int8_to_int32 u
| S8, S64 -> int8_to_int64 u
| S8, S128 -> int64_to_int128 (int8_to_int64 u)
| S16, U1 -> UInt8.rem (int16_to_uint8 u) 2uy
| S16, U8 -> int16_to_uint8 u
| S16, U16 -> int16_to_uint16 u
| S16, U32 -> int16_to_uint32 u
| S16, U64 -> int16_to_uint64 u
| S16, U128 -> int64_to_uint128 (int16_to_int64 u)
| S16, S8 -> int16_to_int8 u
| S16, S16 -> u
| S16, S32 -> int16_to_int32 u
| S16, S64 -> int16_to_int64 u
| S16, S128 -> int64_to_int128 (int16_to_int64 u)
| S32, U1 -> UInt8.rem (int32_to_uint8 u) 2uy
| S32, U8 -> int32_to_uint8 u
| S32, U16 -> int32_to_uint16 u
| S32, U32 -> int32_to_uint32 u
| S32, U64 -> int32_to_uint64 u
| S32, U128 -> int64_to_uint128 (int32_to_int64 u)
| S32, S8 -> int32_to_int8 u
| S32, S16 -> int32_to_int16 u
| S32, S32 -> u
| S32, S64 -> int32_to_int64 u
| S32, S128 -> int64_to_int128 (int32_to_int64 u)
| S64, U1 -> UInt8.rem (int64_to_uint8 u) 2uy
| S64, U8 -> int64_to_uint8 u
| S64, U16 -> int64_to_uint16 u
| S64, U32 -> int64_to_uint32 u
| S64, U64 -> int64_to_uint64 u
| S64, U128 -> int64_to_uint128 u
| S64, S8 -> int64_to_int8 u
| S64, S16 -> int64_to_int16 u
| S64, S32 -> int64_to_int32 u
| S64, S64 -> u
| S64, S128 -> int64_to_int128 u
| S128, U1 -> UInt8.rem (uint64_to_uint8 (int128_to_uint64 u)) 2uy
| S128, U8 -> uint64_to_uint8 (int128_to_uint64 u)
| S128, U16 -> uint64_to_uint16 (int128_to_uint64 u)
| S128, U32 -> uint64_to_uint32 (int128_to_uint64 u)
| S128, U64 -> int128_to_uint64 u
| S128, U128 -> int128_to_uint128 u
| S128, S8 -> uint64_to_int8 (int128_to_uint64 u)
| S128, S16 -> uint64_to_int16 (int128_to_uint64 u)
| S128, S32 -> uint64_to_int32 (int128_to_uint64 u)
| S128, S64 -> uint64_to_int64 (int128_to_uint64 u)
| S128, S128 -> u
#pop-options
[@(strict_on_arguments [0])]
let ones t l =
match t with
| U1 -> 0x1uy
| U8 -> 0xFFuy
| U16 -> 0xFFFFus
| U32 -> 0xFFFFFFFFul
| U64 -> 0xFFFFFFFFFFFFFFFFuL
| U128 ->
let x = UInt128.uint64_to_uint128 0xFFFFFFFFFFFFFFFFuL in
let y = (UInt128.shift_left x 64ul) `UInt128.add` x in
assert_norm (UInt128.v y == pow2 128 - 1);
y
| _ -> mk_int (-1)
let zeros t l = mk_int 0
[@(strict_on_arguments [0])]
let add_mod #t #l a b =
match t with
| U1 -> UInt8.rem (UInt8.add_mod a b) 2uy
| U8 -> UInt8.add_mod a b
| U16 -> UInt16.add_mod a b
| U32 -> UInt32.add_mod a b
| U64 -> UInt64.add_mod a b
| U128 -> UInt128.add_mod a b
let add_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let add #t #l a b =
match t with
| U1 -> UInt8.add a b
| U8 -> UInt8.add a b
| U16 -> UInt16.add a b
| U32 -> UInt32.add a b
| U64 -> UInt64.add a b
| U128 -> UInt128.add a b
| S8 -> Int8.add a b
| S16 -> Int16.add a b
| S32 -> Int32.add a b
| S64 -> Int64.add a b
| S128 -> Int128.add a b
let add_lemma #t #l a b = ()
#push-options "--max_fuel 1"
[@(strict_on_arguments [0])]
let incr #t #l a =
match t with
| U1 -> UInt8.add a 1uy
| U8 -> UInt8.add a 1uy
| U16 -> UInt16.add a 1us
| U32 -> UInt32.add a 1ul
| U64 -> UInt64.add a 1uL
| U128 -> UInt128.add a (UInt128.uint_to_t 1)
| S8 -> Int8.add a 1y
| S16 -> Int16.add a 1s
| S32 -> Int32.add a 1l
| S64 -> Int64.add a 1L
| S128 -> Int128.add a (Int128.int_to_t 1)
let incr_lemma #t #l a = ()
#pop-options
[@(strict_on_arguments [0])]
let mul_mod #t #l a b =
match t with
| U1 -> UInt8.mul_mod a b
| U8 -> UInt8.mul_mod a b
| U16 -> UInt16.mul_mod a b
| U32 -> UInt32.mul_mod a b
| U64 -> UInt64.mul_mod a b
let mul_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let mul #t #l a b =
match t with
| U1 -> UInt8.mul a b
| U8 -> UInt8.mul a b
| U16 -> UInt16.mul a b
| U32 -> UInt32.mul a b
| U64 -> UInt64.mul a b
| S8 -> Int8.mul a b
| S16 -> Int16.mul a b
| S32 -> Int32.mul a b
| S64 -> Int64.mul a b
let mul_lemma #t #l a b = ()
let mul64_wide a b = UInt128.mul_wide a b
let mul64_wide_lemma a b = ()
let mul_s64_wide a b = Int128.mul_wide a b
let mul_s64_wide_lemma a b = ()
[@(strict_on_arguments [0])]
let sub_mod #t #l a b =
match t with
| U1 -> UInt8.rem (UInt8.sub_mod a b) 2uy
| U8 -> UInt8.sub_mod a b
| U16 -> UInt16.sub_mod a b
| U32 -> UInt32.sub_mod a b
| U64 -> UInt64.sub_mod a b
| U128 -> UInt128.sub_mod a b
let sub_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let sub #t #l a b =
match t with
| U1 -> UInt8.sub a b
| U8 -> UInt8.sub a b
| U16 -> UInt16.sub a b
| U32 -> UInt32.sub a b
| U64 -> UInt64.sub a b
| U128 -> UInt128.sub a b
| S8 -> Int8.sub a b
| S16 -> Int16.sub a b
| S32 -> Int32.sub a b
| S64 -> Int64.sub a b
| S128 -> Int128.sub a b
let sub_lemma #t #l a b = ()
#push-options "--max_fuel 1"
[@(strict_on_arguments [0])]
let decr #t #l a =
match t with
| U1 -> UInt8.sub a 1uy
| U8 -> UInt8.sub a 1uy
| U16 -> UInt16.sub a 1us
| U32 -> UInt32.sub a 1ul
| U64 -> UInt64.sub a 1uL
| U128 -> UInt128.sub a (UInt128.uint_to_t 1)
| S8 -> Int8.sub a 1y
| S16 -> Int16.sub a 1s
| S32 -> Int32.sub a 1l
| S64 -> Int64.sub a 1L
| S128 -> Int128.sub a (Int128.int_to_t 1)
let decr_lemma #t #l a = ()
#pop-options
[@(strict_on_arguments [0])]
let logxor #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logxor 0uy 0uy == 0uy);
assert_norm (UInt8.logxor 0uy 1uy == 1uy);
assert_norm (UInt8.logxor 1uy 0uy == 1uy);
assert_norm (UInt8.logxor 1uy 1uy == 0uy);
UInt8.logxor a b
| U8 -> UInt8.logxor a b
| U16 -> UInt16.logxor a b
| U32 -> UInt32.logxor a b
| U64 -> UInt64.logxor a b
| U128 -> UInt128.logxor a b
| S8 -> Int8.logxor a b
| S16 -> Int16.logxor a b
| S32 -> Int32.logxor a b
| S64 -> Int64.logxor a b
| S128 -> Int128.logxor a b
#push-options "--max_fuel 1"
val logxor_lemma_: #t:inttype -> #l:secrecy_level -> a:int_t t l -> b:int_t t l -> Lemma
(v (a `logxor` (a `logxor` b)) == v b)
let logxor_lemma_ #t #l a b =
match t with
| U1 | U8 | U16 | U32 | U64 | U128 ->
UInt.logxor_associative #(bits t) (v a) (v a) (v b);
UInt.logxor_self #(bits t) (v a);
UInt.logxor_commutative #(bits t) 0 (v b);
UInt.logxor_lemma_1 #(bits t) (v b)
| S8 | S16 | S32 | S64 | S128 ->
Int.logxor_associative #(bits t) (v a) (v a) (v b);
Int.logxor_self #(bits t) (v a);
Int.logxor_commutative #(bits t) 0 (v b);
Int.logxor_lemma_1 #(bits t) (v b)
let logxor_lemma #t #l a b =
logxor_lemma_ #t a b;
v_extensionality (logxor a (logxor a b)) b;
begin
match t with
| U1 | U8 | U16 | U32 | U64 | U128 -> UInt.logxor_commutative #(bits t) (v a) (v b)
| S8 | S16 | S32 | S64 | S128 -> Int.logxor_commutative #(bits t) (v a) (v b)
end;
v_extensionality (logxor a (logxor b a)) b;
begin
match t with
| U1 | U8 | U16 | U32 | U64 | U128 -> UInt.logxor_lemma_1 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.logxor_lemma_1 #(bits t) (v a)
end;
v_extensionality (logxor a (mk_int #t #l 0)) a
let logxor_lemma1 #t #l a b =
match v a, v b with
| _, 0 ->
UInt.logxor_lemma_1 #(bits t) (v a)
| 0, _ ->
UInt.logxor_commutative #(bits t) (v a) (v b);
UInt.logxor_lemma_1 #(bits t) (v b)
| 1, 1 ->
v_extensionality a b;
UInt.logxor_self #(bits t) (v a)
let logxor_spec #t #l a b =
match t with
| U1 ->
assert_norm (u1 0 `logxor` u1 0 == u1 0 /\ u1 0 `logxor` u1 1 == u1 1);
assert_norm (u1 1 `logxor` u1 0 == u1 1 /\ u1 1 `logxor` u1 1 == u1 0);
assert_norm (0 `logxor_v #U1` 0 == 0 /\ 0 `logxor_v #U1` 1 == 1);
assert_norm (1 `logxor_v #U1` 0 == 1 /\ 1 `logxor_v #U1` 1 == 0)
| _ -> ()
#pop-options
[@(strict_on_arguments [0])]
let logand #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logand 0uy 0uy == 0uy);
assert_norm (UInt8.logand 0uy 1uy == 0uy);
assert_norm (UInt8.logand 1uy 0uy == 0uy);
assert_norm (UInt8.logand 1uy 1uy == 1uy);
UInt8.logand a b
| U8 -> UInt8.logand a b
| U16 -> UInt16.logand a b
| U32 -> UInt32.logand a b
| U64 -> UInt64.logand a b
| U128 -> UInt128.logand a b
| S8 -> Int8.logand a b
| S16 -> Int16.logand a b
| S32 -> Int32.logand a b
| S64 -> Int64.logand a b
| S128 -> Int128.logand a b
let logand_zeros #t #l a =
match t with
| U1 -> assert_norm (u1 0 `logand` zeros U1 l == u1 0 /\ u1 1 `logand` zeros U1 l == u1 0)
| U8 | U16 | U32 | U64 | U128 -> UInt.logand_lemma_1 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.logand_lemma_1 #(bits t) (v a)
let logand_ones #t #l a =
match t with
| U1 -> assert_norm (u1 0 `logand` ones U1 l == u1 0 /\ u1 1 `logand` ones U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logand_lemma_2 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.logand_lemma_2 #(bits t) (v a)
let logand_lemma #t #l a b =
logand_zeros #t #l b;
logand_ones #t #l b;
match t with
| U1 ->
assert_norm (u1 0 `logand` zeros U1 l == u1 0 /\ u1 1 `logand` zeros U1 l == u1 0);
assert_norm (u1 0 `logand` ones U1 l == u1 0 /\ u1 1 `logand` ones U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logand_commutative #(bits t) (v a) (v b)
| S8 | S16 | S32 | S64 | S128 -> Int.logand_commutative #(bits t) (v a) (v b)
let logand_spec #t #l a b =
match t with
| U1 ->
assert_norm (u1 0 `logand` u1 0 == u1 0 /\ u1 0 `logand` u1 1 == u1 0);
assert_norm (u1 1 `logand` u1 0 == u1 0 /\ u1 1 `logand` u1 1 == u1 1);
assert_norm (0 `logand_v #U1` 0 == 0 /\ 0 `logand_v #U1` 1 == 0);
assert_norm (1 `logand_v #U1` 0 == 0 /\ 1 `logand_v #U1` 1 == 1)
| _ -> ()
let logand_le #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logand 0uy 0uy == 0uy);
assert_norm (UInt8.logand 0uy 1uy == 0uy);
assert_norm (UInt8.logand 1uy 0uy == 0uy);
assert_norm (UInt8.logand 1uy 1uy == 1uy)
| U8 -> UInt.logand_le (UInt.to_uint_t 8 (v a)) (UInt.to_uint_t 8 (v b))
| U16 -> UInt.logand_le (UInt.to_uint_t 16 (v a)) (UInt.to_uint_t 16 (v b))
| U32 -> UInt.logand_le (UInt.to_uint_t 32 (v a)) (UInt.to_uint_t 32 (v b))
| U64 -> UInt.logand_le (UInt.to_uint_t 64 (v a)) (UInt.to_uint_t 64 (v b))
| U128 -> UInt.logand_le (UInt.to_uint_t 128 (v a)) (UInt.to_uint_t 128 (v b))
let logand_mask #t #l a b m =
match t with
| U1 ->
assert_norm (UInt8.logand 0uy 0uy == 0uy);
assert_norm (UInt8.logand 0uy 1uy == 0uy);
assert_norm (UInt8.logand 1uy 0uy == 0uy);
assert_norm (UInt8.logand 1uy 1uy == 1uy)
| U8 -> UInt.logand_mask (UInt.to_uint_t 8 (v a)) m
| U16 -> UInt.logand_mask (UInt.to_uint_t 16 (v a)) m
| U32 -> UInt.logand_mask (UInt.to_uint_t 32 (v a)) m
| U64 -> UInt.logand_mask (UInt.to_uint_t 64 (v a)) m
| U128 -> UInt.logand_mask (UInt.to_uint_t 128 (v a)) m
[@(strict_on_arguments [0])]
let logor #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logor 0uy 0uy == 0uy);
assert_norm (UInt8.logor 0uy 1uy == 1uy);
assert_norm (UInt8.logor 1uy 0uy == 1uy);
assert_norm (UInt8.logor 1uy 1uy == 1uy);
UInt8.logor a b
| U8 -> UInt8.logor a b
| U16 -> UInt16.logor a b
| U32 -> UInt32.logor a b
| U64 -> UInt64.logor a b
| U128 -> UInt128.logor a b
| S8 -> Int8.logor a b
| S16 -> Int16.logor a b
| S32 -> Int32.logor a b
| S64 -> Int64.logor a b
| S128 -> Int128.logor a b
#push-options "--max_fuel 1"
let logor_disjoint #t #l a b m =
if m > 0 then
begin
UInt.logor_disjoint #(bits t) (v b) (v a) m;
UInt.logor_commutative #(bits t) (v b) (v a)
end
else
begin
UInt.logor_commutative #(bits t) (v a) (v b);
UInt.logor_lemma_1 #(bits t) (v b)
end
#pop-options
let logor_zeros #t #l a =
match t with
|U1 -> assert_norm(u1 0 `logor` zeros U1 l == u1 0 /\ u1 1 `logor` zeros U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logor_lemma_1 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.nth_lemma #(bits t) (Int.logor #(bits t) (v a) (Int.zero (bits t))) (v a)
let logor_ones #t #l a =
match t with
|U1 -> assert_norm(u1 0 `logor` ones U1 l == u1 1 /\ u1 1 `logor` ones U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logor_lemma_2 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.nth_lemma (Int.logor #(bits t) (v a) (Int.ones (bits t))) (Int.ones (bits t))
let logor_lemma #t #l a b =
logor_zeros #t #l b;
logor_ones #t #l b;
match t with
| U1 ->
assert_norm(u1 0 `logor` ones U1 l == u1 1 /\ u1 1 `logor` ones U1 l == u1 1);
assert_norm(u1 0 `logor` zeros U1 l == u1 0 /\ u1 1 `logor` zeros U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logor_commutative #(bits t) (v a) (v b)
| S8 | S16 | S32 | S64 | S128 -> Int.nth_lemma #(bits t) (Int.logor #(bits t) (v a) (v b)) (Int.logor #(bits t) (v b) (v a))
let logor_spec #t #l a b =
match t with
| U1 ->
assert_norm(u1 0 `logor` ones U1 l == u1 1 /\ u1 1 `logor` ones U1 l == u1 1);
assert_norm(u1 0 `logor` zeros U1 l == u1 0 /\ u1 1 `logor` zeros U1 l == u1 1);
assert_norm (0 `logor_v #U1` 0 == 0 /\ 0 `logor_v #U1` 1 == 1);
assert_norm (1 `logor_v #U1` 0 == 1 /\ 1 `logor_v #U1` 1 == 1)
| _ -> ()
[@(strict_on_arguments [0])]
let lognot #t #l a =
match t with
| U1 -> UInt8.rem (UInt8.lognot a) 2uy
| U8 -> UInt8.lognot a
| U16 -> UInt16.lognot a
| U32 -> UInt32.lognot a
| U64 -> UInt64.lognot a
| U128 -> UInt128.lognot a
| S8 -> Int8.lognot a
| S16 -> Int16.lognot a
| S32 -> Int32.lognot a
| S64 -> Int64.lognot a
| S128 -> Int128.lognot a
let lognot_lemma #t #l a =
match t with
|U1 -> assert_norm(lognot (u1 0) == u1 1 /\ lognot (u1 1) == u1 0)
| U8 | U16 | U32 | U64 | U128 ->
FStar.UInt.lognot_lemma_1 #(bits t);
UInt.nth_lemma (FStar.UInt.lognot #(bits t) (UInt.ones (bits t))) (UInt.zero (bits t))
| S8 | S16 | S32 | S64 | S128 ->
Int.nth_lemma (FStar.Int.lognot #(bits t) (Int.zero (bits t))) (Int.ones (bits t));
Int.nth_lemma (FStar.Int.lognot #(bits t) (Int.ones (bits t))) (Int.zero (bits t))
let lognot_spec #t #l a =
match t with
| U1 ->
assert_norm(lognot (u1 0) == u1 1 /\ lognot (u1 1) == u1 0);
assert_norm(lognot_v #U1 0 == 1 /\ lognot_v #U1 1 == 0)
| _ -> ()
[@(strict_on_arguments [0])]
let shift_right #t #l a b =
match t with
| U1 -> UInt8.shift_right a b
| U8 -> UInt8.shift_right a b
| U16 -> UInt16.shift_right a b
| U32 -> UInt32.shift_right a b
| U64 -> UInt64.shift_right a b
| U128 -> UInt128.shift_right a b
| S8 -> Int8.shift_arithmetic_right a b
| S16 -> Int16.shift_arithmetic_right a b
| S32 -> Int32.shift_arithmetic_right a b
| S64 -> Int64.shift_arithmetic_right a b
| S128 -> Int128.shift_arithmetic_right a b
val shift_right_value_aux_1: #n:pos{1 < n} -> a:Int.int_t n -> s:nat{n <= s} ->
Lemma (Int.shift_arithmetic_right #n a s = a / pow2 s)
let shift_right_value_aux_1 #n a s =
pow2_le_compat s n;
if a >= 0 then Int.sign_bit_positive a else Int.sign_bit_negative a
#push-options "--z3rlimit 200"
val shift_right_value_aux_2: #n:pos{1 < n} -> a:Int.int_t n ->
Lemma (Int.shift_arithmetic_right #n a 1 = a / 2)
let shift_right_value_aux_2 #n a =
if a >= 0 then
begin
Int.sign_bit_positive a;
UInt.shift_right_value_aux_3 #n a 1
end
else
begin
Int.sign_bit_negative a;
let a1 = Int.to_vec a in
let au = Int.to_uint a in
let sar = Int.shift_arithmetic_right #n a 1 in
let sar1 = Int.to_vec sar in
let sr = UInt.shift_right #n au 1 in
let sr1 = UInt.to_vec sr in
assert (Seq.equal (Seq.slice sar1 1 n) (Seq.slice sr1 1 n));
assert (Seq.equal sar1 (Seq.append (BitVector.ones_vec #1) (Seq.slice sr1 1 n)));
UInt.append_lemma #1 #(n-1) (BitVector.ones_vec #1) (Seq.slice sr1 1 n);
assert (Seq.equal (Seq.slice a1 0 (n-1)) (Seq.slice sar1 1 n));
UInt.slice_left_lemma a1 (n-1);
assert (sar + pow2 n = pow2 (n-1) + (au / 2));
pow2_double_sum (n-1);
assert (sar + pow2 (n-1) = (a + pow2 n) / 2);
pow2_double_mult (n-1);
lemma_div_plus a (pow2 (n-1)) 2;
assert (sar = a / 2)
end
val shift_right_value_aux_3: #n:pos -> a:Int.int_t n -> s:pos{s < n} ->
Lemma (ensures Int.shift_arithmetic_right #n a s = a / pow2 s)
(decreases s)
let rec shift_right_value_aux_3 #n a s =
if s = 1 then
shift_right_value_aux_2 #n a
else
begin
let a1 = Int.to_vec a in
assert (Seq.equal (BitVector.shift_arithmetic_right_vec #n a1 s)
(BitVector.shift_arithmetic_right_vec #n
(BitVector.shift_arithmetic_right_vec #n a1 (s-1)) 1));
assert (Int.shift_arithmetic_right #n a s =
Int.shift_arithmetic_right #n (Int.shift_arithmetic_right #n a (s-1)) 1);
shift_right_value_aux_3 #n a (s-1);
shift_right_value_aux_2 #n (Int.shift_arithmetic_right #n a (s-1));
assert (Int.shift_arithmetic_right #n a s = (a / pow2 (s-1)) / 2);
pow2_double_mult (s-1);
division_multiplication_lemma a (pow2 (s-1)) 2
end
let shift_right_lemma #t #l a b =
match t with
| U1 | U8 | U16 | U32 | U64 | U128 -> ()
| S8 | S16 | S32 | S64 | S128 ->
if v b = 0 then ()
else if v b >= bits t then
shift_right_value_aux_1 #(bits t) (v a) (v b)
else
shift_right_value_aux_3 #(bits t) (v a) (v b)
[@(strict_on_arguments [0])]
let shift_left #t #l a b =
match t with
| U1 -> UInt8.shift_left a b
| U8 -> UInt8.shift_left a b
| U16 -> UInt16.shift_left a b
| U32 -> UInt32.shift_left a b
| U64 -> UInt64.shift_left a b
| U128 -> UInt128.shift_left a b
| S8 -> Int8.shift_left a b
| S16 -> Int16.shift_left a b
| S32 -> Int32.shift_left a b
| S64 -> Int64.shift_left a b
| S128 -> Int128.shift_left a b
#push-options "--max_fuel 1"
let shift_left_lemma #t #l a b = ()
let rotate_right #t #l a b =
logor (shift_right a b) (shift_left a (sub #U32 (size (bits t)) b))
let rotate_left #t #l a b =
logor (shift_left a b) (shift_right a (sub #U32 (size (bits t)) b))
[@(strict_on_arguments [0])]
let ct_abs #t #l a =
match t with
| S8 -> Int8.ct_abs a
| S16 -> Int16.ct_abs a
| S32 -> Int32.ct_abs a
| S64 -> Int64.ct_abs a
#pop-options
[@(strict_on_arguments [0])]
let eq_mask #t a b =
match t with
| U1 -> lognot (logxor a b)
| U8 -> UInt8.eq_mask a b
| U16 -> UInt16.eq_mask a b
| U32 -> UInt32.eq_mask a b
| U64 -> UInt64.eq_mask a b
| U128 -> UInt128.eq_mask a b
| S8 -> Int.Cast.uint8_to_int8 (UInt8.eq_mask (to_u8 a) (to_u8 b))
| S16 -> Int.Cast.uint16_to_int16 (UInt16.eq_mask (to_u16 a) (to_u16 b))
| S32 -> Int.Cast.uint32_to_int32 (UInt32.eq_mask (to_u32 a) (to_u32 b))
| S64 -> Int.Cast.uint64_to_int64 (UInt64.eq_mask (to_u64 a) (to_u64 b))
val eq_mask_lemma_unsigned: #t:inttype{unsigned t} -> a:int_t t SEC -> b:int_t t SEC -> Lemma
(if v a = v b then v (eq_mask a b) == ones_v t
else v (eq_mask a b) == 0)
let eq_mask_lemma_unsigned #t a b =
match t with
| U1 ->
assert_norm (
logxor (u1 0) (u1 0) == u1 0 /\ logxor (u1 0) (u1 1) == u1 1 /\
logxor (u1 1) (u1 0) == u1 1 /\ logxor (u1 1) (u1 1) == u1 0 /\
lognot (u1 1) == u1 0 /\ lognot (u1 0) == u1 1)
| U8 | U16 | U32 | U64 | U128 -> ()
#push-options "--z3rlimit 200"
val eq_mask_lemma_signed: #t:inttype{signed t /\ ~(S128? t)} -> a:int_t t SEC -> b:int_t t SEC -> Lemma
(if v a = v b then v (eq_mask a b) == ones_v t
else v (eq_mask a b) == 0)
let eq_mask_lemma_signed #t a b =
match t with
| S8 ->
begin
assert_norm (pow2 8 = 2 * pow2 7);
if 0 <= v a then modulo_lemma (v a) (pow2 8)
else
begin
modulo_addition_lemma (v a) 1 (pow2 8);
modulo_lemma (v a + pow2 8) (pow2 8)
end
end
| S16 ->
begin
assert_norm (pow2 16 = 2 * pow2 15);
if 0 <= v a then modulo_lemma (v a) (pow2 16)
else
begin
modulo_addition_lemma (v a) 1 (pow2 16);
modulo_lemma (v a + pow2 16) (pow2 16)
end
end
| S32 ->
begin
if 0 <= v a then modulo_lemma (v a) (pow2 32)
else
begin
modulo_addition_lemma (v a) 1 (pow2 32);
modulo_lemma (v a + pow2 32) (pow2 32)
end
end
| S64 ->
begin
if 0 <= v a then modulo_lemma (v a) (pow2 64)
else
begin
modulo_addition_lemma (v a) 1 (pow2 64);
modulo_lemma (v a + pow2 64) (pow2 64)
end
end
#pop-options
let eq_mask_lemma #t a b =
if signed t then eq_mask_lemma_signed a b
else eq_mask_lemma_unsigned a b
let eq_mask_logand_lemma #t a b c =
eq_mask_lemma a b;
logand_zeros c;
logand_ones c;
match t with
| U1 | U8 | U16 | U32 | U64 | U128 -> UInt.logand_commutative #(bits t) (v (eq_mask a b)) (v c)
| S8 | S16 | S32 | S64 -> Int.logand_commutative #(bits t) (v (eq_mask a b)) (v c) | false | false | Lib.IntTypes.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 200,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val neq_mask: #t:inttype{~(S128? t)} -> a:int_t t SEC -> b:int_t t SEC -> int_t t SEC | [] | Lib.IntTypes.neq_mask | {
"file_name": "lib/Lib.IntTypes.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | a: Lib.IntTypes.int_t t Lib.IntTypes.SEC -> b: Lib.IntTypes.int_t t Lib.IntTypes.SEC
-> Lib.IntTypes.int_t t Lib.IntTypes.SEC | {
"end_col": 45,
"end_line": 877,
"start_col": 22,
"start_line": 877
} |
FStar.Pervasives.Lemma | val shift_right_value_aux_1: #n:pos{1 < n} -> a:Int.int_t n -> s:nat{n <= s} ->
Lemma (Int.shift_arithmetic_right #n a s = a / pow2 s) | [
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let shift_right_value_aux_1 #n a s =
pow2_le_compat s n;
if a >= 0 then Int.sign_bit_positive a else Int.sign_bit_negative a | val shift_right_value_aux_1: #n:pos{1 < n} -> a:Int.int_t n -> s:nat{n <= s} ->
Lemma (Int.shift_arithmetic_right #n a s = a / pow2 s)
let shift_right_value_aux_1 #n a s = | false | null | true | pow2_le_compat s n;
if a >= 0 then Int.sign_bit_positive a else Int.sign_bit_negative a | {
"checked_file": "Lib.IntTypes.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt8.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt16.fsti.checked",
"FStar.UInt128.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int8.fsti.checked",
"FStar.Int64.fsti.checked",
"FStar.Int32.fsti.checked",
"FStar.Int16.fsti.checked",
"FStar.Int128.fsti.checked",
"FStar.Int.Cast.Full.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Int.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "Lib.IntTypes.fst"
} | [
"lemma"
] | [
"Prims.pos",
"Prims.b2t",
"Prims.op_LessThan",
"FStar.Int.int_t",
"Prims.nat",
"Prims.op_LessThanOrEqual",
"Prims.op_GreaterThanOrEqual",
"FStar.Int.sign_bit_positive",
"Prims.bool",
"FStar.Int.sign_bit_negative",
"Prims.unit",
"FStar.Math.Lemmas.pow2_le_compat"
] | [] | module Lib.IntTypes
open FStar.Math.Lemmas
#push-options "--max_fuel 0 --max_ifuel 1 --z3rlimit 200"
let pow2_2 _ = assert_norm (pow2 2 = 4)
let pow2_3 _ = assert_norm (pow2 3 = 8)
let pow2_4 _ = assert_norm (pow2 4 = 16)
let pow2_127 _ = assert_norm (pow2 127 = 0x80000000000000000000000000000000)
let bits_numbytes t = ()
let sec_int_t t = pub_int_t t
let sec_int_v #t u = pub_int_v u
let secret #t x = x
[@(strict_on_arguments [0])]
let mk_int #t #l x =
match t with
| U1 -> UInt8.uint_to_t x
| U8 -> UInt8.uint_to_t x
| U16 -> UInt16.uint_to_t x
| U32 -> UInt32.uint_to_t x
| U64 -> UInt64.uint_to_t x
| U128 -> UInt128.uint_to_t x
| S8 -> Int8.int_to_t x
| S16 -> Int16.int_to_t x
| S32 -> Int32.int_to_t x
| S64 -> Int64.int_to_t x
| S128 -> Int128.int_to_t x
val v_extensionality:
#t:inttype
-> #l:secrecy_level
-> a:int_t t l
-> b:int_t t l
-> Lemma
(requires v a == v b)
(ensures a == b)
let v_extensionality #t #l a b =
match t with
| U1 -> ()
| U8 -> UInt8.v_inj a b
| U16 -> UInt16.v_inj a b
| U32 -> UInt32.v_inj a b
| U64 -> UInt64.v_inj a b
| U128 -> UInt128.v_inj a b
| S8 -> Int8.v_inj a b
| S16 -> Int16.v_inj a b
| S32 -> Int32.v_inj a b
| S64 -> Int64.v_inj a b
| S128 -> Int128.v_inj a b
let v_injective #t #l a =
v_extensionality a (mk_int (v a))
let v_mk_int #t #l n = ()
let u128 n = FStar.UInt128.uint64_to_uint128 (u64 n)
// KaRaMeL will extract this to FStar_Int128_int_to_t, which isn't provided
// We'll need to have FStar.Int128.int64_to_int128 to support int128_t literals
let i128 n =
assert_norm (pow2 (bits S64 - 1) <= pow2 (bits S128 - 1));
sint #S128 #SEC n
let size_to_uint32 x = x
let size_to_uint64 x = Int.Cast.uint32_to_uint64 x
let byte_to_uint8 x = x
let byte_to_int8 x = Int.Cast.uint8_to_int8 x
let op_At_Percent = Int.op_At_Percent
// FStar.UInt128 gets special treatment in KaRaMeL. There is no
// equivalent for FStar.Int128 at the moment, so we use the three
// assumed cast operators below.
//
// Using them will fail at runtime with an informative message.
// The commented-out implementations show that they are realizable.
//
// When support for `FStar.Int128` is added KaRaMeL, these casts must
// be added as special cases. When using builtin compiler support for
// `int128_t`, they can be implemented directly as C casts without
// undefined or implementation-defined behaviour.
assume
val uint128_to_int128: a:UInt128.t{v a <= maxint S128} -> b:Int128.t{Int128.v b == UInt128.v a}
//let uint128_to_int128 a = Int128.int_to_t (v a)
assume
val int128_to_uint128: a:Int128.t -> b:UInt128.t{UInt128.v b == Int128.v a % pow2 128}
//let int128_to_uint128 a = mk_int (v a % pow2 128)
assume
val int64_to_int128: a:Int64.t -> b:Int128.t{Int128.v b == Int64.v a}
//let int64_to_int128 a = Int128.int_to_t (v a)
val uint64_to_int128: a:UInt64.t -> b:Int128.t{Int128.v b == UInt64.v a}
let uint64_to_int128 a = uint128_to_int128 (Int.Cast.Full.uint64_to_uint128 a)
val int64_to_uint128: a:Int64.t -> b:UInt128.t{UInt128.v b == Int64.v a % pow2 128}
let int64_to_uint128 a = int128_to_uint128 (int64_to_int128 a)
val int128_to_uint64: a:Int128.t -> b:UInt64.t{UInt64.v b == Int128.v a % pow2 64}
let int128_to_uint64 a = Int.Cast.Full.uint128_to_uint64 (int128_to_uint128 a)
#push-options "--z3rlimit 1000"
[@(strict_on_arguments [0;2])]
let cast #t #l t' l' u =
assert_norm (pow2 8 = 2 * pow2 7);
assert_norm (pow2 16 = 2 * pow2 15);
assert_norm (pow2 64 * pow2 64 = pow2 128);
assert_norm (pow2 16 * pow2 48 = pow2 64);
assert_norm (pow2 8 * pow2 56 = pow2 64);
assert_norm (pow2 32 * pow2 32 = pow2 64);
modulo_modulo_lemma (v u) (pow2 32) (pow2 32);
modulo_modulo_lemma (v u) (pow2 64) (pow2 64);
modulo_modulo_lemma (v u) (pow2 128) (pow2 64);
modulo_modulo_lemma (v u) (pow2 16) (pow2 48);
modulo_modulo_lemma (v u) (pow2 8) (pow2 56);
let open FStar.Int.Cast in
let open FStar.Int.Cast.Full in
match t, t' with
| U1, U1 -> u
| U1, U8 -> u
| U1, U16 -> uint8_to_uint16 u
| U1, U32 -> uint8_to_uint32 u
| U1, U64 -> uint8_to_uint64 u
| U1, U128 -> UInt128.uint64_to_uint128 (uint8_to_uint64 u)
| U1, S8 -> uint8_to_int8 u
| U1, S16 -> uint8_to_int16 u
| U1, S32 -> uint8_to_int32 u
| U1, S64 -> uint8_to_int64 u
| U1, S128 -> uint64_to_int128 (uint8_to_uint64 u)
| U8, U1 -> UInt8.rem u 2uy
| U8, U8 -> u
| U8, U16 -> uint8_to_uint16 u
| U8, U32 -> uint8_to_uint32 u
| U8, U64 -> uint8_to_uint64 u
| U8, U128 -> UInt128.uint64_to_uint128 (uint8_to_uint64 u)
| U8, S8 -> uint8_to_int8 u
| U8, S16 -> uint8_to_int16 u
| U8, S32 -> uint8_to_int32 u
| U8, S64 -> uint8_to_int64 u
| U8, S128 -> uint64_to_int128 (uint8_to_uint64 u)
| U16, U1 -> UInt8.rem (uint16_to_uint8 u) 2uy
| U16, U8 -> uint16_to_uint8 u
| U16, U16 -> u
| U16, U32 -> uint16_to_uint32 u
| U16, U64 -> uint16_to_uint64 u
| U16, U128 -> UInt128.uint64_to_uint128 (uint16_to_uint64 u)
| U16, S8 -> uint16_to_int8 u
| U16, S16 -> uint16_to_int16 u
| U16, S32 -> uint16_to_int32 u
| U16, S64 -> uint16_to_int64 u
| U16, S128 -> uint64_to_int128 (uint16_to_uint64 u)
| U32, U1 -> UInt8.rem (uint32_to_uint8 u) 2uy
| U32, U8 -> uint32_to_uint8 u
| U32, U16 -> uint32_to_uint16 u
| U32, U32 -> u
| U32, U64 -> uint32_to_uint64 u
| U32, U128 -> UInt128.uint64_to_uint128 (uint32_to_uint64 u)
| U32, S8 -> uint32_to_int8 u
| U32, S16 -> uint32_to_int16 u
| U32, S32 -> uint32_to_int32 u
| U32, S64 -> uint32_to_int64 u
| U32, S128 -> uint64_to_int128 (uint32_to_uint64 u)
| U64, U1 -> UInt8.rem (uint64_to_uint8 u) 2uy
| U64, U8 -> uint64_to_uint8 u
| U64, U16 -> uint64_to_uint16 u
| U64, U32 -> uint64_to_uint32 u
| U64, U64 -> u
| U64, U128 -> UInt128.uint64_to_uint128 u
| U64, S8 -> uint64_to_int8 u
| U64, S16 -> uint64_to_int16 u
| U64, S32 -> uint64_to_int32 u
| U64, S64 -> uint64_to_int64 u
| U64, S128 -> uint64_to_int128 u
| U128, U1 -> UInt8.rem (uint64_to_uint8 (uint128_to_uint64 u)) 2uy
| U128, U8 -> uint64_to_uint8 (UInt128.uint128_to_uint64 u)
| U128, U16 -> uint64_to_uint16 (UInt128.uint128_to_uint64 u)
| U128, U32 -> uint64_to_uint32 (UInt128.uint128_to_uint64 u)
| U128, U64 -> UInt128.uint128_to_uint64 u
| U128, U128 -> u
| U128, S8 -> uint64_to_int8 (UInt128.uint128_to_uint64 u)
| U128, S16 -> uint64_to_int16 (UInt128.uint128_to_uint64 u)
| U128, S32 -> uint64_to_int32 (UInt128.uint128_to_uint64 u)
| U128, S64 -> uint64_to_int64 (UInt128.uint128_to_uint64 u)
| U128, S128 -> uint128_to_int128 u
| S8, U1 -> UInt8.rem (int8_to_uint8 u) 2uy
| S8, U8 -> int8_to_uint8 u
| S8, U16 -> int8_to_uint16 u
| S8, U32 -> int8_to_uint32 u
| S8, U64 -> int8_to_uint64 u
| S8, U128 -> int64_to_uint128 (int8_to_int64 u)
| S8, S8 -> u
| S8, S16 -> int8_to_int16 u
| S8, S32 -> int8_to_int32 u
| S8, S64 -> int8_to_int64 u
| S8, S128 -> int64_to_int128 (int8_to_int64 u)
| S16, U1 -> UInt8.rem (int16_to_uint8 u) 2uy
| S16, U8 -> int16_to_uint8 u
| S16, U16 -> int16_to_uint16 u
| S16, U32 -> int16_to_uint32 u
| S16, U64 -> int16_to_uint64 u
| S16, U128 -> int64_to_uint128 (int16_to_int64 u)
| S16, S8 -> int16_to_int8 u
| S16, S16 -> u
| S16, S32 -> int16_to_int32 u
| S16, S64 -> int16_to_int64 u
| S16, S128 -> int64_to_int128 (int16_to_int64 u)
| S32, U1 -> UInt8.rem (int32_to_uint8 u) 2uy
| S32, U8 -> int32_to_uint8 u
| S32, U16 -> int32_to_uint16 u
| S32, U32 -> int32_to_uint32 u
| S32, U64 -> int32_to_uint64 u
| S32, U128 -> int64_to_uint128 (int32_to_int64 u)
| S32, S8 -> int32_to_int8 u
| S32, S16 -> int32_to_int16 u
| S32, S32 -> u
| S32, S64 -> int32_to_int64 u
| S32, S128 -> int64_to_int128 (int32_to_int64 u)
| S64, U1 -> UInt8.rem (int64_to_uint8 u) 2uy
| S64, U8 -> int64_to_uint8 u
| S64, U16 -> int64_to_uint16 u
| S64, U32 -> int64_to_uint32 u
| S64, U64 -> int64_to_uint64 u
| S64, U128 -> int64_to_uint128 u
| S64, S8 -> int64_to_int8 u
| S64, S16 -> int64_to_int16 u
| S64, S32 -> int64_to_int32 u
| S64, S64 -> u
| S64, S128 -> int64_to_int128 u
| S128, U1 -> UInt8.rem (uint64_to_uint8 (int128_to_uint64 u)) 2uy
| S128, U8 -> uint64_to_uint8 (int128_to_uint64 u)
| S128, U16 -> uint64_to_uint16 (int128_to_uint64 u)
| S128, U32 -> uint64_to_uint32 (int128_to_uint64 u)
| S128, U64 -> int128_to_uint64 u
| S128, U128 -> int128_to_uint128 u
| S128, S8 -> uint64_to_int8 (int128_to_uint64 u)
| S128, S16 -> uint64_to_int16 (int128_to_uint64 u)
| S128, S32 -> uint64_to_int32 (int128_to_uint64 u)
| S128, S64 -> uint64_to_int64 (int128_to_uint64 u)
| S128, S128 -> u
#pop-options
[@(strict_on_arguments [0])]
let ones t l =
match t with
| U1 -> 0x1uy
| U8 -> 0xFFuy
| U16 -> 0xFFFFus
| U32 -> 0xFFFFFFFFul
| U64 -> 0xFFFFFFFFFFFFFFFFuL
| U128 ->
let x = UInt128.uint64_to_uint128 0xFFFFFFFFFFFFFFFFuL in
let y = (UInt128.shift_left x 64ul) `UInt128.add` x in
assert_norm (UInt128.v y == pow2 128 - 1);
y
| _ -> mk_int (-1)
let zeros t l = mk_int 0
[@(strict_on_arguments [0])]
let add_mod #t #l a b =
match t with
| U1 -> UInt8.rem (UInt8.add_mod a b) 2uy
| U8 -> UInt8.add_mod a b
| U16 -> UInt16.add_mod a b
| U32 -> UInt32.add_mod a b
| U64 -> UInt64.add_mod a b
| U128 -> UInt128.add_mod a b
let add_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let add #t #l a b =
match t with
| U1 -> UInt8.add a b
| U8 -> UInt8.add a b
| U16 -> UInt16.add a b
| U32 -> UInt32.add a b
| U64 -> UInt64.add a b
| U128 -> UInt128.add a b
| S8 -> Int8.add a b
| S16 -> Int16.add a b
| S32 -> Int32.add a b
| S64 -> Int64.add a b
| S128 -> Int128.add a b
let add_lemma #t #l a b = ()
#push-options "--max_fuel 1"
[@(strict_on_arguments [0])]
let incr #t #l a =
match t with
| U1 -> UInt8.add a 1uy
| U8 -> UInt8.add a 1uy
| U16 -> UInt16.add a 1us
| U32 -> UInt32.add a 1ul
| U64 -> UInt64.add a 1uL
| U128 -> UInt128.add a (UInt128.uint_to_t 1)
| S8 -> Int8.add a 1y
| S16 -> Int16.add a 1s
| S32 -> Int32.add a 1l
| S64 -> Int64.add a 1L
| S128 -> Int128.add a (Int128.int_to_t 1)
let incr_lemma #t #l a = ()
#pop-options
[@(strict_on_arguments [0])]
let mul_mod #t #l a b =
match t with
| U1 -> UInt8.mul_mod a b
| U8 -> UInt8.mul_mod a b
| U16 -> UInt16.mul_mod a b
| U32 -> UInt32.mul_mod a b
| U64 -> UInt64.mul_mod a b
let mul_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let mul #t #l a b =
match t with
| U1 -> UInt8.mul a b
| U8 -> UInt8.mul a b
| U16 -> UInt16.mul a b
| U32 -> UInt32.mul a b
| U64 -> UInt64.mul a b
| S8 -> Int8.mul a b
| S16 -> Int16.mul a b
| S32 -> Int32.mul a b
| S64 -> Int64.mul a b
let mul_lemma #t #l a b = ()
let mul64_wide a b = UInt128.mul_wide a b
let mul64_wide_lemma a b = ()
let mul_s64_wide a b = Int128.mul_wide a b
let mul_s64_wide_lemma a b = ()
[@(strict_on_arguments [0])]
let sub_mod #t #l a b =
match t with
| U1 -> UInt8.rem (UInt8.sub_mod a b) 2uy
| U8 -> UInt8.sub_mod a b
| U16 -> UInt16.sub_mod a b
| U32 -> UInt32.sub_mod a b
| U64 -> UInt64.sub_mod a b
| U128 -> UInt128.sub_mod a b
let sub_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let sub #t #l a b =
match t with
| U1 -> UInt8.sub a b
| U8 -> UInt8.sub a b
| U16 -> UInt16.sub a b
| U32 -> UInt32.sub a b
| U64 -> UInt64.sub a b
| U128 -> UInt128.sub a b
| S8 -> Int8.sub a b
| S16 -> Int16.sub a b
| S32 -> Int32.sub a b
| S64 -> Int64.sub a b
| S128 -> Int128.sub a b
let sub_lemma #t #l a b = ()
#push-options "--max_fuel 1"
[@(strict_on_arguments [0])]
let decr #t #l a =
match t with
| U1 -> UInt8.sub a 1uy
| U8 -> UInt8.sub a 1uy
| U16 -> UInt16.sub a 1us
| U32 -> UInt32.sub a 1ul
| U64 -> UInt64.sub a 1uL
| U128 -> UInt128.sub a (UInt128.uint_to_t 1)
| S8 -> Int8.sub a 1y
| S16 -> Int16.sub a 1s
| S32 -> Int32.sub a 1l
| S64 -> Int64.sub a 1L
| S128 -> Int128.sub a (Int128.int_to_t 1)
let decr_lemma #t #l a = ()
#pop-options
[@(strict_on_arguments [0])]
let logxor #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logxor 0uy 0uy == 0uy);
assert_norm (UInt8.logxor 0uy 1uy == 1uy);
assert_norm (UInt8.logxor 1uy 0uy == 1uy);
assert_norm (UInt8.logxor 1uy 1uy == 0uy);
UInt8.logxor a b
| U8 -> UInt8.logxor a b
| U16 -> UInt16.logxor a b
| U32 -> UInt32.logxor a b
| U64 -> UInt64.logxor a b
| U128 -> UInt128.logxor a b
| S8 -> Int8.logxor a b
| S16 -> Int16.logxor a b
| S32 -> Int32.logxor a b
| S64 -> Int64.logxor a b
| S128 -> Int128.logxor a b
#push-options "--max_fuel 1"
val logxor_lemma_: #t:inttype -> #l:secrecy_level -> a:int_t t l -> b:int_t t l -> Lemma
(v (a `logxor` (a `logxor` b)) == v b)
let logxor_lemma_ #t #l a b =
match t with
| U1 | U8 | U16 | U32 | U64 | U128 ->
UInt.logxor_associative #(bits t) (v a) (v a) (v b);
UInt.logxor_self #(bits t) (v a);
UInt.logxor_commutative #(bits t) 0 (v b);
UInt.logxor_lemma_1 #(bits t) (v b)
| S8 | S16 | S32 | S64 | S128 ->
Int.logxor_associative #(bits t) (v a) (v a) (v b);
Int.logxor_self #(bits t) (v a);
Int.logxor_commutative #(bits t) 0 (v b);
Int.logxor_lemma_1 #(bits t) (v b)
let logxor_lemma #t #l a b =
logxor_lemma_ #t a b;
v_extensionality (logxor a (logxor a b)) b;
begin
match t with
| U1 | U8 | U16 | U32 | U64 | U128 -> UInt.logxor_commutative #(bits t) (v a) (v b)
| S8 | S16 | S32 | S64 | S128 -> Int.logxor_commutative #(bits t) (v a) (v b)
end;
v_extensionality (logxor a (logxor b a)) b;
begin
match t with
| U1 | U8 | U16 | U32 | U64 | U128 -> UInt.logxor_lemma_1 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.logxor_lemma_1 #(bits t) (v a)
end;
v_extensionality (logxor a (mk_int #t #l 0)) a
let logxor_lemma1 #t #l a b =
match v a, v b with
| _, 0 ->
UInt.logxor_lemma_1 #(bits t) (v a)
| 0, _ ->
UInt.logxor_commutative #(bits t) (v a) (v b);
UInt.logxor_lemma_1 #(bits t) (v b)
| 1, 1 ->
v_extensionality a b;
UInt.logxor_self #(bits t) (v a)
let logxor_spec #t #l a b =
match t with
| U1 ->
assert_norm (u1 0 `logxor` u1 0 == u1 0 /\ u1 0 `logxor` u1 1 == u1 1);
assert_norm (u1 1 `logxor` u1 0 == u1 1 /\ u1 1 `logxor` u1 1 == u1 0);
assert_norm (0 `logxor_v #U1` 0 == 0 /\ 0 `logxor_v #U1` 1 == 1);
assert_norm (1 `logxor_v #U1` 0 == 1 /\ 1 `logxor_v #U1` 1 == 0)
| _ -> ()
#pop-options
[@(strict_on_arguments [0])]
let logand #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logand 0uy 0uy == 0uy);
assert_norm (UInt8.logand 0uy 1uy == 0uy);
assert_norm (UInt8.logand 1uy 0uy == 0uy);
assert_norm (UInt8.logand 1uy 1uy == 1uy);
UInt8.logand a b
| U8 -> UInt8.logand a b
| U16 -> UInt16.logand a b
| U32 -> UInt32.logand a b
| U64 -> UInt64.logand a b
| U128 -> UInt128.logand a b
| S8 -> Int8.logand a b
| S16 -> Int16.logand a b
| S32 -> Int32.logand a b
| S64 -> Int64.logand a b
| S128 -> Int128.logand a b
let logand_zeros #t #l a =
match t with
| U1 -> assert_norm (u1 0 `logand` zeros U1 l == u1 0 /\ u1 1 `logand` zeros U1 l == u1 0)
| U8 | U16 | U32 | U64 | U128 -> UInt.logand_lemma_1 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.logand_lemma_1 #(bits t) (v a)
let logand_ones #t #l a =
match t with
| U1 -> assert_norm (u1 0 `logand` ones U1 l == u1 0 /\ u1 1 `logand` ones U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logand_lemma_2 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.logand_lemma_2 #(bits t) (v a)
let logand_lemma #t #l a b =
logand_zeros #t #l b;
logand_ones #t #l b;
match t with
| U1 ->
assert_norm (u1 0 `logand` zeros U1 l == u1 0 /\ u1 1 `logand` zeros U1 l == u1 0);
assert_norm (u1 0 `logand` ones U1 l == u1 0 /\ u1 1 `logand` ones U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logand_commutative #(bits t) (v a) (v b)
| S8 | S16 | S32 | S64 | S128 -> Int.logand_commutative #(bits t) (v a) (v b)
let logand_spec #t #l a b =
match t with
| U1 ->
assert_norm (u1 0 `logand` u1 0 == u1 0 /\ u1 0 `logand` u1 1 == u1 0);
assert_norm (u1 1 `logand` u1 0 == u1 0 /\ u1 1 `logand` u1 1 == u1 1);
assert_norm (0 `logand_v #U1` 0 == 0 /\ 0 `logand_v #U1` 1 == 0);
assert_norm (1 `logand_v #U1` 0 == 0 /\ 1 `logand_v #U1` 1 == 1)
| _ -> ()
let logand_le #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logand 0uy 0uy == 0uy);
assert_norm (UInt8.logand 0uy 1uy == 0uy);
assert_norm (UInt8.logand 1uy 0uy == 0uy);
assert_norm (UInt8.logand 1uy 1uy == 1uy)
| U8 -> UInt.logand_le (UInt.to_uint_t 8 (v a)) (UInt.to_uint_t 8 (v b))
| U16 -> UInt.logand_le (UInt.to_uint_t 16 (v a)) (UInt.to_uint_t 16 (v b))
| U32 -> UInt.logand_le (UInt.to_uint_t 32 (v a)) (UInt.to_uint_t 32 (v b))
| U64 -> UInt.logand_le (UInt.to_uint_t 64 (v a)) (UInt.to_uint_t 64 (v b))
| U128 -> UInt.logand_le (UInt.to_uint_t 128 (v a)) (UInt.to_uint_t 128 (v b))
let logand_mask #t #l a b m =
match t with
| U1 ->
assert_norm (UInt8.logand 0uy 0uy == 0uy);
assert_norm (UInt8.logand 0uy 1uy == 0uy);
assert_norm (UInt8.logand 1uy 0uy == 0uy);
assert_norm (UInt8.logand 1uy 1uy == 1uy)
| U8 -> UInt.logand_mask (UInt.to_uint_t 8 (v a)) m
| U16 -> UInt.logand_mask (UInt.to_uint_t 16 (v a)) m
| U32 -> UInt.logand_mask (UInt.to_uint_t 32 (v a)) m
| U64 -> UInt.logand_mask (UInt.to_uint_t 64 (v a)) m
| U128 -> UInt.logand_mask (UInt.to_uint_t 128 (v a)) m
[@(strict_on_arguments [0])]
let logor #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logor 0uy 0uy == 0uy);
assert_norm (UInt8.logor 0uy 1uy == 1uy);
assert_norm (UInt8.logor 1uy 0uy == 1uy);
assert_norm (UInt8.logor 1uy 1uy == 1uy);
UInt8.logor a b
| U8 -> UInt8.logor a b
| U16 -> UInt16.logor a b
| U32 -> UInt32.logor a b
| U64 -> UInt64.logor a b
| U128 -> UInt128.logor a b
| S8 -> Int8.logor a b
| S16 -> Int16.logor a b
| S32 -> Int32.logor a b
| S64 -> Int64.logor a b
| S128 -> Int128.logor a b
#push-options "--max_fuel 1"
let logor_disjoint #t #l a b m =
if m > 0 then
begin
UInt.logor_disjoint #(bits t) (v b) (v a) m;
UInt.logor_commutative #(bits t) (v b) (v a)
end
else
begin
UInt.logor_commutative #(bits t) (v a) (v b);
UInt.logor_lemma_1 #(bits t) (v b)
end
#pop-options
let logor_zeros #t #l a =
match t with
|U1 -> assert_norm(u1 0 `logor` zeros U1 l == u1 0 /\ u1 1 `logor` zeros U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logor_lemma_1 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.nth_lemma #(bits t) (Int.logor #(bits t) (v a) (Int.zero (bits t))) (v a)
let logor_ones #t #l a =
match t with
|U1 -> assert_norm(u1 0 `logor` ones U1 l == u1 1 /\ u1 1 `logor` ones U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logor_lemma_2 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.nth_lemma (Int.logor #(bits t) (v a) (Int.ones (bits t))) (Int.ones (bits t))
let logor_lemma #t #l a b =
logor_zeros #t #l b;
logor_ones #t #l b;
match t with
| U1 ->
assert_norm(u1 0 `logor` ones U1 l == u1 1 /\ u1 1 `logor` ones U1 l == u1 1);
assert_norm(u1 0 `logor` zeros U1 l == u1 0 /\ u1 1 `logor` zeros U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logor_commutative #(bits t) (v a) (v b)
| S8 | S16 | S32 | S64 | S128 -> Int.nth_lemma #(bits t) (Int.logor #(bits t) (v a) (v b)) (Int.logor #(bits t) (v b) (v a))
let logor_spec #t #l a b =
match t with
| U1 ->
assert_norm(u1 0 `logor` ones U1 l == u1 1 /\ u1 1 `logor` ones U1 l == u1 1);
assert_norm(u1 0 `logor` zeros U1 l == u1 0 /\ u1 1 `logor` zeros U1 l == u1 1);
assert_norm (0 `logor_v #U1` 0 == 0 /\ 0 `logor_v #U1` 1 == 1);
assert_norm (1 `logor_v #U1` 0 == 1 /\ 1 `logor_v #U1` 1 == 1)
| _ -> ()
[@(strict_on_arguments [0])]
let lognot #t #l a =
match t with
| U1 -> UInt8.rem (UInt8.lognot a) 2uy
| U8 -> UInt8.lognot a
| U16 -> UInt16.lognot a
| U32 -> UInt32.lognot a
| U64 -> UInt64.lognot a
| U128 -> UInt128.lognot a
| S8 -> Int8.lognot a
| S16 -> Int16.lognot a
| S32 -> Int32.lognot a
| S64 -> Int64.lognot a
| S128 -> Int128.lognot a
let lognot_lemma #t #l a =
match t with
|U1 -> assert_norm(lognot (u1 0) == u1 1 /\ lognot (u1 1) == u1 0)
| U8 | U16 | U32 | U64 | U128 ->
FStar.UInt.lognot_lemma_1 #(bits t);
UInt.nth_lemma (FStar.UInt.lognot #(bits t) (UInt.ones (bits t))) (UInt.zero (bits t))
| S8 | S16 | S32 | S64 | S128 ->
Int.nth_lemma (FStar.Int.lognot #(bits t) (Int.zero (bits t))) (Int.ones (bits t));
Int.nth_lemma (FStar.Int.lognot #(bits t) (Int.ones (bits t))) (Int.zero (bits t))
let lognot_spec #t #l a =
match t with
| U1 ->
assert_norm(lognot (u1 0) == u1 1 /\ lognot (u1 1) == u1 0);
assert_norm(lognot_v #U1 0 == 1 /\ lognot_v #U1 1 == 0)
| _ -> ()
[@(strict_on_arguments [0])]
let shift_right #t #l a b =
match t with
| U1 -> UInt8.shift_right a b
| U8 -> UInt8.shift_right a b
| U16 -> UInt16.shift_right a b
| U32 -> UInt32.shift_right a b
| U64 -> UInt64.shift_right a b
| U128 -> UInt128.shift_right a b
| S8 -> Int8.shift_arithmetic_right a b
| S16 -> Int16.shift_arithmetic_right a b
| S32 -> Int32.shift_arithmetic_right a b
| S64 -> Int64.shift_arithmetic_right a b
| S128 -> Int128.shift_arithmetic_right a b
val shift_right_value_aux_1: #n:pos{1 < n} -> a:Int.int_t n -> s:nat{n <= s} ->
Lemma (Int.shift_arithmetic_right #n a s = a / pow2 s) | false | false | Lib.IntTypes.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 200,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val shift_right_value_aux_1: #n:pos{1 < n} -> a:Int.int_t n -> s:nat{n <= s} ->
Lemma (Int.shift_arithmetic_right #n a s = a / pow2 s) | [] | Lib.IntTypes.shift_right_value_aux_1 | {
"file_name": "lib/Lib.IntTypes.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | a: FStar.Int.int_t n -> s: Prims.nat{n <= s}
-> FStar.Pervasives.Lemma (ensures FStar.Int.shift_arithmetic_right a s = a / Prims.pow2 s) | {
"end_col": 69,
"end_line": 690,
"start_col": 2,
"start_line": 689
} |
Prims.Tot | val lt_mask: #t:inttype{unsigned t} -> int_t t SEC -> int_t t SEC -> int_t t SEC | [
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let lt_mask #t a b = lognot (gte_mask a b) | val lt_mask: #t:inttype{unsigned t} -> int_t t SEC -> int_t t SEC -> int_t t SEC
let lt_mask #t a b = | false | null | false | lognot (gte_mask a b) | {
"checked_file": "Lib.IntTypes.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt8.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt16.fsti.checked",
"FStar.UInt128.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int8.fsti.checked",
"FStar.Int64.fsti.checked",
"FStar.Int32.fsti.checked",
"FStar.Int16.fsti.checked",
"FStar.Int128.fsti.checked",
"FStar.Int.Cast.Full.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Int.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "Lib.IntTypes.fst"
} | [
"total"
] | [
"Lib.IntTypes.inttype",
"Prims.b2t",
"Lib.IntTypes.unsigned",
"Lib.IntTypes.int_t",
"Lib.IntTypes.SEC",
"Lib.IntTypes.lognot",
"Lib.IntTypes.gte_mask"
] | [] | module Lib.IntTypes
open FStar.Math.Lemmas
#push-options "--max_fuel 0 --max_ifuel 1 --z3rlimit 200"
let pow2_2 _ = assert_norm (pow2 2 = 4)
let pow2_3 _ = assert_norm (pow2 3 = 8)
let pow2_4 _ = assert_norm (pow2 4 = 16)
let pow2_127 _ = assert_norm (pow2 127 = 0x80000000000000000000000000000000)
let bits_numbytes t = ()
let sec_int_t t = pub_int_t t
let sec_int_v #t u = pub_int_v u
let secret #t x = x
[@(strict_on_arguments [0])]
let mk_int #t #l x =
match t with
| U1 -> UInt8.uint_to_t x
| U8 -> UInt8.uint_to_t x
| U16 -> UInt16.uint_to_t x
| U32 -> UInt32.uint_to_t x
| U64 -> UInt64.uint_to_t x
| U128 -> UInt128.uint_to_t x
| S8 -> Int8.int_to_t x
| S16 -> Int16.int_to_t x
| S32 -> Int32.int_to_t x
| S64 -> Int64.int_to_t x
| S128 -> Int128.int_to_t x
val v_extensionality:
#t:inttype
-> #l:secrecy_level
-> a:int_t t l
-> b:int_t t l
-> Lemma
(requires v a == v b)
(ensures a == b)
let v_extensionality #t #l a b =
match t with
| U1 -> ()
| U8 -> UInt8.v_inj a b
| U16 -> UInt16.v_inj a b
| U32 -> UInt32.v_inj a b
| U64 -> UInt64.v_inj a b
| U128 -> UInt128.v_inj a b
| S8 -> Int8.v_inj a b
| S16 -> Int16.v_inj a b
| S32 -> Int32.v_inj a b
| S64 -> Int64.v_inj a b
| S128 -> Int128.v_inj a b
let v_injective #t #l a =
v_extensionality a (mk_int (v a))
let v_mk_int #t #l n = ()
let u128 n = FStar.UInt128.uint64_to_uint128 (u64 n)
// KaRaMeL will extract this to FStar_Int128_int_to_t, which isn't provided
// We'll need to have FStar.Int128.int64_to_int128 to support int128_t literals
let i128 n =
assert_norm (pow2 (bits S64 - 1) <= pow2 (bits S128 - 1));
sint #S128 #SEC n
let size_to_uint32 x = x
let size_to_uint64 x = Int.Cast.uint32_to_uint64 x
let byte_to_uint8 x = x
let byte_to_int8 x = Int.Cast.uint8_to_int8 x
let op_At_Percent = Int.op_At_Percent
// FStar.UInt128 gets special treatment in KaRaMeL. There is no
// equivalent for FStar.Int128 at the moment, so we use the three
// assumed cast operators below.
//
// Using them will fail at runtime with an informative message.
// The commented-out implementations show that they are realizable.
//
// When support for `FStar.Int128` is added KaRaMeL, these casts must
// be added as special cases. When using builtin compiler support for
// `int128_t`, they can be implemented directly as C casts without
// undefined or implementation-defined behaviour.
assume
val uint128_to_int128: a:UInt128.t{v a <= maxint S128} -> b:Int128.t{Int128.v b == UInt128.v a}
//let uint128_to_int128 a = Int128.int_to_t (v a)
assume
val int128_to_uint128: a:Int128.t -> b:UInt128.t{UInt128.v b == Int128.v a % pow2 128}
//let int128_to_uint128 a = mk_int (v a % pow2 128)
assume
val int64_to_int128: a:Int64.t -> b:Int128.t{Int128.v b == Int64.v a}
//let int64_to_int128 a = Int128.int_to_t (v a)
val uint64_to_int128: a:UInt64.t -> b:Int128.t{Int128.v b == UInt64.v a}
let uint64_to_int128 a = uint128_to_int128 (Int.Cast.Full.uint64_to_uint128 a)
val int64_to_uint128: a:Int64.t -> b:UInt128.t{UInt128.v b == Int64.v a % pow2 128}
let int64_to_uint128 a = int128_to_uint128 (int64_to_int128 a)
val int128_to_uint64: a:Int128.t -> b:UInt64.t{UInt64.v b == Int128.v a % pow2 64}
let int128_to_uint64 a = Int.Cast.Full.uint128_to_uint64 (int128_to_uint128 a)
#push-options "--z3rlimit 1000"
[@(strict_on_arguments [0;2])]
let cast #t #l t' l' u =
assert_norm (pow2 8 = 2 * pow2 7);
assert_norm (pow2 16 = 2 * pow2 15);
assert_norm (pow2 64 * pow2 64 = pow2 128);
assert_norm (pow2 16 * pow2 48 = pow2 64);
assert_norm (pow2 8 * pow2 56 = pow2 64);
assert_norm (pow2 32 * pow2 32 = pow2 64);
modulo_modulo_lemma (v u) (pow2 32) (pow2 32);
modulo_modulo_lemma (v u) (pow2 64) (pow2 64);
modulo_modulo_lemma (v u) (pow2 128) (pow2 64);
modulo_modulo_lemma (v u) (pow2 16) (pow2 48);
modulo_modulo_lemma (v u) (pow2 8) (pow2 56);
let open FStar.Int.Cast in
let open FStar.Int.Cast.Full in
match t, t' with
| U1, U1 -> u
| U1, U8 -> u
| U1, U16 -> uint8_to_uint16 u
| U1, U32 -> uint8_to_uint32 u
| U1, U64 -> uint8_to_uint64 u
| U1, U128 -> UInt128.uint64_to_uint128 (uint8_to_uint64 u)
| U1, S8 -> uint8_to_int8 u
| U1, S16 -> uint8_to_int16 u
| U1, S32 -> uint8_to_int32 u
| U1, S64 -> uint8_to_int64 u
| U1, S128 -> uint64_to_int128 (uint8_to_uint64 u)
| U8, U1 -> UInt8.rem u 2uy
| U8, U8 -> u
| U8, U16 -> uint8_to_uint16 u
| U8, U32 -> uint8_to_uint32 u
| U8, U64 -> uint8_to_uint64 u
| U8, U128 -> UInt128.uint64_to_uint128 (uint8_to_uint64 u)
| U8, S8 -> uint8_to_int8 u
| U8, S16 -> uint8_to_int16 u
| U8, S32 -> uint8_to_int32 u
| U8, S64 -> uint8_to_int64 u
| U8, S128 -> uint64_to_int128 (uint8_to_uint64 u)
| U16, U1 -> UInt8.rem (uint16_to_uint8 u) 2uy
| U16, U8 -> uint16_to_uint8 u
| U16, U16 -> u
| U16, U32 -> uint16_to_uint32 u
| U16, U64 -> uint16_to_uint64 u
| U16, U128 -> UInt128.uint64_to_uint128 (uint16_to_uint64 u)
| U16, S8 -> uint16_to_int8 u
| U16, S16 -> uint16_to_int16 u
| U16, S32 -> uint16_to_int32 u
| U16, S64 -> uint16_to_int64 u
| U16, S128 -> uint64_to_int128 (uint16_to_uint64 u)
| U32, U1 -> UInt8.rem (uint32_to_uint8 u) 2uy
| U32, U8 -> uint32_to_uint8 u
| U32, U16 -> uint32_to_uint16 u
| U32, U32 -> u
| U32, U64 -> uint32_to_uint64 u
| U32, U128 -> UInt128.uint64_to_uint128 (uint32_to_uint64 u)
| U32, S8 -> uint32_to_int8 u
| U32, S16 -> uint32_to_int16 u
| U32, S32 -> uint32_to_int32 u
| U32, S64 -> uint32_to_int64 u
| U32, S128 -> uint64_to_int128 (uint32_to_uint64 u)
| U64, U1 -> UInt8.rem (uint64_to_uint8 u) 2uy
| U64, U8 -> uint64_to_uint8 u
| U64, U16 -> uint64_to_uint16 u
| U64, U32 -> uint64_to_uint32 u
| U64, U64 -> u
| U64, U128 -> UInt128.uint64_to_uint128 u
| U64, S8 -> uint64_to_int8 u
| U64, S16 -> uint64_to_int16 u
| U64, S32 -> uint64_to_int32 u
| U64, S64 -> uint64_to_int64 u
| U64, S128 -> uint64_to_int128 u
| U128, U1 -> UInt8.rem (uint64_to_uint8 (uint128_to_uint64 u)) 2uy
| U128, U8 -> uint64_to_uint8 (UInt128.uint128_to_uint64 u)
| U128, U16 -> uint64_to_uint16 (UInt128.uint128_to_uint64 u)
| U128, U32 -> uint64_to_uint32 (UInt128.uint128_to_uint64 u)
| U128, U64 -> UInt128.uint128_to_uint64 u
| U128, U128 -> u
| U128, S8 -> uint64_to_int8 (UInt128.uint128_to_uint64 u)
| U128, S16 -> uint64_to_int16 (UInt128.uint128_to_uint64 u)
| U128, S32 -> uint64_to_int32 (UInt128.uint128_to_uint64 u)
| U128, S64 -> uint64_to_int64 (UInt128.uint128_to_uint64 u)
| U128, S128 -> uint128_to_int128 u
| S8, U1 -> UInt8.rem (int8_to_uint8 u) 2uy
| S8, U8 -> int8_to_uint8 u
| S8, U16 -> int8_to_uint16 u
| S8, U32 -> int8_to_uint32 u
| S8, U64 -> int8_to_uint64 u
| S8, U128 -> int64_to_uint128 (int8_to_int64 u)
| S8, S8 -> u
| S8, S16 -> int8_to_int16 u
| S8, S32 -> int8_to_int32 u
| S8, S64 -> int8_to_int64 u
| S8, S128 -> int64_to_int128 (int8_to_int64 u)
| S16, U1 -> UInt8.rem (int16_to_uint8 u) 2uy
| S16, U8 -> int16_to_uint8 u
| S16, U16 -> int16_to_uint16 u
| S16, U32 -> int16_to_uint32 u
| S16, U64 -> int16_to_uint64 u
| S16, U128 -> int64_to_uint128 (int16_to_int64 u)
| S16, S8 -> int16_to_int8 u
| S16, S16 -> u
| S16, S32 -> int16_to_int32 u
| S16, S64 -> int16_to_int64 u
| S16, S128 -> int64_to_int128 (int16_to_int64 u)
| S32, U1 -> UInt8.rem (int32_to_uint8 u) 2uy
| S32, U8 -> int32_to_uint8 u
| S32, U16 -> int32_to_uint16 u
| S32, U32 -> int32_to_uint32 u
| S32, U64 -> int32_to_uint64 u
| S32, U128 -> int64_to_uint128 (int32_to_int64 u)
| S32, S8 -> int32_to_int8 u
| S32, S16 -> int32_to_int16 u
| S32, S32 -> u
| S32, S64 -> int32_to_int64 u
| S32, S128 -> int64_to_int128 (int32_to_int64 u)
| S64, U1 -> UInt8.rem (int64_to_uint8 u) 2uy
| S64, U8 -> int64_to_uint8 u
| S64, U16 -> int64_to_uint16 u
| S64, U32 -> int64_to_uint32 u
| S64, U64 -> int64_to_uint64 u
| S64, U128 -> int64_to_uint128 u
| S64, S8 -> int64_to_int8 u
| S64, S16 -> int64_to_int16 u
| S64, S32 -> int64_to_int32 u
| S64, S64 -> u
| S64, S128 -> int64_to_int128 u
| S128, U1 -> UInt8.rem (uint64_to_uint8 (int128_to_uint64 u)) 2uy
| S128, U8 -> uint64_to_uint8 (int128_to_uint64 u)
| S128, U16 -> uint64_to_uint16 (int128_to_uint64 u)
| S128, U32 -> uint64_to_uint32 (int128_to_uint64 u)
| S128, U64 -> int128_to_uint64 u
| S128, U128 -> int128_to_uint128 u
| S128, S8 -> uint64_to_int8 (int128_to_uint64 u)
| S128, S16 -> uint64_to_int16 (int128_to_uint64 u)
| S128, S32 -> uint64_to_int32 (int128_to_uint64 u)
| S128, S64 -> uint64_to_int64 (int128_to_uint64 u)
| S128, S128 -> u
#pop-options
[@(strict_on_arguments [0])]
let ones t l =
match t with
| U1 -> 0x1uy
| U8 -> 0xFFuy
| U16 -> 0xFFFFus
| U32 -> 0xFFFFFFFFul
| U64 -> 0xFFFFFFFFFFFFFFFFuL
| U128 ->
let x = UInt128.uint64_to_uint128 0xFFFFFFFFFFFFFFFFuL in
let y = (UInt128.shift_left x 64ul) `UInt128.add` x in
assert_norm (UInt128.v y == pow2 128 - 1);
y
| _ -> mk_int (-1)
let zeros t l = mk_int 0
[@(strict_on_arguments [0])]
let add_mod #t #l a b =
match t with
| U1 -> UInt8.rem (UInt8.add_mod a b) 2uy
| U8 -> UInt8.add_mod a b
| U16 -> UInt16.add_mod a b
| U32 -> UInt32.add_mod a b
| U64 -> UInt64.add_mod a b
| U128 -> UInt128.add_mod a b
let add_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let add #t #l a b =
match t with
| U1 -> UInt8.add a b
| U8 -> UInt8.add a b
| U16 -> UInt16.add a b
| U32 -> UInt32.add a b
| U64 -> UInt64.add a b
| U128 -> UInt128.add a b
| S8 -> Int8.add a b
| S16 -> Int16.add a b
| S32 -> Int32.add a b
| S64 -> Int64.add a b
| S128 -> Int128.add a b
let add_lemma #t #l a b = ()
#push-options "--max_fuel 1"
[@(strict_on_arguments [0])]
let incr #t #l a =
match t with
| U1 -> UInt8.add a 1uy
| U8 -> UInt8.add a 1uy
| U16 -> UInt16.add a 1us
| U32 -> UInt32.add a 1ul
| U64 -> UInt64.add a 1uL
| U128 -> UInt128.add a (UInt128.uint_to_t 1)
| S8 -> Int8.add a 1y
| S16 -> Int16.add a 1s
| S32 -> Int32.add a 1l
| S64 -> Int64.add a 1L
| S128 -> Int128.add a (Int128.int_to_t 1)
let incr_lemma #t #l a = ()
#pop-options
[@(strict_on_arguments [0])]
let mul_mod #t #l a b =
match t with
| U1 -> UInt8.mul_mod a b
| U8 -> UInt8.mul_mod a b
| U16 -> UInt16.mul_mod a b
| U32 -> UInt32.mul_mod a b
| U64 -> UInt64.mul_mod a b
let mul_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let mul #t #l a b =
match t with
| U1 -> UInt8.mul a b
| U8 -> UInt8.mul a b
| U16 -> UInt16.mul a b
| U32 -> UInt32.mul a b
| U64 -> UInt64.mul a b
| S8 -> Int8.mul a b
| S16 -> Int16.mul a b
| S32 -> Int32.mul a b
| S64 -> Int64.mul a b
let mul_lemma #t #l a b = ()
let mul64_wide a b = UInt128.mul_wide a b
let mul64_wide_lemma a b = ()
let mul_s64_wide a b = Int128.mul_wide a b
let mul_s64_wide_lemma a b = ()
[@(strict_on_arguments [0])]
let sub_mod #t #l a b =
match t with
| U1 -> UInt8.rem (UInt8.sub_mod a b) 2uy
| U8 -> UInt8.sub_mod a b
| U16 -> UInt16.sub_mod a b
| U32 -> UInt32.sub_mod a b
| U64 -> UInt64.sub_mod a b
| U128 -> UInt128.sub_mod a b
let sub_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let sub #t #l a b =
match t with
| U1 -> UInt8.sub a b
| U8 -> UInt8.sub a b
| U16 -> UInt16.sub a b
| U32 -> UInt32.sub a b
| U64 -> UInt64.sub a b
| U128 -> UInt128.sub a b
| S8 -> Int8.sub a b
| S16 -> Int16.sub a b
| S32 -> Int32.sub a b
| S64 -> Int64.sub a b
| S128 -> Int128.sub a b
let sub_lemma #t #l a b = ()
#push-options "--max_fuel 1"
[@(strict_on_arguments [0])]
let decr #t #l a =
match t with
| U1 -> UInt8.sub a 1uy
| U8 -> UInt8.sub a 1uy
| U16 -> UInt16.sub a 1us
| U32 -> UInt32.sub a 1ul
| U64 -> UInt64.sub a 1uL
| U128 -> UInt128.sub a (UInt128.uint_to_t 1)
| S8 -> Int8.sub a 1y
| S16 -> Int16.sub a 1s
| S32 -> Int32.sub a 1l
| S64 -> Int64.sub a 1L
| S128 -> Int128.sub a (Int128.int_to_t 1)
let decr_lemma #t #l a = ()
#pop-options
[@(strict_on_arguments [0])]
let logxor #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logxor 0uy 0uy == 0uy);
assert_norm (UInt8.logxor 0uy 1uy == 1uy);
assert_norm (UInt8.logxor 1uy 0uy == 1uy);
assert_norm (UInt8.logxor 1uy 1uy == 0uy);
UInt8.logxor a b
| U8 -> UInt8.logxor a b
| U16 -> UInt16.logxor a b
| U32 -> UInt32.logxor a b
| U64 -> UInt64.logxor a b
| U128 -> UInt128.logxor a b
| S8 -> Int8.logxor a b
| S16 -> Int16.logxor a b
| S32 -> Int32.logxor a b
| S64 -> Int64.logxor a b
| S128 -> Int128.logxor a b
#push-options "--max_fuel 1"
val logxor_lemma_: #t:inttype -> #l:secrecy_level -> a:int_t t l -> b:int_t t l -> Lemma
(v (a `logxor` (a `logxor` b)) == v b)
let logxor_lemma_ #t #l a b =
match t with
| U1 | U8 | U16 | U32 | U64 | U128 ->
UInt.logxor_associative #(bits t) (v a) (v a) (v b);
UInt.logxor_self #(bits t) (v a);
UInt.logxor_commutative #(bits t) 0 (v b);
UInt.logxor_lemma_1 #(bits t) (v b)
| S8 | S16 | S32 | S64 | S128 ->
Int.logxor_associative #(bits t) (v a) (v a) (v b);
Int.logxor_self #(bits t) (v a);
Int.logxor_commutative #(bits t) 0 (v b);
Int.logxor_lemma_1 #(bits t) (v b)
let logxor_lemma #t #l a b =
logxor_lemma_ #t a b;
v_extensionality (logxor a (logxor a b)) b;
begin
match t with
| U1 | U8 | U16 | U32 | U64 | U128 -> UInt.logxor_commutative #(bits t) (v a) (v b)
| S8 | S16 | S32 | S64 | S128 -> Int.logxor_commutative #(bits t) (v a) (v b)
end;
v_extensionality (logxor a (logxor b a)) b;
begin
match t with
| U1 | U8 | U16 | U32 | U64 | U128 -> UInt.logxor_lemma_1 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.logxor_lemma_1 #(bits t) (v a)
end;
v_extensionality (logxor a (mk_int #t #l 0)) a
let logxor_lemma1 #t #l a b =
match v a, v b with
| _, 0 ->
UInt.logxor_lemma_1 #(bits t) (v a)
| 0, _ ->
UInt.logxor_commutative #(bits t) (v a) (v b);
UInt.logxor_lemma_1 #(bits t) (v b)
| 1, 1 ->
v_extensionality a b;
UInt.logxor_self #(bits t) (v a)
let logxor_spec #t #l a b =
match t with
| U1 ->
assert_norm (u1 0 `logxor` u1 0 == u1 0 /\ u1 0 `logxor` u1 1 == u1 1);
assert_norm (u1 1 `logxor` u1 0 == u1 1 /\ u1 1 `logxor` u1 1 == u1 0);
assert_norm (0 `logxor_v #U1` 0 == 0 /\ 0 `logxor_v #U1` 1 == 1);
assert_norm (1 `logxor_v #U1` 0 == 1 /\ 1 `logxor_v #U1` 1 == 0)
| _ -> ()
#pop-options
[@(strict_on_arguments [0])]
let logand #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logand 0uy 0uy == 0uy);
assert_norm (UInt8.logand 0uy 1uy == 0uy);
assert_norm (UInt8.logand 1uy 0uy == 0uy);
assert_norm (UInt8.logand 1uy 1uy == 1uy);
UInt8.logand a b
| U8 -> UInt8.logand a b
| U16 -> UInt16.logand a b
| U32 -> UInt32.logand a b
| U64 -> UInt64.logand a b
| U128 -> UInt128.logand a b
| S8 -> Int8.logand a b
| S16 -> Int16.logand a b
| S32 -> Int32.logand a b
| S64 -> Int64.logand a b
| S128 -> Int128.logand a b
let logand_zeros #t #l a =
match t with
| U1 -> assert_norm (u1 0 `logand` zeros U1 l == u1 0 /\ u1 1 `logand` zeros U1 l == u1 0)
| U8 | U16 | U32 | U64 | U128 -> UInt.logand_lemma_1 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.logand_lemma_1 #(bits t) (v a)
let logand_ones #t #l a =
match t with
| U1 -> assert_norm (u1 0 `logand` ones U1 l == u1 0 /\ u1 1 `logand` ones U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logand_lemma_2 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.logand_lemma_2 #(bits t) (v a)
let logand_lemma #t #l a b =
logand_zeros #t #l b;
logand_ones #t #l b;
match t with
| U1 ->
assert_norm (u1 0 `logand` zeros U1 l == u1 0 /\ u1 1 `logand` zeros U1 l == u1 0);
assert_norm (u1 0 `logand` ones U1 l == u1 0 /\ u1 1 `logand` ones U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logand_commutative #(bits t) (v a) (v b)
| S8 | S16 | S32 | S64 | S128 -> Int.logand_commutative #(bits t) (v a) (v b)
let logand_spec #t #l a b =
match t with
| U1 ->
assert_norm (u1 0 `logand` u1 0 == u1 0 /\ u1 0 `logand` u1 1 == u1 0);
assert_norm (u1 1 `logand` u1 0 == u1 0 /\ u1 1 `logand` u1 1 == u1 1);
assert_norm (0 `logand_v #U1` 0 == 0 /\ 0 `logand_v #U1` 1 == 0);
assert_norm (1 `logand_v #U1` 0 == 0 /\ 1 `logand_v #U1` 1 == 1)
| _ -> ()
let logand_le #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logand 0uy 0uy == 0uy);
assert_norm (UInt8.logand 0uy 1uy == 0uy);
assert_norm (UInt8.logand 1uy 0uy == 0uy);
assert_norm (UInt8.logand 1uy 1uy == 1uy)
| U8 -> UInt.logand_le (UInt.to_uint_t 8 (v a)) (UInt.to_uint_t 8 (v b))
| U16 -> UInt.logand_le (UInt.to_uint_t 16 (v a)) (UInt.to_uint_t 16 (v b))
| U32 -> UInt.logand_le (UInt.to_uint_t 32 (v a)) (UInt.to_uint_t 32 (v b))
| U64 -> UInt.logand_le (UInt.to_uint_t 64 (v a)) (UInt.to_uint_t 64 (v b))
| U128 -> UInt.logand_le (UInt.to_uint_t 128 (v a)) (UInt.to_uint_t 128 (v b))
let logand_mask #t #l a b m =
match t with
| U1 ->
assert_norm (UInt8.logand 0uy 0uy == 0uy);
assert_norm (UInt8.logand 0uy 1uy == 0uy);
assert_norm (UInt8.logand 1uy 0uy == 0uy);
assert_norm (UInt8.logand 1uy 1uy == 1uy)
| U8 -> UInt.logand_mask (UInt.to_uint_t 8 (v a)) m
| U16 -> UInt.logand_mask (UInt.to_uint_t 16 (v a)) m
| U32 -> UInt.logand_mask (UInt.to_uint_t 32 (v a)) m
| U64 -> UInt.logand_mask (UInt.to_uint_t 64 (v a)) m
| U128 -> UInt.logand_mask (UInt.to_uint_t 128 (v a)) m
[@(strict_on_arguments [0])]
let logor #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logor 0uy 0uy == 0uy);
assert_norm (UInt8.logor 0uy 1uy == 1uy);
assert_norm (UInt8.logor 1uy 0uy == 1uy);
assert_norm (UInt8.logor 1uy 1uy == 1uy);
UInt8.logor a b
| U8 -> UInt8.logor a b
| U16 -> UInt16.logor a b
| U32 -> UInt32.logor a b
| U64 -> UInt64.logor a b
| U128 -> UInt128.logor a b
| S8 -> Int8.logor a b
| S16 -> Int16.logor a b
| S32 -> Int32.logor a b
| S64 -> Int64.logor a b
| S128 -> Int128.logor a b
#push-options "--max_fuel 1"
let logor_disjoint #t #l a b m =
if m > 0 then
begin
UInt.logor_disjoint #(bits t) (v b) (v a) m;
UInt.logor_commutative #(bits t) (v b) (v a)
end
else
begin
UInt.logor_commutative #(bits t) (v a) (v b);
UInt.logor_lemma_1 #(bits t) (v b)
end
#pop-options
let logor_zeros #t #l a =
match t with
|U1 -> assert_norm(u1 0 `logor` zeros U1 l == u1 0 /\ u1 1 `logor` zeros U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logor_lemma_1 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.nth_lemma #(bits t) (Int.logor #(bits t) (v a) (Int.zero (bits t))) (v a)
let logor_ones #t #l a =
match t with
|U1 -> assert_norm(u1 0 `logor` ones U1 l == u1 1 /\ u1 1 `logor` ones U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logor_lemma_2 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.nth_lemma (Int.logor #(bits t) (v a) (Int.ones (bits t))) (Int.ones (bits t))
let logor_lemma #t #l a b =
logor_zeros #t #l b;
logor_ones #t #l b;
match t with
| U1 ->
assert_norm(u1 0 `logor` ones U1 l == u1 1 /\ u1 1 `logor` ones U1 l == u1 1);
assert_norm(u1 0 `logor` zeros U1 l == u1 0 /\ u1 1 `logor` zeros U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logor_commutative #(bits t) (v a) (v b)
| S8 | S16 | S32 | S64 | S128 -> Int.nth_lemma #(bits t) (Int.logor #(bits t) (v a) (v b)) (Int.logor #(bits t) (v b) (v a))
let logor_spec #t #l a b =
match t with
| U1 ->
assert_norm(u1 0 `logor` ones U1 l == u1 1 /\ u1 1 `logor` ones U1 l == u1 1);
assert_norm(u1 0 `logor` zeros U1 l == u1 0 /\ u1 1 `logor` zeros U1 l == u1 1);
assert_norm (0 `logor_v #U1` 0 == 0 /\ 0 `logor_v #U1` 1 == 1);
assert_norm (1 `logor_v #U1` 0 == 1 /\ 1 `logor_v #U1` 1 == 1)
| _ -> ()
[@(strict_on_arguments [0])]
let lognot #t #l a =
match t with
| U1 -> UInt8.rem (UInt8.lognot a) 2uy
| U8 -> UInt8.lognot a
| U16 -> UInt16.lognot a
| U32 -> UInt32.lognot a
| U64 -> UInt64.lognot a
| U128 -> UInt128.lognot a
| S8 -> Int8.lognot a
| S16 -> Int16.lognot a
| S32 -> Int32.lognot a
| S64 -> Int64.lognot a
| S128 -> Int128.lognot a
let lognot_lemma #t #l a =
match t with
|U1 -> assert_norm(lognot (u1 0) == u1 1 /\ lognot (u1 1) == u1 0)
| U8 | U16 | U32 | U64 | U128 ->
FStar.UInt.lognot_lemma_1 #(bits t);
UInt.nth_lemma (FStar.UInt.lognot #(bits t) (UInt.ones (bits t))) (UInt.zero (bits t))
| S8 | S16 | S32 | S64 | S128 ->
Int.nth_lemma (FStar.Int.lognot #(bits t) (Int.zero (bits t))) (Int.ones (bits t));
Int.nth_lemma (FStar.Int.lognot #(bits t) (Int.ones (bits t))) (Int.zero (bits t))
let lognot_spec #t #l a =
match t with
| U1 ->
assert_norm(lognot (u1 0) == u1 1 /\ lognot (u1 1) == u1 0);
assert_norm(lognot_v #U1 0 == 1 /\ lognot_v #U1 1 == 0)
| _ -> ()
[@(strict_on_arguments [0])]
let shift_right #t #l a b =
match t with
| U1 -> UInt8.shift_right a b
| U8 -> UInt8.shift_right a b
| U16 -> UInt16.shift_right a b
| U32 -> UInt32.shift_right a b
| U64 -> UInt64.shift_right a b
| U128 -> UInt128.shift_right a b
| S8 -> Int8.shift_arithmetic_right a b
| S16 -> Int16.shift_arithmetic_right a b
| S32 -> Int32.shift_arithmetic_right a b
| S64 -> Int64.shift_arithmetic_right a b
| S128 -> Int128.shift_arithmetic_right a b
val shift_right_value_aux_1: #n:pos{1 < n} -> a:Int.int_t n -> s:nat{n <= s} ->
Lemma (Int.shift_arithmetic_right #n a s = a / pow2 s)
let shift_right_value_aux_1 #n a s =
pow2_le_compat s n;
if a >= 0 then Int.sign_bit_positive a else Int.sign_bit_negative a
#push-options "--z3rlimit 200"
val shift_right_value_aux_2: #n:pos{1 < n} -> a:Int.int_t n ->
Lemma (Int.shift_arithmetic_right #n a 1 = a / 2)
let shift_right_value_aux_2 #n a =
if a >= 0 then
begin
Int.sign_bit_positive a;
UInt.shift_right_value_aux_3 #n a 1
end
else
begin
Int.sign_bit_negative a;
let a1 = Int.to_vec a in
let au = Int.to_uint a in
let sar = Int.shift_arithmetic_right #n a 1 in
let sar1 = Int.to_vec sar in
let sr = UInt.shift_right #n au 1 in
let sr1 = UInt.to_vec sr in
assert (Seq.equal (Seq.slice sar1 1 n) (Seq.slice sr1 1 n));
assert (Seq.equal sar1 (Seq.append (BitVector.ones_vec #1) (Seq.slice sr1 1 n)));
UInt.append_lemma #1 #(n-1) (BitVector.ones_vec #1) (Seq.slice sr1 1 n);
assert (Seq.equal (Seq.slice a1 0 (n-1)) (Seq.slice sar1 1 n));
UInt.slice_left_lemma a1 (n-1);
assert (sar + pow2 n = pow2 (n-1) + (au / 2));
pow2_double_sum (n-1);
assert (sar + pow2 (n-1) = (a + pow2 n) / 2);
pow2_double_mult (n-1);
lemma_div_plus a (pow2 (n-1)) 2;
assert (sar = a / 2)
end
val shift_right_value_aux_3: #n:pos -> a:Int.int_t n -> s:pos{s < n} ->
Lemma (ensures Int.shift_arithmetic_right #n a s = a / pow2 s)
(decreases s)
let rec shift_right_value_aux_3 #n a s =
if s = 1 then
shift_right_value_aux_2 #n a
else
begin
let a1 = Int.to_vec a in
assert (Seq.equal (BitVector.shift_arithmetic_right_vec #n a1 s)
(BitVector.shift_arithmetic_right_vec #n
(BitVector.shift_arithmetic_right_vec #n a1 (s-1)) 1));
assert (Int.shift_arithmetic_right #n a s =
Int.shift_arithmetic_right #n (Int.shift_arithmetic_right #n a (s-1)) 1);
shift_right_value_aux_3 #n a (s-1);
shift_right_value_aux_2 #n (Int.shift_arithmetic_right #n a (s-1));
assert (Int.shift_arithmetic_right #n a s = (a / pow2 (s-1)) / 2);
pow2_double_mult (s-1);
division_multiplication_lemma a (pow2 (s-1)) 2
end
let shift_right_lemma #t #l a b =
match t with
| U1 | U8 | U16 | U32 | U64 | U128 -> ()
| S8 | S16 | S32 | S64 | S128 ->
if v b = 0 then ()
else if v b >= bits t then
shift_right_value_aux_1 #(bits t) (v a) (v b)
else
shift_right_value_aux_3 #(bits t) (v a) (v b)
[@(strict_on_arguments [0])]
let shift_left #t #l a b =
match t with
| U1 -> UInt8.shift_left a b
| U8 -> UInt8.shift_left a b
| U16 -> UInt16.shift_left a b
| U32 -> UInt32.shift_left a b
| U64 -> UInt64.shift_left a b
| U128 -> UInt128.shift_left a b
| S8 -> Int8.shift_left a b
| S16 -> Int16.shift_left a b
| S32 -> Int32.shift_left a b
| S64 -> Int64.shift_left a b
| S128 -> Int128.shift_left a b
#push-options "--max_fuel 1"
let shift_left_lemma #t #l a b = ()
let rotate_right #t #l a b =
logor (shift_right a b) (shift_left a (sub #U32 (size (bits t)) b))
let rotate_left #t #l a b =
logor (shift_left a b) (shift_right a (sub #U32 (size (bits t)) b))
[@(strict_on_arguments [0])]
let ct_abs #t #l a =
match t with
| S8 -> Int8.ct_abs a
| S16 -> Int16.ct_abs a
| S32 -> Int32.ct_abs a
| S64 -> Int64.ct_abs a
#pop-options
[@(strict_on_arguments [0])]
let eq_mask #t a b =
match t with
| U1 -> lognot (logxor a b)
| U8 -> UInt8.eq_mask a b
| U16 -> UInt16.eq_mask a b
| U32 -> UInt32.eq_mask a b
| U64 -> UInt64.eq_mask a b
| U128 -> UInt128.eq_mask a b
| S8 -> Int.Cast.uint8_to_int8 (UInt8.eq_mask (to_u8 a) (to_u8 b))
| S16 -> Int.Cast.uint16_to_int16 (UInt16.eq_mask (to_u16 a) (to_u16 b))
| S32 -> Int.Cast.uint32_to_int32 (UInt32.eq_mask (to_u32 a) (to_u32 b))
| S64 -> Int.Cast.uint64_to_int64 (UInt64.eq_mask (to_u64 a) (to_u64 b))
val eq_mask_lemma_unsigned: #t:inttype{unsigned t} -> a:int_t t SEC -> b:int_t t SEC -> Lemma
(if v a = v b then v (eq_mask a b) == ones_v t
else v (eq_mask a b) == 0)
let eq_mask_lemma_unsigned #t a b =
match t with
| U1 ->
assert_norm (
logxor (u1 0) (u1 0) == u1 0 /\ logxor (u1 0) (u1 1) == u1 1 /\
logxor (u1 1) (u1 0) == u1 1 /\ logxor (u1 1) (u1 1) == u1 0 /\
lognot (u1 1) == u1 0 /\ lognot (u1 0) == u1 1)
| U8 | U16 | U32 | U64 | U128 -> ()
#push-options "--z3rlimit 200"
val eq_mask_lemma_signed: #t:inttype{signed t /\ ~(S128? t)} -> a:int_t t SEC -> b:int_t t SEC -> Lemma
(if v a = v b then v (eq_mask a b) == ones_v t
else v (eq_mask a b) == 0)
let eq_mask_lemma_signed #t a b =
match t with
| S8 ->
begin
assert_norm (pow2 8 = 2 * pow2 7);
if 0 <= v a then modulo_lemma (v a) (pow2 8)
else
begin
modulo_addition_lemma (v a) 1 (pow2 8);
modulo_lemma (v a + pow2 8) (pow2 8)
end
end
| S16 ->
begin
assert_norm (pow2 16 = 2 * pow2 15);
if 0 <= v a then modulo_lemma (v a) (pow2 16)
else
begin
modulo_addition_lemma (v a) 1 (pow2 16);
modulo_lemma (v a + pow2 16) (pow2 16)
end
end
| S32 ->
begin
if 0 <= v a then modulo_lemma (v a) (pow2 32)
else
begin
modulo_addition_lemma (v a) 1 (pow2 32);
modulo_lemma (v a + pow2 32) (pow2 32)
end
end
| S64 ->
begin
if 0 <= v a then modulo_lemma (v a) (pow2 64)
else
begin
modulo_addition_lemma (v a) 1 (pow2 64);
modulo_lemma (v a + pow2 64) (pow2 64)
end
end
#pop-options
let eq_mask_lemma #t a b =
if signed t then eq_mask_lemma_signed a b
else eq_mask_lemma_unsigned a b
let eq_mask_logand_lemma #t a b c =
eq_mask_lemma a b;
logand_zeros c;
logand_ones c;
match t with
| U1 | U8 | U16 | U32 | U64 | U128 -> UInt.logand_commutative #(bits t) (v (eq_mask a b)) (v c)
| S8 | S16 | S32 | S64 -> Int.logand_commutative #(bits t) (v (eq_mask a b)) (v c)
[@(strict_on_arguments [0])]
let neq_mask #t a b = lognot (eq_mask #t a b)
let neq_mask_lemma #t a b =
match t with
| U1 -> assert_norm (lognot (u1 1) == u1 0 /\ lognot (u1 0) == u1 1)
| _ ->
UInt.lognot_lemma_1 #(bits t);
UInt.lognot_self #(bits t) 0
[@(strict_on_arguments [0])]
let gte_mask #t a b =
match t with
| U1 -> logor a (lognot b)
| U8 -> UInt8.gte_mask a b
| U16 -> UInt16.gte_mask a b
| U32 -> UInt32.gte_mask a b
| U64 -> UInt64.gte_mask a b
| U128 -> UInt128.gte_mask a b
let gte_mask_lemma #t a b =
match t with
| U1 ->
begin
assert_norm (
logor (u1 0) (u1 0) == u1 0 /\ logor (u1 1) (u1 1) == u1 1 /\
logor (u1 0) (u1 1) == u1 1 /\ logor (u1 1) (u1 0) == u1 1 /\
lognot (u1 1) == u1 0 /\ lognot (u1 0) == u1 1)
end
| _ -> ()
let gte_mask_logand_lemma #t a b c =
logand_zeros c;
logand_ones c;
match t with
| U1 ->
assert_norm (
logor (u1 0) (u1 0) == u1 0 /\ logor (u1 1) (u1 1) == u1 1 /\
logor (u1 0) (u1 1) == u1 1 /\ logor (u1 1) (u1 0) == u1 1 /\
lognot (u1 1) == u1 0 /\ lognot (u1 0) == u1 1)
| _ -> UInt.logand_commutative #(bits t) (v (gte_mask a b)) (v c) | false | false | Lib.IntTypes.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 200,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val lt_mask: #t:inttype{unsigned t} -> int_t t SEC -> int_t t SEC -> int_t t SEC | [] | Lib.IntTypes.lt_mask | {
"file_name": "lib/Lib.IntTypes.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | a: Lib.IntTypes.int_t t Lib.IntTypes.SEC -> b: Lib.IntTypes.int_t t Lib.IntTypes.SEC
-> Lib.IntTypes.int_t t Lib.IntTypes.SEC | {
"end_col": 42,
"end_line": 918,
"start_col": 21,
"start_line": 918
} |
Prims.Tot | val size_to_uint32: s:size_t -> u:uint32{u == u32 (v s)} | [
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let size_to_uint32 x = x | val size_to_uint32: s:size_t -> u:uint32{u == u32 (v s)}
let size_to_uint32 x = | false | null | false | x | {
"checked_file": "Lib.IntTypes.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt8.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt16.fsti.checked",
"FStar.UInt128.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int8.fsti.checked",
"FStar.Int64.fsti.checked",
"FStar.Int32.fsti.checked",
"FStar.Int16.fsti.checked",
"FStar.Int128.fsti.checked",
"FStar.Int.Cast.Full.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Int.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "Lib.IntTypes.fst"
} | [
"total"
] | [
"Lib.IntTypes.size_t",
"Lib.IntTypes.uint32",
"Prims.eq2",
"Lib.IntTypes.u32",
"Lib.IntTypes.v",
"Lib.IntTypes.U32",
"Lib.IntTypes.PUB"
] | [] | module Lib.IntTypes
open FStar.Math.Lemmas
#push-options "--max_fuel 0 --max_ifuel 1 --z3rlimit 200"
let pow2_2 _ = assert_norm (pow2 2 = 4)
let pow2_3 _ = assert_norm (pow2 3 = 8)
let pow2_4 _ = assert_norm (pow2 4 = 16)
let pow2_127 _ = assert_norm (pow2 127 = 0x80000000000000000000000000000000)
let bits_numbytes t = ()
let sec_int_t t = pub_int_t t
let sec_int_v #t u = pub_int_v u
let secret #t x = x
[@(strict_on_arguments [0])]
let mk_int #t #l x =
match t with
| U1 -> UInt8.uint_to_t x
| U8 -> UInt8.uint_to_t x
| U16 -> UInt16.uint_to_t x
| U32 -> UInt32.uint_to_t x
| U64 -> UInt64.uint_to_t x
| U128 -> UInt128.uint_to_t x
| S8 -> Int8.int_to_t x
| S16 -> Int16.int_to_t x
| S32 -> Int32.int_to_t x
| S64 -> Int64.int_to_t x
| S128 -> Int128.int_to_t x
val v_extensionality:
#t:inttype
-> #l:secrecy_level
-> a:int_t t l
-> b:int_t t l
-> Lemma
(requires v a == v b)
(ensures a == b)
let v_extensionality #t #l a b =
match t with
| U1 -> ()
| U8 -> UInt8.v_inj a b
| U16 -> UInt16.v_inj a b
| U32 -> UInt32.v_inj a b
| U64 -> UInt64.v_inj a b
| U128 -> UInt128.v_inj a b
| S8 -> Int8.v_inj a b
| S16 -> Int16.v_inj a b
| S32 -> Int32.v_inj a b
| S64 -> Int64.v_inj a b
| S128 -> Int128.v_inj a b
let v_injective #t #l a =
v_extensionality a (mk_int (v a))
let v_mk_int #t #l n = ()
let u128 n = FStar.UInt128.uint64_to_uint128 (u64 n)
// KaRaMeL will extract this to FStar_Int128_int_to_t, which isn't provided
// We'll need to have FStar.Int128.int64_to_int128 to support int128_t literals
let i128 n =
assert_norm (pow2 (bits S64 - 1) <= pow2 (bits S128 - 1));
sint #S128 #SEC n | false | false | Lib.IntTypes.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 200,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val size_to_uint32: s:size_t -> u:uint32{u == u32 (v s)} | [] | Lib.IntTypes.size_to_uint32 | {
"file_name": "lib/Lib.IntTypes.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | s: Lib.IntTypes.size_t -> u92: Lib.IntTypes.uint32{u92 == Lib.IntTypes.u32 (Lib.IntTypes.v s)} | {
"end_col": 24,
"end_line": 70,
"start_col": 23,
"start_line": 70
} |
Prims.Tot | val i128 (n:range_t S64) : u:int128{v #S128 u == n} | [
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let i128 n =
assert_norm (pow2 (bits S64 - 1) <= pow2 (bits S128 - 1));
sint #S128 #SEC n | val i128 (n:range_t S64) : u:int128{v #S128 u == n}
let i128 n = | false | null | false | assert_norm (pow2 (bits S64 - 1) <= pow2 (bits S128 - 1));
sint #S128 #SEC n | {
"checked_file": "Lib.IntTypes.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt8.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt16.fsti.checked",
"FStar.UInt128.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int8.fsti.checked",
"FStar.Int64.fsti.checked",
"FStar.Int32.fsti.checked",
"FStar.Int16.fsti.checked",
"FStar.Int128.fsti.checked",
"FStar.Int.Cast.Full.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Int.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "Lib.IntTypes.fst"
} | [
"total"
] | [
"Lib.IntTypes.range_t",
"Lib.IntTypes.S64",
"Lib.IntTypes.sint",
"Lib.IntTypes.S128",
"Lib.IntTypes.SEC",
"Prims.unit",
"FStar.Pervasives.assert_norm",
"Prims.b2t",
"Prims.op_LessThanOrEqual",
"Prims.pow2",
"Prims.op_Subtraction",
"Lib.IntTypes.bits",
"Lib.IntTypes.int128",
"Prims.eq2",
"Prims.int",
"Prims.l_or",
"Lib.IntTypes.range",
"Lib.IntTypes.v"
] | [] | module Lib.IntTypes
open FStar.Math.Lemmas
#push-options "--max_fuel 0 --max_ifuel 1 --z3rlimit 200"
let pow2_2 _ = assert_norm (pow2 2 = 4)
let pow2_3 _ = assert_norm (pow2 3 = 8)
let pow2_4 _ = assert_norm (pow2 4 = 16)
let pow2_127 _ = assert_norm (pow2 127 = 0x80000000000000000000000000000000)
let bits_numbytes t = ()
let sec_int_t t = pub_int_t t
let sec_int_v #t u = pub_int_v u
let secret #t x = x
[@(strict_on_arguments [0])]
let mk_int #t #l x =
match t with
| U1 -> UInt8.uint_to_t x
| U8 -> UInt8.uint_to_t x
| U16 -> UInt16.uint_to_t x
| U32 -> UInt32.uint_to_t x
| U64 -> UInt64.uint_to_t x
| U128 -> UInt128.uint_to_t x
| S8 -> Int8.int_to_t x
| S16 -> Int16.int_to_t x
| S32 -> Int32.int_to_t x
| S64 -> Int64.int_to_t x
| S128 -> Int128.int_to_t x
val v_extensionality:
#t:inttype
-> #l:secrecy_level
-> a:int_t t l
-> b:int_t t l
-> Lemma
(requires v a == v b)
(ensures a == b)
let v_extensionality #t #l a b =
match t with
| U1 -> ()
| U8 -> UInt8.v_inj a b
| U16 -> UInt16.v_inj a b
| U32 -> UInt32.v_inj a b
| U64 -> UInt64.v_inj a b
| U128 -> UInt128.v_inj a b
| S8 -> Int8.v_inj a b
| S16 -> Int16.v_inj a b
| S32 -> Int32.v_inj a b
| S64 -> Int64.v_inj a b
| S128 -> Int128.v_inj a b
let v_injective #t #l a =
v_extensionality a (mk_int (v a))
let v_mk_int #t #l n = ()
let u128 n = FStar.UInt128.uint64_to_uint128 (u64 n)
// KaRaMeL will extract this to FStar_Int128_int_to_t, which isn't provided
// We'll need to have FStar.Int128.int64_to_int128 to support int128_t literals | false | false | Lib.IntTypes.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 200,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val i128 (n:range_t S64) : u:int128{v #S128 u == n} | [] | Lib.IntTypes.i128 | {
"file_name": "lib/Lib.IntTypes.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | n: Lib.IntTypes.range_t Lib.IntTypes.S64 -> u87: Lib.IntTypes.int128{Lib.IntTypes.v u87 == n} | {
"end_col": 19,
"end_line": 68,
"start_col": 2,
"start_line": 67
} |
Prims.Tot | val byte_to_uint8: s:byte_t -> u:uint8{u == u8 (v s)} | [
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let byte_to_uint8 x = x | val byte_to_uint8: s:byte_t -> u:uint8{u == u8 (v s)}
let byte_to_uint8 x = | false | null | false | x | {
"checked_file": "Lib.IntTypes.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt8.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt16.fsti.checked",
"FStar.UInt128.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int8.fsti.checked",
"FStar.Int64.fsti.checked",
"FStar.Int32.fsti.checked",
"FStar.Int16.fsti.checked",
"FStar.Int128.fsti.checked",
"FStar.Int.Cast.Full.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Int.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "Lib.IntTypes.fst"
} | [
"total"
] | [
"Lib.IntTypes.byte_t",
"Lib.IntTypes.uint8",
"Prims.eq2",
"Lib.IntTypes.u8",
"Lib.IntTypes.v",
"Lib.IntTypes.U8",
"Lib.IntTypes.PUB"
] | [] | module Lib.IntTypes
open FStar.Math.Lemmas
#push-options "--max_fuel 0 --max_ifuel 1 --z3rlimit 200"
let pow2_2 _ = assert_norm (pow2 2 = 4)
let pow2_3 _ = assert_norm (pow2 3 = 8)
let pow2_4 _ = assert_norm (pow2 4 = 16)
let pow2_127 _ = assert_norm (pow2 127 = 0x80000000000000000000000000000000)
let bits_numbytes t = ()
let sec_int_t t = pub_int_t t
let sec_int_v #t u = pub_int_v u
let secret #t x = x
[@(strict_on_arguments [0])]
let mk_int #t #l x =
match t with
| U1 -> UInt8.uint_to_t x
| U8 -> UInt8.uint_to_t x
| U16 -> UInt16.uint_to_t x
| U32 -> UInt32.uint_to_t x
| U64 -> UInt64.uint_to_t x
| U128 -> UInt128.uint_to_t x
| S8 -> Int8.int_to_t x
| S16 -> Int16.int_to_t x
| S32 -> Int32.int_to_t x
| S64 -> Int64.int_to_t x
| S128 -> Int128.int_to_t x
val v_extensionality:
#t:inttype
-> #l:secrecy_level
-> a:int_t t l
-> b:int_t t l
-> Lemma
(requires v a == v b)
(ensures a == b)
let v_extensionality #t #l a b =
match t with
| U1 -> ()
| U8 -> UInt8.v_inj a b
| U16 -> UInt16.v_inj a b
| U32 -> UInt32.v_inj a b
| U64 -> UInt64.v_inj a b
| U128 -> UInt128.v_inj a b
| S8 -> Int8.v_inj a b
| S16 -> Int16.v_inj a b
| S32 -> Int32.v_inj a b
| S64 -> Int64.v_inj a b
| S128 -> Int128.v_inj a b
let v_injective #t #l a =
v_extensionality a (mk_int (v a))
let v_mk_int #t #l n = ()
let u128 n = FStar.UInt128.uint64_to_uint128 (u64 n)
// KaRaMeL will extract this to FStar_Int128_int_to_t, which isn't provided
// We'll need to have FStar.Int128.int64_to_int128 to support int128_t literals
let i128 n =
assert_norm (pow2 (bits S64 - 1) <= pow2 (bits S128 - 1));
sint #S128 #SEC n
let size_to_uint32 x = x
let size_to_uint64 x = Int.Cast.uint32_to_uint64 x | false | false | Lib.IntTypes.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 200,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val byte_to_uint8: s:byte_t -> u:uint8{u == u8 (v s)} | [] | Lib.IntTypes.byte_to_uint8 | {
"file_name": "lib/Lib.IntTypes.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | s: Lib.IntTypes.byte_t -> u94: Lib.IntTypes.uint8{u94 == Lib.IntTypes.u8 (Lib.IntTypes.v s)} | {
"end_col": 23,
"end_line": 74,
"start_col": 22,
"start_line": 74
} |
Prims.Tot | val int64_to_uint128: a:Int64.t -> b:UInt128.t{UInt128.v b == Int64.v a % pow2 128} | [
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let int64_to_uint128 a = int128_to_uint128 (int64_to_int128 a) | val int64_to_uint128: a:Int64.t -> b:UInt128.t{UInt128.v b == Int64.v a % pow2 128}
let int64_to_uint128 a = | false | null | false | int128_to_uint128 (int64_to_int128 a) | {
"checked_file": "Lib.IntTypes.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt8.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt16.fsti.checked",
"FStar.UInt128.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int8.fsti.checked",
"FStar.Int64.fsti.checked",
"FStar.Int32.fsti.checked",
"FStar.Int16.fsti.checked",
"FStar.Int128.fsti.checked",
"FStar.Int.Cast.Full.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Int.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "Lib.IntTypes.fst"
} | [
"total"
] | [
"FStar.Int64.t",
"Lib.IntTypes.int128_to_uint128",
"Lib.IntTypes.int64_to_int128",
"FStar.UInt128.t",
"Prims.eq2",
"Prims.int",
"FStar.UInt128.v",
"Prims.op_Modulus",
"FStar.Int64.v",
"Prims.pow2"
] | [] | module Lib.IntTypes
open FStar.Math.Lemmas
#push-options "--max_fuel 0 --max_ifuel 1 --z3rlimit 200"
let pow2_2 _ = assert_norm (pow2 2 = 4)
let pow2_3 _ = assert_norm (pow2 3 = 8)
let pow2_4 _ = assert_norm (pow2 4 = 16)
let pow2_127 _ = assert_norm (pow2 127 = 0x80000000000000000000000000000000)
let bits_numbytes t = ()
let sec_int_t t = pub_int_t t
let sec_int_v #t u = pub_int_v u
let secret #t x = x
[@(strict_on_arguments [0])]
let mk_int #t #l x =
match t with
| U1 -> UInt8.uint_to_t x
| U8 -> UInt8.uint_to_t x
| U16 -> UInt16.uint_to_t x
| U32 -> UInt32.uint_to_t x
| U64 -> UInt64.uint_to_t x
| U128 -> UInt128.uint_to_t x
| S8 -> Int8.int_to_t x
| S16 -> Int16.int_to_t x
| S32 -> Int32.int_to_t x
| S64 -> Int64.int_to_t x
| S128 -> Int128.int_to_t x
val v_extensionality:
#t:inttype
-> #l:secrecy_level
-> a:int_t t l
-> b:int_t t l
-> Lemma
(requires v a == v b)
(ensures a == b)
let v_extensionality #t #l a b =
match t with
| U1 -> ()
| U8 -> UInt8.v_inj a b
| U16 -> UInt16.v_inj a b
| U32 -> UInt32.v_inj a b
| U64 -> UInt64.v_inj a b
| U128 -> UInt128.v_inj a b
| S8 -> Int8.v_inj a b
| S16 -> Int16.v_inj a b
| S32 -> Int32.v_inj a b
| S64 -> Int64.v_inj a b
| S128 -> Int128.v_inj a b
let v_injective #t #l a =
v_extensionality a (mk_int (v a))
let v_mk_int #t #l n = ()
let u128 n = FStar.UInt128.uint64_to_uint128 (u64 n)
// KaRaMeL will extract this to FStar_Int128_int_to_t, which isn't provided
// We'll need to have FStar.Int128.int64_to_int128 to support int128_t literals
let i128 n =
assert_norm (pow2 (bits S64 - 1) <= pow2 (bits S128 - 1));
sint #S128 #SEC n
let size_to_uint32 x = x
let size_to_uint64 x = Int.Cast.uint32_to_uint64 x
let byte_to_uint8 x = x
let byte_to_int8 x = Int.Cast.uint8_to_int8 x
let op_At_Percent = Int.op_At_Percent
// FStar.UInt128 gets special treatment in KaRaMeL. There is no
// equivalent for FStar.Int128 at the moment, so we use the three
// assumed cast operators below.
//
// Using them will fail at runtime with an informative message.
// The commented-out implementations show that they are realizable.
//
// When support for `FStar.Int128` is added KaRaMeL, these casts must
// be added as special cases. When using builtin compiler support for
// `int128_t`, they can be implemented directly as C casts without
// undefined or implementation-defined behaviour.
assume
val uint128_to_int128: a:UInt128.t{v a <= maxint S128} -> b:Int128.t{Int128.v b == UInt128.v a}
//let uint128_to_int128 a = Int128.int_to_t (v a)
assume
val int128_to_uint128: a:Int128.t -> b:UInt128.t{UInt128.v b == Int128.v a % pow2 128}
//let int128_to_uint128 a = mk_int (v a % pow2 128)
assume
val int64_to_int128: a:Int64.t -> b:Int128.t{Int128.v b == Int64.v a}
//let int64_to_int128 a = Int128.int_to_t (v a)
val uint64_to_int128: a:UInt64.t -> b:Int128.t{Int128.v b == UInt64.v a}
let uint64_to_int128 a = uint128_to_int128 (Int.Cast.Full.uint64_to_uint128 a) | false | false | Lib.IntTypes.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 200,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val int64_to_uint128: a:Int64.t -> b:UInt128.t{UInt128.v b == Int64.v a % pow2 128} | [] | Lib.IntTypes.int64_to_uint128 | {
"file_name": "lib/Lib.IntTypes.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | a: FStar.Int64.t -> b: FStar.UInt128.t{FStar.UInt128.v b == FStar.Int64.v a % Prims.pow2 128} | {
"end_col": 62,
"end_line": 108,
"start_col": 25,
"start_line": 108
} |
Prims.Tot | val add_mod: #t:inttype{unsigned t} -> #l:secrecy_level
-> int_t t l
-> int_t t l
-> int_t t l | [
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let add_mod #t #l a b =
match t with
| U1 -> UInt8.rem (UInt8.add_mod a b) 2uy
| U8 -> UInt8.add_mod a b
| U16 -> UInt16.add_mod a b
| U32 -> UInt32.add_mod a b
| U64 -> UInt64.add_mod a b
| U128 -> UInt128.add_mod a b | val add_mod: #t:inttype{unsigned t} -> #l:secrecy_level
-> int_t t l
-> int_t t l
-> int_t t l
let add_mod #t #l a b = | false | null | false | match t with
| U1 -> UInt8.rem (UInt8.add_mod a b) 2uy
| U8 -> UInt8.add_mod a b
| U16 -> UInt16.add_mod a b
| U32 -> UInt32.add_mod a b
| U64 -> UInt64.add_mod a b
| U128 -> UInt128.add_mod a b | {
"checked_file": "Lib.IntTypes.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt8.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt16.fsti.checked",
"FStar.UInt128.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int8.fsti.checked",
"FStar.Int64.fsti.checked",
"FStar.Int32.fsti.checked",
"FStar.Int16.fsti.checked",
"FStar.Int128.fsti.checked",
"FStar.Int.Cast.Full.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Int.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "Lib.IntTypes.fst"
} | [
"total"
] | [
"Lib.IntTypes.inttype",
"Prims.b2t",
"Lib.IntTypes.unsigned",
"Lib.IntTypes.secrecy_level",
"Lib.IntTypes.int_t",
"FStar.UInt8.rem",
"FStar.UInt8.add_mod",
"FStar.UInt8.__uint_to_t",
"FStar.UInt16.add_mod",
"FStar.UInt32.add_mod",
"FStar.UInt64.add_mod",
"FStar.UInt128.add_mod"
] | [] | module Lib.IntTypes
open FStar.Math.Lemmas
#push-options "--max_fuel 0 --max_ifuel 1 --z3rlimit 200"
let pow2_2 _ = assert_norm (pow2 2 = 4)
let pow2_3 _ = assert_norm (pow2 3 = 8)
let pow2_4 _ = assert_norm (pow2 4 = 16)
let pow2_127 _ = assert_norm (pow2 127 = 0x80000000000000000000000000000000)
let bits_numbytes t = ()
let sec_int_t t = pub_int_t t
let sec_int_v #t u = pub_int_v u
let secret #t x = x
[@(strict_on_arguments [0])]
let mk_int #t #l x =
match t with
| U1 -> UInt8.uint_to_t x
| U8 -> UInt8.uint_to_t x
| U16 -> UInt16.uint_to_t x
| U32 -> UInt32.uint_to_t x
| U64 -> UInt64.uint_to_t x
| U128 -> UInt128.uint_to_t x
| S8 -> Int8.int_to_t x
| S16 -> Int16.int_to_t x
| S32 -> Int32.int_to_t x
| S64 -> Int64.int_to_t x
| S128 -> Int128.int_to_t x
val v_extensionality:
#t:inttype
-> #l:secrecy_level
-> a:int_t t l
-> b:int_t t l
-> Lemma
(requires v a == v b)
(ensures a == b)
let v_extensionality #t #l a b =
match t with
| U1 -> ()
| U8 -> UInt8.v_inj a b
| U16 -> UInt16.v_inj a b
| U32 -> UInt32.v_inj a b
| U64 -> UInt64.v_inj a b
| U128 -> UInt128.v_inj a b
| S8 -> Int8.v_inj a b
| S16 -> Int16.v_inj a b
| S32 -> Int32.v_inj a b
| S64 -> Int64.v_inj a b
| S128 -> Int128.v_inj a b
let v_injective #t #l a =
v_extensionality a (mk_int (v a))
let v_mk_int #t #l n = ()
let u128 n = FStar.UInt128.uint64_to_uint128 (u64 n)
// KaRaMeL will extract this to FStar_Int128_int_to_t, which isn't provided
// We'll need to have FStar.Int128.int64_to_int128 to support int128_t literals
let i128 n =
assert_norm (pow2 (bits S64 - 1) <= pow2 (bits S128 - 1));
sint #S128 #SEC n
let size_to_uint32 x = x
let size_to_uint64 x = Int.Cast.uint32_to_uint64 x
let byte_to_uint8 x = x
let byte_to_int8 x = Int.Cast.uint8_to_int8 x
let op_At_Percent = Int.op_At_Percent
// FStar.UInt128 gets special treatment in KaRaMeL. There is no
// equivalent for FStar.Int128 at the moment, so we use the three
// assumed cast operators below.
//
// Using them will fail at runtime with an informative message.
// The commented-out implementations show that they are realizable.
//
// When support for `FStar.Int128` is added KaRaMeL, these casts must
// be added as special cases. When using builtin compiler support for
// `int128_t`, they can be implemented directly as C casts without
// undefined or implementation-defined behaviour.
assume
val uint128_to_int128: a:UInt128.t{v a <= maxint S128} -> b:Int128.t{Int128.v b == UInt128.v a}
//let uint128_to_int128 a = Int128.int_to_t (v a)
assume
val int128_to_uint128: a:Int128.t -> b:UInt128.t{UInt128.v b == Int128.v a % pow2 128}
//let int128_to_uint128 a = mk_int (v a % pow2 128)
assume
val int64_to_int128: a:Int64.t -> b:Int128.t{Int128.v b == Int64.v a}
//let int64_to_int128 a = Int128.int_to_t (v a)
val uint64_to_int128: a:UInt64.t -> b:Int128.t{Int128.v b == UInt64.v a}
let uint64_to_int128 a = uint128_to_int128 (Int.Cast.Full.uint64_to_uint128 a)
val int64_to_uint128: a:Int64.t -> b:UInt128.t{UInt128.v b == Int64.v a % pow2 128}
let int64_to_uint128 a = int128_to_uint128 (int64_to_int128 a)
val int128_to_uint64: a:Int128.t -> b:UInt64.t{UInt64.v b == Int128.v a % pow2 64}
let int128_to_uint64 a = Int.Cast.Full.uint128_to_uint64 (int128_to_uint128 a)
#push-options "--z3rlimit 1000"
[@(strict_on_arguments [0;2])]
let cast #t #l t' l' u =
assert_norm (pow2 8 = 2 * pow2 7);
assert_norm (pow2 16 = 2 * pow2 15);
assert_norm (pow2 64 * pow2 64 = pow2 128);
assert_norm (pow2 16 * pow2 48 = pow2 64);
assert_norm (pow2 8 * pow2 56 = pow2 64);
assert_norm (pow2 32 * pow2 32 = pow2 64);
modulo_modulo_lemma (v u) (pow2 32) (pow2 32);
modulo_modulo_lemma (v u) (pow2 64) (pow2 64);
modulo_modulo_lemma (v u) (pow2 128) (pow2 64);
modulo_modulo_lemma (v u) (pow2 16) (pow2 48);
modulo_modulo_lemma (v u) (pow2 8) (pow2 56);
let open FStar.Int.Cast in
let open FStar.Int.Cast.Full in
match t, t' with
| U1, U1 -> u
| U1, U8 -> u
| U1, U16 -> uint8_to_uint16 u
| U1, U32 -> uint8_to_uint32 u
| U1, U64 -> uint8_to_uint64 u
| U1, U128 -> UInt128.uint64_to_uint128 (uint8_to_uint64 u)
| U1, S8 -> uint8_to_int8 u
| U1, S16 -> uint8_to_int16 u
| U1, S32 -> uint8_to_int32 u
| U1, S64 -> uint8_to_int64 u
| U1, S128 -> uint64_to_int128 (uint8_to_uint64 u)
| U8, U1 -> UInt8.rem u 2uy
| U8, U8 -> u
| U8, U16 -> uint8_to_uint16 u
| U8, U32 -> uint8_to_uint32 u
| U8, U64 -> uint8_to_uint64 u
| U8, U128 -> UInt128.uint64_to_uint128 (uint8_to_uint64 u)
| U8, S8 -> uint8_to_int8 u
| U8, S16 -> uint8_to_int16 u
| U8, S32 -> uint8_to_int32 u
| U8, S64 -> uint8_to_int64 u
| U8, S128 -> uint64_to_int128 (uint8_to_uint64 u)
| U16, U1 -> UInt8.rem (uint16_to_uint8 u) 2uy
| U16, U8 -> uint16_to_uint8 u
| U16, U16 -> u
| U16, U32 -> uint16_to_uint32 u
| U16, U64 -> uint16_to_uint64 u
| U16, U128 -> UInt128.uint64_to_uint128 (uint16_to_uint64 u)
| U16, S8 -> uint16_to_int8 u
| U16, S16 -> uint16_to_int16 u
| U16, S32 -> uint16_to_int32 u
| U16, S64 -> uint16_to_int64 u
| U16, S128 -> uint64_to_int128 (uint16_to_uint64 u)
| U32, U1 -> UInt8.rem (uint32_to_uint8 u) 2uy
| U32, U8 -> uint32_to_uint8 u
| U32, U16 -> uint32_to_uint16 u
| U32, U32 -> u
| U32, U64 -> uint32_to_uint64 u
| U32, U128 -> UInt128.uint64_to_uint128 (uint32_to_uint64 u)
| U32, S8 -> uint32_to_int8 u
| U32, S16 -> uint32_to_int16 u
| U32, S32 -> uint32_to_int32 u
| U32, S64 -> uint32_to_int64 u
| U32, S128 -> uint64_to_int128 (uint32_to_uint64 u)
| U64, U1 -> UInt8.rem (uint64_to_uint8 u) 2uy
| U64, U8 -> uint64_to_uint8 u
| U64, U16 -> uint64_to_uint16 u
| U64, U32 -> uint64_to_uint32 u
| U64, U64 -> u
| U64, U128 -> UInt128.uint64_to_uint128 u
| U64, S8 -> uint64_to_int8 u
| U64, S16 -> uint64_to_int16 u
| U64, S32 -> uint64_to_int32 u
| U64, S64 -> uint64_to_int64 u
| U64, S128 -> uint64_to_int128 u
| U128, U1 -> UInt8.rem (uint64_to_uint8 (uint128_to_uint64 u)) 2uy
| U128, U8 -> uint64_to_uint8 (UInt128.uint128_to_uint64 u)
| U128, U16 -> uint64_to_uint16 (UInt128.uint128_to_uint64 u)
| U128, U32 -> uint64_to_uint32 (UInt128.uint128_to_uint64 u)
| U128, U64 -> UInt128.uint128_to_uint64 u
| U128, U128 -> u
| U128, S8 -> uint64_to_int8 (UInt128.uint128_to_uint64 u)
| U128, S16 -> uint64_to_int16 (UInt128.uint128_to_uint64 u)
| U128, S32 -> uint64_to_int32 (UInt128.uint128_to_uint64 u)
| U128, S64 -> uint64_to_int64 (UInt128.uint128_to_uint64 u)
| U128, S128 -> uint128_to_int128 u
| S8, U1 -> UInt8.rem (int8_to_uint8 u) 2uy
| S8, U8 -> int8_to_uint8 u
| S8, U16 -> int8_to_uint16 u
| S8, U32 -> int8_to_uint32 u
| S8, U64 -> int8_to_uint64 u
| S8, U128 -> int64_to_uint128 (int8_to_int64 u)
| S8, S8 -> u
| S8, S16 -> int8_to_int16 u
| S8, S32 -> int8_to_int32 u
| S8, S64 -> int8_to_int64 u
| S8, S128 -> int64_to_int128 (int8_to_int64 u)
| S16, U1 -> UInt8.rem (int16_to_uint8 u) 2uy
| S16, U8 -> int16_to_uint8 u
| S16, U16 -> int16_to_uint16 u
| S16, U32 -> int16_to_uint32 u
| S16, U64 -> int16_to_uint64 u
| S16, U128 -> int64_to_uint128 (int16_to_int64 u)
| S16, S8 -> int16_to_int8 u
| S16, S16 -> u
| S16, S32 -> int16_to_int32 u
| S16, S64 -> int16_to_int64 u
| S16, S128 -> int64_to_int128 (int16_to_int64 u)
| S32, U1 -> UInt8.rem (int32_to_uint8 u) 2uy
| S32, U8 -> int32_to_uint8 u
| S32, U16 -> int32_to_uint16 u
| S32, U32 -> int32_to_uint32 u
| S32, U64 -> int32_to_uint64 u
| S32, U128 -> int64_to_uint128 (int32_to_int64 u)
| S32, S8 -> int32_to_int8 u
| S32, S16 -> int32_to_int16 u
| S32, S32 -> u
| S32, S64 -> int32_to_int64 u
| S32, S128 -> int64_to_int128 (int32_to_int64 u)
| S64, U1 -> UInt8.rem (int64_to_uint8 u) 2uy
| S64, U8 -> int64_to_uint8 u
| S64, U16 -> int64_to_uint16 u
| S64, U32 -> int64_to_uint32 u
| S64, U64 -> int64_to_uint64 u
| S64, U128 -> int64_to_uint128 u
| S64, S8 -> int64_to_int8 u
| S64, S16 -> int64_to_int16 u
| S64, S32 -> int64_to_int32 u
| S64, S64 -> u
| S64, S128 -> int64_to_int128 u
| S128, U1 -> UInt8.rem (uint64_to_uint8 (int128_to_uint64 u)) 2uy
| S128, U8 -> uint64_to_uint8 (int128_to_uint64 u)
| S128, U16 -> uint64_to_uint16 (int128_to_uint64 u)
| S128, U32 -> uint64_to_uint32 (int128_to_uint64 u)
| S128, U64 -> int128_to_uint64 u
| S128, U128 -> int128_to_uint128 u
| S128, S8 -> uint64_to_int8 (int128_to_uint64 u)
| S128, S16 -> uint64_to_int16 (int128_to_uint64 u)
| S128, S32 -> uint64_to_int32 (int128_to_uint64 u)
| S128, S64 -> uint64_to_int64 (int128_to_uint64 u)
| S128, S128 -> u
#pop-options
[@(strict_on_arguments [0])]
let ones t l =
match t with
| U1 -> 0x1uy
| U8 -> 0xFFuy
| U16 -> 0xFFFFus
| U32 -> 0xFFFFFFFFul
| U64 -> 0xFFFFFFFFFFFFFFFFuL
| U128 ->
let x = UInt128.uint64_to_uint128 0xFFFFFFFFFFFFFFFFuL in
let y = (UInt128.shift_left x 64ul) `UInt128.add` x in
assert_norm (UInt128.v y == pow2 128 - 1);
y
| _ -> mk_int (-1)
let zeros t l = mk_int 0
[@(strict_on_arguments [0])] | false | false | Lib.IntTypes.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 200,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val add_mod: #t:inttype{unsigned t} -> #l:secrecy_level
-> int_t t l
-> int_t t l
-> int_t t l | [] | Lib.IntTypes.add_mod | {
"file_name": "lib/Lib.IntTypes.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | a: Lib.IntTypes.int_t t l -> b: Lib.IntTypes.int_t t l -> Lib.IntTypes.int_t t l | {
"end_col": 31,
"end_line": 290,
"start_col": 2,
"start_line": 284
} |
Prims.Tot | val uint64_to_int128: a:UInt64.t -> b:Int128.t{Int128.v b == UInt64.v a} | [
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let uint64_to_int128 a = uint128_to_int128 (Int.Cast.Full.uint64_to_uint128 a) | val uint64_to_int128: a:UInt64.t -> b:Int128.t{Int128.v b == UInt64.v a}
let uint64_to_int128 a = | false | null | false | uint128_to_int128 (Int.Cast.Full.uint64_to_uint128 a) | {
"checked_file": "Lib.IntTypes.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt8.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt16.fsti.checked",
"FStar.UInt128.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int8.fsti.checked",
"FStar.Int64.fsti.checked",
"FStar.Int32.fsti.checked",
"FStar.Int16.fsti.checked",
"FStar.Int128.fsti.checked",
"FStar.Int.Cast.Full.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Int.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "Lib.IntTypes.fst"
} | [
"total"
] | [
"FStar.UInt64.t",
"Lib.IntTypes.uint128_to_int128",
"FStar.Int.Cast.Full.uint64_to_uint128",
"FStar.Int128.t",
"Prims.eq2",
"Prims.int",
"Prims.l_or",
"FStar.Int.size",
"FStar.Int128.n",
"FStar.UInt.size",
"FStar.UInt64.n",
"FStar.Int128.v",
"FStar.UInt64.v"
] | [] | module Lib.IntTypes
open FStar.Math.Lemmas
#push-options "--max_fuel 0 --max_ifuel 1 --z3rlimit 200"
let pow2_2 _ = assert_norm (pow2 2 = 4)
let pow2_3 _ = assert_norm (pow2 3 = 8)
let pow2_4 _ = assert_norm (pow2 4 = 16)
let pow2_127 _ = assert_norm (pow2 127 = 0x80000000000000000000000000000000)
let bits_numbytes t = ()
let sec_int_t t = pub_int_t t
let sec_int_v #t u = pub_int_v u
let secret #t x = x
[@(strict_on_arguments [0])]
let mk_int #t #l x =
match t with
| U1 -> UInt8.uint_to_t x
| U8 -> UInt8.uint_to_t x
| U16 -> UInt16.uint_to_t x
| U32 -> UInt32.uint_to_t x
| U64 -> UInt64.uint_to_t x
| U128 -> UInt128.uint_to_t x
| S8 -> Int8.int_to_t x
| S16 -> Int16.int_to_t x
| S32 -> Int32.int_to_t x
| S64 -> Int64.int_to_t x
| S128 -> Int128.int_to_t x
val v_extensionality:
#t:inttype
-> #l:secrecy_level
-> a:int_t t l
-> b:int_t t l
-> Lemma
(requires v a == v b)
(ensures a == b)
let v_extensionality #t #l a b =
match t with
| U1 -> ()
| U8 -> UInt8.v_inj a b
| U16 -> UInt16.v_inj a b
| U32 -> UInt32.v_inj a b
| U64 -> UInt64.v_inj a b
| U128 -> UInt128.v_inj a b
| S8 -> Int8.v_inj a b
| S16 -> Int16.v_inj a b
| S32 -> Int32.v_inj a b
| S64 -> Int64.v_inj a b
| S128 -> Int128.v_inj a b
let v_injective #t #l a =
v_extensionality a (mk_int (v a))
let v_mk_int #t #l n = ()
let u128 n = FStar.UInt128.uint64_to_uint128 (u64 n)
// KaRaMeL will extract this to FStar_Int128_int_to_t, which isn't provided
// We'll need to have FStar.Int128.int64_to_int128 to support int128_t literals
let i128 n =
assert_norm (pow2 (bits S64 - 1) <= pow2 (bits S128 - 1));
sint #S128 #SEC n
let size_to_uint32 x = x
let size_to_uint64 x = Int.Cast.uint32_to_uint64 x
let byte_to_uint8 x = x
let byte_to_int8 x = Int.Cast.uint8_to_int8 x
let op_At_Percent = Int.op_At_Percent
// FStar.UInt128 gets special treatment in KaRaMeL. There is no
// equivalent for FStar.Int128 at the moment, so we use the three
// assumed cast operators below.
//
// Using them will fail at runtime with an informative message.
// The commented-out implementations show that they are realizable.
//
// When support for `FStar.Int128` is added KaRaMeL, these casts must
// be added as special cases. When using builtin compiler support for
// `int128_t`, they can be implemented directly as C casts without
// undefined or implementation-defined behaviour.
assume
val uint128_to_int128: a:UInt128.t{v a <= maxint S128} -> b:Int128.t{Int128.v b == UInt128.v a}
//let uint128_to_int128 a = Int128.int_to_t (v a)
assume
val int128_to_uint128: a:Int128.t -> b:UInt128.t{UInt128.v b == Int128.v a % pow2 128}
//let int128_to_uint128 a = mk_int (v a % pow2 128)
assume
val int64_to_int128: a:Int64.t -> b:Int128.t{Int128.v b == Int64.v a}
//let int64_to_int128 a = Int128.int_to_t (v a) | false | false | Lib.IntTypes.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 200,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val uint64_to_int128: a:UInt64.t -> b:Int128.t{Int128.v b == UInt64.v a} | [] | Lib.IntTypes.uint64_to_int128 | {
"file_name": "lib/Lib.IntTypes.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | a: FStar.UInt64.t -> b: FStar.Int128.t{FStar.Int128.v b == FStar.UInt64.v a} | {
"end_col": 78,
"end_line": 105,
"start_col": 25,
"start_line": 105
} |
Prims.Tot | val int128_to_uint64: a:Int128.t -> b:UInt64.t{UInt64.v b == Int128.v a % pow2 64} | [
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let int128_to_uint64 a = Int.Cast.Full.uint128_to_uint64 (int128_to_uint128 a) | val int128_to_uint64: a:Int128.t -> b:UInt64.t{UInt64.v b == Int128.v a % pow2 64}
let int128_to_uint64 a = | false | null | false | Int.Cast.Full.uint128_to_uint64 (int128_to_uint128 a) | {
"checked_file": "Lib.IntTypes.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt8.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt16.fsti.checked",
"FStar.UInt128.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int8.fsti.checked",
"FStar.Int64.fsti.checked",
"FStar.Int32.fsti.checked",
"FStar.Int16.fsti.checked",
"FStar.Int128.fsti.checked",
"FStar.Int.Cast.Full.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Int.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "Lib.IntTypes.fst"
} | [
"total"
] | [
"FStar.Int128.t",
"FStar.Int.Cast.Full.uint128_to_uint64",
"Lib.IntTypes.int128_to_uint128",
"FStar.UInt64.t",
"Prims.eq2",
"Prims.int",
"FStar.UInt64.v",
"Prims.op_Modulus",
"FStar.Int128.v",
"Prims.pow2"
] | [] | module Lib.IntTypes
open FStar.Math.Lemmas
#push-options "--max_fuel 0 --max_ifuel 1 --z3rlimit 200"
let pow2_2 _ = assert_norm (pow2 2 = 4)
let pow2_3 _ = assert_norm (pow2 3 = 8)
let pow2_4 _ = assert_norm (pow2 4 = 16)
let pow2_127 _ = assert_norm (pow2 127 = 0x80000000000000000000000000000000)
let bits_numbytes t = ()
let sec_int_t t = pub_int_t t
let sec_int_v #t u = pub_int_v u
let secret #t x = x
[@(strict_on_arguments [0])]
let mk_int #t #l x =
match t with
| U1 -> UInt8.uint_to_t x
| U8 -> UInt8.uint_to_t x
| U16 -> UInt16.uint_to_t x
| U32 -> UInt32.uint_to_t x
| U64 -> UInt64.uint_to_t x
| U128 -> UInt128.uint_to_t x
| S8 -> Int8.int_to_t x
| S16 -> Int16.int_to_t x
| S32 -> Int32.int_to_t x
| S64 -> Int64.int_to_t x
| S128 -> Int128.int_to_t x
val v_extensionality:
#t:inttype
-> #l:secrecy_level
-> a:int_t t l
-> b:int_t t l
-> Lemma
(requires v a == v b)
(ensures a == b)
let v_extensionality #t #l a b =
match t with
| U1 -> ()
| U8 -> UInt8.v_inj a b
| U16 -> UInt16.v_inj a b
| U32 -> UInt32.v_inj a b
| U64 -> UInt64.v_inj a b
| U128 -> UInt128.v_inj a b
| S8 -> Int8.v_inj a b
| S16 -> Int16.v_inj a b
| S32 -> Int32.v_inj a b
| S64 -> Int64.v_inj a b
| S128 -> Int128.v_inj a b
let v_injective #t #l a =
v_extensionality a (mk_int (v a))
let v_mk_int #t #l n = ()
let u128 n = FStar.UInt128.uint64_to_uint128 (u64 n)
// KaRaMeL will extract this to FStar_Int128_int_to_t, which isn't provided
// We'll need to have FStar.Int128.int64_to_int128 to support int128_t literals
let i128 n =
assert_norm (pow2 (bits S64 - 1) <= pow2 (bits S128 - 1));
sint #S128 #SEC n
let size_to_uint32 x = x
let size_to_uint64 x = Int.Cast.uint32_to_uint64 x
let byte_to_uint8 x = x
let byte_to_int8 x = Int.Cast.uint8_to_int8 x
let op_At_Percent = Int.op_At_Percent
// FStar.UInt128 gets special treatment in KaRaMeL. There is no
// equivalent for FStar.Int128 at the moment, so we use the three
// assumed cast operators below.
//
// Using them will fail at runtime with an informative message.
// The commented-out implementations show that they are realizable.
//
// When support for `FStar.Int128` is added KaRaMeL, these casts must
// be added as special cases. When using builtin compiler support for
// `int128_t`, they can be implemented directly as C casts without
// undefined or implementation-defined behaviour.
assume
val uint128_to_int128: a:UInt128.t{v a <= maxint S128} -> b:Int128.t{Int128.v b == UInt128.v a}
//let uint128_to_int128 a = Int128.int_to_t (v a)
assume
val int128_to_uint128: a:Int128.t -> b:UInt128.t{UInt128.v b == Int128.v a % pow2 128}
//let int128_to_uint128 a = mk_int (v a % pow2 128)
assume
val int64_to_int128: a:Int64.t -> b:Int128.t{Int128.v b == Int64.v a}
//let int64_to_int128 a = Int128.int_to_t (v a)
val uint64_to_int128: a:UInt64.t -> b:Int128.t{Int128.v b == UInt64.v a}
let uint64_to_int128 a = uint128_to_int128 (Int.Cast.Full.uint64_to_uint128 a)
val int64_to_uint128: a:Int64.t -> b:UInt128.t{UInt128.v b == Int64.v a % pow2 128}
let int64_to_uint128 a = int128_to_uint128 (int64_to_int128 a) | false | false | Lib.IntTypes.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 200,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val int128_to_uint64: a:Int128.t -> b:UInt64.t{UInt64.v b == Int128.v a % pow2 64} | [] | Lib.IntTypes.int128_to_uint64 | {
"file_name": "lib/Lib.IntTypes.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | a: FStar.Int128.t -> b: FStar.UInt64.t{FStar.UInt64.v b == FStar.Int128.v a % Prims.pow2 64} | {
"end_col": 78,
"end_line": 111,
"start_col": 25,
"start_line": 111
} |
Prims.Tot | val decr: #t:inttype -> #l:secrecy_level
-> a:int_t t l{minint t < v a}
-> int_t t l | [
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let decr #t #l a =
match t with
| U1 -> UInt8.sub a 1uy
| U8 -> UInt8.sub a 1uy
| U16 -> UInt16.sub a 1us
| U32 -> UInt32.sub a 1ul
| U64 -> UInt64.sub a 1uL
| U128 -> UInt128.sub a (UInt128.uint_to_t 1)
| S8 -> Int8.sub a 1y
| S16 -> Int16.sub a 1s
| S32 -> Int32.sub a 1l
| S64 -> Int64.sub a 1L
| S128 -> Int128.sub a (Int128.int_to_t 1) | val decr: #t:inttype -> #l:secrecy_level
-> a:int_t t l{minint t < v a}
-> int_t t l
let decr #t #l a = | false | null | false | match t with
| U1 -> UInt8.sub a 1uy
| U8 -> UInt8.sub a 1uy
| U16 -> UInt16.sub a 1us
| U32 -> UInt32.sub a 1ul
| U64 -> UInt64.sub a 1uL
| U128 -> UInt128.sub a (UInt128.uint_to_t 1)
| S8 -> Int8.sub a 1y
| S16 -> Int16.sub a 1s
| S32 -> Int32.sub a 1l
| S64 -> Int64.sub a 1L
| S128 -> Int128.sub a (Int128.int_to_t 1) | {
"checked_file": "Lib.IntTypes.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt8.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt16.fsti.checked",
"FStar.UInt128.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int8.fsti.checked",
"FStar.Int64.fsti.checked",
"FStar.Int32.fsti.checked",
"FStar.Int16.fsti.checked",
"FStar.Int128.fsti.checked",
"FStar.Int.Cast.Full.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Int.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "Lib.IntTypes.fst"
} | [
"total"
] | [
"Lib.IntTypes.inttype",
"Lib.IntTypes.secrecy_level",
"Lib.IntTypes.int_t",
"Prims.b2t",
"Prims.op_LessThan",
"Lib.IntTypes.minint",
"Lib.IntTypes.v",
"FStar.UInt8.sub",
"FStar.UInt8.__uint_to_t",
"FStar.UInt16.sub",
"FStar.UInt16.__uint_to_t",
"FStar.UInt32.sub",
"FStar.UInt32.__uint_to_t",
"FStar.UInt64.sub",
"FStar.UInt64.__uint_to_t",
"FStar.UInt128.sub",
"FStar.UInt128.uint_to_t",
"FStar.Int8.sub",
"FStar.Int8.__int_to_t",
"FStar.Int16.sub",
"FStar.Int16.__int_to_t",
"FStar.Int32.sub",
"FStar.Int32.__int_to_t",
"FStar.Int64.sub",
"FStar.Int64.__int_to_t",
"FStar.Int128.sub",
"FStar.Int128.int_to_t"
] | [] | module Lib.IntTypes
open FStar.Math.Lemmas
#push-options "--max_fuel 0 --max_ifuel 1 --z3rlimit 200"
let pow2_2 _ = assert_norm (pow2 2 = 4)
let pow2_3 _ = assert_norm (pow2 3 = 8)
let pow2_4 _ = assert_norm (pow2 4 = 16)
let pow2_127 _ = assert_norm (pow2 127 = 0x80000000000000000000000000000000)
let bits_numbytes t = ()
let sec_int_t t = pub_int_t t
let sec_int_v #t u = pub_int_v u
let secret #t x = x
[@(strict_on_arguments [0])]
let mk_int #t #l x =
match t with
| U1 -> UInt8.uint_to_t x
| U8 -> UInt8.uint_to_t x
| U16 -> UInt16.uint_to_t x
| U32 -> UInt32.uint_to_t x
| U64 -> UInt64.uint_to_t x
| U128 -> UInt128.uint_to_t x
| S8 -> Int8.int_to_t x
| S16 -> Int16.int_to_t x
| S32 -> Int32.int_to_t x
| S64 -> Int64.int_to_t x
| S128 -> Int128.int_to_t x
val v_extensionality:
#t:inttype
-> #l:secrecy_level
-> a:int_t t l
-> b:int_t t l
-> Lemma
(requires v a == v b)
(ensures a == b)
let v_extensionality #t #l a b =
match t with
| U1 -> ()
| U8 -> UInt8.v_inj a b
| U16 -> UInt16.v_inj a b
| U32 -> UInt32.v_inj a b
| U64 -> UInt64.v_inj a b
| U128 -> UInt128.v_inj a b
| S8 -> Int8.v_inj a b
| S16 -> Int16.v_inj a b
| S32 -> Int32.v_inj a b
| S64 -> Int64.v_inj a b
| S128 -> Int128.v_inj a b
let v_injective #t #l a =
v_extensionality a (mk_int (v a))
let v_mk_int #t #l n = ()
let u128 n = FStar.UInt128.uint64_to_uint128 (u64 n)
// KaRaMeL will extract this to FStar_Int128_int_to_t, which isn't provided
// We'll need to have FStar.Int128.int64_to_int128 to support int128_t literals
let i128 n =
assert_norm (pow2 (bits S64 - 1) <= pow2 (bits S128 - 1));
sint #S128 #SEC n
let size_to_uint32 x = x
let size_to_uint64 x = Int.Cast.uint32_to_uint64 x
let byte_to_uint8 x = x
let byte_to_int8 x = Int.Cast.uint8_to_int8 x
let op_At_Percent = Int.op_At_Percent
// FStar.UInt128 gets special treatment in KaRaMeL. There is no
// equivalent for FStar.Int128 at the moment, so we use the three
// assumed cast operators below.
//
// Using them will fail at runtime with an informative message.
// The commented-out implementations show that they are realizable.
//
// When support for `FStar.Int128` is added KaRaMeL, these casts must
// be added as special cases. When using builtin compiler support for
// `int128_t`, they can be implemented directly as C casts without
// undefined or implementation-defined behaviour.
assume
val uint128_to_int128: a:UInt128.t{v a <= maxint S128} -> b:Int128.t{Int128.v b == UInt128.v a}
//let uint128_to_int128 a = Int128.int_to_t (v a)
assume
val int128_to_uint128: a:Int128.t -> b:UInt128.t{UInt128.v b == Int128.v a % pow2 128}
//let int128_to_uint128 a = mk_int (v a % pow2 128)
assume
val int64_to_int128: a:Int64.t -> b:Int128.t{Int128.v b == Int64.v a}
//let int64_to_int128 a = Int128.int_to_t (v a)
val uint64_to_int128: a:UInt64.t -> b:Int128.t{Int128.v b == UInt64.v a}
let uint64_to_int128 a = uint128_to_int128 (Int.Cast.Full.uint64_to_uint128 a)
val int64_to_uint128: a:Int64.t -> b:UInt128.t{UInt128.v b == Int64.v a % pow2 128}
let int64_to_uint128 a = int128_to_uint128 (int64_to_int128 a)
val int128_to_uint64: a:Int128.t -> b:UInt64.t{UInt64.v b == Int128.v a % pow2 64}
let int128_to_uint64 a = Int.Cast.Full.uint128_to_uint64 (int128_to_uint128 a)
#push-options "--z3rlimit 1000"
[@(strict_on_arguments [0;2])]
let cast #t #l t' l' u =
assert_norm (pow2 8 = 2 * pow2 7);
assert_norm (pow2 16 = 2 * pow2 15);
assert_norm (pow2 64 * pow2 64 = pow2 128);
assert_norm (pow2 16 * pow2 48 = pow2 64);
assert_norm (pow2 8 * pow2 56 = pow2 64);
assert_norm (pow2 32 * pow2 32 = pow2 64);
modulo_modulo_lemma (v u) (pow2 32) (pow2 32);
modulo_modulo_lemma (v u) (pow2 64) (pow2 64);
modulo_modulo_lemma (v u) (pow2 128) (pow2 64);
modulo_modulo_lemma (v u) (pow2 16) (pow2 48);
modulo_modulo_lemma (v u) (pow2 8) (pow2 56);
let open FStar.Int.Cast in
let open FStar.Int.Cast.Full in
match t, t' with
| U1, U1 -> u
| U1, U8 -> u
| U1, U16 -> uint8_to_uint16 u
| U1, U32 -> uint8_to_uint32 u
| U1, U64 -> uint8_to_uint64 u
| U1, U128 -> UInt128.uint64_to_uint128 (uint8_to_uint64 u)
| U1, S8 -> uint8_to_int8 u
| U1, S16 -> uint8_to_int16 u
| U1, S32 -> uint8_to_int32 u
| U1, S64 -> uint8_to_int64 u
| U1, S128 -> uint64_to_int128 (uint8_to_uint64 u)
| U8, U1 -> UInt8.rem u 2uy
| U8, U8 -> u
| U8, U16 -> uint8_to_uint16 u
| U8, U32 -> uint8_to_uint32 u
| U8, U64 -> uint8_to_uint64 u
| U8, U128 -> UInt128.uint64_to_uint128 (uint8_to_uint64 u)
| U8, S8 -> uint8_to_int8 u
| U8, S16 -> uint8_to_int16 u
| U8, S32 -> uint8_to_int32 u
| U8, S64 -> uint8_to_int64 u
| U8, S128 -> uint64_to_int128 (uint8_to_uint64 u)
| U16, U1 -> UInt8.rem (uint16_to_uint8 u) 2uy
| U16, U8 -> uint16_to_uint8 u
| U16, U16 -> u
| U16, U32 -> uint16_to_uint32 u
| U16, U64 -> uint16_to_uint64 u
| U16, U128 -> UInt128.uint64_to_uint128 (uint16_to_uint64 u)
| U16, S8 -> uint16_to_int8 u
| U16, S16 -> uint16_to_int16 u
| U16, S32 -> uint16_to_int32 u
| U16, S64 -> uint16_to_int64 u
| U16, S128 -> uint64_to_int128 (uint16_to_uint64 u)
| U32, U1 -> UInt8.rem (uint32_to_uint8 u) 2uy
| U32, U8 -> uint32_to_uint8 u
| U32, U16 -> uint32_to_uint16 u
| U32, U32 -> u
| U32, U64 -> uint32_to_uint64 u
| U32, U128 -> UInt128.uint64_to_uint128 (uint32_to_uint64 u)
| U32, S8 -> uint32_to_int8 u
| U32, S16 -> uint32_to_int16 u
| U32, S32 -> uint32_to_int32 u
| U32, S64 -> uint32_to_int64 u
| U32, S128 -> uint64_to_int128 (uint32_to_uint64 u)
| U64, U1 -> UInt8.rem (uint64_to_uint8 u) 2uy
| U64, U8 -> uint64_to_uint8 u
| U64, U16 -> uint64_to_uint16 u
| U64, U32 -> uint64_to_uint32 u
| U64, U64 -> u
| U64, U128 -> UInt128.uint64_to_uint128 u
| U64, S8 -> uint64_to_int8 u
| U64, S16 -> uint64_to_int16 u
| U64, S32 -> uint64_to_int32 u
| U64, S64 -> uint64_to_int64 u
| U64, S128 -> uint64_to_int128 u
| U128, U1 -> UInt8.rem (uint64_to_uint8 (uint128_to_uint64 u)) 2uy
| U128, U8 -> uint64_to_uint8 (UInt128.uint128_to_uint64 u)
| U128, U16 -> uint64_to_uint16 (UInt128.uint128_to_uint64 u)
| U128, U32 -> uint64_to_uint32 (UInt128.uint128_to_uint64 u)
| U128, U64 -> UInt128.uint128_to_uint64 u
| U128, U128 -> u
| U128, S8 -> uint64_to_int8 (UInt128.uint128_to_uint64 u)
| U128, S16 -> uint64_to_int16 (UInt128.uint128_to_uint64 u)
| U128, S32 -> uint64_to_int32 (UInt128.uint128_to_uint64 u)
| U128, S64 -> uint64_to_int64 (UInt128.uint128_to_uint64 u)
| U128, S128 -> uint128_to_int128 u
| S8, U1 -> UInt8.rem (int8_to_uint8 u) 2uy
| S8, U8 -> int8_to_uint8 u
| S8, U16 -> int8_to_uint16 u
| S8, U32 -> int8_to_uint32 u
| S8, U64 -> int8_to_uint64 u
| S8, U128 -> int64_to_uint128 (int8_to_int64 u)
| S8, S8 -> u
| S8, S16 -> int8_to_int16 u
| S8, S32 -> int8_to_int32 u
| S8, S64 -> int8_to_int64 u
| S8, S128 -> int64_to_int128 (int8_to_int64 u)
| S16, U1 -> UInt8.rem (int16_to_uint8 u) 2uy
| S16, U8 -> int16_to_uint8 u
| S16, U16 -> int16_to_uint16 u
| S16, U32 -> int16_to_uint32 u
| S16, U64 -> int16_to_uint64 u
| S16, U128 -> int64_to_uint128 (int16_to_int64 u)
| S16, S8 -> int16_to_int8 u
| S16, S16 -> u
| S16, S32 -> int16_to_int32 u
| S16, S64 -> int16_to_int64 u
| S16, S128 -> int64_to_int128 (int16_to_int64 u)
| S32, U1 -> UInt8.rem (int32_to_uint8 u) 2uy
| S32, U8 -> int32_to_uint8 u
| S32, U16 -> int32_to_uint16 u
| S32, U32 -> int32_to_uint32 u
| S32, U64 -> int32_to_uint64 u
| S32, U128 -> int64_to_uint128 (int32_to_int64 u)
| S32, S8 -> int32_to_int8 u
| S32, S16 -> int32_to_int16 u
| S32, S32 -> u
| S32, S64 -> int32_to_int64 u
| S32, S128 -> int64_to_int128 (int32_to_int64 u)
| S64, U1 -> UInt8.rem (int64_to_uint8 u) 2uy
| S64, U8 -> int64_to_uint8 u
| S64, U16 -> int64_to_uint16 u
| S64, U32 -> int64_to_uint32 u
| S64, U64 -> int64_to_uint64 u
| S64, U128 -> int64_to_uint128 u
| S64, S8 -> int64_to_int8 u
| S64, S16 -> int64_to_int16 u
| S64, S32 -> int64_to_int32 u
| S64, S64 -> u
| S64, S128 -> int64_to_int128 u
| S128, U1 -> UInt8.rem (uint64_to_uint8 (int128_to_uint64 u)) 2uy
| S128, U8 -> uint64_to_uint8 (int128_to_uint64 u)
| S128, U16 -> uint64_to_uint16 (int128_to_uint64 u)
| S128, U32 -> uint64_to_uint32 (int128_to_uint64 u)
| S128, U64 -> int128_to_uint64 u
| S128, U128 -> int128_to_uint128 u
| S128, S8 -> uint64_to_int8 (int128_to_uint64 u)
| S128, S16 -> uint64_to_int16 (int128_to_uint64 u)
| S128, S32 -> uint64_to_int32 (int128_to_uint64 u)
| S128, S64 -> uint64_to_int64 (int128_to_uint64 u)
| S128, S128 -> u
#pop-options
[@(strict_on_arguments [0])]
let ones t l =
match t with
| U1 -> 0x1uy
| U8 -> 0xFFuy
| U16 -> 0xFFFFus
| U32 -> 0xFFFFFFFFul
| U64 -> 0xFFFFFFFFFFFFFFFFuL
| U128 ->
let x = UInt128.uint64_to_uint128 0xFFFFFFFFFFFFFFFFuL in
let y = (UInt128.shift_left x 64ul) `UInt128.add` x in
assert_norm (UInt128.v y == pow2 128 - 1);
y
| _ -> mk_int (-1)
let zeros t l = mk_int 0
[@(strict_on_arguments [0])]
let add_mod #t #l a b =
match t with
| U1 -> UInt8.rem (UInt8.add_mod a b) 2uy
| U8 -> UInt8.add_mod a b
| U16 -> UInt16.add_mod a b
| U32 -> UInt32.add_mod a b
| U64 -> UInt64.add_mod a b
| U128 -> UInt128.add_mod a b
let add_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let add #t #l a b =
match t with
| U1 -> UInt8.add a b
| U8 -> UInt8.add a b
| U16 -> UInt16.add a b
| U32 -> UInt32.add a b
| U64 -> UInt64.add a b
| U128 -> UInt128.add a b
| S8 -> Int8.add a b
| S16 -> Int16.add a b
| S32 -> Int32.add a b
| S64 -> Int64.add a b
| S128 -> Int128.add a b
let add_lemma #t #l a b = ()
#push-options "--max_fuel 1"
[@(strict_on_arguments [0])]
let incr #t #l a =
match t with
| U1 -> UInt8.add a 1uy
| U8 -> UInt8.add a 1uy
| U16 -> UInt16.add a 1us
| U32 -> UInt32.add a 1ul
| U64 -> UInt64.add a 1uL
| U128 -> UInt128.add a (UInt128.uint_to_t 1)
| S8 -> Int8.add a 1y
| S16 -> Int16.add a 1s
| S32 -> Int32.add a 1l
| S64 -> Int64.add a 1L
| S128 -> Int128.add a (Int128.int_to_t 1)
let incr_lemma #t #l a = ()
#pop-options
[@(strict_on_arguments [0])]
let mul_mod #t #l a b =
match t with
| U1 -> UInt8.mul_mod a b
| U8 -> UInt8.mul_mod a b
| U16 -> UInt16.mul_mod a b
| U32 -> UInt32.mul_mod a b
| U64 -> UInt64.mul_mod a b
let mul_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let mul #t #l a b =
match t with
| U1 -> UInt8.mul a b
| U8 -> UInt8.mul a b
| U16 -> UInt16.mul a b
| U32 -> UInt32.mul a b
| U64 -> UInt64.mul a b
| S8 -> Int8.mul a b
| S16 -> Int16.mul a b
| S32 -> Int32.mul a b
| S64 -> Int64.mul a b
let mul_lemma #t #l a b = ()
let mul64_wide a b = UInt128.mul_wide a b
let mul64_wide_lemma a b = ()
let mul_s64_wide a b = Int128.mul_wide a b
let mul_s64_wide_lemma a b = ()
[@(strict_on_arguments [0])]
let sub_mod #t #l a b =
match t with
| U1 -> UInt8.rem (UInt8.sub_mod a b) 2uy
| U8 -> UInt8.sub_mod a b
| U16 -> UInt16.sub_mod a b
| U32 -> UInt32.sub_mod a b
| U64 -> UInt64.sub_mod a b
| U128 -> UInt128.sub_mod a b
let sub_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let sub #t #l a b =
match t with
| U1 -> UInt8.sub a b
| U8 -> UInt8.sub a b
| U16 -> UInt16.sub a b
| U32 -> UInt32.sub a b
| U64 -> UInt64.sub a b
| U128 -> UInt128.sub a b
| S8 -> Int8.sub a b
| S16 -> Int16.sub a b
| S32 -> Int32.sub a b
| S64 -> Int64.sub a b
| S128 -> Int128.sub a b
let sub_lemma #t #l a b = ()
#push-options "--max_fuel 1"
[@(strict_on_arguments [0])] | false | false | Lib.IntTypes.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 200,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val decr: #t:inttype -> #l:secrecy_level
-> a:int_t t l{minint t < v a}
-> int_t t l | [] | Lib.IntTypes.decr | {
"file_name": "lib/Lib.IntTypes.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | a: Lib.IntTypes.int_t t l {Lib.IntTypes.minint t < Lib.IntTypes.v a} -> Lib.IntTypes.int_t t l | {
"end_col": 44,
"end_line": 410,
"start_col": 2,
"start_line": 399
} |
Prims.Tot | val add: #t:inttype -> #l:secrecy_level
-> a:int_t t l
-> b:int_t t l{range (v a + v b) t}
-> int_t t l | [
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let add #t #l a b =
match t with
| U1 -> UInt8.add a b
| U8 -> UInt8.add a b
| U16 -> UInt16.add a b
| U32 -> UInt32.add a b
| U64 -> UInt64.add a b
| U128 -> UInt128.add a b
| S8 -> Int8.add a b
| S16 -> Int16.add a b
| S32 -> Int32.add a b
| S64 -> Int64.add a b
| S128 -> Int128.add a b | val add: #t:inttype -> #l:secrecy_level
-> a:int_t t l
-> b:int_t t l{range (v a + v b) t}
-> int_t t l
let add #t #l a b = | false | null | false | match t with
| U1 -> UInt8.add a b
| U8 -> UInt8.add a b
| U16 -> UInt16.add a b
| U32 -> UInt32.add a b
| U64 -> UInt64.add a b
| U128 -> UInt128.add a b
| S8 -> Int8.add a b
| S16 -> Int16.add a b
| S32 -> Int32.add a b
| S64 -> Int64.add a b
| S128 -> Int128.add a b | {
"checked_file": "Lib.IntTypes.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt8.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt16.fsti.checked",
"FStar.UInt128.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int8.fsti.checked",
"FStar.Int64.fsti.checked",
"FStar.Int32.fsti.checked",
"FStar.Int16.fsti.checked",
"FStar.Int128.fsti.checked",
"FStar.Int.Cast.Full.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Int.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "Lib.IntTypes.fst"
} | [
"total"
] | [
"Lib.IntTypes.inttype",
"Lib.IntTypes.secrecy_level",
"Lib.IntTypes.int_t",
"Lib.IntTypes.range",
"Prims.op_Addition",
"Lib.IntTypes.v",
"FStar.UInt8.add",
"FStar.UInt16.add",
"FStar.UInt32.add",
"FStar.UInt64.add",
"FStar.UInt128.add",
"FStar.Int8.add",
"FStar.Int16.add",
"FStar.Int32.add",
"FStar.Int64.add",
"FStar.Int128.add"
] | [] | module Lib.IntTypes
open FStar.Math.Lemmas
#push-options "--max_fuel 0 --max_ifuel 1 --z3rlimit 200"
let pow2_2 _ = assert_norm (pow2 2 = 4)
let pow2_3 _ = assert_norm (pow2 3 = 8)
let pow2_4 _ = assert_norm (pow2 4 = 16)
let pow2_127 _ = assert_norm (pow2 127 = 0x80000000000000000000000000000000)
let bits_numbytes t = ()
let sec_int_t t = pub_int_t t
let sec_int_v #t u = pub_int_v u
let secret #t x = x
[@(strict_on_arguments [0])]
let mk_int #t #l x =
match t with
| U1 -> UInt8.uint_to_t x
| U8 -> UInt8.uint_to_t x
| U16 -> UInt16.uint_to_t x
| U32 -> UInt32.uint_to_t x
| U64 -> UInt64.uint_to_t x
| U128 -> UInt128.uint_to_t x
| S8 -> Int8.int_to_t x
| S16 -> Int16.int_to_t x
| S32 -> Int32.int_to_t x
| S64 -> Int64.int_to_t x
| S128 -> Int128.int_to_t x
val v_extensionality:
#t:inttype
-> #l:secrecy_level
-> a:int_t t l
-> b:int_t t l
-> Lemma
(requires v a == v b)
(ensures a == b)
let v_extensionality #t #l a b =
match t with
| U1 -> ()
| U8 -> UInt8.v_inj a b
| U16 -> UInt16.v_inj a b
| U32 -> UInt32.v_inj a b
| U64 -> UInt64.v_inj a b
| U128 -> UInt128.v_inj a b
| S8 -> Int8.v_inj a b
| S16 -> Int16.v_inj a b
| S32 -> Int32.v_inj a b
| S64 -> Int64.v_inj a b
| S128 -> Int128.v_inj a b
let v_injective #t #l a =
v_extensionality a (mk_int (v a))
let v_mk_int #t #l n = ()
let u128 n = FStar.UInt128.uint64_to_uint128 (u64 n)
// KaRaMeL will extract this to FStar_Int128_int_to_t, which isn't provided
// We'll need to have FStar.Int128.int64_to_int128 to support int128_t literals
let i128 n =
assert_norm (pow2 (bits S64 - 1) <= pow2 (bits S128 - 1));
sint #S128 #SEC n
let size_to_uint32 x = x
let size_to_uint64 x = Int.Cast.uint32_to_uint64 x
let byte_to_uint8 x = x
let byte_to_int8 x = Int.Cast.uint8_to_int8 x
let op_At_Percent = Int.op_At_Percent
// FStar.UInt128 gets special treatment in KaRaMeL. There is no
// equivalent for FStar.Int128 at the moment, so we use the three
// assumed cast operators below.
//
// Using them will fail at runtime with an informative message.
// The commented-out implementations show that they are realizable.
//
// When support for `FStar.Int128` is added KaRaMeL, these casts must
// be added as special cases. When using builtin compiler support for
// `int128_t`, they can be implemented directly as C casts without
// undefined or implementation-defined behaviour.
assume
val uint128_to_int128: a:UInt128.t{v a <= maxint S128} -> b:Int128.t{Int128.v b == UInt128.v a}
//let uint128_to_int128 a = Int128.int_to_t (v a)
assume
val int128_to_uint128: a:Int128.t -> b:UInt128.t{UInt128.v b == Int128.v a % pow2 128}
//let int128_to_uint128 a = mk_int (v a % pow2 128)
assume
val int64_to_int128: a:Int64.t -> b:Int128.t{Int128.v b == Int64.v a}
//let int64_to_int128 a = Int128.int_to_t (v a)
val uint64_to_int128: a:UInt64.t -> b:Int128.t{Int128.v b == UInt64.v a}
let uint64_to_int128 a = uint128_to_int128 (Int.Cast.Full.uint64_to_uint128 a)
val int64_to_uint128: a:Int64.t -> b:UInt128.t{UInt128.v b == Int64.v a % pow2 128}
let int64_to_uint128 a = int128_to_uint128 (int64_to_int128 a)
val int128_to_uint64: a:Int128.t -> b:UInt64.t{UInt64.v b == Int128.v a % pow2 64}
let int128_to_uint64 a = Int.Cast.Full.uint128_to_uint64 (int128_to_uint128 a)
#push-options "--z3rlimit 1000"
[@(strict_on_arguments [0;2])]
let cast #t #l t' l' u =
assert_norm (pow2 8 = 2 * pow2 7);
assert_norm (pow2 16 = 2 * pow2 15);
assert_norm (pow2 64 * pow2 64 = pow2 128);
assert_norm (pow2 16 * pow2 48 = pow2 64);
assert_norm (pow2 8 * pow2 56 = pow2 64);
assert_norm (pow2 32 * pow2 32 = pow2 64);
modulo_modulo_lemma (v u) (pow2 32) (pow2 32);
modulo_modulo_lemma (v u) (pow2 64) (pow2 64);
modulo_modulo_lemma (v u) (pow2 128) (pow2 64);
modulo_modulo_lemma (v u) (pow2 16) (pow2 48);
modulo_modulo_lemma (v u) (pow2 8) (pow2 56);
let open FStar.Int.Cast in
let open FStar.Int.Cast.Full in
match t, t' with
| U1, U1 -> u
| U1, U8 -> u
| U1, U16 -> uint8_to_uint16 u
| U1, U32 -> uint8_to_uint32 u
| U1, U64 -> uint8_to_uint64 u
| U1, U128 -> UInt128.uint64_to_uint128 (uint8_to_uint64 u)
| U1, S8 -> uint8_to_int8 u
| U1, S16 -> uint8_to_int16 u
| U1, S32 -> uint8_to_int32 u
| U1, S64 -> uint8_to_int64 u
| U1, S128 -> uint64_to_int128 (uint8_to_uint64 u)
| U8, U1 -> UInt8.rem u 2uy
| U8, U8 -> u
| U8, U16 -> uint8_to_uint16 u
| U8, U32 -> uint8_to_uint32 u
| U8, U64 -> uint8_to_uint64 u
| U8, U128 -> UInt128.uint64_to_uint128 (uint8_to_uint64 u)
| U8, S8 -> uint8_to_int8 u
| U8, S16 -> uint8_to_int16 u
| U8, S32 -> uint8_to_int32 u
| U8, S64 -> uint8_to_int64 u
| U8, S128 -> uint64_to_int128 (uint8_to_uint64 u)
| U16, U1 -> UInt8.rem (uint16_to_uint8 u) 2uy
| U16, U8 -> uint16_to_uint8 u
| U16, U16 -> u
| U16, U32 -> uint16_to_uint32 u
| U16, U64 -> uint16_to_uint64 u
| U16, U128 -> UInt128.uint64_to_uint128 (uint16_to_uint64 u)
| U16, S8 -> uint16_to_int8 u
| U16, S16 -> uint16_to_int16 u
| U16, S32 -> uint16_to_int32 u
| U16, S64 -> uint16_to_int64 u
| U16, S128 -> uint64_to_int128 (uint16_to_uint64 u)
| U32, U1 -> UInt8.rem (uint32_to_uint8 u) 2uy
| U32, U8 -> uint32_to_uint8 u
| U32, U16 -> uint32_to_uint16 u
| U32, U32 -> u
| U32, U64 -> uint32_to_uint64 u
| U32, U128 -> UInt128.uint64_to_uint128 (uint32_to_uint64 u)
| U32, S8 -> uint32_to_int8 u
| U32, S16 -> uint32_to_int16 u
| U32, S32 -> uint32_to_int32 u
| U32, S64 -> uint32_to_int64 u
| U32, S128 -> uint64_to_int128 (uint32_to_uint64 u)
| U64, U1 -> UInt8.rem (uint64_to_uint8 u) 2uy
| U64, U8 -> uint64_to_uint8 u
| U64, U16 -> uint64_to_uint16 u
| U64, U32 -> uint64_to_uint32 u
| U64, U64 -> u
| U64, U128 -> UInt128.uint64_to_uint128 u
| U64, S8 -> uint64_to_int8 u
| U64, S16 -> uint64_to_int16 u
| U64, S32 -> uint64_to_int32 u
| U64, S64 -> uint64_to_int64 u
| U64, S128 -> uint64_to_int128 u
| U128, U1 -> UInt8.rem (uint64_to_uint8 (uint128_to_uint64 u)) 2uy
| U128, U8 -> uint64_to_uint8 (UInt128.uint128_to_uint64 u)
| U128, U16 -> uint64_to_uint16 (UInt128.uint128_to_uint64 u)
| U128, U32 -> uint64_to_uint32 (UInt128.uint128_to_uint64 u)
| U128, U64 -> UInt128.uint128_to_uint64 u
| U128, U128 -> u
| U128, S8 -> uint64_to_int8 (UInt128.uint128_to_uint64 u)
| U128, S16 -> uint64_to_int16 (UInt128.uint128_to_uint64 u)
| U128, S32 -> uint64_to_int32 (UInt128.uint128_to_uint64 u)
| U128, S64 -> uint64_to_int64 (UInt128.uint128_to_uint64 u)
| U128, S128 -> uint128_to_int128 u
| S8, U1 -> UInt8.rem (int8_to_uint8 u) 2uy
| S8, U8 -> int8_to_uint8 u
| S8, U16 -> int8_to_uint16 u
| S8, U32 -> int8_to_uint32 u
| S8, U64 -> int8_to_uint64 u
| S8, U128 -> int64_to_uint128 (int8_to_int64 u)
| S8, S8 -> u
| S8, S16 -> int8_to_int16 u
| S8, S32 -> int8_to_int32 u
| S8, S64 -> int8_to_int64 u
| S8, S128 -> int64_to_int128 (int8_to_int64 u)
| S16, U1 -> UInt8.rem (int16_to_uint8 u) 2uy
| S16, U8 -> int16_to_uint8 u
| S16, U16 -> int16_to_uint16 u
| S16, U32 -> int16_to_uint32 u
| S16, U64 -> int16_to_uint64 u
| S16, U128 -> int64_to_uint128 (int16_to_int64 u)
| S16, S8 -> int16_to_int8 u
| S16, S16 -> u
| S16, S32 -> int16_to_int32 u
| S16, S64 -> int16_to_int64 u
| S16, S128 -> int64_to_int128 (int16_to_int64 u)
| S32, U1 -> UInt8.rem (int32_to_uint8 u) 2uy
| S32, U8 -> int32_to_uint8 u
| S32, U16 -> int32_to_uint16 u
| S32, U32 -> int32_to_uint32 u
| S32, U64 -> int32_to_uint64 u
| S32, U128 -> int64_to_uint128 (int32_to_int64 u)
| S32, S8 -> int32_to_int8 u
| S32, S16 -> int32_to_int16 u
| S32, S32 -> u
| S32, S64 -> int32_to_int64 u
| S32, S128 -> int64_to_int128 (int32_to_int64 u)
| S64, U1 -> UInt8.rem (int64_to_uint8 u) 2uy
| S64, U8 -> int64_to_uint8 u
| S64, U16 -> int64_to_uint16 u
| S64, U32 -> int64_to_uint32 u
| S64, U64 -> int64_to_uint64 u
| S64, U128 -> int64_to_uint128 u
| S64, S8 -> int64_to_int8 u
| S64, S16 -> int64_to_int16 u
| S64, S32 -> int64_to_int32 u
| S64, S64 -> u
| S64, S128 -> int64_to_int128 u
| S128, U1 -> UInt8.rem (uint64_to_uint8 (int128_to_uint64 u)) 2uy
| S128, U8 -> uint64_to_uint8 (int128_to_uint64 u)
| S128, U16 -> uint64_to_uint16 (int128_to_uint64 u)
| S128, U32 -> uint64_to_uint32 (int128_to_uint64 u)
| S128, U64 -> int128_to_uint64 u
| S128, U128 -> int128_to_uint128 u
| S128, S8 -> uint64_to_int8 (int128_to_uint64 u)
| S128, S16 -> uint64_to_int16 (int128_to_uint64 u)
| S128, S32 -> uint64_to_int32 (int128_to_uint64 u)
| S128, S64 -> uint64_to_int64 (int128_to_uint64 u)
| S128, S128 -> u
#pop-options
[@(strict_on_arguments [0])]
let ones t l =
match t with
| U1 -> 0x1uy
| U8 -> 0xFFuy
| U16 -> 0xFFFFus
| U32 -> 0xFFFFFFFFul
| U64 -> 0xFFFFFFFFFFFFFFFFuL
| U128 ->
let x = UInt128.uint64_to_uint128 0xFFFFFFFFFFFFFFFFuL in
let y = (UInt128.shift_left x 64ul) `UInt128.add` x in
assert_norm (UInt128.v y == pow2 128 - 1);
y
| _ -> mk_int (-1)
let zeros t l = mk_int 0
[@(strict_on_arguments [0])]
let add_mod #t #l a b =
match t with
| U1 -> UInt8.rem (UInt8.add_mod a b) 2uy
| U8 -> UInt8.add_mod a b
| U16 -> UInt16.add_mod a b
| U32 -> UInt32.add_mod a b
| U64 -> UInt64.add_mod a b
| U128 -> UInt128.add_mod a b
let add_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])] | false | false | Lib.IntTypes.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 200,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val add: #t:inttype -> #l:secrecy_level
-> a:int_t t l
-> b:int_t t l{range (v a + v b) t}
-> int_t t l | [] | Lib.IntTypes.add | {
"file_name": "lib/Lib.IntTypes.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} |
a: Lib.IntTypes.int_t t l ->
b: Lib.IntTypes.int_t t l {Lib.IntTypes.range (Lib.IntTypes.v a + Lib.IntTypes.v b) t}
-> Lib.IntTypes.int_t t l | {
"end_col": 26,
"end_line": 307,
"start_col": 2,
"start_line": 296
} |
Prims.Tot | val mul_mod: #t:inttype{unsigned t /\ ~(U128? t)} -> #l:secrecy_level
-> int_t t l
-> int_t t l
-> int_t t l | [
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let mul_mod #t #l a b =
match t with
| U1 -> UInt8.mul_mod a b
| U8 -> UInt8.mul_mod a b
| U16 -> UInt16.mul_mod a b
| U32 -> UInt32.mul_mod a b
| U64 -> UInt64.mul_mod a b | val mul_mod: #t:inttype{unsigned t /\ ~(U128? t)} -> #l:secrecy_level
-> int_t t l
-> int_t t l
-> int_t t l
let mul_mod #t #l a b = | false | null | false | match t with
| U1 -> UInt8.mul_mod a b
| U8 -> UInt8.mul_mod a b
| U16 -> UInt16.mul_mod a b
| U32 -> UInt32.mul_mod a b
| U64 -> UInt64.mul_mod a b | {
"checked_file": "Lib.IntTypes.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt8.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt16.fsti.checked",
"FStar.UInt128.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int8.fsti.checked",
"FStar.Int64.fsti.checked",
"FStar.Int32.fsti.checked",
"FStar.Int16.fsti.checked",
"FStar.Int128.fsti.checked",
"FStar.Int.Cast.Full.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Int.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "Lib.IntTypes.fst"
} | [
"total"
] | [
"Lib.IntTypes.inttype",
"Prims.l_and",
"Prims.b2t",
"Lib.IntTypes.unsigned",
"Prims.l_not",
"Lib.IntTypes.uu___is_U128",
"Lib.IntTypes.secrecy_level",
"Lib.IntTypes.int_t",
"FStar.UInt8.mul_mod",
"FStar.UInt16.mul_mod",
"FStar.UInt32.mul_mod",
"FStar.UInt64.mul_mod"
] | [] | module Lib.IntTypes
open FStar.Math.Lemmas
#push-options "--max_fuel 0 --max_ifuel 1 --z3rlimit 200"
let pow2_2 _ = assert_norm (pow2 2 = 4)
let pow2_3 _ = assert_norm (pow2 3 = 8)
let pow2_4 _ = assert_norm (pow2 4 = 16)
let pow2_127 _ = assert_norm (pow2 127 = 0x80000000000000000000000000000000)
let bits_numbytes t = ()
let sec_int_t t = pub_int_t t
let sec_int_v #t u = pub_int_v u
let secret #t x = x
[@(strict_on_arguments [0])]
let mk_int #t #l x =
match t with
| U1 -> UInt8.uint_to_t x
| U8 -> UInt8.uint_to_t x
| U16 -> UInt16.uint_to_t x
| U32 -> UInt32.uint_to_t x
| U64 -> UInt64.uint_to_t x
| U128 -> UInt128.uint_to_t x
| S8 -> Int8.int_to_t x
| S16 -> Int16.int_to_t x
| S32 -> Int32.int_to_t x
| S64 -> Int64.int_to_t x
| S128 -> Int128.int_to_t x
val v_extensionality:
#t:inttype
-> #l:secrecy_level
-> a:int_t t l
-> b:int_t t l
-> Lemma
(requires v a == v b)
(ensures a == b)
let v_extensionality #t #l a b =
match t with
| U1 -> ()
| U8 -> UInt8.v_inj a b
| U16 -> UInt16.v_inj a b
| U32 -> UInt32.v_inj a b
| U64 -> UInt64.v_inj a b
| U128 -> UInt128.v_inj a b
| S8 -> Int8.v_inj a b
| S16 -> Int16.v_inj a b
| S32 -> Int32.v_inj a b
| S64 -> Int64.v_inj a b
| S128 -> Int128.v_inj a b
let v_injective #t #l a =
v_extensionality a (mk_int (v a))
let v_mk_int #t #l n = ()
let u128 n = FStar.UInt128.uint64_to_uint128 (u64 n)
// KaRaMeL will extract this to FStar_Int128_int_to_t, which isn't provided
// We'll need to have FStar.Int128.int64_to_int128 to support int128_t literals
let i128 n =
assert_norm (pow2 (bits S64 - 1) <= pow2 (bits S128 - 1));
sint #S128 #SEC n
let size_to_uint32 x = x
let size_to_uint64 x = Int.Cast.uint32_to_uint64 x
let byte_to_uint8 x = x
let byte_to_int8 x = Int.Cast.uint8_to_int8 x
let op_At_Percent = Int.op_At_Percent
// FStar.UInt128 gets special treatment in KaRaMeL. There is no
// equivalent for FStar.Int128 at the moment, so we use the three
// assumed cast operators below.
//
// Using them will fail at runtime with an informative message.
// The commented-out implementations show that they are realizable.
//
// When support for `FStar.Int128` is added KaRaMeL, these casts must
// be added as special cases. When using builtin compiler support for
// `int128_t`, they can be implemented directly as C casts without
// undefined or implementation-defined behaviour.
assume
val uint128_to_int128: a:UInt128.t{v a <= maxint S128} -> b:Int128.t{Int128.v b == UInt128.v a}
//let uint128_to_int128 a = Int128.int_to_t (v a)
assume
val int128_to_uint128: a:Int128.t -> b:UInt128.t{UInt128.v b == Int128.v a % pow2 128}
//let int128_to_uint128 a = mk_int (v a % pow2 128)
assume
val int64_to_int128: a:Int64.t -> b:Int128.t{Int128.v b == Int64.v a}
//let int64_to_int128 a = Int128.int_to_t (v a)
val uint64_to_int128: a:UInt64.t -> b:Int128.t{Int128.v b == UInt64.v a}
let uint64_to_int128 a = uint128_to_int128 (Int.Cast.Full.uint64_to_uint128 a)
val int64_to_uint128: a:Int64.t -> b:UInt128.t{UInt128.v b == Int64.v a % pow2 128}
let int64_to_uint128 a = int128_to_uint128 (int64_to_int128 a)
val int128_to_uint64: a:Int128.t -> b:UInt64.t{UInt64.v b == Int128.v a % pow2 64}
let int128_to_uint64 a = Int.Cast.Full.uint128_to_uint64 (int128_to_uint128 a)
#push-options "--z3rlimit 1000"
[@(strict_on_arguments [0;2])]
let cast #t #l t' l' u =
assert_norm (pow2 8 = 2 * pow2 7);
assert_norm (pow2 16 = 2 * pow2 15);
assert_norm (pow2 64 * pow2 64 = pow2 128);
assert_norm (pow2 16 * pow2 48 = pow2 64);
assert_norm (pow2 8 * pow2 56 = pow2 64);
assert_norm (pow2 32 * pow2 32 = pow2 64);
modulo_modulo_lemma (v u) (pow2 32) (pow2 32);
modulo_modulo_lemma (v u) (pow2 64) (pow2 64);
modulo_modulo_lemma (v u) (pow2 128) (pow2 64);
modulo_modulo_lemma (v u) (pow2 16) (pow2 48);
modulo_modulo_lemma (v u) (pow2 8) (pow2 56);
let open FStar.Int.Cast in
let open FStar.Int.Cast.Full in
match t, t' with
| U1, U1 -> u
| U1, U8 -> u
| U1, U16 -> uint8_to_uint16 u
| U1, U32 -> uint8_to_uint32 u
| U1, U64 -> uint8_to_uint64 u
| U1, U128 -> UInt128.uint64_to_uint128 (uint8_to_uint64 u)
| U1, S8 -> uint8_to_int8 u
| U1, S16 -> uint8_to_int16 u
| U1, S32 -> uint8_to_int32 u
| U1, S64 -> uint8_to_int64 u
| U1, S128 -> uint64_to_int128 (uint8_to_uint64 u)
| U8, U1 -> UInt8.rem u 2uy
| U8, U8 -> u
| U8, U16 -> uint8_to_uint16 u
| U8, U32 -> uint8_to_uint32 u
| U8, U64 -> uint8_to_uint64 u
| U8, U128 -> UInt128.uint64_to_uint128 (uint8_to_uint64 u)
| U8, S8 -> uint8_to_int8 u
| U8, S16 -> uint8_to_int16 u
| U8, S32 -> uint8_to_int32 u
| U8, S64 -> uint8_to_int64 u
| U8, S128 -> uint64_to_int128 (uint8_to_uint64 u)
| U16, U1 -> UInt8.rem (uint16_to_uint8 u) 2uy
| U16, U8 -> uint16_to_uint8 u
| U16, U16 -> u
| U16, U32 -> uint16_to_uint32 u
| U16, U64 -> uint16_to_uint64 u
| U16, U128 -> UInt128.uint64_to_uint128 (uint16_to_uint64 u)
| U16, S8 -> uint16_to_int8 u
| U16, S16 -> uint16_to_int16 u
| U16, S32 -> uint16_to_int32 u
| U16, S64 -> uint16_to_int64 u
| U16, S128 -> uint64_to_int128 (uint16_to_uint64 u)
| U32, U1 -> UInt8.rem (uint32_to_uint8 u) 2uy
| U32, U8 -> uint32_to_uint8 u
| U32, U16 -> uint32_to_uint16 u
| U32, U32 -> u
| U32, U64 -> uint32_to_uint64 u
| U32, U128 -> UInt128.uint64_to_uint128 (uint32_to_uint64 u)
| U32, S8 -> uint32_to_int8 u
| U32, S16 -> uint32_to_int16 u
| U32, S32 -> uint32_to_int32 u
| U32, S64 -> uint32_to_int64 u
| U32, S128 -> uint64_to_int128 (uint32_to_uint64 u)
| U64, U1 -> UInt8.rem (uint64_to_uint8 u) 2uy
| U64, U8 -> uint64_to_uint8 u
| U64, U16 -> uint64_to_uint16 u
| U64, U32 -> uint64_to_uint32 u
| U64, U64 -> u
| U64, U128 -> UInt128.uint64_to_uint128 u
| U64, S8 -> uint64_to_int8 u
| U64, S16 -> uint64_to_int16 u
| U64, S32 -> uint64_to_int32 u
| U64, S64 -> uint64_to_int64 u
| U64, S128 -> uint64_to_int128 u
| U128, U1 -> UInt8.rem (uint64_to_uint8 (uint128_to_uint64 u)) 2uy
| U128, U8 -> uint64_to_uint8 (UInt128.uint128_to_uint64 u)
| U128, U16 -> uint64_to_uint16 (UInt128.uint128_to_uint64 u)
| U128, U32 -> uint64_to_uint32 (UInt128.uint128_to_uint64 u)
| U128, U64 -> UInt128.uint128_to_uint64 u
| U128, U128 -> u
| U128, S8 -> uint64_to_int8 (UInt128.uint128_to_uint64 u)
| U128, S16 -> uint64_to_int16 (UInt128.uint128_to_uint64 u)
| U128, S32 -> uint64_to_int32 (UInt128.uint128_to_uint64 u)
| U128, S64 -> uint64_to_int64 (UInt128.uint128_to_uint64 u)
| U128, S128 -> uint128_to_int128 u
| S8, U1 -> UInt8.rem (int8_to_uint8 u) 2uy
| S8, U8 -> int8_to_uint8 u
| S8, U16 -> int8_to_uint16 u
| S8, U32 -> int8_to_uint32 u
| S8, U64 -> int8_to_uint64 u
| S8, U128 -> int64_to_uint128 (int8_to_int64 u)
| S8, S8 -> u
| S8, S16 -> int8_to_int16 u
| S8, S32 -> int8_to_int32 u
| S8, S64 -> int8_to_int64 u
| S8, S128 -> int64_to_int128 (int8_to_int64 u)
| S16, U1 -> UInt8.rem (int16_to_uint8 u) 2uy
| S16, U8 -> int16_to_uint8 u
| S16, U16 -> int16_to_uint16 u
| S16, U32 -> int16_to_uint32 u
| S16, U64 -> int16_to_uint64 u
| S16, U128 -> int64_to_uint128 (int16_to_int64 u)
| S16, S8 -> int16_to_int8 u
| S16, S16 -> u
| S16, S32 -> int16_to_int32 u
| S16, S64 -> int16_to_int64 u
| S16, S128 -> int64_to_int128 (int16_to_int64 u)
| S32, U1 -> UInt8.rem (int32_to_uint8 u) 2uy
| S32, U8 -> int32_to_uint8 u
| S32, U16 -> int32_to_uint16 u
| S32, U32 -> int32_to_uint32 u
| S32, U64 -> int32_to_uint64 u
| S32, U128 -> int64_to_uint128 (int32_to_int64 u)
| S32, S8 -> int32_to_int8 u
| S32, S16 -> int32_to_int16 u
| S32, S32 -> u
| S32, S64 -> int32_to_int64 u
| S32, S128 -> int64_to_int128 (int32_to_int64 u)
| S64, U1 -> UInt8.rem (int64_to_uint8 u) 2uy
| S64, U8 -> int64_to_uint8 u
| S64, U16 -> int64_to_uint16 u
| S64, U32 -> int64_to_uint32 u
| S64, U64 -> int64_to_uint64 u
| S64, U128 -> int64_to_uint128 u
| S64, S8 -> int64_to_int8 u
| S64, S16 -> int64_to_int16 u
| S64, S32 -> int64_to_int32 u
| S64, S64 -> u
| S64, S128 -> int64_to_int128 u
| S128, U1 -> UInt8.rem (uint64_to_uint8 (int128_to_uint64 u)) 2uy
| S128, U8 -> uint64_to_uint8 (int128_to_uint64 u)
| S128, U16 -> uint64_to_uint16 (int128_to_uint64 u)
| S128, U32 -> uint64_to_uint32 (int128_to_uint64 u)
| S128, U64 -> int128_to_uint64 u
| S128, U128 -> int128_to_uint128 u
| S128, S8 -> uint64_to_int8 (int128_to_uint64 u)
| S128, S16 -> uint64_to_int16 (int128_to_uint64 u)
| S128, S32 -> uint64_to_int32 (int128_to_uint64 u)
| S128, S64 -> uint64_to_int64 (int128_to_uint64 u)
| S128, S128 -> u
#pop-options
[@(strict_on_arguments [0])]
let ones t l =
match t with
| U1 -> 0x1uy
| U8 -> 0xFFuy
| U16 -> 0xFFFFus
| U32 -> 0xFFFFFFFFul
| U64 -> 0xFFFFFFFFFFFFFFFFuL
| U128 ->
let x = UInt128.uint64_to_uint128 0xFFFFFFFFFFFFFFFFuL in
let y = (UInt128.shift_left x 64ul) `UInt128.add` x in
assert_norm (UInt128.v y == pow2 128 - 1);
y
| _ -> mk_int (-1)
let zeros t l = mk_int 0
[@(strict_on_arguments [0])]
let add_mod #t #l a b =
match t with
| U1 -> UInt8.rem (UInt8.add_mod a b) 2uy
| U8 -> UInt8.add_mod a b
| U16 -> UInt16.add_mod a b
| U32 -> UInt32.add_mod a b
| U64 -> UInt64.add_mod a b
| U128 -> UInt128.add_mod a b
let add_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let add #t #l a b =
match t with
| U1 -> UInt8.add a b
| U8 -> UInt8.add a b
| U16 -> UInt16.add a b
| U32 -> UInt32.add a b
| U64 -> UInt64.add a b
| U128 -> UInt128.add a b
| S8 -> Int8.add a b
| S16 -> Int16.add a b
| S32 -> Int32.add a b
| S64 -> Int64.add a b
| S128 -> Int128.add a b
let add_lemma #t #l a b = ()
#push-options "--max_fuel 1"
[@(strict_on_arguments [0])]
let incr #t #l a =
match t with
| U1 -> UInt8.add a 1uy
| U8 -> UInt8.add a 1uy
| U16 -> UInt16.add a 1us
| U32 -> UInt32.add a 1ul
| U64 -> UInt64.add a 1uL
| U128 -> UInt128.add a (UInt128.uint_to_t 1)
| S8 -> Int8.add a 1y
| S16 -> Int16.add a 1s
| S32 -> Int32.add a 1l
| S64 -> Int64.add a 1L
| S128 -> Int128.add a (Int128.int_to_t 1)
let incr_lemma #t #l a = ()
#pop-options
[@(strict_on_arguments [0])] | false | false | Lib.IntTypes.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 200,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val mul_mod: #t:inttype{unsigned t /\ ~(U128? t)} -> #l:secrecy_level
-> int_t t l
-> int_t t l
-> int_t t l | [] | Lib.IntTypes.mul_mod | {
"file_name": "lib/Lib.IntTypes.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | a: Lib.IntTypes.int_t t l -> b: Lib.IntTypes.int_t t l -> Lib.IntTypes.int_t t l | {
"end_col": 29,
"end_line": 339,
"start_col": 2,
"start_line": 334
} |
Prims.Tot | val ones: t:inttype -> l:secrecy_level -> n:int_t t l{v n = ones_v t} | [
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let ones t l =
match t with
| U1 -> 0x1uy
| U8 -> 0xFFuy
| U16 -> 0xFFFFus
| U32 -> 0xFFFFFFFFul
| U64 -> 0xFFFFFFFFFFFFFFFFuL
| U128 ->
let x = UInt128.uint64_to_uint128 0xFFFFFFFFFFFFFFFFuL in
let y = (UInt128.shift_left x 64ul) `UInt128.add` x in
assert_norm (UInt128.v y == pow2 128 - 1);
y
| _ -> mk_int (-1) | val ones: t:inttype -> l:secrecy_level -> n:int_t t l{v n = ones_v t}
let ones t l = | false | null | false | match t with
| U1 -> 0x1uy
| U8 -> 0xFFuy
| U16 -> 0xFFFFus
| U32 -> 0xFFFFFFFFul
| U64 -> 0xFFFFFFFFFFFFFFFFuL
| U128 ->
let x = UInt128.uint64_to_uint128 0xFFFFFFFFFFFFFFFFuL in
let y = (UInt128.shift_left x 64ul) `UInt128.add` x in
assert_norm (UInt128.v y == pow2 128 - 1);
y
| _ -> mk_int (- 1) | {
"checked_file": "Lib.IntTypes.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt8.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt16.fsti.checked",
"FStar.UInt128.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int8.fsti.checked",
"FStar.Int64.fsti.checked",
"FStar.Int32.fsti.checked",
"FStar.Int16.fsti.checked",
"FStar.Int128.fsti.checked",
"FStar.Int.Cast.Full.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Int.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "Lib.IntTypes.fst"
} | [
"total"
] | [
"Lib.IntTypes.inttype",
"Lib.IntTypes.secrecy_level",
"FStar.UInt8.__uint_to_t",
"FStar.UInt16.__uint_to_t",
"FStar.UInt32.__uint_to_t",
"FStar.UInt64.__uint_to_t",
"Prims.unit",
"FStar.Pervasives.assert_norm",
"Prims.eq2",
"Prims.int",
"FStar.UInt128.v",
"Prims.op_Subtraction",
"Prims.pow2",
"FStar.UInt128.t",
"FStar.UInt128.add",
"FStar.UInt128.shift_left",
"Prims.l_or",
"FStar.UInt.size",
"FStar.UInt128.n",
"FStar.UInt64.v",
"FStar.UInt64.uint_to_t",
"FStar.UInt64.t",
"FStar.UInt128.uint64_to_uint128",
"Lib.IntTypes.mk_int",
"Prims.op_Minus",
"Lib.IntTypes.int_t",
"Prims.b2t",
"Prims.op_Equality",
"Lib.IntTypes.v",
"Lib.IntTypes.ones_v"
] | [] | module Lib.IntTypes
open FStar.Math.Lemmas
#push-options "--max_fuel 0 --max_ifuel 1 --z3rlimit 200"
let pow2_2 _ = assert_norm (pow2 2 = 4)
let pow2_3 _ = assert_norm (pow2 3 = 8)
let pow2_4 _ = assert_norm (pow2 4 = 16)
let pow2_127 _ = assert_norm (pow2 127 = 0x80000000000000000000000000000000)
let bits_numbytes t = ()
let sec_int_t t = pub_int_t t
let sec_int_v #t u = pub_int_v u
let secret #t x = x
[@(strict_on_arguments [0])]
let mk_int #t #l x =
match t with
| U1 -> UInt8.uint_to_t x
| U8 -> UInt8.uint_to_t x
| U16 -> UInt16.uint_to_t x
| U32 -> UInt32.uint_to_t x
| U64 -> UInt64.uint_to_t x
| U128 -> UInt128.uint_to_t x
| S8 -> Int8.int_to_t x
| S16 -> Int16.int_to_t x
| S32 -> Int32.int_to_t x
| S64 -> Int64.int_to_t x
| S128 -> Int128.int_to_t x
val v_extensionality:
#t:inttype
-> #l:secrecy_level
-> a:int_t t l
-> b:int_t t l
-> Lemma
(requires v a == v b)
(ensures a == b)
let v_extensionality #t #l a b =
match t with
| U1 -> ()
| U8 -> UInt8.v_inj a b
| U16 -> UInt16.v_inj a b
| U32 -> UInt32.v_inj a b
| U64 -> UInt64.v_inj a b
| U128 -> UInt128.v_inj a b
| S8 -> Int8.v_inj a b
| S16 -> Int16.v_inj a b
| S32 -> Int32.v_inj a b
| S64 -> Int64.v_inj a b
| S128 -> Int128.v_inj a b
let v_injective #t #l a =
v_extensionality a (mk_int (v a))
let v_mk_int #t #l n = ()
let u128 n = FStar.UInt128.uint64_to_uint128 (u64 n)
// KaRaMeL will extract this to FStar_Int128_int_to_t, which isn't provided
// We'll need to have FStar.Int128.int64_to_int128 to support int128_t literals
let i128 n =
assert_norm (pow2 (bits S64 - 1) <= pow2 (bits S128 - 1));
sint #S128 #SEC n
let size_to_uint32 x = x
let size_to_uint64 x = Int.Cast.uint32_to_uint64 x
let byte_to_uint8 x = x
let byte_to_int8 x = Int.Cast.uint8_to_int8 x
let op_At_Percent = Int.op_At_Percent
// FStar.UInt128 gets special treatment in KaRaMeL. There is no
// equivalent for FStar.Int128 at the moment, so we use the three
// assumed cast operators below.
//
// Using them will fail at runtime with an informative message.
// The commented-out implementations show that they are realizable.
//
// When support for `FStar.Int128` is added KaRaMeL, these casts must
// be added as special cases. When using builtin compiler support for
// `int128_t`, they can be implemented directly as C casts without
// undefined or implementation-defined behaviour.
assume
val uint128_to_int128: a:UInt128.t{v a <= maxint S128} -> b:Int128.t{Int128.v b == UInt128.v a}
//let uint128_to_int128 a = Int128.int_to_t (v a)
assume
val int128_to_uint128: a:Int128.t -> b:UInt128.t{UInt128.v b == Int128.v a % pow2 128}
//let int128_to_uint128 a = mk_int (v a % pow2 128)
assume
val int64_to_int128: a:Int64.t -> b:Int128.t{Int128.v b == Int64.v a}
//let int64_to_int128 a = Int128.int_to_t (v a)
val uint64_to_int128: a:UInt64.t -> b:Int128.t{Int128.v b == UInt64.v a}
let uint64_to_int128 a = uint128_to_int128 (Int.Cast.Full.uint64_to_uint128 a)
val int64_to_uint128: a:Int64.t -> b:UInt128.t{UInt128.v b == Int64.v a % pow2 128}
let int64_to_uint128 a = int128_to_uint128 (int64_to_int128 a)
val int128_to_uint64: a:Int128.t -> b:UInt64.t{UInt64.v b == Int128.v a % pow2 64}
let int128_to_uint64 a = Int.Cast.Full.uint128_to_uint64 (int128_to_uint128 a)
#push-options "--z3rlimit 1000"
[@(strict_on_arguments [0;2])]
let cast #t #l t' l' u =
assert_norm (pow2 8 = 2 * pow2 7);
assert_norm (pow2 16 = 2 * pow2 15);
assert_norm (pow2 64 * pow2 64 = pow2 128);
assert_norm (pow2 16 * pow2 48 = pow2 64);
assert_norm (pow2 8 * pow2 56 = pow2 64);
assert_norm (pow2 32 * pow2 32 = pow2 64);
modulo_modulo_lemma (v u) (pow2 32) (pow2 32);
modulo_modulo_lemma (v u) (pow2 64) (pow2 64);
modulo_modulo_lemma (v u) (pow2 128) (pow2 64);
modulo_modulo_lemma (v u) (pow2 16) (pow2 48);
modulo_modulo_lemma (v u) (pow2 8) (pow2 56);
let open FStar.Int.Cast in
let open FStar.Int.Cast.Full in
match t, t' with
| U1, U1 -> u
| U1, U8 -> u
| U1, U16 -> uint8_to_uint16 u
| U1, U32 -> uint8_to_uint32 u
| U1, U64 -> uint8_to_uint64 u
| U1, U128 -> UInt128.uint64_to_uint128 (uint8_to_uint64 u)
| U1, S8 -> uint8_to_int8 u
| U1, S16 -> uint8_to_int16 u
| U1, S32 -> uint8_to_int32 u
| U1, S64 -> uint8_to_int64 u
| U1, S128 -> uint64_to_int128 (uint8_to_uint64 u)
| U8, U1 -> UInt8.rem u 2uy
| U8, U8 -> u
| U8, U16 -> uint8_to_uint16 u
| U8, U32 -> uint8_to_uint32 u
| U8, U64 -> uint8_to_uint64 u
| U8, U128 -> UInt128.uint64_to_uint128 (uint8_to_uint64 u)
| U8, S8 -> uint8_to_int8 u
| U8, S16 -> uint8_to_int16 u
| U8, S32 -> uint8_to_int32 u
| U8, S64 -> uint8_to_int64 u
| U8, S128 -> uint64_to_int128 (uint8_to_uint64 u)
| U16, U1 -> UInt8.rem (uint16_to_uint8 u) 2uy
| U16, U8 -> uint16_to_uint8 u
| U16, U16 -> u
| U16, U32 -> uint16_to_uint32 u
| U16, U64 -> uint16_to_uint64 u
| U16, U128 -> UInt128.uint64_to_uint128 (uint16_to_uint64 u)
| U16, S8 -> uint16_to_int8 u
| U16, S16 -> uint16_to_int16 u
| U16, S32 -> uint16_to_int32 u
| U16, S64 -> uint16_to_int64 u
| U16, S128 -> uint64_to_int128 (uint16_to_uint64 u)
| U32, U1 -> UInt8.rem (uint32_to_uint8 u) 2uy
| U32, U8 -> uint32_to_uint8 u
| U32, U16 -> uint32_to_uint16 u
| U32, U32 -> u
| U32, U64 -> uint32_to_uint64 u
| U32, U128 -> UInt128.uint64_to_uint128 (uint32_to_uint64 u)
| U32, S8 -> uint32_to_int8 u
| U32, S16 -> uint32_to_int16 u
| U32, S32 -> uint32_to_int32 u
| U32, S64 -> uint32_to_int64 u
| U32, S128 -> uint64_to_int128 (uint32_to_uint64 u)
| U64, U1 -> UInt8.rem (uint64_to_uint8 u) 2uy
| U64, U8 -> uint64_to_uint8 u
| U64, U16 -> uint64_to_uint16 u
| U64, U32 -> uint64_to_uint32 u
| U64, U64 -> u
| U64, U128 -> UInt128.uint64_to_uint128 u
| U64, S8 -> uint64_to_int8 u
| U64, S16 -> uint64_to_int16 u
| U64, S32 -> uint64_to_int32 u
| U64, S64 -> uint64_to_int64 u
| U64, S128 -> uint64_to_int128 u
| U128, U1 -> UInt8.rem (uint64_to_uint8 (uint128_to_uint64 u)) 2uy
| U128, U8 -> uint64_to_uint8 (UInt128.uint128_to_uint64 u)
| U128, U16 -> uint64_to_uint16 (UInt128.uint128_to_uint64 u)
| U128, U32 -> uint64_to_uint32 (UInt128.uint128_to_uint64 u)
| U128, U64 -> UInt128.uint128_to_uint64 u
| U128, U128 -> u
| U128, S8 -> uint64_to_int8 (UInt128.uint128_to_uint64 u)
| U128, S16 -> uint64_to_int16 (UInt128.uint128_to_uint64 u)
| U128, S32 -> uint64_to_int32 (UInt128.uint128_to_uint64 u)
| U128, S64 -> uint64_to_int64 (UInt128.uint128_to_uint64 u)
| U128, S128 -> uint128_to_int128 u
| S8, U1 -> UInt8.rem (int8_to_uint8 u) 2uy
| S8, U8 -> int8_to_uint8 u
| S8, U16 -> int8_to_uint16 u
| S8, U32 -> int8_to_uint32 u
| S8, U64 -> int8_to_uint64 u
| S8, U128 -> int64_to_uint128 (int8_to_int64 u)
| S8, S8 -> u
| S8, S16 -> int8_to_int16 u
| S8, S32 -> int8_to_int32 u
| S8, S64 -> int8_to_int64 u
| S8, S128 -> int64_to_int128 (int8_to_int64 u)
| S16, U1 -> UInt8.rem (int16_to_uint8 u) 2uy
| S16, U8 -> int16_to_uint8 u
| S16, U16 -> int16_to_uint16 u
| S16, U32 -> int16_to_uint32 u
| S16, U64 -> int16_to_uint64 u
| S16, U128 -> int64_to_uint128 (int16_to_int64 u)
| S16, S8 -> int16_to_int8 u
| S16, S16 -> u
| S16, S32 -> int16_to_int32 u
| S16, S64 -> int16_to_int64 u
| S16, S128 -> int64_to_int128 (int16_to_int64 u)
| S32, U1 -> UInt8.rem (int32_to_uint8 u) 2uy
| S32, U8 -> int32_to_uint8 u
| S32, U16 -> int32_to_uint16 u
| S32, U32 -> int32_to_uint32 u
| S32, U64 -> int32_to_uint64 u
| S32, U128 -> int64_to_uint128 (int32_to_int64 u)
| S32, S8 -> int32_to_int8 u
| S32, S16 -> int32_to_int16 u
| S32, S32 -> u
| S32, S64 -> int32_to_int64 u
| S32, S128 -> int64_to_int128 (int32_to_int64 u)
| S64, U1 -> UInt8.rem (int64_to_uint8 u) 2uy
| S64, U8 -> int64_to_uint8 u
| S64, U16 -> int64_to_uint16 u
| S64, U32 -> int64_to_uint32 u
| S64, U64 -> int64_to_uint64 u
| S64, U128 -> int64_to_uint128 u
| S64, S8 -> int64_to_int8 u
| S64, S16 -> int64_to_int16 u
| S64, S32 -> int64_to_int32 u
| S64, S64 -> u
| S64, S128 -> int64_to_int128 u
| S128, U1 -> UInt8.rem (uint64_to_uint8 (int128_to_uint64 u)) 2uy
| S128, U8 -> uint64_to_uint8 (int128_to_uint64 u)
| S128, U16 -> uint64_to_uint16 (int128_to_uint64 u)
| S128, U32 -> uint64_to_uint32 (int128_to_uint64 u)
| S128, U64 -> int128_to_uint64 u
| S128, U128 -> int128_to_uint128 u
| S128, S8 -> uint64_to_int8 (int128_to_uint64 u)
| S128, S16 -> uint64_to_int16 (int128_to_uint64 u)
| S128, S32 -> uint64_to_int32 (int128_to_uint64 u)
| S128, S64 -> uint64_to_int64 (int128_to_uint64 u)
| S128, S128 -> u
#pop-options
[@(strict_on_arguments [0])] | false | false | Lib.IntTypes.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 200,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val ones: t:inttype -> l:secrecy_level -> n:int_t t l{v n = ones_v t} | [] | Lib.IntTypes.ones | {
"file_name": "lib/Lib.IntTypes.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | t: Lib.IntTypes.inttype -> l: Lib.IntTypes.secrecy_level
-> n: Lib.IntTypes.int_t t l {Lib.IntTypes.v n = Lib.IntTypes.ones_v t} | {
"end_col": 20,
"end_line": 278,
"start_col": 2,
"start_line": 267
} |
Prims.Tot | val sub: #t:inttype -> #l:secrecy_level
-> a:int_t t l
-> b:int_t t l{range (v a - v b) t}
-> int_t t l | [
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let sub #t #l a b =
match t with
| U1 -> UInt8.sub a b
| U8 -> UInt8.sub a b
| U16 -> UInt16.sub a b
| U32 -> UInt32.sub a b
| U64 -> UInt64.sub a b
| U128 -> UInt128.sub a b
| S8 -> Int8.sub a b
| S16 -> Int16.sub a b
| S32 -> Int32.sub a b
| S64 -> Int64.sub a b
| S128 -> Int128.sub a b | val sub: #t:inttype -> #l:secrecy_level
-> a:int_t t l
-> b:int_t t l{range (v a - v b) t}
-> int_t t l
let sub #t #l a b = | false | null | false | match t with
| U1 -> UInt8.sub a b
| U8 -> UInt8.sub a b
| U16 -> UInt16.sub a b
| U32 -> UInt32.sub a b
| U64 -> UInt64.sub a b
| U128 -> UInt128.sub a b
| S8 -> Int8.sub a b
| S16 -> Int16.sub a b
| S32 -> Int32.sub a b
| S64 -> Int64.sub a b
| S128 -> Int128.sub a b | {
"checked_file": "Lib.IntTypes.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt8.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt16.fsti.checked",
"FStar.UInt128.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int8.fsti.checked",
"FStar.Int64.fsti.checked",
"FStar.Int32.fsti.checked",
"FStar.Int16.fsti.checked",
"FStar.Int128.fsti.checked",
"FStar.Int.Cast.Full.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Int.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "Lib.IntTypes.fst"
} | [
"total"
] | [
"Lib.IntTypes.inttype",
"Lib.IntTypes.secrecy_level",
"Lib.IntTypes.int_t",
"Lib.IntTypes.range",
"Prims.op_Subtraction",
"Lib.IntTypes.v",
"FStar.UInt8.sub",
"FStar.UInt16.sub",
"FStar.UInt32.sub",
"FStar.UInt64.sub",
"FStar.UInt128.sub",
"FStar.Int8.sub",
"FStar.Int16.sub",
"FStar.Int32.sub",
"FStar.Int64.sub",
"FStar.Int128.sub"
] | [] | module Lib.IntTypes
open FStar.Math.Lemmas
#push-options "--max_fuel 0 --max_ifuel 1 --z3rlimit 200"
let pow2_2 _ = assert_norm (pow2 2 = 4)
let pow2_3 _ = assert_norm (pow2 3 = 8)
let pow2_4 _ = assert_norm (pow2 4 = 16)
let pow2_127 _ = assert_norm (pow2 127 = 0x80000000000000000000000000000000)
let bits_numbytes t = ()
let sec_int_t t = pub_int_t t
let sec_int_v #t u = pub_int_v u
let secret #t x = x
[@(strict_on_arguments [0])]
let mk_int #t #l x =
match t with
| U1 -> UInt8.uint_to_t x
| U8 -> UInt8.uint_to_t x
| U16 -> UInt16.uint_to_t x
| U32 -> UInt32.uint_to_t x
| U64 -> UInt64.uint_to_t x
| U128 -> UInt128.uint_to_t x
| S8 -> Int8.int_to_t x
| S16 -> Int16.int_to_t x
| S32 -> Int32.int_to_t x
| S64 -> Int64.int_to_t x
| S128 -> Int128.int_to_t x
val v_extensionality:
#t:inttype
-> #l:secrecy_level
-> a:int_t t l
-> b:int_t t l
-> Lemma
(requires v a == v b)
(ensures a == b)
let v_extensionality #t #l a b =
match t with
| U1 -> ()
| U8 -> UInt8.v_inj a b
| U16 -> UInt16.v_inj a b
| U32 -> UInt32.v_inj a b
| U64 -> UInt64.v_inj a b
| U128 -> UInt128.v_inj a b
| S8 -> Int8.v_inj a b
| S16 -> Int16.v_inj a b
| S32 -> Int32.v_inj a b
| S64 -> Int64.v_inj a b
| S128 -> Int128.v_inj a b
let v_injective #t #l a =
v_extensionality a (mk_int (v a))
let v_mk_int #t #l n = ()
let u128 n = FStar.UInt128.uint64_to_uint128 (u64 n)
// KaRaMeL will extract this to FStar_Int128_int_to_t, which isn't provided
// We'll need to have FStar.Int128.int64_to_int128 to support int128_t literals
let i128 n =
assert_norm (pow2 (bits S64 - 1) <= pow2 (bits S128 - 1));
sint #S128 #SEC n
let size_to_uint32 x = x
let size_to_uint64 x = Int.Cast.uint32_to_uint64 x
let byte_to_uint8 x = x
let byte_to_int8 x = Int.Cast.uint8_to_int8 x
let op_At_Percent = Int.op_At_Percent
// FStar.UInt128 gets special treatment in KaRaMeL. There is no
// equivalent for FStar.Int128 at the moment, so we use the three
// assumed cast operators below.
//
// Using them will fail at runtime with an informative message.
// The commented-out implementations show that they are realizable.
//
// When support for `FStar.Int128` is added KaRaMeL, these casts must
// be added as special cases. When using builtin compiler support for
// `int128_t`, they can be implemented directly as C casts without
// undefined or implementation-defined behaviour.
assume
val uint128_to_int128: a:UInt128.t{v a <= maxint S128} -> b:Int128.t{Int128.v b == UInt128.v a}
//let uint128_to_int128 a = Int128.int_to_t (v a)
assume
val int128_to_uint128: a:Int128.t -> b:UInt128.t{UInt128.v b == Int128.v a % pow2 128}
//let int128_to_uint128 a = mk_int (v a % pow2 128)
assume
val int64_to_int128: a:Int64.t -> b:Int128.t{Int128.v b == Int64.v a}
//let int64_to_int128 a = Int128.int_to_t (v a)
val uint64_to_int128: a:UInt64.t -> b:Int128.t{Int128.v b == UInt64.v a}
let uint64_to_int128 a = uint128_to_int128 (Int.Cast.Full.uint64_to_uint128 a)
val int64_to_uint128: a:Int64.t -> b:UInt128.t{UInt128.v b == Int64.v a % pow2 128}
let int64_to_uint128 a = int128_to_uint128 (int64_to_int128 a)
val int128_to_uint64: a:Int128.t -> b:UInt64.t{UInt64.v b == Int128.v a % pow2 64}
let int128_to_uint64 a = Int.Cast.Full.uint128_to_uint64 (int128_to_uint128 a)
#push-options "--z3rlimit 1000"
[@(strict_on_arguments [0;2])]
let cast #t #l t' l' u =
assert_norm (pow2 8 = 2 * pow2 7);
assert_norm (pow2 16 = 2 * pow2 15);
assert_norm (pow2 64 * pow2 64 = pow2 128);
assert_norm (pow2 16 * pow2 48 = pow2 64);
assert_norm (pow2 8 * pow2 56 = pow2 64);
assert_norm (pow2 32 * pow2 32 = pow2 64);
modulo_modulo_lemma (v u) (pow2 32) (pow2 32);
modulo_modulo_lemma (v u) (pow2 64) (pow2 64);
modulo_modulo_lemma (v u) (pow2 128) (pow2 64);
modulo_modulo_lemma (v u) (pow2 16) (pow2 48);
modulo_modulo_lemma (v u) (pow2 8) (pow2 56);
let open FStar.Int.Cast in
let open FStar.Int.Cast.Full in
match t, t' with
| U1, U1 -> u
| U1, U8 -> u
| U1, U16 -> uint8_to_uint16 u
| U1, U32 -> uint8_to_uint32 u
| U1, U64 -> uint8_to_uint64 u
| U1, U128 -> UInt128.uint64_to_uint128 (uint8_to_uint64 u)
| U1, S8 -> uint8_to_int8 u
| U1, S16 -> uint8_to_int16 u
| U1, S32 -> uint8_to_int32 u
| U1, S64 -> uint8_to_int64 u
| U1, S128 -> uint64_to_int128 (uint8_to_uint64 u)
| U8, U1 -> UInt8.rem u 2uy
| U8, U8 -> u
| U8, U16 -> uint8_to_uint16 u
| U8, U32 -> uint8_to_uint32 u
| U8, U64 -> uint8_to_uint64 u
| U8, U128 -> UInt128.uint64_to_uint128 (uint8_to_uint64 u)
| U8, S8 -> uint8_to_int8 u
| U8, S16 -> uint8_to_int16 u
| U8, S32 -> uint8_to_int32 u
| U8, S64 -> uint8_to_int64 u
| U8, S128 -> uint64_to_int128 (uint8_to_uint64 u)
| U16, U1 -> UInt8.rem (uint16_to_uint8 u) 2uy
| U16, U8 -> uint16_to_uint8 u
| U16, U16 -> u
| U16, U32 -> uint16_to_uint32 u
| U16, U64 -> uint16_to_uint64 u
| U16, U128 -> UInt128.uint64_to_uint128 (uint16_to_uint64 u)
| U16, S8 -> uint16_to_int8 u
| U16, S16 -> uint16_to_int16 u
| U16, S32 -> uint16_to_int32 u
| U16, S64 -> uint16_to_int64 u
| U16, S128 -> uint64_to_int128 (uint16_to_uint64 u)
| U32, U1 -> UInt8.rem (uint32_to_uint8 u) 2uy
| U32, U8 -> uint32_to_uint8 u
| U32, U16 -> uint32_to_uint16 u
| U32, U32 -> u
| U32, U64 -> uint32_to_uint64 u
| U32, U128 -> UInt128.uint64_to_uint128 (uint32_to_uint64 u)
| U32, S8 -> uint32_to_int8 u
| U32, S16 -> uint32_to_int16 u
| U32, S32 -> uint32_to_int32 u
| U32, S64 -> uint32_to_int64 u
| U32, S128 -> uint64_to_int128 (uint32_to_uint64 u)
| U64, U1 -> UInt8.rem (uint64_to_uint8 u) 2uy
| U64, U8 -> uint64_to_uint8 u
| U64, U16 -> uint64_to_uint16 u
| U64, U32 -> uint64_to_uint32 u
| U64, U64 -> u
| U64, U128 -> UInt128.uint64_to_uint128 u
| U64, S8 -> uint64_to_int8 u
| U64, S16 -> uint64_to_int16 u
| U64, S32 -> uint64_to_int32 u
| U64, S64 -> uint64_to_int64 u
| U64, S128 -> uint64_to_int128 u
| U128, U1 -> UInt8.rem (uint64_to_uint8 (uint128_to_uint64 u)) 2uy
| U128, U8 -> uint64_to_uint8 (UInt128.uint128_to_uint64 u)
| U128, U16 -> uint64_to_uint16 (UInt128.uint128_to_uint64 u)
| U128, U32 -> uint64_to_uint32 (UInt128.uint128_to_uint64 u)
| U128, U64 -> UInt128.uint128_to_uint64 u
| U128, U128 -> u
| U128, S8 -> uint64_to_int8 (UInt128.uint128_to_uint64 u)
| U128, S16 -> uint64_to_int16 (UInt128.uint128_to_uint64 u)
| U128, S32 -> uint64_to_int32 (UInt128.uint128_to_uint64 u)
| U128, S64 -> uint64_to_int64 (UInt128.uint128_to_uint64 u)
| U128, S128 -> uint128_to_int128 u
| S8, U1 -> UInt8.rem (int8_to_uint8 u) 2uy
| S8, U8 -> int8_to_uint8 u
| S8, U16 -> int8_to_uint16 u
| S8, U32 -> int8_to_uint32 u
| S8, U64 -> int8_to_uint64 u
| S8, U128 -> int64_to_uint128 (int8_to_int64 u)
| S8, S8 -> u
| S8, S16 -> int8_to_int16 u
| S8, S32 -> int8_to_int32 u
| S8, S64 -> int8_to_int64 u
| S8, S128 -> int64_to_int128 (int8_to_int64 u)
| S16, U1 -> UInt8.rem (int16_to_uint8 u) 2uy
| S16, U8 -> int16_to_uint8 u
| S16, U16 -> int16_to_uint16 u
| S16, U32 -> int16_to_uint32 u
| S16, U64 -> int16_to_uint64 u
| S16, U128 -> int64_to_uint128 (int16_to_int64 u)
| S16, S8 -> int16_to_int8 u
| S16, S16 -> u
| S16, S32 -> int16_to_int32 u
| S16, S64 -> int16_to_int64 u
| S16, S128 -> int64_to_int128 (int16_to_int64 u)
| S32, U1 -> UInt8.rem (int32_to_uint8 u) 2uy
| S32, U8 -> int32_to_uint8 u
| S32, U16 -> int32_to_uint16 u
| S32, U32 -> int32_to_uint32 u
| S32, U64 -> int32_to_uint64 u
| S32, U128 -> int64_to_uint128 (int32_to_int64 u)
| S32, S8 -> int32_to_int8 u
| S32, S16 -> int32_to_int16 u
| S32, S32 -> u
| S32, S64 -> int32_to_int64 u
| S32, S128 -> int64_to_int128 (int32_to_int64 u)
| S64, U1 -> UInt8.rem (int64_to_uint8 u) 2uy
| S64, U8 -> int64_to_uint8 u
| S64, U16 -> int64_to_uint16 u
| S64, U32 -> int64_to_uint32 u
| S64, U64 -> int64_to_uint64 u
| S64, U128 -> int64_to_uint128 u
| S64, S8 -> int64_to_int8 u
| S64, S16 -> int64_to_int16 u
| S64, S32 -> int64_to_int32 u
| S64, S64 -> u
| S64, S128 -> int64_to_int128 u
| S128, U1 -> UInt8.rem (uint64_to_uint8 (int128_to_uint64 u)) 2uy
| S128, U8 -> uint64_to_uint8 (int128_to_uint64 u)
| S128, U16 -> uint64_to_uint16 (int128_to_uint64 u)
| S128, U32 -> uint64_to_uint32 (int128_to_uint64 u)
| S128, U64 -> int128_to_uint64 u
| S128, U128 -> int128_to_uint128 u
| S128, S8 -> uint64_to_int8 (int128_to_uint64 u)
| S128, S16 -> uint64_to_int16 (int128_to_uint64 u)
| S128, S32 -> uint64_to_int32 (int128_to_uint64 u)
| S128, S64 -> uint64_to_int64 (int128_to_uint64 u)
| S128, S128 -> u
#pop-options
[@(strict_on_arguments [0])]
let ones t l =
match t with
| U1 -> 0x1uy
| U8 -> 0xFFuy
| U16 -> 0xFFFFus
| U32 -> 0xFFFFFFFFul
| U64 -> 0xFFFFFFFFFFFFFFFFuL
| U128 ->
let x = UInt128.uint64_to_uint128 0xFFFFFFFFFFFFFFFFuL in
let y = (UInt128.shift_left x 64ul) `UInt128.add` x in
assert_norm (UInt128.v y == pow2 128 - 1);
y
| _ -> mk_int (-1)
let zeros t l = mk_int 0
[@(strict_on_arguments [0])]
let add_mod #t #l a b =
match t with
| U1 -> UInt8.rem (UInt8.add_mod a b) 2uy
| U8 -> UInt8.add_mod a b
| U16 -> UInt16.add_mod a b
| U32 -> UInt32.add_mod a b
| U64 -> UInt64.add_mod a b
| U128 -> UInt128.add_mod a b
let add_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let add #t #l a b =
match t with
| U1 -> UInt8.add a b
| U8 -> UInt8.add a b
| U16 -> UInt16.add a b
| U32 -> UInt32.add a b
| U64 -> UInt64.add a b
| U128 -> UInt128.add a b
| S8 -> Int8.add a b
| S16 -> Int16.add a b
| S32 -> Int32.add a b
| S64 -> Int64.add a b
| S128 -> Int128.add a b
let add_lemma #t #l a b = ()
#push-options "--max_fuel 1"
[@(strict_on_arguments [0])]
let incr #t #l a =
match t with
| U1 -> UInt8.add a 1uy
| U8 -> UInt8.add a 1uy
| U16 -> UInt16.add a 1us
| U32 -> UInt32.add a 1ul
| U64 -> UInt64.add a 1uL
| U128 -> UInt128.add a (UInt128.uint_to_t 1)
| S8 -> Int8.add a 1y
| S16 -> Int16.add a 1s
| S32 -> Int32.add a 1l
| S64 -> Int64.add a 1L
| S128 -> Int128.add a (Int128.int_to_t 1)
let incr_lemma #t #l a = ()
#pop-options
[@(strict_on_arguments [0])]
let mul_mod #t #l a b =
match t with
| U1 -> UInt8.mul_mod a b
| U8 -> UInt8.mul_mod a b
| U16 -> UInt16.mul_mod a b
| U32 -> UInt32.mul_mod a b
| U64 -> UInt64.mul_mod a b
let mul_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let mul #t #l a b =
match t with
| U1 -> UInt8.mul a b
| U8 -> UInt8.mul a b
| U16 -> UInt16.mul a b
| U32 -> UInt32.mul a b
| U64 -> UInt64.mul a b
| S8 -> Int8.mul a b
| S16 -> Int16.mul a b
| S32 -> Int32.mul a b
| S64 -> Int64.mul a b
let mul_lemma #t #l a b = ()
let mul64_wide a b = UInt128.mul_wide a b
let mul64_wide_lemma a b = ()
let mul_s64_wide a b = Int128.mul_wide a b
let mul_s64_wide_lemma a b = ()
[@(strict_on_arguments [0])]
let sub_mod #t #l a b =
match t with
| U1 -> UInt8.rem (UInt8.sub_mod a b) 2uy
| U8 -> UInt8.sub_mod a b
| U16 -> UInt16.sub_mod a b
| U32 -> UInt32.sub_mod a b
| U64 -> UInt64.sub_mod a b
| U128 -> UInt128.sub_mod a b
let sub_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])] | false | false | Lib.IntTypes.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 200,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val sub: #t:inttype -> #l:secrecy_level
-> a:int_t t l
-> b:int_t t l{range (v a - v b) t}
-> int_t t l | [] | Lib.IntTypes.sub | {
"file_name": "lib/Lib.IntTypes.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} |
a: Lib.IntTypes.int_t t l ->
b: Lib.IntTypes.int_t t l {Lib.IntTypes.range (Lib.IntTypes.v a - Lib.IntTypes.v b) t}
-> Lib.IntTypes.int_t t l | {
"end_col": 26,
"end_line": 391,
"start_col": 2,
"start_line": 380
} |
FStar.Pervasives.Lemma | val logand_zeros: #t:inttype -> #l:secrecy_level -> a:int_t t l ->
Lemma (v (a `logand` zeros t l) == 0) | [
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let logand_zeros #t #l a =
match t with
| U1 -> assert_norm (u1 0 `logand` zeros U1 l == u1 0 /\ u1 1 `logand` zeros U1 l == u1 0)
| U8 | U16 | U32 | U64 | U128 -> UInt.logand_lemma_1 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.logand_lemma_1 #(bits t) (v a) | val logand_zeros: #t:inttype -> #l:secrecy_level -> a:int_t t l ->
Lemma (v (a `logand` zeros t l) == 0)
let logand_zeros #t #l a = | false | null | true | match t with
| U1 -> assert_norm ((u1 0) `logand` (zeros U1 l) == u1 0 /\ (u1 1) `logand` (zeros U1 l) == u1 0)
| U8 | U16 | U32 | U64 | U128 -> UInt.logand_lemma_1 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.logand_lemma_1 #(bits t) (v a) | {
"checked_file": "Lib.IntTypes.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt8.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt16.fsti.checked",
"FStar.UInt128.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int8.fsti.checked",
"FStar.Int64.fsti.checked",
"FStar.Int32.fsti.checked",
"FStar.Int16.fsti.checked",
"FStar.Int128.fsti.checked",
"FStar.Int.Cast.Full.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Int.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "Lib.IntTypes.fst"
} | [
"lemma"
] | [
"Lib.IntTypes.inttype",
"Lib.IntTypes.secrecy_level",
"Lib.IntTypes.int_t",
"FStar.Pervasives.assert_norm",
"Prims.l_and",
"Prims.eq2",
"Lib.IntTypes.U1",
"Lib.IntTypes.SEC",
"Lib.IntTypes.logand",
"Lib.IntTypes.u1",
"Lib.IntTypes.zeros",
"FStar.UInt.logand_lemma_1",
"Lib.IntTypes.bits",
"Lib.IntTypes.v",
"FStar.Int.logand_lemma_1",
"Prims.unit"
] | [] | module Lib.IntTypes
open FStar.Math.Lemmas
#push-options "--max_fuel 0 --max_ifuel 1 --z3rlimit 200"
let pow2_2 _ = assert_norm (pow2 2 = 4)
let pow2_3 _ = assert_norm (pow2 3 = 8)
let pow2_4 _ = assert_norm (pow2 4 = 16)
let pow2_127 _ = assert_norm (pow2 127 = 0x80000000000000000000000000000000)
let bits_numbytes t = ()
let sec_int_t t = pub_int_t t
let sec_int_v #t u = pub_int_v u
let secret #t x = x
[@(strict_on_arguments [0])]
let mk_int #t #l x =
match t with
| U1 -> UInt8.uint_to_t x
| U8 -> UInt8.uint_to_t x
| U16 -> UInt16.uint_to_t x
| U32 -> UInt32.uint_to_t x
| U64 -> UInt64.uint_to_t x
| U128 -> UInt128.uint_to_t x
| S8 -> Int8.int_to_t x
| S16 -> Int16.int_to_t x
| S32 -> Int32.int_to_t x
| S64 -> Int64.int_to_t x
| S128 -> Int128.int_to_t x
val v_extensionality:
#t:inttype
-> #l:secrecy_level
-> a:int_t t l
-> b:int_t t l
-> Lemma
(requires v a == v b)
(ensures a == b)
let v_extensionality #t #l a b =
match t with
| U1 -> ()
| U8 -> UInt8.v_inj a b
| U16 -> UInt16.v_inj a b
| U32 -> UInt32.v_inj a b
| U64 -> UInt64.v_inj a b
| U128 -> UInt128.v_inj a b
| S8 -> Int8.v_inj a b
| S16 -> Int16.v_inj a b
| S32 -> Int32.v_inj a b
| S64 -> Int64.v_inj a b
| S128 -> Int128.v_inj a b
let v_injective #t #l a =
v_extensionality a (mk_int (v a))
let v_mk_int #t #l n = ()
let u128 n = FStar.UInt128.uint64_to_uint128 (u64 n)
// KaRaMeL will extract this to FStar_Int128_int_to_t, which isn't provided
// We'll need to have FStar.Int128.int64_to_int128 to support int128_t literals
let i128 n =
assert_norm (pow2 (bits S64 - 1) <= pow2 (bits S128 - 1));
sint #S128 #SEC n
let size_to_uint32 x = x
let size_to_uint64 x = Int.Cast.uint32_to_uint64 x
let byte_to_uint8 x = x
let byte_to_int8 x = Int.Cast.uint8_to_int8 x
let op_At_Percent = Int.op_At_Percent
// FStar.UInt128 gets special treatment in KaRaMeL. There is no
// equivalent for FStar.Int128 at the moment, so we use the three
// assumed cast operators below.
//
// Using them will fail at runtime with an informative message.
// The commented-out implementations show that they are realizable.
//
// When support for `FStar.Int128` is added KaRaMeL, these casts must
// be added as special cases. When using builtin compiler support for
// `int128_t`, they can be implemented directly as C casts without
// undefined or implementation-defined behaviour.
assume
val uint128_to_int128: a:UInt128.t{v a <= maxint S128} -> b:Int128.t{Int128.v b == UInt128.v a}
//let uint128_to_int128 a = Int128.int_to_t (v a)
assume
val int128_to_uint128: a:Int128.t -> b:UInt128.t{UInt128.v b == Int128.v a % pow2 128}
//let int128_to_uint128 a = mk_int (v a % pow2 128)
assume
val int64_to_int128: a:Int64.t -> b:Int128.t{Int128.v b == Int64.v a}
//let int64_to_int128 a = Int128.int_to_t (v a)
val uint64_to_int128: a:UInt64.t -> b:Int128.t{Int128.v b == UInt64.v a}
let uint64_to_int128 a = uint128_to_int128 (Int.Cast.Full.uint64_to_uint128 a)
val int64_to_uint128: a:Int64.t -> b:UInt128.t{UInt128.v b == Int64.v a % pow2 128}
let int64_to_uint128 a = int128_to_uint128 (int64_to_int128 a)
val int128_to_uint64: a:Int128.t -> b:UInt64.t{UInt64.v b == Int128.v a % pow2 64}
let int128_to_uint64 a = Int.Cast.Full.uint128_to_uint64 (int128_to_uint128 a)
#push-options "--z3rlimit 1000"
[@(strict_on_arguments [0;2])]
let cast #t #l t' l' u =
assert_norm (pow2 8 = 2 * pow2 7);
assert_norm (pow2 16 = 2 * pow2 15);
assert_norm (pow2 64 * pow2 64 = pow2 128);
assert_norm (pow2 16 * pow2 48 = pow2 64);
assert_norm (pow2 8 * pow2 56 = pow2 64);
assert_norm (pow2 32 * pow2 32 = pow2 64);
modulo_modulo_lemma (v u) (pow2 32) (pow2 32);
modulo_modulo_lemma (v u) (pow2 64) (pow2 64);
modulo_modulo_lemma (v u) (pow2 128) (pow2 64);
modulo_modulo_lemma (v u) (pow2 16) (pow2 48);
modulo_modulo_lemma (v u) (pow2 8) (pow2 56);
let open FStar.Int.Cast in
let open FStar.Int.Cast.Full in
match t, t' with
| U1, U1 -> u
| U1, U8 -> u
| U1, U16 -> uint8_to_uint16 u
| U1, U32 -> uint8_to_uint32 u
| U1, U64 -> uint8_to_uint64 u
| U1, U128 -> UInt128.uint64_to_uint128 (uint8_to_uint64 u)
| U1, S8 -> uint8_to_int8 u
| U1, S16 -> uint8_to_int16 u
| U1, S32 -> uint8_to_int32 u
| U1, S64 -> uint8_to_int64 u
| U1, S128 -> uint64_to_int128 (uint8_to_uint64 u)
| U8, U1 -> UInt8.rem u 2uy
| U8, U8 -> u
| U8, U16 -> uint8_to_uint16 u
| U8, U32 -> uint8_to_uint32 u
| U8, U64 -> uint8_to_uint64 u
| U8, U128 -> UInt128.uint64_to_uint128 (uint8_to_uint64 u)
| U8, S8 -> uint8_to_int8 u
| U8, S16 -> uint8_to_int16 u
| U8, S32 -> uint8_to_int32 u
| U8, S64 -> uint8_to_int64 u
| U8, S128 -> uint64_to_int128 (uint8_to_uint64 u)
| U16, U1 -> UInt8.rem (uint16_to_uint8 u) 2uy
| U16, U8 -> uint16_to_uint8 u
| U16, U16 -> u
| U16, U32 -> uint16_to_uint32 u
| U16, U64 -> uint16_to_uint64 u
| U16, U128 -> UInt128.uint64_to_uint128 (uint16_to_uint64 u)
| U16, S8 -> uint16_to_int8 u
| U16, S16 -> uint16_to_int16 u
| U16, S32 -> uint16_to_int32 u
| U16, S64 -> uint16_to_int64 u
| U16, S128 -> uint64_to_int128 (uint16_to_uint64 u)
| U32, U1 -> UInt8.rem (uint32_to_uint8 u) 2uy
| U32, U8 -> uint32_to_uint8 u
| U32, U16 -> uint32_to_uint16 u
| U32, U32 -> u
| U32, U64 -> uint32_to_uint64 u
| U32, U128 -> UInt128.uint64_to_uint128 (uint32_to_uint64 u)
| U32, S8 -> uint32_to_int8 u
| U32, S16 -> uint32_to_int16 u
| U32, S32 -> uint32_to_int32 u
| U32, S64 -> uint32_to_int64 u
| U32, S128 -> uint64_to_int128 (uint32_to_uint64 u)
| U64, U1 -> UInt8.rem (uint64_to_uint8 u) 2uy
| U64, U8 -> uint64_to_uint8 u
| U64, U16 -> uint64_to_uint16 u
| U64, U32 -> uint64_to_uint32 u
| U64, U64 -> u
| U64, U128 -> UInt128.uint64_to_uint128 u
| U64, S8 -> uint64_to_int8 u
| U64, S16 -> uint64_to_int16 u
| U64, S32 -> uint64_to_int32 u
| U64, S64 -> uint64_to_int64 u
| U64, S128 -> uint64_to_int128 u
| U128, U1 -> UInt8.rem (uint64_to_uint8 (uint128_to_uint64 u)) 2uy
| U128, U8 -> uint64_to_uint8 (UInt128.uint128_to_uint64 u)
| U128, U16 -> uint64_to_uint16 (UInt128.uint128_to_uint64 u)
| U128, U32 -> uint64_to_uint32 (UInt128.uint128_to_uint64 u)
| U128, U64 -> UInt128.uint128_to_uint64 u
| U128, U128 -> u
| U128, S8 -> uint64_to_int8 (UInt128.uint128_to_uint64 u)
| U128, S16 -> uint64_to_int16 (UInt128.uint128_to_uint64 u)
| U128, S32 -> uint64_to_int32 (UInt128.uint128_to_uint64 u)
| U128, S64 -> uint64_to_int64 (UInt128.uint128_to_uint64 u)
| U128, S128 -> uint128_to_int128 u
| S8, U1 -> UInt8.rem (int8_to_uint8 u) 2uy
| S8, U8 -> int8_to_uint8 u
| S8, U16 -> int8_to_uint16 u
| S8, U32 -> int8_to_uint32 u
| S8, U64 -> int8_to_uint64 u
| S8, U128 -> int64_to_uint128 (int8_to_int64 u)
| S8, S8 -> u
| S8, S16 -> int8_to_int16 u
| S8, S32 -> int8_to_int32 u
| S8, S64 -> int8_to_int64 u
| S8, S128 -> int64_to_int128 (int8_to_int64 u)
| S16, U1 -> UInt8.rem (int16_to_uint8 u) 2uy
| S16, U8 -> int16_to_uint8 u
| S16, U16 -> int16_to_uint16 u
| S16, U32 -> int16_to_uint32 u
| S16, U64 -> int16_to_uint64 u
| S16, U128 -> int64_to_uint128 (int16_to_int64 u)
| S16, S8 -> int16_to_int8 u
| S16, S16 -> u
| S16, S32 -> int16_to_int32 u
| S16, S64 -> int16_to_int64 u
| S16, S128 -> int64_to_int128 (int16_to_int64 u)
| S32, U1 -> UInt8.rem (int32_to_uint8 u) 2uy
| S32, U8 -> int32_to_uint8 u
| S32, U16 -> int32_to_uint16 u
| S32, U32 -> int32_to_uint32 u
| S32, U64 -> int32_to_uint64 u
| S32, U128 -> int64_to_uint128 (int32_to_int64 u)
| S32, S8 -> int32_to_int8 u
| S32, S16 -> int32_to_int16 u
| S32, S32 -> u
| S32, S64 -> int32_to_int64 u
| S32, S128 -> int64_to_int128 (int32_to_int64 u)
| S64, U1 -> UInt8.rem (int64_to_uint8 u) 2uy
| S64, U8 -> int64_to_uint8 u
| S64, U16 -> int64_to_uint16 u
| S64, U32 -> int64_to_uint32 u
| S64, U64 -> int64_to_uint64 u
| S64, U128 -> int64_to_uint128 u
| S64, S8 -> int64_to_int8 u
| S64, S16 -> int64_to_int16 u
| S64, S32 -> int64_to_int32 u
| S64, S64 -> u
| S64, S128 -> int64_to_int128 u
| S128, U1 -> UInt8.rem (uint64_to_uint8 (int128_to_uint64 u)) 2uy
| S128, U8 -> uint64_to_uint8 (int128_to_uint64 u)
| S128, U16 -> uint64_to_uint16 (int128_to_uint64 u)
| S128, U32 -> uint64_to_uint32 (int128_to_uint64 u)
| S128, U64 -> int128_to_uint64 u
| S128, U128 -> int128_to_uint128 u
| S128, S8 -> uint64_to_int8 (int128_to_uint64 u)
| S128, S16 -> uint64_to_int16 (int128_to_uint64 u)
| S128, S32 -> uint64_to_int32 (int128_to_uint64 u)
| S128, S64 -> uint64_to_int64 (int128_to_uint64 u)
| S128, S128 -> u
#pop-options
[@(strict_on_arguments [0])]
let ones t l =
match t with
| U1 -> 0x1uy
| U8 -> 0xFFuy
| U16 -> 0xFFFFus
| U32 -> 0xFFFFFFFFul
| U64 -> 0xFFFFFFFFFFFFFFFFuL
| U128 ->
let x = UInt128.uint64_to_uint128 0xFFFFFFFFFFFFFFFFuL in
let y = (UInt128.shift_left x 64ul) `UInt128.add` x in
assert_norm (UInt128.v y == pow2 128 - 1);
y
| _ -> mk_int (-1)
let zeros t l = mk_int 0
[@(strict_on_arguments [0])]
let add_mod #t #l a b =
match t with
| U1 -> UInt8.rem (UInt8.add_mod a b) 2uy
| U8 -> UInt8.add_mod a b
| U16 -> UInt16.add_mod a b
| U32 -> UInt32.add_mod a b
| U64 -> UInt64.add_mod a b
| U128 -> UInt128.add_mod a b
let add_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let add #t #l a b =
match t with
| U1 -> UInt8.add a b
| U8 -> UInt8.add a b
| U16 -> UInt16.add a b
| U32 -> UInt32.add a b
| U64 -> UInt64.add a b
| U128 -> UInt128.add a b
| S8 -> Int8.add a b
| S16 -> Int16.add a b
| S32 -> Int32.add a b
| S64 -> Int64.add a b
| S128 -> Int128.add a b
let add_lemma #t #l a b = ()
#push-options "--max_fuel 1"
[@(strict_on_arguments [0])]
let incr #t #l a =
match t with
| U1 -> UInt8.add a 1uy
| U8 -> UInt8.add a 1uy
| U16 -> UInt16.add a 1us
| U32 -> UInt32.add a 1ul
| U64 -> UInt64.add a 1uL
| U128 -> UInt128.add a (UInt128.uint_to_t 1)
| S8 -> Int8.add a 1y
| S16 -> Int16.add a 1s
| S32 -> Int32.add a 1l
| S64 -> Int64.add a 1L
| S128 -> Int128.add a (Int128.int_to_t 1)
let incr_lemma #t #l a = ()
#pop-options
[@(strict_on_arguments [0])]
let mul_mod #t #l a b =
match t with
| U1 -> UInt8.mul_mod a b
| U8 -> UInt8.mul_mod a b
| U16 -> UInt16.mul_mod a b
| U32 -> UInt32.mul_mod a b
| U64 -> UInt64.mul_mod a b
let mul_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let mul #t #l a b =
match t with
| U1 -> UInt8.mul a b
| U8 -> UInt8.mul a b
| U16 -> UInt16.mul a b
| U32 -> UInt32.mul a b
| U64 -> UInt64.mul a b
| S8 -> Int8.mul a b
| S16 -> Int16.mul a b
| S32 -> Int32.mul a b
| S64 -> Int64.mul a b
let mul_lemma #t #l a b = ()
let mul64_wide a b = UInt128.mul_wide a b
let mul64_wide_lemma a b = ()
let mul_s64_wide a b = Int128.mul_wide a b
let mul_s64_wide_lemma a b = ()
[@(strict_on_arguments [0])]
let sub_mod #t #l a b =
match t with
| U1 -> UInt8.rem (UInt8.sub_mod a b) 2uy
| U8 -> UInt8.sub_mod a b
| U16 -> UInt16.sub_mod a b
| U32 -> UInt32.sub_mod a b
| U64 -> UInt64.sub_mod a b
| U128 -> UInt128.sub_mod a b
let sub_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let sub #t #l a b =
match t with
| U1 -> UInt8.sub a b
| U8 -> UInt8.sub a b
| U16 -> UInt16.sub a b
| U32 -> UInt32.sub a b
| U64 -> UInt64.sub a b
| U128 -> UInt128.sub a b
| S8 -> Int8.sub a b
| S16 -> Int16.sub a b
| S32 -> Int32.sub a b
| S64 -> Int64.sub a b
| S128 -> Int128.sub a b
let sub_lemma #t #l a b = ()
#push-options "--max_fuel 1"
[@(strict_on_arguments [0])]
let decr #t #l a =
match t with
| U1 -> UInt8.sub a 1uy
| U8 -> UInt8.sub a 1uy
| U16 -> UInt16.sub a 1us
| U32 -> UInt32.sub a 1ul
| U64 -> UInt64.sub a 1uL
| U128 -> UInt128.sub a (UInt128.uint_to_t 1)
| S8 -> Int8.sub a 1y
| S16 -> Int16.sub a 1s
| S32 -> Int32.sub a 1l
| S64 -> Int64.sub a 1L
| S128 -> Int128.sub a (Int128.int_to_t 1)
let decr_lemma #t #l a = ()
#pop-options
[@(strict_on_arguments [0])]
let logxor #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logxor 0uy 0uy == 0uy);
assert_norm (UInt8.logxor 0uy 1uy == 1uy);
assert_norm (UInt8.logxor 1uy 0uy == 1uy);
assert_norm (UInt8.logxor 1uy 1uy == 0uy);
UInt8.logxor a b
| U8 -> UInt8.logxor a b
| U16 -> UInt16.logxor a b
| U32 -> UInt32.logxor a b
| U64 -> UInt64.logxor a b
| U128 -> UInt128.logxor a b
| S8 -> Int8.logxor a b
| S16 -> Int16.logxor a b
| S32 -> Int32.logxor a b
| S64 -> Int64.logxor a b
| S128 -> Int128.logxor a b
#push-options "--max_fuel 1"
val logxor_lemma_: #t:inttype -> #l:secrecy_level -> a:int_t t l -> b:int_t t l -> Lemma
(v (a `logxor` (a `logxor` b)) == v b)
let logxor_lemma_ #t #l a b =
match t with
| U1 | U8 | U16 | U32 | U64 | U128 ->
UInt.logxor_associative #(bits t) (v a) (v a) (v b);
UInt.logxor_self #(bits t) (v a);
UInt.logxor_commutative #(bits t) 0 (v b);
UInt.logxor_lemma_1 #(bits t) (v b)
| S8 | S16 | S32 | S64 | S128 ->
Int.logxor_associative #(bits t) (v a) (v a) (v b);
Int.logxor_self #(bits t) (v a);
Int.logxor_commutative #(bits t) 0 (v b);
Int.logxor_lemma_1 #(bits t) (v b)
let logxor_lemma #t #l a b =
logxor_lemma_ #t a b;
v_extensionality (logxor a (logxor a b)) b;
begin
match t with
| U1 | U8 | U16 | U32 | U64 | U128 -> UInt.logxor_commutative #(bits t) (v a) (v b)
| S8 | S16 | S32 | S64 | S128 -> Int.logxor_commutative #(bits t) (v a) (v b)
end;
v_extensionality (logxor a (logxor b a)) b;
begin
match t with
| U1 | U8 | U16 | U32 | U64 | U128 -> UInt.logxor_lemma_1 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.logxor_lemma_1 #(bits t) (v a)
end;
v_extensionality (logxor a (mk_int #t #l 0)) a
let logxor_lemma1 #t #l a b =
match v a, v b with
| _, 0 ->
UInt.logxor_lemma_1 #(bits t) (v a)
| 0, _ ->
UInt.logxor_commutative #(bits t) (v a) (v b);
UInt.logxor_lemma_1 #(bits t) (v b)
| 1, 1 ->
v_extensionality a b;
UInt.logxor_self #(bits t) (v a)
let logxor_spec #t #l a b =
match t with
| U1 ->
assert_norm (u1 0 `logxor` u1 0 == u1 0 /\ u1 0 `logxor` u1 1 == u1 1);
assert_norm (u1 1 `logxor` u1 0 == u1 1 /\ u1 1 `logxor` u1 1 == u1 0);
assert_norm (0 `logxor_v #U1` 0 == 0 /\ 0 `logxor_v #U1` 1 == 1);
assert_norm (1 `logxor_v #U1` 0 == 1 /\ 1 `logxor_v #U1` 1 == 0)
| _ -> ()
#pop-options
[@(strict_on_arguments [0])]
let logand #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logand 0uy 0uy == 0uy);
assert_norm (UInt8.logand 0uy 1uy == 0uy);
assert_norm (UInt8.logand 1uy 0uy == 0uy);
assert_norm (UInt8.logand 1uy 1uy == 1uy);
UInt8.logand a b
| U8 -> UInt8.logand a b
| U16 -> UInt16.logand a b
| U32 -> UInt32.logand a b
| U64 -> UInt64.logand a b
| U128 -> UInt128.logand a b
| S8 -> Int8.logand a b
| S16 -> Int16.logand a b
| S32 -> Int32.logand a b
| S64 -> Int64.logand a b
| S128 -> Int128.logand a b | false | false | Lib.IntTypes.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 200,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val logand_zeros: #t:inttype -> #l:secrecy_level -> a:int_t t l ->
Lemma (v (a `logand` zeros t l) == 0) | [] | Lib.IntTypes.logand_zeros | {
"file_name": "lib/Lib.IntTypes.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | a: Lib.IntTypes.int_t t l
-> FStar.Pervasives.Lemma
(ensures Lib.IntTypes.v (Lib.IntTypes.logand a (Lib.IntTypes.zeros t l)) == 0) | {
"end_col": 69,
"end_line": 515,
"start_col": 2,
"start_line": 512
} |
Prims.Tot | val logor: #t:inttype -> #l:secrecy_level
-> int_t t l
-> int_t t l
-> int_t t l | [
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let logor #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logor 0uy 0uy == 0uy);
assert_norm (UInt8.logor 0uy 1uy == 1uy);
assert_norm (UInt8.logor 1uy 0uy == 1uy);
assert_norm (UInt8.logor 1uy 1uy == 1uy);
UInt8.logor a b
| U8 -> UInt8.logor a b
| U16 -> UInt16.logor a b
| U32 -> UInt32.logor a b
| U64 -> UInt64.logor a b
| U128 -> UInt128.logor a b
| S8 -> Int8.logor a b
| S16 -> Int16.logor a b
| S32 -> Int32.logor a b
| S64 -> Int64.logor a b
| S128 -> Int128.logor a b | val logor: #t:inttype -> #l:secrecy_level
-> int_t t l
-> int_t t l
-> int_t t l
let logor #t #l a b = | false | null | false | match t with
| U1 ->
assert_norm (UInt8.logor 0uy 0uy == 0uy);
assert_norm (UInt8.logor 0uy 1uy == 1uy);
assert_norm (UInt8.logor 1uy 0uy == 1uy);
assert_norm (UInt8.logor 1uy 1uy == 1uy);
UInt8.logor a b
| U8 -> UInt8.logor a b
| U16 -> UInt16.logor a b
| U32 -> UInt32.logor a b
| U64 -> UInt64.logor a b
| U128 -> UInt128.logor a b
| S8 -> Int8.logor a b
| S16 -> Int16.logor a b
| S32 -> Int32.logor a b
| S64 -> Int64.logor a b
| S128 -> Int128.logor a b | {
"checked_file": "Lib.IntTypes.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt8.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt16.fsti.checked",
"FStar.UInt128.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int8.fsti.checked",
"FStar.Int64.fsti.checked",
"FStar.Int32.fsti.checked",
"FStar.Int16.fsti.checked",
"FStar.Int128.fsti.checked",
"FStar.Int.Cast.Full.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Int.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "Lib.IntTypes.fst"
} | [
"total"
] | [
"Lib.IntTypes.inttype",
"Lib.IntTypes.secrecy_level",
"Lib.IntTypes.int_t",
"FStar.UInt8.logor",
"Prims.unit",
"FStar.Pervasives.assert_norm",
"Prims.eq2",
"FStar.UInt8.t",
"FStar.UInt8.__uint_to_t",
"FStar.UInt16.logor",
"FStar.UInt32.logor",
"FStar.UInt64.logor",
"FStar.UInt128.logor",
"FStar.Int8.logor",
"FStar.Int16.logor",
"FStar.Int32.logor",
"FStar.Int64.logor",
"FStar.Int128.logor"
] | [] | module Lib.IntTypes
open FStar.Math.Lemmas
#push-options "--max_fuel 0 --max_ifuel 1 --z3rlimit 200"
let pow2_2 _ = assert_norm (pow2 2 = 4)
let pow2_3 _ = assert_norm (pow2 3 = 8)
let pow2_4 _ = assert_norm (pow2 4 = 16)
let pow2_127 _ = assert_norm (pow2 127 = 0x80000000000000000000000000000000)
let bits_numbytes t = ()
let sec_int_t t = pub_int_t t
let sec_int_v #t u = pub_int_v u
let secret #t x = x
[@(strict_on_arguments [0])]
let mk_int #t #l x =
match t with
| U1 -> UInt8.uint_to_t x
| U8 -> UInt8.uint_to_t x
| U16 -> UInt16.uint_to_t x
| U32 -> UInt32.uint_to_t x
| U64 -> UInt64.uint_to_t x
| U128 -> UInt128.uint_to_t x
| S8 -> Int8.int_to_t x
| S16 -> Int16.int_to_t x
| S32 -> Int32.int_to_t x
| S64 -> Int64.int_to_t x
| S128 -> Int128.int_to_t x
val v_extensionality:
#t:inttype
-> #l:secrecy_level
-> a:int_t t l
-> b:int_t t l
-> Lemma
(requires v a == v b)
(ensures a == b)
let v_extensionality #t #l a b =
match t with
| U1 -> ()
| U8 -> UInt8.v_inj a b
| U16 -> UInt16.v_inj a b
| U32 -> UInt32.v_inj a b
| U64 -> UInt64.v_inj a b
| U128 -> UInt128.v_inj a b
| S8 -> Int8.v_inj a b
| S16 -> Int16.v_inj a b
| S32 -> Int32.v_inj a b
| S64 -> Int64.v_inj a b
| S128 -> Int128.v_inj a b
let v_injective #t #l a =
v_extensionality a (mk_int (v a))
let v_mk_int #t #l n = ()
let u128 n = FStar.UInt128.uint64_to_uint128 (u64 n)
// KaRaMeL will extract this to FStar_Int128_int_to_t, which isn't provided
// We'll need to have FStar.Int128.int64_to_int128 to support int128_t literals
let i128 n =
assert_norm (pow2 (bits S64 - 1) <= pow2 (bits S128 - 1));
sint #S128 #SEC n
let size_to_uint32 x = x
let size_to_uint64 x = Int.Cast.uint32_to_uint64 x
let byte_to_uint8 x = x
let byte_to_int8 x = Int.Cast.uint8_to_int8 x
let op_At_Percent = Int.op_At_Percent
// FStar.UInt128 gets special treatment in KaRaMeL. There is no
// equivalent for FStar.Int128 at the moment, so we use the three
// assumed cast operators below.
//
// Using them will fail at runtime with an informative message.
// The commented-out implementations show that they are realizable.
//
// When support for `FStar.Int128` is added KaRaMeL, these casts must
// be added as special cases. When using builtin compiler support for
// `int128_t`, they can be implemented directly as C casts without
// undefined or implementation-defined behaviour.
assume
val uint128_to_int128: a:UInt128.t{v a <= maxint S128} -> b:Int128.t{Int128.v b == UInt128.v a}
//let uint128_to_int128 a = Int128.int_to_t (v a)
assume
val int128_to_uint128: a:Int128.t -> b:UInt128.t{UInt128.v b == Int128.v a % pow2 128}
//let int128_to_uint128 a = mk_int (v a % pow2 128)
assume
val int64_to_int128: a:Int64.t -> b:Int128.t{Int128.v b == Int64.v a}
//let int64_to_int128 a = Int128.int_to_t (v a)
val uint64_to_int128: a:UInt64.t -> b:Int128.t{Int128.v b == UInt64.v a}
let uint64_to_int128 a = uint128_to_int128 (Int.Cast.Full.uint64_to_uint128 a)
val int64_to_uint128: a:Int64.t -> b:UInt128.t{UInt128.v b == Int64.v a % pow2 128}
let int64_to_uint128 a = int128_to_uint128 (int64_to_int128 a)
val int128_to_uint64: a:Int128.t -> b:UInt64.t{UInt64.v b == Int128.v a % pow2 64}
let int128_to_uint64 a = Int.Cast.Full.uint128_to_uint64 (int128_to_uint128 a)
#push-options "--z3rlimit 1000"
[@(strict_on_arguments [0;2])]
let cast #t #l t' l' u =
assert_norm (pow2 8 = 2 * pow2 7);
assert_norm (pow2 16 = 2 * pow2 15);
assert_norm (pow2 64 * pow2 64 = pow2 128);
assert_norm (pow2 16 * pow2 48 = pow2 64);
assert_norm (pow2 8 * pow2 56 = pow2 64);
assert_norm (pow2 32 * pow2 32 = pow2 64);
modulo_modulo_lemma (v u) (pow2 32) (pow2 32);
modulo_modulo_lemma (v u) (pow2 64) (pow2 64);
modulo_modulo_lemma (v u) (pow2 128) (pow2 64);
modulo_modulo_lemma (v u) (pow2 16) (pow2 48);
modulo_modulo_lemma (v u) (pow2 8) (pow2 56);
let open FStar.Int.Cast in
let open FStar.Int.Cast.Full in
match t, t' with
| U1, U1 -> u
| U1, U8 -> u
| U1, U16 -> uint8_to_uint16 u
| U1, U32 -> uint8_to_uint32 u
| U1, U64 -> uint8_to_uint64 u
| U1, U128 -> UInt128.uint64_to_uint128 (uint8_to_uint64 u)
| U1, S8 -> uint8_to_int8 u
| U1, S16 -> uint8_to_int16 u
| U1, S32 -> uint8_to_int32 u
| U1, S64 -> uint8_to_int64 u
| U1, S128 -> uint64_to_int128 (uint8_to_uint64 u)
| U8, U1 -> UInt8.rem u 2uy
| U8, U8 -> u
| U8, U16 -> uint8_to_uint16 u
| U8, U32 -> uint8_to_uint32 u
| U8, U64 -> uint8_to_uint64 u
| U8, U128 -> UInt128.uint64_to_uint128 (uint8_to_uint64 u)
| U8, S8 -> uint8_to_int8 u
| U8, S16 -> uint8_to_int16 u
| U8, S32 -> uint8_to_int32 u
| U8, S64 -> uint8_to_int64 u
| U8, S128 -> uint64_to_int128 (uint8_to_uint64 u)
| U16, U1 -> UInt8.rem (uint16_to_uint8 u) 2uy
| U16, U8 -> uint16_to_uint8 u
| U16, U16 -> u
| U16, U32 -> uint16_to_uint32 u
| U16, U64 -> uint16_to_uint64 u
| U16, U128 -> UInt128.uint64_to_uint128 (uint16_to_uint64 u)
| U16, S8 -> uint16_to_int8 u
| U16, S16 -> uint16_to_int16 u
| U16, S32 -> uint16_to_int32 u
| U16, S64 -> uint16_to_int64 u
| U16, S128 -> uint64_to_int128 (uint16_to_uint64 u)
| U32, U1 -> UInt8.rem (uint32_to_uint8 u) 2uy
| U32, U8 -> uint32_to_uint8 u
| U32, U16 -> uint32_to_uint16 u
| U32, U32 -> u
| U32, U64 -> uint32_to_uint64 u
| U32, U128 -> UInt128.uint64_to_uint128 (uint32_to_uint64 u)
| U32, S8 -> uint32_to_int8 u
| U32, S16 -> uint32_to_int16 u
| U32, S32 -> uint32_to_int32 u
| U32, S64 -> uint32_to_int64 u
| U32, S128 -> uint64_to_int128 (uint32_to_uint64 u)
| U64, U1 -> UInt8.rem (uint64_to_uint8 u) 2uy
| U64, U8 -> uint64_to_uint8 u
| U64, U16 -> uint64_to_uint16 u
| U64, U32 -> uint64_to_uint32 u
| U64, U64 -> u
| U64, U128 -> UInt128.uint64_to_uint128 u
| U64, S8 -> uint64_to_int8 u
| U64, S16 -> uint64_to_int16 u
| U64, S32 -> uint64_to_int32 u
| U64, S64 -> uint64_to_int64 u
| U64, S128 -> uint64_to_int128 u
| U128, U1 -> UInt8.rem (uint64_to_uint8 (uint128_to_uint64 u)) 2uy
| U128, U8 -> uint64_to_uint8 (UInt128.uint128_to_uint64 u)
| U128, U16 -> uint64_to_uint16 (UInt128.uint128_to_uint64 u)
| U128, U32 -> uint64_to_uint32 (UInt128.uint128_to_uint64 u)
| U128, U64 -> UInt128.uint128_to_uint64 u
| U128, U128 -> u
| U128, S8 -> uint64_to_int8 (UInt128.uint128_to_uint64 u)
| U128, S16 -> uint64_to_int16 (UInt128.uint128_to_uint64 u)
| U128, S32 -> uint64_to_int32 (UInt128.uint128_to_uint64 u)
| U128, S64 -> uint64_to_int64 (UInt128.uint128_to_uint64 u)
| U128, S128 -> uint128_to_int128 u
| S8, U1 -> UInt8.rem (int8_to_uint8 u) 2uy
| S8, U8 -> int8_to_uint8 u
| S8, U16 -> int8_to_uint16 u
| S8, U32 -> int8_to_uint32 u
| S8, U64 -> int8_to_uint64 u
| S8, U128 -> int64_to_uint128 (int8_to_int64 u)
| S8, S8 -> u
| S8, S16 -> int8_to_int16 u
| S8, S32 -> int8_to_int32 u
| S8, S64 -> int8_to_int64 u
| S8, S128 -> int64_to_int128 (int8_to_int64 u)
| S16, U1 -> UInt8.rem (int16_to_uint8 u) 2uy
| S16, U8 -> int16_to_uint8 u
| S16, U16 -> int16_to_uint16 u
| S16, U32 -> int16_to_uint32 u
| S16, U64 -> int16_to_uint64 u
| S16, U128 -> int64_to_uint128 (int16_to_int64 u)
| S16, S8 -> int16_to_int8 u
| S16, S16 -> u
| S16, S32 -> int16_to_int32 u
| S16, S64 -> int16_to_int64 u
| S16, S128 -> int64_to_int128 (int16_to_int64 u)
| S32, U1 -> UInt8.rem (int32_to_uint8 u) 2uy
| S32, U8 -> int32_to_uint8 u
| S32, U16 -> int32_to_uint16 u
| S32, U32 -> int32_to_uint32 u
| S32, U64 -> int32_to_uint64 u
| S32, U128 -> int64_to_uint128 (int32_to_int64 u)
| S32, S8 -> int32_to_int8 u
| S32, S16 -> int32_to_int16 u
| S32, S32 -> u
| S32, S64 -> int32_to_int64 u
| S32, S128 -> int64_to_int128 (int32_to_int64 u)
| S64, U1 -> UInt8.rem (int64_to_uint8 u) 2uy
| S64, U8 -> int64_to_uint8 u
| S64, U16 -> int64_to_uint16 u
| S64, U32 -> int64_to_uint32 u
| S64, U64 -> int64_to_uint64 u
| S64, U128 -> int64_to_uint128 u
| S64, S8 -> int64_to_int8 u
| S64, S16 -> int64_to_int16 u
| S64, S32 -> int64_to_int32 u
| S64, S64 -> u
| S64, S128 -> int64_to_int128 u
| S128, U1 -> UInt8.rem (uint64_to_uint8 (int128_to_uint64 u)) 2uy
| S128, U8 -> uint64_to_uint8 (int128_to_uint64 u)
| S128, U16 -> uint64_to_uint16 (int128_to_uint64 u)
| S128, U32 -> uint64_to_uint32 (int128_to_uint64 u)
| S128, U64 -> int128_to_uint64 u
| S128, U128 -> int128_to_uint128 u
| S128, S8 -> uint64_to_int8 (int128_to_uint64 u)
| S128, S16 -> uint64_to_int16 (int128_to_uint64 u)
| S128, S32 -> uint64_to_int32 (int128_to_uint64 u)
| S128, S64 -> uint64_to_int64 (int128_to_uint64 u)
| S128, S128 -> u
#pop-options
[@(strict_on_arguments [0])]
let ones t l =
match t with
| U1 -> 0x1uy
| U8 -> 0xFFuy
| U16 -> 0xFFFFus
| U32 -> 0xFFFFFFFFul
| U64 -> 0xFFFFFFFFFFFFFFFFuL
| U128 ->
let x = UInt128.uint64_to_uint128 0xFFFFFFFFFFFFFFFFuL in
let y = (UInt128.shift_left x 64ul) `UInt128.add` x in
assert_norm (UInt128.v y == pow2 128 - 1);
y
| _ -> mk_int (-1)
let zeros t l = mk_int 0
[@(strict_on_arguments [0])]
let add_mod #t #l a b =
match t with
| U1 -> UInt8.rem (UInt8.add_mod a b) 2uy
| U8 -> UInt8.add_mod a b
| U16 -> UInt16.add_mod a b
| U32 -> UInt32.add_mod a b
| U64 -> UInt64.add_mod a b
| U128 -> UInt128.add_mod a b
let add_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let add #t #l a b =
match t with
| U1 -> UInt8.add a b
| U8 -> UInt8.add a b
| U16 -> UInt16.add a b
| U32 -> UInt32.add a b
| U64 -> UInt64.add a b
| U128 -> UInt128.add a b
| S8 -> Int8.add a b
| S16 -> Int16.add a b
| S32 -> Int32.add a b
| S64 -> Int64.add a b
| S128 -> Int128.add a b
let add_lemma #t #l a b = ()
#push-options "--max_fuel 1"
[@(strict_on_arguments [0])]
let incr #t #l a =
match t with
| U1 -> UInt8.add a 1uy
| U8 -> UInt8.add a 1uy
| U16 -> UInt16.add a 1us
| U32 -> UInt32.add a 1ul
| U64 -> UInt64.add a 1uL
| U128 -> UInt128.add a (UInt128.uint_to_t 1)
| S8 -> Int8.add a 1y
| S16 -> Int16.add a 1s
| S32 -> Int32.add a 1l
| S64 -> Int64.add a 1L
| S128 -> Int128.add a (Int128.int_to_t 1)
let incr_lemma #t #l a = ()
#pop-options
[@(strict_on_arguments [0])]
let mul_mod #t #l a b =
match t with
| U1 -> UInt8.mul_mod a b
| U8 -> UInt8.mul_mod a b
| U16 -> UInt16.mul_mod a b
| U32 -> UInt32.mul_mod a b
| U64 -> UInt64.mul_mod a b
let mul_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let mul #t #l a b =
match t with
| U1 -> UInt8.mul a b
| U8 -> UInt8.mul a b
| U16 -> UInt16.mul a b
| U32 -> UInt32.mul a b
| U64 -> UInt64.mul a b
| S8 -> Int8.mul a b
| S16 -> Int16.mul a b
| S32 -> Int32.mul a b
| S64 -> Int64.mul a b
let mul_lemma #t #l a b = ()
let mul64_wide a b = UInt128.mul_wide a b
let mul64_wide_lemma a b = ()
let mul_s64_wide a b = Int128.mul_wide a b
let mul_s64_wide_lemma a b = ()
[@(strict_on_arguments [0])]
let sub_mod #t #l a b =
match t with
| U1 -> UInt8.rem (UInt8.sub_mod a b) 2uy
| U8 -> UInt8.sub_mod a b
| U16 -> UInt16.sub_mod a b
| U32 -> UInt32.sub_mod a b
| U64 -> UInt64.sub_mod a b
| U128 -> UInt128.sub_mod a b
let sub_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let sub #t #l a b =
match t with
| U1 -> UInt8.sub a b
| U8 -> UInt8.sub a b
| U16 -> UInt16.sub a b
| U32 -> UInt32.sub a b
| U64 -> UInt64.sub a b
| U128 -> UInt128.sub a b
| S8 -> Int8.sub a b
| S16 -> Int16.sub a b
| S32 -> Int32.sub a b
| S64 -> Int64.sub a b
| S128 -> Int128.sub a b
let sub_lemma #t #l a b = ()
#push-options "--max_fuel 1"
[@(strict_on_arguments [0])]
let decr #t #l a =
match t with
| U1 -> UInt8.sub a 1uy
| U8 -> UInt8.sub a 1uy
| U16 -> UInt16.sub a 1us
| U32 -> UInt32.sub a 1ul
| U64 -> UInt64.sub a 1uL
| U128 -> UInt128.sub a (UInt128.uint_to_t 1)
| S8 -> Int8.sub a 1y
| S16 -> Int16.sub a 1s
| S32 -> Int32.sub a 1l
| S64 -> Int64.sub a 1L
| S128 -> Int128.sub a (Int128.int_to_t 1)
let decr_lemma #t #l a = ()
#pop-options
[@(strict_on_arguments [0])]
let logxor #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logxor 0uy 0uy == 0uy);
assert_norm (UInt8.logxor 0uy 1uy == 1uy);
assert_norm (UInt8.logxor 1uy 0uy == 1uy);
assert_norm (UInt8.logxor 1uy 1uy == 0uy);
UInt8.logxor a b
| U8 -> UInt8.logxor a b
| U16 -> UInt16.logxor a b
| U32 -> UInt32.logxor a b
| U64 -> UInt64.logxor a b
| U128 -> UInt128.logxor a b
| S8 -> Int8.logxor a b
| S16 -> Int16.logxor a b
| S32 -> Int32.logxor a b
| S64 -> Int64.logxor a b
| S128 -> Int128.logxor a b
#push-options "--max_fuel 1"
val logxor_lemma_: #t:inttype -> #l:secrecy_level -> a:int_t t l -> b:int_t t l -> Lemma
(v (a `logxor` (a `logxor` b)) == v b)
let logxor_lemma_ #t #l a b =
match t with
| U1 | U8 | U16 | U32 | U64 | U128 ->
UInt.logxor_associative #(bits t) (v a) (v a) (v b);
UInt.logxor_self #(bits t) (v a);
UInt.logxor_commutative #(bits t) 0 (v b);
UInt.logxor_lemma_1 #(bits t) (v b)
| S8 | S16 | S32 | S64 | S128 ->
Int.logxor_associative #(bits t) (v a) (v a) (v b);
Int.logxor_self #(bits t) (v a);
Int.logxor_commutative #(bits t) 0 (v b);
Int.logxor_lemma_1 #(bits t) (v b)
let logxor_lemma #t #l a b =
logxor_lemma_ #t a b;
v_extensionality (logxor a (logxor a b)) b;
begin
match t with
| U1 | U8 | U16 | U32 | U64 | U128 -> UInt.logxor_commutative #(bits t) (v a) (v b)
| S8 | S16 | S32 | S64 | S128 -> Int.logxor_commutative #(bits t) (v a) (v b)
end;
v_extensionality (logxor a (logxor b a)) b;
begin
match t with
| U1 | U8 | U16 | U32 | U64 | U128 -> UInt.logxor_lemma_1 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.logxor_lemma_1 #(bits t) (v a)
end;
v_extensionality (logxor a (mk_int #t #l 0)) a
let logxor_lemma1 #t #l a b =
match v a, v b with
| _, 0 ->
UInt.logxor_lemma_1 #(bits t) (v a)
| 0, _ ->
UInt.logxor_commutative #(bits t) (v a) (v b);
UInt.logxor_lemma_1 #(bits t) (v b)
| 1, 1 ->
v_extensionality a b;
UInt.logxor_self #(bits t) (v a)
let logxor_spec #t #l a b =
match t with
| U1 ->
assert_norm (u1 0 `logxor` u1 0 == u1 0 /\ u1 0 `logxor` u1 1 == u1 1);
assert_norm (u1 1 `logxor` u1 0 == u1 1 /\ u1 1 `logxor` u1 1 == u1 0);
assert_norm (0 `logxor_v #U1` 0 == 0 /\ 0 `logxor_v #U1` 1 == 1);
assert_norm (1 `logxor_v #U1` 0 == 1 /\ 1 `logxor_v #U1` 1 == 0)
| _ -> ()
#pop-options
[@(strict_on_arguments [0])]
let logand #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logand 0uy 0uy == 0uy);
assert_norm (UInt8.logand 0uy 1uy == 0uy);
assert_norm (UInt8.logand 1uy 0uy == 0uy);
assert_norm (UInt8.logand 1uy 1uy == 1uy);
UInt8.logand a b
| U8 -> UInt8.logand a b
| U16 -> UInt16.logand a b
| U32 -> UInt32.logand a b
| U64 -> UInt64.logand a b
| U128 -> UInt128.logand a b
| S8 -> Int8.logand a b
| S16 -> Int16.logand a b
| S32 -> Int32.logand a b
| S64 -> Int64.logand a b
| S128 -> Int128.logand a b
let logand_zeros #t #l a =
match t with
| U1 -> assert_norm (u1 0 `logand` zeros U1 l == u1 0 /\ u1 1 `logand` zeros U1 l == u1 0)
| U8 | U16 | U32 | U64 | U128 -> UInt.logand_lemma_1 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.logand_lemma_1 #(bits t) (v a)
let logand_ones #t #l a =
match t with
| U1 -> assert_norm (u1 0 `logand` ones U1 l == u1 0 /\ u1 1 `logand` ones U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logand_lemma_2 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.logand_lemma_2 #(bits t) (v a)
let logand_lemma #t #l a b =
logand_zeros #t #l b;
logand_ones #t #l b;
match t with
| U1 ->
assert_norm (u1 0 `logand` zeros U1 l == u1 0 /\ u1 1 `logand` zeros U1 l == u1 0);
assert_norm (u1 0 `logand` ones U1 l == u1 0 /\ u1 1 `logand` ones U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logand_commutative #(bits t) (v a) (v b)
| S8 | S16 | S32 | S64 | S128 -> Int.logand_commutative #(bits t) (v a) (v b)
let logand_spec #t #l a b =
match t with
| U1 ->
assert_norm (u1 0 `logand` u1 0 == u1 0 /\ u1 0 `logand` u1 1 == u1 0);
assert_norm (u1 1 `logand` u1 0 == u1 0 /\ u1 1 `logand` u1 1 == u1 1);
assert_norm (0 `logand_v #U1` 0 == 0 /\ 0 `logand_v #U1` 1 == 0);
assert_norm (1 `logand_v #U1` 0 == 0 /\ 1 `logand_v #U1` 1 == 1)
| _ -> ()
let logand_le #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logand 0uy 0uy == 0uy);
assert_norm (UInt8.logand 0uy 1uy == 0uy);
assert_norm (UInt8.logand 1uy 0uy == 0uy);
assert_norm (UInt8.logand 1uy 1uy == 1uy)
| U8 -> UInt.logand_le (UInt.to_uint_t 8 (v a)) (UInt.to_uint_t 8 (v b))
| U16 -> UInt.logand_le (UInt.to_uint_t 16 (v a)) (UInt.to_uint_t 16 (v b))
| U32 -> UInt.logand_le (UInt.to_uint_t 32 (v a)) (UInt.to_uint_t 32 (v b))
| U64 -> UInt.logand_le (UInt.to_uint_t 64 (v a)) (UInt.to_uint_t 64 (v b))
| U128 -> UInt.logand_le (UInt.to_uint_t 128 (v a)) (UInt.to_uint_t 128 (v b))
let logand_mask #t #l a b m =
match t with
| U1 ->
assert_norm (UInt8.logand 0uy 0uy == 0uy);
assert_norm (UInt8.logand 0uy 1uy == 0uy);
assert_norm (UInt8.logand 1uy 0uy == 0uy);
assert_norm (UInt8.logand 1uy 1uy == 1uy)
| U8 -> UInt.logand_mask (UInt.to_uint_t 8 (v a)) m
| U16 -> UInt.logand_mask (UInt.to_uint_t 16 (v a)) m
| U32 -> UInt.logand_mask (UInt.to_uint_t 32 (v a)) m
| U64 -> UInt.logand_mask (UInt.to_uint_t 64 (v a)) m
| U128 -> UInt.logand_mask (UInt.to_uint_t 128 (v a)) m
[@(strict_on_arguments [0])] | false | false | Lib.IntTypes.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 200,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val logor: #t:inttype -> #l:secrecy_level
-> int_t t l
-> int_t t l
-> int_t t l | [] | Lib.IntTypes.logor | {
"file_name": "lib/Lib.IntTypes.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | a: Lib.IntTypes.int_t t l -> b: Lib.IntTypes.int_t t l -> Lib.IntTypes.int_t t l | {
"end_col": 28,
"end_line": 587,
"start_col": 2,
"start_line": 571
} |
FStar.Pervasives.Lemma | val logxor_lemma1: #t:inttype -> #l:secrecy_level -> a:int_t t l -> b:int_t t l -> Lemma
(requires range (v a) U1 /\ range (v b) U1)
(ensures range (v (a `logxor` b)) U1) | [
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let logxor_lemma1 #t #l a b =
match v a, v b with
| _, 0 ->
UInt.logxor_lemma_1 #(bits t) (v a)
| 0, _ ->
UInt.logxor_commutative #(bits t) (v a) (v b);
UInt.logxor_lemma_1 #(bits t) (v b)
| 1, 1 ->
v_extensionality a b;
UInt.logxor_self #(bits t) (v a) | val logxor_lemma1: #t:inttype -> #l:secrecy_level -> a:int_t t l -> b:int_t t l -> Lemma
(requires range (v a) U1 /\ range (v b) U1)
(ensures range (v (a `logxor` b)) U1)
let logxor_lemma1 #t #l a b = | false | null | true | match v a, v b with
| _, 0 -> UInt.logxor_lemma_1 #(bits t) (v a)
| 0, _ ->
UInt.logxor_commutative #(bits t) (v a) (v b);
UInt.logxor_lemma_1 #(bits t) (v b)
| 1, 1 ->
v_extensionality a b;
UInt.logxor_self #(bits t) (v a) | {
"checked_file": "Lib.IntTypes.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt8.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt16.fsti.checked",
"FStar.UInt128.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int8.fsti.checked",
"FStar.Int64.fsti.checked",
"FStar.Int32.fsti.checked",
"FStar.Int16.fsti.checked",
"FStar.Int128.fsti.checked",
"FStar.Int.Cast.Full.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Int.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "Lib.IntTypes.fst"
} | [
"lemma"
] | [
"Lib.IntTypes.inttype",
"Lib.IntTypes.secrecy_level",
"Lib.IntTypes.int_t",
"FStar.Pervasives.Native.Mktuple2",
"Prims.int",
"Lib.IntTypes.v",
"FStar.UInt.logxor_lemma_1",
"Lib.IntTypes.bits",
"Prims.unit",
"FStar.UInt.logxor_commutative",
"FStar.UInt.logxor_self",
"Lib.IntTypes.v_extensionality"
] | [] | module Lib.IntTypes
open FStar.Math.Lemmas
#push-options "--max_fuel 0 --max_ifuel 1 --z3rlimit 200"
let pow2_2 _ = assert_norm (pow2 2 = 4)
let pow2_3 _ = assert_norm (pow2 3 = 8)
let pow2_4 _ = assert_norm (pow2 4 = 16)
let pow2_127 _ = assert_norm (pow2 127 = 0x80000000000000000000000000000000)
let bits_numbytes t = ()
let sec_int_t t = pub_int_t t
let sec_int_v #t u = pub_int_v u
let secret #t x = x
[@(strict_on_arguments [0])]
let mk_int #t #l x =
match t with
| U1 -> UInt8.uint_to_t x
| U8 -> UInt8.uint_to_t x
| U16 -> UInt16.uint_to_t x
| U32 -> UInt32.uint_to_t x
| U64 -> UInt64.uint_to_t x
| U128 -> UInt128.uint_to_t x
| S8 -> Int8.int_to_t x
| S16 -> Int16.int_to_t x
| S32 -> Int32.int_to_t x
| S64 -> Int64.int_to_t x
| S128 -> Int128.int_to_t x
val v_extensionality:
#t:inttype
-> #l:secrecy_level
-> a:int_t t l
-> b:int_t t l
-> Lemma
(requires v a == v b)
(ensures a == b)
let v_extensionality #t #l a b =
match t with
| U1 -> ()
| U8 -> UInt8.v_inj a b
| U16 -> UInt16.v_inj a b
| U32 -> UInt32.v_inj a b
| U64 -> UInt64.v_inj a b
| U128 -> UInt128.v_inj a b
| S8 -> Int8.v_inj a b
| S16 -> Int16.v_inj a b
| S32 -> Int32.v_inj a b
| S64 -> Int64.v_inj a b
| S128 -> Int128.v_inj a b
let v_injective #t #l a =
v_extensionality a (mk_int (v a))
let v_mk_int #t #l n = ()
let u128 n = FStar.UInt128.uint64_to_uint128 (u64 n)
// KaRaMeL will extract this to FStar_Int128_int_to_t, which isn't provided
// We'll need to have FStar.Int128.int64_to_int128 to support int128_t literals
let i128 n =
assert_norm (pow2 (bits S64 - 1) <= pow2 (bits S128 - 1));
sint #S128 #SEC n
let size_to_uint32 x = x
let size_to_uint64 x = Int.Cast.uint32_to_uint64 x
let byte_to_uint8 x = x
let byte_to_int8 x = Int.Cast.uint8_to_int8 x
let op_At_Percent = Int.op_At_Percent
// FStar.UInt128 gets special treatment in KaRaMeL. There is no
// equivalent for FStar.Int128 at the moment, so we use the three
// assumed cast operators below.
//
// Using them will fail at runtime with an informative message.
// The commented-out implementations show that they are realizable.
//
// When support for `FStar.Int128` is added KaRaMeL, these casts must
// be added as special cases. When using builtin compiler support for
// `int128_t`, they can be implemented directly as C casts without
// undefined or implementation-defined behaviour.
assume
val uint128_to_int128: a:UInt128.t{v a <= maxint S128} -> b:Int128.t{Int128.v b == UInt128.v a}
//let uint128_to_int128 a = Int128.int_to_t (v a)
assume
val int128_to_uint128: a:Int128.t -> b:UInt128.t{UInt128.v b == Int128.v a % pow2 128}
//let int128_to_uint128 a = mk_int (v a % pow2 128)
assume
val int64_to_int128: a:Int64.t -> b:Int128.t{Int128.v b == Int64.v a}
//let int64_to_int128 a = Int128.int_to_t (v a)
val uint64_to_int128: a:UInt64.t -> b:Int128.t{Int128.v b == UInt64.v a}
let uint64_to_int128 a = uint128_to_int128 (Int.Cast.Full.uint64_to_uint128 a)
val int64_to_uint128: a:Int64.t -> b:UInt128.t{UInt128.v b == Int64.v a % pow2 128}
let int64_to_uint128 a = int128_to_uint128 (int64_to_int128 a)
val int128_to_uint64: a:Int128.t -> b:UInt64.t{UInt64.v b == Int128.v a % pow2 64}
let int128_to_uint64 a = Int.Cast.Full.uint128_to_uint64 (int128_to_uint128 a)
#push-options "--z3rlimit 1000"
[@(strict_on_arguments [0;2])]
let cast #t #l t' l' u =
assert_norm (pow2 8 = 2 * pow2 7);
assert_norm (pow2 16 = 2 * pow2 15);
assert_norm (pow2 64 * pow2 64 = pow2 128);
assert_norm (pow2 16 * pow2 48 = pow2 64);
assert_norm (pow2 8 * pow2 56 = pow2 64);
assert_norm (pow2 32 * pow2 32 = pow2 64);
modulo_modulo_lemma (v u) (pow2 32) (pow2 32);
modulo_modulo_lemma (v u) (pow2 64) (pow2 64);
modulo_modulo_lemma (v u) (pow2 128) (pow2 64);
modulo_modulo_lemma (v u) (pow2 16) (pow2 48);
modulo_modulo_lemma (v u) (pow2 8) (pow2 56);
let open FStar.Int.Cast in
let open FStar.Int.Cast.Full in
match t, t' with
| U1, U1 -> u
| U1, U8 -> u
| U1, U16 -> uint8_to_uint16 u
| U1, U32 -> uint8_to_uint32 u
| U1, U64 -> uint8_to_uint64 u
| U1, U128 -> UInt128.uint64_to_uint128 (uint8_to_uint64 u)
| U1, S8 -> uint8_to_int8 u
| U1, S16 -> uint8_to_int16 u
| U1, S32 -> uint8_to_int32 u
| U1, S64 -> uint8_to_int64 u
| U1, S128 -> uint64_to_int128 (uint8_to_uint64 u)
| U8, U1 -> UInt8.rem u 2uy
| U8, U8 -> u
| U8, U16 -> uint8_to_uint16 u
| U8, U32 -> uint8_to_uint32 u
| U8, U64 -> uint8_to_uint64 u
| U8, U128 -> UInt128.uint64_to_uint128 (uint8_to_uint64 u)
| U8, S8 -> uint8_to_int8 u
| U8, S16 -> uint8_to_int16 u
| U8, S32 -> uint8_to_int32 u
| U8, S64 -> uint8_to_int64 u
| U8, S128 -> uint64_to_int128 (uint8_to_uint64 u)
| U16, U1 -> UInt8.rem (uint16_to_uint8 u) 2uy
| U16, U8 -> uint16_to_uint8 u
| U16, U16 -> u
| U16, U32 -> uint16_to_uint32 u
| U16, U64 -> uint16_to_uint64 u
| U16, U128 -> UInt128.uint64_to_uint128 (uint16_to_uint64 u)
| U16, S8 -> uint16_to_int8 u
| U16, S16 -> uint16_to_int16 u
| U16, S32 -> uint16_to_int32 u
| U16, S64 -> uint16_to_int64 u
| U16, S128 -> uint64_to_int128 (uint16_to_uint64 u)
| U32, U1 -> UInt8.rem (uint32_to_uint8 u) 2uy
| U32, U8 -> uint32_to_uint8 u
| U32, U16 -> uint32_to_uint16 u
| U32, U32 -> u
| U32, U64 -> uint32_to_uint64 u
| U32, U128 -> UInt128.uint64_to_uint128 (uint32_to_uint64 u)
| U32, S8 -> uint32_to_int8 u
| U32, S16 -> uint32_to_int16 u
| U32, S32 -> uint32_to_int32 u
| U32, S64 -> uint32_to_int64 u
| U32, S128 -> uint64_to_int128 (uint32_to_uint64 u)
| U64, U1 -> UInt8.rem (uint64_to_uint8 u) 2uy
| U64, U8 -> uint64_to_uint8 u
| U64, U16 -> uint64_to_uint16 u
| U64, U32 -> uint64_to_uint32 u
| U64, U64 -> u
| U64, U128 -> UInt128.uint64_to_uint128 u
| U64, S8 -> uint64_to_int8 u
| U64, S16 -> uint64_to_int16 u
| U64, S32 -> uint64_to_int32 u
| U64, S64 -> uint64_to_int64 u
| U64, S128 -> uint64_to_int128 u
| U128, U1 -> UInt8.rem (uint64_to_uint8 (uint128_to_uint64 u)) 2uy
| U128, U8 -> uint64_to_uint8 (UInt128.uint128_to_uint64 u)
| U128, U16 -> uint64_to_uint16 (UInt128.uint128_to_uint64 u)
| U128, U32 -> uint64_to_uint32 (UInt128.uint128_to_uint64 u)
| U128, U64 -> UInt128.uint128_to_uint64 u
| U128, U128 -> u
| U128, S8 -> uint64_to_int8 (UInt128.uint128_to_uint64 u)
| U128, S16 -> uint64_to_int16 (UInt128.uint128_to_uint64 u)
| U128, S32 -> uint64_to_int32 (UInt128.uint128_to_uint64 u)
| U128, S64 -> uint64_to_int64 (UInt128.uint128_to_uint64 u)
| U128, S128 -> uint128_to_int128 u
| S8, U1 -> UInt8.rem (int8_to_uint8 u) 2uy
| S8, U8 -> int8_to_uint8 u
| S8, U16 -> int8_to_uint16 u
| S8, U32 -> int8_to_uint32 u
| S8, U64 -> int8_to_uint64 u
| S8, U128 -> int64_to_uint128 (int8_to_int64 u)
| S8, S8 -> u
| S8, S16 -> int8_to_int16 u
| S8, S32 -> int8_to_int32 u
| S8, S64 -> int8_to_int64 u
| S8, S128 -> int64_to_int128 (int8_to_int64 u)
| S16, U1 -> UInt8.rem (int16_to_uint8 u) 2uy
| S16, U8 -> int16_to_uint8 u
| S16, U16 -> int16_to_uint16 u
| S16, U32 -> int16_to_uint32 u
| S16, U64 -> int16_to_uint64 u
| S16, U128 -> int64_to_uint128 (int16_to_int64 u)
| S16, S8 -> int16_to_int8 u
| S16, S16 -> u
| S16, S32 -> int16_to_int32 u
| S16, S64 -> int16_to_int64 u
| S16, S128 -> int64_to_int128 (int16_to_int64 u)
| S32, U1 -> UInt8.rem (int32_to_uint8 u) 2uy
| S32, U8 -> int32_to_uint8 u
| S32, U16 -> int32_to_uint16 u
| S32, U32 -> int32_to_uint32 u
| S32, U64 -> int32_to_uint64 u
| S32, U128 -> int64_to_uint128 (int32_to_int64 u)
| S32, S8 -> int32_to_int8 u
| S32, S16 -> int32_to_int16 u
| S32, S32 -> u
| S32, S64 -> int32_to_int64 u
| S32, S128 -> int64_to_int128 (int32_to_int64 u)
| S64, U1 -> UInt8.rem (int64_to_uint8 u) 2uy
| S64, U8 -> int64_to_uint8 u
| S64, U16 -> int64_to_uint16 u
| S64, U32 -> int64_to_uint32 u
| S64, U64 -> int64_to_uint64 u
| S64, U128 -> int64_to_uint128 u
| S64, S8 -> int64_to_int8 u
| S64, S16 -> int64_to_int16 u
| S64, S32 -> int64_to_int32 u
| S64, S64 -> u
| S64, S128 -> int64_to_int128 u
| S128, U1 -> UInt8.rem (uint64_to_uint8 (int128_to_uint64 u)) 2uy
| S128, U8 -> uint64_to_uint8 (int128_to_uint64 u)
| S128, U16 -> uint64_to_uint16 (int128_to_uint64 u)
| S128, U32 -> uint64_to_uint32 (int128_to_uint64 u)
| S128, U64 -> int128_to_uint64 u
| S128, U128 -> int128_to_uint128 u
| S128, S8 -> uint64_to_int8 (int128_to_uint64 u)
| S128, S16 -> uint64_to_int16 (int128_to_uint64 u)
| S128, S32 -> uint64_to_int32 (int128_to_uint64 u)
| S128, S64 -> uint64_to_int64 (int128_to_uint64 u)
| S128, S128 -> u
#pop-options
[@(strict_on_arguments [0])]
let ones t l =
match t with
| U1 -> 0x1uy
| U8 -> 0xFFuy
| U16 -> 0xFFFFus
| U32 -> 0xFFFFFFFFul
| U64 -> 0xFFFFFFFFFFFFFFFFuL
| U128 ->
let x = UInt128.uint64_to_uint128 0xFFFFFFFFFFFFFFFFuL in
let y = (UInt128.shift_left x 64ul) `UInt128.add` x in
assert_norm (UInt128.v y == pow2 128 - 1);
y
| _ -> mk_int (-1)
let zeros t l = mk_int 0
[@(strict_on_arguments [0])]
let add_mod #t #l a b =
match t with
| U1 -> UInt8.rem (UInt8.add_mod a b) 2uy
| U8 -> UInt8.add_mod a b
| U16 -> UInt16.add_mod a b
| U32 -> UInt32.add_mod a b
| U64 -> UInt64.add_mod a b
| U128 -> UInt128.add_mod a b
let add_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let add #t #l a b =
match t with
| U1 -> UInt8.add a b
| U8 -> UInt8.add a b
| U16 -> UInt16.add a b
| U32 -> UInt32.add a b
| U64 -> UInt64.add a b
| U128 -> UInt128.add a b
| S8 -> Int8.add a b
| S16 -> Int16.add a b
| S32 -> Int32.add a b
| S64 -> Int64.add a b
| S128 -> Int128.add a b
let add_lemma #t #l a b = ()
#push-options "--max_fuel 1"
[@(strict_on_arguments [0])]
let incr #t #l a =
match t with
| U1 -> UInt8.add a 1uy
| U8 -> UInt8.add a 1uy
| U16 -> UInt16.add a 1us
| U32 -> UInt32.add a 1ul
| U64 -> UInt64.add a 1uL
| U128 -> UInt128.add a (UInt128.uint_to_t 1)
| S8 -> Int8.add a 1y
| S16 -> Int16.add a 1s
| S32 -> Int32.add a 1l
| S64 -> Int64.add a 1L
| S128 -> Int128.add a (Int128.int_to_t 1)
let incr_lemma #t #l a = ()
#pop-options
[@(strict_on_arguments [0])]
let mul_mod #t #l a b =
match t with
| U1 -> UInt8.mul_mod a b
| U8 -> UInt8.mul_mod a b
| U16 -> UInt16.mul_mod a b
| U32 -> UInt32.mul_mod a b
| U64 -> UInt64.mul_mod a b
let mul_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let mul #t #l a b =
match t with
| U1 -> UInt8.mul a b
| U8 -> UInt8.mul a b
| U16 -> UInt16.mul a b
| U32 -> UInt32.mul a b
| U64 -> UInt64.mul a b
| S8 -> Int8.mul a b
| S16 -> Int16.mul a b
| S32 -> Int32.mul a b
| S64 -> Int64.mul a b
let mul_lemma #t #l a b = ()
let mul64_wide a b = UInt128.mul_wide a b
let mul64_wide_lemma a b = ()
let mul_s64_wide a b = Int128.mul_wide a b
let mul_s64_wide_lemma a b = ()
[@(strict_on_arguments [0])]
let sub_mod #t #l a b =
match t with
| U1 -> UInt8.rem (UInt8.sub_mod a b) 2uy
| U8 -> UInt8.sub_mod a b
| U16 -> UInt16.sub_mod a b
| U32 -> UInt32.sub_mod a b
| U64 -> UInt64.sub_mod a b
| U128 -> UInt128.sub_mod a b
let sub_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let sub #t #l a b =
match t with
| U1 -> UInt8.sub a b
| U8 -> UInt8.sub a b
| U16 -> UInt16.sub a b
| U32 -> UInt32.sub a b
| U64 -> UInt64.sub a b
| U128 -> UInt128.sub a b
| S8 -> Int8.sub a b
| S16 -> Int16.sub a b
| S32 -> Int32.sub a b
| S64 -> Int64.sub a b
| S128 -> Int128.sub a b
let sub_lemma #t #l a b = ()
#push-options "--max_fuel 1"
[@(strict_on_arguments [0])]
let decr #t #l a =
match t with
| U1 -> UInt8.sub a 1uy
| U8 -> UInt8.sub a 1uy
| U16 -> UInt16.sub a 1us
| U32 -> UInt32.sub a 1ul
| U64 -> UInt64.sub a 1uL
| U128 -> UInt128.sub a (UInt128.uint_to_t 1)
| S8 -> Int8.sub a 1y
| S16 -> Int16.sub a 1s
| S32 -> Int32.sub a 1l
| S64 -> Int64.sub a 1L
| S128 -> Int128.sub a (Int128.int_to_t 1)
let decr_lemma #t #l a = ()
#pop-options
[@(strict_on_arguments [0])]
let logxor #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logxor 0uy 0uy == 0uy);
assert_norm (UInt8.logxor 0uy 1uy == 1uy);
assert_norm (UInt8.logxor 1uy 0uy == 1uy);
assert_norm (UInt8.logxor 1uy 1uy == 0uy);
UInt8.logxor a b
| U8 -> UInt8.logxor a b
| U16 -> UInt16.logxor a b
| U32 -> UInt32.logxor a b
| U64 -> UInt64.logxor a b
| U128 -> UInt128.logxor a b
| S8 -> Int8.logxor a b
| S16 -> Int16.logxor a b
| S32 -> Int32.logxor a b
| S64 -> Int64.logxor a b
| S128 -> Int128.logxor a b
#push-options "--max_fuel 1"
val logxor_lemma_: #t:inttype -> #l:secrecy_level -> a:int_t t l -> b:int_t t l -> Lemma
(v (a `logxor` (a `logxor` b)) == v b)
let logxor_lemma_ #t #l a b =
match t with
| U1 | U8 | U16 | U32 | U64 | U128 ->
UInt.logxor_associative #(bits t) (v a) (v a) (v b);
UInt.logxor_self #(bits t) (v a);
UInt.logxor_commutative #(bits t) 0 (v b);
UInt.logxor_lemma_1 #(bits t) (v b)
| S8 | S16 | S32 | S64 | S128 ->
Int.logxor_associative #(bits t) (v a) (v a) (v b);
Int.logxor_self #(bits t) (v a);
Int.logxor_commutative #(bits t) 0 (v b);
Int.logxor_lemma_1 #(bits t) (v b)
let logxor_lemma #t #l a b =
logxor_lemma_ #t a b;
v_extensionality (logxor a (logxor a b)) b;
begin
match t with
| U1 | U8 | U16 | U32 | U64 | U128 -> UInt.logxor_commutative #(bits t) (v a) (v b)
| S8 | S16 | S32 | S64 | S128 -> Int.logxor_commutative #(bits t) (v a) (v b)
end;
v_extensionality (logxor a (logxor b a)) b;
begin
match t with
| U1 | U8 | U16 | U32 | U64 | U128 -> UInt.logxor_lemma_1 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.logxor_lemma_1 #(bits t) (v a)
end;
v_extensionality (logxor a (mk_int #t #l 0)) a | false | false | Lib.IntTypes.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 1,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 200,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val logxor_lemma1: #t:inttype -> #l:secrecy_level -> a:int_t t l -> b:int_t t l -> Lemma
(requires range (v a) U1 /\ range (v b) U1)
(ensures range (v (a `logxor` b)) U1) | [] | Lib.IntTypes.logxor_lemma1 | {
"file_name": "lib/Lib.IntTypes.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | a: Lib.IntTypes.int_t t l -> b: Lib.IntTypes.int_t t l
-> FStar.Pervasives.Lemma
(requires
Lib.IntTypes.range (Lib.IntTypes.v a) Lib.IntTypes.U1 /\
Lib.IntTypes.range (Lib.IntTypes.v b) Lib.IntTypes.U1)
(ensures Lib.IntTypes.range (Lib.IntTypes.v (Lib.IntTypes.logxor a b)) Lib.IntTypes.U1) | {
"end_col": 36,
"end_line": 478,
"start_col": 2,
"start_line": 470
} |
Prims.Tot | val shift_right: #t:inttype -> #l:secrecy_level
-> int_t t l
-> shiftval t
-> int_t t l | [
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let shift_right #t #l a b =
match t with
| U1 -> UInt8.shift_right a b
| U8 -> UInt8.shift_right a b
| U16 -> UInt16.shift_right a b
| U32 -> UInt32.shift_right a b
| U64 -> UInt64.shift_right a b
| U128 -> UInt128.shift_right a b
| S8 -> Int8.shift_arithmetic_right a b
| S16 -> Int16.shift_arithmetic_right a b
| S32 -> Int32.shift_arithmetic_right a b
| S64 -> Int64.shift_arithmetic_right a b
| S128 -> Int128.shift_arithmetic_right a b | val shift_right: #t:inttype -> #l:secrecy_level
-> int_t t l
-> shiftval t
-> int_t t l
let shift_right #t #l a b = | false | null | false | match t with
| U1 -> UInt8.shift_right a b
| U8 -> UInt8.shift_right a b
| U16 -> UInt16.shift_right a b
| U32 -> UInt32.shift_right a b
| U64 -> UInt64.shift_right a b
| U128 -> UInt128.shift_right a b
| S8 -> Int8.shift_arithmetic_right a b
| S16 -> Int16.shift_arithmetic_right a b
| S32 -> Int32.shift_arithmetic_right a b
| S64 -> Int64.shift_arithmetic_right a b
| S128 -> Int128.shift_arithmetic_right a b | {
"checked_file": "Lib.IntTypes.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt8.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt16.fsti.checked",
"FStar.UInt128.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int8.fsti.checked",
"FStar.Int64.fsti.checked",
"FStar.Int32.fsti.checked",
"FStar.Int16.fsti.checked",
"FStar.Int128.fsti.checked",
"FStar.Int.Cast.Full.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Int.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "Lib.IntTypes.fst"
} | [
"total"
] | [
"Lib.IntTypes.inttype",
"Lib.IntTypes.secrecy_level",
"Lib.IntTypes.int_t",
"Lib.IntTypes.shiftval",
"FStar.UInt8.shift_right",
"FStar.UInt16.shift_right",
"FStar.UInt32.shift_right",
"FStar.UInt64.shift_right",
"FStar.UInt128.shift_right",
"FStar.Int8.shift_arithmetic_right",
"FStar.Int16.shift_arithmetic_right",
"FStar.Int32.shift_arithmetic_right",
"FStar.Int64.shift_arithmetic_right",
"FStar.Int128.shift_arithmetic_right"
] | [] | module Lib.IntTypes
open FStar.Math.Lemmas
#push-options "--max_fuel 0 --max_ifuel 1 --z3rlimit 200"
let pow2_2 _ = assert_norm (pow2 2 = 4)
let pow2_3 _ = assert_norm (pow2 3 = 8)
let pow2_4 _ = assert_norm (pow2 4 = 16)
let pow2_127 _ = assert_norm (pow2 127 = 0x80000000000000000000000000000000)
let bits_numbytes t = ()
let sec_int_t t = pub_int_t t
let sec_int_v #t u = pub_int_v u
let secret #t x = x
[@(strict_on_arguments [0])]
let mk_int #t #l x =
match t with
| U1 -> UInt8.uint_to_t x
| U8 -> UInt8.uint_to_t x
| U16 -> UInt16.uint_to_t x
| U32 -> UInt32.uint_to_t x
| U64 -> UInt64.uint_to_t x
| U128 -> UInt128.uint_to_t x
| S8 -> Int8.int_to_t x
| S16 -> Int16.int_to_t x
| S32 -> Int32.int_to_t x
| S64 -> Int64.int_to_t x
| S128 -> Int128.int_to_t x
val v_extensionality:
#t:inttype
-> #l:secrecy_level
-> a:int_t t l
-> b:int_t t l
-> Lemma
(requires v a == v b)
(ensures a == b)
let v_extensionality #t #l a b =
match t with
| U1 -> ()
| U8 -> UInt8.v_inj a b
| U16 -> UInt16.v_inj a b
| U32 -> UInt32.v_inj a b
| U64 -> UInt64.v_inj a b
| U128 -> UInt128.v_inj a b
| S8 -> Int8.v_inj a b
| S16 -> Int16.v_inj a b
| S32 -> Int32.v_inj a b
| S64 -> Int64.v_inj a b
| S128 -> Int128.v_inj a b
let v_injective #t #l a =
v_extensionality a (mk_int (v a))
let v_mk_int #t #l n = ()
let u128 n = FStar.UInt128.uint64_to_uint128 (u64 n)
// KaRaMeL will extract this to FStar_Int128_int_to_t, which isn't provided
// We'll need to have FStar.Int128.int64_to_int128 to support int128_t literals
let i128 n =
assert_norm (pow2 (bits S64 - 1) <= pow2 (bits S128 - 1));
sint #S128 #SEC n
let size_to_uint32 x = x
let size_to_uint64 x = Int.Cast.uint32_to_uint64 x
let byte_to_uint8 x = x
let byte_to_int8 x = Int.Cast.uint8_to_int8 x
let op_At_Percent = Int.op_At_Percent
// FStar.UInt128 gets special treatment in KaRaMeL. There is no
// equivalent for FStar.Int128 at the moment, so we use the three
// assumed cast operators below.
//
// Using them will fail at runtime with an informative message.
// The commented-out implementations show that they are realizable.
//
// When support for `FStar.Int128` is added KaRaMeL, these casts must
// be added as special cases. When using builtin compiler support for
// `int128_t`, they can be implemented directly as C casts without
// undefined or implementation-defined behaviour.
assume
val uint128_to_int128: a:UInt128.t{v a <= maxint S128} -> b:Int128.t{Int128.v b == UInt128.v a}
//let uint128_to_int128 a = Int128.int_to_t (v a)
assume
val int128_to_uint128: a:Int128.t -> b:UInt128.t{UInt128.v b == Int128.v a % pow2 128}
//let int128_to_uint128 a = mk_int (v a % pow2 128)
assume
val int64_to_int128: a:Int64.t -> b:Int128.t{Int128.v b == Int64.v a}
//let int64_to_int128 a = Int128.int_to_t (v a)
val uint64_to_int128: a:UInt64.t -> b:Int128.t{Int128.v b == UInt64.v a}
let uint64_to_int128 a = uint128_to_int128 (Int.Cast.Full.uint64_to_uint128 a)
val int64_to_uint128: a:Int64.t -> b:UInt128.t{UInt128.v b == Int64.v a % pow2 128}
let int64_to_uint128 a = int128_to_uint128 (int64_to_int128 a)
val int128_to_uint64: a:Int128.t -> b:UInt64.t{UInt64.v b == Int128.v a % pow2 64}
let int128_to_uint64 a = Int.Cast.Full.uint128_to_uint64 (int128_to_uint128 a)
#push-options "--z3rlimit 1000"
[@(strict_on_arguments [0;2])]
let cast #t #l t' l' u =
assert_norm (pow2 8 = 2 * pow2 7);
assert_norm (pow2 16 = 2 * pow2 15);
assert_norm (pow2 64 * pow2 64 = pow2 128);
assert_norm (pow2 16 * pow2 48 = pow2 64);
assert_norm (pow2 8 * pow2 56 = pow2 64);
assert_norm (pow2 32 * pow2 32 = pow2 64);
modulo_modulo_lemma (v u) (pow2 32) (pow2 32);
modulo_modulo_lemma (v u) (pow2 64) (pow2 64);
modulo_modulo_lemma (v u) (pow2 128) (pow2 64);
modulo_modulo_lemma (v u) (pow2 16) (pow2 48);
modulo_modulo_lemma (v u) (pow2 8) (pow2 56);
let open FStar.Int.Cast in
let open FStar.Int.Cast.Full in
match t, t' with
| U1, U1 -> u
| U1, U8 -> u
| U1, U16 -> uint8_to_uint16 u
| U1, U32 -> uint8_to_uint32 u
| U1, U64 -> uint8_to_uint64 u
| U1, U128 -> UInt128.uint64_to_uint128 (uint8_to_uint64 u)
| U1, S8 -> uint8_to_int8 u
| U1, S16 -> uint8_to_int16 u
| U1, S32 -> uint8_to_int32 u
| U1, S64 -> uint8_to_int64 u
| U1, S128 -> uint64_to_int128 (uint8_to_uint64 u)
| U8, U1 -> UInt8.rem u 2uy
| U8, U8 -> u
| U8, U16 -> uint8_to_uint16 u
| U8, U32 -> uint8_to_uint32 u
| U8, U64 -> uint8_to_uint64 u
| U8, U128 -> UInt128.uint64_to_uint128 (uint8_to_uint64 u)
| U8, S8 -> uint8_to_int8 u
| U8, S16 -> uint8_to_int16 u
| U8, S32 -> uint8_to_int32 u
| U8, S64 -> uint8_to_int64 u
| U8, S128 -> uint64_to_int128 (uint8_to_uint64 u)
| U16, U1 -> UInt8.rem (uint16_to_uint8 u) 2uy
| U16, U8 -> uint16_to_uint8 u
| U16, U16 -> u
| U16, U32 -> uint16_to_uint32 u
| U16, U64 -> uint16_to_uint64 u
| U16, U128 -> UInt128.uint64_to_uint128 (uint16_to_uint64 u)
| U16, S8 -> uint16_to_int8 u
| U16, S16 -> uint16_to_int16 u
| U16, S32 -> uint16_to_int32 u
| U16, S64 -> uint16_to_int64 u
| U16, S128 -> uint64_to_int128 (uint16_to_uint64 u)
| U32, U1 -> UInt8.rem (uint32_to_uint8 u) 2uy
| U32, U8 -> uint32_to_uint8 u
| U32, U16 -> uint32_to_uint16 u
| U32, U32 -> u
| U32, U64 -> uint32_to_uint64 u
| U32, U128 -> UInt128.uint64_to_uint128 (uint32_to_uint64 u)
| U32, S8 -> uint32_to_int8 u
| U32, S16 -> uint32_to_int16 u
| U32, S32 -> uint32_to_int32 u
| U32, S64 -> uint32_to_int64 u
| U32, S128 -> uint64_to_int128 (uint32_to_uint64 u)
| U64, U1 -> UInt8.rem (uint64_to_uint8 u) 2uy
| U64, U8 -> uint64_to_uint8 u
| U64, U16 -> uint64_to_uint16 u
| U64, U32 -> uint64_to_uint32 u
| U64, U64 -> u
| U64, U128 -> UInt128.uint64_to_uint128 u
| U64, S8 -> uint64_to_int8 u
| U64, S16 -> uint64_to_int16 u
| U64, S32 -> uint64_to_int32 u
| U64, S64 -> uint64_to_int64 u
| U64, S128 -> uint64_to_int128 u
| U128, U1 -> UInt8.rem (uint64_to_uint8 (uint128_to_uint64 u)) 2uy
| U128, U8 -> uint64_to_uint8 (UInt128.uint128_to_uint64 u)
| U128, U16 -> uint64_to_uint16 (UInt128.uint128_to_uint64 u)
| U128, U32 -> uint64_to_uint32 (UInt128.uint128_to_uint64 u)
| U128, U64 -> UInt128.uint128_to_uint64 u
| U128, U128 -> u
| U128, S8 -> uint64_to_int8 (UInt128.uint128_to_uint64 u)
| U128, S16 -> uint64_to_int16 (UInt128.uint128_to_uint64 u)
| U128, S32 -> uint64_to_int32 (UInt128.uint128_to_uint64 u)
| U128, S64 -> uint64_to_int64 (UInt128.uint128_to_uint64 u)
| U128, S128 -> uint128_to_int128 u
| S8, U1 -> UInt8.rem (int8_to_uint8 u) 2uy
| S8, U8 -> int8_to_uint8 u
| S8, U16 -> int8_to_uint16 u
| S8, U32 -> int8_to_uint32 u
| S8, U64 -> int8_to_uint64 u
| S8, U128 -> int64_to_uint128 (int8_to_int64 u)
| S8, S8 -> u
| S8, S16 -> int8_to_int16 u
| S8, S32 -> int8_to_int32 u
| S8, S64 -> int8_to_int64 u
| S8, S128 -> int64_to_int128 (int8_to_int64 u)
| S16, U1 -> UInt8.rem (int16_to_uint8 u) 2uy
| S16, U8 -> int16_to_uint8 u
| S16, U16 -> int16_to_uint16 u
| S16, U32 -> int16_to_uint32 u
| S16, U64 -> int16_to_uint64 u
| S16, U128 -> int64_to_uint128 (int16_to_int64 u)
| S16, S8 -> int16_to_int8 u
| S16, S16 -> u
| S16, S32 -> int16_to_int32 u
| S16, S64 -> int16_to_int64 u
| S16, S128 -> int64_to_int128 (int16_to_int64 u)
| S32, U1 -> UInt8.rem (int32_to_uint8 u) 2uy
| S32, U8 -> int32_to_uint8 u
| S32, U16 -> int32_to_uint16 u
| S32, U32 -> int32_to_uint32 u
| S32, U64 -> int32_to_uint64 u
| S32, U128 -> int64_to_uint128 (int32_to_int64 u)
| S32, S8 -> int32_to_int8 u
| S32, S16 -> int32_to_int16 u
| S32, S32 -> u
| S32, S64 -> int32_to_int64 u
| S32, S128 -> int64_to_int128 (int32_to_int64 u)
| S64, U1 -> UInt8.rem (int64_to_uint8 u) 2uy
| S64, U8 -> int64_to_uint8 u
| S64, U16 -> int64_to_uint16 u
| S64, U32 -> int64_to_uint32 u
| S64, U64 -> int64_to_uint64 u
| S64, U128 -> int64_to_uint128 u
| S64, S8 -> int64_to_int8 u
| S64, S16 -> int64_to_int16 u
| S64, S32 -> int64_to_int32 u
| S64, S64 -> u
| S64, S128 -> int64_to_int128 u
| S128, U1 -> UInt8.rem (uint64_to_uint8 (int128_to_uint64 u)) 2uy
| S128, U8 -> uint64_to_uint8 (int128_to_uint64 u)
| S128, U16 -> uint64_to_uint16 (int128_to_uint64 u)
| S128, U32 -> uint64_to_uint32 (int128_to_uint64 u)
| S128, U64 -> int128_to_uint64 u
| S128, U128 -> int128_to_uint128 u
| S128, S8 -> uint64_to_int8 (int128_to_uint64 u)
| S128, S16 -> uint64_to_int16 (int128_to_uint64 u)
| S128, S32 -> uint64_to_int32 (int128_to_uint64 u)
| S128, S64 -> uint64_to_int64 (int128_to_uint64 u)
| S128, S128 -> u
#pop-options
[@(strict_on_arguments [0])]
let ones t l =
match t with
| U1 -> 0x1uy
| U8 -> 0xFFuy
| U16 -> 0xFFFFus
| U32 -> 0xFFFFFFFFul
| U64 -> 0xFFFFFFFFFFFFFFFFuL
| U128 ->
let x = UInt128.uint64_to_uint128 0xFFFFFFFFFFFFFFFFuL in
let y = (UInt128.shift_left x 64ul) `UInt128.add` x in
assert_norm (UInt128.v y == pow2 128 - 1);
y
| _ -> mk_int (-1)
let zeros t l = mk_int 0
[@(strict_on_arguments [0])]
let add_mod #t #l a b =
match t with
| U1 -> UInt8.rem (UInt8.add_mod a b) 2uy
| U8 -> UInt8.add_mod a b
| U16 -> UInt16.add_mod a b
| U32 -> UInt32.add_mod a b
| U64 -> UInt64.add_mod a b
| U128 -> UInt128.add_mod a b
let add_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let add #t #l a b =
match t with
| U1 -> UInt8.add a b
| U8 -> UInt8.add a b
| U16 -> UInt16.add a b
| U32 -> UInt32.add a b
| U64 -> UInt64.add a b
| U128 -> UInt128.add a b
| S8 -> Int8.add a b
| S16 -> Int16.add a b
| S32 -> Int32.add a b
| S64 -> Int64.add a b
| S128 -> Int128.add a b
let add_lemma #t #l a b = ()
#push-options "--max_fuel 1"
[@(strict_on_arguments [0])]
let incr #t #l a =
match t with
| U1 -> UInt8.add a 1uy
| U8 -> UInt8.add a 1uy
| U16 -> UInt16.add a 1us
| U32 -> UInt32.add a 1ul
| U64 -> UInt64.add a 1uL
| U128 -> UInt128.add a (UInt128.uint_to_t 1)
| S8 -> Int8.add a 1y
| S16 -> Int16.add a 1s
| S32 -> Int32.add a 1l
| S64 -> Int64.add a 1L
| S128 -> Int128.add a (Int128.int_to_t 1)
let incr_lemma #t #l a = ()
#pop-options
[@(strict_on_arguments [0])]
let mul_mod #t #l a b =
match t with
| U1 -> UInt8.mul_mod a b
| U8 -> UInt8.mul_mod a b
| U16 -> UInt16.mul_mod a b
| U32 -> UInt32.mul_mod a b
| U64 -> UInt64.mul_mod a b
let mul_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let mul #t #l a b =
match t with
| U1 -> UInt8.mul a b
| U8 -> UInt8.mul a b
| U16 -> UInt16.mul a b
| U32 -> UInt32.mul a b
| U64 -> UInt64.mul a b
| S8 -> Int8.mul a b
| S16 -> Int16.mul a b
| S32 -> Int32.mul a b
| S64 -> Int64.mul a b
let mul_lemma #t #l a b = ()
let mul64_wide a b = UInt128.mul_wide a b
let mul64_wide_lemma a b = ()
let mul_s64_wide a b = Int128.mul_wide a b
let mul_s64_wide_lemma a b = ()
[@(strict_on_arguments [0])]
let sub_mod #t #l a b =
match t with
| U1 -> UInt8.rem (UInt8.sub_mod a b) 2uy
| U8 -> UInt8.sub_mod a b
| U16 -> UInt16.sub_mod a b
| U32 -> UInt32.sub_mod a b
| U64 -> UInt64.sub_mod a b
| U128 -> UInt128.sub_mod a b
let sub_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let sub #t #l a b =
match t with
| U1 -> UInt8.sub a b
| U8 -> UInt8.sub a b
| U16 -> UInt16.sub a b
| U32 -> UInt32.sub a b
| U64 -> UInt64.sub a b
| U128 -> UInt128.sub a b
| S8 -> Int8.sub a b
| S16 -> Int16.sub a b
| S32 -> Int32.sub a b
| S64 -> Int64.sub a b
| S128 -> Int128.sub a b
let sub_lemma #t #l a b = ()
#push-options "--max_fuel 1"
[@(strict_on_arguments [0])]
let decr #t #l a =
match t with
| U1 -> UInt8.sub a 1uy
| U8 -> UInt8.sub a 1uy
| U16 -> UInt16.sub a 1us
| U32 -> UInt32.sub a 1ul
| U64 -> UInt64.sub a 1uL
| U128 -> UInt128.sub a (UInt128.uint_to_t 1)
| S8 -> Int8.sub a 1y
| S16 -> Int16.sub a 1s
| S32 -> Int32.sub a 1l
| S64 -> Int64.sub a 1L
| S128 -> Int128.sub a (Int128.int_to_t 1)
let decr_lemma #t #l a = ()
#pop-options
[@(strict_on_arguments [0])]
let logxor #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logxor 0uy 0uy == 0uy);
assert_norm (UInt8.logxor 0uy 1uy == 1uy);
assert_norm (UInt8.logxor 1uy 0uy == 1uy);
assert_norm (UInt8.logxor 1uy 1uy == 0uy);
UInt8.logxor a b
| U8 -> UInt8.logxor a b
| U16 -> UInt16.logxor a b
| U32 -> UInt32.logxor a b
| U64 -> UInt64.logxor a b
| U128 -> UInt128.logxor a b
| S8 -> Int8.logxor a b
| S16 -> Int16.logxor a b
| S32 -> Int32.logxor a b
| S64 -> Int64.logxor a b
| S128 -> Int128.logxor a b
#push-options "--max_fuel 1"
val logxor_lemma_: #t:inttype -> #l:secrecy_level -> a:int_t t l -> b:int_t t l -> Lemma
(v (a `logxor` (a `logxor` b)) == v b)
let logxor_lemma_ #t #l a b =
match t with
| U1 | U8 | U16 | U32 | U64 | U128 ->
UInt.logxor_associative #(bits t) (v a) (v a) (v b);
UInt.logxor_self #(bits t) (v a);
UInt.logxor_commutative #(bits t) 0 (v b);
UInt.logxor_lemma_1 #(bits t) (v b)
| S8 | S16 | S32 | S64 | S128 ->
Int.logxor_associative #(bits t) (v a) (v a) (v b);
Int.logxor_self #(bits t) (v a);
Int.logxor_commutative #(bits t) 0 (v b);
Int.logxor_lemma_1 #(bits t) (v b)
let logxor_lemma #t #l a b =
logxor_lemma_ #t a b;
v_extensionality (logxor a (logxor a b)) b;
begin
match t with
| U1 | U8 | U16 | U32 | U64 | U128 -> UInt.logxor_commutative #(bits t) (v a) (v b)
| S8 | S16 | S32 | S64 | S128 -> Int.logxor_commutative #(bits t) (v a) (v b)
end;
v_extensionality (logxor a (logxor b a)) b;
begin
match t with
| U1 | U8 | U16 | U32 | U64 | U128 -> UInt.logxor_lemma_1 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.logxor_lemma_1 #(bits t) (v a)
end;
v_extensionality (logxor a (mk_int #t #l 0)) a
let logxor_lemma1 #t #l a b =
match v a, v b with
| _, 0 ->
UInt.logxor_lemma_1 #(bits t) (v a)
| 0, _ ->
UInt.logxor_commutative #(bits t) (v a) (v b);
UInt.logxor_lemma_1 #(bits t) (v b)
| 1, 1 ->
v_extensionality a b;
UInt.logxor_self #(bits t) (v a)
let logxor_spec #t #l a b =
match t with
| U1 ->
assert_norm (u1 0 `logxor` u1 0 == u1 0 /\ u1 0 `logxor` u1 1 == u1 1);
assert_norm (u1 1 `logxor` u1 0 == u1 1 /\ u1 1 `logxor` u1 1 == u1 0);
assert_norm (0 `logxor_v #U1` 0 == 0 /\ 0 `logxor_v #U1` 1 == 1);
assert_norm (1 `logxor_v #U1` 0 == 1 /\ 1 `logxor_v #U1` 1 == 0)
| _ -> ()
#pop-options
[@(strict_on_arguments [0])]
let logand #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logand 0uy 0uy == 0uy);
assert_norm (UInt8.logand 0uy 1uy == 0uy);
assert_norm (UInt8.logand 1uy 0uy == 0uy);
assert_norm (UInt8.logand 1uy 1uy == 1uy);
UInt8.logand a b
| U8 -> UInt8.logand a b
| U16 -> UInt16.logand a b
| U32 -> UInt32.logand a b
| U64 -> UInt64.logand a b
| U128 -> UInt128.logand a b
| S8 -> Int8.logand a b
| S16 -> Int16.logand a b
| S32 -> Int32.logand a b
| S64 -> Int64.logand a b
| S128 -> Int128.logand a b
let logand_zeros #t #l a =
match t with
| U1 -> assert_norm (u1 0 `logand` zeros U1 l == u1 0 /\ u1 1 `logand` zeros U1 l == u1 0)
| U8 | U16 | U32 | U64 | U128 -> UInt.logand_lemma_1 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.logand_lemma_1 #(bits t) (v a)
let logand_ones #t #l a =
match t with
| U1 -> assert_norm (u1 0 `logand` ones U1 l == u1 0 /\ u1 1 `logand` ones U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logand_lemma_2 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.logand_lemma_2 #(bits t) (v a)
let logand_lemma #t #l a b =
logand_zeros #t #l b;
logand_ones #t #l b;
match t with
| U1 ->
assert_norm (u1 0 `logand` zeros U1 l == u1 0 /\ u1 1 `logand` zeros U1 l == u1 0);
assert_norm (u1 0 `logand` ones U1 l == u1 0 /\ u1 1 `logand` ones U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logand_commutative #(bits t) (v a) (v b)
| S8 | S16 | S32 | S64 | S128 -> Int.logand_commutative #(bits t) (v a) (v b)
let logand_spec #t #l a b =
match t with
| U1 ->
assert_norm (u1 0 `logand` u1 0 == u1 0 /\ u1 0 `logand` u1 1 == u1 0);
assert_norm (u1 1 `logand` u1 0 == u1 0 /\ u1 1 `logand` u1 1 == u1 1);
assert_norm (0 `logand_v #U1` 0 == 0 /\ 0 `logand_v #U1` 1 == 0);
assert_norm (1 `logand_v #U1` 0 == 0 /\ 1 `logand_v #U1` 1 == 1)
| _ -> ()
let logand_le #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logand 0uy 0uy == 0uy);
assert_norm (UInt8.logand 0uy 1uy == 0uy);
assert_norm (UInt8.logand 1uy 0uy == 0uy);
assert_norm (UInt8.logand 1uy 1uy == 1uy)
| U8 -> UInt.logand_le (UInt.to_uint_t 8 (v a)) (UInt.to_uint_t 8 (v b))
| U16 -> UInt.logand_le (UInt.to_uint_t 16 (v a)) (UInt.to_uint_t 16 (v b))
| U32 -> UInt.logand_le (UInt.to_uint_t 32 (v a)) (UInt.to_uint_t 32 (v b))
| U64 -> UInt.logand_le (UInt.to_uint_t 64 (v a)) (UInt.to_uint_t 64 (v b))
| U128 -> UInt.logand_le (UInt.to_uint_t 128 (v a)) (UInt.to_uint_t 128 (v b))
let logand_mask #t #l a b m =
match t with
| U1 ->
assert_norm (UInt8.logand 0uy 0uy == 0uy);
assert_norm (UInt8.logand 0uy 1uy == 0uy);
assert_norm (UInt8.logand 1uy 0uy == 0uy);
assert_norm (UInt8.logand 1uy 1uy == 1uy)
| U8 -> UInt.logand_mask (UInt.to_uint_t 8 (v a)) m
| U16 -> UInt.logand_mask (UInt.to_uint_t 16 (v a)) m
| U32 -> UInt.logand_mask (UInt.to_uint_t 32 (v a)) m
| U64 -> UInt.logand_mask (UInt.to_uint_t 64 (v a)) m
| U128 -> UInt.logand_mask (UInt.to_uint_t 128 (v a)) m
[@(strict_on_arguments [0])]
let logor #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logor 0uy 0uy == 0uy);
assert_norm (UInt8.logor 0uy 1uy == 1uy);
assert_norm (UInt8.logor 1uy 0uy == 1uy);
assert_norm (UInt8.logor 1uy 1uy == 1uy);
UInt8.logor a b
| U8 -> UInt8.logor a b
| U16 -> UInt16.logor a b
| U32 -> UInt32.logor a b
| U64 -> UInt64.logor a b
| U128 -> UInt128.logor a b
| S8 -> Int8.logor a b
| S16 -> Int16.logor a b
| S32 -> Int32.logor a b
| S64 -> Int64.logor a b
| S128 -> Int128.logor a b
#push-options "--max_fuel 1"
let logor_disjoint #t #l a b m =
if m > 0 then
begin
UInt.logor_disjoint #(bits t) (v b) (v a) m;
UInt.logor_commutative #(bits t) (v b) (v a)
end
else
begin
UInt.logor_commutative #(bits t) (v a) (v b);
UInt.logor_lemma_1 #(bits t) (v b)
end
#pop-options
let logor_zeros #t #l a =
match t with
|U1 -> assert_norm(u1 0 `logor` zeros U1 l == u1 0 /\ u1 1 `logor` zeros U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logor_lemma_1 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.nth_lemma #(bits t) (Int.logor #(bits t) (v a) (Int.zero (bits t))) (v a)
let logor_ones #t #l a =
match t with
|U1 -> assert_norm(u1 0 `logor` ones U1 l == u1 1 /\ u1 1 `logor` ones U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logor_lemma_2 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.nth_lemma (Int.logor #(bits t) (v a) (Int.ones (bits t))) (Int.ones (bits t))
let logor_lemma #t #l a b =
logor_zeros #t #l b;
logor_ones #t #l b;
match t with
| U1 ->
assert_norm(u1 0 `logor` ones U1 l == u1 1 /\ u1 1 `logor` ones U1 l == u1 1);
assert_norm(u1 0 `logor` zeros U1 l == u1 0 /\ u1 1 `logor` zeros U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logor_commutative #(bits t) (v a) (v b)
| S8 | S16 | S32 | S64 | S128 -> Int.nth_lemma #(bits t) (Int.logor #(bits t) (v a) (v b)) (Int.logor #(bits t) (v b) (v a))
let logor_spec #t #l a b =
match t with
| U1 ->
assert_norm(u1 0 `logor` ones U1 l == u1 1 /\ u1 1 `logor` ones U1 l == u1 1);
assert_norm(u1 0 `logor` zeros U1 l == u1 0 /\ u1 1 `logor` zeros U1 l == u1 1);
assert_norm (0 `logor_v #U1` 0 == 0 /\ 0 `logor_v #U1` 1 == 1);
assert_norm (1 `logor_v #U1` 0 == 1 /\ 1 `logor_v #U1` 1 == 1)
| _ -> ()
[@(strict_on_arguments [0])]
let lognot #t #l a =
match t with
| U1 -> UInt8.rem (UInt8.lognot a) 2uy
| U8 -> UInt8.lognot a
| U16 -> UInt16.lognot a
| U32 -> UInt32.lognot a
| U64 -> UInt64.lognot a
| U128 -> UInt128.lognot a
| S8 -> Int8.lognot a
| S16 -> Int16.lognot a
| S32 -> Int32.lognot a
| S64 -> Int64.lognot a
| S128 -> Int128.lognot a
let lognot_lemma #t #l a =
match t with
|U1 -> assert_norm(lognot (u1 0) == u1 1 /\ lognot (u1 1) == u1 0)
| U8 | U16 | U32 | U64 | U128 ->
FStar.UInt.lognot_lemma_1 #(bits t);
UInt.nth_lemma (FStar.UInt.lognot #(bits t) (UInt.ones (bits t))) (UInt.zero (bits t))
| S8 | S16 | S32 | S64 | S128 ->
Int.nth_lemma (FStar.Int.lognot #(bits t) (Int.zero (bits t))) (Int.ones (bits t));
Int.nth_lemma (FStar.Int.lognot #(bits t) (Int.ones (bits t))) (Int.zero (bits t))
let lognot_spec #t #l a =
match t with
| U1 ->
assert_norm(lognot (u1 0) == u1 1 /\ lognot (u1 1) == u1 0);
assert_norm(lognot_v #U1 0 == 1 /\ lognot_v #U1 1 == 0)
| _ -> ()
[@(strict_on_arguments [0])] | false | false | Lib.IntTypes.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 200,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val shift_right: #t:inttype -> #l:secrecy_level
-> int_t t l
-> shiftval t
-> int_t t l | [] | Lib.IntTypes.shift_right | {
"file_name": "lib/Lib.IntTypes.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | a: Lib.IntTypes.int_t t l -> b: Lib.IntTypes.shiftval t -> Lib.IntTypes.int_t t l | {
"end_col": 45,
"end_line": 684,
"start_col": 2,
"start_line": 673
} |
Prims.Tot | val ne: #t:inttype -> int_t t PUB -> int_t t PUB -> bool | [
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let ne #t x y = not (eq x y) | val ne: #t:inttype -> int_t t PUB -> int_t t PUB -> bool
let ne #t x y = | false | null | false | not (eq x y) | {
"checked_file": "Lib.IntTypes.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt8.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt16.fsti.checked",
"FStar.UInt128.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int8.fsti.checked",
"FStar.Int64.fsti.checked",
"FStar.Int32.fsti.checked",
"FStar.Int16.fsti.checked",
"FStar.Int128.fsti.checked",
"FStar.Int.Cast.Full.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Int.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "Lib.IntTypes.fst"
} | [
"total"
] | [
"Lib.IntTypes.inttype",
"Lib.IntTypes.int_t",
"Lib.IntTypes.PUB",
"Prims.op_Negation",
"Lib.IntTypes.eq",
"Prims.bool"
] | [] | module Lib.IntTypes
open FStar.Math.Lemmas
#push-options "--max_fuel 0 --max_ifuel 1 --z3rlimit 200"
let pow2_2 _ = assert_norm (pow2 2 = 4)
let pow2_3 _ = assert_norm (pow2 3 = 8)
let pow2_4 _ = assert_norm (pow2 4 = 16)
let pow2_127 _ = assert_norm (pow2 127 = 0x80000000000000000000000000000000)
let bits_numbytes t = ()
let sec_int_t t = pub_int_t t
let sec_int_v #t u = pub_int_v u
let secret #t x = x
[@(strict_on_arguments [0])]
let mk_int #t #l x =
match t with
| U1 -> UInt8.uint_to_t x
| U8 -> UInt8.uint_to_t x
| U16 -> UInt16.uint_to_t x
| U32 -> UInt32.uint_to_t x
| U64 -> UInt64.uint_to_t x
| U128 -> UInt128.uint_to_t x
| S8 -> Int8.int_to_t x
| S16 -> Int16.int_to_t x
| S32 -> Int32.int_to_t x
| S64 -> Int64.int_to_t x
| S128 -> Int128.int_to_t x
val v_extensionality:
#t:inttype
-> #l:secrecy_level
-> a:int_t t l
-> b:int_t t l
-> Lemma
(requires v a == v b)
(ensures a == b)
let v_extensionality #t #l a b =
match t with
| U1 -> ()
| U8 -> UInt8.v_inj a b
| U16 -> UInt16.v_inj a b
| U32 -> UInt32.v_inj a b
| U64 -> UInt64.v_inj a b
| U128 -> UInt128.v_inj a b
| S8 -> Int8.v_inj a b
| S16 -> Int16.v_inj a b
| S32 -> Int32.v_inj a b
| S64 -> Int64.v_inj a b
| S128 -> Int128.v_inj a b
let v_injective #t #l a =
v_extensionality a (mk_int (v a))
let v_mk_int #t #l n = ()
let u128 n = FStar.UInt128.uint64_to_uint128 (u64 n)
// KaRaMeL will extract this to FStar_Int128_int_to_t, which isn't provided
// We'll need to have FStar.Int128.int64_to_int128 to support int128_t literals
let i128 n =
assert_norm (pow2 (bits S64 - 1) <= pow2 (bits S128 - 1));
sint #S128 #SEC n
let size_to_uint32 x = x
let size_to_uint64 x = Int.Cast.uint32_to_uint64 x
let byte_to_uint8 x = x
let byte_to_int8 x = Int.Cast.uint8_to_int8 x
let op_At_Percent = Int.op_At_Percent
// FStar.UInt128 gets special treatment in KaRaMeL. There is no
// equivalent for FStar.Int128 at the moment, so we use the three
// assumed cast operators below.
//
// Using them will fail at runtime with an informative message.
// The commented-out implementations show that they are realizable.
//
// When support for `FStar.Int128` is added KaRaMeL, these casts must
// be added as special cases. When using builtin compiler support for
// `int128_t`, they can be implemented directly as C casts without
// undefined or implementation-defined behaviour.
assume
val uint128_to_int128: a:UInt128.t{v a <= maxint S128} -> b:Int128.t{Int128.v b == UInt128.v a}
//let uint128_to_int128 a = Int128.int_to_t (v a)
assume
val int128_to_uint128: a:Int128.t -> b:UInt128.t{UInt128.v b == Int128.v a % pow2 128}
//let int128_to_uint128 a = mk_int (v a % pow2 128)
assume
val int64_to_int128: a:Int64.t -> b:Int128.t{Int128.v b == Int64.v a}
//let int64_to_int128 a = Int128.int_to_t (v a)
val uint64_to_int128: a:UInt64.t -> b:Int128.t{Int128.v b == UInt64.v a}
let uint64_to_int128 a = uint128_to_int128 (Int.Cast.Full.uint64_to_uint128 a)
val int64_to_uint128: a:Int64.t -> b:UInt128.t{UInt128.v b == Int64.v a % pow2 128}
let int64_to_uint128 a = int128_to_uint128 (int64_to_int128 a)
val int128_to_uint64: a:Int128.t -> b:UInt64.t{UInt64.v b == Int128.v a % pow2 64}
let int128_to_uint64 a = Int.Cast.Full.uint128_to_uint64 (int128_to_uint128 a)
#push-options "--z3rlimit 1000"
[@(strict_on_arguments [0;2])]
let cast #t #l t' l' u =
assert_norm (pow2 8 = 2 * pow2 7);
assert_norm (pow2 16 = 2 * pow2 15);
assert_norm (pow2 64 * pow2 64 = pow2 128);
assert_norm (pow2 16 * pow2 48 = pow2 64);
assert_norm (pow2 8 * pow2 56 = pow2 64);
assert_norm (pow2 32 * pow2 32 = pow2 64);
modulo_modulo_lemma (v u) (pow2 32) (pow2 32);
modulo_modulo_lemma (v u) (pow2 64) (pow2 64);
modulo_modulo_lemma (v u) (pow2 128) (pow2 64);
modulo_modulo_lemma (v u) (pow2 16) (pow2 48);
modulo_modulo_lemma (v u) (pow2 8) (pow2 56);
let open FStar.Int.Cast in
let open FStar.Int.Cast.Full in
match t, t' with
| U1, U1 -> u
| U1, U8 -> u
| U1, U16 -> uint8_to_uint16 u
| U1, U32 -> uint8_to_uint32 u
| U1, U64 -> uint8_to_uint64 u
| U1, U128 -> UInt128.uint64_to_uint128 (uint8_to_uint64 u)
| U1, S8 -> uint8_to_int8 u
| U1, S16 -> uint8_to_int16 u
| U1, S32 -> uint8_to_int32 u
| U1, S64 -> uint8_to_int64 u
| U1, S128 -> uint64_to_int128 (uint8_to_uint64 u)
| U8, U1 -> UInt8.rem u 2uy
| U8, U8 -> u
| U8, U16 -> uint8_to_uint16 u
| U8, U32 -> uint8_to_uint32 u
| U8, U64 -> uint8_to_uint64 u
| U8, U128 -> UInt128.uint64_to_uint128 (uint8_to_uint64 u)
| U8, S8 -> uint8_to_int8 u
| U8, S16 -> uint8_to_int16 u
| U8, S32 -> uint8_to_int32 u
| U8, S64 -> uint8_to_int64 u
| U8, S128 -> uint64_to_int128 (uint8_to_uint64 u)
| U16, U1 -> UInt8.rem (uint16_to_uint8 u) 2uy
| U16, U8 -> uint16_to_uint8 u
| U16, U16 -> u
| U16, U32 -> uint16_to_uint32 u
| U16, U64 -> uint16_to_uint64 u
| U16, U128 -> UInt128.uint64_to_uint128 (uint16_to_uint64 u)
| U16, S8 -> uint16_to_int8 u
| U16, S16 -> uint16_to_int16 u
| U16, S32 -> uint16_to_int32 u
| U16, S64 -> uint16_to_int64 u
| U16, S128 -> uint64_to_int128 (uint16_to_uint64 u)
| U32, U1 -> UInt8.rem (uint32_to_uint8 u) 2uy
| U32, U8 -> uint32_to_uint8 u
| U32, U16 -> uint32_to_uint16 u
| U32, U32 -> u
| U32, U64 -> uint32_to_uint64 u
| U32, U128 -> UInt128.uint64_to_uint128 (uint32_to_uint64 u)
| U32, S8 -> uint32_to_int8 u
| U32, S16 -> uint32_to_int16 u
| U32, S32 -> uint32_to_int32 u
| U32, S64 -> uint32_to_int64 u
| U32, S128 -> uint64_to_int128 (uint32_to_uint64 u)
| U64, U1 -> UInt8.rem (uint64_to_uint8 u) 2uy
| U64, U8 -> uint64_to_uint8 u
| U64, U16 -> uint64_to_uint16 u
| U64, U32 -> uint64_to_uint32 u
| U64, U64 -> u
| U64, U128 -> UInt128.uint64_to_uint128 u
| U64, S8 -> uint64_to_int8 u
| U64, S16 -> uint64_to_int16 u
| U64, S32 -> uint64_to_int32 u
| U64, S64 -> uint64_to_int64 u
| U64, S128 -> uint64_to_int128 u
| U128, U1 -> UInt8.rem (uint64_to_uint8 (uint128_to_uint64 u)) 2uy
| U128, U8 -> uint64_to_uint8 (UInt128.uint128_to_uint64 u)
| U128, U16 -> uint64_to_uint16 (UInt128.uint128_to_uint64 u)
| U128, U32 -> uint64_to_uint32 (UInt128.uint128_to_uint64 u)
| U128, U64 -> UInt128.uint128_to_uint64 u
| U128, U128 -> u
| U128, S8 -> uint64_to_int8 (UInt128.uint128_to_uint64 u)
| U128, S16 -> uint64_to_int16 (UInt128.uint128_to_uint64 u)
| U128, S32 -> uint64_to_int32 (UInt128.uint128_to_uint64 u)
| U128, S64 -> uint64_to_int64 (UInt128.uint128_to_uint64 u)
| U128, S128 -> uint128_to_int128 u
| S8, U1 -> UInt8.rem (int8_to_uint8 u) 2uy
| S8, U8 -> int8_to_uint8 u
| S8, U16 -> int8_to_uint16 u
| S8, U32 -> int8_to_uint32 u
| S8, U64 -> int8_to_uint64 u
| S8, U128 -> int64_to_uint128 (int8_to_int64 u)
| S8, S8 -> u
| S8, S16 -> int8_to_int16 u
| S8, S32 -> int8_to_int32 u
| S8, S64 -> int8_to_int64 u
| S8, S128 -> int64_to_int128 (int8_to_int64 u)
| S16, U1 -> UInt8.rem (int16_to_uint8 u) 2uy
| S16, U8 -> int16_to_uint8 u
| S16, U16 -> int16_to_uint16 u
| S16, U32 -> int16_to_uint32 u
| S16, U64 -> int16_to_uint64 u
| S16, U128 -> int64_to_uint128 (int16_to_int64 u)
| S16, S8 -> int16_to_int8 u
| S16, S16 -> u
| S16, S32 -> int16_to_int32 u
| S16, S64 -> int16_to_int64 u
| S16, S128 -> int64_to_int128 (int16_to_int64 u)
| S32, U1 -> UInt8.rem (int32_to_uint8 u) 2uy
| S32, U8 -> int32_to_uint8 u
| S32, U16 -> int32_to_uint16 u
| S32, U32 -> int32_to_uint32 u
| S32, U64 -> int32_to_uint64 u
| S32, U128 -> int64_to_uint128 (int32_to_int64 u)
| S32, S8 -> int32_to_int8 u
| S32, S16 -> int32_to_int16 u
| S32, S32 -> u
| S32, S64 -> int32_to_int64 u
| S32, S128 -> int64_to_int128 (int32_to_int64 u)
| S64, U1 -> UInt8.rem (int64_to_uint8 u) 2uy
| S64, U8 -> int64_to_uint8 u
| S64, U16 -> int64_to_uint16 u
| S64, U32 -> int64_to_uint32 u
| S64, U64 -> int64_to_uint64 u
| S64, U128 -> int64_to_uint128 u
| S64, S8 -> int64_to_int8 u
| S64, S16 -> int64_to_int16 u
| S64, S32 -> int64_to_int32 u
| S64, S64 -> u
| S64, S128 -> int64_to_int128 u
| S128, U1 -> UInt8.rem (uint64_to_uint8 (int128_to_uint64 u)) 2uy
| S128, U8 -> uint64_to_uint8 (int128_to_uint64 u)
| S128, U16 -> uint64_to_uint16 (int128_to_uint64 u)
| S128, U32 -> uint64_to_uint32 (int128_to_uint64 u)
| S128, U64 -> int128_to_uint64 u
| S128, U128 -> int128_to_uint128 u
| S128, S8 -> uint64_to_int8 (int128_to_uint64 u)
| S128, S16 -> uint64_to_int16 (int128_to_uint64 u)
| S128, S32 -> uint64_to_int32 (int128_to_uint64 u)
| S128, S64 -> uint64_to_int64 (int128_to_uint64 u)
| S128, S128 -> u
#pop-options
[@(strict_on_arguments [0])]
let ones t l =
match t with
| U1 -> 0x1uy
| U8 -> 0xFFuy
| U16 -> 0xFFFFus
| U32 -> 0xFFFFFFFFul
| U64 -> 0xFFFFFFFFFFFFFFFFuL
| U128 ->
let x = UInt128.uint64_to_uint128 0xFFFFFFFFFFFFFFFFuL in
let y = (UInt128.shift_left x 64ul) `UInt128.add` x in
assert_norm (UInt128.v y == pow2 128 - 1);
y
| _ -> mk_int (-1)
let zeros t l = mk_int 0
[@(strict_on_arguments [0])]
let add_mod #t #l a b =
match t with
| U1 -> UInt8.rem (UInt8.add_mod a b) 2uy
| U8 -> UInt8.add_mod a b
| U16 -> UInt16.add_mod a b
| U32 -> UInt32.add_mod a b
| U64 -> UInt64.add_mod a b
| U128 -> UInt128.add_mod a b
let add_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let add #t #l a b =
match t with
| U1 -> UInt8.add a b
| U8 -> UInt8.add a b
| U16 -> UInt16.add a b
| U32 -> UInt32.add a b
| U64 -> UInt64.add a b
| U128 -> UInt128.add a b
| S8 -> Int8.add a b
| S16 -> Int16.add a b
| S32 -> Int32.add a b
| S64 -> Int64.add a b
| S128 -> Int128.add a b
let add_lemma #t #l a b = ()
#push-options "--max_fuel 1"
[@(strict_on_arguments [0])]
let incr #t #l a =
match t with
| U1 -> UInt8.add a 1uy
| U8 -> UInt8.add a 1uy
| U16 -> UInt16.add a 1us
| U32 -> UInt32.add a 1ul
| U64 -> UInt64.add a 1uL
| U128 -> UInt128.add a (UInt128.uint_to_t 1)
| S8 -> Int8.add a 1y
| S16 -> Int16.add a 1s
| S32 -> Int32.add a 1l
| S64 -> Int64.add a 1L
| S128 -> Int128.add a (Int128.int_to_t 1)
let incr_lemma #t #l a = ()
#pop-options
[@(strict_on_arguments [0])]
let mul_mod #t #l a b =
match t with
| U1 -> UInt8.mul_mod a b
| U8 -> UInt8.mul_mod a b
| U16 -> UInt16.mul_mod a b
| U32 -> UInt32.mul_mod a b
| U64 -> UInt64.mul_mod a b
let mul_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let mul #t #l a b =
match t with
| U1 -> UInt8.mul a b
| U8 -> UInt8.mul a b
| U16 -> UInt16.mul a b
| U32 -> UInt32.mul a b
| U64 -> UInt64.mul a b
| S8 -> Int8.mul a b
| S16 -> Int16.mul a b
| S32 -> Int32.mul a b
| S64 -> Int64.mul a b
let mul_lemma #t #l a b = ()
let mul64_wide a b = UInt128.mul_wide a b
let mul64_wide_lemma a b = ()
let mul_s64_wide a b = Int128.mul_wide a b
let mul_s64_wide_lemma a b = ()
[@(strict_on_arguments [0])]
let sub_mod #t #l a b =
match t with
| U1 -> UInt8.rem (UInt8.sub_mod a b) 2uy
| U8 -> UInt8.sub_mod a b
| U16 -> UInt16.sub_mod a b
| U32 -> UInt32.sub_mod a b
| U64 -> UInt64.sub_mod a b
| U128 -> UInt128.sub_mod a b
let sub_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let sub #t #l a b =
match t with
| U1 -> UInt8.sub a b
| U8 -> UInt8.sub a b
| U16 -> UInt16.sub a b
| U32 -> UInt32.sub a b
| U64 -> UInt64.sub a b
| U128 -> UInt128.sub a b
| S8 -> Int8.sub a b
| S16 -> Int16.sub a b
| S32 -> Int32.sub a b
| S64 -> Int64.sub a b
| S128 -> Int128.sub a b
let sub_lemma #t #l a b = ()
#push-options "--max_fuel 1"
[@(strict_on_arguments [0])]
let decr #t #l a =
match t with
| U1 -> UInt8.sub a 1uy
| U8 -> UInt8.sub a 1uy
| U16 -> UInt16.sub a 1us
| U32 -> UInt32.sub a 1ul
| U64 -> UInt64.sub a 1uL
| U128 -> UInt128.sub a (UInt128.uint_to_t 1)
| S8 -> Int8.sub a 1y
| S16 -> Int16.sub a 1s
| S32 -> Int32.sub a 1l
| S64 -> Int64.sub a 1L
| S128 -> Int128.sub a (Int128.int_to_t 1)
let decr_lemma #t #l a = ()
#pop-options
[@(strict_on_arguments [0])]
let logxor #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logxor 0uy 0uy == 0uy);
assert_norm (UInt8.logxor 0uy 1uy == 1uy);
assert_norm (UInt8.logxor 1uy 0uy == 1uy);
assert_norm (UInt8.logxor 1uy 1uy == 0uy);
UInt8.logxor a b
| U8 -> UInt8.logxor a b
| U16 -> UInt16.logxor a b
| U32 -> UInt32.logxor a b
| U64 -> UInt64.logxor a b
| U128 -> UInt128.logxor a b
| S8 -> Int8.logxor a b
| S16 -> Int16.logxor a b
| S32 -> Int32.logxor a b
| S64 -> Int64.logxor a b
| S128 -> Int128.logxor a b
#push-options "--max_fuel 1"
val logxor_lemma_: #t:inttype -> #l:secrecy_level -> a:int_t t l -> b:int_t t l -> Lemma
(v (a `logxor` (a `logxor` b)) == v b)
let logxor_lemma_ #t #l a b =
match t with
| U1 | U8 | U16 | U32 | U64 | U128 ->
UInt.logxor_associative #(bits t) (v a) (v a) (v b);
UInt.logxor_self #(bits t) (v a);
UInt.logxor_commutative #(bits t) 0 (v b);
UInt.logxor_lemma_1 #(bits t) (v b)
| S8 | S16 | S32 | S64 | S128 ->
Int.logxor_associative #(bits t) (v a) (v a) (v b);
Int.logxor_self #(bits t) (v a);
Int.logxor_commutative #(bits t) 0 (v b);
Int.logxor_lemma_1 #(bits t) (v b)
let logxor_lemma #t #l a b =
logxor_lemma_ #t a b;
v_extensionality (logxor a (logxor a b)) b;
begin
match t with
| U1 | U8 | U16 | U32 | U64 | U128 -> UInt.logxor_commutative #(bits t) (v a) (v b)
| S8 | S16 | S32 | S64 | S128 -> Int.logxor_commutative #(bits t) (v a) (v b)
end;
v_extensionality (logxor a (logxor b a)) b;
begin
match t with
| U1 | U8 | U16 | U32 | U64 | U128 -> UInt.logxor_lemma_1 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.logxor_lemma_1 #(bits t) (v a)
end;
v_extensionality (logxor a (mk_int #t #l 0)) a
let logxor_lemma1 #t #l a b =
match v a, v b with
| _, 0 ->
UInt.logxor_lemma_1 #(bits t) (v a)
| 0, _ ->
UInt.logxor_commutative #(bits t) (v a) (v b);
UInt.logxor_lemma_1 #(bits t) (v b)
| 1, 1 ->
v_extensionality a b;
UInt.logxor_self #(bits t) (v a)
let logxor_spec #t #l a b =
match t with
| U1 ->
assert_norm (u1 0 `logxor` u1 0 == u1 0 /\ u1 0 `logxor` u1 1 == u1 1);
assert_norm (u1 1 `logxor` u1 0 == u1 1 /\ u1 1 `logxor` u1 1 == u1 0);
assert_norm (0 `logxor_v #U1` 0 == 0 /\ 0 `logxor_v #U1` 1 == 1);
assert_norm (1 `logxor_v #U1` 0 == 1 /\ 1 `logxor_v #U1` 1 == 0)
| _ -> ()
#pop-options
[@(strict_on_arguments [0])]
let logand #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logand 0uy 0uy == 0uy);
assert_norm (UInt8.logand 0uy 1uy == 0uy);
assert_norm (UInt8.logand 1uy 0uy == 0uy);
assert_norm (UInt8.logand 1uy 1uy == 1uy);
UInt8.logand a b
| U8 -> UInt8.logand a b
| U16 -> UInt16.logand a b
| U32 -> UInt32.logand a b
| U64 -> UInt64.logand a b
| U128 -> UInt128.logand a b
| S8 -> Int8.logand a b
| S16 -> Int16.logand a b
| S32 -> Int32.logand a b
| S64 -> Int64.logand a b
| S128 -> Int128.logand a b
let logand_zeros #t #l a =
match t with
| U1 -> assert_norm (u1 0 `logand` zeros U1 l == u1 0 /\ u1 1 `logand` zeros U1 l == u1 0)
| U8 | U16 | U32 | U64 | U128 -> UInt.logand_lemma_1 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.logand_lemma_1 #(bits t) (v a)
let logand_ones #t #l a =
match t with
| U1 -> assert_norm (u1 0 `logand` ones U1 l == u1 0 /\ u1 1 `logand` ones U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logand_lemma_2 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.logand_lemma_2 #(bits t) (v a)
let logand_lemma #t #l a b =
logand_zeros #t #l b;
logand_ones #t #l b;
match t with
| U1 ->
assert_norm (u1 0 `logand` zeros U1 l == u1 0 /\ u1 1 `logand` zeros U1 l == u1 0);
assert_norm (u1 0 `logand` ones U1 l == u1 0 /\ u1 1 `logand` ones U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logand_commutative #(bits t) (v a) (v b)
| S8 | S16 | S32 | S64 | S128 -> Int.logand_commutative #(bits t) (v a) (v b)
let logand_spec #t #l a b =
match t with
| U1 ->
assert_norm (u1 0 `logand` u1 0 == u1 0 /\ u1 0 `logand` u1 1 == u1 0);
assert_norm (u1 1 `logand` u1 0 == u1 0 /\ u1 1 `logand` u1 1 == u1 1);
assert_norm (0 `logand_v #U1` 0 == 0 /\ 0 `logand_v #U1` 1 == 0);
assert_norm (1 `logand_v #U1` 0 == 0 /\ 1 `logand_v #U1` 1 == 1)
| _ -> ()
let logand_le #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logand 0uy 0uy == 0uy);
assert_norm (UInt8.logand 0uy 1uy == 0uy);
assert_norm (UInt8.logand 1uy 0uy == 0uy);
assert_norm (UInt8.logand 1uy 1uy == 1uy)
| U8 -> UInt.logand_le (UInt.to_uint_t 8 (v a)) (UInt.to_uint_t 8 (v b))
| U16 -> UInt.logand_le (UInt.to_uint_t 16 (v a)) (UInt.to_uint_t 16 (v b))
| U32 -> UInt.logand_le (UInt.to_uint_t 32 (v a)) (UInt.to_uint_t 32 (v b))
| U64 -> UInt.logand_le (UInt.to_uint_t 64 (v a)) (UInt.to_uint_t 64 (v b))
| U128 -> UInt.logand_le (UInt.to_uint_t 128 (v a)) (UInt.to_uint_t 128 (v b))
let logand_mask #t #l a b m =
match t with
| U1 ->
assert_norm (UInt8.logand 0uy 0uy == 0uy);
assert_norm (UInt8.logand 0uy 1uy == 0uy);
assert_norm (UInt8.logand 1uy 0uy == 0uy);
assert_norm (UInt8.logand 1uy 1uy == 1uy)
| U8 -> UInt.logand_mask (UInt.to_uint_t 8 (v a)) m
| U16 -> UInt.logand_mask (UInt.to_uint_t 16 (v a)) m
| U32 -> UInt.logand_mask (UInt.to_uint_t 32 (v a)) m
| U64 -> UInt.logand_mask (UInt.to_uint_t 64 (v a)) m
| U128 -> UInt.logand_mask (UInt.to_uint_t 128 (v a)) m
[@(strict_on_arguments [0])]
let logor #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logor 0uy 0uy == 0uy);
assert_norm (UInt8.logor 0uy 1uy == 1uy);
assert_norm (UInt8.logor 1uy 0uy == 1uy);
assert_norm (UInt8.logor 1uy 1uy == 1uy);
UInt8.logor a b
| U8 -> UInt8.logor a b
| U16 -> UInt16.logor a b
| U32 -> UInt32.logor a b
| U64 -> UInt64.logor a b
| U128 -> UInt128.logor a b
| S8 -> Int8.logor a b
| S16 -> Int16.logor a b
| S32 -> Int32.logor a b
| S64 -> Int64.logor a b
| S128 -> Int128.logor a b
#push-options "--max_fuel 1"
let logor_disjoint #t #l a b m =
if m > 0 then
begin
UInt.logor_disjoint #(bits t) (v b) (v a) m;
UInt.logor_commutative #(bits t) (v b) (v a)
end
else
begin
UInt.logor_commutative #(bits t) (v a) (v b);
UInt.logor_lemma_1 #(bits t) (v b)
end
#pop-options
let logor_zeros #t #l a =
match t with
|U1 -> assert_norm(u1 0 `logor` zeros U1 l == u1 0 /\ u1 1 `logor` zeros U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logor_lemma_1 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.nth_lemma #(bits t) (Int.logor #(bits t) (v a) (Int.zero (bits t))) (v a)
let logor_ones #t #l a =
match t with
|U1 -> assert_norm(u1 0 `logor` ones U1 l == u1 1 /\ u1 1 `logor` ones U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logor_lemma_2 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.nth_lemma (Int.logor #(bits t) (v a) (Int.ones (bits t))) (Int.ones (bits t))
let logor_lemma #t #l a b =
logor_zeros #t #l b;
logor_ones #t #l b;
match t with
| U1 ->
assert_norm(u1 0 `logor` ones U1 l == u1 1 /\ u1 1 `logor` ones U1 l == u1 1);
assert_norm(u1 0 `logor` zeros U1 l == u1 0 /\ u1 1 `logor` zeros U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logor_commutative #(bits t) (v a) (v b)
| S8 | S16 | S32 | S64 | S128 -> Int.nth_lemma #(bits t) (Int.logor #(bits t) (v a) (v b)) (Int.logor #(bits t) (v b) (v a))
let logor_spec #t #l a b =
match t with
| U1 ->
assert_norm(u1 0 `logor` ones U1 l == u1 1 /\ u1 1 `logor` ones U1 l == u1 1);
assert_norm(u1 0 `logor` zeros U1 l == u1 0 /\ u1 1 `logor` zeros U1 l == u1 1);
assert_norm (0 `logor_v #U1` 0 == 0 /\ 0 `logor_v #U1` 1 == 1);
assert_norm (1 `logor_v #U1` 0 == 1 /\ 1 `logor_v #U1` 1 == 1)
| _ -> ()
[@(strict_on_arguments [0])]
let lognot #t #l a =
match t with
| U1 -> UInt8.rem (UInt8.lognot a) 2uy
| U8 -> UInt8.lognot a
| U16 -> UInt16.lognot a
| U32 -> UInt32.lognot a
| U64 -> UInt64.lognot a
| U128 -> UInt128.lognot a
| S8 -> Int8.lognot a
| S16 -> Int16.lognot a
| S32 -> Int32.lognot a
| S64 -> Int64.lognot a
| S128 -> Int128.lognot a
let lognot_lemma #t #l a =
match t with
|U1 -> assert_norm(lognot (u1 0) == u1 1 /\ lognot (u1 1) == u1 0)
| U8 | U16 | U32 | U64 | U128 ->
FStar.UInt.lognot_lemma_1 #(bits t);
UInt.nth_lemma (FStar.UInt.lognot #(bits t) (UInt.ones (bits t))) (UInt.zero (bits t))
| S8 | S16 | S32 | S64 | S128 ->
Int.nth_lemma (FStar.Int.lognot #(bits t) (Int.zero (bits t))) (Int.ones (bits t));
Int.nth_lemma (FStar.Int.lognot #(bits t) (Int.ones (bits t))) (Int.zero (bits t))
let lognot_spec #t #l a =
match t with
| U1 ->
assert_norm(lognot (u1 0) == u1 1 /\ lognot (u1 1) == u1 0);
assert_norm(lognot_v #U1 0 == 1 /\ lognot_v #U1 1 == 0)
| _ -> ()
[@(strict_on_arguments [0])]
let shift_right #t #l a b =
match t with
| U1 -> UInt8.shift_right a b
| U8 -> UInt8.shift_right a b
| U16 -> UInt16.shift_right a b
| U32 -> UInt32.shift_right a b
| U64 -> UInt64.shift_right a b
| U128 -> UInt128.shift_right a b
| S8 -> Int8.shift_arithmetic_right a b
| S16 -> Int16.shift_arithmetic_right a b
| S32 -> Int32.shift_arithmetic_right a b
| S64 -> Int64.shift_arithmetic_right a b
| S128 -> Int128.shift_arithmetic_right a b
val shift_right_value_aux_1: #n:pos{1 < n} -> a:Int.int_t n -> s:nat{n <= s} ->
Lemma (Int.shift_arithmetic_right #n a s = a / pow2 s)
let shift_right_value_aux_1 #n a s =
pow2_le_compat s n;
if a >= 0 then Int.sign_bit_positive a else Int.sign_bit_negative a
#push-options "--z3rlimit 200"
val shift_right_value_aux_2: #n:pos{1 < n} -> a:Int.int_t n ->
Lemma (Int.shift_arithmetic_right #n a 1 = a / 2)
let shift_right_value_aux_2 #n a =
if a >= 0 then
begin
Int.sign_bit_positive a;
UInt.shift_right_value_aux_3 #n a 1
end
else
begin
Int.sign_bit_negative a;
let a1 = Int.to_vec a in
let au = Int.to_uint a in
let sar = Int.shift_arithmetic_right #n a 1 in
let sar1 = Int.to_vec sar in
let sr = UInt.shift_right #n au 1 in
let sr1 = UInt.to_vec sr in
assert (Seq.equal (Seq.slice sar1 1 n) (Seq.slice sr1 1 n));
assert (Seq.equal sar1 (Seq.append (BitVector.ones_vec #1) (Seq.slice sr1 1 n)));
UInt.append_lemma #1 #(n-1) (BitVector.ones_vec #1) (Seq.slice sr1 1 n);
assert (Seq.equal (Seq.slice a1 0 (n-1)) (Seq.slice sar1 1 n));
UInt.slice_left_lemma a1 (n-1);
assert (sar + pow2 n = pow2 (n-1) + (au / 2));
pow2_double_sum (n-1);
assert (sar + pow2 (n-1) = (a + pow2 n) / 2);
pow2_double_mult (n-1);
lemma_div_plus a (pow2 (n-1)) 2;
assert (sar = a / 2)
end
val shift_right_value_aux_3: #n:pos -> a:Int.int_t n -> s:pos{s < n} ->
Lemma (ensures Int.shift_arithmetic_right #n a s = a / pow2 s)
(decreases s)
let rec shift_right_value_aux_3 #n a s =
if s = 1 then
shift_right_value_aux_2 #n a
else
begin
let a1 = Int.to_vec a in
assert (Seq.equal (BitVector.shift_arithmetic_right_vec #n a1 s)
(BitVector.shift_arithmetic_right_vec #n
(BitVector.shift_arithmetic_right_vec #n a1 (s-1)) 1));
assert (Int.shift_arithmetic_right #n a s =
Int.shift_arithmetic_right #n (Int.shift_arithmetic_right #n a (s-1)) 1);
shift_right_value_aux_3 #n a (s-1);
shift_right_value_aux_2 #n (Int.shift_arithmetic_right #n a (s-1));
assert (Int.shift_arithmetic_right #n a s = (a / pow2 (s-1)) / 2);
pow2_double_mult (s-1);
division_multiplication_lemma a (pow2 (s-1)) 2
end
let shift_right_lemma #t #l a b =
match t with
| U1 | U8 | U16 | U32 | U64 | U128 -> ()
| S8 | S16 | S32 | S64 | S128 ->
if v b = 0 then ()
else if v b >= bits t then
shift_right_value_aux_1 #(bits t) (v a) (v b)
else
shift_right_value_aux_3 #(bits t) (v a) (v b)
[@(strict_on_arguments [0])]
let shift_left #t #l a b =
match t with
| U1 -> UInt8.shift_left a b
| U8 -> UInt8.shift_left a b
| U16 -> UInt16.shift_left a b
| U32 -> UInt32.shift_left a b
| U64 -> UInt64.shift_left a b
| U128 -> UInt128.shift_left a b
| S8 -> Int8.shift_left a b
| S16 -> Int16.shift_left a b
| S32 -> Int32.shift_left a b
| S64 -> Int64.shift_left a b
| S128 -> Int128.shift_left a b
#push-options "--max_fuel 1"
let shift_left_lemma #t #l a b = ()
let rotate_right #t #l a b =
logor (shift_right a b) (shift_left a (sub #U32 (size (bits t)) b))
let rotate_left #t #l a b =
logor (shift_left a b) (shift_right a (sub #U32 (size (bits t)) b))
[@(strict_on_arguments [0])]
let ct_abs #t #l a =
match t with
| S8 -> Int8.ct_abs a
| S16 -> Int16.ct_abs a
| S32 -> Int32.ct_abs a
| S64 -> Int64.ct_abs a
#pop-options
[@(strict_on_arguments [0])]
let eq_mask #t a b =
match t with
| U1 -> lognot (logxor a b)
| U8 -> UInt8.eq_mask a b
| U16 -> UInt16.eq_mask a b
| U32 -> UInt32.eq_mask a b
| U64 -> UInt64.eq_mask a b
| U128 -> UInt128.eq_mask a b
| S8 -> Int.Cast.uint8_to_int8 (UInt8.eq_mask (to_u8 a) (to_u8 b))
| S16 -> Int.Cast.uint16_to_int16 (UInt16.eq_mask (to_u16 a) (to_u16 b))
| S32 -> Int.Cast.uint32_to_int32 (UInt32.eq_mask (to_u32 a) (to_u32 b))
| S64 -> Int.Cast.uint64_to_int64 (UInt64.eq_mask (to_u64 a) (to_u64 b))
val eq_mask_lemma_unsigned: #t:inttype{unsigned t} -> a:int_t t SEC -> b:int_t t SEC -> Lemma
(if v a = v b then v (eq_mask a b) == ones_v t
else v (eq_mask a b) == 0)
let eq_mask_lemma_unsigned #t a b =
match t with
| U1 ->
assert_norm (
logxor (u1 0) (u1 0) == u1 0 /\ logxor (u1 0) (u1 1) == u1 1 /\
logxor (u1 1) (u1 0) == u1 1 /\ logxor (u1 1) (u1 1) == u1 0 /\
lognot (u1 1) == u1 0 /\ lognot (u1 0) == u1 1)
| U8 | U16 | U32 | U64 | U128 -> ()
#push-options "--z3rlimit 200"
val eq_mask_lemma_signed: #t:inttype{signed t /\ ~(S128? t)} -> a:int_t t SEC -> b:int_t t SEC -> Lemma
(if v a = v b then v (eq_mask a b) == ones_v t
else v (eq_mask a b) == 0)
let eq_mask_lemma_signed #t a b =
match t with
| S8 ->
begin
assert_norm (pow2 8 = 2 * pow2 7);
if 0 <= v a then modulo_lemma (v a) (pow2 8)
else
begin
modulo_addition_lemma (v a) 1 (pow2 8);
modulo_lemma (v a + pow2 8) (pow2 8)
end
end
| S16 ->
begin
assert_norm (pow2 16 = 2 * pow2 15);
if 0 <= v a then modulo_lemma (v a) (pow2 16)
else
begin
modulo_addition_lemma (v a) 1 (pow2 16);
modulo_lemma (v a + pow2 16) (pow2 16)
end
end
| S32 ->
begin
if 0 <= v a then modulo_lemma (v a) (pow2 32)
else
begin
modulo_addition_lemma (v a) 1 (pow2 32);
modulo_lemma (v a + pow2 32) (pow2 32)
end
end
| S64 ->
begin
if 0 <= v a then modulo_lemma (v a) (pow2 64)
else
begin
modulo_addition_lemma (v a) 1 (pow2 64);
modulo_lemma (v a + pow2 64) (pow2 64)
end
end
#pop-options
let eq_mask_lemma #t a b =
if signed t then eq_mask_lemma_signed a b
else eq_mask_lemma_unsigned a b
let eq_mask_logand_lemma #t a b c =
eq_mask_lemma a b;
logand_zeros c;
logand_ones c;
match t with
| U1 | U8 | U16 | U32 | U64 | U128 -> UInt.logand_commutative #(bits t) (v (eq_mask a b)) (v c)
| S8 | S16 | S32 | S64 -> Int.logand_commutative #(bits t) (v (eq_mask a b)) (v c)
[@(strict_on_arguments [0])]
let neq_mask #t a b = lognot (eq_mask #t a b)
let neq_mask_lemma #t a b =
match t with
| U1 -> assert_norm (lognot (u1 1) == u1 0 /\ lognot (u1 0) == u1 1)
| _ ->
UInt.lognot_lemma_1 #(bits t);
UInt.lognot_self #(bits t) 0
[@(strict_on_arguments [0])]
let gte_mask #t a b =
match t with
| U1 -> logor a (lognot b)
| U8 -> UInt8.gte_mask a b
| U16 -> UInt16.gte_mask a b
| U32 -> UInt32.gte_mask a b
| U64 -> UInt64.gte_mask a b
| U128 -> UInt128.gte_mask a b
let gte_mask_lemma #t a b =
match t with
| U1 ->
begin
assert_norm (
logor (u1 0) (u1 0) == u1 0 /\ logor (u1 1) (u1 1) == u1 1 /\
logor (u1 0) (u1 1) == u1 1 /\ logor (u1 1) (u1 0) == u1 1 /\
lognot (u1 1) == u1 0 /\ lognot (u1 0) == u1 1)
end
| _ -> ()
let gte_mask_logand_lemma #t a b c =
logand_zeros c;
logand_ones c;
match t with
| U1 ->
assert_norm (
logor (u1 0) (u1 0) == u1 0 /\ logor (u1 1) (u1 1) == u1 1 /\
logor (u1 0) (u1 1) == u1 1 /\ logor (u1 1) (u1 0) == u1 1 /\
lognot (u1 1) == u1 0 /\ lognot (u1 0) == u1 1)
| _ -> UInt.logand_commutative #(bits t) (v (gte_mask a b)) (v c)
let lt_mask #t a b = lognot (gte_mask a b)
let lt_mask_lemma #t a b =
assert_norm (lognot (u1 1) == u1 0 /\ lognot (u1 0) == u1 1);
UInt.lognot_lemma_1 #(bits t);
UInt.lognot_self #(bits t) 0
let gt_mask #t a b = logand (gte_mask a b) (neq_mask a b)
let gt_mask_lemma #t a b =
logand_zeros (gte_mask a b);
logand_ones (gte_mask a b)
let lte_mask #t a b = logor (lt_mask a b) (eq_mask a b)
let lte_mask_lemma #t a b =
match t with
| U1 ->
assert_norm (
logor (u1 0) (u1 0) == u1 0 /\ logor (u1 1) (u1 1) == u1 1 /\
logor (u1 0) (u1 1) == u1 1 /\ logor (u1 1) (u1 0) == u1 1)
| U8 | U16 | U32 | U64 | U128 ->
if v a > v b then
UInt.logor_lemma_1 #(bits t) (v (lt_mask a b))
else if v a = v b then
UInt.logor_lemma_2 #(bits t) (v (lt_mask a b))
else
UInt.logor_lemma_1 #(bits t) (v (lt_mask a b))
#push-options "--max_fuel 1"
val mod_mask_value: #t:inttype -> #l:secrecy_level -> m:shiftval t{pow2 (uint_v m) <= maxint t} ->
Lemma (v (mod_mask #t #l m) == pow2 (v m) - 1)
let mod_mask_value #t #l m =
shift_left_lemma (mk_int #t #l 1) m;
pow2_double_mult (bits t - 1);
pow2_lt_compat (bits t) (v m);
small_modulo_lemma_1 (pow2 (v m)) (pow2 (bits t));
small_modulo_lemma_1 (pow2 (v m) - 1) (pow2 (bits t))
let mod_mask_lemma #t #l a m =
mod_mask_value #t #l m;
if unsigned t || 0 <= v a then
if v m = 0 then
UInt.logand_lemma_1 #(bits t) (v a)
else
UInt.logand_mask #(bits t) (v a) (v m)
else
begin
let a1 = v a in
let a2 = v a + pow2 (bits t) in
pow2_plus (bits t - v m) (v m);
pow2_le_compat (bits t - 1) (v m);
lemma_mod_plus a1 (pow2 (bits t - v m)) (pow2 (v m));
if v m = 0 then
UInt.logand_lemma_1 #(bits t) a2
else
UInt.logand_mask #(bits t) a2 (v m)
end
#pop-options
#push-options "--max_fuel 0 --max_ifuel 0 --z3rlimit 1000"
(**
Conditionally subtracts 2^(bits t') from a in constant-time,
so that the result fits in t'; i.e.
b = if a >= 2^(bits t' - 1) then a - 2^(bits t') else a
*)
inline_for_extraction
val conditional_subtract:
#t:inttype{signed t}
-> #l:secrecy_level
-> t':inttype{signed t' /\ bits t' < bits t}
-> a:int_t t l{0 <= v a /\ v a <= pow2 (bits t') - 1}
-> b:int_t t l{v b = v a @%. t'}
let conditional_subtract #t #l t' a =
assert_norm (pow2 7 = 128);
assert_norm (pow2 15 = 32768);
let pow2_bits = shift_left #t #l (mk_int 1) (size (bits t')) in
shift_left_lemma #t #l (mk_int 1) (size (bits t'));
let pow2_bits_minus_one = shift_left #t #l (mk_int 1) (size (bits t' - 1)) in
shift_left_lemma #t #l (mk_int 1) (size (bits t' - 1));
// assert (v pow2_bits == pow2 (bits t'));
// assert (v pow2_bits_minus_one == pow2 (bits t' - 1));
let a2 = a `sub` pow2_bits_minus_one in
let mask = shift_right a2 (size (bits t - 1)) in
shift_right_lemma a2 (size (bits t - 1));
// assert (if v a2 < 0 then v mask = -1 else v mask = 0);
let a3 = a `sub` pow2_bits in
logand_lemma mask pow2_bits;
a3 `add` (mask `logand` pow2_bits)
let cast_mod #t #l t' l' a =
assert_norm (pow2 7 = 128);
assert_norm (pow2 15 = 32768);
if bits t' >= bits t then
cast t' l' a
else
begin
let m = size (bits t') in
mod_mask_lemma a m;
let b = conditional_subtract t' (a `logand` mod_mask m) in
cast t' l' b
end
#pop-options
[@(strict_on_arguments [0])]
let div #t x y =
match t with
| U1 -> UInt8.div x y
| U8 -> UInt8.div x y
| U16 -> UInt16.div x y
| U32 -> UInt32.div x y
| U64 -> UInt64.div x y
| S8 -> Int.pow2_values 8; Int8.div x y
| S16 -> Int.pow2_values 16; Int16.div x y
| S32 -> Int.pow2_values 32; Int32.div x y
| S64 -> Int.pow2_values 64; Int64.div x y
let div_lemma #t a b =
match t with
| U1 | U8 | U16 | U32 | U64 -> ()
| S8 -> Int.pow2_values 8
| S16 -> Int.pow2_values 16
| S32 -> Int.pow2_values 32
| S64 -> Int.pow2_values 64
let mod #t x y =
match t with
| U1 -> UInt8.rem x y
| U8 -> UInt8.rem x y
| U16 -> UInt16.rem x y
| U32 -> UInt32.rem x y
| U64 -> UInt64.rem x y
| S8 -> Int.pow2_values 8; Int8.rem x y
| S16 -> Int.pow2_values 16; Int16.rem x y
| S32 -> Int.pow2_values 32; Int32.rem x y
| S64 -> Int.pow2_values 64; Int64.rem x y
let mod_lemma #t a b =
match t with
| U1 | U8 | U16 | U32 | U64 -> ()
| S8 -> Int.pow2_values 8
| S16 -> Int.pow2_values 16
| S32 -> Int.pow2_values 32
| S64 -> Int.pow2_values 64
let eq #t x y =
x = y
let eq_lemma #t x y = () | false | false | Lib.IntTypes.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 200,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val ne: #t:inttype -> int_t t PUB -> int_t t PUB -> bool | [] | Lib.IntTypes.ne | {
"file_name": "lib/Lib.IntTypes.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | x: Lib.IntTypes.int_t t Lib.IntTypes.PUB -> y: Lib.IntTypes.int_t t Lib.IntTypes.PUB -> Prims.bool | {
"end_col": 28,
"end_line": 1072,
"start_col": 16,
"start_line": 1072
} |
Prims.Tot | val eq_mask: #t:inttype{~(S128? t)} -> int_t t SEC -> int_t t SEC -> int_t t SEC | [
{
"abbrev": false,
"full_module": "FStar.Math.Lemmas",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Mul",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "Lib",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar.Pervasives",
"short_module": null
},
{
"abbrev": false,
"full_module": "Prims",
"short_module": null
},
{
"abbrev": false,
"full_module": "FStar",
"short_module": null
}
] | false | let eq_mask #t a b =
match t with
| U1 -> lognot (logxor a b)
| U8 -> UInt8.eq_mask a b
| U16 -> UInt16.eq_mask a b
| U32 -> UInt32.eq_mask a b
| U64 -> UInt64.eq_mask a b
| U128 -> UInt128.eq_mask a b
| S8 -> Int.Cast.uint8_to_int8 (UInt8.eq_mask (to_u8 a) (to_u8 b))
| S16 -> Int.Cast.uint16_to_int16 (UInt16.eq_mask (to_u16 a) (to_u16 b))
| S32 -> Int.Cast.uint32_to_int32 (UInt32.eq_mask (to_u32 a) (to_u32 b))
| S64 -> Int.Cast.uint64_to_int64 (UInt64.eq_mask (to_u64 a) (to_u64 b)) | val eq_mask: #t:inttype{~(S128? t)} -> int_t t SEC -> int_t t SEC -> int_t t SEC
let eq_mask #t a b = | false | null | false | match t with
| U1 -> lognot (logxor a b)
| U8 -> UInt8.eq_mask a b
| U16 -> UInt16.eq_mask a b
| U32 -> UInt32.eq_mask a b
| U64 -> UInt64.eq_mask a b
| U128 -> UInt128.eq_mask a b
| S8 -> Int.Cast.uint8_to_int8 (UInt8.eq_mask (to_u8 a) (to_u8 b))
| S16 -> Int.Cast.uint16_to_int16 (UInt16.eq_mask (to_u16 a) (to_u16 b))
| S32 -> Int.Cast.uint32_to_int32 (UInt32.eq_mask (to_u32 a) (to_u32 b))
| S64 -> Int.Cast.uint64_to_int64 (UInt64.eq_mask (to_u64 a) (to_u64 b)) | {
"checked_file": "Lib.IntTypes.fst.checked",
"dependencies": [
"prims.fst.checked",
"FStar.UInt8.fsti.checked",
"FStar.UInt64.fsti.checked",
"FStar.UInt32.fsti.checked",
"FStar.UInt16.fsti.checked",
"FStar.UInt128.fsti.checked",
"FStar.UInt.fsti.checked",
"FStar.Seq.fst.checked",
"FStar.Pervasives.Native.fst.checked",
"FStar.Pervasives.fsti.checked",
"FStar.Math.Lemmas.fst.checked",
"FStar.Int8.fsti.checked",
"FStar.Int64.fsti.checked",
"FStar.Int32.fsti.checked",
"FStar.Int16.fsti.checked",
"FStar.Int128.fsti.checked",
"FStar.Int.Cast.Full.fst.checked",
"FStar.Int.Cast.fst.checked",
"FStar.Int.fsti.checked",
"FStar.BitVector.fst.checked"
],
"interface_file": true,
"source_file": "Lib.IntTypes.fst"
} | [
"total"
] | [
"Lib.IntTypes.inttype",
"Prims.l_not",
"Prims.b2t",
"Lib.IntTypes.uu___is_S128",
"Lib.IntTypes.int_t",
"Lib.IntTypes.SEC",
"Lib.IntTypes.lognot",
"Lib.IntTypes.logxor",
"FStar.UInt8.eq_mask",
"FStar.UInt16.eq_mask",
"FStar.UInt32.eq_mask",
"FStar.UInt64.eq_mask",
"FStar.UInt128.eq_mask",
"FStar.Int.Cast.uint8_to_int8",
"Lib.IntTypes.to_u8",
"FStar.Int.Cast.uint16_to_int16",
"Lib.IntTypes.to_u16",
"FStar.Int.Cast.uint32_to_int32",
"Lib.IntTypes.to_u32",
"FStar.Int.Cast.uint64_to_int64",
"Lib.IntTypes.to_u64"
] | [] | module Lib.IntTypes
open FStar.Math.Lemmas
#push-options "--max_fuel 0 --max_ifuel 1 --z3rlimit 200"
let pow2_2 _ = assert_norm (pow2 2 = 4)
let pow2_3 _ = assert_norm (pow2 3 = 8)
let pow2_4 _ = assert_norm (pow2 4 = 16)
let pow2_127 _ = assert_norm (pow2 127 = 0x80000000000000000000000000000000)
let bits_numbytes t = ()
let sec_int_t t = pub_int_t t
let sec_int_v #t u = pub_int_v u
let secret #t x = x
[@(strict_on_arguments [0])]
let mk_int #t #l x =
match t with
| U1 -> UInt8.uint_to_t x
| U8 -> UInt8.uint_to_t x
| U16 -> UInt16.uint_to_t x
| U32 -> UInt32.uint_to_t x
| U64 -> UInt64.uint_to_t x
| U128 -> UInt128.uint_to_t x
| S8 -> Int8.int_to_t x
| S16 -> Int16.int_to_t x
| S32 -> Int32.int_to_t x
| S64 -> Int64.int_to_t x
| S128 -> Int128.int_to_t x
val v_extensionality:
#t:inttype
-> #l:secrecy_level
-> a:int_t t l
-> b:int_t t l
-> Lemma
(requires v a == v b)
(ensures a == b)
let v_extensionality #t #l a b =
match t with
| U1 -> ()
| U8 -> UInt8.v_inj a b
| U16 -> UInt16.v_inj a b
| U32 -> UInt32.v_inj a b
| U64 -> UInt64.v_inj a b
| U128 -> UInt128.v_inj a b
| S8 -> Int8.v_inj a b
| S16 -> Int16.v_inj a b
| S32 -> Int32.v_inj a b
| S64 -> Int64.v_inj a b
| S128 -> Int128.v_inj a b
let v_injective #t #l a =
v_extensionality a (mk_int (v a))
let v_mk_int #t #l n = ()
let u128 n = FStar.UInt128.uint64_to_uint128 (u64 n)
// KaRaMeL will extract this to FStar_Int128_int_to_t, which isn't provided
// We'll need to have FStar.Int128.int64_to_int128 to support int128_t literals
let i128 n =
assert_norm (pow2 (bits S64 - 1) <= pow2 (bits S128 - 1));
sint #S128 #SEC n
let size_to_uint32 x = x
let size_to_uint64 x = Int.Cast.uint32_to_uint64 x
let byte_to_uint8 x = x
let byte_to_int8 x = Int.Cast.uint8_to_int8 x
let op_At_Percent = Int.op_At_Percent
// FStar.UInt128 gets special treatment in KaRaMeL. There is no
// equivalent for FStar.Int128 at the moment, so we use the three
// assumed cast operators below.
//
// Using them will fail at runtime with an informative message.
// The commented-out implementations show that they are realizable.
//
// When support for `FStar.Int128` is added KaRaMeL, these casts must
// be added as special cases. When using builtin compiler support for
// `int128_t`, they can be implemented directly as C casts without
// undefined or implementation-defined behaviour.
assume
val uint128_to_int128: a:UInt128.t{v a <= maxint S128} -> b:Int128.t{Int128.v b == UInt128.v a}
//let uint128_to_int128 a = Int128.int_to_t (v a)
assume
val int128_to_uint128: a:Int128.t -> b:UInt128.t{UInt128.v b == Int128.v a % pow2 128}
//let int128_to_uint128 a = mk_int (v a % pow2 128)
assume
val int64_to_int128: a:Int64.t -> b:Int128.t{Int128.v b == Int64.v a}
//let int64_to_int128 a = Int128.int_to_t (v a)
val uint64_to_int128: a:UInt64.t -> b:Int128.t{Int128.v b == UInt64.v a}
let uint64_to_int128 a = uint128_to_int128 (Int.Cast.Full.uint64_to_uint128 a)
val int64_to_uint128: a:Int64.t -> b:UInt128.t{UInt128.v b == Int64.v a % pow2 128}
let int64_to_uint128 a = int128_to_uint128 (int64_to_int128 a)
val int128_to_uint64: a:Int128.t -> b:UInt64.t{UInt64.v b == Int128.v a % pow2 64}
let int128_to_uint64 a = Int.Cast.Full.uint128_to_uint64 (int128_to_uint128 a)
#push-options "--z3rlimit 1000"
[@(strict_on_arguments [0;2])]
let cast #t #l t' l' u =
assert_norm (pow2 8 = 2 * pow2 7);
assert_norm (pow2 16 = 2 * pow2 15);
assert_norm (pow2 64 * pow2 64 = pow2 128);
assert_norm (pow2 16 * pow2 48 = pow2 64);
assert_norm (pow2 8 * pow2 56 = pow2 64);
assert_norm (pow2 32 * pow2 32 = pow2 64);
modulo_modulo_lemma (v u) (pow2 32) (pow2 32);
modulo_modulo_lemma (v u) (pow2 64) (pow2 64);
modulo_modulo_lemma (v u) (pow2 128) (pow2 64);
modulo_modulo_lemma (v u) (pow2 16) (pow2 48);
modulo_modulo_lemma (v u) (pow2 8) (pow2 56);
let open FStar.Int.Cast in
let open FStar.Int.Cast.Full in
match t, t' with
| U1, U1 -> u
| U1, U8 -> u
| U1, U16 -> uint8_to_uint16 u
| U1, U32 -> uint8_to_uint32 u
| U1, U64 -> uint8_to_uint64 u
| U1, U128 -> UInt128.uint64_to_uint128 (uint8_to_uint64 u)
| U1, S8 -> uint8_to_int8 u
| U1, S16 -> uint8_to_int16 u
| U1, S32 -> uint8_to_int32 u
| U1, S64 -> uint8_to_int64 u
| U1, S128 -> uint64_to_int128 (uint8_to_uint64 u)
| U8, U1 -> UInt8.rem u 2uy
| U8, U8 -> u
| U8, U16 -> uint8_to_uint16 u
| U8, U32 -> uint8_to_uint32 u
| U8, U64 -> uint8_to_uint64 u
| U8, U128 -> UInt128.uint64_to_uint128 (uint8_to_uint64 u)
| U8, S8 -> uint8_to_int8 u
| U8, S16 -> uint8_to_int16 u
| U8, S32 -> uint8_to_int32 u
| U8, S64 -> uint8_to_int64 u
| U8, S128 -> uint64_to_int128 (uint8_to_uint64 u)
| U16, U1 -> UInt8.rem (uint16_to_uint8 u) 2uy
| U16, U8 -> uint16_to_uint8 u
| U16, U16 -> u
| U16, U32 -> uint16_to_uint32 u
| U16, U64 -> uint16_to_uint64 u
| U16, U128 -> UInt128.uint64_to_uint128 (uint16_to_uint64 u)
| U16, S8 -> uint16_to_int8 u
| U16, S16 -> uint16_to_int16 u
| U16, S32 -> uint16_to_int32 u
| U16, S64 -> uint16_to_int64 u
| U16, S128 -> uint64_to_int128 (uint16_to_uint64 u)
| U32, U1 -> UInt8.rem (uint32_to_uint8 u) 2uy
| U32, U8 -> uint32_to_uint8 u
| U32, U16 -> uint32_to_uint16 u
| U32, U32 -> u
| U32, U64 -> uint32_to_uint64 u
| U32, U128 -> UInt128.uint64_to_uint128 (uint32_to_uint64 u)
| U32, S8 -> uint32_to_int8 u
| U32, S16 -> uint32_to_int16 u
| U32, S32 -> uint32_to_int32 u
| U32, S64 -> uint32_to_int64 u
| U32, S128 -> uint64_to_int128 (uint32_to_uint64 u)
| U64, U1 -> UInt8.rem (uint64_to_uint8 u) 2uy
| U64, U8 -> uint64_to_uint8 u
| U64, U16 -> uint64_to_uint16 u
| U64, U32 -> uint64_to_uint32 u
| U64, U64 -> u
| U64, U128 -> UInt128.uint64_to_uint128 u
| U64, S8 -> uint64_to_int8 u
| U64, S16 -> uint64_to_int16 u
| U64, S32 -> uint64_to_int32 u
| U64, S64 -> uint64_to_int64 u
| U64, S128 -> uint64_to_int128 u
| U128, U1 -> UInt8.rem (uint64_to_uint8 (uint128_to_uint64 u)) 2uy
| U128, U8 -> uint64_to_uint8 (UInt128.uint128_to_uint64 u)
| U128, U16 -> uint64_to_uint16 (UInt128.uint128_to_uint64 u)
| U128, U32 -> uint64_to_uint32 (UInt128.uint128_to_uint64 u)
| U128, U64 -> UInt128.uint128_to_uint64 u
| U128, U128 -> u
| U128, S8 -> uint64_to_int8 (UInt128.uint128_to_uint64 u)
| U128, S16 -> uint64_to_int16 (UInt128.uint128_to_uint64 u)
| U128, S32 -> uint64_to_int32 (UInt128.uint128_to_uint64 u)
| U128, S64 -> uint64_to_int64 (UInt128.uint128_to_uint64 u)
| U128, S128 -> uint128_to_int128 u
| S8, U1 -> UInt8.rem (int8_to_uint8 u) 2uy
| S8, U8 -> int8_to_uint8 u
| S8, U16 -> int8_to_uint16 u
| S8, U32 -> int8_to_uint32 u
| S8, U64 -> int8_to_uint64 u
| S8, U128 -> int64_to_uint128 (int8_to_int64 u)
| S8, S8 -> u
| S8, S16 -> int8_to_int16 u
| S8, S32 -> int8_to_int32 u
| S8, S64 -> int8_to_int64 u
| S8, S128 -> int64_to_int128 (int8_to_int64 u)
| S16, U1 -> UInt8.rem (int16_to_uint8 u) 2uy
| S16, U8 -> int16_to_uint8 u
| S16, U16 -> int16_to_uint16 u
| S16, U32 -> int16_to_uint32 u
| S16, U64 -> int16_to_uint64 u
| S16, U128 -> int64_to_uint128 (int16_to_int64 u)
| S16, S8 -> int16_to_int8 u
| S16, S16 -> u
| S16, S32 -> int16_to_int32 u
| S16, S64 -> int16_to_int64 u
| S16, S128 -> int64_to_int128 (int16_to_int64 u)
| S32, U1 -> UInt8.rem (int32_to_uint8 u) 2uy
| S32, U8 -> int32_to_uint8 u
| S32, U16 -> int32_to_uint16 u
| S32, U32 -> int32_to_uint32 u
| S32, U64 -> int32_to_uint64 u
| S32, U128 -> int64_to_uint128 (int32_to_int64 u)
| S32, S8 -> int32_to_int8 u
| S32, S16 -> int32_to_int16 u
| S32, S32 -> u
| S32, S64 -> int32_to_int64 u
| S32, S128 -> int64_to_int128 (int32_to_int64 u)
| S64, U1 -> UInt8.rem (int64_to_uint8 u) 2uy
| S64, U8 -> int64_to_uint8 u
| S64, U16 -> int64_to_uint16 u
| S64, U32 -> int64_to_uint32 u
| S64, U64 -> int64_to_uint64 u
| S64, U128 -> int64_to_uint128 u
| S64, S8 -> int64_to_int8 u
| S64, S16 -> int64_to_int16 u
| S64, S32 -> int64_to_int32 u
| S64, S64 -> u
| S64, S128 -> int64_to_int128 u
| S128, U1 -> UInt8.rem (uint64_to_uint8 (int128_to_uint64 u)) 2uy
| S128, U8 -> uint64_to_uint8 (int128_to_uint64 u)
| S128, U16 -> uint64_to_uint16 (int128_to_uint64 u)
| S128, U32 -> uint64_to_uint32 (int128_to_uint64 u)
| S128, U64 -> int128_to_uint64 u
| S128, U128 -> int128_to_uint128 u
| S128, S8 -> uint64_to_int8 (int128_to_uint64 u)
| S128, S16 -> uint64_to_int16 (int128_to_uint64 u)
| S128, S32 -> uint64_to_int32 (int128_to_uint64 u)
| S128, S64 -> uint64_to_int64 (int128_to_uint64 u)
| S128, S128 -> u
#pop-options
[@(strict_on_arguments [0])]
let ones t l =
match t with
| U1 -> 0x1uy
| U8 -> 0xFFuy
| U16 -> 0xFFFFus
| U32 -> 0xFFFFFFFFul
| U64 -> 0xFFFFFFFFFFFFFFFFuL
| U128 ->
let x = UInt128.uint64_to_uint128 0xFFFFFFFFFFFFFFFFuL in
let y = (UInt128.shift_left x 64ul) `UInt128.add` x in
assert_norm (UInt128.v y == pow2 128 - 1);
y
| _ -> mk_int (-1)
let zeros t l = mk_int 0
[@(strict_on_arguments [0])]
let add_mod #t #l a b =
match t with
| U1 -> UInt8.rem (UInt8.add_mod a b) 2uy
| U8 -> UInt8.add_mod a b
| U16 -> UInt16.add_mod a b
| U32 -> UInt32.add_mod a b
| U64 -> UInt64.add_mod a b
| U128 -> UInt128.add_mod a b
let add_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let add #t #l a b =
match t with
| U1 -> UInt8.add a b
| U8 -> UInt8.add a b
| U16 -> UInt16.add a b
| U32 -> UInt32.add a b
| U64 -> UInt64.add a b
| U128 -> UInt128.add a b
| S8 -> Int8.add a b
| S16 -> Int16.add a b
| S32 -> Int32.add a b
| S64 -> Int64.add a b
| S128 -> Int128.add a b
let add_lemma #t #l a b = ()
#push-options "--max_fuel 1"
[@(strict_on_arguments [0])]
let incr #t #l a =
match t with
| U1 -> UInt8.add a 1uy
| U8 -> UInt8.add a 1uy
| U16 -> UInt16.add a 1us
| U32 -> UInt32.add a 1ul
| U64 -> UInt64.add a 1uL
| U128 -> UInt128.add a (UInt128.uint_to_t 1)
| S8 -> Int8.add a 1y
| S16 -> Int16.add a 1s
| S32 -> Int32.add a 1l
| S64 -> Int64.add a 1L
| S128 -> Int128.add a (Int128.int_to_t 1)
let incr_lemma #t #l a = ()
#pop-options
[@(strict_on_arguments [0])]
let mul_mod #t #l a b =
match t with
| U1 -> UInt8.mul_mod a b
| U8 -> UInt8.mul_mod a b
| U16 -> UInt16.mul_mod a b
| U32 -> UInt32.mul_mod a b
| U64 -> UInt64.mul_mod a b
let mul_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let mul #t #l a b =
match t with
| U1 -> UInt8.mul a b
| U8 -> UInt8.mul a b
| U16 -> UInt16.mul a b
| U32 -> UInt32.mul a b
| U64 -> UInt64.mul a b
| S8 -> Int8.mul a b
| S16 -> Int16.mul a b
| S32 -> Int32.mul a b
| S64 -> Int64.mul a b
let mul_lemma #t #l a b = ()
let mul64_wide a b = UInt128.mul_wide a b
let mul64_wide_lemma a b = ()
let mul_s64_wide a b = Int128.mul_wide a b
let mul_s64_wide_lemma a b = ()
[@(strict_on_arguments [0])]
let sub_mod #t #l a b =
match t with
| U1 -> UInt8.rem (UInt8.sub_mod a b) 2uy
| U8 -> UInt8.sub_mod a b
| U16 -> UInt16.sub_mod a b
| U32 -> UInt32.sub_mod a b
| U64 -> UInt64.sub_mod a b
| U128 -> UInt128.sub_mod a b
let sub_mod_lemma #t #l a b = ()
[@(strict_on_arguments [0])]
let sub #t #l a b =
match t with
| U1 -> UInt8.sub a b
| U8 -> UInt8.sub a b
| U16 -> UInt16.sub a b
| U32 -> UInt32.sub a b
| U64 -> UInt64.sub a b
| U128 -> UInt128.sub a b
| S8 -> Int8.sub a b
| S16 -> Int16.sub a b
| S32 -> Int32.sub a b
| S64 -> Int64.sub a b
| S128 -> Int128.sub a b
let sub_lemma #t #l a b = ()
#push-options "--max_fuel 1"
[@(strict_on_arguments [0])]
let decr #t #l a =
match t with
| U1 -> UInt8.sub a 1uy
| U8 -> UInt8.sub a 1uy
| U16 -> UInt16.sub a 1us
| U32 -> UInt32.sub a 1ul
| U64 -> UInt64.sub a 1uL
| U128 -> UInt128.sub a (UInt128.uint_to_t 1)
| S8 -> Int8.sub a 1y
| S16 -> Int16.sub a 1s
| S32 -> Int32.sub a 1l
| S64 -> Int64.sub a 1L
| S128 -> Int128.sub a (Int128.int_to_t 1)
let decr_lemma #t #l a = ()
#pop-options
[@(strict_on_arguments [0])]
let logxor #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logxor 0uy 0uy == 0uy);
assert_norm (UInt8.logxor 0uy 1uy == 1uy);
assert_norm (UInt8.logxor 1uy 0uy == 1uy);
assert_norm (UInt8.logxor 1uy 1uy == 0uy);
UInt8.logxor a b
| U8 -> UInt8.logxor a b
| U16 -> UInt16.logxor a b
| U32 -> UInt32.logxor a b
| U64 -> UInt64.logxor a b
| U128 -> UInt128.logxor a b
| S8 -> Int8.logxor a b
| S16 -> Int16.logxor a b
| S32 -> Int32.logxor a b
| S64 -> Int64.logxor a b
| S128 -> Int128.logxor a b
#push-options "--max_fuel 1"
val logxor_lemma_: #t:inttype -> #l:secrecy_level -> a:int_t t l -> b:int_t t l -> Lemma
(v (a `logxor` (a `logxor` b)) == v b)
let logxor_lemma_ #t #l a b =
match t with
| U1 | U8 | U16 | U32 | U64 | U128 ->
UInt.logxor_associative #(bits t) (v a) (v a) (v b);
UInt.logxor_self #(bits t) (v a);
UInt.logxor_commutative #(bits t) 0 (v b);
UInt.logxor_lemma_1 #(bits t) (v b)
| S8 | S16 | S32 | S64 | S128 ->
Int.logxor_associative #(bits t) (v a) (v a) (v b);
Int.logxor_self #(bits t) (v a);
Int.logxor_commutative #(bits t) 0 (v b);
Int.logxor_lemma_1 #(bits t) (v b)
let logxor_lemma #t #l a b =
logxor_lemma_ #t a b;
v_extensionality (logxor a (logxor a b)) b;
begin
match t with
| U1 | U8 | U16 | U32 | U64 | U128 -> UInt.logxor_commutative #(bits t) (v a) (v b)
| S8 | S16 | S32 | S64 | S128 -> Int.logxor_commutative #(bits t) (v a) (v b)
end;
v_extensionality (logxor a (logxor b a)) b;
begin
match t with
| U1 | U8 | U16 | U32 | U64 | U128 -> UInt.logxor_lemma_1 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.logxor_lemma_1 #(bits t) (v a)
end;
v_extensionality (logxor a (mk_int #t #l 0)) a
let logxor_lemma1 #t #l a b =
match v a, v b with
| _, 0 ->
UInt.logxor_lemma_1 #(bits t) (v a)
| 0, _ ->
UInt.logxor_commutative #(bits t) (v a) (v b);
UInt.logxor_lemma_1 #(bits t) (v b)
| 1, 1 ->
v_extensionality a b;
UInt.logxor_self #(bits t) (v a)
let logxor_spec #t #l a b =
match t with
| U1 ->
assert_norm (u1 0 `logxor` u1 0 == u1 0 /\ u1 0 `logxor` u1 1 == u1 1);
assert_norm (u1 1 `logxor` u1 0 == u1 1 /\ u1 1 `logxor` u1 1 == u1 0);
assert_norm (0 `logxor_v #U1` 0 == 0 /\ 0 `logxor_v #U1` 1 == 1);
assert_norm (1 `logxor_v #U1` 0 == 1 /\ 1 `logxor_v #U1` 1 == 0)
| _ -> ()
#pop-options
[@(strict_on_arguments [0])]
let logand #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logand 0uy 0uy == 0uy);
assert_norm (UInt8.logand 0uy 1uy == 0uy);
assert_norm (UInt8.logand 1uy 0uy == 0uy);
assert_norm (UInt8.logand 1uy 1uy == 1uy);
UInt8.logand a b
| U8 -> UInt8.logand a b
| U16 -> UInt16.logand a b
| U32 -> UInt32.logand a b
| U64 -> UInt64.logand a b
| U128 -> UInt128.logand a b
| S8 -> Int8.logand a b
| S16 -> Int16.logand a b
| S32 -> Int32.logand a b
| S64 -> Int64.logand a b
| S128 -> Int128.logand a b
let logand_zeros #t #l a =
match t with
| U1 -> assert_norm (u1 0 `logand` zeros U1 l == u1 0 /\ u1 1 `logand` zeros U1 l == u1 0)
| U8 | U16 | U32 | U64 | U128 -> UInt.logand_lemma_1 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.logand_lemma_1 #(bits t) (v a)
let logand_ones #t #l a =
match t with
| U1 -> assert_norm (u1 0 `logand` ones U1 l == u1 0 /\ u1 1 `logand` ones U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logand_lemma_2 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.logand_lemma_2 #(bits t) (v a)
let logand_lemma #t #l a b =
logand_zeros #t #l b;
logand_ones #t #l b;
match t with
| U1 ->
assert_norm (u1 0 `logand` zeros U1 l == u1 0 /\ u1 1 `logand` zeros U1 l == u1 0);
assert_norm (u1 0 `logand` ones U1 l == u1 0 /\ u1 1 `logand` ones U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logand_commutative #(bits t) (v a) (v b)
| S8 | S16 | S32 | S64 | S128 -> Int.logand_commutative #(bits t) (v a) (v b)
let logand_spec #t #l a b =
match t with
| U1 ->
assert_norm (u1 0 `logand` u1 0 == u1 0 /\ u1 0 `logand` u1 1 == u1 0);
assert_norm (u1 1 `logand` u1 0 == u1 0 /\ u1 1 `logand` u1 1 == u1 1);
assert_norm (0 `logand_v #U1` 0 == 0 /\ 0 `logand_v #U1` 1 == 0);
assert_norm (1 `logand_v #U1` 0 == 0 /\ 1 `logand_v #U1` 1 == 1)
| _ -> ()
let logand_le #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logand 0uy 0uy == 0uy);
assert_norm (UInt8.logand 0uy 1uy == 0uy);
assert_norm (UInt8.logand 1uy 0uy == 0uy);
assert_norm (UInt8.logand 1uy 1uy == 1uy)
| U8 -> UInt.logand_le (UInt.to_uint_t 8 (v a)) (UInt.to_uint_t 8 (v b))
| U16 -> UInt.logand_le (UInt.to_uint_t 16 (v a)) (UInt.to_uint_t 16 (v b))
| U32 -> UInt.logand_le (UInt.to_uint_t 32 (v a)) (UInt.to_uint_t 32 (v b))
| U64 -> UInt.logand_le (UInt.to_uint_t 64 (v a)) (UInt.to_uint_t 64 (v b))
| U128 -> UInt.logand_le (UInt.to_uint_t 128 (v a)) (UInt.to_uint_t 128 (v b))
let logand_mask #t #l a b m =
match t with
| U1 ->
assert_norm (UInt8.logand 0uy 0uy == 0uy);
assert_norm (UInt8.logand 0uy 1uy == 0uy);
assert_norm (UInt8.logand 1uy 0uy == 0uy);
assert_norm (UInt8.logand 1uy 1uy == 1uy)
| U8 -> UInt.logand_mask (UInt.to_uint_t 8 (v a)) m
| U16 -> UInt.logand_mask (UInt.to_uint_t 16 (v a)) m
| U32 -> UInt.logand_mask (UInt.to_uint_t 32 (v a)) m
| U64 -> UInt.logand_mask (UInt.to_uint_t 64 (v a)) m
| U128 -> UInt.logand_mask (UInt.to_uint_t 128 (v a)) m
[@(strict_on_arguments [0])]
let logor #t #l a b =
match t with
| U1 ->
assert_norm (UInt8.logor 0uy 0uy == 0uy);
assert_norm (UInt8.logor 0uy 1uy == 1uy);
assert_norm (UInt8.logor 1uy 0uy == 1uy);
assert_norm (UInt8.logor 1uy 1uy == 1uy);
UInt8.logor a b
| U8 -> UInt8.logor a b
| U16 -> UInt16.logor a b
| U32 -> UInt32.logor a b
| U64 -> UInt64.logor a b
| U128 -> UInt128.logor a b
| S8 -> Int8.logor a b
| S16 -> Int16.logor a b
| S32 -> Int32.logor a b
| S64 -> Int64.logor a b
| S128 -> Int128.logor a b
#push-options "--max_fuel 1"
let logor_disjoint #t #l a b m =
if m > 0 then
begin
UInt.logor_disjoint #(bits t) (v b) (v a) m;
UInt.logor_commutative #(bits t) (v b) (v a)
end
else
begin
UInt.logor_commutative #(bits t) (v a) (v b);
UInt.logor_lemma_1 #(bits t) (v b)
end
#pop-options
let logor_zeros #t #l a =
match t with
|U1 -> assert_norm(u1 0 `logor` zeros U1 l == u1 0 /\ u1 1 `logor` zeros U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logor_lemma_1 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.nth_lemma #(bits t) (Int.logor #(bits t) (v a) (Int.zero (bits t))) (v a)
let logor_ones #t #l a =
match t with
|U1 -> assert_norm(u1 0 `logor` ones U1 l == u1 1 /\ u1 1 `logor` ones U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logor_lemma_2 #(bits t) (v a)
| S8 | S16 | S32 | S64 | S128 -> Int.nth_lemma (Int.logor #(bits t) (v a) (Int.ones (bits t))) (Int.ones (bits t))
let logor_lemma #t #l a b =
logor_zeros #t #l b;
logor_ones #t #l b;
match t with
| U1 ->
assert_norm(u1 0 `logor` ones U1 l == u1 1 /\ u1 1 `logor` ones U1 l == u1 1);
assert_norm(u1 0 `logor` zeros U1 l == u1 0 /\ u1 1 `logor` zeros U1 l == u1 1)
| U8 | U16 | U32 | U64 | U128 -> UInt.logor_commutative #(bits t) (v a) (v b)
| S8 | S16 | S32 | S64 | S128 -> Int.nth_lemma #(bits t) (Int.logor #(bits t) (v a) (v b)) (Int.logor #(bits t) (v b) (v a))
let logor_spec #t #l a b =
match t with
| U1 ->
assert_norm(u1 0 `logor` ones U1 l == u1 1 /\ u1 1 `logor` ones U1 l == u1 1);
assert_norm(u1 0 `logor` zeros U1 l == u1 0 /\ u1 1 `logor` zeros U1 l == u1 1);
assert_norm (0 `logor_v #U1` 0 == 0 /\ 0 `logor_v #U1` 1 == 1);
assert_norm (1 `logor_v #U1` 0 == 1 /\ 1 `logor_v #U1` 1 == 1)
| _ -> ()
[@(strict_on_arguments [0])]
let lognot #t #l a =
match t with
| U1 -> UInt8.rem (UInt8.lognot a) 2uy
| U8 -> UInt8.lognot a
| U16 -> UInt16.lognot a
| U32 -> UInt32.lognot a
| U64 -> UInt64.lognot a
| U128 -> UInt128.lognot a
| S8 -> Int8.lognot a
| S16 -> Int16.lognot a
| S32 -> Int32.lognot a
| S64 -> Int64.lognot a
| S128 -> Int128.lognot a
let lognot_lemma #t #l a =
match t with
|U1 -> assert_norm(lognot (u1 0) == u1 1 /\ lognot (u1 1) == u1 0)
| U8 | U16 | U32 | U64 | U128 ->
FStar.UInt.lognot_lemma_1 #(bits t);
UInt.nth_lemma (FStar.UInt.lognot #(bits t) (UInt.ones (bits t))) (UInt.zero (bits t))
| S8 | S16 | S32 | S64 | S128 ->
Int.nth_lemma (FStar.Int.lognot #(bits t) (Int.zero (bits t))) (Int.ones (bits t));
Int.nth_lemma (FStar.Int.lognot #(bits t) (Int.ones (bits t))) (Int.zero (bits t))
let lognot_spec #t #l a =
match t with
| U1 ->
assert_norm(lognot (u1 0) == u1 1 /\ lognot (u1 1) == u1 0);
assert_norm(lognot_v #U1 0 == 1 /\ lognot_v #U1 1 == 0)
| _ -> ()
[@(strict_on_arguments [0])]
let shift_right #t #l a b =
match t with
| U1 -> UInt8.shift_right a b
| U8 -> UInt8.shift_right a b
| U16 -> UInt16.shift_right a b
| U32 -> UInt32.shift_right a b
| U64 -> UInt64.shift_right a b
| U128 -> UInt128.shift_right a b
| S8 -> Int8.shift_arithmetic_right a b
| S16 -> Int16.shift_arithmetic_right a b
| S32 -> Int32.shift_arithmetic_right a b
| S64 -> Int64.shift_arithmetic_right a b
| S128 -> Int128.shift_arithmetic_right a b
val shift_right_value_aux_1: #n:pos{1 < n} -> a:Int.int_t n -> s:nat{n <= s} ->
Lemma (Int.shift_arithmetic_right #n a s = a / pow2 s)
let shift_right_value_aux_1 #n a s =
pow2_le_compat s n;
if a >= 0 then Int.sign_bit_positive a else Int.sign_bit_negative a
#push-options "--z3rlimit 200"
val shift_right_value_aux_2: #n:pos{1 < n} -> a:Int.int_t n ->
Lemma (Int.shift_arithmetic_right #n a 1 = a / 2)
let shift_right_value_aux_2 #n a =
if a >= 0 then
begin
Int.sign_bit_positive a;
UInt.shift_right_value_aux_3 #n a 1
end
else
begin
Int.sign_bit_negative a;
let a1 = Int.to_vec a in
let au = Int.to_uint a in
let sar = Int.shift_arithmetic_right #n a 1 in
let sar1 = Int.to_vec sar in
let sr = UInt.shift_right #n au 1 in
let sr1 = UInt.to_vec sr in
assert (Seq.equal (Seq.slice sar1 1 n) (Seq.slice sr1 1 n));
assert (Seq.equal sar1 (Seq.append (BitVector.ones_vec #1) (Seq.slice sr1 1 n)));
UInt.append_lemma #1 #(n-1) (BitVector.ones_vec #1) (Seq.slice sr1 1 n);
assert (Seq.equal (Seq.slice a1 0 (n-1)) (Seq.slice sar1 1 n));
UInt.slice_left_lemma a1 (n-1);
assert (sar + pow2 n = pow2 (n-1) + (au / 2));
pow2_double_sum (n-1);
assert (sar + pow2 (n-1) = (a + pow2 n) / 2);
pow2_double_mult (n-1);
lemma_div_plus a (pow2 (n-1)) 2;
assert (sar = a / 2)
end
val shift_right_value_aux_3: #n:pos -> a:Int.int_t n -> s:pos{s < n} ->
Lemma (ensures Int.shift_arithmetic_right #n a s = a / pow2 s)
(decreases s)
let rec shift_right_value_aux_3 #n a s =
if s = 1 then
shift_right_value_aux_2 #n a
else
begin
let a1 = Int.to_vec a in
assert (Seq.equal (BitVector.shift_arithmetic_right_vec #n a1 s)
(BitVector.shift_arithmetic_right_vec #n
(BitVector.shift_arithmetic_right_vec #n a1 (s-1)) 1));
assert (Int.shift_arithmetic_right #n a s =
Int.shift_arithmetic_right #n (Int.shift_arithmetic_right #n a (s-1)) 1);
shift_right_value_aux_3 #n a (s-1);
shift_right_value_aux_2 #n (Int.shift_arithmetic_right #n a (s-1));
assert (Int.shift_arithmetic_right #n a s = (a / pow2 (s-1)) / 2);
pow2_double_mult (s-1);
division_multiplication_lemma a (pow2 (s-1)) 2
end
let shift_right_lemma #t #l a b =
match t with
| U1 | U8 | U16 | U32 | U64 | U128 -> ()
| S8 | S16 | S32 | S64 | S128 ->
if v b = 0 then ()
else if v b >= bits t then
shift_right_value_aux_1 #(bits t) (v a) (v b)
else
shift_right_value_aux_3 #(bits t) (v a) (v b)
[@(strict_on_arguments [0])]
let shift_left #t #l a b =
match t with
| U1 -> UInt8.shift_left a b
| U8 -> UInt8.shift_left a b
| U16 -> UInt16.shift_left a b
| U32 -> UInt32.shift_left a b
| U64 -> UInt64.shift_left a b
| U128 -> UInt128.shift_left a b
| S8 -> Int8.shift_left a b
| S16 -> Int16.shift_left a b
| S32 -> Int32.shift_left a b
| S64 -> Int64.shift_left a b
| S128 -> Int128.shift_left a b
#push-options "--max_fuel 1"
let shift_left_lemma #t #l a b = ()
let rotate_right #t #l a b =
logor (shift_right a b) (shift_left a (sub #U32 (size (bits t)) b))
let rotate_left #t #l a b =
logor (shift_left a b) (shift_right a (sub #U32 (size (bits t)) b))
[@(strict_on_arguments [0])]
let ct_abs #t #l a =
match t with
| S8 -> Int8.ct_abs a
| S16 -> Int16.ct_abs a
| S32 -> Int32.ct_abs a
| S64 -> Int64.ct_abs a
#pop-options
[@(strict_on_arguments [0])] | false | false | Lib.IntTypes.fst | {
"detail_errors": false,
"detail_hint_replay": false,
"initial_fuel": 2,
"initial_ifuel": 1,
"max_fuel": 0,
"max_ifuel": 1,
"no_plugins": false,
"no_smt": false,
"no_tactics": false,
"quake_hi": 1,
"quake_keep": false,
"quake_lo": 1,
"retry": false,
"reuse_hint_for": null,
"smtencoding_elim_box": false,
"smtencoding_l_arith_repr": "boxwrap",
"smtencoding_nl_arith_repr": "boxwrap",
"smtencoding_valid_elim": false,
"smtencoding_valid_intro": true,
"tcnorm": true,
"trivial_pre_for_unannotated_effectful_fns": false,
"z3cliopt": [],
"z3refresh": false,
"z3rlimit": 200,
"z3rlimit_factor": 1,
"z3seed": 0,
"z3smtopt": [],
"z3version": "4.8.5"
} | null | val eq_mask: #t:inttype{~(S128? t)} -> int_t t SEC -> int_t t SEC -> int_t t SEC | [] | Lib.IntTypes.eq_mask | {
"file_name": "lib/Lib.IntTypes.fst",
"git_rev": "12c5e9539c7e3c366c26409d3b86493548c4483e",
"git_url": "https://github.com/hacl-star/hacl-star.git",
"project_name": "hacl-star"
} | a: Lib.IntTypes.int_t t Lib.IntTypes.SEC -> b: Lib.IntTypes.int_t t Lib.IntTypes.SEC
-> Lib.IntTypes.int_t t Lib.IntTypes.SEC | {
"end_col": 75,
"end_line": 802,
"start_col": 2,
"start_line": 792
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.